/****************************************/
/* File: mv643xx.h                      */
/****************************************/

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * mv643xx.h - MV-643XX Internal registers definition file.
 *
 * Copyright 2002 Momentum Computer, Inc.
 * Author: Matthew Dharm <mdharm@momenco.com>
 * Copyright 2002 GALILEO TECHNOLOGY, LTD.
 */
#ifndef __ASM_MV643XX_H
#define __ASM_MV643XX_H

#include <asm/types.h>
#include <linux/mv643xx_eth.h>
#include <linux/mv643xx_i2c.h>

/****************************************/
/* Processor Address Space */
/****************************************/

/* DDR SDRAM BAR and size registers */
#define MV64340_CS_0_BASE_ADDR 0x008
#define MV64340_CS_0_SIZE 0x010
#define MV64340_CS_1_BASE_ADDR 0x208
#define MV64340_CS_1_SIZE 0x210
#define MV64340_CS_2_BASE_ADDR 0x018
#define MV64340_CS_2_SIZE 0x020
#define MV64340_CS_3_BASE_ADDR 0x218
#define MV64340_CS_3_SIZE 0x220

/* Devices BAR and size registers */
#define MV64340_DEV_CS0_BASE_ADDR 0x028
#define MV64340_DEV_CS0_SIZE 0x030
#define MV64340_DEV_CS1_BASE_ADDR 0x228
#define MV64340_DEV_CS1_SIZE 0x230
#define MV64340_DEV_CS2_BASE_ADDR 0x248
#define MV64340_DEV_CS2_SIZE 0x250
#define MV64340_DEV_CS3_BASE_ADDR 0x038
#define MV64340_DEV_CS3_SIZE 0x040
#define MV64340_BOOTCS_BASE_ADDR 0x238
#define MV64340_BOOTCS_SIZE 0x240

/* PCI 0 BAR and size registers */
#define MV64340_PCI_0_IO_BASE_ADDR 0x048
#define MV64340_PCI_0_IO_SIZE 0x050
#define MV64340_PCI_0_MEMORY0_BASE_ADDR 0x058
#define MV64340_PCI_0_MEMORY0_SIZE 0x060
#define MV64340_PCI_0_MEMORY1_BASE_ADDR 0x080
#define MV64340_PCI_0_MEMORY1_SIZE 0x088
#define MV64340_PCI_0_MEMORY2_BASE_ADDR 0x258
#define MV64340_PCI_0_MEMORY2_SIZE 0x260
#define MV64340_PCI_0_MEMORY3_BASE_ADDR 0x280
#define MV64340_PCI_0_MEMORY3_SIZE 0x288

/* PCI 1 BAR and size registers */
#define MV64340_PCI_1_IO_BASE_ADDR 0x090
#define MV64340_PCI_1_IO_SIZE 0x098
#define MV64340_PCI_1_MEMORY0_BASE_ADDR 0x0a0
#define MV64340_PCI_1_MEMORY0_SIZE 0x0a8
#define MV64340_PCI_1_MEMORY1_BASE_ADDR 0x0b0
#define MV64340_PCI_1_MEMORY1_SIZE 0x0b8
#define MV64340_PCI_1_MEMORY2_BASE_ADDR 0x2a0
#define MV64340_PCI_1_MEMORY2_SIZE 0x2a8
#define MV64340_PCI_1_MEMORY3_BASE_ADDR 0x2b0
#define MV64340_PCI_1_MEMORY3_SIZE 0x2b8

/* SRAM base address */
#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268

/* internal registers space base address */
#define MV64340_INTERNAL_SPACE_BASE_ADDR 0x068

/* Enables the CS, DEV_CS, PCI 0 and PCI 1 windows above */
#define MV64340_BASE_ADDR_ENABLE 0x278

/****************************************/
/* PCI remap registers */
/****************************************/

/* PCI 0 */
#define MV64340_PCI_0_IO_ADDR_REMAP 0x0f0
#define MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP 0x0f8
#define MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP 0x320
#define MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP 0x100
#define MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP 0x328
#define MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP 0x2f8
#define MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP 0x330
#define MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP 0x300
#define MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP 0x338
/* PCI 1 */
#define MV64340_PCI_1_IO_ADDR_REMAP 0x108
#define MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP 0x110
#define MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP 0x340
#define MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP 0x118
#define MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP 0x348
#define MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP 0x310
#define MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP 0x350
#define MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP 0x318
#define MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP 0x358

#define MV64340_CPU_PCI_0_HEADERS_RETARGET_CONTROL 0x3b0
#define MV64340_CPU_PCI_0_HEADERS_RETARGET_BASE 0x3b8
#define MV64340_CPU_PCI_1_HEADERS_RETARGET_CONTROL 0x3c0
#define MV64340_CPU_PCI_1_HEADERS_RETARGET_BASE 0x3c8
#define MV64340_CPU_GE_HEADERS_RETARGET_CONTROL 0x3d0
#define MV64340_CPU_GE_HEADERS_RETARGET_BASE 0x3d8
#define MV64340_CPU_IDMA_HEADERS_RETARGET_CONTROL 0x3e0
#define MV64340_CPU_IDMA_HEADERS_RETARGET_BASE 0x3e8

/****************************************/
/* CPU Control Registers */
/****************************************/

#define MV64340_CPU_CONFIG 0x000
#define MV64340_CPU_MODE 0x120
#define MV64340_CPU_MASTER_CONTROL 0x160
#define MV64340_CPU_CROSS_BAR_CONTROL_LOW 0x150
#define MV64340_CPU_CROSS_BAR_CONTROL_HIGH 0x158
#define MV64340_CPU_CROSS_BAR_TIMEOUT 0x168

/****************************************/
/* SMP Registers */
/****************************************/

#define MV64340_SMP_WHO_AM_I 0x200
#define MV64340_SMP_CPU0_DOORBELL 0x214
#define MV64340_SMP_CPU0_DOORBELL_CLEAR 0x21C
#define MV64340_SMP_CPU1_DOORBELL 0x224
#define MV64340_SMP_CPU1_DOORBELL_CLEAR 0x22C
#define MV64340_SMP_CPU0_DOORBELL_MASK 0x234
#define MV64340_SMP_CPU1_DOORBELL_MASK 0x23C
#define MV64340_SMP_SEMAPHOR0 0x244
#define MV64340_SMP_SEMAPHOR1 0x24c
#define MV64340_SMP_SEMAPHOR2 0x254
#define MV64340_SMP_SEMAPHOR3 0x25c
#define MV64340_SMP_SEMAPHOR4 0x264
#define MV64340_SMP_SEMAPHOR5 0x26c
#define MV64340_SMP_SEMAPHOR6 0x274
#define MV64340_SMP_SEMAPHOR7 0x27c

/****************************************/
/* CPU Sync Barrier Register */
/****************************************/

#define MV64340_CPU_0_SYNC_BARRIER_TRIGGER 0x0c0
#define MV64340_CPU_0_SYNC_BARRIER_VIRTUAL 0x0c8
#define MV64340_CPU_1_SYNC_BARRIER_TRIGGER 0x0d0
#define MV64340_CPU_1_SYNC_BARRIER_VIRTUAL 0x0d8

/****************************************/
/* CPU Access Protect */
/****************************************/

#define MV64340_CPU_PROTECT_WINDOW_0_BASE_ADDR 0x180
#define MV64340_CPU_PROTECT_WINDOW_0_SIZE 0x188
#define MV64340_CPU_PROTECT_WINDOW_1_BASE_ADDR 0x190
#define MV64340_CPU_PROTECT_WINDOW_1_SIZE 0x198
#define MV64340_CPU_PROTECT_WINDOW_2_BASE_ADDR 0x1a0
#define MV64340_CPU_PROTECT_WINDOW_2_SIZE 0x1a8
#define MV64340_CPU_PROTECT_WINDOW_3_BASE_ADDR 0x1b0
#define MV64340_CPU_PROTECT_WINDOW_3_SIZE 0x1b8

/****************************************/
/* CPU Error Report */
/****************************************/

#define MV64340_CPU_ERROR_ADDR_LOW 0x070
#define MV64340_CPU_ERROR_ADDR_HIGH 0x078
#define MV64340_CPU_ERROR_DATA_LOW 0x128
#define MV64340_CPU_ERROR_DATA_HIGH 0x130
#define MV64340_CPU_ERROR_PARITY 0x138
#define MV64340_CPU_ERROR_CAUSE 0x140
#define MV64340_CPU_ERROR_MASK 0x148

/****************************************/
/* CPU Interface Debug Registers */
/****************************************/

#define MV64340_PUNIT_SLAVE_DEBUG_LOW 0x360
#define MV64340_PUNIT_SLAVE_DEBUG_HIGH 0x368
#define MV64340_PUNIT_MASTER_DEBUG_LOW 0x370
#define MV64340_PUNIT_MASTER_DEBUG_HIGH 0x378
#define MV64340_PUNIT_MMASK 0x3e4

/****************************************/
/* Integrated SRAM Registers */
/****************************************/

#define MV64340_SRAM_CONFIG 0x380
#define MV64340_SRAM_TEST_MODE 0X3F4
#define MV64340_SRAM_ERROR_CAUSE 0x388
#define MV64340_SRAM_ERROR_ADDR 0x390
#define MV64340_SRAM_ERROR_ADDR_HIGH 0X3F8
#define MV64340_SRAM_ERROR_DATA_LOW 0x398
#define MV64340_SRAM_ERROR_DATA_HIGH 0x3a0
#define MV64340_SRAM_ERROR_DATA_PARITY 0x3a8

/****************************************/
/* SDRAM Configuration */
/****************************************/

#define MV64340_SDRAM_CONFIG 0x1400
#define MV64340_D_UNIT_CONTROL_LOW 0x1404
#define MV64340_D_UNIT_CONTROL_HIGH 0x1424
#define MV64340_SDRAM_TIMING_CONTROL_LOW 0x1408
#define MV64340_SDRAM_TIMING_CONTROL_HIGH 0x140c
#define MV64340_SDRAM_ADDR_CONTROL 0x1410
#define MV64340_SDRAM_OPEN_PAGES_CONTROL 0x1414
#define MV64340_SDRAM_OPERATION 0x1418
#define MV64340_SDRAM_MODE 0x141c
#define MV64340_EXTENDED_DRAM_MODE 0x1420
#define MV64340_SDRAM_CROSS_BAR_CONTROL_LOW 0x1430
#define MV64340_SDRAM_CROSS_BAR_CONTROL_HIGH 0x1434
#define MV64340_SDRAM_CROSS_BAR_TIMEOUT 0x1438
#define MV64340_SDRAM_ADDR_CTRL_PADS_CALIBRATION 0x14c0
#define MV64340_SDRAM_DATA_PADS_CALIBRATION 0x14c4

/****************************************/
/* SDRAM Error Report */
/****************************************/

#define MV64340_SDRAM_ERROR_DATA_LOW 0x1444
#define MV64340_SDRAM_ERROR_DATA_HIGH 0x1440
#define MV64340_SDRAM_ERROR_ADDR 0x1450
#define MV64340_SDRAM_RECEIVED_ECC 0x1448
#define MV64340_SDRAM_CALCULATED_ECC 0x144c
#define MV64340_SDRAM_ECC_CONTROL 0x1454
#define MV64340_SDRAM_ECC_ERROR_COUNTER 0x1458

/******************************************/
/* Controlled Delay Line (CDL) Registers  */
/******************************************/

#define MV64340_DFCDL_CONFIG0 0x1480
#define MV64340_DFCDL_CONFIG1 0x1484
#define MV64340_DLL_WRITE 0x1488
#define MV64340_DLL_READ 0x148c
#define MV64340_SRAM_ADDR 0x1490
#define MV64340_SRAM_DATA0 0x1494
#define MV64340_SRAM_DATA1 0x1498
#define MV64340_SRAM_DATA2 0x149c
#define MV64340_DFCL_PROBE 0x14a0

/******************************************/
/* Debug Registers */
/******************************************/

#define MV64340_DUNIT_DEBUG_LOW 0x1460
#define MV64340_DUNIT_DEBUG_HIGH 0x1464
#define MV64340_DUNIT_MMASK 0X1b40

/****************************************/
/* Device Parameters */
/****************************************/

#define MV64340_DEVICE_BANK0_PARAMETERS 0x45c
#define MV64340_DEVICE_BANK1_PARAMETERS 0x460
#define MV64340_DEVICE_BANK2_PARAMETERS 0x464
#define MV64340_DEVICE_BANK3_PARAMETERS 0x468
#define MV64340_DEVICE_BOOT_BANK_PARAMETERS 0x46c
#define MV64340_DEVICE_INTERFACE_CONTROL 0x4c0
#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_LOW 0x4c8
#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_HIGH 0x4cc
#define MV64340_DEVICE_INTERFACE_CROSS_BAR_TIMEOUT 0x4c4

/****************************************/
/* Device interrupt registers */
/****************************************/

#define MV64340_DEVICE_INTERRUPT_CAUSE 0x4d0
#define MV64340_DEVICE_INTERRUPT_MASK 0x4d4
#define MV64340_DEVICE_ERROR_ADDR 0x4d8
#define MV64340_DEVICE_ERROR_DATA 0x4dc
#define MV64340_DEVICE_ERROR_PARITY 0x4e0

/****************************************/
/* Device debug registers */
/****************************************/

#define MV64340_DEVICE_DEBUG_LOW 0x4e4
#define MV64340_DEVICE_DEBUG_HIGH 0x4e8
#define MV64340_RUNIT_MMASK 0x4f0

/****************************************/
/* PCI Slave Address Decoding registers */
/****************************************/

#define MV64340_PCI_0_CS_0_BANK_SIZE 0xc08
#define MV64340_PCI_1_CS_0_BANK_SIZE 0xc88
#define MV64340_PCI_0_CS_1_BANK_SIZE 0xd08
#define MV64340_PCI_1_CS_1_BANK_SIZE 0xd88
#define MV64340_PCI_0_CS_2_BANK_SIZE 0xc0c
#define MV64340_PCI_1_CS_2_BANK_SIZE 0xc8c
#define MV64340_PCI_0_CS_3_BANK_SIZE 0xd0c
#define MV64340_PCI_1_CS_3_BANK_SIZE 0xd8c
#define MV64340_PCI_0_DEVCS_0_BANK_SIZE 0xc10
#define MV64340_PCI_1_DEVCS_0_BANK_SIZE 0xc90
#define MV64340_PCI_0_DEVCS_1_BANK_SIZE 0xd10
#define MV64340_PCI_1_DEVCS_1_BANK_SIZE 0xd90
#define MV64340_PCI_0_DEVCS_2_BANK_SIZE 0xd18
#define MV64340_PCI_1_DEVCS_2_BANK_SIZE 0xd98
#define MV64340_PCI_0_DEVCS_3_BANK_SIZE 0xc14
#define MV64340_PCI_1_DEVCS_3_BANK_SIZE 0xc94
#define MV64340_PCI_0_DEVCS_BOOT_BANK_SIZE 0xd14
#define MV64340_PCI_1_DEVCS_BOOT_BANK_SIZE 0xd94
#define MV64340_PCI_0_P2P_MEM0_BAR_SIZE 0xd1c
#define MV64340_PCI_1_P2P_MEM0_BAR_SIZE 0xd9c
#define MV64340_PCI_0_P2P_MEM1_BAR_SIZE 0xd20
#define MV64340_PCI_1_P2P_MEM1_BAR_SIZE 0xda0
#define MV64340_PCI_0_P2P_I_O_BAR_SIZE 0xd24
#define MV64340_PCI_1_P2P_I_O_BAR_SIZE 0xda4
#define MV64340_PCI_0_CPU_BAR_SIZE 0xd28
#define MV64340_PCI_1_CPU_BAR_SIZE 0xda8
#define MV64340_PCI_0_INTERNAL_SRAM_BAR_SIZE 0xe00
#define MV64340_PCI_1_INTERNAL_SRAM_BAR_SIZE 0xe80
#define MV64340_PCI_0_EXPANSION_ROM_BAR_SIZE 0xd2c
#define MV64340_PCI_1_EXPANSION_ROM_BAR_SIZE 0xd9c
#define MV64340_PCI_0_BASE_ADDR_REG_ENABLE 0xc3c
#define MV64340_PCI_1_BASE_ADDR_REG_ENABLE 0xcbc
#define MV64340_PCI_0_CS_0_BASE_ADDR_REMAP 0xc48
#define MV64340_PCI_1_CS_0_BASE_ADDR_REMAP 0xcc8
#define MV64340_PCI_0_CS_1_BASE_ADDR_REMAP 0xd48
#define MV64340_PCI_1_CS_1_BASE_ADDR_REMAP 0xdc8
#define MV64340_PCI_0_CS_2_BASE_ADDR_REMAP 0xc4c
#define MV64340_PCI_1_CS_2_BASE_ADDR_REMAP 0xccc
#define MV64340_PCI_0_CS_3_BASE_ADDR_REMAP 0xd4c
#define MV64340_PCI_1_CS_3_BASE_ADDR_REMAP 0xdcc
#define MV64340_PCI_0_CS_0_BASE_HIGH_ADDR_REMAP 0xF04
#define MV64340_PCI_1_CS_0_BASE_HIGH_ADDR_REMAP 0xF84
#define MV64340_PCI_0_CS_1_BASE_HIGH_ADDR_REMAP 0xF08
#define MV64340_PCI_1_CS_1_BASE_HIGH_ADDR_REMAP 0xF88
#define MV64340_PCI_0_CS_2_BASE_HIGH_ADDR_REMAP 0xF0C
#define MV64340_PCI_1_CS_2_BASE_HIGH_ADDR_REMAP 0xF8C
#define MV64340_PCI_0_CS_3_BASE_HIGH_ADDR_REMAP 0xF10
#define MV64340_PCI_1_CS_3_BASE_HIGH_ADDR_REMAP 0xF90
#define MV64340_PCI_0_DEVCS_0_BASE_ADDR_REMAP 0xc50
#define MV64340_PCI_1_DEVCS_0_BASE_ADDR_REMAP 0xcd0
#define MV64340_PCI_0_DEVCS_1_BASE_ADDR_REMAP 0xd50
#define MV64340_PCI_1_DEVCS_1_BASE_ADDR_REMAP 0xdd0
#define MV64340_PCI_0_DEVCS_2_BASE_ADDR_REMAP 0xd58
#define MV64340_PCI_1_DEVCS_2_BASE_ADDR_REMAP 0xdd8
#define MV64340_PCI_0_DEVCS_3_BASE_ADDR_REMAP 0xc54
#define MV64340_PCI_1_DEVCS_3_BASE_ADDR_REMAP 0xcd4
#define MV64340_PCI_0_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xd54
#define MV64340_PCI_1_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xdd4
#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xd5c
#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xddc
#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xd60
#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xde0
#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xd64
#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xde4
#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xd68
#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xde8
#define MV64340_PCI_0_P2P_I_O_BASE_ADDR_REMAP 0xd6c
#define MV64340_PCI_1_P2P_I_O_BASE_ADDR_REMAP 0xdec
#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_LOW 0xd70
#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_LOW 0xdf0
#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_HIGH 0xd74
#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_HIGH 0xdf4
#define MV64340_PCI_0_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf00
#define MV64340_PCI_1_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf80
#define MV64340_PCI_0_EXPANSION_ROM_BASE_ADDR_REMAP 0xf38
#define MV64340_PCI_1_EXPANSION_ROM_BASE_ADDR_REMAP 0xfb8
#define MV64340_PCI_0_ADDR_DECODE_CONTROL 0xd3c
#define MV64340_PCI_1_ADDR_DECODE_CONTROL 0xdbc
#define MV64340_PCI_0_HEADERS_RETARGET_CONTROL 0xF40
#define MV64340_PCI_1_HEADERS_RETARGET_CONTROL 0xFc0
#define MV64340_PCI_0_HEADERS_RETARGET_BASE 0xF44
#define MV64340_PCI_1_HEADERS_RETARGET_BASE 0xFc4
#define MV64340_PCI_0_HEADERS_RETARGET_HIGH 0xF48
#define MV64340_PCI_1_HEADERS_RETARGET_HIGH 0xFc8

/***********************************/
/* PCI Control Register Map */
/***********************************/

#define MV64340_PCI_0_DLL_STATUS_AND_COMMAND 0x1d20
#define MV64340_PCI_1_DLL_STATUS_AND_COMMAND 0x1da0
#define MV64340_PCI_0_MPP_PADS_DRIVE_CONTROL 0x1d1C
#define MV64340_PCI_1_MPP_PADS_DRIVE_CONTROL 0x1d9C
#define MV64340_PCI_0_COMMAND 0xc00
#define MV64340_PCI_1_COMMAND 0xc80
#define MV64340_PCI_0_MODE 0xd00
#define MV64340_PCI_1_MODE 0xd80
#define MV64340_PCI_0_RETRY 0xc04
#define MV64340_PCI_1_RETRY 0xc84
#define MV64340_PCI_0_READ_BUFFER_DISCARD_TIMER 0xd04
#define MV64340_PCI_1_READ_BUFFER_DISCARD_TIMER 0xd84
#define MV64340_PCI_0_MSI_TRIGGER_TIMER 0xc38
#define MV64340_PCI_1_MSI_TRIGGER_TIMER 0xcb8
#define MV64340_PCI_0_ARBITER_CONTROL 0x1d00
#define MV64340_PCI_1_ARBITER_CONTROL 0x1d80
#define MV64340_PCI_0_CROSS_BAR_CONTROL_LOW 0x1d08
#define MV64340_PCI_1_CROSS_BAR_CONTROL_LOW 0x1d88
#define MV64340_PCI_0_CROSS_BAR_CONTROL_HIGH 0x1d0c
#define MV64340_PCI_1_CROSS_BAR_CONTROL_HIGH 0x1d8c
#define MV64340_PCI_0_CROSS_BAR_TIMEOUT 0x1d04
#define MV64340_PCI_1_CROSS_BAR_TIMEOUT 0x1d84
#define MV64340_PCI_0_SYNC_BARRIER_TRIGGER_REG 0x1D18
#define MV64340_PCI_1_SYNC_BARRIER_TRIGGER_REG 0x1D98
#define MV64340_PCI_0_SYNC_BARRIER_VIRTUAL_REG 0x1d10
#define MV64340_PCI_1_SYNC_BARRIER_VIRTUAL_REG 0x1d90
#define MV64340_PCI_0_P2P_CONFIG 0x1d14
#define MV64340_PCI_1_P2P_CONFIG 0x1d94
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_LOW 0x1e00
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_HIGH 0x1e04
#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_0 0x1e08
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_LOW 0x1e10
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_HIGH 0x1e14
#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_1 0x1e18
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_LOW 0x1e20
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_HIGH 0x1e24
#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_2 0x1e28
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_LOW 0x1e30
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_HIGH 0x1e34
#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_3 0x1e38
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_LOW 0x1e40
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_HIGH 0x1e44
#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_4 0x1e48
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_LOW 0x1e50
#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_HIGH 0x1e54
#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_5 0x1e58
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_LOW 0x1e80
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_HIGH 0x1e84
#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_0 0x1e88
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_LOW 0x1e90
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_HIGH 0x1e94
#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_1 0x1e98
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_LOW 0x1ea0
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_HIGH 0x1ea4
#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_2 0x1ea8
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_LOW 0x1eb0
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_HIGH 0x1eb4
#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_3 0x1eb8
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_LOW 0x1ec0
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_HIGH 0x1ec4
#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_4 0x1ec8
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_LOW 0x1ed0
#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_HIGH 0x1ed4
#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_5 0x1ed8

/****************************************/
/* PCI Configuration Access Registers */
/****************************************/

#define MV64340_PCI_0_CONFIG_ADDR 0xcf8
#define MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG 0xcfc
#define MV64340_PCI_1_CONFIG_ADDR 0xc78
#define MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG 0xc7c
#define MV64340_PCI_0_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xc34
#define MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xcb4

/****************************************/
/* PCI Error Report Registers */
/****************************************/

#define MV64340_PCI_0_SERR_MASK 0xc28
#define MV64340_PCI_1_SERR_MASK 0xca8
#define MV64340_PCI_0_ERROR_ADDR_LOW 0x1d40
#define MV64340_PCI_1_ERROR_ADDR_LOW 0x1dc0
#define MV64340_PCI_0_ERROR_ADDR_HIGH 0x1d44
#define MV64340_PCI_1_ERROR_ADDR_HIGH 0x1dc4
#define MV64340_PCI_0_ERROR_ATTRIBUTE 0x1d48
#define MV64340_PCI_1_ERROR_ATTRIBUTE 0x1dc8
#define MV64340_PCI_0_ERROR_COMMAND 0x1d50
#define MV64340_PCI_1_ERROR_COMMAND 0x1dd0
#define MV64340_PCI_0_ERROR_CAUSE 0x1d58
#define MV64340_PCI_1_ERROR_CAUSE 0x1dd8
#define MV64340_PCI_0_ERROR_MASK 0x1d5c
#define MV64340_PCI_1_ERROR_MASK 0x1ddc

/****************************************/
/* PCI Debug Registers */
/****************************************/

#define MV64340_PCI_0_MMASK 0X1D24
#define MV64340_PCI_1_MMASK 0X1DA4

/*********************************************/
/* PCI Configuration, Function 0, Registers  */
/*********************************************/

#define MV64340_PCI_DEVICE_AND_VENDOR_ID 0x000
#define MV64340_PCI_STATUS_AND_COMMAND 0x004
#define MV64340_PCI_CLASS_CODE_AND_REVISION_ID 0x008
#define MV64340_PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE 0x00C
#define MV64340_PCI_SCS_0_BASE_ADDR_LOW 0x010
#define MV64340_PCI_SCS_0_BASE_ADDR_HIGH 0x014
#define MV64340_PCI_SCS_1_BASE_ADDR_LOW 0x018
#define MV64340_PCI_SCS_1_BASE_ADDR_HIGH 0x01C
#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_LOW 0x020
#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_HIGH 0x024
#define MV64340_PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID 0x02c
#define MV64340_PCI_EXPANSION_ROM_BASE_ADDR_REG 0x030
#define MV64340_PCI_CAPABILTY_LIST_POINTER 0x034
#define MV64340_PCI_INTERRUPT_PIN_AND_LINE 0x03C
/* capability list */
#define MV64340_PCI_POWER_MANAGEMENT_CAPABILITY 0x040
#define MV64340_PCI_POWER_MANAGEMENT_STATUS_AND_CONTROL 0x044
#define MV64340_PCI_VPD_ADDR 0x048
#define MV64340_PCI_VPD_DATA 0x04c
#define MV64340_PCI_MSI_MESSAGE_CONTROL 0x050
#define MV64340_PCI_MSI_MESSAGE_ADDR 0x054
#define MV64340_PCI_MSI_MESSAGE_UPPER_ADDR 0x058
#define MV64340_PCI_MSI_MESSAGE_DATA 0x05c
#define MV64340_PCI_X_COMMAND 0x060
#define MV64340_PCI_X_STATUS 0x064
#define MV64340_PCI_COMPACT_PCI_HOT_SWAP 0x068

/***********************************************/
/* PCI Configuration, Function 1, Registers    */
/***********************************************/

#define MV64340_PCI_SCS_2_BASE_ADDR_LOW 0x110
#define MV64340_PCI_SCS_2_BASE_ADDR_HIGH 0x114
#define MV64340_PCI_SCS_3_BASE_ADDR_LOW 0x118
#define MV64340_PCI_SCS_3_BASE_ADDR_HIGH 0x11c
#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_LOW 0x120
#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_HIGH 0x124

/***********************************************/
/* PCI Configuration, Function 2, Registers    */
/***********************************************/

#define MV64340_PCI_DEVCS_0_BASE_ADDR_LOW 0x210
#define MV64340_PCI_DEVCS_0_BASE_ADDR_HIGH 0x214
#define MV64340_PCI_DEVCS_1_BASE_ADDR_LOW 0x218
#define MV64340_PCI_DEVCS_1_BASE_ADDR_HIGH 0x21c
#define MV64340_PCI_DEVCS_2_BASE_ADDR_LOW 0x220
#define MV64340_PCI_DEVCS_2_BASE_ADDR_HIGH 0x224

/***********************************************/
/* PCI Configuration, Function 3, Registers    */
/***********************************************/

#define MV64340_PCI_DEVCS_3_BASE_ADDR_LOW 0x310
#define MV64340_PCI_DEVCS_3_BASE_ADDR_HIGH 0x314
#define MV64340_PCI_BOOT_CS_BASE_ADDR_LOW 0x318
#define MV64340_PCI_BOOT_CS_BASE_ADDR_HIGH 0x31c
#define MV64340_PCI_CPU_BASE_ADDR_LOW 0x220
#define MV64340_PCI_CPU_BASE_ADDR_HIGH 0x224

/***********************************************/
/* PCI Configuration, Function 4, Registers    */
/***********************************************/

#define MV64340_PCI_P2P_MEM0_BASE_ADDR_LOW 0x410
#define MV64340_PCI_P2P_MEM0_BASE_ADDR_HIGH 0x414
#define MV64340_PCI_P2P_MEM1_BASE_ADDR_LOW 0x418
#define MV64340_PCI_P2P_MEM1_BASE_ADDR_HIGH 0x41c
#define MV64340_PCI_P2P_I_O_BASE_ADDR 0x420
#define MV64340_PCI_INTERNAL_REGS_I_O_MAPPED_BASE_ADDR 0x424

/****************************************/
/* Messaging Unit Registers (I2O) */
/****************************************/

#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_0_SIDE 0x010
#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_0_SIDE 0x014
#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_0_SIDE 0x018
#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_0_SIDE 0x01C
#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_0_SIDE 0x020
#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x024
#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x028
#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_0_SIDE 0x02C
#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x030
#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x034
#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x040
#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x044
#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_0_SIDE 0x050
#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_0_SIDE 0x054
#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x060
#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x064
#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x068
#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x06C
#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x070
#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x074
#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x0F8
#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x0FC
#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_1_SIDE 0x090
#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_1_SIDE 0x094
#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_1_SIDE 0x098
#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_1_SIDE 0x09C
#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_1_SIDE 0x0A0
#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0A4
#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0A8
#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_1_SIDE 0x0AC
#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0B0
#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0B4
#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C0
#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C4
#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_1_SIDE 0x0D0
#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_1_SIDE 0x0D4
#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0E0
#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0E4
#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x0E8
#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x0EC
#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0F0
#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0F4
#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x078
#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x07C
#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C10
#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C14
#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C18
#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C1C
#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU0_SIDE 0x1C20
#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C24
#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C28
#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU0_SIDE 0x1C2C
#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C30
#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C34
#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C40
#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C44
#define MV64340_I2O_QUEUE_CONTROL_REG_CPU0_SIDE 0x1C50
#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU0_SIDE 0x1C54
#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C60
#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C64
#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1C68
#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1C6C
#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C70
#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C74
#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1CF8
#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1CFC
#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C90
#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C94
#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C98
#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C9C
#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU1_SIDE 0x1CA0
#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CA4
#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CA8
#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU1_SIDE 0x1CAC
#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CB0
#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CB4
#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC0
#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC4
#define MV64340_I2O_QUEUE_CONTROL_REG_CPU1_SIDE 0x1CD0
#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU1_SIDE 0x1CD4
#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CE0
#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CE4
#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1CE8
#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1CEC
#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CF0
#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CF4
#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1C78
#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1C7C

/****************************************/
/* Ethernet Unit Registers */
/****************************************/

/*******************************************/
/* CUNIT Registers */
/*******************************************/

/* Address Decoding Register Map */
#define MV64340_CUNIT_BASE_ADDR_REG0 0xf200
#define MV64340_CUNIT_BASE_ADDR_REG1 0xf208
#define MV64340_CUNIT_BASE_ADDR_REG2 0xf210
#define MV64340_CUNIT_BASE_ADDR_REG3 0xf218
#define MV64340_CUNIT_SIZE0 0xf204
#define MV64340_CUNIT_SIZE1 0xf20c
#define MV64340_CUNIT_SIZE2 0xf214
#define MV64340_CUNIT_SIZE3 0xf21c
#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG0 0xf240
#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG1 0xf244
#define MV64340_CUNIT_BASE_ADDR_ENABLE_REG 0xf250
#define MV64340_MPSC0_ACCESS_PROTECTION_REG 0xf254
#define MV64340_MPSC1_ACCESS_PROTECTION_REG 0xf258
#define MV64340_CUNIT_INTERNAL_SPACE_BASE_ADDR_REG 0xf25C

/* Error Report Registers */
#define MV64340_CUNIT_INTERRUPT_CAUSE_REG 0xf310
#define MV64340_CUNIT_INTERRUPT_MASK_REG 0xf314
#define MV64340_CUNIT_ERROR_ADDR 0xf318

/* Cunit Control Registers */
#define MV64340_CUNIT_ARBITER_CONTROL_REG 0xf300
#define MV64340_CUNIT_CONFIG_REG 0xb40c
#define MV64340_CUNIT_CRROSBAR_TIMEOUT_REG 0xf304

/* Cunit Debug Registers */
#define MV64340_CUNIT_DEBUG_LOW 0xf340
#define MV64340_CUNIT_DEBUG_HIGH 0xf344
#define MV64340_CUNIT_MMASK 0xf380

/* MPSCs Clocks Routing Registers */
#define MV64340_MPSC_ROUTING_REG 0xb400
#define MV64340_MPSC_RX_CLOCK_ROUTING_REG 0xb404
#define MV64340_MPSC_TX_CLOCK_ROUTING_REG 0xb408

/* MPSCs Interrupts Registers */
#define MV64340_MPSC_CAUSE_REG(port) (0xb804 + (port<<3))
#define MV64340_MPSC_MASK_REG(port) (0xb884 + (port<<3))

#define MV64340_MPSC_MAIN_CONFIG_LOW(port) (0x8000 + (port<<12))
#define MV64340_MPSC_MAIN_CONFIG_HIGH(port) (0x8004 + (port<<12))
#define MV64340_MPSC_PROTOCOL_CONFIG(port) (0x8008 + (port<<12))
#define MV64340_MPSC_CHANNEL_REG1(port) (0x800c + (port<<12))
#define MV64340_MPSC_CHANNEL_REG2(port) (0x8010 + (port<<12))
#define MV64340_MPSC_CHANNEL_REG3(port) (0x8014 + (port<<12))
#define MV64340_MPSC_CHANNEL_REG4(port) (0x8018 + (port<<12))
#define MV64340_MPSC_CHANNEL_REG5(port) (0x801c + (port<<12))
#define MV64340_MPSC_CHANNEL_REG6(port) (0x8020 + (port<<12))
#define MV64340_MPSC_CHANNEL_REG7(port) (0x8024 + (port<<12))
#define MV64340_MPSC_CHANNEL_REG8(port) (0x8028 + (port<<12))
#define MV64340_MPSC_CHANNEL_REG9(port) (0x802c + (port<<12))
#define MV64340_MPSC_CHANNEL_REG10(port) (0x8030 + (port<<12))

/* MPSC0 Registers */

/***************************************/
/* SDMA Registers */
/***************************************/

#define MV64340_SDMA_CONFIG_REG(channel) (0x4000 + (channel<<13))
#define MV64340_SDMA_COMMAND_REG(channel) (0x4008 + (channel<<13))
#define MV64340_SDMA_CURRENT_RX_DESCRIPTOR_POINTER(channel) (0x4810 + (channel<<13))
#define MV64340_SDMA_CURRENT_TX_DESCRIPTOR_POINTER(channel) (0x4c10 + (channel<<13))
#define MV64340_SDMA_FIRST_TX_DESCRIPTOR_POINTER(channel) (0x4c14 + (channel<<13))

#define MV64340_SDMA_CAUSE_REG 0xb800
#define MV64340_SDMA_MASK_REG 0xb880

/* BRG Interrupts */
#define MV64340_BRG_CONFIG_REG(brg) (0xb200 + (brg<<3))
#define MV64340_BRG_BAUDE_TUNING_REG(brg) (0xb208 + (brg<<3))
#define MV64340_BRG_CAUSE_REG 0xb834
#define MV64340_BRG_MASK_REG 0xb8b4

/****************************************/
/* DMA Channel Control */
/****************************************/

#define MV64340_DMA_CHANNEL0_CONTROL 0x840
#define MV64340_DMA_CHANNEL0_CONTROL_HIGH 0x880
#define MV64340_DMA_CHANNEL1_CONTROL 0x844
#define MV64340_DMA_CHANNEL1_CONTROL_HIGH 0x884
#define MV64340_DMA_CHANNEL2_CONTROL 0x848
#define MV64340_DMA_CHANNEL2_CONTROL_HIGH 0x888
#define MV64340_DMA_CHANNEL3_CONTROL 0x84C
#define MV64340_DMA_CHANNEL3_CONTROL_HIGH 0x88C

/****************************************/
/* IDMA Registers */
/****************************************/

#define MV64340_DMA_CHANNEL0_BYTE_COUNT 0x800
#define MV64340_DMA_CHANNEL1_BYTE_COUNT 0x804
#define MV64340_DMA_CHANNEL2_BYTE_COUNT 0x808
#define MV64340_DMA_CHANNEL3_BYTE_COUNT 0x80C
#define MV64340_DMA_CHANNEL0_SOURCE_ADDR 0x810
#define MV64340_DMA_CHANNEL1_SOURCE_ADDR 0x814
#define MV64340_DMA_CHANNEL2_SOURCE_ADDR 0x818
#define MV64340_DMA_CHANNEL3_SOURCE_ADDR 0x81c
#define MV64340_DMA_CHANNEL0_DESTINATION_ADDR 0x820
#define MV64340_DMA_CHANNEL1_DESTINATION_ADDR 0x824
#define MV64340_DMA_CHANNEL2_DESTINATION_ADDR 0x828
#define MV64340_DMA_CHANNEL3_DESTINATION_ADDR 0x82C
#define MV64340_DMA_CHANNEL0_NEXT_DESCRIPTOR_POINTER 0x830
#define MV64340_DMA_CHANNEL1_NEXT_DESCRIPTOR_POINTER 0x834
#define MV64340_DMA_CHANNEL2_NEXT_DESCRIPTOR_POINTER 0x838
#define MV64340_DMA_CHANNEL3_NEXT_DESCRIPTOR_POINTER 0x83C
#define MV64340_DMA_CHANNEL0_CURRENT_DESCRIPTOR_POINTER 0x870
#define MV64340_DMA_CHANNEL1_CURRENT_DESCRIPTOR_POINTER 0x874
#define MV64340_DMA_CHANNEL2_CURRENT_DESCRIPTOR_POINTER 0x878
#define MV64340_DMA_CHANNEL3_CURRENT_DESCRIPTOR_POINTER 0x87C

/* IDMA Address Decoding Base Address Registers */
#define MV64340_DMA_BASE_ADDR_REG0 0xa00
#define MV64340_DMA_BASE_ADDR_REG1 0xa08
#define MV64340_DMA_BASE_ADDR_REG2 0xa10
#define MV64340_DMA_BASE_ADDR_REG3 0xa18
#define MV64340_DMA_BASE_ADDR_REG4 0xa20
#define MV64340_DMA_BASE_ADDR_REG5 0xa28
#define MV64340_DMA_BASE_ADDR_REG6 0xa30
#define MV64340_DMA_BASE_ADDR_REG7 0xa38

/* IDMA Address Decoding Size Address Register */
#define MV64340_DMA_SIZE_REG0 0xa04
#define MV64340_DMA_SIZE_REG1 0xa0c
#define MV64340_DMA_SIZE_REG2 0xa14
#define MV64340_DMA_SIZE_REG3 0xa1c
#define MV64340_DMA_SIZE_REG4 0xa24
#define MV64340_DMA_SIZE_REG5 0xa2c
#define MV64340_DMA_SIZE_REG6 0xa34
#define MV64340_DMA_SIZE_REG7 0xa3C

/* IDMA Address Decoding High Address Remap and Access Protection Registers */
#define MV64340_DMA_HIGH_ADDR_REMAP_REG0 0xa60
#define MV64340_DMA_HIGH_ADDR_REMAP_REG1 0xa64
#define MV64340_DMA_HIGH_ADDR_REMAP_REG2 0xa68
#define MV64340_DMA_HIGH_ADDR_REMAP_REG3 0xa6C
#define MV64340_DMA_BASE_ADDR_ENABLE_REG 0xa80
#define MV64340_DMA_CHANNEL0_ACCESS_PROTECTION_REG 0xa70
#define MV64340_DMA_CHANNEL1_ACCESS_PROTECTION_REG 0xa74
#define MV64340_DMA_CHANNEL2_ACCESS_PROTECTION_REG 0xa78
#define MV64340_DMA_CHANNEL3_ACCESS_PROTECTION_REG 0xa7c
#define MV64340_DMA_ARBITER_CONTROL 0x860
#define MV64340_DMA_CROSS_BAR_TIMEOUT 0x8d0

/* IDMA Headers Retarget Registers */
#define MV64340_DMA_HEADERS_RETARGET_CONTROL 0xa84
#define MV64340_DMA_HEADERS_RETARGET_BASE 0xa88

/* IDMA Interrupt Register */
#define MV64340_DMA_INTERRUPT_CAUSE_REG 0x8c0
#define MV64340_DMA_INTERRUPT_CAUSE_MASK 0x8c4
#define MV64340_DMA_ERROR_ADDR 0x8c8
#define MV64340_DMA_ERROR_SELECT 0x8cc

/* IDMA Debug Register ( for internal use ) */
#define MV64340_DMA_DEBUG_LOW 0x8e0
#define MV64340_DMA_DEBUG_HIGH 0x8e4
#define MV64340_DMA_SPARE 0xA8C

/****************************************/
/* Timer_Counter */
/****************************************/

#define MV64340_TIMER_COUNTER0 0x850
#define MV64340_TIMER_COUNTER1 0x854
#define MV64340_TIMER_COUNTER2 0x858
#define MV64340_TIMER_COUNTER3 0x85C
#define MV64340_TIMER_COUNTER_0_3_CONTROL 0x864
#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_CAUSE 0x868
#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_MASK 0x86c

/****************************************/
/* Watchdog registers */
/****************************************/

#define MV64340_WATCHDOG_CONFIG_REG 0xb410
#define MV64340_WATCHDOG_VALUE_REG 0xb414
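/*
 * Editorial sketch (not part of the original header): the MPSC, SDMA and
 * BRG macros above fold the port/channel number into the register offset
 * (e.g. 0xb804 + (port << 3)), so per-port access reduces to one
 * readl()/writel() against the bridge's register window. "regs" is an
 * assumed, caller-provided ioremap()ed mapping of the MV64340 internal
 * register space; the helper name is hypothetical.
 */
static inline u32 example_mpsc_pending(void __iomem *regs, unsigned int port)
{
	/* pending = raw cause bits gated by the matching mask register */
	return readl(regs + MV64340_MPSC_CAUSE_REG(port)) &
	       readl(regs + MV64340_MPSC_MASK_REG(port));
}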
/****************************************/
/* I2C Registers */
/****************************************/

#define MV64XXX_I2C_OFFSET 0xc000
#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020

/****************************************/
/* GPP Interface Registers */
/****************************************/

#define MV64340_GPP_IO_CONTROL 0xf100
#define MV64340_GPP_LEVEL_CONTROL 0xf110
#define MV64340_GPP_VALUE 0xf104
#define MV64340_GPP_INTERRUPT_CAUSE 0xf108
#define MV64340_GPP_INTERRUPT_MASK0 0xf10c
#define MV64340_GPP_INTERRUPT_MASK1 0xf114
#define MV64340_GPP_VALUE_SET 0xf118
#define MV64340_GPP_VALUE_CLEAR 0xf11c

/****************************************/
/* Interrupt Controller Registers */
/****************************************/

/****************************************/
/* Interrupts */
/****************************************/

#define MV64340_MAIN_INTERRUPT_CAUSE_LOW 0x004
#define MV64340_MAIN_INTERRUPT_CAUSE_HIGH 0x00c
#define MV64340_CPU_INTERRUPT0_MASK_LOW 0x014
#define MV64340_CPU_INTERRUPT0_MASK_HIGH 0x01c
#define MV64340_CPU_INTERRUPT0_SELECT_CAUSE 0x024
#define MV64340_CPU_INTERRUPT1_MASK_LOW 0x034
#define MV64340_CPU_INTERRUPT1_MASK_HIGH 0x03c
#define MV64340_CPU_INTERRUPT1_SELECT_CAUSE 0x044
#define MV64340_INTERRUPT0_MASK_0_LOW 0x054
#define MV64340_INTERRUPT0_MASK_0_HIGH 0x05c
#define MV64340_INTERRUPT0_SELECT_CAUSE 0x064
#define MV64340_INTERRUPT1_MASK_0_LOW 0x074
#define MV64340_INTERRUPT1_MASK_0_HIGH 0x07c
#define MV64340_INTERRUPT1_SELECT_CAUSE 0x084

/****************************************/
/* MPP Interface Registers */
/****************************************/

#define MV64340_MPP_CONTROL0 0xf000
#define MV64340_MPP_CONTROL1 0xf004
#define MV64340_MPP_CONTROL2 0xf008
#define MV64340_MPP_CONTROL3 0xf00c

/****************************************/
/* Serial Initialization registers */
/****************************************/

#define MV64340_SERIAL_INIT_LAST_DATA 0xf324
#define MV64340_SERIAL_INIT_CONTROL 0xf328
#define MV64340_SERIAL_INIT_STATUS 0xf32c

extern void mv64340_irq_init(unsigned int base);

/* Watchdog Platform Device, Driver Data */
#define MV64x60_WDT_NAME "mv64x60_wdt"

struct mv64x60_wdt_pdata {
	int timeout;	/* watchdog expiry in seconds, default 10 */
	int bus_clk;	/* bus clock in MHz, default 133 */
};

#endif /* __ASM_MV643XX_H */
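/*
 * Editorial sketch (hedged, not from the original header): board setup
 * code might publish the mv64x60_wdt_pdata declared above roughly like
 * this. The helper name is hypothetical; the values are the documented
 * defaults.
 */
#include <linux/platform_device.h>
#include <linux/err.h>

static struct mv64x60_wdt_pdata example_wdt_pdata = {
	.timeout = 10,	/* seconds, the documented default */
	.bus_clk = 133,	/* MHz, the documented default */
};

static int __init example_wdt_register(void)
{
	struct platform_device *pdev;

	/* copies the pdata into the device, matching MV64x60_WDT_NAME */
	pdev = platform_device_register_data(NULL, MV64x60_WDT_NAME, -1,
					     &example_wdt_pdata,
					     sizeof(example_wdt_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}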
/****************************************/
/* File: sem.h                          */
/****************************************/

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SEM_H
#define _LINUX_SEM_H

#include <uapi/linux/sem.h>

struct task_struct;
struct sem_undo_list;

#ifdef CONFIG_SYSVIPC

struct sysv_sem {
	struct sem_undo_list *undo_list;
};

extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);

#else

struct sysv_sem {
	/* empty */
};

static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	return 0;
}

static inline void exit_sem(struct task_struct *tsk)
{
	return;
}
#endif

#endif /* _LINUX_SEM_H */

/****************************************/
/* File: cyclades.h                     */
/****************************************/

/* SPDX-License-Identifier: GPL-2.0 */
/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $
 * linux/include/linux/cyclades.h
 *
 * This file was initially written by
 * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by
 * Ivan Passos <ivan@cyclades.com>.
 *
 * This file contains the general definitions for the cyclades.c driver
 *$Log: cyclades.h,v $
 *Revision 3.1  2002/01/29 11:36:16  henrique
 *added throttle field on struct cyclades_port to indicate whether the
 *port is throttled or not
 *
 *Revision 3.1  2000/04/19 18:52:52  ivan
 *converted address fields to unsigned long and added fields for physical
 *addresses on cyclades_card structure;
 *
 *Revision 3.0  1998/11/02 14:20:59  ivan
 *added nports field on cyclades_card structure;
 *
 *Revision 2.5  1998/08/03 16:57:01  ivan
 *added cyclades_idle_stats structure;
 *
 *Revision 2.4  1998/06/01 12:09:53  ivan
 *removed closing_wait2 from cyclades_port structure;
 *
 *Revision 2.3  1998/03/16 18:01:12  ivan
 *changes in the cyclades_port structure to get it closer to the
 *standard serial port structure;
 *added constants for new ioctls;
 *
 *Revision 2.2  1998/02/17 16:50:00  ivan
 *changes in the cyclades_port structure (addition of shutdown_wait and
 *chip_rev variables);
 *added constants for new ioctls and for CD1400 rev. numbers.
 *
 *Revision 2.1  1997/10/24 16:03:00  ivan
 *added rflow (which allows enabling the CD1400 special flow control
 *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to
 *cyclades_port structure;
 *added Alpha support
 *
 *Revision 2.0  1997/06/30 10:30:00  ivan
 *added some new doorbell command constants related to IOCTLW and
 *UART error signaling
 *
 *Revision 1.8  1997/06/03 15:30:00  ivan
 *added constant ZFIRM_HLT
 *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin)
 *
 *Revision 1.7  1997/03/26 10:30:00  daniel
 *new entries at the end of cyclades_port struct to reallocate
 *variables illegally allocated within card memory.
 *
 *Revision 1.6  1996/09/09 18:35:30  bentson
 *fold in changes for Cyclom-Z -- including structures for
 *communicating with board as well modest changes to original
 *structures to support new features.
 *
 *Revision 1.5  1995/11/13 21:13:31  bentson
 *changes suggested by Michael Chastain <mec@duracef.shout.net>
 *to support use of this file in non-kernel applications
 *
 *
 */

#ifndef _LINUX_CYCLADES_H
#define _LINUX_CYCLADES_H

#include <uapi/linux/cyclades.h>

/* Per card data structure */
struct cyclades_card {
	void __iomem *base_addr;
	union {
		void __iomem *p9050;
		struct RUNTIME_9060 __iomem *p9060;
	} ctl_addr;
	struct BOARD_CTRL __iomem *board_ctrl;	/* cyz specific */
	int irq;
	unsigned int num_chips;	/* 0 if card absent, -1 if Z/PCI, else Y */
	unsigned int first_line;	/* minor number of first channel on card */
	unsigned int nports;	/* Number of ports in the card */
	int bus_index;		/* address shift - 0 for ISA, 1 for PCI */
	int intr_enabled;	/* FW Interrupt flag - 0 disabled, 1 enabled */
	u32 hw_ver;
	spinlock_t card_lock;
	struct cyclades_port *ports;
};

/***************************************
 * Memory access functions/macros      *
 * (required to support Alpha systems) *
 ***************************************/
#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0)
#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0)
#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0)

/*
 * Statistics counters
 */
struct cyclades_icount {
	__u32 cts, dsr, rng, dcd, tx, rx;
	__u32 frame, parity, overrun, brk;
	__u32 buf_overrun;
};
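/*
 * Editorial sketch (hypothetical helper, not from the driver): each
 * cyclades_card describes one board; first_line holds the minor number of
 * its first channel and nports the channel count, so a global tty line
 * number maps to an index into card->ports as follows.
 */
static inline int example_line_to_index(const struct cyclades_card *card,
					unsigned int line)
{
	if (line < card->first_line || line >= card->first_line + card->nports)
		return -1;			/* line not on this card */
	return line - card->first_line;		/* index into card->ports */
}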
/*
 * This is our internal structure for each serial port's state.
 *
 * Many fields are paralleled by the structure used by the serial_struct
 * structure.
 *
 * For definitions of the flags field, see tty.h
 */
struct cyclades_port {
	int magic;
	struct tty_port port;
	struct cyclades_card *card;
	union {
		struct {
			void __iomem *base_addr;
		} cyy;
		struct {
			struct CH_CTRL __iomem *ch_ctrl;
			struct BUF_CTRL __iomem *buf_ctrl;
		} cyz;
	} u;
	int line;
	int flags;		/* defined in tty.h */
	int type;		/* UART type */
	int read_status_mask;
	int ignore_status_mask;
	int timeout;
	int xmit_fifo_size;
	int cor1,cor2,cor3,cor4,cor5;
	int tbpr,tco,rbpr,rco;
	int baud;
	int rflow;
	int rtsdtr_inv;
	int chip_rev;
	int custom_divisor;
	u8 x_char;		/* to be pushed out ASAP */
	int breakon;
	int breakoff;
	int xmit_head;
	int xmit_tail;
	int xmit_cnt;
	int default_threshold;
	int default_timeout;
	unsigned long rflush_count;
	struct cyclades_monitor mon;
	struct cyclades_idle_stats idle_stats;
	struct cyclades_icount icount;
	struct completion shutdown_wait;
	int throttle;
#ifdef CONFIG_CYZ_INTR
	struct timer_list rx_full_timer;
#endif
};

#define CLOSING_WAIT_DELAY 30*HZ
#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE
#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF

#define CyMAX_CHIPS_PER_CARD 8
#define CyMAX_CHAR_FIFO 12
#define CyPORTS_PER_CHIP 4
#define CD1400_MAX_SPEED 115200

#define CyISA_Ywin 0x2000

#define CyPCI_Ywin 0x4000
#define CyPCI_Yctl 0x80
#define CyPCI_Zctl CTRL_WINDOW_SIZE
#define CyPCI_Zwin 0x80000
#define CyPCI_Ze_win (2 * CyPCI_Zwin)

#define PCI_DEVICE_ID_MASK 0x06

/**** CD1400 registers ****/
#define CD1400_REV_G 0x46
#define CD1400_REV_J 0x48

#define CyRegSize 0x0400
#define Cy_HwReset 0x1400
#define Cy_ClrIntr 0x1800
#define Cy_EpldRev 0x1e00

/* Global Registers */
#define CyGFRCR (0x40*2)
#define CyRevE (44)
#define CyCAR (0x68*2)
#define CyCHAN_0 (0x00)
#define CyCHAN_1 (0x01)
#define CyCHAN_2 (0x02)
#define CyCHAN_3 (0x03)
#define CyGCR (0x4B*2)
#define CyCH0_SERIAL (0x00)
#define CyCH0_PARALLEL (0x80)
#define CySVRR (0x67*2)
#define CySRModem (0x04)
#define CySRTransmit (0x02)
#define CySRReceive (0x01)
#define CyRICR (0x44*2)
#define CyTICR (0x45*2)
#define CyMICR (0x46*2)
#define CyICR0 (0x00)
#define CyICR1 (0x01)
#define CyICR2 (0x02)
#define CyICR3 (0x03)
#define CyRIR (0x6B*2)
#define CyTIR (0x6A*2)
#define CyMIR (0x69*2)
#define CyIRDirEq (0x80)
#define CyIRBusy (0x40)
#define CyIRUnfair (0x20)
#define CyIRContext (0x1C)
#define CyIRChannel (0x03)
#define CyPPR (0x7E*2)
#define CyCLOCK_20_1MS (0x27)
#define CyCLOCK_25_1MS (0x31)
#define CyCLOCK_25_5MS (0xf4)
#define CyCLOCK_60_1MS (0x75)
#define CyCLOCK_60_2MS (0xea)

/* Virtual Registers */
#define CyRIVR (0x43*2)
#define CyTIVR (0x42*2)
#define CyMIVR (0x41*2)
#define CyIVRMask (0x07)
#define CyIVRRxEx (0x07)
#define CyIVRRxOK (0x03)
#define CyIVRTxOK (0x02)
#define CyIVRMdmOK (0x01)
#define CyTDR (0x63*2)
#define CyRDSR (0x62*2)
#define CyTIMEOUT (0x80)
#define CySPECHAR (0x70)
#define CyBREAK (0x08)
#define CyPARITY (0x04)
#define CyFRAME (0x02)
#define CyOVERRUN (0x01)
#define CyMISR (0x4C*2)	/* see CyMCOR_ and CyMSVR_ for bits*/
#define CyEOSRR (0x60*2)

/* Channel Registers */
#define CyLIVR (0x18*2)
#define CyMscsr (0x01)
#define CyTdsr (0x02)
#define CyRgdsr (0x03)
#define CyRedsr (0x07)
#define CyCCR (0x05*2)
/* Format 1 */
#define CyCHAN_RESET (0x80)
#define CyCHIP_RESET (0x81)
#define CyFlushTransFIFO (0x82)
/* Format 2 */
#define CyCOR_CHANGE (0x40)
#define CyCOR1ch (0x02)
#define CyCOR2ch (0x04)
#define CyCOR3ch (0x08)
/* Format 3 */
#define CySEND_SPEC_1 (0x21)
#define CySEND_SPEC_2 (0x22)
#define CySEND_SPEC_3 (0x23)
#define CySEND_SPEC_4 (0x24)
/* Format 4 */
#define CyCHAN_CTL (0x10)
#define CyDIS_RCVR (0x01)
#define CyENB_RCVR (0x02)
#define CyDIS_XMTR (0x04)
#define CyENB_XMTR (0x08)
#define CySRER (0x06*2)
#define CyMdmCh (0x80)
#define CyRxData (0x10)
#define CyTxRdy (0x04)
#define CyTxMpty (0x02)
#define CyNNDT (0x01)
#define CyCOR1 (0x08*2)
#define CyPARITY_NONE (0x00)
#define CyPARITY_0 (0x20)
#define CyPARITY_1 (0xA0)
#define CyPARITY_E (0x40)
#define CyPARITY_O (0xC0)
#define Cy_1_STOP (0x00)
#define Cy_1_5_STOP (0x04)
#define Cy_2_STOP (0x08)
#define Cy_5_BITS (0x00)
#define Cy_6_BITS (0x01)
#define Cy_7_BITS (0x02)
#define Cy_8_BITS (0x03)
#define CyCOR2 (0x09*2)
#define CyIXM (0x80)
#define CyTxIBE (0x40)
#define CyETC (0x20)
#define CyAUTO_TXFL (0x60)
#define CyLLM (0x10)
#define CyRLM (0x08)
#define CyRtsAO (0x04)
#define CyCtsAE (0x02)
#define CyDsrAE (0x01)
#define CyCOR3 (0x0A*2)
#define CySPL_CH_DRANGE (0x80)	/* special character detect range */
#define CySPL_CH_DET1 (0x40)	/* enable special character detection on SCHR4-SCHR3 */
#define CyFL_CTRL_TRNSP (0x20)	/* Flow Control Transparency */
#define CySPL_CH_DET2 (0x10)	/* Enable special character detection on SCHR2-SCHR1 */
#define CyREC_FIFO (0x0F)	/* Receive FIFO threshold */
#define CyCOR4 (0x1E*2)
#define CyCOR5 (0x1F*2)
#define CyCCSR (0x0B*2)
#define CyRxEN (0x80)
#define CyRxFloff (0x40)
#define CyRxFlon (0x20)
#define CyTxEN (0x08)
#define CyTxFloff (0x04)
#define CyTxFlon (0x02)
#define CyRDCR (0x0E*2)
#define CySCHR1 (0x1A*2)
#define CySCHR2 (0x1B*2)
#define CySCHR3 (0x1C*2)
#define CySCHR4 (0x1D*2)
#define CySCRL (0x22*2)
#define CySCRH (0x23*2)
#define CyLNC (0x24*2)
#define CyMCOR1 (0x15*2)
#define CyMCOR2 (0x16*2)
#define CyRTPR (0x21*2)
#define CyMSVR1 (0x6C*2)
#define CyMSVR2 (0x6D*2)
#define CyANY_DELTA (0xF0)
#define CyDSR (0x80)
#define CyCTS (0x40)
#define CyRI (0x20)
#define CyDCD (0x10)
#define CyDTR (0x02)
#define CyRTS (0x01)
#define CyPVSR (0x6F*2)
#define CyRBPR (0x78*2)
#define CyRCOR (0x7C*2)
#define CyTBPR (0x72*2)
#define CyTCOR (0x76*2)

/* Custom Registers */
#define CyPLX_VER (0x3400)
#define PLX_9050 0x0b
#define PLX_9060 0x0c
#define PLX_9080 0x0d

/***************************************************************************/

#endif /* _LINUX_CYCLADES_H */
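/*
 * Editorial sketch appended after cyclades.h (hedged, hypothetical helper,
 * not from the driver): CD1400 per-channel registers are banked, so a
 * caller is assumed to first select the channel in CyCAR and then touch
 * the channel's modem-signal registers, e.g. asserting RTS/DTR via
 * CyMSVR1/CyMSVR2. "base" is an assumed ISA-style mapped window
 * (bus_index 0, no address shift).
 */
static inline void example_raise_rts_dtr(void __iomem *base, u8 channel)
{
	cy_writeb(base + CyCAR, channel & 0x03);	/* select channel 0..3 */
	cy_writeb(base + CyMSVR1, CyRTS);		/* assert RTS */
	cy_writeb(base + CyMSVR2, CyDTR);		/* assert DTR */
}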
/****************************************/
/* File: rcupdate.h                     */
/****************************************/

/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/processor.h>
#include <linux/cpumask.h>

#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a) (*(long *)(&(a)))

/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void synchronize_rcu(void);

#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
void __rcu_read_unlock(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
	preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	preempt_enable();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active __read_mostly;
void rcu_sched_clock_irq(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);

#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
#endif /* CONFIG_NO_HZ_FULL */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
void rcu_nocb_flush_deferred_wakeup(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void) { }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
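/*
 * Editorial sketch of the canonical usage of the interfaces declared
 * above: readers bracket accesses with rcu_read_lock()/rcu_read_unlock(),
 * and an updater hands retired objects to call_rcu() so they are freed
 * only after a grace period. "struct foo", "gp" and the helpers are
 * hypothetical; kfree() needs <linux/slab.h>, and rcu_dereference() is
 * defined later in this header.
 */
struct foo {
	int a;
	struct rcu_head rh;
};
struct foo __rcu *gp;

static void foo_reclaim(struct rcu_head *rh)
{
	kfree(container_of(rh, struct foo, rh));	/* runs after a GP */
}

static int foo_read_a(void)
{
	int a = 0;
	struct foo *p;

	rcu_read_lock();
	p = rcu_dereference(gp);	/* fetch under read-side protection */
	if (p)
		a = p->a;
	rcu_read_unlock();
	return a;
}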
/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU read-side critical sections are forbidden in the inner idle loop,
 * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
 * will happily ignore any such read-side critical sections.  However,
 * things like powertop need tracepoints in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, calling the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is
 * on the order of a million or so, even on 32-bit systems).  It is
 * not legal to block within RCU_NONIDLE(), nor is it permissible to
 * transfer control either into or out of RCU_NONIDLE()'s statement.
 */
#define RCU_NONIDLE(a) \
	do { \
		rcu_irq_enter_irqson(); \
		do { a; } while (0); \
		rcu_irq_exit_irqson(); \
	} while (0)

/*
 * Note a quasi-voluntary context switch for RCU-tasks's benefit.
 * This is a macro rather than an inline function to avoid #include hell.
 */
#ifdef CONFIG_TASKS_RCU
#define rcu_tasks_qs(t) \
	do { \
		if (READ_ONCE((t)->rcu_tasks_holdout)) \
			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
	} while (0)
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
#define rcu_tasks_qs(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

/**
 * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the cond_resched()
 * machinery were to be shut off, as some advocate for PREEMPT kernels.
 */
#define cond_resched_tasks_rcu_qs() \
do { \
	rcu_tasks_qs(current); \
	cond_resched(); \
} while (0)

/**
 * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states
 * @old_ts: jiffies at start of processing.
 *
 * This helper is for long-running softirq handlers, such as NAPI threads in
 * networking. The caller should initialize the variable passed in as @old_ts
 * at the beginning of the softirq handler. When invoked frequently, this macro
 * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will
 * provide both RCU and RCU-Tasks quiescent states. Note that this macro
 * modifies its old_ts argument.
 *
 * Because regions of code that have disabled softirq act as RCU read-side
 * critical sections, this macro should be invoked with softirq (and
 * preemption) enabled.
 *
 * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would
 * have more chance to invoke schedule() calls and provide necessary quiescent
 * states. As a contrast, calling cond_resched() only won't achieve the same
 * effect because cond_resched() does not provide RCU-Tasks quiescent states.
 */
#define rcu_softirq_qs_periodic(old_ts) \
do { \
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \
	    time_after(jiffies, (old_ts) + HZ / 10)) { \
		preempt_disable(); \
		rcu_softirq_qs(); \
		preempt_enable(); \
		(old_ts) = jiffies; \
	} \
} while (0)

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif
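/*
 * Editorial sketch of the usage pattern the rcu_softirq_qs_periodic()
 * kernel-doc above describes: a long-running softirq-context poll loop
 * initializes old_ts once, then invokes the macro each iteration.
 * example_poll_one() is hypothetical.
 */
static bool example_poll_one(void);

static void example_long_softirq_loop(void)
{
	unsigned long old_ts = jiffies;		/* per the kernel-doc above */

	while (example_poll_one()) {
		/* reports RCU and RCU-Tasks QSes at most every 100 ms */
		rcu_softirq_qs_periodic(old_ts);
	}
}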
/*
 * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls
 * are needed for dynamic initialization and destruction of rcu_head
 * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for
 * dynamic initialization and destruction of statically allocated rcu_head
 * structures.  However, rcu_head structures allocated dynamically in the
 * heap don't need any initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head) { }
static inline void destroy_rcu_head(struct rcu_head *head) { }
static inline void init_rcu_head_on_stack(struct rcu_head *head) { }
static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
int rcu_read_lock_sched_held(void);
int rcu_read_lock_any_held(void);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a) do { } while (0)
# define rcu_lock_release(a) do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

static inline int rcu_read_lock_sched_held(void)
{
	return !preemptible();
}

static inline int rcu_read_lock_any_held(void)
{
	return !preemptible();
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
 * @c: condition to check
 * @s: informative message
 */
#define RCU_LOCKDEP_WARN(c, s) \
	do { \
		static bool __section(.data.unlikely) __warned; \
		if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
			__warned = true; \
			lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
		} \
	} while (0)

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check() \
	do { \
		rcu_preempt_sleep_check(); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
				 "Illegal context switch in RCU-bh read-side critical section"); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
				 "Illegal context switch in RCU-sched read-side critical section"); \
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */
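/*
 * Editorial sketch: under CONFIG_PROVE_RCU, code pairs RCU_LOCKDEP_WARN()
 * with the rcu_read_lock*_held() predicates above to complain exactly once
 * when reached outside the required read-side critical section. The
 * function below is hypothetical.
 */
static void example_requires_rcu_read_lock(void)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "example_requires_rcu_read_lock() outside of RCU read-side section");
	/* ... walk an RCU-protected structure here ... */
}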
*/ #ifdef __CHECKER__ #define rcu_check_sparse(p, space) \ ((void)(((typeof(*p) space *)p) == p)) #else /* #ifdef __CHECKER__ */ #define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ #define __rcu_access_pointer(p, space) \ ({ \ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(_________p1)); \ }) #define __rcu_dereference_check(p, c, space) \ ({ \ /* Dependency order vs. p above. */ \ typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(________p1)); \ }) #define __rcu_dereference_protected(p, c, space) \ ({ \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) #define rcu_dereference_raw(p) \ ({ \ /* Dependency order vs. p above. */ \ typeof(p) ________p1 = READ_ONCE(p); \ ((typeof(*p) __force __kernel *)(________p1)); \ }) /** * RCU_INITIALIZER() - statically initialize an RCU-protected global variable * @v: The value to statically initialize with. */ #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) /** * rcu_assign_pointer() - assign to RCU-protected pointer * @p: pointer to assign to * @v: value to assign (publish) * * Assigns the specified value to the specified RCU-protected * pointer, ensuring that any concurrent RCU readers will see * any prior initialization. * * Inserts memory barriers on architectures that require them * (which is most of them), and also prevents the compiler from * reordering the code that initializes the structure after the pointer * assignment. More importantly, this call documents which pointers * will be dereferenced by RCU read-side code. * * In some special cases, you may use RCU_INIT_POINTER() instead * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due * to the fact that it does not constrain either the CPU or the compiler. * That said, using RCU_INIT_POINTER() when you should have used * rcu_assign_pointer() is a very bad thing that results in * impossible-to-diagnose memory corruption. So please be careful. * See the RCU_INIT_POINTER() comment header for details. * * Note that rcu_assign_pointer() evaluates each of its arguments only * once, appearances notwithstanding. One of the "extra" evaluations * is in typeof() and the other visible only to sparse (__CHECKER__), * neither of which actually execute the argument. As with most cpp * macros, this execute-arguments-only-once property is important, so * please be careful when making changes to rcu_assign_pointer() and the * other macros that it invokes. */ #define rcu_assign_pointer(p, v) \ do { \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ else \ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ } while (0) /** * rcu_replace_pointer() - replace an RCU pointer, returning its old value * @rcu_ptr: RCU pointer, whose old value is returned * @ptr: regular pointer * @c: the lockdep conditions under which the dereference will take place * * Perform a replacement, where @rcu_ptr is an RCU-annotated * pointer and @c is the lockdep argument that is passed to the * rcu_dereference_protected() call used to read that pointer. The old * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr. 
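 *
 * A typical use is to swap in a new version of a structure and free the
 * old one after a grace period, as in this sketch (gp, newp and gp_lock
 * are illustrative):
 *
 *	old = rcu_replace_pointer(gp, newp, lockdep_is_held(&gp_lock));
 *	synchronize_rcu();
 *	kfree(old);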
*/ #define rcu_replace_pointer(rcu_ptr, ptr, c) \ ({ \ typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ rcu_assign_pointer((rcu_ptr), (ptr)); \ __tmp; \ }) /** * rcu_swap_protected() - swap an RCU and a regular pointer * @rcu_ptr: RCU pointer * @ptr: regular pointer * @c: the conditions under which the dereference will take place * * Perform swap(@rcu_ptr, @ptr) where @rcu_ptr is an RCU-annotated pointer and * @c is the argument that is passed to the rcu_dereference_protected() call * used to read that pointer. */ #define rcu_swap_protected(rcu_ptr, ptr, c) do { \ typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ rcu_assign_pointer((rcu_ptr), (ptr)); \ (ptr) = __tmp; \ } while (0) /** * rcu_access_pointer() - fetch RCU pointer with no dereferencing * @p: The pointer to read * * Return the value of the specified RCU-protected pointer, but omit the * lockdep checks for being in an RCU read-side critical section. This is * useful when the value of this pointer is accessed, but the pointer is * not dereferenced, for example, when testing an RCU-protected pointer * against NULL. Although rcu_access_pointer() may also be used in cases * where update-side locks prevent the value of the pointer from changing, * you should instead use rcu_dereference_protected() for this use case. * * It is also permissible to use rcu_access_pointer() when read-side * access to the pointer was removed at least one grace period ago, as * is the case in the context of the RCU callback that is freeing up * the data, or after a synchronize_rcu() returns. This can be useful * when tearing down multi-linked structures after a grace period * has elapsed. */ #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) /** * rcu_dereference_check() - rcu_dereference with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * Do an rcu_dereference(), but check that the conditions under which the * dereference will take place are correct. Typically the conditions * indicate the various locking conditions that should be held at that * point. The check should return true if the conditions are satisfied. * An implicit check for being in an RCU read-side critical section * (rcu_read_lock()) is included. * * For example: * * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); * * could be used to indicate to lockdep that foo->bar may only be dereferenced * if either rcu_read_lock() is held, or that the lock required to replace * the bar struct at foo->bar is held. * * Note that the list of conditions may also include indications of when a lock * need not be held, for example during initialisation or destruction of the * target struct: * * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || * atomic_read(&foo->usage) == 0); * * Inserts memory barriers on architectures that require them * (currently only the Alpha), prevents the compiler from refetching * (and from merging fetches), and, more importantly, documents exactly * which pointers are protected by RCU and checks that the pointer is * annotated as __rcu. */ #define rcu_dereference_check(p, c) \ __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu) /** * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * This is the RCU-bh counterpart to rcu_dereference_check(). 
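 *
 * A sketch of a typical call (gp and gp_lock are illustrative), from code
 * that runs either under the update-side lock or with softirqs disabled:
 *
 *	p = rcu_dereference_bh_check(gp, lockdep_is_held(&gp_lock));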
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
				__rcu)

/*
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks cause tracing to lock up the system.
 *
 * The no-tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * the READ_ONCE(). This is useful in cases where update-side locks
 * prevent the value of the pointer from changing. Please note that this
 * primitive does *not* prevent the compiler from repeating this reference
 * or combining it with other references, so it should not be used without
 * protection of appropriate locks.
 *
 * This function is only for update-side use. Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), (c), __rcu)


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
 * @p: The pointer to hand off
 *
 * This is simply an identity function, but it documents where a pointer
 * is handed off from RCU to some other synchronization mechanism, for
 * example, reference counting or locking. In C11, it would map to
 * kill_dependency(). It could be used as follows::
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	long_lived = is_long_lived(p);
 *	if (long_lived) {
 *		if (!atomic_inc_not_zero(p->refcnt))
 *			long_lived = false;
 *		else
 *			p = rcu_pointer_handoff(p);
 *	}
 *	rcu_read_unlock();
 */
#define rcu_pointer_handoff(p) (p)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
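 *
 * A minimal reader sketch (gp and do_something_with() are illustrative):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();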
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections. One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked. This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested. Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal. Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
 * read-side critical sections may be preempted and they may also block, but
 * only when acquiring spinlocks that are subject to priority inheritance.
 */
static __always_inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock() used illegally while idle");
}

/*
 * So where is rcu_write_lock()? It does not exist, as there is no
 * way for writers to lock out RCU readers. This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other. The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well. RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * In most situations, rcu_read_unlock() is immune from deadlock.
 * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
 * is responsible for deboosting, which it does via rt_mutex_unlock().
 * Unfortunately, this function acquires the scheduler's runqueue and
 * priority-inheritance spinlocks. This means that deadlock could result
 * if the caller of rcu_read_unlock() already holds one of these locks or
 * any lock that is ever acquired while holding them.
 *
 * That said, RCU readers are never priority boosted unless they were
 * preempted. Therefore, one way to avoid deadlock is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with one of
 * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
 * a number of ways, for example, by invoking preempt_disable() before
 * the critical section's outermost rcu_read_lock().
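 *
 * A sketch of that preempt_disable() approach (purely illustrative):
 *
 *	preempt_disable();
 *	rcu_read_lock();
 *	... read-side accesses ...
 *	rcu_read_unlock();
 *	preempt_enable();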
 *
 * Given that the set of locks acquired by rt_mutex_unlock() might change
 * at any time, a somewhat more future-proofed approach is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with irqs disabled.
 * This approach relies on the fact that rt_mutex_unlock() currently only
 * acquires irq-disabled locks.
 *
 * The second of these two approaches is best in most situations,
 * however, the first approach can also be useful, at least to those
 * developers willing to keep abreast of the set of locks acquired by
 * rt_mutex_unlock().
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock() used illegally while idle");
	__release(RCU);
	__rcu_read_unlock();
	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is the equivalent of rcu_read_lock(), but also disables softirqs.
 * Note that anything else that disables softirqs can also serve as
 * an RCU read-side critical section.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_bh() used illegally while idle");
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}

/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is the equivalent of rcu_read_lock(), but disables preemption.
 * Read-side critical sections can also be introduced by anything else
 * that disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep.
 */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 * @p: The pointer to be initialized.
 * @v: The value to initialize the pointer to.
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler. These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer *or*
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() *and*
 *
 *	a.	You have not made *any* reader-visible changes to
 *		this structure since then *or*
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure. (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption. That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure. So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer *after* you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 * ordering guarantees for either the CPU or the compiler.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		rcu_check_sparse(p, __rcu); \
		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
	} while (0)

/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 * @p: The pointer to be initialized.
 * @v: The value to initialize the pointer to.
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = RCU_INITIALIZER(v)

/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kfree_rcu()?
 */
#define __is_kfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kfree_rcu(head, offset) \
	do { \
		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
		kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
	} while (0)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr: pointer to kfree
 * @rhf: the name of the struct rcu_head within the type of @ptr.
 *
 * Many rcu callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue. Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
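 *
 * A sketch of the usual pattern (struct foo and its fields are
 * illustrative):
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *	...
 *	kfree_rcu(fp, rcu);
 *
 * which replaces an open-coded call_rcu(&fp->rcu, some_kfree_callback).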
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu(). If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rhf) \
	do { \
		typeof (ptr) ___p = (ptr); \
		\
		if (___p) \
			__kfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
	} while (0)

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE
#define smp_mb__after_unlock_lock()	smp_mb() /* Full ordering for lock. */
#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */


/* Has the specified rcu_head structure been handed to call_rcu()? */

/**
 * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
 * @rhp: The rcu_head structure to initialize.
 *
 * If you intend to invoke rcu_head_after_call_rcu() to test whether a
 * given rcu_head structure has already been passed to call_rcu(), then
 * you must also invoke this rcu_head_init() function on it just after
 * allocating that structure. Calls to this function must not race with
 * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
 */
static inline void rcu_head_init(struct rcu_head *rhp)
{
	rhp->func = (rcu_callback_t)~0L;
}

/**
 * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()?
 * @rhp: The rcu_head structure to test.
 * @f: The function passed to call_rcu() along with @rhp.
 *
 * Returns @true if the @rhp has been passed to call_rcu() with @f,
 * and @false otherwise. Emits a warning in any other case, including
 * the case where @rhp has already been invoked after a grace period.
 * Calls to this function must not race with callback invocation. One way
 * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
 * in an RCU read-side critical section that includes a read-side fetch
 * of the pointer to the structure containing @rhp.
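 *
 * A sketch of the intended usage (p, p->rh and my_cb are illustrative):
 *
 *	rcu_head_init(&p->rh);
 *	...
 *	call_rcu(&p->rh, my_cb);
 *	...
 *	if (rcu_head_after_call_rcu(&p->rh, my_cb))
 *		pr_info("callback already queued\n");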
*/ static inline bool rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) { rcu_callback_t func = READ_ONCE(rhp->func); if (func == f) return true; WARN_ON_ONCE(func != (rcu_callback_t)~0L); return false; } #endif /* __LINUX_RCUPDATE_H */ cb710.h 0000644 00000012563 14722070374 0005546 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * cb710/cb710.h * * Copyright by Michał Mirosław, 2008-2009 */ #ifndef LINUX_CB710_DRIVER_H #define LINUX_CB710_DRIVER_H #include <linux/io.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/mmc/host.h> struct cb710_slot; typedef int (*cb710_irq_handler_t)(struct cb710_slot *); /* per-virtual-slot structure */ struct cb710_slot { struct platform_device pdev; void __iomem *iobase; cb710_irq_handler_t irq_handler; }; /* per-device structure */ struct cb710_chip { struct pci_dev *pdev; void __iomem *iobase; unsigned platform_id; #ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS atomic_t slot_refs_count; #endif unsigned slot_mask; unsigned slots; spinlock_t irq_lock; struct cb710_slot slot[0]; }; /* NOTE: cb710_chip.slots is modified only during device init/exit and * they are all serialized wrt themselves */ /* cb710_chip.slot_mask values */ #define CB710_SLOT_MMC 1 #define CB710_SLOT_MS 2 #define CB710_SLOT_SM 4 /* slot port accessors - so the logic is more clear in the code */ #define CB710_PORT_ACCESSORS(t) \ static inline void cb710_write_port_##t(struct cb710_slot *slot, \ unsigned port, u##t value) \ { \ iowrite##t(value, slot->iobase + port); \ } \ \ static inline u##t cb710_read_port_##t(struct cb710_slot *slot, \ unsigned port) \ { \ return ioread##t(slot->iobase + port); \ } \ \ static inline void cb710_modify_port_##t(struct cb710_slot *slot, \ unsigned port, u##t set, u##t clear) \ { \ iowrite##t( \ (ioread##t(slot->iobase + port) & ~clear)|set, \ slot->iobase + port); \ } CB710_PORT_ACCESSORS(8) CB710_PORT_ACCESSORS(16) CB710_PORT_ACCESSORS(32) void cb710_pci_update_config_reg(struct pci_dev *pdev, int reg, uint32_t and, uint32_t xor); void cb710_set_irq_handler(struct cb710_slot *slot, cb710_irq_handler_t handler); /* some device struct walking */ static inline struct cb710_slot *cb710_pdev_to_slot( struct platform_device *pdev) { return container_of(pdev, struct cb710_slot, pdev); } static inline struct cb710_chip *cb710_slot_to_chip(struct cb710_slot *slot) { return dev_get_drvdata(slot->pdev.dev.parent); } static inline struct device *cb710_slot_dev(struct cb710_slot *slot) { return &slot->pdev.dev; } static inline struct device *cb710_chip_dev(struct cb710_chip *chip) { return &chip->pdev->dev; } /* debugging aids */ #ifdef CONFIG_CB710_DEBUG void cb710_dump_regs(struct cb710_chip *chip, unsigned dump); #else #define cb710_dump_regs(c, d) do {} while (0) #endif #define CB710_DUMP_REGS_MMC 0x0F #define CB710_DUMP_REGS_MS 0x30 #define CB710_DUMP_REGS_SM 0xC0 #define CB710_DUMP_REGS_ALL 0xFF #define CB710_DUMP_REGS_MASK 0xFF #define CB710_DUMP_ACCESS_8 0x100 #define CB710_DUMP_ACCESS_16 0x200 #define CB710_DUMP_ACCESS_32 0x400 #define CB710_DUMP_ACCESS_ALL 0x700 #define CB710_DUMP_ACCESS_MASK 0x700 #endif /* LINUX_CB710_DRIVER_H */ /* * cb710/sgbuf2.h * * Copyright by Michał Mirosław, 2008-2009 */ #ifndef LINUX_CB710_SG_H #define LINUX_CB710_SG_H #include <linux/highmem.h> #include <linux/scatterlist.h> /* * 32-bit PIO mapping sg iterator * * Hides scatterlist access issues - fragment boundaries, alignment, page * mapping - for drivers using 
32-bit-word-at-a-time PIO (i.e. PCI devices without DMA support).
 *
 * Best-case reading (transfer from device):
 *   sg_miter_start(, SG_MITER_TO_SG);
 *   cb710_sg_dwiter_write_from_io();
 *   sg_miter_stop();
 *
 * Best-case writing (transfer to device):
 *   sg_miter_start(, SG_MITER_FROM_SG);
 *   cb710_sg_dwiter_read_to_io();
 *   sg_miter_stop();
 */

uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter);
void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data);

/**
 * cb710_sg_dwiter_write_from_io - transfer data to mapped buffer from 32-bit IO port
 * @miter: sg mapping iter
 * @port: PIO port - IO or MMIO address
 * @count: number of 32-bit words to transfer
 *
 * Description:
 *   Reads @count 32-bit words from register @port and stores them in the
 *   buffer iterated by @miter. Data that would overflow the buffer is
 *   silently ignored. The iterator is advanced by 4*@count bytes or to
 *   the buffer's end, whichever is closer.
 *
 * Context:
 *   IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
 */
static inline void cb710_sg_dwiter_write_from_io(struct sg_mapping_iter *miter,
	void __iomem *port, size_t count)
{
	while (count-- > 0)
		cb710_sg_dwiter_write_next_block(miter, ioread32(port));
}

/**
 * cb710_sg_dwiter_read_to_io - transfer data to 32-bit IO port from mapped buffer
 * @miter: sg mapping iter
 * @port: PIO port - IO or MMIO address
 * @count: number of 32-bit words to transfer
 *
 * Description:
 *   Writes @count 32-bit words to register @port from the buffer iterated
 *   through @miter. If the buffer ends before @count words are written,
 *   the missing data is replaced by zeroes. @miter is advanced by
 *   4*@count bytes or to the buffer's end, whichever is closer.
 *
 * Context:
 *   IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
 */
static inline void cb710_sg_dwiter_read_to_io(struct sg_mapping_iter *miter,
	void __iomem *port, size_t count)
{
	while (count-- > 0)
		iowrite32(cb710_sg_dwiter_read_next_block(miter), port);
}

#endif /* LINUX_CB710_SG_H */
resource_ext.h 0000644 00000003517 14722070374 0007440 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015, Intel Corporation
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 */
#ifndef _LINUX_RESOURCE_EXT_H
#define _LINUX_RESOURCE_EXT_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/slab.h>

/* Represent resource window for bridge devices */
struct resource_win {
	struct resource res;	/* In master (CPU) address space */
	resource_size_t offset;	/* Translation offset for bridge */
};

/*
 * Common resource list management data structure and interfaces to support
 * ACPI, PNP and PCI host bridge etc.
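 *
 * A sketch of typical iteration over such a list (head is an illustrative
 * list_head populated by the caller):
 *
 *	struct resource_entry *entry;
 *
 *	resource_list_for_each_entry(entry, &head)
 *		pr_info("window %pR, offset %pa\n",
 *			entry->res, &entry->offset);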
*/ struct resource_entry { struct list_head node; struct resource *res; /* In master (CPU) address space */ resource_size_t offset; /* Translation offset for bridge */ struct resource __res; /* Default storage for res */ }; extern struct resource_entry * resource_list_create_entry(struct resource *res, size_t extra_size); extern void resource_list_free(struct list_head *head); static inline void resource_list_add(struct resource_entry *entry, struct list_head *head) { list_add(&entry->node, head); } static inline void resource_list_add_tail(struct resource_entry *entry, struct list_head *head) { list_add_tail(&entry->node, head); } static inline void resource_list_del(struct resource_entry *entry) { list_del(&entry->node); } static inline void resource_list_free_entry(struct resource_entry *entry) { kfree(entry); } static inline void resource_list_destroy_entry(struct resource_entry *entry) { resource_list_del(entry); resource_list_free_entry(entry); } #define resource_list_for_each_entry(entry, list) \ list_for_each_entry((entry), (list), node) #define resource_list_for_each_entry_safe(entry, tmp, list) \ list_for_each_entry_safe((entry), (tmp), (list), node) #endif /* _LINUX_RESOURCE_EXT_H */ refcount.h 0000644 00000024467 14722070374 0006565 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Variant of atomic_t specialized for reference counts. * * The interface matches the atomic_t interface (to aid in porting) but only * provides the few functions one should use for reference counting. * * Saturation semantics * ==================== * * refcount_t differs from atomic_t in that the counter saturates at * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the * counter and causing 'spurious' use-after-free issues. In order to avoid the * cost associated with introducing cmpxchg() loops into all of the saturating * operations, we temporarily allow the counter to take on an unchecked value * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow * or overflow has occurred. Although this is racy when multiple threads * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly * equidistant from 0 and INT_MAX we minimise the scope for error: * * INT_MAX REFCOUNT_SATURATED UINT_MAX * 0 (0x7fff_ffff) (0xc000_0000) (0xffff_ffff) * +--------------------------------+----------------+----------------+ * <---------- bad value! ----------> * * (in a signed view of the world, the "bad value" range corresponds to * a negative counter value). * * As an example, consider a refcount_inc() operation that causes the counter * to overflow: * * int old = atomic_fetch_add_relaxed(r); * // old is INT_MAX, refcount now INT_MIN (0x8000_0000) * if (old < 0) * atomic_set(r, REFCOUNT_SATURATED); * * If another thread also performs a refcount_inc() operation between the two * atomic operations, then the count will continue to edge closer to 0. If it * reaches a value of 1 before /any/ of the threads reset it to the saturated * value, then a concurrent refcount_dec_and_test() may erroneously free the * underlying object. Given the precise timing details involved with the * round-robin scheduling of each thread manipulating the refcount and the need * to hit the race multiple times in succession, there doesn't appear to be a * practical avenue of attack even if using refcount_add() operations with * larger increments. 
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire, for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc, this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; it also provides a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */

#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;

/**
 * typedef refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
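 *
 * A sketch of the lookup-side pattern this underpins, via the
 * refcount_inc_not_zero() wrapper (gp, obj and obj->ref are
 * illustrative):
 *
 *	rcu_read_lock();
 *	obj = rcu_dereference(gp);
 *	if (obj && !refcount_inc_not_zero(&obj->ref))
 *		obj = NULL;
 *	rcu_read_unlock();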
* * Return: false if the passed refcount is 0, true otherwise */ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) { int old = refcount_read(r); do { if (!old) break; } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i)); if (unlikely(old < 0 || old + i < 0)) refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF); return old; } /** * refcount_add - add a value to a refcount * @i: the value to add to the refcount * @r: the refcount * * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN. * * Provides no memory ordering, it is assumed the caller has guaranteed the * object memory to be stable (RCU, etc.). It does provide a control dependency * and thereby orders future stores. See the comment on top. * * Use of this function is not recommended for the normal reference counting * use case in which references are taken and released one at a time. In these * cases, refcount_inc(), or one of its variants, should instead be used to * increment a reference count. */ static inline void refcount_add(int i, refcount_t *r) { int old = atomic_fetch_add_relaxed(i, &r->refs); if (unlikely(!old)) refcount_warn_saturate(r, REFCOUNT_ADD_UAF); else if (unlikely(old < 0 || old + i < 0)) refcount_warn_saturate(r, REFCOUNT_ADD_OVF); } /** * refcount_inc_not_zero - increment a refcount unless it is 0 * @r: the refcount to increment * * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED * and WARN. * * Provides no memory ordering, it is assumed the caller has guaranteed the * object memory to be stable (RCU, etc.). It does provide a control dependency * and thereby orders future stores. See the comment on top. * * Return: true if the increment was successful, false otherwise */ static inline __must_check bool refcount_inc_not_zero(refcount_t *r) { return refcount_add_not_zero(1, r); } /** * refcount_inc - increment a refcount * @r: the refcount to increment * * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN. * * Provides no memory ordering, it is assumed the caller already has a * reference on the object. * * Will WARN if the refcount is 0, as this represents a possible use-after-free * condition. */ static inline void refcount_inc(refcount_t *r) { refcount_add(1, r); } /** * refcount_sub_and_test - subtract from a refcount and test if it is 0 * @i: amount to subtract from the refcount * @r: the refcount * * Similar to atomic_dec_and_test(), but it will WARN, return false and * ultimately leak on underflow and will fail to decrement when saturated * at REFCOUNT_SATURATED. * * Provides release memory ordering, such that prior loads and stores are done * before, and provides an acquire ordering on success such that free() * must come after. * * Use of this function is not recommended for the normal reference counting * use case in which references are taken and released one at a time. In these * cases, refcount_dec(), or one of its variants, should instead be used to * decrement a reference count. 
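 *
 * A sketch of the usual release path, via the refcount_dec_and_test()
 * wrapper (obj and obj->ref are illustrative):
 *
 *	if (refcount_dec_and_test(&obj->ref))
 *		kfree(obj);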
* * Return: true if the resulting refcount is 0, false otherwise */ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r) { int old = atomic_fetch_sub_release(i, &r->refs); if (old == i) { smp_acquire__after_ctrl_dep(); return true; } if (unlikely(old < 0 || old - i < 0)) refcount_warn_saturate(r, REFCOUNT_SUB_UAF); return false; } /** * refcount_dec_and_test - decrement a refcount and test if it is 0 * @r: the refcount * * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to * decrement when saturated at REFCOUNT_SATURATED. * * Provides release memory ordering, such that prior loads and stores are done * before, and provides an acquire ordering on success such that free() * must come after. * * Return: true if the resulting refcount is 0, false otherwise */ static inline __must_check bool refcount_dec_and_test(refcount_t *r) { return refcount_sub_and_test(1, r); } /** * refcount_dec - decrement a refcount * @r: the refcount * * Similar to atomic_dec(), it will WARN on underflow and fail to decrement * when saturated at REFCOUNT_SATURATED. * * Provides release memory ordering, such that prior loads and stores are done * before. */ static inline void refcount_dec(refcount_t *r) { if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1)) refcount_warn_saturate(r, REFCOUNT_DEC_LEAK); } extern __must_check bool refcount_dec_if_one(refcount_t *r); extern __must_check bool refcount_dec_not_one(refcount_t *r); extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock, unsigned long *flags); #endif /* _LINUX_REFCOUNT_H */ serio.h 0000644 00000010437 14722070374 0006051 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 1999-2002 Vojtech Pavlik */ #ifndef _SERIO_H #define _SERIO_H #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <uapi/linux/serio.h> extern struct bus_type serio_bus; struct serio { void *port_data; char name[32]; char phys[32]; char firmware_id[128]; bool manual_bind; struct serio_device_id id; /* Protects critical sections from port's interrupt handler */ spinlock_t lock; int (*write)(struct serio *, unsigned char); int (*open)(struct serio *); void (*close)(struct serio *); int (*start)(struct serio *); void (*stop)(struct serio *); struct serio *parent; /* Entry in parent->children list */ struct list_head child_node; struct list_head children; /* Level of nesting in serio hierarchy */ unsigned int depth; /* * serio->drv is accessed from interrupt handlers; when modifying * caller should acquire serio->drv_mutex and serio->lock. */ struct serio_driver *drv; /* Protects serio->drv so attributes can pin current driver */ struct mutex drv_mutex; struct device dev; struct list_head node; /* * For use by PS/2 layer when several ports share hardware and * may get indigestion when exposed to concurrent access (i8042). 
*/ struct mutex *ps2_cmd_mutex; }; #define to_serio_port(d) container_of(d, struct serio, dev) struct serio_driver { const char *description; const struct serio_device_id *id_table; bool manual_bind; void (*write_wakeup)(struct serio *); irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); int (*connect)(struct serio *, struct serio_driver *drv); int (*reconnect)(struct serio *); int (*fast_reconnect)(struct serio *); void (*disconnect)(struct serio *); void (*cleanup)(struct serio *); struct device_driver driver; }; #define to_serio_driver(d) container_of(d, struct serio_driver, driver) int serio_open(struct serio *serio, struct serio_driver *drv); void serio_close(struct serio *serio); void serio_rescan(struct serio *serio); void serio_reconnect(struct serio *serio); irqreturn_t serio_interrupt(struct serio *serio, unsigned char data, unsigned int flags); void __serio_register_port(struct serio *serio, struct module *owner); /* use a define to avoid include chaining to get THIS_MODULE */ #define serio_register_port(serio) \ __serio_register_port(serio, THIS_MODULE) void serio_unregister_port(struct serio *serio); void serio_unregister_child_port(struct serio *serio); int __must_check __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name); /* use a define to avoid include chaining to get THIS_MODULE & friends */ #define serio_register_driver(drv) \ __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME) void serio_unregister_driver(struct serio_driver *drv); /** * module_serio_driver() - Helper macro for registering a serio driver * @__serio_driver: serio_driver struct * * Helper macro for serio drivers which do not do anything special in * module init/exit. This eliminates a lot of boilerplate. Each module * may only use this macro once, and calling it replaces module_init() * and module_exit(). */ #define module_serio_driver(__serio_driver) \ module_driver(__serio_driver, serio_register_driver, \ serio_unregister_driver) static inline int serio_write(struct serio *serio, unsigned char data) { if (serio->write) return serio->write(serio, data); else return -1; } static inline void serio_drv_write_wakeup(struct serio *serio) { if (serio->drv && serio->drv->write_wakeup) serio->drv->write_wakeup(serio); } /* * Use the following functions to manipulate serio's per-port * driver-specific data. */ static inline void *serio_get_drvdata(struct serio *serio) { return dev_get_drvdata(&serio->dev); } static inline void serio_set_drvdata(struct serio *serio, void *data) { dev_set_drvdata(&serio->dev, data); } /* * Use the following functions to protect critical sections in * driver code from port's interrupt handler */ static inline void serio_pause_rx(struct serio *serio) { spin_lock_irq(&serio->lock); } static inline void serio_continue_rx(struct serio *serio) { spin_unlock_irq(&serio->lock); } #endif objagg.h 0000644 00000003677 14722070374 0006171 0 ustar 00 /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /* Copyright (c) 2018 Mellanox Technologies. 
All rights reserved */ #ifndef _OBJAGG_H #define _OBJAGG_H struct objagg_ops { size_t obj_size; bool (*delta_check)(void *priv, const void *parent_obj, const void *obj); void * (*delta_create)(void *priv, void *parent_obj, void *obj); void (*delta_destroy)(void *priv, void *delta_priv); void * (*root_create)(void *priv, void *obj, unsigned int root_id); #define OBJAGG_OBJ_ROOT_ID_INVALID UINT_MAX void (*root_destroy)(void *priv, void *root_priv); }; struct objagg; struct objagg_obj; struct objagg_hints; const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj); const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj); const void *objagg_obj_raw(const struct objagg_obj *objagg_obj); struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj); void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj); struct objagg *objagg_create(const struct objagg_ops *ops, struct objagg_hints *hints, void *priv); void objagg_destroy(struct objagg *objagg); struct objagg_obj_stats { unsigned int user_count; unsigned int delta_user_count; /* includes delta object users */ }; struct objagg_obj_stats_info { struct objagg_obj_stats stats; struct objagg_obj *objagg_obj; /* associated object */ bool is_root; }; struct objagg_stats { unsigned int root_count; unsigned int stats_info_count; struct objagg_obj_stats_info stats_info[]; }; const struct objagg_stats *objagg_stats_get(struct objagg *objagg); void objagg_stats_put(const struct objagg_stats *objagg_stats); enum objagg_opt_algo_type { OBJAGG_OPT_ALGO_SIMPLE_GREEDY, }; struct objagg_hints *objagg_hints_get(struct objagg *objagg, enum objagg_opt_algo_type opt_algo_type); void objagg_hints_put(struct objagg_hints *objagg_hints); const struct objagg_stats * objagg_hints_stats_get(struct objagg_hints *objagg_hints); #endif omap-gpmc.h 0000644 00000005352 14722070374 0006610 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * OMAP GPMC (General Purpose Memory Controller) defines */ #include <linux/platform_data/gpmc-omap.h> #define GPMC_CONFIG_WP 0x00000005 /* IRQ numbers in GPMC IRQ domain for legacy boot use */ #define GPMC_IRQ_FIFOEVENTENABLE 0 #define GPMC_IRQ_COUNT_EVENT 1 /** * gpmc_nand_ops - Interface between NAND and GPMC * @nand_write_buffer_empty: get the NAND write buffer empty status. */ struct gpmc_nand_ops { bool (*nand_writebuffer_empty)(void); }; struct gpmc_nand_regs; struct gpmc_onenand_info { bool sync_read; bool sync_write; int burst_len; }; #if IS_ENABLED(CONFIG_OMAP_GPMC) struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, int cs); /** * gpmc_omap_onenand_set_timings - set optimized sync timings. * @cs: Chip Select Region * @freq: Chip frequency * @latency: Burst latency cycle count * @info: Structure describing parameters used * * Sets optimized timings for the @cs region based on @freq and @latency. * Updates the @info structure based on the GPMC settings. 
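 *
 * A sketch of a caller (values and error handling are illustrative):
 *
 *	struct gpmc_onenand_info info;
 *	int ret;
 *
 *	ret = gpmc_omap_onenand_set_timings(dev, cs, freq, latency, &info);
 *	if (!ret && info.sync_read)
 *		pr_debug("sync burst length %d\n", info.burst_len);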
*/ int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq, int latency, struct gpmc_onenand_info *info); #else static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, int cs) { return NULL; } static inline int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq, int latency, struct gpmc_onenand_info *info) { return -EINVAL; } #endif /* CONFIG_OMAP_GPMC */ extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, struct gpmc_settings *gpmc_s, struct gpmc_device_timings *dev_t); struct device_node; extern int gpmc_get_client_irq(unsigned irq_config); extern unsigned int gpmc_ticks_to_ns(unsigned int ticks); extern void gpmc_cs_write_reg(int cs, int idx, u32 val); extern int gpmc_calc_divider(unsigned int sync_clk); extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t, const struct gpmc_settings *s); extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p); extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base); extern void gpmc_cs_free(int cs); extern int gpmc_configure(int cmd, int wval); extern void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p); extern void omap3_gpmc_save_context(void); extern void omap3_gpmc_restore_context(void); struct gpmc_timings; struct omap_nand_platform_data; struct omap_onenand_platform_data; #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) extern int gpmc_onenand_init(struct omap_onenand_platform_data *d); #else #define board_onenand_data NULL static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d) { return 0; } #endif dqblk_qtree.h 0000644 00000004276 14722070374 0007231 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions of structures and functions for quota formats using trie */ #ifndef _LINUX_DQBLK_QTREE_H #define _LINUX_DQBLK_QTREE_H #include <linux/types.h> /* Numbers of blocks needed for updates - we count with the smallest * possible block size (1024) */ #define QTREE_INIT_ALLOC 4 #define QTREE_INIT_REWRITE 2 #define QTREE_DEL_ALLOC 0 #define QTREE_DEL_REWRITE 6 struct dquot; struct kqid; /* Operations */ struct qtree_fmt_operations { void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); /* Convert given entry from in memory format to disk one */ void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); /* Convert given entry from disk format to in memory one */ int (*is_id)(void *disk, struct dquot *dquot); /* Is this structure for given id? 
 */
};

/* In-memory copy of version specific information */
struct qtree_mem_dqinfo {
	struct super_block *dqi_sb;	/* Sb quota is on */
	int dqi_type;			/* Quota type */
	unsigned int dqi_blocks;	/* # of blocks in quota file */
	unsigned int dqi_free_blk;	/* First block in list of free blocks */
	unsigned int dqi_free_entry;	/* First block with free entry */
	unsigned int dqi_blocksize_bits; /* Block size of quota file */
	unsigned int dqi_entry_size;	/* Size of quota entry in quota file */
	unsigned int dqi_usable_bs;	/* Space usable in block for quota data */
	unsigned int dqi_qtree_depth;	/* Precomputed depth of quota tree */
	const struct qtree_fmt_operations *dqi_ops;	/* Operations for entry manipulation */
};

int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk);
static inline int qtree_depth(struct qtree_mem_dqinfo *info)
{
	unsigned int epb = info->dqi_usable_bs >> 2;
	unsigned long long entries = epb;
	int i;

	for (i = 1; entries < (1ULL << 32); i++)
		entries *= epb;
	return i;
}
int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid);

#endif /* _LINUX_DQBLK_QTREE_H */
debugobjects.h 0000644 00000007624 14722070374 0007374 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DEBUGOBJECTS_H
#define _LINUX_DEBUGOBJECTS_H

#include <linux/list.h>
#include <linux/spinlock.h>

enum debug_obj_state {
	ODEBUG_STATE_NONE,
	ODEBUG_STATE_INIT,
	ODEBUG_STATE_INACTIVE,
	ODEBUG_STATE_ACTIVE,
	ODEBUG_STATE_DESTROYED,
	ODEBUG_STATE_NOTAVAILABLE,
	ODEBUG_STATE_MAX,
};

struct debug_obj_descr;

/**
 * struct debug_obj - representation of a tracked object
 * @node:	hlist node to link the object into the tracker list
 * @state:	tracked object state
 * @astate:	current active state
 * @object:	pointer to the real object
 * @descr:	pointer to an object type specific debug description structure
 */
struct debug_obj {
	struct hlist_node	node;
	enum debug_obj_state	state;
	unsigned int		astate;
	void			*object;
	struct debug_obj_descr	*descr;
};

/**
 * struct debug_obj_descr - object type specific debug description structure
 *
 * @name:		name of the object type
 * @debug_hint:		function returning address, which has an associated
 *			kernel symbol, to allow identifying the object
 * @is_static_object:	return true if the obj is static, otherwise return false
 * @fixup_init:		fixup function, which is called when the init check
 *			fails.
All fixup functions must return true if fixup * was successful, otherwise return false * @fixup_activate: fixup function, which is called when the activate check * fails * @fixup_destroy: fixup function, which is called when the destroy check * fails * @fixup_free: fixup function, which is called when the free check * fails * @fixup_assert_init: fixup function, which is called when the assert_init * check fails */ struct debug_obj_descr { const char *name; void *(*debug_hint)(void *addr); bool (*is_static_object)(void *addr); bool (*fixup_init)(void *addr, enum debug_obj_state state); bool (*fixup_activate)(void *addr, enum debug_obj_state state); bool (*fixup_destroy)(void *addr, enum debug_obj_state state); bool (*fixup_free)(void *addr, enum debug_obj_state state); bool (*fixup_assert_init)(void *addr, enum debug_obj_state state); }; #ifdef CONFIG_DEBUG_OBJECTS extern void debug_object_init (void *addr, struct debug_obj_descr *descr); extern void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr); extern int debug_object_activate (void *addr, struct debug_obj_descr *descr); extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); extern void debug_object_free (void *addr, struct debug_obj_descr *descr); extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr); /* * Active state: * - Set at 0 upon initialization. * - Must return to 0 before deactivation. */ extern void debug_object_active_state(void *addr, struct debug_obj_descr *descr, unsigned int expect, unsigned int next); extern void debug_objects_early_init(void); extern void debug_objects_mem_init(void); #else static inline void debug_object_init (void *addr, struct debug_obj_descr *descr) { } static inline void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { } static inline int debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; } static inline void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { } static inline void debug_object_destroy (void *addr, struct debug_obj_descr *descr) { } static inline void debug_object_free (void *addr, struct debug_obj_descr *descr) { } static inline void debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { } static inline void debug_objects_early_init(void) { } static inline void debug_objects_mem_init(void) { } #endif #ifdef CONFIG_DEBUG_OBJECTS_FREE extern void debug_check_no_obj_freed(const void *address, unsigned long size); #else static inline void debug_check_no_obj_freed(const void *address, unsigned long size) { } #endif #endif page-flags.h 0000644 00000066233 14722070374 0006743 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Macros for manipulating and testing page->flags */ #ifndef PAGE_FLAGS_H #define PAGE_FLAGS_H #include <linux/types.h> #include <linux/bug.h> #include <linux/mmdebug.h> #ifndef __GENERATING_BOUNDS_H #include <linux/mm_types.h> #include <generated/bounds.h> #endif /* !__GENERATING_BOUNDS_H */ /* * Various page->flags bits: * * PG_reserved is set for special pages. The "struct page" of such a page * should in general not be touched (e.g. set dirty) except by its owner. * Pages marked as PG_reserved include: * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS, * initrd, HW tables) * - Pages reserved or allocated early during boot (before the page allocator * was initialized). 
This includes (depending on the architecture) the * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much * much more. Once (if ever) freed, PG_reserved is cleared and they will * be given to the page allocator. * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying * to read/write these pages might end badly. Don't touch! * - The zero page(s) * - Pages not added to the page allocator when onlining a section because * they were excluded via the online_page_callback() or because they are * PG_hwpoison. * - Pages allocated in the context of kexec/kdump (loaded kernel image, * control pages, vmcoreinfo) * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are * not marked PG_reserved (as they might be in use by somebody else who does * not respect the caching strategy). * - Pages part of an offline section (struct pages of offline sections should * not be trusted as they will be initialized when first onlined). * - MCA pages on ia64 * - Pages holding CPU notes for POWER Firmware Assisted Dump * - Device memory (e.g. PMEM, DAX, HMM) * Some PG_reserved pages will be excluded from the hibernation image. * PG_reserved in general does not hinder anybody from dumping or swapping * and is no longer required for remap_pfn_range(). ioremap might require it. * Consequently, PG_reserved for a page mapped into user space can indicate * the zero page, the vDSO, MMIO pages or device memory. * * The PG_private bitflag is set on pagecache pages if they contain filesystem * specific data (which is normally at page->private). It can be used by * private allocations for its own usage. * * During initiation of disk I/O, PG_locked is set. This bit is set before I/O * and cleared when writeback _starts_ or when read _completes_. PG_writeback * is set before writeback starts and cleared when it finishes. * * PG_locked also pins a page in pagecache, and blocks truncation of the file * while it is held. * * page_waitqueue(page) is a wait queue of all tasks waiting for the page * to become unlocked. * * PG_uptodate tells whether the page's contents are valid. When a read * completes, the page becomes uptodate, unless a disk I/O error happened. * * PG_referenced, PG_reclaim are used for page reclaim for anonymous and * file-backed pagecache (see mm/vmscan.c). * * PG_error is set to indicate that an I/O error occurred on this page. * * PG_arch_1 is an architecture specific page state bit. The generic code * guarantees that this bit is cleared for a page when it is first entered into * the page cache. * * PG_hwpoison indicates that a page got corrupted in hardware and contains * data with incorrect ECC bits that triggered a machine check. Accessing is * not safe since it may cause another machine check. Don't touch! */ /* * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break * locked- and dirty-page accounting. * * The page flags field is split into two parts, the main flags area * which extends from the low bits upwards, and the fields area which * extends from the high bits downwards. * * | FIELD | ... | FLAGS | * N-1 ^ 0 * (NR_PAGEFLAGS) * * The fields area is reserved for fields mapping zone, node (for NUMA) and * SPARSEMEM section (for variants of SPARSEMEM that require section ids like * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP). */ enum pageflags { PG_locked, /* Page is locked. Don't touch. */ PG_referenced, PG_uptodate, PG_dirty, PG_lru, PG_active, PG_workingset, PG_waiters, /* Page has waiters, check its waitqueue.
Must be bit #7 and in the same byte as "PG_locked" */ PG_error, PG_slab, PG_owner_priv_1, /* Owner use. If pagecache, fs may use */ PG_arch_1, PG_reserved, PG_private, /* If pagecache, has fs-private data */ PG_private_2, /* If pagecache, has fs aux data */ PG_writeback, /* Page is under writeback */ PG_head, /* A head page */ PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_reclaim, /* To be reclaimed asap */ PG_swapbacked, /* Page is backed by RAM/swap */ PG_unevictable, /* Page is "unevictable" */ #ifdef CONFIG_MMU PG_mlocked, /* Page is vma mlocked */ #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED PG_uncached, /* Page has been mapped as uncached */ #endif #ifdef CONFIG_MEMORY_FAILURE PG_hwpoison, /* hardware poisoned page. Don't touch */ #endif #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) PG_young, PG_idle, #endif __NR_PAGEFLAGS, /* Filesystems */ PG_checked = PG_owner_priv_1, /* SwapBacked */ PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */ /* Two page bits are conscripted by FS-Cache to maintain local caching * state. These bits are set on pages belonging to the netfs's inodes * when those inodes are being locally cached. */ PG_fscache = PG_private_2, /* page backed by cache */ /* XEN */ /* Pinned in Xen as a read-only pagetable page. */ PG_pinned = PG_owner_priv_1, /* Pinned as part of domain save (see xen_mm_pin_all()). */ PG_savepinned = PG_dirty, /* Has a grant mapping of another (foreign) domain's page. */ PG_foreign = PG_owner_priv_1, /* Remapped by swiotlb-xen. */ PG_xen_remapped = PG_owner_priv_1, /* SLOB */ PG_slob_free = PG_private, /* Compound pages. Stored in first tail page's flags */ PG_double_map = PG_private_2, /* non-lru isolated movable page */ PG_isolated = PG_reclaim, }; #ifndef __GENERATING_BOUNDS_H struct page; /* forward declaration */ static inline struct page *compound_head(struct page *page) { unsigned long head = READ_ONCE(page->compound_head); if (unlikely(head & 1)) return (struct page *) (head - 1); return page; } static __always_inline int PageTail(struct page *page) { return READ_ONCE(page->compound_head) & 1; } static __always_inline int PageCompound(struct page *page) { return test_bit(PG_head, &page->flags) || PageTail(page); } #define PAGE_POISON_PATTERN -1l static inline int PagePoisoned(const struct page *page) { return page->flags == PAGE_POISON_PATTERN; } #ifdef CONFIG_DEBUG_VM void page_init_poison(struct page *page, size_t size); #else static inline void page_init_poison(struct page *page, size_t size) { } #endif /* * Page flags policies wrt compound pages * * PF_POISONED_CHECK: * check if this struct page is poisoned/uninitialized * * PF_ANY: * the page flag is relevant for small, head and tail pages. * * PF_HEAD: * for a compound page, all operations related to the page flag are * applied to the head page. * * PF_ONLY_HEAD: * for compound page, callers only ever operate on the head page. * * PF_NO_TAIL: * modifications of the page flag must be done on small or head pages, * checks can be done on tail pages too. * * PF_NO_COMPOUND: * the page flag is not relevant for compound pages.
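 *
 * As an illustrative sketch (not part of this header), a declaration
 * such as PAGEFLAG(Dirty, dirty, PF_HEAD) further below expands,
 * roughly and omitting the poison check, to:
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * i.e. the PF_HEAD policy redirects every operation to the head page.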
*/ #define PF_POISONED_CHECK(page) ({ \ VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \ page; }) #define PF_ANY(page, enforce) PF_POISONED_CHECK(page) #define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page)) #define PF_ONLY_HEAD(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(PageTail(page), page); \ PF_POISONED_CHECK(page); }) #define PF_NO_TAIL(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ PF_POISONED_CHECK(compound_head(page)); }) #define PF_NO_COMPOUND(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \ PF_POISONED_CHECK(page); }) /* * Macros to create function definitions for page flags */ #define TESTPAGEFLAG(uname, lname, policy) \ static __always_inline int Page##uname(struct page *page) \ { return test_bit(PG_##lname, &policy(page, 0)->flags); } #define SETPAGEFLAG(uname, lname, policy) \ static __always_inline void SetPage##uname(struct page *page) \ { set_bit(PG_##lname, &policy(page, 1)->flags); } #define CLEARPAGEFLAG(uname, lname, policy) \ static __always_inline void ClearPage##uname(struct page *page) \ { clear_bit(PG_##lname, &policy(page, 1)->flags); } #define __SETPAGEFLAG(uname, lname, policy) \ static __always_inline void __SetPage##uname(struct page *page) \ { __set_bit(PG_##lname, &policy(page, 1)->flags); } #define __CLEARPAGEFLAG(uname, lname, policy) \ static __always_inline void __ClearPage##uname(struct page *page) \ { __clear_bit(PG_##lname, &policy(page, 1)->flags); } #define TESTSETFLAG(uname, lname, policy) \ static __always_inline int TestSetPage##uname(struct page *page) \ { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } #define TESTCLEARFLAG(uname, lname, policy) \ static __always_inline int TestClearPage##uname(struct page *page) \ { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } #define PAGEFLAG(uname, lname, policy) \ TESTPAGEFLAG(uname, lname, policy) \ SETPAGEFLAG(uname, lname, policy) \ CLEARPAGEFLAG(uname, lname, policy) #define __PAGEFLAG(uname, lname, policy) \ TESTPAGEFLAG(uname, lname, policy) \ __SETPAGEFLAG(uname, lname, policy) \ __CLEARPAGEFLAG(uname, lname, policy) #define TESTSCFLAG(uname, lname, policy) \ TESTSETFLAG(uname, lname, policy) \ TESTCLEARFLAG(uname, lname, policy) #define TESTPAGEFLAG_FALSE(uname) \ static inline int Page##uname(const struct page *page) { return 0; } #define SETPAGEFLAG_NOOP(uname) \ static inline void SetPage##uname(struct page *page) { } #define CLEARPAGEFLAG_NOOP(uname) \ static inline void ClearPage##uname(struct page *page) { } #define __CLEARPAGEFLAG_NOOP(uname) \ static inline void __ClearPage##uname(struct page *page) { } #define TESTSETFLAG_FALSE(uname) \ static inline int TestSetPage##uname(struct page *page) { return 0; } #define TESTCLEARFLAG_FALSE(uname) \ static inline int TestClearPage##uname(struct page *page) { return 0; } #define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \ SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname) #define TESTSCFLAG_FALSE(uname) \ TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname) __PAGEFLAG(Locked, locked, PF_NO_TAIL) PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL) PAGEFLAG(Referenced, referenced, PF_HEAD) TESTCLEARFLAG(Referenced, referenced, PF_HEAD) __SETPAGEFLAG(Referenced, referenced, PF_HEAD) PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD) __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD) PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, 
PF_HEAD) PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) TESTCLEARFLAG(Active, active, PF_HEAD) PAGEFLAG(Workingset, workingset, PF_HEAD) TESTCLEARFLAG(Workingset, workingset, PF_HEAD) __PAGEFLAG(Slab, slab, PF_NO_TAIL) __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL) PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ /* Xen */ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) /* * Private page markings that may be used by the filesystem that owns the page * for its own purposes. * - PG_private and PG_private_2 cause releasepage() and co to be invoked */ PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY) __CLEARPAGEFLAG(Private, private, PF_ANY) PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY) /* * Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. */ TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL) TESTSCFLAG(Writeback, writeback, PF_NO_TAIL) PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL) PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND) TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND) #ifdef CONFIG_HIGHMEM /* * Must use a macro here due to header dependency issues. page_zone() is not * available at this point. 
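 *
 * A minimal usage sketch (illustrative only, not part of this header):
 * highmem pages have no permanent kernel mapping, so callers typically do
 *
 *	void *addr = kmap(page);
 *	... access the data ...
 *	kunmap(page);
 *
 * where kmap() only needs to create a temporary mapping when
 * PageHighMem(page) is true; lowmem pages are returned directly.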
*/ #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p)) #else PAGEFLAG_FALSE(HighMem) #endif #ifdef CONFIG_SWAP static __always_inline int PageSwapCache(struct page *page) { #ifdef CONFIG_THP_SWAP page = compound_head(page); #endif return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags); } SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) #else PAGEFLAG_FALSE(SwapCache) #endif PAGEFLAG(Unevictable, unevictable, PF_HEAD) __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD) TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD) #ifdef CONFIG_MMU PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL) __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL) TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL) #else PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked) TESTSCFLAG_FALSE(Mlocked) #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND) #else PAGEFLAG_FALSE(Uncached) #endif #ifdef CONFIG_MEMORY_FAILURE PAGEFLAG(HWPoison, hwpoison, PF_ANY) TESTSCFLAG(HWPoison, hwpoison, PF_ANY) #define __PG_HWPOISON (1UL << PG_hwpoison) extern bool set_hwpoison_free_buddy_page(struct page *page); #else PAGEFLAG_FALSE(HWPoison) static inline bool set_hwpoison_free_buddy_page(struct page *page) { return false; } #define __PG_HWPOISON 0 #endif #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) TESTPAGEFLAG(Young, young, PF_ANY) SETPAGEFLAG(Young, young, PF_ANY) TESTCLEARFLAG(Young, young, PF_ANY) PAGEFLAG(Idle, idle, PF_ANY) #endif /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. * * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON * bit; and then page->mapping points, not to an anon_vma, but to a private * structure which KSM associates with that merged page. See ksm.h. * * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable * page, and then page->mapping points to a struct address_space. * * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" * refers to user virtual address space into which the page is mapped. */ #define PAGE_MAPPING_ANON 0x1 #define PAGE_MAPPING_MOVABLE 0x2 #define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) static __always_inline int PageMappingFlags(struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; } static __always_inline int PageAnon(struct page *page) { page = compound_head(page); return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; } static __always_inline int __PageMovable(struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE; } #ifdef CONFIG_KSM /* * A KSM page is one of those write-protected "shared pages" or "merged pages" * which KSM maps into multiple mms, wherever identical anonymous page content * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any * anon_vma, but to that page's node of the stable tree.
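 *
 * An illustrative decode of the mapping bits described above (sketch,
 * not part of this header):
 *
 *	unsigned long m = (unsigned long)page->mapping & PAGE_MAPPING_FLAGS;
 *
 *	m == 0:                    pagecache or unmapped; mapping is a
 *	                           struct address_space (or NULL)
 *	m == PAGE_MAPPING_ANON:    anonymous; mapping is an anon_vma
 *	m == PAGE_MAPPING_MOVABLE: non-lru movable page
 *	m == PAGE_MAPPING_KSM:     KSM page; mapping is a stable tree node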
*/ static __always_inline int PageKsm(struct page *page) { page = compound_head(page); return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM; } #else TESTPAGEFLAG_FALSE(Ksm) #endif u64 stable_page_flags(struct page *page); static inline int PageUptodate(struct page *page) { int ret; page = compound_head(page); ret = test_bit(PG_uptodate, &(page)->flags); /* * Must ensure that the data we read out of the page is loaded * _after_ we've loaded page->flags to check for PageUptodate. * We can skip the barrier if the page is not uptodate, because * we wouldn't be reading anything from it. * * See SetPageUptodate() for the other side of the story. */ if (ret) smp_rmb(); return ret; } static __always_inline void __SetPageUptodate(struct page *page) { VM_BUG_ON_PAGE(PageTail(page), page); smp_wmb(); __set_bit(PG_uptodate, &page->flags); } static __always_inline void SetPageUptodate(struct page *page) { VM_BUG_ON_PAGE(PageTail(page), page); /* * Memory barrier must be issued before setting the PG_uptodate bit, * so that all previous stores issued in order to bring the page * uptodate are actually visible before PageUptodate becomes true. */ smp_wmb(); set_bit(PG_uptodate, &page->flags); } CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL) int test_clear_page_writeback(struct page *page); int __test_set_page_writeback(struct page *page, bool keep_write); #define test_set_page_writeback(page) \ __test_set_page_writeback(page, false) #define test_set_page_writeback_keepwrite(page) \ __test_set_page_writeback(page, true) static inline void set_page_writeback(struct page *page) { test_set_page_writeback(page); } static inline void set_page_writeback_keepwrite(struct page *page) { test_set_page_writeback_keepwrite(page); } __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) static __always_inline void set_compound_head(struct page *page, struct page *head) { WRITE_ONCE(page->compound_head, (unsigned long)head + 1); } static __always_inline void clear_compound_head(struct page *page) { WRITE_ONCE(page->compound_head, 0); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void ClearPageCompound(struct page *page) { BUG_ON(!PageHead(page)); ClearPageHead(page); } #endif #define PG_head_mask ((1UL << PG_head)) #ifdef CONFIG_HUGETLB_PAGE int PageHuge(struct page *page); int PageHeadHuge(struct page *page); bool page_huge_active(struct page *page); #else TESTPAGEFLAG_FALSE(Huge) TESTPAGEFLAG_FALSE(HeadHuge) static inline bool page_huge_active(struct page *page) { return false; } #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * PageHuge() only returns true for hugetlbfs pages, but not for * normal or transparent huge pages. * * PageTransHuge() returns true for both transparent huge and * hugetlbfs pages, but not normal pages. PageTransHuge() can only be * called in the core VM paths where hugetlbfs pages can't exist. */ static inline int PageTransHuge(struct page *page) { VM_BUG_ON_PAGE(PageTail(page), page); return PageHead(page); } /* * PageTransCompound returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known * that hugetlbfs pages aren't involved. */ static inline int PageTransCompound(struct page *page) { return PageCompound(page); } /* * PageTransCompoundMap is the same as PageTransCompound, but it also * guarantees the primary MMU has the entire compound page mapped * through pmd_trans_huge, which in turn guarantees the secondary MMUs * can also map the entire compound page.
This allows the secondary * MMUs to call get_user_pages() only once for each compound page and * to immediately map the entire compound page with a single secondary * MMU fault. If a pmd split happens later, the secondary MMUs * will get an update through the MMU notifier invalidation done by * split_huge_pmd(). * * Unlike PageTransCompound, this is only safe to call while * split_huge_pmd() cannot run from under us, e.g. when protected by the * MMU notifier; otherwise the page->_mapcount check may yield false * positives. * * We have to treat page cache THP differently since every subpage of it * would get _mapcount inc'ed once it is PMD mapped. But it may also be PTE * mapped in the current process, so we compare the subpage's _mapcount * with the compound_mapcount to filter out the PTE-mapped case. */ static inline int PageTransCompoundMap(struct page *page) { struct page *head; if (!PageTransCompound(page)) return 0; if (PageAnon(page)) return atomic_read(&page->_mapcount) < 0; head = compound_head(page); /* File THP is PMD mapped and not PTE mapped */ return atomic_read(&page->_mapcount) == atomic_read(compound_mapcount_ptr(head)); } /* * PageTransTail returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known * that hugetlbfs pages aren't involved. */ static inline int PageTransTail(struct page *page) { return PageTail(page); } /* * PageDoubleMap indicates that the compound page is mapped with PTEs as well * as PMDs. * * This is required for optimization of rmap operations for THP: we can postpone * per-small-page mapcount accounting (and its overhead from atomic operations) * until the first PMD split. * * For such a page, PageDoubleMap means ->_mapcount in all sub-pages is offset * up by one. This reference goes away with the last compound_mapcount. * * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap(). */ static inline int PageDoubleMap(struct page *page) { return PageHead(page) && test_bit(PG_double_map, &page[1].flags); } static inline void SetPageDoubleMap(struct page *page) { VM_BUG_ON_PAGE(!PageHead(page), page); set_bit(PG_double_map, &page[1].flags); } static inline void ClearPageDoubleMap(struct page *page) { VM_BUG_ON_PAGE(!PageHead(page), page); clear_bit(PG_double_map, &page[1].flags); } static inline int TestSetPageDoubleMap(struct page *page) { VM_BUG_ON_PAGE(!PageHead(page), page); return test_and_set_bit(PG_double_map, &page[1].flags); } static inline int TestClearPageDoubleMap(struct page *page) { VM_BUG_ON_PAGE(!PageHead(page), page); return test_and_clear_bit(PG_double_map, &page[1].flags); } #else TESTPAGEFLAG_FALSE(TransHuge) TESTPAGEFLAG_FALSE(TransCompound) TESTPAGEFLAG_FALSE(TransCompoundMap) TESTPAGEFLAG_FALSE(TransTail) PAGEFLAG_FALSE(DoubleMap) TESTSETFLAG_FALSE(DoubleMap) TESTCLEARFLAG_FALSE(DoubleMap) #endif /* * For pages that are never mapped to userspace (and aren't PageSlab), * page_type may be used. Because it is initialised to -1, we invert the * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and * low bits so that an underflow or overflow of page_mapcount() won't be * mistaken for a page type value.
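 *
 * A worked example of the inverted encoding (illustrative): page_type
 * starts out as -1 (0xffffffff); the __SetPageBuddy() helper generated
 * below clears PG_buddy (0x00000080), giving 0xffffff7f, and PageType()
 * then checks
 *
 *	(0xffffff7f & (0xf0000000 | 0x00000080)) == 0xf0000000
 *
 * which holds, while a value being used as a mapcount never has the
 * 0xf0000000 bits set and therefore never matches.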
*/ #define PAGE_TYPE_BASE 0xf0000000 /* Reserve 0x0000007f to catch underflows of page_mapcount */ #define PAGE_MAPCOUNT_RESERVE -128 #define PG_buddy 0x00000080 #define PG_offline 0x00000100 #define PG_kmemcg 0x00000200 #define PG_table 0x00000400 #define PG_guard 0x00000800 #define PageType(page, flag) \ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) static inline int page_has_type(struct page *page) { return (int)page->page_type < PAGE_MAPCOUNT_RESERVE; } #define PAGE_TYPE_OPS(uname, lname) \ static __always_inline int Page##uname(struct page *page) \ { \ return PageType(page, PG_##lname); \ } \ static __always_inline void __SetPage##uname(struct page *page) \ { \ VM_BUG_ON_PAGE(!PageType(page, 0), page); \ page->page_type &= ~PG_##lname; \ } \ static __always_inline void __ClearPage##uname(struct page *page) \ { \ VM_BUG_ON_PAGE(!Page##uname(page), page); \ page->page_type |= PG_##lname; \ } /* * PageBuddy() indicates that the page is free and in the buddy system * (see mm/page_alloc.c). */ PAGE_TYPE_OPS(Buddy, buddy) /* * PageOffline() indicates that the page is logically offline although the * containing section is online. (e.g. inflated in a balloon driver or * not onlined when onlining the section). * The content of these pages is effectively stale. Such pages should not * be touched (read/write/dump/save) except by their owner. */ PAGE_TYPE_OPS(Offline, offline) /* * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on * pages allocated with __GFP_ACCOUNT. It gets cleared on page free. */ PAGE_TYPE_OPS(Kmemcg, kmemcg) /* * Marks pages in use as page tables. */ PAGE_TYPE_OPS(Table, table) /* * Marks guard pages used with debug_pagealloc. */ PAGE_TYPE_OPS(Guard, guard) extern bool is_free_buddy_page(struct page *page); __PAGEFLAG(Isolated, isolated, PF_ANY); /* * If network-based swap is enabled, sl*b must keep track of whether pages * were allocated from pfmemalloc reserves. */ static inline int PageSlabPfmemalloc(struct page *page) { VM_BUG_ON_PAGE(!PageSlab(page), page); return PageActive(page); } static inline void SetPageSlabPfmemalloc(struct page *page) { VM_BUG_ON_PAGE(!PageSlab(page), page); SetPageActive(page); } static inline void __ClearPageSlabPfmemalloc(struct page *page) { VM_BUG_ON_PAGE(!PageSlab(page), page); __ClearPageActive(page); } static inline void ClearPageSlabPfmemalloc(struct page *page) { VM_BUG_ON_PAGE(!PageSlab(page), page); ClearPageActive(page); } #ifdef CONFIG_MMU #define __PG_MLOCKED (1UL << PG_mlocked) #else #define __PG_MLOCKED 0 #endif /* * Flags checked when a page is freed. Pages being freed should not have * these flags set. If they are, there is a problem. */ #define PAGE_FLAGS_CHECK_AT_FREE \ (1UL << PG_lru | 1UL << PG_locked | \ 1UL << PG_private | 1UL << PG_private_2 | \ 1UL << PG_writeback | 1UL << PG_reserved | \ 1UL << PG_slab | 1UL << PG_active | \ 1UL << PG_unevictable | __PG_MLOCKED) /* * Flags checked when a page is prepped for return by the page allocator. * Pages being prepped should not have these flags set. If they are set, * there has been a kernel bug or struct page corruption. * * __PG_HWPOISON is exceptional because it needs to be kept beyond page's * alloc-free cycle to prevent the page from being reused.
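 *
 * As a concrete illustration (assuming, hypothetically, NR_PAGEFLAGS
 * were 24): the mask below would be (1UL << 24) - 1 with the
 * PG_hwpoison bit masked out, so a freshly prepped page must have all
 * of the first 24 flag bits clear except a possibly surviving
 * hwpoison bit.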
*/ #define PAGE_FLAGS_CHECK_AT_PREP \ (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ (1UL << PG_private | 1UL << PG_private_2) /** * page_has_private - Determine if page has private stuff * @page: The page to be checked * * Determine if a page has private stuff, indicating that release routines * should be invoked upon it. */ static inline int page_has_private(struct page *page) { return !!(page->flags & PAGE_FLAGS_PRIVATE); } #undef PF_ANY #undef PF_HEAD #undef PF_ONLY_HEAD #undef PF_NO_TAIL #undef PF_NO_COMPOUND #endif /* !__GENERATING_BOUNDS_H */ #endif /* PAGE_FLAGS_H */ dynamic_debug.h 0000644 00000014071 14722070374 0007520 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DYNAMIC_DEBUG_H #define _DYNAMIC_DEBUG_H #if defined(CONFIG_JUMP_LABEL) #include <linux/jump_label.h> #endif /* * An instance of this structure is created in a special * ELF section at every dynamic debug callsite. At runtime, * the special section is treated as an array of these. */ struct _ddebug { /* * These fields are used to drive the user interface * for selecting and displaying debug callsites. */ const char *modname; const char *function; const char *filename; const char *format; unsigned int lineno:18; /* * The flags field controls the behaviour at the callsite. * The bits here are changed dynamically when the user * writes commands to <debugfs>/dynamic_debug/control */ #define _DPRINTK_FLAGS_NONE 0 #define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */ #define _DPRINTK_FLAGS_INCL_MODNAME (1<<1) #define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2) #define _DPRINTK_FLAGS_INCL_LINENO (1<<3) #define _DPRINTK_FLAGS_INCL_TID (1<<4) #if defined DEBUG #define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT #else #define _DPRINTK_FLAGS_DEFAULT 0 #endif unsigned int flags:8; #ifdef CONFIG_JUMP_LABEL union { struct static_key_true dd_key_true; struct static_key_false dd_key_false; } key; #endif } __attribute__((aligned(8))); #if defined(CONFIG_DYNAMIC_DEBUG) int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname); extern int ddebug_remove_module(const char *mod_name); extern __printf(2, 3) void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); extern int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *modname); struct device; extern __printf(3, 4) void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, const char *fmt, ...); struct net_device; extern __printf(3, 4) void __dynamic_netdev_dbg(struct _ddebug *descriptor, const struct net_device *dev, const char *fmt, ...); struct ib_device; extern __printf(3, 4) void __dynamic_ibdev_dbg(struct _ddebug *descriptor, const struct ib_device *ibdev, const char *fmt, ...); #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ static struct _ddebug __aligned(8) \ __attribute__((section("__verbose"))) name = { \ .modname = KBUILD_MODNAME, \ .function = __func__, \ .filename = __FILE__, \ .format = (fmt), \ .lineno = __LINE__, \ .flags = _DPRINTK_FLAGS_DEFAULT, \ _DPRINTK_KEY_INIT \ } #ifdef CONFIG_JUMP_LABEL #ifdef DEBUG #define _DPRINTK_KEY_INIT .key.dd_key_true = (STATIC_KEY_TRUE_INIT) #define DYNAMIC_DEBUG_BRANCH(descriptor) \ static_branch_likely(&descriptor.key.dd_key_true) #else #define _DPRINTK_KEY_INIT .key.dd_key_false = (STATIC_KEY_FALSE_INIT) #define DYNAMIC_DEBUG_BRANCH(descriptor) \ static_branch_unlikely(&descriptor.key.dd_key_false) #endif #else /* !HAVE_JUMP_LABEL */ #define _DPRINTK_KEY_INIT #ifdef DEBUG #define 
DYNAMIC_DEBUG_BRANCH(descriptor) \ likely(descriptor.flags & _DPRINTK_FLAGS_PRINT) #else #define DYNAMIC_DEBUG_BRANCH(descriptor) \ unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) #endif #endif #define __dynamic_func_call(id, fmt, func, ...) do { \ DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \ if (DYNAMIC_DEBUG_BRANCH(id)) \ func(&id, ##__VA_ARGS__); \ } while (0) #define __dynamic_func_call_no_desc(id, fmt, func, ...) do { \ DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \ if (DYNAMIC_DEBUG_BRANCH(id)) \ func(__VA_ARGS__); \ } while (0) /* * "Factory macro" for generating a call to func, guarded by a * DYNAMIC_DEBUG_BRANCH. The dynamic debug descriptor will be * initialized using the fmt argument. The function will be called with * the address of the descriptor as first argument, followed by all * the varargs. Note that fmt is repeated in invocations of this * macro. */ #define _dynamic_func_call(fmt, func, ...) \ __dynamic_func_call(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__) /* * A variant that does the same, except that the descriptor is not * passed as the first argument to the function; it is only called * with precisely the macro's varargs. */ #define _dynamic_func_call_no_desc(fmt, func, ...) \ __dynamic_func_call_no_desc(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__) #define dynamic_pr_debug(fmt, ...) \ _dynamic_func_call(fmt, __dynamic_pr_debug, \ pr_fmt(fmt), ##__VA_ARGS__) #define dynamic_dev_dbg(dev, fmt, ...) \ _dynamic_func_call(fmt, __dynamic_dev_dbg, \ dev, fmt, ##__VA_ARGS__) #define dynamic_netdev_dbg(dev, fmt, ...) \ _dynamic_func_call(fmt, __dynamic_netdev_dbg, \ dev, fmt, ##__VA_ARGS__) #define dynamic_ibdev_dbg(dev, fmt, ...) \ _dynamic_func_call(fmt, __dynamic_ibdev_dbg, \ dev, fmt, ##__VA_ARGS__) #define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ _dynamic_func_call_no_desc(__builtin_constant_p(prefix_str) ? prefix_str : "hexdump", \ print_hex_dump, \ KERN_DEBUG, prefix_str, prefix_type, \ rowsize, groupsize, buf, len, ascii) #else #include <linux/string.h> #include <linux/errno.h> static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname) { return 0; } static inline int ddebug_remove_module(const char *mod) { return 0; } static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *modname) { if (!strcmp(param, "dyndbg")) { /* avoid pr_warn(), which wants pr_fmt() fully defined */ printk(KERN_WARNING "dyndbg param is supported only in " "CONFIG_DYNAMIC_DEBUG builds\n"); return 0; /* allow and ignore */ } return -EINVAL; } #define dynamic_pr_debug(fmt, ...) \ do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) #define dynamic_dev_dbg(dev, fmt, ...) \ do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) #define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ do { if (0) \ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \ rowsize, groupsize, buf, len, ascii); \ } while (0) #endif #endif sctp.h 0000644 00000053763 14722070374 0005702 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* SCTP kernel reference Implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel reference Implementation * * Various protocol defined structures.
* * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * randall@sctp.chicago.il.us * kmorneau@cisco.com * qxie1@email.mot.com * Sridhar Samudrala <sri@us.ibm.com> * Kevin Gao <kevin.gao@intel.com> * * Any bugs reported to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. */ #ifndef __LINUX_SCTP_H__ #define __LINUX_SCTP_H__ #include <linux/in.h> /* We need in_addr. */ #include <linux/in6.h> /* We need in6_addr. */ #include <linux/skbuff.h> #include <uapi/linux/sctp.h> /* Section 3.1. SCTP Common Header Format */ struct sctphdr { __be16 source; __be16 dest; __be32 vtag; __le32 checksum; }; static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) { return (struct sctphdr *)skb_transport_header(skb); } /* Section 3.2. Chunk Field Descriptions. */ struct sctp_chunkhdr { __u8 type; __u8 flags; __be16 length; }; /* Section 3.2. Chunk Type Values. * [Chunk Type] identifies the type of information contained in the Chunk * Value field. It takes a value from 0 to 254. The value of 255 is * reserved for future use as an extension field. */ enum sctp_cid { SCTP_CID_DATA = 0, SCTP_CID_INIT = 1, SCTP_CID_INIT_ACK = 2, SCTP_CID_SACK = 3, SCTP_CID_HEARTBEAT = 4, SCTP_CID_HEARTBEAT_ACK = 5, SCTP_CID_ABORT = 6, SCTP_CID_SHUTDOWN = 7, SCTP_CID_SHUTDOWN_ACK = 8, SCTP_CID_ERROR = 9, SCTP_CID_COOKIE_ECHO = 10, SCTP_CID_COOKIE_ACK = 11, SCTP_CID_ECN_ECNE = 12, SCTP_CID_ECN_CWR = 13, SCTP_CID_SHUTDOWN_COMPLETE = 14, /* AUTH Extension Section 4.1 */ SCTP_CID_AUTH = 0x0F, /* sctp ndata 5.1. I-DATA */ SCTP_CID_I_DATA = 0x40, /* PR-SCTP Sec 3.2 */ SCTP_CID_FWD_TSN = 0xC0, /* Use hex, as defined in ADDIP sec. 3.1 */ SCTP_CID_ASCONF = 0xC1, SCTP_CID_I_FWD_TSN = 0xC2, SCTP_CID_ASCONF_ACK = 0x80, SCTP_CID_RECONF = 0x82, }; /* enum */ /* Section 3.2 * Chunk Types are encoded such that the highest-order two bits specify * the action that must be taken if the processing endpoint does not * recognize the Chunk Type. */ enum { SCTP_CID_ACTION_DISCARD = 0x00, SCTP_CID_ACTION_DISCARD_ERR = 0x40, SCTP_CID_ACTION_SKIP = 0x80, SCTP_CID_ACTION_SKIP_ERR = 0xc0, }; enum { SCTP_CID_ACTION_MASK = 0xc0, }; /* This flag is used in Chunk Flags for ABORT and SHUTDOWN COMPLETE. * * 3.3.7 Abort Association (ABORT) (6): * The T bit is set to 0 if the sender had a TCB that it destroyed. * If the sender did not have a TCB it should set this bit to 1. */ enum { SCTP_CHUNK_FLAG_T = 0x01 }; /* * Set the T bit * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 14 |Reserved |T| Length = 4 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Chunk Flags: 8 bits * * Reserved: 7 bits * Set to 0 on transmit and ignored on receipt. * * T bit: 1 bit * The T bit is set to 0 if the sender had a TCB that it destroyed. If * the sender did NOT have a TCB it should set this bit to 1. * * Note: Special rules apply to this chunk for verification, please * see Section 8.5.1 for details. */ #define sctp_test_T_bit(c) ((c)->chunk_hdr->flags & SCTP_CHUNK_FLAG_T) /* RFC 2960 * Section 3.2.1 Optional/Variable-length Parameter Format.
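 *
 * A minimal TLV walk over a chunk's parameters might look like this
 * (illustrative sketch; bounds checking is elided, and "params"/"end"
 * are hypothetical):
 *
 *	struct sctp_paramhdr *p = (struct sctp_paramhdr *)params;
 *	while ((char *)p + sizeof(*p) <= end) {
 *		u16 len = ntohs(p->length);	(covers type + length + value)
 *		... dispatch on p->type ...
 *		p = (struct sctp_paramhdr *)((char *)p + ALIGN(len, 4));
 *	}
 *
 * since each parameter is padded out to a 4-byte boundary.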
*/ struct sctp_paramhdr { __be16 type; __be16 length; }; enum sctp_param { /* RFC 2960 Section 3.3.5 */ SCTP_PARAM_HEARTBEAT_INFO = cpu_to_be16(1), /* RFC 2960 Section 3.3.2.1 */ SCTP_PARAM_IPV4_ADDRESS = cpu_to_be16(5), SCTP_PARAM_IPV6_ADDRESS = cpu_to_be16(6), SCTP_PARAM_STATE_COOKIE = cpu_to_be16(7), SCTP_PARAM_UNRECOGNIZED_PARAMETERS = cpu_to_be16(8), SCTP_PARAM_COOKIE_PRESERVATIVE = cpu_to_be16(9), SCTP_PARAM_HOST_NAME_ADDRESS = cpu_to_be16(11), SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = cpu_to_be16(12), SCTP_PARAM_ECN_CAPABLE = cpu_to_be16(0x8000), /* AUTH Extension Section 3 */ SCTP_PARAM_RANDOM = cpu_to_be16(0x8002), SCTP_PARAM_CHUNKS = cpu_to_be16(0x8003), SCTP_PARAM_HMAC_ALGO = cpu_to_be16(0x8004), /* Add-IP: Supported Extensions, Section 4.2 */ SCTP_PARAM_SUPPORTED_EXT = cpu_to_be16(0x8008), /* PR-SCTP Sec 3.1 */ SCTP_PARAM_FWD_TSN_SUPPORT = cpu_to_be16(0xc000), /* Add-IP Extension. Section 3.2 */ SCTP_PARAM_ADD_IP = cpu_to_be16(0xc001), SCTP_PARAM_DEL_IP = cpu_to_be16(0xc002), SCTP_PARAM_ERR_CAUSE = cpu_to_be16(0xc003), SCTP_PARAM_SET_PRIMARY = cpu_to_be16(0xc004), SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005), SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006), /* RE-CONFIG. Section 4 */ SCTP_PARAM_RESET_OUT_REQUEST = cpu_to_be16(0x000d), SCTP_PARAM_RESET_IN_REQUEST = cpu_to_be16(0x000e), SCTP_PARAM_RESET_TSN_REQUEST = cpu_to_be16(0x000f), SCTP_PARAM_RESET_RESPONSE = cpu_to_be16(0x0010), SCTP_PARAM_RESET_ADD_OUT_STREAMS = cpu_to_be16(0x0011), SCTP_PARAM_RESET_ADD_IN_STREAMS = cpu_to_be16(0x0012), }; /* enum */ /* RFC 2960 Section 3.2.1 * The Parameter Types are encoded such that the highest-order two bits * specify the action that must be taken if the processing endpoint does * not recognize the Parameter Type. * */ enum { SCTP_PARAM_ACTION_DISCARD = cpu_to_be16(0x0000), SCTP_PARAM_ACTION_DISCARD_ERR = cpu_to_be16(0x4000), SCTP_PARAM_ACTION_SKIP = cpu_to_be16(0x8000), SCTP_PARAM_ACTION_SKIP_ERR = cpu_to_be16(0xc000), }; enum { SCTP_PARAM_ACTION_MASK = cpu_to_be16(0xc000), }; /* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */ struct sctp_datahdr { __be32 tsn; __be16 stream; __be16 ssn; __u32 ppid; __u8 payload[0]; }; struct sctp_data_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_datahdr data_hdr; }; struct sctp_idatahdr { __be32 tsn; __be16 stream; __be16 reserved; __be32 mid; union { __u32 ppid; __be32 fsn; }; __u8 payload[0]; }; struct sctp_idata_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_idatahdr data_hdr; }; /* DATA Chunk Specific Flags */ enum { SCTP_DATA_MIDDLE_FRAG = 0x00, SCTP_DATA_LAST_FRAG = 0x01, SCTP_DATA_FIRST_FRAG = 0x02, SCTP_DATA_NOT_FRAG = 0x03, SCTP_DATA_UNORDERED = 0x04, SCTP_DATA_SACK_IMM = 0x08, }; enum { SCTP_DATA_FRAG_MASK = 0x03, }; /* RFC 2960 Section 3.3.2 Initiation (INIT) (1) * * This chunk is used to initiate a SCTP association between two * endpoints. */ struct sctp_inithdr { __be32 init_tag; __be32 a_rwnd; __be16 num_outbound_streams; __be16 num_inbound_streams; __be32 initial_tsn; __u8 params[0]; }; struct sctp_init_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_inithdr init_hdr; }; /* Section 3.3.2.1. IPv4 Address Parameter (5) */ struct sctp_ipv4addr_param { struct sctp_paramhdr param_hdr; struct in_addr addr; }; /* Section 3.3.2.1.
IPv6 Address Parameter (6) */ struct sctp_ipv6addr_param { struct sctp_paramhdr param_hdr; struct in6_addr addr; }; /* Section 3.3.2.1 Cookie Preservative (9) */ struct sctp_cookie_preserve_param { struct sctp_paramhdr param_hdr; __be32 lifespan_increment; }; /* Section 3.3.2.1 Host Name Address (11) */ struct sctp_hostname_param { struct sctp_paramhdr param_hdr; uint8_t hostname[0]; }; /* Section 3.3.2.1 Supported Address Types (12) */ struct sctp_supported_addrs_param { struct sctp_paramhdr param_hdr; __be16 types[0]; }; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ struct sctp_adaptation_ind_param { struct sctp_paramhdr param_hdr; __be32 adaptation_ind; }; /* ADDIP Section 4.2.7 Supported Extensions Parameter */ struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; }; /* AUTH Section 3.1 Random */ struct sctp_random_param { struct sctp_paramhdr param_hdr; __u8 random_val[0]; }; /* AUTH Section 3.2 Chunk List */ struct sctp_chunks_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; }; /* AUTH Section 3.3 HMAC Algorithm */ struct sctp_hmac_algo_param { struct sctp_paramhdr param_hdr; __be16 hmac_ids[0]; }; /* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): * The INIT ACK chunk is used to acknowledge the initiation of an SCTP * association. */ struct sctp_initack_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_inithdr init_hdr; }; /* Section 3.3.3.1 State Cookie (7) */ struct sctp_cookie_param { struct sctp_paramhdr p; __u8 body[0]; }; /* Section 3.3.3.1 Unrecognized Parameters (8) */ struct sctp_unrecognized_param { struct sctp_paramhdr param_hdr; struct sctp_paramhdr unrecognized; }; /* * 3.3.4 Selective Acknowledgement (SACK) (3): * * This chunk is sent to the peer endpoint to acknowledge received DATA * chunks and to inform the peer endpoint of gaps in the received * subsequences of DATA chunks as represented by their TSNs. */ struct sctp_gap_ack_block { __be16 start; __be16 end; }; union sctp_sack_variable { struct sctp_gap_ack_block gab; __be32 dup; }; struct sctp_sackhdr { __be32 cum_tsn_ack; __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; union sctp_sack_variable variable[0]; }; struct sctp_sack_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_sackhdr sack_hdr; }; /* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): * * An endpoint should send this chunk to its peer endpoint to probe the * reachability of a particular destination transport address defined in * the present association. */ struct sctp_heartbeathdr { struct sctp_paramhdr info; }; struct sctp_heartbeat_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_heartbeathdr hb_hdr; }; /* For the abort and shutdown ACK we must carry the init tag in the * common header. Just the common header is all that is needed with a * chunk descriptor. */ struct sctp_abort_chunk { struct sctp_chunkhdr uh; }; /* For the graceful shutdown we must carry the tag (in common header) * and the highest consecutive acking value. */ struct sctp_shutdownhdr { __be32 cum_tsn_ack; }; struct sctp_shutdown_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_shutdownhdr shutdown_hdr; }; /* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */ struct sctp_errhdr { __be16 cause; __be16 length; __u8 variable[0]; }; struct sctp_operr_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_errhdr err_hdr; }; /* RFC 2960 3.3.10 - Operation Error * * Cause Code: 16 bits (unsigned integer) * * Defines the type of error conditions being reported. 
* Cause Code * Value Cause Code * --------- ---------------- * 1 Invalid Stream Identifier * 2 Missing Mandatory Parameter * 3 Stale Cookie Error * 4 Out of Resource * 5 Unresolvable Address * 6 Unrecognized Chunk Type * 7 Invalid Mandatory Parameter * 8 Unrecognized Parameters * 9 No User Data * 10 Cookie Received While Shutting Down */ enum sctp_error { SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00), SCTP_ERROR_INV_STRM = cpu_to_be16(0x01), SCTP_ERROR_MISS_PARAM = cpu_to_be16(0x02), SCTP_ERROR_STALE_COOKIE = cpu_to_be16(0x03), SCTP_ERROR_NO_RESOURCE = cpu_to_be16(0x04), SCTP_ERROR_DNS_FAILED = cpu_to_be16(0x05), SCTP_ERROR_UNKNOWN_CHUNK = cpu_to_be16(0x06), SCTP_ERROR_INV_PARAM = cpu_to_be16(0x07), SCTP_ERROR_UNKNOWN_PARAM = cpu_to_be16(0x08), SCTP_ERROR_NO_DATA = cpu_to_be16(0x09), SCTP_ERROR_COOKIE_IN_SHUTDOWN = cpu_to_be16(0x0a), /* SCTP Implementation Guide: * 11 Restart of an association with new addresses * 12 User Initiated Abort * 13 Protocol Violation */ SCTP_ERROR_RESTART = cpu_to_be16(0x0b), SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c), SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d), /* ADDIP Section 3.3 New Error Causes * * Four new Error Causes are added to the SCTP Operational Errors, * primarily for use in the ASCONF-ACK chunk. * * Value Cause Code * --------- ---------------- * 0x00A0 Request to Delete Last Remaining IP Address. * 0x00A1 Operation Refused Due to Resource Shortage. * 0x00A2 Request to Delete Source IP Address. * 0x00A3 Association Aborted due to illegal ASCONF-ACK * 0x00A4 Request refused - no authorization. */ SCTP_ERROR_DEL_LAST_IP = cpu_to_be16(0x00A0), SCTP_ERROR_RSRC_LOW = cpu_to_be16(0x00A1), SCTP_ERROR_DEL_SRC_IP = cpu_to_be16(0x00A2), SCTP_ERROR_ASCONF_ACK = cpu_to_be16(0x00A3), SCTP_ERROR_REQ_REFUSED = cpu_to_be16(0x00A4), /* AUTH Section 4. New Error Cause * * This section defines a new error cause that will be sent if an AUTH * chunk is received with an unsupported HMAC identifier. * The table below illustrates the new error cause. * * Cause Code Error Cause Name * -------------------------------------------------------------- * 0x0105 Unsupported HMAC Identifier */ SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105) }; /* RFC 2960. Appendix A. Explicit Congestion Notification. * Explicit Congestion Notification Echo (ECNE) (12) */ struct sctp_ecnehdr { __be32 lowest_tsn; }; struct sctp_ecne_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_ecnehdr ence_hdr; }; /* RFC 2960. Appendix A. Explicit Congestion Notification. * Congestion Window Reduced (CWR) (13) */ struct sctp_cwrhdr { __be32 lowest_tsn; }; /* PR-SCTP * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) * * Forward Cumulative TSN chunk has the following format: * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 192 | Flags = 0x00 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | New Cumulative TSN | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Stream-1 | Stream Sequence-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ / * / \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Stream-N | Stream Sequence-N | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Chunk Flags: * * Set to all zeros on transmit and ignored on receipt. * * New Cumulative TSN: 32 bit u_int * * This indicates the new cumulative TSN to the data receiver.
Upon * the reception of this value, the data receiver MUST consider * any missing TSNs earlier than or equal to this value as received * and stop reporting them as gaps in any subsequent SACKs. * * Stream-N: 16 bit u_int * * This field holds a stream number that was skipped by this * FWD-TSN. * * Stream Sequence-N: 16 bit u_int * This field holds the sequence number associated with the stream * that was skipped. The stream sequence field holds the largest stream * sequence number in this stream being skipped. The receiver of * the FWD-TSN's can use the Stream-N and Stream Sequence-N fields * to enable delivery of any stranded TSN's that remain on the stream * re-ordering queues. This field MUST NOT report TSN's corresponding * to DATA chunk that are marked as unordered. For ordered DATA * chunks this field MUST be filled in. */ struct sctp_fwdtsn_skip { __be16 stream; __be16 ssn; }; struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; struct sctp_fwdtsn_skip skip[0]; }; struct sctp_fwdtsn_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_fwdtsn_hdr fwdtsn_hdr; }; struct sctp_ifwdtsn_skip { __be16 stream; __u8 reserved; __u8 flags; __be32 mid; }; struct sctp_ifwdtsn_hdr { __be32 new_cum_tsn; struct sctp_ifwdtsn_skip skip[0]; }; struct sctp_ifwdtsn_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_ifwdtsn_hdr fwdtsn_hdr; }; /* ADDIP * Section 3.1.1 Address Configuration Change Chunk (ASCONF) * * Serial Number: 32 bits (unsigned integer) * This value represents a Serial Number for the ASCONF Chunk. The * valid range of Serial Number is from 0 to 2^32-1. * Serial Numbers wrap back to 0 after reaching 2^32 -1. * * Address Parameter: 8 or 20 bytes (depending on type) * The address is an address of the sender of the ASCONF chunk, * the address MUST be considered part of the association by the * peer endpoint. This field may be used by the receiver of the * ASCONF to help in finding the association. This parameter MUST * be present in every ASCONF message i.e. it is a mandatory TLV * parameter. * * ASCONF Parameter: TLV format * Each Address configuration change is represented by a TLV * parameter as defined in Section 3.2. One or more requests may * be present in an ASCONF Chunk. * * Section 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK) * * Serial Number: 32 bits (unsigned integer) * This value represents the Serial Number for the received ASCONF * Chunk that is acknowledged by this chunk. This value is copied * from the received ASCONF Chunk. * * ASCONF Parameter Response: TLV format * The ASCONF Parameter Response is used in the ASCONF-ACK to * report status of ASCONF processing. */ struct sctp_addip_param { struct sctp_paramhdr param_hdr; __be32 crr_id; }; struct sctp_addiphdr { __be32 serial; __u8 params[0]; }; struct sctp_addip_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_addiphdr addip_hdr; }; /* AUTH * Section 4.1 Authentication Chunk (AUTH) * * This chunk is used to hold the result of the HMAC calculation. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0x0F | Flags=0 | Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Shared Key Identifier | HMAC Identifier | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | * \ HMAC / * / \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Type: 1 byte (unsigned integer) * This value MUST be set to 0x0F for all AUTH-chunks. 
* * Flags: 1 byte (unsigned integer) * Set to zero on transmit and ignored on receipt. * * Length: 2 bytes (unsigned integer) * This value holds the length of the HMAC in bytes plus 8. * * Shared Key Identifier: 2 bytes (unsigned integer) * This value describes which endpoint pair shared key is used. * * HMAC Identifier: 2 bytes (unsigned integer) * This value describes which message digest is being used. Table 2 * shows the currently defined values. * * The following Table 2 shows the currently defined values for HMAC * identifiers. * * +-----------------+--------------------------+ * | HMAC Identifier | Message Digest Algorithm | * +-----------------+--------------------------+ * | 0 | Reserved | * | 1 | SHA-1 defined in [8] | * | 2 | Reserved | * | 3 | SHA-256 defined in [8] | * +-----------------+--------------------------+ * * * HMAC: n bytes (unsigned integer) This hold the result of the HMAC * calculation. */ struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; __u8 hmac[0]; }; struct sctp_auth_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_authhdr auth_hdr; }; struct sctp_infox { struct sctp_info *sctpinfo; struct sctp_association *asoc; }; struct sctp_reconf_chunk { struct sctp_chunkhdr chunk_hdr; __u8 params[0]; }; struct sctp_strreset_outreq { struct sctp_paramhdr param_hdr; __be32 request_seq; __be32 response_seq; __be32 send_reset_at_tsn; __be16 list_of_streams[0]; }; struct sctp_strreset_inreq { struct sctp_paramhdr param_hdr; __be32 request_seq; __be16 list_of_streams[0]; }; struct sctp_strreset_tsnreq { struct sctp_paramhdr param_hdr; __be32 request_seq; }; struct sctp_strreset_addstrm { struct sctp_paramhdr param_hdr; __be32 request_seq; __be16 number_of_streams; __be16 reserved; }; enum { SCTP_STRRESET_NOTHING_TO_DO = 0x00, SCTP_STRRESET_PERFORMED = 0x01, SCTP_STRRESET_DENIED = 0x02, SCTP_STRRESET_ERR_WRONG_SSN = 0x03, SCTP_STRRESET_ERR_IN_PROGRESS = 0x04, SCTP_STRRESET_ERR_BAD_SEQNO = 0x05, SCTP_STRRESET_IN_PROGRESS = 0x06, }; struct sctp_strreset_resp { struct sctp_paramhdr param_hdr; __be32 response_seq; __be32 result; }; struct sctp_strreset_resptsn { struct sctp_paramhdr param_hdr; __be32 response_seq; __be32 result; __be32 senders_next_tsn; __be32 receivers_next_tsn; }; enum { SCTP_DSCP_SET_MASK = 0x1, SCTP_DSCP_VAL_MASK = 0xfc, SCTP_FLOWLABEL_SET_MASK = 0x100000, SCTP_FLOWLABEL_VAL_MASK = 0xfffff }; #endif /* __LINUX_SCTP_H__ */ i8042.h 0000644 00000003771 14722070374 0005501 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _LINUX_I8042_H #define _LINUX_I8042_H #include <linux/types.h> /* * Standard commands. */ #define I8042_CMD_CTL_RCTR 0x0120 #define I8042_CMD_CTL_WCTR 0x1060 #define I8042_CMD_CTL_TEST 0x01aa #define I8042_CMD_KBD_DISABLE 0x00ad #define I8042_CMD_KBD_ENABLE 0x00ae #define I8042_CMD_KBD_TEST 0x01ab #define I8042_CMD_KBD_LOOP 0x11d2 #define I8042_CMD_AUX_DISABLE 0x00a7 #define I8042_CMD_AUX_ENABLE 0x00a8 #define I8042_CMD_AUX_TEST 0x01a9 #define I8042_CMD_AUX_SEND 0x10d4 #define I8042_CMD_AUX_LOOP 0x11d3 #define I8042_CMD_MUX_PFX 0x0090 #define I8042_CMD_MUX_SEND 0x1090 /* * Status register bits. */ #define I8042_STR_PARITY 0x80 #define I8042_STR_TIMEOUT 0x40 #define I8042_STR_AUXDATA 0x20 #define I8042_STR_KEYLOCK 0x10 #define I8042_STR_CMDDAT 0x08 #define I8042_STR_MUXERR 0x04 #define I8042_STR_IBF 0x02 #define I8042_STR_OBF 0x01 /* * Control register bits. 
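 *
 * An illustrative read of the control register via the helpers declared
 * further below (sketch only; assumes the usual zero-on-success return
 * convention and elides locking and error handling):
 *
 *	unsigned char ctr;
 *
 *	if (!i8042_command(&ctr, I8042_CMD_CTL_RCTR))
 *		keyboard_irq_on = ctr & I8042_CTR_KBDINT;
 *
 * where "keyboard_irq_on" is a hypothetical driver variable.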
*/ #define I8042_CTR_KBDINT 0x01 #define I8042_CTR_AUXINT 0x02 #define I8042_CTR_IGNKEYLOCK 0x08 #define I8042_CTR_KBDDIS 0x10 #define I8042_CTR_AUXDIS 0x20 #define I8042_CTR_XLATE 0x40 struct serio; #if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE) void i8042_lock_chip(void); void i8042_unlock_chip(void); int i8042_command(unsigned char *param, int command); int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)); int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)); #else static inline void i8042_lock_chip(void) { } static inline void i8042_unlock_chip(void) { } static inline int i8042_command(unsigned char *param, int command) { return -ENODEV; } static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)) { return -ENODEV; } static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)) { return -ENODEV; } #endif #endif llc.h 0000644 00000001355 14722070374 0005501 0 ustar 00 /* * IEEE 802.2 User Interface SAPs for Linux, data structures and indicators. * * Copyright (c) 2001 by Jay Schulist <jschlst@samba.org> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #ifndef __LINUX_LLC_H #define __LINUX_LLC_H #include <uapi/linux/llc.h> #define LLC_SAP_DYN_START 0xC0 #define LLC_SAP_DYN_STOP 0xDE #define LLC_SAP_DYN_TRIES 4 #define llc_ui_skb_cb(__skb) ((struct sockaddr_llc *)&((__skb)->cb[0])) #endif /* __LINUX_LLC_H */ cpu_rmap.h 0000644 00000003272 14722070374 0006535 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __LINUX_CPU_RMAP_H #define __LINUX_CPU_RMAP_H /* * cpu_rmap.c: CPU affinity reverse-map support * Copyright 2011 Solarflare Communications Inc. */ #include <linux/cpumask.h> #include <linux/gfp.h> #include <linux/slab.h> #include <linux/kref.h> /** * struct cpu_rmap - CPU affinity reverse-map * @refcount: kref for object * @size: Number of objects to be reverse-mapped * @used: Number of objects added * @obj: Pointer to array of object pointers * @near: For each CPU, the index and distance to the nearest object, * based on affinity masks */ struct cpu_rmap { struct kref refcount; u16 size, used; void **obj; struct { u16 index; u16 dist; } near[0]; }; #define CPU_RMAP_DIST_INF 0xffff extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags); extern int cpu_rmap_put(struct cpu_rmap *rmap); extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj); extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, const struct cpumask *affinity); static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu) { return rmap->near[cpu].index; } static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu) { return rmap->obj[rmap->near[cpu].index]; } /** * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs * @size: Number of objects to be mapped * * Must be called in process context. 
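 *
 * A typical usage sketch (illustrative; "nr_queues" and "irqs[]" are
 * hypothetical driver state):
 *
 *	struct cpu_rmap *rmap = alloc_irq_cpu_rmap(nr_queues);
 *	for (i = 0; i < nr_queues; i++)
 *		irq_cpu_rmap_add(rmap, irqs[i]);
 *	...
 *	queue = cpu_rmap_lookup_index(rmap, raw_smp_processor_id());
 *
 * so that each CPU is steered to the queue whose IRQ affinity is
 * nearest to it.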
*/ static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size) { return alloc_cpu_rmap(size, GFP_KERNEL); } extern void free_irq_cpu_rmap(struct cpu_rmap *rmap); extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq); #endif /* __LINUX_CPU_RMAP_H */ arm_sdei.h 0000644 00000005135 14722070374 0006512 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2017 Arm Ltd. #ifndef __LINUX_ARM_SDEI_H #define __LINUX_ARM_SDEI_H #include <uapi/linux/arm_sdei.h> enum sdei_conduit_types { CONDUIT_INVALID = 0, CONDUIT_SMC, CONDUIT_HVC, }; #include <acpi/ghes.h> #ifdef CONFIG_ARM_SDE_INTERFACE #include <asm/sdei.h> #endif /* Arch code should override this to set the entry point from firmware... */ #ifndef sdei_arch_get_entry_point #define sdei_arch_get_entry_point(conduit) (0) #endif /* * When an event occurs sdei_event_handler() will call a user-provided callback * like this in NMI context on the CPU that received the event. */ typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg); /* * Register your callback to claim an event. The event must be described * by firmware. */ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg); /* * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling * it until it succeeds. */ int sdei_event_unregister(u32 event_num); int sdei_event_enable(u32 event_num); int sdei_event_disable(u32 event_num); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, sdei_event_callback *critical_cb); int sdei_unregister_ghes(struct ghes *ghes); #ifdef CONFIG_ARM_SDE_INTERFACE /* For use by arch code when CPU hotplug notifiers are not appropriate. */ int sdei_mask_local_cpu(void); int sdei_unmask_local_cpu(void); #else static inline int sdei_mask_local_cpu(void) { return 0; } static inline int sdei_unmask_local_cpu(void) { return 0; } #endif /* CONFIG_ARM_SDE_INTERFACE */ /* * This struct represents an event that has been registered. The driver * maintains a list of all events, and which ones are registered. (Private * events have one entry in the list, but are registered on each CPU). * A pointer to this struct is passed to firmware, and back to the event * handler. The event handler can then use this to invoke the registered * callback, without having to walk the list. * * For CPU private events, this structure is per-cpu. */ struct sdei_registered_event { /* For use by arch code: */ struct pt_regs interrupted_regs; sdei_event_callback *callback; void *callback_arg; u32 event_num; u8 priority; }; /* The arch code entry point should then call this when an event arrives. */ int notrace sdei_event_handler(struct pt_regs *regs, struct sdei_registered_event *arg); /* arch code may use this to retrieve the extra registers. 
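 *
 * For example (illustrative sketch; the meaning of each query index is
 * defined by the SDEI firmware specification, and "my_event_cb" is a
 * hypothetical handler registered beforehand with sdei_event_register()):
 *
 *	static int my_event_cb(u32 event, struct pt_regs *regs, void *arg)
 *	{
 *		u64 x0;
 *
 *		if (!sdei_api_event_context(0, &x0))
 *			... x0 now holds saved register context ...
 *		return 0;
 *	}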
*/ int sdei_api_event_context(u32 query, u64 *result); #endif /* __LINUX_ARM_SDEI_H */ joystick.h 0000644 00000000665 14722070374 0006571 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 1996-2000 Vojtech Pavlik * * Sponsored by SuSE */ /* */ #ifndef _LINUX_JOYSTICK_H #define _LINUX_JOYSTICK_H #include <uapi/linux/joystick.h> #if BITS_PER_LONG == 64 #define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_64 #elif BITS_PER_LONG == 32 #define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_32 #else #error Unexpected BITS_PER_LONG #endif #endif /* _LINUX_JOYSTICK_H */ pci-epc.h 0000644 00000015672 14722070374 0006256 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /** * PCI Endpoint *Controller* (EPC) header file * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <kishon@ti.com> */ #ifndef __LINUX_PCI_EPC_H #define __LINUX_PCI_EPC_H #include <linux/pci-epf.h> struct pci_epc; enum pci_epc_irq_type { PCI_EPC_IRQ_UNKNOWN, PCI_EPC_IRQ_LEGACY, PCI_EPC_IRQ_MSI, PCI_EPC_IRQ_MSIX, }; /** * struct pci_epc_ops - set of function pointers for performing EPC operations * @write_header: ops to populate configuration space header * @set_bar: ops to configure the BAR * @clear_bar: ops to reset the BAR * @map_addr: ops to map CPU address to PCI address * @unmap_addr: ops to unmap CPU address and PCI address * @set_msi: ops to set the requested number of MSI interrupts in the MSI * capability register * @get_msi: ops to get the number of MSI interrupts allocated by the RC from * the MSI capability register * @set_msix: ops to set the requested number of MSI-X interrupts in the * MSI-X capability register * @get_msix: ops to get the number of MSI-X interrupts allocated by the RC * from the MSI-X capability register * @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt * @start: ops to start the PCI link * @stop: ops to stop the PCI link * @owner: the module owner containing the ops */ struct pci_epc_ops { int (*write_header)(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr); int (*set_bar)(struct pci_epc *epc, u8 func_no, struct pci_epf_bar *epf_bar); void (*clear_bar)(struct pci_epc *epc, u8 func_no, struct pci_epf_bar *epf_bar); int (*map_addr)(struct pci_epc *epc, u8 func_no, phys_addr_t addr, u64 pci_addr, size_t size); void (*unmap_addr)(struct pci_epc *epc, u8 func_no, phys_addr_t addr); int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts); int (*get_msi)(struct pci_epc *epc, u8 func_no); int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts); int (*get_msix)(struct pci_epc *epc, u8 func_no); int (*raise_irq)(struct pci_epc *epc, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num); int (*start)(struct pci_epc *epc); void (*stop)(struct pci_epc *epc); const struct pci_epc_features* (*get_features)(struct pci_epc *epc, u8 func_no); struct module *owner; }; /** * struct pci_epc_mem - address space of the endpoint controller * @phys_base: physical base address of the PCI address space * @size: the size of the PCI address space * @bitmap: bitmap to manage the PCI address space * @pages: number of bits representing the address region * @page_size: size of each page * @lock: mutex to protect bitmap */ struct pci_epc_mem { phys_addr_t phys_base; size_t size; unsigned long *bitmap; size_t page_size; int pages; /* mutex to protect against concurrent access for memory allocation*/ struct mutex lock; }; /** * struct pci_epc - represents the PCI EPC device * @dev: PCI EPC device * @pci_epf: list of endpoint functions present in this EPC device * 
@ops: function pointers for performing endpoint operations * @mem: address space of the endpoint controller * @max_functions: max number of functions that can be configured in this EPC * @group: configfs group representing the PCI EPC device * @lock: spinlock to protect pci_epc ops */ struct pci_epc { struct device dev; struct list_head pci_epf; const struct pci_epc_ops *ops; struct pci_epc_mem *mem; u8 max_functions; struct config_group *group; /* spinlock to protect against concurrent access of EP controller */ spinlock_t lock; }; /** * struct pci_epc_features - features supported by an EPC device per function * @linkup_notifier: indicate if the EPC device can notify EPF driver on link up * @msi_capable: indicate if the endpoint function has MSI capability * @msix_capable: indicate if the endpoint function has MSI-X capability * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver * @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs * @bar_fixed_size: Array specifying the size supported by each BAR * @align: alignment size required for BAR buffer allocation */ struct pci_epc_features { unsigned int linkup_notifier : 1; unsigned int msi_capable : 1; unsigned int msix_capable : 1; u8 reserved_bar; u8 bar_fixed_64bit; u64 bar_fixed_size[BAR_5 + 1]; size_t align; }; #define to_pci_epc(device) container_of((device), struct pci_epc, dev) #define pci_epc_create(dev, ops) \ __pci_epc_create((dev), (ops), THIS_MODULE) #define devm_pci_epc_create(dev, ops) \ __devm_pci_epc_create((dev), (ops), THIS_MODULE) #define pci_epc_mem_init(epc, phys_addr, size) \ __pci_epc_mem_init((epc), (phys_addr), (size), PAGE_SIZE) static inline void epc_set_drvdata(struct pci_epc *epc, void *data) { dev_set_drvdata(&epc->dev, data); } static inline void *epc_get_drvdata(struct pci_epc *epc) { return dev_get_drvdata(&epc->dev); } struct pci_epc * __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, struct module *owner); struct pci_epc * __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, struct module *owner); void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc); void pci_epc_destroy(struct pci_epc *epc); int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); void pci_epc_linkup(struct pci_epc *epc); void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); int pci_epc_write_header(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr); int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, struct pci_epf_bar *epf_bar); void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, struct pci_epf_bar *epf_bar); int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr, u64 pci_addr, size_t size); void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr); int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts); int pci_epc_get_msi(struct pci_epc *epc, u8 func_no); int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts); int pci_epc_get_msix(struct pci_epc *epc, u8 func_no); int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num); int pci_epc_start(struct pci_epc *epc); void pci_epc_stop(struct pci_epc *epc); const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, u8 func_no); unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features); struct pci_epc *pci_epc_get(const char *epc_name); void pci_epc_put(struct pci_epc *epc); int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size, size_t page_size);
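/*
 * A hedged usage sketch for an endpoint function driver built from the
 * declarations above (illustrative; "func_no", "pci_addr", "buf" and "len"
 * are hypothetical, error handling elided):
 *
 *	phys_addr_t phys;
 *	void __iomem *base = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
 *
 *	if (base && !pci_epc_map_addr(epc, func_no, phys, pci_addr, SZ_4K)) {
 *		memcpy_toio(base, buf, len);
 *		pci_epc_raise_irq(epc, func_no, PCI_EPC_IRQ_MSI, 1);
 *		pci_epc_unmap_addr(epc, func_no, phys);
 *	}
 *	pci_epc_mem_free_addr(epc, phys, base, SZ_4K);
 */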
void pci_epc_mem_exit(struct pci_epc *epc); void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size); void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, void __iomem *virt_addr, size_t size); #endif /* __LINUX_PCI_EPC_H */ fsnotify_backend.h 0000644 00000050461 14722070374 0010241 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Filesystem access notification for Linux * * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com> */ #ifndef __LINUX_FSNOTIFY_BACKEND_H #define __LINUX_FSNOTIFY_BACKEND_H #ifdef __KERNEL__ #include <linux/idr.h> /* inotify uses this */ #include <linux/fs.h> /* struct inode */ #include <linux/list.h> #include <linux/path.h> /* struct path */ #include <linux/spinlock.h> #include <linux/types.h> #include <linux/atomic.h> #include <linux/user_namespace.h> #include <linux/refcount.h> /* * IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily * convert between them. dnotify only needs conversion at watch creation * so no perf loss there. fanotify isn't defined yet, so it can use the * holes if it needs more events. */ #define FS_ACCESS 0x00000001 /* File was accessed */ #define FS_MODIFY 0x00000002 /* File was modified */ #define FS_ATTRIB 0x00000004 /* Metadata changed */ #define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */ #define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */ #define FS_OPEN 0x00000020 /* File was opened */ #define FS_MOVED_FROM 0x00000040 /* File was moved from X */ #define FS_MOVED_TO 0x00000080 /* File was moved to Y */ #define FS_CREATE 0x00000100 /* Subfile was created */ #define FS_DELETE 0x00000200 /* Subfile was deleted */ #define FS_DELETE_SELF 0x00000400 /* Self was deleted */ #define FS_MOVE_SELF 0x00000800 /* Self was moved */ #define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */ #define FS_UNMOUNT 0x00002000 /* inode on umount fs */ #define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ #define FS_IN_IGNORED 0x00008000 /* last inotify event here */ #define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permission hook */ #define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ #define FS_ISDIR 0x40000000 /* event occurred against dir */ #define FS_IN_ONESHOT 0x80000000 /* only send event once */ #define FS_DN_RENAME 0x10000000 /* file renamed */ #define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ /* This inode cares about things that happen to its children. Always set for * dnotify and inotify. */ #define FS_EVENT_ON_CHILD 0x08000000 #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) /* * Directory entry modification events - reported only to directory * where entry is modified and not to a watching parent. * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event * when a directory entry inside a child subdir changes. */ #define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE) #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ FS_OPEN_EXEC_PERM) /* * This is a list of all events that may get sent to a parent based on fs event * happening to inodes inside that directory.
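 *
 * A hedged illustration: an inotify-style watch that wants modify events
 * for a directory and the files inside it ends up with a mark mask like
 *
 *	FS_MODIFY | FS_EVENT_ON_CHILD
 *
 * and only the FS_EVENTS_POSS_ON_CHILD subset defined below is ever
 * reported on behalf of the children.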
*/ #define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \ FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \ FS_OPEN | FS_OPEN_EXEC) /* Events that can be reported to backends */ #define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \ FS_EVENTS_POSS_ON_CHILD | \ FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED) /* Extra flags that may be reported with event or control handling of events */ #define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ FS_DN_MULTISHOT | FS_EVENT_ON_CHILD) #define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS) struct fsnotify_group; struct fsnotify_event; struct fsnotify_mark; struct fsnotify_event_private_data; struct fsnotify_fname; struct fsnotify_iter_info; struct mem_cgroup; /* * Each group must define these ops. The fsnotify infrastructure will call * these operations for each relevant group. * * handle_event - main call for a group to handle an fs event * free_group_priv - called when a group refcnt hits 0 to clean up the private union * freeing_mark - called when a mark is being destroyed for some reason. The group * MUST be holding a reference on each mark and that reference must be * dropped in this function. inotify uses this function to send * userspace messages that marks have been removed. */ struct fsnotify_ops { int (*handle_event)(struct fsnotify_group *group, struct inode *inode, u32 mask, const void *data, int data_type, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info); void (*free_group_priv)(struct fsnotify_group *group); void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); void (*free_event)(struct fsnotify_event *event); /* called on final put+free to free memory */ void (*free_mark)(struct fsnotify_mark *mark); }; /* * all of the information about the original object we want to now send to * a group. If you want to carry more info from the accessing task to the * listener this structure is where you need to be adding fields. */ struct fsnotify_event { struct list_head list; unsigned long objectid; /* identifier for queue merges */ }; /* * A group is a "thing" that wants to receive notification about filesystem * events. The mask holds the subset of event types this group cares about. * refcnt on a group is up to the implementor and at any moment if it goes 0 * everything will be cleaned up. */ struct fsnotify_group { const struct fsnotify_ops *ops; /* how this group handles things */ /* * How the refcnt is used is up to each group. When the refcnt hits 0 * fsnotify will clean up all of the resources associated with this group. * As an example, the dnotify group will always have a refcnt=1 and that * will never change. Inotify, on the other hand, has a group per * inotify_init() and the refcnt will hit 0 only when that fd has been * closed. */ refcount_t refcnt; /* things with interest in this group */ /* needed to send notification to userspace */ spinlock_t notification_lock; /* protect the notification_list */ struct list_head notification_list; /* list of event_holder this group needs to send to userspace */ wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */ unsigned int q_len; /* events on the queue */ unsigned int max_events; /* maximum events allowed on the list */ /* * Valid fsnotify group priorities. Events are sent in order from highest * priority to lowest priority. We default to the lowest priority.
*/ #define FS_PRIO_0 0 /* normal notifiers, no permissions */ #define FS_PRIO_1 1 /* fanotify content based access control */ #define FS_PRIO_2 2 /* fanotify pre-content access */ unsigned int priority; bool shutdown; /* group is being shut down, don't queue more events */ /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ struct mutex mark_mutex; /* protect marks_list */ atomic_t num_marks; /* 1 for each mark and 1 for not being * past the point of no return when freeing * a group */ atomic_t user_waits; /* Number of tasks waiting for user * response */ struct list_head marks_list; /* all inode marks for this group */ struct fasync_struct *fsn_fa; /* async notification */ struct fsnotify_event *overflow_event; /* Event we queue when the * notification list is too * full */ struct mem_cgroup *memcg; /* memcg to charge allocations */ /* groups can define private fields here or use the void *private */ union { void *private; #ifdef CONFIG_INOTIFY_USER struct inotify_group_private_data { spinlock_t idr_lock; struct idr idr; struct ucounts *ucounts; } inotify_data; #endif #ifdef CONFIG_FANOTIFY struct fanotify_group_private_data { /* allows a group to block waiting for a userspace response */ struct list_head access_list; wait_queue_head_t access_waitq; int flags; /* flags from fanotify_init() */ int f_flags; /* event_f_flags from fanotify_init() */ unsigned int max_marks; struct user_struct *user; } fanotify_data; #endif /* CONFIG_FANOTIFY */ }; }; /* when calling fsnotify tell it if the data is a path or inode */ #define FSNOTIFY_EVENT_NONE 0 #define FSNOTIFY_EVENT_PATH 1 #define FSNOTIFY_EVENT_INODE 2 enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_INODE, FSNOTIFY_OBJ_TYPE_VFSMOUNT, FSNOTIFY_OBJ_TYPE_SB, FSNOTIFY_OBJ_TYPE_COUNT, FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT }; #define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) #define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) #define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB) #define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) static inline bool fsnotify_valid_obj_type(unsigned int type) { return (type < FSNOTIFY_OBJ_TYPE_COUNT); } struct fsnotify_iter_info { struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT]; unsigned int report_mask; int srcu_idx; }; static inline bool fsnotify_iter_should_report_type( struct fsnotify_iter_info *iter_info, int type) { return (iter_info->report_mask & (1U << type)); } static inline void fsnotify_iter_set_report_type( struct fsnotify_iter_info *iter_info, int type) { iter_info->report_mask |= (1U << type); } static inline void fsnotify_iter_set_report_type_mark( struct fsnotify_iter_info *iter_info, int type, struct fsnotify_mark *mark) { iter_info->marks[type] = mark; iter_info->report_mask |= (1U << type); } #define FSNOTIFY_ITER_FUNCS(name, NAME) \ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ struct fsnotify_iter_info *iter_info) \ { \ return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \ iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \ } FSNOTIFY_ITER_FUNCS(inode, INODE) FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) FSNOTIFY_ITER_FUNCS(sb, SB) #define fsnotify_foreach_obj_type(type) \ for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++) /* * fsnotify_connp_t is what we embed in objects which connector can be attached * to. fsnotify_connp_t * is how we refer from connector back to object. 
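 *
 * A hedged sketch of the embedding (mirroring what inode, vfsmount and
 * super_block do; "example_object" is hypothetical):
 *
 *	struct example_object {
 *		int payload;
 *		fsnotify_connp_t marks;
 *	};
 *
 * The address of its ->marks field is then the connp argument passed to
 * fsnotify_add_mark() declared further below.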
*/ struct fsnotify_mark_connector; typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; /* * Inode/vfsmount/sb point to this structure which tracks all marks attached to * the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this * structure. We destroy this structure when there are no more marks attached * to it. The structure is protected by fsnotify_mark_srcu. */ struct fsnotify_mark_connector { spinlock_t lock; unsigned short type; /* Type of object [lock] */ #define FSNOTIFY_CONN_FLAG_HAS_FSID 0x01 unsigned short flags; /* flags [lock] */ __kernel_fsid_t fsid; /* fsid of filesystem containing object */ union { /* Object pointer [lock] */ fsnotify_connp_t *obj; /* Used listing heads to free after srcu period expires */ struct fsnotify_mark_connector *destroy_next; }; struct hlist_head list; }; /* * A mark is simply an object attached to an in core inode which allows an * fsnotify listener to indicate they are either no longer interested in events * of a type matching mask or only interested in those events. * * These are flushed when an inode is evicted from core and may be flushed * when the inode is modified (as seen by fsnotify_access). Some fsnotify * users (such as dnotify) will flush these when the open fd is closed and not * at inode eviction or modification. * * Text in brackets is showing the lock(s) protecting modifications of a * particular entry. obj_lock means either inode->i_lock or * mnt->mnt_root->d_lock depending on the mark type. */ struct fsnotify_mark { /* Mask this mark is for [mark->lock, group->mark_mutex] */ __u32 mask; /* We hold one for presence in g_list. Also one ref for each 'thing' * in kernel that found and may be using this mark. */ refcount_t refcnt; /* Group this mark is for. Set on mark creation, stable until last ref * is dropped */ struct fsnotify_group *group; /* List of marks by group->marks_list. Also reused for queueing * mark into destroy_list when it's waiting for the end of SRCU period * before it can be freed. [group->mark_mutex] */ struct list_head g_list; /* Protects inode / mnt pointers, flags, masks */ spinlock_t lock; /* List of marks for inode / vfsmount [connector->lock, mark ref] */ struct hlist_node obj_list; /* Head of list of marks for an object [mark ref] */ struct fsnotify_mark_connector *connector; /* Events types to ignore [mark->lock, group->mark_mutex] */ __u32 ignored_mask; #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x01 #define FSNOTIFY_MARK_FLAG_ALIVE 0x02 #define FSNOTIFY_MARK_FLAG_ATTACHED 0x04 unsigned int flags; /* flags [mark->lock] */ }; #ifdef CONFIG_FSNOTIFY /* called from the vfs helpers */ /* main fsnotify call to send events */ extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, const struct qstr *name, u32 cookie); extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask); extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); extern u32 fsnotify_get_cookie(void); static inline int fsnotify_inode_watches_children(struct inode *inode) { /* FS_EVENT_ON_CHILD is set if the inode may care */ if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD)) return 0; /* this inode might care about child events, does it care about the * specific set of events that can happen on a child? 
*/ return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD; } /* * Update the dentry with a flag indicating the interest of its parent to receive * filesystem events when those events happen to this dentry->d_inode. */ static inline void fsnotify_update_flags(struct dentry *dentry) { assert_spin_locked(&dentry->d_lock); /* * Serialisation of setting PARENT_WATCHED on the dentries is provided * by d_lock. If inotify_inode_watched changes after we have taken * d_lock, the following __fsnotify_update_child_dentry_flags call will * find our entry, so it will spin until we complete here, and update * us with the new state. */ if (fsnotify_inode_watches_children(dentry->d_parent->d_inode)) dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; else dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; } /* called from fsnotify listeners, such as fanotify or dnotify */ /* create a new group */ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops); /* get reference to a group */ extern void fsnotify_get_group(struct fsnotify_group *group); /* drop reference on a group from fsnotify_alloc_group */ extern void fsnotify_put_group(struct fsnotify_group *group); /* group destruction begins, stop queuing new events */ extern void fsnotify_group_stop_queueing(struct fsnotify_group *group); /* destroy group */ extern void fsnotify_destroy_group(struct fsnotify_group *group); /* fasync handler function */ extern int fsnotify_fasync(int fd, struct file *file, int on); /* Free event from memory */ extern void fsnotify_destroy_event(struct fsnotify_group *group, struct fsnotify_event *event); /* attach the event to the group notification queue */ extern int fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct list_head *, struct fsnotify_event *)); /* Queue overflow event to a notification group */ static inline void fsnotify_queue_overflow(struct fsnotify_group *group) { fsnotify_add_event(group, group->overflow_event, NULL); } /* true if the group notification queue is empty */ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); /* return, but do not dequeue the first event on the notification queue */ extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group); /* return AND dequeue the first event on the notification queue */ extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group); /* Remove event queued in the notification list */ extern void fsnotify_remove_queued_event(struct fsnotify_group *group, struct fsnotify_event *event); /* functions used to manipulate the marks attached to inodes */ /* Get mask of events for a list of marks */ extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn); /* Calculate mask of events for a list of marks */ extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn); extern void fsnotify_init_mark(struct fsnotify_mark *mark, struct fsnotify_group *group); /* Find mark belonging to given group in the list of marks */ extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, struct fsnotify_group *group); /* Get cached fsid of filesystem containing object */ extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn, __kernel_fsid_t *fsid); /* attach the mark to the object */ extern int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp, unsigned int type, int allow_dups, __kernel_fsid_t *fsid); extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, fsnotify_connp_t *connp, unsigned int type, int allow_dups, __kernel_fsid_t *fsid);
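/*
 * A hedged lifecycle sketch built from the declarations above
 * (illustrative; "example_ops", "mark" and "inode" are hypothetical,
 * error handling elided):
 *
 *	struct fsnotify_group *grp = fsnotify_alloc_group(&example_ops);
 *
 *	fsnotify_init_mark(mark, grp);
 *	mark->mask = FS_MODIFY | FS_CLOSE_WRITE;
 *	fsnotify_add_inode_mark(mark, inode, 0);
 *	...
 *	fsnotify_destroy_mark(mark, grp);
 *	fsnotify_put_mark(mark);
 *	fsnotify_put_group(grp);
 */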
/* attach the mark to the inode */ static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, struct inode *inode, int allow_dups) { return fsnotify_add_mark(mark, &inode->i_fsnotify_marks, FSNOTIFY_OBJ_TYPE_INODE, allow_dups, NULL); } static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, struct inode *inode, int allow_dups) { return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks, FSNOTIFY_OBJ_TYPE_INODE, allow_dups, NULL); } /* given a group and a mark, flag mark to be freed when all references are dropped */ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, struct fsnotify_group *group); /* detach mark from inode / mount list, group list, drop inode reference */ extern void fsnotify_detach_mark(struct fsnotify_mark *mark); /* free mark */ extern void fsnotify_free_mark(struct fsnotify_mark *mark); /* Wait until all marks queued for destruction are destroyed */ extern void fsnotify_wait_marks_destroyed(void); /* run all the marks in a group, and clear all of the marks attached to given object type */ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type); /* run all the marks in a group, and clear all of the vfsmount marks */ static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) { fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL); } /* run all the marks in a group, and clear all of the inode marks */ static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group) { fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL); } /* run all the marks in a group, and clear all of the sb marks */ static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group) { fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL); } extern void fsnotify_get_mark(struct fsnotify_mark *mark); extern void fsnotify_put_mark(struct fsnotify_mark *mark); extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); static inline void fsnotify_init_event(struct fsnotify_event *event, unsigned long objectid) { INIT_LIST_HEAD(&event->list); event->objectid = objectid; } #else static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, const struct qstr *name, u32 cookie) { return 0; } static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) { return 0; } static inline void __fsnotify_inode_delete(struct inode *inode) {} static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) {} static inline void fsnotify_sb_delete(struct super_block *sb) {} static inline void fsnotify_update_flags(struct dentry *dentry) {} static inline u32 fsnotify_get_cookie(void) { return 0; } static inline void fsnotify_unmount_inodes(struct super_block *sb) {} #endif /* CONFIG_FSNOTIFY */ #endif /* __KERNEL__ */ #endif /* __LINUX_FSNOTIFY_BACKEND_H */ pvclock_gtod.h 0000644 00000001044 14722070374 0007400 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PVCLOCK_GTOD_H #define _PVCLOCK_GTOD_H #include <linux/notifier.h> /* * The pvclock gtod notifier is called when the system time is updated * and is used to keep guest time synchronized with host time. * * The 'action' parameter in the notifier function is false (0), or * true (non-zero) if system time was stepped.
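 *
 * A hedged sketch of such a notifier (example_gtod_notify, example_nb and
 * resync_guest_clock are hypothetical):
 *
 *	static int example_gtod_notify(struct notifier_block *nb,
 *				       unsigned long was_stepped, void *priv)
 *	{
 *		if (was_stepped)
 *			resync_guest_clock();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_gtod_notify,
 *	};
 *
 *	pvclock_gtod_register_notifier(&example_nb);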
*/ extern int pvclock_gtod_register_notifier(struct notifier_block *nb); extern int pvclock_gtod_unregister_notifier(struct notifier_block *nb); #endif /* _PVCLOCK_GTOD_H */ jbd2.h 0000644 00000135500 14722070374 0005550 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/include/linux/jbd2.h * * Written by Stephen C. Tweedie <sct@redhat.com> * * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved * * Definitions for transaction data structures for the buffer cache * filesystem journaling support. */ #ifndef _LINUX_JBD2_H #define _LINUX_JBD2_H /* Allow this file to be included directly into e2fsprogs */ #ifndef __KERNEL__ #include "jfs_compat.h" #define JBD2_DEBUG #else #include <linux/types.h> #include <linux/buffer_head.h> #include <linux/journal-head.h> #include <linux/stddef.h> #include <linux/mutex.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/bit_spinlock.h> #include <crypto/hash.h> #endif #define journal_oom_retry 1 /* * Define JBD2_PARANOID_IOFAIL to cause a kernel BUG() if ext4 finds * certain classes of error which can occur due to failed IOs. Under * normal use we want ext4 to continue after such errors, because * hardware _can_ fail, but for debugging purposes when running tests on * known-good hardware we may want to trap these errors. */ #undef JBD2_PARANOID_IOFAIL /* * The default maximum commit age, in seconds. */ #define JBD2_DEFAULT_MAX_COMMIT_AGE 5 #ifdef CONFIG_JBD2_DEBUG /* * Define JBD2_EXPENSIVE_CHECKING to enable more expensive internal * consistency checks. By default we don't do this unless * CONFIG_JBD2_DEBUG is on. */ #define JBD2_EXPENSIVE_CHECKING extern ushort jbd2_journal_enable_debug; void __jbd2_debug(int level, const char *file, const char *func, unsigned int line, const char *fmt, ...); #define jbd_debug(n, fmt, a...) \ __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a) #else #define jbd_debug(n, fmt, a...) /**/ #endif extern void *jbd2_alloc(size_t size, gfp_t flags); extern void jbd2_free(void *ptr, size_t size); #define JBD2_MIN_JOURNAL_BLOCKS 1024 #ifdef __KERNEL__ /** * typedef handle_t - The handle_t type represents a single atomic update being performed by some process. * * All filesystem modifications made by the process go * through this handle. Recursive operations (such as quota operations) * are gathered into a single update. * * The buffer credits field is used to account for journaled buffers * being modified by the running process. To ensure that there is * enough log space for all outstanding operations, we need to limit the * number of outstanding buffers possible at any time. When the * operation completes, any buffer credits not used are credited back to * the transaction, so that at all times we know how many buffers the * outstanding updates on a transaction might possibly touch. * * This is an opaque datatype. **/ typedef struct jbd2_journal_handle handle_t; /* Atomic operation type */ /** * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem. * * journal_t is linked to from the fs superblock structure. * * We use the journal_t to keep track of all outstanding transaction * activity on the filesystem, and to manage the state of the log * writing process. * * This is an opaque datatype. **/ typedef struct journal_s journal_t; /* Journal control structure */ #endif /* * Internal structures used by the logging mechanism: */ #define JBD2_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random!
*/ /* * On-disk structures */ /* * Descriptor block types: */ #define JBD2_DESCRIPTOR_BLOCK 1 #define JBD2_COMMIT_BLOCK 2 #define JBD2_SUPERBLOCK_V1 3 #define JBD2_SUPERBLOCK_V2 4 #define JBD2_REVOKE_BLOCK 5 /* * Standard header for all descriptor blocks: */ typedef struct journal_header_s { __be32 h_magic; __be32 h_blocktype; __be32 h_sequence; } journal_header_t; /* * Checksum types. */ #define JBD2_CRC32_CHKSUM 1 #define JBD2_MD5_CHKSUM 2 #define JBD2_SHA1_CHKSUM 3 #define JBD2_CRC32C_CHKSUM 4 #define JBD2_CRC32_CHKSUM_SIZE 4 #define JBD2_CHECKSUM_BYTES (32 / sizeof(u32)) /* * Commit block header for storing transactional checksums: * * NOTE: If FEATURE_COMPAT_CHECKSUM (checksum v1) is set, the h_chksum* * fields are used to store a checksum of the descriptor and data blocks. * * If FEATURE_INCOMPAT_CSUM_V2 (checksum v2) is set, then the h_chksum * field is used to store crc32c(uuid+commit_block). Each journal metadata * block gets its own checksum, and data block checksums are stored in * journal_block_tag (in the descriptor). The other h_chksum* fields are * not used. * * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses * journal_block_tag3_t to store a full 32-bit checksum. Everything else * is the same as v2. * * Checksum v1, v2, and v3 are mutually exclusive features. */ struct commit_header { __be32 h_magic; __be32 h_blocktype; __be32 h_sequence; unsigned char h_chksum_type; unsigned char h_chksum_size; unsigned char h_padding[2]; __be32 h_chksum[JBD2_CHECKSUM_BYTES]; __be64 h_commit_sec; __be32 h_commit_nsec; }; /* * The block tag: used to describe a single buffer in the journal. * t_blocknr_high is only used if INCOMPAT_64BIT is set, so this * raw struct shouldn't be used for pointer math or sizeof() - use * journal_tag_bytes(journal) instead to compute this. */ typedef struct journal_block_tag3_s { __be32 t_blocknr; /* The on-disk block number */ __be32 t_flags; /* See below */ __be32 t_blocknr_high; /* most-significant high 32bits. */ __be32 t_checksum; /* crc32c(uuid+seq+block) */ } journal_block_tag3_t; typedef struct journal_block_tag_s { __be32 t_blocknr; /* The on-disk block number */ __be16 t_checksum; /* truncated crc32c(uuid+seq+block) */ __be16 t_flags; /* See below */ __be32 t_blocknr_high; /* most-significant high 32bits. */ } journal_block_tag_t; /* Tail of descriptor or revoke block, for checksumming */ struct jbd2_journal_block_tail { __be32 t_checksum; /* crc32c(uuid+descr_block) */ }; /* * The revoke descriptor: used on disk to describe a series of blocks to * be revoked from the log */ typedef struct jbd2_journal_revoke_header_s { journal_header_t r_header; __be32 r_count; /* Count of bytes used in the block */ } jbd2_journal_revoke_header_t; /* Definitions for the journal tag flags word: */ #define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */ #define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */ #define JBD2_FLAG_DELETED 4 /* block deleted by this transaction */ #define JBD2_FLAG_LAST_TAG 8 /* last tag in this descriptor block */ /* * The journal superblock. All fields are in big-endian byte order. 
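 *
 * A hedged accessor example ("sb" is a hypothetical journal_superblock_t
 * pointer and "first_tid" a hypothetical tid; every on-disk field needs a
 * byte swap on little-endian hosts):
 *
 *	u32 maxlen = be32_to_cpu(sb->s_maxlen);
 *	sb->s_sequence = cpu_to_be32(first_tid);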
*/ typedef struct journal_superblock_s { /* 0x0000 */ journal_header_t s_header; /* 0x000C */ /* Static information describing the journal */ __be32 s_blocksize; /* journal device blocksize */ __be32 s_maxlen; /* total blocks in journal file */ __be32 s_first; /* first block of log information */ /* 0x0018 */ /* Dynamic information describing the current state of the log */ __be32 s_sequence; /* first commit ID expected in log */ __be32 s_start; /* blocknr of start of log */ /* 0x0020 */ /* Error value, as set by jbd2_journal_abort(). */ __be32 s_errno; /* 0x0024 */ /* Remaining fields are only valid in a version-2 superblock */ __be32 s_feature_compat; /* compatible feature set */ __be32 s_feature_incompat; /* incompatible feature set */ __be32 s_feature_ro_compat; /* readonly-compatible feature set */ /* 0x0030 */ __u8 s_uuid[16]; /* 128-bit uuid for journal */ /* 0x0040 */ __be32 s_nr_users; /* Nr of filesystems sharing log */ __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/ /* 0x0048 */ __be32 s_max_transaction; /* Limit of journal blocks per trans.*/ __be32 s_max_trans_data; /* Limit of data blocks per trans. */ /* 0x0050 */ __u8 s_checksum_type; /* checksum type */ __u8 s_padding2[3]; __u32 s_padding[42]; __be32 s_checksum; /* crc32c(superblock) */ /* 0x0100 */ __u8 s_users[16*48]; /* ids of all fs'es sharing the log */ /* 0x0400 */ } journal_superblock_t; /* Use the jbd2_{has,set,clear}_feature_* helpers; these will be removed */ #define JBD2_HAS_COMPAT_FEATURE(j,mask) \ ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask)))) #define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \ ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask)))) #define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \ ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask)))) #define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 #define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 #define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 /* See "journal feature predicate functions" below */ /* Features known to this kernel version: */ #define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM #define JBD2_KNOWN_ROCOMPAT_FEATURES 0 #define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \ JBD2_FEATURE_INCOMPAT_64BIT | \ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \ JBD2_FEATURE_INCOMPAT_CSUM_V2 | \ JBD2_FEATURE_INCOMPAT_CSUM_V3) #ifdef __KERNEL__ #include <linux/fs.h> #include <linux/sched.h> enum jbd_state_bits { BH_JBD /* Has an attached ext3 journal_head */ = BH_PrivateStart, BH_JWrite, /* Being written to log (@@@ DEBUGGING) */ BH_Freed, /* Has been freed (truncated) */ BH_Revoked, /* Has been revoked from the log */ BH_RevokeValid, /* Revoked flag is valid */ BH_JBDDirty, /* Is dirty but journaled */ BH_State, /* Pins most journal_head state */ BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ BH_Shadow, /* IO on shadow buffer is running */ BH_Verified, /* Metadata block has been verified ok */ BH_JBDPrivateStart, /* First bit available for private use by FS */ }; BUFFER_FNS(JBD, jbd) BUFFER_FNS(JWrite, jwrite) BUFFER_FNS(JBDDirty, jbddirty) TAS_BUFFER_FNS(JBDDirty, jbddirty) BUFFER_FNS(Revoked, revoked) TAS_BUFFER_FNS(Revoked, revoked) BUFFER_FNS(RevokeValid, revokevalid) TAS_BUFFER_FNS(RevokeValid, revokevalid) BUFFER_FNS(Freed, freed) BUFFER_FNS(Shadow, 
shadow) BUFFER_FNS(Verified, verified) static inline struct buffer_head *jh2bh(struct journal_head *jh) { return jh->b_bh; } static inline struct journal_head *bh2jh(struct buffer_head *bh) { return bh->b_private; } static inline void jbd_lock_bh_state(struct buffer_head *bh) { bit_spin_lock(BH_State, &bh->b_state); } static inline int jbd_trylock_bh_state(struct buffer_head *bh) { return bit_spin_trylock(BH_State, &bh->b_state); } static inline int jbd_is_locked_bh_state(struct buffer_head *bh) { return bit_spin_is_locked(BH_State, &bh->b_state); } static inline void jbd_unlock_bh_state(struct buffer_head *bh) { bit_spin_unlock(BH_State, &bh->b_state); } static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) { bit_spin_lock(BH_JournalHead, &bh->b_state); } static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) { bit_spin_unlock(BH_JournalHead, &bh->b_state); } #define J_ASSERT(assert) BUG_ON(!(assert)) #define J_ASSERT_BH(bh, expr) J_ASSERT(expr) #define J_ASSERT_JH(jh, expr) J_ASSERT(expr) #if defined(JBD2_PARANOID_IOFAIL) #define J_EXPECT(expr, why...) J_ASSERT(expr) #define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr) #define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr) #else #define __journal_expect(expr, why...) \ ({ \ int val = (expr); \ if (!val) { \ printk(KERN_ERR \ "JBD2 unexpected failure: %s: %s;\n", \ __func__, #expr); \ printk(KERN_ERR why "\n"); \ } \ val; \ }) #define J_EXPECT(expr, why...) __journal_expect(expr, ## why) #define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why) #define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why) #endif /* Flags in jbd_inode->i_flags */ #define __JI_COMMIT_RUNNING 0 #define __JI_WRITE_DATA 1 #define __JI_WAIT_DATA 2 /* * Commit of the inode data in progress. We use this flag to protect us from * concurrent deletion of inode. We cannot use reference to inode for this * since we cannot afford doing last iput() on behalf of kjournald */ #define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING) /* Write allocated dirty buffers in this inode before commit */ #define JI_WRITE_DATA (1 << __JI_WRITE_DATA) /* Wait for outstanding data writes for this inode before commit */ #define JI_WAIT_DATA (1 << __JI_WAIT_DATA) /** * struct jbd2_inode - The jbd_inode type is the structure linking inodes in * ordered mode present in a transaction so that we can sync them during commit. */ struct jbd2_inode { /** * @i_transaction: * * Which transaction does this inode belong to? Either the running * transaction or the committing one. [j_list_lock] */ transaction_t *i_transaction; /** * @i_next_transaction: * * Pointer to the running transaction modifying inode's data in case * there is already a committing transaction touching it. [j_list_lock] */ transaction_t *i_next_transaction; /** * @i_list: List of inodes in the i_transaction [j_list_lock] */ struct list_head i_list; /** * @i_vfs_inode: * * VFS inode this inode belongs to [constant for lifetime of structure] */ struct inode *i_vfs_inode; /** * @i_flags: Flags of inode [j_list_lock] */ unsigned long i_flags; /** * @i_dirty_start: * * Offset in bytes where the dirty range for this inode starts. * [j_list_lock] */ loff_t i_dirty_start; /** * @i_dirty_end: * * Inclusive offset in bytes where the dirty range for this inode * ends. [j_list_lock] */ loff_t i_dirty_end; }; struct jbd2_revoke_table_s; /** * struct handle_s - The handle_s type is the concrete type associated with * handle_t. 
* @h_transaction: Which compound transaction is this update a part of? * @h_journal: Which journal handle belongs to - used iff h_reserved set. * @h_rsv_handle: Handle reserved for finishing the logical operation. * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. * @h_ref: Reference count on this handle. * @h_err: Field for caller's use to track errors through large fs operations. * @h_sync: Flag for sync-on-close. * @h_jdata: Flag to force data journaling. * @h_reserved: Flag for handle for reserved credits. * @h_aborted: Flag indicating fatal error on handle. * @h_type: For handle statistics. * @h_line_no: For handle statistics. * @h_start_jiffies: Handle Start time. * @h_requested_credits: Holds @h_buffer_credits after handle is started. * @saved_alloc_context: Saved context while transaction is open. **/ /* Docbook can't yet cope with the bit fields, but will leave the documentation * in so it can be fixed later. */ struct jbd2_journal_handle { union { transaction_t *h_transaction; /* Which journal handle belongs to - used iff h_reserved set */ journal_t *h_journal; }; handle_t *h_rsv_handle; int h_buffer_credits; int h_ref; int h_err; /* Flags [no locking] */ unsigned int h_sync: 1; unsigned int h_jdata: 1; unsigned int h_reserved: 1; unsigned int h_aborted: 1; unsigned int h_type: 8; unsigned int h_line_no: 16; unsigned long h_start_jiffies; unsigned int h_requested_credits; unsigned int saved_alloc_context; }; /* * Some stats for checkpoint phase */ struct transaction_chp_stats_s { unsigned long cs_chp_time; __u32 cs_forced_to_close; __u32 cs_written; __u32 cs_dropped; }; /* The transaction_t type is the guts of the journaling mechanism. It * tracks a compound transaction through its various states: * * RUNNING: accepting new updates * LOCKED: Updates still running but we don't accept new ones * RUNDOWN: Updates are tidying up but have finished requesting * new buffers to modify (state not used for now) * FLUSH: All updates complete, but we are still writing to disk * COMMIT: All data on disk, writing commit record * FINISHED: We still have to keep the transaction for checkpointing. * * The transaction keeps track of all of the buffers modified by a * running transaction, and all of the buffers committed but not yet * flushed to home for finished transactions. */ /* * Lock ranking: * * j_list_lock * ->jbd_lock_bh_journal_head() (This is "innermost") * * j_state_lock * ->jbd_lock_bh_state() * * jbd_lock_bh_state() * ->j_list_lock * * j_state_lock * ->t_handle_lock * * j_state_lock * ->j_list_lock (journal_unmap_buffer) * */ struct transaction_s { /* Pointer to the journal for this transaction. [no locking] */ journal_t *t_journal; /* Sequence number for this transaction [no locking] */ tid_t t_tid; /* * Transaction's current state * [no locking - only kjournald2 alters this] * [j_list_lock] guards transition of a transaction into T_FINISHED * state and subsequent call of __jbd2_journal_drop_transaction() * FIXME: needs barriers * KLUDGE: [use j_state_lock] */ enum { T_RUNNING, T_LOCKED, T_SWITCH, T_FLUSH, T_COMMIT, T_COMMIT_DFLUSH, T_COMMIT_JFLUSH, T_COMMIT_CALLBACK, T_FINISHED } t_state; /* * Where in the log does this transaction's commit start? 
[no locking] */ unsigned long t_log_start; /* Number of buffers on the t_buffers list [j_list_lock] */ int t_nr_buffers; /* * Doubly-linked circular list of all buffers reserved but not yet * modified by this transaction [j_list_lock] */ struct journal_head *t_reserved_list; /* * Doubly-linked circular list of all metadata buffers owned by this * transaction [j_list_lock] */ struct journal_head *t_buffers; /* * Doubly-linked circular list of all forget buffers (superseded * buffers which we can un-checkpoint once this transaction commits) * [j_list_lock] */ struct journal_head *t_forget; /* * Doubly-linked circular list of all buffers still to be flushed before * this transaction can be checkpointed. [j_list_lock] */ struct journal_head *t_checkpoint_list; /* * Doubly-linked circular list of all buffers submitted for IO while * checkpointing. [j_list_lock] */ struct journal_head *t_checkpoint_io_list; /* * Doubly-linked circular list of metadata buffers being shadowed by log * IO. The IO buffers on the iobuf list and the shadow buffers on this * list match each other one for one at all times. [j_list_lock] */ struct journal_head *t_shadow_list; /* * List of inodes associated with the transaction; e.g., ext4 uses * this to track inodes in data=ordered and data=journal mode that * need special handling on transaction commit; also used by ocfs2. * [j_list_lock] */ struct list_head t_inode_list; /* * Protects info related to handles */ spinlock_t t_handle_lock; /* * Longest time some handle had to wait for running transaction */ unsigned long t_max_wait; /* * When transaction started */ unsigned long t_start; /* * When commit was requested */ unsigned long t_requested; /* * Checkpointing stats [j_checkpoint_sem] */ struct transaction_chp_stats_s t_chp_stats; /* * Number of outstanding updates running on this transaction * [none] */ atomic_t t_updates; /* * Number of buffers reserved for use by all handles in this transaction * handle but not yet modified. [none] */ atomic_t t_outstanding_credits; /* * Forward and backward links for the circular list of all transactions * awaiting checkpoint. [j_list_lock] */ transaction_t *t_cpnext, *t_cpprev; /* * When will the transaction expire (become due for commit), in jiffies? * [no locking] */ unsigned long t_expires; /* * When this transaction started, in nanoseconds [no locking] */ ktime_t t_start_time; /* * How many handles used this transaction? [none] */ atomic_t t_handle_count; /* * This transaction is being forced and some process is * waiting for it to finish. */ unsigned int t_synchronous_commit:1; /* Disk flush needs to be sent to fs partition [no locking] */ int t_need_data_flush; /* * For use by the filesystem to store fs-specific data * structures associated with the transaction */ struct list_head t_private_list; }; struct transaction_run_stats_s { unsigned long rs_wait; unsigned long rs_request_delay; unsigned long rs_running; unsigned long rs_locked; unsigned long rs_flushing; unsigned long rs_logging; __u32 rs_handle_count; __u32 rs_blocks; __u32 rs_blocks_logged; }; struct transaction_stats_s { unsigned long ts_tid; unsigned long ts_requested; struct transaction_run_stats_s run; }; static inline unsigned long jbd2_time_diff(unsigned long start, unsigned long end) { if (end >= start) return end - start; return end + (MAX_JIFFY_OFFSET - start); } #define JBD2_NR_BATCH 64 /** * struct journal_s - The journal_s type is the concrete type associated with * journal_t. 
*/ struct journal_s { /** * @j_flags: General journaling state flags [j_state_lock] */ unsigned long j_flags; /** * @j_errno: * * Is there an outstanding uncleared error on the journal (from a prior * abort)? [j_state_lock] */ int j_errno; /** * @j_sb_buffer: The first part of the superblock buffer. */ struct buffer_head *j_sb_buffer; /** * @j_superblock: The second part of the superblock buffer. */ journal_superblock_t *j_superblock; /** * @j_format_version: Version of the superblock format. */ int j_format_version; /** * @j_state_lock: Protect the various scalars in the journal. */ rwlock_t j_state_lock; /** * @j_barrier_count: * * Number of processes waiting to create a barrier lock [j_state_lock] */ int j_barrier_count; /** * @j_barrier: The barrier lock itself. */ struct mutex j_barrier; /** * @j_running_transaction: * * Transactions: The current running transaction... * [j_state_lock] [caller holding open handle] */ transaction_t *j_running_transaction; /** * @j_committing_transaction: * * the transaction we are pushing to disk * [j_state_lock] [caller holding open handle] */ transaction_t *j_committing_transaction; /** * @j_checkpoint_transactions: * * ... and a linked circular list of all transactions waiting for * checkpointing. [j_list_lock] */ transaction_t *j_checkpoint_transactions; /** * @j_wait_transaction_locked: * * Wait queue for waiting for a locked transaction to start committing, * or for a barrier lock to be released. */ wait_queue_head_t j_wait_transaction_locked; /** * @j_wait_done_commit: Wait queue for waiting for commit to complete. */ wait_queue_head_t j_wait_done_commit; /** * @j_wait_commit: Wait queue to trigger commit. */ wait_queue_head_t j_wait_commit; /** * @j_wait_updates: Wait queue to wait for updates to complete. */ wait_queue_head_t j_wait_updates; /** * @j_wait_reserved: * * Wait queue to wait for reserved buffer credits to drop. */ wait_queue_head_t j_wait_reserved; /** * @j_checkpoint_mutex: * * Semaphore for locking against concurrent checkpoints. */ struct mutex j_checkpoint_mutex; /** * @j_chkpt_bhs: * * List of buffer heads used by the checkpoint routine. This * was moved from jbd2_log_do_checkpoint() to reduce stack * usage. Access to this array is controlled by the * @j_checkpoint_mutex. [j_checkpoint_mutex] */ struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; /** * @j_head: * * Journal head: identifies the first unused block in the journal. * [j_state_lock] */ unsigned long j_head; /** * @j_tail: * * Journal tail: identifies the oldest still-used block in the journal. * [j_state_lock] */ unsigned long j_tail; /** * @j_free: * * Journal free: how many free blocks are there in the journal? * [j_state_lock] */ unsigned long j_free; /** * @j_first: * * The block number of the first usable block in the journal * [j_state_lock]. */ unsigned long j_first; /** * @j_last: * * The block number one beyond the last usable block in the journal * [j_state_lock]. */ unsigned long j_last; /** * @j_dev: Device where we store the journal. */ struct block_device *j_dev; /** * @j_blocksize: Block size for the location where we store the journal. */ int j_blocksize; /** * @j_blk_offset: * * Starting block offset into the device where we store the journal. */ unsigned long long j_blk_offset; /** * @j_devname: Journal device name. */ char j_devname[BDEVNAME_SIZE+24]; /** * @j_fs_dev: * * Device which holds the client fs. For internal journal this will be * equal to j_dev. 
*/ struct block_device *j_fs_dev; /** * @j_maxlen: Total maximum capacity of the journal region on disk. */ unsigned int j_maxlen; /** * @j_reserved_credits: * * Number of buffers reserved from the running transaction. */ atomic_t j_reserved_credits; /** * @j_list_lock: Protects the buffer lists and internal buffer state. */ spinlock_t j_list_lock; /** * @j_inode: * * Optional inode where we store the journal. If present, all * journal block numbers are mapped into this inode via bmap(). */ struct inode *j_inode; /** * @j_tail_sequence: * * Sequence number of the oldest transaction in the log [j_state_lock] */ tid_t j_tail_sequence; /** * @j_transaction_sequence: * * Sequence number of the next transaction to grant [j_state_lock] */ tid_t j_transaction_sequence; /** * @j_commit_sequence: * * Sequence number of the most recently committed transaction * [j_state_lock]. */ tid_t j_commit_sequence; /** * @j_commit_request: * * Sequence number of the most recent transaction wanting commit * [j_state_lock] */ tid_t j_commit_request; /** * @j_uuid: * * Journal uuid: identifies the object (filesystem, LVM volume etc) * backed by this journal. This will eventually be replaced by an array * of uuids, allowing us to index multiple devices within a single * journal and to perform atomic updates across them. */ __u8 j_uuid[16]; /** * @j_task: Pointer to the current commit thread for this journal. */ struct task_struct *j_task; /** * @j_max_transaction_buffers: * * Maximum number of metadata buffers to allow in a single compound * commit transaction. */ int j_max_transaction_buffers; /** * @j_commit_interval: * * What is the maximum transaction lifetime before we begin a commit? */ unsigned long j_commit_interval; /** * @j_commit_timer: The timer used to wakeup the commit thread. */ struct timer_list j_commit_timer; /** * @j_revoke_lock: Protect the revoke table. */ spinlock_t j_revoke_lock; /** * @j_revoke: * * The revoke table - maintains the list of revoked blocks in the * current transaction. */ struct jbd2_revoke_table_s *j_revoke; /** * @j_revoke_table: Alternate revoke tables for j_revoke. */ struct jbd2_revoke_table_s *j_revoke_table[2]; /** * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction. */ struct buffer_head **j_wbuf; /** * @j_wbufsize: * * Size of @j_wbuf array. */ int j_wbufsize; /** * @j_last_sync_writer: * * The pid of the last person to run a synchronous operation * through the journal. */ pid_t j_last_sync_writer; /** * @j_average_commit_time: * * The average amount of time in nanoseconds it takes to commit a * transaction to disk. [j_state_lock] */ u64 j_average_commit_time; /** * @j_min_batch_time: * * Minimum time that we should wait for additional filesystem operations * to get batched into a synchronous handle in microseconds. */ u32 j_min_batch_time; /** * @j_max_batch_time: * * Maximum time that we should wait for additional filesystem operations * to get batched into a synchronous handle in microseconds. */ u32 j_max_batch_time; /** * @j_commit_callback: * * This function is called when a transaction is closed. */ void (*j_commit_callback)(journal_t *, transaction_t *); /** * @j_submit_inode_data_buffers: * * This function is called for all inodes associated with the * committing transaction marked with JI_WRITE_DATA flag * before we start to write out the transaction to the journal. 
*/ int (*j_submit_inode_data_buffers) (struct jbd2_inode *); /** * @j_finish_inode_data_buffers: * * This function is called for all inodes associated with the * committing transaction marked with JI_WAIT_DATA flag * after we have written the transaction to the journal * but before we write out the commit block. */ int (*j_finish_inode_data_buffers) (struct jbd2_inode *); /* * Journal statistics */ /** * @j_history_lock: Protect the transactions statistics history. */ spinlock_t j_history_lock; /** * @j_proc_entry: procfs entry for the jbd statistics directory. */ struct proc_dir_entry *j_proc_entry; /** * @j_stats: Overall statistics. */ struct transaction_stats_s j_stats; /** * @j_failed_commit: Failed journal commit ID. */ unsigned int j_failed_commit; /** * @j_private: * * An opaque pointer to fs-private information. ext3 puts its * superblock pointer here. */ void *j_private; /** * @j_chksum_driver: * * Reference to checksum algorithm driver via cryptoapi. */ struct crypto_shash *j_chksum_driver; /** * @j_csum_seed: * * Precomputed journal UUID checksum for seeding other checksums. */ __u32 j_csum_seed; #ifdef CONFIG_DEBUG_LOCK_ALLOC /** * @j_trans_commit_map: * * Lockdep entity to track transaction commit dependencies. Handles * hold this "lock" for read, when we wait for commit, we acquire the * "lock" for writing. This matches the properties of jbd2 journalling * where the running transaction has to wait for all handles to be * dropped to commit that transaction and also acquiring a handle may * require transaction commit to finish. */ struct lockdep_map j_trans_commit_map; #endif }; #define jbd2_might_wait_for_commit(j) \ do { \ rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \ rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \ } while (0) /* journal feature predicate functions */ #define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \ static inline bool jbd2_has_feature_##name(journal_t *j) \ { \ return ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_compat & \ cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname)) != 0); \ } \ static inline void jbd2_set_feature_##name(journal_t *j) \ { \ (j)->j_superblock->s_feature_compat |= \ cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \ } \ static inline void jbd2_clear_feature_##name(journal_t *j) \ { \ (j)->j_superblock->s_feature_compat &= \ ~cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \ } #define JBD2_FEATURE_RO_COMPAT_FUNCS(name, flagname) \ static inline bool jbd2_has_feature_##name(journal_t *j) \ { \ return ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_ro_compat & \ cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname)) != 0); \ } \ static inline void jbd2_set_feature_##name(journal_t *j) \ { \ (j)->j_superblock->s_feature_ro_compat |= \ cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \ } \ static inline void jbd2_clear_feature_##name(journal_t *j) \ { \ (j)->j_superblock->s_feature_ro_compat &= \ ~cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \ } #define JBD2_FEATURE_INCOMPAT_FUNCS(name, flagname) \ static inline bool jbd2_has_feature_##name(journal_t *j) \ { \ return ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_incompat & \ cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname)) != 0); \ } \ static inline void jbd2_set_feature_##name(journal_t *j) \ { \ (j)->j_superblock->s_feature_incompat |= \ cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \ } \ static inline void jbd2_clear_feature_##name(journal_t *j) \ { \ (j)->j_superblock->s_feature_incompat &= \ 
				~cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \
}

JBD2_FEATURE_COMPAT_FUNCS(checksum,		CHECKSUM)

JBD2_FEATURE_INCOMPAT_FUNCS(revoke,		REVOKE)
JBD2_FEATURE_INCOMPAT_FUNCS(64bit,		64BIT)
JBD2_FEATURE_INCOMPAT_FUNCS(async_commit,	ASYNC_COMMIT)
JBD2_FEATURE_INCOMPAT_FUNCS(csum2,		CSUM_V2)
JBD2_FEATURE_INCOMPAT_FUNCS(csum3,		CSUM_V3)

/*
 * Journal flag definitions
 */
#define JBD2_UNMOUNT	0x001	/* Journal thread is being destroyed */
#define JBD2_ABORT	0x002	/* Journaling has been aborted for errors. */
#define JBD2_ACK_ERR	0x004	/* The errno in the sb has been acked */
#define JBD2_FLUSHED	0x008	/* The journal superblock has been flushed */
#define JBD2_LOADED	0x010	/* The journal superblock has been loaded */
#define JBD2_BARRIER	0x020	/* Use IDE barriers */
#define JBD2_ABORT_ON_SYNCDATA_ERR	0x040	/* Abort the journal on file
						 * data write error in ordered
						 * mode */
#define JBD2_REC_ERR	0x080	/* The errno in the sb has been recorded */

/*
 * Function declarations for the journaling transaction and buffer
 * management
 */

/* Filing buffers */
extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void __journal_free_buffer(struct journal_head *bh);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void __journal_clean_data_list(transaction_t *transaction);
static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
{
	list_add_tail(&bh->b_assoc_buffers, head);
}
static inline void jbd2_unfile_log_bh(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
}

/* Log buffer allocation */
struct buffer_head *jbd2_journal_get_descriptor_buffer(transaction_t *, int);
void jbd2_descriptor_block_csum_set(journal_t *, struct buffer_head *);
int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
			      unsigned long *block);
int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);

/* Commit management */
extern void jbd2_journal_commit_transaction(journal_t *);

/* Checkpoint list management */
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);

/*
 * Triggers
 */

struct jbd2_buffer_trigger_type {
	/*
	 * Fired at the moment data to write to the journal are known to be
	 * stable - so either at the moment b_frozen_data is created or just
	 * before a buffer is written to the journal.  mapped_data is a mapped
	 * buffer that is the frozen data for commit.
	 */
	void (*t_frozen)(struct jbd2_buffer_trigger_type *type,
			 struct buffer_head *bh, void *mapped_data,
			 size_t size);

	/*
	 * Fired during journal abort for dirty buffers that will not be
	 * committed.
*/ void (*t_abort)(struct jbd2_buffer_trigger_type *type, struct buffer_head *bh); }; extern void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, struct jbd2_buffer_trigger_type *triggers); extern void jbd2_buffer_abort_trigger(struct journal_head *jh, struct jbd2_buffer_trigger_type *triggers); /* Buffer IO */ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction, struct journal_head *jh_in, struct buffer_head **bh_out, sector_t blocknr); /* Transaction locking */ extern void __wait_on_journal (journal_t *); /* Transaction cache support */ extern void jbd2_journal_destroy_transaction_cache(void); extern int __init jbd2_journal_init_transaction_cache(void); extern void jbd2_journal_free_transaction(transaction_t *); /* * Journal locking. * * We need to lock the journal during transaction state changes so that nobody * ever tries to take a handle on the running transaction while we are in the * middle of moving it to the commit phase. j_state_lock does this. * * Note that the locking is completely interrupt unsafe. We never touch * journal structures from interrupts. */ static inline handle_t *journal_current_handle(void) { return current->journal_info; } /* The journaling code user interface: * * Create and destroy handles * Register buffer modifications against the current transaction. */ extern handle_t *jbd2_journal_start(journal_t *, int nblocks); extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks, gfp_t gfp_mask, unsigned int type, unsigned int line_no); extern int jbd2_journal_restart(handle_t *, int nblocks); extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask); extern int jbd2_journal_start_reserved(handle_t *handle, unsigned int type, unsigned int line_no); extern void jbd2_journal_free_reserved(handle_t *handle); extern int jbd2_journal_extend (handle_t *, int nblocks); extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *); void jbd2_journal_set_triggers(struct buffer_head *, struct jbd2_buffer_trigger_type *type); extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); extern int jbd2_journal_invalidatepage(journal_t *, struct page *, unsigned int, unsigned int); extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); extern int jbd2_journal_stop(handle_t *); extern int jbd2_journal_flush (journal_t *); extern void jbd2_journal_lock_updates (journal_t *); extern void jbd2_journal_unlock_updates (journal_t *); extern journal_t * jbd2_journal_init_dev(struct block_device *bdev, struct block_device *fs_dev, unsigned long long start, int len, int bsize); extern journal_t * jbd2_journal_init_inode (struct inode *); extern int jbd2_journal_update_format (journal_t *); extern int jbd2_journal_check_used_features (journal_t *, unsigned long, unsigned long, unsigned long); extern int jbd2_journal_check_available_features (journal_t *, unsigned long, unsigned long, unsigned long); extern int jbd2_journal_set_features (journal_t *, unsigned long, unsigned long, unsigned long); extern void jbd2_journal_clear_features (journal_t *, unsigned long, unsigned long, unsigned long); extern int jbd2_journal_load (journal_t *journal); extern int jbd2_journal_destroy (journal_t *); extern int jbd2_journal_recover (journal_t *journal); 
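
/*
 * Example (an illustrative sketch, not a declaration): the usual way a
 * client filesystem journals a single metadata update with the handle
 * API above.  It assumes a journal_t *journal set up via
 * jbd2_journal_init_inode()/jbd2_journal_init_dev() and a buffer_head
 * *bh the caller already holds; error handling is abbreviated.
 *
 *	handle_t *handle = jbd2_journal_start(journal, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify bh->b_data ...
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	jbd2_journal_stop(handle);
 */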
extern int jbd2_journal_wipe (journal_t *, int); extern int jbd2_journal_skip_recovery (journal_t *); extern void jbd2_journal_update_sb_errno(journal_t *); extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t, unsigned long, int); extern void jbd2_journal_abort (journal_t *, int); extern int jbd2_journal_errno (journal_t *); extern void jbd2_journal_ack_err (journal_t *); extern int jbd2_journal_clear_err (journal_t *); extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); extern int jbd2_journal_force_commit(journal_t *); extern int jbd2_journal_force_commit_nested(journal_t *); extern int jbd2_journal_inode_ranged_write(handle_t *handle, struct jbd2_inode *inode, loff_t start_byte, loff_t length); extern int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *inode, loff_t start_byte, loff_t length); extern int jbd2_journal_submit_inode_data_buffers( struct jbd2_inode *jinode); extern int jbd2_journal_finish_inode_data_buffers( struct jbd2_inode *jinode); extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *inode, loff_t new_size); extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode); /* * journal_head management */ struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh); struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh); void jbd2_journal_put_journal_head(struct journal_head *jh); /* * handle management */ extern struct kmem_cache *jbd2_handle_cache; static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags) { return kmem_cache_zalloc(jbd2_handle_cache, gfp_flags); } static inline void jbd2_free_handle(handle_t *handle) { kmem_cache_free(jbd2_handle_cache, handle); } /* * jbd2_inode management (optional, for those file systems that want to use * dynamically allocated jbd2_inode structures) */ extern struct kmem_cache *jbd2_inode_cache; static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags) { return kmem_cache_alloc(jbd2_inode_cache, gfp_flags); } static inline void jbd2_free_inode(struct jbd2_inode *jinode) { kmem_cache_free(jbd2_inode_cache, jinode); } /* Primary revoke support */ #define JOURNAL_REVOKE_DEFAULT_HASH 256 extern int jbd2_journal_init_revoke(journal_t *, int); extern void jbd2_journal_destroy_revoke_record_cache(void); extern void jbd2_journal_destroy_revoke_table_cache(void); extern int __init jbd2_journal_init_revoke_record_cache(void); extern int __init jbd2_journal_init_revoke_table_cache(void); extern void jbd2_journal_destroy_revoke(journal_t *); extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *); extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *); extern void jbd2_journal_write_revoke_records(transaction_t *transaction, struct list_head *log_bufs); /* Recovery revoke support */ extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t); extern int jbd2_journal_test_revoke(journal_t *, unsigned long long, tid_t); extern void jbd2_journal_clear_revoke(journal_t *); extern void jbd2_journal_switch_revoke_table(journal_t *journal); extern void jbd2_clear_buffer_revoked_flags(journal_t *journal); /* * The log thread user interface: * * Request space in the current transaction, and force transaction commit * transitions on demand. 
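 *
 * For example (illustrative usage): a sync path that already knows the
 * tid it must make durable can kick the commit and then sleep on it:
 *
 *	jbd2_log_start_commit(journal, tid);
 *	jbd2_log_wait_commit(journal, tid);
 *
 * jbd2_complete_transaction() below wraps a similar start-and-wait
 * sequence for callers that only hold a tid.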
 */

int jbd2_log_start_commit(journal_t *journal, tid_t tid);
int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
int jbd2_transaction_committed(journal_t *journal, tid_t tid);
int jbd2_complete_transaction(journal_t *journal, tid_t tid);
int jbd2_log_do_checkpoint(journal_t *journal);
int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);

void __jbd2_log_wait_for_space(journal_t *journal);
extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
extern int jbd2_cleanup_journal_tail(journal_t *);

/*
 * is_journal_aborted
 *
 * Simple test wrapper function to test the JBD2_ABORT state flag.  This
 * bit, when set, indicates that we have had a fatal error somewhere,
 * either inside the journaling layer or indicated to us by the client
 * (e.g. ext3), and that we should not commit any further
 * transactions.
 */

static inline int is_journal_aborted(journal_t *journal)
{
	return journal->j_flags & JBD2_ABORT;
}

static inline int is_handle_aborted(handle_t *handle)
{
	if (handle->h_aborted || !handle->h_transaction)
		return 1;
	return is_journal_aborted(handle->h_transaction->t_journal);
}

static inline void jbd2_journal_abort_handle(handle_t *handle)
{
	handle->h_aborted = 1;
}

#endif /* __KERNEL__   */

/* Comparison functions for transaction IDs: perform comparisons using
 * modulo arithmetic so that they work over sequence number wraps. */

static inline int tid_gt(tid_t x, tid_t y)
{
	int difference = (x - y);
	return (difference > 0);
}

static inline int tid_geq(tid_t x, tid_t y)
{
	int difference = (x - y);
	return (difference >= 0);
}

extern int jbd2_journal_blocks_per_page(struct inode *inode);
extern size_t journal_tag_bytes(journal_t *journal);

static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j)
{
	return jbd2_has_feature_csum2(j) || jbd2_has_feature_csum3(j);
}

static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
{
	WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) &&
		     journal->j_chksum_driver == NULL);

	return journal->j_chksum_driver != NULL;
}

/*
 * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
 * transaction control blocks.
 */
#define JBD2_CONTROL_BLOCKS_SHIFT 5

/*
 * Return the minimum number of blocks which must be free in the journal
 * before a new transaction may be started.  Must be called under j_state_lock.
 */
static inline int jbd2_space_needed(journal_t *journal)
{
	int nblocks = journal->j_max_transaction_buffers;
	return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT);
}

/*
 * Return number of free blocks in the log. Must be called under j_state_lock.
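 *
 * Worked example with made-up numbers: if j_free is 1056 and a
 * committing transaction holds 64 outstanding credits, this returns
 * 1056 - 32 - (64 + 64/32) = 958 blocks.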
*/ static inline unsigned long jbd2_log_space_left(journal_t *journal) { /* Allow for rounding errors */ long free = journal->j_free - 32; if (journal->j_committing_transaction) { unsigned long committing = atomic_read(&journal-> j_committing_transaction->t_outstanding_credits); /* Transaction + control blocks */ free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT); } return max_t(long, free, 0); } /* * Definitions which augment the buffer_head layer */ /* journaling buffer types */ #define BJ_None 0 /* Not journaled */ #define BJ_Metadata 1 /* Normal journaled metadata */ #define BJ_Forget 2 /* Buffer superseded by this transaction */ #define BJ_Shadow 3 /* Buffer contents being shadowed to the log */ #define BJ_Reserved 4 /* Buffer is reserved for access by journal */ #define BJ_Types 5 extern int jbd_blocks_per_page(struct inode *inode); /* JBD uses a CRC32 checksum */ #define JBD_MAX_CHECKSUM_SIZE 4 static inline u32 jbd2_chksum(journal_t *journal, u32 crc, const void *address, unsigned int length) { struct { struct shash_desc shash; char ctx[JBD_MAX_CHECKSUM_SIZE]; } desc; int err; BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) > JBD_MAX_CHECKSUM_SIZE); desc.shash.tfm = journal->j_chksum_driver; *(u32 *)desc.ctx = crc; err = crypto_shash_update(&desc.shash, address, length); BUG_ON(err); return *(u32 *)desc.ctx; } /* Return most recent uncommitted transaction */ static inline tid_t jbd2_get_latest_transaction(journal_t *journal) { tid_t tid; read_lock(&journal->j_state_lock); tid = journal->j_commit_request; if (journal->j_running_transaction) tid = journal->j_running_transaction->t_tid; read_unlock(&journal->j_state_lock); return tid; } #ifdef __KERNEL__ #define buffer_trace_init(bh) do {} while (0) #define print_buffer_fields(bh) do {} while (0) #define print_buffer_trace(bh) do {} while (0) #define BUFFER_TRACE(bh, info) do {} while (0) #define BUFFER_TRACE2(bh, bh2, info) do {} while (0) #define JBUFFER_TRACE(jh, info) do {} while (0) #endif /* __KERNEL__ */ #define EFSBADCRC EBADMSG /* Bad CRC detected */ #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ #endif /* _LINUX_JBD2_H */ bsg.h 0000644 00000001764 14722070374 0005506 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BSG_H #define _LINUX_BSG_H #include <uapi/linux/bsg.h> struct request; #ifdef CONFIG_BLK_DEV_BSG struct bsg_ops { int (*check_proto)(struct sg_io_v4 *hdr); int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr, fmode_t mode); int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr); void (*free_rq)(struct request *rq); }; struct bsg_class_device { struct device *class_dev; int minor; struct request_queue *queue; const struct bsg_ops *ops; }; int bsg_register_queue(struct request_queue *q, struct device *parent, const char *name, const struct bsg_ops *ops); int bsg_scsi_register_queue(struct request_queue *q, struct device *parent); void bsg_unregister_queue(struct request_queue *q); #else static inline int bsg_scsi_register_queue(struct request_queue *q, struct device *parent) { return 0; } static inline void bsg_unregister_queue(struct request_queue *q) { } #endif /* CONFIG_BLK_DEV_BSG */ #endif /* _LINUX_BSG_H */ hid-debug.h 0000644 00000002662 14722070374 0006561 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __HID_DEBUG_H #define __HID_DEBUG_H /* * Copyright (c) 2007-2009 Jiri Kosina */ /* */ #ifdef CONFIG_DEBUG_FS #include <linux/kfifo.h> #define HID_DEBUG_BUFSIZE 512 #define HID_DEBUG_FIFOSIZE 512 void hid_dump_input(struct hid_device 
*, struct hid_usage *, __s32); void hid_dump_report(struct hid_device *, int , u8 *, int); void hid_dump_device(struct hid_device *, struct seq_file *); void hid_dump_field(struct hid_field *, int, struct seq_file *); char *hid_resolv_usage(unsigned, struct seq_file *); void hid_debug_register(struct hid_device *, const char *); void hid_debug_unregister(struct hid_device *); void hid_debug_init(void); void hid_debug_exit(void); void hid_debug_event(struct hid_device *, char *); struct hid_debug_list { DECLARE_KFIFO_PTR(hid_debug_fifo, char); struct fasync_struct *fasync; struct hid_device *hdev; struct list_head node; struct mutex read_mutex; }; #else #define hid_dump_input(a,b,c) do { } while (0) #define hid_dump_report(a,b,c,d) do { } while (0) #define hid_dump_device(a,b) do { } while (0) #define hid_dump_field(a,b,c) do { } while (0) #define hid_resolv_usage(a,b) do { } while (0) #define hid_debug_register(a, b) do { } while (0) #define hid_debug_unregister(a) do { } while (0) #define hid_debug_init() do { } while (0) #define hid_debug_exit() do { } while (0) #define hid_debug_event(a,b) do { } while (0) #endif #endif assoc_array_priv.h 0000644 00000012457 14722070374 0010302 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Private definitions for the generic associative array implementation. * * See Documentation/core-api/assoc_array.rst for information. * * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_ASSOC_ARRAY_PRIV_H #define _LINUX_ASSOC_ARRAY_PRIV_H #ifdef CONFIG_ASSOCIATIVE_ARRAY #include <linux/assoc_array.h> #define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */ #define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1) #define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT)) #define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1) #define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1) #define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG)) /* * Undefined type representing a pointer with type information in the bottom * two bits. */ struct assoc_array_ptr; /* * An N-way node in the tree. * * Each slot contains one of four things: * * (1) Nothing (NULL). * * (2) A leaf object (pointer types 0). * * (3) A next-level node (pointer type 1, subtype 0). * * (4) A shortcut (pointer type 1, subtype 1). * * The tree is optimised for search-by-ID, but permits reasonable iteration * also. * * The tree is navigated by constructing an index key consisting of an array of * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size. * * The segments correspond to levels of the tree (the first segment is used at * level 0, the second at level 1, etc.). */ struct assoc_array_node { struct assoc_array_ptr *back_pointer; u8 parent_slot; struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT]; unsigned long nr_leaves_on_branch; }; /* * A shortcut through the index space out to where a collection of nodes/leaves * with the same IDs live. */ struct assoc_array_shortcut { struct assoc_array_ptr *back_pointer; int parent_slot; int skip_to_level; struct assoc_array_ptr *next_node; unsigned long index_key[]; }; /* * Preallocation cache. 
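 *
 * Everything a pending insertion or deletion might need is allocated
 * up front so that applying the edit later cannot fail with ENOMEM;
 * see Documentation/core-api/assoc_array.rst for the edit life cycle.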
*/ struct assoc_array_edit { struct rcu_head rcu; struct assoc_array *array; const struct assoc_array_ops *ops; const struct assoc_array_ops *ops_for_excised_subtree; struct assoc_array_ptr *leaf; struct assoc_array_ptr **leaf_p; struct assoc_array_ptr *dead_leaf; struct assoc_array_ptr *new_meta[3]; struct assoc_array_ptr *excised_meta[1]; struct assoc_array_ptr *excised_subtree; struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT]; struct assoc_array_ptr *set_backpointers_to; struct assoc_array_node *adjust_count_on; long adjust_count_by; struct { struct assoc_array_ptr **ptr; struct assoc_array_ptr *to; } set[2]; struct { u8 *p; u8 to; } set_parent_slot[1]; u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1]; }; /* * Internal tree member pointers are marked in the bottom one or two bits to * indicate what type they are so that we don't have to look behind every * pointer to see what it points to. * * We provide functions to test type annotations and to create and translate * the annotated pointers. */ #define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL #define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL /* Points to leaf (or nowhere) */ #define ASSOC_ARRAY_PTR_META_TYPE 0x1UL /* Points to node or shortcut */ #define ASSOC_ARRAY_PTR_SUBTYPE_MASK 0x2UL #define ASSOC_ARRAY_PTR_NODE_SUBTYPE 0x0UL #define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x) { return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK; } static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x) { return !assoc_array_ptr_is_meta(x); } static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x) { return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK; } static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x) { return !assoc_array_ptr_is_shortcut(x); } static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x) { return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK); } static inline unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x) { return (unsigned long)x & ~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK); } static inline struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x) { return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x); } static inline struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x) { return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x); } static inline struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t) { return (struct assoc_array_ptr *)((unsigned long)p | t); } static inline struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p) { return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE); } static inline struct assoc_array_ptr *assoc_array_node_to_ptr(const struct assoc_array_node *p) { return __assoc_array_x_to_ptr( p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE); } static inline struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p) { return __assoc_array_x_to_ptr( p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE); } #endif /* CONFIG_ASSOCIATIVE_ARRAY */ #endif /* _LINUX_ASSOC_ARRAY_PRIV_H */ proc_ns.h 0000644 00000005150 14722070374 0006367 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * procfs namespace bits */ #ifndef _LINUX_PROC_NS_H #define _LINUX_PROC_NS_H #include <linux/ns_common.h> struct pid_namespace; struct nsproxy; struct path; struct 
task_struct; struct inode; struct proc_ns_operations { const char *name; const char *real_ns_name; int type; struct ns_common *(*get)(struct task_struct *task); void (*put)(struct ns_common *ns); int (*install)(struct nsproxy *nsproxy, struct ns_common *ns); struct user_namespace *(*owner)(struct ns_common *ns); struct ns_common *(*get_parent)(struct ns_common *ns); } __randomize_layout; extern const struct proc_ns_operations netns_operations; extern const struct proc_ns_operations utsns_operations; extern const struct proc_ns_operations ipcns_operations; extern const struct proc_ns_operations pidns_operations; extern const struct proc_ns_operations pidns_for_children_operations; extern const struct proc_ns_operations userns_operations; extern const struct proc_ns_operations mntns_operations; extern const struct proc_ns_operations cgroupns_operations; /* * We always define these enumerators */ enum { PROC_ROOT_INO = 1, PROC_IPC_INIT_INO = 0xEFFFFFFFU, PROC_UTS_INIT_INO = 0xEFFFFFFEU, PROC_USER_INIT_INO = 0xEFFFFFFDU, PROC_PID_INIT_INO = 0xEFFFFFFCU, PROC_CGROUP_INIT_INO = 0xEFFFFFFBU, }; #ifdef CONFIG_PROC_FS extern int pid_ns_prepare_proc(struct pid_namespace *ns); extern void pid_ns_release_proc(struct pid_namespace *ns); extern int proc_alloc_inum(unsigned int *pino); extern void proc_free_inum(unsigned int inum); #else /* CONFIG_PROC_FS */ static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; } static inline void pid_ns_release_proc(struct pid_namespace *ns) {} static inline int proc_alloc_inum(unsigned int *inum) { *inum = 1; return 0; } static inline void proc_free_inum(unsigned int inum) {} #endif /* CONFIG_PROC_FS */ static inline int ns_alloc_inum(struct ns_common *ns) { atomic_long_set(&ns->stashed, 0); return proc_alloc_inum(&ns->inum); } #define ns_free_inum(ns) proc_free_inum((ns)->inum) extern struct file *proc_ns_fget(int fd); #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) extern void *ns_get_path(struct path *path, struct task_struct *task, const struct proc_ns_operations *ns_ops); typedef struct ns_common *ns_get_path_helper_t(void *); extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb, void *private_data); extern int ns_get_name(char *buf, size_t size, struct task_struct *task, const struct proc_ns_operations *ns_ops); extern void nsfs_init(void); #endif /* _LINUX_PROC_NS_H */ serial_max3100.h 0000644 00000002300 14722070374 0007346 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * * Copyright (C) 2007 Christian Pellegrin */ #ifndef _LINUX_SERIAL_MAX3100_H #define _LINUX_SERIAL_MAX3100_H 1 /** * struct plat_max3100 - MAX3100 SPI UART platform data * @loopback: force MAX3100 in loopback * @crystal: 1 for 3.6864 Mhz, 0 for 1.8432 * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook * called on suspend and resume to activate it. * @poll_time: poll time for CTS signal in ms, 0 disables (so no hw * flow ctrl is possible but you have less CPU usage) * * You should use this structure in your machine description to specify * how the MAX3100 is connected. 
Example: * * static struct plat_max3100 max3100_plat_data = { * .loopback = 0, * .crystal = 0, * .poll_time = 100, * }; * * static struct spi_board_info spi_board_info[] = { * { * .modalias = "max3100", * .platform_data = &max3100_plat_data, * .irq = IRQ_EINT12, * .max_speed_hz = 5*1000*1000, * .chip_select = 0, * }, * }; * **/ struct plat_max3100 { int loopback; int crystal; void (*max3100_hw_suspend) (int suspend); int poll_time; }; #endif mem_encrypt.h 0000644 00000001565 14722070374 0007254 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AMD Memory Encryption Support * * Copyright (C) 2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> */ #ifndef __MEM_ENCRYPT_H__ #define __MEM_ENCRYPT_H__ #ifndef __ASSEMBLY__ #ifdef CONFIG_ARCH_HAS_MEM_ENCRYPT #include <asm/mem_encrypt.h> #else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */ static inline bool mem_encrypt_active(void) { return false; } #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ #ifdef CONFIG_AMD_MEM_ENCRYPT /* * The __sme_set() and __sme_clr() macros are useful for adding or removing * the encryption mask from a value (e.g. when dealing with pagetable * entries). */ #define __sme_set(x) ((x) | sme_me_mask) #define __sme_clr(x) ((x) & ~sme_me_mask) #else #define __sme_set(x) (x) #define __sme_clr(x) (x) #endif #endif /* __ASSEMBLY__ */ #endif /* __MEM_ENCRYPT_H__ */ irq.h 0000644 00000120546 14722070374 0005526 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQ_H #define _LINUX_IRQ_H /* * Please do not include this file in generic code. There is currently * no requirement for any architecture to implement anything held * within this file. * * Thanks. --rmk */ #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/cpumask.h> #include <linux/irqhandler.h> #include <linux/irqreturn.h> #include <linux/irqnr.h> #include <linux/topology.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/irq.h> #include <asm/ptrace.h> #include <asm/irq_regs.h> struct seq_file; struct module; struct msi_msg; struct irq_affinity_desc; enum irqchip_irq_state; /* * IRQ line status. * * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h * * IRQ_TYPE_NONE - default, unspecified type * IRQ_TYPE_EDGE_RISING - rising edge triggered * IRQ_TYPE_EDGE_FALLING - falling edge triggered * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered * IRQ_TYPE_LEVEL_HIGH - high level triggered * IRQ_TYPE_LEVEL_LOW - low level triggered * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits * IRQ_TYPE_SENSE_MASK - Mask for all the above bits * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type * to setup the HW to a sane default (used * by irqdomain map() callbacks to synchronize * the HW state and SW flags for a newly * allocated descriptor). * * IRQ_TYPE_PROBE - Special flag for probing in progress * * Bits which can be modified via irq_set/clear/modify_status_flags() * IRQ_LEVEL - Interrupt is level type. Will be also * updated in the code when the above trigger * bits are modified via irq_set_irq_type() * IRQ_PER_CPU - Mark an interrupt PER_CPU. 
Will protect * it from affinity setting * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing * IRQ_NOREQUEST - Interrupt cannot be requested via * request_irq() * IRQ_NOTHREAD - Interrupt cannot be threaded * IRQ_NOAUTOEN - Interrupt is not automatically enabled in * request/setup_irq() * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context * IRQ_NESTED_THREAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable * IRQ_IS_POLLED - Always polled by another interrupt. Exclude * it from the spurious interrupt detection * mechanism and from core side polling. * IRQ_DISABLE_UNLAZY - Disable lazy irq disable */ enum { IRQ_TYPE_NONE = 0x00000000, IRQ_TYPE_EDGE_RISING = 0x00000001, IRQ_TYPE_EDGE_FALLING = 0x00000002, IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), IRQ_TYPE_LEVEL_HIGH = 0x00000004, IRQ_TYPE_LEVEL_LOW = 0x00000008, IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), IRQ_TYPE_SENSE_MASK = 0x0000000f, IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK, IRQ_TYPE_PROBE = 0x00000010, IRQ_LEVEL = (1 << 8), IRQ_PER_CPU = (1 << 9), IRQ_NOPROBE = (1 << 10), IRQ_NOREQUEST = (1 << 11), IRQ_NOAUTOEN = (1 << 12), IRQ_NO_BALANCING = (1 << 13), IRQ_MOVE_PCNTXT = (1 << 14), IRQ_NESTED_THREAD = (1 << 15), IRQ_NOTHREAD = (1 << 16), IRQ_PER_CPU_DEVID = (1 << 17), IRQ_IS_POLLED = (1 << 18), IRQ_DISABLE_UNLAZY = (1 << 19), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) /* * Return value for chip->irq_set_affinity() * * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to * support stacked irqchips, which indicates skipping * all descendent irqchips. */ enum { IRQ_SET_MASK_OK = 0, IRQ_SET_MASK_OK_NOCOPY, IRQ_SET_MASK_OK_DONE, }; struct msi_desc; struct irq_domain; /** * struct irq_common_data - per irq data shared by all irqchips * @state_use_accessors: status information for irq chip functions. * Use accessor functions to deal with it * @node: node index useful for balancing * @handler_data: per-IRQ data for the irq_chip methods * @affinity: IRQ affinity on SMP. If this is an IPI * related irq, then this is the mask of the * CPUs to which an IPI can be sent. * @effective_affinity: The effective IRQ affinity on SMP as some irq * chips do not allow multi CPU destinations. * A subset of @affinity. * @msi_desc: MSI descriptor * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional. 
 */
struct irq_common_data {
	unsigned int		__private state_use_accessors;
#ifdef CONFIG_NUMA
	unsigned int		node;
#endif
	void			*handler_data;
	struct msi_desc		*msi_desc;
	cpumask_var_t		affinity;
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	cpumask_var_t		effective_affinity;
#endif
#ifdef CONFIG_GENERIC_IRQ_IPI
	unsigned int		ipi_offset;
#endif
};

/**
 * struct irq_data - per irq chip data passed down to chip functions
 * @mask:		precomputed bitmask for accessing the chip registers
 * @irq:		interrupt number
 * @hwirq:		hardware interrupt number, local to the interrupt domain
 * @common:		pointer to data shared by all irqchips
 * @chip:		low level interrupt hardware access
 * @domain:		Interrupt translation domain; responsible for mapping
 *			between hwirq number and linux irq number.
 * @parent_data:	pointer to parent struct irq_data to support hierarchy
 *			irq_domain
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 */
struct irq_data {
	u32			mask;
	unsigned int		irq;
	unsigned long		hwirq;
	struct irq_common_data	*common;
	struct irq_chip		*chip;
	struct irq_domain	*domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	struct irq_data		*parent_data;
#endif
	void			*chip_data;
};

/*
 * Bit masks for irq_common_data.state_use_accessors
 *
 * IRQD_TRIGGER_MASK		- Mask for the trigger type bits
 * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
 * IRQD_ACTIVATED		- Interrupt has already been activated
 * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
 * IRQD_PER_CPU			- Interrupt is per cpu
 * IRQD_AFFINITY_SET		- Interrupt affinity was set
 * IRQD_LEVEL			- Interrupt is level triggered
 * IRQD_WAKEUP_STATE		- Interrupt is configured for wakeup
 *				  from suspend
 * IRQD_MOVE_PCNTXT		- Interrupt can be moved in process
 *				  context
 * IRQD_IRQ_DISABLED		- Disabled state of the interrupt
 * IRQD_IRQ_MASKED		- Masked state of the interrupt
 * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
 * IRQD_WAKEUP_ARMED		- Wakeup mode armed
 * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
 * IRQD_AFFINITY_MANAGED	- Affinity is auto-managed by the kernel
 * IRQD_IRQ_STARTED		- Startup state of the interrupt
 * IRQD_MANAGED_SHUTDOWN	- Interrupt was shutdown due to empty affinity
 *				  mask. Applies only to affinity managed irqs.
 * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
 * IRQD_DEFAULT_TRIGGER_SET	- Expected trigger already been set
 * IRQD_CAN_RESERVE		- Can use reservation mode
 * IRQD_MSI_NOMASK_QUIRK	- Non-maskable MSI quirk for affinity change
 *				  required
 * IRQD_AFFINITY_ON_ACTIVATE	- Affinity is set on activation. Don't call
 *				  irq_chip::irq_set_affinity() when deactivated.
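 *
 * These bits live in irq_common_data::state_use_accessors and are only
 * meant to be read or modified through the irqd_*() accessors below
 * (see __irqd_to_state()).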
 */
enum {
	IRQD_TRIGGER_MASK		= 0xf,
	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
	IRQD_ACTIVATED			= (1 <<  9),
	IRQD_NO_BALANCING		= (1 << 10),
	IRQD_PER_CPU			= (1 << 11),
	IRQD_AFFINITY_SET		= (1 << 12),
	IRQD_LEVEL			= (1 << 13),
	IRQD_WAKEUP_STATE		= (1 << 14),
	IRQD_MOVE_PCNTXT		= (1 << 15),
	IRQD_IRQ_DISABLED		= (1 << 16),
	IRQD_IRQ_MASKED			= (1 << 17),
	IRQD_IRQ_INPROGRESS		= (1 << 18),
	IRQD_WAKEUP_ARMED		= (1 << 19),
	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
	IRQD_AFFINITY_MANAGED		= (1 << 21),
	IRQD_IRQ_STARTED		= (1 << 22),
	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
	IRQD_SINGLE_TARGET		= (1 << 24),
	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
	IRQD_CAN_RESERVE		= (1 << 26),
	IRQD_MSI_NOMASK_QUIRK		= (1 << 27),
	IRQD_AFFINITY_ON_ACTIVATE	= (1 << 29),
};

#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
}

static inline bool irqd_is_per_cpu(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_PER_CPU;
}

static inline bool irqd_can_balance(struct irq_data *d)
{
	return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

static inline bool irqd_affinity_was_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_SET;
}

static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_AFFINITY_SET;
}

static inline bool irqd_trigger_type_was_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
}

static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}

/*
 * Must only be called inside irq_chip.irq_set_type() functions or
 * from the DT/ACPI setup code.
 */
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
	__irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
	__irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
	__irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
}

static inline bool irqd_is_level_type(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_LEVEL;
}

/*
 * Must only be called from irqchip.irq_set_affinity() or low level
 * hierarchy domain allocation functions.
*/ static inline void irqd_set_single_target(struct irq_data *d) { __irqd_to_state(d) |= IRQD_SINGLE_TARGET; } static inline bool irqd_is_single_target(struct irq_data *d) { return __irqd_to_state(d) & IRQD_SINGLE_TARGET; } static inline bool irqd_is_wakeup_set(struct irq_data *d) { return __irqd_to_state(d) & IRQD_WAKEUP_STATE; } static inline bool irqd_can_move_in_process_context(struct irq_data *d) { return __irqd_to_state(d) & IRQD_MOVE_PCNTXT; } static inline bool irqd_irq_disabled(struct irq_data *d) { return __irqd_to_state(d) & IRQD_IRQ_DISABLED; } static inline bool irqd_irq_masked(struct irq_data *d) { return __irqd_to_state(d) & IRQD_IRQ_MASKED; } static inline bool irqd_irq_inprogress(struct irq_data *d) { return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS; } static inline bool irqd_is_wakeup_armed(struct irq_data *d) { return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; } static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d) { return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU; } static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d) { __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU; } static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) { __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; } static inline bool irqd_affinity_is_managed(struct irq_data *d) { return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; } static inline bool irqd_is_activated(struct irq_data *d) { return __irqd_to_state(d) & IRQD_ACTIVATED; } static inline void irqd_set_activated(struct irq_data *d) { __irqd_to_state(d) |= IRQD_ACTIVATED; } static inline void irqd_clr_activated(struct irq_data *d) { __irqd_to_state(d) &= ~IRQD_ACTIVATED; } static inline bool irqd_is_started(struct irq_data *d) { return __irqd_to_state(d) & IRQD_IRQ_STARTED; } static inline bool irqd_is_managed_and_shutdown(struct irq_data *d) { return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; } static inline void irqd_set_can_reserve(struct irq_data *d) { __irqd_to_state(d) |= IRQD_CAN_RESERVE; } static inline void irqd_clr_can_reserve(struct irq_data *d) { __irqd_to_state(d) &= ~IRQD_CAN_RESERVE; } static inline bool irqd_can_reserve(struct irq_data *d) { return __irqd_to_state(d) & IRQD_CAN_RESERVE; } static inline void irqd_set_msi_nomask_quirk(struct irq_data *d) { __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK; } static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d) { __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK; } static inline bool irqd_msi_nomask_quirk(struct irq_data *d) { return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; } static inline void irqd_set_affinity_on_activate(struct irq_data *d) { __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; } static inline bool irqd_affinity_on_activate(struct irq_data *d) { return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; } #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) { return d->hwirq; } /** * struct irq_chip - hardware interrupt chip descriptor * * @parent_device: pointer to parent device for irqchip * @name: name for /proc/interrupts * @irq_startup: start up the interrupt (defaults to ->enable if NULL) * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) * @irq_disable: disable the interrupt * @irq_ack: start of a new interrupt * @irq_mask: mask an interrupt source * @irq_mask_ack: ack and mask an interrupt source * @irq_unmask: unmask an interrupt source * @irq_eoi: end of interrupt * @irq_set_affinity: Set the CPU affinity on 
 *				SMP machines. If the force argument is true, it
 *				tells the driver to unconditionally apply the
 *				affinity setting. Sanity checks against the
 *				supplied affinity mask are not required. This is
 *				used for CPU hotplug where the target CPU is not
 *				yet set in the cpu_online_mask.
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
 * @irq_cpu_online:	configure an interrupt source for a secondary CPU
 * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
 * @irq_suspend:	function called from core code on suspend once per
 *			chip, when one or more interrupts are installed
 * @irq_resume:		function called from core code on resume once per chip,
 *			when one or more interrupts are installed
 * @irq_pm_shutdown:	function called from core code on shutdown once per chip
 * @irq_calc_mask:	Optional function to set irq_data.mask for special cases
 * @irq_print_chip:	optional to print special chip info in show_interrupts
 * @irq_request_resources:	optional to request resources before calling
 *				any other callback related to this irq
 * @irq_release_resources:	optional to release resources acquired with
 *				irq_request_resources
 * @irq_compose_msi_msg:	optional to compose message content for MSI
 * @irq_write_msi_msg:	optional to write message content for MSI
 * @irq_get_irqchip_state:	return the internal state of an interrupt
 * @irq_set_irqchip_state:	set the internal state of an interrupt
 * @irq_set_vcpu_affinity:	optional to target a vCPU in a virtual machine
 * @ipi_send_single:	send a single IPI to destination cpus
 * @ipi_send_mask:	send an IPI to destination cpus in cpumask
 * @irq_nmi_setup:	function called from core code before enabling an NMI
 * @irq_nmi_teardown:	function called from core code after disabling an NMI
 * @flags:		chip specific flags
 */
struct irq_chip {
	struct device	*parent_device;
	const char	*name;
	unsigned int	(*irq_startup)(struct irq_data *data);
	void		(*irq_shutdown)(struct irq_data *data);
	void		(*irq_enable)(struct irq_data *data);
	void		(*irq_disable)(struct irq_data *data);

	void		(*irq_ack)(struct irq_data *data);
	void		(*irq_mask)(struct irq_data *data);
	void		(*irq_mask_ack)(struct irq_data *data);
	void		(*irq_unmask)(struct irq_data *data);
	void		(*irq_eoi)(struct irq_data *data);

	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
	int		(*irq_retrigger)(struct irq_data *data);
	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);

	void		(*irq_bus_lock)(struct irq_data *data);
	void		(*irq_bus_sync_unlock)(struct irq_data *data);

	void		(*irq_cpu_online)(struct irq_data *data);
	void		(*irq_cpu_offline)(struct irq_data *data);

	void		(*irq_suspend)(struct irq_data *data);
	void		(*irq_resume)(struct irq_data *data);
	void		(*irq_pm_shutdown)(struct irq_data *data);

	void		(*irq_calc_mask)(struct irq_data *data);

	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);
	int		(*irq_request_resources)(struct irq_data *data);
	void		(*irq_release_resources)(struct irq_data *data);

	void		(*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
	void		(*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);

	int		(*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
	int
(*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state); int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info); void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); int (*irq_nmi_setup)(struct irq_data *data); void (*irq_nmi_teardown)(struct irq_data *data); unsigned long flags; }; /* * irq_chip specific flags * * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks * when irq enabled * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), IRQCHIP_EOI_IF_HANDLED = (1 << 1), IRQCHIP_MASK_ON_SUSPEND = (1 << 2), IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), IRQCHIP_SKIP_SET_WAKE = (1 << 4), IRQCHIP_ONESHOT_SAFE = (1 << 5), IRQCHIP_EOI_THREADED = (1 << 6), IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), IRQCHIP_SUPPORTS_NMI = (1 << 8), IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), }; #include <linux/irqdesc.h> /* * Pick up the arch-dependent methods: */ #include <asm/hw_irq.h> #ifndef NR_IRQS_LEGACY # define NR_IRQS_LEGACY 0 #endif #ifndef ARCH_IRQ_INIT_FLAGS # define ARCH_IRQ_INIT_FLAGS 0 #endif #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS struct irqaction; extern int setup_irq(unsigned int irq, struct irqaction *new); extern void remove_irq(unsigned int irq, struct irqaction *act); extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); extern void irq_cpu_online(void); extern void irq_cpu_offline(void); extern int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask, bool force); extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION) extern void irq_migrate_all_off_this_cpu(void); extern int irq_affinity_online_cpu(unsigned int cpu); #else # define irq_affinity_online_cpu NULL #endif #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) void __irq_move_irq(struct irq_data *data); static inline void irq_move_irq(struct irq_data *data) { if (unlikely(irqd_is_setaffinity_pending(data))) __irq_move_irq(data); } void irq_move_masked_irq(struct irq_data *data); void irq_force_complete_move(struct irq_desc *desc); #else static inline void irq_move_irq(struct irq_data *data) { } static inline void irq_move_masked_irq(struct irq_data *data) { } static inline void irq_force_complete_move(struct irq_desc *desc) { } #endif extern int no_irq_affinity; #ifdef CONFIG_HARDIRQS_SW_RESEND int irq_set_parent(int irq, int parent_irq); #else static inline int irq_set_parent(int irq, int parent_irq) { return 0; } #endif /* * Built-in IRQ handlers for various IRQ types, * callable via desc->handle_irq() */ extern void handle_level_irq(struct irq_desc *desc); extern void handle_fasteoi_irq(struct irq_desc *desc); extern void handle_edge_irq(struct irq_desc *desc); extern void 
handle_edge_eoi_irq(struct irq_desc *desc); extern void handle_simple_irq(struct irq_desc *desc); extern void handle_untracked_irq(struct irq_desc *desc); extern void handle_percpu_irq(struct irq_desc *desc); extern void handle_percpu_devid_irq(struct irq_desc *desc); extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); extern void handle_fasteoi_nmi(struct irq_desc *desc); extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc); extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); extern int irq_chip_pm_get(struct irq_data *data); extern int irq_chip_pm_put(struct irq_data *data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY extern void handle_fasteoi_ack_irq(struct irq_desc *desc); extern void handle_fasteoi_mask_irq(struct irq_desc *desc); extern void irq_chip_enable_parent(struct irq_data *data); extern void irq_chip_disable_parent(struct irq_data *data); extern void irq_chip_ack_parent(struct irq_data *data); extern int irq_chip_retrigger_hierarchy(struct irq_data *data); extern void irq_chip_mask_parent(struct irq_data *data); extern void irq_chip_mask_ack_parent(struct irq_data *data); extern void irq_chip_unmask_parent(struct irq_data *data); extern void irq_chip_eoi_parent(struct irq_data *data); extern int irq_chip_set_affinity_parent(struct irq_data *data, const struct cpumask *dest, bool force); extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info); extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); extern int irq_chip_request_resources_parent(struct irq_data *data); extern void irq_chip_release_resources_parent(struct irq_data *data); #endif /* Handling of unhandled and spurious interrupts: */ extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret); /* Enable/disable irq debugging output: */ extern int noirqdebug_setup(char *str); /* Checks whether the interrupt can be requested by request_irq(): */ extern int can_request_irq(unsigned int irq, unsigned long irqflags); /* Dummy irq-chip implementations: */ extern struct irq_chip no_irq_chip; extern struct irq_chip dummy_irq_chip; extern void irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, irq_flow_handler_t handle, const char *name); static inline void irq_set_chip_and_handler(unsigned int irq, const struct irq_chip *chip, irq_flow_handler_t handle) { irq_set_chip_and_handler_name(irq, chip, handle, NULL); } extern int irq_set_percpu_devid(unsigned int irq); extern int irq_set_percpu_devid_partition(unsigned int irq, const struct cpumask *affinity); extern int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity); extern void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, const char *name); static inline void irq_set_handler(unsigned int irq, irq_flow_handler_t handle) { __irq_set_handler(irq, handle, 0, NULL); } /* * Set a highlevel chained flow handler for a given IRQ. * (a chained handler is automatically enabled and set to * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) */ static inline void irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) { __irq_set_handler(irq, handle, 1, NULL); } /* * Set a highlevel chained flow handler and its data for a given IRQ. 
* (a chained handler is automatically enabled and set to * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) */ void irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, void *data); void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); static inline void irq_set_status_flags(unsigned int irq, unsigned long set) { irq_modify_status(irq, 0, set); } static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) { irq_modify_status(irq, clr, 0); } static inline void irq_set_noprobe(unsigned int irq) { irq_modify_status(irq, 0, IRQ_NOPROBE); } static inline void irq_set_probe(unsigned int irq) { irq_modify_status(irq, IRQ_NOPROBE, 0); } static inline void irq_set_nothread(unsigned int irq) { irq_modify_status(irq, 0, IRQ_NOTHREAD); } static inline void irq_set_thread(unsigned int irq) { irq_modify_status(irq, IRQ_NOTHREAD, 0); } static inline void irq_set_nested_thread(unsigned int irq, bool nest) { if (nest) irq_set_status_flags(irq, IRQ_NESTED_THREAD); else irq_clear_status_flags(irq, IRQ_NESTED_THREAD); } static inline void irq_set_percpu_devid_flags(unsigned int irq) { irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD | IRQ_NOPROBE | IRQ_PER_CPU_DEVID); } /* Set/get chip/data for an IRQ: */ extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip); extern int irq_set_handler_data(unsigned int irq, void *data); extern int irq_set_chip_data(unsigned int irq, void *data); extern int irq_set_irq_type(unsigned int irq, unsigned int type); extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry); extern struct irq_data *irq_get_irq_data(unsigned int irq); static inline struct irq_chip *irq_get_chip(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? d->chip : NULL; } static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) { return d->chip; } static inline void *irq_get_chip_data(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? d->chip_data : NULL; } static inline void *irq_data_get_irq_chip_data(struct irq_data *d) { return d->chip_data; } static inline void *irq_get_handler_data(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? d->common->handler_data : NULL; } static inline void *irq_data_get_irq_handler_data(struct irq_data *d) { return d->common->handler_data; } static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? d->common->msi_desc : NULL; } static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) { return d->common->msi_desc; } static inline u32 irq_get_trigger_type(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? irqd_get_trigger_type(d) : 0; } static inline int irq_common_data_get_node(struct irq_common_data *d) { #ifdef CONFIG_NUMA return d->node; #else return 0; #endif } static inline int irq_data_get_node(struct irq_data *d) { return irq_common_data_get_node(d->common); } static inline struct cpumask *irq_get_affinity_mask(int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? 
d->common->affinity : NULL; } static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) { return d->common->affinity; } #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK static inline struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) { return d->common->effective_affinity; } static inline void irq_data_update_effective_affinity(struct irq_data *d, const struct cpumask *m) { cpumask_copy(d->common->effective_affinity, m); } #else static inline void irq_data_update_effective_affinity(struct irq_data *d, const struct cpumask *m) { } static inline struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) { return d->common->affinity; } #endif unsigned int arch_dynirq_lower_bound(unsigned int from); int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, struct module *owner, const struct irq_affinity_desc *affinity); int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, unsigned int cnt, int node, struct module *owner, const struct irq_affinity_desc *affinity); /* use macros to avoid needing export.h for THIS_MODULE */ #define irq_alloc_descs(irq, from, cnt, node) \ __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) #define irq_alloc_desc(node) \ irq_alloc_descs(-1, 0, 1, node) #define irq_alloc_desc_at(at, node) \ irq_alloc_descs(at, at, 1, node) #define irq_alloc_desc_from(from, node) \ irq_alloc_descs(-1, from, 1, node) #define irq_alloc_descs_from(from, cnt, node) \ irq_alloc_descs(-1, from, cnt, node) #define devm_irq_alloc_descs(dev, irq, from, cnt, node) \ __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL) #define devm_irq_alloc_desc(dev, node) \ devm_irq_alloc_descs(dev, -1, 0, 1, node) #define devm_irq_alloc_desc_at(dev, at, node) \ devm_irq_alloc_descs(dev, at, at, 1, node) #define devm_irq_alloc_desc_from(dev, from, node) \ devm_irq_alloc_descs(dev, -1, from, 1, node) #define devm_irq_alloc_descs_from(dev, from, cnt, node) \ devm_irq_alloc_descs(dev, -1, from, cnt, node) void irq_free_descs(unsigned int irq, unsigned int cnt); static inline void irq_free_desc(unsigned int irq) { irq_free_descs(irq, 1); } #ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ unsigned int irq_alloc_hwirqs(int cnt, int node); static inline unsigned int irq_alloc_hwirq(int node) { return irq_alloc_hwirqs(1, node); } void irq_free_hwirqs(unsigned int from, int cnt); static inline void irq_free_hwirq(unsigned int irq) { return irq_free_hwirqs(irq, 1); } int arch_setup_hwirq(unsigned int irq, int node); void arch_teardown_hwirq(unsigned int irq); #endif #ifdef CONFIG_GENERIC_IRQ_LEGACY void irq_init_desc(unsigned int irq); #endif /** * struct irq_chip_regs - register offsets for struct irq_gci * @enable: Enable register offset to reg_base * @disable: Disable register offset to reg_base * @mask: Mask register offset to reg_base * @ack: Ack register offset to reg_base * @eoi: Eoi register offset to reg_base * @type: Type configuration register offset to reg_base * @polarity: Polarity configuration register offset to reg_base */ struct irq_chip_regs { unsigned long enable; unsigned long disable; unsigned long mask; unsigned long ack; unsigned long eoi; unsigned long type; unsigned long polarity; }; /** * struct irq_chip_type - Generic interrupt chip instance for a flow type * @chip: The real interrupt chip which provides the callbacks * @regs: Register offsets for this chip * @handler: Flow handler associated with this chip * @type: Chip can handle these flow types * @mask_cache_priv: Cached mask 
register private to the chip type * @mask_cache: Pointer to cached mask register * * An irq_chip_generic can have several instances of irq_chip_type when * it requires different functions and register offsets for different * flow types. */ struct irq_chip_type { struct irq_chip chip; struct irq_chip_regs regs; irq_flow_handler_t handler; u32 type; u32 mask_cache_priv; u32 *mask_cache; }; /** * struct irq_chip_generic - Generic irq chip data structure * @lock: Lock to protect register and cache data access * @reg_base: Register base address (virtual) * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) * @suspend: Function called from core code on suspend once per * chip; can be useful instead of irq_chip::suspend to * handle chip details even when no interrupts are in use * @resume: Function called from core code on resume once per chip; * can be useful instead of irq_chip::resume to handle * chip details even when no interrupts are in use * @irq_base: Interrupt base nr for this chip * @irq_cnt: Number of interrupts handled by this chip * @mask_cache: Cached mask register shared between all chip types * @type_cache: Cached type register * @polarity_cache: Cached polarity register * @wake_enabled: Interrupt can wake up from suspend * @wake_active: Interrupt is marked as a wakeup-from-suspend source * @num_ct: Number of available irq_chip_type instances (usually 1) * @private: Private data for non-generic chip callbacks * @installed: bitfield to denote installed interrupts * @unused: bitfield to denote unused interrupts * @domain: irq domain pointer * @list: List head for keeping track of instances * @chip_types: Array of interrupt irq_chip_types * * Note that irq_chip_generic can have multiple irq_chip_type * implementations which can be associated with a particular irq line of * an irq_chip_generic instance. That allows sharing and protecting * state in an irq_chip_generic instance when we need to implement * different flow mechanisms (level/edge) for it. */ struct irq_chip_generic { raw_spinlock_t lock; void __iomem *reg_base; u32 (*reg_readl)(void __iomem *addr); void (*reg_writel)(u32 val, void __iomem *addr); void (*suspend)(struct irq_chip_generic *gc); void (*resume)(struct irq_chip_generic *gc); unsigned int irq_base; unsigned int irq_cnt; u32 mask_cache; u32 type_cache; u32 polarity_cache; u32 wake_enabled; u32 wake_active; unsigned int num_ct; void *private; unsigned long installed; unsigned long unused; struct irq_domain *domain; struct list_head list; struct irq_chip_type chip_types[]; }; /** * enum irq_gc_flags - Initialization flags for generic irq chips * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for * irq chips which need to call irq_set_wake() on * the parent irq.
Usually GPIO implementations. * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE) */ enum irq_gc_flags { IRQ_GC_INIT_MASK_CACHE = 1 << 0, IRQ_GC_INIT_NESTED_LOCK = 1 << 1, IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, IRQ_GC_NO_MASK = 1 << 3, IRQ_GC_BE_IO = 1 << 4, }; /** * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains * @irqs_per_chip: Number of interrupts per chip * @num_chips: Number of chips * @irq_flags_to_set: IRQ* flags to set on irq setup * @irq_flags_to_clear: IRQ* flags to clear on irq setup * @gc_flags: Generic chip specific setup flags * @gc: Array of pointers to generic interrupt chips */ struct irq_domain_chip_generic { unsigned int irqs_per_chip; unsigned int num_chips; unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; struct irq_chip_generic *gc[]; }; /* Generic chip callback functions */ void irq_gc_noop(struct irq_data *d); void irq_gc_mask_disable_reg(struct irq_data *d); void irq_gc_mask_set_bit(struct irq_data *d); void irq_gc_mask_clr_bit(struct irq_data *d); void irq_gc_unmask_enable_reg(struct irq_data *d); void irq_gc_ack_set_bit(struct irq_data *d); void irq_gc_ack_clr_bit(struct irq_data *d); void irq_gc_mask_disable_and_ack_set(struct irq_data *d); void irq_gc_eoi(struct irq_data *d); int irq_gc_set_wake(struct irq_data *d, unsigned int on); /* Setup functions for irq_chip_generic */ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw_irq); struct irq_chip_generic * irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, void __iomem *reg_base, irq_flow_handler_t handler); void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, enum irq_gc_flags flags, unsigned int clr, unsigned int set); int irq_setup_alt_chip(struct irq_data *d, unsigned int type); void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, unsigned int clr, unsigned int set); struct irq_chip_generic * devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct, unsigned int irq_base, void __iomem *reg_base, irq_flow_handler_t handler); int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc, u32 msk, enum irq_gc_flags flags, unsigned int clr, unsigned int set); struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, int num_ct, const char *name, irq_flow_handler_t handler, unsigned int clr, unsigned int set, enum irq_gc_flags flags); #define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name, \ handler, clr, set, flags) \ ({ \ MAYBE_BUILD_BUG_ON(irqs_per_chip > 32); \ __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\ handler, clr, set, flags); \ }) static inline void irq_free_generic_chip(struct irq_chip_generic *gc) { kfree(gc); } static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc, u32 msk, unsigned int clr, unsigned int set) { irq_remove_generic_chip(gc, msk, clr, set); irq_free_generic_chip(gc); } static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) { return container_of(d->chip, struct irq_chip_type, chip); } #define IRQ_MSK(n) (u32)((n) < 32 ?
((1 << (n)) - 1) : UINT_MAX) #ifdef CONFIG_SMP static inline void irq_gc_lock(struct irq_chip_generic *gc) { raw_spin_lock(&gc->lock); } static inline void irq_gc_unlock(struct irq_chip_generic *gc) { raw_spin_unlock(&gc->lock); } #else static inline void irq_gc_lock(struct irq_chip_generic *gc) { } static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } #endif /* * The irqsave variants are for usage in non interrupt code. Do not use * them in irq_chip callbacks. Use irq_gc_lock() instead. */ #define irq_gc_lock_irqsave(gc, flags) \ raw_spin_lock_irqsave(&(gc)->lock, flags) #define irq_gc_unlock_irqrestore(gc, flags) \ raw_spin_unlock_irqrestore(&(gc)->lock, flags) static inline void irq_reg_writel(struct irq_chip_generic *gc, u32 val, int reg_offset) { if (gc->reg_writel) gc->reg_writel(val, gc->reg_base + reg_offset); else writel(val, gc->reg_base + reg_offset); } static inline u32 irq_reg_readl(struct irq_chip_generic *gc, int reg_offset) { if (gc->reg_readl) return gc->reg_readl(gc->reg_base + reg_offset); else return readl(gc->reg_base + reg_offset); } struct irq_matrix; struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits, unsigned int alloc_start, unsigned int alloc_end); void irq_matrix_online(struct irq_matrix *m); void irq_matrix_offline(struct irq_matrix *m); void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, unsigned int *mapped_cpu); void irq_matrix_reserve(struct irq_matrix *m); void irq_matrix_remove_reserved(struct irq_matrix *m); int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, bool reserved, unsigned int *mapped_cpu); void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, unsigned int bit, bool managed); void irq_matrix_assign(struct irq_matrix *m, unsigned int bit); unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown); unsigned int irq_matrix_allocated(struct irq_matrix *m); unsigned int irq_matrix_reserved(struct irq_matrix *m); void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind); /* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */ #define INVALID_HWIRQ (~0UL) irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu); int __ipi_send_single(struct irq_desc *desc, unsigned int cpu); int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest); int ipi_send_single(unsigned int virq, unsigned int cpu); int ipi_send_mask(unsigned int virq, const struct cpumask *dest); #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER /* * Registers a generic IRQ handling function as the top-level IRQ handler in * the system, which is generally the first C code called from an assembly * architecture-specific interrupt handler. * * Returns 0 on success, or -EBUSY if an IRQ handler has already been * registered. */ int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)); /* * Allows interrupt handlers to find the irqchip that's been registered as the * top-level IRQ handler. 
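 *
 * For example, an irqchip driver's early init code might install its root
 * handler like this (illustrative sketch, not part of this API;
 * foo_handle_irq() stands in for a hypothetical handler that demultiplexes
 * the pending source and calls generic_handle_irq()):
 *
 *	if (set_handle_irq(foo_handle_irq))
 *		pr_err("root IRQ handler already registered\n");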
*/ extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init; #endif #endif /* _LINUX_IRQ_H */ pipe_fs_i.h 0000644 00000014260 14722070374 0006663 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PIPE_FS_I_H #define _LINUX_PIPE_FS_I_H #define PIPE_DEF_BUFFERS 16 #define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */ #define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */ #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ #define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ /** * struct pipe_buffer - a linux kernel pipe buffer * @page: the page containing the data for the pipe buffer * @offset: offset of data inside the @page * @len: length of data inside the @page * @ops: operations associated with this buffer. See @pipe_buf_operations. * @flags: pipe buffer flags. See above. * @private: private data owned by the ops. **/ struct pipe_buffer { struct page *page; unsigned int offset, len; const struct pipe_buf_operations *ops; unsigned int flags; unsigned long private; }; /** * struct pipe_inode_info - a linux kernel pipe * @mutex: mutex protecting the whole thing * @wait: reader/writer wait point in case of empty/full pipe * @nrbufs: the number of non-empty pipe buffers in this pipe * @buffers: total number of buffers (should be a power of 2) * @curbuf: the current pipe buffer entry * @tmp_page: cached released page * @readers: number of current readers of this pipe * @writers: number of current writers of this pipe * @files: number of struct files referring to this pipe (protected by ->i_lock) * @waiting_writers: number of writers blocked waiting for room * @r_counter: reader counter * @w_counter: writer counter * @fasync_readers: reader side fasync * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers * @user: the user who created this pipe **/ struct pipe_inode_info { struct mutex mutex; wait_queue_head_t wait; unsigned int nrbufs, curbuf, buffers; unsigned int readers; unsigned int writers; unsigned int files; unsigned int waiting_writers; unsigned int r_counter; unsigned int w_counter; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; }; /* * Note on the nesting of these functions: * * ->confirm() * ->steal() * * That is, ->steal() must be called on a confirmed buffer. * See below for the meaning of each operation. Also see kerneldoc * in fs/pipe.c for the pipe and generic variants of these hooks. */ struct pipe_buf_operations { /* * ->confirm() verifies that the data in the pipe buffer is there * and that the contents are good. If the pages in the pipe belong * to a file system, we may need to wait for IO completion in this * hook. Returns 0 for good, or a negative error value in case of * error. */ int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); /* * When the contents of this pipe buffer have been completely * consumed by a reader, ->release() is called. */ void (*release)(struct pipe_inode_info *, struct pipe_buffer *); /* * Attempt to take ownership of the pipe buffer and its contents. * ->steal() returns 0 for success, in which case the contents * of the pipe (the buf->page) are locked and now completely owned * by the caller. The page may then be transferred to a different * mapping, the most common case being insertion into a different * file's address-space cache. */ int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); /* * Get a reference to the pipe buffer.
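 *
 * Illustrative call-order sketch (not part of this header): a consumer
 * that wants to take over buf->page confirms first, then steals; both
 * wrappers below return 0 on success, and move_page_somewhere() stands
 * in for a hypothetical caller-side helper:
 *
 *	if (!pipe_buf_confirm(pipe, buf) && !pipe_buf_steal(pipe, buf))
 *		move_page_somewhere(buf->page);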
*/ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); }; /** * pipe_buf_get - get a reference to a pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to get a reference to * * Return: %true if the reference was successfully obtained. */ static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return buf->ops->get(pipe, buf); } /** * pipe_buf_release - put a reference to a pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to put a reference to */ static inline void pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { const struct pipe_buf_operations *ops = buf->ops; buf->ops = NULL; ops->release(pipe, buf); } /** * pipe_buf_confirm - verify contents of the pipe buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to confirm */ static inline int pipe_buf_confirm(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return buf->ops->confirm(pipe, buf); } /** * pipe_buf_steal - attempt to take ownership of a pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to attempt to steal */ static inline int pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return buf->ops->steal(pipe, buf); } /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ #define PIPE_SIZE PAGE_SIZE /* Pipe lock and unlock operations */ void pipe_lock(struct pipe_inode_info *); void pipe_unlock(struct pipe_inode_info *); void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); extern unsigned int pipe_max_size; extern unsigned long pipe_user_pages_hard; extern unsigned long pipe_user_pages_soft; /* Drop the inode semaphore and wait for a pipe event, atomically */ void pipe_wait(struct pipe_inode_info *pipe); struct pipe_inode_info *alloc_pipe_info(void); void free_pipe_info(struct pipe_inode_info *); /* Generic pipe buffer ops functions */ bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *); void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); extern const struct pipe_buf_operations nosteal_pipe_buf_ops; /* for F_SETPIPE_SZ and F_GETPIPE_SZ */ long pipe_fcntl(struct file *, unsigned int, unsigned long arg); struct pipe_inode_info *get_pipe_info(struct file *file); int create_pipe_files(struct file **, int); unsigned int round_pipe_size(unsigned long size); #endif hwmon.h 0000644 00000032473 14722070374 0006064 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* hwmon.h - part of lm_sensors, Linux kernel modules for hardware monitoring This file declares helper functions for the sysfs class "hwmon", for use by sensors drivers. Copyright (C) 2005 Mark M. 
Hoffman <mhoffman@lightlink.com> */ #ifndef _HWMON_H_ #define _HWMON_H_ #include <linux/bitops.h> struct device; struct attribute_group; enum hwmon_sensor_types { hwmon_chip, hwmon_temp, hwmon_in, hwmon_curr, hwmon_power, hwmon_energy, hwmon_humidity, hwmon_fan, hwmon_pwm, hwmon_max, }; enum hwmon_chip_attributes { hwmon_chip_temp_reset_history, hwmon_chip_in_reset_history, hwmon_chip_curr_reset_history, hwmon_chip_power_reset_history, hwmon_chip_register_tz, hwmon_chip_update_interval, hwmon_chip_alarms, hwmon_chip_samples, hwmon_chip_curr_samples, hwmon_chip_in_samples, hwmon_chip_power_samples, hwmon_chip_temp_samples, }; #define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history) #define HWMON_C_IN_RESET_HISTORY BIT(hwmon_chip_in_reset_history) #define HWMON_C_CURR_RESET_HISTORY BIT(hwmon_chip_curr_reset_history) #define HWMON_C_POWER_RESET_HISTORY BIT(hwmon_chip_power_reset_history) #define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz) #define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval) #define HWMON_C_ALARMS BIT(hwmon_chip_alarms) #define HWMON_C_SAMPLES BIT(hwmon_chip_samples) #define HWMON_C_CURR_SAMPLES BIT(hwmon_chip_curr_samples) #define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples) #define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples) #define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples) enum hwmon_temp_attributes { hwmon_temp_input = 0, hwmon_temp_type, hwmon_temp_lcrit, hwmon_temp_lcrit_hyst, hwmon_temp_min, hwmon_temp_min_hyst, hwmon_temp_max, hwmon_temp_max_hyst, hwmon_temp_crit, hwmon_temp_crit_hyst, hwmon_temp_emergency, hwmon_temp_emergency_hyst, hwmon_temp_alarm, hwmon_temp_lcrit_alarm, hwmon_temp_min_alarm, hwmon_temp_max_alarm, hwmon_temp_crit_alarm, hwmon_temp_emergency_alarm, hwmon_temp_fault, hwmon_temp_offset, hwmon_temp_label, hwmon_temp_lowest, hwmon_temp_highest, hwmon_temp_reset_history, }; #define HWMON_T_INPUT BIT(hwmon_temp_input) #define HWMON_T_TYPE BIT(hwmon_temp_type) #define HWMON_T_LCRIT BIT(hwmon_temp_lcrit) #define HWMON_T_LCRIT_HYST BIT(hwmon_temp_lcrit_hyst) #define HWMON_T_MIN BIT(hwmon_temp_min) #define HWMON_T_MIN_HYST BIT(hwmon_temp_min_hyst) #define HWMON_T_MAX BIT(hwmon_temp_max) #define HWMON_T_MAX_HYST BIT(hwmon_temp_max_hyst) #define HWMON_T_CRIT BIT(hwmon_temp_crit) #define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst) #define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency) #define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst) #define HWMON_T_ALARM BIT(hwmon_temp_alarm) #define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) #define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) #define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) #define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm) #define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm) #define HWMON_T_FAULT BIT(hwmon_temp_fault) #define HWMON_T_OFFSET BIT(hwmon_temp_offset) #define HWMON_T_LABEL BIT(hwmon_temp_label) #define HWMON_T_LOWEST BIT(hwmon_temp_lowest) #define HWMON_T_HIGHEST BIT(hwmon_temp_highest) #define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history) enum hwmon_in_attributes { hwmon_in_input, hwmon_in_min, hwmon_in_max, hwmon_in_lcrit, hwmon_in_crit, hwmon_in_average, hwmon_in_lowest, hwmon_in_highest, hwmon_in_reset_history, hwmon_in_label, hwmon_in_alarm, hwmon_in_min_alarm, hwmon_in_max_alarm, hwmon_in_lcrit_alarm, hwmon_in_crit_alarm, hwmon_in_enable, }; #define HWMON_I_INPUT BIT(hwmon_in_input) #define HWMON_I_MIN BIT(hwmon_in_min) #define HWMON_I_MAX BIT(hwmon_in_max) #define HWMON_I_LCRIT BIT(hwmon_in_lcrit) #define 
HWMON_I_CRIT BIT(hwmon_in_crit) #define HWMON_I_AVERAGE BIT(hwmon_in_average) #define HWMON_I_LOWEST BIT(hwmon_in_lowest) #define HWMON_I_HIGHEST BIT(hwmon_in_highest) #define HWMON_I_RESET_HISTORY BIT(hwmon_in_reset_history) #define HWMON_I_LABEL BIT(hwmon_in_label) #define HWMON_I_ALARM BIT(hwmon_in_alarm) #define HWMON_I_MIN_ALARM BIT(hwmon_in_min_alarm) #define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm) #define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm) #define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm) #define HWMON_I_ENABLE BIT(hwmon_in_enable) enum hwmon_curr_attributes { hwmon_curr_input, hwmon_curr_min, hwmon_curr_max, hwmon_curr_lcrit, hwmon_curr_crit, hwmon_curr_average, hwmon_curr_lowest, hwmon_curr_highest, hwmon_curr_reset_history, hwmon_curr_label, hwmon_curr_alarm, hwmon_curr_min_alarm, hwmon_curr_max_alarm, hwmon_curr_lcrit_alarm, hwmon_curr_crit_alarm, }; #define HWMON_C_INPUT BIT(hwmon_curr_input) #define HWMON_C_MIN BIT(hwmon_curr_min) #define HWMON_C_MAX BIT(hwmon_curr_max) #define HWMON_C_LCRIT BIT(hwmon_curr_lcrit) #define HWMON_C_CRIT BIT(hwmon_curr_crit) #define HWMON_C_AVERAGE BIT(hwmon_curr_average) #define HWMON_C_LOWEST BIT(hwmon_curr_lowest) #define HWMON_C_HIGHEST BIT(hwmon_curr_highest) #define HWMON_C_RESET_HISTORY BIT(hwmon_curr_reset_history) #define HWMON_C_LABEL BIT(hwmon_curr_label) #define HWMON_C_ALARM BIT(hwmon_curr_alarm) #define HWMON_C_MIN_ALARM BIT(hwmon_curr_min_alarm) #define HWMON_C_MAX_ALARM BIT(hwmon_curr_max_alarm) #define HWMON_C_LCRIT_ALARM BIT(hwmon_curr_lcrit_alarm) #define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm) enum hwmon_power_attributes { hwmon_power_average, hwmon_power_average_interval, hwmon_power_average_interval_max, hwmon_power_average_interval_min, hwmon_power_average_highest, hwmon_power_average_lowest, hwmon_power_average_max, hwmon_power_average_min, hwmon_power_input, hwmon_power_input_highest, hwmon_power_input_lowest, hwmon_power_reset_history, hwmon_power_accuracy, hwmon_power_cap, hwmon_power_cap_hyst, hwmon_power_cap_max, hwmon_power_cap_min, hwmon_power_min, hwmon_power_max, hwmon_power_crit, hwmon_power_lcrit, hwmon_power_label, hwmon_power_alarm, hwmon_power_cap_alarm, hwmon_power_min_alarm, hwmon_power_max_alarm, hwmon_power_lcrit_alarm, hwmon_power_crit_alarm, }; #define HWMON_P_AVERAGE BIT(hwmon_power_average) #define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval) #define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max) #define HWMON_P_AVERAGE_INTERVAL_MIN BIT(hwmon_power_average_interval_min) #define HWMON_P_AVERAGE_HIGHEST BIT(hwmon_power_average_highest) #define HWMON_P_AVERAGE_LOWEST BIT(hwmon_power_average_lowest) #define HWMON_P_AVERAGE_MAX BIT(hwmon_power_average_max) #define HWMON_P_AVERAGE_MIN BIT(hwmon_power_average_min) #define HWMON_P_INPUT BIT(hwmon_power_input) #define HWMON_P_INPUT_HIGHEST BIT(hwmon_power_input_highest) #define HWMON_P_INPUT_LOWEST BIT(hwmon_power_input_lowest) #define HWMON_P_RESET_HISTORY BIT(hwmon_power_reset_history) #define HWMON_P_ACCURACY BIT(hwmon_power_accuracy) #define HWMON_P_CAP BIT(hwmon_power_cap) #define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst) #define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max) #define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min) #define HWMON_P_MIN BIT(hwmon_power_min) #define HWMON_P_MAX BIT(hwmon_power_max) #define HWMON_P_LCRIT BIT(hwmon_power_lcrit) #define HWMON_P_CRIT BIT(hwmon_power_crit) #define HWMON_P_LABEL BIT(hwmon_power_label) #define HWMON_P_ALARM BIT(hwmon_power_alarm) #define HWMON_P_CAP_ALARM 
BIT(hwmon_power_cap_alarm) #define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm) #define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm) #define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm) #define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm) enum hwmon_energy_attributes { hwmon_energy_input, hwmon_energy_label, }; #define HWMON_E_INPUT BIT(hwmon_energy_input) #define HWMON_E_LABEL BIT(hwmon_energy_label) enum hwmon_humidity_attributes { hwmon_humidity_input, hwmon_humidity_label, hwmon_humidity_min, hwmon_humidity_min_hyst, hwmon_humidity_max, hwmon_humidity_max_hyst, hwmon_humidity_alarm, hwmon_humidity_fault, }; #define HWMON_H_INPUT BIT(hwmon_humidity_input) #define HWMON_H_LABEL BIT(hwmon_humidity_label) #define HWMON_H_MIN BIT(hwmon_humidity_min) #define HWMON_H_MIN_HYST BIT(hwmon_humidity_min_hyst) #define HWMON_H_MAX BIT(hwmon_humidity_max) #define HWMON_H_MAX_HYST BIT(hwmon_humidity_max_hyst) #define HWMON_H_ALARM BIT(hwmon_humidity_alarm) #define HWMON_H_FAULT BIT(hwmon_humidity_fault) enum hwmon_fan_attributes { hwmon_fan_input, hwmon_fan_label, hwmon_fan_min, hwmon_fan_max, hwmon_fan_div, hwmon_fan_pulses, hwmon_fan_target, hwmon_fan_alarm, hwmon_fan_min_alarm, hwmon_fan_max_alarm, hwmon_fan_fault, }; #define HWMON_F_INPUT BIT(hwmon_fan_input) #define HWMON_F_LABEL BIT(hwmon_fan_label) #define HWMON_F_MIN BIT(hwmon_fan_min) #define HWMON_F_MAX BIT(hwmon_fan_max) #define HWMON_F_DIV BIT(hwmon_fan_div) #define HWMON_F_PULSES BIT(hwmon_fan_pulses) #define HWMON_F_TARGET BIT(hwmon_fan_target) #define HWMON_F_ALARM BIT(hwmon_fan_alarm) #define HWMON_F_MIN_ALARM BIT(hwmon_fan_min_alarm) #define HWMON_F_MAX_ALARM BIT(hwmon_fan_max_alarm) #define HWMON_F_FAULT BIT(hwmon_fan_fault) enum hwmon_pwm_attributes { hwmon_pwm_input, hwmon_pwm_enable, hwmon_pwm_mode, hwmon_pwm_freq, }; #define HWMON_PWM_INPUT BIT(hwmon_pwm_input) #define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable) #define HWMON_PWM_MODE BIT(hwmon_pwm_mode) #define HWMON_PWM_FREQ BIT(hwmon_pwm_freq) /** * struct hwmon_ops - hwmon device operations * @is_visible: Callback to return attribute visibility. Mandatory. * Parameters are: * @const void *drvdata: * Pointer to driver-private data structure passed * as argument to hwmon_device_register_with_info(). * @type: Sensor type * @attr: Sensor attribute * @channel: * Channel number * The function returns the file permissions. * If the return value is 0, no attribute will be created. * @read: Read callback for data attributes. Mandatory if readable * data attributes are present. * Parameters are: * @dev: Pointer to hardware monitoring device * @type: Sensor type * @attr: Sensor attribute * @channel: * Channel number * @val: Pointer to returned value * The function returns 0 on success or a negative error number. * @read_string: * Read callback for string attributes. Mandatory if string * attributes are present. * Parameters are: * @dev: Pointer to hardware monitoring device * @type: Sensor type * @attr: Sensor attribute * @channel: * Channel number * @str: Pointer to returned string * The function returns 0 on success or a negative error number. * @write: Write callback for data attributes. Mandatory if writeable * data attributes are present. * Parameters are: * @dev: Pointer to hardware monitoring device * @type: Sensor type * @attr: Sensor attribute * @channel: * Channel number * @val: Value to write * The function returns 0 on success or a negative error number. 
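 *
 * A minimal sketch of a @read callback for a single temperature channel
 * (illustrative only; foo_get_temp_mc() is a hypothetical driver helper
 * returning millidegrees Celsius):
 *
 *	static int foo_read(struct device *dev, enum hwmon_sensor_types type,
 *			    u32 attr, int channel, long *val)
 *	{
 *		if (type != hwmon_temp || attr != hwmon_temp_input)
 *			return -EOPNOTSUPP;
 *		*val = foo_get_temp_mc(dev);
 *		return 0;
 *	}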
*/ struct hwmon_ops { umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int channel); int (*read)(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val); int (*read_string)(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str); int (*write)(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val); }; /** * Channel information * @type: Channel type. * @config: Pointer to NULL-terminated list of channel parameters. * Use for per-channel attributes. */ struct hwmon_channel_info { enum hwmon_sensor_types type; const u32 *config; }; #define HWMON_CHANNEL_INFO(stype, ...) \ (&(struct hwmon_channel_info) { \ .type = hwmon_##stype, \ .config = (u32 []) { \ __VA_ARGS__, 0 \ } \ }) /** * Chip configuration * @ops: Pointer to hwmon operations. * @info: Null-terminated list of channel information. */ struct hwmon_chip_info { const struct hwmon_ops *ops; const struct hwmon_channel_info **info; }; /* hwmon_device_register() is deprecated */ struct device *hwmon_device_register(struct device *dev); struct device * hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, const struct attribute_group **groups); struct device * devm_hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, const struct attribute_group **groups); struct device * hwmon_device_register_with_info(struct device *dev, const char *name, void *drvdata, const struct hwmon_chip_info *info, const struct attribute_group **extra_groups); struct device * devm_hwmon_device_register_with_info(struct device *dev, const char *name, void *drvdata, const struct hwmon_chip_info *info, const struct attribute_group **extra_groups); void hwmon_device_unregister(struct device *dev); void devm_hwmon_device_unregister(struct device *dev); /** * hwmon_is_bad_char - Is the char invalid in a hwmon name * @ch: the char to be considered * * hwmon_is_bad_char() can be used to determine if the given character * may not be used in a hwmon name. * * Returns true if the char is invalid, false otherwise. 
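 *
 * For example, a caller validating a candidate device name could reject
 * it like this (illustrative sketch):
 *
 *	for (i = 0; name[i]; i++)
 *		if (hwmon_is_bad_char(name[i]))
 *			return -EINVAL;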
*/ static inline bool hwmon_is_bad_char(const char ch) { switch (ch) { case '-': case '*': case ' ': case '\t': case '\n': return true; default: return false; } } #endif sh_timer.h 0000644 00000000254 14722070374 0006536 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SH_TIMER_H__ #define __SH_TIMER_H__ struct sh_timer_config { unsigned int channels_mask; }; #endif /* __SH_TIMER_H__ */ mroute_base.h 0000644 00000030033 14722070374 0007227 0 ustar 00 #ifndef __LINUX_MROUTE_BASE_H #define __LINUX_MROUTE_BASE_H #include <linux/netdevice.h> #include <linux/rhashtable-types.h> #include <linux/spinlock.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/fib_notifier.h> #include <net/ip_fib.h> /** * struct vif_device - interface representor for multicast routing * @dev: network device being used * @bytes_in: statistic; bytes ingressing * @bytes_out: statistic; bytes egressing * @pkt_in: statistic; packets ingressing * @pkt_out: statistic; packets egressing * @rate_limit: Traffic shaping (NI) * @threshold: TTL threshold * @flags: Control flags * @link: Physical interface index * @dev_parent_id: device parent id * @local: Local address * @remote: Remote address for tunnels */ struct vif_device { struct net_device *dev; unsigned long bytes_in, bytes_out; unsigned long pkt_in, pkt_out; unsigned long rate_limit; unsigned char threshold; unsigned short flags; int link; /* Currently only used by ipmr */ struct netdev_phys_item_id dev_parent_id; __be32 local, remote; }; struct vif_entry_notifier_info { struct fib_notifier_info info; struct net_device *dev; unsigned short vif_index; unsigned short vif_flags; u32 tb_id; }; static inline int mr_call_vif_notifier(struct notifier_block *nb, struct net *net, unsigned short family, enum fib_event_type event_type, struct vif_device *vif, unsigned short vif_index, u32 tb_id) { struct vif_entry_notifier_info info = { .info = { .family = family, .net = net, }, .dev = vif->dev, .vif_index = vif_index, .vif_flags = vif->flags, .tb_id = tb_id, }; return call_fib_notifier(nb, net, event_type, &info.info); } static inline int mr_call_vif_notifiers(struct net *net, unsigned short family, enum fib_event_type event_type, struct vif_device *vif, unsigned short vif_index, u32 tb_id, unsigned int *ipmr_seq) { struct vif_entry_notifier_info info = { .info = { .family = family, .net = net, }, .dev = vif->dev, .vif_index = vif_index, .vif_flags = vif->flags, .tb_id = tb_id, }; ASSERT_RTNL(); (*ipmr_seq)++; return call_fib_notifiers(net, event_type, &info.info); } #ifndef MAXVIFS /* This one is nasty; the value is defined in uapi using different symbols for * mroute and mroute6, but both map to the same value of 32.
*/ #define MAXVIFS 32 #endif #define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev)) /* mfc_flags: * MFC_STATIC - the entry was added statically (not by a routing daemon) * MFC_OFFLOAD - the entry was offloaded to the hardware */ enum { MFC_STATIC = BIT(0), MFC_OFFLOAD = BIT(1), }; /** * struct mr_mfc - common multicast routing entries * @mnode: rhashtable list * @mfc_parent: source interface (iif) * @mfc_flags: entry flags * @expires: unresolved entry expire time * @unresolved: unresolved cached skbs * @last_assert: time of last assert * @minvif: minimum VIF id * @maxvif: maximum VIF id * @bytes: bytes that have passed for this entry * @pkt: packets that have passed for this entry * @wrong_if: number of wrong source interface hits * @lastuse: time of last use of the group (traffic or update) * @ttls: OIF TTL threshold array * @refcount: reference count for this entry * @list: global entry list * @rcu: used for entry destruction * @free: Operation used for freeing an entry under RCU */ struct mr_mfc { struct rhlist_head mnode; unsigned short mfc_parent; int mfc_flags; union { struct { unsigned long expires; struct sk_buff_head unresolved; } unres; struct { unsigned long last_assert; int minvif; int maxvif; unsigned long bytes; unsigned long pkt; unsigned long wrong_if; unsigned long lastuse; unsigned char ttls[MAXVIFS]; refcount_t refcount; } res; } mfc_un; struct list_head list; struct rcu_head rcu; void (*free)(struct rcu_head *head); }; static inline void mr_cache_put(struct mr_mfc *c) { if (refcount_dec_and_test(&c->mfc_un.res.refcount)) call_rcu(&c->rcu, c->free); } static inline void mr_cache_hold(struct mr_mfc *c) { refcount_inc(&c->mfc_un.res.refcount); } struct mfc_entry_notifier_info { struct fib_notifier_info info; struct mr_mfc *mfc; u32 tb_id; }; static inline int mr_call_mfc_notifier(struct notifier_block *nb, struct net *net, unsigned short family, enum fib_event_type event_type, struct mr_mfc *mfc, u32 tb_id) { struct mfc_entry_notifier_info info = { .info = { .family = family, .net = net, }, .mfc = mfc, .tb_id = tb_id }; return call_fib_notifier(nb, net, event_type, &info.info); } static inline int mr_call_mfc_notifiers(struct net *net, unsigned short family, enum fib_event_type event_type, struct mr_mfc *mfc, u32 tb_id, unsigned int *ipmr_seq) { struct mfc_entry_notifier_info info = { .info = { .family = family, .net = net, }, .mfc = mfc, .tb_id = tb_id }; ASSERT_RTNL(); (*ipmr_seq)++; return call_fib_notifiers(net, event_type, &info.info); } struct mr_table; /** * struct mr_table_ops - callbacks and info for protocol-specific ops * @rht_params: parameters for accessing the MFC hash * @cmparg_any: a hash key to be used for matching on (*,*) routes */ struct mr_table_ops { const struct rhashtable_params *rht_params; void *cmparg_any; }; /** * struct mr_table - a multicast routing table * @list: entry within a list of multicast routing tables * @net: net where this table belongs * @ops: protocol specific operations * @id: identifier of the table * @mroute_sk: socket associated with the table * @ipmr_expire_timer: timer for handling unresolved routes * @mfc_unres_queue: list of unresolved MFC entries * @vif_table: array containing all possible vifs * @mfc_hash: Hash table of all resolved routes for easy lookup * @mfc_cache_list: list of resolved routes for possible traversal * @maxvif: Identifier of highest value vif currently in use * @cache_resolve_queue_len: current size of unresolved queue * @mroute_do_assert: Whether to inform userspace on wrong ingress *
@mroute_do_pim: Whether to receive IGMP PIMv1 * @mroute_reg_vif_num: PIM-device vif index */ struct mr_table { struct list_head list; possible_net_t net; struct mr_table_ops ops; u32 id; struct sock __rcu *mroute_sk; struct timer_list ipmr_expire_timer; struct list_head mfc_unres_queue; struct vif_device vif_table[MAXVIFS]; struct rhltable mfc_hash; struct list_head mfc_cache_list; int maxvif; atomic_t cache_resolve_queue_len; bool mroute_do_assert; bool mroute_do_pim; bool mroute_do_wrvifwhole; int mroute_reg_vif_num; }; #ifdef CONFIG_IP_MROUTE_COMMON void vif_device_init(struct vif_device *v, struct net_device *dev, unsigned long rate_limit, unsigned char threshold, unsigned short flags, unsigned short get_iflink_mask); struct mr_table * mr_table_alloc(struct net *net, u32 id, struct mr_table_ops *ops, void (*expire_func)(struct timer_list *t), void (*table_set)(struct mr_table *mrt, struct net *net)); /* These actually return 'struct mr_mfc *', but to avoid need for explicit * castings they simply return void. */ void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent); void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi); void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg); int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mr_mfc *c, struct rtmsg *rtm); int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb, struct netlink_callback *cb, int (*fill)(struct mr_table *mrt, struct sk_buff *skb, u32 portid, u32 seq, struct mr_mfc *c, int cmd, int flags), spinlock_t *lock, struct fib_dump_filter *filter); int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, struct mr_table *(*iter)(struct net *net, struct mr_table *mrt), int (*fill)(struct mr_table *mrt, struct sk_buff *skb, u32 portid, u32 seq, struct mr_mfc *c, int cmd, int flags), spinlock_t *lock, struct fib_dump_filter *filter); int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family, int (*rules_dump)(struct net *net, struct notifier_block *nb), struct mr_table *(*mr_iter)(struct net *net, struct mr_table *mrt), rwlock_t *mrt_lock); #else static inline void vif_device_init(struct vif_device *v, struct net_device *dev, unsigned long rate_limit, unsigned char threshold, unsigned short flags, unsigned short get_iflink_mask) { } static inline void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent) { return NULL; } static inline void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi) { return NULL; } static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg) { return NULL; } static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mr_mfc *c, struct rtmsg *rtm) { return -EINVAL; } static inline int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, struct mr_table *(*iter)(struct net *net, struct mr_table *mrt), int (*fill)(struct mr_table *mrt, struct sk_buff *skb, u32 portid, u32 seq, struct mr_mfc *c, int cmd, int flags), spinlock_t *lock, struct fib_dump_filter *filter) { return -EINVAL; } static inline int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family, int (*rules_dump)(struct net *net, struct notifier_block *nb), struct mr_table *(*mr_iter)(struct net *net, struct mr_table *mrt), rwlock_t *mrt_lock) { return -EINVAL; } #endif static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg) { return mr_mfc_find_parent(mrt, hasharg, -1); } #ifdef CONFIG_PROC_FS struct mr_vif_iter { struct seq_net_private 
p; struct mr_table *mrt; int ct; }; struct mr_mfc_iter { struct seq_net_private p; struct mr_table *mrt; struct list_head *cache; /* Lock protecting the mr_table's unresolved queue */ spinlock_t *lock; }; #ifdef CONFIG_IP_MROUTE_COMMON void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos); void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos); static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos) { return *pos ? mr_vif_seq_idx(seq_file_net(seq), seq->private, *pos - 1) : SEQ_START_TOKEN; } /* These actually return 'struct mr_mfc *', but to avoid need for explicit * castings they simply return void. */ void *mr_mfc_seq_idx(struct net *net, struct mr_mfc_iter *it, loff_t pos); void *mr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos); static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos, struct mr_table *mrt, spinlock_t *lock) { struct mr_mfc_iter *it = seq->private; it->mrt = mrt; it->cache = NULL; it->lock = lock; return *pos ? mr_mfc_seq_idx(seq_file_net(seq), seq->private, *pos - 1) : SEQ_START_TOKEN; } static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v) { struct mr_mfc_iter *it = seq->private; struct mr_table *mrt = it->mrt; if (it->cache == &mrt->mfc_unres_queue) spin_unlock_bh(it->lock); else if (it->cache == &mrt->mfc_cache_list) rcu_read_unlock(); } #else static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos) { return NULL; } static inline void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return NULL; } static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos) { return NULL; } static inline void *mr_mfc_seq_idx(struct net *net, struct mr_mfc_iter *it, loff_t pos) { return NULL; } static inline void *mr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return NULL; } static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos, struct mr_table *mrt, spinlock_t *lock) { return NULL; } static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v) { } #endif #endif #endif platform_data/tsc2007.h 0000644 00000001217 14722070374 0010643 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_I2C_TSC2007_H #define __LINUX_I2C_TSC2007_H /* linux/platform_data/tsc2007.h */ struct tsc2007_platform_data { u16 model; /* 2007. */ u16 x_plate_ohms; /* must be non-zero value */ u16 max_rt; /* max. resistance above which samples are ignored */ unsigned long poll_period; /* time (in ms) between samples */ int fuzzx; /* fuzz factor for X, Y and pressure axes */ int fuzzy; int fuzzz; int (*get_pendown_state)(struct device *); /* If needed, clear 2nd level interrupt source */ void (*clear_penirq)(void); int (*init_platform_hw)(void); void (*exit_platform_hw)(void); }; #endif platform_data/davinci-cpufreq.h 0000644 00000000653 14722070374 0012624 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * TI DaVinci CPUFreq platform support. * * Copyright (C) 2009 Texas Instruments, Inc. 
http://www.ti.com/ */ #ifndef _MACH_DAVINCI_CPUFREQ_H #define _MACH_DAVINCI_CPUFREQ_H #include <linux/cpufreq.h> struct davinci_cpufreq_config { struct cpufreq_frequency_table *freq_table; int (*set_voltage)(unsigned int index); int (*init)(void); }; #endif /* _MACH_DAVINCI_CPUFREQ_H */ platform_data/phy-da8xx-usb.h 0000644 00000000732 14722070374 0012163 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver * * Copyright (C) 2018 David Lechner <david@lechnology.com> */ #ifndef __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ #define __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ #include <linux/regmap.h> /** * da8xx_usb_phy_platform_data * @cfgchip: CFGCHIP syscon regmap */ struct da8xx_usb_phy_platform_data { struct regmap *cfgchip; }; #endif /* __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ */ platform_data/asoc-imx-ssi.h 0000644 00000001256 14722070374 0012060 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MACH_SSI_H #define __MACH_SSI_H struct snd_ac97; extern unsigned char imx_ssi_fiq_start, imx_ssi_fiq_end; extern unsigned long imx_ssi_fiq_base, imx_ssi_fiq_tx_buffer, imx_ssi_fiq_rx_buffer; struct imx_ssi_platform_data { unsigned int flags; #define IMX_SSI_DMA (1 << 0) #define IMX_SSI_USE_AC97 (1 << 1) #define IMX_SSI_NET (1 << 2) #define IMX_SSI_SYN (1 << 3) #define IMX_SSI_USE_I2S_SLAVE (1 << 4) void (*ac97_reset) (struct snd_ac97 *ac97); void (*ac97_warm_reset)(struct snd_ac97 *ac97); }; extern int mxc_set_irq_fiq(unsigned int irq, unsigned int type); #endif /* __MACH_SSI_H */ platform_data/ad7291.h 0000644 00000000450 14722070374 0010446 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IIO_AD7291_H__ #define __IIO_AD7291_H__ /** * struct ad7291_platform_data - AD7291 platform data * @use_external_ref: Whether to use an external or internal reference voltage */ struct ad7291_platform_data { bool use_external_ref; }; #endif platform_data/mdio-bcm-unimac.h 0000644 00000000423 14722070374 0012500 0 ustar 00 #ifndef __MDIO_BCM_UNIMAC_PDATA_H #define __MDIO_BCM_UNIMAC_PDATA_H struct unimac_mdio_pdata { u32 phy_mask; int (*wait_func)(void *data); void *wait_func_data; const char *bus_name; }; #define UNIMAC_MDIO_DRV_NAME "unimac-mdio" #endif /* __MDIO_BCM_UNIMAC_PDATA_H */ platform_data/txx9/ndfmc.h 0000644 00000001446 14722070374 0011550 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * * (C) Copyright TOSHIBA CORPORATION 2007 */ #ifndef __TXX9_NDFMC_H #define __TXX9_NDFMC_H #define NDFMC_PLAT_FLAG_USE_BSPRT 0x01 #define NDFMC_PLAT_FLAG_NO_RSTR 0x02 #define NDFMC_PLAT_FLAG_HOLDADD 0x04 #define NDFMC_PLAT_FLAG_DUMMYWRITE 0x08 struct txx9ndfmc_platform_data { unsigned int shift; unsigned int gbus_clock; unsigned int hold; /* hold time in nanosecond */ unsigned int spw; /* strobe pulse width in nanosecond */ unsigned int flags; unsigned char ch_mask; /* available channel bitmask */ unsigned char wp_mask; /* write-protect bitmask */ unsigned char wide_mask; /* 16bit-nand bitmask */ }; void txx9_ndfmc_init(unsigned long baseaddr, const struct txx9ndfmc_platform_data *plat_data); #endif /* __TXX9_NDFMC_H */ platform_data/tda9950.h 0000644 00000000432 14722070374 0010636 0 ustar 00 #ifndef LINUX_PLATFORM_DATA_TDA9950_H #define LINUX_PLATFORM_DATA_TDA9950_H struct device; struct tda9950_glue { struct device *parent; unsigned long irq_flags; void *data; int (*init)(void *); void (*exit)(void *); int (*open)(void *); void (*release)(void *); }; #endif platform_data/serial-imx.h 0000644 00000000420 14722070374 0011606 0 ustar 00 /* 
SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de> */ #ifndef ASMARM_ARCH_UART_H #define ASMARM_ARCH_UART_H #define IMXUART_HAVE_RTSCTS (1<<0) struct imxuart_platform_data { unsigned int flags; }; #endif platform_data/i2c-pxa.h 0000644 00000004614 14722070374 0011010 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * i2c_pxa.h * * Copyright (C) 2002 Intrinsyc Software Inc. */ #ifndef _I2C_PXA_H_ #define _I2C_PXA_H_ #if 0 #define DEF_TIMEOUT 3 #else /* need a longer timeout if we're dealing with the fact we may well be * looking at a multi-master environment */ #define DEF_TIMEOUT 32 #endif #define BUS_ERROR (-EREMOTEIO) #define XFER_NAKED (-ECONNREFUSED) #define I2C_RETRY (-2000) /* an error has occurred; retry transmit */ /* ICR initialize bit values * * 15. FM 0 (100 kHz operation) * 14. UR 0 (No unit reset) * 13. SADIE 0 (Disables the unit from interrupting on slave addresses * matching its slave address) * 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration * in master mode) * 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode) * 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent) * 9. IRFIE 1 (Enable interrupts from full buffer received) * 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty) * 7. GCD 1 (Disables i2c unit response to general call messages as a slave) * 6. IUE 0 (Disable unit until we change settings) * 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL)) * 4. MA 0 (Only send stop with the ICR stop bit) * 3. TB 0 (We are not transmitting a byte initially) * 2. ACKNAK 0 (Send an ACK after the unit receives a byte) * 1. STOP 0 (Do not send a STOP) * 0. START 0 (Do not send a START) * */ #define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE) /* I2C status register init values * * 10. BED 1 (Clear bus error detected) * 9. SAD 1 (Clear slave address detected) * 7. IRF 1 (Clear IDBR Receive Full) * 6. ITE 1 (Clear IDBR Transmit Empty) * 5. ALD 1 (Clear Arbitration Loss Detected) * 4. SSD 1 (Clear Slave Stop Detected) */ #define I2C_ISR_INIT 0x7FF /* status register init */ struct i2c_slave_client; struct i2c_pxa_platform_data { unsigned int slave_addr; struct i2c_slave_client *slave; unsigned int class; unsigned int use_pio:1; unsigned int fast_mode:1; unsigned int high_mode:1; unsigned char master_code; unsigned long rate; }; #endif platform_data/st_sensors_pdata.h 0000644 00000001462 14722070374 0013116 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * STMicroelectronics sensors platform-data driver * * Copyright 2013 STMicroelectronics Inc. * * Denis Ciocca <denis.ciocca@st.com> */ #ifndef ST_SENSORS_PDATA_H #define ST_SENSORS_PDATA_H /** * struct st_sensors_platform_data - Platform data for the ST sensors * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * Available only for accelerometer and pressure sensors. * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). * @open_drain: set the interrupt line to be open drain if possible. * @spi_3wire: enable spi-3wire mode. * @pullups: enable/disable i2c controller pullup resistors. */ struct st_sensors_platform_data { u8 drdy_int_pin; bool open_drain; bool spi_3wire; bool pullups; }; #endif /* ST_SENSORS_PDATA_H */ platform_data/ata-samsung_cf.h 0000644 00000001460 14722070374 0012431 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com * * Samsung CF-ATA platform_device info */ #ifndef __ATA_SAMSUNG_CF_H #define __ATA_SAMSUNG_CF_H __FILE__ /** * struct s3c_ide_platdata - S3C IDE driver platform data. * @setup_gpio: Setup the external GPIO pins to the right state for data * transfer in true-ide mode. */ struct s3c_ide_platdata { void (*setup_gpio)(void); }; /* * s3c_ide_set_platdata() - Setup the platform specific data for IDE driver. * @pdata: Platform data for IDE driver. */ extern void s3c_ide_set_platdata(struct s3c_ide_platdata *pdata); /* architecture-specific IDE configuration */ extern void s3c64xx_ide_setup_gpio(void); extern void s5pv210_ide_setup_gpio(void); #endif /*__ATA_SAMSUNG_CF_H */ platform_data/lcd-mipid.h 0000644 00000001112 14722070374 0011375 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LCD_MIPID_H #define __LCD_MIPID_H enum mipid_test_num { MIPID_TEST_RGB_LINES, }; enum mipid_test_result { MIPID_TEST_SUCCESS, MIPID_TEST_INVALID, MIPID_TEST_FAILED, }; #ifdef __KERNEL__ struct mipid_platform_data { int nreset_gpio; int data_lines; void (*shutdown)(struct mipid_platform_data *pdata); void (*set_bklight_level)(struct mipid_platform_data *pdata, int level); int (*get_bklight_level)(struct mipid_platform_data *pdata); int (*get_bklight_max)(struct mipid_platform_data *pdata); }; #endif #endif platform_data/uio_dmem_genirq.h 0000644 00000001440 14722070374 0012702 0 ustar 00 /* * include/linux/platform_data/uio_dmem_genirq.h * * Copyright (C) 2012 Damian Hobson-Garcia * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef _UIO_DMEM_GENIRQ_H #define _UIO_DMEM_GENIRQ_H #include <linux/uio_driver.h> struct uio_dmem_genirq_pdata { struct uio_info uioinfo; unsigned int *dynamic_region_sizes; unsigned int num_dynamic_regions; }; #endif /* _UIO_DMEM_GENIRQ_H */ platform_data/usb-ehci-orion.h 0000644 00000000670 14722070374 0012366 0 ustar 00 /* * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied.
*/ #ifndef __USB_EHCI_ORION_H #define __USB_EHCI_ORION_H #include <linux/mbus.h> enum orion_ehci_phy_ver { EHCI_PHY_ORION, EHCI_PHY_DD, EHCI_PHY_KW, EHCI_PHY_NA, }; struct orion_ehci_data { enum orion_ehci_phy_ver phy_version; }; #endif platform_data/dma-dw.h 0000644 00000004426 14722070374 0010717 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics */ #ifndef _PLATFORM_DATA_DMA_DW_H #define _PLATFORM_DATA_DMA_DW_H #include <linux/device.h> #define DW_DMA_MAX_NR_MASTERS 4 #define DW_DMA_MAX_NR_CHANNELS 8 /** * struct dw_dma_slave - Controller-specific information about a slave * * @dma_dev: required DMA master device * @src_id: src request line * @dst_id: dst request line * @m_master: memory master for transfers on allocated channel * @p_master: peripheral master for transfers on allocated channel * @channels: mask of the channels permitted for allocation (zero value means any) * @hs_polarity: set active-low polarity of handshake interface */ struct dw_dma_slave { struct device *dma_dev; u8 src_id; u8 dst_id; u8 m_master; u8 p_master; u8 channels; bool hs_polarity; }; /** * struct dw_dma_platform_data - Controller configuration parameters * @nr_channels: Number of channels supported by hardware (max 8) * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. * @block_size: Maximum block size supported by the controller * @nr_masters: Number of AHB masters supported by the controller * @data_width: Maximum data width supported by hardware per AHB master * (in bytes, power of 2) * @multi_block: Multi block transfers supported by hardware per channel. * @protctl: Protection control signals setting per channel. */ struct dw_dma_platform_data { unsigned int nr_channels; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ unsigned char chan_allocation_order; #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ unsigned char chan_priority; unsigned int block_size; unsigned char nr_masters; unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; #define CHAN_PROTCTL_PRIVILEGED BIT(0) #define CHAN_PROTCTL_BUFFERABLE BIT(1) #define CHAN_PROTCTL_CACHEABLE BIT(2) #define CHAN_PROTCTL_MASK GENMASK(2, 0) unsigned char protctl; }; #endif /* _PLATFORM_DATA_DMA_DW_H */ platform_data/ad5449.h 0000644 00000002254 14722070374 0010455 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog * Converter driver. * * Copyright 2012 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> */ #ifndef __LINUX_PLATFORM_DATA_AD5449_H__ #define __LINUX_PLATFORM_DATA_AD5449_H__ /** * enum ad5449_sdo_mode - AD5449 SDO pin configuration * @AD5449_SDO_DRIVE_FULL: Drive the SDO pin with full strength. * @AD5449_SDO_DRIVE_WEAK: Drive the SDO pin with less than full strength. * @AD5449_SDO_OPEN_DRAIN: Operate the SDO pin in open-drain mode. * @AD5449_SDO_DISABLED: Disable the SDO pin; in this mode it is not possible to * read back from the device.
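 *
 * A board file might wire this up through platform data as follows
 * (illustrative sketch; the chosen values are assumptions, not
 * recommendations):
 *
 *	static const struct ad5449_platform_data foo_dac_pdata = {
 *		.sdo_mode = AD5449_SDO_DRIVE_FULL,
 *		.hardware_clear_to_midscale = true,
 *	};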
*/ enum ad5449_sdo_mode { AD5449_SDO_DRIVE_FULL = 0x0, AD5449_SDO_DRIVE_WEAK = 0x1, AD5449_SDO_OPEN_DRAIN = 0x2, AD5449_SDO_DISABLED = 0x3, }; /** * struct ad5449_platform_data - Platform data for the ad5449 DAC driver * @sdo_mode: SDO pin mode * @hardware_clear_to_midscale: Whether asserting the hardware CLR pin sets the * outputs to midscale (true) or to zero scale(false). */ struct ad5449_platform_data { enum ad5449_sdo_mode sdo_mode; bool hardware_clear_to_midscale; }; #endif platform_data/mlxreg.h 0000644 00000012611 14722070374 0011037 0 ustar 00 /* * Copyright (c) 2017 Mellanox Technologies. All rights reserved. * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __LINUX_PLATFORM_DATA_MLXREG_H #define __LINUX_PLATFORM_DATA_MLXREG_H #define MLXREG_CORE_LABEL_MAX_SIZE 32 #define MLXREG_CORE_WD_FEATURE_NOWAYOUT BIT(0) #define MLXREG_CORE_WD_FEATURE_START_AT_BOOT BIT(1) /** * enum mlxreg_wdt_type - type of HW watchdog * * TYPE1 HW watchdog implementation exist in old systems. * All new systems have TYPE2 HW watchdog. */ enum mlxreg_wdt_type { MLX_WDT_TYPE1, MLX_WDT_TYPE2, }; /** * struct mlxreg_hotplug_device - I2C device data: * * @adapter: I2C device adapter; * @client: I2C device client; * @brdinfo: device board information; * @nr: I2C device adapter number, to which device is to be attached; * * Structure represents I2C hotplug device static data (board topology) and * dynamic data (related kernel objects handles). 
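 *
 * A platform definition might seed the static (board topology) part like
 * this (illustrative sketch; the "dps460" device name, its address and the
 * adapter number are assumptions):
 *
 *	static struct i2c_board_info foo_psu_info = {
 *		I2C_BOARD_INFO("dps460", 0x59),
 *	};
 *
 *	static struct mlxreg_hotplug_device foo_psu_hpdev = {
 *		.brdinfo = &foo_psu_info,
 *		.nr = 3,
 *	};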
*/ struct mlxreg_hotplug_device { struct i2c_adapter *adapter; struct i2c_client *client; struct i2c_board_info *brdinfo; int nr; }; /** * struct mlxreg_core_data - attributes control data: * * @label: attribute label; * @reg: attribute register; * @mask: attribute access mask; * @bit: attribute effective bit; * @capability: attribute capability register; * @mode: access mode; * @np: pointer to the platform node associated with the attribute; * @hpdev: hotplug device data; * @health_cntr: dynamic device health indication counter; * @attached: true if device has been attached after good health indication; */ struct mlxreg_core_data { char label[MLXREG_CORE_LABEL_MAX_SIZE]; u32 reg; u32 mask; u32 bit; u32 capability; umode_t mode; struct device_node *np; struct mlxreg_hotplug_device hpdev; u8 health_cntr; bool attached; }; /** * struct mlxreg_core_item - same type components controlled by the driver: * * @data: component data; * @aggr_mask: group aggregation mask; * @reg: group interrupt status register; * @mask: group interrupt mask; * @cache: last status value for elements from the same group; * @count: number of available elements in the group; * @ind: element's index inside the group; * @inversed: if 0, a signal status of 0 is OK; if 1, a status of 1 is OK; * @health: true if device has health indication, false otherwise; */ struct mlxreg_core_item { struct mlxreg_core_data *data; u32 aggr_mask; u32 reg; u32 mask; u32 cache; u8 count; u8 ind; u8 inversed; u8 health; }; /** * struct mlxreg_core_platform_data - platform data: * * @data: instance private data; * @regmap: register map of parent device; * @counter: number of instances; * @features: supported features of device; * @version: implementation version; * @identity: device identity name; */ struct mlxreg_core_platform_data { struct mlxreg_core_data *data; void *regmap; int counter; u32 features; u32 version; char identity[MLXREG_CORE_LABEL_MAX_SIZE]; }; /** * struct mlxreg_core_hotplug_platform_data - hotplug platform data: * * @items: same type components with the hotplug capability; * @irq: platform interrupt number; * @regmap: register map of parent device; * @counter: number of the components with the hotplug capability; * @cell: location of top aggregation interrupt register; * @mask: top aggregation interrupt common mask; * @cell_low: location of low aggregation interrupt register; * @mask_low: low aggregation interrupt common mask; * @deferred_nr: I2C adapter number that must exist prior to probe execution; * @shift_nr: value by which I2C adapter numbers are incremented; */ struct mlxreg_core_hotplug_platform_data { struct mlxreg_core_item *items; int irq; void *regmap; int counter; u32 cell; u32 mask; u32 cell_low; u32 mask_low; int deferred_nr; int shift_nr; }; #endif /* __LINUX_PLATFORM_DATA_MLXREG_H */ platform_data/voltage-omap.h 0000644 00000002234 14722070374 0012134 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * OMAP Voltage Management Routines * * Copyright (C) 2011, Texas Instruments, Inc. */ #ifndef __ARCH_ARM_OMAP_VOLTAGE_H #define __ARCH_ARM_OMAP_VOLTAGE_H /** * struct omap_volt_data - OMAP voltage specific data. * @volt_nominal: The possible voltage value in uV * @sr_efuse_offs: The offset of the efuse register (from the system * control module base address) from where to read * the n-target value for the smartreflex module. * @sr_errminlimit: Error min limit value for smartreflex. This value * differs at different OPPs and thus is linked * with voltage. * @vp_errgain: Error gain value for the voltage processor.
This * field also differs according to the voltage/opp. */ struct omap_volt_data { u32 volt_nominal; u32 sr_efuse_offs; u8 sr_errminlimit; u8 vp_errgain; }; struct voltagedomain; struct voltagedomain *voltdm_lookup(const char *name); int voltdm_scale(struct voltagedomain *voltdm, unsigned long target_volt); unsigned long voltdm_get_voltage(struct voltagedomain *voltdm); struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm, unsigned long volt); #endif platform_data/usb3503.h 0000644 00000000715 14722070374 0010647 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __USB3503_H__ #define __USB3503_H__ #define USB3503_I2C_NAME "usb3503" #define USB3503_OFF_PORT1 (1 << 1) #define USB3503_OFF_PORT2 (1 << 2) #define USB3503_OFF_PORT3 (1 << 3) enum usb3503_mode { USB3503_MODE_UNKNOWN, USB3503_MODE_HUB, USB3503_MODE_STANDBY, }; struct usb3503_platform_data { enum usb3503_mode initial_mode; u8 port_off_mask; int gpio_intn; int gpio_connect; int gpio_reset; }; #endif platform_data/leds-pca963x.h 0000644 00000001205 14722070374 0011660 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * PCA963X LED chip driver. * * Copyright 2012 bct electronic GmbH * Copyright 2013 Qtechnology A/S */ #ifndef __LINUX_PCA963X_H #define __LINUX_PCA963X_H #include <linux/leds.h> enum pca963x_outdrv { PCA963X_OPEN_DRAIN, PCA963X_TOTEM_POLE, /* aka push-pull */ }; enum pca963x_blink_type { PCA963X_SW_BLINK, PCA963X_HW_BLINK, }; enum pca963x_direction { PCA963X_NORMAL, PCA963X_INVERTED, }; struct pca963x_platform_data { struct led_platform_data leds; enum pca963x_outdrv outdrv; enum pca963x_blink_type blink_type; enum pca963x_direction dir; }; #endif /* __LINUX_PCA963X_H*/ platform_data/video-imxfb.h 0000644 00000003404 14722070374 0011752 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * This structure describes the machine which we are running on. 
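* * A TFT panel would typically compose its PCR value from the bit * definitions below, for example (the pixel-clock divider is * illustrative only): * * .pcr = PCR_TFT | PCR_COLOR | PCR_BPIX_16 | PCR_PCD(2),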
*/ #ifndef __MACH_IMXFB_H__ #define __MACH_IMXFB_H__ #include <linux/fb.h> #define PCR_TFT (1 << 31) #define PCR_COLOR (1 << 30) #define PCR_PBSIZ_1 (0 << 28) #define PCR_PBSIZ_2 (1 << 28) #define PCR_PBSIZ_4 (2 << 28) #define PCR_PBSIZ_8 (3 << 28) #define PCR_BPIX_1 (0 << 25) #define PCR_BPIX_2 (1 << 25) #define PCR_BPIX_4 (2 << 25) #define PCR_BPIX_8 (3 << 25) #define PCR_BPIX_12 (4 << 25) #define PCR_BPIX_16 (5 << 25) #define PCR_BPIX_18 (6 << 25) #define PCR_PIXPOL (1 << 24) #define PCR_FLMPOL (1 << 23) #define PCR_LPPOL (1 << 22) #define PCR_CLKPOL (1 << 21) #define PCR_OEPOL (1 << 20) #define PCR_SCLKIDLE (1 << 19) #define PCR_END_SEL (1 << 18) #define PCR_END_BYTE_SWAP (1 << 17) #define PCR_REV_VS (1 << 16) #define PCR_ACD_SEL (1 << 15) #define PCR_ACD(x) (((x) & 0x7f) << 8) #define PCR_SCLK_SEL (1 << 7) #define PCR_SHARP (1 << 6) #define PCR_PCD(x) ((x) & 0x3f) #define PWMR_CLS(x) (((x) & 0x1ff) << 16) #define PWMR_LDMSK (1 << 15) #define PWMR_SCR1 (1 << 10) #define PWMR_SCR0 (1 << 9) #define PWMR_CC_EN (1 << 8) #define PWMR_PW(x) ((x) & 0xff) #define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26) #define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16) #define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8) #define LSCR1_GRAY2(x) (((x) & 0xf) << 4) #define LSCR1_GRAY1(x) (((x) & 0xf)) struct imx_fb_videomode { struct fb_videomode mode; u32 pcr; bool aus_mode; unsigned char bpp; }; struct imx_fb_platform_data { struct imx_fb_videomode *mode; int num_modes; u_int pwmr; u_int lscr1; u_int dmacr; int (*init)(struct platform_device *); void (*exit)(struct platform_device *); }; #endif /* ifndef __MACH_IMXFB_H__ */ platform_data/sgi-w1.h 0000644 00000000336 14722070374 0010651 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * SGI One-Wire (W1) IP */ #ifndef PLATFORM_DATA_SGI_W1_H #define PLATFORM_DATA_SGI_W1_H struct sgi_w1_platform_data { char dev_id[64]; }; #endif /* PLATFORM_DATA_SGI_W1_H */ platform_data/i2c-imx.h 0000644 00000000644 14722070374 0011014 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * i2c.h - i.MX I2C driver header file * * Copyright (c) 2008, Darius Augulis <augulis.darius@gmail.com> */ #ifndef __ASM_ARCH_I2C_H_ #define __ASM_ARCH_I2C_H_ /** * struct imxi2c_platform_data - structure of platform data for MXC I2C driver * @bitrate: Bus speed measured in Hz * **/ struct imxi2c_platform_data { u32 bitrate; }; #endif /* __ASM_ARCH_I2C_H_ */ platform_data/ux500_wdt.h 0000644 00000000471 14722070374 0011301 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST Ericsson SA 2011 * * STE Ux500 Watchdog platform data */ #ifndef __UX500_WDT_H #define __UX500_WDT_H /** * struct ux500_wdt_data */ struct ux500_wdt_data { unsigned int timeout; bool has_28_bits_resolution; }; #endif /* __UX500_WDT_H */ platform_data/pcmcia-pxa2xx_viper.h 0000644 00000000310 14722070374 0013423 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ARCOM_PCMCIA_H #define __ARCOM_PCMCIA_H struct arcom_pcmcia_pdata { int cd_gpio; int rdy_gpio; int pwr_gpio; void (*reset)(int state); }; #endif platform_data/timer-ixp4xx.h 0000644 00000000364 14722070374 0012125 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __TIMER_IXP4XX_H #define __TIMER_IXP4XX_H #include <linux/ioport.h> void __init ixp4xx_timer_setup(resource_size_t timerbase, int timer_irq, unsigned int timer_freq); #endif platform_data/pixcir_i2c_ts.h 0000644 00000002716 14722070374 0012307 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PIXCIR_I2C_TS_H #define _PIXCIR_I2C_TS_H /* * Register 
map */ #define PIXCIR_REG_POWER_MODE 51 #define PIXCIR_REG_INT_MODE 52 /* * Power modes: * active: max scan speed * idle: lower scan speed with automatic transition to active on touch * halt: datasheet says sleep but this is more like halt as the chip * clocks are cut and it can only be brought out of this mode * using the RESET pin. */ enum pixcir_power_mode { PIXCIR_POWER_ACTIVE, PIXCIR_POWER_IDLE, PIXCIR_POWER_HALT, }; #define PIXCIR_POWER_MODE_MASK 0x03 #define PIXCIR_POWER_ALLOW_IDLE (1UL << 2) /* * Interrupt modes: * periodical: interrupt is asserted periodically * diff coordinates: interrupt is asserted when coordinates change * level on touch: interrupt level asserted during touch * pulse on touch: interrupt pulse asserted during touch * */ enum pixcir_int_mode { PIXCIR_INT_PERIODICAL, PIXCIR_INT_DIFF_COORD, PIXCIR_INT_LEVEL_TOUCH, PIXCIR_INT_PULSE_TOUCH, }; #define PIXCIR_INT_MODE_MASK 0x03 #define PIXCIR_INT_ENABLE (1UL << 3) #define PIXCIR_INT_POL_HIGH (1UL << 2) /** * struct pixcir_i2c_chip_data - chip related data * @max_fingers: Max number of fingers reported simultaneously by h/w * @has_hw_ids: Hardware supports finger tracking IDs * */ struct pixcir_i2c_chip_data { u8 max_fingers; bool has_hw_ids; }; struct pixcir_ts_platform_data { int x_max; int y_max; struct pixcir_i2c_chip_data chip; }; #endif platform_data/mtd-orion_nand.h 0000644 00000001010 14722070374 0012440 0 ustar 00 /* * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #ifndef __MTD_ORION_NAND_H #define __MTD_ORION_NAND_H /* * Device bus NAND private data */ struct orion_nand_data { struct mtd_partition *parts; u32 nr_parts; u8 ale; /* address line number connected to ALE */ u8 cle; /* address line number connected to CLE */ u8 width; /* buswidth */ u8 chip_delay; }; #endif platform_data/mmp_dma.h 0000644 00000000536 14722070374 0011156 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * MMP Platform DMA Management * * Copyright (c) 2011 Marvell Semiconductors Inc. */ #ifndef MMP_DMA_H #define MMP_DMA_H struct dma_slave_map; struct mmp_dma_platdata { int dma_channels; int nb_requestors; int slave_map_cnt; const struct dma_slave_map *slave_map; }; #endif /* MMP_DMA_H */ platform_data/mfd-mcp-sa11x0.h 0000644 00000000420 14722070374 0012072 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2005 Russell King.
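* * A machine-file sketch for the structure below (the clock rate is * illustrative only): * * static struct mcp_plat_data my_mcp_data = { * .sclk_rate = 11981000, * };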
*/ #ifndef __MFD_MCP_SA11X0_H #define __MFD_MCP_SA11X0_H #include <linux/types.h> struct mcp_plat_data { u32 mccr0; u32 mccr1; unsigned int sclk_rate; void *codec_pdata; }; #endif platform_data/ds620.h 0000644 00000000656 14722070374 0010405 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DS620_H #define _LINUX_DS620_H #include <linux/types.h> #include <linux/i2c.h> /* platform data for the DS620 temperature sensor and thermostat */ struct ds620_platform_data { /* * Thermostat output pin PO mode: * 0 = always low (default) * 1 = PO_LOW * 2 = PO_HIGH * * (see Documentation/hwmon/ds620.rst) */ int pomode; }; #endif /* _LINUX_DS620_H */ platform_data/leds-lp55xx.h 0000644 00000003630 14722070374 0011634 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * LP55XX Platform Data Header * * Copyright (C) 2012 Texas Instruments * * Author: Milo(Woogyom) Kim <milo.kim@ti.com> * * Derived from leds-lp5521.h, leds-lp5523.h */ #ifndef _LEDS_LP55XX_H #define _LEDS_LP55XX_H /* Clock configuration */ #define LP55XX_CLOCK_AUTO 0 #define LP55XX_CLOCK_INT 1 #define LP55XX_CLOCK_EXT 2 struct lp55xx_led_config { const char *name; const char *default_trigger; u8 chan_nr; u8 led_current; /* mA x10, 0 if led is not connected */ u8 max_current; }; struct lp55xx_predef_pattern { const u8 *r; const u8 *g; const u8 *b; u8 size_r; u8 size_g; u8 size_b; }; enum lp8501_pwr_sel { LP8501_ALL_VDD, /* D1~9 are connected to VDD */ LP8501_6VDD_3VOUT, /* D1~6 with VDD, D7~9 with VOUT */ LP8501_3VDD_6VOUT, /* D1~6 with VOUT, D7~9 with VDD */ LP8501_ALL_VOUT, /* D1~9 are connected to VOUT */ }; /* * struct lp55xx_platform_data * @led_config : Configurable led class device * @num_channels : Number of LED channels * @label : Used for naming LEDs * @clock_mode : Input clock mode. LP55XX_CLOCK_AUTO or _INT or _EXT * @setup_resources : Platform specific function before enabling the chip * @release_resources : Platform specific function after disabling the chip * @enable : EN pin control by platform side * @patterns : Predefined pattern data for RGB channels * @num_patterns : Number of patterns * @update_config : Value of CONFIG register */ struct lp55xx_platform_data { /* LED channel configuration */ struct lp55xx_led_config *led_config; u8 num_channels; const char *label; /* Clock configuration */ u8 clock_mode; /* optional enable GPIO */ int enable_gpio; /* Predefined pattern data */ struct lp55xx_predef_pattern *patterns; unsigned int num_patterns; /* LP8501 specific */ enum lp8501_pwr_sel pwr_sel; }; #endif /* _LEDS_LP55XX_H */ platform_data/keypad-pxa27x.h 0000644 00000004245 14722070374 0012151 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ARCH_PXA27x_KEYPAD_H #define __ASM_ARCH_PXA27x_KEYPAD_H #include <linux/input.h> #include <linux/input/matrix_keypad.h> #define MAX_MATRIX_KEY_ROWS (8) #define MAX_MATRIX_KEY_COLS (8) #define MATRIX_ROW_SHIFT (3) #define MAX_DIRECT_KEY_NUM (8) /* pxa3xx keypad platform specific parameters * * NOTE: * 1. direct_key_num indicates the number of keys in the direct keypad * _plus_ the number of rotary-encoder sensor inputs, this can be * left as 0 if only rotary encoders are enabled, the driver will * automatically calculate this * * 2. direct_key_map is the key code map for the direct keys, if rotary * encoder(s) are enabled, direct key 0/1(2/3) will be ignored * * 3. rotary can be either interpreted as a relative input event (e.g. * REL_WHEEL/REL_HWHEEL) or specific keys (e.g. UP/DOWN/LEFT/RIGHT) * * 4. 
matrix key and direct key will use the same debounce_interval by * default, which should be sufficient in most cases * * pxa168 keypad platform specific parameter * * NOTE: * clear_wakeup_event callback is a workaround required to clear the * keypad interrupt. The keypad wake must be cleared in addition to * reading the MI/DI bits in the KPC register. */ struct pxa27x_keypad_platform_data { /* code map for the matrix keys */ const struct matrix_keymap_data *matrix_keymap_data; unsigned int matrix_key_rows; unsigned int matrix_key_cols; /* direct keys */ int direct_key_num; unsigned int direct_key_map[MAX_DIRECT_KEY_NUM]; /* the key output may be low active */ int direct_key_low_active; /* give board a chance to choose the start direct key */ unsigned int direct_key_mask; /* rotary encoders 0 */ int enable_rotary0; int rotary0_rel_code; int rotary0_up_key; int rotary0_down_key; /* rotary encoders 1 */ int enable_rotary1; int rotary1_rel_code; int rotary1_up_key; int rotary1_down_key; /* key debounce interval */ unsigned int debounce_interval; /* clear wakeup event requirement for pxa168 */ void (*clear_wakeup_event)(void); }; extern void pxa_set_keypad_info(struct pxa27x_keypad_platform_data *info); #endif /* __ASM_ARCH_PXA27x_KEYPAD_H */ platform_data/i2c-omap.h 0000644 00000002331 14722070374 0011146 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __I2C_OMAP_H__ #define __I2C_OMAP_H__ #include <linux/platform_device.h> /* * Version 2 of the I2C peripheral unit has a different register * layout and extra registers. The ID register in the V2 peripheral * unit on the OMAP4430 reports the same ID as the V1 peripheral * unit on the OMAP3530, so we must inform the driver which IP * version we know it is running on from platform / cpu-specific * code using these constants in the hwmod class definition. */ #define OMAP_I2C_IP_VERSION_1 1 #define OMAP_I2C_IP_VERSION_2 2 /* struct omap_i2c_bus_platform_data .flags meanings */ #define OMAP_I2C_FLAG_NO_FIFO BIT(0) #define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1) #define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2) #define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5) #define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6) /* how the CPU address bus must be translated for I2C unit access */ #define OMAP_I2C_FLAG_BUS_SHIFT_NONE 0 #define OMAP_I2C_FLAG_BUS_SHIFT_1 BIT(7) #define OMAP_I2C_FLAG_BUS_SHIFT_2 BIT(8) #define OMAP_I2C_FLAG_BUS_SHIFT__SHIFT 7 struct omap_i2c_bus_platform_data { u32 clkrate; u32 rev; u32 flags; void (*set_mpu_wkup_lat)(struct device *dev, long set); }; #endif platform_data/clk-davinci-pll.h 0000644 00000000721 14722070374 0012511 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * PLL clock driver for TI Davinci SoCs * * Copyright (C) 2018 David Lechner <david@lechnology.com> */ #ifndef __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ #define __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ #include <linux/regmap.h> /** * davinci_pll_platform_data * @cfgchip: CFGCHIP syscon regmap */ struct davinci_pll_platform_data { struct regmap *cfgchip; }; #endif /* __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ */ platform_data/cros_ec_chardev.h 0000644 00000002115 14722070374 0012650 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * ChromeOS EC device interface. * * Copyright (C) 2014 Google, Inc. 
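* * A user-space sketch of the read-memory ioctl defined below (device * path and offset are illustrative, error handling elided): * * struct cros_ec_readmem rd = { .offset = 0, .bytes = 4 }; * int fd = open("/dev/cros_ec", O_RDWR); * ioctl(fd, CROS_EC_DEV_IOCRDMEM, &rd);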
*/ #ifndef _UAPI_LINUX_CROS_EC_DEV_H_ #define _UAPI_LINUX_CROS_EC_DEV_H_ #include <linux/bits.h> #include <linux/ioctl.h> #include <linux/types.h> #include <linux/platform_data/cros_ec_commands.h> #define CROS_EC_DEV_VERSION "1.0.0" /** * struct cros_ec_readmem - Struct used to read mapped memory. * @offset: Within EC_LPC_ADDR_MEMMAP region. * @bytes: Number of bytes to read. Zero means "read a string" (including '\0') * At most only EC_MEMMAP_SIZE bytes can be read. * @buffer: Where to store the result. The ioctl returns the number of bytes * read or negative on error. */ struct cros_ec_readmem { uint32_t offset; uint32_t bytes; uint8_t buffer[EC_MEMMAP_SIZE]; }; #define CROS_EC_DEV_IOC 0xEC #define CROS_EC_DEV_IOCXCMD _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command) #define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem) #define CROS_EC_DEV_IOCEVENTMASK _IO(CROS_EC_DEV_IOC, 2) #endif /* _CROS_EC_DEV_H_ */ platform_data/gpio-omap.h 0000644 00000013220 14722070374 0011426 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * OMAP GPIO handling defines and functions * * Copyright (C) 2003-2005 Nokia Corporation * * Written by Juha Yrjölä <juha.yrjola@nokia.com> */ #ifndef __ASM_ARCH_OMAP_GPIO_H #define __ASM_ARCH_OMAP_GPIO_H #ifndef __ASSEMBLER__ #include <linux/io.h> #include <linux/platform_device.h> #endif #define OMAP1_MPUIO_BASE 0xfffb5000 /* * These are the omap15xx/16xx offsets. The omap7xx offset are * OMAP_MPUIO_ / 2 offsets below. */ #define OMAP_MPUIO_INPUT_LATCH 0x00 #define OMAP_MPUIO_OUTPUT 0x04 #define OMAP_MPUIO_IO_CNTL 0x08 #define OMAP_MPUIO_KBR_LATCH 0x10 #define OMAP_MPUIO_KBC 0x14 #define OMAP_MPUIO_GPIO_EVENT_MODE 0x18 #define OMAP_MPUIO_GPIO_INT_EDGE 0x1c #define OMAP_MPUIO_KBD_INT 0x20 #define OMAP_MPUIO_GPIO_INT 0x24 #define OMAP_MPUIO_KBD_MASKIT 0x28 #define OMAP_MPUIO_GPIO_MASKIT 0x2c #define OMAP_MPUIO_GPIO_DEBOUNCING 0x30 #define OMAP_MPUIO_LATCH 0x34 #define OMAP34XX_NR_GPIOS 6 /* * OMAP1510 GPIO registers */ #define OMAP1510_GPIO_DATA_INPUT 0x00 #define OMAP1510_GPIO_DATA_OUTPUT 0x04 #define OMAP1510_GPIO_DIR_CONTROL 0x08 #define OMAP1510_GPIO_INT_CONTROL 0x0c #define OMAP1510_GPIO_INT_MASK 0x10 #define OMAP1510_GPIO_INT_STATUS 0x14 #define OMAP1510_GPIO_PIN_CONTROL 0x18 #define OMAP1510_IH_GPIO_BASE 64 /* * OMAP1610 specific GPIO registers */ #define OMAP1610_GPIO_REVISION 0x0000 #define OMAP1610_GPIO_SYSCONFIG 0x0010 #define OMAP1610_GPIO_SYSSTATUS 0x0014 #define OMAP1610_GPIO_IRQSTATUS1 0x0018 #define OMAP1610_GPIO_IRQENABLE1 0x001c #define OMAP1610_GPIO_WAKEUPENABLE 0x0028 #define OMAP1610_GPIO_DATAIN 0x002c #define OMAP1610_GPIO_DATAOUT 0x0030 #define OMAP1610_GPIO_DIRECTION 0x0034 #define OMAP1610_GPIO_EDGE_CTRL1 0x0038 #define OMAP1610_GPIO_EDGE_CTRL2 0x003c #define OMAP1610_GPIO_CLEAR_IRQENABLE1 0x009c #define OMAP1610_GPIO_CLEAR_WAKEUPENA 0x00a8 #define OMAP1610_GPIO_CLEAR_DATAOUT 0x00b0 #define OMAP1610_GPIO_SET_IRQENABLE1 0x00dc #define OMAP1610_GPIO_SET_WAKEUPENA 0x00e8 #define OMAP1610_GPIO_SET_DATAOUT 0x00f0 /* * OMAP7XX specific GPIO registers */ #define OMAP7XX_GPIO_DATA_INPUT 0x00 #define OMAP7XX_GPIO_DATA_OUTPUT 0x04 #define OMAP7XX_GPIO_DIR_CONTROL 0x08 #define OMAP7XX_GPIO_INT_CONTROL 0x0c #define OMAP7XX_GPIO_INT_MASK 0x10 #define OMAP7XX_GPIO_INT_STATUS 0x14 /* * omap2+ specific GPIO registers */ #define OMAP24XX_GPIO_REVISION 0x0000 #define OMAP24XX_GPIO_SYSCONFIG 0x0010 #define OMAP24XX_GPIO_IRQSTATUS1 0x0018 #define OMAP24XX_GPIO_IRQSTATUS2 0x0028 #define OMAP24XX_GPIO_IRQENABLE2 0x002c #define 
OMAP24XX_GPIO_IRQENABLE1 0x001c #define OMAP24XX_GPIO_WAKE_EN 0x0020 #define OMAP24XX_GPIO_CTRL 0x0030 #define OMAP24XX_GPIO_OE 0x0034 #define OMAP24XX_GPIO_DATAIN 0x0038 #define OMAP24XX_GPIO_DATAOUT 0x003c #define OMAP24XX_GPIO_LEVELDETECT0 0x0040 #define OMAP24XX_GPIO_LEVELDETECT1 0x0044 #define OMAP24XX_GPIO_RISINGDETECT 0x0048 #define OMAP24XX_GPIO_FALLINGDETECT 0x004c #define OMAP24XX_GPIO_DEBOUNCE_EN 0x0050 #define OMAP24XX_GPIO_DEBOUNCE_VAL 0x0054 #define OMAP24XX_GPIO_CLEARIRQENABLE1 0x0060 #define OMAP24XX_GPIO_SETIRQENABLE1 0x0064 #define OMAP24XX_GPIO_CLEARWKUENA 0x0080 #define OMAP24XX_GPIO_SETWKUENA 0x0084 #define OMAP24XX_GPIO_CLEARDATAOUT 0x0090 #define OMAP24XX_GPIO_SETDATAOUT 0x0094 #define OMAP4_GPIO_REVISION 0x0000 #define OMAP4_GPIO_SYSCONFIG 0x0010 #define OMAP4_GPIO_EOI 0x0020 #define OMAP4_GPIO_IRQSTATUSRAW0 0x0024 #define OMAP4_GPIO_IRQSTATUSRAW1 0x0028 #define OMAP4_GPIO_IRQSTATUS0 0x002c #define OMAP4_GPIO_IRQSTATUS1 0x0030 #define OMAP4_GPIO_IRQSTATUSSET0 0x0034 #define OMAP4_GPIO_IRQSTATUSSET1 0x0038 #define OMAP4_GPIO_IRQSTATUSCLR0 0x003c #define OMAP4_GPIO_IRQSTATUSCLR1 0x0040 #define OMAP4_GPIO_IRQWAKEN0 0x0044 #define OMAP4_GPIO_IRQWAKEN1 0x0048 #define OMAP4_GPIO_IRQENABLE1 0x011c #define OMAP4_GPIO_WAKE_EN 0x0120 #define OMAP4_GPIO_IRQSTATUS2 0x0128 #define OMAP4_GPIO_IRQENABLE2 0x012c #define OMAP4_GPIO_CTRL 0x0130 #define OMAP4_GPIO_OE 0x0134 #define OMAP4_GPIO_DATAIN 0x0138 #define OMAP4_GPIO_DATAOUT 0x013c #define OMAP4_GPIO_LEVELDETECT0 0x0140 #define OMAP4_GPIO_LEVELDETECT1 0x0144 #define OMAP4_GPIO_RISINGDETECT 0x0148 #define OMAP4_GPIO_FALLINGDETECT 0x014c #define OMAP4_GPIO_DEBOUNCENABLE 0x0150 #define OMAP4_GPIO_DEBOUNCINGTIME 0x0154 #define OMAP4_GPIO_CLEARIRQENABLE1 0x0160 #define OMAP4_GPIO_SETIRQENABLE1 0x0164 #define OMAP4_GPIO_CLEARWKUENA 0x0180 #define OMAP4_GPIO_SETWKUENA 0x0184 #define OMAP4_GPIO_CLEARDATAOUT 0x0190 #define OMAP4_GPIO_SETDATAOUT 0x0194 #define OMAP_MAX_GPIO_LINES 192 #define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr)) #define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES) #ifndef __ASSEMBLER__ struct omap_gpio_reg_offs { u16 revision; u16 sysconfig; u16 direction; u16 datain; u16 dataout; u16 set_dataout; u16 clr_dataout; u16 irqstatus; u16 irqstatus2; u16 irqstatus_raw0; u16 irqstatus_raw1; u16 irqenable; u16 irqenable2; u16 set_irqenable; u16 clr_irqenable; u16 debounce; u16 debounce_en; u16 ctrl; u16 wkup_en; u16 leveldetect0; u16 leveldetect1; u16 risingdetect; u16 fallingdetect; u16 irqctrl; u16 edgectrl1; u16 edgectrl2; u16 pinctrl; bool irqenable_inv; }; struct omap_gpio_platform_data { int bank_type; int bank_width; /* GPIO bank width */ int bank_stride; /* Only needed for omap1 MPUIO */ bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ bool loses_context; /* whether the bank would ever lose context */ bool is_mpuio; /* whether the bank is of type MPUIO */ u32 non_wakeup_gpios; const struct omap_gpio_reg_offs *regs; /* Return context loss count due to PM states changing */ int (*get_context_loss_count)(struct device *dev); }; #endif /* __ASSEMBLER__ */ #endif platform_data/usb-ehci-mxc.h 0000644 00000000514 14722070374 0012024 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H #define __INCLUDE_ASM_ARCH_MXC_EHCI_H struct mxc_usbh_platform_data { int (*init)(struct platform_device *pdev); int (*exit)(struct platform_device *pdev); unsigned int portsc; struct usb_phy *otg; }; #endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */ platform_data/i2c-designware.h 0000644 
00000000324 14722070374 0012342 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright(c) 2014 Intel Corporation. */ #ifndef I2C_DESIGNWARE_H #define I2C_DESIGNWARE_H struct dw_i2c_platform_data { unsigned int i2c_scl_freq; }; #endif platform_data/asoc-kirkwood.h 0000644 00000000221 14722070374 0012311 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PLAT_AUDIO_H #define __PLAT_AUDIO_H struct kirkwood_asoc_platform_data { int burst; }; #endif platform_data/bcmgenet.h 0000644 00000000622 14722070374 0011324 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__ #define __LINUX_PLATFORM_DATA_BCMGENET_H__ #include <linux/types.h> #include <linux/if_ether.h> #include <linux/phy.h> struct bcmgenet_platform_data { bool mdio_enabled; phy_interface_t phy_interface; int phy_address; int phy_speed; int phy_duplex; u8 mac_address[ETH_ALEN]; int genet_version; }; #endif platform_data/i2c-ocores.h 0000644 00000001204 14722070374 0011502 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * i2c-ocores.h - definitions for the i2c-ocores interface * * Peter Korsgaard <peter@korsgaard.com> */ #ifndef _LINUX_I2C_OCORES_H #define _LINUX_I2C_OCORES_H struct ocores_i2c_platform_data { u32 reg_shift; /* register offset shift value */ u32 reg_io_width; /* register io read/write width */ u32 clock_khz; /* input clock in kHz */ u32 bus_khz; /* bus clock in kHz */ bool big_endian; /* registers are big endian */ u8 num_devices; /* number of devices in the devices list */ struct i2c_board_info const *devices; /* devices connected to the bus */ }; #endif /* _LINUX_I2C_OCORES_H */ platform_data/s3c-hsudc.h 0000644 00000001767 14722070374 0011337 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * S3C24XX USB 2.0 High-speed USB controller gadget driver * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints. * Each endpoint can be configured as either in or out endpoint. Endpoints * can be configured for Bulk or Interrupt transfer mode. */ #ifndef __LINUX_USB_S3C_HSUDC_H #define __LINUX_USB_S3C_HSUDC_H /** * s3c24xx_hsudc_platdata - Platform data for USB High-Speed gadget controller. * @epnum: Number of endpoints to be instantiated by the controller driver. * @gpio_init: Platform specific USB related GPIO initialization. * @gpio_uninit: Platform specific USB related GPIO uninitialization. * * Representation of platform data for the S3C24XX USB 2.0 High Speed gadget * controllers. */ struct s3c24xx_hsudc_platdata { unsigned int epnum; void (*gpio_init)(void); void (*gpio_uninit)(void); }; #endif /* __LINUX_USB_S3C_HSUDC_H */ platform_data/ltc4245.h 0000644 00000000513 14722070374 0010640 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Platform Data for LTC4245 hardware monitor chip * * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu> */ #ifndef LINUX_LTC4245_H #define LINUX_LTC4245_H #include <linux/types.h> struct ltc4245_platform_data { bool use_extra_gpios; }; #endif /* LINUX_LTC4245_H */ platform_data/nfcmrvl.h 0000644 00000002356 14722070374 0011215 0 ustar 00 /* * Copyright (C) 2015, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License").
You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available on the worldwide web at * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #ifndef _NFCMRVL_PTF_H_ #define _NFCMRVL_PTF_H_ struct nfcmrvl_platform_data { /* * Generic */ /* GPIO that is wired to RESET_N signal */ int reset_n_io; /* Tell if transport is muxed in HCI one */ unsigned int hci_muxed; /* * UART specific */ /* Tell if UART needs flow control at init */ unsigned int flow_control; /* Tell if firmware supports break control for power management */ unsigned int break_control; /* * I2C specific */ unsigned int irq; unsigned int irq_polarity; }; #endif /* _NFCMRVL_PTF_H_ */ platform_data/gpmc-omap.h 0000644 00000013032 14722070374 0011417 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * OMAP GPMC Platform data * * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com * Roger Quadros <rogerq@ti.com> */ #ifndef _GPMC_OMAP_H_ #define _GPMC_OMAP_H_ /* Maximum Number of Chip Selects */ #define GPMC_CS_NUM 8 /* bool type time settings */ struct gpmc_bool_timings { bool cycle2cyclediffcsen; bool cycle2cyclesamecsen; bool we_extra_delay; bool oe_extra_delay; bool adv_extra_delay; bool cs_extra_delay; bool time_para_granularity; }; /* * Note that all values in this struct are in nanoseconds except sync_clk * (which is in picoseconds), while the register values are in gpmc_fck cycles. */ struct gpmc_timings { /* Minimum clock period for synchronous mode (in picoseconds) */ u32 sync_clk; /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ u32 cs_on; /* Assertion time */ u32 cs_rd_off; /* Read deassertion time */ u32 cs_wr_off; /* Write deassertion time */ /* ADV signal timings corresponding to GPMC_CONFIG3 */ u32 adv_on; /* Assertion time */ u32 adv_rd_off; /* Read deassertion time */ u32 adv_wr_off; /* Write deassertion time */ u32 adv_aad_mux_on; /* ADV assertion time for AAD */ u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */ u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */ /* WE signals timings corresponding to GPMC_CONFIG4 */ u32 we_on; /* WE assertion time */ u32 we_off; /* WE deassertion time */ /* OE signals timings corresponding to GPMC_CONFIG4 */ u32 oe_on; /* OE assertion time */ u32 oe_off; /* OE deassertion time */ u32 oe_aad_mux_on; /* OE assertion time for AAD */ u32 oe_aad_mux_off; /* OE deassertion time for AAD */ /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ u32 page_burst_access; /* Multiple access word delay */ u32 access; /* Start-cycle to first data valid delay */ u32 rd_cycle; /* Total read cycle time */ u32 wr_cycle; /* Total write cycle time */ u32 bus_turnaround; u32 cycle2cycle_delay; u32 wait_monitoring; u32 clk_activation; /* The following are only on OMAP3430 */ u32 wr_access; /* WRACCESSTIME */ u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ struct gpmc_bool_timings bool_timings; }; /* Device timings in picoseconds */ struct gpmc_device_timings { u32 t_ceasu; /* address setup to CS valid */ u32 t_avdasu; /* address setup to ADV valid */ /* XXX: try to combine t_avdp_r & t_avdp_w. 
Issue is * of tusb using these timings even for sync whilst * ideally for adv_rd/(wr)_off it should have considered * t_avdh instead. This indirectly necessitates r/w * variations of t_avdp as it is possible to have one * sync & other async */ u32 t_avdp_r; /* ADV low time (what about t_cer?) */ u32 t_avdp_w; u32 t_aavdh; /* address hold time */ u32 t_oeasu; /* address setup to OE valid */ u32 t_aa; /* access time from ADV assertion */ u32 t_iaa; /* initial access time */ u32 t_oe; /* access time from OE assertion */ u32 t_ce; /* access time from CS assertion */ u32 t_rd_cycle; /* read cycle time */ u32 t_cez_r; /* read CS deassertion to high Z */ u32 t_cez_w; /* write CS deassertion to high Z */ u32 t_oez; /* OE deassertion to high Z */ u32 t_weasu; /* address setup to WE valid */ u32 t_wpl; /* write assertion time */ u32 t_wph; /* write deassertion time */ u32 t_wr_cycle; /* write cycle time */ u32 clk; u32 t_bacc; /* burst access valid clock to output delay */ u32 t_ces; /* CS setup time to clk */ u32 t_avds; /* ADV setup time to clk */ u32 t_avdh; /* ADV hold time from clk */ u32 t_ach; /* address hold time from clk */ u32 t_rdyo; /* clk to ready valid */ u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ u32 t_ce_avd; /* CS on to ADV on delay */ /* XXX: check the possibility of combining * cyc_aavdh_oe & cyc_aavdh_we */ u8 cyc_aavdh_oe;/* read address hold time in cycles */ u8 cyc_aavdh_we;/* write address hold time in cycles */ u8 cyc_oe; /* access time from OE assertion in cycles */ u8 cyc_wpl; /* write deassertion time in cycles */ u32 cyc_iaa; /* initial access time in cycles */ /* extra delays */ bool ce_xdelay; bool avd_xdelay; bool oe_xdelay; bool we_xdelay; }; #define GPMC_BURST_4 4 /* 4 word burst */ #define GPMC_BURST_8 8 /* 8 word burst */ #define GPMC_BURST_16 16 /* 16 word burst */ #define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ #define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ #define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ #define GPMC_MUX_AD 2 /* Addr-Data multiplex */ struct gpmc_settings { bool burst_wrap; /* enables wrap bursting */ bool burst_read; /* enables read page/burst mode */ bool burst_write; /* enables write page/burst mode */ bool device_nand; /* device is NAND */ bool sync_read; /* enables synchronous reads */ bool sync_write; /* enables synchronous writes */ bool wait_on_read; /* monitor wait on reads */ bool wait_on_write; /* monitor wait on writes */ u32 burst_len; /* page/burst length */ u32 device_width; /* device bus width (8 or 16 bit) */ u32 mux_add_data; /* multiplex address & data */ u32 wait_pin; /* wait-pin to be used */ }; /* Data for each chip select */ struct gpmc_omap_cs_data { bool valid; /* data is valid */ bool is_nand; /* device within this CS is NAND */ struct gpmc_settings *settings; struct gpmc_device_timings *device_timings; struct gpmc_timings *gpmc_timings; struct platform_device *pdev; /* device within this CS region */ unsigned int pdata_size; }; struct gpmc_omap_platform_data { struct gpmc_omap_cs_data cs[GPMC_CS_NUM]; }; #endif /* _GPMC_OMAP_H_ */ platform_data/st33zp24.h 0000644 00000000557 14722070374 0011063 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24 * Copyright (C) 2009 - 2016 STMicroelectronics */ #ifndef __ST33ZP24_H__ #define __ST33ZP24_H__ #define TPM_ST33_I2C "st33zp24-i2c" #define TPM_ST33_SPI "st33zp24-spi" struct st33zp24_platform_data { int io_lpcpd; }; #endif /* __ST33ZP24_H__ */
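/* * A board-file sketch for the ST33ZP24 platform data above (bus, * chip-select, speed and GPIO numbers are illustrative only; a real * board file would also need <linux/spi/spi.h>): * * static struct st33zp24_platform_data st33_pdata = { * .io_lpcpd = 140, * }; * * static struct spi_board_info tpm_spi_info __initdata = { * .modalias = TPM_ST33_SPI, * .bus_num = 0, * .chip_select = 0, * .max_speed_hz = 10000000, * .platform_data = &st33_pdata, * }; */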
platform_data/asoc-palm27x.h 0000644 00000000230 14722070374 0011750 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _INCLUDE_PALMASOC_H_ #define _INCLUDE_PALMASOC_H_ struct palm27x_asoc_info { int jack_gpio; }; #endif platform_data/keyscan-davinci.h 0000644 00000001005 14722070374 0012604 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2009 Texas Instruments, Inc * * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com> */ #ifndef DAVINCI_KEYSCAN_H #define DAVINCI_KEYSCAN_H #include <linux/io.h> enum davinci_matrix_types { DAVINCI_KEYSCAN_MATRIX_4X4, DAVINCI_KEYSCAN_MATRIX_5X3, }; struct davinci_ks_platform_data { int (*device_enable)(struct device *dev); unsigned short *keymap; u32 keymapsize; u8 rep:1; u8 strobe; u8 interval; u8 matrix_type; }; #endif platform_data/lv5207lp.h 0000644 00000000421 14722070374 0011030 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * lv5207lp.h - Sanyo LV5207LP LEDs Driver */ #ifndef __LV5207LP_H__ #define __LV5207LP_H__ struct device; struct lv5207lp_platform_data { struct device *fbdev; unsigned int max_value; unsigned int def_value; }; #endif platform_data/video-mx3fb.h 0000644 00000002623 14722070374 0011666 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2008 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> */ #ifndef __ASM_ARCH_MX3FB_H__ #define __ASM_ARCH_MX3FB_H__ #include <linux/device.h> #include <linux/fb.h> /* Proprietary FB_SYNC_ flags */ #define FB_SYNC_OE_ACT_HIGH 0x80000000 #define FB_SYNC_CLK_INVERT 0x40000000 #define FB_SYNC_DATA_INVERT 0x20000000 #define FB_SYNC_CLK_IDLE_EN 0x10000000 #define FB_SYNC_SHARP_MODE 0x08000000 #define FB_SYNC_SWAP_RGB 0x04000000 #define FB_SYNC_CLK_SEL_EN 0x02000000 /* * Specify the way your display is connected. The IPU can arbitrarily * map the internal colors to the external data lines. We only support * the following mappings at the moment. 
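* * A board with an 18-bit RGB666 panel, for example, would set (sketch): * * .disp_data_fmt = IPU_DISP_DATA_MAPPING_RGB666, * * in its struct mx3fb_platform_data defined below.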
*/ enum disp_data_mapping { /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */ IPU_DISP_DATA_MAPPING_RGB666, /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */ IPU_DISP_DATA_MAPPING_RGB565, /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */ IPU_DISP_DATA_MAPPING_RGB888, }; /** * struct mx3fb_platform_data - mx3fb platform data * * @dma_dev: pointer to the dma-device, used for dma-slave connection * @mode: pointer to a platform-provided per mxc_register_fb() videomode */ struct mx3fb_platform_data { struct device *dma_dev; const char *name; const struct fb_videomode *mode; int num_modes; enum disp_data_mapping disp_data_fmt; }; #endif platform_data/gpio/gpio-amd-fch.h 0000644 00000002317 14722070374 0012734 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * AMD FCH gpio driver platform-data * * Copyright (C) 2018 metux IT consult * Author: Enrico Weigelt <info@metux.net> * */ #ifndef __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H #define __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H #define AMD_FCH_GPIO_DRIVER_NAME "gpio_amd_fch" /* * gpio register index definitions */ #define AMD_FCH_GPIO_REG_GPIO49 0x40 #define AMD_FCH_GPIO_REG_GPIO50 0x41 #define AMD_FCH_GPIO_REG_GPIO51 0x42 #define AMD_FCH_GPIO_REG_GPIO59_DEVSLP0 0x43 #define AMD_FCH_GPIO_REG_GPIO57 0x44 #define AMD_FCH_GPIO_REG_GPIO58 0x45 #define AMD_FCH_GPIO_REG_GPIO59_DEVSLP1 0x46 #define AMD_FCH_GPIO_REG_GPIO64 0x47 #define AMD_FCH_GPIO_REG_GPIO68 0x48 #define AMD_FCH_GPIO_REG_GPIO66_SPKR 0x5B #define AMD_FCH_GPIO_REG_GPIO71 0x4D #define AMD_FCH_GPIO_REG_GPIO32_GE1 0x59 #define AMD_FCH_GPIO_REG_GPIO33_GE2 0x5A #define AMT_FCH_GPIO_REG_GEVT22 0x09 /* * struct amd_fch_gpio_pdata - GPIO chip platform data * @gpio_num: number of entries * @gpio_reg: array of gpio registers * @gpio_names: array of gpio names */ struct amd_fch_gpio_pdata { int gpio_num; int *gpio_reg; const char * const *gpio_names; }; #endif /* __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H */ platform_data/usb-pxa3xx-ulpi.h 0000644 00000001243 14722070374 0012531 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * PXA3xx U2D header * * Copyright (C) 2010 CompuLab Ltd. * * Igor Grinberg <grinberg@compulab.co.il> */ #ifndef __PXA310_U2D__ #define __PXA310_U2D__ #include <linux/usb/ulpi.h> struct pxa3xx_u2d_platform_data { #define ULPI_SER_6PIN (1 << 0) #define ULPI_SER_3PIN (1 << 1) unsigned int ulpi_mode; int (*init)(struct device *); void (*exit)(struct device *); }; /* Start PXA3xx U2D host */ int pxa3xx_u2d_start_hc(struct usb_bus *host); /* Stop PXA3xx U2D host */ void pxa3xx_u2d_stop_hc(struct usb_bus *host); extern void pxa3xx_set_u2d_info(struct pxa3xx_u2d_platform_data *info); #endif /* __PXA310_U2D__ */ platform_data/si5351.h 0000644 00000007062 14722070374 0010476 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Si5351A/B/C programmable clock generator platform_data. 
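* * A minimal platform-data sketch (output index, source selection and * rate are illustrative only): * * static struct si5351_platform_data si5351_pdata = { * .clkout[0] = { * .clkout_src = SI5351_CLKOUT_SRC_MSYNTH_N, * .drive = SI5351_DRIVE_8MA, * .rate = 25000000, * }, * };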
*/ #ifndef __LINUX_PLATFORM_DATA_SI5351_H__ #define __LINUX_PLATFORM_DATA_SI5351_H__ /** * enum si5351_pll_src - Si5351 pll clock source * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config * @SI5351_PLL_SRC_XTAL: pll source clock is XTAL input * @SI5351_PLL_SRC_CLKIN: pll source clock is CLKIN input (Si5351C only) */ enum si5351_pll_src { SI5351_PLL_SRC_DEFAULT = 0, SI5351_PLL_SRC_XTAL = 1, SI5351_PLL_SRC_CLKIN = 2, }; /** * enum si5351_multisynth_src - Si5351 multisynth clock source * @SI5351_MULTISYNTH_SRC_DEFAULT: default, do not change eeprom config * @SI5351_MULTISYNTH_SRC_VCO0: multisynth source clock is VCO0 * @SI5351_MULTISYNTH_SRC_VCO1: multisynth source clock is VCO1/VXCO */ enum si5351_multisynth_src { SI5351_MULTISYNTH_SRC_DEFAULT = 0, SI5351_MULTISYNTH_SRC_VCO0 = 1, SI5351_MULTISYNTH_SRC_VCO1 = 2, }; /** * enum si5351_clkout_src - Si5351 clock output clock source * @SI5351_CLKOUT_SRC_DEFAULT: default, do not change eeprom config * @SI5351_CLKOUT_SRC_MSYNTH_N: clkout N source clock is multisynth N * @SI5351_CLKOUT_SRC_MSYNTH_0_4: clkout N source clock is multisynth 0 (N<4) * or 4 (N>=4) * @SI5351_CLKOUT_SRC_XTAL: clkout N source clock is XTAL * @SI5351_CLKOUT_SRC_CLKIN: clkout N source clock is CLKIN (Si5351C only) */ enum si5351_clkout_src { SI5351_CLKOUT_SRC_DEFAULT = 0, SI5351_CLKOUT_SRC_MSYNTH_N = 1, SI5351_CLKOUT_SRC_MSYNTH_0_4 = 2, SI5351_CLKOUT_SRC_XTAL = 3, SI5351_CLKOUT_SRC_CLKIN = 4, }; /** * enum si5351_drive_strength - Si5351 clock output drive strength * @SI5351_DRIVE_DEFAULT: default, do not change eeprom config * @SI5351_DRIVE_2MA: 2mA clock output drive strength * @SI5351_DRIVE_4MA: 4mA clock output drive strength * @SI5351_DRIVE_6MA: 6mA clock output drive strength * @SI5351_DRIVE_8MA: 8mA clock output drive strength */ enum si5351_drive_strength { SI5351_DRIVE_DEFAULT = 0, SI5351_DRIVE_2MA = 2, SI5351_DRIVE_4MA = 4, SI5351_DRIVE_6MA = 6, SI5351_DRIVE_8MA = 8, }; /** * enum si5351_disable_state - Si5351 clock output disable state * @SI5351_DISABLE_DEFAULT: default, do not change eeprom config * @SI5351_DISABLE_LOW: CLKx is set to a LOW state when disabled * @SI5351_DISABLE_HIGH: CLKx is set to a HIGH state when disabled * @SI5351_DISABLE_FLOATING: CLKx is set to a FLOATING state when * disabled * @SI5351_DISABLE_NEVER: CLKx is NEVER disabled */ enum si5351_disable_state { SI5351_DISABLE_DEFAULT = 0, SI5351_DISABLE_LOW, SI5351_DISABLE_HIGH, SI5351_DISABLE_FLOATING, SI5351_DISABLE_NEVER, }; /** * struct si5351_clkout_config - Si5351 clock output configuration * @multisynth_src: multisynth source clock * @clkout_src: clkout source clock * @pll_master: if true, clkout can also change pll rate * @pll_reset: if true, clkout can reset its pll * @drive: output drive strength * @disable_state: output state when the clock output is disabled * @rate: initial clkout rate, or default if 0 */ struct si5351_clkout_config { enum si5351_multisynth_src multisynth_src; enum si5351_clkout_src clkout_src; enum si5351_drive_strength drive; enum si5351_disable_state disable_state; bool pll_master; bool pll_reset; unsigned long rate; }; /** * struct si5351_platform_data - Platform data for the Si5351 clock driver * @pll_src: array of pll source clock setting * @clkout: array of clkout configuration */ struct si5351_platform_data { enum si5351_pll_src pll_src[2]; struct si5351_clkout_config clkout[8]; }; #endif platform_data/usb-musb-ux500.h 0000644 00000001056 14722070374 0012156 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright
(C) ST-Ericsson SA 2011 * * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com> */ #ifndef __ASM_ARCH_USB_H #define __ASM_ARCH_USB_H #include <linux/dmaengine.h> #define UX500_MUSB_DMA_NUM_RX_TX_CHANNELS 8 struct ux500_musb_board_data { void **dma_rx_param_array; void **dma_tx_param_array; bool (*dma_filter)(struct dma_chan *chan, void *filter_param); }; void ux500_add_usb(struct device *parent, resource_size_t base, int irq, int *dma_rx_cfg, int *dma_tx_cfg); #endif platform_data/irda-pxaficp.h 0000644 00000001300 14722070374 0012101 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ASMARM_ARCH_IRDA_H #define ASMARM_ARCH_IRDA_H /* board specific transceiver capabilities */ #define IR_OFF 1 #define IR_SIRMODE 2 #define IR_FIRMODE 4 struct pxaficp_platform_data { int transceiver_cap; void (*transceiver_mode)(struct device *dev, int mode); int (*startup)(struct device *dev); void (*shutdown)(struct device *dev); int gpio_pwdown; /* powerdown GPIO for the IrDA chip */ bool gpio_pwdown_inverted; /* gpio_pwdown is inverted */ }; extern void pxa_set_ficp_info(struct pxaficp_platform_data *info); #if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x) void pxa2xx_transceiver_mode(struct device *dev, int mode); #endif #endif platform_data/i2c-s3c2410.h 0000644 00000005561 14722070374 0011221 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2004-2009 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C - I2C Controller platform_device info */ #ifndef __I2C_S3C2410_H #define __I2C_S3C2410_H __FILE__ #define S3C_IICFLG_FILTER (1<<0) /* enable s3c2440 filter */ struct platform_device; /** * struct s3c2410_platform_i2c - Platform data for s3c I2C. * @bus_num: The bus number to use (if possible). * @flags: Any flags for the I2C bus (E.g. S3C_IICFLK_FILTER). * @slave_addr: The I2C address for the slave device (if enabled). * @frequency: The desired frequency in Hz of the bus. This is * guaranteed to not be exceeded. If the caller does * not care, use zero and the driver will select a * useful default. * @sda_delay: The delay (in ns) applied to SDA edges. * @cfg_gpio: A callback to configure the pins for I2C operation. */ struct s3c2410_platform_i2c { int bus_num; unsigned int flags; unsigned int slave_addr; unsigned long frequency; unsigned int sda_delay; void (*cfg_gpio)(struct platform_device *dev); }; /** * s3c_i2c0_set_platdata - set platform data for i2c0 device * @i2c: The platform data to set, or NULL for default data. * * Register the given platform data for use with the i2c0 device. This * call copies the platform data, so the caller can use __initdata for * their copy. * * This call will set cfg_gpio if is null to the default platform * implementation. * * Any user of s3c_device_i2c0 should call this, even if it is with * NULL to ensure that the device is given the default platform data * as the driver will no longer carry defaults. 
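* * A machine-file sketch (the bus frequency is illustrative only): * * static struct s3c2410_platform_i2c my_i2c0_pdata __initdata = { * .frequency = 100000, * }; * ... * s3c_i2c0_set_platdata(&my_i2c0_pdata);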
*/ extern void s3c_i2c0_set_platdata(struct s3c2410_platform_i2c *i2c); extern void s3c_i2c1_set_platdata(struct s3c2410_platform_i2c *i2c); extern void s3c_i2c2_set_platdata(struct s3c2410_platform_i2c *i2c); extern void s3c_i2c3_set_platdata(struct s3c2410_platform_i2c *i2c); extern void s3c_i2c4_set_platdata(struct s3c2410_platform_i2c *i2c); extern void s3c_i2c5_set_platdata(struct s3c2410_platform_i2c *i2c); extern void s3c_i2c6_set_platdata(struct s3c2410_platform_i2c *i2c); extern void s3c_i2c7_set_platdata(struct s3c2410_platform_i2c *i2c); extern void s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *i2c); /* defined by architecture to configure gpio */ extern void s3c_i2c0_cfg_gpio(struct platform_device *dev); extern void s3c_i2c1_cfg_gpio(struct platform_device *dev); extern void s3c_i2c2_cfg_gpio(struct platform_device *dev); extern void s3c_i2c3_cfg_gpio(struct platform_device *dev); extern void s3c_i2c4_cfg_gpio(struct platform_device *dev); extern void s3c_i2c5_cfg_gpio(struct platform_device *dev); extern void s3c_i2c6_cfg_gpio(struct platform_device *dev); extern void s3c_i2c7_cfg_gpio(struct platform_device *dev); extern struct s3c2410_platform_i2c default_i2c_data; #endif /* __I2C_S3C2410_H */ platform_data/xilinx-ll-temac.h 0000644 00000002450 14722070374 0012550 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_XILINX_LL_TEMAC_H #define __LINUX_XILINX_LL_TEMAC_H #include <linux/if_ether.h> #include <linux/phy.h> #include <linux/spinlock.h> struct ll_temac_platform_data { bool txcsum; /* Enable/disable TX checksum */ bool rxcsum; /* Enable/disable RX checksum */ u8 mac_addr[ETH_ALEN]; /* MAC address (6 bytes) */ /* Clock frequency for input to MDIO clock generator */ u32 mdio_clk_freq; unsigned long long mdio_bus_id; /* Unique id for MDIO bus */ int phy_addr; /* Address of the PHY to connect to */ phy_interface_t phy_interface; /* PHY interface mode */ bool reg_little_endian; /* Little endian TEMAC register access */ bool dma_little_endian; /* Little endian DMA register access */ /* Pre-initialized mutex to use for synchronizing indirect * register access. When using both interfaces of a single * TEMAC IP block, the same mutex should be passed here, as * they share the same DCR bus bridge. */ spinlock_t *indirect_lock; /* DMA channel control setup */ u8 tx_irq_timeout; /* TX Interrupt Delay Time-out */ u8 tx_irq_count; /* TX Interrupt Coalescing Threshold Count */ u8 rx_irq_timeout; /* RX Interrupt Delay Time-out */ u8 rx_irq_count; /* RX Interrupt Coalescing Threshold Count */ }; #endif /* __LINUX_XILINX_LL_TEMAC_H */ platform_data/touchscreen-s3c2410.h 0000644 00000001123 14722070374 0013054 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org> */ #ifndef __TOUCHSCREEN_S3C2410_H #define __TOUCHSCREEN_S3C2410_H struct s3c2410_ts_mach_info { int delay; int presc; int oversampling_shift; void (*cfg_gpio)(struct platform_device *dev); }; extern void s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *); extern void s3c64xx_ts_set_platdata(struct s3c2410_ts_mach_info *); /* defined by architecture to configure gpio */ extern void s3c24xx_ts_cfg_gpio(struct platform_device *dev); #endif /*__TOUCHSCREEN_S3C2410_H */ platform_data/serial-omap.h 0000644 00000002000 14722070374 0011741 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Driver for OMAP-UART controller. * Based on drivers/serial/8250.c * * Copyright (C) 2010 Texas Instruments. 
* * Authors: * Govindraj R <govindraj.raja@ti.com> * Thara Gopinath <thara@ti.com> */ #ifndef __OMAP_SERIAL_H__ #define __OMAP_SERIAL_H__ #include <linux/serial_core.h> #include <linux/device.h> #include <linux/pm_qos.h> #define OMAP_SERIAL_DRIVER_NAME "omap_uart" /* * Use tty device name as ttyO, [O -> OMAP] * in bootargs we specify as console=ttyO0 if uart1 * is used as console uart. */ #define OMAP_SERIAL_NAME "ttyO" struct omap_uart_port_info { bool dma_enabled; /* To specify DMA Mode */ unsigned int uartclk; /* UART clock rate */ upf_t flags; /* UPF_* flags */ unsigned int dma_rx_buf_size; unsigned int dma_rx_timeout; unsigned int autosuspend_timeout; unsigned int dma_rx_poll_rate; int (*get_context_loss_count)(struct device *); void (*enable_wakeup)(struct device *, bool); }; #endif /* __OMAP_SERIAL_H__ */ platform_data/wiznet.h 0000644 00000000777 14722070374 0011073 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Ethernet driver for the WIZnet W5x00 chip. */ #ifndef PLATFORM_DATA_WIZNET_H #define PLATFORM_DATA_WIZNET_H #include <linux/if_ether.h> struct wiznet_platform_data { int link_gpio; u8 mac_addr[ETH_ALEN]; }; #ifndef CONFIG_WIZNET_BUS_SHIFT #define CONFIG_WIZNET_BUS_SHIFT 0 #endif #define W5100_BUS_DIRECT_SIZE (0x8000 << CONFIG_WIZNET_BUS_SHIFT) #define W5300_BUS_DIRECT_SIZE (0x0400 << CONFIG_WIZNET_BUS_SHIFT) #endif /* PLATFORM_DATA_WIZNET_H */ platform_data/wilco-ec.h 0000644 00000014563 14722070374 0011253 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * ChromeOS Wilco Embedded Controller * * Copyright 2018 Google LLC */ #ifndef WILCO_EC_H #define WILCO_EC_H #include <linux/device.h> #include <linux/kernel.h> /* Message flags for using the mailbox() interface */ #define WILCO_EC_FLAG_NO_RESPONSE BIT(0) /* EC does not respond */ /* Normal commands have a maximum 32 bytes of data */ #define EC_MAILBOX_DATA_SIZE 32 /** * struct wilco_ec_device - Wilco Embedded Controller handle. * @dev: Device handle. * @mailbox_lock: Mutex to ensure one mailbox command at a time. * @io_command: I/O port for mailbox command. Provided by ACPI. * @io_data: I/O port for mailbox data. Provided by ACPI. * @io_packet: I/O port for mailbox packet data. Provided by ACPI. * @data_buffer: Buffer used for EC communication. The same buffer * is used to hold the request and the response. * @data_size: Size of the data buffer used for EC communication. * @debugfs_pdev: The child platform_device used by the debugfs sub-driver. * @rtc_pdev: The child platform_device used by the RTC sub-driver. * @telem_pdev: The child platform_device used by the telemetry sub-driver. */ struct wilco_ec_device { struct device *dev; struct mutex mailbox_lock; struct resource *io_command; struct resource *io_data; struct resource *io_packet; void *data_buffer; size_t data_size; struct platform_device *debugfs_pdev; struct platform_device *rtc_pdev; struct platform_device *telem_pdev; }; /** * struct wilco_ec_request - Mailbox request message format. * @struct_version: Should be %EC_MAILBOX_PROTO_VERSION * @checksum: Sum of all bytes must be 0. * @mailbox_id: Mailbox identifier, specifies the command set. * @mailbox_version: Mailbox interface version %EC_MAILBOX_VERSION * @reserved: Set to zero. * @data_size: Length of following data. */ struct wilco_ec_request { u8 struct_version; u8 checksum; u16 mailbox_id; u8 mailbox_version; u8 reserved; u16 data_size; } __packed; /** * struct wilco_ec_response - Mailbox response message format. 
* @struct_version: Should be %EC_MAILBOX_PROTO_VERSION * @checksum: Sum of all bytes must be 0. * @result: Result code from the EC. Non-zero indicates an error. * @data_size: Length of the response data buffer. * @reserved: Set to zero. * @data: Response data buffer. Max size is %EC_MAILBOX_DATA_SIZE_EXTENDED. */ struct wilco_ec_response { u8 struct_version; u8 checksum; u16 result; u16 data_size; u8 reserved[2]; u8 data[0]; } __packed; /** * enum wilco_ec_msg_type - Message type to select a set of command codes. * @WILCO_EC_MSG_LEGACY: Legacy EC messages for standard EC behavior. * @WILCO_EC_MSG_PROPERTY: Get/Set/Sync EC controlled NVRAM property. * @WILCO_EC_MSG_TELEMETRY: Request telemetry data from the EC. */ enum wilco_ec_msg_type { WILCO_EC_MSG_LEGACY = 0x00f0, WILCO_EC_MSG_PROPERTY = 0x00f2, WILCO_EC_MSG_TELEMETRY = 0x00f5, }; /** * struct wilco_ec_message - Request and response message. * @type: Mailbox message type. * @flags: Message flags, e.g. %WILCO_EC_FLAG_NO_RESPONSE. * @request_size: Number of bytes to send to the EC. * @request_data: Buffer containing the request data. * @response_size: Number of bytes to read from EC. * @response_data: Buffer containing the response data, should be * response_size bytes and allocated by caller. */ struct wilco_ec_message { enum wilco_ec_msg_type type; u8 flags; size_t request_size; void *request_data; size_t response_size; void *response_data; }; /** * wilco_ec_mailbox() - Send request to the EC and receive the response. * @ec: Wilco EC device. * @msg: Wilco EC message. * * Return: Number of bytes received or negative error code on failure. */ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg); /* * A Property is typically a data item that is stored to NVRAM * by the EC. Each of these data items has an index associated * with it, known as the Property ID (PID). Properties may have * variable lengths, up to a max of WILCO_EC_PROPERTY_MAX_SIZE * bytes. Properties can be simple integers, or they may be more * complex binary data. */ #define WILCO_EC_PROPERTY_MAX_SIZE 4 /** * struct wilco_ec_property_msg - Message to get or set a property. * @property_id: Which property to get or set. * @length: Number of bytes of |data| that are used. * @data: Actual property data. */ struct wilco_ec_property_msg { u32 property_id; int length; u8 data[WILCO_EC_PROPERTY_MAX_SIZE]; }; /** * wilco_ec_get_property() - Retrieve a property from the EC. * @ec: Embedded Controller device. * @prop_msg: Message for request and response. * * The property_id field of |prop_msg| should be filled before calling this * function. The result will be stored in the data and length fields. * * Return: 0 on success, negative error code on failure. */ int wilco_ec_get_property(struct wilco_ec_device *ec, struct wilco_ec_property_msg *prop_msg); /** * wilco_ec_set_property() - Store a property on the EC. * @ec: Embedded Controller device. * @prop_msg: Message for request and response. * * The property_id, length, and data fields of |prop_msg| should be * filled before calling this function. * * Return: 0 on success, negative error code on failure. */ int wilco_ec_set_property(struct wilco_ec_device *ec, struct wilco_ec_property_msg *prop_msg); /** * wilco_ec_get_byte_property() - Retrieve a byte-size property from the EC. * @ec: Embedded Controller device. * @property_id: Which property to retrieve. * @val: The result value, will be filled by this function. * * Return: 0 on success, negative error code on failure.
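* * A usage sketch (the property ID value is illustrative only): * * u8 val; * int err = wilco_ec_get_byte_property(ec, 0x0530, &val);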
*/ int wilco_ec_get_byte_property(struct wilco_ec_device *ec, u32 property_id, u8 *val); /** * wilco_ec_set_byte_property() - Store a byte-size property on the EC. * @ec: Embedded Controller device. * @property_id: Which property to store. * @val: Value to store. * * Return: 0 on success, negative error code on failure. */ int wilco_ec_set_byte_property(struct wilco_ec_device *ec, u32 property_id, u8 val); /** * wilco_ec_add_sysfs() - Create sysfs entries * @ec: Wilco EC device * * wilco_ec_remove_sysfs() needs to be called afterwards * to perform the necessary cleanup. * * Return: 0 on success or negative error code on failure. */ int wilco_ec_add_sysfs(struct wilco_ec_device *ec); void wilco_ec_remove_sysfs(struct wilco_ec_device *ec); #endif /* WILCO_EC_H */ platform_data/dma-s3c24xx.h 0000644 00000002463 14722070374 0011522 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * S3C24XX DMA handling * * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> */ /* Helper to encode the source selection constraints for early s3c socs. */ #define S3C24XX_DMA_CHANREQ(src, chan) ((BIT(3) | (src)) << ((chan) * 4)) enum s3c24xx_dma_bus { S3C24XX_DMA_APB, S3C24XX_DMA_AHB, }; /** * struct s3c24xx_dma_channel - description of a DMA channel * @bus: on which bus does the peripheral reside - AHB or APB. * @handshake: is a handshake with the peripheral necessary * @chansel: channel selection information, depending on variant; reqsel for * s3c2443 and later and channel-selection map for earlier SoCs * see CHANSEL doc in s3c2443-dma.c */ struct s3c24xx_dma_channel { enum s3c24xx_dma_bus bus; bool handshake; u16 chansel; }; struct dma_slave_map; /** * struct s3c24xx_dma_platdata - platform specific settings * @num_phy_channels: number of physical channels * @channels: array of virtual channel descriptions * @num_channels: number of virtual channels * @slave_map: dma slave map matching table * @slavecnt: number of elements in slave_map */ struct s3c24xx_dma_platdata { int num_phy_channels; struct s3c24xx_dma_channel *channels; int num_channels; const struct dma_slave_map *slave_map; int slavecnt; }; struct dma_chan; bool s3c24xx_dma_filter(struct dma_chan *chan, void *param); platform_data/max6639.h 0000644 00000000661 14722070374 0010660 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MAX6639_H #define _LINUX_MAX6639_H #include <linux/types.h> /* platform data for the MAX6639 temperature sensor and fan control */ struct max6639_platform_data { bool pwm_polarity; /* Polarity low (0) or high (1, default) */ int ppr; /* Pulses per rotation 1..4 (default == 2) */ int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */ }; #endif /* _LINUX_MAX6639_H */ platform_data/video-pxafb.h 0000644 00000012352 14722070374 0011747 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Support for the xscale frame buffer.
* * Author: Jean-Frederic Clere * Created: Sep 22, 2003 * Copyright: jfclere@sinix.net */ #include <linux/fb.h> #include <mach/regs-lcd.h> /* * Supported LCD connections * * bits 0 - 3: for LCD panel type: * * STN - for passive matrix * DSTN - for dual scan passive matrix * TFT - for active matrix * * bits 4 - 9 : for bus width * bits 10-17 : for AC Bias Pin Frequency * bit 18 : for output enable polarity * bit 19 : for pixel clock edge * bit 20 : for output pixel format when base is RGBT16 */ #define LCD_CONN_TYPE(_x) ((_x) & 0x0f) #define LCD_CONN_WIDTH(_x) (((_x) >> 4) & 0x1f) #define LCD_TYPE_MASK 0xf #define LCD_TYPE_UNKNOWN 0 #define LCD_TYPE_MONO_STN 1 #define LCD_TYPE_MONO_DSTN 2 #define LCD_TYPE_COLOR_STN 3 #define LCD_TYPE_COLOR_DSTN 4 #define LCD_TYPE_COLOR_TFT 5 #define LCD_TYPE_SMART_PANEL 6 #define LCD_TYPE_MAX 7 #define LCD_MONO_STN_4BPP ((4 << 4) | LCD_TYPE_MONO_STN) #define LCD_MONO_STN_8BPP ((8 << 4) | LCD_TYPE_MONO_STN) #define LCD_MONO_DSTN_8BPP ((8 << 4) | LCD_TYPE_MONO_DSTN) #define LCD_COLOR_STN_8BPP ((8 << 4) | LCD_TYPE_COLOR_STN) #define LCD_COLOR_DSTN_16BPP ((16 << 4) | LCD_TYPE_COLOR_DSTN) #define LCD_COLOR_TFT_8BPP ((8 << 4) | LCD_TYPE_COLOR_TFT) #define LCD_COLOR_TFT_16BPP ((16 << 4) | LCD_TYPE_COLOR_TFT) #define LCD_COLOR_TFT_18BPP ((18 << 4) | LCD_TYPE_COLOR_TFT) #define LCD_SMART_PANEL_8BPP ((8 << 4) | LCD_TYPE_SMART_PANEL) #define LCD_SMART_PANEL_16BPP ((16 << 4) | LCD_TYPE_SMART_PANEL) #define LCD_SMART_PANEL_18BPP ((18 << 4) | LCD_TYPE_SMART_PANEL) #define LCD_AC_BIAS_FREQ(x) (((x) & 0xff) << 10) #define LCD_BIAS_ACTIVE_HIGH (0 << 18) #define LCD_BIAS_ACTIVE_LOW (1 << 18) #define LCD_PCLK_EDGE_RISE (0 << 19) #define LCD_PCLK_EDGE_FALL (1 << 19) #define LCD_ALTERNATE_MAPPING (1 << 20) /* * This structure describes the machine which we are running on. * It is set in linux/arch/arm/mach-pxa/machine_name.c and used in the probe routine * of linux/drivers/video/pxafb.c */ struct pxafb_mode_info { u_long pixclock; u_short xres; u_short yres; u_char bpp; u_int cmap_greyscale:1, depth:8, transparency:1, unused:22; /* Parallel Mode Timing */ u_char hsync_len; u_char left_margin; u_char right_margin; u_char vsync_len; u_char upper_margin; u_char lower_margin; u_char sync; /* Smart Panel Mode Timing - see PXA27x DM 7.4.15.0.3 for details * Note: * 1. all parameters in nanosecond (ns) * 2. a0cs{rd,wr}_set_hld are controlled by the same register bits * in pxa27x and pxa3xx, initialize them to the same value or * the larger one will be used * 3. same to {rd,wr}_pulse_width * * 4. LCD_PCLK_EDGE_{RISE,FALL} controls the L_PCLK_WR polarity * 5. sync & FB_SYNC_HOR_HIGH_ACT controls the L_LCLK_A0 * 6. 
sync & FB_SYNC_VERT_HIGH_ACT controls the L_LCLK_RD */ unsigned a0csrd_set_hld; /* A0 and CS Setup/Hold Time before/after L_FCLK_RD */ unsigned a0cswr_set_hld; /* A0 and CS Setup/Hold Time before/after L_PCLK_WR */ unsigned wr_pulse_width; /* L_PCLK_WR pulse width */ unsigned rd_pulse_width; /* L_FCLK_RD pulse width */ unsigned cmd_inh_time; /* Command Inhibit time between two writes */ unsigned op_hold_time; /* Output Hold time from L_FCLK_RD negation */ }; struct pxafb_mach_info { struct pxafb_mode_info *modes; unsigned int num_modes; unsigned int lcd_conn; unsigned long video_mem_size; u_int fixed_modes:1, cmap_inverse:1, cmap_static:1, acceleration_enabled:1, unused:28; /* The following should be defined in LCCR0 * LCCR0_Act or LCCR0_Pas Active or Passive * LCCR0_Sngl or LCCR0_Dual Single/Dual panel * LCCR0_Mono or LCCR0_Color Mono/Color * LCCR0_4PixMono or LCCR0_8PixMono (in mono single mode) * LCCR0_DMADel(Tcpu) (optional) DMA request delay * * The following should not be defined in LCCR0: * LCCR0_OUM, LCCR0_BM, LCCR0_QDM, LCCR0_DIS, LCCR0_EFM * LCCR0_IUM, LCCR0_SFM, LCCR0_LDM, LCCR0_ENB */ u_int lccr0; /* The following should be defined in LCCR3 * LCCR3_OutEnH or LCCR3_OutEnL Output enable polarity * LCCR3_PixRsEdg or LCCR3_PixFlEdg Pixel clock edge type * LCCR3_Acb(X) AC Bias pin frequency * LCCR3_DPC (optional) Double Pixel Clock mode (untested) * * The following should not be defined in LCCR3 * LCCR3_HSP, LCCR3_VSP, LCCR0_Pcd(x), LCCR3_Bpp */ u_int lccr3; /* The following should be defined in LCCR4 * LCCR4_PAL_FOR_0 or LCCR4_PAL_FOR_1 or LCCR4_PAL_FOR_2 * * All other bits in LCCR4 should be left alone. */ u_int lccr4; void (*pxafb_backlight_power)(int); void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *); void (*smart_update)(struct fb_info *); }; void pxa_set_fb_info(struct device *, struct pxafb_mach_info *); unsigned long pxafb_get_hsync_time(struct device *dev); #ifdef CONFIG_FB_PXA_SMARTPANEL extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int); extern int pxafb_smart_flush(struct fb_info *info); #else static inline int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n) { return 0; } static inline int pxafb_smart_flush(struct fb_info *info) { return 0; } #endif platform_data/adau1977.h 0000644 00000002417 14722070374 0011006 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * ADAU1977/ADAU1978/ADAU1979 driver * * Copyright 2014 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de> */ #ifndef __LINUX_PLATFORM_DATA_ADAU1977_H__ #define __LINUX_PLATFORM_DATA_ADAU1977_H__ /** * enum adau1977_micbias - ADAU1977 MICBIAS pin voltage setting * @ADAU1977_MICBIAS_5V0: MICBIAS is set to 5.0 V * @ADAU1977_MICBIAS_5V5: MICBIAS is set to 5.5 V * @ADAU1977_MICBIAS_6V0: MICBIAS is set to 6.0 V * @ADAU1977_MICBIAS_6V5: MICBIAS is set to 6.5 V * @ADAU1977_MICBIAS_7V0: MICBIAS is set to 7.0 V * @ADAU1977_MICBIAS_7V5: MICBIAS is set to 7.5 V * @ADAU1977_MICBIAS_8V0: MICBIAS is set to 8.0 V * @ADAU1977_MICBIAS_8V5: MICBIAS is set to 8.5 V * @ADAU1977_MICBIAS_9V0: MICBIAS is set to 9.0 V */ enum adau1977_micbias { ADAU1977_MICBIAS_5V0 = 0x0, ADAU1977_MICBIAS_5V5 = 0x1, ADAU1977_MICBIAS_6V0 = 0x2, ADAU1977_MICBIAS_6V5 = 0x3, ADAU1977_MICBIAS_7V0 = 0x4, ADAU1977_MICBIAS_7V5 = 0x5, ADAU1977_MICBIAS_8V0 = 0x6, ADAU1977_MICBIAS_8V5 = 0x7, ADAU1977_MICBIAS_9V0 = 0x8, }; /** * struct adau1977_platform_data - Platform configuration data for the ADAU1977 * @micbias: Specifies the voltage for the MICBIAS pin */ struct adau1977_platform_data { enum adau1977_micbias micbias; }; #endif platform_data/dma-iop32x.h 0000644 00000006423 14722070374 0011430 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2006, Intel Corporation. */ #ifndef IOP_ADMA_H #define IOP_ADMA_H #include <linux/types.h> #include <linux/dmaengine.h> #include <linux/interrupt.h> #define IOP_ADMA_SLOT_SIZE 32 #define IOP_ADMA_THRESHOLD 4 #ifdef DEBUG #define IOP_PARANOIA 1 #else #define IOP_PARANOIA 0 #endif #define iop_paranoia(x) BUG_ON(IOP_PARANOIA && (x)) #define DMA0_ID 0 #define DMA1_ID 1 #define AAU_ID 2 /** * struct iop_adma_device - internal representation of an ADMA device * @pdev: Platform device * @id: HW ADMA Device selector * @dma_desc_pool: base of DMA descriptor region (DMA address) * @dma_desc_pool_virt: base of DMA descriptor region (CPU address) * @common: embedded struct dma_device */ struct iop_adma_device { struct platform_device *pdev; int id; dma_addr_t dma_desc_pool; void *dma_desc_pool_virt; struct dma_device common; }; /** * struct iop_adma_chan - internal representation of an ADMA channel * @pending: allows batching of hardware operations * @lock: serializes enqueue/dequeue operations to the slot pool * @mmr_base: memory mapped register base * @chain: device chain view of the descriptors * @device: parent device * @common: common dmaengine channel object members * @last_used: place holder for allocation to continue from where it left off * @all_slots: complete domain of slots usable by the channel * @slots_allocated: records the actual size of the descriptor slot pool * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs */ struct iop_adma_chan { int pending; spinlock_t lock; /* protects the descriptor slot pool */ void __iomem *mmr_base; struct list_head chain; struct iop_adma_device *device; struct dma_chan common; struct iop_adma_desc_slot *last_used; struct list_head all_slots; int slots_allocated; struct tasklet_struct irq_tasklet; }; /** * struct iop_adma_desc_slot - IOP-ADMA software descriptor * @slot_node: node on the iop_adma_chan.all_slots list * @chain_node: node on the iop_adma_chan.chain list * @hw_desc: virtual address of the hardware descriptor chain * @phys: hardware address of the hardware descriptor chain * @group_head: first operation in a transaction * @slot_cnt: total slots used in a transaction (group of operations) * @slots_per_op: number of slots per operation * @idx: pool index * @tx_list: list of
descriptors that are associated with one operation * @async_tx: support for the async_tx api * @group_list: list of slots that make up a multi-descriptor transaction * for example transfer lengths larger than the supported hw max * @xor_check_result: result of zero sum * @crc32_result: result of CRC calculation */ struct iop_adma_desc_slot { struct list_head slot_node; struct list_head chain_node; void *hw_desc; struct iop_adma_desc_slot *group_head; u16 slot_cnt; u16 slots_per_op; u16 idx; struct list_head tx_list; struct dma_async_tx_descriptor async_tx; union { u32 *xor_check_result; u32 *crc32_result; u32 *pq_check_result; }; }; struct iop_adma_platform_data { int hw_id; dma_cap_mask_t cap_mask; size_t pool_size; }; #define to_iop_sw_desc(addr_hw_desc) \ container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc) #define iop_hw_desc_slot_idx(hw_desc, idx) \ ( (void *) (((unsigned long) hw_desc) + ((idx) << 5)) ) #endif platform_data/rtc-v3020.h 0000644 00000002041 14722070374 0011075 0 ustar 00 /* * v3020.h - Registers definition and platform data structure for the v3020 RTC. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2006, 8D Technologies inc. */ #ifndef __LINUX_V3020_H #define __LINUX_V3020_H /* The v3020 has only one data pin but which one * is used depends on the board. */ struct v3020_platform_data { int leftshift; /* (1<<(leftshift)) & readl() */ unsigned int use_gpio:1; unsigned int gpio_cs; unsigned int gpio_wr; unsigned int gpio_rd; unsigned int gpio_io; }; #define V3020_STATUS_0 0x00 #define V3020_STATUS_1 0x01 #define V3020_SECONDS 0x02 #define V3020_MINUTES 0x03 #define V3020_HOURS 0x04 #define V3020_MONTH_DAY 0x05 #define V3020_MONTH 0x06 #define V3020_YEAR 0x07 #define V3020_WEEK_DAY 0x08 #define V3020_WEEK 0x09 #define V3020_IS_COMMAND(val) ((val)>=0x0E) #define V3020_CMD_RAM2CLOCK 0x0E #define V3020_CMD_CLOCK2RAM 0x0F #endif /* __LINUX_V3020_H */ platform_data/i2c-xiic.h 0000644 00000001525 14722070374 0011152 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * i2c-xiic.h * Copyright (c) 2009 Intel Corporation */ /* Supports: * Xilinx IIC */ #ifndef _LINUX_I2C_XIIC_H #define _LINUX_I2C_XIIC_H /** * struct xiic_i2c_platform_data - Platform data of the Xilinx I2C driver * @num_devices: Number of devices that shall be added when the driver * is probed. * @devices: The actual devices to add. * * The purpose of this platform data struct is to be able to provide a number * of devices that should be added to the I2C bus. The reason is that sometimes * the I2C board info is not enough, a new PCI board can for instance be * plugged into a standard PC, and the bus number might be unknown at * early init time.
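 * * A board-code sketch under assumptions (the "tmp275" device name and * address 0x48 below are illustrative, not part of this header): * * static struct i2c_board_info const xiic_devs[] = { * { I2C_BOARD_INFO("tmp275", 0x48) }, * }; * * static struct xiic_i2c_platform_data xiic_pdata = { * .num_devices = ARRAY_SIZE(xiic_devs), * .devices = xiic_devs, * };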
*/ struct xiic_i2c_platform_data { u8 num_devices; struct i2c_board_info const *devices; }; #endif /* _LINUX_I2C_XIIC_H */ platform_data/simplefb.h 0000644 00000004071 14722070374 0011343 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * simplefb.h - Simple Framebuffer Device * * Copyright (C) 2013 David Herrmann <dh.herrmann@gmail.com> */ #ifndef __PLATFORM_DATA_SIMPLEFB_H__ #define __PLATFORM_DATA_SIMPLEFB_H__ #include <drm/drm_fourcc.h> #include <linux/fb.h> #include <linux/kernel.h> /* format array, use it to initialize a "struct simplefb_format" array */ #define SIMPLEFB_FORMATS \ { \ { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \ { "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \ { "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \ { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \ { "x8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_XRGB8888 }, \ { "a8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {24, 8}, DRM_FORMAT_ARGB8888 }, \ { "a8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {24, 8}, DRM_FORMAT_ABGR8888 }, \ { "x2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {0, 0}, DRM_FORMAT_XRGB2101010 }, \ { "a2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {30, 2}, DRM_FORMAT_ARGB2101010 }, \ } /* * Data-Format for Simple-Framebuffers * @name: unique 0-terminated name that can be used to identify the mode * @red,green,blue: Offsets and sizes of the single RGB parts * @transp: Offset and size of the alpha bits. length=0 means no alpha * @fourcc: 32bit DRM four-CC code (see drm_fourcc.h) */ struct simplefb_format { const char *name; u32 bits_per_pixel; struct fb_bitfield red; struct fb_bitfield green; struct fb_bitfield blue; struct fb_bitfield transp; u32 fourcc; }; /* * Simple-Framebuffer description * If the arch-boot code creates simple-framebuffers without DT support, it * can pass the width, height, stride and format via this platform-data object. * The framebuffer location must be given as IORESOURCE_MEM resource. * @format must be a format as described in "struct simplefb_format" above. */ struct simplefb_platform_data { u32 width; u32 height; u32 stride; const char *format; }; #endif /* __PLATFORM_DATA_SIMPLEFB_H__ */ platform_data/pca953x.h 0000644 00000001262 14722070374 0010735 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PCA953X_H #define _LINUX_PCA953X_H #include <linux/types.h> #include <linux/i2c.h> /* platform data for the PCA9539 16-bit I/O expander driver */ struct pca953x_platform_data { /* number of the first GPIO */ unsigned gpio_base; /* initial polarity inversion setting */ u32 invert; /* interrupt base */ int irq_base; void *context; /* param to setup/teardown */ int (*setup)(struct i2c_client *client, unsigned gpio, unsigned ngpio, void *context); int (*teardown)(struct i2c_client *client, unsigned gpio, unsigned ngpio, void *context); const char *const *names; }; #endif /* _LINUX_PCA953X_H */ platform_data/gpio-htc-egpio.h 0000644 00000003174 14722070374 0012360 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * HTC simple EGPIO irq and gpio extender */ #ifndef __HTC_EGPIO_H__ #define __HTC_EGPIO_H__ /* Descriptive values for all-in or all-out htc_egpio_chip descriptors. 
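 * * A sketch of one all-output descriptor (field values are illustrative; * see struct htc_egpio_chip below): * * static struct htc_egpio_chip egpio_chips[] = { * [0] = { * .reg_start = 0, * .gpio_base = 128, * .num_gpios = 16, * .direction = HTC_EGPIO_OUTPUT, * }, * };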
*/ #define HTC_EGPIO_OUTPUT (~0) #define HTC_EGPIO_INPUT 0 /** * struct htc_egpio_chip - descriptor to create gpio_chip for register range * @reg_start: index of first register * @gpio_base: gpio number of first pin in this register range * @num_gpios: number of gpios in this register range, max BITS_PER_LONG * (number of registers = DIV_ROUND_UP(num_gpios, reg_width)) * @direction: bitfield, '0' = input, '1' = output * @initial_values: initial values driven on the pins configured as output */ struct htc_egpio_chip { int reg_start; int gpio_base; int num_gpios; unsigned long direction; unsigned long initial_values; }; /** * struct htc_egpio_platform_data - description provided by the arch * @irq_base: beginning of available IRQs (eg, IRQ_BOARD_START) * @num_irqs: number of irqs * @reg_width: number of bits per register, either 8 or 16 bit * @bus_width: alignment of the registers, either 16 or 32 bit * @invert_acks: set if chip requires writing '0' to ack an irq, instead of '1' * @ack_register: location of the irq/ack register * @chip: pointer to array of htc_egpio_chip descriptors * @num_chips: number of egpio chip descriptors */ struct htc_egpio_platform_data { int bus_width; int reg_width; int irq_base; int num_irqs; int invert_acks; int ack_register; struct htc_egpio_chip *chip; int num_chips; }; #endif platform_data/usb-omap1.h 0000644 00000002741 14722070374 0011350 0 ustar 00 /* * Platform data for OMAP1 USB * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive for * more details. */ #ifndef __LINUX_USB_OMAP1_H #define __LINUX_USB_OMAP1_H #include <linux/platform_device.h> struct omap_usb_config { /* Configure drivers according to the connectors on your board: * - "A" connector (rectangular) * ... for host/OHCI use, set "register_host". * - "B" connector (squarish) or "Mini-B" * ... for device/gadget use, set "register_dev". * - "Mini-AB" connector (very similar to Mini-B) * ... for OTG use as device OR host, initialize "otg" */ unsigned register_host:1; unsigned register_dev:1; u8 otg; /* port number, 1-based: usb1 == 2 */ const char *extcon; /* extcon device for OTG */ u8 hmc_mode; /* implicitly true if otg: host supports remote wakeup? */ u8 rwc; /* signaling pins used to talk to transceiver on usbN: * 0 == usbN unused * 2 == usb0-only, using internal transceiver * 3 == 3 wire bidirectional * 4 == 4 wire bidirectional * 6 == 6 wire unidirectional (or TLL) */ u8 pins[3]; struct platform_device *udc_device; struct platform_device *ohci_device; struct platform_device *otg_device; u32 (*usb0_init)(unsigned nwires, unsigned is_device); u32 (*usb1_init)(unsigned nwires); u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup); int (*ocpi_enable)(void); }; #endif /* __LINUX_USB_OMAP1_H */ platform_data/intel-spi.h 0000644 00000001135 14722070374 0011444 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Intel PCH/PCU SPI flash driver.
* * Copyright (C) 2016, Intel Corporation * Author: Mika Westerberg <mika.westerberg@linux.intel.com> */ #ifndef INTEL_SPI_PDATA_H #define INTEL_SPI_PDATA_H enum intel_spi_type { INTEL_SPI_BYT = 1, INTEL_SPI_LPT, INTEL_SPI_BXT, }; /** * struct intel_spi_boardinfo - Board specific data for Intel SPI driver * @type: Type which this controller is compatible with * @writeable: The chip is writeable */ struct intel_spi_boardinfo { enum intel_spi_type type; bool writeable; }; #endif /* INTEL_SPI_PDATA_H */ platform_data/crypto-atmel.h 0000644 00000001041 14722070374 0012154 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_CRYPTO_ATMEL_H #define __LINUX_CRYPTO_ATMEL_H #include <linux/platform_data/dma-atmel.h> /** * struct crypto_dma_data - DMA data for AES/TDES/SHA */ struct crypto_dma_data { struct at_dma_slave txdata; struct at_dma_slave rxdata; }; /** * struct crypto_platform_data - board-specific AES/TDES/SHA configuration * @dma_slave: DMA slave interface to use in data transfers. */ struct crypto_platform_data { struct crypto_dma_data *dma_slave; }; #endif /* __LINUX_CRYPTO_ATMEL_H */ platform_data/asoc-s3c.h 0000644 00000002513 14722070374 0011154 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2009 Samsung Electronics Co. Ltd * Author: Jaswinder Singh <jassi.brar@samsung.com> */ /* The machine init code calls s3c*_ac97_setup_gpio with * one of these defines in order to select appropriate bank * of GPIO for AC97 pins */ #define S3C64XX_AC97_GPD 0 #define S3C64XX_AC97_GPE 1 #include <linux/dmaengine.h> extern void s3c64xx_ac97_setup_gpio(int); struct samsung_i2s_type { /* If the Primary DAI has 5.1 Channels */ #define QUIRK_PRI_6CHAN (1 << 0) /* If the I2S block has a Stereo Overlay Channel */ #define QUIRK_SEC_DAI (1 << 1) /* * If the I2S block has no internal prescalar or MUX (I2SMOD[10] bit) * The Machine driver must provide suitably set clock to the I2S block. */ #define QUIRK_NO_MUXPSR (1 << 2) #define QUIRK_NEED_RSTCLR (1 << 3) #define QUIRK_SUPPORTS_TDM (1 << 4) #define QUIRK_SUPPORTS_IDMA (1 << 5) /* Quirks of the I2S controller */ u32 quirks; dma_addr_t idma_addr; }; /** * struct s3c_audio_pdata - common platform data for audio device drivers * @cfg_gpio: Callback function to setup mux'ed pins in I2S/PCM/AC97 mode */ struct s3c_audio_pdata { int (*cfg_gpio)(struct platform_device *); dma_filter_fn dma_filter; void *dma_playback; void *dma_capture; void *dma_play_sec; void *dma_capture_mic; struct samsung_i2s_type type; }; platform_data/video_s3c.h 0000644 00000003334 14722070374 0011421 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PLATFORM_DATA_VIDEO_S3C #define __PLATFORM_DATA_VIDEO_S3C /* S3C_FB_MAX_WIN * Set to the maximum number of windows that any of the supported hardware * can use. Since the platform data uses this for an array size, having it * set to the maximum of any version of the hardware can do is safe. */ #define S3C_FB_MAX_WIN (5) /** * struct s3c_fb_pd_win - per window setup data * @xres : The window X size. * @yres : The window Y size. * @virtual_x: The virtual X size. * @virtual_y: The virtual Y size. */ struct s3c_fb_pd_win { unsigned short default_bpp; unsigned short max_bpp; unsigned short xres; unsigned short yres; unsigned short virtual_x; unsigned short virtual_y; }; /** * struct s3c_fb_platdata - S3C driver platform specific information * @setup_gpio: Setup the external GPIO pins to the right state to transfer * the data from the display system to the connected display * device. 
* @vidcon0: The base vidcon0 values to control the panel data format. * @vidcon1: The base vidcon1 values to control the panel data output. * @vtiming: Video timing when connected to a RGB type panel. * @win: The setup data for each hardware window, or NULL for unused. * @display_mode: The LCD output display mode. * * The platform data supplies the video driver with all the information * it requires to work with the display(s) attached to the machine. It * controls the initial mode, the number of display windows (0 is always * the base framebuffer) that are initialised etc. * */ struct s3c_fb_platdata { void (*setup_gpio)(void); struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN]; struct fb_videomode *vtiming; u32 vidcon0; u32 vidcon1; }; #endif platform_data/irda-sa11x0.h 0000644 00000000633 14722070374 0011474 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * arch/arm/include/asm/mach/irda.h * * Copyright (C) 2004 Russell King. */ #ifndef __ASM_ARM_MACH_IRDA_H #define __ASM_ARM_MACH_IRDA_H struct irda_platform_data { int (*startup)(struct device *); void (*shutdown)(struct device *); int (*set_power)(struct device *, unsigned int state); void (*set_speed)(struct device *, unsigned int speed); }; #endif platform_data/b53.h 0000644 00000002211 14722070374 0010125 0 ustar 00 /* * B53 platform data * * Copyright (C) 2013 Jonas Gorski <jogo@openwrt.org> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __B53_H #define __B53_H #include <linux/kernel.h> #include <linux/platform_data/dsa.h> struct b53_platform_data { /* Must be first such that dsa_register_switch() can access it */ struct dsa_chip_data cd; u32 chip_id; u16 enabled_ports; /* only used by MMAP'd driver */ unsigned big_endian:1; void __iomem *regs; }; #endif platform_data/mtd-mxc_nand.h 0000644 00000001132 14722070374 0012106 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2008 Sascha Hauer, kernel@pengutronix.de */ #ifndef __ASM_ARCH_NAND_H #define __ASM_ARCH_NAND_H #include <linux/mtd/partitions.h> struct mxc_nand_platform_data { unsigned int width; /* data bus width in bytes */ unsigned int hw_ecc:1; /* 0 if suppress hardware ECC */ unsigned int flash_bbt:1; /* set to 1 to use a flash based bbt */ struct mtd_partition *parts; /* partition table */ int nr_parts; /* size of parts */ }; #endif /* __ASM_ARCH_NAND_H */ platform_data/tsl2772.h 0000644 00000007353 14722070374 0010674 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Device driver for monitoring ambient light intensity (lux) * and proximity (prox) within the TAOS TSL2772 family of devices. * * Copyright (c) 2012, TAOS Corporation. 
* Copyright (c) 2017-2018 Brian Masney <masneyb@onstation.org> */ #ifndef __TSL2772_H #define __TSL2772_H struct tsl2772_lux { unsigned int ch0; unsigned int ch1; }; /* Max number of segments allowable in LUX table */ #define TSL2772_MAX_LUX_TABLE_SIZE 6 /* The default LUX tables all have 3 elements. */ #define TSL2772_DEF_LUX_TABLE_SZ 3 #define TSL2772_DEFAULT_TABLE_BYTES (sizeof(struct tsl2772_lux) * \ TSL2772_DEF_LUX_TABLE_SZ) /* Proximity diode to use */ #define TSL2772_DIODE0 0x01 #define TSL2772_DIODE1 0x02 #define TSL2772_DIODE_BOTH 0x03 /* LED Power */ #define TSL2772_100_mA 0x00 #define TSL2772_50_mA 0x01 #define TSL2772_25_mA 0x02 #define TSL2772_13_mA 0x03 /** * struct tsl2772_settings - Settings for the tsl2772 driver * @als_time: Integration time of the ALS channel ADCs in 2.73 ms * increments. Total integration time is * (256 - als_time) * 2.73. * @als_gain: Index into the tsl2772_als_gain array. * @als_gain_trim: Default gain trim to account for aperture effects. * @wait_time: Time between proximity and ALS cycles in 2.73 * periods. * @prox_time: Integration time of the proximity ADC in 2.73 ms * increments. Total integration time is * (256 - prox_time) * 2.73. * @prox_gain: Index into the tsl2772_prx_gain array. * @als_prox_config: The value of the ALS / Proximity configuration * register. * @als_cal_target: Known external ALS reading for calibration. * @als_persistence: H/W Filters, Number of 'out of limits' ALS readings. * @als_interrupt_en: Enable/Disable ALS interrupts * @als_thresh_low: CH0 'low' count to trigger interrupt. * @als_thresh_high: CH0 'high' count to trigger interrupt. * @prox_persistence: H/W Filters, Number of 'out of limits' proximity * readings. * @prox_interrupt_en: Enable/Disable proximity interrupts. * @prox_thres_low: Low threshold proximity detection. * @prox_thres_high: High threshold proximity detection. * @prox_pulse_count: Number of proximity emitter pulses. * @prox_max_samples_cal: The number of samples that are taken when performing * a proximity calibration. * @prox_diode: Which diode(s) to use for driving the external * LED(s) for proximity sensing. * @prox_power: The amount of power to use for the external LED(s).
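 * * Worked example of the time encoding above (the value is illustrative, * not a recommended default): als_time = 219 gives (256 - 219) * 2.73 ms, * i.e. an ALS integration time of roughly 101 ms.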
*/ struct tsl2772_settings { int als_time; int als_gain; int als_gain_trim; int wait_time; int prox_time; int prox_gain; int als_prox_config; int als_cal_target; u8 als_persistence; bool als_interrupt_en; int als_thresh_low; int als_thresh_high; u8 prox_persistence; bool prox_interrupt_en; int prox_thres_low; int prox_thres_high; int prox_pulse_count; int prox_max_samples_cal; int prox_diode; int prox_power; }; /** * struct tsl2772_platform_data - Platform callback, glass and defaults * @platform_lux_table: Device specific glass coefficients * @platform_default_settings: Device specific power-on defaults */ struct tsl2772_platform_data { struct tsl2772_lux platform_lux_table[TSL2772_MAX_LUX_TABLE_SIZE]; struct tsl2772_settings *platform_default_settings; }; #endif /* __TSL2772_H */ platform_data/itco_wdt.h 0000644 00000001071 14722070374 0011353 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Platform data for the Intel TCO Watchdog */ #ifndef _ITCO_WDT_H_ #define _ITCO_WDT_H_ /* Watchdog resources */ #define ICH_RES_IO_TCO 0 #define ICH_RES_IO_SMI 1 #define ICH_RES_MEM_OFF 2 #define ICH_RES_MEM_GCS_PMC 0 struct itco_wdt_platform_data { char name[32]; unsigned int version; /* private data to be passed to update_no_reboot_bit API */ void *no_reboot_priv; /* pointer for platform specific no reboot update function */ int (*update_no_reboot_bit)(void *priv, bool set); }; #endif /* _ITCO_WDT_H_ */ platform_data/asoc-ux500-msp.h 0000644 00000000661 14722070374 0012144 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 * * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson */ #ifndef __MSP_H #define __MSP_H #include <linux/platform_data/dma-ste-dma40.h> /* Platform data structure for an MSP I2S device */ struct msp_i2s_platform_data { int id; struct stedma40_chan_cfg *msp_i2s_dma_rx; struct stedma40_chan_cfg *msp_i2s_dma_tx; }; #endif platform_data/serial-sccnxp.h 0000644 00000003622 14722070374 0012316 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * NXP (Philips) SCC+++(SCN+++) serial driver * * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru> * * Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de) */ #ifndef _PLATFORM_DATA_SERIAL_SCCNXP_H_ #define _PLATFORM_DATA_SERIAL_SCCNXP_H_ #define SCCNXP_MAX_UARTS 2 /* Output lines */ #define LINE_OP0 1 #define LINE_OP1 2 #define LINE_OP2 3 #define LINE_OP3 4 #define LINE_OP4 5 #define LINE_OP5 6 #define LINE_OP6 7 #define LINE_OP7 8 /* Input lines */ #define LINE_IP0 9 #define LINE_IP1 10 #define LINE_IP2 11 #define LINE_IP3 12 #define LINE_IP4 13 #define LINE_IP5 14 #define LINE_IP6 15 /* Signals */ #define DTR_OP 0 /* DTR */ #define RTS_OP 4 /* RTS */ #define DSR_IP 8 /* DSR */ #define CTS_IP 12 /* CTS */ #define DCD_IP 16 /* DCD */ #define RNG_IP 20 /* RNG */ #define DIR_OP 24 /* Special signal for RS-485 control. * Goes high while transmitting, * then goes low.
*/ /* Routing control signal 'sig' to line 'line' */ #define MCTRL_SIG(sig, line) ((line) << (sig)) /* * Example board initialization data: * * static struct resource sc2892_resources[] = { * DEFINE_RES_MEM(UART_PHYS_START, 0x10), * DEFINE_RES_IRQ(IRQ_EXT2), * }; * * static struct sccnxp_pdata sc2892_info = { * .mctrl_cfg[0] = MCTRL_SIG(DIR_OP, LINE_OP0), * .mctrl_cfg[1] = MCTRL_SIG(DIR_OP, LINE_OP1), * }; * * static struct platform_device sc2892 = { * .name = "sc2892", * .id = -1, * .resource = sc2892_resources, * .num_resources = ARRAY_SIZE(sc2892_resources), * .dev = { * .platform_data = &sc2892_info, * }, * }; */ /* SCCNXP platform data structure */ struct sccnxp_pdata { /* Shift for A0 line */ const u8 reg_shift; /* Modem control lines configuration */ const u32 mctrl_cfg[SCCNXP_MAX_UARTS]; /* Timer value for polling mode (usecs) */ const unsigned int poll_time_us; }; #endif platform_data/pxa_sdhci.h 0000644 00000002712 14722070374 0011504 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/platform_data/pxa_sdhci.h * * Copyright 2010 Marvell * Zhangfei Gao <zhangfei.gao@marvell.com> * * PXA Platform - SDHCI platform data definitions */ #ifndef _PXA_SDHCI_H_ #define _PXA_SDHCI_H_ /* pxa specific flag */ /* Require clock free running */ #define PXA_FLAG_ENABLE_CLOCK_GATING (1<<0) /* card always wired to host, like on-chip emmc */ #define PXA_FLAG_CARD_PERMANENT (1<<1) /* Board design supports 8-bit data on SD/SDIO BUS */ #define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2) /* * struct sdhci_pxa_platdata - Platform device data for PXA SDHCI * @flags: flags for platform requirement * @clk_delay_cycles: * mmp2: each step is roughly 100ps, 5bits width * pxa910: each step is 1ns, 4bits width * @clk_delay_sel: select clk_delay, used on pxa910 * 0: choose feedback clk * 1: choose feedback clk + delay value * 2: choose internal clk * @clk_delay_enable: enable clk_delay or not, used on pxa910 * @max_speed: the maximum speed supported * @host_caps: Standard MMC host capabilities bit field. * @quirks: quirks of platform * @quirks2: quirks2 of platform * @pm_caps: pm_caps of platform */ struct sdhci_pxa_platdata { unsigned int flags; unsigned int clk_delay_cycles; unsigned int clk_delay_sel; bool clk_delay_enable; unsigned int max_speed; u32 host_caps; u32 host_caps2; unsigned int quirks; unsigned int quirks2; unsigned int pm_caps; }; #endif /* _PXA_SDHCI_H_ */ platform_data/keypad-omap.h 0000644 00000002464 14722070374 0011755 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2006 Komal Shah <komal_shah802003@yahoo.com> */ #ifndef __KEYPAD_OMAP_H #define __KEYPAD_OMAP_H #ifndef CONFIG_ARCH_OMAP1 #warning Please update the board to use matrix-keypad driver #define omap_readw(reg) 0 #define omap_writew(val, reg) do {} while (0) #endif #include <linux/input/matrix_keypad.h> struct omap_kp_platform_data { int rows; int cols; const struct matrix_keymap_data *keymap_data; bool rep; unsigned long delay; bool dbounce; /* specific to OMAP242x */ unsigned int *row_gpios; unsigned int *col_gpios; }; /* Group (0..3) -- when multiple keys are pressed, only the * keys pressed in the same group are considered as pressed. This is * in order to work around certain crappy HW designs that produce ghost * keypresses. Two free bits, used by neither row/col nor keynum, * must be available for use as group bits. The below GROUP_SHIFT * macro definition is based on some prior knowledge of the * matrix_keypad defined KEY() macro internals.
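 * * A hypothetical keymap entry carrying a group bit (illustrative only; * KEY() comes from matrix_keypad.h): * * KEY(0, 2, GROUP_1 | KEY_ENTER)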
*/ #define GROUP_SHIFT 14 #define GROUP_0 (0 << GROUP_SHIFT) #define GROUP_1 (1 << GROUP_SHIFT) #define GROUP_2 (2 << GROUP_SHIFT) #define GROUP_3 (3 << GROUP_SHIFT) #define GROUP_MASK GROUP_3 #if KEY_MAX & GROUP_MASK #error Group bits in conflict with keynum bits #endif #endif platform_data/omapdss.h 0000644 00000001601 14722070374 0011204 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2016 Texas Instruments, Inc. */ #ifndef __OMAPDSS_PDATA_H #define __OMAPDSS_PDATA_H enum omapdss_version { OMAPDSS_VER_UNKNOWN = 0, OMAPDSS_VER_OMAP24xx, OMAPDSS_VER_OMAP34xx_ES1, /* OMAP3430 ES1.0, 2.0 */ OMAPDSS_VER_OMAP34xx_ES3, /* OMAP3430 ES3.0+ */ OMAPDSS_VER_OMAP3630, OMAPDSS_VER_AM35xx, OMAPDSS_VER_OMAP4430_ES1, /* OMAP4430 ES1.0 */ OMAPDSS_VER_OMAP4430_ES2, /* OMAP4430 ES2.0, 2.1, 2.2 */ OMAPDSS_VER_OMAP4, /* All other OMAP4s */ OMAPDSS_VER_OMAP5, OMAPDSS_VER_AM43xx, OMAPDSS_VER_DRA7xx, }; /* Board specific data */ struct omap_dss_board_info { int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask); void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask); int (*set_min_bus_tput)(struct device *dev, unsigned long r); enum omapdss_version version; }; #endif /* __OMAPDSS_PDATA_H */ platform_data/mmc-mxcmmc.h 0000644 00000002113 14722070374 0011573 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ASMARM_ARCH_MMC_H #define ASMARM_ARCH_MMC_H #include <linux/interrupt.h> #include <linux/mmc/host.h> struct device; /* board specific SDHC data, optional. * If not present, a writable card with 3.3V is assumed. */ struct imxmmc_platform_data { /* Return values for the get_ro callback should be: * 0 for a read/write card * 1 for a read-only card * -ENOSYS when not supported (equal to NULL callback) * or a negative errno value when something bad happened */ int (*get_ro)(struct device *); /* board specific hook to (de)initialize the SD slot. * The board code can call 'handler' on a card detection * change giving data as argument. */ int (*init)(struct device *dev, irq_handler_t handler, void *data); void (*exit)(struct device *dev, void *data); /* available voltages.
If not given, assume * MMC_VDD_32_33 | MMC_VDD_33_34 */ unsigned int ocr_avail; /* adjust slot voltage */ void (*setpower)(struct device *, unsigned int vdd); /* enable card detect using DAT3 */ int dat3_card_detect; }; #endif platform_data/gpio-ath79.h 0000644 00000000512 14722070374 0011426 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Atheros AR7XXX/AR9XXX GPIO controller platform data * * Copyright (C) 2015 Alban Bedel <albeu@free.fr> */ #ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H #define __LINUX_PLATFORM_DATA_GPIO_ATH79_H struct ath79_gpio_platform_data { unsigned ngpios; bool oe_inverted; }; #endif platform_data/tc35876x.h 0000644 00000000254 14722070374 0010754 0 ustar 00 #ifndef _TC35876X_H #define _TC35876X_H struct tc35876x_platform_data { int gpio_bridge_reset; int gpio_panel_bl_en; int gpio_panel_vadd; }; #endif /* _TC35876X_H */ platform_data/mmc-omap.h 0000644 00000006365 14722070374 0011250 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * MMC definitions for OMAP2 * * Copyright (C) 2006 Nokia Corporation */ #define OMAP_MMC_MAX_SLOTS 2 struct mmc_card; struct omap_mmc_platform_data { /* back-link to device */ struct device *dev; /* number of slots per controller */ unsigned nr_slots:2; /* set if your board has components or wiring that limits the * maximum frequency on the MMC bus */ unsigned int max_freq; /* switch the bus to a new slot */ int (*switch_slot)(struct device *dev, int slot); /* initialize board-specific MMC functionality, can be NULL if * not supported */ int (*init)(struct device *dev); void (*cleanup)(struct device *dev); void (*shutdown)(struct device *dev); /* Return context loss count due to PM states changing */ int (*get_context_loss_count)(struct device *dev); /* Integrating attributes from the omap_hwmod layer */ u8 controller_flags; /* Register offset deviation */ u16 reg_offset; struct omap_mmc_slot_data { /* * 4/8 wires and any additional host capabilities; * these need to be OR'd together (ref. linux/mmc/host.h) */ u8 wires; /* Used for the MMC driver on omap1 and 2420 */ u32 caps; /* Used for the MMC driver on 2430 and later */ u32 pm_caps; /* PM capabilities of the mmc */ /* * nomux means "standard" muxing is wrong on this board, and * that board-specific code handled it before common init logic. */ unsigned nomux:1; /* switch pin can be for card detect (default) or card cover */ unsigned cover:1; /* use the internal clock */ unsigned internal_clock:1; /* nonremovable e.g.
eMMC */ unsigned nonremovable:1; /* Try to sleep or power off when possible */ unsigned power_saving:1; /* If using power_saving and the MMC power is not to go off */ unsigned no_off:1; /* eMMC does not handle power off when not in sleep state */ unsigned no_regulator_off_init:1; /* Regulator off remapped to sleep */ unsigned vcc_aux_disable_is_sleep:1; /* we can put the features above into this variable */ #define MMC_OMAP7XX (1 << 3) #define MMC_OMAP15XX (1 << 4) #define MMC_OMAP16XX (1 << 5) unsigned features; int switch_pin; /* gpio (card detect) */ int gpio_wp; /* gpio (write protect) */ int (*set_bus_mode)(struct device *dev, int slot, int bus_mode); int (*set_power)(struct device *dev, int slot, int power_on, int vdd); int (*get_ro)(struct device *dev, int slot); void (*remux)(struct device *dev, int slot, int power_on); /* Call back before enabling / disabling regulators */ void (*before_set_reg)(struct device *dev, int slot, int power_on, int vdd); /* Call back after enabling / disabling regulators */ void (*after_set_reg)(struct device *dev, int slot, int power_on, int vdd); /* if we have special card, init it using this callback */ void (*init_card)(struct mmc_card *card); /* return MMC cover switch state, can be NULL if not supported. * * possible return values: * 0 - closed * 1 - open */ int (*get_cover_state)(struct device *dev, int slot); const char *name; u32 ocr_mask; /* Card detection IRQs */ int card_detect_irq; int (*card_detect)(struct device *dev, int slot); unsigned int ban_openended:1; } slots[OMAP_MMC_MAX_SLOTS]; }; platform_data/ad7298.h 0000644 00000000646 14722070374 0010464 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AD7298 SPI ADC driver * * Copyright 2011 Analog Devices Inc. */ #ifndef __LINUX_PLATFORM_DATA_AD7298_H__ #define __LINUX_PLATFORM_DATA_AD7298_H__ /** * struct ad7298_platform_data - Platform data for the ad7298 ADC driver * @ext_ref: Whether to use an external reference voltage. **/ struct ad7298_platform_data { bool ext_ref; }; #endif /* __LINUX_PLATFORM_DATA_AD7298_H__ */ platform_data/davinci_asp.h 0000644 00000006363 14722070374 0012030 0 ustar 00 /* * TI DaVinci Audio Serial Port support * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef __DAVINCI_ASP_H #define __DAVINCI_ASP_H #include <linux/genalloc.h> struct davinci_mcasp_pdata { u32 tx_dma_offset; u32 rx_dma_offset; int asp_chan_q; /* event queue number for ASP channel */ int ram_chan_q; /* event queue number for RAM channel */ /* * Allowing this is more efficient and eliminates left and right swaps * caused by underruns, but will swap the left and right channels * when compared to previous behavior. */ unsigned enable_channel_combine:1; unsigned sram_size_playback; unsigned sram_size_capture; struct gen_pool *sram_pool; /* * If the McBSP peripheral gets the clock from an external pin, * there are three choices: MCBSP_CLKX, MCBSP_CLKR * and MCBSP_CLKS. * Depending on different hardware connections it is possible * to use this setting to change the behaviour of the McBSP * driver.
*/ int clk_input_pin; /* * This flag works when both clock and FS are outputs for the cpu * and makes clock more accurate (FS is not symmetrical and the * clock is very fast. * The clock becoming faster is named * i2s continuous serial clock (I2S_SCK) and it is an externally * visible bit clock. * * first line : WordSelect * second line : ContinuousSerialClock * third line: SerialData * * SYMMETRICAL APPROACH: * _______________________ LEFT * _| RIGHT |______________________| * _ _ _ _ _ _ _ _ * _| |_| |_ x16 _| |_| |_| |_| |_ x16 _| |_| |_ * _ _ _ _ _ _ _ _ * _/ \_/ \_ ... _/ \_/ \_/ \_/ \_ ... _/ \_/ \_ * \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ * * ACCURATE CLOCK APPROACH: * ______________ LEFT * _| RIGHT |_______________________________| * _ _ _ _ _ _ _ _ _ * _| |_ x16 _| |_| |_ x16 _| |_| |_| |_| |_| |_| | * _ _ _ _ dummy cycles * _/ \_ ... _/ \_/ \_ ... _/ \__________________ * \_/ \_/ \_/ \_/ * */ bool i2s_accurate_sck; /* McASP specific fields */ int tdm_slots; u8 op_mode; u8 dismod; u8 num_serializer; u8 *serial_dir; u8 version; u8 txnumevt; u8 rxnumevt; int tx_dma_channel; int rx_dma_channel; }; /* TODO: Fix arch/arm/mach-davinci/ users and remove this define */ #define snd_platform_data davinci_mcasp_pdata enum { MCASP_VERSION_1 = 0, /* DM646x */ MCASP_VERSION_2, /* DA8xx/OMAPL1x */ MCASP_VERSION_3, /* TI81xx/AM33xx */ MCASP_VERSION_4, /* DRA7xxx */ }; enum mcbsp_clk_input_pin { MCBSP_CLKR = 0, /* as in DM365 */ MCBSP_CLKS, }; #define INACTIVE_MODE 0 #define TX_MODE 1 #define RX_MODE 2 #define DAVINCI_MCASP_IIS_MODE 0 #define DAVINCI_MCASP_DIT_MODE 1 #endif platform_data/ina2xx.h 0000644 00000000634 14722070374 0010754 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Driver for Texas Instruments INA219, INA226 power monitor chips * * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com> * * For further information, see the Documentation/hwmon/ina2xx.rst file. */ /** * struct ina2xx_platform_data - ina2xx info * @shunt_uohms shunt resistance in microohms */ struct ina2xx_platform_data { long shunt_uohms; }; platform_data/pm33xx.h 0000644 00000004271 14722070374 0010706 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * TI pm33xx platform data * * Copyright (C) 2016-2018 Texas Instruments, Inc. * Dave Gerlach <d-gerlach@ti.com> */ #ifndef _LINUX_PLATFORM_DATA_PM33XX_H #define _LINUX_PLATFORM_DATA_PM33XX_H #include <linux/kbuild.h> #include <linux/types.h> /* * WFI Flags for sleep code control * * These flags allow PM code to exclude certain operations from happening * in the low level ASM code found in sleep33xx.S and sleep43xx.S * * WFI_FLAG_FLUSH_CACHE: Flush the ARM caches and disable caching. Only * needed when MPU will lose context. * WFI_FLAG_SELF_REFRESH: Let EMIF place DDR memory into self-refresh and * disable EMIF. * WFI_FLAG_SAVE_EMIF: Save context of all EMIF registers and restore in * resume path. Only needed if PER domain loses context * and must also have WFI_FLAG_SELF_REFRESH set. * WFI_FLAG_WAKE_M3: Disable MPU clock or clockdomain to cause wkup_m3 to * execute when WFI instruction executes. * WFI_FLAG_RTC_ONLY: Configure the RTC to enter RTC+DDR mode. 
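 * * For example (a sketch, not a mandated combination): a suspend state in * which both the MPU and a context-losing PER domain are powered down * could pass WFI_FLAG_FLUSH_CACHE | WFI_FLAG_SELF_REFRESH | * WFI_FLAG_SAVE_EMIF | WFI_FLAG_WAKE_M3, honouring the rule above that * WFI_FLAG_SAVE_EMIF also requires WFI_FLAG_SELF_REFRESH.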
*/ #define WFI_FLAG_FLUSH_CACHE BIT(0) #define WFI_FLAG_SELF_REFRESH BIT(1) #define WFI_FLAG_SAVE_EMIF BIT(2) #define WFI_FLAG_WAKE_M3 BIT(3) #define WFI_FLAG_RTC_ONLY BIT(4) #ifndef __ASSEMBLER__ struct am33xx_pm_sram_addr { void (*do_wfi)(void); unsigned long *do_wfi_sz; unsigned long *resume_offset; unsigned long *emif_sram_table; unsigned long *ro_sram_data; unsigned long resume_address; }; struct am33xx_pm_platform_data { int (*init)(void); int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long), unsigned long args); struct am33xx_pm_sram_addr *(*get_sram_addrs)(void); void __iomem *(*get_rtc_base_addr)(void); void (*save_context)(void); void (*restore_context)(void); void (*prepare_rtc_suspend)(void); void (*prepare_rtc_resume)(void); int (*check_off_mode_enable)(void); }; struct am33xx_pm_sram_data { u32 wfi_flags; u32 l2_aux_ctrl_val; u32 l2_prefetch_ctrl_val; } __packed __aligned(8); struct am33xx_pm_ro_sram_data { u32 amx3_pm_sram_data_virt; u32 amx3_pm_sram_data_phys; void __iomem *rtc_base_virt; } __packed __aligned(8); #endif /* __ASSEMBLER__ */ #endif /* _LINUX_PLATFORM_DATA_PM33XX_H */ platform_data/spi-omap2-mcspi.h 0000644 00000000705 14722070374 0012462 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _OMAP2_MCSPI_H #define _OMAP2_MCSPI_H #define OMAP4_MCSPI_REG_OFFSET 0x100 #define MCSPI_PINDIR_D0_IN_D1_OUT 0 #define MCSPI_PINDIR_D0_OUT_D1_IN 1 struct omap2_mcspi_platform_config { unsigned short num_cs; unsigned int regs_offset; unsigned int pin_dir:1; }; struct omap2_mcspi_device_config { unsigned turbo_mode:1; /* toggle chip select after every word */ unsigned cs_per_word:1; }; #endif platform_data/at91_adc.h 0000644 00000002612 14722070374 0011126 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2011 Free Electrons */ #ifndef _AT91_ADC_H_ #define _AT91_ADC_H_ enum atmel_adc_ts_type { ATMEL_ADC_TOUCHSCREEN_NONE = 0, ATMEL_ADC_TOUCHSCREEN_4WIRE = 4, ATMEL_ADC_TOUCHSCREEN_5WIRE = 5, }; /** * struct at91_adc_trigger - description of triggers * @name: name of the trigger advertised to the user * @value: value to set in the ADC's trigger setup register to enable the trigger * @is_external: Does the trigger rely on an external pin? */ struct at91_adc_trigger { const char *name; u8 value; bool is_external; }; /** * struct at91_adc_data - platform data for ADC driver * @channels_used: channels in use on the board as a bitmask * @startup_time: startup time of the ADC in microseconds * @trigger_list: Triggers available in the ADC * @trigger_number: Number of triggers available in the ADC * @use_external_triggers: whether the board has external triggers available * @vref: Reference voltage for the ADC in millivolts * @touchscreen_type: If a touchscreen is connected, its type (4 or 5 wires) */ struct at91_adc_data { unsigned long channels_used; u8 startup_time; struct at91_adc_trigger *trigger_list; u8 trigger_number; bool use_external_triggers; u16 vref; enum atmel_adc_ts_type touchscreen_type; }; extern void __init at91_add_device_adc(struct at91_adc_data *data); #endif platform_data/omap-wd-timer.h 0000644 00000001605 14722070374 0012224 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * OMAP2+ WDTIMER-specific function prototypes * * Copyright (C) 2012 Texas Instruments, Inc.
* Paul Walmsley */ #ifndef __LINUX_PLATFORM_DATA_OMAP_WD_TIMER_H #define __LINUX_PLATFORM_DATA_OMAP_WD_TIMER_H #include <linux/types.h> /* * Standardized OMAP reset source bits * * These are a subset of the ones listed in arch/arm/mach-omap2/prm.h * and are the only ones needed in the watchdog driver. */ #define OMAP_MPU_WD_RST_SRC_ID_SHIFT 3 /** * struct omap_wd_timer_platform_data - WDTIMER integration to the host SoC * @read_reset_sources: fn ptr for the SoC to indicate the last reset cause * * The function pointed to by @read_reset_sources must return its data * in a standard format - search for RST_SRC_ID_SHIFT in * arch/arm/mach-omap2 */ struct omap_wd_timer_platform_data { u32 (*read_reset_sources)(void); }; #endif platform_data/dsa.h 0000644 00000003165 14722070374 0010314 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DSA_PDATA_H #define __DSA_PDATA_H struct device; struct net_device; #define DSA_MAX_SWITCHES 4 #define DSA_MAX_PORTS 12 #define DSA_RTABLE_NONE -1 struct dsa_chip_data { /* * How to access the switch configuration registers. */ struct device *host_dev; int sw_addr; /* * Reference to network devices */ struct device *netdev[DSA_MAX_PORTS]; /* set to size of eeprom if supported by the switch */ int eeprom_len; /* Device tree node pointer for this specific switch chip * used during switch setup in case additional properties * and resources need to be used */ struct device_node *of_node; /* * The names of the switch's ports. Use "cpu" to * designate the switch port that the cpu is connected to, * "dsa" to indicate that this port is a DSA link to * another switch, NULL to indicate the port is unused, * or any other string to indicate this is a physical port. */ char *port_names[DSA_MAX_PORTS]; struct device_node *port_dn[DSA_MAX_PORTS]; /* * An array in which element [a] indicates which port on this * switch should be used to send packets that are destined * for switch a. Can be NULL if there is only one switch chip. */ s8 rtable[DSA_MAX_SWITCHES]; }; struct dsa_platform_data { /* * Reference to a Linux network interface that connects * to the root switch chip of the tree. */ struct device *netdev; struct net_device *of_netdev; /* * Info structs describing each of the switch chips * connected via this network interface. */ int nr_chips; struct dsa_chip_data *chip; }; #endif /* __DSA_PDATA_H */ platform_data/mtd-davinci.h 0000644 00000004462 14722070374 0011745 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * mach-davinci/nand.h * * Copyright © 2006 Texas Instruments. * * Ported to 2.6.23 Copyright © 2008 by * Sander Huijsen <Shuijsen@optelecom-nkf.com> * Troy Kisky <troy.kisky@boundarydevices.com> * Dirk Behme <Dirk.Behme@gmail.com> * * -------------------------------------------------------------------------- */ #ifndef __ARCH_ARM_DAVINCI_NAND_H #define __ARCH_ARM_DAVINCI_NAND_H #include <linux/mtd/rawnand.h> #define NANDFCR_OFFSET 0x60 #define NANDFSR_OFFSET 0x64 #define NANDF1ECC_OFFSET 0x70 /* 4-bit ECC syndrome registers */ #define NAND_4BIT_ECC_LOAD_OFFSET 0xbc #define NAND_4BIT_ECC1_OFFSET 0xc0 #define NAND_4BIT_ECC2_OFFSET 0xc4 #define NAND_4BIT_ECC3_OFFSET 0xc8 #define NAND_4BIT_ECC4_OFFSET 0xcc #define NAND_ERR_ADD1_OFFSET 0xd0 #define NAND_ERR_ADD2_OFFSET 0xd4 #define NAND_ERR_ERRVAL1_OFFSET 0xd8 #define NAND_ERR_ERRVAL2_OFFSET 0xdc /* NOTE: boards don't need to use these address bits * for ALE/CLE unless they support booting from NAND. * They're used unless platform data overrides them.
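 * * As an illustration (the chip base address is hypothetical): with the * default MASK_CLE of 0x10 below, a command byte is latched by writing to * (chip base + 0x10), i.e. address bit 4 high, and with the default * MASK_ALE of 0x08 an address byte is latched by writing to * (chip base + 0x08), i.e. address bit 3 high.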
*/ #define MASK_ALE 0x08 #define MASK_CLE 0x10 struct davinci_nand_pdata { /* platform_data */ uint32_t mask_ale; uint32_t mask_cle; /* * 0-indexed chip-select number of the asynchronous * interface to which the NAND device has been connected. * * So, if you have NAND connected to CS3 of DA850, you * will pass '1' here. Since the asynchronous interface * on DA850 starts from CS2. */ uint32_t core_chipsel; /* for packages using two chipselects */ uint32_t mask_chipsel; /* board's default static partition info */ struct mtd_partition *parts; unsigned nr_parts; /* none == NAND_ECC_NONE (strongly *not* advised!!) * soft == NAND_ECC_SOFT * else == NAND_ECC_HW, according to ecc_bits * * All DaVinci-family chips support 1-bit hardware ECC. * Newer ones also support 4-bit ECC, but are awkward * using it with large page chips. */ nand_ecc_modes_t ecc_mode; u8 ecc_bits; /* e.g. NAND_BUSWIDTH_16 */ unsigned options; /* e.g. NAND_BBT_USE_FLASH */ unsigned bbt_options; /* Main and mirror bbt descriptor overrides */ struct nand_bbt_descr *bbt_td; struct nand_bbt_descr *bbt_md; /* Access timings */ struct davinci_aemif_timing *timing; }; #endif /* __ARCH_ARM_DAVINCI_NAND_H */ platform_data/max732x.h 0000644 00000001075 14722070374 0010754 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_I2C_MAX732X_H #define __LINUX_I2C_MAX732X_H /* platform data for the MAX732x 8/16-bit I/O expander driver */ struct max732x_platform_data { /* number of the first GPIO */ unsigned gpio_base; /* interrupt base */ int irq_base; void *context; /* param to setup/teardown */ int (*setup)(struct i2c_client *client, unsigned gpio, unsigned ngpio, void *context); int (*teardown)(struct i2c_client *client, unsigned gpio, unsigned ngpio, void *context); }; #endif /* __LINUX_I2C_MAX732X_H */ platform_data/ssm2518.h 0000644 00000000765 14722070374 0010672 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * SSM2518 amplifier audio driver * * Copyright 2013 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> */ #ifndef __LINUX_PLATFORM_DATA_SSM2518_H__ #define __LINUX_PLATFORM_DATA_SSM2518_H__ /** * struct ssm2518_platform_data - Platform data for the ssm2518 driver * @enable_gpio: GPIO connected to the nSD pin. Set to -1 if the nSD pin is * hardwired. */ struct ssm2518_platform_data { int enable_gpio; }; #endif platform_data/cros_ec_commands.h 0000644 00000510414 14722070374 0013043 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Host communication command constants for ChromeOS EC * * Copyright (C) 2012 Google, Inc * * NOTE: This file is auto-generated from ChromeOS EC Open Source code from * https://chromium.googlesource.com/chromiumos/platform/ec/+/master/include/ec_commands.h */ /* Host communication command constants for Chrome EC */ #ifndef __CROS_EC_COMMANDS_H #define __CROS_EC_COMMANDS_H #define BUILD_ASSERT(_cond) /* * Current version of this protocol * * TODO(crosbug.com/p/11223): This is effectively useless; protocol is * determined in other ways. Remove this once the kernel code no longer * depends on it. 
*/ #define EC_PROTO_VERSION 0x00000002 /* Command version mask */ #define EC_VER_MASK(version) BIT(version) /* I/O addresses for ACPI commands */ #define EC_LPC_ADDR_ACPI_DATA 0x62 #define EC_LPC_ADDR_ACPI_CMD 0x66 /* I/O addresses for host command */ #define EC_LPC_ADDR_HOST_DATA 0x200 #define EC_LPC_ADDR_HOST_CMD 0x204 /* I/O addresses for host command args and params */ /* Protocol version 2 */ #define EC_LPC_ADDR_HOST_ARGS 0x800 /* And 0x801, 0x802, 0x803 */ #define EC_LPC_ADDR_HOST_PARAM 0x804 /* For version 2 params; size is * EC_PROTO2_MAX_PARAM_SIZE */ /* Protocol version 3 */ #define EC_LPC_ADDR_HOST_PACKET 0x800 /* Offset of version 3 packet */ #define EC_LPC_HOST_PACKET_SIZE 0x100 /* Max size of version 3 packet */ /* * The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff * and they tell the kernel that so we have to think of it as two parts. */ #define EC_HOST_CMD_REGION0 0x800 #define EC_HOST_CMD_REGION1 0x880 #define EC_HOST_CMD_REGION_SIZE 0x80 /* EC command register bit functions */ #define EC_LPC_CMDR_DATA BIT(0) /* Data ready for host to read */ #define EC_LPC_CMDR_PENDING BIT(1) /* Write pending to EC */ #define EC_LPC_CMDR_BUSY BIT(2) /* EC is busy processing a command */ #define EC_LPC_CMDR_CMD BIT(3) /* Last host write was a command */ #define EC_LPC_CMDR_ACPI_BRST BIT(4) /* Burst mode (not used) */ #define EC_LPC_CMDR_SCI BIT(5) /* SCI event is pending */ #define EC_LPC_CMDR_SMI BIT(6) /* SMI event is pending */ #define EC_LPC_ADDR_MEMMAP 0x900 #define EC_MEMMAP_SIZE 255 /* ACPI IO buffer max is 255 bytes */ #define EC_MEMMAP_TEXT_MAX 8 /* Size of a string in the memory map */ /* The offset address of each type of data in mapped memory. */ #define EC_MEMMAP_TEMP_SENSOR 0x00 /* Temp sensors 0x00 - 0x0f */ #define EC_MEMMAP_FAN 0x10 /* Fan speeds 0x10 - 0x17 */ #define EC_MEMMAP_TEMP_SENSOR_B 0x18 /* More temp sensors 0x18 - 0x1f */ #define EC_MEMMAP_ID 0x20 /* 0x20 == 'E', 0x21 == 'C' */ #define EC_MEMMAP_ID_VERSION 0x22 /* Version of data in 0x20 - 0x2f */ #define EC_MEMMAP_THERMAL_VERSION 0x23 /* Version of data in 0x00 - 0x1f */ #define EC_MEMMAP_BATTERY_VERSION 0x24 /* Version of data in 0x40 - 0x7f */ #define EC_MEMMAP_SWITCHES_VERSION 0x25 /* Version of data in 0x30 - 0x33 */ #define EC_MEMMAP_EVENTS_VERSION 0x26 /* Version of data in 0x34 - 0x3f */ #define EC_MEMMAP_HOST_CMD_FLAGS 0x27 /* Host cmd interface flags (8 bits) */ /* Unused 0x28 - 0x2f */ #define EC_MEMMAP_SWITCHES 0x30 /* 8 bits */ /* Unused 0x31 - 0x33 */ #define EC_MEMMAP_HOST_EVENTS 0x34 /* 64 bits */ /* Battery values are all 32 bits, unless otherwise noted. 
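 *
 * For example (illustrative sketch), a host on a bus without mapped
 * memory can fetch the present battery voltage with EC_CMD_READ_MEMMAP,
 * defined later in this header:
 *
 *	struct ec_params_read_memmap p = {
 *		.offset	= EC_MEMMAP_BATT_VOLT,
 *		.size	= 4,
 *	};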
*/ #define EC_MEMMAP_BATT_VOLT 0x40 /* Battery Present Voltage */ #define EC_MEMMAP_BATT_RATE 0x44 /* Battery Present Rate */ #define EC_MEMMAP_BATT_CAP 0x48 /* Battery Remaining Capacity */ #define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, see below (8-bit) */ #define EC_MEMMAP_BATT_COUNT 0x4d /* Battery Count (8-bit) */ #define EC_MEMMAP_BATT_INDEX 0x4e /* Current Battery Data Index (8-bit) */ /* Unused 0x4f */ #define EC_MEMMAP_BATT_DCAP 0x50 /* Battery Design Capacity */ #define EC_MEMMAP_BATT_DVLT 0x54 /* Battery Design Voltage */ #define EC_MEMMAP_BATT_LFCC 0x58 /* Battery Last Full Charge Capacity */ #define EC_MEMMAP_BATT_CCNT 0x5c /* Battery Cycle Count */ /* Strings are all 8 bytes (EC_MEMMAP_TEXT_MAX) */ #define EC_MEMMAP_BATT_MFGR 0x60 /* Battery Manufacturer String */ #define EC_MEMMAP_BATT_MODEL 0x68 /* Battery Model Number String */ #define EC_MEMMAP_BATT_SERIAL 0x70 /* Battery Serial Number String */ #define EC_MEMMAP_BATT_TYPE 0x78 /* Battery Type String */ #define EC_MEMMAP_ALS 0x80 /* ALS readings in lux (2 X 16 bits) */ /* Unused 0x84 - 0x8f */ #define EC_MEMMAP_ACC_STATUS 0x90 /* Accelerometer status (8 bits )*/ /* Unused 0x91 */ #define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometers data 0x92 - 0x9f */ /* 0x92: Lid Angle if available, LID_ANGLE_UNRELIABLE otherwise */ /* 0x94 - 0x99: 1st Accelerometer */ /* 0x9a - 0x9f: 2nd Accelerometer */ #define EC_MEMMAP_GYRO_DATA 0xa0 /* Gyroscope data 0xa0 - 0xa5 */ /* Unused 0xa6 - 0xdf */ /* * ACPI is unable to access memory mapped data at or above this offset due to * limitations of the ACPI protocol. Do not place data in the range 0xe0 - 0xfe * which might be needed by ACPI. */ #define EC_MEMMAP_NO_ACPI 0xe0 /* Define the format of the accelerometer mapped memory status byte. */ #define EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK 0x0f #define EC_MEMMAP_ACC_STATUS_BUSY_BIT BIT(4) #define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT BIT(7) /* Number of temp sensors at EC_MEMMAP_TEMP_SENSOR */ #define EC_TEMP_SENSOR_ENTRIES 16 /* * Number of temp sensors at EC_MEMMAP_TEMP_SENSOR_B. * * Valid only if EC_MEMMAP_THERMAL_VERSION returns >= 2. */ #define EC_TEMP_SENSOR_B_ENTRIES 8 /* Special values for mapped temperature sensors */ #define EC_TEMP_SENSOR_NOT_PRESENT 0xff #define EC_TEMP_SENSOR_ERROR 0xfe #define EC_TEMP_SENSOR_NOT_POWERED 0xfd #define EC_TEMP_SENSOR_NOT_CALIBRATED 0xfc /* * The offset of temperature value stored in mapped memory. This allows * reporting a temperature range of 200K to 454K = -73C to 181C. */ #define EC_TEMP_SENSOR_OFFSET 200 /* * Number of ALS readings at EC_MEMMAP_ALS */ #define EC_ALS_ENTRIES 2 /* * The default value a temperature sensor will return when it is present but * has not been read this boot. This is a reasonable number to avoid * triggering alarms on the host. */ #define EC_TEMP_SENSOR_DEFAULT (296 - EC_TEMP_SENSOR_OFFSET) #define EC_FAN_SPEED_ENTRIES 4 /* Number of fans at EC_MEMMAP_FAN */ #define EC_FAN_SPEED_NOT_PRESENT 0xffff /* Entry not present */ #define EC_FAN_SPEED_STALLED 0xfffe /* Fan stalled */ /* Battery bit flags at EC_MEMMAP_BATT_FLAG. */ #define EC_BATT_FLAG_AC_PRESENT 0x01 #define EC_BATT_FLAG_BATT_PRESENT 0x02 #define EC_BATT_FLAG_DISCHARGING 0x04 #define EC_BATT_FLAG_CHARGING 0x08 #define EC_BATT_FLAG_LEVEL_CRITICAL 0x10 /* Set if some of the static/dynamic data is invalid (or outdated). 
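 *
 * Illustrative check (sketch; ec_readb() is a hypothetical helper that
 * reads one byte of mapped memory):
 *
 *	if (ec_readb(EC_MEMMAP_BATT_FLAG) & EC_BATT_FLAG_INVALID_DATA)
 *		return -EAGAIN;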
*/ #define EC_BATT_FLAG_INVALID_DATA 0x20 /* Switch flags at EC_MEMMAP_SWITCHES */ #define EC_SWITCH_LID_OPEN 0x01 #define EC_SWITCH_POWER_BUTTON_PRESSED 0x02 #define EC_SWITCH_WRITE_PROTECT_DISABLED 0x04 /* Was recovery requested via keyboard; now unused. */ #define EC_SWITCH_IGNORE1 0x08 /* Recovery requested via dedicated signal (from servo board) */ #define EC_SWITCH_DEDICATED_RECOVERY 0x10 /* Was fake developer mode switch; now unused. Remove in next refactor. */ #define EC_SWITCH_IGNORE0 0x20 /* Host command interface flags */ /* Host command interface supports LPC args (LPC interface only) */ #define EC_HOST_CMD_FLAG_LPC_ARGS_SUPPORTED 0x01 /* Host command interface supports version 3 protocol */ #define EC_HOST_CMD_FLAG_VERSION_3 0x02 /* Wireless switch flags */ #define EC_WIRELESS_SWITCH_ALL ~0x00 /* All flags */ #define EC_WIRELESS_SWITCH_WLAN 0x01 /* WLAN radio */ #define EC_WIRELESS_SWITCH_BLUETOOTH 0x02 /* Bluetooth radio */ #define EC_WIRELESS_SWITCH_WWAN 0x04 /* WWAN power */ #define EC_WIRELESS_SWITCH_WLAN_POWER 0x08 /* WLAN power */ /*****************************************************************************/ /* * ACPI commands * * These are valid ONLY on the ACPI command/data port. */ /* * ACPI Read Embedded Controller * * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). * * Use the following sequence: * * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD * - Wait for EC_LPC_CMDR_PENDING bit to clear * - Write address to EC_LPC_ADDR_ACPI_DATA * - Wait for EC_LPC_CMDR_DATA bit to set * - Read value from EC_LPC_ADDR_ACPI_DATA */ #define EC_CMD_ACPI_READ 0x0080 /* * ACPI Write Embedded Controller * * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). * * Use the following sequence: * * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD * - Wait for EC_LPC_CMDR_PENDING bit to clear * - Write address to EC_LPC_ADDR_ACPI_DATA * - Wait for EC_LPC_CMDR_PENDING bit to clear * - Write value to EC_LPC_ADDR_ACPI_DATA */ #define EC_CMD_ACPI_WRITE 0x0081 /* * ACPI Burst Enable Embedded Controller * * This enables burst mode on the EC to allow the host to issue several * commands back-to-back. While in this mode, writes to mapped multi-byte * data are locked out to ensure data consistency. */ #define EC_CMD_ACPI_BURST_ENABLE 0x0082 /* * ACPI Burst Disable Embedded Controller * * This disables burst mode on the EC and stops preventing EC writes to mapped * multi-byte data. */ #define EC_CMD_ACPI_BURST_DISABLE 0x0083 /* * ACPI Query Embedded Controller * * This clears the lowest-order bit in the currently pending host events, and * sets the result code to the 1-based index of the bit (event 0x00000001 = 1, * event 0x80000000 = 32), or 0 if no event was pending. */ #define EC_CMD_ACPI_QUERY_EVENT 0x0084 /* Valid addresses in ACPI memory space, for read/write commands */ /* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */ #define EC_ACPI_MEM_VERSION 0x00 /* * Test location; writing value here updates test compliment byte to (0xff - * value). */ #define EC_ACPI_MEM_TEST 0x01 /* Test compliment; writes here are ignored. */ #define EC_ACPI_MEM_TEST_COMPLIMENT 0x02 /* Keyboard backlight brightness percent (0 - 100) */ #define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03 /* DPTF Target Fan Duty (0-100, 0xff for auto/none) */ #define EC_ACPI_MEM_FAN_DUTY 0x04 /* * DPTF temp thresholds. Any of the EC's temp sensors can have up to two * independent thresholds attached to them. 
The current value of the ID * register determines which sensor is affected by the THRESHOLD and COMMIT * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme * as the memory-mapped sensors. The COMMIT register applies those settings. * * The spec does not mandate any way to read back the threshold settings * themselves, but when a threshold is crossed the AP needs a way to determine * which sensor(s) are responsible. Each reading of the ID register clears and * returns one sensor ID that has crossed one of its threshold (in either * direction) since the last read. A value of 0xFF means "no new thresholds * have tripped". Setting or enabling the thresholds for a sensor will clear * the unread event count for that sensor. */ #define EC_ACPI_MEM_TEMP_ID 0x05 #define EC_ACPI_MEM_TEMP_THRESHOLD 0x06 #define EC_ACPI_MEM_TEMP_COMMIT 0x07 /* * Here are the bits for the COMMIT register: * bit 0 selects the threshold index for the chosen sensor (0/1) * bit 1 enables/disables the selected threshold (0 = off, 1 = on) * Each write to the commit register affects one threshold. */ #define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK BIT(0) #define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK BIT(1) /* * Example: * * Set the thresholds for sensor 2 to 50 C and 60 C: * write 2 to [0x05] -- select temp sensor 2 * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET * write 0x2 to [0x07] -- enable threshold 0 with this value * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET * write 0x3 to [0x07] -- enable threshold 1 with this value * * Disable the 60 C threshold, leaving the 50 C threshold unchanged: * write 2 to [0x05] -- select temp sensor 2 * write 0x1 to [0x07] -- disable threshold 1 */ /* DPTF battery charging current limit */ #define EC_ACPI_MEM_CHARGING_LIMIT 0x08 /* Charging limit is specified in 64 mA steps */ #define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64 /* Value to disable DPTF battery charging limit */ #define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff /* * Report device orientation * Bits Definition * 3:1 Device DPTF Profile Number (DDPN) * 0 = Reserved for backward compatibility (indicates no valid * profile number. Host should fall back to using TBMD). * 1..7 = DPTF Profile number to indicate to host which table needs * to be loaded. * 0 Tablet Mode Device Indicator (TBMD) */ #define EC_ACPI_MEM_DEVICE_ORIENTATION 0x09 #define EC_ACPI_MEM_TBMD_SHIFT 0 #define EC_ACPI_MEM_TBMD_MASK 0x1 #define EC_ACPI_MEM_DDPN_SHIFT 1 #define EC_ACPI_MEM_DDPN_MASK 0x7 /* * Report device features. Uses the same format as the host command, except: * * bit 0 (EC_FEATURE_LIMITED) changes meaning from "EC code has a limited set * of features", which is of limited interest when the system is already * interpreting ACPI bytecode, to "EC_FEATURES[0-7] is not supported". Since * these are supported, it defaults to 0. * This allows detecting the presence of this field since older versions of * the EC codebase would simply return 0xff to that unknown address. Check * FEATURES0 != 0xff (or FEATURES0[0] == 0) to make sure that the other bits * are valid. */ #define EC_ACPI_MEM_DEVICE_FEATURES0 0x0a #define EC_ACPI_MEM_DEVICE_FEATURES1 0x0b #define EC_ACPI_MEM_DEVICE_FEATURES2 0x0c #define EC_ACPI_MEM_DEVICE_FEATURES3 0x0d #define EC_ACPI_MEM_DEVICE_FEATURES4 0x0e #define EC_ACPI_MEM_DEVICE_FEATURES5 0x0f #define EC_ACPI_MEM_DEVICE_FEATURES6 0x10 #define EC_ACPI_MEM_DEVICE_FEATURES7 0x11 #define EC_ACPI_MEM_BATTERY_INDEX 0x12 /* * USB Port Power. 
Each bit indicates whether the corresponding USB port's power * is enabled (1) or disabled (0). * bit 0 USB port ID 0 * ... * bit 7 USB port ID 7 */ #define EC_ACPI_MEM_USB_PORT_POWER 0x13 /* * ACPI addresses 0x20 - 0xff map to EC_MEMMAP offset 0x00 - 0xdf. This data * is read-only from the AP. Added in EC_ACPI_MEM_VERSION 2. */ #define EC_ACPI_MEM_MAPPED_BEGIN 0x20 #define EC_ACPI_MEM_MAPPED_SIZE 0xe0 /* Current version of ACPI memory address space */ #define EC_ACPI_MEM_VERSION_CURRENT 2 /* * This header file is used in coreboot both in C and ACPI code. The ACPI code * is pre-processed to handle constants but the ASL compiler is unable to * handle actual C code so keep it separate. */ /* * Attributes for EC request and response packets. Just defining __packed * results in inefficient assembly code on ARM, if the structure is actually * 32-bit aligned, as it should be for all buffers. * * Be very careful when adding these to existing structures. They will round * up the structure size to the specified boundary. * * Also be very careful to make sure that if a structure is included in some * other parent structure, the alignment will still be true given the packing * of the parent structure. This is particularly important if the sub-structure * will be passed as a pointer to another function, since that function will * not know about the misalignment caused by the parent structure's packing. * * Also be very careful using __packed - particularly when nesting non-packed * structures inside packed ones. In fact, DO NOT use __packed directly; * always use one of these attributes. * * Once everything is annotated properly, the following search strings should * not return ANY matches in this file other than right here: * * "__packed" - generates inefficient code; all sub-structs must also be packed * * "struct [^_]" - all structs should be annotated, except for structs that are * members of other structs/unions (and their original declarations should be * annotated). */ /* * Packed structures make no assumption about alignment, so they do inefficient * byte-wise reads. */ #define __ec_align1 __packed #define __ec_align2 __packed #define __ec_align4 __packed #define __ec_align_size1 __packed #define __ec_align_offset1 __packed #define __ec_align_offset2 __packed #define __ec_todo_packed __packed #define __ec_todo_unpacked /* LPC command status byte masks */ /* EC has written a byte in the data register and host hasn't read it yet */ #define EC_LPC_STATUS_TO_HOST 0x01 /* Host has written a command/data byte and the EC hasn't read it yet */ #define EC_LPC_STATUS_FROM_HOST 0x02 /* EC is processing a command */ #define EC_LPC_STATUS_PROCESSING 0x04 /* Last write to EC was a command, not data */ #define EC_LPC_STATUS_LAST_CMD 0x08 /* EC is in burst mode */ #define EC_LPC_STATUS_BURST_MODE 0x10 /* SCI event is pending (requesting SCI query) */ #define EC_LPC_STATUS_SCI_PENDING 0x20 /* SMI event is pending (requesting SMI query) */ #define EC_LPC_STATUS_SMI_PENDING 0x40 /* (reserved) */ #define EC_LPC_STATUS_RESERVED 0x80 /* * EC is busy. This covers both the EC processing a command, and the host has * written a new command but the EC hasn't picked it up yet. */ #define EC_LPC_STATUS_BUSY_MASK \ (EC_LPC_STATUS_FROM_HOST | EC_LPC_STATUS_PROCESSING) /* * Host command response codes (16-bit). Note that response codes should be * stored in a uint16_t rather than directly in a value of this type.
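 *
 * For example (illustrative sketch; resp is a pointer to the
 * struct ec_host_response defined later in this header):
 *
 *	uint16_t result = resp->result;
 *
 *	if (result != EC_RES_SUCCESS)
 *		return result;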
*/ enum ec_status { EC_RES_SUCCESS = 0, EC_RES_INVALID_COMMAND = 1, EC_RES_ERROR = 2, EC_RES_INVALID_PARAM = 3, EC_RES_ACCESS_DENIED = 4, EC_RES_INVALID_RESPONSE = 5, EC_RES_INVALID_VERSION = 6, EC_RES_INVALID_CHECKSUM = 7, EC_RES_IN_PROGRESS = 8, /* Accepted, command in progress */ EC_RES_UNAVAILABLE = 9, /* No response available */ EC_RES_TIMEOUT = 10, /* We got a timeout */ EC_RES_OVERFLOW = 11, /* Table / data overflow */ EC_RES_INVALID_HEADER = 12, /* Header contains invalid data */ EC_RES_REQUEST_TRUNCATED = 13, /* Didn't get the entire request */ EC_RES_RESPONSE_TOO_BIG = 14, /* Response was too big to handle */ EC_RES_BUS_ERROR = 15, /* Communications bus error */ EC_RES_BUSY = 16, /* Up but too busy. Should retry */ EC_RES_INVALID_HEADER_VERSION = 17, /* Header version invalid */ EC_RES_INVALID_HEADER_CRC = 18, /* Header CRC invalid */ EC_RES_INVALID_DATA_CRC = 19, /* Data CRC invalid */ EC_RES_DUP_UNAVAILABLE = 20, /* Can't resend response */ }; /* * Host event codes. Note these are 1-based, not 0-based, because ACPI query * EC command uses code 0 to mean "no event pending". We explicitly specify * each value in the enum listing so they won't change if we delete/insert an * item or rearrange the list (it needs to be stable across platforms, not * just within a single compiled instance). */ enum host_event_code { EC_HOST_EVENT_LID_CLOSED = 1, EC_HOST_EVENT_LID_OPEN = 2, EC_HOST_EVENT_POWER_BUTTON = 3, EC_HOST_EVENT_AC_CONNECTED = 4, EC_HOST_EVENT_AC_DISCONNECTED = 5, EC_HOST_EVENT_BATTERY_LOW = 6, EC_HOST_EVENT_BATTERY_CRITICAL = 7, EC_HOST_EVENT_BATTERY = 8, EC_HOST_EVENT_THERMAL_THRESHOLD = 9, /* Event generated by a device attached to the EC */ EC_HOST_EVENT_DEVICE = 10, EC_HOST_EVENT_THERMAL = 11, EC_HOST_EVENT_USB_CHARGER = 12, EC_HOST_EVENT_KEY_PRESSED = 13, /* * EC has finished initializing the host interface. The host can check * for this event following sending a EC_CMD_REBOOT_EC command to * determine when the EC is ready to accept subsequent commands. */ EC_HOST_EVENT_INTERFACE_READY = 14, /* Keyboard recovery combo has been pressed */ EC_HOST_EVENT_KEYBOARD_RECOVERY = 15, /* Shutdown due to thermal overload */ EC_HOST_EVENT_THERMAL_SHUTDOWN = 16, /* Shutdown due to battery level too low */ EC_HOST_EVENT_BATTERY_SHUTDOWN = 17, /* Suggest that the AP throttle itself */ EC_HOST_EVENT_THROTTLE_START = 18, /* Suggest that the AP resume normal speed */ EC_HOST_EVENT_THROTTLE_STOP = 19, /* Hang detect logic detected a hang and host event timeout expired */ EC_HOST_EVENT_HANG_DETECT = 20, /* Hang detect logic detected a hang and warm rebooted the AP */ EC_HOST_EVENT_HANG_REBOOT = 21, /* PD MCU triggering host event */ EC_HOST_EVENT_PD_MCU = 22, /* Battery Status flags have changed */ EC_HOST_EVENT_BATTERY_STATUS = 23, /* EC encountered a panic, triggering a reset */ EC_HOST_EVENT_PANIC = 24, /* Keyboard fastboot combo has been pressed */ EC_HOST_EVENT_KEYBOARD_FASTBOOT = 25, /* EC RTC event occurred */ EC_HOST_EVENT_RTC = 26, /* Emulate MKBP event */ EC_HOST_EVENT_MKBP = 27, /* EC desires to change state of host-controlled USB mux */ EC_HOST_EVENT_USB_MUX = 28, /* TABLET/LAPTOP mode or detachable base attach/detach event */ EC_HOST_EVENT_MODE_CHANGE = 29, /* Keyboard recovery combo with hardware reinitialization */ EC_HOST_EVENT_KEYBOARD_RECOVERY_HW_REINIT = 30, /* * The high bit of the event mask is not used as a host event code. If * it reads back as set, then the entire event mask should be * considered invalid by the host. 
This can happen when reading the * raw event status via EC_MEMMAP_HOST_EVENTS but the LPC interface is * not initialized on the EC, or improperly configured on the host. */ EC_HOST_EVENT_INVALID = 32 }; /* Host event mask */ #define EC_HOST_EVENT_MASK(event_code) BIT_ULL((event_code) - 1) /** * struct ec_lpc_host_args - Arguments at EC_LPC_ADDR_HOST_ARGS * @flags: The host argument flags. * @command_version: Command version. * @data_size: The length of data. * @checksum: Checksum; sum of command + flags + command_version + data_size + * all params/response data bytes. */ struct ec_lpc_host_args { uint8_t flags; uint8_t command_version; uint8_t data_size; uint8_t checksum; } __ec_align4; /* Flags for ec_lpc_host_args.flags */ /* * Args are from host. Data area at EC_LPC_ADDR_HOST_PARAM contains command * params. * * If EC gets a command and this flag is not set, this is an old-style command. * Command version is 0 and params from host are at EC_LPC_ADDR_OLD_PARAM with * unknown length. EC must respond with an old-style response (that is, * without setting EC_HOST_ARGS_FLAG_TO_HOST). */ #define EC_HOST_ARGS_FLAG_FROM_HOST 0x01 /* * Args are from EC. Data area at EC_LPC_ADDR_HOST_PARAM contains response. * * If EC responds to a command and this flag is not set, this is an old-style * response. Command version is 0 and response data from EC is at * EC_LPC_ADDR_OLD_PARAM with unknown length. */ #define EC_HOST_ARGS_FLAG_TO_HOST 0x02 /*****************************************************************************/ /* * Byte codes returned by EC over SPI interface. * * These can be used by the AP to debug the EC interface, and to determine * when the EC is not in a state where it will ever get around to responding * to the AP. * * Example of sequence of bytes read from EC for a current good transfer: * 1. - - AP asserts chip select (CS#) * 2. EC_SPI_OLD_READY - AP sends first byte(s) of request * 3. - - EC starts handling CS# interrupt * 4. EC_SPI_RECEIVING - AP sends remaining byte(s) of request * 5. EC_SPI_PROCESSING - EC starts processing request; AP is clocking in * bytes looking for EC_SPI_FRAME_START * 6. - - EC finishes processing and sets up response * 7. EC_SPI_FRAME_START - AP reads frame byte * 8. (response packet) - AP reads response packet * 9. EC_SPI_PAST_END - Any additional bytes read by AP * 10 - - AP deasserts chip select * 11 - - EC processes CS# interrupt and sets up DMA for * next request * * If the AP is waiting for EC_SPI_FRAME_START and sees any value other than * the following byte values: * EC_SPI_OLD_READY * EC_SPI_RX_READY * EC_SPI_RECEIVING * EC_SPI_PROCESSING * * Then the EC found an error in the request, or was not ready for the request * and lost data. The AP should give up waiting for EC_SPI_FRAME_START, * because the EC is unable to tell when the AP is done sending its request. */ /* * Framing byte which precedes a response packet from the EC. After sending a * request, the AP will clock in bytes until it sees the framing byte, then * clock in the response packet. */ #define EC_SPI_FRAME_START 0xec /* * Padding bytes which are clocked out after the end of a response packet. */ #define EC_SPI_PAST_END 0xed /* * EC is ready to receive, and has ignored the byte sent by the AP. EC expects * that the AP will send a valid packet header (starting with * EC_COMMAND_PROTOCOL_3) in the next 32 bytes. */ #define EC_SPI_RX_READY 0xf8 /* * EC has started receiving the request from the AP, but hasn't started * processing it yet. 
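 *
 * Illustrative AP-side wait for the framing byte described above
 * (sketch; spi_read_byte() is a hypothetical helper):
 *
 *	uint8_t b;
 *
 *	do {
 *		b = spi_read_byte();
 *	} while (b == EC_SPI_OLD_READY || b == EC_SPI_RX_READY ||
 *		 b == EC_SPI_RECEIVING || b == EC_SPI_PROCESSING);
 *	if (b != EC_SPI_FRAME_START)
 *		return -EIO;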
*/ #define EC_SPI_RECEIVING 0xf9 /* EC has received the entire request from the AP and is processing it. */ #define EC_SPI_PROCESSING 0xfa /* * EC received bad data from the AP, such as a packet header with an invalid * length. EC will ignore all data until chip select deasserts. */ #define EC_SPI_RX_BAD_DATA 0xfb /* * EC received data from the AP before it was ready. That is, the AP asserted * chip select and started clocking data before the EC was ready to receive it. * EC will ignore all data until chip select deasserts. */ #define EC_SPI_NOT_READY 0xfc /* * EC was ready to receive a request from the AP. EC has treated the byte sent * by the AP as part of a request packet, or (for old-style ECs) is processing * a fully received packet but is not ready to respond yet. */ #define EC_SPI_OLD_READY 0xfd /*****************************************************************************/ /* * Protocol version 2 for I2C and SPI send a request this way: * * 0 EC_CMD_VERSION0 + (command version) * 1 Command number * 2 Length of params = N * 3..N+2 Params, if any * N+3 8-bit checksum of bytes 0..N+2 * * The corresponding response is: * * 0 Result code (EC_RES_*) * 1 Length of params = M * 2..M+1 Params, if any * M+2 8-bit checksum of bytes 0..M+1 */ #define EC_PROTO2_REQUEST_HEADER_BYTES 3 #define EC_PROTO2_REQUEST_TRAILER_BYTES 1 #define EC_PROTO2_REQUEST_OVERHEAD (EC_PROTO2_REQUEST_HEADER_BYTES + \ EC_PROTO2_REQUEST_TRAILER_BYTES) #define EC_PROTO2_RESPONSE_HEADER_BYTES 2 #define EC_PROTO2_RESPONSE_TRAILER_BYTES 1 #define EC_PROTO2_RESPONSE_OVERHEAD (EC_PROTO2_RESPONSE_HEADER_BYTES + \ EC_PROTO2_RESPONSE_TRAILER_BYTES) /* Parameter length was limited by the LPC interface */ #define EC_PROTO2_MAX_PARAM_SIZE 0xfc /* Maximum request and response packet sizes for protocol version 2 */ #define EC_PROTO2_MAX_REQUEST_SIZE (EC_PROTO2_REQUEST_OVERHEAD + \ EC_PROTO2_MAX_PARAM_SIZE) #define EC_PROTO2_MAX_RESPONSE_SIZE (EC_PROTO2_RESPONSE_OVERHEAD + \ EC_PROTO2_MAX_PARAM_SIZE) /*****************************************************************************/ /* * Value written to legacy command port / prefix byte to indicate protocol * 3+ structs are being used. Usage is bus-dependent. */ #define EC_COMMAND_PROTOCOL_3 0xda #define EC_HOST_REQUEST_VERSION 3 /** * struct ec_host_request - Version 3 request from host. * @struct_version: Should be 3. The EC will return EC_RES_INVALID_HEADER if it * receives a header with a version it doesn't know how to * parse. * @checksum: Checksum of request and data; sum of all bytes including checksum * should total to 0. * @command: Command to send (EC_CMD_...) * @command_version: Command version. * @reserved: Unused byte in current protocol version; set to 0. * @data_len: Length of data which follows this header. */ struct ec_host_request { uint8_t struct_version; uint8_t checksum; uint16_t command; uint8_t command_version; uint8_t reserved; uint16_t data_len; } __ec_align4; #define EC_HOST_RESPONSE_VERSION 3 /** * struct ec_host_response - Version 3 response from EC. * @struct_version: Struct version (=3). * @checksum: Checksum of response and data; sum of all bytes including * checksum should total to 0. * @result: EC's response to the command (separate from communication failure) * @data_len: Length of data which follows this header. * @reserved: Unused bytes in current protocol version; set to 0. 
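 *
 * Illustrative checksum computation (sketch), the same for
 * struct ec_host_request and this struct: with @checksum initially 0,
 * sum every byte of the header plus the trailing data, then store the
 * two's complement so the bytes total 0 modulo 256:
 *
 *	uint8_t sum = 0;
 *	size_t i;
 *
 *	for (i = 0; i < sizeof(*rs) + rs->data_len; i++)
 *		sum += ((const uint8_t *)rs)[i];
 *	rs->checksum = (uint8_t)-sum;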
*/ struct ec_host_response { uint8_t struct_version; uint8_t checksum; uint16_t result; uint16_t data_len; uint16_t reserved; } __ec_align4; /*****************************************************************************/ /* * Host command protocol V4. * * Packets always start with a request or response header. They are followed * by data_len bytes of data. If the data_crc_present flag is set, the data * bytes are followed by a CRC-8 of that data, using the x^8 + x^2 + x + 1 * polynomial. * * Host algorithm when sending a request q: * * 101) tries_left=(some value, e.g. 3); * 102) q.seq_num++ * 103) q.seq_dup=0 * 104) Calculate q.header_crc. * 105) Send request q to EC. * 106) Wait for response r. Go to 201 if received or 301 if timeout. * * 201) If r.struct_version != 4, go to 301. * 202) If r.header_crc mismatches calculated CRC for r header, go to 301. * 203) If r.data_crc_present and r.data_crc mismatches, go to 301. * 204) If r.seq_num != q.seq_num, go to 301. * 205) If r.seq_dup == q.seq_dup, return success. * 207) If r.seq_dup == 1, go to 301. * 208) Return error. * * 301) If --tries_left <= 0, return error. * 302) If q.seq_dup == 1, go to 105. * 303) q.seq_dup = 1 * 304) Go to 104. * * EC algorithm when receiving a request q. * EC has response buffer r, error buffer e. * * 101) If q.struct_version != 4, set e.result = EC_RES_INVALID_HEADER_VERSION * and go to 301 * 102) If q.header_crc mismatches calculated CRC, set e.result = * EC_RES_INVALID_HEADER_CRC and go to 301 * 103) If q.data_crc_present, calculate data CRC. If that mismatches the CRC * byte at the end of the packet, set e.result = EC_RES_INVALID_DATA_CRC * and go to 301. * 104) If q.seq_dup == 0, go to 201. * 105) If q.seq_num != r.seq_num, go to 201. * 106) If q.seq_dup == r.seq_dup, go to 205, else go to 203. * * 201) Process request q into response r. * 202) r.seq_num = q.seq_num * 203) r.seq_dup = q.seq_dup * 204) Calculate r.header_crc * 205) If r.data_len > 0 and data is no longer available, set e.result = * EC_RES_DUP_UNAVAILABLE and go to 301. * 206) Send response r. * * 301) e.seq_num = q.seq_num * 302) e.seq_dup = q.seq_dup * 303) Calculate e.header_crc. * 304) Send error response e.
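 *
 * Illustrative CRC-8 over the header fields (sketch; this assumes the
 * common bitwise form of the x^8 + x^2 + x + 1 polynomial, i.e. 0x07,
 * with a zero initial value, which may differ from the EC's variant):
 *
 *	uint8_t crc = 0;
 *	size_t i;
 *	int b;
 *
 *	for (i = 0; i < len; i++) {
 *		crc ^= buf[i];
 *		for (b = 0; b < 8; b++)
 *			crc = (crc & 0x80) ? (uint8_t)(crc << 1) ^ 0x07
 *					   : (uint8_t)(crc << 1);
 *	}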
*/ /* Version 4 request from host */ struct ec_host_request4 { /* * bits 0-3: struct_version: Structure version (=4) * bit 4: is_response: Is response (=0) * bits 5-6: seq_num: Sequence number * bit 7: seq_dup: Sequence duplicate flag */ uint8_t fields0; /* * bits 0-4: command_version: Command version * bits 5-6: Reserved (set 0, ignore on read) * bit 7: data_crc_present: Is data CRC present after data */ uint8_t fields1; /* Command code (EC_CMD_*) */ uint16_t command; /* Length of data which follows this header (not including data CRC) */ uint16_t data_len; /* Reserved (set 0, ignore on read) */ uint8_t reserved; /* CRC-8 of above fields, using x^8 + x^2 + x + 1 polynomial */ uint8_t header_crc; } __ec_align4; /* Version 4 response from EC */ struct ec_host_response4 { /* * bits 0-3: struct_version: Structure version (=4) * bit 4: is_response: Is response (=1) * bits 5-6: seq_num: Sequence number * bit 7: seq_dup: Sequence duplicate flag */ uint8_t fields0; /* * bits 0-6: Reserved (set 0, ignore on read) * bit 7: data_crc_present: Is data CRC present after data */ uint8_t fields1; /* Result code (EC_RES_*) */ uint16_t result; /* Length of data which follows this header (not including data CRC) */ uint16_t data_len; /* Reserved (set 0, ignore on read) */ uint8_t reserved; /* CRC-8 of above fields, using x^8 + x^2 + x + 1 polynomial */ uint8_t header_crc; } __ec_align4; /* Fields in fields0 byte */ #define EC_PACKET4_0_STRUCT_VERSION_MASK 0x0f #define EC_PACKET4_0_IS_RESPONSE_MASK 0x10 #define EC_PACKET4_0_SEQ_NUM_SHIFT 5 #define EC_PACKET4_0_SEQ_NUM_MASK 0x60 #define EC_PACKET4_0_SEQ_DUP_MASK 0x80 /* Fields in fields1 byte */ #define EC_PACKET4_1_COMMAND_VERSION_MASK 0x1f /* (request only) */ #define EC_PACKET4_1_DATA_CRC_PRESENT_MASK 0x80 /*****************************************************************************/ /* * Notes on commands: * * Each command is an 16-bit command value. Commands which take params or * return response data specify structures for that data. If no structure is * specified, the command does not input or output data, respectively. * Parameter/response length is implicit in the structs. Some underlying * communication protocols (I2C, SPI) may add length or checksum headers, but * those are implementation-dependent and not defined here. * * All commands MUST be #defined to be 4-digit UPPER CASE hex values * (e.g., 0x00AB, not 0xab) for CONFIG_HOSTCMD_SECTION_SORTED to work. */ /*****************************************************************************/ /* General / test commands */ /* * Get protocol version, used to deal with non-backward compatible protocol * changes. */ #define EC_CMD_PROTO_VERSION 0x0000 /** * struct ec_response_proto_version - Response to the proto version command. * @version: The protocol version. */ struct ec_response_proto_version { uint32_t version; } __ec_align4; /* * Hello. This is a simple command to test the EC is responsive to * commands. */ #define EC_CMD_HELLO 0x0001 /** * struct ec_params_hello - Parameters to the hello command. * @in_data: Pass anything here. */ struct ec_params_hello { uint32_t in_data; } __ec_align4; /** * struct ec_response_hello - Response to the hello command. * @out_data: Output will be in_data + 0x01020304. */ struct ec_response_hello { uint32_t out_data; } __ec_align4; /* Get version number */ #define EC_CMD_GET_VERSION 0x0002 enum ec_current_image { EC_IMAGE_UNKNOWN = 0, EC_IMAGE_RO, EC_IMAGE_RW }; /** * struct ec_response_get_version - Response to the get version command. 
* @version_string_ro: Null-terminated RO firmware version string. * @version_string_rw: Null-terminated RW firmware version string. * @reserved: Unused bytes; was previously RW-B firmware version string. * @current_image: One of ec_current_image. */ struct ec_response_get_version { char version_string_ro[32]; char version_string_rw[32]; char reserved[32]; uint32_t current_image; } __ec_align4; /* Read test */ #define EC_CMD_READ_TEST 0x0003 /** * struct ec_params_read_test - Parameters for the read test command. * @offset: Starting value for read buffer. * @size: Size to read in bytes. */ struct ec_params_read_test { uint32_t offset; uint32_t size; } __ec_align4; /** * struct ec_response_read_test - Response to the read test command. * @data: Data returned by the read test command. */ struct ec_response_read_test { uint32_t data[32]; } __ec_align4; /* * Get build information * * Response is null-terminated string. */ #define EC_CMD_GET_BUILD_INFO 0x0004 /* Get chip info */ #define EC_CMD_GET_CHIP_INFO 0x0005 /** * struct ec_response_get_chip_info - Response to the get chip info command. * @vendor: Null-terminated string for chip vendor. * @name: Null-terminated string for chip name. * @revision: Null-terminated string for chip mask version. */ struct ec_response_get_chip_info { char vendor[32]; char name[32]; char revision[32]; } __ec_align4; /* Get board HW version */ #define EC_CMD_GET_BOARD_VERSION 0x0006 /** * struct ec_response_board_version - Response to the board version command. * @board_version: A monotonously incrementing number. */ struct ec_response_board_version { uint16_t board_version; } __ec_align2; /* * Read memory-mapped data. * * This is an alternate interface to memory-mapped data for bus protocols * which don't support direct-mapped memory - I2C, SPI, etc. * * Response is params.size bytes of data. */ #define EC_CMD_READ_MEMMAP 0x0007 /** * struct ec_params_read_memmap - Parameters for the read memory map command. * @offset: Offset in memmap (EC_MEMMAP_*). * @size: Size to read in bytes. */ struct ec_params_read_memmap { uint8_t offset; uint8_t size; } __ec_align1; /* Read versions supported for a command */ #define EC_CMD_GET_CMD_VERSIONS 0x0008 /** * struct ec_params_get_cmd_versions - Parameters for the get command versions. * @cmd: Command to check. */ struct ec_params_get_cmd_versions { uint8_t cmd; } __ec_align1; /** * struct ec_params_get_cmd_versions_v1 - Parameters for the get command * versions (v1) * @cmd: Command to check. */ struct ec_params_get_cmd_versions_v1 { uint16_t cmd; } __ec_align2; /** * struct ec_response_get_cmd_version - Response to the get command versions. * @version_mask: Mask of supported versions; use EC_VER_MASK() to compare with * a desired version. */ struct ec_response_get_cmd_versions { uint32_t version_mask; } __ec_align4; /* * Check EC communications status (busy). This is needed on i2c/spi but not * on lpc since it has its own out-of-band busy indicator. * * lpc must read the status from the command register. Attempting this on * lpc will overwrite the args/parameter space and corrupt its data. */ #define EC_CMD_GET_COMMS_STATUS 0x0009 /* Avoid using ec_status which is for return values */ enum ec_comms_status { EC_COMMS_STATUS_PROCESSING = BIT(0), /* Processing cmd */ }; /** * struct ec_response_get_comms_status - Response to the get comms status * command. * @flags: Mask of enum ec_comms_status. 
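 *
 * Illustrative poll loop (sketch; read_comms_status() is a hypothetical
 * wrapper that issues EC_CMD_GET_COMMS_STATUS):
 *
 *	struct ec_response_get_comms_status r;
 *
 *	do {
 *		r = read_comms_status();
 *	} while (r.flags & EC_COMMS_STATUS_PROCESSING);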
*/ struct ec_response_get_comms_status { uint32_t flags; /* Mask of enum ec_comms_status */ } __ec_align4; /* Fake a variety of responses, purely for testing purposes. */ #define EC_CMD_TEST_PROTOCOL 0x000A /* Tell the EC what to send back to us. */ struct ec_params_test_protocol { uint32_t ec_result; uint32_t ret_len; uint8_t buf[32]; } __ec_align4; /* Here it comes... */ struct ec_response_test_protocol { uint8_t buf[32]; } __ec_align4; /* Get protocol information */ #define EC_CMD_GET_PROTOCOL_INFO 0x000B /* Flags for ec_response_get_protocol_info.flags */ /* EC_RES_IN_PROGRESS may be returned if a command is slow */ #define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED BIT(0) /** * struct ec_response_get_protocol_info - Response to the get protocol info. * @protocol_versions: Bitmask of protocol versions supported (1 << n means * version n). * @max_request_packet_size: Maximum request packet size in bytes. * @max_response_packet_size: Maximum response packet size in bytes. * @flags: see EC_PROTOCOL_INFO_* */ struct ec_response_get_protocol_info { /* Fields which exist if at least protocol version 3 supported */ uint32_t protocol_versions; uint16_t max_request_packet_size; uint16_t max_response_packet_size; uint32_t flags; } __ec_align4; /*****************************************************************************/ /* Get/Set miscellaneous values */ /* The upper byte of .flags tells what to do (nothing means "get") */ #define EC_GSV_SET 0x80000000 /* * The lower three bytes of .flags identifies the parameter, if that has * meaning for an individual command. */ #define EC_GSV_PARAM_MASK 0x00ffffff struct ec_params_get_set_value { uint32_t flags; uint32_t value; } __ec_align4; struct ec_response_get_set_value { uint32_t flags; uint32_t value; } __ec_align4; /* More than one command can use these structs to get/set parameters. */ #define EC_CMD_GSV_PAUSE_IN_S5 0x000C /*****************************************************************************/ /* List the features supported by the firmware */ #define EC_CMD_GET_FEATURES 0x000D /* Supported features */ enum ec_feature_code { /* * This image contains a limited set of features. Another image * in RW partition may support more features. */ EC_FEATURE_LIMITED = 0, /* * Commands for probing/reading/writing/erasing the flash in the * EC are present. */ EC_FEATURE_FLASH = 1, /* * Can control the fan speed directly. */ EC_FEATURE_PWM_FAN = 2, /* * Can control the intensity of the keyboard backlight. */ EC_FEATURE_PWM_KEYB = 3, /* * Support Google lightbar, introduced on Pixel. */ EC_FEATURE_LIGHTBAR = 4, /* Control of LEDs */ EC_FEATURE_LED = 5, /* Exposes an interface to control gyro and sensors. * The host goes through the EC to access these sensors. * In addition, the EC may provide composite sensors, like lid angle. */ EC_FEATURE_MOTION_SENSE = 6, /* The keyboard is controlled by the EC */ EC_FEATURE_KEYB = 7, /* The AP can use part of the EC flash as persistent storage. */ EC_FEATURE_PSTORE = 8, /* The EC monitors BIOS port 80h, and can return POST codes. */ EC_FEATURE_PORT80 = 9, /* * Thermal management: include TMP specific commands. * Higher level than direct fan control. */ EC_FEATURE_THERMAL = 10, /* Can switch the screen backlight on/off */ EC_FEATURE_BKLIGHT_SWITCH = 11, /* Can switch the wifi module on/off */ EC_FEATURE_WIFI_SWITCH = 12, /* Monitor host events, through for example SMI or SCI */ EC_FEATURE_HOST_EVENTS = 13, /* The EC exposes GPIO commands to control/monitor connected devices. 
*/ EC_FEATURE_GPIO = 14, /* The EC can send i2c messages to downstream devices. */ EC_FEATURE_I2C = 15, /* Commands to control the charger are included */ EC_FEATURE_CHARGER = 16, /* Simple battery support. */ EC_FEATURE_BATTERY = 17, /* * Support Smart battery protocol * (Common Smart Battery System Interface Specification) */ EC_FEATURE_SMART_BATTERY = 18, /* EC can detect when the host hangs. */ EC_FEATURE_HANG_DETECT = 19, /* Report power information, for pit only */ EC_FEATURE_PMU = 20, /* Another Cros EC device is present downstream of this one */ EC_FEATURE_SUB_MCU = 21, /* Support USB Power delivery (PD) commands */ EC_FEATURE_USB_PD = 22, /* Control USB multiplexer, for audio through USB port for instance. */ EC_FEATURE_USB_MUX = 23, /* Motion Sensor code has an internal software FIFO */ EC_FEATURE_MOTION_SENSE_FIFO = 24, /* Support temporary secure vstore */ EC_FEATURE_VSTORE = 25, /* EC decides on USB-C SS mux state, muxes configured by host */ EC_FEATURE_USBC_SS_MUX_VIRTUAL = 26, /* EC has RTC feature that can be controlled by host commands */ EC_FEATURE_RTC = 27, /* The MCU exposes a Fingerprint sensor */ EC_FEATURE_FINGERPRINT = 28, /* The MCU exposes a Touchpad */ EC_FEATURE_TOUCHPAD = 29, /* The MCU has RWSIG task enabled */ EC_FEATURE_RWSIG = 30, /* EC has device events support */ EC_FEATURE_DEVICE_EVENT = 31, /* EC supports the unified wake masks for LPC/eSPI systems */ EC_FEATURE_UNIFIED_WAKE_MASKS = 32, /* EC supports 64-bit host events */ EC_FEATURE_HOST_EVENT64 = 33, /* EC runs code in RAM (not in place, a.k.a. XIP) */ EC_FEATURE_EXEC_IN_RAM = 34, /* EC supports CEC commands */ EC_FEATURE_CEC = 35, /* EC supports tight sensor timestamping. */ EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS = 36, /* * EC supports tablet mode detection aligned to Chrome and allows * setting of threshold by host command using * MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE. */ EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS = 37, /* EC supports audio codec. */ EC_FEATURE_AUDIO_CODEC = 38, /* The MCU is a System Companion Processor (SCP). */ EC_FEATURE_SCP = 39, /* The MCU is an Integrated Sensor Hub */ EC_FEATURE_ISH = 40, }; #define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32) #define EC_FEATURE_MASK_1(event_code) BIT(event_code - 32) struct ec_response_get_features { uint32_t flags[2]; } __ec_align4; /*****************************************************************************/ /* Get the board's SKU ID from EC */ #define EC_CMD_GET_SKU_ID 0x000E /* Set SKU ID from AP */ #define EC_CMD_SET_SKU_ID 0x000F struct ec_sku_id_info { uint32_t sku_id; } __ec_align4; /*****************************************************************************/ /* Flash commands */ /* Get flash info */ #define EC_CMD_FLASH_INFO 0x0010 #define EC_VER_FLASH_INFO 2 /** * struct ec_response_flash_info - Response to the flash info command. * @flash_size: Usable flash size in bytes. * @write_block_size: Write block size. Write offset and size must be a * multiple of this. * @erase_block_size: Erase block size. Erase offset and size must be a * multiple of this. * @protect_block_size: Protection block size. Protection offset and size * must be a multiple of this. * * Version 0 returns these fields. */ struct ec_response_flash_info { uint32_t flash_size; uint32_t write_block_size; uint32_t erase_block_size; uint32_t protect_block_size; } __ec_align4; /* * Flags for version 1+ flash info command * EC flash erases bits to 0 instead of 1.
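 *
 * For example (illustrative sketch), the erased-byte value to expect
 * when verifying an erase follows from this flag:
 *
 *	uint8_t erased = (info.flags & EC_FLASH_INFO_ERASE_TO_0) ?
 *			 0x00 : 0xff;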
*/ #define EC_FLASH_INFO_ERASE_TO_0 BIT(0) /* * Flash must be selected for read/write/erase operations to succeed. This may * be necessary on a chip where write/erase can be corrupted by other board * activity, or where the chip needs to enable some sort of programming voltage, * or where the read/write/erase operations require cleanly suspending other * chip functionality. */ #define EC_FLASH_INFO_SELECT_REQUIRED BIT(1) /** * struct ec_response_flash_info_1 - Response to the flash info v1 command. * @flash_size: Usable flash size in bytes. * @write_block_size: Write block size. Write offset and size must be a * multiple of this. * @erase_block_size: Erase block size. Erase offset and size must be a * multiple of this. * @protect_block_size: Protection block size. Protection offset and size * must be a multiple of this. * @write_ideal_size: Ideal write size in bytes. Writes will be fastest if * size is exactly this and offset is a multiple of this. * For example, an EC may have a write buffer which can do * half-page operations if data is aligned, and a slower * word-at-a-time write mode. * @flags: Flags; see EC_FLASH_INFO_* * * Version 1 returns the same initial fields as version 0, with additional * fields following. * * gcc anonymous structs don't seem to get along with the __packed directive; * if they did we'd define the version 0 structure as a sub-structure of this * one. * * Version 2 supports flash banks of different sizes: * The caller specifies the number of banks it has preallocated * (num_banks_desc). * The EC returns the number of banks describing the flash memory. * It adds bank descriptions up to num_banks_desc. */ struct ec_response_flash_info_1 { /* Version 0 fields; see above for description */ uint32_t flash_size; uint32_t write_block_size; uint32_t erase_block_size; uint32_t protect_block_size; /* Version 1 adds these fields: */ uint32_t write_ideal_size; uint32_t flags; } __ec_align4; struct ec_params_flash_info_2 { /* Number of banks to describe */ uint16_t num_banks_desc; /* Reserved; set 0; ignore on read */ uint8_t reserved[2]; } __ec_align4; struct ec_flash_bank { /* Number of sectors in this bank. */ uint16_t count; /* Size in power of 2 of each sector (8 --> 256 bytes) */ uint8_t size_exp; /* Minimal write size for the sectors in this bank */ uint8_t write_size_exp; /* Erase size for the sectors in this bank */ uint8_t erase_size_exp; /* Size for write protection, usually identical to erase size. */ uint8_t protect_size_exp; /* Reserved; set 0; ignore on read */ uint8_t reserved[2]; }; struct ec_response_flash_info_2 { /* Total flash in the EC. */ uint32_t flash_size; /* Flags; see EC_FLASH_INFO_* */ uint32_t flags; /* Maximum size to use to send data to write to the EC. */ uint32_t write_ideal_size; /* Number of banks present in the EC. */ uint16_t num_banks_total; /* Number of banks described in banks array. */ uint16_t num_banks_desc; struct ec_flash_bank banks[0]; } __ec_align4; /* * Read flash * * Response is params.size bytes of data. */ #define EC_CMD_FLASH_READ 0x0011 /** * struct ec_params_flash_read - Parameters for the flash read command. * @offset: Byte offset to read. * @size: Size to read in bytes. */ struct ec_params_flash_read { uint32_t offset; uint32_t size; } __ec_align4; /* Write flash */ #define EC_CMD_FLASH_WRITE 0x0012 #define EC_VER_FLASH_WRITE 1 /* Version 0 of the flash command supported only 64 bytes of data */ #define EC_FLASH_WRITE_VER0_SIZE 64 /** * struct ec_params_flash_write - Parameters for the flash write command.
* @offset: Byte offset to write. * @size: Size to write in bytes. */ struct ec_params_flash_write { uint32_t offset; uint32_t size; /* Followed by data to write */ } __ec_align4; /* Erase flash */ #define EC_CMD_FLASH_ERASE 0x0013 /** * struct ec_params_flash_erase - Parameters for the flash erase command, v0. * @offset: Byte offset to erase. * @size: Size to erase in bytes. */ struct ec_params_flash_erase { uint32_t offset; uint32_t size; } __ec_align4; /* * v1 adds async erase: * subcommands can return: * EC_RES_SUCCESS : erased (see ERASE_SECTOR_ASYNC case below). * EC_RES_INVALID_PARAM : offset/size are not aligned on an erase boundary. * EC_RES_ERROR : other errors. * EC_RES_BUSY : an existing erase operation is in progress. * EC_RES_ACCESS_DENIED: Trying to erase the running image. * * When ERASE_SECTOR_ASYNC returns EC_RES_SUCCESS, the operation is just * properly queued. The user must call the ERASE_GET_RESULT subcommand to get * the proper result. * When ERASE_GET_RESULT returns EC_RES_BUSY, the caller must wait and send * ERASE_GET_RESULT again to get the result of ERASE_SECTOR_ASYNC. * The ERASE_GET_RESULT command may time out on an EC where flash access is not * permitted while erasing (for instance, STM32F4). */ enum ec_flash_erase_cmd { FLASH_ERASE_SECTOR, /* Erase and wait for result */ FLASH_ERASE_SECTOR_ASYNC, /* Erase and return immediately. */ FLASH_ERASE_GET_RESULT, /* Ask for last erase result */ }; /** * struct ec_params_flash_erase_v1 - Parameters for the flash erase command, v1. * @cmd: One of ec_flash_erase_cmd. * @reserved: Pad byte; currently always contains 0. * @flag: No flags defined yet; set to 0. * @params: Same as v0 parameters. */ struct ec_params_flash_erase_v1 { uint8_t cmd; uint8_t reserved; uint16_t flag; struct ec_params_flash_erase params; } __ec_align4; /* * Get/set flash protection. * * If mask!=0, sets/clears the requested bits of flags. Depending on the * firmware write protect GPIO, not all flags will take effect immediately; * some flags require a subsequent hard reset to take effect. Check the * returned flags bits to see what actually happened. * * If mask=0, simply returns the current flags state. */ #define EC_CMD_FLASH_PROTECT 0x0015 #define EC_VER_FLASH_PROTECT 1 /* Command version 1 */ /* Flags for flash protection */ /* RO flash code protected when the EC boots */ #define EC_FLASH_PROTECT_RO_AT_BOOT BIT(0) /* * RO flash code protected now. If this bit is set, at-boot status cannot * be changed. */ #define EC_FLASH_PROTECT_RO_NOW BIT(1) /* Entire flash code protected now, until reboot. */ #define EC_FLASH_PROTECT_ALL_NOW BIT(2) /* Flash write protect GPIO is asserted now */ #define EC_FLASH_PROTECT_GPIO_ASSERTED BIT(3) /* Error - at least one bank of flash is stuck locked, and cannot be unlocked */ #define EC_FLASH_PROTECT_ERROR_STUCK BIT(4) /* * Error - flash protection is in an inconsistent state. At least one bank of * flash which should be protected is not protected. Usually fixed by * re-requesting the desired flags, or by a hard reset if that fails. */ #define EC_FLASH_PROTECT_ERROR_INCONSISTENT BIT(5) /* Entire flash code protected when the EC boots */ #define EC_FLASH_PROTECT_ALL_AT_BOOT BIT(6) /* RW flash code protected when the EC boots */ #define EC_FLASH_PROTECT_RW_AT_BOOT BIT(7) /* RW flash code protected now.
*/ #define EC_FLASH_PROTECT_RW_NOW BIT(8) /* Rollback information flash region protected when the EC boots */ #define EC_FLASH_PROTECT_ROLLBACK_AT_BOOT BIT(9) /* Rollback information flash region protected now */ #define EC_FLASH_PROTECT_ROLLBACK_NOW BIT(10) /** * struct ec_params_flash_protect - Parameters for the flash protect command. * @mask: Bits in flags to apply. * @flags: New flags to apply. */ struct ec_params_flash_protect { uint32_t mask; uint32_t flags; } __ec_align4; /** * struct ec_response_flash_protect - Response to the flash protect command. * @flags: Current value of flash protect flags. * @valid_flags: Flags which are valid on this platform. This allows the * caller to distinguish between flags which aren't set vs. flags * which can't be set on this platform. * @writable_flags: Flags which can be changed given the current protection * state. */ struct ec_response_flash_protect { uint32_t flags; uint32_t valid_flags; uint32_t writable_flags; } __ec_align4; /* * Note: commands 0x14 - 0x19 version 0 were old commands to get/set flash * write protect. These commands may be reused with version > 0. */ /* Get the region offset/size */ #define EC_CMD_FLASH_REGION_INFO 0x0016 #define EC_VER_FLASH_REGION_INFO 1 enum ec_flash_region { /* Region which holds read-only EC image */ EC_FLASH_REGION_RO = 0, /* * Region which holds active RW image. 'Active' is different from * 'running'. Active means 'scheduled-to-run'. Since RO image always * scheduled to run, active/non-active applies only to RW images (for * the same reason 'update' applies only to RW images. It's a state of * an image on a flash. Running image can be RO, RW_A, RW_B but active * image can only be RW_A or RW_B. In recovery mode, an active RW image * doesn't enter 'running' state but it's still active on a flash. */ EC_FLASH_REGION_ACTIVE, /* * Region which should be write-protected in the factory (a superset of * EC_FLASH_REGION_RO) */ EC_FLASH_REGION_WP_RO, /* Region which holds updatable (non-active) RW image */ EC_FLASH_REGION_UPDATE, /* Number of regions */ EC_FLASH_REGION_COUNT, }; /* * 'RW' is vague if there are multiple RW images; we mean the active one, * so the old constant is deprecated. */ #define EC_FLASH_REGION_RW EC_FLASH_REGION_ACTIVE /** * struct ec_params_flash_region_info - Parameters for the flash region info * command. * @region: Flash region; see EC_FLASH_REGION_* */ struct ec_params_flash_region_info { uint32_t region; } __ec_align4; struct ec_response_flash_region_info { uint32_t offset; uint32_t size; } __ec_align4; /* Read/write VbNvContext */ #define EC_CMD_VBNV_CONTEXT 0x0017 #define EC_VER_VBNV_CONTEXT 1 #define EC_VBNV_BLOCK_SIZE 16 enum ec_vbnvcontext_op { EC_VBNV_CONTEXT_OP_READ, EC_VBNV_CONTEXT_OP_WRITE, }; struct ec_params_vbnvcontext { uint32_t op; uint8_t block[EC_VBNV_BLOCK_SIZE]; } __ec_align4; struct ec_response_vbnvcontext { uint8_t block[EC_VBNV_BLOCK_SIZE]; } __ec_align4; /* Get SPI flash information */ #define EC_CMD_FLASH_SPI_INFO 0x0018 struct ec_response_flash_spi_info { /* JEDEC info from command 0x9F (manufacturer, memory type, size) */ uint8_t jedec[3]; /* Pad byte; currently always contains 0 */ uint8_t reserved0; /* Manufacturer / device ID from command 0x90 */ uint8_t mfr_dev_id[2]; /* Status registers from command 0x05 and 0x35 */ uint8_t sr1, sr2; } __ec_align1; /* Select flash during flash operations */ #define EC_CMD_FLASH_SELECT 0x0019 /** * struct ec_params_flash_select - Parameters for the flash select command. 
* @select: 1 to select flash, 0 to deselect flash */ struct ec_params_flash_select { uint8_t select; } __ec_align4; /*****************************************************************************/ /* PWM commands */ /* Get fan target RPM */ #define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x0020 struct ec_response_pwm_get_fan_rpm { uint32_t rpm; } __ec_align4; /* Set target fan RPM */ #define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x0021 /* Version 0 of input params */ struct ec_params_pwm_set_fan_target_rpm_v0 { uint32_t rpm; } __ec_align4; /* Version 1 of input params */ struct ec_params_pwm_set_fan_target_rpm_v1 { uint32_t rpm; uint8_t fan_idx; } __ec_align_size1; /* Get keyboard backlight */ /* OBSOLETE - Use EC_CMD_PWM_SET_DUTY */ #define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x0022 struct ec_response_pwm_get_keyboard_backlight { uint8_t percent; uint8_t enabled; } __ec_align1; /* Set keyboard backlight */ /* OBSOLETE - Use EC_CMD_PWM_SET_DUTY */ #define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x0023 struct ec_params_pwm_set_keyboard_backlight { uint8_t percent; } __ec_align1; /* Set target fan PWM duty cycle */ #define EC_CMD_PWM_SET_FAN_DUTY 0x0024 /* Version 0 of input params */ struct ec_params_pwm_set_fan_duty_v0 { uint32_t percent; } __ec_align4; /* Version 1 of input params */ struct ec_params_pwm_set_fan_duty_v1 { uint32_t percent; uint8_t fan_idx; } __ec_align_size1; #define EC_CMD_PWM_SET_DUTY 0x0025 /* 16 bit duty cycle, 0xffff = 100% */ #define EC_PWM_MAX_DUTY 0xffff enum ec_pwm_type { /* All types, indexed by board-specific enum pwm_channel */ EC_PWM_TYPE_GENERIC = 0, /* Keyboard backlight */ EC_PWM_TYPE_KB_LIGHT, /* Display backlight */ EC_PWM_TYPE_DISPLAY_LIGHT, EC_PWM_TYPE_COUNT, }; struct ec_params_pwm_set_duty { uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ uint8_t pwm_type; /* ec_pwm_type */ uint8_t index; /* Type-specific index, or 0 if unique */ } __ec_align4; #define EC_CMD_PWM_GET_DUTY 0x0026 struct ec_params_pwm_get_duty { uint8_t pwm_type; /* ec_pwm_type */ uint8_t index; /* Type-specific index, or 0 if unique */ } __ec_align1; struct ec_response_pwm_get_duty { uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ } __ec_align2; /*****************************************************************************/ /* * Lightbar commands. This looks worse than it is. Since we only use one HOST * command to say "talk to the lightbar", we put the "and tell it to do X" part * into a subcommand. We'll make separate structs for subcommands with * different input args, so that we know how much to expect. */ #define EC_CMD_LIGHTBAR_CMD 0x0028 struct rgb_s { uint8_t r, g, b; } __ec_todo_unpacked; #define LB_BATTERY_LEVELS 4 /* * List of tweakable parameters. NOTE: It's __packed so it can be sent in a * host command, but the alignment is the same regardless. Keep it that way. */ struct lightbar_params_v0 { /* Timing */ int32_t google_ramp_up; int32_t google_ramp_down; int32_t s3s0_ramp_up; int32_t s0_tick_delay[2]; /* AC=0/1 */ int32_t s0a_tick_delay[2]; /* AC=0/1 */ int32_t s0s3_ramp_down; int32_t s3_sleep_for; int32_t s3_ramp_up; int32_t s3_ramp_down; /* Oscillation */ uint8_t new_s0; uint8_t osc_min[2]; /* AC=0/1 */ uint8_t osc_max[2]; /* AC=0/1 */ uint8_t w_ofs[2]; /* AC=0/1 */ /* Brightness limits based on the backlight and AC. 
*/ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */ uint8_t bright_bl_on_min[2]; /* AC=0/1 */ uint8_t bright_bl_on_max[2]; /* AC=0/1 */ /* Battery level thresholds */ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1]; /* Map [AC][battery_level] to color index */ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ /* Color palette */ struct rgb_s color[8]; /* 0-3 are Google colors */ } __ec_todo_packed; struct lightbar_params_v1 { /* Timing */ int32_t google_ramp_up; int32_t google_ramp_down; int32_t s3s0_ramp_up; int32_t s0_tick_delay[2]; /* AC=0/1 */ int32_t s0a_tick_delay[2]; /* AC=0/1 */ int32_t s0s3_ramp_down; int32_t s3_sleep_for; int32_t s3_ramp_up; int32_t s3_ramp_down; int32_t s5_ramp_up; int32_t s5_ramp_down; int32_t tap_tick_delay; int32_t tap_gate_delay; int32_t tap_display_time; /* Tap-for-battery params */ uint8_t tap_pct_red; uint8_t tap_pct_green; uint8_t tap_seg_min_on; uint8_t tap_seg_max_on; uint8_t tap_seg_osc; uint8_t tap_idx[3]; /* Oscillation */ uint8_t osc_min[2]; /* AC=0/1 */ uint8_t osc_max[2]; /* AC=0/1 */ uint8_t w_ofs[2]; /* AC=0/1 */ /* Brightness limits based on the backlight and AC. */ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */ uint8_t bright_bl_on_min[2]; /* AC=0/1 */ uint8_t bright_bl_on_max[2]; /* AC=0/1 */ /* Battery level thresholds */ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1]; /* Map [AC][battery_level] to color index */ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ /* s5: single color pulse on inhibited power-up */ uint8_t s5_idx; /* Color palette */ struct rgb_s color[8]; /* 0-3 are Google colors */ } __ec_todo_packed; /* Lightbar command params v2 * crbug.com/467716 * * lightbar_parms_v1 was too big for i2c, therefore in v2, we split them up by * logical groups to make it more manageable ( < 120 bytes). * * NOTE: Each of these groups must be less than 120 bytes. */ struct lightbar_params_v2_timing { /* Timing */ int32_t google_ramp_up; int32_t google_ramp_down; int32_t s3s0_ramp_up; int32_t s0_tick_delay[2]; /* AC=0/1 */ int32_t s0a_tick_delay[2]; /* AC=0/1 */ int32_t s0s3_ramp_down; int32_t s3_sleep_for; int32_t s3_ramp_up; int32_t s3_ramp_down; int32_t s5_ramp_up; int32_t s5_ramp_down; int32_t tap_tick_delay; int32_t tap_gate_delay; int32_t tap_display_time; } __ec_todo_packed; struct lightbar_params_v2_tap { /* Tap-for-battery params */ uint8_t tap_pct_red; uint8_t tap_pct_green; uint8_t tap_seg_min_on; uint8_t tap_seg_max_on; uint8_t tap_seg_osc; uint8_t tap_idx[3]; } __ec_todo_packed; struct lightbar_params_v2_oscillation { /* Oscillation */ uint8_t osc_min[2]; /* AC=0/1 */ uint8_t osc_max[2]; /* AC=0/1 */ uint8_t w_ofs[2]; /* AC=0/1 */ } __ec_todo_packed; struct lightbar_params_v2_brightness { /* Brightness limits based on the backlight and AC. */ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */ uint8_t bright_bl_on_min[2]; /* AC=0/1 */ uint8_t bright_bl_on_max[2]; /* AC=0/1 */ } __ec_todo_packed; struct lightbar_params_v2_thresholds { /* Battery level thresholds */ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1]; } __ec_todo_packed; struct lightbar_params_v2_colors { /* Map [AC][battery_level] to color index */ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ /* s5: single color pulse on inhibited power-up */ uint8_t s5_idx; /* Color palette */ struct rgb_s color[8]; /* 0-3 are Google colors */ } __ec_todo_packed; /* Lightbar program. 
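 *
 * Illustrative use (sketch; assumes @size counts the valid bytes in
 * @data, with n <= EC_LB_PROG_LEN):
 *
 *	struct lightbar_program p = { .size = n };
 *
 *	memcpy(p.data, prog, n);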
*/ #define EC_LB_PROG_LEN 192 struct lightbar_program { uint8_t size; uint8_t data[EC_LB_PROG_LEN]; } __ec_todo_unpacked; struct ec_params_lightbar { uint8_t cmd; /* Command (see enum lightbar_command) */ union { /* * The following commands have no args: * * dump, off, on, init, get_seq, get_params_v0, get_params_v1, * version, get_brightness, get_demo, suspend, resume, * get_params_v2_timing, get_params_v2_tap, get_params_v2_osc, * get_params_v2_bright, get_params_v2_thlds, * get_params_v2_colors * * Don't use an empty struct, because C++ hates that. */ struct __ec_todo_unpacked { uint8_t num; } set_brightness, seq, demo; struct __ec_todo_unpacked { uint8_t ctrl, reg, value; } reg; struct __ec_todo_unpacked { uint8_t led, red, green, blue; } set_rgb; struct __ec_todo_unpacked { uint8_t led; } get_rgb; struct __ec_todo_unpacked { uint8_t enable; } manual_suspend_ctrl; struct lightbar_params_v0 set_params_v0; struct lightbar_params_v1 set_params_v1; struct lightbar_params_v2_timing set_v2par_timing; struct lightbar_params_v2_tap set_v2par_tap; struct lightbar_params_v2_oscillation set_v2par_osc; struct lightbar_params_v2_brightness set_v2par_bright; struct lightbar_params_v2_thresholds set_v2par_thlds; struct lightbar_params_v2_colors set_v2par_colors; struct lightbar_program set_program; }; } __ec_todo_packed; struct ec_response_lightbar { union { struct __ec_todo_unpacked { struct __ec_todo_unpacked { uint8_t reg; uint8_t ic0; uint8_t ic1; } vals[23]; } dump; struct __ec_todo_unpacked { uint8_t num; } get_seq, get_brightness, get_demo; struct lightbar_params_v0 get_params_v0; struct lightbar_params_v1 get_params_v1; struct lightbar_params_v2_timing get_params_v2_timing; struct lightbar_params_v2_tap get_params_v2_tap; struct lightbar_params_v2_oscillation get_params_v2_osc; struct lightbar_params_v2_brightness get_params_v2_bright; struct lightbar_params_v2_thresholds get_params_v2_thlds; struct lightbar_params_v2_colors get_params_v2_colors; struct __ec_todo_unpacked { uint32_t num; uint32_t flags; } version; struct __ec_todo_unpacked { uint8_t red, green, blue; } get_rgb; /* * The following commands have no response: * * off, on, init, set_brightness, seq, reg, set_rgb, demo, * set_params_v0, set_params_v1, set_program, * manual_suspend_ctrl, suspend, resume, set_v2par_timing, * set_v2par_tap, set_v2par_osc, set_v2par_bright, * set_v2par_thlds, set_v2par_colors */ }; } __ec_todo_packed; /* Lightbar commands */ enum lightbar_command { LIGHTBAR_CMD_DUMP = 0, LIGHTBAR_CMD_OFF = 1, LIGHTBAR_CMD_ON = 2, LIGHTBAR_CMD_INIT = 3, LIGHTBAR_CMD_SET_BRIGHTNESS = 4, LIGHTBAR_CMD_SEQ = 5, LIGHTBAR_CMD_REG = 6, LIGHTBAR_CMD_SET_RGB = 7, LIGHTBAR_CMD_GET_SEQ = 8, LIGHTBAR_CMD_DEMO = 9, LIGHTBAR_CMD_GET_PARAMS_V0 = 10, LIGHTBAR_CMD_SET_PARAMS_V0 = 11, LIGHTBAR_CMD_VERSION = 12, LIGHTBAR_CMD_GET_BRIGHTNESS = 13, LIGHTBAR_CMD_GET_RGB = 14, LIGHTBAR_CMD_GET_DEMO = 15, LIGHTBAR_CMD_GET_PARAMS_V1 = 16, LIGHTBAR_CMD_SET_PARAMS_V1 = 17, LIGHTBAR_CMD_SET_PROGRAM = 18, LIGHTBAR_CMD_MANUAL_SUSPEND_CTRL = 19, LIGHTBAR_CMD_SUSPEND = 20, LIGHTBAR_CMD_RESUME = 21, LIGHTBAR_CMD_GET_PARAMS_V2_TIMING = 22, LIGHTBAR_CMD_SET_PARAMS_V2_TIMING = 23, LIGHTBAR_CMD_GET_PARAMS_V2_TAP = 24, LIGHTBAR_CMD_SET_PARAMS_V2_TAP = 25, LIGHTBAR_CMD_GET_PARAMS_V2_OSCILLATION = 26, LIGHTBAR_CMD_SET_PARAMS_V2_OSCILLATION = 27, LIGHTBAR_CMD_GET_PARAMS_V2_BRIGHTNESS = 28, LIGHTBAR_CMD_SET_PARAMS_V2_BRIGHTNESS = 29, LIGHTBAR_CMD_GET_PARAMS_V2_THRESHOLDS = 30, LIGHTBAR_CMD_SET_PARAMS_V2_THRESHOLDS = 31, LIGHTBAR_CMD_GET_PARAMS_V2_COLORS = 32, 
LIGHTBAR_CMD_SET_PARAMS_V2_COLORS = 33, LIGHTBAR_NUM_CMDS }; /*****************************************************************************/ /* LED control commands */ #define EC_CMD_LED_CONTROL 0x0029 enum ec_led_id { /* LED to indicate battery state of charge */ EC_LED_ID_BATTERY_LED = 0, /* * LED to indicate system power state (on or in suspend). * May be on power button or on C-panel. */ EC_LED_ID_POWER_LED, /* LED on power adapter or its plug */ EC_LED_ID_ADAPTER_LED, /* LED to indicate left side */ EC_LED_ID_LEFT_LED, /* LED to indicate right side */ EC_LED_ID_RIGHT_LED, /* LED to indicate recovery mode with HW_REINIT */ EC_LED_ID_RECOVERY_HW_REINIT_LED, /* LED to indicate sysrq debug mode. */ EC_LED_ID_SYSRQ_DEBUG_LED, EC_LED_ID_COUNT }; /* LED control flags */ #define EC_LED_FLAGS_QUERY BIT(0) /* Query LED capability only */ #define EC_LED_FLAGS_AUTO BIT(1) /* Switch LED back to automatic control */ enum ec_led_colors { EC_LED_COLOR_RED = 0, EC_LED_COLOR_GREEN, EC_LED_COLOR_BLUE, EC_LED_COLOR_YELLOW, EC_LED_COLOR_WHITE, EC_LED_COLOR_AMBER, EC_LED_COLOR_COUNT }; struct ec_params_led_control { uint8_t led_id; /* Which LED to control */ uint8_t flags; /* Control flags */ uint8_t brightness[EC_LED_COLOR_COUNT]; } __ec_align1; struct ec_response_led_control { /* * Available brightness value range. * * Range 0 means the color channel is not present. * Range 1 means on/off control. * Any other value means the LED is controlled by PWM. */ uint8_t brightness_range[EC_LED_COLOR_COUNT]; } __ec_align1; /*****************************************************************************/ /* Verified boot commands */ /* * Note: command code 0x29 version 0 was VBOOT_CMD in Link EVT; it may be * reused for other purposes with version > 0. */ /* Verified boot hash command */ #define EC_CMD_VBOOT_HASH 0x002A struct ec_params_vboot_hash { uint8_t cmd; /* enum ec_vboot_hash_cmd */ uint8_t hash_type; /* enum ec_vboot_hash_type */ uint8_t nonce_size; /* Nonce size; may be 0 */ uint8_t reserved0; /* Reserved; set 0 */ uint32_t offset; /* Offset in flash to hash */ uint32_t size; /* Number of bytes to hash */ uint8_t nonce_data[64]; /* Nonce data; ignored if nonce_size=0 */ } __ec_align4; struct ec_response_vboot_hash { uint8_t status; /* enum ec_vboot_hash_status */ uint8_t hash_type; /* enum ec_vboot_hash_type */ uint8_t digest_size; /* Size of hash digest in bytes */ uint8_t reserved0; /* Ignore; will be 0 */ uint32_t offset; /* Offset in flash which was hashed */ uint32_t size; /* Number of bytes hashed */ uint8_t hash_digest[64]; /* Hash digest data */ } __ec_align4; enum ec_vboot_hash_cmd { EC_VBOOT_HASH_GET = 0, /* Get current hash status */ EC_VBOOT_HASH_ABORT = 1, /* Abort calculating current hash */ EC_VBOOT_HASH_START = 2, /* Start computing a new hash */ EC_VBOOT_HASH_RECALC = 3, /* Synchronously compute a new hash */ }; enum ec_vboot_hash_type { EC_VBOOT_HASH_TYPE_SHA256 = 0, /* SHA-256 */ }; enum ec_vboot_hash_status { EC_VBOOT_HASH_STATUS_NONE = 0, /* No hash (not started, or aborted) */ EC_VBOOT_HASH_STATUS_DONE = 1, /* Finished computing a hash */ EC_VBOOT_HASH_STATUS_BUSY = 2, /* Busy computing a hash */ };
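/*
 * Editor's illustrative sketch (not part of the original header): asking the
 * EC to hash its read-only image and polling for the result. It uses
 * EC_VBOOT_HASH_OFFSET_RO, defined just below; the send/receive step stands
 * in for whatever host-command transport the platform provides.
 *
 *	struct ec_params_vboot_hash p = {
 *		.cmd = EC_VBOOT_HASH_START,
 *		.hash_type = EC_VBOOT_HASH_TYPE_SHA256,
 *		.nonce_size = 0,			// no nonce
 *		.offset = EC_VBOOT_HASH_OFFSET_RO,	// EC fills offset/size
 *	};
 *	// Send EC_CMD_VBOOT_HASH, then poll with .cmd = EC_VBOOT_HASH_GET
 *	// until ec_response_vboot_hash.status != EC_VBOOT_HASH_STATUS_BUSY,
 *	// then read hash_digest[0..digest_size-1].
 */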
/* * Special values for offset for EC_VBOOT_HASH_START and EC_VBOOT_HASH_RECALC. * If one of these is specified, the EC will automatically update offset and * size to the correct values for the specified image (RO or RW). */ #define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe #define EC_VBOOT_HASH_OFFSET_ACTIVE 0xfffffffd #define EC_VBOOT_HASH_OFFSET_UPDATE 0xfffffffc /* * 'RW' is vague if there are multiple RW images; we mean the active one, * so the old constant is deprecated. */ #define EC_VBOOT_HASH_OFFSET_RW EC_VBOOT_HASH_OFFSET_ACTIVE /*****************************************************************************/ /* * Motion sense commands. We'll make separate structs for sub-commands with * different input args, so that we know how much to expect. */ #define EC_CMD_MOTION_SENSE_CMD 0x002B /* Motion sense commands */ enum motionsense_command { /* * Dump command returns all motion sensor data including motion sense * module flags and individual sensor flags. */ MOTIONSENSE_CMD_DUMP = 0, /* * Info command returns data describing the details of a given sensor, * including enum motionsensor_type, enum motionsensor_location, and * enum motionsensor_chip. */ MOTIONSENSE_CMD_INFO = 1, /* * EC Rate command is a setter/getter command for the EC sampling rate * in milliseconds. * It is per sensor; the EC runs its sample task at the minimum of all * sensors' EC_RATE values. * For sensors without a hardware FIFO, EC_RATE should be equal to * 1/ODR to collect all the sensor samples. * For sensors with a hardware FIFO, EC_RATE is used as the maximal * delay, in milliseconds, before processing data from all motion * sensors. */ MOTIONSENSE_CMD_EC_RATE = 2, /* * Sensor ODR command is a setter/getter command for the output data * rate of a specific motion sensor in millihertz. */ MOTIONSENSE_CMD_SENSOR_ODR = 3, /* * Sensor range command is a setter/getter command for the range of * a specified motion sensor in +/-G's or +/- deg/s. */ MOTIONSENSE_CMD_SENSOR_RANGE = 4, /* * Setter/getter command for the keyboard wake angle. When the lid * angle is greater than this value, keyboard wake is disabled in S3, * and when the lid angle goes less than this value, keyboard wake is * enabled. Note: the lid angle measurement is an approximate, * uncalibrated value, hence the wake angle isn't exact. */ MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5, /* * Returns data for a single sensor. */ MOTIONSENSE_CMD_DATA = 6, /* * Return sensor FIFO info. */ MOTIONSENSE_CMD_FIFO_INFO = 7, /* * Insert a flush element in the FIFO and return sensor FIFO info. * The host can use that element to synchronize its operation. */ MOTIONSENSE_CMD_FIFO_FLUSH = 8, /* * Return a portion of the FIFO. */ MOTIONSENSE_CMD_FIFO_READ = 9, /* * Perform low level calibration. * On sensors that support it, ask to do offset calibration. */ MOTIONSENSE_CMD_PERFORM_CALIB = 10, /* * Sensor Offset command is a setter/getter command for the offset * used for calibration. * The offsets can be calculated by the host, or via the * PERFORM_CALIB command. */ MOTIONSENSE_CMD_SENSOR_OFFSET = 11, /* * List available activities for a MOTION sensor. * Indicates if they are enabled or disabled. */ MOTIONSENSE_CMD_LIST_ACTIVITIES = 12, /* * Activity management: * enable/disable activity recognition. */ MOTIONSENSE_CMD_SET_ACTIVITY = 13, /* * Lid Angle */ MOTIONSENSE_CMD_LID_ANGLE = 14, /* * Allow the FIFO to trigger interrupts via MKBP events. * By default the FIFO does not generate an interrupt to process the * FIFO until the AP is ready or the data comes from a wakeup sensor. */ MOTIONSENSE_CMD_FIFO_INT_ENABLE = 15, /* * Spoof the readings of the sensors. The spoofed readings can be set * to arbitrary values, or locked to the last actual values read. */ MOTIONSENSE_CMD_SPOOF = 16, /* Set lid angle for tablet mode detection. */ MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE = 17, /* * Sensor Scale command is a setter/getter command for the calibration * scale. */ MOTIONSENSE_CMD_SENSOR_SCALE = 18, /* Number of motionsense sub-commands. */ MOTIONSENSE_NUM_CMDS };
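/*
 * Editor's illustrative sketch (not part of the original header): setting a
 * sensor's output data rate with MOTIONSENSE_CMD_SENSOR_ODR. It uses struct
 * ec_params_motion_sense, defined further below; sensor number 0 is an
 * assumption for the example.
 *
 *	struct ec_params_motion_sense p = {
 *		.cmd = MOTIONSENSE_CMD_SENSOR_ODR,
 *		.sensor_odr = {
 *			.sensor_num = 0,
 *			.roundup = 1,		// round the rate up
 *			.data = 12500,		// 12.5 Hz, in millihertz
 *		},
 *	};
 *	// Passing .data = EC_MOTION_SENSE_NO_VALUE instead reads the
 *	// current ODR back in ec_response_motion_sense.sensor_odr.ret.
 */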
/* List of motion sensor types. */ enum motionsensor_type { MOTIONSENSE_TYPE_ACCEL = 0, MOTIONSENSE_TYPE_GYRO = 1, MOTIONSENSE_TYPE_MAG = 2, MOTIONSENSE_TYPE_PROX = 3, MOTIONSENSE_TYPE_LIGHT = 4, MOTIONSENSE_TYPE_ACTIVITY = 5, MOTIONSENSE_TYPE_BARO = 6, MOTIONSENSE_TYPE_SYNC = 7, MOTIONSENSE_TYPE_MAX, }; /* List of motion sensor locations. */ enum motionsensor_location { MOTIONSENSE_LOC_BASE = 0, MOTIONSENSE_LOC_LID = 1, MOTIONSENSE_LOC_CAMERA = 2, MOTIONSENSE_LOC_MAX, }; /* List of motion sensor chips. */ enum motionsensor_chip { MOTIONSENSE_CHIP_KXCJ9 = 0, MOTIONSENSE_CHIP_LSM6DS0 = 1, MOTIONSENSE_CHIP_BMI160 = 2, MOTIONSENSE_CHIP_SI1141 = 3, MOTIONSENSE_CHIP_SI1142 = 4, MOTIONSENSE_CHIP_SI1143 = 5, MOTIONSENSE_CHIP_KX022 = 6, MOTIONSENSE_CHIP_L3GD20H = 7, MOTIONSENSE_CHIP_BMA255 = 8, MOTIONSENSE_CHIP_BMP280 = 9, MOTIONSENSE_CHIP_OPT3001 = 10, MOTIONSENSE_CHIP_BH1730 = 11, MOTIONSENSE_CHIP_GPIO = 12, MOTIONSENSE_CHIP_LIS2DH = 13, MOTIONSENSE_CHIP_LSM6DSM = 14, MOTIONSENSE_CHIP_LIS2DE = 15, MOTIONSENSE_CHIP_LIS2MDL = 16, MOTIONSENSE_CHIP_LSM6DS3 = 17, MOTIONSENSE_CHIP_LSM6DSO = 18, MOTIONSENSE_CHIP_LNG2DM = 19, MOTIONSENSE_CHIP_MAX, }; /* List of orientation positions */ enum motionsensor_orientation { MOTIONSENSE_ORIENTATION_LANDSCAPE = 0, MOTIONSENSE_ORIENTATION_PORTRAIT = 1, MOTIONSENSE_ORIENTATION_UPSIDE_DOWN_PORTRAIT = 2, MOTIONSENSE_ORIENTATION_UPSIDE_DOWN_LANDSCAPE = 3, MOTIONSENSE_ORIENTATION_UNKNOWN = 4, }; struct ec_response_motion_sensor_data { /* Flags for each sensor. */ uint8_t flags; /* Sensor number the data comes from. */ uint8_t sensor_num; /* Each sensor has up to 3 axes. */ union { int16_t data[3]; struct __ec_todo_packed { uint16_t reserved; uint32_t timestamp; }; struct __ec_todo_unpacked { uint8_t activity; /* motionsensor_activity */ uint8_t state; int16_t add_info[2]; }; }; } __ec_todo_packed; /* Note: used in ec_response_get_next_data */ struct ec_response_motion_sense_fifo_info { /* Size of the FIFO */ uint16_t size; /* Amount of space used in the FIFO */ uint16_t count; /* Timestamp recorded in us, * i.e. the accurate timestamp when the host event was triggered. */ uint32_t timestamp; /* Total number of vectors lost */ uint16_t total_lost; /* Lost events since the last fifo_info, per sensor */ uint16_t lost[0]; } __ec_todo_packed; struct ec_response_motion_sense_fifo_data { uint32_t number_data; struct ec_response_motion_sensor_data data[0]; } __ec_todo_packed; /* List of supported activity recognition types */ enum motionsensor_activity { MOTIONSENSE_ACTIVITY_RESERVED = 0, MOTIONSENSE_ACTIVITY_SIG_MOTION = 1, MOTIONSENSE_ACTIVITY_DOUBLE_TAP = 2, MOTIONSENSE_ACTIVITY_ORIENTATION = 3, }; struct ec_motion_sense_activity { uint8_t sensor_num; uint8_t activity; /* one of enum motionsensor_activity */ uint8_t enable; /* 1: enable, 0: disable */ uint8_t reserved; uint16_t parameters[3]; /* activity dependent parameters */ } __ec_todo_unpacked; /* Module flag masks used for the dump sub-command. */ #define MOTIONSENSE_MODULE_FLAG_ACTIVE BIT(0) /* Sensor flag masks used for the dump sub-command. */ #define MOTIONSENSE_SENSOR_FLAG_PRESENT BIT(0)
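/*
 * Editor's illustrative sketch (not part of the original header): walking a
 * MOTIONSENSE_CMD_DUMP response to find the sensors that are present. 'r' is
 * an assumed pointer to the ec_response_motion_sense defined further below,
 * and use_sample() is a hypothetical consumer.
 *
 *	int i;
 *
 *	if (r->dump.module_flags & MOTIONSENSE_MODULE_FLAG_ACTIVE) {
 *		for (i = 0; i < r->dump.sensor_count; i++) {
 *			struct ec_response_motion_sensor_data *d =
 *				&r->dump.sensor[i];
 *			if (d->flags & MOTIONSENSE_SENSOR_FLAG_PRESENT)
 *				// d->data[0..2] holds the X/Y/Z sample
 *				use_sample(d->sensor_num, d->data);
 *		}
 *	}
 */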
/* * Flush entry for synchronization. * data contains the timestamp */ #define MOTIONSENSE_SENSOR_FLAG_FLUSH BIT(0) #define MOTIONSENSE_SENSOR_FLAG_TIMESTAMP BIT(1) #define MOTIONSENSE_SENSOR_FLAG_WAKEUP BIT(2) #define MOTIONSENSE_SENSOR_FLAG_TABLET_MODE BIT(3) #define MOTIONSENSE_SENSOR_FLAG_ODR BIT(4) /* * Send this value for the data element to only perform a read. If you * send any other value, the EC will interpret it as data to set and will * return the actual value set. */ #define EC_MOTION_SENSE_NO_VALUE -1 #define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000 /* MOTIONSENSE_CMD_SENSOR_OFFSET subcommand flag */ /* Set Calibration information */ #define MOTION_SENSE_SET_OFFSET BIT(0) /* Default Scale value, factor 1. */ #define MOTION_SENSE_DEFAULT_SCALE BIT(15) #define LID_ANGLE_UNRELIABLE 500 enum motionsense_spoof_mode { /* Disable spoof mode. */ MOTIONSENSE_SPOOF_MODE_DISABLE = 0, /* Enable spoof mode, but use provided component values. */ MOTIONSENSE_SPOOF_MODE_CUSTOM, /* Enable spoof mode, but use the current sensor values. */ MOTIONSENSE_SPOOF_MODE_LOCK_CURRENT, /* Query the current spoof mode status for the sensor. */ MOTIONSENSE_SPOOF_MODE_QUERY, }; struct ec_params_motion_sense { uint8_t cmd; union { /* Used for MOTIONSENSE_CMD_DUMP. */ struct __ec_todo_unpacked { /* * Maximal number of sensors the host is expecting. * 0 means the host is only interested in the number * of sensors controlled by the EC. */ uint8_t max_sensor_count; } dump; /* * Used for MOTIONSENSE_CMD_KB_WAKE_ANGLE. */ struct __ec_todo_unpacked { /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. * kb_wake_angle: angle at which to wake the AP. */ int16_t data; } kb_wake_angle; /* * Used for MOTIONSENSE_CMD_INFO, MOTIONSENSE_CMD_DATA * and MOTIONSENSE_CMD_PERFORM_CALIB. */ struct __ec_todo_unpacked { uint8_t sensor_num; } info, info_3, data, fifo_flush, perform_calib, list_activities; /* * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR * and MOTIONSENSE_CMD_SENSOR_RANGE. */ struct __ec_todo_unpacked { uint8_t sensor_num; /* Rounding flag, true for round-up, false for round-down. */ uint8_t roundup; uint16_t reserved; /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ int32_t data; } ec_rate, sensor_odr, sensor_range; /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ struct __ec_todo_packed { uint8_t sensor_num; /* * bit 0: If set (MOTION_SENSE_SET_OFFSET), set * the calibration information in the EC. * If unset, just retrieve calibration information. */ uint16_t flags; /* * Temperature at calibration, in units of 0.01 C * 0x8000: invalid / unknown. * 0x0: 0C * 0x7fff: +327.67C */ int16_t temp; /* * Offset for calibration. * Unit: * Accelerometer: 1/1024 g * Gyro: 1/1024 deg/s * Compass: 1/16 uT */ int16_t offset[3]; } sensor_offset; /* Used for MOTIONSENSE_CMD_SENSOR_SCALE */ struct __ec_todo_packed { uint8_t sensor_num; /* * bit 0: If set (MOTION_SENSE_SET_OFFSET), set * the calibration information in the EC. * If unset, just retrieve calibration information. */ uint16_t flags; /* * Temperature at calibration, in units of 0.01 C * 0x8000: invalid / unknown. * 0x0: 0C * 0x7fff: +327.67C */ int16_t temp; /* * Scale for calibration: * By default the scale is 1; it is encoded on 16 bits: * 1 = BIT(15) * ~2 = 0xFFFF * ~0 = 0. */ uint16_t scale[3]; } sensor_scale; /* Used for MOTIONSENSE_CMD_FIFO_INFO */ /* (no params) */ /* Used for MOTIONSENSE_CMD_FIFO_READ */ struct __ec_todo_unpacked { /* * Number of expected vectors to return. * The EC may return fewer, or 0 if none are available.
*/ uint32_t max_data_vector; } fifo_read; struct ec_motion_sense_activity set_activity; /* Used for MOTIONSENSE_CMD_LID_ANGLE */ /* (no params) */ /* Used for MOTIONSENSE_CMD_FIFO_INT_ENABLE */ struct __ec_todo_unpacked { /* * 1: enable, 0: disable the FIFO; * EC_MOTION_SENSE_NO_VALUE: return the current value. */ int8_t enable; } fifo_int_enable; /* Used for MOTIONSENSE_CMD_SPOOF */ struct __ec_todo_packed { uint8_t sensor_id; /* See enum motionsense_spoof_mode. */ uint8_t spoof_enable; /* Ignored, used for alignment. */ uint8_t reserved; /* Individual component values to spoof. */ int16_t components[3]; } spoof; /* Used for MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE. */ struct __ec_todo_unpacked { /* * Lid angle threshold for switching between tablet and * clamshell mode. */ int16_t lid_angle; /* * Hysteresis degree to prevent fluctuations between * clamshell and tablet mode if lid angle keeps * changing around the threshold. Lid motion driver will * use lid_angle + hys_degree to trigger tablet mode and * lid_angle - hys_degree to trigger clamshell mode. */ int16_t hys_degree; } tablet_mode_threshold; }; } __ec_todo_packed; struct ec_response_motion_sense { union { /* Used for MOTIONSENSE_CMD_DUMP */ struct __ec_todo_unpacked { /* Flags representing the motion sensor module. */ uint8_t module_flags; /* Number of sensors managed directly by the EC. */ uint8_t sensor_count; /* * Sensor data is truncated if response_max is too small * to hold all of it. */ struct ec_response_motion_sensor_data sensor[0]; } dump; /* Used for MOTIONSENSE_CMD_INFO. */ struct __ec_todo_unpacked { /* Should be an element of enum motionsensor_type. */ uint8_t type; /* Should be an element of enum motionsensor_location. */ uint8_t location; /* Should be an element of enum motionsensor_chip. */ uint8_t chip; } info; /* Used for MOTIONSENSE_CMD_INFO version 3 */ struct __ec_todo_unpacked { /* Should be an element of enum motionsensor_type. */ uint8_t type; /* Should be an element of enum motionsensor_location. */ uint8_t location; /* Should be an element of enum motionsensor_chip. */ uint8_t chip; /* Minimum sensor sampling frequency */ uint32_t min_frequency; /* Maximum sensor sampling frequency */ uint32_t max_frequency; /* Max number of sensor events that could be in the FIFO */ uint32_t fifo_max_event_count; } info_3; /* Used for MOTIONSENSE_CMD_DATA */ struct ec_response_motion_sensor_data data; /* * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR, * MOTIONSENSE_CMD_SENSOR_RANGE, * MOTIONSENSE_CMD_KB_WAKE_ANGLE, * MOTIONSENSE_CMD_FIFO_INT_ENABLE and * MOTIONSENSE_CMD_SPOOF. */ struct __ec_todo_unpacked { /* Current value of the parameter queried. */ int32_t ret; } ec_rate, sensor_odr, sensor_range, kb_wake_angle, fifo_int_enable, spoof; /* * Used for MOTIONSENSE_CMD_SENSOR_OFFSET, * PERFORM_CALIB. */ struct __ec_todo_unpacked { int16_t temp; int16_t offset[3]; } sensor_offset, perform_calib; /* Used for MOTIONSENSE_CMD_SENSOR_SCALE */ struct __ec_todo_unpacked { int16_t temp; uint16_t scale[3]; } sensor_scale; struct ec_response_motion_sense_fifo_info fifo_info, fifo_flush; struct ec_response_motion_sense_fifo_data fifo_read; struct __ec_todo_packed { uint16_t reserved; uint32_t enabled; uint32_t disabled; } list_activities; /* No params for set activity */ /* Used for MOTIONSENSE_CMD_LID_ANGLE */ struct __ec_todo_unpacked { /* * Angle between 0 and 360 degrees if available, * LID_ANGLE_UNRELIABLE otherwise. */ uint16_t value; } lid_angle; /* Used for MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE.
*/ struct __ec_todo_unpacked { /* * Lid angle threshold for switching between tablet and * clamshell mode. */ uint16_t lid_angle; /* Hysteresis degree. */ uint16_t hys_degree; } tablet_mode_threshold; }; } __ec_todo_packed; /*****************************************************************************/ /* Force lid open command */ /* Make the lid event always report open */ #define EC_CMD_FORCE_LID_OPEN 0x002C struct ec_params_force_lid_open { uint8_t enabled; } __ec_align1; /*****************************************************************************/ /* Configure the behavior of the power button */ #define EC_CMD_CONFIG_POWER_BUTTON 0x002D enum ec_config_power_button_flags { /* Enable/Disable power button pulses for x86 devices */ EC_POWER_BUTTON_ENABLE_PULSE = BIT(0), }; struct ec_params_config_power_button { /* See enum ec_config_power_button_flags */ uint8_t flags; } __ec_align1; /*****************************************************************************/ /* USB charging control commands */ /* Set USB port charging mode */ #define EC_CMD_USB_CHARGE_SET_MODE 0x0030 struct ec_params_usb_charge_set_mode { uint8_t usb_port_id; uint8_t mode:7; uint8_t inhibit_charge:1; } __ec_align1; /*****************************************************************************/ /* Persistent storage for host */ /* Maximum bytes that can be read/written in a single command */ #define EC_PSTORE_SIZE_MAX 64 /* Get persistent storage info */ #define EC_CMD_PSTORE_INFO 0x0040 struct ec_response_pstore_info { /* Persistent storage size, in bytes */ uint32_t pstore_size; /* Access size; read/write offset and size must be a multiple of this */ uint32_t access_size; } __ec_align4; /* * Read persistent storage * * Response is params.size bytes of data. */ #define EC_CMD_PSTORE_READ 0x0041 struct ec_params_pstore_read { uint32_t offset; /* Byte offset to read */ uint32_t size; /* Size to read in bytes */ } __ec_align4; /* Write persistent storage */ #define EC_CMD_PSTORE_WRITE 0x0042 struct ec_params_pstore_write { uint32_t offset; /* Byte offset to write */ uint32_t size; /* Size to write in bytes */ uint8_t data[EC_PSTORE_SIZE_MAX]; } __ec_align4; /*****************************************************************************/ /* Real-time clock */ /* RTC params and response structures */ struct ec_params_rtc { uint32_t time; } __ec_align4; struct ec_response_rtc { uint32_t time; } __ec_align4; /* These use ec_response_rtc */ #define EC_CMD_RTC_GET_VALUE 0x0044 #define EC_CMD_RTC_GET_ALARM 0x0045 /* These all use ec_params_rtc */ #define EC_CMD_RTC_SET_VALUE 0x0046 #define EC_CMD_RTC_SET_ALARM 0x0047 /* Pass as time param to SET_ALARM to clear the current alarm */ #define EC_RTC_ALARM_CLEAR 0 /*****************************************************************************/ /* Port80 log access */ /* Maximum entries that can be read/written in a single command */ #define EC_PORT80_SIZE_MAX 32 /* Get last port80 code from previous boot */ #define EC_CMD_PORT80_LAST_BOOT 0x0048 #define EC_CMD_PORT80_READ 0x0048 enum ec_port80_subcmd { EC_PORT80_GET_INFO = 0, EC_PORT80_READ_BUFFER, }; struct ec_params_port80_read { uint16_t subcmd; union { struct __ec_todo_unpacked { uint32_t offset; uint32_t num_entries; } read_buffer; }; } __ec_todo_packed; struct ec_response_port80_read { union { struct __ec_todo_unpacked { uint32_t writes; uint32_t history_size; uint32_t last_boot; } get_info; struct __ec_todo_unpacked { uint16_t codes[EC_PORT80_SIZE_MAX]; } data; }; } __ec_todo_packed; struct ec_response_port80_last_boot { uint16_t code; } __ec_align2;
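/*
 * Editor's illustrative sketch (not part of the original header): reading the
 * port80 history via EC_CMD_PORT80_READ, which per the structs above takes a
 * subcommand (the versioned two-step flow is this editor's reading of the
 * interface, not a statement of the upstream driver's behavior).
 *
 *	struct ec_params_port80_read p = { .subcmd = EC_PORT80_GET_INFO };
 *	// The reply's ec_response_port80_read.get_info gives writes and
 *	// history_size; then fetch the codes themselves:
 *	p.subcmd = EC_PORT80_READ_BUFFER;
 *	p.read_buffer.offset = 0;
 *	p.read_buffer.num_entries = EC_PORT80_SIZE_MAX;
 *	// Codes arrive in ec_response_port80_read.data.codes[].
 */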
/*****************************************************************************/ /* Temporary secure storage for host verified boot use */ /* Number of bytes in a vstore slot */ #define EC_VSTORE_SLOT_SIZE 64 /* Maximum number of vstore slots */ #define EC_VSTORE_SLOT_MAX 32 /* Get temporary secure storage info */ #define EC_CMD_VSTORE_INFO 0x0049 struct ec_response_vstore_info { /* Indicates which slots are locked */ uint32_t slot_locked; /* Total number of slots available */ uint8_t slot_count; } __ec_align_size1; /* * Read temporary secure storage * * Response is EC_VSTORE_SLOT_SIZE bytes of data. */ #define EC_CMD_VSTORE_READ 0x004A struct ec_params_vstore_read { uint8_t slot; /* Slot to read from */ } __ec_align1; struct ec_response_vstore_read { uint8_t data[EC_VSTORE_SLOT_SIZE]; } __ec_align1; /* * Write temporary secure storage and lock it. */ #define EC_CMD_VSTORE_WRITE 0x004B struct ec_params_vstore_write { uint8_t slot; /* Slot to write to */ uint8_t data[EC_VSTORE_SLOT_SIZE]; } __ec_align1; /*****************************************************************************/ /* Thermal engine commands. Note that there are two implementations. We'll * reuse the command number, but the data and behavior are incompatible. * Version 0 is what originally shipped on Link. * Version 1 separates the CPU thermal limits from the fan control. */ #define EC_CMD_THERMAL_SET_THRESHOLD 0x0050 #define EC_CMD_THERMAL_GET_THRESHOLD 0x0051 /* The version 0 structs are opaque. You have to know what they are for * the get/set commands to make any sense. */ /* Version 0 - set */ struct ec_params_thermal_set_threshold { uint8_t sensor_type; uint8_t threshold_id; uint16_t value; } __ec_align2; /* Version 0 - get */ struct ec_params_thermal_get_threshold { uint8_t sensor_type; uint8_t threshold_id; } __ec_align1; struct ec_response_thermal_get_threshold { uint16_t value; } __ec_align2; /* The version 1 structs are visible. */ enum ec_temp_thresholds { EC_TEMP_THRESH_WARN = 0, EC_TEMP_THRESH_HIGH, EC_TEMP_THRESH_HALT, EC_TEMP_THRESH_COUNT }; /* * Thermal configuration for one temperature sensor. Temps are in degrees K. * Zero values will be silently ignored by the thermal task. * * Setting a 'temp_host' value allows the thermal task to trigger an event * with 1 degree of hysteresis. * For example, * temp_host[EC_TEMP_THRESH_HIGH] = 300 K * temp_host_release[EC_TEMP_THRESH_HIGH] = 0 K * The EC will throttle the AP when temperature >= 301 K, and release * throttling when temperature <= 299 K. * * Setting a 'temp_host_release' value gives the thermal task a custom * hysteresis. * For example, * temp_host[EC_TEMP_THRESH_HIGH] = 300 K * temp_host_release[EC_TEMP_THRESH_HIGH] = 295 K * The EC will throttle the AP when temperature >= 301 K, and release * throttling when temperature <= 294 K. * * Note that this structure is a sub-structure of * ec_params_thermal_set_threshold_v1, but maintains its alignment there. */ struct ec_thermal_config { uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */ uint32_t temp_host_release[EC_TEMP_THRESH_COUNT]; /* release levels */ uint32_t temp_fan_off; /* no active cooling needed */ uint32_t temp_fan_max; /* max active cooling needed */ } __ec_align4; /* Version 1 - get config for one sensor. */ struct ec_params_thermal_get_threshold_v1 { uint32_t sensor_num; } __ec_align4; /* This returns a struct ec_thermal_config */
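/*
 * Editor's illustrative sketch (not part of the original header): the
 * read-modify-write flow recommended for the v1 thermal thresholds. It uses
 * ec_params_thermal_set_threshold_v1, defined just below; sensor 0 and the
 * 310 K value are assumptions for the example.
 *
 *	struct ec_params_thermal_get_threshold_v1 g = { .sensor_num = 0 };
 *	struct ec_thermal_config cfg;
 *	// 1) Send EC_CMD_THERMAL_GET_THRESHOLD v1; copy the reply into cfg.
 *	// 2) Change only what you need, e.g. raise the warn threshold:
 *	cfg.temp_host[EC_TEMP_THRESH_WARN] = 310;	// degrees K
 *	// 3) Write the whole config back:
 *	struct ec_params_thermal_set_threshold_v1 s = {
 *		.sensor_num = 0,
 *		.cfg = cfg,
 *	};
 *	// Send EC_CMD_THERMAL_SET_THRESHOLD v1; no response data.
 */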
/* * Version 1 - set config for one sensor. * Use read-modify-write for best results! */ struct ec_params_thermal_set_threshold_v1 { uint32_t sensor_num; struct ec_thermal_config cfg; } __ec_align4; /* This returns no data */ /****************************************************************************/ /* Toggle automatic fan control */ #define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x0052 /* Version 1 of input params */ struct ec_params_auto_fan_ctrl_v1 { uint8_t fan_idx; } __ec_align1; /* Get/Set TMP006 calibration data */ #define EC_CMD_TMP006_GET_CALIBRATION 0x0053 #define EC_CMD_TMP006_SET_CALIBRATION 0x0054 /* * The original TMP006 calibration only needed four params, but now we need * more. Since the algorithm is nothing but magic numbers anyway, we'll leave * the params opaque. The v1 "get" response will include the algorithm number * and how many params it requires. That way we can change the EC code without * needing to update this file. We can also use a different algorithm on each * sensor. */ /* This is the same struct for both v0 and v1. */ struct ec_params_tmp006_get_calibration { uint8_t index; } __ec_align1; /* Version 0 */ struct ec_response_tmp006_get_calibration_v0 { float s0; float b0; float b1; float b2; } __ec_align4; struct ec_params_tmp006_set_calibration_v0 { uint8_t index; uint8_t reserved[3]; float s0; float b0; float b1; float b2; } __ec_align4; /* Version 1 */ struct ec_response_tmp006_get_calibration_v1 { uint8_t algorithm; uint8_t num_params; uint8_t reserved[2]; float val[0]; } __ec_align4; struct ec_params_tmp006_set_calibration_v1 { uint8_t index; uint8_t algorithm; uint8_t num_params; uint8_t reserved; float val[0]; } __ec_align4; /* Read raw TMP006 data */ #define EC_CMD_TMP006_GET_RAW 0x0055 struct ec_params_tmp006_get_raw { uint8_t index; } __ec_align1; struct ec_response_tmp006_get_raw { int32_t t; /* In 1/100 K */ int32_t v; /* In nV */ } __ec_align4; /*****************************************************************************/ /* MKBP - Matrix KeyBoard Protocol */ /* * Read key state * * Returns raw data for keyboard cols; see ec_response_mkbp_info.cols for * expected response size. * * NOTE: This has been superseded by EC_CMD_GET_NEXT_EVENT. If you wish * to obtain the instantaneous state, use EC_CMD_MKBP_INFO with the type * EC_MKBP_INFO_CURRENT and event EC_MKBP_EVENT_KEY_MATRIX. */ #define EC_CMD_MKBP_STATE 0x0060 /* * Provide information about various MKBP things. See enum ec_mkbp_info_type. */ #define EC_CMD_MKBP_INFO 0x0061 struct ec_response_mkbp_info { uint32_t rows; uint32_t cols; /* Formerly "switches", which was 0. */ uint8_t reserved; } __ec_align_size1; struct ec_params_mkbp_info { uint8_t info_type; uint8_t event_type; } __ec_align1; enum ec_mkbp_info_type { /* * Info about the keyboard matrix: number of rows and columns. * * Returns struct ec_response_mkbp_info. */ EC_MKBP_INFO_KBD = 0, /* * For buttons and switches, info about which specifically are * supported. event_type must be set to one of the values in enum * ec_mkbp_event. * * For EC_MKBP_EVENT_BUTTON and EC_MKBP_EVENT_SWITCH, returns a 4 byte * bitmask indicating which buttons or switches are present. See the * bit indices below. */ EC_MKBP_INFO_SUPPORTED = 1, /* * Instantaneous state of buttons and switches. * * event_type must be set to one of the values in enum ec_mkbp_event. * * For EC_MKBP_EVENT_KEY_MATRIX, returns uint8_t key_matrix[13] * indicating the current state of the keyboard matrix. * * For EC_MKBP_EVENT_HOST_EVENT, returns uint32_t host_event, the raw * event state.
* * For EC_MKBP_EVENT_BUTTON, returns uint32_t buttons, indicating the * state of supported buttons. * * For EC_MKBP_EVENT_SWITCH, returns uint32_t switches, indicating the * state of supported switches. */ EC_MKBP_INFO_CURRENT = 2, }; /* Simulate key press */ #define EC_CMD_MKBP_SIMULATE_KEY 0x0062 struct ec_params_mkbp_simulate_key { uint8_t col; uint8_t row; uint8_t pressed; } __ec_align1; #define EC_CMD_GET_KEYBOARD_ID 0x0063 struct ec_response_keyboard_id { uint32_t keyboard_id; } __ec_align4; enum keyboard_id { KEYBOARD_ID_UNSUPPORTED = 0, KEYBOARD_ID_UNREADABLE = 0xffffffff, }; /* Configure keyboard scanning */ #define EC_CMD_MKBP_SET_CONFIG 0x0064 #define EC_CMD_MKBP_GET_CONFIG 0x0065 /* flags */ enum mkbp_config_flags { EC_MKBP_FLAGS_ENABLE = 1, /* Enable keyboard scanning */ }; enum mkbp_config_valid { EC_MKBP_VALID_SCAN_PERIOD = BIT(0), EC_MKBP_VALID_POLL_TIMEOUT = BIT(1), EC_MKBP_VALID_MIN_POST_SCAN_DELAY = BIT(3), EC_MKBP_VALID_OUTPUT_SETTLE = BIT(4), EC_MKBP_VALID_DEBOUNCE_DOWN = BIT(5), EC_MKBP_VALID_DEBOUNCE_UP = BIT(6), EC_MKBP_VALID_FIFO_MAX_DEPTH = BIT(7), }; /* * Configuration for our key scanning algorithm. * * Note that this is used as a sub-structure of * ec_{params/response}_mkbp_get_config. */ struct ec_mkbp_config { uint32_t valid_mask; /* valid fields */ uint8_t flags; /* some flags (enum mkbp_config_flags) */ uint8_t valid_flags; /* which flags are valid */ uint16_t scan_period_us; /* period between start of scans */ /* revert to interrupt mode after no activity for this long */ uint32_t poll_timeout_us; /* * minimum post-scan relax time. Once we finish a scan we check * the time until we are due to start the next one. If this time is * shorter than this field, we use this instead. */ uint16_t min_post_scan_delay_us; /* delay between setting up output and waiting for it to settle */ uint16_t output_settle_us; uint16_t debounce_down_us; /* time for debounce on key down */ uint16_t debounce_up_us; /* time for debounce on key up */ /* maximum depth to allow for FIFO (0 = no keyscan output) */ uint8_t fifo_max_depth; } __ec_align_size1; struct ec_params_mkbp_set_config { struct ec_mkbp_config config; } __ec_align_size1; struct ec_response_mkbp_get_config { struct ec_mkbp_config config; } __ec_align_size1; /* Run the key scan emulation */ #define EC_CMD_KEYSCAN_SEQ_CTRL 0x0066 enum ec_keyscan_seq_cmd { EC_KEYSCAN_SEQ_STATUS = 0, /* Get status information */ EC_KEYSCAN_SEQ_CLEAR = 1, /* Clear sequence */ EC_KEYSCAN_SEQ_ADD = 2, /* Add item to sequence */ EC_KEYSCAN_SEQ_START = 3, /* Start running sequence */ EC_KEYSCAN_SEQ_COLLECT = 4, /* Collect sequence summary data */ }; enum ec_collect_flags { /* * Indicates this scan was processed by the EC. Due to timing, some * scans may be skipped. */ EC_KEYSCAN_SEQ_FLAG_DONE = BIT(0), }; struct ec_collect_item { uint8_t flags; /* some flags (enum ec_collect_flags) */ } __ec_align1; struct ec_params_keyscan_seq_ctrl { uint8_t cmd; /* Command to send (enum ec_keyscan_seq_cmd) */ union { struct __ec_align1 { uint8_t active; /* still active */ uint8_t num_items; /* number of items */ /* Current item being presented */ uint8_t cur_item; } status; struct __ec_todo_unpacked { /* * Absolute time for this scan, measured from the * start of the sequence.
*/ uint32_t time_us; uint8_t scan[0]; /* keyscan data */ } add; struct __ec_align1 { uint8_t start_item; /* First item to return */ uint8_t num_items; /* Number of items to return */ } collect; }; } __ec_todo_packed; struct ec_result_keyscan_seq_ctrl { union { struct __ec_todo_unpacked { uint8_t num_items; /* Number of items */ /* Data for each item */ struct ec_collect_item item[0]; } collect; }; } __ec_todo_packed; /* * Get the next pending MKBP event. * * Returns EC_RES_UNAVAILABLE if there is no event pending. */ #define EC_CMD_GET_NEXT_EVENT 0x0067 #define EC_MKBP_HAS_MORE_EVENTS_SHIFT 7 /* * We use the most significant bit of the event type to indicate to the host * that the EC has more MKBP events available to provide. */ #define EC_MKBP_HAS_MORE_EVENTS BIT(EC_MKBP_HAS_MORE_EVENTS_SHIFT) /* The mask to apply to get the raw event type */ #define EC_MKBP_EVENT_TYPE_MASK (BIT(EC_MKBP_HAS_MORE_EVENTS_SHIFT) - 1) enum ec_mkbp_event { /* Keyboard matrix changed. The event data is the new matrix state. */ EC_MKBP_EVENT_KEY_MATRIX = 0, /* New host event. The event data is 4 bytes of host event flags. */ EC_MKBP_EVENT_HOST_EVENT = 1, /* New Sensor FIFO data. The event data is fifo_info structure. */ EC_MKBP_EVENT_SENSOR_FIFO = 2, /* The state of the non-matrixed buttons has changed. */ EC_MKBP_EVENT_BUTTON = 3, /* The state of the switches has changed. */ EC_MKBP_EVENT_SWITCH = 4, /* New Fingerprint sensor event, the event data is fp_events bitmap. */ EC_MKBP_EVENT_FINGERPRINT = 5, /* * Sysrq event: send emulated sysrq. The event data is sysrq, * corresponding to the key to be pressed. */ EC_MKBP_EVENT_SYSRQ = 6, /* * New 64-bit host event. * The event data is 8 bytes of host event flags. */ EC_MKBP_EVENT_HOST_EVENT64 = 7, /* Notify the AP that something happened on CEC */ EC_MKBP_EVENT_CEC_EVENT = 8, /* Send an incoming CEC message to the AP */ EC_MKBP_EVENT_CEC_MESSAGE = 9, /* Number of MKBP events */ EC_MKBP_EVENT_COUNT, }; BUILD_ASSERT(EC_MKBP_EVENT_COUNT <= EC_MKBP_EVENT_TYPE_MASK); union __ec_align_offset1 ec_response_get_next_data { uint8_t key_matrix[13]; /* Unaligned */ uint32_t host_event; uint64_t host_event64; struct __ec_todo_unpacked { /* For aligning the fifo_info */ uint8_t reserved[3]; struct ec_response_motion_sense_fifo_info info; } sensor_fifo; uint32_t buttons; uint32_t switches; uint32_t fp_events; uint32_t sysrq; /* CEC events from enum mkbp_cec_event */ uint32_t cec_events; }; union __ec_align_offset1 ec_response_get_next_data_v1 { uint8_t key_matrix[16]; /* Unaligned */ uint32_t host_event; uint64_t host_event64; struct __ec_todo_unpacked { /* For aligning the fifo_info */ uint8_t reserved[3]; struct ec_response_motion_sense_fifo_info info; } sensor_fifo; uint32_t buttons; uint32_t switches; uint32_t fp_events; uint32_t sysrq; /* CEC events from enum mkbp_cec_event */ uint32_t cec_events; uint8_t cec_message[16]; }; BUILD_ASSERT(sizeof(union ec_response_get_next_data_v1) == 16); struct ec_response_get_next_event { uint8_t event_type; /* Followed by event data if any */ union ec_response_get_next_data data; } __ec_align1; struct ec_response_get_next_event_v1 { uint8_t event_type; /* Followed by event data if any */ union ec_response_get_next_data_v1 data; } __ec_align1; /* Bit indices for buttons and switches. */ /* Buttons */ #define EC_MKBP_POWER_BUTTON 0 #define EC_MKBP_VOL_UP 1 #define EC_MKBP_VOL_DOWN 2 #define EC_MKBP_RECOVERY 3 /* Switches */ #define EC_MKBP_LID_OPEN 0 #define EC_MKBP_TABLET_MODE 1 #define EC_MKBP_BASE_ATTACHED 2
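/*
 * Editor's illustrative sketch (not part of the original header): decoding an
 * EC_CMD_GET_NEXT_EVENT response. 'r' is an assumed pointer to a received
 * struct ec_response_get_next_event, and handle_power_button() is a
 * hypothetical handler.
 *
 *	uint8_t type = r->event_type & EC_MKBP_EVENT_TYPE_MASK;
 *
 *	if (type == EC_MKBP_EVENT_BUTTON &&
 *	    (r->data.buttons & BIT(EC_MKBP_POWER_BUTTON)))
 *		handle_power_button();
 *	if (r->event_type & EC_MKBP_HAS_MORE_EVENTS)
 *		;	// more events queued: issue the command again
 */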
/* Run keyboard factory test scanning */ #define EC_CMD_KEYBOARD_FACTORY_TEST 0x0068 struct ec_response_keyboard_factory_test { uint16_t shorted; /* Keyboard pins are shorted */ } __ec_align2; /* Fingerprint events in 'fp_events' for EC_MKBP_EVENT_FINGERPRINT */ #define EC_MKBP_FP_RAW_EVENT(fp_events) ((fp_events) & 0x00FFFFFF) #define EC_MKBP_FP_ERRCODE(fp_events) ((fp_events) & 0x0000000F) #define EC_MKBP_FP_ENROLL_PROGRESS_OFFSET 4 #define EC_MKBP_FP_ENROLL_PROGRESS(fpe) (((fpe) & 0x00000FF0) \ >> EC_MKBP_FP_ENROLL_PROGRESS_OFFSET) #define EC_MKBP_FP_MATCH_IDX_OFFSET 12 #define EC_MKBP_FP_MATCH_IDX_MASK 0x0000F000 #define EC_MKBP_FP_MATCH_IDX(fpe) (((fpe) & EC_MKBP_FP_MATCH_IDX_MASK) \ >> EC_MKBP_FP_MATCH_IDX_OFFSET) #define EC_MKBP_FP_ENROLL BIT(27) #define EC_MKBP_FP_MATCH BIT(28) #define EC_MKBP_FP_FINGER_DOWN BIT(29) #define EC_MKBP_FP_FINGER_UP BIT(30) #define EC_MKBP_FP_IMAGE_READY BIT(31) /* code given by EC_MKBP_FP_ERRCODE() when EC_MKBP_FP_ENROLL is set */ #define EC_MKBP_FP_ERR_ENROLL_OK 0 #define EC_MKBP_FP_ERR_ENROLL_LOW_QUALITY 1 #define EC_MKBP_FP_ERR_ENROLL_IMMOBILE 2 #define EC_MKBP_FP_ERR_ENROLL_LOW_COVERAGE 3 #define EC_MKBP_FP_ERR_ENROLL_INTERNAL 5 /* Can be used to detect if image was usable for enrollment or not. */ #define EC_MKBP_FP_ERR_ENROLL_PROBLEM_MASK 1 /* code given by EC_MKBP_FP_ERRCODE() when EC_MKBP_FP_MATCH is set */ #define EC_MKBP_FP_ERR_MATCH_NO 0 #define EC_MKBP_FP_ERR_MATCH_NO_INTERNAL 6 #define EC_MKBP_FP_ERR_MATCH_NO_TEMPLATES 7 #define EC_MKBP_FP_ERR_MATCH_NO_LOW_QUALITY 2 #define EC_MKBP_FP_ERR_MATCH_NO_LOW_COVERAGE 4 #define EC_MKBP_FP_ERR_MATCH_YES 1 #define EC_MKBP_FP_ERR_MATCH_YES_UPDATED 3 #define EC_MKBP_FP_ERR_MATCH_YES_UPDATE_FAILED 5 /*****************************************************************************/ /* Temperature sensor commands */ /* Read temperature sensor info */ #define EC_CMD_TEMP_SENSOR_GET_INFO 0x0070 struct ec_params_temp_sensor_get_info { uint8_t id; } __ec_align1; struct ec_response_temp_sensor_get_info { char sensor_name[32]; uint8_t sensor_type; } __ec_align1; /*****************************************************************************/ /* * Note: host commands 0x80 - 0x87 are reserved to avoid conflict with ACPI * commands accidentally sent to the wrong interface. See the ACPI section * below. */ /*****************************************************************************/ /* Host event commands */ /* Obsolete. New implementations should use EC_CMD_HOST_EVENT instead */ /* * Host event mask params and response structures, shared by all of the host * event commands below. */ struct ec_params_host_event_mask { uint32_t mask; } __ec_align4; struct ec_response_host_event_mask { uint32_t mask; } __ec_align4; /* These all use ec_response_host_event_mask */ #define EC_CMD_HOST_EVENT_GET_B 0x0087 #define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x0088 #define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x0089 #define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x008D /* These all use ec_params_host_event_mask */ #define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x008A #define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x008B #define EC_CMD_HOST_EVENT_CLEAR 0x008C #define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x008E #define EC_CMD_HOST_EVENT_CLEAR_B 0x008F /* * Unified host event programming interface - Should be used by newer versions * of BIOS/OS to program host events and masks */ struct ec_params_host_event { /* Action requested by host - one of enum ec_host_event_action. */ uint8_t action; /* * Mask type that the host requested the action on - one of * enum ec_host_event_mask_type.
*/ uint8_t mask_type; /* Set to 0, ignore on read */ uint16_t reserved; /* Value to be used in case of set operations. */ uint64_t value; } __ec_align4; /* * Response structure returned by EC_CMD_HOST_EVENT. * The value is updated on a GET request; it is set to 0 for SET/CLEAR. */ struct ec_response_host_event { /* Mask value in case of get operation */ uint64_t value; } __ec_align4; enum ec_host_event_action { /* * params.value is ignored. The value of mask_type is populated * in response.value. */ EC_HOST_EVENT_GET, /* Bits in params.value are set */ EC_HOST_EVENT_SET, /* Bits in params.value are cleared */ EC_HOST_EVENT_CLEAR, }; enum ec_host_event_mask_type { /* Main host event copy */ EC_HOST_EVENT_MAIN, /* Copy B of host events */ EC_HOST_EVENT_B, /* SCI Mask */ EC_HOST_EVENT_SCI_MASK, /* SMI Mask */ EC_HOST_EVENT_SMI_MASK, /* Mask of events that should always be reported in hostevents */ EC_HOST_EVENT_ALWAYS_REPORT_MASK, /* Active wake mask */ EC_HOST_EVENT_ACTIVE_WAKE_MASK, /* Lazy wake mask for S0ix */ EC_HOST_EVENT_LAZY_WAKE_MASK_S0IX, /* Lazy wake mask for S3 */ EC_HOST_EVENT_LAZY_WAKE_MASK_S3, /* Lazy wake mask for S5 */ EC_HOST_EVENT_LAZY_WAKE_MASK_S5, }; #define EC_CMD_HOST_EVENT 0x00A4 /*****************************************************************************/ /* Switch commands */ /* Enable/disable LCD backlight */ #define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x0090 struct ec_params_switch_enable_backlight { uint8_t enabled; } __ec_align1; /* Enable/disable WLAN/Bluetooth */ #define EC_CMD_SWITCH_ENABLE_WIRELESS 0x0091 #define EC_VER_SWITCH_ENABLE_WIRELESS 1 /* Version 0 params; no response */ struct ec_params_switch_enable_wireless_v0 { uint8_t enabled; } __ec_align1; /* Version 1 params */ struct ec_params_switch_enable_wireless_v1 { /* Flags to enable now */ uint8_t now_flags; /* Which flags to copy from now_flags */ uint8_t now_mask; /* * Flags to leave enabled in S3, if they're on at the S0->S3 * transition. (Other flags will be disabled by the S0->S3 * transition.) */ uint8_t suspend_flags; /* Which flags to copy from suspend_flags */ uint8_t suspend_mask; } __ec_align1; /* Version 1 response */ struct ec_response_switch_enable_wireless_v1 { /* Flags to enable now */ uint8_t now_flags; /* Flags to leave enabled in S3 */ uint8_t suspend_flags; } __ec_align1; /*****************************************************************************/ /* GPIO commands. Only available on the EC if write protect has been disabled. */ /* Set GPIO output value */ #define EC_CMD_GPIO_SET 0x0092 struct ec_params_gpio_set { char name[32]; uint8_t val; } __ec_align1; /* Get GPIO value */ #define EC_CMD_GPIO_GET 0x0093 /* Version 0 of input params and response */ struct ec_params_gpio_get { char name[32]; } __ec_align1; struct ec_response_gpio_get { uint8_t val; } __ec_align1; /* Version 1 of input params and response */ struct ec_params_gpio_get_v1 { uint8_t subcmd; union { struct __ec_align1 { char name[32]; } get_value_by_name; struct __ec_align1 { uint8_t index; } get_info; }; } __ec_align1; struct ec_response_gpio_get_v1 { union { struct __ec_align1 { uint8_t val; } get_value_by_name, get_count; struct __ec_todo_unpacked { uint8_t val; char name[32]; uint32_t flags; } get_info; }; } __ec_todo_packed; enum gpio_get_subcmd { EC_GPIO_GET_BY_NAME = 0, EC_GPIO_GET_COUNT = 1, EC_GPIO_GET_INFO = 2, };
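/*
 * Editor's illustrative sketch (not part of the original header): enumerating
 * GPIOs with version 1 of EC_CMD_GPIO_GET, following the count/info structs
 * above. The loop bookkeeping is this editor's own.
 *
 *	struct ec_params_gpio_get_v1 p = { .subcmd = EC_GPIO_GET_COUNT };
 *	// The reply's ec_response_gpio_get_v1.get_count.val is the number
 *	// of GPIOs; then, for each index 0 .. count-1:
 *	p.subcmd = EC_GPIO_GET_INFO;
 *	p.get_info.index = 0;
 *	// The reply's .get_info gives the GPIO's name, val and flags.
 */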
/*****************************************************************************/ /* I2C commands. Only available when flash write protect is unlocked. */ /* * CAUTION: These commands are deprecated, and are not supported anymore in EC * builds >= 8398.0.0 (see crosbug.com/p/23570). * * Use EC_CMD_I2C_PASSTHRU instead. */ /* Read I2C bus */ #define EC_CMD_I2C_READ 0x0094 struct ec_params_i2c_read { uint16_t addr; /* 8-bit address (7-bit shifted << 1) */ uint8_t read_size; /* Either 8 or 16. */ uint8_t port; uint8_t offset; } __ec_align_size1; struct ec_response_i2c_read { uint16_t data; } __ec_align2; /* Write I2C bus */ #define EC_CMD_I2C_WRITE 0x0095 struct ec_params_i2c_write { uint16_t data; uint16_t addr; /* 8-bit address (7-bit shifted << 1) */ uint8_t write_size; /* Either 8 or 16. */ uint8_t port; uint8_t offset; } __ec_align_size1; /*****************************************************************************/ /* Charge state commands. Only available when flash write protect unlocked. */ /* Force the charge state machine to stop charging the battery, or force it * to discharge the battery. */ #define EC_CMD_CHARGE_CONTROL 0x0096 #define EC_VER_CHARGE_CONTROL 1 enum ec_charge_control_mode { CHARGE_CONTROL_NORMAL = 0, CHARGE_CONTROL_IDLE, CHARGE_CONTROL_DISCHARGE, }; struct ec_params_charge_control { uint32_t mode; /* enum ec_charge_control_mode */ } __ec_align4; /*****************************************************************************/ /* Snapshot console output buffer for use by EC_CMD_CONSOLE_READ. */ #define EC_CMD_CONSOLE_SNAPSHOT 0x0097 /* * Read data from the saved snapshot. If the subcmd parameter is * CONSOLE_READ_NEXT, this will return data starting from the beginning of * the latest snapshot. If it is CONSOLE_READ_RECENT, it will start from the * end of the previous snapshot. * * The params are only looked at in version >= 1 of this command. Prior * versions will just default to CONSOLE_READ_NEXT behavior. * * The response is a null-terminated string; an empty string means there is * no more output remaining. */ #define EC_CMD_CONSOLE_READ 0x0098 enum ec_console_read_subcmd { CONSOLE_READ_NEXT = 0, CONSOLE_READ_RECENT }; struct ec_params_console_read_v1 { uint8_t subcmd; /* enum ec_console_read_subcmd */ } __ec_align1; /*****************************************************************************/ /* * Cut off battery power immediately or after the host has shut down. * * Returns EC_RES_INVALID_COMMAND if unsupported by a board/battery, * EC_RES_SUCCESS if the command was successful, * EC_RES_ERROR if the cut off command failed. */ #define EC_CMD_BATTERY_CUT_OFF 0x0099 #define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN BIT(0) struct ec_params_battery_cutoff { uint8_t flags; } __ec_align1; /*****************************************************************************/ /* USB port mux control. */ /* * Switch USB mux or return to automatic switching. */ #define EC_CMD_USB_MUX 0x009A struct ec_params_usb_mux { uint8_t mux; } __ec_align1; /*****************************************************************************/ /* LDOs / FETs control. */ enum ec_ldo_state { EC_LDO_STATE_OFF = 0, /* the LDO / FET is shut down */ EC_LDO_STATE_ON = 1, /* the LDO / FET is ON / providing power */ }; /* * Switch an LDO on/off. */ #define EC_CMD_LDO_SET 0x009B struct ec_params_ldo_set { uint8_t index; uint8_t state; } __ec_align1; /* * Get LDO state. */ #define EC_CMD_LDO_GET 0x009C struct ec_params_ldo_get { uint8_t index; } __ec_align1; struct ec_response_ldo_get { uint8_t state; } __ec_align1; /*****************************************************************************/ /* Power info. */ /* * Get power info.
*/ #define EC_CMD_POWER_INFO 0x009D struct ec_response_power_info { uint32_t usb_dev_type; uint16_t voltage_ac; uint16_t voltage_system; uint16_t current_system; uint16_t usb_current_limit; } __ec_align4; /*****************************************************************************/ /* I2C passthru command */ #define EC_CMD_I2C_PASSTHRU 0x009E /* Read data; if not present, message is a write */ #define EC_I2C_FLAG_READ BIT(15) /* Mask for address */ #define EC_I2C_ADDR_MASK 0x3ff #define EC_I2C_STATUS_NAK BIT(0) /* Transfer was not acknowledged */ #define EC_I2C_STATUS_TIMEOUT BIT(1) /* Timeout during transfer */ /* Any error */ #define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT) struct ec_params_i2c_passthru_msg { uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */ uint16_t len; /* Number of bytes to read or write */ } __ec_align2; struct ec_params_i2c_passthru { uint8_t port; /* I2C port number */ uint8_t num_msgs; /* Number of messages */ struct ec_params_i2c_passthru_msg msg[]; /* Data to write for all messages is concatenated here */ } __ec_align2; struct ec_response_i2c_passthru { uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) */ uint8_t num_msgs; /* Number of messages processed */ uint8_t data[]; /* Data read by messages concatenated here */ } __ec_align1; /*****************************************************************************/ /* Power button hang detect */ #define EC_CMD_HANG_DETECT 0x009F /* Reasons to start hang detection timer */ /* Power button pressed */ #define EC_HANG_START_ON_POWER_PRESS BIT(0) /* Lid closed */ #define EC_HANG_START_ON_LID_CLOSE BIT(1) /* Lid opened */ #define EC_HANG_START_ON_LID_OPEN BIT(2) /* Start of AP S3->S0 transition (booting or resuming from suspend) */ #define EC_HANG_START_ON_RESUME BIT(3) /* Reasons to cancel hang detection */ /* Power button released */ #define EC_HANG_STOP_ON_POWER_RELEASE BIT(8) /* Any host command from AP received */ #define EC_HANG_STOP_ON_HOST_COMMAND BIT(9) /* Stop on end of AP S0->S3 transition (suspending or shutting down) */ #define EC_HANG_STOP_ON_SUSPEND BIT(10) /* * If this flag is set, all the other fields are ignored, and the hang detect * timer is started. This provides the AP with a way to start the hang timer * without reconfiguring any of the other hang detect settings. Note that * you must previously have configured the timeouts. */ #define EC_HANG_START_NOW BIT(30) /* * If this flag is set, all the other fields are ignored (including * EC_HANG_START_NOW). This provides the AP with a way to stop the hang timer * without reconfiguring any of the other hang detect settings. */ #define EC_HANG_STOP_NOW BIT(31) struct ec_params_hang_detect { /* Flags; see EC_HANG_* */ uint32_t flags; /* Timeout in msec before generating host event, if enabled */ uint16_t host_event_timeout_msec; /* Timeout in msec before generating warm reboot, if enabled */ uint16_t warm_reboot_timeout_msec; } __ec_align4; /*****************************************************************************/ /* Commands for battery charging */ /* * This is the single catch-all host command to exchange data regarding the * charge state machine (v2 and up). */ #define EC_CMD_CHARGE_STATE 0x00A0 /* Subcommands for this host command */ enum charge_state_command { CHARGE_STATE_CMD_GET_STATE, CHARGE_STATE_CMD_GET_PARAM, CHARGE_STATE_CMD_SET_PARAM, CHARGE_STATE_NUM_CMDS };
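/*
 * Editor's illustrative sketch (not part of the original header): reading one
 * charger parameter through EC_CMD_CHARGE_STATE. It uses struct
 * ec_params_charge_state and the param numbers, both defined just below.
 *
 *	struct ec_params_charge_state p = {
 *		.cmd = CHARGE_STATE_CMD_GET_PARAM,
 *		.get_param.param = CS_PARAM_CHG_VOLTAGE,
 *	};
 *	// The reply's ec_response_charge_state.get_param.value holds the
 *	// charger voltage limit.
 */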
/* * Known param numbers are defined here. Ranges are reserved for * board-specific params, which are handled by the particular implementations. */ enum charge_state_params { CS_PARAM_CHG_VOLTAGE, /* charger voltage limit */ CS_PARAM_CHG_CURRENT, /* charger current limit */ CS_PARAM_CHG_INPUT_CURRENT, /* charger input current limit */ CS_PARAM_CHG_STATUS, /* charger-specific status */ CS_PARAM_CHG_OPTION, /* charger-specific options */ CS_PARAM_LIMIT_POWER, /* * Check if power is limited due to * low battery and / or a weak external * charger. READ ONLY. */ /* How many so far? */ CS_NUM_BASE_PARAMS, /* Range for CONFIG_CHARGER_PROFILE_OVERRIDE params */ CS_PARAM_CUSTOM_PROFILE_MIN = 0x10000, CS_PARAM_CUSTOM_PROFILE_MAX = 0x1ffff, /* Range for CONFIG_CHARGE_STATE_DEBUG params */ CS_PARAM_DEBUG_MIN = 0x20000, CS_PARAM_DEBUG_CTL_MODE = 0x20000, CS_PARAM_DEBUG_MANUAL_MODE, CS_PARAM_DEBUG_SEEMS_DEAD, CS_PARAM_DEBUG_SEEMS_DISCONNECTED, CS_PARAM_DEBUG_BATT_REMOVED, CS_PARAM_DEBUG_MANUAL_CURRENT, CS_PARAM_DEBUG_MANUAL_VOLTAGE, CS_PARAM_DEBUG_MAX = 0x2ffff, /* Other custom param ranges go here... */ }; struct ec_params_charge_state { uint8_t cmd; /* enum charge_state_command */ union { /* get_state has no args */ struct __ec_todo_unpacked { uint32_t param; /* enum charge_state_params */ } get_param; struct __ec_todo_unpacked { uint32_t param; /* param to set */ uint32_t value; /* value to set */ } set_param; }; } __ec_todo_packed; struct ec_response_charge_state { union { struct __ec_align4 { int ac; int chg_voltage; int chg_current; int chg_input_current; int batt_state_of_charge; } get_state; struct __ec_align4 { uint32_t value; } get_param; /* set_param returns no args */ }; } __ec_align4; /* * Set maximum battery charging current. */ #define EC_CMD_CHARGE_CURRENT_LIMIT 0x00A1 struct ec_params_current_limit { uint32_t limit; /* in mA */ } __ec_align4; /* * Set maximum external voltage / current. */ #define EC_CMD_EXTERNAL_POWER_LIMIT 0x00A2 /* Command v0 is used only on Spring and is obsolete + unsupported */ struct ec_params_external_power_limit_v1 { uint16_t current_lim; /* in mA, or EC_POWER_LIMIT_NONE to clear limit */ uint16_t voltage_lim; /* in mV, or EC_POWER_LIMIT_NONE to clear limit */ } __ec_align2; #define EC_POWER_LIMIT_NONE 0xffff /* * Set maximum voltage & current of a dedicated charge port */ #define EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT 0x00A3 struct ec_params_dedicated_charger_limit { uint16_t current_lim; /* in mA */ uint16_t voltage_lim; /* in mV */ } __ec_align2; /*****************************************************************************/ /* Hibernate/Deep Sleep Commands */ /* Set the delay before going into hibernation. */ #define EC_CMD_HIBERNATION_DELAY 0x00A8 struct ec_params_hibernation_delay { /* * Seconds to wait in G3 before hibernating. Pass in 0 to read the * current settings without changing them. */ uint32_t seconds; } __ec_align4; struct ec_response_hibernation_delay { /* * The current time in seconds in which the system has been in the G3 * state. This value is reset if the EC transitions out of G3. */ uint32_t time_g3; /* * The current time remaining in seconds until the EC should hibernate. * This value is also reset if the EC transitions out of G3. */ uint32_t time_remaining; /* * The current time in seconds that the EC should wait in G3 before * hibernating.
*/ uint32_t hibernate_delay; } __ec_align4; /* Inform the EC when entering a sleep state */ #define EC_CMD_HOST_SLEEP_EVENT 0x00A9 enum host_sleep_event { HOST_SLEEP_EVENT_S3_SUSPEND = 1, HOST_SLEEP_EVENT_S3_RESUME = 2, HOST_SLEEP_EVENT_S0IX_SUSPEND = 3, HOST_SLEEP_EVENT_S0IX_RESUME = 4, /* S3 suspend with additional enabled wake sources */ HOST_SLEEP_EVENT_S3_WAKEABLE_SUSPEND = 5, }; struct ec_params_host_sleep_event { uint8_t sleep_event; } __ec_align1; /* * Use a default timeout value (CONFIG_SLEEP_TIMEOUT_MS) for detecting sleep * transition failures */ #define EC_HOST_SLEEP_TIMEOUT_DEFAULT 0 /* Disable timeout detection for this sleep transition */ #define EC_HOST_SLEEP_TIMEOUT_INFINITE 0xFFFF struct ec_params_host_sleep_event_v1 { /* The type of sleep being entered or exited. */ uint8_t sleep_event; /* Padding */ uint8_t reserved; union { /* Parameters that apply for suspend messages. */ struct { /* * The timeout in milliseconds between when this message * is received and when the EC will declare sleep * transition failure if the sleep signal is not * asserted. */ uint16_t sleep_timeout_ms; } suspend_params; /* No parameters for non-suspend messages. */ }; } __ec_align2; /* A timeout occurred when this bit is set */ #define EC_HOST_RESUME_SLEEP_TIMEOUT 0x80000000 /* * The mask defining which bits correspond to the number of sleep transitions, * as well as the maximum number of suspend line transitions that will be * reported back to the host. */ #define EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK 0x7FFFFFFF struct ec_response_host_sleep_event_v1 { union { /* Response fields that apply for resume messages. */ struct { /* * The number of sleep power signal transitions that * occurred since the suspend message. The high bit * indicates a timeout occurred. */ uint32_t sleep_transitions; } resume_response; /* No response fields for non-resume messages. */ }; } __ec_align4; /*****************************************************************************/ /* Device events */ #define EC_CMD_DEVICE_EVENT 0x00AA enum ec_device_event { EC_DEVICE_EVENT_TRACKPAD, EC_DEVICE_EVENT_DSP, EC_DEVICE_EVENT_WIFI, }; enum ec_device_event_param { /* Get and clear pending device events */ EC_DEVICE_EVENT_PARAM_GET_CURRENT_EVENTS, /* Get device event mask */ EC_DEVICE_EVENT_PARAM_GET_ENABLED_EVENTS, /* Set device event mask */ EC_DEVICE_EVENT_PARAM_SET_ENABLED_EVENTS, }; #define EC_DEVICE_EVENT_MASK(event_code) BIT(event_code % 32) struct ec_params_device_event { uint32_t event_mask; uint8_t param; } __ec_align_size1; struct ec_response_device_event { uint32_t event_mask; } __ec_align4; /*****************************************************************************/ /* Smart battery pass-through */ /* Get / Set 16-bit smart battery registers */ #define EC_CMD_SB_READ_WORD 0x00B0 #define EC_CMD_SB_WRITE_WORD 0x00B1 /* Get / Set string smart battery parameters * formatted as SMBUS "block". */ #define EC_CMD_SB_READ_BLOCK 0x00B2 #define EC_CMD_SB_WRITE_BLOCK 0x00B3 struct ec_params_sb_rd { uint8_t reg; } __ec_align1; struct ec_response_sb_rd_word { uint16_t value; } __ec_align2; struct ec_params_sb_wr_word { uint8_t reg; uint16_t value; } __ec_align1; struct ec_response_sb_rd_block { uint8_t data[32]; } __ec_align1; struct ec_params_sb_wr_block { uint8_t reg; uint16_t data[32]; } __ec_align1; /*****************************************************************************/ /* Battery vendor parameters * * Get or set vendor-specific parameters in the battery. Implementations may * differ between boards or batteries. 
On a set operation, the response * contains the actual value set, which may be rounded or clipped from the * requested value. */ #define EC_CMD_BATTERY_VENDOR_PARAM 0x00B4 enum ec_battery_vendor_param_mode { BATTERY_VENDOR_PARAM_MODE_GET = 0, BATTERY_VENDOR_PARAM_MODE_SET, }; struct ec_params_battery_vendor_param { uint32_t param; uint32_t value; uint8_t mode; } __ec_align_size1; struct ec_response_battery_vendor_param { uint32_t value; } __ec_align4; /*****************************************************************************/ /* * Smart Battery Firmware Update Commands */ #define EC_CMD_SB_FW_UPDATE 0x00B5 enum ec_sb_fw_update_subcmd { EC_SB_FW_UPDATE_PREPARE = 0x0, EC_SB_FW_UPDATE_INFO = 0x1, /*query sb info */ EC_SB_FW_UPDATE_BEGIN = 0x2, /*check if protected */ EC_SB_FW_UPDATE_WRITE = 0x3, /*check if protected */ EC_SB_FW_UPDATE_END = 0x4, EC_SB_FW_UPDATE_STATUS = 0x5, EC_SB_FW_UPDATE_PROTECT = 0x6, EC_SB_FW_UPDATE_MAX = 0x7, }; #define SB_FW_UPDATE_CMD_WRITE_BLOCK_SIZE 32 #define SB_FW_UPDATE_CMD_STATUS_SIZE 2 #define SB_FW_UPDATE_CMD_INFO_SIZE 8 struct ec_sb_fw_update_header { uint16_t subcmd; /* enum ec_sb_fw_update_subcmd */ uint16_t fw_id; /* firmware id */ } __ec_align4; struct ec_params_sb_fw_update { struct ec_sb_fw_update_header hdr; union { /* EC_SB_FW_UPDATE_PREPARE = 0x0 */ /* EC_SB_FW_UPDATE_INFO = 0x1 */ /* EC_SB_FW_UPDATE_BEGIN = 0x2 */ /* EC_SB_FW_UPDATE_END = 0x4 */ /* EC_SB_FW_UPDATE_STATUS = 0x5 */ /* EC_SB_FW_UPDATE_PROTECT = 0x6 */ /* Those have no args */ /* EC_SB_FW_UPDATE_WRITE = 0x3 */ struct __ec_align4 { uint8_t data[SB_FW_UPDATE_CMD_WRITE_BLOCK_SIZE]; } write; }; } __ec_align4; struct ec_response_sb_fw_update { union { /* EC_SB_FW_UPDATE_INFO = 0x1 */ struct __ec_align1 { uint8_t data[SB_FW_UPDATE_CMD_INFO_SIZE]; } info; /* EC_SB_FW_UPDATE_STATUS = 0x5 */ struct __ec_align1 { uint8_t data[SB_FW_UPDATE_CMD_STATUS_SIZE]; } status; }; } __ec_align1; /* * Entering Verified Boot Mode Command * Default mode is VBOOT_MODE_NORMAL if EC did not receive this command. * Valid Modes are: normal, developer, and recovery. */ #define EC_CMD_ENTERING_MODE 0x00B6 struct ec_params_entering_mode { int vboot_mode; } __ec_align4; #define VBOOT_MODE_NORMAL 0 #define VBOOT_MODE_DEVELOPER 1 #define VBOOT_MODE_RECOVERY 2 /*****************************************************************************/ /* * I2C passthru protection command: Protects I2C tunnels against access on * certain addresses (board-specific). 
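 *
 * Illustrative sketch (send_ec_command() is an invented transport helper,
 * not part of this header): lock the tunnels on I2C port 0. The enable
 * subcommand returns no payload.
 *
 *	struct ec_params_i2c_passthru_protect p = {
 *		.subcmd = EC_CMD_I2C_PASSTHRU_PROTECT_ENABLE,
 *		.port = 0,
 *	};
 *
 *	send_ec_command(EC_CMD_I2C_PASSTHRU_PROTECT, 0,
 *			&p, sizeof(p), NULL, 0);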
*/ #define EC_CMD_I2C_PASSTHRU_PROTECT 0x00B7 enum ec_i2c_passthru_protect_subcmd { EC_CMD_I2C_PASSTHRU_PROTECT_STATUS = 0x0, EC_CMD_I2C_PASSTHRU_PROTECT_ENABLE = 0x1, }; struct ec_params_i2c_passthru_protect { uint8_t subcmd; uint8_t port; /* I2C port number */ } __ec_align1; struct ec_response_i2c_passthru_protect { uint8_t status; /* Status flags (0: unlocked, 1: locked) */ } __ec_align1; /*****************************************************************************/ /* * HDMI CEC commands * * These commands are for sending and receiving message via HDMI CEC */ #define MAX_CEC_MSG_LEN 16 /* CEC message from the AP to be written on the CEC bus */ #define EC_CMD_CEC_WRITE_MSG 0x00B8 /** * struct ec_params_cec_write - Message to write to the CEC bus * @msg: message content to write to the CEC bus */ struct ec_params_cec_write { uint8_t msg[MAX_CEC_MSG_LEN]; } __ec_align1; /* Set various CEC parameters */ #define EC_CMD_CEC_SET 0x00BA /** * struct ec_params_cec_set - CEC parameters set * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC * or 1 to enable CEC functionality, in case cmd is * CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical * address between 0 and 15 or 0xff to unregister */ struct ec_params_cec_set { uint8_t cmd; /* enum cec_command */ uint8_t val; } __ec_align1; /* Read various CEC parameters */ #define EC_CMD_CEC_GET 0x00BB /** * struct ec_params_cec_get - CEC parameters get * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS */ struct ec_params_cec_get { uint8_t cmd; /* enum cec_command */ } __ec_align1; /** * struct ec_response_cec_get - CEC parameters get response * @val: in case cmd was CEC_CMD_ENABLE, this field will 0 if CEC is * disabled or 1 if CEC functionality is enabled, * in case cmd was CEC_CMD_LOGICAL_ADDRESS, this will encode the * configured logical address between 0 and 15 or 0xff if unregistered */ struct ec_response_cec_get { uint8_t val; } __ec_align1; /* CEC parameters command */ enum cec_command { /* CEC reading, writing and events enable */ CEC_CMD_ENABLE, /* CEC logical address */ CEC_CMD_LOGICAL_ADDRESS, }; /* Events from CEC to AP */ enum mkbp_cec_event { /* Outgoing message was acknowledged by a follower */ EC_MKBP_CEC_SEND_OK = BIT(0), /* Outgoing message was not acknowledged */ EC_MKBP_CEC_SEND_FAILED = BIT(1), }; /*****************************************************************************/ /* Commands for I2S recording on audio codec. */ #define EC_CMD_CODEC_I2S 0x00BC #define EC_WOV_I2S_SAMPLE_RATE 48000 enum ec_codec_i2s_subcmd { EC_CODEC_SET_SAMPLE_DEPTH = 0x0, EC_CODEC_SET_GAIN = 0x1, EC_CODEC_GET_GAIN = 0x2, EC_CODEC_I2S_ENABLE = 0x3, EC_CODEC_I2S_SET_CONFIG = 0x4, EC_CODEC_I2S_SET_TDM_CONFIG = 0x5, EC_CODEC_I2S_SET_BCLK = 0x6, EC_CODEC_I2S_SUBCMD_COUNT = 0x7, }; enum ec_sample_depth_value { EC_CODEC_SAMPLE_DEPTH_16 = 0, EC_CODEC_SAMPLE_DEPTH_24 = 1, }; enum ec_i2s_config { EC_DAI_FMT_I2S = 0, EC_DAI_FMT_RIGHT_J = 1, EC_DAI_FMT_LEFT_J = 2, EC_DAI_FMT_PCM_A = 3, EC_DAI_FMT_PCM_B = 4, EC_DAI_FMT_PCM_TDM = 5, }; /* * For subcommand EC_CODEC_GET_GAIN. 
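 *
 * Illustrative sketch (send_ec_command() is an invented helper): read the
 * current per-channel gain; the response is a struct ec_codec_i2s_gain.
 *
 *	struct ec_param_codec_i2s p = { .cmd = EC_CODEC_GET_GAIN };
 *	struct ec_codec_i2s_gain gain;
 *
 *	send_ec_command(EC_CMD_CODEC_I2S, 0,
 *			&p, sizeof(p), &gain, sizeof(gain));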
*/ struct __ec_align1 ec_codec_i2s_gain { uint8_t left; uint8_t right; }; struct __ec_todo_unpacked ec_param_codec_i2s_tdm { int16_t ch0_delay; /* 0 to 496 */ int16_t ch1_delay; /* -1 to 496 */ uint8_t adjacent_to_ch0; uint8_t adjacent_to_ch1; }; struct __ec_todo_packed ec_param_codec_i2s { /* enum ec_codec_i2s_subcmd */ uint8_t cmd; union { /* * EC_CODEC_SET_SAMPLE_DEPTH * Value should be one of ec_sample_depth_value. */ uint8_t depth; /* * EC_CODEC_SET_GAIN * Value should be 0~43 for both channels. */ struct ec_codec_i2s_gain gain; /* * EC_CODEC_I2S_ENABLE * 1 to enable, 0 to disable. */ uint8_t i2s_enable; /* * EC_CODEC_I2S_SET_CONFIG * Value should be one of ec_i2s_config. */ uint8_t i2s_config; /* * EC_CODEC_I2S_SET_TDM_CONFIG * Value should be one of ec_i2s_config. */ struct ec_param_codec_i2s_tdm tdm_param; /* * EC_CODEC_I2S_SET_BCLK */ uint32_t bclk; }; }; /*****************************************************************************/ /* System commands */ /* * TODO(crosbug.com/p/23747): This is a confusing name, since it doesn't * necessarily reboot the EC. Rename to "image" or something similar? */ #define EC_CMD_REBOOT_EC 0x00D2 /* Command */ enum ec_reboot_cmd { EC_REBOOT_CANCEL = 0, /* Cancel a pending reboot */ EC_REBOOT_JUMP_RO = 1, /* Jump to RO without rebooting */ EC_REBOOT_JUMP_RW = 2, /* Jump to active RW without rebooting */ /* (command 3 was jump to RW-B) */ EC_REBOOT_COLD = 4, /* Cold-reboot */ EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */ EC_REBOOT_HIBERNATE = 6, /* Hibernate EC */ EC_REBOOT_HIBERNATE_CLEAR_AP_OFF = 7, /* and clears AP_OFF flag */ }; /* Flags for ec_params_reboot_ec.reboot_flags */ #define EC_REBOOT_FLAG_RESERVED0 BIT(0) /* Was recovery request */ #define EC_REBOOT_FLAG_ON_AP_SHUTDOWN BIT(1) /* Reboot after AP shutdown */ #define EC_REBOOT_FLAG_SWITCH_RW_SLOT BIT(2) /* Switch RW slot */ struct ec_params_reboot_ec { uint8_t cmd; /* enum ec_reboot_cmd */ uint8_t flags; /* See EC_REBOOT_FLAG_* */ } __ec_align1; /* * Get information on last EC panic. * * Returns variable-length platform-dependent panic information. See panic.h * for details. */ #define EC_CMD_GET_PANIC_INFO 0x00D3 /*****************************************************************************/ /* * Special commands * * These do not follow the normal rules for commands. See each command for * details. */ /* * Reboot NOW * * This command will work even when the EC LPC interface is busy, because the * reboot command is processed at interrupt level. Note that when the EC * reboots, the host will reboot too, so there is no response to this command. * * Use EC_CMD_REBOOT_EC to reboot the EC more politely. */ #define EC_CMD_REBOOT 0x00D1 /* Think "die" */ /* * Resend last response (not supported on LPC). * * Returns EC_RES_UNAVAILABLE if there is no response available - for example, * there was no previous command, or the previous command's response was too * big to save. */ #define EC_CMD_RESEND_RESPONSE 0x00DB /* * This header byte on a command indicate version 0. Any header byte less * than this means that we are talking to an old EC which doesn't support * versioning. In that case, we assume version 0. * * Header bytes greater than this indicate a later version. For example, * EC_CMD_VERSION0 + 1 means we are using version 1. * * The old EC interface must not use commands 0xdc or higher. */ #define EC_CMD_VERSION0 0x00DC /*****************************************************************************/ /* * PD commands * * These commands are for PD MCU communication. 
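 *
 * Illustrative sketch (send_ec_command() is an invented helper; the field
 * values here are only examples): the EC reports its state to the PD MCU
 * and reads the PD MCU status back.
 *
 *	struct ec_params_pd_status p = {
 *		.status = 0,
 *		.batt_soc = 50,			/* 50% state of charge */
 *		.charge_state = PD_CHARGE_MAX,
 *	};
 *	struct ec_response_pd_status r;
 *
 *	send_ec_command(EC_CMD_PD_EXCHANGE_STATUS, EC_VER_PD_EXCHANGE_STATUS,
 *			&p, sizeof(p), &r, sizeof(r));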
*/ /* EC to PD MCU exchange status command */ #define EC_CMD_PD_EXCHANGE_STATUS 0x0100 #define EC_VER_PD_EXCHANGE_STATUS 2 enum pd_charge_state { PD_CHARGE_NO_CHANGE = 0, /* Don't change charge state */ PD_CHARGE_NONE, /* No charging allowed */ PD_CHARGE_5V, /* 5V charging only */ PD_CHARGE_MAX /* Charge at max voltage */ }; /* Status of EC being sent to PD */ #define EC_STATUS_HIBERNATING BIT(0) struct ec_params_pd_status { uint8_t status; /* EC status */ int8_t batt_soc; /* battery state of charge */ uint8_t charge_state; /* charging state (from enum pd_charge_state) */ } __ec_align1; /* Status of PD being sent back to EC */ #define PD_STATUS_HOST_EVENT BIT(0) /* Forward host event to AP */ #define PD_STATUS_IN_RW BIT(1) /* Running RW image */ #define PD_STATUS_JUMPED_TO_IMAGE BIT(2) /* Current image was jumped to */ #define PD_STATUS_TCPC_ALERT_0 BIT(3) /* Alert active in port 0 TCPC */ #define PD_STATUS_TCPC_ALERT_1 BIT(4) /* Alert active in port 1 TCPC */ #define PD_STATUS_TCPC_ALERT_2 BIT(5) /* Alert active in port 2 TCPC */ #define PD_STATUS_TCPC_ALERT_3 BIT(6) /* Alert active in port 3 TCPC */ #define PD_STATUS_EC_INT_ACTIVE (PD_STATUS_TCPC_ALERT_0 | \ PD_STATUS_TCPC_ALERT_1 | \ PD_STATUS_HOST_EVENT) struct ec_response_pd_status { uint32_t curr_lim_ma; /* input current limit */ uint16_t status; /* PD MCU status */ int8_t active_charge_port; /* active charging port */ } __ec_align_size1; /* AP to PD MCU host event status command, cleared on read */ #define EC_CMD_PD_HOST_EVENT_STATUS 0x0104 /* PD MCU host event status bits */ #define PD_EVENT_UPDATE_DEVICE BIT(0) #define PD_EVENT_POWER_CHANGE BIT(1) #define PD_EVENT_IDENTITY_RECEIVED BIT(2) #define PD_EVENT_DATA_SWAP BIT(3) struct ec_response_host_event_status { uint32_t status; /* PD MCU host event status */ } __ec_align4; /* Set USB type-C port role and muxes */ #define EC_CMD_USB_PD_CONTROL 0x0101 enum usb_pd_control_role { USB_PD_CTRL_ROLE_NO_CHANGE = 0, USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */ USB_PD_CTRL_ROLE_TOGGLE_OFF = 2, USB_PD_CTRL_ROLE_FORCE_SINK = 3, USB_PD_CTRL_ROLE_FORCE_SOURCE = 4, USB_PD_CTRL_ROLE_FREEZE = 5, USB_PD_CTRL_ROLE_COUNT }; enum usb_pd_control_mux { USB_PD_CTRL_MUX_NO_CHANGE = 0, USB_PD_CTRL_MUX_NONE = 1, USB_PD_CTRL_MUX_USB = 2, USB_PD_CTRL_MUX_DP = 3, USB_PD_CTRL_MUX_DOCK = 4, USB_PD_CTRL_MUX_AUTO = 5, USB_PD_CTRL_MUX_COUNT }; enum usb_pd_control_swap { USB_PD_CTRL_SWAP_NONE = 0, USB_PD_CTRL_SWAP_DATA = 1, USB_PD_CTRL_SWAP_POWER = 2, USB_PD_CTRL_SWAP_VCONN = 3, USB_PD_CTRL_SWAP_COUNT }; struct ec_params_usb_pd_control { uint8_t port; uint8_t role; uint8_t mux; uint8_t swap; } __ec_align1; #define PD_CTRL_RESP_ENABLED_COMMS BIT(0) /* Communication enabled */ #define PD_CTRL_RESP_ENABLED_CONNECTED BIT(1) /* Device connected */ #define PD_CTRL_RESP_ENABLED_PD_CAPABLE BIT(2) /* Partner is PD capable */ #define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */ #define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */ #define PD_CTRL_RESP_ROLE_VCONN BIT(2) /* Vconn status */ #define PD_CTRL_RESP_ROLE_DR_POWER BIT(3) /* Partner is dualrole power */ #define PD_CTRL_RESP_ROLE_DR_DATA BIT(4) /* Partner is dualrole data */ #define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */ #define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powerd */ struct ec_response_usb_pd_control { uint8_t enabled; uint8_t role; uint8_t polarity; uint8_t state; } __ec_align1; struct ec_response_usb_pd_control_v1 { uint8_t enabled; uint8_t role; uint8_t polarity; char state[32]; } __ec_align1; /* Values 
representing usbc PD CC state */ #define USBC_PD_CC_NONE 0 /* No accessory connected */ #define USBC_PD_CC_NO_UFP 1 /* No UFP accessory connected */ #define USBC_PD_CC_AUDIO_ACC 2 /* Audio accessory connected */ #define USBC_PD_CC_DEBUG_ACC 3 /* Debug accessory connected */ #define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */ #define USBC_PD_CC_DFP_ATTACHED 5 /* DPF attached to usbc */ struct ec_response_usb_pd_control_v2 { uint8_t enabled; uint8_t role; uint8_t polarity; char state[32]; uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */ uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ /* CL:1500994 Current cable type */ uint8_t reserved_cable_type; } __ec_align1; #define EC_CMD_USB_PD_PORTS 0x0102 /* Maximum number of PD ports on a device, num_ports will be <= this */ #define EC_USB_PD_MAX_PORTS 8 struct ec_response_usb_pd_ports { uint8_t num_ports; } __ec_align1; #define EC_CMD_USB_PD_POWER_INFO 0x0103 #define PD_POWER_CHARGING_PORT 0xff struct ec_params_usb_pd_power_info { uint8_t port; } __ec_align1; enum usb_chg_type { USB_CHG_TYPE_NONE, USB_CHG_TYPE_PD, USB_CHG_TYPE_C, USB_CHG_TYPE_PROPRIETARY, USB_CHG_TYPE_BC12_DCP, USB_CHG_TYPE_BC12_CDP, USB_CHG_TYPE_BC12_SDP, USB_CHG_TYPE_OTHER, USB_CHG_TYPE_VBUS, USB_CHG_TYPE_UNKNOWN, USB_CHG_TYPE_DEDICATED, }; enum usb_power_roles { USB_PD_PORT_POWER_DISCONNECTED, USB_PD_PORT_POWER_SOURCE, USB_PD_PORT_POWER_SINK, USB_PD_PORT_POWER_SINK_NOT_CHARGING, }; struct usb_chg_measures { uint16_t voltage_max; uint16_t voltage_now; uint16_t current_max; uint16_t current_lim; } __ec_align2; struct ec_response_usb_pd_power_info { uint8_t role; uint8_t type; uint8_t dualrole; uint8_t reserved1; struct usb_chg_measures meas; uint32_t max_power; } __ec_align4; /* * This command will return the number of USB PD charge port + the number * of dedicated port present. * EC_CMD_USB_PD_PORTS does NOT include the dedicated ports */ #define EC_CMD_CHARGE_PORT_COUNT 0x0105 struct ec_response_charge_port_count { uint8_t port_count; } __ec_align1; /* Write USB-PD device FW */ #define EC_CMD_USB_PD_FW_UPDATE 0x0110 enum usb_pd_fw_update_cmds { USB_PD_FW_REBOOT, USB_PD_FW_FLASH_ERASE, USB_PD_FW_FLASH_WRITE, USB_PD_FW_ERASE_SIG, }; struct ec_params_usb_pd_fw_update { uint16_t dev_id; uint8_t cmd; uint8_t port; uint32_t size; /* Size to write in bytes */ /* Followed by data to write */ } __ec_align4; /* Write USB-PD Accessory RW_HASH table entry */ #define EC_CMD_USB_PD_RW_HASH_ENTRY 0x0111 /* RW hash is first 20 bytes of SHA-256 of RW section */ #define PD_RW_HASH_SIZE 20 struct ec_params_usb_pd_rw_hash_entry { uint16_t dev_id; uint8_t dev_rw_hash[PD_RW_HASH_SIZE]; uint8_t reserved; /* * For alignment of current_image * TODO(rspangler) but it's not aligned! * Should have been reserved[2]. 
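	 * (Presumably kept as-is because changing the layout now would
	 * break the wire format for devices already in the field.)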
*/ uint32_t current_image; /* One of ec_current_image */ } __ec_align1; /* Read USB-PD Accessory info */ #define EC_CMD_USB_PD_DEV_INFO 0x0112 struct ec_params_usb_pd_info_request { uint8_t port; } __ec_align1; /* Read USB-PD Device discovery info */ #define EC_CMD_USB_PD_DISCOVERY 0x0113 struct ec_params_usb_pd_discovery_entry { uint16_t vid; /* USB-IF VID */ uint16_t pid; /* USB-IF PID */ uint8_t ptype; /* product type (hub,periph,cable,ama) */ } __ec_align_size1; /* Override default charge behavior */ #define EC_CMD_PD_CHARGE_PORT_OVERRIDE 0x0114 /* Negative port parameters have special meaning */ enum usb_pd_override_ports { OVERRIDE_DONT_CHARGE = -2, OVERRIDE_OFF = -1, /* [0, CONFIG_USB_PD_PORT_COUNT): Port# */ }; struct ec_params_charge_port_override { int16_t override_port; /* Override port# */ } __ec_align2; /* * Read (and delete) one entry of PD event log. * TODO(crbug.com/751742): Make this host command more generic to accommodate * future non-PD logs that use the same internal EC event_log. */ #define EC_CMD_PD_GET_LOG_ENTRY 0x0115 struct ec_response_pd_log { uint32_t timestamp; /* relative timestamp in milliseconds */ uint8_t type; /* event type : see PD_EVENT_xx below */ uint8_t size_port; /* [7:5] port number [4:0] payload size in bytes */ uint16_t data; /* type-defined data payload */ uint8_t payload[0]; /* optional additional data payload: 0..16 bytes */ } __ec_align4; /* The timestamp is the microsecond counter shifted to get about a ms. */ #define PD_LOG_TIMESTAMP_SHIFT 10 /* 1 LSB = 1024us */ #define PD_LOG_SIZE_MASK 0x1f #define PD_LOG_PORT_MASK 0xe0 #define PD_LOG_PORT_SHIFT 5 #define PD_LOG_PORT_SIZE(port, size) (((port) << PD_LOG_PORT_SHIFT) | \ ((size) & PD_LOG_SIZE_MASK)) #define PD_LOG_PORT(size_port) ((size_port) >> PD_LOG_PORT_SHIFT) #define PD_LOG_SIZE(size_port) ((size_port) & PD_LOG_SIZE_MASK) /* PD event log : entry types */ /* PD MCU events */ #define PD_EVENT_MCU_BASE 0x00 #define PD_EVENT_MCU_CHARGE (PD_EVENT_MCU_BASE+0) #define PD_EVENT_MCU_CONNECT (PD_EVENT_MCU_BASE+1) /* Reserved for custom board event */ #define PD_EVENT_MCU_BOARD_CUSTOM (PD_EVENT_MCU_BASE+2) /* PD generic accessory events */ #define PD_EVENT_ACC_BASE 0x20 #define PD_EVENT_ACC_RW_FAIL (PD_EVENT_ACC_BASE+0) #define PD_EVENT_ACC_RW_ERASE (PD_EVENT_ACC_BASE+1) /* PD power supply events */ #define PD_EVENT_PS_BASE 0x40 #define PD_EVENT_PS_FAULT (PD_EVENT_PS_BASE+0) /* PD video dongles events */ #define PD_EVENT_VIDEO_BASE 0x60 #define PD_EVENT_VIDEO_DP_MODE (PD_EVENT_VIDEO_BASE+0) #define PD_EVENT_VIDEO_CODEC (PD_EVENT_VIDEO_BASE+1) /* Returned in the "type" field, when there is no entry available */ #define PD_EVENT_NO_ENTRY 0xff /* * PD_EVENT_MCU_CHARGE event definition : * the payload is "struct usb_chg_measures" * the data field contains the port state flags as defined below : */ /* Port partner is a dual role device */ #define CHARGE_FLAGS_DUAL_ROLE BIT(15) /* Port is the pending override port */ #define CHARGE_FLAGS_DELAYED_OVERRIDE BIT(14) /* Port is the override port */ #define CHARGE_FLAGS_OVERRIDE BIT(13) /* Charger type */ #define CHARGE_FLAGS_TYPE_SHIFT 3 #define CHARGE_FLAGS_TYPE_MASK (0xf << CHARGE_FLAGS_TYPE_SHIFT) /* Power delivery role */ #define CHARGE_FLAGS_ROLE_MASK (7 << 0) /* * PD_EVENT_PS_FAULT data field flags definition : */ #define PS_FAULT_OCP 1 #define PS_FAULT_FAST_OCP 2 #define PS_FAULT_OVP 3 #define PS_FAULT_DISCH 4 /* * PD_EVENT_VIDEO_CODEC payload is "struct mcdp_info". 
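 *
 * Illustrative decode sketch for entries returned by
 * EC_CMD_PD_GET_LOG_ENTRY (the function name is invented):
 *
 *	void show_pd_log_entry(const struct ec_response_pd_log *e)
 *	{
 *		int port = PD_LOG_PORT(e->size_port);
 *		int payload_len = PD_LOG_SIZE(e->size_port);
 *		/* 1 timestamp LSB = 1024us, so this is approximate */
 *		uint64_t approx_us =
 *			(uint64_t)e->timestamp << PD_LOG_TIMESTAMP_SHIFT;
 *		/* e->payload holds payload_len bytes of type-defined data */
 *	}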
*/ struct mcdp_version { uint8_t major; uint8_t minor; uint16_t build; } __ec_align4; struct mcdp_info { uint8_t family[2]; uint8_t chipid[2]; struct mcdp_version irom; struct mcdp_version fw; } __ec_align4; /* struct mcdp_info field decoding */ #define MCDP_CHIPID(chipid) ((chipid[0] << 8) | chipid[1]) #define MCDP_FAMILY(family) ((family[0] << 8) | family[1]) /* Get/Set USB-PD Alternate mode info */ #define EC_CMD_USB_PD_GET_AMODE 0x0116 struct ec_params_usb_pd_get_mode_request { uint16_t svid_idx; /* SVID index to get */ uint8_t port; /* port */ } __ec_align_size1; struct ec_params_usb_pd_get_mode_response { uint16_t svid; /* SVID */ uint16_t opos; /* Object Position */ uint32_t vdo[6]; /* Mode VDOs */ } __ec_align4; #define EC_CMD_USB_PD_SET_AMODE 0x0117 enum pd_mode_cmd { PD_EXIT_MODE = 0, PD_ENTER_MODE = 1, /* Not a command. Do NOT remove. */ PD_MODE_CMD_COUNT, }; struct ec_params_usb_pd_set_mode_request { uint32_t cmd; /* enum pd_mode_cmd */ uint16_t svid; /* SVID to set */ uint8_t opos; /* Object Position */ uint8_t port; /* port */ } __ec_align4; /* Ask the PD MCU to record a log of a requested type */ #define EC_CMD_PD_WRITE_LOG_ENTRY 0x0118 struct ec_params_pd_write_log_entry { uint8_t type; /* event type : see PD_EVENT_xx above */ uint8_t port; /* port#, or 0 for events unrelated to a given port */ } __ec_align1; /* Control USB-PD chip */ #define EC_CMD_PD_CONTROL 0x0119 enum ec_pd_control_cmd { PD_SUSPEND = 0, /* Suspend the PD chip (EC: stop talking to PD) */ PD_RESUME, /* Resume the PD chip (EC: start talking to PD) */ PD_RESET, /* Force reset the PD chip */ PD_CONTROL_DISABLE, /* Disable further calls to this command */ PD_CHIP_ON, /* Power on the PD chip */ }; struct ec_params_pd_control { uint8_t chip; /* chip id */ uint8_t subcmd; } __ec_align1; /* Get info about USB-C SS muxes */ #define EC_CMD_USB_PD_MUX_INFO 0x011A struct ec_params_usb_pd_mux_info { uint8_t port; /* USB-C port number */ } __ec_align1; /* Flags representing mux state */ #define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ #define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ #define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ #define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ #define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ struct ec_response_usb_pd_mux_info { uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */ } __ec_align1; #define EC_CMD_PD_CHIP_INFO 0x011B struct ec_params_pd_chip_info { uint8_t port; /* USB-C port number */ uint8_t renew; /* Force renewal */ } __ec_align1; struct ec_response_pd_chip_info { uint16_t vendor_id; uint16_t product_id; uint16_t device_id; union { uint8_t fw_version_string[8]; uint64_t fw_version_number; }; } __ec_align2; struct ec_response_pd_chip_info_v1 { uint16_t vendor_id; uint16_t product_id; uint16_t device_id; union { uint8_t fw_version_string[8]; uint64_t fw_version_number; }; union { uint8_t min_req_fw_version_string[8]; uint64_t min_req_fw_version_number; }; } __ec_align2; /* Run RW signature verification and get status */ #define EC_CMD_RWSIG_CHECK_STATUS 0x011C struct ec_response_rwsig_check_status { uint32_t status; } __ec_align4; /* For controlling RWSIG task */ #define EC_CMD_RWSIG_ACTION 0x011D enum rwsig_action { RWSIG_ACTION_ABORT = 0, /* Abort RWSIG and prevent jumping */ RWSIG_ACTION_CONTINUE = 1, /* Jump to RW immediately */ }; struct ec_params_rwsig_action { uint32_t action; } __ec_align4; /* Run verification on a slot */ #define EC_CMD_EFS_VERIFY 0x011E struct ec_params_efs_verify { uint8_t 
region; /* enum ec_flash_region */ } __ec_align1; /* * Retrieve info from Cros Board Info store. Response is based on the data * type. Integers return a uint32. Strings return a string, using the response * size to determine how big it is. */ #define EC_CMD_GET_CROS_BOARD_INFO 0x011F /* * Write info into Cros Board Info on EEPROM. Write fails if the board has * hardware write-protect enabled. */ #define EC_CMD_SET_CROS_BOARD_INFO 0x0120 enum cbi_data_tag { CBI_TAG_BOARD_VERSION = 0, /* uint32_t or smaller */ CBI_TAG_OEM_ID = 1, /* uint32_t or smaller */ CBI_TAG_SKU_ID = 2, /* uint32_t or smaller */ CBI_TAG_DRAM_PART_NUM = 3, /* variable length ascii, nul terminated. */ CBI_TAG_OEM_NAME = 4, /* variable length ascii, nul terminated. */ CBI_TAG_MODEL_ID = 5, /* uint32_t or smaller */ CBI_TAG_COUNT, }; /* * Flags to control read operation * * RELOAD: Invalidate cache and read data from EEPROM. Useful to verify * write was successful without reboot. */ #define CBI_GET_RELOAD BIT(0) struct ec_params_get_cbi { uint32_t tag; /* enum cbi_data_tag */ uint32_t flag; /* CBI_GET_* */ } __ec_align4; /* * Flags to control write behavior. * * NO_SYNC: Makes EC update data in RAM but skip writing to EEPROM. It's * useful when writing multiple fields in a row. * INIT: Need to be set when creating a new CBI from scratch. All fields * will be initialized to zero first. */ #define CBI_SET_NO_SYNC BIT(0) #define CBI_SET_INIT BIT(1) struct ec_params_set_cbi { uint32_t tag; /* enum cbi_data_tag */ uint32_t flag; /* CBI_SET_* */ uint32_t size; /* Data size */ uint8_t data[]; /* For string and raw data */ } __ec_align1; /* * Information about resets of the AP by the EC and the EC's own uptime. */ #define EC_CMD_GET_UPTIME_INFO 0x0121 struct ec_response_uptime_info { /* * Number of milliseconds since the last EC boot. Sysjump resets * typically do not restart the EC's time_since_boot epoch. * * WARNING: The EC's sense of time is much less accurate than the AP's * sense of time, in both phase and frequency. This timebase is similar * to CLOCK_MONOTONIC_RAW, but with 1% or more frequency error. */ uint32_t time_since_ec_boot_ms; /* * Number of times the AP was reset by the EC since the last EC boot. * Note that the AP may be held in reset by the EC during the initial * boot sequence, such that the very first AP boot may count as more * than one here. */ uint32_t ap_resets_since_ec_boot; /* * The set of flags which describe the EC's most recent reset. See * include/system.h RESET_FLAG_* for details. */ uint32_t ec_reset_flags; /* Empty log entries have both the cause and timestamp set to zero. */ struct ap_reset_log_entry { /* * See include/chipset.h: enum chipset_{reset,shutdown}_reason * for details. */ uint16_t reset_cause; /* Reserved for protocol growth. */ uint16_t reserved; /* * The time of the reset's assertion, in milliseconds since the * last EC boot, in the same epoch as time_since_ec_boot_ms. * Set to zero if the log entry is empty. */ uint32_t reset_time_ms; } recent_ap_reset[4]; } __ec_align4; /* * Add entropy to the device secret (stored in the rollback region). * * Depending on the chip, the operation may take a long time (e.g. to erase * flash), so the commands are asynchronous. */ #define EC_CMD_ADD_ENTROPY 0x0122 enum add_entropy_action { /* Add entropy to the current secret. */ ADD_ENTROPY_ASYNC = 0, /* * Add entropy, and also make sure that the previous secret is erased. * (this can be implemented by adding entropy multiple times until * all rolback blocks have been overwritten). 
*/ ADD_ENTROPY_RESET_ASYNC = 1, /* Read back result from the previous operation. */ ADD_ENTROPY_GET_RESULT = 2, }; struct ec_params_rollback_add_entropy { uint8_t action; } __ec_align1; /* * Perform a single read of a given ADC channel. */ #define EC_CMD_ADC_READ 0x0123 struct ec_params_adc_read { uint8_t adc_channel; } __ec_align1; struct ec_response_adc_read { int32_t adc_value; } __ec_align4; /* * Read back rollback info */ #define EC_CMD_ROLLBACK_INFO 0x0124 struct ec_response_rollback_info { int32_t id; /* Incrementing number to indicate which region to use. */ int32_t rollback_min_version; int32_t rw_rollback_version; } __ec_align4; /* Issue AP reset */ #define EC_CMD_AP_RESET 0x0125 /*****************************************************************************/ /* The command range 0x200-0x2FF is reserved for Rotor. */ /*****************************************************************************/ /* * Reserve a range of host commands for the CR51 firmware. */ #define EC_CMD_CR51_BASE 0x0300 #define EC_CMD_CR51_LAST 0x03FF /*****************************************************************************/ /* Fingerprint MCU commands: range 0x0400-0x040x */ /* Fingerprint SPI sensor passthru command: prototyping ONLY */ #define EC_CMD_FP_PASSTHRU 0x0400 #define EC_FP_FLAG_NOT_COMPLETE 0x1 struct ec_params_fp_passthru { uint16_t len; /* Number of bytes to write then read */ uint16_t flags; /* EC_FP_FLAG_xxx */ uint8_t data[]; /* Data to send */ } __ec_align2; /* Configure the Fingerprint MCU behavior */ #define EC_CMD_FP_MODE 0x0402 /* Put the sensor in its lowest power mode */ #define FP_MODE_DEEPSLEEP BIT(0) /* Wait to see a finger on the sensor */ #define FP_MODE_FINGER_DOWN BIT(1) /* Poll until the finger has left the sensor */ #define FP_MODE_FINGER_UP BIT(2) /* Capture the current finger image */ #define FP_MODE_CAPTURE BIT(3) /* Finger enrollment session on-going */ #define FP_MODE_ENROLL_SESSION BIT(4) /* Enroll the current finger image */ #define FP_MODE_ENROLL_IMAGE BIT(5) /* Try to match the current finger image */ #define FP_MODE_MATCH BIT(6) /* Reset and re-initialize the sensor. */ #define FP_MODE_RESET_SENSOR BIT(7) /* special value: don't change anything just read back current mode */ #define FP_MODE_DONT_CHANGE BIT(31) #define FP_VALID_MODES (FP_MODE_DEEPSLEEP | \ FP_MODE_FINGER_DOWN | \ FP_MODE_FINGER_UP | \ FP_MODE_CAPTURE | \ FP_MODE_ENROLL_SESSION | \ FP_MODE_ENROLL_IMAGE | \ FP_MODE_MATCH | \ FP_MODE_RESET_SENSOR | \ FP_MODE_DONT_CHANGE) /* Capture types defined in bits [30..28] */ #define FP_MODE_CAPTURE_TYPE_SHIFT 28 #define FP_MODE_CAPTURE_TYPE_MASK (0x7 << FP_MODE_CAPTURE_TYPE_SHIFT) /* * This enum must remain ordered, if you add new values you must ensure that * FP_CAPTURE_TYPE_MAX is still the last one. */ enum fp_capture_type { /* Full blown vendor-defined capture (produces 'frame_size' bytes) */ FP_CAPTURE_VENDOR_FORMAT = 0, /* Simple raw image capture (produces width x height x bpp bits) */ FP_CAPTURE_SIMPLE_IMAGE = 1, /* Self test pattern (e.g. checkerboard) */ FP_CAPTURE_PATTERN0 = 2, /* Self test pattern (e.g. 
inverted checkerboard) */ FP_CAPTURE_PATTERN1 = 3, /* Capture for Quality test with fixed contrast */ FP_CAPTURE_QUALITY_TEST = 4, /* Capture for pixel reset value test */ FP_CAPTURE_RESET_TEST = 5, FP_CAPTURE_TYPE_MAX, }; /* Extracts the capture type from the sensor 'mode' word */ #define FP_CAPTURE_TYPE(mode) (((mode) & FP_MODE_CAPTURE_TYPE_MASK) \ >> FP_MODE_CAPTURE_TYPE_SHIFT) struct ec_params_fp_mode { uint32_t mode; /* as defined by FP_MODE_ constants */ } __ec_align4; struct ec_response_fp_mode { uint32_t mode; /* as defined by FP_MODE_ constants */ } __ec_align4; /* Retrieve Fingerprint sensor information */ #define EC_CMD_FP_INFO 0x0403 /* Number of dead pixels detected on the last maintenance */ #define FP_ERROR_DEAD_PIXELS(errors) ((errors) & 0x3FF) /* Unknown number of dead pixels detected on the last maintenance */ #define FP_ERROR_DEAD_PIXELS_UNKNOWN (0x3FF) /* No interrupt from the sensor */ #define FP_ERROR_NO_IRQ BIT(12) /* SPI communication error */ #define FP_ERROR_SPI_COMM BIT(13) /* Invalid sensor Hardware ID */ #define FP_ERROR_BAD_HWID BIT(14) /* Sensor initialization failed */ #define FP_ERROR_INIT_FAIL BIT(15) struct ec_response_fp_info_v0 { /* Sensor identification */ uint32_t vendor_id; uint32_t product_id; uint32_t model_id; uint32_t version; /* Image frame characteristics */ uint32_t frame_size; uint32_t pixel_format; /* using V4L2_PIX_FMT_ */ uint16_t width; uint16_t height; uint16_t bpp; uint16_t errors; /* see FP_ERROR_ flags above */ } __ec_align4; struct ec_response_fp_info { /* Sensor identification */ uint32_t vendor_id; uint32_t product_id; uint32_t model_id; uint32_t version; /* Image frame characteristics */ uint32_t frame_size; uint32_t pixel_format; /* using V4L2_PIX_FMT_ */ uint16_t width; uint16_t height; uint16_t bpp; uint16_t errors; /* see FP_ERROR_ flags above */ /* Template/finger current information */ uint32_t template_size; /* max template size in bytes */ uint16_t template_max; /* maximum number of fingers/templates */ uint16_t template_valid; /* number of valid fingers/templates */ uint32_t template_dirty; /* bitmap of templates with MCU side changes */ uint32_t template_version; /* version of the template format */ } __ec_align4; /* Get the last captured finger frame or a template content */ #define EC_CMD_FP_FRAME 0x0404 /* constants defining the 'offset' field which also contains the frame index */ #define FP_FRAME_INDEX_SHIFT 28 /* Frame buffer where the captured image is stored */ #define FP_FRAME_INDEX_RAW_IMAGE 0 /* First frame buffer holding a template */ #define FP_FRAME_INDEX_TEMPLATE 1 #define FP_FRAME_GET_BUFFER_INDEX(offset) ((offset) >> FP_FRAME_INDEX_SHIFT) #define FP_FRAME_OFFSET_MASK 0x0FFFFFFF /* Version of the format of the encrypted templates. */ #define FP_TEMPLATE_FORMAT_VERSION 3 /* Constants for encryption parameters */ #define FP_CONTEXT_NONCE_BYTES 12 #define FP_CONTEXT_USERID_WORDS (32 / sizeof(uint32_t)) #define FP_CONTEXT_TAG_BYTES 16 #define FP_CONTEXT_SALT_BYTES 16 #define FP_CONTEXT_TPM_BYTES 32 struct ec_fp_template_encryption_metadata { /* * Version of the structure format (N=3). */ uint16_t struct_version; /* Reserved bytes, set to 0. */ uint16_t reserved; /* * The salt is *only* ever used for key derivation. The nonce is unique, * a different one is used for every message. 
*/ uint8_t nonce[FP_CONTEXT_NONCE_BYTES]; uint8_t salt[FP_CONTEXT_SALT_BYTES]; uint8_t tag[FP_CONTEXT_TAG_BYTES]; }; struct ec_params_fp_frame { /* * The offset contains the template index or FP_FRAME_INDEX_RAW_IMAGE * in the high nibble, and the real offset within the frame in * FP_FRAME_OFFSET_MASK. */ uint32_t offset; uint32_t size; } __ec_align4; /* Load a template into the MCU */ #define EC_CMD_FP_TEMPLATE 0x0405 /* Flag in the 'size' field indicating that the full template has been sent */ #define FP_TEMPLATE_COMMIT 0x80000000 struct ec_params_fp_template { uint32_t offset; uint32_t size; uint8_t data[]; } __ec_align4; /* Clear the current fingerprint user context and set a new one */ #define EC_CMD_FP_CONTEXT 0x0406 struct ec_params_fp_context { uint32_t userid[FP_CONTEXT_USERID_WORDS]; } __ec_align4; #define EC_CMD_FP_STATS 0x0407 #define FPSTATS_CAPTURE_INV BIT(0) #define FPSTATS_MATCHING_INV BIT(1) struct ec_response_fp_stats { uint32_t capture_time_us; uint32_t matching_time_us; uint32_t overall_time_us; struct { uint32_t lo; uint32_t hi; } overall_t0; uint8_t timestamps_invalid; int8_t template_matched; } __ec_align2; #define EC_CMD_FP_SEED 0x0408 struct ec_params_fp_seed { /* * Version of the structure format (N=3). */ uint16_t struct_version; /* Reserved bytes, set to 0. */ uint16_t reserved; /* Seed from the TPM. */ uint8_t seed[FP_CONTEXT_TPM_BYTES]; } __ec_align4; #define EC_CMD_FP_ENC_STATUS 0x0409 /* FP TPM seed has been set or not */ #define FP_ENC_STATUS_SEED_SET BIT(0) struct ec_response_fp_encryption_status { /* Used bits in encryption engine status */ uint32_t valid_flags; /* Encryption engine status */ uint32_t status; } __ec_align4; /*****************************************************************************/ /* Touchpad MCU commands: range 0x0500-0x05FF */ /* Perform touchpad self test */ #define EC_CMD_TP_SELF_TEST 0x0500 /* Get number of frame types, and the size of each type */ #define EC_CMD_TP_FRAME_INFO 0x0501 struct ec_response_tp_frame_info { uint32_t n_frames; uint32_t frame_sizes[0]; } __ec_align4; /* Create a snapshot of current frame readings */ #define EC_CMD_TP_FRAME_SNAPSHOT 0x0502 /* Read the frame */ #define EC_CMD_TP_FRAME_GET 0x0503 struct ec_params_tp_frame_get { uint32_t frame_index; uint32_t offset; uint32_t size; } __ec_align4; /*****************************************************************************/ /* EC-EC communication commands: range 0x0600-0x06FF */ #define EC_COMM_TEXT_MAX 8 /* * Get battery static information, i.e. information that never changes, or * very infrequently. */ #define EC_CMD_BATTERY_GET_STATIC 0x0600 /** * struct ec_params_battery_static_info - Battery static info parameters * @index: Battery index. */ struct ec_params_battery_static_info { uint8_t index; } __ec_align_size1; /** * struct ec_response_battery_static_info - Battery static info response * @design_capacity: Battery Design Capacity (mAh) * @design_voltage: Battery Design Voltage (mV) * @manufacturer: Battery Manufacturer String * @model: Battery Model Number String * @serial: Battery Serial Number String * @type: Battery Type String * @cycle_count: Battery Cycle Count */ struct ec_response_battery_static_info { uint16_t design_capacity; uint16_t design_voltage; char manufacturer[EC_COMM_TEXT_MAX]; char model[EC_COMM_TEXT_MAX]; char serial[EC_COMM_TEXT_MAX]; char type[EC_COMM_TEXT_MAX]; /* TODO(crbug.com/795991): Consider moving to dynamic structure. */ uint32_t cycle_count; } __ec_align4; /* * Get battery dynamic information, i.e. 
information that is likely to change * every time it is read. */ #define EC_CMD_BATTERY_GET_DYNAMIC 0x0601 /** * struct ec_params_battery_dynamic_info - Battery dynamic info parameters * @index: Battery index. */ struct ec_params_battery_dynamic_info { uint8_t index; } __ec_align_size1; /** * struct ec_response_battery_dynamic_info - Battery dynamic info response * @actual_voltage: Battery voltage (mV) * @actual_current: Battery current (mA); negative=discharging * @remaining_capacity: Remaining capacity (mAh) * @full_capacity: Capacity (mAh, might change occasionally) * @flags: Flags, see EC_BATT_FLAG_* * @desired_voltage: Charging voltage desired by battery (mV) * @desired_current: Charging current desired by battery (mA) */ struct ec_response_battery_dynamic_info { int16_t actual_voltage; int16_t actual_current; int16_t remaining_capacity; int16_t full_capacity; int16_t flags; int16_t desired_voltage; int16_t desired_current; } __ec_align2; /* * Control charger chip. Used to control charger chip on the slave. */ #define EC_CMD_CHARGER_CONTROL 0x0602 /** * struct ec_params_charger_control - Charger control parameters * @max_current: Charger current (mA). Positive to allow base to draw up to * max_current and (possibly) charge battery, negative to request current * from base (OTG). * @otg_voltage: Voltage (mV) to use in OTG mode, ignored if max_current is * >= 0. * @allow_charging: Allow base battery charging (only makes sense if * max_current > 0). */ struct ec_params_charger_control { int16_t max_current; uint16_t otg_voltage; uint8_t allow_charging; } __ec_align_size1; /*****************************************************************************/ /* * Reserve a range of host commands for board-specific, experimental, or * special purpose features. These can be (re)used without updating this file. * * CAUTION: Don't go nuts with this. Shipping products should document ALL * their EC commands for easier development, testing, debugging, and support. * * All commands MUST be #defined to be 4-digit UPPER CASE hex values * (e.g., 0x00AB, not 0xab) for CONFIG_HOSTCMD_SECTION_SORTED to work. * * In your experimental code, you may want to do something like this: * * #define EC_CMD_MAGIC_FOO 0x0000 * #define EC_CMD_MAGIC_BAR 0x0001 * #define EC_CMD_MAGIC_HEY 0x0002 * * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_FOO, magic_foo_handler, * EC_VER_MASK(0); * * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_BAR, magic_bar_handler, * EC_VER_MASK(0); * * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_HEY, magic_hey_handler, * EC_VER_MASK(0); */ #define EC_CMD_BOARD_SPECIFIC_BASE 0x3E00 #define EC_CMD_BOARD_SPECIFIC_LAST 0x3FFF /* * Given the private host command offset, calculate the true private host * command value. */ #define EC_PRIVATE_HOST_COMMAND_VALUE(command) \ (EC_CMD_BOARD_SPECIFIC_BASE + (command)) /*****************************************************************************/ /* * Passthru commands * * Some platforms have sub-processors chained to each other. For example. * * AP <--> EC <--> PD MCU * * The top 2 bits of the command number are used to indicate which device the * command is intended for. Device 0 is always the device receiving the * command; other device mapping is board-specific. * * When a device receives a command to be passed to a sub-processor, it passes * it on with the device number set back to 0. This allows the sub-processor * to remain blissfully unaware of whether the command originated on the next * device up the chain, or was passed through from the AP. 
* * In the above example, if the AP wants to send command 0x0002 to the PD MCU, * AP sends command 0x4002 to the EC * EC sends command 0x0002 to the PD MCU * EC forwards PD MCU response back to the AP */ /* Offset and max command number for sub-device n */ #define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n)) #define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff) /*****************************************************************************/ /* * Deprecated constants. These constants have been renamed for clarity. The * meaning and size has not changed. Programs that use the old names should * switch to the new names soon, as the old names may not be carried forward * forever. */ #define EC_HOST_PARAM_SIZE EC_PROTO2_MAX_PARAM_SIZE #define EC_LPC_ADDR_OLD_PARAM EC_HOST_CMD_REGION1 #define EC_OLD_PARAM_SIZE EC_HOST_CMD_REGION_SIZE #endif /* __CROS_EC_COMMANDS_H */ platform_data/mcs.h 0000644 00000001265 14722070374 0010326 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd * Author: Joonyoung Shim <jy0922.shim@samsung.com> * Author: HeungJun Kim <riverful.kim@samsung.com> */ #ifndef __LINUX_MCS_H #define __LINUX_MCS_H #define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff)) #define MCS_KEY_VAL(v) (((v) >> 16) & 0xff) #define MCS_KEY_CODE(v) ((v) & 0xffff) struct mcs_platform_data { void (*poweron)(bool); void (*cfg_pin)(void); /* touchscreen */ unsigned int x_size; unsigned int y_size; /* touchkey */ const u32 *keymap; unsigned int keymap_size; unsigned int key_maxval; bool no_autorepeat; }; #endif /* __LINUX_MCS_H */ platform_data/omap1_bl.h 0000644 00000000345 14722070374 0011234 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __OMAP1_BL_H__ #define __OMAP1_BL_H__ #include <linux/device.h> struct omap_backlight_config { int default_intensity; int (*set_power)(struct device *dev, int state); }; #endif platform_data/mv88e6xxx.h 0000644 00000000640 14722070374 0011345 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __DSA_MV88E6XXX_H #define __DSA_MV88E6XXX_H #include <linux/platform_data/dsa.h> struct dsa_mv88e6xxx_pdata { /* Must be first, such that dsa_register_switch() can access this * without gory pointer manipulations */ struct dsa_chip_data cd; const char *compatible; unsigned int enabled_ports; struct net_device *netdev; u32 eeprom_len; int irq; }; #endif platform_data/usb-omap.h 0000644 00000005500 14722070374 0011263 0 ustar 00 /* * usb-omap.h - Platform data for the various OMAP USB IPs * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #define OMAP3_HS_USB_PORTS 3 enum usbhs_omap_port_mode { OMAP_USBHS_PORT_MODE_UNUSED, OMAP_EHCI_PORT_MODE_PHY, OMAP_EHCI_PORT_MODE_TLL, OMAP_EHCI_PORT_MODE_HSIC, OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0, OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM, OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0, OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM, OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0, OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM, OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0, OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM, OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0, OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM }; struct usbtll_omap_platform_data { enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; }; struct ehci_hcd_omap_platform_data { enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; int reset_gpio_port[OMAP3_HS_USB_PORTS]; struct regulator *regulator[OMAP3_HS_USB_PORTS]; unsigned phy_reset:1; }; struct ohci_hcd_omap_platform_data { enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; unsigned es2_compatibility:1; }; struct usbhs_omap_platform_data { int nports; enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS]; int reset_gpio_port[OMAP3_HS_USB_PORTS]; struct regulator *regulator[OMAP3_HS_USB_PORTS]; struct ehci_hcd_omap_platform_data *ehci_data; struct ohci_hcd_omap_platform_data *ohci_data; /* OMAP3 <= ES2.1 have a single ulpi bypass control bit */ unsigned single_ulpi_bypass:1; unsigned es2_compatibility:1; unsigned phy_reset:1; }; /*-------------------------------------------------------------------------*/ struct omap_musb_board_data { u8 interface_type; u8 mode; u16 power; unsigned extvbus:1; void (*set_phy_power)(u8 on); void (*clear_irq)(void); void (*set_mode)(u8 mode); void (*reset)(void); }; enum musb_interface { MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI }; platform_data/dma-hsu.h 0000644 00000000514 14722070374 0011076 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Driver for the High Speed UART DMA * * Copyright (C) 2015 Intel Corporation */ #ifndef _PLATFORM_DATA_DMA_HSU_H #define _PLATFORM_DATA_DMA_HSU_H #include <linux/device.h> struct hsu_dma_slave { struct device *dma_dev; int chan_id; }; #endif /* _PLATFORM_DATA_DMA_HSU_H */ platform_data/dma-atmel.h 0000644 00000004401 14722070374 0011400 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Header file for the Atmel AHB DMA Controller driver * * Copyright (C) 2008 Atmel Corporation */ #ifndef AT_HDMAC_H #define AT_HDMAC_H #include <linux/dmaengine.h> /** * struct at_dma_platform_data - Controller configuration parameters * @nr_channels: Number of channels supported by hardware (max 8) * @cap_mask: dma_capability flags supported by the platform */ struct at_dma_platform_data { unsigned int nr_channels; dma_cap_mask_t cap_mask; }; /** * struct at_dma_slave - Controller-specific information about a slave * @dma_dev: required DMA master device * @cfg: Platform-specific initializer for the CFG register */ struct at_dma_slave { struct device *dma_dev; u32 cfg; }; /* Platform-configurable bits in CFG */ #define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ #define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */ #define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */ #define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */ #define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */ #define ATC_SRC_H2SEL_SW (0x0 << 9) #define ATC_SRC_H2SEL_HW (0x1 << 9) #define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most 
significant bits) */ #define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */ #define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */ #define ATC_DST_H2SEL_SW (0x0 << 13) #define ATC_DST_H2SEL_HW (0x1 << 13) #define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */ #define ATC_SOD (0x1 << 16) /* Stop On Done */ #define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */ #define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */ #define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */ #define ATC_LOCK_IF_L_CHUNK (0x0 << 22) #define ATC_LOCK_IF_L_BUFFER (0x1 << 22) #define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */ #define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */ #define ATC_FIFOCFG_LARGESTBURST (0x0 << 28) #define ATC_FIFOCFG_HALFFIFO (0x1 << 28) #define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28) #endif /* AT_HDMAC_H */ platform_data/i2c-davinci.h 0000644 00000001503 14722070374 0011627 0 ustar 00 /* * DaVinci I2C controller platform_device info * * Author: Vladimir Barinov, MontaVista Software, Inc. <source@mvista.com> * * 2007 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #ifndef __ASM_ARCH_I2C_H #define __ASM_ARCH_I2C_H /* All frequencies are expressed in kHz */ struct davinci_i2c_platform_data { unsigned int bus_freq; /* standard bus frequency (kHz) */ unsigned int bus_delay; /* post-transaction delay (usec) */ bool gpio_recovery; /* Use GPIO recovery method */ bool has_pfunc; /* Chip has a ICPFUNC register */ }; /* for board setup code */ void davinci_init_i2c(struct davinci_i2c_platform_data *); #endif /* __ASM_ARCH_I2C_H */ platform_data/ti-sysc.h 0000644 00000011231 14722070374 0011131 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __TI_SYSC_DATA_H__ #define __TI_SYSC_DATA_H__ enum ti_sysc_module_type { TI_SYSC_OMAP2, TI_SYSC_OMAP2_TIMER, TI_SYSC_OMAP3_SHAM, TI_SYSC_OMAP3_AES, TI_SYSC_OMAP4, TI_SYSC_OMAP4_TIMER, TI_SYSC_OMAP4_SIMPLE, TI_SYSC_OMAP34XX_SR, TI_SYSC_OMAP36XX_SR, TI_SYSC_OMAP4_SR, TI_SYSC_OMAP4_MCASP, TI_SYSC_OMAP4_USB_HOST_FS, TI_SYSC_DRA7_MCAN, }; struct ti_sysc_cookie { void *data; void *clkdm; }; /** * struct sysc_regbits - TI OCP_SYSCONFIG register field offsets * @midle_shift: Offset of the midle bit * @clkact_shift: Offset of the clockactivity bit * @sidle_shift: Offset of the sidle bit * @enwkup_shift: Offset of the enawakeup bit * @srst_shift: Offset of the softreset bit * @autoidle_shift: Offset of the autoidle bit * @dmadisable_shift: Offset of the dmadisable bit * @emufree_shift; Offset of the emufree bit * * Note that 0 is a valid shift, and for ti-sysc.c -ENODEV can be used if a * feature is not available. 
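 *
 * Illustrative sketch (the bit positions are invented for the example): a
 * module whose SYSCONFIG has only softreset at bit 1 and autoidle at
 * bit 0 could be described as:
 *
 *	static const struct sysc_regbits example_regbits = {
 *		.srst_shift = 1,
 *		.autoidle_shift = 0,
 *		.midle_shift = -ENODEV,
 *		.clkact_shift = -ENODEV,
 *		.sidle_shift = -ENODEV,
 *		.enwkup_shift = -ENODEV,
 *		.dmadisable_shift = -ENODEV,
 *		.emufree_shift = -ENODEV,
 *	};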
*/ struct sysc_regbits { s8 midle_shift; s8 clkact_shift; s8 sidle_shift; s8 enwkup_shift; s8 srst_shift; s8 autoidle_shift; s8 dmadisable_shift; s8 emufree_shift; }; #define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) #define SYSC_QUIRK_FORCE_MSTANDBY BIT(20) #define SYSC_MODULE_QUIRK_AESS BIT(19) #define SYSC_MODULE_QUIRK_SGX BIT(18) #define SYSC_MODULE_QUIRK_HDQ1W BIT(17) #define SYSC_MODULE_QUIRK_I2C BIT(16) #define SYSC_MODULE_QUIRK_WDT BIT(15) #define SYSS_QUIRK_RESETDONE_INVERTED BIT(14) #define SYSC_QUIRK_SWSUP_MSTANDBY BIT(13) #define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12) #define SYSC_QUIRK_SWSUP_SIDLE BIT(11) #define SYSC_QUIRK_EXT_OPT_CLOCK BIT(10) #define SYSC_QUIRK_LEGACY_IDLE BIT(9) #define SYSC_QUIRK_RESET_STATUS BIT(8) #define SYSC_QUIRK_NO_IDLE BIT(7) #define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6) #define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5) #define SYSC_QUIRK_OPT_CLKS_NEEDED BIT(4) #define SYSC_QUIRK_OPT_CLKS_IN_RESET BIT(3) #define SYSC_QUIRK_16BIT BIT(2) #define SYSC_QUIRK_UNCACHED BIT(1) #define SYSC_QUIRK_USE_CLOCKACT BIT(0) #define SYSC_NR_IDLEMODES 4 /** * struct sysc_capabilities - capabilities for an interconnect target module * @type: sysc type identifier for the module * @sysc_mask: bitmask of supported SYSCONFIG register bits * @regbits: bitmask of SYSCONFIG register bits * @mod_quirks: bitmask of module specific quirks */ struct sysc_capabilities { const enum ti_sysc_module_type type; const u32 sysc_mask; const struct sysc_regbits *regbits; const u32 mod_quirks; }; /** * struct sysc_config - configuration for an interconnect target module * @sysc_val: configured value for sysc register * @syss_mask: configured mask value for SYSSTATUS register * @midlemodes: bitmask of supported master idle modes * @sidlemodes: bitmask of supported slave idle modes * @srst_udelay: optional delay needed after OCP soft reset * @quirks: bitmask of enabled quirks */ struct sysc_config { u32 sysc_val; u32 syss_mask; u8 midlemodes; u8 sidlemodes; u8 srst_udelay; u32 quirks; }; enum sysc_registers { SYSC_REVISION, SYSC_SYSCONFIG, SYSC_SYSSTATUS, SYSC_MAX_REGS, }; /** * struct ti_sysc_module_data - ti-sysc to hwmod translation data for a module * @name: legacy "ti,hwmods" module name * @module_pa: physical address of the interconnect target module * @module_size: size of the interconnect target module * @offsets: array of register offsets as listed in enum sysc_registers * @nr_offsets: number of registers * @cap: interconnect target module capabilities * @cfg: interconnect target module configuration * * This data is enough to allocate a new struct omap_hwmod_class_sysconfig * based on device tree data parsed by ti-sysc driver. 
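 *
 * Illustrative sketch (the callback body is invented): a platform
 * init_module() hook receives this data and might, for instance, log it:
 *
 *	static int example_init_module(struct device *dev,
 *				       const struct ti_sysc_module_data *data,
 *				       struct ti_sysc_cookie *cookie)
 *	{
 *		dev_info(dev, "%s at %llx size %x\n",
 *			 data->name, data->module_pa, data->module_size);
 *		return 0;
 *	}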
*/ struct ti_sysc_module_data { const char *name; u64 module_pa; u32 module_size; int *offsets; int nr_offsets; const struct sysc_capabilities *cap; struct sysc_config *cfg; }; struct device; struct clk; struct ti_sysc_platform_data { struct of_dev_auxdata *auxdata; int (*init_clockdomain)(struct device *dev, struct clk *fck, struct clk *ick, struct ti_sysc_cookie *cookie); void (*clkdm_deny_idle)(struct device *dev, const struct ti_sysc_cookie *cookie); void (*clkdm_allow_idle)(struct device *dev, const struct ti_sysc_cookie *cookie); int (*init_module)(struct device *dev, const struct ti_sysc_module_data *data, struct ti_sysc_cookie *cookie); int (*enable_module)(struct device *dev, const struct ti_sysc_cookie *cookie); int (*idle_module)(struct device *dev, const struct ti_sysc_cookie *cookie); int (*shutdown_module)(struct device *dev, const struct ti_sysc_cookie *cookie); }; #endif /* __TI_SYSC_DATA_H__ */ platform_data/remoteproc-omap.h 0000644 00000002511 14722070374 0012650 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Remote Processor - omap-specific bits * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2011 Google, Inc. */ #ifndef _PLAT_REMOTEPROC_H #define _PLAT_REMOTEPROC_H struct rproc_ops; struct platform_device; /* * struct omap_rproc_pdata - omap remoteproc's platform data * @name: the remoteproc's name * @oh_name: omap hwmod device * @oh_name_opt: optional, secondary omap hwmod device * @firmware: name of firmware file to load * @mbox_name: name of omap mailbox device to use with this rproc * @ops: start/stop rproc handlers * @device_enable: omap-specific handler for enabling a device * @device_shutdown: omap-specific handler for shutting down a device * @set_bootaddr: omap-specific handler for setting the rproc boot address */ struct omap_rproc_pdata { const char *name; const char *oh_name; const char *oh_name_opt; const char *firmware; const char *mbox_name; const struct rproc_ops *ops; int (*device_enable)(struct platform_device *pdev); int (*device_shutdown)(struct platform_device *pdev); void (*set_bootaddr)(u32); }; #if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE) void __init omap_rproc_reserve_cma(void); #else static inline void __init omap_rproc_reserve_cma(void) { } #endif #endif /* _PLAT_REMOTEPROC_H */ platform_data/adp8860.h 0000644 00000011160 14722070374 0010631 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Definitions and platform data for Analog Devices * Backlight drivers ADP8860 * * Copyright 2009-2010 Analog Devices Inc. 
*/ #ifndef __LINUX_I2C_ADP8860_H #define __LINUX_I2C_ADP8860_H #include <linux/leds.h> #include <linux/types.h> #define ID_ADP8860 8860 #define ADP8860_MAX_BRIGHTNESS 0x7F #define FLAG_OFFT_SHIFT 8 /* * LEDs subdevice platform data */ #define ADP8860_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT) #define ADP8860_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT) #define ADP8860_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT) #define ADP8860_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT) #define ADP8860_LED_ONT_200ms 0 #define ADP8860_LED_ONT_600ms 1 #define ADP8860_LED_ONT_800ms 2 #define ADP8860_LED_ONT_1200ms 3 #define ADP8860_LED_D7 (7) #define ADP8860_LED_D6 (6) #define ADP8860_LED_D5 (5) #define ADP8860_LED_D4 (4) #define ADP8860_LED_D3 (3) #define ADP8860_LED_D2 (2) #define ADP8860_LED_D1 (1) /* * Backlight subdevice platform data */ #define ADP8860_BL_D7 (1 << 6) #define ADP8860_BL_D6 (1 << 5) #define ADP8860_BL_D5 (1 << 4) #define ADP8860_BL_D4 (1 << 3) #define ADP8860_BL_D3 (1 << 2) #define ADP8860_BL_D2 (1 << 1) #define ADP8860_BL_D1 (1 << 0) #define ADP8860_FADE_T_DIS 0 /* Fade Timer Disabled */ #define ADP8860_FADE_T_300ms 1 /* 0.3 Sec */ #define ADP8860_FADE_T_600ms 2 #define ADP8860_FADE_T_900ms 3 #define ADP8860_FADE_T_1200ms 4 #define ADP8860_FADE_T_1500ms 5 #define ADP8860_FADE_T_1800ms 6 #define ADP8860_FADE_T_2100ms 7 #define ADP8860_FADE_T_2400ms 8 #define ADP8860_FADE_T_2700ms 9 #define ADP8860_FADE_T_3000ms 10 #define ADP8860_FADE_T_3500ms 11 #define ADP8860_FADE_T_4000ms 12 #define ADP8860_FADE_T_4500ms 13 #define ADP8860_FADE_T_5000ms 14 #define ADP8860_FADE_T_5500ms 15 /* 5.5 Sec */ #define ADP8860_FADE_LAW_LINEAR 0 #define ADP8860_FADE_LAW_SQUARE 1 #define ADP8860_FADE_LAW_CUBIC1 2 #define ADP8860_FADE_LAW_CUBIC2 3 #define ADP8860_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ #define ADP8860_BL_AMBL_FILT_160ms 1 #define ADP8860_BL_AMBL_FILT_320ms 2 #define ADP8860_BL_AMBL_FILT_640ms 3 #define ADP8860_BL_AMBL_FILT_1280ms 4 #define ADP8860_BL_AMBL_FILT_2560ms 5 #define ADP8860_BL_AMBL_FILT_5120ms 6 #define ADP8860_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ /* * Blacklight current 0..30mA */ #define ADP8860_BL_CUR_mA(I) ((I * 127) / 30) /* * L2 comparator current 0..1106uA */ #define ADP8860_L2_COMP_CURR_uA(I) ((I * 255) / 1106) /* * L3 comparator current 0..138uA */ #define ADP8860_L3_COMP_CURR_uA(I) ((I * 255) / 138) struct adp8860_backlight_platform_data { u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */ u8 bl_fade_in; /* Backlight Fade-In Timer */ u8 bl_fade_out; /* Backlight Fade-Out Timer */ u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */ u8 en_ambl_sens; /* 1 = enable ambient light sensor */ u8 abml_filt; /* Light sensor filter time */ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ /** * Independent Current Sinks / LEDS * Sinks not assigned to the Backlight can be exposed to * user space using the LEDS CLASS interface */ int num_leds; struct led_info *leds; u8 led_fade_in; /* LED Fade-In Timer */ u8 led_fade_out; /* LED Fade-Out 
Timer */ u8 led_fade_law; /* fade-on/fade-off transfer characteristic */ u8 led_on_time; /** * Gain down disable. Setting this option does not allow the * charge pump to switch to lower gains. NOT AVAILABLE on ADP8860 * 1 = the charge pump doesn't switch down in gain until all LEDs are 0. * The charge pump switches up in gain as needed. This feature is * useful if the ADP8863 charge pump is used to drive an external load. * This feature must be used when utilizing small fly capacitors * (0402 or smaller). * 0 = the charge pump automatically switches up and down in gain. * This provides optimal efficiency, but is not suitable for driving * loads that are not connected through the ADP8863 diode drivers. * Additionally, the charge pump fly capacitors should be low ESR * and sized 0603 or greater. */ u8 gdwn_dis; }; #endif /* __LINUX_I2C_ADP8860_H */ platform_data/dma-imx-sdma.h 0000644 00000003133 14722070374 0012016 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MACH_MXC_SDMA_H__ #define __MACH_MXC_SDMA_H__ /** * struct sdma_script_start_addrs - SDMA script start pointers * * start addresses of the different functions in the physical * address space of the SDMA engine. */ struct sdma_script_start_addrs { s32 ap_2_ap_addr; s32 ap_2_bp_addr; s32 ap_2_ap_fixed_addr; s32 bp_2_ap_addr; s32 loopback_on_dsp_side_addr; s32 mcu_interrupt_only_addr; s32 firi_2_per_addr; s32 firi_2_mcu_addr; s32 per_2_firi_addr; s32 mcu_2_firi_addr; s32 uart_2_per_addr; s32 uart_2_mcu_addr; s32 per_2_app_addr; s32 mcu_2_app_addr; s32 per_2_per_addr; s32 uartsh_2_per_addr; s32 uartsh_2_mcu_addr; s32 per_2_shp_addr; s32 mcu_2_shp_addr; s32 ata_2_mcu_addr; s32 mcu_2_ata_addr; s32 app_2_per_addr; s32 app_2_mcu_addr; s32 shp_2_per_addr; s32 shp_2_mcu_addr; s32 mshc_2_mcu_addr; s32 mcu_2_mshc_addr; s32 spdif_2_mcu_addr; s32 mcu_2_spdif_addr; s32 asrc_2_mcu_addr; s32 ext_mem_2_ipu_addr; s32 descrambler_addr; s32 dptc_dvfs_addr; s32 utra_addr; s32 ram_code_start_addr; /* End of v1 array */ s32 mcu_2_ssish_addr; s32 ssish_2_mcu_addr; s32 hdmi_dma_addr; /* End of v2 array */ s32 zcanfd_2_mcu_addr; s32 zqspi_2_mcu_addr; s32 mcu_2_ecspi_addr; /* End of v3 array */ s32 mcu_2_zqspi_addr; /* End of v4 array */ }; /** * struct sdma_platform_data - platform specific data for SDMA engine * * @fw_name The firmware name * @script_addrs SDMA scripts addresses in SDMA ROM */ struct sdma_platform_data { char *fw_name; struct sdma_script_start_addrs *script_addrs; }; #endif /* __MACH_MXC_SDMA_H__ */ platform_data/usb-ohci-s3c2410.h 0000644 00000001655 14722070374 0012255 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* arch/arm/plat-samsung/include/plat/usb-control.h * * Copyright (c) 2004 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C - USB host port information */ #ifndef __ASM_ARCH_USBCONTROL_H #define __ASM_ARCH_USBCONTROL_H #define S3C_HCDFLG_USED (1) struct s3c2410_hcd_port { unsigned char flags; unsigned char power; unsigned char oc_status; unsigned char oc_changed; }; struct s3c2410_hcd_info { struct usb_hcd *hcd; struct s3c2410_hcd_port port[2]; void (*power_control)(int port, int to); void (*enable_oc)(struct s3c2410_hcd_info *, int on); void (*report_oc)(struct s3c2410_hcd_info *, int ports); }; static inline void s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports) { if (info->report_oc != NULL) { (info->report_oc)(info, ports); } } extern void s3c_ohci_set_platdata(struct s3c2410_hcd_info *info); #endif /*__ASM_ARCH_USBCONTROL_H */ platform_data/ams-delta-fiq.h 0000644 00000003171 
14722070374 0012166 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/platform_data/ams-delta-fiq.h * * Taken from the original Amstrad modifications to fiq.h * * Copyright (c) 2004 Amstrad Plc * Copyright (c) 2006 Matt Callow * Copyright (c) 2010 Janusz Krzysztofik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef __LINUX_PLATFORM_DATA_AMS_DELTA_FIQ_H #define __LINUX_PLATFORM_DATA_AMS_DELTA_FIQ_H /* * These are the offsets from the beginning of the fiq_buffer. They are put here * since the buffer and header need to be accessed by drivers servicing devices * which generate GPIO interrupts - e.g. keyboard, modem, hook switch. */ #define FIQ_MASK 0 #define FIQ_STATE 1 #define FIQ_KEYS_CNT 2 #define FIQ_TAIL_OFFSET 3 #define FIQ_HEAD_OFFSET 4 #define FIQ_BUF_LEN 5 #define FIQ_KEY 6 #define FIQ_MISSED_KEYS 7 #define FIQ_BUFFER_START 8 #define FIQ_GPIO_INT_MASK 9 #define FIQ_KEYS_HICNT 10 #define FIQ_IRQ_PEND 11 #define FIQ_SIR_CODE_L1 12 #define IRQ_SIR_CODE_L2 13 #define FIQ_CNT_INT_00 14 #define FIQ_CNT_INT_KEY 15 #define FIQ_CNT_INT_MDM 16 #define FIQ_CNT_INT_03 17 #define FIQ_CNT_INT_HSW 18 #define FIQ_CNT_INT_05 19 #define FIQ_CNT_INT_06 20 #define FIQ_CNT_INT_07 21 #define FIQ_CNT_INT_08 22 #define FIQ_CNT_INT_09 23 #define FIQ_CNT_INT_10 24 #define FIQ_CNT_INT_11 25 #define FIQ_CNT_INT_12 26 #define FIQ_CNT_INT_13 27 #define FIQ_CNT_INT_14 28 #define FIQ_CNT_INT_15 29 #define FIQ_CIRC_BUFF 30 /* Start of circular buffer */ #endif platform_data/dma-ste-dma40.h 0000644 00000014040 14722070374 0011774 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2007-2010 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson */ #ifndef STE_DMA40_H #define STE_DMA40_H #include <linux/dmaengine.h> #include <linux/scatterlist.h> #include <linux/workqueue.h> #include <linux/interrupt.h> /* * Maximum size for a single dma descriptor * Size is limited to 16 bits.
* Size is in units of addr-widths (1, 2, 4, 8 bytes) * Larger transfers will be split into multiple linked descriptors */ #define STEDMA40_MAX_SEG_SIZE 0xFFFF /* dev types for memcpy */ #define STEDMA40_DEV_DST_MEMORY (-1) #define STEDMA40_DEV_SRC_MEMORY (-1) enum stedma40_mode { STEDMA40_MODE_LOGICAL = 0, STEDMA40_MODE_PHYSICAL, STEDMA40_MODE_OPERATION, }; enum stedma40_mode_opt { STEDMA40_PCHAN_BASIC_MODE = 0, STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0, STEDMA40_PCHAN_MODULO_MODE, STEDMA40_PCHAN_DOUBLE_DST_MODE, STEDMA40_LCHAN_SRC_PHY_DST_LOG, STEDMA40_LCHAN_SRC_LOG_DST_PHY, }; #define STEDMA40_ESIZE_8_BIT 0x0 #define STEDMA40_ESIZE_16_BIT 0x1 #define STEDMA40_ESIZE_32_BIT 0x2 #define STEDMA40_ESIZE_64_BIT 0x3 /* The value 4 indicates that PEN-reg shall be set to 0 */ #define STEDMA40_PSIZE_PHY_1 0x4 #define STEDMA40_PSIZE_PHY_2 0x0 #define STEDMA40_PSIZE_PHY_4 0x1 #define STEDMA40_PSIZE_PHY_8 0x2 #define STEDMA40_PSIZE_PHY_16 0x3 /* * The number of elements differs in logical and * physical mode */ #define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2 #define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4 #define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8 #define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16 /* Maximum number of possible physical channels */ #define STEDMA40_MAX_PHYS 32 enum stedma40_flow_ctrl { STEDMA40_NO_FLOW_CTRL, STEDMA40_FLOW_CTRL, }; /** * struct stedma40_half_channel_info - dst/src channel configuration * * @big_endian: true if the src/dst should be read as big endian * @data_width: Data width of the src/dst hardware * @psize: Burst size * @flow_ctrl: Flow control on/off. */ struct stedma40_half_channel_info { bool big_endian; enum dma_slave_buswidth data_width; int psize; enum stedma40_flow_ctrl flow_ctrl; }; /** * struct stedma40_chan_cfg - Structure to be filled by client drivers. * * @dir: MEM 2 MEM, PERIPH 2 MEM, MEM 2 PERIPH, PERIPH 2 PERIPH * @high_priority: true if high-priority * @realtime: true if realtime mode is to be enabled. Only available on DMA40 * version 3+, i.e. DB8500v2+ * @mode: channel mode: physical, logical, or operation * @mode_opt: options for the chosen channel mode * @dev_type: src/dst device type (driver uses dir to figure out which) * @src_info: Parameters for the src half channel * @dst_info: Parameters for the dst half channel * @use_fixed_channel: if true, use physical channel specified by phy_channel * @phy_channel: physical channel to use, only if use_fixed_channel is true * * This structure has to be filled by the client drivers. * It is recommended to do all dma configurations for clients in the * machine (board) code. */ struct stedma40_chan_cfg { enum dma_transfer_direction dir; bool high_priority; bool realtime; enum stedma40_mode mode; enum stedma40_mode_opt mode_opt; int dev_type; struct stedma40_half_channel_info src_info; struct stedma40_half_channel_info dst_info; bool use_fixed_channel; int phy_channel; }; /** * struct stedma40_platform_data - Configuration struct for the dma device. * * @disabled_channels: A vector, ending with -1, that marks physical channels * that are, for various reasons, not available to the driver. * @soft_lli_chans: A vector that marks the physical channels that will use * LLI by SW, which avoids a HW bug that exists in some versions of the * controller. SoftLLI introduces relink overhead that could impact * performance for certain use cases.
* @num_of_soft_lli_chans: The number of channels that need to be configured * to use SoftLLI. * @use_esram_lcla: flag for mapping the lcla into esram region * @num_of_memcpy_chans: The number of channels reserved for memcpy. * @num_of_phy_chans: The number of physical channels implemented in HW. * 0 means reading the number of channels from DMA HW but this is only valid * for 'multiple of 4' channels, like 8. */ struct stedma40_platform_data { int disabled_channels[STEDMA40_MAX_PHYS]; int *soft_lli_chans; int num_of_soft_lli_chans; bool use_esram_lcla; int num_of_memcpy_chans; int num_of_phy_chans; }; #ifdef CONFIG_STE_DMA40 /** * stedma40_filter() - Provides stedma40_chan_cfg to the * ste_dma40 dma driver via the dmaengine framework and * does some checking of what is provided. * * Never called directly by a client; it is used by the dmaengine core. * @chan: dmaengine handle. * @data: Must be of type struct stedma40_chan_cfg and carries * the channel configuration. */ bool stedma40_filter(struct dma_chan *chan, void *data); /** * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave * (=device) * * @chan: dmaengine handle * @addr: source or destination physical address. * @size: bytes to transfer * @direction: direction of transfer * @flags: is actually enum dma_ctrl_flags. See dmaengine.h */ static inline struct dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan, dma_addr_t addr, unsigned int size, enum dma_transfer_direction direction, unsigned long flags) { struct scatterlist sg; sg_init_table(&sg, 1); sg.dma_address = addr; sg.length = size; return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags); } #else static inline bool stedma40_filter(struct dma_chan *chan, void *data) { return false; } static inline struct dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan, dma_addr_t addr, unsigned int size, enum dma_transfer_direction direction, unsigned long flags) { return NULL; } #endif #endif platform_data/spi-mt65xx.h 0000644 00000000545 14722070374 0011510 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * MTK SPI bus driver definitions * * Copyright (c) 2015 MediaTek Inc. * Author: Leilk Liu <leilk.liu@mediatek.com> */ #ifndef ____LINUX_PLATFORM_DATA_SPI_MTK_H #define ____LINUX_PLATFORM_DATA_SPI_MTK_H /* Board specific platform_data */ struct mtk_chip_config { u32 cs_pol; u32 sample_sel; }; #endif platform_data/gpio-davinci.h 0000644 00000001525 14722070374 0012114 0 ustar 00 /* * DaVinci GPIO Platform Related Defines * * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details.
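 *
 * The GPIO_TO_PIN() helper below maps a (bank, offset) pair to a linear
 * GPIO number, e.g. GPIO_TO_PIN(2, 12) = 16 * 2 + 12 = 44. A board would
 * typically pass something like the following (hypothetical values, not
 * taken from a real board file):
 *
 *	static struct davinci_gpio_platform_data board_gpio_pdata = {
 *		.no_auto_base	= true,
 *		.base		= 0,
 *		.ngpio		= 144,
 *	};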
*/ #ifndef __DAVINCI_GPIO_PLATFORM_H #define __DAVINCI_GPIO_PLATFORM_H struct davinci_gpio_platform_data { bool no_auto_base; u32 base; u32 ngpio; u32 gpio_unbanked; }; /* Convert GPIO signal to GPIO pin number */ #define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio)) #endif platform_data/clk-integrator.h 0000644 00000000166 14722070374 0012470 0 ustar 00 void integrator_impd1_clk_init(void __iomem *base, unsigned int id); void integrator_impd1_clk_exit(unsigned int id); platform_data/video-ep93xx.h 0000644 00000002765 14722070374 0012016 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __VIDEO_EP93XX_H #define __VIDEO_EP93XX_H struct platform_device; struct fb_info; /* VideoAttributes flags */ #define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0) #define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1) #define EP93XXFB_VSYNC_ENABLE (1 << 2) #define EP93XXFB_PIXEL_DATA_ENABLE (1 << 3) #define EP93XXFB_COMPOSITE_SYNC (1 << 4) #define EP93XXFB_SYNC_VERT_HIGH (1 << 5) #define EP93XXFB_SYNC_HORIZ_HIGH (1 << 6) #define EP93XXFB_SYNC_BLANK_HIGH (1 << 7) #define EP93XXFB_PCLK_FALLING (1 << 8) #define EP93XXFB_ENABLE_AC (1 << 9) #define EP93XXFB_ENABLE_LCD (1 << 10) #define EP93XXFB_ENABLE_CCIR (1 << 12) #define EP93XXFB_USE_PARALLEL_INTERFACE (1 << 13) #define EP93XXFB_ENABLE_INTERRUPT (1 << 14) #define EP93XXFB_USB_INTERLACE (1 << 16) #define EP93XXFB_USE_EQUALIZATION (1 << 17) #define EP93XXFB_USE_DOUBLE_HORZ (1 << 18) #define EP93XXFB_USE_DOUBLE_VERT (1 << 19) #define EP93XXFB_USE_BLANK_PIXEL (1 << 20) #define EP93XXFB_USE_SDCSN0 (0 << 21) #define EP93XXFB_USE_SDCSN1 (1 << 21) #define EP93XXFB_USE_SDCSN2 (2 << 21) #define EP93XXFB_USE_SDCSN3 (3 << 21) #define EP93XXFB_ENABLE (EP93XXFB_STATE_MACHINE_ENABLE | \ EP93XXFB_PIXEL_CLOCK_ENABLE | \ EP93XXFB_VSYNC_ENABLE | \ EP93XXFB_PIXEL_DATA_ENABLE) struct ep93xxfb_mach_info { unsigned int flags; int (*setup)(struct platform_device *pdev); void (*teardown)(struct platform_device *pdev); void (*blank)(int blank_mode, struct fb_info *info); }; #endif /* __VIDEO_EP93XX_H */ platform_data/ehci-sh.h 0000644 00000000553 14722070374 0011063 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 * * EHCI SuperH driver platform data * * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> * Copyright (C) 2012 Renesas Solutions Corp. */ #ifndef __USB_EHCI_SH_H #define __USB_EHCI_SH_H struct ehci_sh_platdata { void (*phy_init)(void); /* Phy init function */ }; #endif /* __USB_EHCI_SH_H */ platform_data/omap-twl4030.h 0000644 00000002052 14722070374 0011606 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /** * omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030 * codec, header. * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com * All rights reserved. * * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> */ #ifndef _OMAP_TWL4030_H_ #define _OMAP_TWL4030_H_ /* To select if only one channel is connected in a stereo port */ #define OMAP_TWL4030_LEFT (1 << 0) #define OMAP_TWL4030_RIGHT (1 << 1) struct omap_tw4030_pdata { const char *card_name; /* Voice port is connected to McBSP3 */ bool voice_connected; /* The driver will parse the connection flags if this flag is set */ bool custom_routing; /* Flags to indicate connected audio ports. 
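 * For example, a board that wires up only the headset output and the
 * headset microphone would set has_hs and has_hsmic and leave the
 * remaining flags cleared (illustrative, not a real board configuration).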
*/ u8 has_hs; u8 has_hf; u8 has_predriv; u8 has_carkit; bool has_ear; bool has_mainmic; bool has_submic; bool has_hsmic; bool has_carkitmic; bool has_digimic0; bool has_digimic1; u8 has_linein; /* Jack detect GPIO or <= 0 if it is not implemented */ int jack_detect; }; #endif /* _OMAP_TWL4030_H_ */ platform_data/clk-u300.h 0000644 00000000057 14722070374 0011000 0 ustar 00 void __init u300_clk_init(void __iomem *base); platform_data/pinctrl-single.h 0000644 00000000651 14722070374 0012474 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PINCTRL_SINGLE_H #define _PINCTRL_SINGLE_H /** * struct pcs_pdata - pinctrl-single platform data * @irq: optional wake-up interrupt * @rearm: optional soc specific rearm function * * Note that the irq and rearm setup should come from device * tree except for omap where there are still some dependencies * to the legacy PRM code. */ struct pcs_pdata { int irq; void (*rearm)(void); }; #endif /* _PINCTRL_SINGLE_H */ platform_data/mmc-esdhc-imx.h 0000644 00000002312 14722070374 0012171 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2010 Wolfram Sang <w.sang@pengutronix.de> */ #ifndef __ASM_ARCH_IMX_ESDHC_H #define __ASM_ARCH_IMX_ESDHC_H #include <linux/types.h> enum wp_types { ESDHC_WP_NONE, /* no WP, neither controller nor gpio */ ESDHC_WP_CONTROLLER, /* mmc controller internal WP */ ESDHC_WP_GPIO, /* external gpio pin for WP */ }; enum cd_types { ESDHC_CD_NONE, /* no CD, neither controller nor gpio */ ESDHC_CD_CONTROLLER, /* mmc controller internal CD */ ESDHC_CD_GPIO, /* external gpio pin for CD */ ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */ }; /** * struct esdhc_platform_data - platform data for esdhc on i.MX * * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35. * * @wp_type: type of write_protect method (see wp_types enum above) * @cd_type: type of card_detect method (see cd_types enum above) */ struct esdhc_platform_data { enum wp_types wp_type; enum cd_types cd_type; int max_bus_width; unsigned int delay_line; unsigned int tuning_step; /* The delay cell steps in tuning procedure */ unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */ }; #endif /* __ASM_ARCH_IMX_ESDHC_H */ platform_data/invensense_mpu6050.h 0000644 00000001543 14722070374 0013114 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Invensense, Inc. */ #ifndef __INV_MPU6050_PLATFORM_H_ #define __INV_MPU6050_PLATFORM_H_ /** * struct inv_mpu6050_platform_data - Platform data for the mpu driver * @orientation: Orientation matrix of the chip (deprecated in favor of * mounting matrix retrieved from device-tree) * * Contains platform specific information on how to configure the MPU6050 to * work on this platform. The orientation matrices are 3x3 rotation matrices * that are applied to the data to rotate from the mounting orientation to the * platform orientation. The values must be one of 0, 1, or -1 and each row and * column should have exactly 1 non-zero value. * * Deprecated in favor of mounting matrix retrieved from device-tree. */ struct inv_mpu6050_platform_data { __s8 orientation[9]; }; #endif platform_data/cyttsp4.h 0000644 00000003133 14722070374 0011152 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Header file for: * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. * For use with Cypress Txx3xx parts. * Supported parts include: * CY8CTST341 * CY8CTMA340 * * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
* Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org> * * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com) */ #ifndef _CYTTSP4_H_ #define _CYTTSP4_H_ #define CYTTSP4_MT_NAME "cyttsp4_mt" #define CYTTSP4_I2C_NAME "cyttsp4_i2c_adapter" #define CYTTSP4_SPI_NAME "cyttsp4_spi_adapter" #define CY_TOUCH_SETTINGS_MAX 32 struct touch_framework { const uint16_t *abs; uint8_t size; uint8_t enable_vkeys; } __packed; struct cyttsp4_mt_platform_data { struct touch_framework *frmwrk; unsigned short flags; char const *inp_dev_name; }; struct touch_settings { const uint8_t *data; uint32_t size; uint8_t tag; } __packed; struct cyttsp4_core_platform_data { int irq_gpio; int rst_gpio; int level_irq_udelay; int (*xres)(struct cyttsp4_core_platform_data *pdata, struct device *dev); int (*init)(struct cyttsp4_core_platform_data *pdata, int on, struct device *dev); int (*power)(struct cyttsp4_core_platform_data *pdata, int on, struct device *dev, atomic_t *ignore_irq); int (*irq_stat)(struct cyttsp4_core_platform_data *pdata, struct device *dev); struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX]; }; struct cyttsp4_platform_data { struct cyttsp4_core_platform_data *core_pdata; struct cyttsp4_mt_platform_data *mt_pdata; }; #endif /* _CYTTSP4_H_ */ platform_data/s3c-hsotg.h 0000644 00000002050 14722070374 0011347 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* include/linux/platform_data/s3c-hsotg.h * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C USB2.0 High-speed / OTG platform information */ #ifndef __LINUX_USB_S3C_HSOTG_H #define __LINUX_USB_S3C_HSOTG_H struct platform_device; enum dwc2_hsotg_dmamode { S3C_HSOTG_DMA_NONE, /* do not use DMA at all */ S3C_HSOTG_DMA_ONLY, /* always use DMA */ S3C_HSOTG_DMA_DRV, /* DMA is chosen by driver */ }; /** * struct dwc2_hsotg_plat - platform data for high-speed otg/udc * @dma: Whether to use DMA or not. * @is_osc: The clock source is an oscillator, not a crystal */ struct dwc2_hsotg_plat { enum dwc2_hsotg_dmamode dma; unsigned int is_osc:1; int phy_type; int (*phy_init)(struct platform_device *pdev, int type); int (*phy_exit)(struct platform_device *pdev, int type); }; extern void dwc2_hsotg_set_platdata(struct dwc2_hsotg_plat *pd); #endif /* __LINUX_USB_S3C_HSOTG_H */ platform_data/bh1770glc.h 0000644 00000002266 14722070374 0011144 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver. * Chip is combined proximity and ambient light sensor. * * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). * * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> */ #ifndef __BH1770_H__ #define __BH1770_H__ /** * struct bh1770_platform_data - platform data for bh1770glc driver * @led_def_curr: IR led driving current. * @glass_attenuation: Attenuation factor for covering window. * @setup_resources: Callback for interrupt line setup function * @release_resources: Callback for interrupt line release function * * Example of glass attenuation: 16384 * 385 / 100 means attenuation factor * of 3.85. i.e.
light_above_sensor = light_above_cover_window / 3.85 */ struct bh1770_platform_data { #define BH1770_LED_5mA 0 #define BH1770_LED_10mA 1 #define BH1770_LED_20mA 2 #define BH1770_LED_50mA 3 #define BH1770_LED_100mA 4 #define BH1770_LED_150mA 5 #define BH1770_LED_200mA 6 __u8 led_def_curr; #define BH1770_NEUTRAL_GA 16384 /* 16384 / 16384 = 1 */ __u32 glass_attenuation; int (*setup_resources)(void); int (*release_resources)(void); }; #endif platform_data/bd6107.h 0000644 00000000414 14722070374 0010442 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * bd6107.h - Rohm BD6107 LEDs Driver */ #ifndef __BD6107_H__ #define __BD6107_H__ struct device; struct bd6107_platform_data { struct device *fbdev; int reset; /* Reset GPIO */ unsigned int def_value; }; #endif platform_data/mtd-davinci-aemif.h 0000644 00000001312 14722070374 0013013 0 ustar 00 /* * TI DaVinci AEMIF support * * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/ * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #ifndef _MACH_DAVINCI_AEMIF_H #define _MACH_DAVINCI_AEMIF_H #include <linux/platform_device.h> #define NRCSR_OFFSET 0x00 #define AWCCR_OFFSET 0x04 #define A1CR_OFFSET 0x10 #define ACR_ASIZE_MASK 0x3 #define ACR_EW_MASK BIT(30) #define ACR_SS_MASK BIT(31) /* All timings in nanoseconds */ struct davinci_aemif_timing { u8 wsetup; u8 wstrobe; u8 whold; u8 rsetup; u8 rstrobe; u8 rhold; u8 ta; }; #endif platform_data/mdio-gpio.h 0000644 00000000421 14722070374 0011421 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * MDIO-GPIO bus platform data structure */ #ifndef __LINUX_MDIO_GPIO_PDATA_H #define __LINUX_MDIO_GPIO_PDATA_H struct mdio_gpio_platform_data { u32 phy_mask; u32 phy_ignore_ta_mask; }; #endif /* __LINUX_MDIO_GPIO_PDATA_H */ platform_data/lm3630a_bl.h 0000644 00000003207 14722070374 0011304 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Simple driver for Texas Instruments LM3630A LED Flash driver chip * Copyright (C) 2012 Texas Instruments */ #ifndef __LINUX_LM3630A_H #define __LINUX_LM3630A_H #define LM3630A_NAME "lm3630a_bl" enum lm3630a_pwm_ctrl { LM3630A_PWM_DISABLE = 0x00, LM3630A_PWM_BANK_A, LM3630A_PWM_BANK_B, LM3630A_PWM_BANK_ALL, LM3630A_PWM_BANK_A_ACT_LOW = 0x05, LM3630A_PWM_BANK_B_ACT_LOW, LM3630A_PWM_BANK_ALL_ACT_LOW, }; enum lm3630a_leda_ctrl { LM3630A_LEDA_DISABLE = 0x00, LM3630A_LEDA_ENABLE = 0x04, LM3630A_LEDA_ENABLE_LINEAR = 0x14, }; enum lm3630a_ledb_ctrl { LM3630A_LEDB_DISABLE = 0x00, LM3630A_LEDB_ON_A = 0x01, LM3630A_LEDB_ENABLE = 0x02, LM3630A_LEDB_ENABLE_LINEAR = 0x0A, }; #define LM3630A_MAX_BRIGHTNESS 255 /* *@leda_label : optional led a label. *@leda_init_brt : led a init brightness. 4~255 *@leda_max_brt : led a max brightness. 4~255 *@leda_ctrl : led a disable, enable linear, enable exponential *@ledb_label : optional led b label. *@ledb_init_brt : led b init brightness. 4~255 *@ledb_max_brt : led b max brightness. 4~255 *@ledb_ctrl : led b disable, enable linear, enable exponential *@pwm_period : pwm period *@pwm_ctrl : pwm disable, bank a or b, active high or low */ struct lm3630a_platform_data { /* led a config. */ const char *leda_label; int leda_init_brt; int leda_max_brt; enum lm3630a_leda_ctrl leda_ctrl; /* led b config. */ const char *ledb_label; int ledb_init_brt; int ledb_max_brt; enum lm3630a_ledb_ctrl ledb_ctrl; /* pwm config. 
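 * For example, a board using an external PWM dimmer might set
 * pwm_ctrl = LM3630A_PWM_BANK_ALL together with a non-zero pwm_period
 * (illustrative; the unit of pwm_period is defined by the driver, and
 * the available control modes are listed in the enum above).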
*/ unsigned int pwm_period; enum lm3630a_pwm_ctrl pwm_ctrl; }; #endif /* __LINUX_LM3630A_H */ platform_data/adp5588.h 0000644 00000015063 14722070374 0010643 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller * * Copyright 2009-2010 Analog Devices Inc. */ #ifndef _ADP5588_H #define _ADP5588_H #define DEV_ID 0x00 /* Device ID */ #define CFG 0x01 /* Configuration Register1 */ #define INT_STAT 0x02 /* Interrupt Status Register */ #define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */ #define Key_EVENTA 0x04 /* Key Event Register A */ #define Key_EVENTB 0x05 /* Key Event Register B */ #define Key_EVENTC 0x06 /* Key Event Register C */ #define Key_EVENTD 0x07 /* Key Event Register D */ #define Key_EVENTE 0x08 /* Key Event Register E */ #define Key_EVENTF 0x09 /* Key Event Register F */ #define Key_EVENTG 0x0A /* Key Event Register G */ #define Key_EVENTH 0x0B /* Key Event Register H */ #define Key_EVENTI 0x0C /* Key Event Register I */ #define Key_EVENTJ 0x0D /* Key Event Register J */ #define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */ #define UNLOCK1 0x0F /* Unlock Key1 */ #define UNLOCK2 0x10 /* Unlock Key2 */ #define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */ #define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */ #define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */ #define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */ #define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */ #define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */ #define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */ #define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */ #define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */ #define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */ #define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */ #define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */ #define KP_GPIO1 0x1D /* Keypad or GPIO Selection */ #define KP_GPIO2 0x1E /* Keypad or GPIO Selection */ #define KP_GPIO3 0x1F /* Keypad or GPIO Selection */ #define GPI_EM1 0x20 /* GPI Event Mode 1 */ #define GPI_EM2 0x21 /* GPI Event Mode 2 */ #define GPI_EM3 0x22 /* GPI Event Mode 3 */ #define GPIO_DIR1 0x23 /* GPIO Data Direction */ #define GPIO_DIR2 0x24 /* GPIO Data Direction */ #define GPIO_DIR3 0x25 /* GPIO Data Direction */ #define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */ #define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */ #define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */ #define Debounce_DIS1 0x29 /* Debounce Disable */ #define Debounce_DIS2 0x2A /* Debounce Disable */ #define Debounce_DIS3 0x2B /* Debounce Disable */ #define GPIO_PULL1 0x2C /* GPIO Pull Disable */ #define GPIO_PULL2 0x2D /* GPIO Pull Disable */ #define GPIO_PULL3 0x2E /* GPIO Pull Disable */ #define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */ #define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */ #define CMP_CONFG_SENS2 0x32 /* Sensor 2 Comparator Configuration Register */ #define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */ #define CMP1_LVL2_HYS 0x34 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */ #define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */ #define CMP1_LVL3_HYS 0x36 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */ #define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */ #define CMP2_LVL2_HYS
0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */ #define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */ #define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */ #define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */ #define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */ #define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */ #define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */ #define ADP5588_DEVICE_ID_MASK 0xF /* Configuration Register1 */ #define ADP5588_AUTO_INC (1 << 7) #define ADP5588_GPIEM_CFG (1 << 6) #define ADP5588_OVR_FLOW_M (1 << 5) #define ADP5588_INT_CFG (1 << 4) #define ADP5588_OVR_FLOW_IEN (1 << 3) #define ADP5588_K_LCK_IM (1 << 2) #define ADP5588_GPI_IEN (1 << 1) #define ADP5588_KE_IEN (1 << 0) /* Interrupt Status Register */ #define ADP5588_CMP2_INT (1 << 5) #define ADP5588_CMP1_INT (1 << 4) #define ADP5588_OVR_FLOW_INT (1 << 3) #define ADP5588_K_LCK_INT (1 << 2) #define ADP5588_GPI_INT (1 << 1) #define ADP5588_KE_INT (1 << 0) /* Key Lock and Event Counter Register */ #define ADP5588_K_LCK_EN (1 << 6) #define ADP5588_LCK21 0x30 #define ADP5588_KEC 0xF #define ADP5588_MAXGPIO 18 #define ADP5588_BANK(offs) ((offs) >> 3) #define ADP5588_BIT(offs) (1u << ((offs) & 0x7)) /* Put one of these structures in i2c_board_info platform_data */ #define ADP5588_KEYMAPSIZE 80 #define GPI_PIN_ROW0 97 #define GPI_PIN_ROW1 98 #define GPI_PIN_ROW2 99 #define GPI_PIN_ROW3 100 #define GPI_PIN_ROW4 101 #define GPI_PIN_ROW5 102 #define GPI_PIN_ROW6 103 #define GPI_PIN_ROW7 104 #define GPI_PIN_COL0 105 #define GPI_PIN_COL1 106 #define GPI_PIN_COL2 107 #define GPI_PIN_COL3 108 #define GPI_PIN_COL4 109 #define GPI_PIN_COL5 110 #define GPI_PIN_COL6 111 #define GPI_PIN_COL7 112 #define GPI_PIN_COL8 113 #define GPI_PIN_COL9 114 #define GPI_PIN_ROW_BASE GPI_PIN_ROW0 #define GPI_PIN_ROW_END GPI_PIN_ROW7 #define GPI_PIN_COL_BASE GPI_PIN_COL0 #define GPI_PIN_COL_END GPI_PIN_COL9 #define GPI_PIN_BASE GPI_PIN_ROW_BASE #define GPI_PIN_END GPI_PIN_COL_END #define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1) struct adp5588_gpi_map { unsigned short pin; unsigned short sw_evt; }; struct adp5588_kpad_platform_data { int rows; /* Number of rows */ int cols; /* Number of columns */ const unsigned short *keymap; /* Pointer to keymap */ unsigned short keymapsize; /* Keymap size */ unsigned repeat:1; /* Enable key repeat */ unsigned en_keylock:1; /* Enable Key Lock feature */ unsigned short unlock_key1; /* Unlock Key 1 */ unsigned short unlock_key2; /* Unlock Key 2 */ const struct adp5588_gpi_map *gpimap; unsigned short gpimapsize; const struct adp5588_gpio_platform_data *gpio_data; }; struct i2c_client; /* forward declaration */ struct adp5588_gpio_platform_data { int gpio_start; /* GPIO Chip base # */ const char *const *names; unsigned irq_base; /* interrupt base # */ unsigned pullup_dis_mask; /* Pull-Up Disable Mask */ int (*setup)(struct i2c_client *client, unsigned gpio, unsigned ngpio, void *context); int (*teardown)(struct i2c_client *client, unsigned gpio, unsigned ngpio, void *context); void *context; }; #endif platform_data/edma.h 0000644 00000005021 14722070374 0010444 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * TI EDMA definitions * * Copyright (C) 2006-2013 Texas Instruments. 
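 *
 * Usage sketch (hypothetical client code): a driver asks the dmaengine
 * core for channel 12 on controller 0 using the filter data built by the
 * macros below, where edma_filter_fn is the filter exported by the EDMA
 * dmaengine driver:
 *
 *	chan = dma_request_channel(mask, edma_filter_fn,
 *				   EDMA_FILTER_PARAM(0, 12));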
*/ /* * This EDMA3 programming framework exposes two basic kinds of resource: * * Channel Triggers transfers, usually from a hardware event but * also manually or by "chaining" from DMA completions. * Each channel is coupled to a Parameter RAM (PaRAM) slot. * * Slot Each PaRAM slot holds a DMA transfer descriptor (PaRAM * "set"), source and destination addresses, a link to a * next PaRAM slot (if any), options for the transfer, and * instructions for updating those addresses. There are * more than twice as many slots as event channels. * * Each PaRAM set describes a sequence of transfers, either for one large * buffer or for several discontiguous smaller buffers. An EDMA transfer * is driven only from a channel, which performs the transfers specified * in its PaRAM slot until there are no more transfers. When that last * transfer completes, the "link" field may be used to reload the channel's * PaRAM slot with a new transfer descriptor. * * The EDMA Channel Controller (CC) maps requests from channels into physical * Transfer Controller (TC) requests when the channel triggers (by hardware * or software events, or by chaining). The two physical DMA channels provided * by the TCs are thus shared by many logical channels. * * DaVinci hardware also has a "QDMA" mechanism which is not currently * supported through this interface. (DSP firmware uses it though.) */ #ifndef EDMA_H_ #define EDMA_H_ enum dma_event_q { EVENTQ_0 = 0, EVENTQ_1 = 1, EVENTQ_2 = 2, EVENTQ_3 = 3, EVENTQ_DEFAULT = -1 }; #define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) #define EDMA_CTLR(i) ((i) >> 16) #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) #define EDMA_FILTER_PARAM(ctlr, chan) ((int[]) { EDMA_CTLR_CHAN(ctlr, chan) }) struct edma_rsv_info { const s16 (*rsv_chans)[2]; const s16 (*rsv_slots)[2]; }; struct dma_slave_map; /* platform_data for EDMA driver */ struct edma_soc_info { /* * Default queue is expected to be a low-priority queue. * This way, long transfers on the default queue started * by the codec engine will not cause audio defects. */ enum dma_event_q default_queue; /* Resource reservation for other cores */ struct edma_rsv_info *rsv; /* List of channels allocated for memcpy, terminated with -1 */ s32 *memcpy_channels; s8 (*queue_priority_mapping)[2]; const s16 (*xbar_chans)[2]; const struct dma_slave_map *slave_map; int slavecnt; }; #endif platform_data/leds-s3c24xx.h 0000644 00000001013 14722070374 0011676 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2006 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * S3C24XX - LEDs GPIO connector */ #ifndef __LEDS_S3C24XX_H #define __LEDS_S3C24XX_H #define S3C24XX_LEDF_ACTLOW (1<<0) /* LED is on when GPIO low */ #define S3C24XX_LEDF_TRISTATE (1<<1) /* tristate to turn off */ struct s3c24xx_led_platdata { unsigned int gpio; unsigned int flags; char *name; char *def_trigger; }; #endif /* __LEDS_S3C24XX_H */ platform_data/cros_ec_proto.h 0000644 00000025435 14722070374 0012411 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * ChromeOS Embedded Controller protocol interface. 
* * Copyright (C) 2012 Google, Inc */ #ifndef __LINUX_CROS_EC_PROTO_H #define __LINUX_CROS_EC_PROTO_H #include <linux/device.h> #include <linux/mutex.h> #include <linux/notifier.h> #include <linux/platform_data/cros_ec_commands.h> #define CROS_EC_DEV_NAME "cros_ec" #define CROS_EC_DEV_FP_NAME "cros_fp" #define CROS_EC_DEV_ISH_NAME "cros_ish" #define CROS_EC_DEV_PD_NAME "cros_pd" #define CROS_EC_DEV_SCP_NAME "cros_scp" #define CROS_EC_DEV_TP_NAME "cros_tp" /* * The EC is unresponsive for a time after a reboot command. Add a * simple delay to make sure that the bus stays locked. */ #define EC_REBOOT_DELAY_MS 50 /* * Max bus-specific overhead incurred by request/responses. * I2C requires 1 additional byte for requests. * I2C requires 2 additional bytes for responses. * SPI requires up to 32 additional bytes for responses. */ #define EC_PROTO_VERSION_UNKNOWN 0 #define EC_MAX_REQUEST_OVERHEAD 1 #define EC_MAX_RESPONSE_OVERHEAD 32 /* * Command interface between EC and AP, for LPC, I2C and SPI interfaces. */ enum { EC_MSG_TX_HEADER_BYTES = 3, EC_MSG_TX_TRAILER_BYTES = 1, EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES + EC_MSG_TX_TRAILER_BYTES, EC_MSG_RX_PROTO_BYTES = 3, /* Max length of messages for proto 2 */ EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + EC_MSG_TX_PROTO_BYTES, EC_MAX_MSG_BYTES = 64 * 1024, }; /** * struct cros_ec_command - Information about a ChromeOS EC command. * @version: Command version number (often 0). * @command: Command to send (EC_CMD_...). * @outsize: Outgoing length in bytes. * @insize: Max number of bytes to accept from the EC. * @result: EC's response to the command (separate from communication failure). * @data: Where to put the incoming data from EC and outgoing data to EC. */ struct cros_ec_command { uint32_t version; uint32_t command; uint32_t outsize; uint32_t insize; uint32_t result; uint8_t data[0]; }; /** * struct cros_ec_device - Information about a ChromeOS EC device. * @phys_name: Name of physical comms layer (e.g. 'i2c-4'). * @dev: Device pointer for physical comms device * @was_wake_device: True if this device was set to wake the system from * sleep at the last suspend. * @cros_class: The class structure for this device. * @cmd_readmem: Direct read of the EC memory-mapped region, if supported. * @offset: Is within EC_LPC_ADDR_MEMMAP region. * @bytes: Number of bytes to read. zero means "read a string" (including * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be * read. Caller must ensure that the buffer is large enough for the * result when reading a string. * @max_request: Max size of message requested. * @max_response: Max size of message response. * @max_passthru: Max size of passthru message. * @proto_version: The protocol version used for this device. * @priv: Private data. * @irq: Interrupt to use. * @id: Device id. * @din: Input buffer (for data from EC). This buffer will always be * dword-aligned and include enough space for up to 7 word-alignment * bytes also, so we can ensure that the body of the message is always * dword-aligned (64-bit). We use this alignment to keep ARM and x86 * happy. Probably word alignment would be OK, there might be a small * performance advantage to using dword. * @dout: Output buffer (for data to EC). This buffer will always be * dword-aligned and include enough space for up to 7 word-alignment * bytes also, so we can ensure that the body of the message is always * dword-aligned (64-bit). We use this alignment to keep ARM and x86 * happy.
Probably word alignment would be OK, there might be a small * performance advantage to using dword. * @din_size: Size of din buffer to allocate (zero to use static din). * @dout_size: Size of dout buffer to allocate (zero to use static dout). * @wake_enabled: True if this device can wake the system from sleep. * @suspended: True if this device had been suspended. * @cmd_xfer: Send command to EC and get response. * Returns the number of bytes received if the communication * succeeded, but that doesn't mean the EC was happy with the * command. The caller should check msg.result for the EC's result * code. * @pkt_xfer: Send packet to EC and get response. * @lock: One transaction at a time. * @mkbp_event_supported: True if this EC supports the MKBP event protocol. * @host_sleep_v1: True if this EC supports the sleep v1 command. * @event_notifier: Interrupt event notifier for transport devices. * @event_data: Raw payload transferred with the MKBP event. * @event_size: Size in bytes of the event data. * @host_event_wake_mask: Mask of host events that cause wake from suspend. * @ec: The platform_device used by the mfd driver to interface with the * main EC. * @pd: The platform_device used by the mfd driver to interface with the * PD behind an EC. */ struct cros_ec_device { /* These are used by other drivers that want to talk to the EC */ const char *phys_name; struct device *dev; bool was_wake_device; struct class *cros_class; int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, unsigned int bytes, void *dest); /* These are used to implement the platform-specific interface */ u16 max_request; u16 max_response; u16 max_passthru; u16 proto_version; void *priv; int irq; u8 *din; u8 *dout; int din_size; int dout_size; bool wake_enabled; bool suspended; int (*cmd_xfer)(struct cros_ec_device *ec, struct cros_ec_command *msg); int (*pkt_xfer)(struct cros_ec_device *ec, struct cros_ec_command *msg); struct mutex lock; bool mkbp_event_supported; bool host_sleep_v1; struct blocking_notifier_head event_notifier; struct ec_response_get_next_event_v1 event_data; int event_size; u32 host_event_wake_mask; u32 last_resume_result; /* The platform devices used by the mfd driver */ struct platform_device *ec; struct platform_device *pd; }; /** * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information. * @sensor_num: Id of the sensor, as reported by the EC. */ struct cros_ec_sensor_platform { u8 sensor_num; }; /** * struct cros_ec_platform - ChromeOS EC platform information. * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...) * used in /dev/ and sysfs. * @cmd_offset: Offset to apply for each command. Set when * registering a device behind another one. */ struct cros_ec_platform { const char *ec_name; u16 cmd_offset; }; /** * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device. * @ec_dev: Device to suspend. * * This can be called by drivers to handle a suspend event. * * Return: 0 on success or negative error code. */ int cros_ec_suspend(struct cros_ec_device *ec_dev); /** * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device. * @ec_dev: Device to resume. * * This can be called by drivers to handle a resume event. * * Return: 0 on success or negative error code. */ int cros_ec_resume(struct cros_ec_device *ec_dev); /** * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer. * @ec_dev: Device to register. * @msg: Message to write. 
* * This is intended to be used by all ChromeOS EC drivers, but at present * only SPI uses it. Once LPC uses the same protocol it can start using it. * I2C could use it now, with a refactor of the existing code. * * Return: 0 on success or negative error code. */ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** * cros_ec_check_result() - Check ec_msg->result. * @ec_dev: EC device. * @msg: Message to check. * * This is used by ChromeOS EC drivers to check the ec_msg->result for * errors and to warn about them. * * Return: 0 on success or negative error code. */ int cros_ec_check_result(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC. * @ec_dev: EC device. * @msg: Message to write. * * Call this to send a command to the ChromeOS EC. This should be used * instead of calling the EC's cmd_xfer() callback directly. * * Return: 0 on success or negative error code. */ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC. * @ec_dev: EC device. * @msg: Message to write. * * This function is identical to cros_ec_cmd_xfer, except it returns success * status only if both the command was transmitted successfully and the EC * replied with success status. It's not necessary to check msg->result when * using this function. * * Return: The number of bytes transferred on success or negative error code. */ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** * cros_ec_register() - Register a new ChromeOS EC, using the provided info. * @ec_dev: Device to register. * * Before calling this, allocate a pointer to a new device and then fill * in all the fields up to the --private-- marker. * * Return: 0 on success or negative error code. */ int cros_ec_register(struct cros_ec_device *ec_dev); /** * cros_ec_unregister() - Remove a ChromeOS EC. * @ec_dev: Device to unregister. * * Call this to deregister a ChromeOS EC, then clean up any private data. * * Return: 0 on success or negative error code. */ int cros_ec_unregister(struct cros_ec_device *ec_dev); /** * cros_ec_query_all() - Query the protocol version supported by the * ChromeOS EC. * @ec_dev: Device to register. * * Return: 0 on success or negative error code. */ int cros_ec_query_all(struct cros_ec_device *ec_dev); /** * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC. * @ec_dev: Device to fetch event from. * @wake_event: Pointer to a bool set to true upon return if the event might be * treated as a wake event. Ignored if null. * * Return: negative error code on errors; 0 for no data; or else number of * bytes received (i.e., an event was retrieved successfully). Event types are * written out to @ec_dev->event_data.event_type on success. */ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); /** * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC. * @ec_dev: Device to fetch event from. * * When MKBP is supported, when the EC raises an interrupt, we collect the * events raised and call the functions in the ec notifier. This function * is a helper to know which events are raised. * * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*. 
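 *
 * A sketch of a transport driver's notifier callback using this helper
 * (my_ec_notify and the specific event tested are hypothetical; the
 * notifier is invoked with the cros_ec_device as its data pointer):
 *
 *	static int my_ec_notify(struct notifier_block *nb,
 *				unsigned long queued_during_suspend,
 *				void *_ec_dev)
 *	{
 *		struct cros_ec_device *ec_dev = _ec_dev;
 *		u32 host_events = cros_ec_get_host_event(ec_dev);
 *
 *		if (host_events & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED))
 *			; // react to the lid-closed event here
 *		return NOTIFY_OK;
 *	}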
*/ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); #endif /* __LINUX_CROS_EC_PROTO_H */ platform_data/adau17x1.h 0000644 00000007015 14722070374 0011076 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Driver for ADAU1361/ADAU1461/ADAU1761/ADAU1961/ADAU1381/ADAU1781 codecs * * Copyright 2011-2014 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> */ #ifndef __LINUX_PLATFORM_DATA_ADAU17X1_H__ #define __LINUX_PLATFORM_DATA_ADAU17X1_H__ /** * enum adau17x1_micbias_voltage - Microphone bias voltage * @ADAU17X1_MICBIAS_0_90_AVDD: 0.9 * AVDD * @ADAU17X1_MICBIAS_0_65_AVDD: 0.65 * AVDD */ enum adau17x1_micbias_voltage { ADAU17X1_MICBIAS_0_90_AVDD = 0, ADAU17X1_MICBIAS_0_65_AVDD = 1, }; /** * enum adau1761_digmic_jackdet_pin_mode - Configuration of the JACKDET/MICIN pin * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE: Disable the pin * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC: Configure the pin for usage as * digital microphone input. * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT: Configure the pin for jack * insertion detection. */ enum adau1761_digmic_jackdet_pin_mode { ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE, ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC, ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT, }; /** * enum adau1761_jackdetect_debounce_time - Jack insertion detection debounce time * @ADAU1761_JACKDETECT_DEBOUNCE_5MS: 5 milliseconds * @ADAU1761_JACKDETECT_DEBOUNCE_10MS: 10 milliseconds * @ADAU1761_JACKDETECT_DEBOUNCE_20MS: 20 milliseconds * @ADAU1761_JACKDETECT_DEBOUNCE_40MS: 40 milliseconds */ enum adau1761_jackdetect_debounce_time { ADAU1761_JACKDETECT_DEBOUNCE_5MS = 0, ADAU1761_JACKDETECT_DEBOUNCE_10MS = 1, ADAU1761_JACKDETECT_DEBOUNCE_20MS = 2, ADAU1761_JACKDETECT_DEBOUNCE_40MS = 3, }; /** * enum adau1761_output_mode - Output mode configuration * @ADAU1761_OUTPUT_MODE_HEADPHONE: Headphone output * @ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS: Capless headphone output * @ADAU1761_OUTPUT_MODE_LINE: Line output */ enum adau1761_output_mode { ADAU1761_OUTPUT_MODE_HEADPHONE, ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS, ADAU1761_OUTPUT_MODE_LINE, }; /** * struct adau1761_platform_data - ADAU1761 Codec driver platform data * @input_differential: If true the input pins will be configured in * differential mode. * @lineout_mode: Output mode for the LOUT/ROUT pins * @headphone_mode: Output mode for the LHP/RHP pins * @digmic_jackdetect_pin_mode: JACKDET/MICIN pin configuration * @jackdetect_debounce_time: Jack insertion detection debounce time. * Note: This value will only be used if the JACKDET/MICIN pin is configured * for jack insertion detection. * @jackdetect_active_low: If true the jack insertion detection is active low. * Otherwise it will be active high. * @micbias_voltage: Microphone voltage bias */ struct adau1761_platform_data { bool input_differential; enum adau1761_output_mode lineout_mode; enum adau1761_output_mode headphone_mode; enum adau1761_digmic_jackdet_pin_mode digmic_jackdetect_pin_mode; enum adau1761_jackdetect_debounce_time jackdetect_debounce_time; bool jackdetect_active_low; enum adau17x1_micbias_voltage micbias_voltage; }; /** * struct adau1781_platform_data - ADAU1781 Codec driver platform data * @left_input_differential: If true configure the left input as * differential input. * @right_input_differential: If true configure the right input as differential * input. * @use_dmic: If true configure the MIC pins as digital microphone pins instead * of analog microphone pins.
* @micbias_voltage: Microphone voltage bias */ struct adau1781_platform_data { bool left_input_differential; bool right_input_differential; bool use_dmic; enum adau17x1_micbias_voltage micbias_voltage; }; #endif platform_data/atmel.h 0000644 00000001240 14722070374 0010637 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * atmel platform data */ #ifndef __ATMEL_H__ #define __ATMEL_H__ /* Compact Flash */ struct at91_cf_data { int irq_pin; /* I/O IRQ */ int det_pin; /* Card detect */ int vcc_pin; /* power switching */ int rst_pin; /* card reset */ u8 chipselect; /* EBI Chip Select number */ u8 flags; #define AT91_CF_TRUE_IDE 0x01 #define AT91_IDE_SWAP_A0_A2 0x02 }; /* FIXME: this needs a better location, but gets stuff building again */ #ifdef CONFIG_ATMEL_PM extern int at91_suspend_entering_slow_clock(void); #else static inline int at91_suspend_entering_slow_clock(void) { return 0; } #endif #endif /* __ATMEL_H__ */ platform_data/keypad-ep93xx.h 0000644 00000002006 14722070374 0012151 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __KEYPAD_EP93XX_H #define __KEYPAD_EP93XX_H struct matrix_keymap_data; /* flags for the ep93xx_keypad driver */ #define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */ #define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */ #define EP93XX_KEYPAD_BACK_DRIVE (1<<2) /* back driving mode */ #define EP93XX_KEYPAD_TEST_MODE (1<<3) /* scan only column 0 */ #define EP93XX_KEYPAD_AUTOREPEAT (1<<4) /* enable key autorepeat */ /** * struct ep93xx_keypad_platform_data - platform specific device structure * @keymap_data: pointer to &matrix_keymap_data * @debounce: debounce start count; terminal count is 0xff * @prescale: row/column counter pre-scaler load value * @flags: see above */ struct ep93xx_keypad_platform_data { struct matrix_keymap_data *keymap_data; unsigned int debounce; unsigned int prescale; unsigned int flags; unsigned int clk_rate; }; #define EP93XX_MATRIX_ROWS (8) #define EP93XX_MATRIX_COLS (8) #endif /* __KEYPAD_EP93XX_H */ platform_data/uio_pruss.h 0000644 00000001473 14722070374 0011575 0 ustar 00 /* * include/linux/platform_data/uio_pruss.h * * Platform data for uio_pruss driver * * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
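 *
 * A board would typically provide the INTC offset of its PRUSS instance,
 * e.g. (hypothetical value, not taken from a real board file):
 *
 *	static struct uio_pruss_pdata board_pruss_pdata = {
 *		.pintc_base = 0x4000,
 *	};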
*/ #ifndef _UIO_PRUSS_H_ #define _UIO_PRUSS_H_ /* To configure the PRUSS INTC base offset for UIO driver */ struct uio_pruss_pdata { u32 pintc_base; struct gen_pool *sram_pool; }; #endif /* _UIO_PRUSS_H_ */ platform_data/mouse-pxa930_trkball.h 0000644 00000000342 14722070374 0013424 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ARCH_PXA930_TRKBALL_H #define __ASM_ARCH_PXA930_TRKBALL_H struct pxa930_trkball_platform_data { int x_filter; int y_filter; }; #endif /* __ASM_ARCH_PXA930_TRKBALL_H */ platform_data/jz4740/jz4740_nand.h 0000644 00000001073 14722070374 0012445 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> * JZ4740 SoC NAND controller driver */ #ifndef __JZ4740_NAND_H__ #define __JZ4740_NAND_H__ #include <linux/mtd/rawnand.h> #include <linux/mtd/partitions.h> #define JZ_NAND_NUM_BANKS 4 struct jz_nand_platform_data { int num_partitions; struct mtd_partition *partitions; unsigned char banks[JZ_NAND_NUM_BANKS]; void (*ident_callback)(struct platform_device *, struct mtd_info *, struct mtd_partition **, int *num_partitions); }; #endif platform_data/x86/apple.h 0000644 00000000370 14722070374 0011266 0 ustar 00 #ifndef PLATFORM_DATA_X86_APPLE_H #define PLATFORM_DATA_X86_APPLE_H #ifdef CONFIG_X86 /** * x86_apple_machine - whether the machine is an x86 Apple Macintosh */ extern bool x86_apple_machine; #else #define x86_apple_machine false #endif #endif platform_data/x86/asus-wmi.h 0000644 00000007641 14722070374 0011742 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PLATFORM_DATA_X86_ASUS_WMI_H #define __PLATFORM_DATA_X86_ASUS_WMI_H #include <linux/errno.h> #include <linux/types.h> /* WMI Methods */ #define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ #define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */ #define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */ #define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */ #define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */ #define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */ #define ASUS_WMI_METHODID_AGFN 0x4E464741 /* Atk Generic FuNction */ #define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */ #define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */ #define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */ #define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */ #define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */ #define ASUS_WMI_METHODID_DCTS 0x53544344 /* Device status (DCTS) */ #define ASUS_WMI_METHODID_DSTS 0x53545344 /* Device status (DSTS) */ #define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */ #define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */ #define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */ #define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */ #define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */ #define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? 
*/ #define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE /* Wireless */ #define ASUS_WMI_DEVID_HW_SWITCH 0x00010001 #define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 #define ASUS_WMI_DEVID_CWAP 0x00010003 #define ASUS_WMI_DEVID_WLAN 0x00010011 #define ASUS_WMI_DEVID_WLAN_LED 0x00010012 #define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 #define ASUS_WMI_DEVID_GPS 0x00010015 #define ASUS_WMI_DEVID_WIMAX 0x00010017 #define ASUS_WMI_DEVID_WWAN3G 0x00010019 #define ASUS_WMI_DEVID_UWB 0x00010021 /* Leds */ /* 0x000200XX and 0x000400XX */ #define ASUS_WMI_DEVID_LED1 0x00020011 #define ASUS_WMI_DEVID_LED2 0x00020012 #define ASUS_WMI_DEVID_LED3 0x00020013 #define ASUS_WMI_DEVID_LED4 0x00020014 #define ASUS_WMI_DEVID_LED5 0x00020015 #define ASUS_WMI_DEVID_LED6 0x00020016 /* Backlight and Brightness */ #define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */ #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 #define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */ #define ASUS_WMI_DEVID_LIGHTBAR 0x00050025 #define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018 /* Misc */ #define ASUS_WMI_DEVID_CAMERA 0x00060013 /* Storage */ #define ASUS_WMI_DEVID_CARDREADER 0x00080013 /* Input */ #define ASUS_WMI_DEVID_TOUCHPAD 0x00100011 #define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012 #define ASUS_WMI_DEVID_FNLOCK 0x00100023 /* Fan, Thermal */ #define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011 #define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */ #define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013 /* Power */ #define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012 /* Deep S3 / Resume on LID open */ #define ASUS_WMI_DEVID_LID_RESUME 0x00120031 /* Maximum charging percentage */ #define ASUS_WMI_DEVID_RSOC 0x00120057 /* DSTS masks */ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 #define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000 #define ASUS_WMI_DSTS_USER_BIT 0x00020000 #define ASUS_WMI_DSTS_BIOS_BIT 0x00040000 #define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF #define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00 #define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F #if IS_REACHABLE(CONFIG_ASUS_WMI) int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval); #else static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval) { return -ENODEV; } #endif #endif /* __PLATFORM_DATA_X86_ASUS_WMI_H */ platform_data/x86/clk-lpss.h 0000644 00000000643 14722070374 0011720 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Intel Low Power Subsystem clocks. * * Copyright (C) 2013, Intel Corporation * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> * Rafael J. Wysocki <rafael.j.wysocki@intel.com> */ #ifndef __CLK_LPSS_H #define __CLK_LPSS_H struct lpss_clk_data { const char *name; struct clk *clk; }; extern int lpt_clk_init(void); #endif /* __CLK_LPSS_H */ platform_data/x86/clk-pmc-atom.h 0000644 00000001774 14722070374 0012462 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Intel Atom platform clocks for BayTrail and CherryTrail SoC. 
 * * Copyright (C) 2016, Intel Corporation * Author: Irina Tirdea <irina.tirdea@intel.com> */ #ifndef __PLATFORM_DATA_X86_CLK_PMC_ATOM_H #define __PLATFORM_DATA_X86_CLK_PMC_ATOM_H /** * struct pmc_clk - PMC platform clock configuration * * @name: identifier, typically pmc_plt_clk_<x>, x=[0..5] * @freq: in Hz, 19.2MHz and 25MHz (Baytrail only) supported * @parent_name: one of 'xtal' or 'osc' */ struct pmc_clk { const char *name; unsigned long freq; const char *parent_name; }; /** * struct pmc_clk_data - common PMC clock configuration * * @base: PMC clock register base offset * @clks: pointer to set of registered clocks, typically 0..5 * @critical: flag to indicate if firmware enabled pmc_plt_clks * should be marked as critical or not */ struct pmc_clk_data { void __iomem *base; const struct pmc_clk *clks; bool critical; }; #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ platform_data/x86/mlxcpld.h 0000644 00000004410 14722070374 0011627 0 ustar 00 /* * mlxcpld.h - Mellanox I2C multiplexer support in CPLD * * Copyright (c) 2016 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Michael Shych <michaels@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LINUX_I2C_MLXCPLD_H #define _LINUX_I2C_MLXCPLD_H /* Platform data for the CPLD I2C multiplexers */ /* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info * @adap_ids - adapter array * @num_adaps - number of adapters * @sel_reg_addr - mux select register offset in CPLD space */ struct mlxcpld_mux_plat_data { int *adap_ids; int num_adaps; int sel_reg_addr; }; #endif /* _LINUX_I2C_MLXCPLD_H */ platform_data/x86/soc.h 0000644 00000002403 14722070374 0010750 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Helpers for Intel SoC model detection * * Copyright (c) 2019, Intel Corporation.
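 *
 * Each SOC_INTEL_IS_CPU() invocation below expands into a
 * soc_intel_is_<soc>() predicate. A caller sketch (the quirk routine
 * is hypothetical):
 *
 *	if (soc_intel_is_byt() || soc_intel_is_cht())
 *		apply_atom_quirk();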
 */ #ifndef __PLATFORM_DATA_X86_SOC_H #define __PLATFORM_DATA_X86_SOC_H #if IS_ENABLED(CONFIG_X86) #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #define SOC_INTEL_IS_CPU(soc, type) \ static inline bool soc_intel_is_##soc(void) \ { \ static const struct x86_cpu_id soc##_cpu_ids[] = { \ X86_MATCH_INTEL_FAM6_MODEL(type, NULL), \ {} \ }; \ const struct x86_cpu_id *id; \ \ id = x86_match_cpu(soc##_cpu_ids); \ if (id) \ return true; \ return false; \ } SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT); SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT); SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT); SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS); SOC_INTEL_IS_CPU(cml, KABYLAKE_L); #else /* IS_ENABLED(CONFIG_X86) */ static inline bool soc_intel_is_byt(void) { return false; } static inline bool soc_intel_is_cht(void) { return false; } static inline bool soc_intel_is_apl(void) { return false; } static inline bool soc_intel_is_glk(void) { return false; } static inline bool soc_intel_is_cml(void) { return false; } #endif /* IS_ENABLED(CONFIG_X86) */ #endif /* __PLATFORM_DATA_X86_SOC_H */ platform_data/x86/pmc_atom.h 0000644 00000010545 14722070374 0011771 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Intel Atom SOC Power Management Controller Header File * Copyright (c) 2014, Intel Corporation. */ #ifndef PMC_ATOM_H #define PMC_ATOM_H #include <linux/bits.h> /* ValleyView Power Control Unit PCI Device ID */ #define PCI_DEVICE_ID_VLV_PMC 0x0F1C /* CherryTrail Power Control Unit PCI Device ID */ #define PCI_DEVICE_ID_CHT_PMC 0x229C /* PMC Memory mapped IO registers */ #define PMC_BASE_ADDR_OFFSET 0x44 #define PMC_BASE_ADDR_MASK 0xFFFFFE00 #define PMC_MMIO_REG_LEN 0x100 #define PMC_REG_BIT_WIDTH 32 /* BIOS uses FUNC_DIS to disable specific function */ #define PMC_FUNC_DIS 0x34 #define PMC_FUNC_DIS_2 0x38 /* CHT specific bits in FUNC_DIS2 register */ #define BIT_FD_GMM BIT(3) #define BIT_FD_ISH BIT(4) /* S0ix wake event control */ #define PMC_S0IX_WAKE_EN 0x3C #define BIT_LPC_CLOCK_RUN BIT(4) #define BIT_SHARED_IRQ_GPSC BIT(5) #define BIT_ORED_DEDICATED_IRQ_GPSS BIT(18) #define BIT_ORED_DEDICATED_IRQ_GPSC BIT(19) #define BIT_SHARED_IRQ_GPSS BIT(20) #define PMC_WAKE_EN_SETTING ~(BIT_LPC_CLOCK_RUN | \ BIT_SHARED_IRQ_GPSC | \ BIT_ORED_DEDICATED_IRQ_GPSS | \ BIT_ORED_DEDICATED_IRQ_GPSC | \ BIT_SHARED_IRQ_GPSS) /* The timers accumulate time spent in sleep state */ #define PMC_S0IR_TMR 0x80 #define PMC_S0I1_TMR 0x84 #define PMC_S0I2_TMR 0x88 #define PMC_S0I3_TMR 0x8C #define PMC_S0_TMR 0x90 /* Sleep state counter is in units of 32us */ #define PMC_TMR_SHIFT 5 /* Power status of power islands */ #define PMC_PSS 0x98 #define PMC_PSS_BIT_GBE BIT(0) #define PMC_PSS_BIT_SATA BIT(1) #define PMC_PSS_BIT_HDA BIT(2) #define PMC_PSS_BIT_SEC BIT(3) #define PMC_PSS_BIT_PCIE BIT(4) #define PMC_PSS_BIT_LPSS BIT(5) #define PMC_PSS_BIT_LPE BIT(6) #define PMC_PSS_BIT_DFX BIT(7) #define PMC_PSS_BIT_USH_CTRL BIT(8) #define PMC_PSS_BIT_USH_SUS BIT(9) #define PMC_PSS_BIT_USH_VCCS BIT(10) #define PMC_PSS_BIT_USH_VCCA BIT(11) #define PMC_PSS_BIT_OTG_CTRL BIT(12) #define PMC_PSS_BIT_OTG_VCCS BIT(13) #define PMC_PSS_BIT_OTG_VCCA_CLK BIT(14) #define PMC_PSS_BIT_OTG_VCCA BIT(15) #define PMC_PSS_BIT_USB BIT(16) #define PMC_PSS_BIT_USB_SUS BIT(17) /* CHT specific bits in PSS register */ #define PMC_PSS_BIT_CHT_UFS BIT(7) #define PMC_PSS_BIT_CHT_UXD BIT(11) #define PMC_PSS_BIT_CHT_UXD_FD BIT(12) #define PMC_PSS_BIT_CHT_UX_ENG BIT(15) #define PMC_PSS_BIT_CHT_USB_SUS BIT(16) #define PMC_PSS_BIT_CHT_GMM BIT(17) #define PMC_PSS_BIT_CHT_ISH BIT(18)
#define PMC_PSS_BIT_CHT_DFX_MASTER BIT(26) #define PMC_PSS_BIT_CHT_DFX_CLUSTER1 BIT(27) #define PMC_PSS_BIT_CHT_DFX_CLUSTER2 BIT(28) #define PMC_PSS_BIT_CHT_DFX_CLUSTER3 BIT(29) #define PMC_PSS_BIT_CHT_DFX_CLUSTER4 BIT(30) #define PMC_PSS_BIT_CHT_DFX_CLUSTER5 BIT(31) /* These registers reflect D3 status of functions */ #define PMC_D3_STS_0 0xA0 #define BIT_LPSS1_F0_DMA BIT(0) #define BIT_LPSS1_F1_PWM1 BIT(1) #define BIT_LPSS1_F2_PWM2 BIT(2) #define BIT_LPSS1_F3_HSUART1 BIT(3) #define BIT_LPSS1_F4_HSUART2 BIT(4) #define BIT_LPSS1_F5_SPI BIT(5) #define BIT_LPSS1_F6_XXX BIT(6) #define BIT_LPSS1_F7_XXX BIT(7) #define BIT_SCC_EMMC BIT(8) #define BIT_SCC_SDIO BIT(9) #define BIT_SCC_SDCARD BIT(10) #define BIT_SCC_MIPI BIT(11) #define BIT_HDA BIT(12) #define BIT_LPE BIT(13) #define BIT_OTG BIT(14) #define BIT_USH BIT(15) #define BIT_GBE BIT(16) #define BIT_SATA BIT(17) #define BIT_USB_EHCI BIT(18) #define BIT_SEC BIT(19) #define BIT_PCIE_PORT0 BIT(20) #define BIT_PCIE_PORT1 BIT(21) #define BIT_PCIE_PORT2 BIT(22) #define BIT_PCIE_PORT3 BIT(23) #define BIT_LPSS2_F0_DMA BIT(24) #define BIT_LPSS2_F1_I2C1 BIT(25) #define BIT_LPSS2_F2_I2C2 BIT(26) #define BIT_LPSS2_F3_I2C3 BIT(27) #define BIT_LPSS2_F4_I2C4 BIT(28) #define BIT_LPSS2_F5_I2C5 BIT(29) #define BIT_LPSS2_F6_I2C6 BIT(30) #define BIT_LPSS2_F7_I2C7 BIT(31) #define PMC_D3_STS_1 0xA4 #define BIT_SMB BIT(0) #define BIT_OTG_SS_PHY BIT(1) #define BIT_USH_SS_PHY BIT(2) #define BIT_DFX BIT(3) /* CHT specific bits in PMC_D3_STS_1 register */ #define BIT_STS_GMM BIT(1) #define BIT_STS_ISH BIT(2) /* PMC I/O Registers */ #define ACPI_BASE_ADDR_OFFSET 0x40 #define ACPI_BASE_ADDR_MASK 0xFFFFFE00 #define ACPI_MMIO_REG_LEN 0x100 #define PM1_CNT 0x4 #define SLEEP_TYPE_MASK GENMASK(12, 10) #define SLEEP_TYPE_S5 0x1C00 #define SLEEP_ENABLE BIT(13) extern int pmc_atom_read(int offset, u32 *value); extern int pmc_atom_write(int offset, u32 value); #endif /* PMC_ATOM_H */ platform_data/usb-s3c2410_udc.h 0000644 00000002055 14722070374 0012163 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* arch/arm/plat-samsung/include/plat/udc.h * * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org> * * Changelog: * 14-Mar-2005 RTP Created file * 02-Aug-2005 RTP File rename * 07-Sep-2005 BJD Minor cleanups, changed cmd to enum * 18-Jan-2007 HMW Add per-platform vbus_draw function */ #ifndef __ASM_ARM_ARCH_UDC_H #define __ASM_ARM_ARCH_UDC_H enum s3c2410_udc_cmd_e { S3C2410_UDC_P_ENABLE = 1, /* Pull-up enable */ S3C2410_UDC_P_DISABLE = 2, /* Pull-up disable */ S3C2410_UDC_P_RESET = 3, /* UDC reset, in case of */ }; struct s3c2410_udc_mach_info { void (*udc_command)(enum s3c2410_udc_cmd_e); void (*vbus_draw)(unsigned int ma); unsigned int pullup_pin; unsigned int pullup_pin_inverted; unsigned int vbus_pin; unsigned char vbus_pin_inverted; }; extern void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *); struct s3c24xx_hsudc_platdata; extern void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd); #endif /* __ASM_ARM_ARCH_UDC_H */ platform_data/ad7266.h 0000644 00000002736 14722070374 0010461 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AD7266/65 SPI ADC driver * * Copyright 2012 Analog Devices Inc. 
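 *
 * Board-code sketch (GPIO numbers hypothetical): fill the platform data
 * defined below and point spi_board_info.platform_data at it:
 *
 *	static const struct ad7266_platform_data example_pdata = {
 *		.range      = AD7266_RANGE_VREF,
 *		.mode       = AD7266_MODE_DIFF,
 *		.fixed_addr = false,
 *		.addr_gpios = { 10, 11, 12 },
 *	};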
*/ #ifndef __IIO_ADC_AD7266_H__ #define __IIO_ADC_AD7266_H__ /** * enum ad7266_range - AD7266 reference voltage range * @AD7266_RANGE_VREF: Device is configured for input range 0V - VREF * (RANGE pin set to low) * @AD7266_RANGE_2VREF: Device is configured for input range 0V - 2VREF * (RANGE pin set to high) */ enum ad7266_range { AD7266_RANGE_VREF, AD7266_RANGE_2VREF, }; /** * enum ad7266_mode - AD7266 sample mode * @AD7266_MODE_DIFF: Device is configured for full differential mode * (SGL/DIFF pin set to low, AD0 pin set to low) * @AD7266_MODE_PSEUDO_DIFF: Device is configured for pseudo differential mode * (SGL/DIFF pin set to low, AD0 pin set to high) * @AD7266_MODE_SINGLE_ENDED: Device is configured for single-ended mode * (SGL/DIFF pin set to high) */ enum ad7266_mode { AD7266_MODE_DIFF, AD7266_MODE_PSEUDO_DIFF, AD7266_MODE_SINGLE_ENDED, }; /** * struct ad7266_platform_data - Platform data for the AD7266 driver * @range: Reference voltage range the device is configured for * @mode: Sample mode the device is configured for * @fixed_addr: Whether the address pins are hard-wired * @addr_gpios: GPIOs used for controlling the address pins, only used if * fixed_addr is set to false. */ struct ad7266_platform_data { enum ad7266_range range; enum ad7266_mode mode; bool fixed_addr; unsigned int addr_gpios[3]; }; #endif platform_data/gpio_backlight.h 0000644 00000000453 14722070374 0012510 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * gpio_backlight.h - Simple GPIO-controlled backlight */ #ifndef __GPIO_BACKLIGHT_H__ #define __GPIO_BACKLIGHT_H__ struct device; struct gpio_backlight_platform_data { struct device *fbdev; int gpio; int def_value; const char *name; }; #endif platform_data/spi-imx.h 0000644 00000002712 14722070374 0011130 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MACH_SPI_H_ #define __MACH_SPI_H_ /* * struct spi_imx_master - device.platform_data for SPI controller devices. * @chipselect: Array of chipselects for this master or NULL. Numbers >= 0 * mean GPIO pins, -ENOENT means internal CSPI chipselect * matching the position in the array. E.g., if chipselect[1] = * -ENOENT then a SPI slave using chip select 1 will use the * native SS1 line of the CSPI. Omitting the array will use * all native chip selects. * Normally you want to use gpio based chip selects as the CSPI * module tries to be intelligent about when to assert the * chipselect: The CSPI module deasserts the chipselect once it * runs out of input data. The other problem is that it is not * possible to mix between high active and low active chipselects * on one single bus using the internal chipselects. * Unfortunately, on some SoCs, Freescale decided to put some * chipselects on dedicated pins which are not usable as gpios, * so we have to support the internal chipselects. * * @num_chipselect: If @chipselect is specified, ARRAY_SIZE(chipselect), * otherwise the number of native chip selects. 
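 *
 * A board-code sketch (chip-select numbers made up): CS0 on GPIO 4,
 * CS1 on the native SS1 line:
 *
 *	static int example_cs[] = { 4, -ENOENT };
 *	static struct spi_imx_master example_pdata = {
 *		.chipselect     = example_cs,
 *		.num_chipselect = ARRAY_SIZE(example_cs),
 *	};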
 */ struct spi_imx_master { int *chipselect; int num_chipselect; }; #endif /* __MACH_SPI_H_*/ platform_data/sht3x.h 0000644 00000000526 14722070374 0010614 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2016 Sensirion AG, Switzerland * Author: David Frey <david.frey@sensirion.com> * Author: Pascal Sachs <pascal.sachs@sensirion.com> */ #ifndef __SHT3X_H_ #define __SHT3X_H_ struct sht3x_platform_data { bool blocking_io; bool high_precision; }; #endif /* __SHT3X_H_ */ platform_data/cpuidle-exynos.h 0000644 00000000551 14722070374 0012511 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * http://www.samsung.com */ #ifndef __CPUIDLE_EXYNOS_H #define __CPUIDLE_EXYNOS_H struct cpuidle_exynos_data { int (*cpu0_enter_aftr)(void); int (*cpu1_powerdown)(void); void (*pre_enter_aftr)(void); void (*post_enter_aftr)(void); }; #endif platform_data/sky81452-backlight.h 0000644 00000001775 14722070374 0012710 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * sky81452.h SKY81452 backlight driver * * Copyright 2014 Skyworks Solutions Inc. * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com> */ #ifndef _SKY81452_BACKLIGHT_H #define _SKY81452_BACKLIGHT_H /** * struct sky81452_platform_data * @name: backlight driver name. If it is not defined, default name is lcd-backlight. * @gpio_enable: GPIO number which controls the EN pin * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. * @ignore_pwm: true if DPWMI should be ignored. * @dpwm_mode: true for DPWM dimming mode, otherwise analog dimming mode. * @phase_shift: true for phase shift mode. * @short_detection_threshold: It should be one of 4, 5, 6 and 7V. * @boost_current_limit: It should be either 2300 or 2750mA. */ struct sky81452_bl_platform_data { const char *name; int gpio_enable; unsigned int enable; bool ignore_pwm; bool dpwm_mode; bool phase_shift; unsigned int short_detection_threshold; unsigned int boost_current_limit; }; #endif platform_data/ad5755.h 0000644 00000005375 14722070374 0010460 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2012 Analog Devices Inc.
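 *
 * Board-code sketch (values hypothetical): the whole configuration lives
 * in struct ad5755_platform_data, defined below; e.g. channel 0 as a
 * 4-20 mA current output:
 *
 *	static const struct ad5755_platform_data example_pdata = {
 *		.dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE,
 *		.dc_dc_freq  = AD5755_DC_DC_FREQ_410kHZ,
 *		.dc_dc_maxv  = AD5755_DC_DC_MAXV_23V,
 *		.dac[0]      = { .mode = AD5755_MODE_CURRENT_4mA_20mA },
 *	};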
 */ #ifndef __LINUX_PLATFORM_DATA_AD5755_H__ #define __LINUX_PLATFORM_DATA_AD5755_H__ enum ad5755_mode { AD5755_MODE_VOLTAGE_0V_5V = 0, AD5755_MODE_VOLTAGE_0V_10V = 1, AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2, AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3, AD5755_MODE_CURRENT_4mA_20mA = 4, AD5755_MODE_CURRENT_0mA_20mA = 5, AD5755_MODE_CURRENT_0mA_24mA = 6, }; enum ad5755_dc_dc_phase { AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0, AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1, AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2, AD5755_DC_DC_PHASE_90_DEGREE = 3, }; enum ad5755_dc_dc_freq { AD5755_DC_DC_FREQ_250kHZ = 0, AD5755_DC_DC_FREQ_410kHZ = 1, AD5755_DC_DC_FREQ_650kHZ = 2, }; enum ad5755_dc_dc_maxv { AD5755_DC_DC_MAXV_23V = 0, AD5755_DC_DC_MAXV_24V5 = 1, AD5755_DC_DC_MAXV_27V = 2, AD5755_DC_DC_MAXV_29V5 = 3, }; enum ad5755_slew_rate { AD5755_SLEW_RATE_64k = 0, AD5755_SLEW_RATE_32k = 1, AD5755_SLEW_RATE_16k = 2, AD5755_SLEW_RATE_8k = 3, AD5755_SLEW_RATE_4k = 4, AD5755_SLEW_RATE_2k = 5, AD5755_SLEW_RATE_1k = 6, AD5755_SLEW_RATE_500 = 7, AD5755_SLEW_RATE_250 = 8, AD5755_SLEW_RATE_125 = 9, AD5755_SLEW_RATE_64 = 10, AD5755_SLEW_RATE_32 = 11, AD5755_SLEW_RATE_16 = 12, AD5755_SLEW_RATE_8 = 13, AD5755_SLEW_RATE_4 = 14, AD5755_SLEW_RATE_0_5 = 15, }; enum ad5755_slew_step_size { AD5755_SLEW_STEP_SIZE_1 = 0, AD5755_SLEW_STEP_SIZE_2 = 1, AD5755_SLEW_STEP_SIZE_4 = 2, AD5755_SLEW_STEP_SIZE_8 = 3, AD5755_SLEW_STEP_SIZE_16 = 4, AD5755_SLEW_STEP_SIZE_32 = 5, AD5755_SLEW_STEP_SIZE_64 = 6, AD5755_SLEW_STEP_SIZE_128 = 7, AD5755_SLEW_STEP_SIZE_256 = 8, }; /** * struct ad5755_platform_data - AD5755 DAC driver platform data * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter * compensation resistor is used. * @dc_dc_phase: DC-DC converter phase. * @dc_dc_freq: DC-DC converter frequency. * @dc_dc_maxv: DC-DC maximum allowed boost voltage. * @dac.mode: The mode to be used for the DAC output. * @dac.ext_current_sense_resistor: Whether an external current sense resistor * is used. * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange. * @dac.slew.enable: Whether to enable digital slew. * @dac.slew.rate: Slew rate of the digital slew. * @dac.slew.step_size: Slew step size of the digital slew. **/ struct ad5755_platform_data { bool ext_dc_dc_compenstation_resistor; enum ad5755_dc_dc_phase dc_dc_phase; enum ad5755_dc_dc_freq dc_dc_freq; enum ad5755_dc_dc_maxv dc_dc_maxv; struct { enum ad5755_mode mode; bool ext_current_sense_resistor; bool enable_voltage_overrange; struct { bool enable; enum ad5755_slew_rate rate; enum ad5755_slew_step_size step_size; } slew; } dac[4]; }; #endif platform_data/wkup_m3.h 0000644 00000001036 14722070374 0011125 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * TI Wakeup M3 remote processor platform data * * Copyright (C) 2014-2015 Texas Instruments, Inc. * * Dave Gerlach <d-gerlach@ti.com> */ #ifndef _LINUX_PLATFORM_DATA_WKUP_M3_H #define _LINUX_PLATFORM_DATA_WKUP_M3_H struct platform_device; struct wkup_m3_platform_data { const char *reset_name; int (*assert_reset)(struct platform_device *pdev, const char *name); int (*deassert_reset)(struct platform_device *pdev, const char *name); }; #endif /* _LINUX_PLATFORM_DATA_WKUP_M3_H */ platform_data/asoc-ti-mcbsp.h 0000644 00000001464 14722070374 0012206 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Defines for Multi-Channel Buffered Serial Port * * Copyright (C) 2002 RidgeRun, Inc.
 * Author: Steve Johnson */ #ifndef __ASOC_TI_MCBSP_H #define __ASOC_TI_MCBSP_H #include <linux/spinlock.h> #include <linux/clk.h> /* Platform specific configuration */ struct omap_mcbsp_ops { void (*request)(unsigned int); void (*free)(unsigned int); }; struct omap_mcbsp_platform_data { struct omap_mcbsp_ops *ops; u16 buffer_size; u8 reg_size; u8 reg_step; /* McBSP platform and instance specific features */ bool has_wakeup; /* Wakeup capability */ bool has_ccr; /* Transceiver has configuration control registers */ int (*force_ick_on)(struct clk *clk, bool force_on); }; void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata); #endif platform_data/hsmmc-omap.h 0000644 00000004310 14722070374 0011577 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * MMC definitions for OMAP2 * * Copyright (C) 2006 Nokia Corporation */ /* * struct omap_hsmmc_dev_attr.flags possibilities * * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag * should be set if this is the case. See for example Section 22.5.3 * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R). * * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers * don't work correctly on some MMC controller instances on some * OMAP3 SoCs; this flag should be set if this is the case. See * for example Advisory 2.1.1.128 "MMC: Multiple Block Read * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_ * Revision F (October 2010) (SPRZ278F). */ #define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0) #define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1) #define OMAP_HSMMC_SWAKEUP_MISSING BIT(2) struct omap_hsmmc_dev_attr { u8 flags; }; struct mmc_card; struct omap_hsmmc_platform_data { /* back-link to device */ struct device *dev; /* set if your board has components or wiring that limits the * maximum frequency on the MMC bus */ unsigned int max_freq; /* Integrating attributes from the omap_hwmod layer */ u8 controller_flags; /* Register offset deviation */ u16 reg_offset; /* * 4/8 wires and any additional host capabilities * need to OR all capabilities (ref. linux/mmc/host.h) */ u32 caps; /* Used for the MMC driver on 2430 and later */ u32 pm_caps; /* PM capabilities of the mmc */ /* nonremovable e.g. eMMC */ unsigned nonremovable:1; /* eMMC does not handle power off when not in sleep state */ unsigned no_regulator_off_init:1; /* we can put the features above into this variable */ #define HSMMC_HAS_PBIAS (1 << 0) #define HSMMC_HAS_UPDATED_RESET (1 << 1) #define HSMMC_HAS_HSPE_SUPPORT (1 << 2) unsigned features; /* string specifying a particular variant of hardware */ char *version; /* if we have special card, init it using this callback */ void (*init_card)(struct mmc_card *card); const char *name; u32 ocr_mask; }; platform_data/keyboard-pxa930_rotary.h 0000644 00000001202 14722070374 0013755 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ARCH_PXA930_ROTARY_H #define __ASM_ARCH_PXA930_ROTARY_H /* NOTE: * * rotary can be interpreted either as a relative input event (e.g. * REL_WHEEL or REL_HWHEEL) or a specific key event (e.g. UP/DOWN * or LEFT/RIGHT), depending on if up_key & down_key are assigned * or rel_code is assigned a non-zero value. When all are non-zero, * up_key and down_key will be preferred.
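 *
 * Example sketch: report rotation as a mouse wheel rather than as keys
 * (REL_WHEEL comes from <linux/input.h>; the setup below is hypothetical
 * board code):
 *
 *	static struct pxa930_rotary_platform_data example_info = {
 *		.rel_code = REL_WHEEL,	// up_key/down_key left at 0
 *	};
 *
 *	pxa930_set_rotarykey_info(&example_info);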
*/ struct pxa930_rotary_platform_data { int up_key; int down_key; int rel_code; }; void __init pxa930_set_rotarykey_info(struct pxa930_rotary_platform_data *info); #endif /* __ASM_ARCH_PXA930_ROTARY_H */ platform_data/clk-st.h 0000644 00000000425 14722070374 0010736 0 ustar 00 /* SPDX-License-Identifier: MIT */ /* * clock framework for AMD Stoney based clock * * Copyright 2018 Advanced Micro Devices, Inc. */ #ifndef __CLK_ST_H #define __CLK_ST_H #include <linux/compiler.h> struct st_clk_data { void __iomem *base; }; #endif /* __CLK_ST_H */ platform_data/pxa2xx_udc.h 0000644 00000001727 14722070374 0011634 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * This supports machine-specific differences in how the PXA2xx * USB Device Controller (UDC) is wired. * * It is set in linux/arch/arm/mach-pxa/<machine>.c or in * linux/arch/mach-ixp4xx/<machine>.c and used in * the probe routine of linux/drivers/usb/gadget/pxa2xx_udc.c */ #ifndef PXA2XX_UDC_H #define PXA2XX_UDC_H struct pxa2xx_udc_mach_info { int (*udc_is_connected)(void); /* do we see host? */ void (*udc_command)(int cmd); #define PXA2XX_UDC_CMD_CONNECT 0 /* let host see us */ #define PXA2XX_UDC_CMD_DISCONNECT 1 /* so host won't see us */ /* Boards following the design guidelines in the developer's manual, * with on-chip GPIOs not Lubbock's weird hardware, can have a sane * VBUS IRQ and omit the methods above. Store the GPIO number * here. Note that sometimes the signals go through inverters... */ bool gpio_pullup_inverted; int gpio_pullup; /* high == pullup activated */ }; #endif platform_data/keypad-nomadik-ske.h 0000644 00000002254 14722070374 0013220 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 * * Author: Naveen Kumar Gaddipati <naveen.gaddipati@stericsson.com> * * ux500 Scroll key and Keypad Encoder (SKE) header */ #ifndef __SKE_H #define __SKE_H #include <linux/input/matrix_keypad.h> /* register definitions for SKE peripheral */ #define SKE_CR 0x00 #define SKE_VAL0 0x04 #define SKE_VAL1 0x08 #define SKE_DBCR 0x0C #define SKE_IMSC 0x10 #define SKE_RIS 0x14 #define SKE_MIS 0x18 #define SKE_ICR 0x1C /* * Keypad module */ /** * struct keypad_platform_data - structure for platform specific data * @init: pointer to keypad init function * @exit: pointer to keypad deinitialisation function * @keymap_data: matrix scan code table for keycodes * @krow: maximum number of rows * @kcol: maximum number of columns * @debounce_ms: platform specific debounce time * @no_autorepeat: flag for auto repetition * @wakeup_enable: allow waking up the system */ struct ske_keypad_platform_data { int (*init)(void); int (*exit)(void); const struct matrix_keymap_data *keymap_data; u8 krow; u8 kcol; u8 debounce_ms; bool no_autorepeat; bool wakeup_enable; }; #endif /*__SKE_KPD_H*/ platform_data/dma-imx.h 0000644 00000003523 14722070374 0011077 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved. */ #ifndef __ASM_ARCH_MXC_DMA_H__ #define __ASM_ARCH_MXC_DMA_H__ #include <linux/scatterlist.h> #include <linux/device.h> #include <linux/dmaengine.h> /* * This enumerates peripheral types. Used for SDMA. 
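 *
 * A client sketch (the request line numbers are hypothetical):
 *
 *	struct imx_dma_data data = {
 *		.dma_request     = 14,
 *		.peripheral_type = IMX_DMATYPE_SSI,
 *		.priority        = DMA_PRIO_HIGH,
 *	};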
 */ enum sdma_peripheral_type { IMX_DMATYPE_SSI, /* MCU domain SSI */ IMX_DMATYPE_SSI_SP, /* Shared SSI */ IMX_DMATYPE_MMC, /* MMC */ IMX_DMATYPE_SDHC, /* SDHC */ IMX_DMATYPE_UART, /* MCU domain UART */ IMX_DMATYPE_UART_SP, /* Shared UART */ IMX_DMATYPE_FIRI, /* FIRI */ IMX_DMATYPE_CSPI, /* MCU domain CSPI */ IMX_DMATYPE_CSPI_SP, /* Shared CSPI */ IMX_DMATYPE_SIM, /* SIM */ IMX_DMATYPE_ATA, /* ATA */ IMX_DMATYPE_CCM, /* CCM */ IMX_DMATYPE_EXT, /* External peripheral */ IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */ IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */ IMX_DMATYPE_DSP, /* DSP */ IMX_DMATYPE_MEMORY, /* Memory */ IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */ IMX_DMATYPE_SPDIF, /* SPDIF */ IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */ IMX_DMATYPE_ASRC, /* ASRC */ IMX_DMATYPE_ESAI, /* ESAI */ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ IMX_DMATYPE_SAI, /* SAI */ }; enum imx_dma_prio { DMA_PRIO_HIGH = 0, DMA_PRIO_MEDIUM = 1, DMA_PRIO_LOW = 2 }; struct imx_dma_data { int dma_request; /* DMA request line */ int dma_request2; /* secondary DMA request line */ enum sdma_peripheral_type peripheral_type; int priority; }; static inline int imx_dma_is_ipu(struct dma_chan *chan) { return !strcmp(dev_name(chan->device->dev), "ipu-core"); } static inline int imx_dma_is_general_purpose(struct dma_chan *chan) { return !strcmp(chan->device->dev->driver->name, "imx-sdma") || !strcmp(chan->device->dev->driver->name, "imx-dma"); } #endif platform_data/ad7791.h 0000644 00000001022 14722070374 0010452 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PLATFORM_DATA_AD7791__ #define __LINUX_PLATFORM_DATA_AD7791__ /** * struct ad7791_platform_data - AD7791 device platform data * @buffered: If set to true configure the device for buffered input mode. * @burnout_current: If set to true the 100nA burnout current is enabled. * @unipolar: If set to true sample in unipolar mode, if set to false sample in * bipolar mode. */ struct ad7791_platform_data { bool buffered; bool burnout_current; bool unipolar; }; #endif platform_data/macb.h 0000644 00000000605 14722070374 0010443 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2004-2006 Atmel Corporation */ #ifndef __MACB_PDATA_H__ #define __MACB_PDATA_H__ #include <linux/clk.h> /** * struct macb_platform_data - platform data for MACB Ethernet * @pclk: platform clock * @hclk: AHB clock */ struct macb_platform_data { struct clk *pclk; struct clk *hclk; }; #endif /* __MACB_PDATA_H__ */ platform_data/mmc-s3cmci.h 0000644 00000003544 14722070374 0011501 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ARCH_MCI_H #define _ARCH_MCI_H /** * struct s3c24xx_mci_pdata - sd/mmc controller platform data * @no_wprotect: Set this to indicate there is no write-protect switch. * @no_detect: Set this if there is no detect switch. * @wprotect_invert: Invert the default sense of the write protect switch. * @use_dma: Set to allow the use of DMA. * @gpio_detect: GPIO number for the card detect line. * @gpio_wprotect: GPIO number for the write protect line. * @ocr_avail: The mask of the available power states, non-zero to use. * @set_power: Callback to control the power mode. * * The @gpio_detect is used for card detection when @no_detect is unset, * and the default sense is that 0 returned from gpio_get_value() means * that a card is inserted. If @detect_invert is set, then the value from * gpio_get_value() is inverted, which makes 1 mean card inserted.
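 *
 * For example (sketch, values hypothetical), a socket with a card-detect
 * switch but no write-protect switch might use:
 *
 *	static struct s3c24xx_mci_pdata example_mci_pdata = {
 *		.no_wprotect = 1,
 *		.ocr_avail   = MMC_VDD_32_33 | MMC_VDD_33_34, // <linux/mmc/host.h>
 *	};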
 * * The driver will use @gpio_wprotect to signal whether the card is write * protected if @no_wprotect is not set. A 0 returned from gpio_get_value() * means the card is read/write, and 1 means read-only. The @wprotect_invert * will invert the value returned from gpio_get_value(). * * Card power is set by @ocr_avail, using MMC_VDD_ constants if it is set * to a non-zero value, otherwise the default of 3.2-3.4V is used. */ struct s3c24xx_mci_pdata { unsigned int no_wprotect:1; unsigned int no_detect:1; unsigned int wprotect_invert:1; unsigned int use_dma:1; unsigned long ocr_avail; void (*set_power)(unsigned char power_mode, unsigned short vdd); }; /** * s3c24xx_mci_set_platdata - set platform data for mmc/sdi device * @pdata: The platform data * * Copy the platform data supplied by @pdata so that this can be marked * __initdata. */ extern void s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata); #endif /* _ARCH_MCI_H */ platform_data/spi-s3c64xx.h 0000644 00000004011 14722070374 0011547 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2009 Samsung Electronics Ltd. * Jaswinder Singh <jassi.brar@samsung.com> */ #ifndef __SPI_S3C64XX_H #define __SPI_S3C64XX_H #include <linux/dmaengine.h> struct platform_device; /** * struct s3c64xx_spi_csinfo - ChipSelect description * @fb_delay: Slave specific feedback delay. * Refer to FB_CLK_SEL register definition in SPI chapter. * @line: Custom 'identity' of the CS line. * * This is per SPI-Slave Chipselect information. * Allocate and initialize one in machine init code and make the * spi_board_info.controller_data point to it. */ struct s3c64xx_spi_csinfo { u8 fb_delay; unsigned line; }; /** * struct s3c64xx_spi_info - SPI Controller defining structure * @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field. * @num_cs: Number of CS this controller emulates. * @cfg_gpio: Configure pins for this SPI controller. */ struct s3c64xx_spi_info { int src_clk_nr; int num_cs; bool no_cs; int (*cfg_gpio)(void); }; /** * s3c64xx_spi_set_platdata - SPI Controller configure callback by the board * initialization code. * @cfg_gpio: Pointer to gpio setup function. * @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks. * @num_cs: Number of elements in the 'cs' array. * * Call this from machine init code for each SPI Controller that * has some chips attached to it. */ extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, int num_cs); extern void s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, int num_cs); extern void s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, int num_cs); /* defined by architecture to configure gpio */ extern int s3c64xx_spi0_cfg_gpio(void); extern int s3c64xx_spi1_cfg_gpio(void); extern int s3c64xx_spi2_cfg_gpio(void); extern struct s3c64xx_spi_info s3c64xx_spi0_pdata; extern struct s3c64xx_spi_info s3c64xx_spi1_pdata; extern struct s3c64xx_spi_info s3c64xx_spi2_pdata; #endif /*__SPI_S3C64XX_H */ platform_data/emif_plat.h 0000644 00000007652 14722070374 0011502 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Definitions for TI EMIF device platform data * * Copyright (C) 2012 Texas Instruments, Inc.
 * * Aneesh V <aneesh@ti.com> */ #ifndef __EMIF_PLAT_H #define __EMIF_PLAT_H /* Low power modes - EMIF_PWR_MGMT_CTRL */ #define EMIF_LP_MODE_DISABLE 0 #define EMIF_LP_MODE_CLOCK_STOP 1 #define EMIF_LP_MODE_SELF_REFRESH 2 #define EMIF_LP_MODE_PWR_DN 4 /* Hardware capabilities */ #define EMIF_HW_CAPS_LL_INTERFACE 0x00000001 /* * EMIF IP Revisions * EMIF4D - Used in OMAP4 * EMIF4D5 - Used in OMAP5 */ #define EMIF_4D 1 #define EMIF_4D5 2 /* * PHY types * ATTILAPHY - Used in OMAP4 * INTELLIPHY - Used in OMAP5 */ #define EMIF_PHY_TYPE_ATTILAPHY 1 #define EMIF_PHY_TYPE_INTELLIPHY 2 /* Custom config requests */ #define EMIF_CUSTOM_CONFIG_LPMODE 0x00000001 #define EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL 0x00000002 #define EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART 0x00000004 #ifndef __ASSEMBLY__ /** * struct ddr_device_info - All information about the DDR device except AC * timing parameters * @type: Device type (LPDDR2-S4, LPDDR2-S2 etc) * @density: Device density * @io_width: Bus width * @cs1_used: Whether there is a DDR device attached to the second * chip-select(CS1) of this EMIF instance * @cal_resistors_per_cs: Whether there is one calibration resistor per * chip-select or whether it's a single one for both * @manufacturer: Manufacturer name string */ struct ddr_device_info { u32 type; u32 density; u32 io_width; u32 cs1_used; u32 cal_resistors_per_cs; char manufacturer[10]; }; /** * struct emif_custom_configs - Custom configuration parameters/policies * passed from the platform layer * @mask: Mask to indicate which configs are requested * @lpmode: LPMODE to be used in PWR_MGMT_CTRL register * @lpmode_timeout_performance: Timeout before LPMODE entry when higher * performance is desired at the cost of power (typically * at higher OPPs) * @lpmode_timeout_power: Timeout before LPMODE entry when better power * savings is desired and performance is not important * (typically at lower loads indicated by lower OPPs) * @lpmode_freq_threshold: The DDR frequency threshold to identify between * the above two cases: * timeout = (freq >= lpmode_freq_threshold) ? * lpmode_timeout_performance : * lpmode_timeout_power; * @temp_alert_poll_interval_ms: LPDDR2 MR4 polling interval at nominal * temperature (in milliseconds). When temperature is high * polling is done 4 times as frequently. */ struct emif_custom_configs { u32 mask; u32 lpmode; u32 lpmode_timeout_performance; u32 lpmode_timeout_power; u32 lpmode_freq_threshold; u32 temp_alert_poll_interval_ms; }; /** * struct emif_platform_data - Platform data passed on EMIF platform * device creation. Used by the driver. * @hw_caps: Hw capabilities of the EMIF IP in the respective SoC * @device_info: Device info structure containing information such * as type, bus width, density etc * @timings: Timings information from device datasheet passed * as an array of 'struct lpddr2_timings'. Can be NULL * if default timings are ok * @timings_arr_size: Size of the timings array. Depends on the number * of different frequencies for which timings data * is provided * @min_tck: Minimum value of some timing parameters in terms * of number of cycles. Can be NULL if default values * are ok * @custom_configs: Custom configurations requested by SoC or board * code and the data for them. Can be NULL if default * configurations done by the driver are ok.
See * documentation for 'struct emif_custom_configs' for * more details */ struct emif_platform_data { u32 hw_caps; struct ddr_device_info *device_info; const struct lpddr2_timings *timings; u32 timings_arr_size; const struct lpddr2_min_tck *min_tck; struct emif_custom_configs *custom_configs; u32 ip_rev; u32 phy_type; }; #endif /* __ASSEMBLY__ */ #endif /* __LINUX_EMIF_H */ platform_data/ad7303.h 0000644 00000000726 14722070374 0010446 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Analog Devices AD7303 DAC driver * * Copyright 2013 Analog Devices Inc. */ #ifndef __IIO_ADC_AD7303_H__ #define __IIO_ADC_AD7303_H__ /** * struct ad7303_platform_data - AD7303 platform data * @use_external_ref: If set to true use an external voltage reference connected * to the REF pin, otherwise use the internal reference derived from Vdd. */ struct ad7303_platform_data { bool use_external_ref; }; #endif platform_data/i2c-pca-platform.h 0000644 00000000443 14722070374 0012601 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef I2C_PCA9564_PLATFORM_H #define I2C_PCA9564_PLATFORM_H struct i2c_pca9564_pf_platform_data { int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */ int timeout; /* timeout in jiffies */ }; #endif /* I2C_PCA9564_PLATFORM_H */ platform_data/ads1015.h 0000644 00000000740 14722070374 0010617 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Platform Data for ADS1015 12-bit 4-input ADC * (C) Copyright 2010 * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de> */ #ifndef LINUX_ADS1015_H #define LINUX_ADS1015_H #define ADS1015_CHANNELS 8 struct ads1015_channel_data { bool enabled; unsigned int pga; unsigned int data_rate; }; struct ads1015_platform_data { struct ads1015_channel_data channel_data[ADS1015_CHANNELS]; }; #endif /* LINUX_ADS1015_H */ platform_data/clk-da8xx-cfgchip.h 0000644 00000000770 14722070374 0012750 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * clk-da8xx-cfgchip - TI DaVinci DA8xx CFGCHIP clock driver * * Copyright (C) 2018 David Lechner <david@lechnology.com> */ #ifndef __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ #define __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ #include <linux/regmap.h> /** * da8xx_cfgchip_clk_platform_data * @cfgchip: CFGCHIP syscon regmap */ struct da8xx_cfgchip_clk_platform_data { struct regmap *cfgchip; }; #endif /* __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ */ platform_data/gpio-dwapb.h 0000644 00000000643 14722070374 0011574 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright(c) 2014 Intel Corporation. */ #ifndef GPIO_DW_APB_H #define GPIO_DW_APB_H struct dwapb_port_property { struct fwnode_handle *fwnode; unsigned int idx; unsigned int ngpio; unsigned int gpio_base; int irq[32]; bool has_irq; bool irq_shared; }; struct dwapb_platform_data { struct dwapb_port_property *properties; unsigned int nports; }; #endif platform_data/intel-mid_wdt.h 0000644 00000000710 14722070374 0012276 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * intel-mid_wdt: generic Intel MID SCU watchdog driver * * Copyright (C) 2014 Intel Corporation. All rights reserved. 
 * Contact: David Cohen <david.a.cohen@linux.intel.com> */ #ifndef __INTEL_MID_WDT_H__ #define __INTEL_MID_WDT_H__ #include <linux/platform_device.h> struct intel_mid_wdt_pdata { int irq; int (*probe)(struct platform_device *pdev); }; #endif /*__INTEL_MID_WDT_H__*/ platform_data/ad7793.h 0000644 00000007406 14722070374 0010465 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AD7792/AD7793 SPI ADC driver * * Copyright 2011 Analog Devices Inc. */ #ifndef __LINUX_PLATFORM_DATA_AD7793_H__ #define __LINUX_PLATFORM_DATA_AD7793_H__ /** * enum ad7793_clock_source - AD7793 clock source selection * @AD7793_CLK_SRC_INT: Internal 64 kHz clock, not available at the CLK pin. * @AD7793_CLK_SRC_INT_CO: Internal 64 kHz clock, available at the CLK pin. * @AD7793_CLK_SRC_EXT: Use external clock. * @AD7793_CLK_SRC_EXT_DIV2: Use external clock divided by 2. */ enum ad7793_clock_source { AD7793_CLK_SRC_INT, AD7793_CLK_SRC_INT_CO, AD7793_CLK_SRC_EXT, AD7793_CLK_SRC_EXT_DIV2, }; /** * enum ad7793_bias_voltage - AD7793 bias voltage selection * @AD7793_BIAS_VOLTAGE_DISABLED: Bias voltage generator disabled * @AD7793_BIAS_VOLTAGE_AIN1: Bias voltage connected to AIN1(-). * @AD7793_BIAS_VOLTAGE_AIN2: Bias voltage connected to AIN2(-). * @AD7793_BIAS_VOLTAGE_AIN3: Bias voltage connected to AIN3(-). * Only valid for AD7795/AD7796. */ enum ad7793_bias_voltage { AD7793_BIAS_VOLTAGE_DISABLED, AD7793_BIAS_VOLTAGE_AIN1, AD7793_BIAS_VOLTAGE_AIN2, AD7793_BIAS_VOLTAGE_AIN3, }; /** * enum ad7793_refsel - AD7793 reference voltage selection * @AD7793_REFSEL_REFIN1: External reference applied between REFIN1(+) * and REFIN1(-). * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+) * and REFIN1(-). Only valid for AD7795/AD7796. * @AD7793_REFSEL_INTERNAL: Internal 1.17 V reference. */ enum ad7793_refsel { AD7793_REFSEL_REFIN1 = 0, AD7793_REFSEL_REFIN2 = 1, AD7793_REFSEL_INTERNAL = 2, }; /** * enum ad7793_current_source_direction - AD7793 excitation current direction * @AD7793_IEXEC1_IOUT1_IEXEC2_IOUT2: Current source IEXC1 connected to pin * IOUT1, current source IEXC2 connected to pin IOUT2. * @AD7793_IEXEC1_IOUT2_IEXEC2_IOUT1: Current source IEXC2 connected to pin * IOUT1, current source IEXC1 connected to pin IOUT2. * @AD7793_IEXEC1_IEXEC2_IOUT1: Both current sources connected to pin IOUT1. * Only valid when the current sources are set to 10 uA or 210 uA. * @AD7793_IEXEC1_IEXEC2_IOUT2: Both current sources connected to pin IOUT2. * Only valid when the current sources are set to 10 uA or 210 uA. */ enum ad7793_current_source_direction { AD7793_IEXEC1_IOUT1_IEXEC2_IOUT2 = 0, AD7793_IEXEC1_IOUT2_IEXEC2_IOUT1 = 1, AD7793_IEXEC1_IEXEC2_IOUT1 = 2, AD7793_IEXEC1_IEXEC2_IOUT2 = 3, }; /** * enum ad7793_excitation_current - AD7793 excitation current selection * @AD7793_IX_DISABLED: Excitation current Disabled. * @AD7793_IX_10uA: Enable 10 micro-ampere excitation current. * @AD7793_IX_210uA: Enable 210 micro-ampere excitation current. * @AD7793_IX_1mA: Enable 1 milli-Ampere excitation current. */ enum ad7793_excitation_current { AD7793_IX_DISABLED = 0, AD7793_IX_10uA = 1, AD7793_IX_210uA = 2, AD7793_IX_1mA = 3, }; /** * struct ad7793_platform_data - AD7793 platform data * @clock_src: Clock source selection * @burnout_current: If set to true the 100nA burnout current is enabled. * @boost_enable: Enable boost for the bias voltage generator. * @buffered: If set to true configure the device for buffered input mode. * @unipolar: If set to true sample in unipolar mode, if set to false sample in * bipolar mode.
* @refsel: Reference voltage selection * @bias_voltage: Bias voltage selection * @exitation_current: Excitation current selection * @current_source_direction: Excitation current direction selection */ struct ad7793_platform_data { enum ad7793_clock_source clock_src; bool burnout_current; bool boost_enable; bool buffered; bool unipolar; enum ad7793_refsel refsel; enum ad7793_bias_voltage bias_voltage; enum ad7793_excitation_current exitation_current; enum ad7793_current_source_direction current_source_direction; }; #endif /* IIO_ADC_AD7793_H_ */ platform_data/spi-ep93xx.h 0000644 00000000467 14722070374 0011500 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_MACH_EP93XX_SPI_H #define __ASM_MACH_EP93XX_SPI_H struct spi_device; /** * struct ep93xx_spi_info - EP93xx specific SPI descriptor * @use_dma: use DMA for the transfers */ struct ep93xx_spi_info { bool use_dma; }; #endif /* __ASM_MACH_EP93XX_SPI_H */ platform_data/ti-aemif.h 0000644 00000002124 14722070374 0011232 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * TI DaVinci AEMIF platform glue. * * Copyright (C) 2017 BayLibre SAS * * Author: * Bartosz Golaszewski <bgolaszewski@baylibre.com> */ #ifndef __TI_DAVINCI_AEMIF_DATA_H__ #define __TI_DAVINCI_AEMIF_DATA_H__ #include <linux/of_platform.h> /** * struct aemif_abus_data - Async bus configuration parameters. * * @cs - Chip-select number. */ struct aemif_abus_data { u32 cs; }; /** * struct aemif_platform_data - Data to set up the TI aemif driver. * * @dev_lookup: of_dev_auxdata passed to of_platform_populate() for aemif * subdevices. * @cs_offset: Lowest allowed chip-select number. * @abus_data: Array of async bus configuration entries. * @num_abus_data: Number of abus entries. * @sub_devices: Array of platform subdevices. * @num_sub_devices: Number of subdevices. */ struct aemif_platform_data { struct of_dev_auxdata *dev_lookup; u32 cs_offset; struct aemif_abus_data *abus_data; size_t num_abus_data; struct platform_device *sub_devices; size_t num_sub_devices; }; #endif /* __TI_DAVINCI_AEMIF_DATA_H__ */ platform_data/max197.h 0000644 00000001147 14722070374 0010571 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Maxim MAX197 A/D Converter Driver * * Copyright (c) 2012 Savoir-faire Linux Inc. * Vivien Didelot <vivien.didelot@savoirfairelinux.com> * * For further information, see the Documentation/hwmon/max197.rst file. */ #ifndef _PDATA_MAX197_H #define _PDATA_MAX197_H /** * struct max197_platform_data - MAX197 connectivity info * @convert: Function used to start a conversion with control byte ctrl. * It must return the raw data, or a negative error code. 
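 *
 * A minimal sketch (the two bus helpers are hypothetical, standing in
 * for whatever I/O the board uses to reach the chip):
 *
 *	static int example_convert(u8 ctrl)
 *	{
 *		example_write_ctrl(ctrl);	// start conversion
 *		return example_read_raw();	// raw data or -errno
 *	}
 *
 *	static struct max197_platform_data example_pdata = {
 *		.convert = example_convert,
 *	};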
*/ struct max197_platform_data { int (*convert)(u8 ctrl); }; #endif /* _PDATA_MAX197_H */ platform_data/media/timb_video.h 0000644 00000000673 14722070374 0012746 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * timb_video.h Platform struct for the Timberdale video driver * Copyright (c) 2009-2010 Intel Corporation */ #ifndef _TIMB_VIDEO_ #define _TIMB_VIDEO_ 1 #include <linux/i2c.h> struct timb_video_platform_data { int dma_channel; int i2c_adapter; /* The I2C adapter where the encoder is attached */ struct { const char *module_name; struct i2c_board_info *info; } encoder; }; #endif platform_data/media/camera-mx2.h 0000644 00000001567 14722070374 0012564 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * mx2-cam.h - i.MX27/i.MX25 camera driver header file * * Copyright (C) 2003, Intel Corporation * Copyright (C) 2008, Sascha Hauer <s.hauer@pengutronix.de> * Copyright (C) 2010, Baruch Siach <baruch@tkos.co.il> */ #ifndef __MACH_MX2_CAM_H_ #define __MACH_MX2_CAM_H_ #define MX2_CAMERA_EXT_VSYNC (1 << 1) #define MX2_CAMERA_CCIR (1 << 2) #define MX2_CAMERA_CCIR_INTERLACE (1 << 3) #define MX2_CAMERA_HSYNC_HIGH (1 << 4) #define MX2_CAMERA_GATED_CLOCK (1 << 5) #define MX2_CAMERA_INV_DATA (1 << 6) #define MX2_CAMERA_PCLK_SAMPLE_RISING (1 << 7) /** * struct mx2_camera_platform_data - optional platform data for mx2_camera * @flags: any combination of MX2_CAMERA_* * @clk: clock rate of the csi block / 2 */ struct mx2_camera_platform_data { unsigned long flags; unsigned long clk; }; #endif /* __MACH_MX2_CAM_H_ */ platform_data/media/mmp-camera.h 0000644 00000001160 14722070374 0012634 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Information for the Marvell Armada MMP camera */ #include <media/v4l2-mediabus.h> enum dphy3_algo { DPHY3_ALGO_DEFAULT = 0, DPHY3_ALGO_PXA910, DPHY3_ALGO_PXA2128 }; struct mmp_camera_platform_data { enum v4l2_mbus_type bus_type; int mclk_src; /* which clock source the MCLK derives from */ int mclk_div; /* Clock Divider Value for MCLK */ /* * MIPI support */ int dphy[3]; /* DPHY: CSI2_DPHY3, CSI2_DPHY5, CSI2_DPHY6 */ enum dphy3_algo dphy3_algo; /* algos for calculate CSI2_DPHY3 */ int lane; /* ccic used lane number; 0 means DVP mode */ int lane_clk; }; platform_data/media/camera-pxa.h 0000644 00000001545 14722070374 0012642 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* camera.h - PXA camera driver header file Copyright (C) 2003, Intel Corporation Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> */ #ifndef __ASM_ARCH_CAMERA_H_ #define __ASM_ARCH_CAMERA_H_ #define PXA_CAMERA_MASTER 1 #define PXA_CAMERA_DATAWIDTH_4 2 #define PXA_CAMERA_DATAWIDTH_5 4 #define PXA_CAMERA_DATAWIDTH_8 8 #define PXA_CAMERA_DATAWIDTH_9 0x10 #define PXA_CAMERA_DATAWIDTH_10 0x20 #define PXA_CAMERA_PCLK_EN 0x40 #define PXA_CAMERA_MCLK_EN 0x80 #define PXA_CAMERA_PCP 0x100 #define PXA_CAMERA_HSP 0x200 #define PXA_CAMERA_VSP 0x400 struct pxacamera_platform_data { unsigned long flags; unsigned long mclk_10khz; int sensor_i2c_adapter_id; int sensor_i2c_address; }; extern void pxa_set_camera_info(struct pxacamera_platform_data *); #endif /* __ASM_ARCH_CAMERA_H_ */ platform_data/media/omap1_camera.h 0000644 00000001336 14722070374 0013147 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface * * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl> */ #ifndef __MEDIA_OMAP1_CAMERA_H_ #define __MEDIA_OMAP1_CAMERA_H_ #include <linux/bitops.h> #define OMAP1_CAMERA_IOSIZE 0x1c 
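/*
 * Example platform data (rates hypothetical; the flags are defined at the
 * end of this header):
 *
 *	static struct omap1_cam_platform_data example_cam_pdata = {
 *		.camexclk_khz = 12000,
 *		.lclk_khz_max = 36000,
 *		.flags        = OMAP1_CAMERA_LCLK_RISING,
 *	};
 */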
enum omap1_cam_vb_mode { OMAP1_CAM_DMA_CONTIG = 0, OMAP1_CAM_DMA_SG, }; #define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 3 : 2) struct omap1_cam_platform_data { unsigned long camexclk_khz; unsigned long lclk_khz_max; unsigned long flags; }; #define OMAP1_CAMERA_LCLK_RISING BIT(0) #define OMAP1_CAMERA_RST_LOW BIT(1) #define OMAP1_CAMERA_RST_HIGH BIT(2) #endif /* __MEDIA_OMAP1_CAMERA_H_ */ platform_data/media/timb_radio.h 0000644 00000000623 14722070374 0012731 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * timb_radio.h Platform struct for the Timberdale radio driver * Copyright (c) 2009 Intel Corporation */ #ifndef _TIMB_RADIO_ #define _TIMB_RADIO_ 1 #include <linux/i2c.h> struct timb_radio_platform_data { int i2c_adapter; /* I2C adapter where the tuner and dsp are attached */ struct i2c_board_info *tuner; struct i2c_board_info *dsp; }; #endif platform_data/media/omap4iss.h 0000644 00000002731 14722070374 0012361 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ARCH_ARM_PLAT_OMAP4_ISS_H #define ARCH_ARM_PLAT_OMAP4_ISS_H #include <linux/i2c.h> struct iss_device; enum iss_interface_type { ISS_INTERFACE_CSI2A_PHY1, ISS_INTERFACE_CSI2B_PHY2, }; /** * struct iss_csiphy_lane: CSI2 lane position and polarity * @pos: position of the lane * @pol: polarity of the lane */ struct iss_csiphy_lane { u8 pos; u8 pol; }; #define ISS_CSIPHY1_NUM_DATA_LANES 4 #define ISS_CSIPHY2_NUM_DATA_LANES 1 /** * struct iss_csiphy_lanes_cfg - CSI2 lane configuration * @data: Configuration of one or two data lanes * @clk: Clock lane configuration */ struct iss_csiphy_lanes_cfg { struct iss_csiphy_lane data[ISS_CSIPHY1_NUM_DATA_LANES]; struct iss_csiphy_lane clk; }; /** * struct iss_csi2_platform_data - CSI2 interface platform data * @crc: Enable the cyclic redundancy check * @vpclk_div: Video port output clock control */ struct iss_csi2_platform_data { unsigned crc:1; unsigned vpclk_div:2; struct iss_csiphy_lanes_cfg lanecfg; }; struct iss_subdev_i2c_board_info { struct i2c_board_info *board_info; int i2c_adapter_id; }; struct iss_v4l2_subdevs_group { struct iss_subdev_i2c_board_info *subdevs; enum iss_interface_type interface; union { struct iss_csi2_platform_data csi2; } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */ }; struct iss_platform_data { struct iss_v4l2_subdevs_group *subdevs; void (*set_constraints)(struct iss_device *iss, bool enable); }; #endif platform_data/media/si4713.h 0000644 00000002606 14722070374 0011555 0 ustar 00 /* * include/linux/platform_data/media/si4713.h * * Board related data definitions for Si4713 i2c device driver. * * Copyright (c) 2009 Nokia Corporation * Contact: Eduardo Valentin <eduardo.valentin@nokia.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * */ #ifndef SI4713_H #define SI4713_H /* The SI4713 I2C sensor chip has a fixed slave address of 0xc6 or 0x22. */ #define SI4713_I2C_ADDR_BUSEN_HIGH 0x63 #define SI4713_I2C_ADDR_BUSEN_LOW 0x11 /* * Platform dependent definition */ struct si4713_platform_data { bool is_platform_device; }; /* * Structure to query for Received Noise Level (RNL). */ struct si4713_rnl { __u32 index; /* modulator index */ __u32 frequency; /* frequency to perform rnl measurement */ __s32 rnl; /* result of measurement in dBuV */ __u32 reserved[4]; /* drivers and apps must init this to 0 */ }; /* * This is the ioctl number to query for rnl. 
Users must pass a * struct si4713_rnl pointer specifying desired frequency in 'frequency' field * following driver capabilities (i.e V4L2_TUNER_CAP_LOW). * Driver must return measured value in the same structure, filling 'rnl' field. */ #define SI4713_IOC_MEASURE_RNL _IOWR('V', BASE_VIDIOC_PRIVATE + 0, \ struct si4713_rnl) #endif /* ifndef SI4713_H*/ platform_data/media/s5p_hdmi.h 0000644 00000001411 14722070374 0012324 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Driver header for S5P HDMI chip. * * Copyright (c) 2011 Samsung Electronics, Co. Ltd * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com> */ #ifndef S5P_HDMI_H #define S5P_HDMI_H struct i2c_board_info; /** * @hdmiphy_bus: controller id for HDMIPHY bus * @hdmiphy_info: template for HDMIPHY I2C device * @mhl_bus: controller id for MHL control bus * @mhl_info: template for MHL I2C device * @hpd_gpio: GPIO for Hot-Plug-Detect pin * * NULL pointer for *_info fields indicates that * the corresponding chip is not present */ struct s5p_hdmi_platform_data { int hdmiphy_bus; struct i2c_board_info *hdmiphy_info; int mhl_bus; struct i2c_board_info *mhl_info; int hpd_gpio; }; #endif /* S5P_HDMI_H */ platform_data/media/coda.h 0000644 00000000352 14722070374 0011525 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2013 Philipp Zabel, Pengutronix */ #ifndef PLATFORM_CODA_H #define PLATFORM_CODA_H struct device; struct coda_platform_data { struct device *iram_dev; }; #endif platform_data/media/camera-mx3.h 0000644 00000002305 14722070374 0012554 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * mx3_camera.h - i.MX3x camera driver header file * * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> */ #ifndef _MX3_CAMERA_H_ #define _MX3_CAMERA_H_ #include <linux/device.h> #define MX3_CAMERA_CLK_SRC 1 #define MX3_CAMERA_EXT_VSYNC 2 #define MX3_CAMERA_DP 4 #define MX3_CAMERA_PCP 8 #define MX3_CAMERA_HSP 0x10 #define MX3_CAMERA_VSP 0x20 #define MX3_CAMERA_DATAWIDTH_4 0x40 #define MX3_CAMERA_DATAWIDTH_8 0x80 #define MX3_CAMERA_DATAWIDTH_10 0x100 #define MX3_CAMERA_DATAWIDTH_15 0x200 #define MX3_CAMERA_DATAWIDTH_MASK (MX3_CAMERA_DATAWIDTH_4 | MX3_CAMERA_DATAWIDTH_8 | \ MX3_CAMERA_DATAWIDTH_10 | MX3_CAMERA_DATAWIDTH_15) struct v4l2_async_subdev; /** * struct mx3_camera_pdata - i.MX3x camera platform data * @flags: MX3_CAMERA_* flags * @mclk_10khz: master clock frequency in 10kHz units * @dma_dev: IPU DMA device to match against in channel allocation */ struct mx3_camera_pdata { unsigned long flags; unsigned long mclk_10khz; struct device *dma_dev; struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */ int *asd_sizes; /* 0-terminated array of asd group sizes */ }; #endif platform_data/leds-omap.h 0000644 00000000540 14722070374 0011420 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2006 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> */ #ifndef ASMARM_ARCH_LED_H #define ASMARM_ARCH_LED_H struct omap_led_config { struct led_classdev cdev; s16 gpio; }; struct omap_led_platform_data { s16 nr_leds; struct omap_led_config *leds; }; #endif platform_data/sc18is602.h 0000644 00000000621 14722070374 0011101 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for NXP SC18IS602/603 * * Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net> * * For further information, see the Documentation/spi/spi-sc18is602.rst file. 
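 *
 * Board sketch (frequency hypothetical): hang the data off
 * i2c_board_info.platform_data:
 *
 *	static struct sc18is602_platform_data example_pdata = {
 *		.clock_frequency = 7372000,	// external oscillator, in Hz
 *	};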
 */ /** * struct sc18is602_platform_data - sc18is602 info * @clock_frequency: SC18IS603 oscillator frequency */ struct sc18is602_platform_data { u32 clock_frequency; }; platform_data/eth-ep93xx.h 0000644 00000000317 14722070374 0011457 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PLATFORM_DATA_ETH_EP93XX #define _LINUX_PLATFORM_DATA_ETH_EP93XX struct ep93xx_eth_data { unsigned char dev_addr[6]; unsigned char phy_id; }; #endif platform_data/arm-ux500-pm.h 0000644 00000001066 14722070374 0011613 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010-2013 * Author: Rickard Andersson <rickard.andersson@stericsson.com> for * ST-Ericsson. * Author: Daniel Lezcano <daniel.lezcano@linaro.org> for Linaro. */ #ifndef ARM_UX500_PM_H #define ARM_UX500_PM_H int prcmu_gic_decouple(void); int prcmu_gic_recouple(void); bool prcmu_gic_pending_irq(void); bool prcmu_pending_irq(void); bool prcmu_is_cpu_in_wfi(int cpu); int prcmu_copy_gic_settings(void); void ux500_pm_init(u32 phy_base, u32 size); #endif /* ARM_UX500_PM_H */ platform_data/leds-lm355x.h 0000644 00000002656 14722070374 0011523 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Texas Instruments * * Simple driver for Texas Instruments LM355x LED driver chip * * Author: G.Shark Jeong <gshark.jeong@gmail.com> * Daniel Jeong <daniel.jeong@ti.com> */ #define LM355x_NAME "leds-lm355x" #define LM3554_NAME "leds-lm3554" #define LM3556_NAME "leds-lm3556" /* lm3554 : strobe def. on */ enum lm355x_strobe { LM355x_PIN_STROBE_DISABLE = 0x00, LM355x_PIN_STROBE_ENABLE = 0x01, }; enum lm355x_torch { LM355x_PIN_TORCH_DISABLE = 0, LM3554_PIN_TORCH_ENABLE = 0x80, LM3556_PIN_TORCH_ENABLE = 0x10, }; enum lm355x_tx2 { LM355x_PIN_TX_DISABLE = 0, LM3554_PIN_TX_ENABLE = 0x20, LM3556_PIN_TX_ENABLE = 0x40, }; enum lm355x_ntc { LM355x_PIN_NTC_DISABLE = 0, LM3554_PIN_NTC_ENABLE = 0x08, LM3556_PIN_NTC_ENABLE = 0x80, }; enum lm355x_pmode { LM355x_PMODE_DISABLE = 0, LM355x_PMODE_ENABLE = 0x04, }; /* * struct lm3554_platform_data * @pin_strobe: strobe input * @pin_torch : input pin * lm3554-tx1/torch/gpio1 * lm3556-torch * @pin_tx2 : input pin * lm3554-envm/tx2/gpio2 * lm3556-tx pin * @ntc_pin : output pin * lm3554-ledi/ntc * lm3556-temp pin * @pass_mode : pass mode */ struct lm355x_platform_data { enum lm355x_strobe pin_strobe; enum lm355x_torch pin_tx1; enum lm355x_tx2 pin_tx2; enum lm355x_ntc ntc_pin; enum lm355x_pmode pass_mode; }; platform_data/usb-mx2.h 0000644 00000001540 14722070374 0011035 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2009 Martin Fuzzey <mfuzzey@gmail.com> */ #ifndef __ASM_ARCH_MX21_USBH #define __ASM_ARCH_MX21_USBH enum mx21_usbh_xcvr { /* Values below as used by hardware (HWMODE register) */ MX21_USBXCVR_TXDIF_RXDIF = 0, MX21_USBXCVR_TXDIF_RXSE = 1, MX21_USBXCVR_TXSE_RXDIF = 2, MX21_USBXCVR_TXSE_RXSE = 3, }; struct mx21_usbh_platform_data { enum mx21_usbh_xcvr host_xcvr; /* transceiver mode host 1,2 ports */ enum mx21_usbh_xcvr otg_xcvr; /* transceiver mode otg (as host) port */ u16 enable_host1:1, enable_host2:1, enable_otg_host:1, /* enable "OTG" port (as host) */ host1_xcverless:1, /* transceiverless host1 port */ host1_txenoe:1, /* output enable host1 transmit enable */ otg_ext_xcvr:1, /* external transceiver for OTG port */ unused:10; }; #endif /* __ASM_ARCH_MX21_USBH */ platform_data/leds-kirkwood-ns2.h 0000644 00000001346 14722070374 0013022 0 ustar 00 /* * Platform data structure for Network Space v2 LED driver * * This file is
licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #ifndef __LEDS_KIRKWOOD_NS2_H #define __LEDS_KIRKWOOD_NS2_H enum ns2_led_modes { NS_V2_LED_OFF, NS_V2_LED_ON, NS_V2_LED_SATA, }; struct ns2_led_modval { enum ns2_led_modes mode; int cmd_level; int slow_level; }; struct ns2_led { const char *name; const char *default_trigger; unsigned cmd; unsigned slow; int num_modes; struct ns2_led_modval *modval; }; struct ns2_led_platform_data { int num_leds; struct ns2_led *leds; }; #endif /* __LEDS_KIRKWOOD_NS2_H */ platform_data/dma-mmp_tdma.h 0000644 00000001207 14722070374 0012075 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * SRAM Memory Management * * Copyright (c) 2011 Marvell Semiconductors Inc. */ #ifndef __DMA_MMP_TDMA_H #define __DMA_MMP_TDMA_H #include <linux/genalloc.h> /* ARBITRARY: SRAM allocations are multiples of this 2^N size */ #define SRAM_GRANULARITY 512 enum sram_type { MMP_SRAM_UNDEFINED = 0, MMP_ASRAM, MMP_ISRAM, }; struct sram_platdata { char *pool_name; int granularity; }; #ifdef CONFIG_MMP_SRAM extern struct gen_pool *sram_get_gpool(char *pool_name); #else static inline struct gen_pool *sram_get_gpool(char *pool_name) { return NULL; } #endif #endif /* __DMA_MMP_TDMA_H */ platform_data/ads7828.h 0000644 00000001262 14722070374 0010641 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * TI ADS7828 A/D Converter platform data definition * * Copyright (c) 2012 Savoir-faire Linux Inc. * Vivien Didelot <vivien.didelot@savoirfairelinux.com> * * For further information, see the Documentation/hwmon/ads7828.rst file. */ #ifndef _PDATA_ADS7828_H #define _PDATA_ADS7828_H /** * struct ads7828_platform_data - optional ADS7828 connectivity info * @diff_input: Differential input mode. * @ext_vref: Use an external voltage reference. * @vref_mv: Voltage reference value, if external. */ struct ads7828_platform_data { bool diff_input; bool ext_vref; unsigned int vref_mv; }; #endif /* _PDATA_ADS7828_H */ platform_data/lp855x.h 0000644 00000007666 14722070374 0010624 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * LP855x Backlight Driver * * Copyright (C) 2011 Texas Instruments */ #ifndef _LP855X_H #define _LP855X_H #define BL_CTL_SHFT (0) #define BRT_MODE_SHFT (1) #define BRT_MODE_MASK (0x06) /* Enable backlight. 
Only valid when BRT_MODE=10(I2C only) */ #define ENABLE_BL (1) #define DISABLE_BL (0) #define I2C_CONFIG(id) id ## _I2C_CONFIG #define PWM_CONFIG(id) id ## _PWM_CONFIG /* DEVICE CONTROL register - LP8550 */ #define LP8550_PWM_CONFIG (LP8550_PWM_ONLY << BRT_MODE_SHFT) #define LP8550_I2C_CONFIG ((ENABLE_BL << BL_CTL_SHFT) | \ (LP8550_I2C_ONLY << BRT_MODE_SHFT)) /* DEVICE CONTROL register - LP8551 */ #define LP8551_PWM_CONFIG LP8550_PWM_CONFIG #define LP8551_I2C_CONFIG LP8550_I2C_CONFIG /* DEVICE CONTROL register - LP8552 */ #define LP8552_PWM_CONFIG LP8550_PWM_CONFIG #define LP8552_I2C_CONFIG LP8550_I2C_CONFIG /* DEVICE CONTROL register - LP8553 */ #define LP8553_PWM_CONFIG LP8550_PWM_CONFIG #define LP8553_I2C_CONFIG LP8550_I2C_CONFIG /* CONFIG register - LP8555 */ #define LP8555_PWM_STANDBY BIT(7) #define LP8555_PWM_FILTER BIT(6) #define LP8555_RELOAD_EPROM BIT(3) /* use it if EPROMs should be reset when the backlight turns on */ #define LP8555_OFF_OPENLEDS BIT(2) #define LP8555_PWM_CONFIG LP8555_PWM_ONLY #define LP8555_I2C_CONFIG LP8555_I2C_ONLY #define LP8555_COMB1_CONFIG LP8555_COMBINED1 #define LP8555_COMB2_CONFIG LP8555_COMBINED2 /* DEVICE CONTROL register - LP8556 */ #define LP8556_PWM_CONFIG (LP8556_PWM_ONLY << BRT_MODE_SHFT) #define LP8556_COMB1_CONFIG (LP8556_COMBINED1 << BRT_MODE_SHFT) #define LP8556_I2C_CONFIG ((ENABLE_BL << BL_CTL_SHFT) | \ (LP8556_I2C_ONLY << BRT_MODE_SHFT)) #define LP8556_COMB2_CONFIG (LP8556_COMBINED2 << BRT_MODE_SHFT) #define LP8556_FAST_CONFIG BIT(7) /* use it if EPROMs should be maintained when exiting the low power mode */ /* CONFIG register - LP8557 */ #define LP8557_PWM_STANDBY BIT(7) #define LP8557_PWM_FILTER BIT(6) #define LP8557_RELOAD_EPROM BIT(3) /* use it if EPROMs should be reset when the backlight turns on */ #define LP8557_OFF_OPENLEDS BIT(2) #define LP8557_PWM_CONFIG LP8557_PWM_ONLY #define LP8557_I2C_CONFIG LP8557_I2C_ONLY #define LP8557_COMB1_CONFIG LP8557_COMBINED1 #define LP8557_COMB2_CONFIG LP8557_COMBINED2 enum lp855x_chip_id { LP8550, LP8551, LP8552, LP8553, LP8555, LP8556, LP8557, }; enum lp8550_brighntess_source { LP8550_PWM_ONLY, LP8550_I2C_ONLY = 2, }; enum lp8551_brighntess_source { LP8551_PWM_ONLY = LP8550_PWM_ONLY, LP8551_I2C_ONLY = LP8550_I2C_ONLY, }; enum lp8552_brighntess_source { LP8552_PWM_ONLY = LP8550_PWM_ONLY, LP8552_I2C_ONLY = LP8550_I2C_ONLY, }; enum lp8553_brighntess_source { LP8553_PWM_ONLY = LP8550_PWM_ONLY, LP8553_I2C_ONLY = LP8550_I2C_ONLY, }; enum lp8555_brightness_source { LP8555_PWM_ONLY, LP8555_I2C_ONLY, LP8555_COMBINED1, /* Brightness register with shaped PWM */ LP8555_COMBINED2, /* PWM with shaped brightness register */ }; enum lp8556_brightness_source { LP8556_PWM_ONLY, LP8556_COMBINED1, /* pwm + i2c before the shaper block */ LP8556_I2C_ONLY, LP8556_COMBINED2, /* pwm + i2c after the shaper block */ }; enum lp8557_brightness_source { LP8557_PWM_ONLY, LP8557_I2C_ONLY, LP8557_COMBINED1, /* pwm + i2c after the shaper block */ LP8557_COMBINED2, /* pwm + i2c before the shaper block */ }; struct lp855x_rom_data { u8 addr; u8 val; }; /** * struct lp855x_platform_data * @name : Backlight driver name. If it is not defined, default name is set. * @device_control : value of DEVICE CONTROL register * @initial_brightness : initial value of backlight brightness * @period_ns : platform specific pwm period value. unit is nano. Only valid when mode is PWM_BASED. 
* @size_program : total size of lp855x_rom_data * @rom_data : list of new eeprom/eprom registers */ struct lp855x_platform_data { const char *name; u8 device_control; u8 initial_brightness; unsigned int period_ns; int size_program; struct lp855x_rom_data *rom_data; }; #endif platform_data/sdhci-pic32.h 0000644 00000000550 14722070374 0011550 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Purna Chandra Mandal, purna.mandal@microchip.com * Copyright (C) 2015 Microchip Technology Inc. All rights reserved. */ #ifndef __PIC32_SDHCI_PDATA_H__ #define __PIC32_SDHCI_PDATA_H__ struct pic32_sdhci_platform_data { /* read & write fifo threshold */ int (*setup_dma)(u32 rfifo, u32 wfifo); }; #endif platform_data/hwmon-s3c.h 0000644 00000002274 14722070374 0011363 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2005 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C - HWMon interface for ADC */ #ifndef __HWMON_S3C_H__ #define __HWMON_S3C_H__ /** * s3c_hwmon_chcfg - channel configuration * @name: The name to give this channel. * @mult: Multiply the ADC value read by this. * @div: Divide the value from the ADC by this. * * The value read from the ADC is converted to a value that * hwmon expects (mV) by result = (value_read * @mult) / @div. */ struct s3c_hwmon_chcfg { const char *name; unsigned int mult; unsigned int div; }; /** * s3c_hwmon_pdata - HWMON platform data * @in: One configuration for each possible channel used. */ struct s3c_hwmon_pdata { struct s3c_hwmon_chcfg *in[8]; }; /** * s3c_hwmon_set_platdata - Set platform data for S3C HWMON device * @pd: Platform data to register to device. * * Register the given platform data for use with the S3C HWMON device. * The call will copy the platform data, so the board definitions can * make the structure itself __initdata. */ extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd); #endif /* __HWMON_S3C_H__ */ platform_data/asoc-s3c24xx_simtec.h 0000644 00000001352 14722070374 0013246 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * Simtec Audio support. */ /** * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio * @use_mpllin: Select codec clock from MPLLin * @output_cdclk: Need to output CDCLK to the codec * @have_mic: Set if we have a MIC socket * @have_lout: Set if we have a LineOut socket * @amp_gpio: GPIO pin to enable the AMP * @amp_gain: Option GPIO to control AMP gain */ struct s3c24xx_audio_simtec_pdata { unsigned int use_mpllin:1; unsigned int output_cdclk:1; unsigned int have_mic:1; unsigned int have_lout:1; int amp_gpio; int amp_gain[2]; void (*startup)(void); }; platform_data/mtd-nand-s3c2410.h 0000644 00000004051 14722070374 0012237 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2004 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2410 - NAND device controller platform_device info */ #ifndef __MTD_NAND_S3C2410_H #define __MTD_NAND_S3C2410_H #include <linux/mtd/rawnand.h> /** * struct s3c2410_nand_set - define a set of one or more nand chips * @flash_bbt: Openmoko u-boot can create a Bad Block Table * Setting this flag will allow the kernel to * look for it at boot time and also skip the NAND * scan. * @options: Default value to set into 'struct nand_chip' options. 
* @nr_chips: Number of chips in this set * @nr_partitions: Number of partitions pointed to by @partitions * @name: Name of set (optional) * @nr_map: Map for low-layer logical to physical chip numbers (option) * @partitions: The mtd partition list * * define a set of one or more nand chips registered with an unique mtd. Also * allows to pass flag to the underlying NAND layer. 'disable_ecc' will trigger * a warning at boot time. */ struct s3c2410_nand_set { unsigned int flash_bbt:1; unsigned int options; int nr_chips; int nr_partitions; char *name; int *nr_map; struct mtd_partition *partitions; struct device_node *of_node; }; struct s3c2410_platform_nand { /* timing information for controller, all times in nanoseconds */ int tacls; /* time for active CLE/ALE to nWE/nOE */ int twrph0; /* active time for nWE/nOE */ int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */ unsigned int ignore_unset_ecc:1; nand_ecc_modes_t ecc_mode; int nr_sets; struct s3c2410_nand_set *sets; void (*select_chip)(struct s3c2410_nand_set *, int chip); }; /** * s3c_nand_set_platdata() - register NAND platform data. * @nand: The NAND platform data to register with s3c_device_nand. * * This function copies the given NAND platform data, @nand and registers * it with the s3c_device_nand. This allows @nand to be __initdata. */ extern void s3c_nand_set_platdata(struct s3c2410_platform_nand *nand); #endif /*__MTD_NAND_S3C2410_H */ platform_data/dma-mv_xor.h 0000644 00000000567 14722070374 0011621 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Marvell XOR platform device data definition file. */ #ifndef __DMA_MV_XOR_H #define __DMA_MV_XOR_H #include <linux/dmaengine.h> #include <linux/mbus.h> #define MV_XOR_NAME "mv_xor" struct mv_xor_channel_data { dma_cap_mask_t cap_mask; }; struct mv_xor_platform_data { struct mv_xor_channel_data *channels; }; #endif platform_data/shtc1.h 0000644 00000000457 14722070374 0010570 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Sensirion AG, Switzerland * Author: Johannes Winkelmann <johannes.winkelmann@sensirion.com> */ #ifndef __SHTC1_H_ #define __SHTC1_H_ struct shtc1_platform_data { bool blocking_io; bool high_precision; }; #endif /* __SHTC1_H_ */ platform_data/mv_usb.h 0000644 00000002054 14722070374 0011034 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2011 Marvell International Ltd. All rights reserved. */ #ifndef __MV_PLATFORM_USB_H #define __MV_PLATFORM_USB_H enum pxa_ehci_type { EHCI_UNDEFINED = 0, PXA_U2OEHCI, /* pxa 168, 9xx */ PXA_SPH, /* pxa 168, 9xx SPH */ MMP3_HSIC, /* mmp3 hsic */ MMP3_FSIC, /* mmp3 fsic */ }; enum { MV_USB_MODE_OTG, MV_USB_MODE_HOST, }; enum { VBUS_LOW = 0, VBUS_HIGH = 1 << 0, }; struct mv_usb_addon_irq { unsigned int irq; int (*poll)(void); }; struct mv_usb_platform_data { struct mv_usb_addon_irq *id; /* Only valid for OTG. ID pin change*/ struct mv_usb_addon_irq *vbus; /* valid for OTG/UDC. VBUS change*/ /* only valid for HCD. 
OTG or Host only*/ unsigned int mode; /* This flag is used for that needs id pin checked by otg */ unsigned int disable_otg_clock_gating:1; /* Force a_bus_req to be asserted */ unsigned int otg_force_a_bus_req:1; int (*phy_init)(void __iomem *regbase); void (*phy_deinit)(void __iomem *regbase); int (*set_vbus)(unsigned int vbus); }; #endif platform_data/spi-ath79.h 0000644 00000000563 14722070374 0011271 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller * * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> */ #ifndef _ATH79_SPI_PLATFORM_H #define _ATH79_SPI_PLATFORM_H struct ath79_spi_platform_data { unsigned bus_num; unsigned num_chipselect; }; #endif /* _ATH79_SPI_PLATFORM_H */ platform_data/shmob_drm.h 0000644 00000005104 14722070374 0011512 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm.h -- SH Mobile DRM driver * * Copyright (C) 2012 Renesas Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ #ifndef __SHMOB_DRM_H__ #define __SHMOB_DRM_H__ #include <linux/kernel.h> #include <drm/drm_mode.h> enum shmob_drm_clk_source { SHMOB_DRM_CLK_BUS, SHMOB_DRM_CLK_PERIPHERAL, SHMOB_DRM_CLK_EXTERNAL, }; enum shmob_drm_interface { SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */ SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */ SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */ SHMOB_DRM_IFACE_RGB12B, /* 12bpp */ SHMOB_DRM_IFACE_RGB16, /* 16bpp */ SHMOB_DRM_IFACE_RGB18, /* 18bpp */ SHMOB_DRM_IFACE_RGB24, /* 24bpp */ SHMOB_DRM_IFACE_YUV422, /* 16bpp */ SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */ SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */ SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */ SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */ SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */ SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */ SHMOB_DRM_IFACE_SYS16A, /* 16bpp */ SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */ SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */ SHMOB_DRM_IFACE_SYS18, /* 18bpp */ SHMOB_DRM_IFACE_SYS24, /* 24bpp */ }; struct shmob_drm_backlight_data { const char *name; int max_brightness; int (*get_brightness)(void); int (*set_brightness)(int brightness); }; struct shmob_drm_panel_data { unsigned int width_mm; /* Panel width in mm */ unsigned int height_mm; /* Panel height in mm */ struct drm_mode_modeinfo mode; }; struct shmob_drm_sys_interface_data { unsigned int read_latch:6; unsigned int read_setup:8; unsigned int read_cycle:8; unsigned int read_strobe:8; unsigned int write_setup:8; unsigned int write_cycle:8; unsigned int write_strobe:8; unsigned int cs_setup:3; unsigned int vsync_active_high:1; unsigned int vsync_dir_input:1; }; #define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */ #define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */ #define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */ #define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */ #define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */ struct shmob_drm_interface_data { enum shmob_drm_interface interface; struct shmob_drm_sys_interface_data sys; unsigned int clk_div; unsigned int flags; }; struct shmob_drm_platform_data { enum shmob_drm_clk_source clk_source; struct shmob_drm_interface_data iface; struct shmob_drm_panel_data panel; struct shmob_drm_backlight_data backlight; }; #endif /* __SHMOB_DRM_H__ */ platform_data/dma-coh901318.h 0000644 00000003746 14722070374 0011550 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for the 
COH901318 DMA controller * Copyright (C) 2007-2013 ST-Ericsson */ #ifndef PLAT_COH901318_H #define PLAT_COH901318_H #ifdef CONFIG_COH901318 /* We only support the U300 DMA channels */ #define U300_DMA_MSL_TX_0 0 #define U300_DMA_MSL_TX_1 1 #define U300_DMA_MSL_TX_2 2 #define U300_DMA_MSL_TX_3 3 #define U300_DMA_MSL_TX_4 4 #define U300_DMA_MSL_TX_5 5 #define U300_DMA_MSL_TX_6 6 #define U300_DMA_MSL_RX_0 7 #define U300_DMA_MSL_RX_1 8 #define U300_DMA_MSL_RX_2 9 #define U300_DMA_MSL_RX_3 10 #define U300_DMA_MSL_RX_4 11 #define U300_DMA_MSL_RX_5 12 #define U300_DMA_MSL_RX_6 13 #define U300_DMA_MMCSD_RX_TX 14 #define U300_DMA_MSPRO_TX 15 #define U300_DMA_MSPRO_RX 16 #define U300_DMA_UART0_TX 17 #define U300_DMA_UART0_RX 18 #define U300_DMA_APEX_TX 19 #define U300_DMA_APEX_RX 20 #define U300_DMA_PCM_I2S0_TX 21 #define U300_DMA_PCM_I2S0_RX 22 #define U300_DMA_PCM_I2S1_TX 23 #define U300_DMA_PCM_I2S1_RX 24 #define U300_DMA_XGAM_CDI 25 #define U300_DMA_XGAM_PDI 26 #define U300_DMA_SPI_TX 27 #define U300_DMA_SPI_RX 28 #define U300_DMA_GENERAL_PURPOSE_0 29 #define U300_DMA_GENERAL_PURPOSE_1 30 #define U300_DMA_GENERAL_PURPOSE_2 31 #define U300_DMA_GENERAL_PURPOSE_3 32 #define U300_DMA_GENERAL_PURPOSE_4 33 #define U300_DMA_GENERAL_PURPOSE_5 34 #define U300_DMA_GENERAL_PURPOSE_6 35 #define U300_DMA_GENERAL_PURPOSE_7 36 #define U300_DMA_GENERAL_PURPOSE_8 37 #define U300_DMA_UART1_TX 38 #define U300_DMA_UART1_RX 39 #define U300_DMA_DEVICE_CHANNELS 32 #define U300_DMA_CHANNELS 40 /** * coh901318_filter_id() - DMA channel filter function * @chan: dma channel handle * @chan_id: id of dma channel to be filter out * * In dma_request_channel() it specifies what channel id to be requested */ bool coh901318_filter_id(struct dma_chan *chan, void *chan_id); #else static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) { return false; } #endif #endif /* PLAT_COH901318_H */ platform_data/max3421-hcd.h 0000644 00000001450 14722070374 0011373 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 eGauge Systems LLC * Contributed by David Mosberger-Tang <davidm@egauge.net> * * Platform-data structure for MAX3421 USB HCD driver. * */ #ifndef MAX3421_HCD_PLAT_H_INCLUDED #define MAX3421_HCD_PLAT_H_INCLUDED /* * This structure defines the mapping of certain auxiliary functions to the * MAX3421E GPIO pins. The chip has eight GP inputs and eight GP outputs. * A value of 0 indicates that the pin is not used/wired to anything. * * At this point, the only control the max3421-hcd driver cares about is * to control Vbus (5V to the peripheral). */ struct max3421_hcd_platform_data { u8 vbus_gpout; /* pin controlling Vbus */ u8 vbus_active_level; /* level that turns on power */ }; #endif /* MAX3421_HCD_PLAT_H_INCLUDED */ platform_data/keyboard-spear.h 0000644 00000007471 14722070374 0012461 0 ustar 00 /* * Copyright (C) 2010 ST Microelectronics * Rajeev Kumar <rajeevkumar.linux@gmail.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. 
*/ #ifndef __PLAT_KEYBOARD_H #define __PLAT_KEYBOARD_H #include <linux/bitops.h> #include <linux/input.h> #include <linux/input/matrix_keypad.h> #include <linux/types.h> #define DECLARE_9x9_KEYMAP(_name) \ int _name[] = { \ KEY(0, 0, KEY_ESC), \ KEY(0, 1, KEY_1), \ KEY(0, 2, KEY_2), \ KEY(0, 3, KEY_3), \ KEY(0, 4, KEY_4), \ KEY(0, 5, KEY_5), \ KEY(0, 6, KEY_6), \ KEY(0, 7, KEY_7), \ KEY(0, 8, KEY_8), \ KEY(1, 0, KEY_9), \ KEY(1, 1, KEY_MINUS), \ KEY(1, 2, KEY_EQUAL), \ KEY(1, 3, KEY_BACKSPACE), \ KEY(1, 4, KEY_TAB), \ KEY(1, 5, KEY_Q), \ KEY(1, 6, KEY_W), \ KEY(1, 7, KEY_E), \ KEY(1, 8, KEY_R), \ KEY(2, 0, KEY_T), \ KEY(2, 1, KEY_Y), \ KEY(2, 2, KEY_U), \ KEY(2, 3, KEY_I), \ KEY(2, 4, KEY_O), \ KEY(2, 5, KEY_P), \ KEY(2, 6, KEY_LEFTBRACE), \ KEY(2, 7, KEY_RIGHTBRACE), \ KEY(2, 8, KEY_ENTER), \ KEY(3, 0, KEY_LEFTCTRL), \ KEY(3, 1, KEY_A), \ KEY(3, 2, KEY_S), \ KEY(3, 3, KEY_D), \ KEY(3, 4, KEY_F), \ KEY(3, 5, KEY_G), \ KEY(3, 6, KEY_H), \ KEY(3, 7, KEY_J), \ KEY(3, 8, KEY_K), \ KEY(4, 0, KEY_L), \ KEY(4, 1, KEY_SEMICOLON), \ KEY(4, 2, KEY_APOSTROPHE), \ KEY(4, 3, KEY_GRAVE), \ KEY(4, 4, KEY_LEFTSHIFT), \ KEY(4, 5, KEY_BACKSLASH), \ KEY(4, 6, KEY_Z), \ KEY(4, 7, KEY_X), \ KEY(4, 8, KEY_C), \ KEY(5, 0, KEY_V), \ KEY(5, 1, KEY_B), \ KEY(5, 2, KEY_N), \ KEY(5, 3, KEY_M), \ KEY(5, 4, KEY_COMMA), \ KEY(5, 5, KEY_DOT), \ KEY(5, 6, KEY_SLASH), \ KEY(5, 7, KEY_RIGHTSHIFT), \ KEY(5, 8, KEY_KPASTERISK), \ KEY(6, 0, KEY_LEFTALT), \ KEY(6, 1, KEY_SPACE), \ KEY(6, 2, KEY_CAPSLOCK), \ KEY(6, 3, KEY_F1), \ KEY(6, 4, KEY_F2), \ KEY(6, 5, KEY_F3), \ KEY(6, 6, KEY_F4), \ KEY(6, 7, KEY_F5), \ KEY(6, 8, KEY_F6), \ KEY(7, 0, KEY_F7), \ KEY(7, 1, KEY_F8), \ KEY(7, 2, KEY_F9), \ KEY(7, 3, KEY_F10), \ KEY(7, 4, KEY_NUMLOCK), \ KEY(7, 5, KEY_SCROLLLOCK), \ KEY(7, 6, KEY_KP7), \ KEY(7, 7, KEY_KP8), \ KEY(7, 8, KEY_KP9), \ KEY(8, 0, KEY_KPMINUS), \ KEY(8, 1, KEY_KP4), \ KEY(8, 2, KEY_KP5), \ KEY(8, 3, KEY_KP6), \ KEY(8, 4, KEY_KPPLUS), \ KEY(8, 5, KEY_KP1), \ KEY(8, 6, KEY_KP2), \ KEY(8, 7, KEY_KP3), \ KEY(8, 8, KEY_KP0), \ } #define DECLARE_6x6_KEYMAP(_name) \ int _name[] = { \ KEY(0, 0, KEY_RESERVED), \ KEY(0, 1, KEY_1), \ KEY(0, 2, KEY_2), \ KEY(0, 3, KEY_3), \ KEY(0, 4, KEY_4), \ KEY(0, 5, KEY_5), \ KEY(1, 0, KEY_Q), \ KEY(1, 1, KEY_W), \ KEY(1, 2, KEY_E), \ KEY(1, 3, KEY_R), \ KEY(1, 4, KEY_T), \ KEY(1, 5, KEY_Y), \ KEY(2, 0, KEY_D), \ KEY(2, 1, KEY_F), \ KEY(2, 2, KEY_G), \ KEY(2, 3, KEY_H), \ KEY(2, 4, KEY_J), \ KEY(2, 5, KEY_K), \ KEY(3, 0, KEY_B), \ KEY(3, 1, KEY_N), \ KEY(3, 2, KEY_M), \ KEY(3, 3, KEY_COMMA), \ KEY(3, 4, KEY_DOT), \ KEY(3, 5, KEY_SLASH), \ KEY(4, 0, KEY_F6), \ KEY(4, 1, KEY_F7), \ KEY(4, 2, KEY_F8), \ KEY(4, 3, KEY_F9), \ KEY(4, 4, KEY_F10), \ KEY(4, 5, KEY_NUMLOCK), \ KEY(5, 0, KEY_KP2), \ KEY(5, 1, KEY_KP3), \ KEY(5, 2, KEY_KP0), \ KEY(5, 3, KEY_KPDOT), \ KEY(5, 4, KEY_RO), \ KEY(5, 5, KEY_ZENKAKUHANKAKU), \ } #define KEYPAD_9x9 0 #define KEYPAD_6x6 1 #define KEYPAD_2x2 2 /** * struct kbd_platform_data - spear keyboard platform data * keymap: pointer to keymap data (table and size) * rep: enables key autorepeat * mode: choose keyboard support(9x9, 6x6, 2x2) * suspended_rate: rate at which keyboard would operate in suspended mode * * This structure is supposed to be used by platform code to supply * keymaps to drivers that implement keyboards. 
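 *
 * A minimal board sketch (the myboard_* names and the suspended_rate value
 * are illustrative assumptions; it uses the 6x6 keymap declared above):
 *
 *	static DECLARE_6x6_KEYMAP(myboard_keymap);
 *	static struct matrix_keymap_data myboard_keymap_data = {
 *		.keymap		= myboard_keymap,
 *		.keymap_size	= ARRAY_SIZE(myboard_keymap),
 *	};
 *
 *	static struct kbd_platform_data myboard_kbd_pdata = {
 *		.keymap		= &myboard_keymap_data,
 *		.rep		= true,
 *		.mode		= KEYPAD_6x6,
 *		.suspended_rate	= 2000000,	// Hz, board specific
 *	};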
*/ struct kbd_platform_data { const struct matrix_keymap_data *keymap; bool rep; unsigned int mode; unsigned int suspended_rate; }; #endif /* __PLAT_KEYBOARD_H */ platform_data/dma-mcf-edma.h 0000644 00000002173 14722070374 0011753 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Freescale eDMA platform data, ColdFire SoC's family. * * Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef __LINUX_PLATFORM_DATA_MCF_EDMA_H__ #define __LINUX_PLATFORM_DATA_MCF_EDMA_H__ struct dma_slave_map; bool mcf_edma_filter_fn(struct dma_chan *chan, void *param); #define MCF_EDMA_FILTER_PARAM(ch) ((void *)ch) /** * struct mcf_edma_platform_data - platform specific data for eDMA engine * * @ver The eDMA module version. * @dma_channels The number of eDMA channels. */ struct mcf_edma_platform_data { int dma_channels; const struct dma_slave_map *slave_map; int slavecnt; }; #endif /* __LINUX_PLATFORM_DATA_MCF_EDMA_H__ */ platform_data/microchip-ksz.h 0000644 00000001702 14722070374 0012322 0 ustar 00 /* * Microchip KSZ series switch platform data * * Copyright (C) 2017 * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __MICROCHIP_KSZ_H #define __MICROCHIP_KSZ_H #include <linux/kernel.h> struct ksz_platform_data { u32 chip_id; u16 enabled_ports; }; #endif platform_data/mmp_audio.h 0000644 00000000524 14722070374 0011513 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * MMP Platform AUDIO Management * * Copyright (c) 2011 Marvell Semiconductors Inc. 
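 *
 * A minimal sketch of how a board might fill in the structure below
 * (all values are illustrative assumptions, not defaults of this header):
 *
 *	static struct mmp_audio_platdata myboard_audio_pdata = {
 *		.period_max_capture	= 8 * 1024,
 *		.buffer_max_capture	= 32 * 1024,
 *		.period_max_playback	= 8 * 1024,
 *		.buffer_max_playback	= 64 * 1024,
 *	};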
*/ #ifndef MMP_AUDIO_H #define MMP_AUDIO_H struct mmp_audio_platdata { u32 period_max_capture; u32 buffer_max_capture; u32 period_max_playback; u32 buffer_max_playback; }; #endif /* MMP_AUDIO_H */ platform_data/lm3639_bl.h 0000644 00000002571 14722070374 0011157 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Simple driver for Texas Instruments LM3630 LED Flash driver chip * Copyright (C) 2012 Texas Instruments */ #ifndef __LINUX_LM3639_H #define __LINUX_LM3639_H #define LM3639_NAME "lm3639_bl" enum lm3639_pwm { LM3639_PWM_DISABLE = 0x00, LM3639_PWM_EN_ACTLOW = 0x48, LM3639_PWM_EN_ACTHIGH = 0x40, }; enum lm3639_strobe { LM3639_STROBE_DISABLE = 0x00, LM3639_STROBE_EN_ACTLOW = 0x10, LM3639_STROBE_EN_ACTHIGH = 0x30, }; enum lm3639_txpin { LM3639_TXPIN_DISABLE = 0x00, LM3639_TXPIN_EN_ACTLOW = 0x04, LM3639_TXPIN_EN_ACTHIGH = 0x0C, }; enum lm3639_fleds { LM3639_FLED_DIASBLE_ALL = 0x00, LM3639_FLED_EN_1 = 0x40, LM3639_FLED_EN_2 = 0x20, LM3639_FLED_EN_ALL = 0x60, }; enum lm3639_bleds { LM3639_BLED_DIASBLE_ALL = 0x00, LM3639_BLED_EN_1 = 0x10, LM3639_BLED_EN_2 = 0x08, LM3639_BLED_EN_ALL = 0x18, }; enum lm3639_bled_mode { LM3639_BLED_MODE_EXPONETIAL = 0x00, LM3639_BLED_MODE_LINEAR = 0x10, }; struct lm3639_platform_data { unsigned int max_brt_led; unsigned int init_brt_led; /* input pins */ enum lm3639_pwm pin_pwm; enum lm3639_strobe pin_strobe; enum lm3639_txpin pin_tx; /* output pins */ enum lm3639_fleds fled_pins; enum lm3639_bleds bled_pins; enum lm3639_bled_mode bled_mode; void (*pwm_set_intensity) (int brightness, int max_brightness); int (*pwm_get_intensity) (void); }; #endif /* __LINUX_LM3639_H */ platform_data/i2c-hid.h 0000644 00000002624 14722070374 0010763 0 ustar 00 /* * HID over I2C protocol implementation * * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com> * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #ifndef __LINUX_I2C_HID_H #define __LINUX_I2C_HID_H #include <linux/regulator/consumer.h> #include <linux/types.h> /** * struct i2chid_platform_data - used by hid over i2c implementation. * @hid_descriptor_address: i2c register where the HID descriptor is stored. * @supplies: regulators for powering on the device. * @post_power_delay_ms: delay after powering on before device is usable. * * Note that it is the responsibility of the platform driver (or the acpi 5.0 * driver, or the flattened device tree) to setup the irq related to the gpio in * the struct i2c_board_info. * The platform driver should also setup the gpio according to the device: * * A typical example is the following: * irq = gpio_to_irq(intr_gpio); * hkdk4412_i2c_devs5[0].irq = irq; // store the irq in i2c_board_info * gpio_request(intr_gpio, "elan-irq"); * s3c_gpio_setpull(intr_gpio, S3C_GPIO_PULL_UP); */ struct i2c_hid_platform_data { u16 hid_descriptor_address; struct regulator_bulk_data supplies[2]; int post_power_delay_ms; }; #endif /* __LINUX_I2C_HID_H */ platform_data/mtd-nand-omap2.h 0000644 00000004001 14722070374 0012251 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2006 Micron Technology Inc. 
*/ #ifndef _MTD_NAND_OMAP2_H #define _MTD_NAND_OMAP2_H #include <linux/mtd/partitions.h> #define GPMC_BCH_NUM_REMAINDER 8 enum nand_io { NAND_OMAP_PREFETCH_POLLED = 0, /* prefetch polled mode, default */ NAND_OMAP_POLLED, /* polled mode, without prefetch */ NAND_OMAP_PREFETCH_DMA, /* prefetch enabled sDMA mode */ NAND_OMAP_PREFETCH_IRQ /* prefetch enabled irq mode */ }; enum omap_ecc { /* * 1-bit ECC: calculation and correction by SW * ECC stored at end of spare area */ OMAP_ECC_HAM1_CODE_SW = 0, /* * 1-bit ECC: calculation by GPMC, Error detection by Software * ECC layout compatible with ROM code layout */ OMAP_ECC_HAM1_CODE_HW, /* 4-bit ECC calculation by GPMC, Error detection by Software */ OMAP_ECC_BCH4_CODE_HW_DETECTION_SW, /* 4-bit ECC calculation by GPMC, Error detection by ELM */ OMAP_ECC_BCH4_CODE_HW, /* 8-bit ECC calculation by GPMC, Error detection by Software */ OMAP_ECC_BCH8_CODE_HW_DETECTION_SW, /* 8-bit ECC calculation by GPMC, Error detection by ELM */ OMAP_ECC_BCH8_CODE_HW, /* 16-bit ECC calculation by GPMC, Error detection by ELM */ OMAP_ECC_BCH16_CODE_HW, }; struct gpmc_nand_regs { void __iomem *gpmc_nand_command; void __iomem *gpmc_nand_address; void __iomem *gpmc_nand_data; void __iomem *gpmc_prefetch_config1; void __iomem *gpmc_prefetch_config2; void __iomem *gpmc_prefetch_control; void __iomem *gpmc_prefetch_status; void __iomem *gpmc_ecc_config; void __iomem *gpmc_ecc_control; void __iomem *gpmc_ecc_size_config; void __iomem *gpmc_ecc1_result; void __iomem *gpmc_bch_result0[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result1[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result2[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result3[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; }; #endif platform_data/ad5761.h 0000644 00000002371 14722070374 0010452 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter * * Copyright 2016 Qtechnology A/S * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com> */ #ifndef __LINUX_PLATFORM_DATA_AD5761_H__ #define __LINUX_PLATFORM_DATA_AD5761_H__ /** * enum ad5761_voltage_range - Voltage range the AD5761 is configured for. 
* @AD5761_VOLTAGE_RANGE_M10V_10V: -10V to 10V * @AD5761_VOLTAGE_RANGE_0V_10V: 0V to 10V * @AD5761_VOLTAGE_RANGE_M5V_5V: -5V to 5V * @AD5761_VOLTAGE_RANGE_0V_5V: 0V to 5V * @AD5761_VOLTAGE_RANGE_M2V5_7V5: -2.5V to 7.5V * @AD5761_VOLTAGE_RANGE_M3V_3V: -3V to 3V * @AD5761_VOLTAGE_RANGE_0V_16V: 0V to 16V * @AD5761_VOLTAGE_RANGE_0V_20V: 0V to 20V */ enum ad5761_voltage_range { AD5761_VOLTAGE_RANGE_M10V_10V, AD5761_VOLTAGE_RANGE_0V_10V, AD5761_VOLTAGE_RANGE_M5V_5V, AD5761_VOLTAGE_RANGE_0V_5V, AD5761_VOLTAGE_RANGE_M2V5_7V5, AD5761_VOLTAGE_RANGE_M3V_3V, AD5761_VOLTAGE_RANGE_0V_16V, AD5761_VOLTAGE_RANGE_0V_20V, }; /** * struct ad5761_platform_data - AD5761 DAC driver platform data * @voltage_range: Voltage range the AD5761 is configured for */ struct ad5761_platform_data { enum ad5761_voltage_range voltage_range; }; #endif platform_data/mmc-davinci.h 0000644 00000001340 14722070374 0011725 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Board-specific MMC configuration */ #ifndef _DAVINCI_MMC_H #define _DAVINCI_MMC_H #include <linux/types.h> #include <linux/mmc/host.h> struct davinci_mmc_config { /* get_cd()/get_wp() may sleep */ int (*get_cd)(int module); int (*get_ro)(int module); void (*set_power)(int module, bool on); /* wires == 0 is equivalent to wires == 4 (4-bit parallel) */ u8 wires; u32 max_freq; /* any additional host capabilities: OR'd in to mmc->f_caps */ u32 caps; /* Number of sg segments */ u8 nr_sg; }; void davinci_setup_mmc(int module, struct davinci_mmc_config *config); enum { MMC_CTLR_VERSION_1 = 0, /* DM644x and DM355 */ MMC_CTLR_VERSION_2, /* DA830 */ }; #endif platform_data/apds990x.h 0000644 00000003524 14722070374 0011125 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * This file is part of the APDS990x sensor driver. * Chip is combined proximity and ambient light sensor. * * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). * * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> */ #ifndef __APDS990X_H__ #define __APDS990X_H__ #define APDS_IRLED_CURR_12mA 0x3 #define APDS_IRLED_CURR_25mA 0x2 #define APDS_IRLED_CURR_50mA 0x1 #define APDS_IRLED_CURR_100mA 0x0 /** * struct apds990x_chip_factors - defines effect of the cover window * @ga: Total glass attenuation * @cf1: clear channel factor 1 for raw to lux conversion * @irf1: IR channel factor 1 for raw to lux conversion * @cf2: clear channel factor 2 for raw to lux conversion * @irf2: IR channel factor 2 for raw to lux conversion * @df: device factor for conversion formulas * * Structure for tuning ALS calculation to match with environment. * Values depend on the material above the sensor and the sensor * itself. If the GA is zero, driver will use uncovered sensor default values * format: decimal value * APDS_PARAM_SCALE except df which is plain integer. */ #define APDS_PARAM_SCALE 4096 struct apds990x_chip_factors { int ga; int cf1; int irf1; int cf2; int irf2; int df; }; /** * struct apds990x_platform_data - platform data for apsd990x.c driver * @cf: chip factor data * @pddrive: IR-led driving current * @ppcount: number of IR pulses used for proximity estimation * @setup_resources: interrupt line setup call back function * @release_resources: interrupt line release call back function * * Proximity detection result depends heavily on correct ppcount, pdrive * and cover window. 
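 *
 * A board sketch with illustrative numbers only (real factors come from
 * optical characterization of the cover window). With APDS_PARAM_SCALE
 * being 4096, a glass attenuation of e.g. 0.48 is encoded as
 * 0.48 * 4096 ~= 1966:
 *
 *	static struct apds990x_platform_data myboard_apds990x_pdata = {
 *		.cf = {
 *			.ga	= 1966,	// 0.48 * APDS_PARAM_SCALE
 *			.cf1	= 4096,	// 1.00 * APDS_PARAM_SCALE
 *			.irf1	= 7782,	// 1.90 * APDS_PARAM_SCALE
 *			.cf2	= 2621,	// 0.64 * APDS_PARAM_SCALE
 *			.irf2	= 5079,	// 1.24 * APDS_PARAM_SCALE
 *			.df	= 52,	// plain integer, no scaling
 *		},
 *		.pdrive		= APDS_IRLED_CURR_25mA,
 *		.ppcount	= 5,
 *	};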
* */ struct apds990x_platform_data { struct apds990x_chip_factors cf; u8 pdrive; u8 ppcount; int (*setup_resources)(void); int (*release_resources)(void); }; #endif platform_data/rtc-ds2404.h 0000644 00000000723 14722070374 0011250 0 ustar 00 /* * ds2404.h - platform data structure for the DS2404 RTC. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org> */ #ifndef __LINUX_DS2404_H #define __LINUX_DS2404_H struct ds2404_platform_data { unsigned int gpio_rst; unsigned int gpio_clk; unsigned int gpio_dq; }; #endif platform_data/efm32-uart.h 0000644 00000000611 14722070374 0011423 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * * */ #ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ #define __LINUX_PLATFORM_DATA_EFM32_UART_H__ #include <linux/types.h> /** * struct efm32_uart_pdata * @location: pinmux location for the I/O pins (to be written to the ROUTE * register) */ struct efm32_uart_pdata { u8 location; }; #endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ */ platform_data/dma-ep93xx.h 0000644 00000005272 14722070374 0011445 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ARCH_DMA_H #define __ASM_ARCH_DMA_H #include <linux/types.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> /* * M2P channels. * * Note that these values are also directly used for setting the PPALLOC * register. */ #define EP93XX_DMA_I2S1 0 #define EP93XX_DMA_I2S2 1 #define EP93XX_DMA_AAC1 2 #define EP93XX_DMA_AAC2 3 #define EP93XX_DMA_AAC3 4 #define EP93XX_DMA_I2S3 5 #define EP93XX_DMA_UART1 6 #define EP93XX_DMA_UART2 7 #define EP93XX_DMA_UART3 8 #define EP93XX_DMA_IRDA 9 /* M2M channels */ #define EP93XX_DMA_SSP 10 #define EP93XX_DMA_IDE 11 /** * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine * @port: peripheral which is requesting the channel * @direction: TX/RX channel * @name: optional name for the channel, this is displayed in /proc/interrupts * * This information is passed as private channel parameter in a filter * function. Note that this is only needed for slave/cyclic channels. For * memcpy channels %NULL data should be passed. */ struct ep93xx_dma_data { int port; enum dma_transfer_direction direction; const char *name; }; /** * struct ep93xx_dma_chan_data - platform specific data for a DMA channel * @name: name of the channel, used for getting the right clock for the channel * @base: mapped registers * @irq: interrupt number used by this channel */ struct ep93xx_dma_chan_data { const char *name; void __iomem *base; int irq; }; /** * struct ep93xx_dma_platform_data - platform data for the dmaengine driver * @channels: array of channels which are passed to the driver * @num_channels: number of channels in the array * * This structure is passed to the DMA engine driver via platform data. For * M2P channels, contract is that even channels are for TX and odd for RX. * There is no requirement for the M2M channels. */ struct ep93xx_dma_platform_data { struct ep93xx_dma_chan_data *channels; size_t num_channels; }; static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan) { return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p"); } /** * ep93xx_dma_chan_direction - returns direction the channel can be used * @chan: channel * * This function can be used in filter functions to find out whether the * channel supports given DMA direction. 
Only M2P channels have such * limitation, for M2M channels the direction is configurable. */ static inline enum dma_transfer_direction ep93xx_dma_chan_direction(struct dma_chan *chan) { if (!ep93xx_dma_chan_is_m2p(chan)) return DMA_TRANS_NONE; /* even channels are for TX, odd for RX */ return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; } #endif /* __ASM_ARCH_DMA_H */ platform_data/i2c-gpio.h 0000644 00000002060 14722070374 0011147 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * i2c-gpio interface to platform code * * Copyright (C) 2007 Atmel Corporation */ #ifndef _LINUX_I2C_GPIO_H #define _LINUX_I2C_GPIO_H /** * struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio * @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz * @timeout: clock stretching timeout in jiffies. If the slave keeps * SCL low for longer than this, the transfer will time out. * @sda_is_open_drain: SDA is configured as open drain, i.e. the pin * isn't actively driven high when setting the output value high. * gpio_get_value() must return the actual pin state even if the * pin is configured as an output. * @scl_is_open_drain: SCL is set up as open drain. Same requirements * as for sda_is_open_drain apply. * @scl_is_output_only: SCL output drivers cannot be turned off. */ struct i2c_gpio_platform_data { int udelay; int timeout; unsigned int sda_is_open_drain:1; unsigned int scl_is_open_drain:1; unsigned int scl_is_output_only:1; }; #endif /* _LINUX_I2C_GPIO_H */ platform_data/spi-davinci.h 0000644 00000004404 14722070374 0011750 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2009 Texas Instruments. */ #ifndef __ARCH_ARM_DAVINCI_SPI_H #define __ARCH_ARM_DAVINCI_SPI_H #include <linux/platform_data/edma.h> #define SPI_INTERN_CS 0xFF enum { SPI_VERSION_1, /* For DM355/DM365/DM6467 */ SPI_VERSION_2, /* For DA8xx */ }; /** * davinci_spi_platform_data - Platform data for SPI master device on DaVinci * * @version: version of the SPI IP. Different DaVinci devices have slightly * varying versions of the same IP. * @num_chipselect: number of chipselects supported by this SPI master * @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt * controller withn the SoC. Possible values are 0 and 1. * @cshold_bug: set this to true if the SPI controller on your chip requires * a write to CSHOLD bit in between transfers (like in DM355). * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any * device on the bus. */ struct davinci_spi_platform_data { u8 version; u8 num_chipselect; u8 intr_line; u8 prescaler_limit; bool cshold_bug; enum dma_event_q dma_event_q; }; /** * davinci_spi_config - Per-chip-select configuration for SPI slave devices * * @wdelay: amount of delay between transmissions. Measured in number of * SPI module clocks. * @odd_parity: polarity of parity flag at the end of transmit data stream. * 0 - odd parity, 1 - even parity. * @parity_enable: enable transmission of parity at end of each transmit * data stream. * @io_type: type of IO transfer. Choose between polled, interrupt and DMA. * @timer_disable: disable chip-select timers (setup and hold) * @c2tdelay: chip-select setup time. Measured in number of SPI module clocks. * @t2cdelay: chip-select hold time. Measured in number of SPI module clocks. * @t2edelay: transmit data finished to SPI ENAn pin inactive time. Measured * in number of SPI clocks. * @c2edelay: chip-select active to SPI ENAn signal active time. Measured in * number of SPI clocks. 
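 *
 * A per-device sketch (values are illustrative; a board would typically
 * point spi_board_info.controller_data at such a structure):
 *
 *	static struct davinci_spi_config myboard_spiflash_cfg = {
 *		.io_type	= SPI_IO_TYPE_DMA,
 *		.c2tdelay	= 8,	// chip-select setup, SPI module clocks
 *		.t2cdelay	= 8,	// chip-select hold, SPI module clocks
 *	};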
*/ struct davinci_spi_config { u8 wdelay; u8 odd_parity; u8 parity_enable; #define SPI_IO_TYPE_INTR 0 #define SPI_IO_TYPE_POLL 1 #define SPI_IO_TYPE_DMA 2 u8 io_type; u8 timer_disable; u8 c2tdelay; u8 t2cdelay; u8 t2edelay; u8 c2edelay; }; #endif /* __ARCH_ARM_DAVINCI_SPI_H */ platform_data/g762.h 0000644 00000001272 14722070374 0010227 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Platform data structure for g762 fan controller driver * * Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org> */ #ifndef __LINUX_PLATFORM_DATA_G762_H__ #define __LINUX_PLATFORM_DATA_G762_H__ /* * Following structure can be used to set g762 driver platform specific data * during board init. Note that passing a sparse structure is possible but * will result in non-specified attributes to be set to default value, hence * overloading those installed during boot (e.g. by u-boot). */ struct g762_platform_data { u32 fan_startv; u32 fan_gear_mode; u32 pwm_polarity; u32 clk_freq; }; #endif /* __LINUX_PLATFORM_DATA_G762_H__ */ platform_data/net-cw1200.h 0000644 00000005314 14722070374 0011243 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2011 * * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com> */ #ifndef CW1200_PLAT_H_INCLUDED #define CW1200_PLAT_H_INCLUDED struct cw1200_platform_data_spi { u8 spi_bits_per_word; /* REQUIRED */ u16 ref_clk; /* REQUIRED (in KHz) */ /* All others are optional */ bool have_5ghz; int reset; /* GPIO to RSTn signal (0 disables) */ int powerup; /* GPIO to POWERUP signal (0 disables) */ int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata, bool enable); /* Control 3v3 / 1v8 supply */ int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata, bool enable); /* Control CLK32K */ const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ const char *sdd_file; /* if NULL, will use default for detected hw type */ }; struct cw1200_platform_data_sdio { u16 ref_clk; /* REQUIRED (in KHz) */ /* All others are optional */ bool have_5ghz; bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */ int reset; /* GPIO to RSTn signal (0 disables) */ int powerup; /* GPIO to POWERUP signal (0 disables) */ int irq; /* IRQ line or 0 to use SDIO IRQ */ int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata, bool enable); /* Control 3v3 / 1v8 supply */ int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata, bool enable); /* Control CLK32K */ const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ const char *sdd_file; /* if NULL, will use default for detected hw type */ }; /* An example of SPI support in your board setup file: static struct cw1200_platform_data_spi cw1200_platform_data = { .ref_clk = 38400, .spi_bits_per_word = 16, .reset = GPIO_RF_RESET, .powerup = GPIO_RF_POWERUP, .macaddr = wifi_mac_addr, .sdd_file = "sdd_sagrad_1091_1098.bin", }; static struct spi_board_info myboard_spi_devices[] __initdata = { { .modalias = "cw1200_wlan_spi", .max_speed_hz = 52000000, .bus_num = 0, .irq = WIFI_IRQ, .platform_data = &cw1200_platform_data, .chip_select = 0, }, }; */ /* An example of SDIO support in your board setup file: static struct cw1200_platform_data_sdio my_cw1200_platform_data = { .ref_clk = 38400, .have_5ghz = false, .sdd_file = "sdd_myplatform.bin", }; cw1200_sdio_set_platform_data(&my_cw1200_platform_data); */ void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata); #endif /* CW1200_PLAT_H_INCLUDED */ platform_data/adp8870.h 
0000644 00000011020 14722070374 0010625 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Definitions and platform data for Analog Devices * Backlight drivers ADP8870 * * Copyright 2009-2010 Analog Devices Inc. */ #ifndef __LINUX_I2C_ADP8870_H #define __LINUX_I2C_ADP8870_H #define ID_ADP8870 8870 #define ADP8870_MAX_BRIGHTNESS 0x7F #define FLAG_OFFT_SHIFT 8 /* * LEDs subdevice platform data */ #define ADP8870_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT) #define ADP8870_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT) #define ADP8870_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT) #define ADP8870_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT) #define ADP8870_LED_ONT_200ms 0 #define ADP8870_LED_ONT_600ms 1 #define ADP8870_LED_ONT_800ms 2 #define ADP8870_LED_ONT_1200ms 3 #define ADP8870_LED_D7 (7) #define ADP8870_LED_D6 (6) #define ADP8870_LED_D5 (5) #define ADP8870_LED_D4 (4) #define ADP8870_LED_D3 (3) #define ADP8870_LED_D2 (2) #define ADP8870_LED_D1 (1) /* * Backlight subdevice platform data */ #define ADP8870_BL_D7 (1 << 6) #define ADP8870_BL_D6 (1 << 5) #define ADP8870_BL_D5 (1 << 4) #define ADP8870_BL_D4 (1 << 3) #define ADP8870_BL_D3 (1 << 2) #define ADP8870_BL_D2 (1 << 1) #define ADP8870_BL_D1 (1 << 0) #define ADP8870_FADE_T_DIS 0 /* Fade Timer Disabled */ #define ADP8870_FADE_T_300ms 1 /* 0.3 Sec */ #define ADP8870_FADE_T_600ms 2 #define ADP8870_FADE_T_900ms 3 #define ADP8870_FADE_T_1200ms 4 #define ADP8870_FADE_T_1500ms 5 #define ADP8870_FADE_T_1800ms 6 #define ADP8870_FADE_T_2100ms 7 #define ADP8870_FADE_T_2400ms 8 #define ADP8870_FADE_T_2700ms 9 #define ADP8870_FADE_T_3000ms 10 #define ADP8870_FADE_T_3500ms 11 #define ADP8870_FADE_T_4000ms 12 #define ADP8870_FADE_T_4500ms 13 #define ADP8870_FADE_T_5000ms 14 #define ADP8870_FADE_T_5500ms 15 /* 5.5 Sec */ #define ADP8870_FADE_LAW_LINEAR 0 #define ADP8870_FADE_LAW_SQUARE 1 #define ADP8870_FADE_LAW_CUBIC1 2 #define ADP8870_FADE_LAW_CUBIC2 3 #define ADP8870_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ #define ADP8870_BL_AMBL_FILT_160ms 1 #define ADP8870_BL_AMBL_FILT_320ms 2 #define ADP8870_BL_AMBL_FILT_640ms 3 #define ADP8870_BL_AMBL_FILT_1280ms 4 #define ADP8870_BL_AMBL_FILT_2560ms 5 #define ADP8870_BL_AMBL_FILT_5120ms 6 #define ADP8870_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ /* * Blacklight current 0..30mA */ #define ADP8870_BL_CUR_mA(I) ((I * 127) / 30) /* * L2 comparator current 0..1106uA */ #define ADP8870_L2_COMP_CURR_uA(I) ((I * 255) / 1106) /* * L3 comparator current 0..551uA */ #define ADP8870_L3_COMP_CURR_uA(I) ((I * 255) / 551) /* * L4 comparator current 0..275uA */ #define ADP8870_L4_COMP_CURR_uA(I) ((I * 255) / 275) /* * L5 comparator current 0..138uA */ #define ADP8870_L5_COMP_CURR_uA(I) ((I * 255) / 138) struct adp8870_backlight_platform_data { u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */ u8 pwm_assign; /* 1 = Enables PWM mode */ u8 bl_fade_in; /* Backlight Fade-In Timer */ u8 bl_fade_out; /* Backlight Fade-Out Timer */ u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */ u8 en_ambl_sens; /* 1 = enable ambient light sensor */ u8 abml_filt; /* Light sensor filter time */ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l2_bright_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l2_bright_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l3_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l3_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l4_indoor_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l4_indor_dim; /* typ 
= 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l5_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l5_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ u8 l4_trip; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ u8 l4_hyst; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */ u8 l5_trip; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */ u8 l5_hyst; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */ /** * Independent Current Sinks / LEDS * Sinks not assigned to the Backlight can be exposed to * user space using the LEDS CLASS interface */ int num_leds; struct led_info *leds; u8 led_fade_in; /* LED Fade-In Timer */ u8 led_fade_out; /* LED Fade-Out Timer */ u8 led_fade_law; /* fade-on/fade-off transfer characteristic */ u8 led_on_time; }; #endif /* __LINUX_I2C_ADP8870_H */ platform_data/sa11x0-serial.h 0000644 00000001552 14722070374 0012035 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Author: Nicolas Pitre * * Moved and changed lots, Russell King * * Low level machine dependent UART functions. */ #ifndef SA11X0_SERIAL_H #define SA11X0_SERIAL_H struct uart_port; struct uart_info; /* * This is a temporary structure for registering these * functions; it is intended to be discarded after boot. */ struct sa1100_port_fns { void (*set_mctrl)(struct uart_port *, u_int); u_int (*get_mctrl)(struct uart_port *); void (*pm)(struct uart_port *, u_int, u_int); int (*set_wake)(struct uart_port *, u_int); }; #ifdef CONFIG_SERIAL_SA1100 void sa1100_register_uart_fns(struct sa1100_port_fns *fns); void sa1100_register_uart(int idx, int port); #else static inline void sa1100_register_uart_fns(struct sa1100_port_fns *fns) { } static inline void sa1100_register_uart(int idx, int port) { } #endif #endif platform_data/lp8755.h 0000644 00000002741 14722070374 0010510 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * LP8755 High Performance Power Management Unit Driver: System Interface Driver * * Copyright (C) 2012 Texas Instruments * * Author: Daniel (Geon Si) Jeong <daniel.jeong@ti.com> * G.Shark Jeong <gshark.jeong@gmail.com> */ #ifndef _LP8755_H #define _LP8755_H #include <linux/regulator/consumer.h> #define LP8755_NAME "lp8755-regulator" /* *PWR FAULT : power fault detected *OCP : over current protection activated *OVP : over voltage protection activated *TEMP_WARN : thermal warning *TEMP_SHDN : thermal shutdown detected *I_LOAD : current measured */ #define LP8755_EVENT_PWR_FAULT REGULATOR_EVENT_FAIL #define LP8755_EVENT_OCP REGULATOR_EVENT_OVER_CURRENT #define LP8755_EVENT_OVP 0x10000 #define LP8755_EVENT_TEMP_WARN 0x2000 #define LP8755_EVENT_TEMP_SHDN REGULATOR_EVENT_OVER_TEMP #define LP8755_EVENT_I_LOAD 0x40000 enum lp8755_bucks { LP8755_BUCK0 = 0, LP8755_BUCK1, LP8755_BUCK2, LP8755_BUCK3, LP8755_BUCK4, LP8755_BUCK5, LP8755_BUCK_MAX, }; /** * multiphase configuration options */ enum lp8755_mphase_config { MPHASE_CONF0, MPHASE_CONF1, MPHASE_CONF2, MPHASE_CONF3, MPHASE_CONF4, MPHASE_CONF5, MPHASE_CONF6, MPHASE_CONF7, MPHASE_CONF8, MPHASE_CONF_MAX }; /** * struct lp8755_platform_data * @mphase : Multiphase Switcher Configurations.
* @buck_data : buck0~5 init voltage in uV */ struct lp8755_platform_data { int mphase; struct regulator_init_data *buck_data[LP8755_BUCK_MAX]; }; #endif platform_data/syscon.h 0000644 00000000243 14722070374 0011055 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef PLATFORM_DATA_SYSCON_H #define PLATFORM_DATA_SYSCON_H struct syscon_platform_data { const char *label; }; #endif platform_data/brcmfmac.h 0000644 00000015207 14722070374 0011317 0 ustar 00 /* * Copyright (c) 201 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _LINUX_BRCMFMAC_PLATFORM_H #define _LINUX_BRCMFMAC_PLATFORM_H #define BRCMFMAC_PDATA_NAME "brcmfmac" #define BRCMFMAC_COUNTRY_BUF_SZ 4 /* * Platform specific driver functions and data. Through the platform specific * device data, functions and data can be provided to help the brcmfmac driver * operate with the device in combination with the used platform. */ /** * Note: the brcmfmac can be loaded as a module or be statically built-in to * the kernel. If built-in then do note that it uses module_init (and * module_exit) routines which are equivalent to device_initcall. So if you * intend to create a module with the platform specific data for the brcmfmac * and have it built-in to the kernel then use a higher initcall than * device_initcall (see init.h). If this is not done then brcmfmac will load * without problems but will not pick up the platform data. * * When the driver does not "detect" platform driver data then it will continue * without reporting anything and just assume there is no data needed, which is * probably true for most platforms. */ /** * enum brcmf_bus_type - Bus type identifier. Currently SDIO, USB and PCIE are * supported. */ enum brcmf_bus_type { BRCMF_BUSTYPE_SDIO, BRCMF_BUSTYPE_USB, BRCMF_BUSTYPE_PCIE }; /** * struct brcmfmac_sdio_pd - SDIO Device specific platform data. * * @txglomsz: SDIO txglom size. Use 0 if the driver default is to be * used. * @drive_strength: the preferred drive strength to be used for the SDIO * pins. If 0 then a default value will be used. This is * the target drive strength, the exact drive strength * which will be used depends on the capabilities of the * device. * @oob_irq_supported: does the board have support for OOB interrupts. SDIO * in-band interrupts are relatively slow and for having * less overhead on interrupt processing an out of band * interrupt can be used. If the HW supports this then * enable this by setting this field to true and configure * the oob related fields. * @oob_irq_nr, * @oob_irq_flags: the OOB interrupt information. The values are used for * registering the irq using the request_irq function. * @broken_sg_support: flag for broken sg list support of SDIO host controller. * Set this to true if the SDIO host controller has higher * align requirement than 32 bytes for each scatterlist * item.
* @sd_head_align: alignment requirement for start of data buffer. * @sd_sgentry_align: length alignment requirement for each sg entry. * @reset: This function can get called if the device communication * broke down. This functionality is particularly useful in * case of SDIO type devices. It is possible to reset a * dongle via sdio data interface, but it requires that * this is fully functional. This function is chip/module * specific and this function should return only after the * complete reset has completed. */ struct brcmfmac_sdio_pd { int txglomsz; unsigned int drive_strength; bool oob_irq_supported; unsigned int oob_irq_nr; unsigned long oob_irq_flags; bool broken_sg_support; unsigned short sd_head_align; unsigned short sd_sgentry_align; void (*reset)(void); }; /** * struct brcmfmac_pd_cc_entry - Struct for translating user space country code * (iso3166) to firmware country code and * revision. * * @iso3166: iso3166 alpha 2 country code string. * @cc: firmware country code string. * @rev: firmware country code revision. */ struct brcmfmac_pd_cc_entry { char iso3166[BRCMFMAC_COUNTRY_BUF_SZ]; char cc[BRCMFMAC_COUNTRY_BUF_SZ]; s32 rev; }; /** * struct brcmfmac_pd_cc - Struct for translating country codes as set by user * space to a country code and rev which can be used by * firmware. * * @table_size: number of entries in table (> 0) * @table: array of 1 or more elements with translation information. */ struct brcmfmac_pd_cc { int table_size; struct brcmfmac_pd_cc_entry table[0]; }; /** * struct brcmfmac_pd_device - Device specific platform data. (id/rev/bus_type) * is the unique identifier of the device. * * @id: ID of the device for which this data is. In case of SDIO * or PCIE this is the chipid as identified by chip.c In * case of USB this is the chipid as identified by the * device query. * @rev: chip revision, see id. * @bus_type: The type of bus. Some chipid/rev exist for different bus * types. Each bus type has its own set of settings. * @feature_disable: Bitmask of features to disable (override), See feature.c * in brcmfmac for details. * @country_codes: If available, pointer to struct for translating country * codes. * @bus: Bus specific (union) device settings. Currently only * SDIO. */ struct brcmfmac_pd_device { unsigned int id; unsigned int rev; enum brcmf_bus_type bus_type; unsigned int feature_disable; struct brcmfmac_pd_cc *country_codes; union { struct brcmfmac_sdio_pd sdio; } bus; }; /** * struct brcmfmac_platform_data - BRCMFMAC specific platform data. * * @power_on: This function is called by the brcmfmac driver when the module * gets loaded. This can be particularly useful for low power * devices. The platform spcific routine may for example decide to * power up the complete device. If there is no use-case for this * function then provide NULL. * @power_off: This function is called by the brcmfmac when the module gets * unloaded. At this point the devices can be powered down or * otherwise be reset. So if an actual power_off is not supported * but reset is supported by the devices then reset the devices * when this function gets called. This can be particularly useful * for low power devices. If there is no use-case for this * function then provide NULL. 
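 *
 * A minimal registration sketch (the myboard_* names and the power hook
 * body are illustrative assumptions; see the initcall note above when the
 * driver is built into the kernel):
 *
 *	static void myboard_wlan_power_on(void)
 *	{
 *		// board specific: enable WLAN regulator, toggle reset GPIO
 *	}
 *
 *	static struct brcmfmac_platform_data myboard_brcmfmac_pdata = {
 *		.power_on	= myboard_wlan_power_on,
 *		.power_off	= NULL,		// no use-case on this board
 *	};
 *
 *	static struct platform_device myboard_brcmfmac_device = {
 *		.name	= BRCMFMAC_PDATA_NAME,
 *		.id	= -1,
 *		.dev	= { .platform_data = &myboard_brcmfmac_pdata, },
 *	};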
*/ struct brcmfmac_platform_data { void (*power_on)(void); void (*power_off)(void); char *fw_alternative_path; int device_count; struct brcmfmac_pd_device devices[0]; }; #endif /* _LINUX_BRCMFMAC_PLATFORM_H */ platform_data/mmc-pxamci.h 0000644 00000001451 14722070374 0011574 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ASMARM_ARCH_MMC_H #define ASMARM_ARCH_MMC_H #include <linux/mmc/host.h> #include <linux/interrupt.h> struct device; struct mmc_host; struct pxamci_platform_data { unsigned int ocr_mask; /* available voltages */ unsigned long detect_delay_ms; /* delay in milliseconds before detecting cards after an interrupt */ int (*init)(struct device *, irq_handler_t, void *); int (*get_ro)(struct device *); int (*setpower)(struct device *, unsigned int); void (*exit)(struct device *, void *); bool gpio_card_ro_invert; /* gpio ro is inverted */ }; extern void pxa_set_mci_info(struct pxamci_platform_data *info); extern void pxa3xx_set_mci2_info(struct pxamci_platform_data *info); extern void pxa3xx_set_mci3_info(struct pxamci_platform_data *info); #endif platform_data/regulator-haptic.h 0000644 00000001263 14722070374 0013014 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Regulator Haptic Platform Data * * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Jaewon Kim <jaewon02.kim@samsung.com> * Author: Hyunhee Kim <hyunhee.kim@samsung.com> */ #ifndef _REGULATOR_HAPTIC_H #define _REGULATOR_HAPTIC_H /* * struct regulator_haptic_data - Platform device data * * @max_volt: maximum voltage value supplied to the haptic motor * (the unit of the voltage is microvolts). * @min_volt: minimum voltage value supplied to the haptic motor * (the unit of the voltage is microvolts). */ struct regulator_haptic_data { unsigned int max_volt; unsigned int min_volt; }; #endif /* _REGULATOR_HAPTIC_H */ platform_data/crypto-ux500.h 0000644 00000001016 14722070374 0011735 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2011 * * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson */ #ifndef _CRYPTO_UX500_H #define _CRYPTO_UX500_H #include <linux/dmaengine.h> #include <linux/platform_data/dma-ste-dma40.h> struct hash_platform_data { void *mem_to_engine; bool (*dma_filter)(struct dma_chan *chan, void *filter_param); }; struct cryp_platform_data { struct stedma40_chan_cfg mem_to_engine; struct stedma40_chan_cfg engine_to_mem; }; #endif platform_data/ad7887.h 0000644 00000001364 14722070374 0010466 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * AD7887 SPI ADC driver * * Copyright 2010 Analog Devices Inc. */ #ifndef IIO_ADC_AD7887_H_ #define IIO_ADC_AD7887_H_ /** * struct ad7887_platform_data - AD7887 ADC driver platform data * @en_dual: Whether to use dual channel mode. If set to true AIN1 becomes the * second input channel, and Vref is internally connected to Vdd. If set to * false the device is used in single channel mode and AIN1/Vref is used as * VREF input. * @use_onchip_ref: Whether to use the onchip reference. If set to true the * internal 2.5V reference is used. If set to false an external reference is * used.
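 *
 * A hedged usage sketch (the bus and chip-select numbers are made up for
 * illustration):
 *
 *	static const struct ad7887_platform_data ad7887_pdata = {
 *		.en_dual = false,
 *		.use_onchip_ref = true,		// internal 2.5V reference
 *	};
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias = "ad7887",
 *			.max_speed_hz = 1000000,
 *			.bus_num = 0,
 *			.chip_select = 1,
 *			.platform_data = &ad7887_pdata,
 *		},
 *	};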
*/ struct ad7887_platform_data { bool en_dual; bool use_onchip_ref; }; #endif /* IIO_ADC_AD7887_H_ */ platform_data/i2c-mux-reg.h 0000644 00000002323 14722070374 0011577 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * I2C multiplexer using a single register * * Copyright 2015 Freescale Semiconductor * York Sun <yorksun@freescale.com> */ #ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H #define __LINUX_PLATFORM_DATA_I2C_MUX_REG_H /** * struct i2c_mux_reg_platform_data - Platform-dependent data for i2c-mux-reg * @parent: Parent I2C bus adapter number * @base_nr: Base I2C bus number to number adapters from or zero for dynamic * @values: Array of value for each channel * @n_values: Number of multiplexer channels * @little_endian: Indicating if the register is in little endian * @write_only: Reading the register is not allowed by hardware * @classes: Optional I2C auto-detection classes * @idle: Value to write to mux when idle * @idle_in_use: indicate if idle value is in use * @reg: Virtual address of the register to switch channel * @reg_size: register size in bytes */ struct i2c_mux_reg_platform_data { int parent; int base_nr; const unsigned int *values; int n_values; bool little_endian; bool write_only; const unsigned int *classes; u32 idle; bool idle_in_use; void __iomem *reg; resource_size_t reg_size; }; #endif /* __LINUX_PLATFORM_DATA_I2C_MUX_REG_H */ platform_data/zforce_ts.h 0000644 00000000471 14722070374 0011540 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* drivers/input/touchscreen/zforce.c * * Copyright (C) 2012-2013 MundoReader S.L. */ #ifndef _LINUX_INPUT_ZFORCE_TS_H #define _LINUX_INPUT_ZFORCE_TS_H struct zforce_ts_platdata { unsigned int x_max; unsigned int y_max; }; #endif /* _LINUX_INPUT_ZFORCE_TS_H */ platform_data/spi-clps711x.h 0000644 00000000645 14722070374 0011720 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * CLPS711X SPI bus driver definitions * * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru> */ #ifndef ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H #define ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H /* Board specific platform_data */ struct spi_clps711x_pdata { int *chipselect; /* Array of GPIO-numbers */ int num_chipselect; /* Total count of GPIOs */ }; #endif platform_data/max6697.h 0000644 00000001620 14722070374 0010660 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * max6697.h * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net> */ #ifndef MAX6697_H #define MAX6697_H #include <linux/types.h> /* * For all bit masks: * bit 0: local temperature * bit 1..7: remote temperatures */ struct max6697_platform_data { bool smbus_timeout_disable; /* set to disable SMBus timeouts */ bool extended_range_enable; /* set to enable extended temp range */ bool beta_compensation; /* set to enable beta compensation */ u8 alert_mask; /* set bit to 1 to disable alert */ u8 over_temperature_mask; /* set bit to 1 to disable */ u8 resistance_cancellation; /* set bit to 0 to disable * bit mask for MAX6581, * boolean for other chips */ u8 ideality_mask; /* set bit to 0 to disable */ u8 ideality_value; /* transistor ideality as per * MAX6581 datasheet */ }; #endif /* MAX6697_H */ platform_data/iommu-omap.h 0000644 00000001152 14722070374 0011617 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * omap iommu: main structures * * Copyright (C) 2008-2009 Nokia Corporation * * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> */ #include <linux/platform_device.h> struct iommu_platform_data { const char *reset_name; int (*assert_reset)(struct 
platform_device *pdev, const char *name); int (*deassert_reset)(struct platform_device *pdev, const char *name); int (*device_enable)(struct platform_device *pdev); int (*device_idle)(struct platform_device *pdev); int (*set_pwrdm_constraint)(struct platform_device *pdev, bool request, u8 *pwrst); }; platform_data/mtd-nand-pxa3xx.h 0000644 00000001454 14722070374 0012477 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_ARCH_PXA3XX_NAND_H #define __ASM_ARCH_PXA3XX_NAND_H #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> /* * The current pxa3xx_nand controller has two chip selects, both of which are * workable, but historically all platforms remaining on platform data used * only one. Switch to device tree if you need more. */ struct pxa3xx_nand_platform_data { /* Keep OBM/bootloader NFC timing configuration */ bool keep_config; /* Use a flash-based bad block table */ bool flash_bbt; /* Requested ECC strength and ECC step size */ int ecc_strength, ecc_step_size; /* Partitions */ const struct mtd_partition *parts; unsigned int nr_parts; }; extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); #endif /* __ASM_ARCH_PXA3XX_NAND_H */ platform_data/usb-ohci-pxa27x.h 0000644 00000001635 14722070374 0012405 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ASMARM_ARCH_OHCI_H #define ASMARM_ARCH_OHCI_H struct device; struct pxaohci_platform_data { int (*init)(struct device *); void (*exit)(struct device *); unsigned long flags; #define ENABLE_PORT1 (1 << 0) #define ENABLE_PORT2 (1 << 1) #define ENABLE_PORT3 (1 << 2) #define ENABLE_PORT_ALL (ENABLE_PORT1 | ENABLE_PORT2 | ENABLE_PORT3) #define POWER_SENSE_LOW (1 << 3) #define POWER_CONTROL_LOW (1 << 4) #define NO_OC_PROTECTION (1 << 5) #define OC_MODE_GLOBAL (0 << 6) #define OC_MODE_PERPORT (1 << 6) int power_on_delay; /* Power On to Power Good time - in ms * HCD must wait for this duration before * accessing a powered on port */ int port_mode; #define PMM_NPS_MODE 1 #define PMM_GLOBAL_MODE 2 #define PMM_PERPORT_MODE 3 int power_budget; }; extern void pxa_set_ohci_info(struct pxaohci_platform_data *info); #endif platform_data/leds-lm3642.h 0000644 00000001462 14722070374 0011417 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Texas Instruments * * Simple driver for Texas Instruments LM3642 LED driver chip * * Author: G.Shark Jeong <gshark.jeong@gmail.com> * Daniel Jeong <daniel.jeong@ti.com> */ #ifndef __LINUX_LM3642_H #define __LINUX_LM3642_H #define LM3642_NAME "leds-lm3642" enum lm3642_torch_pin_enable { LM3642_TORCH_PIN_DISABLE = 0x00, LM3642_TORCH_PIN_ENABLE = 0x10, }; enum lm3642_strobe_pin_enable { LM3642_STROBE_PIN_DISABLE = 0x00, LM3642_STROBE_PIN_ENABLE = 0x20, }; enum lm3642_tx_pin_enable { LM3642_TX_PIN_DISABLE = 0x00, LM3642_TX_PIN_ENABLE = 0x40, }; struct lm3642_platform_data { enum lm3642_torch_pin_enable torch_pin; enum lm3642_strobe_pin_enable strobe_pin; enum lm3642_tx_pin_enable tx_pin; }; #endif /* __LINUX_LM3642_H */ platform_data/mmc-sdhci-s3c.h 0000644 00000004346 14722070374 0012101 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PLATFORM_DATA_SDHCI_S3C_H #define __PLATFORM_DATA_SDHCI_S3C_H struct platform_device; enum cd_types { S3C_SDHCI_CD_INTERNAL, /* use mmc internal CD line */ S3C_SDHCI_CD_EXTERNAL, /* use external callback */ S3C_SDHCI_CD_GPIO, /* use external gpio pin for CD line */ S3C_SDHCI_CD_NONE, /* no CD line, use polling to detect card */ S3C_SDHCI_CD_PERMANENT, /* no CD line, card permanently wired to host */ }; /** * struct
s3c_sdhci_platdata() - Platform device data for Samsung SDHCI * @max_width: The maximum number of data bits supported. * @host_caps: Standard MMC host capabilities bit field. * @host_caps2: The second standard MMC host capabilities bit field. * @cd_type: Type of Card Detection method (see cd_types enum above) * @ext_cd_init: Initialize external card detect subsystem. Called on * sdhci-s3c driver probe when cd_type == S3C_SDHCI_CD_EXTERNAL. * notify_func argument is a callback to the sdhci-s3c driver * that triggers the card detection event. Callback arguments: * dev is pointer to platform device of the host controller, * state is new state of the card (0 - removed, 1 - inserted). * @ext_cd_cleanup: Cleanup external card detect subsystem. Called on * sdhci-s3c driver remove when cd_type == S3C_SDHCI_CD_EXTERNAL. * notify_func argument is the same callback as for ext_cd_init. * @ext_cd_gpio: gpio pin used for external CD line, valid only if * cd_type == S3C_SDHCI_CD_GPIO * @ext_cd_gpio_invert: invert values for external CD gpio line * @cfg_gpio: Configure the GPIO for a specific card bit-width * * Initialisation data specific to either the machine or the platform * for the device driver to use or call-back when configuring gpio or * card speed information. */ struct s3c_sdhci_platdata { unsigned int max_width; unsigned int host_caps; unsigned int host_caps2; unsigned int pm_caps; enum cd_types cd_type; int ext_cd_gpio; bool ext_cd_gpio_invert; int (*ext_cd_init)(void (*notify_func)(struct platform_device *, int state)); int (*ext_cd_cleanup)(void (*notify_func)(struct platform_device *, int state)); void (*cfg_gpio)(struct platform_device *dev, int width); }; #endif /* __PLATFORM_DATA_SDHCI_S3C_H */ platform_data/lp8727.h 0000644 00000002705 14722070374 0010507 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * LP8727 Micro/Mini USB IC with integrated charger * * Copyright (C) 2011 Texas Instruments * Copyright (C) 2011 National Semiconductor */ #ifndef _LP8727_H #define _LP8727_H enum lp8727_eoc_level { LP8727_EOC_5P, LP8727_EOC_10P, LP8727_EOC_16P, LP8727_EOC_20P, LP8727_EOC_25P, LP8727_EOC_33P, LP8727_EOC_50P, }; enum lp8727_ichg { LP8727_ICHG_90mA, LP8727_ICHG_100mA, LP8727_ICHG_400mA, LP8727_ICHG_450mA, LP8727_ICHG_500mA, LP8727_ICHG_600mA, LP8727_ICHG_700mA, LP8727_ICHG_800mA, LP8727_ICHG_900mA, LP8727_ICHG_1000mA, }; /** * struct lp8727_chg_param * @eoc_level : end of charge level setting * @ichg : charging current */ struct lp8727_chg_param { enum lp8727_eoc_level eoc_level; enum lp8727_ichg ichg; }; /** * struct lp8727_platform_data * @get_batt_present : check battery status - exists or not * @get_batt_level : get battery voltage (mV) * @get_batt_capacity : get battery capacity (%) * @get_batt_temp : get battery temperature * @ac : charging parameters for AC type charger * @usb : charging parameters for USB type charger * @debounce_msec : interrupt debounce time */ struct lp8727_platform_data { u8 (*get_batt_present)(void); u16 (*get_batt_level)(void); u8 (*get_batt_capacity)(void); u8 (*get_batt_temp)(void); struct lp8727_chg_param *ac; struct lp8727_chg_param *usb; unsigned int debounce_msec; }; #endif platform_data/ata-pxa.h 0000644 00000000610 14722070374 0011070 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Generic PXA PATA driver * * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com> */ #ifndef __MACH_PATA_PXA_H__ #define __MACH_PATA_PXA_H__ struct pata_pxa_pdata { /* PXA DMA DREQ<0:2> pin */ uint32_t dma_dreq; /* Register shift */ uint32_t 
reg_shift; /* IRQ flags */ uint32_t irq_flags; }; #endif /* __MACH_PATA_PXA_H__ */ platform_data/tsl2563.h 0000644 00000000264 14722070374 0010664 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_TSL2563_H #define __LINUX_TSL2563_H struct tsl2563_platform_data { int cover_comp_gain; }; #endif /* __LINUX_TSL2563_H */ platform_data/pwm_omap_dmtimer.h 0000644 00000006513 14722070374 0013105 0 ustar 00 /* * include/linux/platform_data/pwm_omap_dmtimer.h * * OMAP Dual-Mode Timer PWM platform data * * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ * Tarun Kanti DebBarma <tarun.kanti@ti.com> * Thara Gopinath <thara@ti.com> * * Platform device conversion and hwmod support. * * Copyright (C) 2005 Nokia Corporation * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com> * PWM and clock framework support by Timo Teras. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA.
*/ #ifndef __PWM_OMAP_DMTIMER_PDATA_H #define __PWM_OMAP_DMTIMER_PDATA_H /* clock sources */ #define PWM_OMAP_DMTIMER_SRC_SYS_CLK 0x00 #define PWM_OMAP_DMTIMER_SRC_32_KHZ 0x01 #define PWM_OMAP_DMTIMER_SRC_EXT_CLK 0x02 /* timer interrupt enable bits */ #define PWM_OMAP_DMTIMER_INT_CAPTURE (1 << 2) #define PWM_OMAP_DMTIMER_INT_OVERFLOW (1 << 1) #define PWM_OMAP_DMTIMER_INT_MATCH (1 << 0) /* trigger types */ #define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00 #define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01 #define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02 struct omap_dm_timer; typedef struct omap_dm_timer pwm_omap_dmtimer; struct pwm_omap_dmtimer_pdata { pwm_omap_dmtimer *(*request_by_node)(struct device_node *np); pwm_omap_dmtimer *(*request_specific)(int timer_id); pwm_omap_dmtimer *(*request)(void); int (*free)(pwm_omap_dmtimer *timer); void (*enable)(pwm_omap_dmtimer *timer); void (*disable)(pwm_omap_dmtimer *timer); int (*get_irq)(pwm_omap_dmtimer *timer); int (*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value); int (*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask); struct clk *(*get_fclk)(pwm_omap_dmtimer *timer); int (*start)(pwm_omap_dmtimer *timer); int (*stop)(pwm_omap_dmtimer *timer); int (*set_source)(pwm_omap_dmtimer *timer, int source); int (*set_load)(pwm_omap_dmtimer *timer, int autoreload, unsigned int value); int (*set_match)(pwm_omap_dmtimer *timer, int enable, unsigned int match); int (*set_pwm)(pwm_omap_dmtimer *timer, int def_on, int toggle, int trigger); int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler); unsigned int (*read_counter)(pwm_omap_dmtimer *timer); int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value); unsigned int (*read_status)(pwm_omap_dmtimer *timer); int (*write_status)(pwm_omap_dmtimer *timer, unsigned int value); }; #endif /* __PWM_OMAP_DMTIMER_PDATA_H */ platform_data/pcf857x.h 0000644 00000003441 14722070374 0010746 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PCF857X_H #define __LINUX_PCF857X_H /** * struct pcf857x_platform_data - data to set up pcf857x driver * @gpio_base: number of the chip's first GPIO * @n_latch: optional bit-inverse of initial register value; if * you leave this initialized to zero the driver will act * like the chip was just reset * @setup: optional callback issued once the GPIOs are valid * @teardown: optional callback issued before the GPIOs are invalidated * @context: optional parameter passed to setup() and teardown() * * In addition to the I2C_BOARD_INFO() state appropriate to each chip, * the i2c_board_info used with the pcf857x driver must provide its * platform_data (pointer to one of these structures) with at least * the gpio_base value initialized. * * The @setup callback may be used with the kind of board-specific glue * which hands the (now-valid) GPIOs to other drivers, or which puts * devices in their initial states using these GPIOs. * * These GPIO chips are only "quasi-bidirectional"; read the chip specs * to understand the behavior. They don't have separate registers to * record which pins are used for input or output, record which output * values are driven, or provide access to input values. That must be * inferred by reading the chip's value and knowing the last value written * to it. If you leave n_latch initialized to zero, that last written * value is presumed to be all ones (as if the chip were just reset).
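 *
 * A brief sketch, assuming a PCF8574 expander at I2C address 0x20 with a
 * made-up gpio_base of 200 and a hypothetical board_gpio_setup() hook:
 *
 *	static struct pcf857x_platform_data pcf8574_pdata = {
 *		.gpio_base = 200,
 *		.n_latch = 0,			// act as if just reset
 *		.setup = board_gpio_setup,
 *	};
 *
 *	static struct i2c_board_info board_i2c_devs[] __initdata = {
 *		{
 *			I2C_BOARD_INFO("pcf8574", 0x20),
 *			.platform_data = &pcf8574_pdata,
 *		},
 *	};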
*/ struct pcf857x_platform_data { unsigned gpio_base; unsigned n_latch; int (*setup)(struct i2c_client *client, int gpio, unsigned ngpio, void *context); int (*teardown)(struct i2c_client *client, int gpio, unsigned ngpio, void *context); void *context; }; #endif /* __LINUX_PCF857X_H */ platform_data/lm8323.h 0000644 00000001352 14722070374 0010471 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * lm8323.h - Configuration for LM8323 keypad driver. */ #ifndef __LINUX_LM8323_H #define __LINUX_LM8323_H #include <linux/types.h> /* * Largest keycode that the chip can send, plus one, * so keys can be mapped directly at the index of the * LM8323 keycode instead of subtracting one. */ #define LM8323_KEYMAP_SIZE (0x7f + 1) #define LM8323_NUM_PWMS 3 struct lm8323_platform_data { int debounce_time; /* Time to watch for key bouncing, in ms. */ int active_time; /* Idle time until sleep, in ms. */ int size_x; int size_y; bool repeat; const unsigned short *keymap; const char *pwm_names[LM8323_NUM_PWMS]; const char *name; /* Device name. */ }; #endif /* __LINUX_LM8323_H */ platform_data/isl9305.h 0000644 00000000747 14722070374 0010660 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * isl9305 - Intersil ISL9305 DCDC regulator * * Copyright 2014 Linaro Ltd * * Author: Mark Brown <broonie@kernel.org> */ #ifndef __ISL9305_H #define __ISL9305_H #define ISL9305_DCD1 0 #define ISL9305_DCD2 1 #define ISL9305_LDO1 2 #define ISL9305_LDO2 3 #define ISL9305_MAX_REGULATOR ISL9305_LDO2 struct regulator_init_data; struct isl9305_pdata { struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1]; }; #endif platform_data/ntc_thermistor.h 0000644 00000002414 14722070374 0012605 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ntc_thermistor.h - NTC Thermistors * * Copyright (C) 2010 Samsung Electronics * MyungJoo Ham <myungjoo.ham@samsung.com> */ #ifndef _LINUX_NTC_H #define _LINUX_NTC_H struct iio_channel; enum ntc_thermistor_type { TYPE_B57330V2103, TYPE_B57891S0103, TYPE_NCPXXWB473, TYPE_NCPXXWF104, TYPE_NCPXXWL333, TYPE_NCPXXXH103, }; struct ntc_thermistor_platform_data { /* * Exactly one (not both) of read_uV and read_ohm should be provided. * Both functions should return a negative value for an error case. * * pullup_uV, pullup_ohm, pulldown_ohm, and connect are required to use * read_uV() * * How to set up pullup_ohm, pulldown_ohm, and connect is * described at Documentation/hwmon/ntc_thermistor.rst * * pullup/down_ohm: 0 for infinite / not-connected * * chan: iio_channel pointer to communicate with the ADC which the * thermistor is using for conversion of the analog values.
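	 *
	 * Example (a sketch under assumed wiring: a 47k pullup to 1.8V with
	 * the thermistor to ground; board_read_ntc_uv is a hypothetical
	 * helper):
	 *
	 *	static struct ntc_thermistor_platform_data ntc_pdata = {
	 *		.read_uv = board_read_ntc_uv,
	 *		.pullup_uv = 1800000,
	 *		.pullup_ohm = 47000,
	 *		.pulldown_ohm = 0,		// not connected
	 *		.connect = NTC_CONNECTED_GROUND,
	 *	};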
*/ int (*read_uv)(struct ntc_thermistor_platform_data *); unsigned int pullup_uv; unsigned int pullup_ohm; unsigned int pulldown_ohm; enum { NTC_CONNECTED_POSITIVE, NTC_CONNECTED_GROUND } connect; struct iio_channel *chan; int (*read_ohm)(void); }; #endif /* _LINUX_NTC_H */ platform_data/i2c-mux-gpio.h 0000644 00000001673 14722070374 0011767 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * i2c-mux-gpio interface to platform code * * Peter Korsgaard <peter.korsgaard@barco.com> */ #ifndef _LINUX_I2C_MUX_GPIO_H #define _LINUX_I2C_MUX_GPIO_H /* MUX has no specific idle mode */ #define I2C_MUX_GPIO_NO_IDLE ((unsigned)-1) /** * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio * @parent: Parent I2C bus adapter number * @base_nr: Base I2C bus number to number adapters from or zero for dynamic * @values: Array of bitmasks of GPIO settings (low/high) for each * position * @n_values: Number of multiplexer positions (busses to instantiate) * @classes: Optional I2C auto-detection classes * @idle: Bitmask to write to MUX when idle or I2C_MUX_GPIO_NO_IDLE if not used */ struct i2c_mux_gpio_platform_data { int parent; int base_nr; const unsigned *values; int n_values; const unsigned *classes; unsigned idle; }; #endif /* _LINUX_I2C_MUX_GPIO_H */ platform_data/elm.h 0000644 00000002451 14722070374 0010317 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * BCH Error Location Module * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ */ #ifndef __ELM_H #define __ELM_H enum bch_ecc { BCH4_ECC = 0, BCH8_ECC, BCH16_ECC, }; /* The ELM supports 8 error syndrome processes */ #define ERROR_VECTOR_MAX 8 /** * struct elm_errorvec - error vector for elm * @error_reported: set true for vectors where an error is reported * @error_uncorrectable: number of uncorrectable errors * @error_count: number of correctable errors in the sector * @error_loc: buffer for error location * */ struct elm_errorvec { bool error_reported; bool error_uncorrectable; int error_count; int error_loc[16]; }; #if IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH) void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, struct elm_errorvec *err_vec); int elm_config(struct device *dev, enum bch_ecc bch_type, int ecc_steps, int ecc_step_size, int ecc_syndrome_size); #else static inline void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, struct elm_errorvec *err_vec) { } static inline int elm_config(struct device *dev, enum bch_ecc bch_type, int ecc_steps, int ecc_step_size, int ecc_syndrome_size) { return -ENOSYS; } #endif /* CONFIG_MTD_NAND_OMAP_BCH */ #endif /* __ELM_H */ platform_data/efm32-spi.h 0000644 00000000567 14722070374 0011255 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ #define __LINUX_PLATFORM_DATA_EFM32_SPI_H__ #include <linux/types.h> /** * struct efm32_spi_pdata * @location: pinmux location for the I/O pins (to be written to the ROUTE * register) */ struct efm32_spi_pdata { u8 location; }; #endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */ platform_data/usb-davinci.h 0000644 00000001124 14722070374 0011742 0 ustar 00 /* * USB related definitions * * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied.
*/ #ifndef __ASM_ARCH_USB_H #define __ASM_ARCH_USB_H /* Passed as the platform data to the OHCI driver */ struct da8xx_ohci_root_hub { /* Time from power on to power good (in 2 ms units) */ u8 potpgt; }; void davinci_setup_usb(unsigned mA, unsigned potpgt_ms); #endif /* ifndef __ASM_ARCH_USB_H */ platform_data/asoc-mx27vis.h 0000644 00000000435 14722070374 0012004 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __PLATFORM_DATA_ASOC_MX27VIS_H #define __PLATFORM_DATA_ASOC_MX27VIS_H struct snd_mx27vis_platform_data { int amp_gain0_gpio; int amp_gain1_gpio; int amp_mutel_gpio; int amp_muter_gpio; }; #endif /* __PLATFORM_DATA_ASOC_MX27VIS_H */ platform_data/xtalk-bridge.h 0000644 00000000665 14722070374 0012124 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * SGI PCI Xtalk Bridge */ #ifndef PLATFORM_DATA_XTALK_BRIDGE_H #define PLATFORM_DATA_XTALK_BRIDGE_H #include <asm/sn/types.h> struct xtalk_bridge_platform_data { struct resource mem; struct resource io; unsigned long bridge_addr; unsigned long intr_addr; unsigned long mem_offset; unsigned long io_offset; nasid_t nasid; int masterwid; }; #endif /* PLATFORM_DATA_XTALK_BRIDGE_H */ platform_data/dmtimer-omap.h 0000644 00000003620 14722070374 0012134 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * DMTIMER platform data for TI OMAP platforms * * Copyright (C) 2012 Texas Instruments * Author: Jon Hunter <jon-hunter@ti.com> */ #ifndef __PLATFORM_DATA_DMTIMER_OMAP_H__ #define __PLATFORM_DATA_DMTIMER_OMAP_H__ struct omap_dm_timer_ops { struct omap_dm_timer *(*request_by_node)(struct device_node *np); struct omap_dm_timer *(*request_specific)(int timer_id); struct omap_dm_timer *(*request)(void); int (*free)(struct omap_dm_timer *timer); void (*enable)(struct omap_dm_timer *timer); void (*disable)(struct omap_dm_timer *timer); int (*get_irq)(struct omap_dm_timer *timer); int (*set_int_enable)(struct omap_dm_timer *timer, unsigned int value); int (*set_int_disable)(struct omap_dm_timer *timer, u32 mask); struct clk *(*get_fclk)(struct omap_dm_timer *timer); int (*start)(struct omap_dm_timer *timer); int (*stop)(struct omap_dm_timer *timer); int (*set_source)(struct omap_dm_timer *timer, int source); int (*set_load)(struct omap_dm_timer *timer, int autoreload, unsigned int value); int (*set_match)(struct omap_dm_timer *timer, int enable, unsigned int match); int (*set_pwm)(struct omap_dm_timer *timer, int def_on, int toggle, int trigger); int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler); unsigned int (*read_counter)(struct omap_dm_timer *timer); int (*write_counter)(struct omap_dm_timer *timer, unsigned int value); unsigned int (*read_status)(struct omap_dm_timer *timer); int (*write_status)(struct omap_dm_timer *timer, unsigned int value); }; struct dmtimer_platform_data { /* set_timer_src - Only used for OMAP1 devices */ int (*set_timer_src)(struct platform_device *pdev, int source); u32 timer_capability; u32 timer_errata; int (*get_context_loss_count)(struct device *); const struct omap_dm_timer_ops *timer_ops; }; #endif /* __PLATFORM_DATA_DMTIMER_OMAP_H__ */ i2c-algo-pca.h 0000644 00000005614 14722070374 0007067 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_I2C_ALGO_PCA_H #define _LINUX_I2C_ALGO_PCA_H /* Chips known to the pca algo */ #define I2C_PCA_CHIP_9564 0x00 #define I2C_PCA_CHIP_9665 0x01 /* Internal period for PCA9665 oscillator */ #define I2C_PCA_OSC_PER 3 /* e10-8s */ /* Clock speeds for the bus for PCA9564 */ #define I2C_PCA_CON_330kHz 0x00 #define I2C_PCA_CON_288kHz 0x01 #define
I2C_PCA_CON_217kHz 0x02 #define I2C_PCA_CON_146kHz 0x03 #define I2C_PCA_CON_88kHz 0x04 #define I2C_PCA_CON_59kHz 0x05 #define I2C_PCA_CON_44kHz 0x06 #define I2C_PCA_CON_36kHz 0x07 /* PCA9564 registers */ #define I2C_PCA_STA 0x00 /* STATUS Read Only */ #define I2C_PCA_TO 0x00 /* TIMEOUT Write Only */ #define I2C_PCA_DAT 0x01 /* DATA Read/Write */ #define I2C_PCA_ADR 0x02 /* OWN ADR Read/Write */ #define I2C_PCA_CON 0x03 /* CONTROL Read/Write */ /* PCA9665 registers */ #define I2C_PCA_INDPTR 0x00 /* INDIRECT Pointer Write Only */ #define I2C_PCA_IND 0x02 /* INDIRECT Read/Write */ /* PCA9665 indirect registers */ #define I2C_PCA_ICOUNT 0x00 /* Byte Count for buffered mode */ #define I2C_PCA_IADR 0x01 /* OWN ADR */ #define I2C_PCA_ISCLL 0x02 /* SCL LOW period */ #define I2C_PCA_ISCLH 0x03 /* SCL HIGH period */ #define I2C_PCA_ITO 0x04 /* TIMEOUT */ #define I2C_PCA_IPRESET 0x05 /* Parallel bus reset */ #define I2C_PCA_IMODE 0x06 /* I2C Bus mode */ /* PCA9665 I2C bus mode */ #define I2C_PCA_MODE_STD 0x00 /* Standard mode */ #define I2C_PCA_MODE_FAST 0x01 /* Fast mode */ #define I2C_PCA_MODE_FASTP 0x02 /* Fast Plus mode */ #define I2C_PCA_MODE_TURBO 0x03 /* Turbo mode */ #define I2C_PCA_CON_AA 0x80 /* Assert Acknowledge */ #define I2C_PCA_CON_ENSIO 0x40 /* Enable */ #define I2C_PCA_CON_STA 0x20 /* Start */ #define I2C_PCA_CON_STO 0x10 /* Stop */ #define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ #define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ /** * struct pca_i2c_bus_settings - The configured PCA i2c bus settings * @mode: Configured i2c bus mode * @tlow: Configured SCL LOW period * @thi: Configured SCL HIGH period * @clock_freq: The configured clock frequency */ struct pca_i2c_bus_settings { int mode; int tlow; int thi; int clock_freq; }; struct i2c_algo_pca_data { void *data; /* private low level data */ void (*write_byte) (void *data, int reg, int val); int (*read_byte) (void *data, int reg); int (*wait_for_completion) (void *data); void (*reset_chip) (void *data); /* For PCA9564, use one of the predefined frequencies: * 330000, 288000, 217000, 146000, 88000, 59000, 44000, 36000 * For PCA9665, use the frequency you want here. */ unsigned int i2c_clock; unsigned int chip; struct pca_i2c_bus_settings bus_settings; }; int i2c_pca_add_bus(struct i2c_adapter *); int i2c_pca_add_numbered_bus(struct i2c_adapter *); #endif /* _LINUX_I2C_ALGO_PCA_H */ blktrace_api.h 0000644 00000007574 14722070374 0007360 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef BLKTRACE_H #define BLKTRACE_H #include <linux/blkdev.h> #include <linux/relay.h> #include <linux/compat.h> #include <uapi/linux/blktrace_api.h> #include <linux/list.h> #if defined(CONFIG_BLK_DEV_IO_TRACE) #include <linux/sysfs.h> struct blk_trace { int trace_state; struct rchan *rchan; unsigned long __percpu *sequence; unsigned char __percpu *msg_data; u16 act_mask; u64 start_lba; u64 end_lba; u32 pid; u32 dev; struct dentry *dir; struct dentry *dropped_file; struct dentry *msg_file; struct list_head running_list; atomic_t dropped; }; struct blkcg; extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); extern void blk_trace_shutdown(struct request_queue *); extern __printf(3, 4) void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...); /** * blk_add_trace_msg - Add a (simple) message to the blktrace stream * @q: queue the io is for * @fmt: format to print message in * args... Variable argument list for format * * Description: * Records a (simple) message onto the blktrace stream. 
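 *
 * A brief illustrative call (the inflight counter is a made-up local):
 *
 *	blk_add_trace_msg(q, "throttled: %u requests in flight", inflight);
 *
 * The text shows up as a message note in the decoded blktrace output.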
* * NOTE: BLK_TN_MAX_MSG characters are output at most. * NOTE: Can not use 'static inline' due to presence of var args... * **/ #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ do { \ struct blk_trace *bt; \ \ rcu_read_lock(); \ bt = rcu_dereference((q)->blk_trace); \ if (unlikely(bt)) \ __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\ rcu_read_unlock(); \ } while (0) #define blk_add_trace_msg(q, fmt, ...) \ blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) #define BLK_TN_MAX_MSG 128 static inline bool blk_trace_note_message_enabled(struct request_queue *q) { struct blk_trace *bt; bool ret; rcu_read_lock(); bt = rcu_dereference(q->blk_trace); ret = bt && (bt->act_mask & BLK_TC_NOTIFY); rcu_read_unlock(); return ret; } extern void blk_add_driver_data(struct request_queue *q, struct request *rq, void *data, size_t len); extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, char __user *arg); extern int blk_trace_startstop(struct request_queue *q, int start); extern int blk_trace_remove(struct request_queue *q); extern void blk_trace_remove_sysfs(struct device *dev); extern int blk_trace_init_sysfs(struct device *dev); extern struct attribute_group blk_trace_attr_group; #else /* !CONFIG_BLK_DEV_IO_TRACE */ # define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) # define blk_trace_shutdown(q) do { } while (0) # define blk_add_driver_data(q, rq, data, len) do {} while (0) # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) # define blk_trace_startstop(q, start) (-ENOTTY) # define blk_trace_remove(q) (-ENOTTY) # define blk_add_trace_msg(q, fmt, ...) do { } while (0) # define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0) # define blk_trace_remove_sysfs(dev) do { } while (0) # define blk_trace_note_message_enabled(q) (false) static inline int blk_trace_init_sysfs(struct device *dev) { return 0; } #endif /* CONFIG_BLK_DEV_IO_TRACE */ #ifdef CONFIG_COMPAT struct compat_blk_user_trace_setup { char name[BLKTRACE_BDEV_SIZE]; u16 act_mask; u32 buf_size; u32 buf_nr; compat_u64 start_lba; compat_u64 end_lba; u32 pid; }; #define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup) #endif extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); static inline sector_t blk_rq_trace_sector(struct request *rq) { /* * Tracing should ignore starting sector for passthrough requests and * requests where starting sector didn't get set. */ if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1) return 0; return blk_rq_pos(rq); } static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) { return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq); } #endif in.h 0000644 00000004451 14722070374 0005335 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions of the Internet Protocol. * * Version: @(#)in.h 1.0.1 04/21/93 * * Authors: Original taken from the GNU Project <netinet/in.h> file. * Fred N. 
van Kempen, <waltje@uWalt.NL.Mugnet.ORG> */ #ifndef _LINUX_IN_H #define _LINUX_IN_H #include <linux/errno.h> #include <uapi/linux/in.h> static inline int proto_ports_offset(int proto) { switch (proto) { case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_DCCP: case IPPROTO_ESP: /* SPI */ case IPPROTO_SCTP: case IPPROTO_UDPLITE: return 0; case IPPROTO_AH: /* SPI */ return 4; default: return -EINVAL; } } static inline bool ipv4_is_loopback(__be32 addr) { return (addr & htonl(0xff000000)) == htonl(0x7f000000); } static inline bool ipv4_is_multicast(__be32 addr) { return (addr & htonl(0xf0000000)) == htonl(0xe0000000); } static inline bool ipv4_is_local_multicast(__be32 addr) { return (addr & htonl(0xffffff00)) == htonl(0xe0000000); } static inline bool ipv4_is_lbcast(__be32 addr) { /* limited broadcast */ return addr == htonl(INADDR_BROADCAST); } static inline bool ipv4_is_all_snoopers(__be32 addr) { return addr == htonl(INADDR_ALLSNOOPERS_GROUP); } static inline bool ipv4_is_zeronet(__be32 addr) { return (addr == 0); } /* Special-Use IPv4 Addresses (RFC3330) */ static inline bool ipv4_is_private_10(__be32 addr) { return (addr & htonl(0xff000000)) == htonl(0x0a000000); } static inline bool ipv4_is_private_172(__be32 addr) { return (addr & htonl(0xfff00000)) == htonl(0xac100000); } static inline bool ipv4_is_private_192(__be32 addr) { return (addr & htonl(0xffff0000)) == htonl(0xc0a80000); } static inline bool ipv4_is_linklocal_169(__be32 addr) { return (addr & htonl(0xffff0000)) == htonl(0xa9fe0000); } static inline bool ipv4_is_anycast_6to4(__be32 addr) { return (addr & htonl(0xffffff00)) == htonl(0xc0586300); } static inline bool ipv4_is_test_192(__be32 addr) { return (addr & htonl(0xffffff00)) == htonl(0xc0000200); } static inline bool ipv4_is_test_198(__be32 addr) { return (addr & htonl(0xfffe0000)) == htonl(0xc6120000); } #endif /* _LINUX_IN_H */ irqchip.h 0000644 00000003050 14722070374 0006360 0 ustar 00 /* * Copyright (C) 2012 Thomas Petazzoni * * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #ifndef _LINUX_IRQCHIP_H #define _LINUX_IRQCHIP_H #include <linux/acpi.h> #include <linux/of.h> /* * This macro must be used by the different irqchip drivers to declare * the association between their DT compatible string and their * initialization function. * * @name: name that must be unique across all IRQCHIP_DECLARE of the * same file. * @compat: compatible string of the irqchip driver * @fn: initialization function */ #define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) /* * This macro must be used by the different irqchip drivers to declare * the association between their version and their initialization function. * * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE of the * same file. * @subtable: Subtable to be identified in MADT * @validate: Function to be called on that subtable to check its validity. * Can be NULL. * @data: data to be checked by the validate function.
* @fn: initialization function */ #define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \ ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \ subtable, validate, data, fn) #ifdef CONFIG_IRQCHIP void irqchip_init(void); #else static inline void irqchip_init(void) {} #endif #endif counter.h 0000644 00000041564 14722070374 0006414 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Counter interface * Copyright (C) 2018 William Breathitt Gray */ #ifndef _COUNTER_H_ #define _COUNTER_H_ #include <linux/counter_enum.h> #include <linux/device.h> #include <linux/types.h> enum counter_count_direction { COUNTER_COUNT_DIRECTION_FORWARD = 0, COUNTER_COUNT_DIRECTION_BACKWARD }; extern const char *const counter_count_direction_str[2]; enum counter_count_mode { COUNTER_COUNT_MODE_NORMAL = 0, COUNTER_COUNT_MODE_RANGE_LIMIT, COUNTER_COUNT_MODE_NON_RECYCLE, COUNTER_COUNT_MODE_MODULO_N }; extern const char *const counter_count_mode_str[4]; struct counter_device; struct counter_signal; /** * struct counter_signal_ext - Counter Signal extensions * @name: attribute name * @read: read callback for this attribute; may be NULL * @write: write callback for this attribute; may be NULL * @priv: data private to the driver */ struct counter_signal_ext { const char *name; ssize_t (*read)(struct counter_device *counter, struct counter_signal *signal, void *priv, char *buf); ssize_t (*write)(struct counter_device *counter, struct counter_signal *signal, void *priv, const char *buf, size_t len); void *priv; }; /** * struct counter_signal - Counter Signal node * @id: unique ID used to identify signal * @name: device-specific Signal name; ideally, this should match the name * as it appears in the datasheet documentation * @ext: optional array of Counter Signal extensions * @num_ext: number of Counter Signal extensions specified in @ext * @priv: optional private data supplied by driver */ struct counter_signal { int id; const char *name; const struct counter_signal_ext *ext; size_t num_ext; void *priv; }; /** * struct counter_signal_enum_ext - Signal enum extension attribute * @items: Array of strings * @num_items: Number of items specified in @items * @set: Set callback function; may be NULL * @get: Get callback function; may be NULL * * The counter_signal_enum_ext structure can be used to implement enum style * Signal extension attributes. Enum style attributes are those which have a set * of strings that map to unsigned integer values. The Generic Counter Signal * enum extension helper code takes care of mapping between value and string, as * well as generating a "_available" file which contains a list of all available * items. The get callback is used to query the currently active item; the index * of the item within the respective items array is returned via the 'item' * parameter. The set callback is called when the attribute is updated; the * 'item' parameter contains the index of the newly activated item within the * respective items array. 
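 *
 * A minimal sketch (the "polarity" attribute name and the my_get/my_set
 * callbacks are hypothetical, for illustration only):
 *
 *	static const char *const polarity_items[] = { "normal", "inverted" };
 *
 *	static const struct counter_signal_enum_ext polarity_enum = {
 *		.items = polarity_items,
 *		.num_items = ARRAY_SIZE(polarity_items),
 *		.get = my_get,
 *		.set = my_set,
 *	};
 *
 * The attribute pair would then be listed in a Signal's ext array via
 * COUNTER_SIGNAL_ENUM("polarity", &polarity_enum) and
 * COUNTER_SIGNAL_ENUM_AVAILABLE("polarity", &polarity_enum).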
*/ struct counter_signal_enum_ext { const char * const *items; size_t num_items; int (*get)(struct counter_device *counter, struct counter_signal *signal, size_t *item); int (*set)(struct counter_device *counter, struct counter_signal *signal, size_t item); }; /** * COUNTER_SIGNAL_ENUM() - Initialize Signal enum extension * @_name: Attribute name * @_e: Pointer to a counter_signal_enum_ext structure * * This should usually be used together with COUNTER_SIGNAL_ENUM_AVAILABLE() */ #define COUNTER_SIGNAL_ENUM(_name, _e) \ { \ .name = (_name), \ .read = counter_signal_enum_read, \ .write = counter_signal_enum_write, \ .priv = (_e) \ } /** * COUNTER_SIGNAL_ENUM_AVAILABLE() - Initialize Signal enum available extension * @_name: Attribute name ("_available" will be appended to the name) * @_e: Pointer to a counter_signal_enum_ext structure * * Creates a read only attribute that lists all the available enum items in a * newline separated list. This should usually be used together with * COUNTER_SIGNAL_ENUM() */ #define COUNTER_SIGNAL_ENUM_AVAILABLE(_name, _e) \ { \ .name = (_name "_available"), \ .read = counter_signal_enum_available_read, \ .priv = (_e) \ } enum counter_synapse_action { COUNTER_SYNAPSE_ACTION_NONE = 0, COUNTER_SYNAPSE_ACTION_RISING_EDGE, COUNTER_SYNAPSE_ACTION_FALLING_EDGE, COUNTER_SYNAPSE_ACTION_BOTH_EDGES }; /** * struct counter_synapse - Counter Synapse node * @action: index of current action mode * @actions_list: array of available action modes * @num_actions: number of action modes specified in @actions_list * @signal: pointer to associated signal */ struct counter_synapse { size_t action; const enum counter_synapse_action *actions_list; size_t num_actions; struct counter_signal *signal; }; struct counter_count; /** * struct counter_count_ext - Counter Count extension * @name: attribute name * @read: read callback for this attribute; may be NULL * @write: write callback for this attribute; may be NULL * @priv: data private to the driver */ struct counter_count_ext { const char *name; ssize_t (*read)(struct counter_device *counter, struct counter_count *count, void *priv, char *buf); ssize_t (*write)(struct counter_device *counter, struct counter_count *count, void *priv, const char *buf, size_t len); void *priv; }; enum counter_count_function { COUNTER_COUNT_FUNCTION_INCREASE = 0, COUNTER_COUNT_FUNCTION_DECREASE, COUNTER_COUNT_FUNCTION_PULSE_DIRECTION, COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A, COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B, COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A, COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B, COUNTER_COUNT_FUNCTION_QUADRATURE_X4 }; /** * struct counter_count - Counter Count node * @id: unique ID used to identify Count * @name: device-specific Count name; ideally, this should match * the name as it appears in the datasheet documentation * @function: index of current function mode * @functions_list: array of available function modes * @num_functions: number of function modes specified in @functions_list * @synapses: array of synapses for initialization * @num_synapses: number of synapses specified in @synapses * @ext: optional array of Counter Count extensions * @num_ext: number of Counter Count extensions specified in @ext * @priv: optional private data supplied by driver */ struct counter_count { int id; const char *name; size_t function; const enum counter_count_function *functions_list; size_t num_functions; struct counter_synapse *synapses; size_t num_synapses; const struct counter_count_ext *ext; size_t num_ext; void *priv; }; /** * struct
counter_count_enum_ext - Count enum extension attribute * @items: Array of strings * @num_items: Number of items specified in @items * @set: Set callback function; may be NULL * @get: Get callback function; may be NULL * * The counter_count_enum_ext structure can be used to implement enum style * Count extension attributes. Enum style attributes are those which have a set * of strings that map to unsigned integer values. The Generic Counter Count * enum extension helper code takes care of mapping between value and string, as * well as generating a "_available" file which contains a list of all available * items. The get callback is used to query the currently active item; the index * of the item within the respective items array is returned via the 'item' * parameter. The set callback is called when the attribute is updated; the * 'item' parameter contains the index of the newly activated item within the * respective items array. */ struct counter_count_enum_ext { const char * const *items; size_t num_items; int (*get)(struct counter_device *counter, struct counter_count *count, size_t *item); int (*set)(struct counter_device *counter, struct counter_count *count, size_t item); }; /** * COUNTER_COUNT_ENUM() - Initialize Count enum extension * @_name: Attribute name * @_e: Pointer to a counter_count_enum_ext structure * * This should usually be used together with COUNTER_COUNT_ENUM_AVAILABLE() */ #define COUNTER_COUNT_ENUM(_name, _e) \ { \ .name = (_name), \ .read = counter_count_enum_read, \ .write = counter_count_enum_write, \ .priv = (_e) \ } /** * COUNTER_COUNT_ENUM_AVAILABLE() - Initialize Count enum available extension * @_name: Attribute name ("_available" will be appended to the name) * @_e: Pointer to a counter_count_enum_ext structure * * Creates a read only attribute that lists all the available enum items in a * newline separated list. 
This should usually be used together with * COUNTER_COUNT_ENUM() */ #define COUNTER_COUNT_ENUM_AVAILABLE(_name, _e) \ { \ .name = (_name "_available"), \ .read = counter_count_enum_available_read, \ .priv = (_e) \ } /** * struct counter_device_attr_group - internal container for attribute group * @attr_group: Counter sysfs attributes group * @attr_list: list to keep track of created Counter sysfs attributes * @num_attr: number of Counter sysfs attributes */ struct counter_device_attr_group { struct attribute_group attr_group; struct list_head attr_list; size_t num_attr; }; /** * struct counter_device_state - internal state container for a Counter device * @id: unique ID used to identify the Counter * @dev: internal device structure * @groups_list: attribute groups list (for Signals, Counts, and ext) * @num_groups: number of attribute groups containers * @groups: Counter sysfs attribute groups (to populate @dev.groups) */ struct counter_device_state { int id; struct device dev; struct counter_device_attr_group *groups_list; size_t num_groups; const struct attribute_group **groups; }; /** * struct counter_signal_read_value - Opaque Signal read value * @buf: string representation of Signal read value * @len: length of string in @buf */ struct counter_signal_read_value { char *buf; size_t len; }; /** * struct counter_count_read_value - Opaque Count read value * @buf: string representation of Count read value * @len: length of string in @buf */ struct counter_count_read_value { char *buf; size_t len; }; /** * struct counter_count_write_value - Opaque Count write value * @buf: string representation of Count write value */ struct counter_count_write_value { const char *buf; }; /** * struct counter_ops - Callbacks from driver * @signal_read: optional read callback for Signal attribute. The read * value of the respective Signal should be passed back via * the val parameter. val points to an opaque type which * should be set only by calling the * counter_signal_read_value_set function from within the * signal_read callback. * @count_read: optional read callback for Count attribute. The read * value of the respective Count should be passed back via * the val parameter. val points to an opaque type which * should be set only by calling the * counter_count_read_value_set function from within the * count_read callback. * @count_write: optional write callback for Count attribute. The write * value for the respective Count is passed in via the val * parameter. val points to an opaque type which should be * accessed only by calling the * counter_count_write_value_get function. * @function_get: function to get the current count function mode. Returns * 0 on success and negative error code on error. The index * of the respective Count's returned function mode should * be passed back via the function parameter. * @function_set: function to set the count function mode. function is the * index of the requested function mode from the respective * Count's functions_list array. * @action_get: function to get the current action mode. Returns 0 on * success and negative error code on error. The index of * the respective Signal's returned action mode should be * passed back via the action parameter. * @action_set: function to set the action mode. action is the index of * the requested action mode from the respective Synapse's * actions_list array. 
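 *
 * A minimal sketch of a driver's callback table (the my_* functions are
 * hypothetical):
 *
 *	static const struct counter_ops my_counter_ops = {
 *		.count_read = my_count_read,
 *		.function_get = my_function_get,
 *		.function_set = my_function_set,
 *		.action_get = my_action_get,
 *		.action_set = my_action_set,
 *	};
 *
 * which a driver assigns to the ops member of its counter_device before
 * calling counter_register().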
*/ struct counter_ops { int (*signal_read)(struct counter_device *counter, struct counter_signal *signal, struct counter_signal_read_value *val); int (*count_read)(struct counter_device *counter, struct counter_count *count, struct counter_count_read_value *val); int (*count_write)(struct counter_device *counter, struct counter_count *count, struct counter_count_write_value *val); int (*function_get)(struct counter_device *counter, struct counter_count *count, size_t *function); int (*function_set)(struct counter_device *counter, struct counter_count *count, size_t function); int (*action_get)(struct counter_device *counter, struct counter_count *count, struct counter_synapse *synapse, size_t *action); int (*action_set)(struct counter_device *counter, struct counter_count *count, struct counter_synapse *synapse, size_t action); }; /** * struct counter_device_ext - Counter device extension * @name: attribute name * @read: read callback for this attribute; may be NULL * @write: write callback for this attribute; may be NULL * @priv: data private to the driver */ struct counter_device_ext { const char *name; ssize_t (*read)(struct counter_device *counter, void *priv, char *buf); ssize_t (*write)(struct counter_device *counter, void *priv, const char *buf, size_t len); void *priv; }; /** * struct counter_device_enum_ext - Counter enum extension attribute * @items: Array of strings * @num_items: Number of items specified in @items * @set: Set callback function; may be NULL * @get: Get callback function; may be NULL * * The counter_device_enum_ext structure can be used to implement enum style * Counter extension attributes. Enum style attributes are those which have a * set of strings that map to unsigned integer values. The Generic Counter enum * extension helper code takes care of mapping between value and string, as well * as generating a "_available" file which contains a list of all available * items. The get callback is used to query the currently active item; the index * of the item within the respective items array is returned via the 'item' * parameter. The set callback is called when the attribute is updated; the * 'item' parameter contains the index of the newly activated item within the * respective items array. */ struct counter_device_enum_ext { const char * const *items; size_t num_items; int (*get)(struct counter_device *counter, size_t *item); int (*set)(struct counter_device *counter, size_t item); }; /** * COUNTER_DEVICE_ENUM() - Initialize Counter enum extension * @_name: Attribute name * @_e: Pointer to a counter_device_enum_ext structure * * This should usually be used together with COUNTER_DEVICE_ENUM_AVAILABLE() */ #define COUNTER_DEVICE_ENUM(_name, _e) \ { \ .name = (_name), \ .read = counter_device_enum_read, \ .write = counter_device_enum_write, \ .priv = (_e) \ } /** * COUNTER_DEVICE_ENUM_AVAILABLE() - Initialize Counter enum available extension * @_name: Attribute name ("_available" will be appended to the name) * @_e: Pointer to a counter_device_enum_ext structure * * Creates a read only attribute that lists all the available enum items in a * newline separated list. 
This should usually be used together with * COUNTER_DEVICE_ENUM() */ #define COUNTER_DEVICE_ENUM_AVAILABLE(_name, _e) \ { \ .name = (_name "_available"), \ .read = counter_device_enum_available_read, \ .priv = (_e) \ } /** * struct counter_device - Counter data structure * @name: name of the device as it appears in the datasheet * @parent: optional parent device providing the counters * @device_state: internal device state container * @ops: callbacks from driver * @signals: array of Signals * @num_signals: number of Signals specified in @signals * @counts: array of Counts * @num_counts: number of Counts specified in @counts * @ext: optional array of Counter device extensions * @num_ext: number of Counter device extensions specified in @ext * @priv: optional private data supplied by driver */ struct counter_device { const char *name; struct device *parent; struct counter_device_state *device_state; const struct counter_ops *ops; struct counter_signal *signals; size_t num_signals; struct counter_count *counts; size_t num_counts; const struct counter_device_ext *ext; size_t num_ext; void *priv; }; enum counter_signal_level { COUNTER_SIGNAL_LEVEL_LOW = 0, COUNTER_SIGNAL_LEVEL_HIGH }; enum counter_signal_value_type { COUNTER_SIGNAL_LEVEL = 0 }; enum counter_count_value_type { COUNTER_COUNT_POSITION = 0, }; void counter_signal_read_value_set(struct counter_signal_read_value *const val, const enum counter_signal_value_type type, void *const data); void counter_count_read_value_set(struct counter_count_read_value *const val, const enum counter_count_value_type type, void *const data); int counter_count_write_value_get(void *const data, const enum counter_count_value_type type, const struct counter_count_write_value *const val); int counter_register(struct counter_device *const counter); void counter_unregister(struct counter_device *const counter); int devm_counter_register(struct device *dev, struct counter_device *const counter); void devm_counter_unregister(struct device *dev, struct counter_device *const counter); #endif /* _COUNTER_H_ */ devfreq-event.h 0000644 00000013514 14722070374 0007502 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * devfreq-event: a framework to provide raw data and events of devfreq devices * * Copyright (C) 2014 Samsung Electronics * Author: Chanwoo Choi <cw00.choi@samsung.com> */ #ifndef __LINUX_DEVFREQ_EVENT_H__ #define __LINUX_DEVFREQ_EVENT_H__ #include <linux/device.h> /** * struct devfreq_event_dev - the devfreq-event device * * @node : Contains the devfreq-event device that has been registered. * @dev : the device registered by devfreq-event class. dev.parent is * the device using devfreq-event. * @lock : a mutex to protect accessing devfreq-event. * @enable_count: the number of times the enable function has been called. * @desc : the description for devfreq-event device. * * This structure contains devfreq-event device information. */ struct devfreq_event_dev { struct list_head node; struct device dev; struct mutex lock; u32 enable_count; const struct devfreq_event_desc *desc; }; /** * struct devfreq_event_data - the devfreq-event data * * @load_count : load count of devfreq-event device for the given period. * @total_count : total count of devfreq-event device for the given period. * each count may represent a clock cycle, a time unit * (ns/us/...), or anything the device driver wants. * Generally, utilization is load_count / total_count. * * This structure contains the data of the devfreq-event device for a polling * period.
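 *
 * For example (a worked sketch, not tied to any particular driver): if
 * get_event fills in load_count = 250 and total_count = 1000 for a
 * period, the consumer derives a utilization of 250 / 1000 = 25% for
 * that period.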
*/ struct devfreq_event_data { unsigned long load_count; unsigned long total_count; }; /** * struct devfreq_event_ops - the operations of a devfreq-event device * * @enable : Enable the devfreq-event device. * @disable : Disable the devfreq-event device. * @reset : Reset all settings of the devfreq-event device. * @set_event : Set the specific event type for the devfreq-event device. * @get_event : Get the result of the devfreq-event device for the specific * event type. * * This structure contains devfreq-event device operations which can be * implemented by devfreq-event device drivers. */ struct devfreq_event_ops { /* Optional functions */ int (*enable)(struct devfreq_event_dev *edev); int (*disable)(struct devfreq_event_dev *edev); int (*reset)(struct devfreq_event_dev *edev); /* Mandatory functions */ int (*set_event)(struct devfreq_event_dev *edev); int (*get_event)(struct devfreq_event_dev *edev, struct devfreq_event_data *edata); }; /** * struct devfreq_event_desc - the descriptor of a devfreq-event device * * @name : the name of the devfreq-event device. * @event_type : the type of the event determined and used by the driver. * @driver_data : the private data for the devfreq-event driver. * @ops : the operations used to control the devfreq-event device. * * Each devfreq-event device is described by this structure. * This structure contains the various data for the devfreq-event device. * The event_type describes what is going to be counted in the register. * It might choose to count e.g. read requests, write data in bytes, etc. * The full list of supported types is present in device-specific headers in * include/dt-bindings/pmu/. */ struct devfreq_event_desc { const char *name; u32 event_type; void *driver_data; const struct devfreq_event_ops *ops; }; #if defined(CONFIG_PM_DEVFREQ_EVENT) extern int devfreq_event_enable_edev(struct devfreq_event_dev *edev); extern int devfreq_event_disable_edev(struct devfreq_event_dev *edev); extern bool devfreq_event_is_enabled(struct devfreq_event_dev *edev); extern int devfreq_event_set_event(struct devfreq_event_dev *edev); extern int devfreq_event_get_event(struct devfreq_event_dev *edev, struct devfreq_event_data *edata); extern int devfreq_event_reset_event(struct devfreq_event_dev *edev); extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle( struct device *dev, int index); extern int devfreq_event_get_edev_count(struct device *dev); extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev, struct devfreq_event_desc *desc); extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev); extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev, struct devfreq_event_desc *desc); extern void devm_devfreq_event_remove_edev(struct device *dev, struct devfreq_event_dev *edev); static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev) { return edev->desc->driver_data; } #else static inline int devfreq_event_enable_edev(struct devfreq_event_dev *edev) { return -EINVAL; } static inline int devfreq_event_disable_edev(struct devfreq_event_dev *edev) { return -EINVAL; } static inline bool devfreq_event_is_enabled(struct devfreq_event_dev *edev) { return false; } static inline int devfreq_event_set_event(struct devfreq_event_dev *edev) { return -EINVAL; } static inline int devfreq_event_get_event(struct devfreq_event_dev *edev, struct devfreq_event_data *edata) { return -EINVAL; } static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev) { return -EINVAL; } static inline struct 
devfreq_event_dev *devfreq_event_get_edev_by_phandle( struct device *dev, int index) { return ERR_PTR(-EINVAL); } static inline int devfreq_event_get_edev_count(struct device *dev) { return -EINVAL; } static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev, struct devfreq_event_desc *desc) { return ERR_PTR(-EINVAL); } static inline int devfreq_event_remove_edev(struct devfreq_event_dev *edev) { return -EINVAL; } static inline struct devfreq_event_dev *devm_devfreq_event_add_edev( struct device *dev, struct devfreq_event_desc *desc) { return ERR_PTR(-EINVAL); } static inline void devm_devfreq_event_remove_edev(struct device *dev, struct devfreq_event_dev *edev) { } static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev) { return NULL; } #endif /* CONFIG_PM_DEVFREQ_EVENT */ #endif /* __LINUX_DEVFREQ_EVENT_H__ */ scif.h 0000644 00000165570 14722070374 0005665 0 ustar 00 /* * Intel MIC Platform Software Stack (MPSS) * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Intel SCIF driver. 
* */ #ifndef __SCIF_H__ #define __SCIF_H__ #include <linux/types.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/scif_ioctl.h> #define SCIF_ACCEPT_SYNC 1 #define SCIF_SEND_BLOCK 1 #define SCIF_RECV_BLOCK 1 enum { SCIF_PROT_READ = (1 << 0), SCIF_PROT_WRITE = (1 << 1) }; enum { SCIF_MAP_FIXED = 0x10, SCIF_MAP_KERNEL = 0x20, }; enum { SCIF_FENCE_INIT_SELF = (1 << 0), SCIF_FENCE_INIT_PEER = (1 << 1), SCIF_SIGNAL_LOCAL = (1 << 4), SCIF_SIGNAL_REMOTE = (1 << 5) }; enum { SCIF_RMA_USECPU = (1 << 0), SCIF_RMA_USECACHE = (1 << 1), SCIF_RMA_SYNC = (1 << 2), SCIF_RMA_ORDERED = (1 << 3) }; /* End of SCIF Admin Reserved Ports */ #define SCIF_ADMIN_PORT_END 1024 /* End of SCIF Reserved Ports */ #define SCIF_PORT_RSVD 1088 typedef struct scif_endpt *scif_epd_t; typedef struct scif_pinned_pages *scif_pinned_pages_t; /** * struct scif_range - SCIF registered range used in kernel mode * @cookie: cookie used internally by SCIF * @nr_pages: number of pages of PAGE_SIZE * @prot_flags: R/W protection * @phys_addr: Array of bus addresses * @va: Array of kernel virtual addresses backed by the pages in the phys_addr * array. The va is populated only when called on the host for a remote * SCIF connection on MIC. This is required to support the use case of DMA * between MIC and another device which is not a SCIF node e.g., an IB or * ethernet NIC. */ struct scif_range { void *cookie; int nr_pages; int prot_flags; dma_addr_t *phys_addr; void __iomem **va; }; /** * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll * @epd: SCIF endpoint * @events: requested events * @revents: returned events */ struct scif_pollepd { scif_epd_t epd; __poll_t events; __poll_t revents; }; /** * scif_peer_dev - representation of a peer SCIF device * * Peer devices show up as PCIe devices for the mgmt node but not the cards. * The mgmt node discovers all the cards on the PCIe bus and informs the other * cards about their peers. Upon notification of a peer a node adds a peer * device to the peer bus to maintain symmetry in the way devices are * discovered across all nodes in the SCIF network. * * @dev: underlying device * @dnode - The destination node which this device will communicate with. */ struct scif_peer_dev { struct device dev; u8 dnode; }; /** * scif_client - representation of a SCIF client * @name: client name * @probe - client method called when a peer device is registered * @remove - client method called when a peer device is unregistered * @si - subsys_interface used internally for implementing SCIF clients */ struct scif_client { const char *name; void (*probe)(struct scif_peer_dev *spdev); void (*remove)(struct scif_peer_dev *spdev); struct subsys_interface si; }; #define SCIF_OPEN_FAILED ((scif_epd_t)-1) #define SCIF_REGISTER_FAILED ((off_t)-1) #define SCIF_MMAP_FAILED ((void *)-1) /** * scif_open() - Create an endpoint * * Return: * Upon successful completion, scif_open() returns an endpoint descriptor to * be used in subsequent SCIF functions calls to refer to that endpoint; * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is * returned and errno is set to indicate the error; in kernel mode a NULL * scif_epd_t is returned. * * Errors: * ENOMEM - Insufficient kernel memory was available */ scif_epd_t scif_open(void); /** * scif_bind() - Bind an endpoint to a port * @epd: endpoint descriptor * @pn: port number * * scif_bind() binds endpoint epd to port pn, where pn is a port number on the * local node. 
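 *
 * As an illustrative sketch (not part of the original header; the port
 * number, backlog and error handling are hypothetical), a kernel-mode
 * server side combining scif_open(), scif_bind() and the scif_listen()/
 * scif_accept() calls documented below might look like:
 *
 *	struct scif_port_id peer;
 *	scif_epd_t lep, cep;
 *
 *	lep = scif_open();
 *	if (!lep)
 *		return -ENOMEM;
 *	if (scif_bind(lep, 2000) < 0 ||
 *	    scif_listen(lep, 16) < 0 ||
 *	    scif_accept(lep, &peer, &cep, SCIF_ACCEPT_SYNC) < 0) {
 *		scif_close(lep);
 *		return -EIO;
 *	}
 *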
If pn is zero, a port number greater than or equal to * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to * exactly one local port. When explicitly requested, ports less than 1024 can only be bound * by system (or root) processes or by processes executed by privileged users. * * Return: * Upon successful completion, scif_bind() returns the port number to which epd * is bound; otherwise in user mode -1 is returned and errno is set to * indicate the error; in kernel mode the negative of one of the following * errors is returned. * * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor * EINVAL - the endpoint or the port is already bound * EISCONN - The endpoint is already connected * ENOSPC - No port number available for assignment * EACCES - The port requested is protected and the user is not the superuser */ int scif_bind(scif_epd_t epd, u16 pn); /** * scif_listen() - Listen for connections on an endpoint * @epd: endpoint descriptor * @backlog: maximum pending connection requests * * scif_listen() marks the endpoint epd as a listening endpoint - that is, as * an endpoint that will be used to accept incoming connection requests. Once * so marked, the endpoint is said to be in the listening state and may not be * used as the endpoint of a connection. * * The endpoint, epd, must have been bound to a port. * * The backlog argument defines the maximum length to which the queue of * pending connections for epd may grow. If a connection request arrives when * the queue is full, the client may receive an error with an indication that * the connection was refused. * * Return: * Upon successful completion, scif_listen() returns 0; otherwise in user mode * -1 is returned and errno is set to indicate the error; in kernel mode the * negative of one of the following errors is returned. * * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor * EINVAL - the endpoint is not bound to a port * EISCONN - The endpoint is already connected or listening */ int scif_listen(scif_epd_t epd, int backlog); /** * scif_connect() - Initiate a connection on a port * @epd: endpoint descriptor * @dst: global id of port to which to connect * * The scif_connect() function requests the connection of endpoint epd to remote * port dst. If the connection is successful, a peer endpoint, bound to dst, is * created on node dst.node. On successful return, the connection is complete. * * If the endpoint epd has not already been bound to a port, scif_connect() * will bind it to an unused local port. * * A connection is terminated when an endpoint of the connection is closed, * either explicitly by scif_close(), or when a process that owns one of the * endpoints of the connection is terminated. * * In user space, scif_connect() supports an asynchronous connection mode * if the application has set the O_NONBLOCK flag on the endpoint via the * fcntl() system call. Setting this flag results in the calling process * not waiting during scif_connect(). * * Return: * Upon successful completion, scif_connect() returns the port ID to which the * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is * set to indicate the error; in kernel mode the negative of one of the * following errors is returned. 
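 *
 * As an illustrative sketch (not part of the original header; the node and
 * port values are hypothetical), the kernel-mode client side of the above
 * would be:
 *
 *	struct scif_port_id dst = { .node = 1, .port = 2000 };
 *	scif_epd_t epd;
 *	int ret;
 *
 *	epd = scif_open();
 *	if (!epd)
 *		return -ENOMEM;
 *	ret = scif_connect(epd, &dst);
 *	if (ret < 0)
 *		scif_close(epd);
 *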
* * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNREFUSED - The destination was not listening for connections or refused * the connection request * EINVAL - dst.port is not a valid port ID * EISCONN - The endpoint is already connected * ENOMEM - No buffer space is available * ENODEV - The destination node does not exist, or the node is lost or existed, * but is not currently in the network since it may have crashed * ENOSPC - No port number available for assignment * EOPNOTSUPP - The endpoint is listening and cannot be connected */ int scif_connect(scif_epd_t epd, struct scif_port_id *dst); /** * scif_accept() - Accept a connection on an endpoint * @epd: endpoint descriptor * @peer: global id of port to which connected * @newepd: new connected endpoint descriptor * @flags: flags * * The scif_accept() call extracts the first connection request from the queue * of pending connections for the port on which epd is listening. scif_accept() * creates a new endpoint, bound to the same port as epd, and allocates a new * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new * endpoint is connected to the endpoint through which the connection was * requested. epd is unaffected by this call, and remains in the listening * state. * * On successful return, peer holds the global port identifier (node id and * local port number) of the port which requested the connection. * * A connection is terminated when an endpoint of the connection is closed, * either explicitly by scif_close(), or when a process that owns one of the * endpoints of the connection is terminated. * * The number of connections that can (subsequently) be accepted on epd is only * limited by system resources (memory). * * The flags argument is formed by OR'ing together zero or more of the * following values. * SCIF_ACCEPT_SYNC - block until a connection request is presented. If * SCIF_ACCEPT_SYNC is not in flags, and no pending * connections are present on the queue, scif_accept() * fails with an EAGAIN error * * In user mode, the select() and poll() functions can be used to determine * when there is a connection request. In kernel mode, the scif_poll() * function may be used for this purpose. A readable event will be delivered * when a connection is requested. * * Return: * Upon successful completion, scif_accept() returns 0; otherwise in user mode * -1 is returned and errno is set to indicate the error; in kernel mode the * negative of one of the following errors is returned. * * Errors: * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete * its connection request * EBADF, ENOTTY - epd is not a valid endpoint descriptor * EINTR - Interrupted function * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is * NULL, or newepd is NULL * ENODEV - The requesting node is lost or existed, but is not currently in the * network since it may have crashed * ENOMEM - Not enough space * ENOENT - Secondary part of epd registration failed */ int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t *newepd, int flags); /** * scif_close() - Close an endpoint * @epd: endpoint descriptor * * scif_close() closes an endpoint and performs necessary teardown of * facilities associated with that endpoint. * * If epd is a listening endpoint then it will no longer accept connection * requests on the port to which it is bound. Any pending connection requests * are rejected. 
* * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs * which are in progress through epd or its peer endpoint will complete before * scif_close() returns. Registered windows of the local and peer endpoints are * released as if scif_unregister() was called against each window. * * Closing a SCIF endpoint does not affect local registered memory mapped by * a SCIF endpoint on a remote node. The local memory remains mapped by the peer * SCIF endpoint until it is explicitly removed by the peer calling munmap(..). * * If the peer endpoint's receive queue is not empty at the time that epd is * closed, then the peer endpoint can be passed as the endpoint parameter to * scif_recv() until the receive queue is empty. * * epd is freed and may no longer be accessed. * * Return: * Upon successful completion, scif_close() returns 0; otherwise in user mode * -1 is returned and errno is set to indicate the error; in kernel mode the * negative of one of the following errors is returned. * * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor */ int scif_close(scif_epd_t epd); /** * scif_send() - Send a message * @epd: endpoint descriptor * @msg: message buffer address * @len: message length * @flags: blocking mode flags * * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data * are copied from memory starting at address msg. On successful execution the * return value of scif_send() is the number of bytes that were sent, and is * zero if no bytes were sent because len was zero. scif_send() may be called * only when the endpoint is in a connected state. * * If a scif_send() call is non-blocking, then it sends only those bytes which * can be sent without waiting, up to a maximum of len bytes. * * If a scif_send() call is blocking, then it normally returns after sending * all len bytes. If a blocking call is interrupted or the connection is * reset, the call is considered successful if some bytes were sent or len is * zero, otherwise the call is considered unsuccessful. * * In user mode, the select() and poll() functions can be used to determine * when the send queue is not full. In kernel mode, the scif_poll() function * may be used for this purpose. * * It is recommended that scif_send()/scif_recv() only be used for short * control-type message communication between SCIF endpoints. The SCIF RMA * APIs are expected to provide better performance for transfer sizes of * 1024 bytes or longer for the current MIC hardware and software * implementation. * * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK * is passed as the flags argument. * * Return: * Upon successful completion, scif_send() returns the number of bytes sent; * otherwise in user mode -1 is returned and errno is set to indicate the * error; in kernel mode the negative of one of the following errors is * returned. * * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * EINVAL - flags is invalid, or len is negative * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOMEM - Not enough space * ENOTCONN - The endpoint is not connected */ int scif_send(scif_epd_t epd, void *msg, int len, int flags); /** * scif_recv() - Receive a message * @epd: endpoint descriptor * @msg: message buffer address * @len: message buffer length * @flags: blocking mode flags * * scif_recv() receives data from the peer of endpoint epd. 
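 *
 * As an illustrative sketch (not part of the original header; epd is a
 * hypothetical connected endpoint), a short blocking exchange built on
 * scif_send() above and scif_recv() is:
 *
 *	char buf[64];
 *	int n;
 *
 *	n = scif_send(epd, "ping", 4, SCIF_SEND_BLOCK);
 *	if (n < 0)
 *		return n;
 *	n = scif_recv(epd, buf, 4, SCIF_RECV_BLOCK);
 *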
Up to len bytes of * data are copied to memory starting at address msg. On successful execution * the return value of scif_recv() is the number of bytes that were received, * and is zero if no bytes were received because len was zero. scif_recv() may * be called only when the endpoint is in a connected state. * * If a scif_recv() call is non-blocking, then it receives only those bytes * which can be received without waiting, up to a maximum of len bytes. * * If a scif_recv() call is blocking, then it normally returns after receiving * all len bytes. If the blocking call was interrupted due to a disconnection, * subsequent calls to scif_recv() will copy all bytes received up to the point * of disconnection. * * In user mode, the select() and poll() functions can be used to determine * when data is available to be received. In kernel mode, the scif_poll() * function may be used for this purpose. * * It is recommended that scif_send()/scif_recv() only be used for short * control-type message communication between SCIF endpoints. The SCIF RMA * APIs are expected to provide better performance for transfer sizes of * 1024 bytes or longer for the current MIC hardware and software * implementation. * * scif_recv() will block until the entire message is received if * SCIF_RECV_BLOCK is passed as the flags argument. * * Return: * Upon successful completion, scif_recv() returns the number of bytes * received; otherwise in user mode -1 is returned and errno is set to * indicate the error; in kernel mode the negative of one of the following * errors is returned. * * Errors: * EAGAIN - The destination node is returning from a low power state * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * EINVAL - flags is invalid, or len is negative * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOMEM - Not enough space * ENOTCONN - The endpoint is not connected */ int scif_recv(scif_epd_t epd, void *msg, int len, int flags); /** * scif_register() - Mark a memory region for remote access. * @epd: endpoint descriptor * @addr: starting virtual address * @len: length of range * @offset: offset of window * @prot_flags: read/write protection flags * @map_flags: mapping flags * * The scif_register() function opens a window, a range of whole pages of the * registered address space of the endpoint epd, starting at offset po and * continuing for len bytes. The value of po, further described below, is a * function of the parameters offset and len, and the value of map_flags. Each * page of the window represents the physical memory page which backs the * corresponding page of the range of virtual address pages starting at addr * and continuing for len bytes. addr and len are constrained to be multiples * of the page size. A successful scif_register() call returns po. * * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset * exactly, and offset is constrained to be a multiple of the page size. The * mapping established by scif_register() will not replace any existing * registration; an error is returned if any page within the range [offset, * offset + len - 1] intersects an existing window. * * When SCIF_MAP_FIXED is not set, the implementation uses offset in an * implementation-defined manner to arrive at po. The po value so chosen will * be an area of the registered address space that the implementation deems * suitable for a mapping of len bytes. 
An offset value of 0 is interpreted as * granting the implementation complete freedom in selecting po, subject to * constraints described below. A non-zero value of offset is taken to be a * suggestion of an offset near which the mapping should be placed. When the * implementation selects a value for po, it does not replace any extant * window. In all cases, po will be a multiple of the page size. * * The physical pages which are so represented by a window are available for * access in calls to mmap(), scif_readfrom(), scif_writeto(), * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the * physical pages represented by the window will not be reused by the memory * subsystem for any other purpose. Note that the same physical page may be * represented by multiple windows. * * Subsequent operations which change the memory pages to which virtual * addresses are mapped (such as mmap(), munmap()) have no effect on * existing windows. * * If the process will fork(), it is recommended that the registered * virtual address range be marked with MADV_DONTFORK. Doing so will prevent * problems due to copy-on-write semantics. * * The prot_flags argument is formed by OR'ing together one or more of the * following values. * SCIF_PROT_READ - allow read operations from the window * SCIF_PROT_WRITE - allow write operations to the window * * Return: * Upon successful completion, scif_register() returns the offset at which the * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that * is (off_t)-1) is returned and errno is set to indicate the error; in * kernel mode the negative of one of the following errors is returned. * * Errors: * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range * [offset, offset + len - 1] are already registered * EAGAIN - The mapping could not be performed due to lack of resources * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is * set in map_flags, and offset is not a multiple of the page size, or addr is not a * multiple of the page size, or len is not a multiple of the page size, or is * 0, or offset is negative * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOMEM - Not enough space * ENOTCONN - The endpoint is not connected */ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset, int prot_flags, int map_flags); /** * scif_unregister() - Release a memory region registered for remote access. * @epd: endpoint descriptor * @offset: start of range to unregister * @len: length of range to unregister * * The scif_unregister() function closes those previously registered windows * which are entirely within the range [offset, offset + len - 1]. It is an * error to specify a range which intersects only a subrange of a window. * * On a successful return, pages within the window may no longer be specified * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(), * scif_vwriteto(), scif_get_pages(), and scif_fence_signal(). The window, * however, continues to exist until all previous references against it are * removed. A window is referenced if there is a mapping to it created by * mmap(), or if scif_get_pages() was called against the window * (and the pages have not been returned via scif_put_pages()). A window is * also referenced while an RMA, in which some range of the window is a source * or destination, is in progress. 
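 *
 * As an illustrative sketch (not part of the original header; buf is a
 * hypothetical page-aligned kernel buffer), a typical registration
 * lifetime pairs scif_register() above with scif_unregister():
 *
 *	off_t off;
 *
 *	off = scif_register(epd, buf, PAGE_SIZE, 0,
 *			    SCIF_PROT_READ | SCIF_PROT_WRITE, 0);
 *	if (off < 0)
 *		return (int)off;
 *	(RMA transfers against the window happen here)
 *	scif_unregister(epd, off, PAGE_SIZE);
 *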
Finally a window is referenced while some * offset in that window was specified to scif_fence_signal(), and the RMAs * marked by that call to scif_fence_signal() have not completed. While a * window is in this state, its registered address space pages are not * available for use in a new registered window. * * When all such references to the window have been removed, its references to * all the physical pages which it represents are removed. Similarly, the * registered address space pages of the window become available for * registration in a new window. * * Return: * Upon successful completion, scif_unregister() returns 0; otherwise in user * mode -1 is returned and errno is set to indicate the error; in kernel mode * the negative of one of the following errors is returned. In the event of an * error, no windows are unregistered. * * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a * window, or offset is negative * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOTCONN - The endpoint is not connected * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the * registered address space of epd */ int scif_unregister(scif_epd_t epd, off_t offset, size_t len); /** * scif_readfrom() - Copy from a remote address space * @epd: endpoint descriptor * @loffset: offset in local registered address space to * which to copy * @len: length of range to copy * @roffset: offset in remote registered address space * from which to copy * @rma_flags: transfer mode flags * * scif_readfrom() copies len bytes from the remote registered address space of * the peer of endpoint epd, starting at the offset roffset to the local * registered address space of epd, starting at the offset loffset. * * Each of the specified ranges [loffset, loffset + len - 1] and [roffset, * roffset + len - 1] must be within some registered window or windows of the * local and remote nodes. A range may intersect multiple registered windows, * but only if those windows are contiguous in the registered address space. * * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using * programmed read/writes. Otherwise the data is copied using DMA. If rma_- * flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after the * transfer is complete. Otherwise, the transfer may be performed asynchron- * ously. The order in which any two asynchronous RMA operations complete * is non-deterministic. The synchronization functions, scif_fence_mark()/ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to * the completion of asynchronous RMA operations on the same endpoint. * * The DMA transfer of individual bytes is not guaranteed to complete in * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last * cacheline or partial cacheline of the source range will become visible on * the destination node after all other transferred data in the source * range has become visible on the destination node. * * The optimal DMA performance will likely be realized if both * loffset and roffset are cacheline aligned (are a multiple of 64). Lower * performance will likely be realized if loffset and roffset are not * cacheline aligned but are separated by some multiple of 64. The lowest level * of performance is likely if loffset and roffset are not separated by a * multiple of 64. 
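 *
 * As an illustrative sketch (not part of the original header; loff and
 * roff are hypothetical offsets within already registered local and
 * remote windows), a synchronous pull of one page is simply:
 *
 *	int err = scif_readfrom(epd, loff, PAGE_SIZE, roff, SCIF_RMA_SYNC);
 *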
* * The rma_flags argument is formed by ORing together zero or more of the * following values. * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA * engine. * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the * transfer has completed. Passing this flag results in the * current implementation busy waiting and consuming CPU cycles * while the DMA transfer is in progress for best performance by * avoiding the interrupt latency. * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of * the source range becomes visible on the destination node * after all other transferred data in the source range has * become visible on the destination * * Return: * Upon successful completion, scif_readfrom() returns 0; otherwise in user * mode -1 is returned and errno is set to indicate the error; in kernel mode * the negative of one of the following errors is returned. * * Errors: * EACCES - Attempt to write to a read-only range * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * EINVAL - rma_flags is invalid * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOTCONN - The endpoint is not connected * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered * address space of epd, or the range [roffset, roffset + len - 1] is invalid * for the registered address space of the peer of epd, or loffset or roffset * is negative */ int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t roffset, int rma_flags); /** * scif_writeto() - Copy to a remote address space * @epd: endpoint descriptor * @loffset: offset in local registered address space * from which to copy * @len: length of range to copy * @roffset: offset in remote registered address space to * which to copy * @rma_flags: transfer mode flags * * scif_writeto() copies len bytes from the local registered address space of * epd, starting at the offset loffset to the remote registered address space * of the peer of endpoint epd, starting at the offset roffset. * * Each of the specified ranges [loffset, loffset + len - 1] and [roffset, * roffset + len - 1] must be within some registered window or windows of the * local and remote nodes. A range may intersect multiple registered windows, * but only if those windows are contiguous in the registered address space. * * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using * programmed read/writes. Otherwise the data is copied using DMA. If rma_- * flags includes SCIF_RMA_SYNC, then scif_writeto() will return after the * transfer is complete. Otherwise, the transfer may be performed asynchron- * ously. The order in which any two asynchronous RMA operations complete * is non-deterministic. The synchronization functions, scif_fence_mark()/ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to * the completion of asynchronous RMA operations on the same endpoint. * * The DMA transfer of individual bytes is not guaranteed to complete in * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last * cacheline or partial cacheline of the source range will become visible on * the destination node after all other transferred data in the source * range has become visible on the destination node. * * The optimal DMA performance will likely be realized if both * loffset and roffset are cacheline aligned (are a multiple of 64). 
Lower * performance will likely be realized if loffset and roffset are not cacheline * aligned but are separated by some multiple of 64. The lowest level of * performance is likely if loffset and roffset are not separated by a multiple * of 64. * * The rma_flags argument is formed by ORing together zero or more of the * following values. * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA * engine. * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the * transfer has completed. Passing this flag results in the * current implementation busy waiting and consuming CPU cycles * while the DMA transfer is in progress for best performance by * avoiding the interrupt latency. * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of * the source range becomes visible on the destination node * after all other transferred data in the source range has * become visible on the destination * * Return: * Upon successful completion, scif_writeto() returns 0; otherwise in user * mode -1 is returned and errno is set to indicate the error; in kernel mode * the negative of one of the following errors is returned. * * Errors: * EACCES - Attempt to write to a read-only range * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * EINVAL - rma_flags is invalid * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOTCONN - The endpoint is not connected * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered * address space of epd, or the range [roffset, roffset + len - 1] is invalid * for the registered address space of the peer of epd, or loffset or roffset * is negative */ int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t roffset, int rma_flags); /** * scif_vreadfrom() - Copy from a remote address space * @epd: endpoint descriptor * @addr: address to which to copy * @len: length of range to copy * @roffset: offset in remote registered address space * from which to copy * @rma_flags: transfer mode flags * * scif_vreadfrom() copies len bytes from the remote registered address * space of the peer of endpoint epd, starting at the offset roffset, to local * memory, starting at addr. * * The specified range [roffset, roffset + len - 1] must be within some * registered window or windows of the remote nodes. The range may * intersect multiple registered windows, but only if those windows are * contiguous in the registered address space. * * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using * programmed read/writes. Otherwise the data is copied using DMA. If rma_- * flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after the * transfer is complete. Otherwise, the transfer may be performed asynchron- * ously. The order in which any two asynchronous RMA operations complete * is non-deterministic. The synchronization functions, scif_fence_mark()/ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to * the completion of asynchronous RMA operations on the same endpoint. * * The DMA transfer of individual bytes is not guaranteed to complete in * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last * cacheline or partial cacheline of the source range will become visible on * the destination node after all other transferred data in the source * range has become visible on the destination node. 
* * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back * the specified local memory range may remain in a pinned state even after * the specified transfer completes. This may reduce overhead if some or all of * the same virtual address range is referenced in a subsequent call of * scif_vreadfrom() or scif_vwriteto(). * * The optimal DMA performance will likely be realized if both * addr and roffset are cacheline aligned (are a multiple of 64). Lower * performance will likely be realized if addr and roffset are not * cacheline aligned but are separated by some multiple of 64. The lowest level * of performance is likely if addr and roffset are not separated by a * multiple of 64. * * The rma_flags argument is formed by ORing together zero or more of the * following values. * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA * engine. * SCIF_RMA_USECACHE - enable registration caching * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the * transfer has completed. Passing this flag results in the * current implementation busy waiting and consuming CPU cycles * while the DMA transfer is in progress for best performance by * avoiding the interrupt latency. * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of * the source range becomes visible on the destination node * after all other transferred data in the source range has * become visible on the destination * * Return: * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user * mode -1 is returned and errno is set to indicate the error; in kernel mode * the negative of one of the following errors is returned. * * Errors: * EACCES - Attempt to write to a read-only range * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * EINVAL - rma_flags is invalid * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOTCONN - The endpoint is not connected * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the * registered address space of epd */ int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset, int rma_flags); /** * scif_vwriteto() - Copy to a remote address space * @epd: endpoint descriptor * @addr: address from which to copy * @len: length of range to copy * @roffset: offset in remote registered address space to * which to copy * @rma_flags: transfer mode flags * * scif_vwriteto() copies len bytes from the local memory, starting at addr, to * the remote registered address space of the peer of endpoint epd, starting at * the offset roffset. * * The specified range [roffset, roffset + len - 1] must be within some * registered window or windows of the remote nodes. The range may intersect * multiple registered windows, but only if those windows are contiguous in the * registered address space. * * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using * programmed read/writes. Otherwise the data is copied using DMA. If rma_- * flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after the * transfer is complete. Otherwise, the transfer may be performed asynchron- * ously. The order in which any two asynchronous RMA operations complete * is non-deterministic. The synchronization functions, scif_fence_mark()/ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to * the completion of asynchronous RMA operations on the same endpoint. 
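 *
 * As an illustrative sketch (not part of the original header; buf is a
 * hypothetical local kernel buffer and roff an offset within a registered
 * remote window), a synchronous, cached, ordered push is:
 *
 *	int err = scif_vwriteto(epd, buf, len, roff,
 *				SCIF_RMA_SYNC | SCIF_RMA_USECACHE |
 *				SCIF_RMA_ORDERED);
 *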
* * The DMA transfer of individual bytes is not guaranteed to complete in * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last * cacheline or partial cacheline of the source range will become visible on * the destination node after all other transferred data in the source * range has become visible on the destination node. * * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back * the specified local memory range may remain in a pinned state even after * the specified transfer completes. This may reduce overhead if some or all of * the same virtual address range is referenced in a subsequent call of * scif_vreadfrom() or scif_vwriteto(). * * The optimal DMA performance will likely be realized if both * addr and roffset are cacheline aligned (are a multiple of 64). Lower * performance will likely be realized if addr and roffset are not cacheline * aligned but are separated by some multiple of 64. The lowest level of * performance is likely if addr and roffset are not separated by a multiple of * 64. * * The rma_flags argument is formed by ORing together zero or more of the * following values. * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA * engine. * SCIF_RMA_USECACHE - allow registration caching * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the * transfer has completed. Passing this flag results in the * current implementation busy waiting and consuming CPU cycles * while the DMA transfer is in progress for best performance by * avoiding the interrupt latency. * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of * the source range becomes visible on the destination node * after all other transferred data in the source range has * become visible on the destination * * Return: * Upon successful completion, scif_vwriteto() returns 0; otherwise in user * mode -1 is returned and errno is set to indicate the error; in kernel mode * the negative of one of the following errors is returned. * * Errors: * EACCES - Attempt to write to a read-only range * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * EINVAL - rma_flags is invalid * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOTCONN - The endpoint is not connected * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the * registered address space of epd */ int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset, int rma_flags); /** * scif_fence_mark() - Mark previously issued RMAs * @epd: endpoint descriptor * @flags: control flags * @mark: marked value returned as output. * * scif_fence_mark() returns after marking the current set of all uncompleted * RMAs initiated through the endpoint epd or the current set of all * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are * marked with a value returned at mark. The application may subsequently call * scif_fence_wait(), passing the value returned at mark, to await completion * of all RMAs so marked. * * The flags argument has exactly one of the following values. 
* SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint * epd are marked * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer * of endpoint epd are marked * * Return: * Upon successful completion, scif_fence_mark() returns 0; otherwise in user * mode -1 is returned and errno is set to indicate the error; in kernel mode * the negative of one of the following errors is returned. * * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * EINVAL - flags is invalid * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOTCONN - The endpoint is not connected * ENOMEM - Insufficient kernel memory was available */ int scif_fence_mark(scif_epd_t epd, int flags, int *mark); /** * scif_fence_wait() - Wait for completion of marked RMAs * @epd: endpoint descriptor * @mark: mark request * * scif_fence_wait() returns after all RMAs marked with mark have completed. * The value passed in mark must have been obtained in a previous call to * scif_fence_mark(). * * Return: * Upon successful completion, scif_fence_wait() returns 0; otherwise in user * mode -1 is returned and errno is set to indicate the error; in kernel mode * the negative of one of the following errors is returned. * * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOTCONN - The endpoint is not connected * ENOMEM - Insufficient kernel memory was available */ int scif_fence_wait(scif_epd_t epd, int mark); /** * scif_fence_signal() - Request a memory update on completion of RMAs * @epd: endpoint descriptor * @loff: local offset * @lval: local value to write to loffset * @roff: remote offset * @rval: remote value to write to roffset * @flags: flags * * scif_fence_signal() returns after marking the current set of all uncompleted * RMAs initiated through the endpoint epd or marking the current set of all * uncompleted RMAs initiated through the peer of endpoint epd. * * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the * marked set, lval is written to memory at the address corresponding to offset * loff in the local registered address space of epd. loff must be within a * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion * of the RMAs in the marked set, rval is written to memory at the address * corresponding to offset roff in the remote registered address space of epd. * roff must be within a remote registered window of the peer of epd. Note * that any specified offset must be DWORD (4 byte / 32 bit) aligned. * * The flags argument is formed by OR'ing together the following. * Exactly one of the following values. * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint * epd are marked * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer * of endpoint epd are marked * One or more of the following values. * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to * memory at the address corresponding to offset loff in the local * registered address space of epd. * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to * memory at the address corresponding to offset roff in the remote * registered address space of epd. 
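 *
 * As an illustrative sketch (not part of the original header; loff is a
 * hypothetical DWORD-aligned offset in a local registered window), waiting
 * on one's own RMAs with scif_fence_mark()/scif_fence_wait() above, or
 * requesting a completion write with scif_fence_signal(), looks like:
 *
 *	int mark, err;
 *
 *	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
 *	if (!err)
 *		err = scif_fence_wait(epd, mark);
 *
 *	err = scif_fence_signal(epd, loff, 1, 0, 0,
 *				SCIF_FENCE_INIT_SELF | SCIF_SIGNAL_LOCAL);
 *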
* * Return: * Upon successful completion, scif_fence_signal() returns 0; otherwise in * user mode -1 is returned and errno is set to indicate the error; in kernel * mode the negative of one of the following errors is returned. * * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer * EINVAL - flags is invalid, or loff or roff are not DWORD aligned * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOTCONN - The endpoint is not connected * ENXIO - loff is invalid for the registered address of epd, or roff is invalid * for the registered address space, of the peer of epd */ int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff, u64 rval, int flags); /** * scif_get_node_ids() - Return information about online nodes * @nodes: array in which to return online node IDs * @len: number of entries in the nodes array * @self: address to place the node ID of the local node * * scif_get_node_ids() fills in the nodes array with up to len node IDs of the * nodes in the SCIF network. If there is not enough space in nodes, as * indicated by the len parameter, only len node IDs are returned in nodes. The * return value of scif_get_node_ids() is the total number of nodes currently in * the SCIF network. By checking the return value against the len parameter, * the user may determine if enough space for nodes was allocated. * * The node ID of the local node is returned at self. * * Return: * Upon successful completion, scif_get_node_ids() returns the actual number of * online nodes in the SCIF network including 'self'; otherwise in user mode * -1 is returned and errno is set to indicate the error; in kernel mode no * errors are returned. */ int scif_get_node_ids(u16 *nodes, int len, u16 *self); /** * scif_pin_pages() - Pin a set of pages * @addr: Virtual address of range to pin * @len: Length of range to pin * @prot_flags: Page protection flags * @map_flags: Page classification flags * @pinned_pages: Handle to pinned pages * * scif_pin_pages() pins (locks in physical memory) the physical pages which * back the range of virtual address pages starting at addr and continuing for * len bytes. addr and len are constrained to be multiples of the page size. A * successful scif_pin_pages() call returns a handle to pinned_pages which may * be used in subsequent calls to scif_register_pinned_pages(). * * The pages will remain pinned as long as there is a reference against the * scif_pinned_pages_t value returned by scif_pin_pages() and until * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A * reference is added to a scif_pinned_pages_t value each time a window is * created by calling scif_register_pinned_pages() and passing the * scif_pinned_pages_t value. A reference is removed from a * scif_pinned_pages_t value each time such a window is deleted. * * Subsequent operations which change the memory pages to which virtual * addresses are mapped (such as mmap(), munmap()) have no effect on the * scif_pinned_pages_t value or windows created against it. * * If the process will fork(), it is recommended that the registered * virtual address range be marked with MADV_DONTFORK. Doing so will prevent * problems due to copy-on-write semantics. * * The prot_flags argument is formed by OR'ing together one or more of the * following values. 
* SCIF_PROT_READ - allow read operations against the pages * SCIF_PROT_WRITE - allow write operations against the pages * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a * kernel space address. By default, addr is interpreted as a user space * address. * * Return: * Upon successful completion, scif_pin_pages() returns 0; otherwise the * negative of one of the following errors is returned. * * Errors: * EINVAL - prot_flags is invalid, map_flags is invalid, or offset is negative * ENOMEM - Not enough space */ int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags, scif_pinned_pages_t *pinned_pages); /** * scif_unpin_pages() - Unpin a set of pages * @pinned_pages: Handle to pinned pages to be unpinned * * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new * windows against pinned_pages. The physical pages represented by pinned_pages * will remain pinned until all windows previously registered against * pinned_pages are deleted (the window is scif_unregister()'d and all * references to the window are removed; see scif_unregister()). * * pinned_pages must have been obtained from a previous call to scif_pin_pages(). * After calling scif_unpin_pages(), it is an error to pass pinned_pages to * scif_register_pinned_pages(). * * Return: * Upon successful completion, scif_unpin_pages() returns 0; otherwise the * negative of one of the following errors is returned. * * Errors: * EINVAL - pinned_pages is not valid */ int scif_unpin_pages(scif_pinned_pages_t pinned_pages); /** * scif_register_pinned_pages() - Mark a memory region for remote access. * @epd: endpoint descriptor * @pinned_pages: Handle to pinned pages * @offset: Registered address space offset * @map_flags: Flags which control where pages are mapped * * The scif_register_pinned_pages() function opens a window, a range of whole * pages of the registered address space of the endpoint epd, starting at * offset po. The value of po, further described below, is a function of the * parameters offset and pinned_pages, and the value of map_flags. Each page of * the window represents a corresponding physical memory page of the range * represented by pinned_pages; the length of the window is the same as the * length of the range represented by pinned_pages. A successful * scif_register_pinned_pages() call returns po as the return value. * * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset * exactly, and offset is constrained to be a multiple of the page size. The * mapping established by scif_register_pinned_pages() will not replace any * existing registration; an error is returned if any page of the new window * would intersect an existing window. * * When SCIF_MAP_FIXED is not set, the implementation uses offset in an * implementation-defined manner to arrive at po. The po so chosen will be an * area of the registered address space that the implementation deems suitable * for a mapping of the required size. An offset value of 0 is interpreted as * granting the implementation complete freedom in selecting po, subject to * constraints described below. A non-zero value of offset is taken to be a * suggestion of an offset near which the mapping should be placed. When the * implementation selects a value for po, it does not replace any extant * window. In all cases, po will be a multiple of the page size. 
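 *
 * As an illustrative sketch (not part of the original header; kbuf is a
 * hypothetical page-aligned kernel buffer), pinning pages with
 * scif_pin_pages() above and exposing them through this call is:
 *
 *	scif_pinned_pages_t pp;
 *	off_t off;
 *	int err;
 *
 *	err = scif_pin_pages(kbuf, PAGE_SIZE,
 *			     SCIF_PROT_READ | SCIF_PROT_WRITE,
 *			     SCIF_MAP_KERNEL, &pp);
 *	if (err)
 *		return err;
 *	off = scif_register_pinned_pages(epd, pp, 0, 0);
 *	if (off < 0)
 *		scif_unpin_pages(pp);
 *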
* * The physical pages which are so represented by a window are available for * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(), * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the * physical pages represented by the window will not be reused by the memory * subsystem for any other purpose. Note that the same physical page may be * represented by multiple windows. * * Windows created by scif_register_pinned_pages() are unregistered by * scif_unregister(). * * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a * fixed offset. * * Return: * Upon successful completion, scif_register_pinned_pages() returns the offset * at which the mapping was placed (po); otherwise the negative of one of the * following errors is returned. * * Errors: * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window * would intersect an existing window * EAGAIN - The mapping could not be performed due to lack of resources * ECONNRESET - Connection reset by peer * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and * offset is not a multiple of the page size, or offset is negative * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed * ENOMEM - Not enough space * ENOTCONN - The endpoint is not connected */ off_t scif_register_pinned_pages(scif_epd_t epd, scif_pinned_pages_t pinned_pages, off_t offset, int map_flags); /** * scif_get_pages() - Add references to remote registered pages * @epd: endpoint descriptor * @offset: remote registered offset * @len: length of range of pages * @pages: returned scif_range structure * * scif_get_pages() returns the addresses of the physical pages represented by * those pages of the registered address space of the peer of epd, starting at * offset and continuing for len bytes. offset and len are constrained to be * multiples of the page size. * * All of the pages in the specified range [offset, offset + len - 1] must be * within a single window of the registered address space of the peer of epd. * * The addresses are returned as a virtually contiguous array pointed to by the * phys_addr component of the scif_range structure whose address is returned in * pages. The nr_pages component of scif_range is the length of the array. The * prot_flags component of scif_range holds the protection flag value passed * when the pages were registered. * * Each physical page whose address is returned by scif_get_pages() remains * available and will not be released for reuse until the scif_range structure * is returned in a call to scif_put_pages(). The scif_range structure returned * by scif_get_pages() must be unmodified. * * It is an error to call scif_close() on an endpoint on which a scif_range * structure of that endpoint has not been returned to scif_put_pages(). * * Return: * Upon successful completion, scif_get_pages() returns 0; otherwise the * negative of one of the following errors is returned. * Errors: * ECONNRESET - Connection reset by peer. 
* EINVAL - offset is not a multiple of the page size, or offset is negative, or * len is not a multiple of the page size * ENODEV - The remote node is lost or exited and is not currently in the * network since it may have crashed * ENOTCONN - The endpoint is not connected * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid * for the registered address space of the peer of epd */ int scif_get_pages(scif_epd_t epd, off_t offset, size_t len, struct scif_range **pages); /** * scif_put_pages() - Remove references from remote registered pages * @pages: pages to be returned * * scif_put_pages() releases a scif_range structure previously obtained by * calling scif_get_pages(). The physical pages represented by pages may * be reused when the window which represented those pages is unregistered. * Therefore, those pages must not be accessed after calling scif_put_pages(). * * Return: * Upon successful completion, scif_put_pages() returns 0; otherwise the * negative of one of the following errors is returned. * Errors: * EINVAL - pages does not point to a valid scif_range structure, or * the scif_range structure pointed to by pages was already returned * ENODEV - The remote node is lost or exited and is not currently in the * network since it may have crashed * ENOTCONN - The endpoint is not connected */ int scif_put_pages(struct scif_range *pages); /** * scif_poll() - Wait for some event on an endpoint * @epds: Array of endpoint descriptors * @nepds: Length of epds * @timeout: Upper limit on time for which scif_poll() will block * * scif_poll() waits for one of a set of endpoints to become ready to perform * an I/O operation. * * The epds argument specifies the endpoint descriptors to be examined and the * events of interest for each endpoint descriptor. epds is a pointer to an * array with one member for each open endpoint descriptor of interest. * * The number of items in the epds array is specified in nepds. The epd field * of scif_pollepd is an endpoint descriptor of an open endpoint. The field * events is a bitmask specifying the events which the application is * interested in. The field revents is an output parameter, filled by the * kernel with the events that actually occurred. The bits returned in revents * can include any of those specified in events, or one of the values EPOLLERR, * EPOLLHUP, or EPOLLNVAL. (These three bits are meaningless in the events * field, and will be set in the revents field whenever the corresponding * condition is true.) * * If none of the events requested (and no error) has occurred for any of the * endpoint descriptors, then scif_poll() blocks until one of the events occurs. * * The timeout argument specifies an upper limit on the time for which * scif_poll() will block, in milliseconds. Specifying a negative value in * timeout means an infinite timeout. * * The following bits may be set in events and returned in revents. * EPOLLIN - Data may be received without blocking. For a connected * endpoint, this means that scif_recv() may be called without blocking. For a * listening endpoint, this means that scif_accept() may be called without * blocking. * EPOLLOUT - Data may be sent without blocking. For a connected endpoint, this * means that scif_send() may be called without blocking. EPOLLOUT may also be * used to block waiting for a non-blocking connect to complete. This bit value * has no meaning for a listening endpoint and is ignored if specified. * * The following bits are only returned in revents, and are ignored if set in * events.
* EPOLLERR - An error occurred on the endpoint * EPOLLHUP - The connection to the peer endpoint was disconnected * EPOLLNVAL - The specified endpoint descriptor is invalid. * * Return: * Upon successful completion, scif_poll() returns a non-negative value. A * positive value indicates the total number of endpoint descriptors that have * been selected (that is, endpoint descriptors for which the revents member is * non-zero). A value of 0 indicates that the call timed out and no endpoint * descriptors have been selected. Otherwise in user mode -1 is returned and * errno is set to indicate the error; in kernel mode the negative of one of * the following errors is returned. * * Errors: * EINTR - A signal occurred before any requested event * EINVAL - The nepds argument is greater than {OPEN_MAX} * ENOMEM - There was no space to allocate file descriptor tables */ int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout); /** * scif_client_register() - Register a SCIF client * @client: client to be registered * * scif_client_register() registers a SCIF client. The probe() method * of the client is called when SCIF peer devices come online and the * remove() method is called when the peer devices disappear. * * Return: * Upon successful completion, scif_client_register() returns a non-negative * value. Otherwise the return value is the same as subsys_interface_register() * in the kernel. */ int scif_client_register(struct scif_client *client); /** * scif_client_unregister() - Unregister a SCIF client * @client: client to be unregistered * * scif_client_unregister() unregisters a SCIF client. * * Return: * None */ void scif_client_unregister(struct scif_client *client); #endif /* __SCIF_H__ */ fs.h 0000644 00000354761 14722070374 0005353 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FS_H #define _LINUX_FS_H #include <linux/linkage.h> #include <linux/wait_bit.h> #include <linux/kdev_t.h> #include <linux/dcache.h> #include <linux/path.h> #include <linux/stat.h> #include <linux/cache.h> #include <linux/list.h> #include <linux/list_lru.h> #include <linux/llist.h> #include <linux/radix-tree.h> #include <linux/xarray.h> #include <linux/rbtree.h> #include <linux/init.h> #include <linux/pid.h> #include <linux/bug.h> #include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/mm_types.h> #include <linux/capability.h> #include <linux/semaphore.h> #include <linux/fcntl.h> #include <linux/fiemap.h> #include <linux/rculist_bl.h> #include <linux/atomic.h> #include <linux/shrinker.h> #include <linux/migrate_mode.h> #include <linux/uidgid.h> #include <linux/lockdep.h> #include <linux/percpu-rwsem.h> #include <linux/workqueue.h> #include <linux/delayed_call.h> #include <linux/uuid.h> #include <linux/errseq.h> #include <linux/ioprio.h> #include <linux/fs_types.h> #include <linux/build_bug.h> #include <linux/stddef.h> #include <asm/byteorder.h> #include <uapi/linux/fs.h> struct backing_dev_info; struct bdi_writeback; struct bio; struct export_operations; struct hd_geometry; struct iovec; struct kiocb; struct kobject; struct pipe_inode_info; struct poll_table_struct; struct kstatfs; struct vm_area_struct; struct vfsmount; struct cred; struct swap_info_struct; struct seq_file; struct workqueue_struct; struct iov_iter; struct fscrypt_info; struct fscrypt_operations; struct fsverity_info; struct fsverity_operations; struct fs_context; struct fs_parameter_description; extern void __init inode_init(void); extern void __init inode_init_early(void); extern void __init 
files_init(void); extern void __init files_maxfiles_init(void); extern struct files_stat_struct files_stat; extern unsigned long get_max_files(void); extern unsigned int sysctl_nr_open; extern struct inodes_stat_t inodes_stat; extern int leases_enable, lease_break_time; extern int sysctl_protected_symlinks; extern int sysctl_protected_hardlinks; extern int sysctl_protected_fifos; extern int sysctl_protected_regular; typedef __kernel_rwf_t rwf_t; struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, ssize_t bytes, void *private); #define MAY_EXEC 0x00000001 #define MAY_WRITE 0x00000002 #define MAY_READ 0x00000004 #define MAY_APPEND 0x00000008 #define MAY_ACCESS 0x00000010 #define MAY_OPEN 0x00000020 #define MAY_CHDIR 0x00000040 /* called from RCU mode, don't block */ #define MAY_NOT_BLOCK 0x00000080 /* * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open() */ /* file is open for reading */ #define FMODE_READ ((__force fmode_t)0x1) /* file is open for writing */ #define FMODE_WRITE ((__force fmode_t)0x2) /* file is seekable */ #define FMODE_LSEEK ((__force fmode_t)0x4) /* file can be accessed using pread */ #define FMODE_PREAD ((__force fmode_t)0x8) /* file can be accessed using pwrite */ #define FMODE_PWRITE ((__force fmode_t)0x10) /* File is opened for execution with sys_execve / sys_uselib */ #define FMODE_EXEC ((__force fmode_t)0x20) /* File is opened with O_NDELAY (only set for block devices) */ #define FMODE_NDELAY ((__force fmode_t)0x40) /* File is opened with O_EXCL (only set for block devices) */ #define FMODE_EXCL ((__force fmode_t)0x80) /* File is opened using open(.., 3, ..) and is writeable only for ioctls (special hack for floppy.c) */ #define FMODE_WRITE_IOCTL ((__force fmode_t)0x100) /* 32bit hashes as llseek() offset (for directories) */ #define FMODE_32BITHASH ((__force fmode_t)0x200) /* 64bit hashes as llseek() offset (for directories) */ #define FMODE_64BITHASH ((__force fmode_t)0x400) /* * Don't update ctime and mtime. * * Currently a special hack for the XFS open_by_handle ioctl, but we'll * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. */ #define FMODE_NOCMTIME ((__force fmode_t)0x800) /* Expect random access pattern */ #define FMODE_RANDOM ((__force fmode_t)0x1000) /* File is huge (eg.
/dev/kmem): treat loff_t as unsigned */ #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) /* File is opened with O_PATH; almost nothing can be done with it */ #define FMODE_PATH ((__force fmode_t)0x4000) /* File needs atomic accesses to f_pos */ #define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) /* Write access to underlying fs */ #define FMODE_WRITER ((__force fmode_t)0x10000) /* Has read method(s) */ #define FMODE_CAN_READ ((__force fmode_t)0x20000) /* Has write method(s) */ #define FMODE_CAN_WRITE ((__force fmode_t)0x40000) #define FMODE_OPENED ((__force fmode_t)0x80000) #define FMODE_CREATED ((__force fmode_t)0x100000) /* File is stream-like */ #define FMODE_STREAM ((__force fmode_t)0x200000) /* File was opened by fanotify and shouldn't generate fanotify events */ #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) /* File is capable of returning -EAGAIN if I/O will block */ #define FMODE_NOWAIT ((__force fmode_t)0x8000000) /* File represents mount that needs unmounting */ #define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000) /* File does not contribute to nr_files count */ #define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) /* * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector * that indicates that they should check that the contents of the iovec are * valid, but not check the memory that the iovec elements * point to. */ #define CHECK_IOVEC_ONLY -1 /* * Attribute flags. These should be or-ed together to figure out what * has been changed! */ #define ATTR_MODE (1 << 0) #define ATTR_UID (1 << 1) #define ATTR_GID (1 << 2) #define ATTR_SIZE (1 << 3) #define ATTR_ATIME (1 << 4) #define ATTR_MTIME (1 << 5) #define ATTR_CTIME (1 << 6) #define ATTR_ATIME_SET (1 << 7) #define ATTR_MTIME_SET (1 << 8) #define ATTR_FORCE (1 << 9) /* Not a change, but a request to force the change */ #define ATTR_KILL_SUID (1 << 11) #define ATTR_KILL_SGID (1 << 12) #define ATTR_FILE (1 << 13) #define ATTR_KILL_PRIV (1 << 14) #define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ #define ATTR_TIMES_SET (1 << 16) #define ATTR_TOUCH (1 << 17) /* * Whiteout is represented by a char device. The following constants define the * mode and device number to use. */ #define WHITEOUT_MODE 0 #define WHITEOUT_DEV 0 /* * This is the Inode Attributes structure, used for notify_change(). It * uses the above definitions as flags, to know which values have changed. * Also, in this manner, a Filesystem can look at only the values it cares * about. Basically, these are the attributes that the VFS layer can * request to change from the FS layer. * * Derek Atkins <warlord@MIT.EDU> 94-10-20 */ struct iattr { unsigned int ia_valid; umode_t ia_mode; kuid_t ia_uid; kgid_t ia_gid; loff_t ia_size; struct timespec64 ia_atime; struct timespec64 ia_mtime; struct timespec64 ia_ctime; /* * Not an attribute, but an auxiliary info for filesystems wanting to * implement an ftruncate() like method. NOTE: filesystem should * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL). */ struct file *ia_file; }; /* * Includes for diskquotas. */ #include <linux/quota.h> /* * Maximum number of layers of fs stack. Needs to be limited to * prevent kernel stack overflow */ #define FILESYSTEM_MAX_STACK_DEPTH 2 /** * enum positive_aop_returns - aop return codes with specific semantics * * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has * completed, that the page is still locked, and * should be considered active.
The VM uses this hint * to return the page to the active list -- it won't * be a candidate for writeback again in the near * future. Other callers must be careful to unlock * the page if they get this return. Returned by * writepage(); * * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has * unlocked it and the page might have been truncated. * The caller should back up to acquiring a new page and * trying again. The aop will be taking reasonable * precautions not to livelock. If the caller held a page * reference, it should drop it before retrying. Returned * by readpage(). * * address_space_operations functions return these large constants to indicate * special semantics to the caller. These are much larger than the bytes in a * page to allow for functions that return the number of bytes operated on in a * given page. */ enum positive_aop_returns { AOP_WRITEPAGE_ACTIVATE = 0x80000, AOP_TRUNCATED_PAGE = 0x80001, }; #define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */ #define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct * helper code (eg buffer layer) * to clear GFP_FS from alloc */ /* * oh the beauties of C type declarations. */ struct page; struct address_space; struct writeback_control; /* * Write life time hint values. * Stored in struct inode as u8. */ enum rw_hint { WRITE_LIFE_NOT_SET = 0, WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE, WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT, WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM, WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG, WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME, }; #define IOCB_EVENTFD (1 << 0) #define IOCB_APPEND (1 << 1) #define IOCB_DIRECT (1 << 2) #define IOCB_HIPRI (1 << 3) #define IOCB_DSYNC (1 << 4) #define IOCB_SYNC (1 << 5) #define IOCB_WRITE (1 << 6) #define IOCB_NOWAIT (1 << 7) /* kiocb is a read or write operation submitted by fs/aio.c. */ #define IOCB_AIO_RW (1 << 23) struct kiocb { struct file *ki_filp; /* The 'ki_filp' pointer is shared in a union for aio */ randomized_struct_fields_start loff_t ki_pos; void (*ki_complete)(struct kiocb *iocb, long ret, long ret2); void *private; int ki_flags; u16 ki_hint; u16 ki_ioprio; /* See linux/ioprio.h */ unsigned int ki_cookie; /* for ->iopoll */ randomized_struct_fields_end }; static inline bool is_sync_kiocb(struct kiocb *kiocb) { return kiocb->ki_complete == NULL; } /* * "descriptor" for what we're up to with a read. * This allows us to use the same read code yet * have multiple different users of the data that * we read from a file. * * The simplest case just copies the data to user * mode. */ typedef struct { size_t written; size_t count; union { char __user *buf; void *data; } arg; int error; } read_descriptor_t; typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long); struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); /* Write back some dirty pages from this mapping. */ int (*writepages)(struct address_space *, struct writeback_control *); /* Set a page dirty. Return true if this dirtied it */ int (*set_page_dirty)(struct page *page); /* * Reads in the requested pages. Unlike ->readpage(), this is * PURELY used for read-ahead!
*/ int (*readpages)(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); int (*write_end)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ sector_t (*bmap)(struct address_space *, sector_t); void (*invalidatepage) (struct page *, unsigned int, unsigned int); int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* * migrate the contents of a page to the specified target. If * migrate_mode is MIGRATE_ASYNC, it must not block. */ int (*migratepage) (struct address_space *, struct page *, struct page *, enum migrate_mode); bool (*isolate_page)(struct page *, isolate_mode_t); void (*putback_page)(struct page *); int (*launder_page) (struct page *); int (*is_partially_uptodate) (struct page *, unsigned long, unsigned long); void (*is_dirty_writeback) (struct page *, bool *, bool *); int (*error_remove_page)(struct address_space *, struct page *); /* swapfile support */ int (*swap_activate)(struct swap_info_struct *sis, struct file *file, sector_t *span); void (*swap_deactivate)(struct file *file); }; extern const struct address_space_operations empty_aops; /* * pagecache_write_begin/pagecache_write_end must be used by general code * to write into the pagecache. */ int pagecache_write_begin(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); int pagecache_write_end(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); /** * struct address_space - Contents of a cacheable, mappable object. * @host: Owner, either the inode or the block_device. * @i_pages: Cached pages. * @gfp_mask: Memory allocation flags to use for allocating pages. * @i_mmap_writable: Number of VM_SHARED mappings. * @nr_thps: Number of THPs in the pagecache (non-shmem only). * @i_mmap: Tree of private and shared mappings. * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. * @nrpages: Number of page entries, protected by the i_pages lock. * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock. * @writeback_index: Writeback starts here. * @a_ops: Methods. * @flags: Error bits and flags (AS_*). * @wb_err: The most recent error which has occurred. * @private_lock: For use by the owner of the address_space. * @private_list: For use by the owner of the address_space. * @private_data: For use by the owner of the address_space. 
*/ struct address_space { struct inode *host; struct xarray i_pages; gfp_t gfp_mask; atomic_t i_mmap_writable; #ifdef CONFIG_READ_ONLY_THP_FOR_FS /* number of thp, only for non-shmem files */ atomic_t nr_thps; #endif struct rb_root_cached i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; unsigned long nrexceptional; pgoff_t writeback_index; const struct address_space_operations *a_ops; unsigned long flags; errseq_t wb_err; spinlock_t private_lock; struct list_head private_list; void *private_data; } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* * On most architectures that alignment is already the case, but it * must be enforced here for CRIS, to let the least significant bit * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. */ struct request_queue; struct block_device { dev_t bd_dev; /* not a kdev_t - it's a search key */ int bd_openers; struct inode * bd_inode; /* will die */ struct super_block * bd_super; struct mutex bd_mutex; /* open/close mutex */ void * bd_claiming; void * bd_holder; int bd_holders; bool bd_write_holder; #ifdef CONFIG_SYSFS struct list_head bd_holder_disks; #endif struct block_device * bd_contains; unsigned bd_block_size; u8 bd_partno; struct hd_struct * bd_part; /* number of times partitions within this device have been opened. */ unsigned bd_part_count; int bd_invalidated; struct gendisk * bd_disk; struct request_queue * bd_queue; struct backing_dev_info *bd_bdi; struct list_head bd_list; /* * Private data. You must have bd_claim'ed the block_device * to use this. NOTE: bd_claim allows an owner to claim * the same device multiple times, the owner must take special * care to not mess up bd_private for that case. */ unsigned long bd_private; /* The counter of freeze processes */ int bd_fsfreeze_count; /* Mutex for freeze */ struct mutex bd_fsfreeze_mutex; } __randomize_layout; /* XArray tags, for tagging dirty and writeback pages in the pagecache. */ #define PAGECACHE_TAG_DIRTY XA_MARK_0 #define PAGECACHE_TAG_WRITEBACK XA_MARK_1 #define PAGECACHE_TAG_TOWRITE XA_MARK_2 /* * Returns true if any of the pages in the mapping are marked with the tag. */ static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag) { return xa_marked(&mapping->i_pages, tag); } static inline void i_mmap_lock_write(struct address_space *mapping) { down_write(&mapping->i_mmap_rwsem); } static inline void i_mmap_unlock_write(struct address_space *mapping) { up_write(&mapping->i_mmap_rwsem); } static inline void i_mmap_lock_read(struct address_space *mapping) { down_read(&mapping->i_mmap_rwsem); } static inline void i_mmap_unlock_read(struct address_space *mapping) { up_read(&mapping->i_mmap_rwsem); } /* * Might pages of this file be mapped into userspace? */ static inline int mapping_mapped(struct address_space *mapping) { return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root); } /* * Might pages of this file have been modified in userspace? * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff() * marks a vma as VM_SHARED if it is shared and the file was opened for * writing; i.e., the vma may be mprotect()ed writable even if it is * currently read-only. * * If i_mmap_writable is negative, no new writable mappings are allowed. You * can only deny writable mappings if none exist right now.
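 *
 * For example, a shared-mmap path might use the helpers below roughly as
 * follows (a sketch; error handling abbreviated):
 *
 *	if (vma->vm_flags & VM_SHARED) {
 *		error = mapping_map_writable(file->f_mapping);
 *		if (error)
 *			return error;
 *	}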
*/ static inline int mapping_writably_mapped(struct address_space *mapping) { return atomic_read(&mapping->i_mmap_writable) > 0; } static inline int mapping_map_writable(struct address_space *mapping) { return atomic_inc_unless_negative(&mapping->i_mmap_writable) ? 0 : -EPERM; } static inline void mapping_unmap_writable(struct address_space *mapping) { atomic_dec(&mapping->i_mmap_writable); } static inline int mapping_deny_writable(struct address_space *mapping) { return atomic_dec_unless_positive(&mapping->i_mmap_writable) ? 0 : -EBUSY; } static inline void mapping_allow_writable(struct address_space *mapping) { atomic_inc(&mapping->i_mmap_writable); } /* * Use sequence counter to get consistent i_size on 32-bit processors. */ #if BITS_PER_LONG==32 && defined(CONFIG_SMP) #include <linux/seqlock.h> #define __NEED_I_SIZE_ORDERED #define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount) #else #define i_size_ordered_init(inode) do { } while (0) #endif struct posix_acl; #define ACL_NOT_CACHED ((void *)(-1)) #define ACL_DONT_CACHE ((void *)(-3)) static inline struct posix_acl * uncached_acl_sentinel(struct task_struct *task) { return (void *)task + 1; } static inline bool is_uncached_acl(struct posix_acl *acl) { return (long)acl & 1; } #define IOP_FASTPERM 0x0001 #define IOP_LOOKUP 0x0002 #define IOP_NOFOLLOW 0x0004 #define IOP_XATTR 0x0008 #define IOP_DEFAULT_READLINK 0x0010 struct fsnotify_mark_connector; /* * Keep mostly read-only and often accessed (especially for * the RCU path lookup and 'stat' data) fields at the beginning * of the 'struct inode' */ struct inode { umode_t i_mode; unsigned short i_opflags; kuid_t i_uid; kgid_t i_gid; unsigned int i_flags; #ifdef CONFIG_FS_POSIX_ACL struct posix_acl *i_acl; struct posix_acl *i_default_acl; #endif const struct inode_operations *i_op; struct super_block *i_sb; struct address_space *i_mapping; #ifdef CONFIG_SECURITY void *i_security; #endif /* Stat data, not accessed from path walking */ unsigned long i_ino; /* * Filesystems may only read i_nlink directly. 
They shall use the * following functions for modification: * * (set|clear|inc|drop)_nlink * inode_(inc|dec)_link_count */ union { const unsigned int i_nlink; unsigned int __i_nlink; }; dev_t i_rdev; loff_t i_size; struct timespec64 i_atime; struct timespec64 i_mtime; struct timespec64 i_ctime; spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ unsigned short i_bytes; u8 i_blkbits; u8 i_write_hint; blkcnt_t i_blocks; #ifdef __NEED_I_SIZE_ORDERED seqcount_t i_size_seqcount; #endif /* Misc */ unsigned long i_state; struct rw_semaphore i_rwsem; unsigned long dirtied_when; /* jiffies of first dirtying */ unsigned long dirtied_time_when; struct hlist_node i_hash; struct list_head i_io_list; /* backing dev IO list */ #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *i_wb; /* the associated cgroup wb */ /* foreign inode detection, see wbc_detach_inode() */ int i_wb_frn_winner; u16 i_wb_frn_avg_time; u16 i_wb_frn_history; #endif struct list_head i_lru; /* inode LRU list */ struct list_head i_sb_list; struct list_head i_wb_list; /* backing dev writeback list */ union { struct hlist_head i_dentry; struct rcu_head i_rcu; }; atomic64_t i_version; atomic64_t i_sequence; /* see futex */ atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; #if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) atomic_t i_readcount; /* struct files open RO */ #endif union { const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ void (*free_inode)(struct inode *); }; struct file_lock_context *i_flctx; struct address_space i_data; struct list_head i_devices; union { struct pipe_inode_info *i_pipe; struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; unsigned i_dir_seq; }; __u32 i_generation; #ifdef CONFIG_FSNOTIFY __u32 i_fsnotify_mask; /* all events this inode cares about */ struct fsnotify_mark_connector __rcu *i_fsnotify_marks; #endif #ifdef CONFIG_FS_ENCRYPTION struct fscrypt_info *i_crypt_info; #endif #ifdef CONFIG_FS_VERITY struct fsverity_info *i_verity_info; #endif void *i_private; /* fs or device private pointer */ } __randomize_layout; struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode); static inline unsigned int i_blocksize(const struct inode *node) { return (1 << node->i_blkbits); } static inline int inode_unhashed(struct inode *inode) { return hlist_unhashed(&inode->i_hash); } /* * __mark_inode_dirty expects inodes to be hashed. Since we don't * want special inodes in the fileset inode space, we make them * appear hashed, but do not put on any lists. hlist_del() * will work fine and require no locking. */ static inline void inode_fake_hash(struct inode *inode) { hlist_add_fake(&inode->i_hash); } /* * inode->i_mutex nesting subclasses for the lock validator: * * 0: the object of the current VFS operation * 1: parent * 2: child/target * 3: xattr * 4: second non-directory * 5: second parent (when locking independent directories in rename) * * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two * non-directories at once. 
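 *
 * For example, a creation-type operation takes the parent directory's lock
 * with its subclass before looking up the child (a sketch of the pattern
 * used by filename_create()):
 *
 *	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
 *	... look up and create the child ...
 *	inode_unlock(d_inode(parent));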
* * The locking order between these classes is * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory */ enum inode_i_mutex_lock_class { I_MUTEX_NORMAL, I_MUTEX_PARENT, I_MUTEX_CHILD, I_MUTEX_XATTR, I_MUTEX_NONDIR2, I_MUTEX_PARENT2, }; static inline void inode_lock(struct inode *inode) { down_write(&inode->i_rwsem); } static inline void inode_unlock(struct inode *inode) { up_write(&inode->i_rwsem); } static inline void inode_lock_shared(struct inode *inode) { down_read(&inode->i_rwsem); } static inline void inode_unlock_shared(struct inode *inode) { up_read(&inode->i_rwsem); } static inline int inode_trylock(struct inode *inode) { return down_write_trylock(&inode->i_rwsem); } static inline int inode_trylock_shared(struct inode *inode) { return down_read_trylock(&inode->i_rwsem); } static inline int inode_is_locked(struct inode *inode) { return rwsem_is_locked(&inode->i_rwsem); } static inline void inode_lock_nested(struct inode *inode, unsigned subclass) { down_write_nested(&inode->i_rwsem, subclass); } static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass) { down_read_nested(&inode->i_rwsem, subclass); } void lock_two_nondirectories(struct inode *, struct inode*); void unlock_two_nondirectories(struct inode *, struct inode*); /* * NOTE: in a 32bit arch with a preemptible kernel and * a UP compile the i_size_read/write must be atomic * with respect to the local cpu (unlike with preempt disabled), * but they don't need to be atomic with respect to other cpus like in * true SMP (so they either need to locally disable irq around * the read, or, for example on x86, they can still be implemented as a * cmpxchg8b without the need of the lock prefix). For SMP compiles * and 64bit archs it makes no difference if preempt is enabled or not. */ static inline loff_t i_size_read(const struct inode *inode) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) loff_t i_size; unsigned int seq; do { seq = read_seqcount_begin(&inode->i_size_seqcount); i_size = inode->i_size; } while (read_seqcount_retry(&inode->i_size_seqcount, seq)); return i_size; #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) loff_t i_size; preempt_disable(); i_size = inode->i_size; preempt_enable(); return i_size; #else return inode->i_size; #endif } /* * NOTE: unlike i_size_read(), i_size_write() does need locking around it * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount * can be lost, resulting in subsequent i_size_read() calls spinning forever.
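 *
 * For example, the usual pattern in filesystem code is (a sketch):
 *
 *	inode_lock(inode);
 *	i_size_write(inode, new_size);
 *	inode_unlock(inode);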
*/ static inline void i_size_write(struct inode *inode, loff_t i_size) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) preempt_disable(); write_seqcount_begin(&inode->i_size_seqcount); inode->i_size = i_size; write_seqcount_end(&inode->i_size_seqcount); preempt_enable(); #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) preempt_disable(); inode->i_size = i_size; preempt_enable(); #else inode->i_size = i_size; #endif } static inline unsigned iminor(const struct inode *inode) { return MINOR(inode->i_rdev); } static inline unsigned imajor(const struct inode *inode) { return MAJOR(inode->i_rdev); } extern struct block_device *I_BDEV(struct inode *inode); struct fown_struct { rwlock_t lock; /* protects pid, uid, euid fields */ struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */ kuid_t uid, euid; /* uid/euid of process setting the owner */ int signum; /* posix.1b rt signal to be delivered on IO */ }; /* * Track a single file's readahead state */ struct file_ra_state { pgoff_t start; /* where readahead started */ unsigned int size; /* # of readahead pages */ unsigned int async_size; /* do asynchronous readahead when there are only # of pages ahead */ unsigned int ra_pages; /* Maximum readahead window */ unsigned int mmap_miss; /* Cache miss stat for mmap accesses */ loff_t prev_pos; /* Cache last read() position */ }; /* * Check if @index falls in the readahead windows. */ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) { return (index >= ra->start && index < ra->start + ra->size); } struct file { union { struct llist_node fu_llist; struct rcu_head fu_rcuhead; } f_u; struct path f_path; struct inode *f_inode; /* cached value */ const struct file_operations *f_op; /* * Protects f_ep_links, f_flags. * Must not be taken from IRQ context. */ spinlock_t f_lock; enum rw_hint f_write_hint; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; #ifdef CONFIG_SECURITY void *f_security; #endif /* needed for tty driver, and maybe others */ void *private_data; #ifdef CONFIG_EPOLL /* Used by fs/eventpoll.c to link all the hooks to this file */ struct list_head f_ep_links; struct list_head f_tfile_llink; #endif /* #ifdef CONFIG_EPOLL */ struct address_space *f_mapping; errseq_t f_wb_err; } __randomize_layout __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ struct file_handle { __u32 handle_bytes; int handle_type; /* file identifier */ unsigned char f_handle[]; }; static inline struct file *get_file(struct file *f) { atomic_long_inc(&f->f_count); return f; } #define get_file_rcu_many(x, cnt) \ atomic_long_add_unless(&(x)->f_count, (cnt), 0) #define get_file_rcu(x) get_file_rcu_many((x), 1) #define file_count(x) atomic_long_read(&(x)->f_count) #define MAX_NON_LFS ((1UL<<31) - 1) /* Page cache limit. The filesystems should put that into their s_maxbytes limits, otherwise bad things can happen in VM. 
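 *
 * For example, a fill_super() implementation would typically do (a sketch):
 *
 *	sb->s_maxbytes = MAX_LFS_FILESIZE;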
*/ #if BITS_PER_LONG==32 #define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT) #elif BITS_PER_LONG==64 #define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX) #endif #define FL_POSIX 1 #define FL_FLOCK 2 #define FL_DELEG 4 /* NFSv4 delegation */ #define FL_ACCESS 8 /* not trying to lock, just looking */ #define FL_EXISTS 16 /* when unlocking, test for existence */ #define FL_LEASE 32 /* lease held on this file */ #define FL_CLOSE 64 /* unlock on close */ #define FL_SLEEP 128 /* A blocking lock */ #define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */ #define FL_UNLOCK_PENDING 512 /* Lease is being broken */ #define FL_OFDLCK 1024 /* lock is "owned" by struct file */ #define FL_LAYOUT 2048 /* outstanding pNFS layout */ #define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE) /* * Special return value from posix_lock_file() and vfs_lock_file() for * asynchronous locking. */ #define FILE_LOCK_DEFERRED 1 /* legacy typedef, should eventually be removed */ typedef void *fl_owner_t; struct file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); }; struct lock_manager_operations { fl_owner_t (*lm_get_owner)(fl_owner_t); void (*lm_put_owner)(fl_owner_t); void (*lm_notify)(struct file_lock *); /* unblock callback */ int (*lm_grant)(struct file_lock *, int); bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); }; struct lock_manager { struct list_head list; /* * NFSv4 and up also want opens blocked during the grace period; * NLM doesn't care: */ bool block_opens; }; struct net; void locks_start_grace(struct net *, struct lock_manager *); void locks_end_grace(struct lock_manager *); bool locks_in_grace(struct net *); bool opens_in_grace(struct net *); /* that will die - we need it for nfs_lock_info */ #include <linux/nfs_fs_i.h> /* * struct file_lock represents a generic "file lock". It's used to represent * POSIX byte range locks, BSD (flock) locks, and leases. It's important to * note that the same struct is used to represent both a request for a lock and * the lock itself, but the same object is never used for both. * * FIXME: should we create a separate "struct lock_request" to help distinguish * these two uses? * * The various i_flctx lists are ordered by: * * 1) lock owner * 2) lock range start * 3) lock range end * * Obviously, the last two criteria only matter for POSIX locks. */ struct file_lock { struct file_lock *fl_blocker; /* The lock that is blocking us */ struct list_head fl_list; /* link into file_lock_context */ struct hlist_node fl_link; /* node in global lists */ struct list_head fl_blocked_requests; /* list of requests with * ->fl_blocker pointing here */ struct list_head fl_blocked_member; /* node in * ->fl_blocker->fl_blocked_requests */ fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; int fl_link_cpu; /* what cpu's list is this on?
*/ wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct * fl_fasync; /* for lease break notifications */ /* for lease breaks: */ unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ union { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct { struct list_head link; /* link in AFS vnode's pending_locks list */ int state; /* state of grant or error if -ve */ unsigned int debug_id; } afs; } fl_u; } __randomize_layout; struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; }; /* The following constant reflects the upper bound of the file/locking space */ #ifndef OFFSET_MAX #define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1))) #define OFFSET_MAX INT_LIMIT(loff_t) #define OFFT_OFFSET_MAX INT_LIMIT(off_t) #endif extern void send_sigio(struct fown_struct *fown, int fd, int band); #define locks_inode(f) file_inode(f) #ifdef CONFIG_FILE_LOCKING extern int fcntl_getlk(struct file *, unsigned int, struct flock *); extern int fcntl_setlk(unsigned int, struct file *, unsigned int, struct flock *); #if BITS_PER_LONG == 32 extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 *); extern int fcntl_setlk64(unsigned int, struct file *, unsigned int, struct flock64 *); #endif extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); extern int fcntl_getlease(struct file *filp); /* fs/locks.c */ void locks_free_lock_context(struct inode *inode); void locks_free_lock(struct file_lock *fl); extern void locks_init_lock(struct file_lock *); extern struct file_lock * locks_alloc_lock(void); extern void locks_copy_lock(struct file_lock *, struct file_lock *); extern void locks_copy_conflock(struct file_lock *, struct file_lock *); extern void locks_remove_posix(struct file *, fl_owner_t); extern void locks_remove_file(struct file *); extern void locks_release_private(struct file_lock *); extern void posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); extern int locks_delete_block(struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); extern void lease_get_mtime(struct inode *, struct timespec64 *time); extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); extern int vfs_setlease(struct file *, long, struct file_lock **, void **); extern int lease_modify(struct file_lock *, int, struct list_head *); struct notifier_block; extern int lease_register_notifier(struct notifier_block *); extern void lease_unregister_notifier(struct notifier_block *); struct files_struct; extern void show_fd_locks(struct seq_file *f, struct file *filp, struct files_struct *files); #else /* !CONFIG_FILE_LOCKING */ static inline int fcntl_getlk(struct file *file, unsigned int cmd, struct flock __user *user) { return -EINVAL; } static inline int fcntl_setlk(unsigned int fd, struct file *file, unsigned int cmd, struct flock __user *user) { return -EACCES; } 
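/*
 * Example: taking a whole-file exclusive POSIX lock from kernel code with
 * the helpers declared above (a sketch assuming CONFIG_FILE_LOCKING, with
 * the unlock and most error paths elided):
 *
 *	struct file_lock *fl = locks_alloc_lock();
 *
 *	if (!fl)
 *		return -ENOMEM;
 *	fl->fl_flags = FL_POSIX | FL_SLEEP;
 *	fl->fl_type = F_WRLCK;
 *	fl->fl_start = 0;
 *	fl->fl_end = OFFSET_MAX;
 *	fl->fl_owner = current->files;
 *	fl->fl_pid = current->tgid;
 *	fl->fl_file = filp;
 *	error = vfs_lock_file(filp, F_SETLKW, fl, NULL);
 *	locks_free_lock(fl);
 */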
#if BITS_PER_LONG == 32 static inline int fcntl_getlk64(struct file *file, unsigned int cmd, struct flock64 __user *user) { return -EINVAL; } static inline int fcntl_setlk64(unsigned int fd, struct file *file, unsigned int cmd, struct flock64 __user *user) { return -EACCES; } #endif static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) { return -EINVAL; } static inline int fcntl_getlease(struct file *filp) { return F_UNLCK; } static inline void locks_free_lock_context(struct inode *inode) { } static inline void locks_init_lock(struct file_lock *fl) { return; } static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) { return; } static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl) { return; } static inline void locks_remove_posix(struct file *filp, fl_owner_t owner) { return; } static inline void locks_remove_file(struct file *filp) { return; } static inline void posix_test_lock(struct file *filp, struct file_lock *fl) { return; } static inline int posix_lock_file(struct file *filp, struct file_lock *fl, struct file_lock *conflock) { return -ENOLCK; } static inline int locks_delete_block(struct file_lock *waiter) { return -ENOENT; } static inline int vfs_test_lock(struct file *filp, struct file_lock *fl) { return 0; } static inline int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) { return -ENOLCK; } static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) { return 0; } static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) { return -ENOLCK; } static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) { return 0; } static inline void lease_get_mtime(struct inode *inode, struct timespec64 *time) { return; } static inline int generic_setlease(struct file *filp, long arg, struct file_lock **flp, void **priv) { return -EINVAL; } static inline int vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv) { return -EINVAL; } static inline int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) { return -EINVAL; } struct files_struct; static inline void show_fd_locks(struct seq_file *f, struct file *filp, struct files_struct *files) {} #endif /* !CONFIG_FILE_LOCKING */ static inline struct inode *file_inode(const struct file *f) { return f->f_inode; } static inline struct dentry *file_dentry(const struct file *file) { return d_real(file->f_path.dentry, file_inode(file)); } static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) { return locks_lock_inode_wait(locks_inode(filp), fl); } struct fasync_struct { rwlock_t fa_lock; int magic; int fa_fd; struct fasync_struct *fa_next; /* singly linked list */ struct file *fa_file; struct rcu_head fa_rcu; }; #define FASYNC_MAGIC 0x4601 /* SMP safe fasync helpers: */ extern int fasync_helper(int, struct file *, int, struct fasync_struct **); extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *); extern int fasync_remove_entry(struct file *, struct fasync_struct **); extern struct fasync_struct *fasync_alloc(void); extern void fasync_free(struct fasync_struct *); /* can be called from interrupts */ extern void kill_fasync(struct fasync_struct **, int, int); extern int setfl(int fd, struct file *filp, unsigned long arg); extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); extern int f_setown(struct 
file *filp, unsigned long arg, int force); extern void f_delown(struct file *filp); extern pid_t f_getown(struct file *filp); extern int send_sigurg(struct fown_struct *fown); /* * sb->s_flags. Note that these mirror the equivalent MS_* flags where * represented in both. */ #define SB_RDONLY BIT(0) /* Mount read-only */ #define SB_NOSUID BIT(1) /* Ignore suid and sgid bits */ #define SB_NODEV BIT(2) /* Disallow access to device special files */ #define SB_NOEXEC BIT(3) /* Disallow program execution */ #define SB_SYNCHRONOUS BIT(4) /* Writes are synced at once */ #define SB_MANDLOCK BIT(6) /* Allow mandatory locks on an FS */ #define SB_DIRSYNC BIT(7) /* Directory modifications are synchronous */ #define SB_NOATIME BIT(10) /* Do not update access times. */ #define SB_NODIRATIME BIT(11) /* Do not update directory access times */ #define SB_SILENT BIT(15) #define SB_POSIXACL BIT(16) /* VFS does not apply the umask */ #define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */ #define SB_I_VERSION BIT(23) /* Update inode I_version field */ #define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */ /* These sb flags are internal to the kernel */ #define SB_SUBMOUNT BIT(26) #define SB_FORCE BIT(27) #define SB_NOSEC BIT(28) #define SB_BORN BIT(29) #define SB_ACTIVE BIT(30) #define SB_NOUSER BIT(31) /* * Umount options */ #define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */ #define MNT_DETACH 0x00000002 /* Just detach from the tree */ #define MNT_EXPIRE 0x00000004 /* Mark for expiry */ #define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */ #define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */ /* sb->s_iflags */ #define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ #define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ #define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */ #define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */ /* sb->s_iflags to limit user namespace mounts */ #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ #define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 #define SB_I_UNTRUSTED_MOUNTER 0x00000040 #define SB_I_NOSUID 0x80000000 /* Ignore suid on this fs */ #define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ /* Possible states of 'frozen' field */ enum { SB_UNFROZEN = 0, /* FS is unfrozen */ SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */ SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */ SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop * internal threads if needed) */ SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */ }; #define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1) struct sb_writers { int frozen; /* Is sb frozen?
*/ wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS]; }; struct super_block { struct list_head s_list; /* Keep this first */ dev_t s_dev; /* search index; _not_ kdev_t */ unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; /* Max file size */ struct file_system_type *s_type; const struct super_operations *s_op; const struct dquot_operations *dq_op; const struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_iflags; /* internal SB_I_* flags */ unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; int s_count; atomic_t s_active; #ifdef CONFIG_SECURITY void *s_security; #endif const struct xattr_handler **s_xattr; #ifdef CONFIG_FS_ENCRYPTION const struct fscrypt_operations *s_cop; struct key *s_master_keys; /* master crypto keys in use */ #endif #ifdef CONFIG_FS_VERITY const struct fsverity_operations *s_vop; #endif struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ struct list_head s_mounts; /* list of mounts; _not_ for fs use */ struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; unsigned int s_quota_types; /* Bitmask of supported quota types */ struct quota_info s_dquot; /* Diskquota specific options */ struct sb_writers s_writers; /* * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and * s_fsnotify_marks together for cache efficiency. They are frequently * accessed and rarely modified. */ void *s_fs_info; /* Filesystem private info */ /* Granularity of c/m/atime in ns (cannot be worse than a second) */ u32 s_time_gran; /* Time limits for c/m/atime in seconds */ time64_t s_time_min; time64_t s_time_max; #ifdef CONFIG_FSNOTIFY __u32 s_fsnotify_mask; struct fsnotify_mark_connector __rcu *s_fsnotify_marks; #endif char s_id[32]; /* Informational name */ uuid_t s_uuid; /* UUID */ unsigned int s_max_links; fmode_t s_mode; /* * The next field is for VFS *only*. No filesystems have any business * even looking at it. You had been warned. */ struct mutex s_vfs_rename_mutex; /* Kludge */ /* * Filesystem subtype. If non-empty the filesystem type field * in /proc/mounts will be "type.subtype" */ const char *s_subtype; const struct dentry_operations *s_d_op; /* default d_op for dentries */ /* * Saved pool identifier for cleancache (-1 means none) */ int cleancache_poolid; struct shrinker s_shrink; /* per-sb shrinker handle */ /* Number of inodes with nlink == 0 but still referenced */ atomic_long_t s_remove_count; /* Pending fsnotify inode refs */ atomic_long_t s_fsnotify_inode_refs; /* Being remounted read-only */ int s_readonly_remount; /* AIO completions deferred from interrupt context */ struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; /* * Owning user namespace and default context in which to * interpret filesystem uids, gids, quotas, device nodes, * xattrs and security labels. */ struct user_namespace *s_user_ns; /* * The list_lru structure is essentially just a pointer to a table * of per-node lru lists, each of which has its own spinlock. * There is no need to put them into separate cachelines. 
*/ struct list_lru s_dentry_lru; struct list_lru s_inode_lru; struct rcu_head rcu; struct work_struct destroy_work; struct mutex s_sync_lock; /* sync serialisation lock */ /* * Indicates how deep in a filesystem stack this SB is */ int s_stack_depth; /* s_inode_list_lock protects s_inodes */ spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp; struct list_head s_inodes; /* all inodes */ spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; /* writeback inodes */ } __randomize_layout; /* Helper functions so that in most cases filesystems will * not need to deal directly with kuid_t and kgid_t and can * instead deal with the raw numeric values that are stored * in the filesystem. */ static inline uid_t i_uid_read(const struct inode *inode) { return from_kuid(inode->i_sb->s_user_ns, inode->i_uid); } static inline gid_t i_gid_read(const struct inode *inode) { return from_kgid(inode->i_sb->s_user_ns, inode->i_gid); } static inline void i_uid_write(struct inode *inode, uid_t uid) { inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid); } static inline void i_gid_write(struct inode *inode, gid_t gid) { inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid); } extern struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran); extern struct timespec64 current_time(struct inode *inode); /* * Snapshotting support. */ void __sb_end_write(struct super_block *sb, int level); int __sb_start_write(struct super_block *sb, int level, bool wait); #define __sb_writers_acquired(sb, lev) \ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) #define __sb_writers_release(sb, lev) \ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) /** * sb_end_write - drop write access to a superblock * @sb: the super we wrote to * * Decrement number of writers to the filesystem. Wake up possible waiters * wanting to freeze the filesystem. */ static inline void sb_end_write(struct super_block *sb) { __sb_end_write(sb, SB_FREEZE_WRITE); } /** * sb_end_pagefault - drop write access to a superblock from a page fault * @sb: the super we wrote to * * Decrement number of processes handling write page fault to the filesystem. * Wake up possible waiters wanting to freeze the filesystem. */ static inline void sb_end_pagefault(struct super_block *sb) { __sb_end_write(sb, SB_FREEZE_PAGEFAULT); } /** * sb_end_intwrite - drop write access to a superblock for internal fs purposes * @sb: the super we wrote to * * Decrement fs-internal number of writers to the filesystem. Wake up possible * waiters wanting to freeze the filesystem. */ static inline void sb_end_intwrite(struct super_block *sb) { __sb_end_write(sb, SB_FREEZE_FS); } /** * sb_start_write - get write access to a superblock * @sb: the super we write to * * When a process wants to write data or metadata to a file system (i.e. dirty * a page or an inode), it should embed the operation in a sb_start_write() - * sb_end_write() pair to get exclusion against file system freezing. This * function increments number of writers preventing freezing. If the file * system is already frozen, the function waits until the file system is * thawed. * * Since freeze protection behaves as a lock, users have to preserve * ordering of freeze protection and other filesystem locks. Generally, * freeze protection should be the outermost lock. In particular, we have: * * sb_start_write * -> i_mutex (write path, truncate, directory ops, ...) 
* -> s_umount (freeze_super, thaw_super) */ static inline void sb_start_write(struct super_block *sb) { __sb_start_write(sb, SB_FREEZE_WRITE, true); } static inline int sb_start_write_trylock(struct super_block *sb) { return __sb_start_write(sb, SB_FREEZE_WRITE, false); } /** * sb_start_pagefault - get write access to a superblock from a page fault * @sb: the super we write to * * When a process starts handling write page fault, it should embed the * operation into sb_start_pagefault() - sb_end_pagefault() pair to get * exclusion against file system freezing. This is needed since the page fault * is going to dirty a page. This function increments number of running page * faults preventing freezing. If the file system is already frozen, the * function waits until the file system is thawed. * * Since page fault freeze protection behaves as a lock, users have to preserve * ordering of freeze protection and other filesystem locks. It is advised to * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault * handling code implies lock dependency: * * mmap_sem * -> sb_start_pagefault */ static inline void sb_start_pagefault(struct super_block *sb) { __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true); } /* * sb_start_intwrite - get write access to a superblock for internal fs purposes * @sb: the super we write to * * This is the third level of protection against filesystem freezing. It is * free for use by a filesystem. The only requirement is that it must rank * below sb_start_pagefault. * * For example filesystem can call sb_start_intwrite() when starting a * transaction which somewhat eases handling of freezing for internal sources * of filesystem changes (internal fs threads, discarding preallocation on file * close, etc.). */ static inline void sb_start_intwrite(struct super_block *sb) { __sb_start_write(sb, SB_FREEZE_FS, true); } static inline int sb_start_intwrite_trylock(struct super_block *sb) { return __sb_start_write(sb, SB_FREEZE_FS, false); } extern bool inode_owner_or_capable(const struct inode *inode); /* * VFS helper functions.. */ extern int vfs_create(struct inode *, struct dentry *, umode_t, bool); extern int vfs_mkdir(struct inode *, struct dentry *, umode_t); extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); extern int vfs_symlink(struct inode *, struct dentry *, const char *); extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **); extern int vfs_rmdir(struct inode *, struct dentry *); extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); extern int vfs_whiteout(struct inode *, struct dentry *); extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag); int vfs_mkobj(struct dentry *, umode_t, int (*f)(struct dentry *, umode_t, void *), void *); int vfs_fchown(struct file *file, uid_t user, gid_t group); int vfs_fchmod(struct file *file, umode_t mode); extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #else #define compat_ptr_ioctl NULL #endif /* * VFS file helper functions. 
*/ extern void inode_init_owner(struct inode *inode, const struct inode *dir, umode_t mode); extern bool may_open_dev(const struct path *path); umode_t mode_strip_sgid(const struct inode *dir, umode_t mode); /* * VFS FS_IOC_FIEMAP helper definitions. */ struct fiemap_extent_info { unsigned int fi_flags; /* Flags as passed from user */ unsigned int fi_extents_mapped; /* Number of mapped extents */ unsigned int fi_extents_max; /* Size of fiemap_extent array */ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent array */ }; int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, u64 phys, u64 len, u32 flags); int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); /* * This is the "filldir" function type, used by readdir() to let * the kernel specify what kind of dirent layout it wants to have. * This allows the kernel to read directories into kernel space or * to have different dirent layouts depending on the binary type. */ struct dir_context; typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned); struct dir_context { filldir_t actor; loff_t pos; }; struct block_device_operations; /* These macros are for out of kernel modules to test that * the kernel supports the unlocked_ioctl and compat_ioctl * fields in struct file_operations. */ #define HAVE_COMPAT_IOCTL 1 #define HAVE_UNLOCKED_IOCTL 1 /* * These flags let !MMU mmap() govern direct device mapping vs immediate * copying more easily for MAP_PRIVATE, especially for ROM filesystems. * * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE) * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED) * NOMMU_MAP_READ: Can be mapped for reading * NOMMU_MAP_WRITE: Can be mapped for writing * NOMMU_MAP_EXEC: Can be mapped for execution */ #define NOMMU_MAP_COPY 0x00000001 #define NOMMU_MAP_DIRECT 0x00000008 #define NOMMU_MAP_READ VM_MAYREAD #define NOMMU_MAP_WRITE VM_MAYWRITE #define NOMMU_MAP_EXEC VM_MAYEXEC #define NOMMU_VMFLAGS \ (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC) /* * These flags control the behavior of the remap_file_range function pointer. * If it is called with len == 0 that means "remap to end of source file". * See Documentation/filesystems/vfs.rst for more details about this call. * * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate) * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request */ #define REMAP_FILE_DEDUP (1 << 0) #define REMAP_FILE_CAN_SHORTEN (1 << 1) /* * These flags signal that the caller is ok with altering various aspects of * the behavior of the remap operation. The changes must be made by the * implementation; the vfs remap helper functions can take advantage of them. * Flags in this category exist to preserve the quirky behavior of the hoisted * btrfs clone/dedupe ioctls. 
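 *
 * An implementation of ->remap_file_range() commonly begins by rejecting
 * any flags it does not understand (a sketch of the usual pattern):
 *
 *	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
 *		return -EINVAL;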
*/ #define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) struct iov_iter; struct file_operations { struct module *owner; loff_t (*llseek) (struct file *, loff_t, int); ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iopoll)(struct kiocb *kiocb, bool spin); int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); unsigned long mmap_supported_flags; int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); int (*fsync) (struct file *, loff_t, loff_t, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*setfl)(struct file *, unsigned long); int (*flock) (struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); int (*setlease)(struct file *, long, struct file_lock **, void **); long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len); void (*show_fdinfo)(struct seq_file *m, struct file *f); #ifndef CONFIG_MMU unsigned (*mmap_capabilities)(struct file *); #endif ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags); int (*fadvise)(struct file *, loff_t, loff_t, int); bool may_pollfree; } __randomize_layout; struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); int (*permission) (struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink) (struct dentry *, char __user *,int); int (*create) (struct inode *,struct dentry *, umode_t, bool); int (*link) (struct dentry *,struct inode *,struct dentry *); int (*unlink) (struct inode *,struct dentry *); int (*symlink) (struct inode *,struct dentry *,const char *); int (*mkdir) (struct inode *,struct dentry *,umode_t); int (*rmdir) (struct inode *,struct dentry *); int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t); int (*rename) (struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (const struct path *, struct kstat *, u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); int (*update_time)(struct inode *, struct timespec64 *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned open_flag, umode_t create_mode); int (*tmpfile) 
(struct inode *, struct dentry *, umode_t); int (*set_acl)(struct inode *, struct posix_acl *, int); } ____cacheline_aligned; static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, struct iov_iter *iter) { return file->f_op->read_iter(kio, iter); } static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio, struct iov_iter *iter) { return file->f_op->write_iter(kio, iter); } static inline int call_mmap(struct file *file, struct vm_area_struct *vma) { return file->f_op->mmap(file, vma); } ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, struct iovec **ret_pointer); typedef ssize_t (*vfs_readf_t)(struct file *, char __user *, size_t, loff_t *); typedef ssize_t (*vfs_writef_t)(struct file *, const char __user *, size_t, loff_t *); vfs_readf_t vfs_readf(struct file *file); vfs_writef_t vfs_writef(struct file *file); extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_readv(struct file *, const struct iovec __user *, unsigned long, loff_t *, rwf_t); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t len, unsigned int flags); extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *count, unsigned int remap_flags); extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags); extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags); extern int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same); extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, struct file *dst_file, loff_t dst_pos, loff_t len, unsigned int remap_flags); struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); void (*destroy_inode)(struct inode *); void (*free_inode)(struct inode *); void (*dirty_inode) (struct inode *, int flags); int (*write_inode) (struct inode *, struct writeback_control *wbc); int (*drop_inode) (struct inode *); void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); int (*freeze_super) (struct super_block *); int (*freeze_fs) (struct super_block *); int (*thaw_super) (struct super_block *); int (*unfreeze_fs) (struct super_block *); int (*statfs) (struct dentry *, struct kstatfs *); int (*remount_fs) (struct super_block *, int *, char *); void (*umount_begin) (struct super_block *); int (*show_options)(struct seq_file *, struct dentry *); int (*show_devname)(struct seq_file *, struct dentry *); int (*show_path)(struct seq_file *, struct dentry *); int (*show_stats)(struct seq_file *, struct dentry *); #ifdef CONFIG_QUOTA ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); struct dquot **(*get_dquots)(struct inode *); #endif int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); long 
(*nr_cached_objects)(struct super_block *,
			     struct shrink_control *);
	long (*free_cached_objects)(struct super_block *,
				    struct shrink_control *);
#if IS_ENABLED(CONFIG_BLK_DEV_LOOP) || IS_ENABLED(CONFIG_BLK_DEV_LOOP_MODULE)
	/* and aufs */
	struct file *(*real_loop)(struct file *);
#endif
};

/*
 * Inode flags - they have no relation to superblock flags now
 */
#define S_SYNC		1	/* Writes are synced at once */
#define S_NOATIME	2	/* Do not update access times */
#define S_APPEND	4	/* Append-only file */
#define S_IMMUTABLE	8	/* Immutable file */
#define S_DEAD		16	/* removed, but still open directory */
#define S_NOQUOTA	32	/* Inode is not counted to quota */
#define S_DIRSYNC	64	/* Directory modifications are synchronous */
#define S_NOCMTIME	128	/* Do not update file c/mtime */
#define S_SWAPFILE	256	/* Do not truncate: swapon got its bmaps */
#define S_PRIVATE	512	/* Inode is fs-internal */
#define S_IMA		1024	/* Inode has an associated IMA struct */
#define S_AUTOMOUNT	2048	/* Automount/referral quasi-directory */
#define S_NOSEC		4096	/* no suid or xattr security attributes */
#ifdef CONFIG_FS_DAX
#define S_DAX		8192	/* Direct Access, avoiding the page cache */
#else
#define S_DAX		0	/* Make all the DAX code disappear */
#endif
#define S_ENCRYPTED	16384	/* Encrypted file (using fs/crypto/) */
#define S_CASEFOLD	32768	/* Casefolded file */
#define S_VERITY	65536	/* Verity file (using fs/verity/) */

/*
 * Note that nosuid etc flags are inode-specific: setting some file-system
 * flags just means all the inodes inherit those flags by default. It might be
 * possible to override it selectively if you really wanted to with some
 * ioctl() that is not currently implemented.
 *
 * Exception: SB_RDONLY is always applied to the entire file system.
 *
 * Unfortunately, it is possible to change a filesystem's flags while it is
 * mounted and has files in use.  This means that the existing inodes will
 * not have their i_flags updated.  Hence, i_flags no longer inherits the
 * superblock mount flags, so these have to be checked separately.
-- rmk@arm.uk.linux.org */ #define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg)) static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; } #define IS_RDONLY(inode) sb_rdonly((inode)->i_sb) #define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \ ((inode)->i_flags & S_SYNC)) #define IS_DIRSYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS|SB_DIRSYNC) || \ ((inode)->i_flags & (S_SYNC|S_DIRSYNC))) #define IS_MANDLOCK(inode) __IS_FLG(inode, SB_MANDLOCK) #define IS_NOATIME(inode) __IS_FLG(inode, SB_RDONLY|SB_NOATIME) #define IS_I_VERSION(inode) __IS_FLG(inode, SB_I_VERSION) #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) #define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL) #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) #define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE) #define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE) #define IS_IMA(inode) ((inode)->i_flags & S_IMA) #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) #define IS_DAX(inode) ((inode)->i_flags & S_DAX) #define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED) #define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD) #define IS_VERITY(inode) ((inode)->i_flags & S_VERITY) #define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ (inode)->i_rdev == WHITEOUT_DEV) static inline bool HAS_UNMAPPED_ID(struct inode *inode) { return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid); } static inline enum rw_hint file_write_hint(struct file *file) { if (file->f_write_hint != WRITE_LIFE_NOT_SET) return file->f_write_hint; return file_inode(file)->i_write_hint; } static inline int iocb_flags(struct file *file); static inline u16 ki_hint_validate(enum rw_hint hint) { typeof(((struct kiocb *)0)->ki_hint) max_hint = -1; if (hint <= max_hint) return hint; return 0; } static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) { *kiocb = (struct kiocb) { .ki_filp = filp, .ki_flags = iocb_flags(filp), .ki_hint = ki_hint_validate(file_write_hint(filp)), .ki_ioprio = get_current_ioprio(), }; } /* * Inode state bits. Protected by inode->i_lock * * Three bits determine the dirty state of the inode, I_DIRTY_SYNC, * I_DIRTY_DATASYNC and I_DIRTY_PAGES. * * Four bits define the lifetime of an inode. Initially, inodes are I_NEW, * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at * various stages of removing an inode. * * Two bits are used for locking and completion notification, I_NEW and I_SYNC. * * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on * fdatasync(). i_atime is the usual cause. * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of * these changes separately from I_DIRTY_SYNC so that we * don't have to write inode on fdatasync() when only * mtime has changed in it. * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. * I_NEW Serves as both a mutex and completion notification. * New inodes set I_NEW. If two processes both create * the same inode, one of them will release its inode and * wait for I_NEW to be released before returning. * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can * also cause waiting on I_NEW, without I_NEW actually * being set. find_inode() uses this to prevent returning * nearly-dead inodes. 
 * I_WILL_FREE		Must be set when calling write_inode_now() if i_count
 *			is zero.  I_FREEING must be set when I_WILL_FREE is
 *			cleared.
 * I_FREEING		Set when inode is about to be freed but still has dirty
 *			pages or buffers attached or the inode itself is still
 *			dirty.
 * I_CLEAR		Added by clear_inode().  In this state the inode is
 *			clean and can be destroyed.  Inode keeps I_FREEING.
 *
 *			Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
 *			prohibited for many purposes.  iget() must wait for
 *			the inode to be completely released, then create it
 *			anew.  Other functions will just ignore such inodes,
 *			if appropriate.  I_NEW is used for waiting.
 *
 * I_SYNC		Writeback of inode is running.  The bit is set during
 *			data writeback, and cleared with a wakeup on the bit
 *			address once it is done.  The bit is also used to pin
 *			the inode in memory for flusher thread.
 *
 * I_REFERENCED		Marks the inode as recently referenced on the LRU list.
 *
 * I_DIO_WAKEUP		Never set.  Only used as a key for wait_on_bit().
 *
 * I_WB_SWITCH		Cgroup bdi_writeback switching in progress.  Used to
 *			synchronize competing switching instances and to tell
 *			wb stat updates to grab the i_pages lock.  See
 *			inode_switch_wbs_work_fn() for details.
 *
 * I_OVL_INUSE		Used by overlayfs to get exclusive ownership on upper
 *			and work dirs among overlayfs mounts.
 *
 * I_CREATING		New object's inode in the middle of setting up.
 *
 * I_SYNC_QUEUED	Inode is queued in b_io or b_more_io writeback lists.
 *			Used to detect that mark_inode_dirty() should not move
 *			inode between dirty lists.
 *
 * I_LRU_ISOLATING	Inode is pinned being isolated from LRU without holding
 *			i_count.
 *
 * Q: What is the difference between I_WILL_FREE and I_FREEING?
 */
#define I_DIRTY_SYNC		(1 << 0)
#define I_DIRTY_DATASYNC	(1 << 1)
#define I_DIRTY_PAGES		(1 << 2)
#define __I_NEW			3
#define I_NEW			(1 << __I_NEW)
#define I_WILL_FREE		(1 << 4)
#define I_FREEING		(1 << 5)
#define I_CLEAR			(1 << 6)
#define __I_SYNC		7
#define I_SYNC			(1 << __I_SYNC)
#define I_REFERENCED		(1 << 8)
#define __I_DIO_WAKEUP		9
#define I_DIO_WAKEUP		(1 << __I_DIO_WAKEUP)
#define I_LINKABLE		(1 << 10)
#define I_DIRTY_TIME		(1 << 11)
#define I_WB_SWITCH		(1 << 13)
#define I_OVL_INUSE		(1 << 14)
#define I_CREATING		(1 << 15)
#define I_SYNC_QUEUED		(1 << 17)
#define __I_LRU_ISOLATING	19
#define I_LRU_ISOLATING		(1 << __I_LRU_ISOLATING)

#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)

extern void __mark_inode_dirty(struct inode *, int);

static inline void mark_inode_dirty(struct inode *inode)
{
	__mark_inode_dirty(inode, I_DIRTY);
}

static inline void mark_inode_dirty_sync(struct inode *inode)
{
	__mark_inode_dirty(inode, I_DIRTY_SYNC);
}

extern void inc_nlink(struct inode *inode);
extern void drop_nlink(struct inode *inode);
extern void clear_nlink(struct inode *inode);
extern void set_nlink(struct inode *inode, unsigned int nlink);

static inline void inode_inc_link_count(struct inode *inode)
{
	inc_nlink(inode);
	mark_inode_dirty(inode);
}

static inline void inode_dec_link_count(struct inode *inode)
{
	drop_nlink(inode);
	mark_inode_dirty(inode);
}

enum file_time_flags {
	S_ATIME = 1,
	S_MTIME = 2,
	S_CTIME = 4,
	S_VERSION = 8,
};

extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);

static inline void file_accessed(struct file *file)
{
	if (!(file->f_flags & O_NOATIME))
		touch_atime(&file->f_path);
}

extern int file_modified(struct file *file);

int sync_inode(struct inode *inode, struct writeback_control *wbc);
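/*
 * Example (a minimal sketch, not part of the original header): the usual
 * pattern after modifying an inode is to stamp the times and mark it dirty
 * so that writeback (or sync_inode() above) will push it out.  The
 * "example_" name is a hypothetical placeholder; i_size_write() and
 * current_time() come from earlier in this header.
 */
static inline void example_update_inode_size(struct inode *inode,
					     loff_t new_size)
{
	i_size_write(inode, new_size);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);	/* I_DIRTY: metadata and pages */
}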
int sync_inode_metadata(struct inode *inode, int wait);

struct file_system_type {
	const char *name;
	int fs_flags;
#define FS_REQUIRES_DEV		1
#define FS_BINARY_MOUNTDATA	2
#define FS_HAS_SUBTYPE		4
#define FS_USERNS_MOUNT		8	/* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM	16	/* Disable fanotify permission events */
#define FS_RENAME_DOES_D_MOVE	32768	/* FS will handle d_move() during rename() internally. */
	int (*init_fs_context)(struct fs_context *);
	const struct fs_parameter_description *parameters;
	struct dentry *(*mount) (struct file_system_type *, int,
		       const char *, void *);
	void (*kill_sb) (struct super_block *);
	struct module *owner;
	struct file_system_type * next;
	struct hlist_head fs_supers;

	struct lock_class_key s_lock_key;
	struct lock_class_key s_umount_key;
	struct lock_class_key s_vfs_rename_key;
	struct lock_class_key s_writers_key[SB_FREEZE_LEVELS];

	struct lock_class_key i_lock_key;
	struct lock_class_key i_mutex_key;
	struct lock_class_key i_mutex_dir_key;
};

#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)

#ifdef CONFIG_BLOCK
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int));
#else
static inline struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	return ERR_PTR(-ENODEV);
}
#endif
extern struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
void generic_shutdown_super(struct super_block *sb);
#ifdef CONFIG_BLOCK
void kill_block_super(struct super_block *sb);
#else
static inline void kill_block_super(struct super_block *sb)
{
	BUG();
}
#endif
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
int set_anon_super_fc(struct super_block *s, struct fs_context *fc);
int get_anon_bdev(dev_t *);
void free_anon_bdev(dev_t);
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *));
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags, void *data);

/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
	(((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
#define fops_put(fops) \
	do { if (fops) module_put((fops)->owner); } while(0)
/*
 * This one is to be used *ONLY* from ->open() instances.
 * fops must be non-NULL, pinned down *and* module dependencies
 * should be sufficient to pin the caller down as well.
*/ #define replace_fops(f, fops) \ do { \ struct file *__file = (f); \ fops_put(__file->f_op); \ BUG_ON(!(__file->f_op = (fops))); \ } while(0) extern int register_filesystem(struct file_system_type *); extern int unregister_filesystem(struct file_system_type *); extern struct vfsmount *kern_mount(struct file_system_type *); extern void kern_unmount(struct vfsmount *mnt); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); extern long do_mount(const char *, const char __user *, const char *, unsigned long, void *); extern struct vfsmount *collect_mounts(const struct path *); extern void drop_collected_mounts(struct vfsmount *); extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, struct vfsmount *); extern int vfs_statfs(const struct path *, struct kstatfs *); extern int user_statfs(const char __user *, struct kstatfs *); extern int fd_statfs(int, struct kstatfs *); extern int freeze_super(struct super_block *super); extern int thaw_super(struct super_block *super); extern bool our_mnt(struct vfsmount *mnt); extern __printf(2, 3) int super_setup_bdi_name(struct super_block *sb, char *fmt, ...); extern int super_setup_bdi(struct super_block *sb); extern int current_umask(void); extern void ihold(struct inode * inode); extern void iput(struct inode *); extern int generic_update_time(struct inode *, struct timespec64 *, int); extern int update_time(struct inode *, struct timespec64 *, int); /* /sys/fs */ extern struct kobject *fs_kobj; #define MAX_RW_COUNT (INT_MAX & PAGE_MASK) #ifdef CONFIG_MANDATORY_FILE_LOCKING extern int locks_mandatory_locked(struct file *); extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char); /* * Candidates for mandatory locking have the setgid bit set * but no group execute bit - an otherwise meaningless combination. */ static inline int __mandatory_lock(struct inode *ino) { return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID; } /* * ... and these candidates should be on SB_MANDLOCK mounted fs, * otherwise these will be advisory locks */ static inline int mandatory_lock(struct inode *ino) { return IS_MANDLOCK(ino) && __mandatory_lock(ino); } static inline int locks_verify_locked(struct file *file) { if (mandatory_lock(locks_inode(file))) return locks_mandatory_locked(file); return 0; } static inline int locks_verify_truncate(struct inode *inode, struct file *f, loff_t size) { if (!inode->i_flctx || !mandatory_lock(inode)) return 0; if (size < inode->i_size) { return locks_mandatory_area(inode, f, size, inode->i_size - 1, F_WRLCK); } else { return locks_mandatory_area(inode, f, inode->i_size, size - 1, F_WRLCK); } } #else /* !CONFIG_MANDATORY_FILE_LOCKING */ static inline int locks_mandatory_locked(struct file *file) { return 0; } static inline int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start, loff_t end, unsigned char type) { return 0; } static inline int __mandatory_lock(struct inode *inode) { return 0; } static inline int mandatory_lock(struct inode *inode) { return 0; } static inline int locks_verify_locked(struct file *file) { return 0; } static inline int locks_verify_truncate(struct inode *inode, struct file *filp, size_t size) { return 0; } #endif /* CONFIG_MANDATORY_FILE_LOCKING */ #ifdef CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { /* * Since this check is lockless, we must ensure that any refcounts * taken are done before checking i_flctx->flc_lease. 
Otherwise, we * could end up racing with tasks trying to set a new lease on this * file. */ smp_mb(); if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) return __break_lease(inode, mode, FL_LEASE); return 0; } static inline int break_deleg(struct inode *inode, unsigned int mode) { /* * Since this check is lockless, we must ensure that any refcounts * taken are done before checking i_flctx->flc_lease. Otherwise, we * could end up racing with tasks trying to set a new lease on this * file. */ smp_mb(); if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) return __break_lease(inode, mode, FL_DELEG); return 0; } static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode) { int ret; ret = break_deleg(inode, O_WRONLY|O_NONBLOCK); if (ret == -EWOULDBLOCK && delegated_inode) { *delegated_inode = inode; ihold(inode); } return ret; } static inline int break_deleg_wait(struct inode **delegated_inode) { int ret; ret = break_deleg(*delegated_inode, O_WRONLY); iput(*delegated_inode); *delegated_inode = NULL; return ret; } static inline int break_layout(struct inode *inode, bool wait) { smp_mb(); if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) return __break_lease(inode, wait ? O_WRONLY : O_WRONLY | O_NONBLOCK, FL_LAYOUT); return 0; } #else /* !CONFIG_FILE_LOCKING */ static inline int break_lease(struct inode *inode, unsigned int mode) { return 0; } static inline int break_deleg(struct inode *inode, unsigned int mode) { return 0; } static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode) { return 0; } static inline int break_deleg_wait(struct inode **delegated_inode) { BUG(); return 0; } static inline int break_layout(struct inode *inode, bool wait) { return 0; } #endif /* CONFIG_FILE_LOCKING */ /* fs/open.c */ struct audit_names; struct filename { const char *name; /* pointer to actual string */ const __user char *uptr; /* original userland pointer */ int refcnt; struct audit_names *aname; const char iname[]; }; static_assert(offsetof(struct filename, iname) % sizeof(long) == 0); extern long vfs_truncate(const struct path *, loff_t); extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, struct file *filp); extern int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len); extern long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode); extern struct file *file_open_name(struct filename *, int, umode_t); extern struct file *filp_open(const char *, int, umode_t); extern struct file *file_open_root(struct dentry *, struct vfsmount *, const char *, int, umode_t); extern struct file * dentry_open(const struct path *, int, const struct cred *); extern struct file * open_with_fake_path(const struct path *, int, struct inode*, const struct cred *); static inline struct file *file_clone_open(struct file *file) { return dentry_open(&file->f_path, file->f_flags, file->f_cred); } extern int filp_close(struct file *, fl_owner_t id); extern struct filename *getname_flags(const char __user *, int, int *); extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); extern void putname(struct filename *name); extern int finish_open(struct file *file, struct dentry *dentry, int (*open)(struct inode *, struct file *)); extern int finish_no_open(struct file *file, struct dentry *dentry); /* fs/ioctl.c */ extern int ioctl_preallocate(struct file *filp, void __user *argp); /* fs/dcache.c */ extern 
void __init vfs_caches_init_early(void); extern void __init vfs_caches_init(void); extern struct kmem_cache *names_cachep; #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) #ifdef CONFIG_BLOCK extern int register_blkdev(unsigned int, const char *); extern void unregister_blkdev(unsigned int, const char *); extern void bdev_unhash_inode(dev_t dev); extern struct block_device *bdget(dev_t); extern struct block_device *bdgrab(struct block_device *bdev); extern void bd_set_size(struct block_device *, loff_t size); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern void invalidate_bdev(struct block_device *); extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); extern int sync_blockdev(struct block_device *bdev); extern void kill_bdev(struct block_device *); extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern void emergency_thaw_bdev(struct super_block *sb); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); extern struct super_block *blockdev_superblock; static inline bool sb_is_blkdev_sb(struct super_block *sb) { return sb == blockdev_superblock; } #else static inline void bd_forget(struct inode *inode) {} static inline int sync_blockdev(struct block_device *bdev) { return 0; } static inline void kill_bdev(struct block_device *bdev) {} static inline void invalidate_bdev(struct block_device *bdev) {} static inline struct super_block *freeze_bdev(struct block_device *sb) { return NULL; } static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) { return 0; } static inline int emergency_thaw_bdev(struct super_block *sb) { return 0; } static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) { } static inline bool sb_is_blkdev_sb(struct super_block *sb) { return false; } #endif extern int __sync_filesystem(struct super_block *, int); extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; #ifdef CONFIG_BLOCK extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, void *holder); extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); extern struct block_device *bd_start_claiming(struct block_device *bdev, void *holder); extern void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, void *holder); extern void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, void *holder); extern void blkdev_put(struct block_device *bdev, fmode_t mode); extern int __blkdev_reread_part(struct block_device *bdev); extern int blkdev_reread_part(struct block_device *bdev); #ifdef CONFIG_SYSFS extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); extern void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); #else static inline int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) { return 0; } static inline void bd_unlink_disk_holder(struct 
block_device *bdev, struct gendisk *disk) { } #endif #endif /* fs/char_dev.c */ #define CHRDEV_MAJOR_MAX 512 /* Marks the bottom of the first segment of free char majors */ #define CHRDEV_MAJOR_DYN_END 234 /* Marks the top and bottom of the second segment of free char majors */ #define CHRDEV_MAJOR_DYN_EXT_START 511 #define CHRDEV_MAJOR_DYN_EXT_END 384 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); extern int register_chrdev_region(dev_t, unsigned, const char *); extern int __register_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops); extern void __unregister_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name); extern void unregister_chrdev_region(dev_t, unsigned); extern void chrdev_show(struct seq_file *,off_t); static inline int register_chrdev(unsigned int major, const char *name, const struct file_operations *fops) { return __register_chrdev(major, 0, 256, name, fops); } static inline void unregister_chrdev(unsigned int major, const char *name) { __unregister_chrdev(major, 0, 256, name); } /* fs/block_dev.c */ #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ #define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ #ifdef CONFIG_BLOCK #define BLKDEV_MAJOR_MAX 512 extern const char *__bdevname(dev_t, char *buffer); extern const char *bdevname(struct block_device *bdev, char *buffer); extern struct block_device *lookup_bdev(const char *); extern void blkdev_show(struct seq_file *,off_t); #else #define BLKDEV_MAJOR_MAX 0 #endif extern void init_special_inode(struct inode *, umode_t, dev_t); /* Invalid inode operations -- fs/bad_inode.c */ extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); #ifdef CONFIG_BLOCK extern void check_disk_size_change(struct gendisk *disk, struct block_device *bdev, bool verbose); extern int revalidate_disk(struct gendisk *); extern int check_disk_change(struct block_device *); extern int __invalidate_device(struct block_device *, bool); extern int invalidate_partition(struct gendisk *, int); #endif unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); static inline void invalidate_remote_inode(struct inode *inode) { if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) invalidate_mapping_pages(inode->i_mapping, 0, -1); } extern int invalidate_inode_pages2(struct address_space *mapping); extern int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); extern int write_inode_now(struct inode *, int); extern int filemap_fdatawrite(struct address_space *); extern int filemap_flush(struct address_space *); extern int filemap_fdatawait_keep_errors(struct address_space *mapping); extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping, loff_t start_byte, loff_t end_byte); static inline int filemap_fdatawait(struct address_space *mapping) { return filemap_fdatawait_range(mapping, 0, LLONG_MAX); } extern bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend); extern int filemap_write_and_wait(struct address_space *mapping); extern int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend); extern int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); 
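/*
 * Example (a minimal sketch, not part of the original header): a "flush this
 * mapping" helper composed from the primitives above -- start writeback on
 * all dirty pages, then wait for it without clearing the error state other
 * waiters may still want to see.  The "example_" name is a hypothetical
 * placeholder; filemap_write_and_wait() above is the usual one-call form.
 */
static inline int example_flush_mapping(struct address_space *mapping)
{
	int ret = filemap_fdatawrite(mapping);	/* kick off writeback */

	if (ret)
		return ret;
	return filemap_fdatawait_keep_errors(mapping);	/* wait for it */
}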
extern int filemap_fdatawrite_range(struct address_space *mapping,
				loff_t start, loff_t end);
extern int filemap_check_errors(struct address_space *mapping);
extern void __filemap_set_wb_err(struct address_space *mapping, int err);

extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
					     loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);
extern int __must_check file_write_and_wait_range(struct file *file,
						loff_t start, loff_t end);

static inline int file_write_and_wait(struct file *file)
{
	return file_write_and_wait_range(file, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
	/* Fastpath for common case of no error */
	if (unlikely(err))
		__filemap_set_wb_err(mapping, err);
}

/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
					errseq_t since)
{
	return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past.  This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
	return errseq_sample(&mapping->wb_err);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}

extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
			   int datasync);
extern int vfs_fsync(struct file *file, int datasync);

extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
				unsigned int flags);

/*
 * Sync the bytes written if this was a synchronous write.  Expects ki_pos
 * to have already been updated for the write, and returns either the number
 * of bytes passed in or an error if syncing the file failed.
 */
static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
{
	if (iocb->ki_flags & IOCB_DSYNC) {
		int ret = vfs_fsync_range(iocb->ki_filp,
				iocb->ki_pos - count, iocb->ki_pos - 1,
				(iocb->ki_flags & IOCB_SYNC) ?
0 : 1); if (ret) return ret; } return count; } extern void emergency_sync(void); extern void emergency_remount(void); #ifdef CONFIG_BLOCK extern sector_t bmap(struct inode *, sector_t); #endif extern int notify_change(struct dentry *, struct iattr *, struct inode **); extern int inode_permission(struct inode *, int); extern int generic_permission(struct inode *, int); extern int __check_sticky(struct inode *dir, struct inode *inode); static inline bool execute_ok(struct inode *inode) { return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode); } static inline void file_start_write(struct file *file) { if (!S_ISREG(file_inode(file)->i_mode)) return; __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true); } static inline bool file_start_write_trylock(struct file *file) { if (!S_ISREG(file_inode(file)->i_mode)) return true; return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false); } static inline void file_end_write(struct file *file) { if (!S_ISREG(file_inode(file)->i_mode)) return; __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); } /* * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. * This is used for regular files. * We cannot support write (and maybe mmap read-write shared) accesses and * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode * can have the following values: * 0: no writers, no VM_DENYWRITE mappings * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist * > 0: (i_writecount) users are writing to the file. * * Normally we operate on that counter with atomic_{inc,dec} and it's safe * except for the cases where we don't hold i_writecount yet. Then we need to * use {get,deny}_write_access() - these functions check the sign and refuse * to do the change if sign is wrong. */ static inline int get_write_access(struct inode *inode) { return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY; } static inline int deny_write_access(struct file *file) { struct inode *inode = file_inode(file); return atomic_dec_unless_positive(&inode->i_writecount) ? 
0 : -ETXTBSY; } static inline void put_write_access(struct inode * inode) { atomic_dec(&inode->i_writecount); } static inline void allow_write_access(struct file *file) { if (file) atomic_inc(&file_inode(file)->i_writecount); } static inline bool inode_is_open_for_write(const struct inode *inode) { return atomic_read(&inode->i_writecount) > 0; } #if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) static inline void i_readcount_dec(struct inode *inode) { BUG_ON(!atomic_read(&inode->i_readcount)); atomic_dec(&inode->i_readcount); } static inline void i_readcount_inc(struct inode *inode) { atomic_inc(&inode->i_readcount); } #else static inline void i_readcount_dec(struct inode *inode) { return; } static inline void i_readcount_inc(struct inode *inode) { return; } #endif extern int do_pipe_flags(int *, int); #define __kernel_read_file_id(id) \ id(UNKNOWN, unknown) \ id(FIRMWARE, firmware) \ id(FIRMWARE_PREALLOC_BUFFER, firmware) \ id(MODULE, kernel-module) \ id(KEXEC_IMAGE, kexec-image) \ id(KEXEC_INITRAMFS, kexec-initramfs) \ id(POLICY, security-policy) \ id(X509_CERTIFICATE, x509-certificate) \ id(MAX_ID, ) #define __fid_enumify(ENUM, dummy) READING_ ## ENUM, #define __fid_stringify(dummy, str) #str, enum kernel_read_file_id { __kernel_read_file_id(__fid_enumify) }; static const char * const kernel_read_file_str[] = { __kernel_read_file_id(__fid_stringify) }; static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) { if ((unsigned)id >= READING_MAX_ID) return kernel_read_file_str[READING_UNKNOWN]; return kernel_read_file_str[id]; } extern int kernel_read_file(struct file *, void **, loff_t *, loff_t, enum kernel_read_file_id); extern int kernel_read_file_from_path(const char *, void **, loff_t *, loff_t, enum kernel_read_file_id); extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t, enum kernel_read_file_id); extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *); extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *); extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *); extern struct file * open_exec(const char *); /* fs/dcache.c -- generic fs support functions */ extern bool is_subdir(struct dentry *, struct dentry *); extern bool path_is_under(const struct path *, const struct path *); extern char *file_path(struct file *, char *, int); #include <linux/err.h> /* needed for stackable file system support */ extern loff_t default_llseek(struct file *file, loff_t offset, int whence); extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence); extern int inode_init_always(struct super_block *, struct inode *); extern void inode_init_once(struct inode *); extern void address_space_init_once(struct address_space *mapping); extern struct inode * igrab(struct inode *); extern ino_t iunique(struct super_block *, ino_t); extern int inode_needs_sync(struct inode *inode); extern int generic_delete_inode(struct inode *inode); static inline int generic_drop_inode(struct inode *inode) { return !inode->i_nlink || inode_unhashed(inode); } extern struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), void *data); extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), void *data); extern struct inode *ilookup(struct super_block *sb, unsigned long ino); extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval, int (*test)(struct inode *, void *), int (*set)(struct 
inode *, void *), void *data); extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); extern struct inode * iget_locked(struct super_block *, unsigned long); extern struct inode *find_inode_nowait(struct super_block *, unsigned long, int (*match)(struct inode *, unsigned long, void *), void *data); extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); extern int insert_inode_locked(struct inode *); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void lockdep_annotate_inode_mutex_key(struct inode *inode); #else static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; #endif extern void unlock_new_inode(struct inode *); extern void discard_new_inode(struct inode *); extern unsigned int get_next_ino(void); extern void evict_inodes(struct super_block *sb); extern void __iget(struct inode * inode); extern void iget_failed(struct inode *); extern void clear_inode(struct inode *); extern void __destroy_inode(struct inode *); extern struct inode *new_inode_pseudo(struct super_block *sb); extern struct inode *new_inode(struct super_block *sb); extern void free_inode_nonrcu(struct inode *inode); extern int should_remove_suid(struct dentry *); extern int file_remove_privs(struct file *); extern void __insert_inode_hash(struct inode *, unsigned long hashval); static inline void insert_inode_hash(struct inode *inode) { __insert_inode_hash(inode, inode->i_ino); } extern void __remove_inode_hash(struct inode *); static inline void remove_inode_hash(struct inode *inode) { if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash)) __remove_inode_hash(inode); } extern void inode_sb_list_add(struct inode *inode); #ifdef CONFIG_BLOCK extern int bdev_read_only(struct block_device *); #endif extern int set_blocksize(struct block_device *, int); extern int sb_set_blocksize(struct super_block *, int); extern int sb_min_blocksize(struct super_block *, int); extern int generic_file_mmap(struct file *, struct vm_area_struct *); extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); extern int generic_remap_checks(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *count, unsigned int remap_flags); extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); extern int generic_copy_file_checks(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t *count, unsigned int flags); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags); ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags); /* fs/block_dev.c */ extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to); extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync); extern void block_sync_page(struct page *page); /* fs/splice.c */ extern ssize_t 
generic_file_splice_read(struct file *, loff_t *,
		struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
		struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
		struct file *out, loff_t *, size_t len, unsigned int flags);
extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		loff_t *opos, size_t len, unsigned int flags);

extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
		int whence, loff_t maxsize, loff_t eof);
extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
		int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
extern int stream_open(struct inode * inode, struct file * filp);

#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
			    loff_t file_offset);

enum {
	/* need locking between buffered and direct access */
	DIO_LOCKING	= 0x01,

	/* filesystem does not support filling holes */
	DIO_SKIP_HOLES	= 0x02,
};

void dio_end_io(struct bio *bio);
void dio_warn_stale_pagecache(struct file *filp);

ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
			     struct block_device *bdev, struct iov_iter *iter,
			     get_block_t get_block,
			     dio_iodone_t end_io, dio_submit_t submit_io,
			     int flags);

static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
					 struct inode *inode,
					 struct iov_iter *iter,
					 get_block_t get_block)
{
	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
			get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif

void inode_dio_wait(struct inode *inode);

/*
 * inode_dio_begin - signal start of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called when a direct I/O request starts: it elevates the inode's
 * i_dio_count so that inode_dio_wait() will block until the request has
 * completed.
 */
static inline void inode_dio_begin(struct inode *inode)
{
	atomic_inc(&inode->i_dio_count);
}

/*
 * inode_dio_end - signal finish of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be quiesced.
 */
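/*
 * Example (a minimal sketch, not part of the original header): operations
 * that change the file size are expected to quiesce in-flight direct I/O
 * first.  The "example_" name is a hypothetical placeholder; i_size_write()
 * is defined earlier in this header.
 */
static inline void example_shrink_isize(struct inode *inode, loff_t new_size)
{
	inode_dio_wait(inode);		/* drain i_dio_count to zero */
	i_size_write(inode, new_size);	/* now safe to publish the new size */
}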
static inline void inode_dio_end(struct inode *inode)
{
	if (atomic_dec_and_test(&inode->i_dio_count))
		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}

extern void inode_set_flags(struct inode *inode, unsigned int flags,
			    unsigned int mask);

extern const struct file_operations generic_ro_fops;

#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))

extern int readlink_copy(char __user *, int, const char *);
extern int page_readlink(struct dentry *, char __user *, int);
extern const char *page_get_link(struct dentry *, struct inode *,
				 struct delayed_call *);
extern void page_put_link(void *);
extern int __page_symlink(struct inode *inode, const char *symname, int len,
		int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
extern void generic_fillattr(struct inode *, struct kstat *);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);

static inline loff_t __inode_get_bytes(struct inode *inode)
{
	return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
}

loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
const char *simple_get_link(struct dentry *, struct inode *,
			    struct delayed_call *);
extern const struct inode_operations simple_symlink_inode_operations;

extern int iterate_dir(struct file *, struct dir_context *);

extern int vfs_statx(int, const char __user *, int, struct kstat *, u32);
extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int);

static inline int vfs_stat(const char __user *filename, struct kstat *stat)
{
	return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT,
			 stat, STATX_BASIC_STATS);
}
static inline int vfs_lstat(const char __user *name, struct kstat *stat)
{
	return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
			 stat, STATX_BASIC_STATS);
}
static inline int vfs_fstatat(int dfd, const char __user *filename,
			      struct kstat *stat, int flags)
{
	return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
			 stat, STATX_BASIC_STATS);
}
static inline int vfs_fstat(int fd, struct kstat *stat)
{
	return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0);
}

extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int vfs_readlink(struct dentry *, char __user *, int);

extern int __generic_block_fiemap(struct inode *inode,
				  struct fiemap_extent_info *fieinfo,
				  loff_t start, loff_t len,
				  get_block_t *get_block);
extern int generic_block_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo, u64 start,
				u64 len, get_block_t *get_block);

extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
extern struct super_block *get_super_thawed(struct block_device *);
extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev);
extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
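/*
 * Example (a minimal sketch, not part of the original header): resolving a
 * filesystem type by name, roughly what mount(2) does internally.
 * get_fs_type() pins the implementing module; put_filesystem() drops that
 * reference.  The "example_" name is a hypothetical placeholder.
 */
static inline bool example_fs_type_available(const char *name)
{
	struct file_system_type *type = get_fs_type(name);

	if (!type)
		return false;
	put_filesystem(type);
	return true;
}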
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
extern void iterate_supers_type(struct file_system_type *,
			        void (*)(struct super_block *, void *), void *);

extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, struct dir_context *);
extern int simple_setattr(struct dentry *, struct iattr *);
extern int simple_getattr(const struct path *, struct kstat *, u32,
			  unsigned int);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
extern int simple_rename(struct inode *, struct dentry *,
			 struct inode *, struct dentry *, unsigned int);
extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern int noop_set_page_dirty(struct page *page);
extern void noop_invalidatepage(struct page *page, unsigned int offset,
		unsigned int length);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
extern int simple_readpage(struct file *file, struct page *page);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata);
extern int simple_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata);
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
extern const struct dentry_operations simple_dentry_operations;

extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
extern const struct file_operations simple_dir_operations;
extern const struct inode_operations simple_dir_inode_operations;
extern void make_empty_dir_inode(struct inode *inode);
extern bool is_empty_dir_inode(struct inode *inode);
struct tree_descr { const char *name; const struct file_operations *ops; int mode; };
struct dentry *d_alloc_name(struct dentry *, const char *);
extern int simple_fill_super(struct super_block *, unsigned long,
			     const struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);

extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
			loff_t *ppos, const void *from, size_t available);
extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
		const void __user *from, size_t count);

extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_file_fsync(struct file *, loff_t, loff_t, int);

extern int generic_check_addressable(unsigned, u64);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_page(struct address_space *,
				struct page *, struct page *,
				enum migrate_mode);
extern int buffer_migrate_page_norefs(struct address_space *,
				struct page *, struct page *,
				enum migrate_mode);
#else
#define buffer_migrate_page NULL
#define buffer_migrate_page_norefs NULL
#endif

extern int setattr_prepare(struct dentry *, struct iattr *);
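/*
 * Example (a minimal sketch, not part of the original header): with the
 * libfs helpers above, a read-only pseudo-filesystem superblock is nearly
 * declarative.  "example_status_fops" and the magic number are hypothetical;
 * slot 1 of the tree_descr array is reserved for the root directory, and the
 * empty-name entry terminates the array.  A real filesystem would hand this
 * routine to mount_single()/mount_nodev() as the fill_super callback.
 */
extern const struct file_operations example_status_fops;	/* hypothetical */

static inline int example_fill_super(struct super_block *sb, void *data,
				     int silent)
{
	static const struct tree_descr example_files[] = {
		[2] = { "status", &example_status_fops, 0444 },
		{ "" }	/* terminator */
	};

	return simple_fill_super(sb, 0x6578616d /* hypothetical magic */,
				 example_files);
}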
extern int inode_newsize_ok(const struct inode *, loff_t offset);
extern void setattr_copy(struct inode *inode, const struct iattr *attr);

extern int file_update_time(struct file *file);

static inline bool io_is_direct(struct file *filp)
{
	return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
}

static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}

static inline bool vma_is_fsdax(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;
	if (!vma_is_dax(vma))
		return false;
	inode = file_inode(vma->vm_file);
	if (S_ISCHR(inode->i_mode))
		return false; /* device-dax */
	return true;
}

static inline int iocb_flags(struct file *file)
{
	int res = 0;
	if (file->f_flags & O_APPEND)
		res |= IOCB_APPEND;
	if (io_is_direct(file))
		res |= IOCB_DIRECT;
	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		res |= IOCB_DSYNC;
	if (file->f_flags & __O_SYNC)
		res |= IOCB_SYNC;
	return res;
}

static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
{
	if (unlikely(flags & ~RWF_SUPPORTED))
		return -EOPNOTSUPP;

	if (flags & RWF_NOWAIT) {
		if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
			return -EOPNOTSUPP;
		ki->ki_flags |= IOCB_NOWAIT;
	}
	if (flags & RWF_HIPRI)
		ki->ki_flags |= IOCB_HIPRI;
	if (flags & RWF_DSYNC)
		ki->ki_flags |= IOCB_DSYNC;
	if (flags & RWF_SYNC)
		ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
	if (flags & RWF_APPEND)
		ki->ki_flags |= IOCB_APPEND;
	return 0;
}

static inline ino_t parent_ino(struct dentry *dentry)
{
	ino_t res;

	/*
	 * Don't strictly need d_lock here? If the parent ino could change
	 * then surely we'd have a deeper race in the caller?
	 */
	spin_lock(&dentry->d_lock);
	res = dentry->d_parent->d_inode->i_ino;
	spin_unlock(&dentry->d_lock);
	return res;
}

/* Transaction based IO helpers */

/*
 * An argresp is stored in an allocated page and holds the
 * size of the argument or response, along with its content
 */
struct simple_transaction_argresp {
	ssize_t size;
	char data[0];
};

#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))

char *simple_transaction_get(struct file *file, const char __user *buf,
				size_t size);
ssize_t simple_transaction_read(struct file *file, char __user *buf,
				size_t size, loff_t *pos);
int simple_transaction_release(struct inode *inode, struct file *file);

void simple_transaction_set(struct file *file, size_t n);

/*
 * simple attribute files
 *
 * These attributes behave similarly to those in sysfs:
 *
 * Writing to an attribute immediately sets a value; an open file can be
 * written to multiple times.
 *
 * Reading from an attribute creates a buffer from the value that might get
 * read with multiple read calls.  When the attribute has been read
 * completely, no further read calls are possible until the file is opened
 * again.
 *
 * All attributes contain a text representation of a numeric value
 * that is accessed with the get() and set() functions.
 */
#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return simple_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = simple_attr_release,					\
	.read	 = simple_attr_read,					\
	.write	 = (__is_signed) ?
simple_attr_write_signed : simple_attr_write, \ .llseek = generic_file_llseek, \ } #define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false) #define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true) static inline __printf(1, 2) void __simple_attr_check_format(const char *fmt, ...) { /* don't do anything, just let the compiler check the arguments; */ } int simple_attr_open(struct inode *inode, struct file *file, int (*get)(void *, u64 *), int (*set)(void *, u64), const char *fmt); int simple_attr_release(struct inode *inode, struct file *file); ssize_t simple_attr_read(struct file *file, char __user *buf, size_t len, loff_t *ppos); ssize_t simple_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos); ssize_t simple_attr_write_signed(struct file *file, const char __user *buf, size_t len, loff_t *ppos); struct ctl_table; int proc_nr_files(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); int proc_nr_inodes(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); int __init get_filesystem_list(char *buf); #define __FMODE_EXEC ((__force int) FMODE_EXEC) #define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY) #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ (flag & __FMODE_NONOTIFY))) static inline bool is_sxid(umode_t mode) { return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); } static inline int check_sticky(struct inode *dir, struct inode *inode) { if (!(dir->i_mode & S_ISVTX)) return 0; return __check_sticky(dir, inode); } static inline void inode_has_no_xattr(struct inode *inode) { if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & SB_NOSEC)) inode->i_flags |= S_NOSEC; } static inline bool is_root_inode(struct inode *inode) { return inode == inode->i_sb->s_root->d_inode; } static inline bool dir_emit(struct dir_context *ctx, const char *name, int namelen, u64 ino, unsigned type) { return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0; } static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx) { return ctx->actor(ctx, ".", 1, ctx->pos, file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0; } static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx) { return ctx->actor(ctx, "..", 2, ctx->pos, parent_ino(file->f_path.dentry), DT_DIR) == 0; } static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx) { if (ctx->pos == 0) { if (!dir_emit_dot(file, ctx)) return false; ctx->pos = 1; } if (ctx->pos == 1) { if (!dir_emit_dotdot(file, ctx)) return false; ctx->pos = 2; } return true; } static inline bool dir_relax(struct inode *inode) { inode_unlock(inode); inode_lock(inode); return !IS_DEADDIR(inode); } static inline bool dir_relax_shared(struct inode *inode) { inode_unlock_shared(inode); inode_lock_shared(inode); return !IS_DEADDIR(inode); } extern bool path_noexec(const struct path *path); extern bool path_nosuid(const struct path *path); extern void inode_nohighmem(struct inode *inode); /* mm/fadvise.c */ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice); extern int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice); #if 
defined(CONFIG_IO_URING) bool io_is_uring_fops(struct file *file); #else static inline bool io_is_uring_fops(struct file *file) { return false; } #endif int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags, unsigned int flags); int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa, struct fsxattr *fa); static inline void simple_fill_fsxattr(struct fsxattr *fa, __u32 xflags) { memset(fa, 0, sizeof(*fa)); fa->fsx_xflags = xflags; } /* * Flush file data before changing attributes. Caller must hold any locks * required to prevent further writes to this file until we're done setting * flags. */ static inline int inode_drain_writes(struct inode *inode) { inode_dio_wait(inode); return filemap_write_and_wait(inode->i_mapping); } #endif /* _LINUX_FS_H */ aer.h 0000644 00000003354 14722070374 0005477 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2006 Intel Corp. * Tom Long Nguyen (tom.l.nguyen@intel.com) * Zhang Yanmin (yanmin.zhang@intel.com) */ #ifndef _AER_H_ #define _AER_H_ #include <linux/errno.h> #include <linux/types.h> #define AER_NONFATAL 0 #define AER_FATAL 1 #define AER_CORRECTABLE 2 #define DPC_FATAL 3 struct pci_dev; struct aer_header_log_regs { unsigned int dw0; unsigned int dw1; unsigned int dw2; unsigned int dw3; }; struct aer_capability_regs { u32 header; u32 uncor_status; u32 uncor_mask; u32 uncor_severity; u32 cor_status; u32 cor_mask; u32 cap_control; struct aer_header_log_regs header_log; u32 root_command; u32 root_status; u16 cor_err_source; u16 uncor_err_source; }; #if defined(CONFIG_PCIEAER) /* PCIe port driver needs this function to enable AER */ int pci_enable_pcie_error_reporting(struct pci_dev *dev); int pci_disable_pcie_error_reporting(struct pci_dev *dev); int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); int pci_cleanup_aer_error_status_regs(struct pci_dev *dev); #else static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) { return -EINVAL; } static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev) { return -EINVAL; } static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) { return -EINVAL; } static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) { return -EINVAL; } #endif void cper_print_aer(struct pci_dev *dev, int aer_severity, struct aer_capability_regs *aer); int cper_severity_to_aer(int cper_severity); void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, int severity, struct aer_capability_regs *aer_regs); #endif //_AER_H_ build-salt.h 0000644 00000000567 14722070374 0006773 0 ustar 00 #ifndef __BUILD_SALT_H #define __BUILD_SALT_H #include <linux/elfnote.h> #define LINUX_ELFNOTE_BUILD_SALT 0x100 #ifdef __ASSEMBLER__ #define BUILD_SALT \ ELFNOTE(Linux, LINUX_ELFNOTE_BUILD_SALT, .asciz CONFIG_BUILD_SALT) #else #define BUILD_SALT \ ELFNOTE32("Linux", LINUX_ELFNOTE_BUILD_SALT, CONFIG_BUILD_SALT) #endif #endif /* __BUILD_SALT_H */ bpf_trace.h 0000644 00000000246 14722070374 0006652 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BPF_TRACE_H__ #define __LINUX_BPF_TRACE_H__ #include <trace/events/xdp.h> #endif /* __LINUX_BPF_TRACE_H__ */ bcm47xx_sprom.h 0000644 00000000602 14722070374 0007435 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* */ #ifndef __BCM47XX_SPROM_H #define __BCM47XX_SPROM_H #include <linux/types.h> #include <linux/kernel.h> #include <linux/vmalloc.h> #ifdef CONFIG_BCM47XX_SPROM int bcm47xx_sprom_register_fallbacks(void); #else static inline int 
bcm47xx_sprom_register_fallbacks(void) { return -ENOTSUPP; }; #endif #endif /* __BCM47XX_SPROM_H */ pci-ats.h 0000644 00000003025 14722070374 0006263 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_PCI_ATS_H #define LINUX_PCI_ATS_H #include <linux/pci.h> #ifdef CONFIG_PCI_PRI int pci_enable_pri(struct pci_dev *pdev, u32 reqs); void pci_disable_pri(struct pci_dev *pdev); void pci_restore_pri_state(struct pci_dev *pdev); int pci_reset_pri(struct pci_dev *pdev); #else /* CONFIG_PCI_PRI */ static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs) { return -ENODEV; } static inline void pci_disable_pri(struct pci_dev *pdev) { } static inline void pci_restore_pri_state(struct pci_dev *pdev) { } static inline int pci_reset_pri(struct pci_dev *pdev) { return -ENODEV; } #endif /* CONFIG_PCI_PRI */ #ifdef CONFIG_PCI_PASID int pci_enable_pasid(struct pci_dev *pdev, int features); void pci_disable_pasid(struct pci_dev *pdev); void pci_restore_pasid_state(struct pci_dev *pdev); int pci_pasid_features(struct pci_dev *pdev); int pci_max_pasids(struct pci_dev *pdev); int pci_prg_resp_pasid_required(struct pci_dev *pdev); #else /* CONFIG_PCI_PASID */ static inline int pci_enable_pasid(struct pci_dev *pdev, int features) { return -EINVAL; } static inline void pci_disable_pasid(struct pci_dev *pdev) { } static inline void pci_restore_pasid_state(struct pci_dev *pdev) { } static inline int pci_pasid_features(struct pci_dev *pdev) { return -EINVAL; } static inline int pci_max_pasids(struct pci_dev *pdev) { return -EINVAL; } static inline int pci_prg_resp_pasid_required(struct pci_dev *pdev) { return 0; } #endif /* CONFIG_PCI_PASID */ #endif /* LINUX_PCI_ATS_H*/ kvm_para.h 0000644 00000000622 14722070374 0006523 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_KVM_PARA_H #define __LINUX_KVM_PARA_H #include <uapi/linux/kvm_para.h> static inline bool kvm_para_has_feature(unsigned int feature) { return !!(kvm_arch_para_features() & (1UL << feature)); } static inline bool kvm_para_has_hint(unsigned int feature) { return !!(kvm_arch_para_hints() & (1UL << feature)); } #endif /* __LINUX_KVM_PARA_H */ if_bridge.h 0000644 00000007346 14722070374 0006647 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> */ #ifndef _LINUX_IF_BRIDGE_H #define _LINUX_IF_BRIDGE_H #include <linux/netdevice.h> #include <uapi/linux/if_bridge.h> #include <linux/bitops.h> struct br_ip { union { __be32 ip4; #if IS_ENABLED(CONFIG_IPV6) struct in6_addr ip6; #endif } u; __be16 proto; __u16 vid; }; struct br_ip_list { struct list_head list; struct br_ip addr; }; #define BR_HAIRPIN_MODE BIT(0) #define BR_BPDU_GUARD BIT(1) #define BR_ROOT_BLOCK BIT(2) #define BR_MULTICAST_FAST_LEAVE BIT(3) #define BR_ADMIN_COST BIT(4) #define BR_LEARNING BIT(5) #define BR_FLOOD BIT(6) #define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) #define BR_PROMISC BIT(7) #define BR_PROXYARP BIT(8) #define BR_LEARNING_SYNC BIT(9) #define BR_PROXYARP_WIFI BIT(10) #define BR_MCAST_FLOOD BIT(11) #define BR_MULTICAST_TO_UNICAST BIT(12) #define BR_VLAN_TUNNEL BIT(13) #define BR_BCAST_FLOOD BIT(14) #define BR_NEIGH_SUPPRESS BIT(15) #define BR_ISOLATED BIT(16) #define BR_DEFAULT_AGEING_TIME (300 * HZ) extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); bool 
br_multicast_has_querier_anywhere(struct net_device *dev, int proto); bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); bool br_multicast_enabled(const struct net_device *dev); bool br_multicast_router(const struct net_device *dev); #else static inline int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list) { return 0; } static inline bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto) { return false; } static inline bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto) { return false; } static inline bool br_multicast_enabled(const struct net_device *dev) { return false; } static inline bool br_multicast_router(const struct net_device *dev) { return false; } #endif #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) bool br_vlan_enabled(const struct net_device *dev); int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid); int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid); int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto); int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); #else static inline bool br_vlan_enabled(const struct net_device *dev) { return false; } static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) { return -EINVAL; } static inline int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto) { return -EINVAL; } static inline int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid) { return -EINVAL; } static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo) { return -EINVAL; } #endif #if IS_ENABLED(CONFIG_BRIDGE) struct net_device *br_fdb_find_port(const struct net_device *br_dev, const unsigned char *addr, __u16 vid); void br_fdb_clear_offload(const struct net_device *dev, u16 vid); bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag); #else static inline struct net_device * br_fdb_find_port(const struct net_device *br_dev, const unsigned char *addr, __u16 vid) { return NULL; } static inline void br_fdb_clear_offload(const struct net_device *dev, u16 vid) { } static inline bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag) { return false; } #endif #endif fsi.h 0000644 00000004416 14722070374 0005511 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* FSI device & driver interfaces * * Copyright (C) IBM Corporation 2016 */ #ifndef LINUX_FSI_H #define LINUX_FSI_H #include <linux/device.h> struct fsi_device { struct device dev; u8 engine_type; u8 version; u8 unit; struct fsi_slave *slave; uint32_t addr; uint32_t size; }; extern int fsi_device_read(struct fsi_device *dev, uint32_t addr, void *val, size_t size); extern int fsi_device_write(struct fsi_device *dev, uint32_t addr, const void *val, size_t size); extern int fsi_device_peek(struct fsi_device *dev, void *val); struct fsi_device_id { u8 engine_type; u8 version; }; #define FSI_VERSION_ANY 0 #define FSI_DEVICE(t) \ .engine_type = (t), .version = FSI_VERSION_ANY, #define FSI_DEVICE_VERSIONED(t, v) \ .engine_type = (t), .version = (v), struct fsi_driver { struct device_driver drv; const struct fsi_device_id *id_table; }; #define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev) #define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv) extern int fsi_driver_register(struct fsi_driver *fsi_drv); extern void fsi_driver_unregister(struct fsi_driver *fsi_drv); /* 
module_fsi_driver() - Helper macro for drivers that don't do * anything special in module init/exit. This eliminates a lot of * boilerplate. Each module may only use this macro once, and * calling it replaces module_init() and module_exit() */ #define module_fsi_driver(__fsi_driver) \ module_driver(__fsi_driver, fsi_driver_register, \ fsi_driver_unregister) /* direct slave API */ extern int fsi_slave_claim_range(struct fsi_slave *slave, uint32_t addr, uint32_t size); extern void fsi_slave_release_range(struct fsi_slave *slave, uint32_t addr, uint32_t size); extern int fsi_slave_read(struct fsi_slave *slave, uint32_t addr, void *val, size_t size); extern int fsi_slave_write(struct fsi_slave *slave, uint32_t addr, const void *val, size_t size); extern struct bus_type fsi_bus_type; extern const struct device_type fsi_cdev_type; enum fsi_dev_type { fsi_dev_cfam, fsi_dev_sbefifo, fsi_dev_scom, fsi_dev_occ }; extern int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type, dev_t *out_dev, int *out_index); extern void fsi_free_minor(dev_t dev); #endif /* LINUX_FSI_H */ memory_hotplug.h 0000644 00000025731 14722070374 0010005 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MEMORY_HOTPLUG_H #define __LINUX_MEMORY_HOTPLUG_H #include <linux/mmzone.h> #include <linux/spinlock.h> #include <linux/notifier.h> #include <linux/bug.h> struct page; struct zone; struct pglist_data; struct mem_section; struct memory_block; struct resource; struct vmem_altmap; #ifdef CONFIG_MEMORY_HOTPLUG /* * Return the page for a valid pfn only if the page is online. All pfn * walkers which rely on the fully initialized page->flags and others * should use this rather than pfn_valid && pfn_to_page */ #define pfn_to_online_page(pfn) \ ({ \ struct page *___page = NULL; \ unsigned long ___pfn = pfn; \ unsigned long ___nr = pfn_to_section_nr(___pfn); \ \ if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \ pfn_valid_within(___pfn)) \ ___page = pfn_to_page(___pfn); \ ___page; \ }) /* * Types for free bootmem stored in page->lru.next. These have to be in * some random range in unsigned long space for debugging purposes. */ enum { MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, MIX_SECTION_INFO, NODE_INFO, MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO, }; /* Types controlling the zone type of onlined and offlined memory */ enum { MMOP_OFFLINE = -1, MMOP_ONLINE_KEEP, MMOP_ONLINE_KERNEL, MMOP_ONLINE_MOVABLE, }; /* * Restrictions for the memory hotplug: * flags: MHP_ flags * altmap: alternative allocator for memmap array */ struct mhp_restrictions { unsigned long flags; struct vmem_altmap *altmap; }; /* * Zone resizing functions * * Note: any attempt to resize a zone should hold both pgdat_resize_lock() * and zone_span_writelock(). This ensures the size of a zone * can't be changed while pgdat_resize_lock() is held.
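 *
 * An illustrative locking sketch (not part of this header; shown only to
 * make the rule above concrete), using the helpers declared in this file:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(zone->zone_pgdat, &flags);
 *	zone_span_writelock(zone);
 *	... update zone->zone_start_pfn / zone->spanned_pages ...
 *	zone_span_writeunlock(zone);
 *	pgdat_resize_unlock(zone->zone_pgdat, &flags);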
*/ static inline unsigned zone_span_seqbegin(struct zone *zone) { return read_seqbegin(&zone->span_seqlock); } static inline int zone_span_seqretry(struct zone *zone, unsigned iv) { return read_seqretry(&zone->span_seqlock, iv); } static inline void zone_span_writelock(struct zone *zone) { write_seqlock(&zone->span_seqlock); } static inline void zone_span_writeunlock(struct zone *zone) { write_sequnlock(&zone->span_seqlock); } static inline void zone_seqlock_init(struct zone *zone) { seqlock_init(&zone->span_seqlock); } extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); /* VM interface that may be used by firmware interface */ extern int online_pages(unsigned long, unsigned long, int); extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, unsigned long *valid_start, unsigned long *valid_end); extern unsigned long __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn); typedef void (*online_page_callback_t)(struct page *page, unsigned int order); extern int set_online_page_callback(online_page_callback_t callback); extern int restore_online_page_callback(online_page_callback_t callback); extern void __online_page_set_limits(struct page *page); extern void __online_page_increment_counters(struct page *page); extern void __online_page_free(struct page *page); extern int try_online_node(int nid); extern int arch_add_memory(int nid, u64 start, u64 size, struct mhp_restrictions *restrictions); extern u64 max_mem_size; extern bool memhp_auto_online; /* If movable_node boot option specified */ extern bool movable_node_enabled; static inline bool movable_node_is_enabled(void) { return movable_node_enabled; } extern void arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap); extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); /* reasonably generic interface to expand the physical pages */ extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct mhp_restrictions *restrictions); #ifndef CONFIG_ARCH_HAS_ADD_PAGES static inline int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct mhp_restrictions *restrictions) { return __add_pages(nid, start_pfn, nr_pages, restrictions); } #else /* ARCH_HAS_ADD_PAGES */ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct mhp_restrictions *restrictions); #endif /* ARCH_HAS_ADD_PAGES */ #ifdef CONFIG_NUMA extern int memory_add_physaddr_to_nid(u64 start); #else static inline int memory_add_physaddr_to_nid(u64 start) { return 0; } #endif #ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION /* * For supporting node-hotadd, we have to allocate a new pgdat. * * If an arch has generic style NODE_DATA(), * node_data[nid] = kzalloc() works well. But it depends on the architecture. * * In general, generic_alloc_nodedata() is used. * Now, arch_free_nodedata() is just defined for error path of node_hot_add. 
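 *
 * A rough node hot-add flow (an illustrative sketch, not a verbatim call
 * sequence from mm code):
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *	if (!pgdat)
 *		return -ENOMEM;
 *	arch_refresh_nodedata(nid, pgdat);
 *	...
 *	arch_free_nodedata(pgdat);	(error path only)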
* */ extern pg_data_t *arch_alloc_nodedata(int nid); extern void arch_free_nodedata(pg_data_t *pgdat); extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); #else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ #define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) #define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat) #ifdef CONFIG_NUMA /* * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate the pgdat. * XXX: kmalloc_node() can't be used here to get the new node's memory, * because the pgdat for the new node is not itself allocated/initialized yet. * Using the new node's own memory will need more consideration. */ #define generic_alloc_nodedata(nid) \ ({ \ kzalloc(sizeof(pg_data_t), GFP_KERNEL); \ }) /* * This definition is just for the error path in node hotadd. * For node hotremove, we have to replace this. */ #define generic_free_nodedata(pgdat) kfree(pgdat) extern pg_data_t *node_data[]; static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) { node_data[nid] = pgdat; } #else /* !CONFIG_NUMA */ /* never called */ static inline pg_data_t *generic_alloc_nodedata(int nid) { BUG(); return NULL; } static inline void generic_free_nodedata(pg_data_t *pgdat) { } static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) { } #endif /* CONFIG_NUMA */ #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat); #else static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) { } #endif extern void put_page_bootmem(struct page *page); extern void get_page_bootmem(unsigned long info, struct page *page, unsigned long type); void get_online_mems(void); void put_online_mems(void); void mem_hotplug_begin(void); void mem_hotplug_done(void); extern void set_zone_contiguous(struct zone *zone); extern void clear_zone_contiguous(struct zone *zone); void set_default_mem_hotplug_zone(enum zone_type zone); #else /* ! CONFIG_MEMORY_HOTPLUG */ #define pfn_to_online_page(pfn) \ ({ \ struct page *___page = NULL; \ if (pfn_valid(pfn)) \ ___page = pfn_to_page(pfn); \ ___page; \ }) static inline unsigned zone_span_seqbegin(struct zone *zone) { return 0; } static inline int zone_span_seqretry(struct zone *zone, unsigned iv) { return 0; } static inline void zone_span_writelock(struct zone *zone) {} static inline void zone_span_writeunlock(struct zone *zone) {} static inline void zone_seqlock_init(struct zone *zone) {} static inline int mhp_notimplemented(const char *func) { printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func); dump_stack(); return -ENOSYS; } static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) { } static inline int try_online_node(int nid) { return 0; } static inline void get_online_mems(void) {} static inline void put_online_mems(void) {} static inline void mem_hotplug_begin(void) {} static inline void mem_hotplug_done(void) {} static inline void set_default_mem_hotplug_zone(enum zone_type zone) {} static inline bool movable_node_is_enabled(void) { return false; } #endif /* !
CONFIG_MEMORY_HOTPLUG */ #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) /* * pgdat resizing functions */ static inline void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) { spin_lock_irqsave(&pgdat->node_size_lock, *flags); } static inline void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) { spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); } static inline void pgdat_resize_init(struct pglist_data *pgdat) { spin_lock_init(&pgdat->node_size_lock); } #else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ /* * Stub functions for when hotplug is off */ static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} static inline void pgdat_resize_init(struct pglist_data *pgdat) {} #endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ #ifdef CONFIG_MEMORY_HOTREMOVE extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern int remove_memory(int nid, u64 start, u64 size); extern void __remove_memory(int nid, u64 start, u64 size); #else static inline bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages) { return false; } static inline void try_offline_node(int nid) {} static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) { return -EINVAL; } static inline int remove_memory(int nid, u64 start, u64 size) { return -EBUSY; } static inline void __remove_memory(int nid, u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ extern void __ref free_area_init_core_hotplug(int nid); extern int __add_memory(int nid, u64 start, u64 size); extern int add_memory(int nid, u64 start, u64 size); extern int add_memory_resource(int nid, struct resource *resource); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); extern void remove_pfn_range_from_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages); extern bool is_memblock_offlined(struct memory_block *mem); extern int sparse_add_section(int nid, unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap); extern void sparse_remove_section(struct mem_section *ms, unsigned long pfn, unsigned long nr_pages, unsigned long map_offset, struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, int online_type); extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned long start_pfn, unsigned long nr_pages); #endif /* __LINUX_MEMORY_HOTPLUG_H */ selection.h 0000644 00000003366 14722070374 0006720 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * selection.h * * Interface between console.c, tty_io.c, vt.c, vc_screen.c and selection.c */ #ifndef _LINUX_SELECTION_H_ #define _LINUX_SELECTION_H_ #include <linux/tiocl.h> #include <linux/vt_buffer.h> struct tty_struct; struct vc_data; extern void clear_selection(void); extern int set_selection_user(const struct tiocl_selection __user *sel, struct tty_struct *tty); extern int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty); extern int paste_selection(struct tty_struct *tty); extern int sel_loadlut(char __user *p); extern 
int mouse_reporting(void); extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry); bool vc_is_sel(struct vc_data *vc); extern int console_blanked; extern const unsigned char color_table[]; extern unsigned char default_red[]; extern unsigned char default_grn[]; extern unsigned char default_blu[]; extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed); extern u16 screen_glyph(struct vc_data *vc, int offset); extern u32 screen_glyph_unicode(struct vc_data *vc, int offset); extern void complement_pos(struct vc_data *vc, int offset); extern void invert_screen(struct vc_data *vc, int offset, int count, int shift); extern void getconsxy(struct vc_data *vc, unsigned char *p); extern void putconsxy(struct vc_data *vc, unsigned char *p); extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org); extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org); extern void vcs_scr_updated(struct vc_data *vc); extern int vc_uniscr_check(struct vc_data *vc); extern void vc_uniscr_copy_line(struct vc_data *vc, void *dest, int viewed, unsigned int row, unsigned int col, unsigned int nr); #endif bpfilter.h 0000644 00000001330 14722070374 0006527 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BPFILTER_H #define _LINUX_BPFILTER_H #include <uapi/linux/bpfilter.h> #include <linux/umh.h> struct sock; int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen); int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen); struct bpfilter_umh_ops { struct umh_info info; /* since ip_getsockopt() can run in parallel, serialize access to umh */ struct mutex lock; int (*sockopt)(struct sock *sk, int optname, char __user *optval, unsigned int optlen, bool is_set); int (*start)(void); bool stop; }; extern struct bpfilter_umh_ops bpfilter_ops; #endif virtio_net.h 0000644 00000012764 14722070374 0007117 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_VIRTIO_NET_H #define _LINUX_VIRTIO_NET_H #include <linux/if_vlan.h> #include <uapi/linux/tcp.h> #include <uapi/linux/udp.h> #include <uapi/linux/virtio_net.h> static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type) { switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: return protocol == cpu_to_be16(ETH_P_IP); case VIRTIO_NET_HDR_GSO_TCPV6: return protocol == cpu_to_be16(ETH_P_IPV6); case VIRTIO_NET_HDR_GSO_UDP: return protocol == cpu_to_be16(ETH_P_IP) || protocol == cpu_to_be16(ETH_P_IPV6); default: return false; } } static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, const struct virtio_net_hdr *hdr) { if (skb->protocol) return 0; switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: case VIRTIO_NET_HDR_GSO_UDP: skb->protocol = cpu_to_be16(ETH_P_IP); break; case VIRTIO_NET_HDR_GSO_TCPV6: skb->protocol = cpu_to_be16(ETH_P_IPV6); break; default: return -EINVAL; } return 0; } static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, const struct virtio_net_hdr *hdr, bool little_endian) { unsigned int gso_type = 0; unsigned int thlen = 0; unsigned int p_off = 0; unsigned int ip_proto; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: gso_type = SKB_GSO_TCPV4; ip_proto = IPPROTO_TCP; thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_TCPV6: gso_type = SKB_GSO_TCPV6; ip_proto = IPPROTO_TCP; thlen = sizeof(struct tcphdr); break; case 
VIRTIO_NET_HDR_GSO_UDP: gso_type = SKB_GSO_UDP; ip_proto = IPPROTO_UDP; thlen = sizeof(struct udphdr); break; default: return -EINVAL; } if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) gso_type |= SKB_GSO_TCP_ECN; if (hdr->gso_size == 0) return -EINVAL; } skb_reset_mac_header(skb); if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start); u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16)); if (!pskb_may_pull(skb, needed)) return -EINVAL; if (!skb_partial_csum_set(skb, start, off)) return -EINVAL; p_off = skb_transport_offset(skb) + thlen; if (!pskb_may_pull(skb, p_off)) return -EINVAL; } else { /* GSO packets without NEEDS_CSUM do not set transport_offset; * probe and drop if it does not match one of the above types. */ if (gso_type && skb->network_header) { struct flow_keys_basic keys; if (!skb->protocol) { __be16 protocol = dev_parse_header_protocol(skb); if (!protocol) virtio_net_hdr_set_proto(skb, hdr); else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type)) return -EINVAL; else skb->protocol = protocol; } retry: if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, NULL, 0, 0, 0, 0)) { /* UFO does not specify ipv4 or 6: try both */ if (gso_type & SKB_GSO_UDP && skb->protocol == htons(ETH_P_IP)) { skb->protocol = htons(ETH_P_IPV6); goto retry; } return -EINVAL; } p_off = keys.control.thoff + thlen; if (!pskb_may_pull(skb, p_off) || keys.basic.ip_proto != ip_proto) return -EINVAL; skb_set_transport_header(skb, keys.control.thoff); } else if (gso_type) { p_off = thlen; if (!pskb_may_pull(skb, p_off)) return -EINVAL; } } if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); unsigned int nh_off = p_off; struct skb_shared_info *shinfo = skb_shinfo(skb); /* UFO may not include the transport header in gso_size. */ if (gso_type & SKB_GSO_UDP) nh_off -= thlen; /* The kernel has special handling for GSO_BY_FRAGS. */ if (gso_size == GSO_BY_FRAGS) return -EINVAL; /* Packets that are too small are not really GSO ones. */ if (skb->len - nh_off > gso_size) { shinfo->gso_size = gso_size; shinfo->gso_type = gso_type; /* Header must be checked, and gso_segs computed. */ shinfo->gso_type |= SKB_GSO_DODGY; shinfo->gso_segs = 0; } } return 0; } static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, struct virtio_net_hdr *hdr, bool little_endian, bool has_data_valid, int vlan_hlen) { memset(hdr, 0, sizeof(*hdr)); /* no info leak */ if (skb_is_gso(skb)) { struct skb_shared_info *sinfo = skb_shinfo(skb); /* This is a hint as to how much should be linear.
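 * Here that is simply the currently linear portion, skb_headlen(skb),
 * which is what gets stored in hdr->hdr_len just below.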
*/ hdr->hdr_len = __cpu_to_virtio16(little_endian, skb_headlen(skb)); hdr->gso_size = __cpu_to_virtio16(little_endian, sinfo->gso_size); if (sinfo->gso_type & SKB_GSO_TCPV4) hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else return -EINVAL; if (sinfo->gso_type & SKB_GSO_TCP_ECN) hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; if (skb->ip_summed == CHECKSUM_PARTIAL) { hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->csum_start = __cpu_to_virtio16(little_endian, skb_checksum_start_offset(skb) + vlan_hlen); hdr->csum_offset = __cpu_to_virtio16(little_endian, skb->csum_offset); } else if (has_data_valid && skb->ip_summed == CHECKSUM_UNNECESSARY) { hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; } /* else everything is zero */ return 0; } #endif /* _LINUX_VIRTIO_NET_H */ fwnode.h 0000644 00000010652 14722070374 0006211 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * fwnode.h - Firmware device node object handle type definition. * * Copyright (C) 2015, Intel Corporation * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> */ #ifndef _LINUX_FWNODE_H_ #define _LINUX_FWNODE_H_ #include <linux/types.h> struct fwnode_operations; struct device; struct fwnode_handle { struct fwnode_handle *secondary; const struct fwnode_operations *ops; }; /** * struct fwnode_endpoint - Fwnode graph endpoint * @port: Port number * @id: Endpoint id * @local_fwnode: reference to the related fwnode */ struct fwnode_endpoint { unsigned int port; unsigned int id; const struct fwnode_handle *local_fwnode; }; #define NR_FWNODE_REFERENCE_ARGS 8 /** * struct fwnode_reference_args - Fwnode reference with additional arguments * @fwnode: A reference to the base fwnode * @nargs: Number of elements in @args array * @args: Integer arguments on the fwnode */ struct fwnode_reference_args { struct fwnode_handle *fwnode; unsigned int nargs; u64 args[NR_FWNODE_REFERENCE_ARGS]; }; /** * struct fwnode_operations - Operations for fwnode interface * @get: Get a reference to an fwnode. * @put: Put a reference to an fwnode. * @device_is_available: Return true if the device is available. * @device_get_match_data: Return the device driver match data. * @property_present: Return true if a property is present. * @property_read_int_array: Read an array of integer properties. Return * zero on success, a negative error code * otherwise. * @property_read_string_array: Read an array of string properties. Return zero * on success, a negative error code otherwise. * @get_parent: Return the parent of an fwnode. * @get_next_child_node: Return the next child node in an iteration. * @get_named_child_node: Return a child node with a given name. * @get_reference_args: Return a reference pointed to by a property, with args * @graph_get_next_endpoint: Return an endpoint node in an iteration. * @graph_get_remote_endpoint: Return the remote endpoint node of a local * endpoint node. * @graph_get_port_parent: Return the parent node of a port node. * @graph_parse_endpoint: Parse endpoint for port and endpoint id.
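 *
 * Illustrative consumer-side usage (assumed; the property name "reg" is
 * hypothetical): these ops are normally reached through the
 * fwnode_call_*_op() wrappers defined at the end of this file rather than
 * being called directly:
 *
 *	bool present = fwnode_call_bool_op(fwnode, property_present, "reg");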
*/ struct fwnode_operations { struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); void (*put)(struct fwnode_handle *fwnode); bool (*device_is_available)(const struct fwnode_handle *fwnode); const void *(*device_get_match_data)(const struct fwnode_handle *fwnode, const struct device *dev); bool (*property_present)(const struct fwnode_handle *fwnode, const char *propname); int (*property_read_int_array)(const struct fwnode_handle *fwnode, const char *propname, unsigned int elem_size, void *val, size_t nval); int (*property_read_string_array)(const struct fwnode_handle *fwnode_handle, const char *propname, const char **val, size_t nval); struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode); struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *fwnode, struct fwnode_handle *child); struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *fwnode, const char *name); int (*get_reference_args)(const struct fwnode_handle *fwnode, const char *prop, const char *nargs_prop, unsigned int nargs, unsigned int index, struct fwnode_reference_args *args); struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *fwnode, struct fwnode_handle *prev); struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *fwnode); struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *fwnode); int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint); }; #define fwnode_has_op(fwnode, op) \ ((fwnode) && (fwnode)->ops && (fwnode)->ops->op) #define fwnode_call_int_op(fwnode, op, ...) \ (fwnode ? (fwnode_has_op(fwnode, op) ? \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \ -EINVAL) #define fwnode_call_bool_op(fwnode, op, ...) \ (fwnode_has_op(fwnode, op) ? \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) #define fwnode_call_ptr_op(fwnode, op, ...) \ (fwnode_has_op(fwnode, op) ? \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : NULL) #define fwnode_call_void_op(fwnode, op, ...) \ do { \ if (fwnode_has_op(fwnode, op)) \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__); \ } while (false) #endif oprofile.h 0000644 00000014146 14722070374 0006550 0 ustar 00 /** * @file oprofile.h * * API for machine-specific interrupts to interface * to oprofile. * * @remark Copyright 2002 OProfile authors * @remark Read the file COPYING * * @author John Levon <levon@movementarian.org> */ #ifndef OPROFILE_H #define OPROFILE_H #include <linux/types.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/printk.h> #include <linux/atomic.h> /* Each escaped entry is prefixed by ESCAPE_CODE * then one of the following codes, then the * relevant data. * These #defines live in this file so that arch-specific * buffer sync'ing code can access them. */ #define ESCAPE_CODE ~0UL #define CTX_SWITCH_CODE 1 #define CPU_SWITCH_CODE 2 #define COOKIE_SWITCH_CODE 3 #define KERNEL_ENTER_SWITCH_CODE 4 #define KERNEL_EXIT_SWITCH_CODE 5 #define MODULE_LOADED_CODE 6 #define CTX_TGID_CODE 7 #define TRACE_BEGIN_CODE 8 #define TRACE_END_CODE 9 #define XEN_ENTER_SWITCH_CODE 10 #define SPU_PROFILING_CODE 11 #define SPU_CTX_SWITCH_CODE 12 #define IBS_FETCH_CODE 13 #define IBS_OP_CODE 14 struct dentry; struct file_operations; struct pt_regs; /* Operations structure to be filled in */ struct oprofile_operations { /* create any necessary configuration files in the oprofile fs. * Optional. */ int (*create_files)(struct dentry * root); /* Do any necessary interrupt setup. Optional. 
*/ int (*setup)(void); /* Do any necessary interrupt shutdown. Optional. */ void (*shutdown)(void); /* Start delivering interrupts. */ int (*start)(void); /* Stop delivering interrupts. */ void (*stop)(void); /* Arch-specific buffer sync functions. * Return value = 0: Success * Return value = -1: Failure * Return value = 1: Run generic sync function */ int (*sync_start)(void); int (*sync_stop)(void); /* Initiate a stack backtrace. Optional. */ void (*backtrace)(struct pt_regs * const regs, unsigned int depth); /* Multiplex between different events. Optional. */ int (*switch_events)(void); /* CPU identification string. */ char * cpu_type; }; /** * One-time initialisation. *ops must be set to a filled-in * operations structure. This is called even in timer interrupt * mode so an arch can set a backtrace callback. * * If an error occurs, the fields should be left untouched. */ int oprofile_arch_init(struct oprofile_operations * ops); /** * One-time exit/cleanup for the arch. */ void oprofile_arch_exit(void); /** * Add a sample. This may be called from any context. */ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); /** * Add an extended sample. Use this when the PC is not from the regs, and * we cannot determine if we're in kernel mode from the regs. * * This function does perform a backtrace. * */ void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel); /** * Add a hardware sample. */ void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel, struct task_struct *task); /* Use this instead when the PC value is not from the regs. Doesn't * backtrace. */ void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event); /* add a backtrace entry, to be called from the ->backtrace callback */ void oprofile_add_trace(unsigned long eip); /** * Create a file of the given name as a child of the given root, with * the specified file operations. */ int oprofilefs_create_file(struct dentry * root, char const * name, const struct file_operations * fops); int oprofilefs_create_file_perm(struct dentry * root, char const * name, const struct file_operations * fops, int perm); /** Create a file for read/write access to an unsigned long. */ int oprofilefs_create_ulong(struct dentry * root, char const * name, ulong * val); /** Create a file for read-only access to an unsigned long. */ int oprofilefs_create_ro_ulong(struct dentry * root, char const * name, ulong * val); /** Create a file for read-only access to an atomic_t. */ int oprofilefs_create_ro_atomic(struct dentry * root, char const * name, atomic_t * val); /** create a directory */ struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name); /** * Write the given asciz string to the given user buffer @buf, updating *offset * appropriately. Returns bytes written or -EFAULT. */ ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset); /** * Convert an unsigned long value into ASCII and copy it to the user buffer @buf, * updating *offset appropriately. Returns bytes written or -EFAULT. */ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset); /** * Read an ASCII string for a number from a userspace buffer and fill *val on success. * Returns 0 on success, < 0 on error.
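 *
 * A hypothetical write handler sketch (the file, handler, and variable
 * names here are illustrative, not part of this API):
 *
 *	static ssize_t depth_write(struct file *file, char const __user *buf,
 *				   size_t count, loff_t *offset)
 *	{
 *		unsigned long val;
 *		int retval = oprofilefs_ulong_from_user(&val, buf, count);
 *
 *		if (retval)
 *			return retval;
 *		backtrace_depth = val;	(illustrative consumer)
 *		return count;
 *	}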
*/ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count); /** lock for read/write safety */ extern raw_spinlock_t oprofilefs_lock; /** * Add the contents of a circular buffer to the event buffer. */ void oprofile_put_buff(unsigned long *buf, unsigned int start, unsigned int stop, unsigned int max); unsigned long oprofile_get_cpu_buffer_size(void); void oprofile_cpu_buffer_inc_smpl_lost(void); /* cpu buffer functions */ struct op_sample; struct op_entry { struct ring_buffer_event *event; struct op_sample *sample; unsigned long size; unsigned long *data; }; void oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, unsigned long pc, int code, int size); int oprofile_add_data(struct op_entry *entry, unsigned long val); int oprofile_add_data64(struct op_entry *entry, u64 val); int oprofile_write_commit(struct op_entry *entry); #ifdef CONFIG_HW_PERF_EVENTS int __init oprofile_perf_init(struct oprofile_operations *ops); void oprofile_perf_exit(void); char *op_name_from_perf_id(void); #else static inline int __init oprofile_perf_init(struct oprofile_operations *ops) { pr_info("oprofile: hardware counters not available\n"); return -ENODEV; } static inline void oprofile_perf_exit(void) { } #endif /* CONFIG_HW_PERF_EVENTS */ #endif /* OPROFILE_H */ swait.h 0000644 00000024054 14722070374 0006057 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SWAIT_H #define _LINUX_SWAIT_H #include <linux/list.h> #include <linux/stddef.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <asm/current.h> /* * BROKEN wait-queues. * * These "simple" wait-queues are broken garbage, and should never be * used. The comments below claim that they are "similar" to regular * wait-queues, but the semantics are actually completely different, and * every single user we have ever had has been buggy (or pointless). * * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what * "wake_up()" does, and has led to problems. In other cases, it has * been fine, because there's only ever one waiter (kvm), but in that * case the whole "simple" wait-queue is just pointless to begin with, * since there is no "queue". Use "wake_up_process()" with a direct * pointer instead. * * While these are very similar to regular wait queues (wait.h) the most * important difference is that the simple waitqueue allows for deterministic * behaviour -- IOW it has strictly bounded IRQ and lock hold times. * * Mainly, this is accomplished by two things. Firstly, by not allowing * swake_up_all from IRQ-disabled context, and by dropping the lock upon * every wakeup, giving a higher priority task a chance to run. * * Secondly, we had to drop a fair number of features of the other waitqueue * code; notably: * * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right * sleeper state. * * - the !exclusive mode; because that leads to O(n) wakeups, everything is * exclusive. * * - custom wake callback functions; because you cannot give any guarantees * about random code. This also allows swait to be used in RT, such that a * raw spinlock can be used for the swait queue head. * * As a side effect of these, the data structures are slimmer albeit more ad-hoc. * For all the above, note that simple wait queues should _only_ be used under * very specific realtime constraints -- it is best to stick with the regular * wait queues in most cases.
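 *
 * Minimal usage sketch (illustrative only, built from the API declared
 * below):
 *
 *	static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
 *	static bool done;
 *
 *	The waiter sleeps until the condition holds:
 *
 *		swait_event_exclusive(my_wq, READ_ONCE(done));
 *
 *	The waker publishes the condition, then wakes one waiter:
 *
 *		WRITE_ONCE(done, true);
 *		swake_up_one(&my_wq);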
*/ struct task_struct; struct swait_queue_head { raw_spinlock_t lock; struct list_head task_list; }; struct swait_queue { struct task_struct *task; struct list_head task_list; }; #define __SWAITQUEUE_INITIALIZER(name) { \ .task = current, \ .task_list = LIST_HEAD_INIT((name).task_list), \ } #define DECLARE_SWAITQUEUE(name) \ struct swait_queue name = __SWAITQUEUE_INITIALIZER(name) #define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ .task_list = LIST_HEAD_INIT((name).task_list), \ } #define DECLARE_SWAIT_QUEUE_HEAD(name) \ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name) extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name, struct lock_class_key *key); #define init_swait_queue_head(q) \ do { \ static struct lock_class_key __key; \ __init_swait_queue_head((q), #q, &__key); \ } while (0) #ifdef CONFIG_LOCKDEP # define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ ({ init_swait_queue_head(&name); name; }) # define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) #else # define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ DECLARE_SWAIT_QUEUE_HEAD(name) #endif /** * swait_active -- locklessly test for waiters on the queue * @wq: the waitqueue to test for waiters * * returns true if the wait list is not empty * * NOTE: this function is lockless and requires care, incorrect usage _will_ * lead to sporadic and non-obvious failure. * * NOTE2: this function has the same above implications as regular waitqueues. * * Use either while holding swait_queue_head::lock or when used for wakeups * with an extra smp_mb() like: * * CPU0 - waker CPU1 - waiter * * for (;;) { * @cond = true; prepare_to_swait_exclusive(&wq_head, &wait, state); * smp_mb(); // smp_mb() from set_current_state() * if (swait_active(wq_head)) if (@cond) * wake_up(wq_head); break; * schedule(); * } * finish_swait(&wq_head, &wait); * * Because without the explicit smp_mb() it's possible for the * swait_active() load to get hoisted over the @cond store such that we'll * observe an empty wait list while the waiter might not observe @cond. * This, in turn, can trigger missing wakeups. * * Also note that this 'optimization' trades a spin_lock() for an smp_mb(), * which (when the lock is uncontended) are of roughly equal cost. */ static inline int swait_active(struct swait_queue_head *wq) { return !list_empty(&wq->task_list); } /** * swq_has_sleeper - check if there are any waiting processes * @wq: the waitqueue to test for waiters * * Returns true if @wq has waiting processes * * Please refer to the comment for swait_active. */ static inline bool swq_has_sleeper(struct swait_queue_head *wq) { /* * We need to be sure we are in sync with the list_add() * modifications to the wait queue (task_list). * * This memory barrier should be paired with one on the * waiting side. 
*/ smp_mb(); return swait_active(wq); } extern void swake_up_one(struct swait_queue_head *q); extern void swake_up_all(struct swait_queue_head *q); extern void swake_up_locked(struct swait_queue_head *q); extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); /* as per ___wait_event() but for swait, therefore "exclusive == 1" */ #define ___swait_event(wq, condition, state, ret, cmd) \ ({ \ __label__ __out; \ struct swait_queue __wait; \ long __ret = ret; \ \ INIT_LIST_HEAD(&__wait.task_list); \ for (;;) { \ long __int = prepare_to_swait_event(&wq, &__wait, state);\ \ if (condition) \ break; \ \ if (___wait_is_interruptible(state) && __int) { \ __ret = __int; \ goto __out; \ } \ \ cmd; \ } \ finish_swait(&wq, &__wait); \ __out: __ret; \ }) #define __swait_event(wq, condition) \ (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ schedule()) #define swait_event_exclusive(wq, condition) \ do { \ if (condition) \ break; \ __swait_event(wq, condition); \ } while (0) #define __swait_event_timeout(wq, condition, timeout) \ ___swait_event(wq, ___wait_cond_timeout(condition), \ TASK_UNINTERRUPTIBLE, timeout, \ __ret = schedule_timeout(__ret)) #define swait_event_timeout_exclusive(wq, condition, timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ __ret = __swait_event_timeout(wq, condition, timeout); \ __ret; \ }) #define __swait_event_interruptible(wq, condition) \ ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ schedule()) #define swait_event_interruptible_exclusive(wq, condition) \ ({ \ int __ret = 0; \ if (!(condition)) \ __ret = __swait_event_interruptible(wq, condition); \ __ret; \ }) #define __swait_event_interruptible_timeout(wq, condition, timeout) \ ___swait_event(wq, ___wait_cond_timeout(condition), \ TASK_INTERRUPTIBLE, timeout, \ __ret = schedule_timeout(__ret)) #define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ __ret = __swait_event_interruptible_timeout(wq, \ condition, timeout); \ __ret; \ }) #define __swait_event_idle(wq, condition) \ (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule()) /** * swait_event_idle_exclusive - wait without system load contribution * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_IDLE) until the @condition evaluates to * true. The @condition is checked each time the waitqueue @wq is woken up. * * This function is mostly used when a kthread or workqueue waits for some * condition and doesn't want to contribute to system load. Signals are * ignored. 
*/ #define swait_event_idle_exclusive(wq, condition) \ do { \ if (condition) \ break; \ __swait_event_idle(wq, condition); \ } while (0) #define __swait_event_idle_timeout(wq, condition, timeout) \ ___swait_event(wq, ___wait_cond_timeout(condition), \ TASK_IDLE, timeout, \ __ret = schedule_timeout(__ret)) /** * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout at which we'll give up in jiffies * * The process is put to sleep (TASK_IDLE) until the @condition evaluates to * true. The @condition is checked each time the waitqueue @wq is woken up. * * This function is mostly used when a kthread or workqueue waits for some * condition and doesn't want to contribute to system load. Signals are * ignored. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define swait_event_idle_timeout_exclusive(wq, condition, timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ __ret = __swait_event_idle_timeout(wq, \ condition, timeout); \ __ret; \ }) #endif /* _LINUX_SWAIT_H */ ipc.h 0000644 00000001145 14722070374 0005477 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IPC_H #define _LINUX_IPC_H #include <linux/spinlock.h> #include <linux/uidgid.h> #include <linux/rhashtable-types.h> #include <uapi/linux/ipc.h> #include <linux/refcount.h> /* used by in-kernel data structures */ struct kern_ipc_perm { spinlock_t lock; bool deleted; int id; key_t key; kuid_t uid; kgid_t gid; kuid_t cuid; kgid_t cgid; umode_t mode; unsigned long seq; void *security; struct rhash_head khtnode; struct rcu_head rcu; refcount_t refcount; } ____cacheline_aligned_in_smp __randomize_layout; #endif /* _LINUX_IPC_H */ fddidevice.h 0000644 00000001501 14722070374 0007006 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the FDDI handlers. * * Version: @(#)fddidevice.h 1.0.0 08/12/96 * * Author: Lawrence V. Stefani, <stefani@lkg.dec.com> * * fddidevice.h is based on previous trdevice.h work by * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> */ #ifndef _LINUX_FDDIDEVICE_H #define _LINUX_FDDIDEVICE_H #include <linux/if_fddi.h> #ifdef __KERNEL__ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev); struct net_device *alloc_fddidev(int sizeof_priv); #endif #endif /* _LINUX_FDDIDEVICE_H */ nvme.h 0000644 00000102513 14722070374 0005672 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for the NVM Express interface * Copyright (c) 2011-2014, Intel Corporation. 
*/ #ifndef _LINUX_NVME_H #define _LINUX_NVME_H #include <linux/bits.h> #include <linux/types.h> #include <linux/uuid.h> /* NQN names in command fields are specified with one size */ #define NVMF_NQN_FIELD_LEN 256 /* However, the maximum length of a qualified name is another size */ #define NVMF_NQN_SIZE 223 #define NVMF_TRSVCID_SIZE 32 #define NVMF_TRADDR_SIZE 256 #define NVMF_TSAS_SIZE 256 #define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery" #define NVME_RDMA_IP_PORT 4420 #define NVME_NSID_ALL 0xffffffff enum nvme_subsys_type { NVME_NQN_DISC = 1, /* Discovery type target subsystem */ NVME_NQN_NVME = 2, /* NVME type target subsystem */ }; /* Address Family codes for Discovery Log Page entry ADRFAM field */ enum { NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */ NVMF_ADDR_FAMILY_IP4 = 1, /* IP4 */ NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */ NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */ NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */ }; /* Transport Type codes for Discovery Log Page entry TRTYPE field */ enum { NVMF_TRTYPE_RDMA = 1, /* RDMA */ NVMF_TRTYPE_FC = 2, /* Fibre Channel */ NVMF_TRTYPE_TCP = 3, /* TCP/IP */ NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */ NVMF_TRTYPE_MAX, }; /* Transport Requirements codes for Discovery Log Page entry TREQ field */ enum { NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */ NVMF_TREQ_REQUIRED = 1, /* Required */ NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */ #define NVME_TREQ_SECURE_CHANNEL_MASK \ (NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED) NVMF_TREQ_DISABLE_SQFLOW = (1 << 2), /* Supports SQ flow control disable */ }; /* RDMA QP Service Type codes for Discovery Log Page entry TSAS * RDMA_QPTYPE field */ enum { NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */ NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */ }; /* RDMA Provider Type codes for Discovery Log Page entry TSAS * RDMA_PRTYPE field */ enum { NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */ NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */ NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */ NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */ NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */ }; /* RDMA Connection Management Service Type codes for Discovery Log Page * entry TSAS RDMA_CMS field */ enum { NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */ }; #define NVME_AQ_DEPTH 32 #define NVME_NR_AEN_COMMANDS 1 #define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS) /* * Subtract one to leave an empty queue entry for 'Full Queue' condition. See * NVM-Express 1.2 specification, section 4.1.2.
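 *
 * Worked through with the values above: NVME_AQ_DEPTH (32) minus
 * NVME_NR_AEN_COMMANDS (1) gives NVME_AQ_BLK_MQ_DEPTH = 31, and the
 * additional 'Full Queue' slot reserved below leaves 31 - 1 = 30 usable
 * tags.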
*/ #define NVME_AQ_MQ_TAG_DEPTH (NVME_AQ_BLK_MQ_DEPTH - 1) enum { NVME_REG_CAP = 0x0000, /* Controller Capabilities */ NVME_REG_VS = 0x0008, /* Version */ NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */ NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */ NVME_REG_CC = 0x0014, /* Controller Configuration */ NVME_REG_CSTS = 0x001c, /* Controller Status */ NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */ NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */ NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */ NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */ NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */ NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */ NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */ NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */ NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer * Location */ NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */ NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */ NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */ NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity * Buffer Size */ NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained * Write Throughput */ NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */ }; #define NVME_CAP_MQES(cap) ((cap) & 0xffff) #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) #define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1) #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) #define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) #define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7) #define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff) enum { NVME_CMBSZ_SQS = 1 << 0, NVME_CMBSZ_CQS = 1 << 1, NVME_CMBSZ_LISTS = 1 << 2, NVME_CMBSZ_RDS = 1 << 3, NVME_CMBSZ_WDS = 1 << 4, NVME_CMBSZ_SZ_SHIFT = 12, NVME_CMBSZ_SZ_MASK = 0xfffff, NVME_CMBSZ_SZU_SHIFT = 8, NVME_CMBSZ_SZU_MASK = 0xf, }; /* * Submission and Completion Queue Entry Sizes for the NVM command set. * (In bytes and specified as a power of two (2^n)). 
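 *
 * With the values below, I/O submission queue entries are 2^6 = 64
 * bytes (the size of struct nvme_command) and I/O completion queue
 * entries are 2^4 = 16 bytes (the size of struct nvme_completion).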
*/ #define NVME_ADM_SQES 6 #define NVME_NVM_IOSQES 6 #define NVME_NVM_IOCQES 4 enum { NVME_CC_ENABLE = 1 << 0, NVME_CC_CSS_NVM = 0 << 4, NVME_CC_EN_SHIFT = 0, NVME_CC_CSS_SHIFT = 4, NVME_CC_MPS_SHIFT = 7, NVME_CC_AMS_SHIFT = 11, NVME_CC_SHN_SHIFT = 14, NVME_CC_IOSQES_SHIFT = 16, NVME_CC_IOCQES_SHIFT = 20, NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT, NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT, NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT, NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT, NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT, NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT, NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT, NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT, NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT, NVME_CSTS_RDY = 1 << 0, NVME_CSTS_CFS = 1 << 1, NVME_CSTS_NSSRO = 1 << 4, NVME_CSTS_PP = 1 << 5, NVME_CSTS_SHST_NORMAL = 0 << 2, NVME_CSTS_SHST_OCCUR = 1 << 2, NVME_CSTS_SHST_CMPLT = 2 << 2, NVME_CSTS_SHST_MASK = 3 << 2, }; struct nvme_id_power_state { __le16 max_power; /* centiwatts */ __u8 rsvd2; __u8 flags; __le32 entry_lat; /* microseconds */ __le32 exit_lat; /* microseconds */ __u8 read_tput; __u8 read_lat; __u8 write_tput; __u8 write_lat; __le16 idle_power; __u8 idle_scale; __u8 rsvd19; __le16 active_power; __u8 active_work_scale; __u8 rsvd23[9]; }; enum { NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0, NVME_PS_FLAGS_NON_OP_STATE = 1 << 1, }; enum nvme_ctrl_attr { NVME_CTRL_ATTR_HID_128_BIT = (1 << 0), NVME_CTRL_ATTR_TBKAS = (1 << 6), }; struct nvme_id_ctrl { __le16 vid; __le16 ssvid; char sn[20]; char mn[40]; char fr[8]; __u8 rab; __u8 ieee[3]; __u8 cmic; __u8 mdts; __le16 cntlid; __le32 ver; __le32 rtd3r; __le32 rtd3e; __le32 oaes; __le32 ctratt; __u8 rsvd100[28]; __le16 crdt1; __le16 crdt2; __le16 crdt3; __u8 rsvd134[122]; __le16 oacs; __u8 acl; __u8 aerl; __u8 frmw; __u8 lpa; __u8 elpe; __u8 npss; __u8 avscc; __u8 apsta; __le16 wctemp; __le16 cctemp; __le16 mtfa; __le32 hmpre; __le32 hmmin; __u8 tnvmcap[16]; __u8 unvmcap[16]; __le32 rpmbs; __le16 edstt; __u8 dsto; __u8 fwug; __le16 kas; __le16 hctma; __le16 mntmt; __le16 mxtmt; __le32 sanicap; __le32 hmminds; __le16 hmmaxd; __u8 rsvd338[4]; __u8 anatt; __u8 anacap; __le32 anagrpmax; __le32 nanagrpid; __u8 rsvd352[160]; __u8 sqes; __u8 cqes; __le16 maxcmd; __le32 nn; __le16 oncs; __le16 fuses; __u8 fna; __u8 vwc; __le16 awun; __le16 awupf; __u8 nvscc; __u8 nwpc; __le16 acwu; __u8 rsvd534[2]; __le32 sgls; __le32 mnan; __u8 rsvd544[224]; char subnqn[256]; __u8 rsvd1024[768]; __le32 ioccsz; __le32 iorcsz; __le16 icdoff; __u8 ctrattr; __u8 msdbd; __u8 rsvd1804[244]; struct nvme_id_power_state psd[32]; __u8 vs[1024]; }; enum { NVME_CTRL_ONCS_COMPARE = 1 << 0, NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, NVME_CTRL_ONCS_DSM = 1 << 2, NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, NVME_CTRL_VWC_PRESENT = 1 << 0, NVME_CTRL_OACS_SEC_SUPP = 1 << 0, NVME_CTRL_OACS_DIRECTIVES = 1 << 5, NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8, NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1, NVME_CTRL_CTRATT_128_ID = 1 << 0, NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1, NVME_CTRL_CTRATT_NVM_SETS = 1 << 2, NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3, NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4, NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5, NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7, NVME_CTRL_CTRATT_UUID_LIST = 1 << 9, }; struct nvme_lbaf { __le16 ms; __u8 ds; __u8 rp; }; struct nvme_id_ns { __le64 nsze; __le64 ncap; __le64 nuse; __u8 nsfeat; __u8 nlbaf; __u8 flbas; __u8 mc; __u8 dpc; __u8 dps; __u8 nmic; __u8 rescap; __u8 fpi; __u8 dlfeat; __le16 nawun; 
__le16 nawupf; __le16 nacwu; __le16 nabsn; __le16 nabo; __le16 nabspf; __le16 noiob; __u8 nvmcap[16]; __le16 npwg; __le16 npwa; __le16 npdg; __le16 npda; __le16 nows; __u8 rsvd74[18]; __le32 anagrpid; __u8 rsvd96[3]; __u8 nsattr; __le16 nvmsetid; __le16 endgid; __u8 nguid[16]; __u8 eui64[8]; struct nvme_lbaf lbaf[16]; __u8 rsvd192[192]; __u8 vs[3712]; }; enum { NVME_ID_CNS_NS = 0x00, NVME_ID_CNS_CTRL = 0x01, NVME_ID_CNS_NS_ACTIVE_LIST = 0x02, NVME_ID_CNS_NS_DESC_LIST = 0x03, NVME_ID_CNS_NS_PRESENT_LIST = 0x10, NVME_ID_CNS_NS_PRESENT = 0x11, NVME_ID_CNS_CTRL_NS_LIST = 0x12, NVME_ID_CNS_CTRL_LIST = 0x13, NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15, NVME_ID_CNS_NS_GRANULARITY = 0x16, NVME_ID_CNS_UUID_LIST = 0x17, }; enum { NVME_DIR_IDENTIFY = 0x00, NVME_DIR_STREAMS = 0x01, NVME_DIR_SND_ID_OP_ENABLE = 0x01, NVME_DIR_SND_ST_OP_REL_ID = 0x01, NVME_DIR_SND_ST_OP_REL_RSC = 0x02, NVME_DIR_RCV_ID_OP_PARAM = 0x01, NVME_DIR_RCV_ST_OP_PARAM = 0x01, NVME_DIR_RCV_ST_OP_STATUS = 0x02, NVME_DIR_RCV_ST_OP_RESOURCE = 0x03, NVME_DIR_ENDIR = 0x01, }; enum { NVME_NS_FEAT_THIN = 1 << 0, NVME_NS_FLBAS_LBA_MASK = 0xf, NVME_NS_FLBAS_META_EXT = 0x10, NVME_LBAF_RP_BEST = 0, NVME_LBAF_RP_BETTER = 1, NVME_LBAF_RP_GOOD = 2, NVME_LBAF_RP_DEGRADED = 3, NVME_NS_DPC_PI_LAST = 1 << 4, NVME_NS_DPC_PI_FIRST = 1 << 3, NVME_NS_DPC_PI_TYPE3 = 1 << 2, NVME_NS_DPC_PI_TYPE2 = 1 << 1, NVME_NS_DPC_PI_TYPE1 = 1 << 0, NVME_NS_DPS_PI_FIRST = 1 << 3, NVME_NS_DPS_PI_MASK = 0x7, NVME_NS_DPS_PI_TYPE1 = 1, NVME_NS_DPS_PI_TYPE2 = 2, NVME_NS_DPS_PI_TYPE3 = 3, }; struct nvme_ns_id_desc { __u8 nidt; __u8 nidl; __le16 reserved; }; #define NVME_NIDT_EUI64_LEN 8 #define NVME_NIDT_NGUID_LEN 16 #define NVME_NIDT_UUID_LEN 16 enum { NVME_NIDT_EUI64 = 0x01, NVME_NIDT_NGUID = 0x02, NVME_NIDT_UUID = 0x03, }; struct nvme_smart_log { __u8 critical_warning; __u8 temperature[2]; __u8 avail_spare; __u8 spare_thresh; __u8 percent_used; __u8 endu_grp_crit_warn_sumry; __u8 rsvd7[25]; __u8 data_units_read[16]; __u8 data_units_written[16]; __u8 host_reads[16]; __u8 host_writes[16]; __u8 ctrl_busy_time[16]; __u8 power_cycles[16]; __u8 power_on_hours[16]; __u8 unsafe_shutdowns[16]; __u8 media_errors[16]; __u8 num_err_log_entries[16]; __le32 warning_temp_time; __le32 critical_comp_time; __le16 temp_sensor[8]; __le32 thm_temp1_trans_count; __le32 thm_temp2_trans_count; __le32 thm_temp1_total_time; __le32 thm_temp2_total_time; __u8 rsvd232[280]; }; struct nvme_fw_slot_info_log { __u8 afi; __u8 rsvd1[7]; __le64 frs[7]; __u8 rsvd64[448]; }; enum { NVME_CMD_EFFECTS_CSUPP = 1 << 0, NVME_CMD_EFFECTS_LBCC = 1 << 1, NVME_CMD_EFFECTS_NCC = 1 << 2, NVME_CMD_EFFECTS_NIC = 1 << 3, NVME_CMD_EFFECTS_CCC = 1 << 4, NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16), NVME_CMD_EFFECTS_UUID_SEL = 1 << 19, }; struct nvme_effects_log { __le32 acs[256]; __le32 iocs[256]; __u8 resv[2048]; }; enum nvme_ana_state { NVME_ANA_OPTIMIZED = 0x01, NVME_ANA_NONOPTIMIZED = 0x02, NVME_ANA_INACCESSIBLE = 0x03, NVME_ANA_PERSISTENT_LOSS = 0x04, NVME_ANA_CHANGE = 0x0f, }; struct nvme_ana_group_desc { __le32 grpid; __le32 nnsids; __le64 chgcnt; __u8 state; __u8 rsvd17[15]; __le32 nsids[]; }; /* flag for the log specific field of the ANA log */ #define NVME_ANA_LOG_RGO (1 << 0) struct nvme_ana_rsp_hdr { __le64 chgcnt; __le16 ngrps; __le16 rsvd10[3]; }; enum { NVME_SMART_CRIT_SPARE = 1 << 0, NVME_SMART_CRIT_TEMPERATURE = 1 << 1, NVME_SMART_CRIT_RELIABILITY = 1 << 2, NVME_SMART_CRIT_MEDIA = 1 << 3, NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, }; enum { NVME_AER_ERROR = 0, NVME_AER_SMART = 1, NVME_AER_NOTICE = 2, NVME_AER_CSS = 
6, NVME_AER_VS = 7, }; enum { NVME_AER_ERROR_PERSIST_INT_ERR = 0x03, }; enum { NVME_AER_NOTICE_NS_CHANGED = 0x00, NVME_AER_NOTICE_FW_ACT_STARTING = 0x01, NVME_AER_NOTICE_ANA = 0x03, NVME_AER_NOTICE_DISC_CHANGED = 0xf0, }; enum { NVME_AEN_BIT_NS_ATTR = 8, NVME_AEN_BIT_FW_ACT = 9, NVME_AEN_BIT_ANA_CHANGE = 11, NVME_AEN_BIT_DISC_CHANGE = 31, }; enum { NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR, NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT, NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE, NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE, }; struct nvme_lba_range_type { __u8 type; __u8 attributes; __u8 rsvd2[14]; __u64 slba; __u64 nlb; __u8 guid[16]; __u8 rsvd48[16]; }; enum { NVME_LBART_TYPE_FS = 0x01, NVME_LBART_TYPE_RAID = 0x02, NVME_LBART_TYPE_CACHE = 0x03, NVME_LBART_TYPE_SWAP = 0x04, NVME_LBART_ATTRIB_TEMP = 1 << 0, NVME_LBART_ATTRIB_HIDE = 1 << 1, }; struct nvme_reservation_status { __le32 gen; __u8 rtype; __u8 regctl[2]; __u8 resv5[2]; __u8 ptpls; __u8 resv10[13]; struct { __le16 cntlid; __u8 rcsts; __u8 resv3[5]; __le64 hostid; __le64 rkey; } regctl_ds[]; }; enum nvme_async_event_type { NVME_AER_TYPE_ERROR = 0, NVME_AER_TYPE_SMART = 1, NVME_AER_TYPE_NOTICE = 2, }; /* I/O commands */ enum nvme_opcode { nvme_cmd_flush = 0x00, nvme_cmd_write = 0x01, nvme_cmd_read = 0x02, nvme_cmd_write_uncor = 0x04, nvme_cmd_compare = 0x05, nvme_cmd_write_zeroes = 0x08, nvme_cmd_dsm = 0x09, nvme_cmd_verify = 0x0c, nvme_cmd_resv_register = 0x0d, nvme_cmd_resv_report = 0x0e, nvme_cmd_resv_acquire = 0x11, nvme_cmd_resv_release = 0x15, }; #define nvme_opcode_name(opcode) { opcode, #opcode } #define show_nvm_opcode_name(val) \ __print_symbolic(val, \ nvme_opcode_name(nvme_cmd_flush), \ nvme_opcode_name(nvme_cmd_write), \ nvme_opcode_name(nvme_cmd_read), \ nvme_opcode_name(nvme_cmd_write_uncor), \ nvme_opcode_name(nvme_cmd_compare), \ nvme_opcode_name(nvme_cmd_write_zeroes), \ nvme_opcode_name(nvme_cmd_dsm), \ nvme_opcode_name(nvme_cmd_resv_register), \ nvme_opcode_name(nvme_cmd_resv_report), \ nvme_opcode_name(nvme_cmd_resv_acquire), \ nvme_opcode_name(nvme_cmd_resv_release)) /* * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier * * @NVME_SGL_FMT_ADDRESS: absolute address of the data block * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation * request subtype */ enum { NVME_SGL_FMT_ADDRESS = 0x00, NVME_SGL_FMT_OFFSET = 0x01, NVME_SGL_FMT_TRANSPORT_A = 0x0A, NVME_SGL_FMT_INVALIDATE = 0x0f, }; /* * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier * * For struct nvme_sgl_desc: * @NVME_SGL_FMT_DATA_DESC: data block descriptor * @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor * @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor * * For struct nvme_keyed_sgl_desc: * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor * * Transport-specific SGL types: * @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data dlock descriptor */ enum { NVME_SGL_FMT_DATA_DESC = 0x00, NVME_SGL_FMT_SEG_DESC = 0x02, NVME_SGL_FMT_LAST_SEG_DESC = 0x03, NVME_KEY_SGL_FMT_DATA_DESC = 0x04, NVME_TRANSPORT_SGL_DATA_DESC = 0x05, }; struct nvme_sgl_desc { __le64 addr; __le32 length; __u8 rsvd[3]; __u8 type; }; struct nvme_keyed_sgl_desc { __le64 addr; __u8 length[3]; __u8 key[4]; __u8 type; }; union nvme_data_ptr { struct { __le64 prp1; __le64 prp2; }; struct nvme_sgl_desc sgl; struct nvme_keyed_sgl_desc ksgl; }; /* * Lowest two 
bits of our flags field (FUSE field in the spec): * * @NVME_CMD_FUSE_FIRST: Fused Operation, first command * @NVME_CMD_FUSE_SECOND: Fused Operation, second command * * Highest two bits in our flags field (PSDT field in the spec): * * @NVME_CMD_PSDT_SGL_METABUF: Use SGLS for this transfer, * If used, MPTR contains addr of single physical buffer (byte aligned). * @NVME_CMD_PSDT_SGL_METASEG: Use SGLS for this transfer, * If used, MPTR contains an address of an SGL segment containing * exactly 1 SGL descriptor (qword aligned). */ enum { NVME_CMD_FUSE_FIRST = (1 << 0), NVME_CMD_FUSE_SECOND = (1 << 1), NVME_CMD_SGL_METABUF = (1 << 6), NVME_CMD_SGL_METASEG = (1 << 7), NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG, }; struct nvme_common_command { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __le32 cdw2[2]; __le64 metadata; union nvme_data_ptr dptr; __le32 cdw10; __le32 cdw11; __le32 cdw12; __le32 cdw13; __le32 cdw14; __le32 cdw15; }; struct nvme_rw_command { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2; __le64 metadata; union nvme_data_ptr dptr; __le64 slba; __le16 length; __le16 control; __le32 dsmgmt; __le32 reftag; __le16 apptag; __le16 appmask; }; enum { NVME_RW_LR = 1 << 15, NVME_RW_FUA = 1 << 14, NVME_RW_DSM_FREQ_UNSPEC = 0, NVME_RW_DSM_FREQ_TYPICAL = 1, NVME_RW_DSM_FREQ_RARE = 2, NVME_RW_DSM_FREQ_READS = 3, NVME_RW_DSM_FREQ_WRITES = 4, NVME_RW_DSM_FREQ_RW = 5, NVME_RW_DSM_FREQ_ONCE = 6, NVME_RW_DSM_FREQ_PREFETCH = 7, NVME_RW_DSM_FREQ_TEMP = 8, NVME_RW_DSM_LATENCY_NONE = 0 << 4, NVME_RW_DSM_LATENCY_IDLE = 1 << 4, NVME_RW_DSM_LATENCY_NORM = 2 << 4, NVME_RW_DSM_LATENCY_LOW = 3 << 4, NVME_RW_DSM_SEQ_REQ = 1 << 6, NVME_RW_DSM_COMPRESSED = 1 << 7, NVME_RW_PRINFO_PRCHK_REF = 1 << 10, NVME_RW_PRINFO_PRCHK_APP = 1 << 11, NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, NVME_RW_PRINFO_PRACT = 1 << 13, NVME_RW_DTYPE_STREAMS = 1 << 4, }; struct nvme_dsm_cmd { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; __le32 nr; __le32 attributes; __u32 rsvd12[4]; }; enum { NVME_DSMGMT_IDR = 1 << 0, NVME_DSMGMT_IDW = 1 << 1, NVME_DSMGMT_AD = 1 << 2, }; #define NVME_DSM_MAX_RANGES 256 struct nvme_dsm_range { __le32 cattr; __le32 nlb; __le64 slba; }; struct nvme_write_zeroes_cmd { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2; __le64 metadata; union nvme_data_ptr dptr; __le64 slba; __le16 length; __le16 control; __le32 dsmgmt; __le32 reftag; __le16 apptag; __le16 appmask; }; /* Features */ struct nvme_feat_auto_pst { __le64 entries[32]; }; enum { NVME_HOST_MEM_ENABLE = (1 << 0), NVME_HOST_MEM_RETURN = (1 << 1), }; struct nvme_feat_host_behavior { __u8 acre; __u8 resv1[511]; }; enum { NVME_ENABLE_ACRE = 1, }; /* Admin commands */ enum nvme_admin_opcode { nvme_admin_delete_sq = 0x00, nvme_admin_create_sq = 0x01, nvme_admin_get_log_page = 0x02, nvme_admin_delete_cq = 0x04, nvme_admin_create_cq = 0x05, nvme_admin_identify = 0x06, nvme_admin_abort_cmd = 0x08, nvme_admin_set_features = 0x09, nvme_admin_get_features = 0x0a, nvme_admin_async_event = 0x0c, nvme_admin_ns_mgmt = 0x0d, nvme_admin_activate_fw = 0x10, nvme_admin_download_fw = 0x11, nvme_admin_dev_self_test = 0x14, nvme_admin_ns_attach = 0x15, nvme_admin_keep_alive = 0x18, nvme_admin_directive_send = 0x19, nvme_admin_directive_recv = 0x1a, nvme_admin_virtual_mgmt = 0x1c, nvme_admin_nvme_mi_send = 0x1d, nvme_admin_nvme_mi_recv = 0x1e, nvme_admin_dbbuf = 0x7C, nvme_admin_format_nvm = 0x80, nvme_admin_security_send = 0x81, nvme_admin_security_recv = 0x82, 
nvme_admin_sanitize_nvm = 0x84, nvme_admin_get_lba_status = 0x86, }; #define nvme_admin_opcode_name(opcode) { opcode, #opcode } #define show_admin_opcode_name(val) \ __print_symbolic(val, \ nvme_admin_opcode_name(nvme_admin_delete_sq), \ nvme_admin_opcode_name(nvme_admin_create_sq), \ nvme_admin_opcode_name(nvme_admin_get_log_page), \ nvme_admin_opcode_name(nvme_admin_delete_cq), \ nvme_admin_opcode_name(nvme_admin_create_cq), \ nvme_admin_opcode_name(nvme_admin_identify), \ nvme_admin_opcode_name(nvme_admin_abort_cmd), \ nvme_admin_opcode_name(nvme_admin_set_features), \ nvme_admin_opcode_name(nvme_admin_get_features), \ nvme_admin_opcode_name(nvme_admin_async_event), \ nvme_admin_opcode_name(nvme_admin_ns_mgmt), \ nvme_admin_opcode_name(nvme_admin_activate_fw), \ nvme_admin_opcode_name(nvme_admin_download_fw), \ nvme_admin_opcode_name(nvme_admin_ns_attach), \ nvme_admin_opcode_name(nvme_admin_keep_alive), \ nvme_admin_opcode_name(nvme_admin_directive_send), \ nvme_admin_opcode_name(nvme_admin_directive_recv), \ nvme_admin_opcode_name(nvme_admin_dbbuf), \ nvme_admin_opcode_name(nvme_admin_format_nvm), \ nvme_admin_opcode_name(nvme_admin_security_send), \ nvme_admin_opcode_name(nvme_admin_security_recv), \ nvme_admin_opcode_name(nvme_admin_sanitize_nvm), \ nvme_admin_opcode_name(nvme_admin_get_lba_status)) enum { NVME_QUEUE_PHYS_CONTIG = (1 << 0), NVME_CQ_IRQ_ENABLED = (1 << 1), NVME_SQ_PRIO_URGENT = (0 << 1), NVME_SQ_PRIO_HIGH = (1 << 1), NVME_SQ_PRIO_MEDIUM = (2 << 1), NVME_SQ_PRIO_LOW = (3 << 1), NVME_FEAT_ARBITRATION = 0x01, NVME_FEAT_POWER_MGMT = 0x02, NVME_FEAT_LBA_RANGE = 0x03, NVME_FEAT_TEMP_THRESH = 0x04, NVME_FEAT_ERR_RECOVERY = 0x05, NVME_FEAT_VOLATILE_WC = 0x06, NVME_FEAT_NUM_QUEUES = 0x07, NVME_FEAT_IRQ_COALESCE = 0x08, NVME_FEAT_IRQ_CONFIG = 0x09, NVME_FEAT_WRITE_ATOMIC = 0x0a, NVME_FEAT_ASYNC_EVENT = 0x0b, NVME_FEAT_AUTO_PST = 0x0c, NVME_FEAT_HOST_MEM_BUF = 0x0d, NVME_FEAT_TIMESTAMP = 0x0e, NVME_FEAT_KATO = 0x0f, NVME_FEAT_HCTM = 0x10, NVME_FEAT_NOPSC = 0x11, NVME_FEAT_RRL = 0x12, NVME_FEAT_PLM_CONFIG = 0x13, NVME_FEAT_PLM_WINDOW = 0x14, NVME_FEAT_HOST_BEHAVIOR = 0x16, NVME_FEAT_SANITIZE = 0x17, NVME_FEAT_SW_PROGRESS = 0x80, NVME_FEAT_HOST_ID = 0x81, NVME_FEAT_RESV_MASK = 0x82, NVME_FEAT_RESV_PERSIST = 0x83, NVME_FEAT_WRITE_PROTECT = 0x84, NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, NVME_LOG_CHANGED_NS = 0x04, NVME_LOG_CMD_EFFECTS = 0x05, NVME_LOG_DEVICE_SELF_TEST = 0x06, NVME_LOG_TELEMETRY_HOST = 0x07, NVME_LOG_TELEMETRY_CTRL = 0x08, NVME_LOG_ENDURANCE_GROUP = 0x09, NVME_LOG_ANA = 0x0c, NVME_LOG_DISC = 0x70, NVME_LOG_RESERVATION = 0x80, NVME_FWACT_REPL = (0 << 3), NVME_FWACT_REPL_ACTV = (1 << 3), NVME_FWACT_ACTV = (2 << 3), }; /* NVMe Namespace Write Protect State */ enum { NVME_NS_NO_WRITE_PROTECT = 0, NVME_NS_WRITE_PROTECT, NVME_NS_WRITE_PROTECT_POWER_CYCLE, NVME_NS_WRITE_PROTECT_PERMANENT, }; #define NVME_MAX_CHANGED_NAMESPACES 1024 struct nvme_identify { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; __u8 cns; __u8 rsvd3; __le16 ctrlid; __u32 rsvd11[5]; }; #define NVME_IDENTIFY_DATA_SIZE 4096 struct nvme_features { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; __le32 fid; __le32 dword11; __le32 dword12; __le32 dword13; __le32 dword14; __le32 dword15; }; struct nvme_host_mem_buf_desc { __le64 addr; __le32 size; __u32 rsvd; }; struct nvme_create_cq { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[5]; __le64 prp1; __u64 rsvd8; __le16 
cqid; __le16 qsize; __le16 cq_flags; __le16 irq_vector; __u32 rsvd12[4]; }; struct nvme_create_sq { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[5]; __le64 prp1; __u64 rsvd8; __le16 sqid; __le16 qsize; __le16 sq_flags; __le16 cqid; __u32 rsvd12[4]; }; struct nvme_delete_queue { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[9]; __le16 qid; __u16 rsvd10; __u32 rsvd11[5]; }; struct nvme_abort_cmd { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[9]; __le16 sqid; __u16 cid; __u32 rsvd11[5]; }; struct nvme_download_firmware { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[5]; union nvme_data_ptr dptr; __le32 numd; __le32 offset; __u32 rsvd12[4]; }; struct nvme_format_cmd { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[4]; __le32 cdw10; __u32 rsvd11[5]; }; struct nvme_get_log_page_command { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; __u8 lid; __u8 lsp; /* upper 4 bits reserved */ __le16 numdl; __le16 numdu; __u16 rsvd11; union { struct { __le32 lpol; __le32 lpou; }; __le64 lpo; }; __u32 rsvd14[2]; }; struct nvme_directive_cmd { __u8 opcode; __u8 flags; __u16 command_id; __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; __le32 numd; __u8 doper; __u8 dtype; __le16 dspec; __u8 endir; __u8 tdtype; __u16 rsvd15; __u32 rsvd16[3]; }; /* * Fabrics subcommands. */ enum nvmf_fabrics_opcode { nvme_fabrics_command = 0x7f, }; enum nvmf_capsule_command { nvme_fabrics_type_property_set = 0x00, nvme_fabrics_type_connect = 0x01, nvme_fabrics_type_property_get = 0x04, }; #define nvme_fabrics_type_name(type) { type, #type } #define show_fabrics_type_name(type) \ __print_symbolic(type, \ nvme_fabrics_type_name(nvme_fabrics_type_property_set), \ nvme_fabrics_type_name(nvme_fabrics_type_connect), \ nvme_fabrics_type_name(nvme_fabrics_type_property_get)) /* * If not fabrics command, fctype will be ignored. */ #define show_opcode_name(qid, opcode, fctype) \ ((opcode) == nvme_fabrics_command ? \ show_fabrics_type_name(fctype) : \ ((qid) ? \ show_nvm_opcode_name(opcode) : \ show_admin_opcode_name(opcode))) struct nvmf_common_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[35]; __u8 ts[24]; }; /* * The legal cntlid range a NVMe Target will provide. * Note that cntlid of value 0 is considered illegal in the fabrics world. * Devices based on earlier specs did not have the subsystem concept; * therefore, those devices had their cntlid value set to 0 as a result. 
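 *
 * A minimal range check over the constants below might look like this
 * (illustrative sketch only; the helper name is not part of this
 * header):
 *
 *	static inline bool nvmf_cntlid_in_range(u16 cntlid)
 *	{
 *		return cntlid >= NVME_CNTLID_MIN &&
 *		       cntlid <= NVME_CNTLID_MAX;
 *	}
 *
 * NVME_CNTLID_DYNAMIC sits outside that range on purpose: a Connect
 * command carries it to ask the target to pick a controller ID
 * dynamically.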
*/ #define NVME_CNTLID_MIN 1 #define NVME_CNTLID_MAX 0xffef #define NVME_CNTLID_DYNAMIC 0xffff #define MAX_DISC_LOGS 255 /* Discovery log page entry */ struct nvmf_disc_rsp_page_entry { __u8 trtype; __u8 adrfam; __u8 subtype; __u8 treq; __le16 portid; __le16 cntlid; __le16 asqsz; __u8 resv8[22]; char trsvcid[NVMF_TRSVCID_SIZE]; __u8 resv64[192]; char subnqn[NVMF_NQN_FIELD_LEN]; char traddr[NVMF_TRADDR_SIZE]; union tsas { char common[NVMF_TSAS_SIZE]; struct rdma { __u8 qptype; __u8 prtype; __u8 cms; __u8 resv3[5]; __u16 pkey; __u8 resv10[246]; } rdma; } tsas; }; /* Discovery log page header */ struct nvmf_disc_rsp_page_hdr { __le64 genctr; __le64 numrec; __le16 recfmt; __u8 resv14[1006]; struct nvmf_disc_rsp_page_entry entries[0]; }; enum { NVME_CONNECT_DISABLE_SQFLOW = (1 << 2), }; struct nvmf_connect_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[19]; union nvme_data_ptr dptr; __le16 recfmt; __le16 qid; __le16 sqsize; __u8 cattr; __u8 resv3; __le32 kato; __u8 resv4[12]; }; struct nvmf_connect_data { uuid_t hostid; __le16 cntlid; char resv4[238]; char subsysnqn[NVMF_NQN_FIELD_LEN]; char hostnqn[NVMF_NQN_FIELD_LEN]; char resv5[256]; }; struct nvmf_property_set_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[35]; __u8 attrib; __u8 resv3[3]; __le32 offset; __le64 value; __u8 resv4[8]; }; struct nvmf_property_get_command { __u8 opcode; __u8 resv1; __u16 command_id; __u8 fctype; __u8 resv2[35]; __u8 attrib; __u8 resv3[3]; __le32 offset; __u8 resv4[16]; }; struct nvme_dbbuf { __u8 opcode; __u8 flags; __u16 command_id; __u32 rsvd1[5]; __le64 prp1; __le64 prp2; __u32 rsvd12[6]; }; struct streams_directive_params { __le16 msl; __le16 nssa; __le16 nsso; __u8 rsvd[10]; __le32 sws; __le16 sgs; __le16 nsa; __le16 nso; __u8 rsvd2[6]; }; struct nvme_command { union { struct nvme_common_command common; struct nvme_rw_command rw; struct nvme_identify identify; struct nvme_features features; struct nvme_create_cq create_cq; struct nvme_create_sq create_sq; struct nvme_delete_queue delete_queue; struct nvme_download_firmware dlfw; struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; struct nvme_write_zeroes_cmd write_zeroes; struct nvme_abort_cmd abort; struct nvme_get_log_page_command get_log_page; struct nvmf_common_command fabrics; struct nvmf_connect_command connect; struct nvmf_property_set_command prop_set; struct nvmf_property_get_command prop_get; struct nvme_dbbuf dbbuf; struct nvme_directive_cmd directive; }; }; static inline bool nvme_is_fabrics(struct nvme_command *cmd) { return cmd->common.opcode == nvme_fabrics_command; } struct nvme_error_slot { __le64 error_count; __le16 sqid; __le16 cmdid; __le16 status_field; __le16 param_error_location; __le64 lba; __le32 nsid; __u8 vs; __u8 resv[3]; __le64 cs; __u8 resv2[24]; }; static inline bool nvme_is_write(struct nvme_command *cmd) { /* * What a mess... * * Why can't we simply have a Fabrics In and Fabrics out command? 
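 *
 * The test below leans on the opcode convention used in this file:
 * commands that transfer data from host to controller have bit 0 set
 * (nvme_cmd_write = 0x01, nvme_cmd_dsm = 0x09) while data-in commands
 * are even (nvme_cmd_read = 0x02), and fabrics commands follow the
 * same rule in their fctype (nvme_fabrics_type_connect = 0x01).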
*/ if (unlikely(nvme_is_fabrics(cmd))) return cmd->fabrics.fctype & 1; return cmd->common.opcode & 1; } enum { /* * Generic Command Status: */ NVME_SC_SUCCESS = 0x0, NVME_SC_INVALID_OPCODE = 0x1, NVME_SC_INVALID_FIELD = 0x2, NVME_SC_CMDID_CONFLICT = 0x3, NVME_SC_DATA_XFER_ERROR = 0x4, NVME_SC_POWER_LOSS = 0x5, NVME_SC_INTERNAL = 0x6, NVME_SC_ABORT_REQ = 0x7, NVME_SC_ABORT_QUEUE = 0x8, NVME_SC_FUSED_FAIL = 0x9, NVME_SC_FUSED_MISSING = 0xa, NVME_SC_INVALID_NS = 0xb, NVME_SC_CMD_SEQ_ERROR = 0xc, NVME_SC_SGL_INVALID_LAST = 0xd, NVME_SC_SGL_INVALID_COUNT = 0xe, NVME_SC_SGL_INVALID_DATA = 0xf, NVME_SC_SGL_INVALID_METADATA = 0x10, NVME_SC_SGL_INVALID_TYPE = 0x11, NVME_SC_SGL_INVALID_OFFSET = 0x16, NVME_SC_SGL_INVALID_SUBTYPE = 0x17, NVME_SC_SANITIZE_FAILED = 0x1C, NVME_SC_SANITIZE_IN_PROGRESS = 0x1D, NVME_SC_NS_WRITE_PROTECTED = 0x20, NVME_SC_CMD_INTERRUPTED = 0x21, NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, NVME_SC_NS_NOT_READY = 0x82, NVME_SC_RESERVATION_CONFLICT = 0x83, /* * Command Specific Status: */ NVME_SC_CQ_INVALID = 0x100, NVME_SC_QID_INVALID = 0x101, NVME_SC_QUEUE_SIZE = 0x102, NVME_SC_ABORT_LIMIT = 0x103, NVME_SC_ABORT_MISSING = 0x104, NVME_SC_ASYNC_LIMIT = 0x105, NVME_SC_FIRMWARE_SLOT = 0x106, NVME_SC_FIRMWARE_IMAGE = 0x107, NVME_SC_INVALID_VECTOR = 0x108, NVME_SC_INVALID_LOG_PAGE = 0x109, NVME_SC_INVALID_FORMAT = 0x10a, NVME_SC_FW_NEEDS_CONV_RESET = 0x10b, NVME_SC_INVALID_QUEUE = 0x10c, NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, NVME_SC_FEATURE_NOT_PER_NS = 0x10f, NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110, NVME_SC_FW_NEEDS_RESET = 0x111, NVME_SC_FW_NEEDS_MAX_TIME = 0x112, NVME_SC_FW_ACTIVATE_PROHIBITED = 0x113, NVME_SC_OVERLAPPING_RANGE = 0x114, NVME_SC_NS_INSUFFICIENT_CAP = 0x115, NVME_SC_NS_ID_UNAVAILABLE = 0x116, NVME_SC_NS_ALREADY_ATTACHED = 0x118, NVME_SC_NS_IS_PRIVATE = 0x119, NVME_SC_NS_NOT_ATTACHED = 0x11a, NVME_SC_THIN_PROV_NOT_SUPP = 0x11b, NVME_SC_CTRL_LIST_INVALID = 0x11c, NVME_SC_BP_WRITE_PROHIBITED = 0x11e, NVME_SC_PMR_SAN_PROHIBITED = 0x123, /* * I/O Command Set Specific - NVM commands: */ NVME_SC_BAD_ATTRIBUTES = 0x180, NVME_SC_INVALID_PI = 0x181, NVME_SC_READ_ONLY = 0x182, NVME_SC_ONCS_NOT_SUPPORTED = 0x183, /* * I/O Command Set Specific - Fabrics commands: */ NVME_SC_CONNECT_FORMAT = 0x180, NVME_SC_CONNECT_CTRL_BUSY = 0x181, NVME_SC_CONNECT_INVALID_PARAM = 0x182, NVME_SC_CONNECT_RESTART_DISC = 0x183, NVME_SC_CONNECT_INVALID_HOST = 0x184, NVME_SC_DISCOVERY_RESTART = 0x190, NVME_SC_AUTH_REQUIRED = 0x191, /* * Media and Data Integrity Errors: */ NVME_SC_WRITE_FAULT = 0x280, NVME_SC_READ_ERROR = 0x281, NVME_SC_GUARD_CHECK = 0x282, NVME_SC_APPTAG_CHECK = 0x283, NVME_SC_REFTAG_CHECK = 0x284, NVME_SC_COMPARE_FAILED = 0x285, NVME_SC_ACCESS_DENIED = 0x286, NVME_SC_UNWRITTEN_BLOCK = 0x287, /* * Path-related Errors: */ NVME_SC_ANA_PERSISTENT_LOSS = 0x301, NVME_SC_ANA_INACCESSIBLE = 0x302, NVME_SC_ANA_TRANSITION = 0x303, NVME_SC_HOST_PATH_ERROR = 0x370, NVME_SC_HOST_ABORTED_CMD = 0x371, NVME_SC_CRD = 0x1800, NVME_SC_DNR = 0x4000, }; struct nvme_completion { /* * Used by Admin and Fabrics commands to return data: */ union nvme_result { __le16 u16; __le32 u32; __le64 u64; } result; __le16 sq_head; /* how much of this queue may be reclaimed */ __le16 sq_id; /* submission queue that generated this entry */ __u16 command_id; /* of the command which completed */ __le16 status; /* did the command fail, and if so, why? 
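				 * An illustrative decode, assuming a
				 * completion entry pointer named cqe
				 * (the name is ours, not the header's)
				 * and stripping the phase tag in bit 0
				 * first:
				 *
				 *	u16 sts = le16_to_cpu(cqe->status) >> 1;
				 *	u16 code = sts & 0x7ff;	- SCT + SC, see NVME_SC_*
				 *	bool retry = code != NVME_SC_SUCCESS &&
				 *		     !(sts & NVME_SC_DNR);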
*/ }; #define NVME_VS(major, minor, tertiary) \ (((major) << 16) | ((minor) << 8) | (tertiary)) #define NVME_MAJOR(ver) ((ver) >> 16) #define NVME_MINOR(ver) (((ver) >> 8) & 0xff) #define NVME_TERTIARY(ver) ((ver) & 0xff) #endif /* _LINUX_NVME_H */ kernel.h 0000644 00000104476 14722070374 0006217 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KERNEL_H #define _LINUX_KERNEL_H #include <stdarg.h> #include <linux/limits.h> #include <linux/linkage.h> #include <linux/stddef.h> #include <linux/types.h> #include <linux/compiler.h> #include <linux/bitops.h> #include <linux/log2.h> #include <linux/typecheck.h> #include <linux/printk.h> #include <linux/build_bug.h> #include <asm/byteorder.h> #include <asm/div64.h> #include <uapi/linux/kernel.h> #include <asm/div64.h> #define STACK_MAGIC 0xdeadbeef /** * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value * @x: value to repeat * * NOTE: @x is not checked for > 0xff; larger values produce odd results. */ #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) /* @a is a power of 2 value */ #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) /* generic data direction definitions */ #define READ 0 #define WRITE 1 /** * ARRAY_SIZE - get the number of elements in array @arr * @arr: array to be sized */ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) #define u64_to_user_ptr(x) ( \ { \ typecheck(u64, (x)); \ (void __user *)(uintptr_t)(x); \ } \ ) /* * This looks more complex than it should be. But we need to * get the type for the ~ right in round_down (it needs to be * as wide as the result!), and we want to evaluate the macro * arguments just once each. */ #define __round_mask(x, y) ((__typeof__(x))((y)-1)) /** * round_up - round up to next specified power of 2 * @x: the value to round * @y: multiple to round up to (must be a power of 2) * * Rounds @x up to next multiple of @y (which must be a power of 2). * To perform arbitrary rounding up, use roundup() below. */ #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) /** * round_down - round down to next specified power of 2 * @x: the value to round * @y: multiple to round down to (must be a power of 2) * * Rounds @x down to next multiple of @y (which must be a power of 2). * To perform arbitrary rounding down, use rounddown() below. */ #define round_down(x, y) ((x) & ~__round_mask(x, y)) /** * FIELD_SIZEOF - get the size of a struct's field * @t: the target struct * @f: the target struct's field * Return: the size of @f in the struct definition without having a * declared instance of @t. */ #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) #define typeof_member(T, m) typeof(((T*)0)->m) #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define DIV_ROUND_DOWN_ULL(ll, d) \ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) #define DIV_ROUND_UP_ULL(ll, d) \ DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d)) #if BITS_PER_LONG == 32 # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) #else # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) #endif /** * roundup - round up to the next specified multiple * @x: the value to up * @y: multiple to round up to * * Rounds @x up to next multiple of @y. If @y will always be a power * of 2, consider using the faster round_up(). 
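 *
 * Example: roundup(10, 3) evaluates to 12, and @y need not be a
 * power of 2 here.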
*/ #define roundup(x, y) ( \ { \ typeof(y) __y = y; \ (((x) + (__y - 1)) / __y) * __y; \ } \ ) /** * rounddown - round down to next specified multiple * @x: the value to round * @y: multiple to round down to * * Rounds @x down to next multiple of @y. If @y will always be a power * of 2, consider using the faster round_down(). */ #define rounddown(x, y) ( \ { \ typeof(x) __x = (x); \ __x - (__x % (y)); \ } \ ) /* * Divide positive or negative dividend by positive or negative divisor * and round to closest integer. Result is undefined for negative * divisors if the dividend variable type is unsigned and for negative * dividends if the divisor variable type is unsigned. */ #define DIV_ROUND_CLOSEST(x, divisor)( \ { \ typeof(x) __x = x; \ typeof(divisor) __d = divisor; \ (((typeof(x))-1) > 0 || \ ((typeof(divisor))-1) > 0 || \ (((__x) > 0) == ((__d) > 0))) ? \ (((__x) + ((__d) / 2)) / (__d)) : \ (((__x) - ((__d) / 2)) / (__d)); \ } \ ) /* * Same as above but for u64 dividends. divisor must be a 32-bit * number. */ #define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ { \ typeof(divisor) __d = divisor; \ unsigned long long _tmp = (x) + (__d) / 2; \ do_div(_tmp, __d); \ _tmp; \ } \ ) /* * Multiplies an integer by a fraction, while avoiding unnecessary * overflow or loss of precision. */ #define mult_frac(x, numer, denom)( \ { \ typeof(x) quot = (x) / (denom); \ typeof(x) rem = (x) % (denom); \ (quot * (numer)) + ((rem * (numer)) / (denom)); \ } \ ) #define _RET_IP_ (unsigned long)__builtin_return_address(0) #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) #define sector_div(a, b) do_div(a, b) /** * upper_32_bits - return bits 32-63 of a number * @n: the number we're accessing * * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress * the "right shift count >= width of type" warning when that quantity is * 32-bits. */ #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) /** * lower_32_bits - return bits 0-31 of a number * @n: the number we're accessing */ #define lower_32_bits(n) ((u32)(n)) struct completion; struct pt_regs; struct user; #ifdef CONFIG_PREEMPT_VOLUNTARY extern int _cond_resched(void); # define might_resched() _cond_resched() #else # define might_resched() do { } while (0) #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP extern void ___might_sleep(const char *file, int line, int preempt_offset); extern void __might_sleep(const char *file, int line, int preempt_offset); extern void __cant_sleep(const char *file, int line, int preempt_offset); /** * might_sleep - annotation for functions that can sleep * * this macro will print a stack trace if it is executed in an atomic * context (spinlock, irq-handler, ...). Additional sections where blocking is * not allowed can be annotated with non_block_start() and non_block_end() * pairs. * * This is a useful debugging help to be able to catch problems early and not * be bitten later when the calling function happens to sleep when it is not * supposed to. */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) /** * cant_sleep - annotation for functions that cannot sleep * * this macro will print a stack trace if it is executed with preemption enabled */ # define cant_sleep() \ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) /** * non_block_start - annotate the start of section where sleeping is prohibited * * This is on behalf of the oom reaper, specifically when it is calling the mmu * notifiers. 
The problem is that if the notifier were to block on, for example, * mutex_lock() and if the process which holds that mutex were to perform a * sleeping memory allocation, the oom reaper is now blocked on completion of * that memory allocation. Other blocking calls like wait_event() pose similar * issues. */ # define non_block_start() (current->non_block_count++) /** * non_block_end - annotate the end of section where sleeping is prohibited * * Closes a section opened by non_block_start(). */ # define non_block_end() WARN_ON(current->non_block_count-- == 0) #else static inline void ___might_sleep(const char *file, int line, int preempt_offset) { } static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) # define cant_sleep() do { } while (0) # define sched_annotate_sleep() do { } while (0) # define non_block_start() do { } while (0) # define non_block_end() do { } while (0) #endif #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) /** * abs - return absolute value of an argument * @x: the value. If it is unsigned type, it is converted to signed type first. * char is treated as if it was signed (regardless of whether it really is) * but the macro's return type is preserved as char. * * Return: an absolute value of x. */ #define abs(x) __abs_choose_expr(x, long long, \ __abs_choose_expr(x, long, \ __abs_choose_expr(x, int, \ __abs_choose_expr(x, short, \ __abs_choose_expr(x, char, \ __builtin_choose_expr( \ __builtin_types_compatible_p(typeof(x), char), \ (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ ((void)0))))))) #define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ __builtin_types_compatible_p(typeof(x), signed type) || \ __builtin_types_compatible_p(typeof(x), unsigned type), \ ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) /** * reciprocal_scale - "scale" a value into range [0, ep_ro) * @val: value * @ep_ro: right open interval endpoint * * Perform a "reciprocal multiplication" in order to "scale" a value into * range [0, @ep_ro), where the upper interval endpoint is right-open. * This is useful, e.g. for accessing a index of an array containing * @ep_ro elements, for example. Think of it as sort of modulus, only that * the result isn't that of modulo. ;) Note that if initial input is a * small value, then result will return 0. * * Return: a result based on @val in interval [0, @ep_ro). */ static inline u32 reciprocal_scale(u32 val, u32 ep_ro) { return (u32)(((u64) val * ep_ro) >> 32); } #if defined(CONFIG_MMU) && \ (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) #define might_fault() __might_fault(__FILE__, __LINE__) void __might_fault(const char *file, int line); #else static inline void might_fault(void) { } #endif extern struct atomic_notifier_head panic_notifier_list; extern long (*panic_blink)(int state); __printf(1, 2) void panic(const char *fmt, ...) __noreturn __cold; void nmi_panic(struct pt_regs *regs, const char *msg); void check_panic_on_warn(const char *origin); extern void oops_enter(void); extern void oops_exit(void); void print_oops_end_marker(void); extern int oops_may_print(void); void do_exit(long error_code) __noreturn; void complete_and_exit(struct completion *, long) __noreturn; #ifdef CONFIG_ARCH_HAS_REFCOUNT void refcount_error_report(struct pt_regs *regs, const char *err); #else static inline void refcount_error_report(struct pt_regs *regs, const char *err) { } #endif /* Internal, do not use. 
*/ int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res); int __must_check _kstrtol(const char *s, unsigned int base, long *res); int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res); int __must_check kstrtoll(const char *s, unsigned int base, long long *res); /** * kstrtoul - convert a string to an unsigned long * @s: The start of the string. The string must be null-terminated, and may also * include a single newline before its terminating null. The first character * may also be a plus sign, but not a minus sign. * @base: The number base to use. The maximum supported base is 16. If base is * given as 0, then the base of the string is automatically detected with the * conventional semantics - If it begins with 0x the number will be parsed as a * hexadecimal (case insensitive), if it otherwise begins with 0, it will be * parsed as an octal number. Otherwise it will be parsed as a decimal. * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. * Used as a replacement for the obsolete simple_strtoull. Return code must * be checked. */ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) { /* * We want to shortcut function call, but * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0. */ if (sizeof(unsigned long) == sizeof(unsigned long long) && __alignof__(unsigned long) == __alignof__(unsigned long long)) return kstrtoull(s, base, (unsigned long long *)res); else return _kstrtoul(s, base, res); } /** * kstrtol - convert a string to a long * @s: The start of the string. The string must be null-terminated, and may also * include a single newline before its terminating null. The first character * may also be a plus sign or a minus sign. * @base: The number base to use. The maximum supported base is 16. If base is * given as 0, then the base of the string is automatically detected with the * conventional semantics - If it begins with 0x the number will be parsed as a * hexadecimal (case insensitive), if it otherwise begins with 0, it will be * parsed as an octal number. Otherwise it will be parsed as a decimal. * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. * Used as a replacement for the obsolete simple_strtoull. Return code must * be checked. */ static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) { /* * We want to shortcut function call, but * __builtin_types_compatible_p(long, long long) = 0. 
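	 * On 64-bit kernels, long and long long have the same size and
	 * alignment, so the cast below is safe and kstrtoll() handles
	 * the conversion directly; otherwise we fall back to _kstrtol().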
*/ if (sizeof(long) == sizeof(long long) && __alignof__(long) == __alignof__(long long)) return kstrtoll(s, base, (long long *)res); else return _kstrtol(s, base, res); } int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res); int __must_check kstrtoint(const char *s, unsigned int base, int *res); static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res) { return kstrtoull(s, base, res); } static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res) { return kstrtoll(s, base, res); } static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res) { return kstrtouint(s, base, res); } static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res) { return kstrtoint(s, base, res); } int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); int __must_check kstrtobool(const char *s, bool *res); int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res); int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res); int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res); int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res); int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res); int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res); int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res); int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res); int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res); int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res); int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res); static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res) { return kstrtoull_from_user(s, count, base, res); } static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res) { return kstrtoll_from_user(s, count, base, res); } static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res) { return kstrtouint_from_user(s, count, base, res); } static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res) { return kstrtoint_from_user(s, count, base, res); } /* Obsolete, do not use. 
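These helpers silently ignore overflow and report no errors.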
Use kstrto<foo> instead */ extern unsigned long simple_strtoul(const char *,char **,unsigned int); extern long simple_strtol(const char *,char **,unsigned int); extern unsigned long long simple_strtoull(const char *,char **,unsigned int); extern long long simple_strtoll(const char *,char **,unsigned int); extern int num_to_str(char *buf, int size, unsigned long long num, unsigned int width); /* lib/printf utilities */ extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list); extern __printf(3, 4) int snprintf(char *buf, size_t size, const char *fmt, ...); extern __printf(3, 0) int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); extern __printf(3, 4) int scnprintf(char *buf, size_t size, const char *fmt, ...); extern __printf(3, 0) int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); extern __printf(2, 3) __malloc char *kasprintf(gfp_t gfp, const char *fmt, ...); extern __printf(2, 0) __malloc char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); extern __printf(2, 0) const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); extern __scanf(2, 3) int sscanf(const char *, const char *, ...); extern __scanf(2, 0) int vsscanf(const char *, const char *, va_list); extern int get_option(char **str, int *pint); extern char *get_options(const char *str, int nints, int *ints); extern unsigned long long memparse(const char *ptr, char **retptr); extern bool parse_option_str(const char *str, const char *option); extern char *next_arg(char *args, char **param, char **val); extern int core_kernel_text(unsigned long addr); extern int init_kernel_text(unsigned long addr); extern int core_kernel_data(unsigned long addr); extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); extern int func_ptr_is_kernel_text(void *ptr); u64 int_pow(u64 base, unsigned int exp); unsigned long int_sqrt(unsigned long); #if BITS_PER_LONG < 64 u32 int_sqrt64(u64 x); #else static inline u32 int_sqrt64(u64 x) { return (u32)int_sqrt(x); } #endif extern void bust_spinlocks(int yes); extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ extern int panic_timeout; extern unsigned long panic_print; extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; extern int panic_on_warn; extern int sysctl_panic_on_rcu_stall; extern int sysctl_panic_on_stackoverflow; extern bool crash_kexec_post_notifiers; /* * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It * holds a CPU number which is executing panic() currently. A value of * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). */ extern atomic_t panic_cpu; #define PANIC_CPU_INVALID -1 /* * Only to be used by arch init code. If the user over-wrote the default * CONFIG_PANIC_TIMEOUT, honor it. */ static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) { if (panic_timeout == arch_default_timeout) panic_timeout = timeout; } extern const char *print_tainted(void); enum lockdep_ok { LOCKDEP_STILL_OK, LOCKDEP_NOW_UNRELIABLE }; extern void add_taint(unsigned flag, enum lockdep_ok); extern int test_taint(unsigned flag); extern unsigned long get_taint(void); extern int root_mountflags; extern bool early_boot_irqs_disabled; /* * Values used for system_state. Ordering of the states must not be changed * as code checks for <, <=, >, >= STATE. 
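 *
 * For example, code that must not run before boot has finished can
 * test "system_state < SYSTEM_RUNNING"; that comparison stays valid
 * only while SYSTEM_BOOTING and SYSTEM_SCHEDULING keep sorting before
 * SYSTEM_RUNNING below.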
*/ extern enum system_states { SYSTEM_BOOTING, SYSTEM_SCHEDULING, SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, SYSTEM_RESTART, SYSTEM_SUSPEND, } system_state; /* This cannot be an enum because some may be used in assembly source. */ #define TAINT_PROPRIETARY_MODULE 0 #define TAINT_FORCED_MODULE 1 #define TAINT_CPU_OUT_OF_SPEC 2 #define TAINT_FORCED_RMMOD 3 #define TAINT_MACHINE_CHECK 4 #define TAINT_BAD_PAGE 5 #define TAINT_USER 6 #define TAINT_DIE 7 #define TAINT_OVERRIDDEN_ACPI_TABLE 8 #define TAINT_WARN 9 #define TAINT_CRAP 10 #define TAINT_FIRMWARE_WORKAROUND 11 #define TAINT_OOT_MODULE 12 #define TAINT_UNSIGNED_MODULE 13 #define TAINT_SOFTLOCKUP 14 #define TAINT_LIVEPATCH 15 #define TAINT_AUX 16 #define TAINT_RANDSTRUCT 17 #define TAINT_FLAGS_COUNT 18 struct taint_flag { char c_true; /* character printed when tainted */ char c_false; /* character printed when not tainted */ bool module; /* also show as a per-module taint flag */ }; extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT]; extern const char hex_asc[]; #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] static inline char *hex_byte_pack(char *buf, u8 byte) { *buf++ = hex_asc_hi(byte); *buf++ = hex_asc_lo(byte); return buf; } extern const char hex_asc_upper[]; #define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)] #define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4] static inline char *hex_byte_pack_upper(char *buf, u8 byte) { *buf++ = hex_asc_upper_hi(byte); *buf++ = hex_asc_upper_lo(byte); return buf; } extern int hex_to_bin(unsigned char ch); extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); extern char *bin2hex(char *dst, const void *src, size_t count); bool mac_pton(const char *s, u8 *mac); /* * General tracing related utility functions - trace_printk(), * tracing_on/tracing_off and tracing_start()/tracing_stop * * Use tracing_on/tracing_off when you want to quickly turn on or off * tracing. It simply enables or disables the recording of the trace events. * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on * file, which gives a means for the kernel and userspace to interact. * Place a tracing_off() in the kernel where you want tracing to end. * From user space, examine the trace, and then echo 1 > tracing_on * to continue tracing. * * tracing_stop/tracing_start has slightly more overhead. It is used * by things like suspend to ram where disabling the recording of the * trace is not enough, but tracing must actually stop because things * like calling smp_processor_id() may crash the system. * * Most likely, you want to use tracing_on/tracing_off. */ enum ftrace_dump_mode { DUMP_NONE, DUMP_ALL, DUMP_ORIG, }; #ifdef CONFIG_TRACING void tracing_on(void); void tracing_off(void); int tracing_is_on(void); void tracing_snapshot(void); void tracing_snapshot_alloc(void); extern void tracing_start(void); extern void tracing_stop(void); static inline __printf(1, 2) void ____trace_printk_check_format(const char *fmt, ...) { } #define __trace_printk_check_format(fmt, args...) \ do { \ if (0) \ ____trace_printk_check_format(fmt, ##args); \ } while (0) /** * trace_printk - printf formatting in the ftrace buffer * @fmt: the printf format for printing * * Note: __trace_printk is an internal function for trace_printk() and * the @ip is passed in via the trace_printk() macro. * * This function allows a kernel developer to debug fast path sections * that printk is not appropriate for. 
By scattering in various * printk like tracing in the code, a developer can quickly see * where problems are occurring. * * This is intended as a debugging tool for the developer only. * Please refrain from leaving trace_printks scattered around in * your code. (Extra memory is used for special buffers that are * allocated when trace_printk() is used.) * * A little optimization trick is done here. If there's only one * argument, there's no need to scan the string for printf formats. * The trace_puts() will suffice. But how can we take advantage of * using trace_puts() when trace_printk() has only one argument? * By stringifying the args and checking the size we can tell * whether or not there are args. __stringify((__VA_ARGS__)) will * turn into "()\0" with a size of 3 when there are no args, anything * else will be bigger. All we need to do is define a string to this, * and then take its size and compare to 3. If it's bigger, use * do_trace_printk() otherwise, optimize it to trace_puts(). Then just * let gcc optimize the rest. */ #define trace_printk(fmt, ...) \ do { \ char _______STR[] = __stringify((__VA_ARGS__)); \ if (sizeof(_______STR) > 3) \ do_trace_printk(fmt, ##__VA_ARGS__); \ else \ trace_puts(fmt); \ } while (0) #define do_trace_printk(fmt, args...) \ do { \ static const char *trace_printk_fmt __used \ __attribute__((section("__trace_printk_fmt"))) = \ __builtin_constant_p(fmt) ? fmt : NULL; \ \ __trace_printk_check_format(fmt, ##args); \ \ if (__builtin_constant_p(fmt)) \ __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ else \ __trace_printk(_THIS_IP_, fmt, ##args); \ } while (0) extern __printf(2, 3) int __trace_bprintk(unsigned long ip, const char *fmt, ...); extern __printf(2, 3) int __trace_printk(unsigned long ip, const char *fmt, ...); /** * trace_puts - write a string into the ftrace buffer * @str: the string to record * * Note: __trace_bputs is an internal function for trace_puts and * the @ip is passed in via the trace_puts macro. * * This is similar to trace_printk() but is made for those really fast * paths that a developer wants the least amount of "Heisenbug" effects, * where the processing of the print format is still too much. * * This function allows a kernel developer to debug fast path sections * that printk is not appropriate for. By scattering in various * printk like tracing in the code, a developer can quickly see * where problems are occurring. * * This is intended as a debugging tool for the developer only. * Please refrain from leaving trace_puts scattered around in * your code. (Extra memory is used for special buffers that are * allocated when trace_puts() is used.) * * Returns: 0 if nothing was written, positive # if string was. * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) */ #define trace_puts(str) ({ \ static const char *trace_printk_fmt __used \ __attribute__((section("__trace_printk_fmt"))) = \ __builtin_constant_p(str) ? str : NULL; \ \ if (__builtin_constant_p(str)) \ __trace_bputs(_THIS_IP_, trace_printk_fmt); \ else \ __trace_puts(_THIS_IP_, str, strlen(str)); \ }) extern int __trace_bputs(unsigned long ip, const char *str); extern int __trace_puts(unsigned long ip, const char *str, int size); extern void trace_dump_stack(int skip); /* * The double __builtin_constant_p is because gcc will give us an error * if we try to allocate the static variable to fmt if it is not a * constant. Even with the outer if statement. 
*/ #define ftrace_vprintk(fmt, vargs) \ do { \ if (__builtin_constant_p(fmt)) { \ static const char *trace_printk_fmt __used \ __attribute__((section("__trace_printk_fmt"))) = \ __builtin_constant_p(fmt) ? fmt : NULL; \ \ __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ } else \ __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ } while (0) extern __printf(2, 0) int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); extern __printf(2, 0) int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); #else static inline void tracing_start(void) { } static inline void tracing_stop(void) { } static inline void trace_dump_stack(int skip) { } static inline void tracing_on(void) { } static inline void tracing_off(void) { } static inline int tracing_is_on(void) { return 0; } static inline void tracing_snapshot(void) { } static inline void tracing_snapshot_alloc(void) { } static inline __printf(1, 2) int trace_printk(const char *fmt, ...) { return 0; } static __printf(1, 0) inline int ftrace_vprintk(const char *fmt, va_list ap) { return 0; } static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } #endif /* CONFIG_TRACING */ /* * min()/max()/clamp() macros must accomplish three things: * * - avoid multiple evaluations of the arguments (so side-effects like * "x++" happen only once) when non-constant. * - perform strict type-checking (to generate warnings instead of * nasty runtime surprises). See the "unnecessary" pointer comparison * in __typecheck(). * - retain result as a constant expressions when called with only * constant expressions (to avoid tripping VLA warnings in stack * allocation usage). */ #define __typecheck(x, y) \ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) /* * This returns a constant expression while determining if an argument is * a constant expression, most importantly without evaluating the argument. * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de> */ #define __is_constexpr(x) \ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) #define __no_side_effects(x, y) \ (__is_constexpr(x) && __is_constexpr(y)) #define __safe_cmp(x, y) \ (__typecheck(x, y) && __no_side_effects(x, y)) #define __cmp(x, y, op) ((x) op (y) ? (x) : (y)) #define __cmp_once(x, y, unique_x, unique_y, op) ({ \ typeof(x) unique_x = (x); \ typeof(y) unique_y = (y); \ __cmp(unique_x, unique_y, op); }) #define __careful_cmp(x, y, op) \ __builtin_choose_expr(__safe_cmp(x, y), \ __cmp(x, y, op), \ __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op)) /** * min - return minimum of two values of the same or compatible types * @x: first value * @y: second value */ #define min(x, y) __careful_cmp(x, y, <) /** * max - return maximum of two values of the same or compatible types * @x: first value * @y: second value */ #define max(x, y) __careful_cmp(x, y, >) /** * min3 - return minimum of three values * @x: first value * @y: second value * @z: third value */ #define min3(x, y, z) min((typeof(x))min(x, y), z) /** * max3 - return maximum of three values * @x: first value * @y: second value * @z: third value */ #define max3(x, y, z) max((typeof(x))max(x, y), z) /** * min_not_zero - return the minimum that is _not_ zero, unless both are zero * @x: value1 * @y: value2 */ #define min_not_zero(x, y) ({ \ typeof(x) __x = (x); \ typeof(y) __y = (y); \ __x == 0 ? __y : ((__y == 0) ? 
__x : min(__x, __y)); }) /** * clamp - return a value clamped to a given range with strict typechecking * @val: current value * @lo: lowest allowable value * @hi: highest allowable value * * This macro does strict typechecking of @lo/@hi to make sure they are of the * same type as @val. See the unnecessary pointer comparisons. */ #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) /* * ..and if you can't take the strict * types, you can specify one yourself. * * Or not use min/max/clamp at all, of course. */ /** * min_t - return minimum of two values, using the specified type * @type: data type to use * @x: first value * @y: second value */ #define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <) /** * max_t - return maximum of two values, using the specified type * @type: data type to use * @x: first value * @y: second value */ #define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >) /** * clamp_t - return a value clamped to a given range using a given type * @type: the type of variable to use * @val: current value * @lo: minimum allowable value * @hi: maximum allowable value * * This macro does no typechecking and uses temporary variables of type * @type to make all the comparisons. */ #define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) /** * clamp_val - return a value clamped to a given range using val's type * @val: current value * @lo: minimum allowable value * @hi: maximum allowable value * * This macro does no typechecking and uses temporary variables of whatever * type the input argument @val is. This is useful when @val is an unsigned * type and @lo and @hi are literals that will otherwise be assigned a signed * integer type. */ #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) /** * swap - swap values of @a and @b * @a: first value * @b: second value */ #define swap(a, b) \ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) /* This counts to 12. Any more, it will return 13th argument. */ #define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n #define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) #define __CONCAT(a, b) a ## b #define CONCATENATE(a, b) __CONCAT(a, b) /** * container_of - cast a member of a structure out to the containing structure * @ptr: the pointer to the member. * @type: the type of the container struct this is embedded in. * @member: the name of the member within the struct. * */ #define container_of(ptr, type, member) ({ \ void *__mptr = (void *)(ptr); \ BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ !__same_type(*(ptr), void), \ "pointer type mismatch in container_of()"); \ ((type *)(__mptr - offsetof(type, member))); }) /** * container_of_safe - cast a member of a structure out to the containing structure * @ptr: the pointer to the member. * @type: the type of the container struct this is embedded in. * @member: the name of the member within the struct. * * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged. */ #define container_of_safe(ptr, type, member) ({ \ void *__mptr = (void *)(ptr); \ BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ !__same_type(*(ptr), void), \ "pointer type mismatch in container_of()"); \ IS_ERR_OR_NULL(__mptr) ? 
ERR_CAST(__mptr) : \ ((type *)(__mptr - offsetof(type, member))); }) /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ #ifdef CONFIG_FTRACE_MCOUNT_RECORD # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD #endif /* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */ #define VERIFY_OCTAL_PERMISSIONS(perms) \ (BUILD_BUG_ON_ZERO((perms) < 0) + \ BUILD_BUG_ON_ZERO((perms) > 0777) + \ /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \ BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \ BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \ /* USER_WRITABLE >= GROUP_WRITABLE */ \ BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \ /* OTHER_WRITABLE? Generally considered a bad idea. */ \ BUILD_BUG_ON_ZERO((perms) & 2) + \ (perms)) #endif percpu-rwsem.h 0000644 00000007664 14722070374 0007371 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PERCPU_RWSEM_H #define _LINUX_PERCPU_RWSEM_H #include <linux/atomic.h> #include <linux/rwsem.h> #include <linux/percpu.h> #include <linux/rcuwait.h> #include <linux/rcu_sync.h> #include <linux/lockdep.h> struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int __percpu *read_count; struct rw_semaphore rw_sem; /* slowpath */ struct rcuwait writer; /* blocked writer */ int readers_block; }; #define __DEFINE_PERCPU_RWSEM(name, is_static) \ static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \ is_static struct percpu_rw_semaphore name = { \ .rss = __RCU_SYNC_INITIALIZER(name.rss), \ .read_count = &__percpu_rwsem_rc_##name, \ .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ .writer = __RCUWAIT_INITIALIZER(name.writer), \ } #define DEFINE_PERCPU_RWSEM(name) \ __DEFINE_PERCPU_RWSEM(name, /* not static */) #define DEFINE_STATIC_PERCPU_RWSEM(name) \ __DEFINE_PERCPU_RWSEM(name, static) extern int __percpu_down_read(struct percpu_rw_semaphore *, int); extern void __percpu_up_read(struct percpu_rw_semaphore *); static inline void percpu_down_read(struct percpu_rw_semaphore *sem) { might_sleep(); rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_); preempt_disable(); /* * We are in an RCU-sched read-side critical section, so the writer * cannot both change sem->state from readers_fast and start checking * counters while we are here. So if we see !sem->state, we know that * the writer won't be checking until we're past the preempt_enable() * and that once the synchronize_rcu() is done, the writer will see * anything we did within this RCU-sched read-side critical section. */ __this_cpu_inc(*sem->read_count); if (unlikely(!rcu_sync_is_idle(&sem->rss))) __percpu_down_read(sem, false); /* Unconditional memory barrier */ /* * The preempt_enable() prevents the compiler from * bleeding the critical section out. */ preempt_enable(); } static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) { int ret = 1; preempt_disable(); /* * Same as in percpu_down_read(). */ __this_cpu_inc(*sem->read_count); if (unlikely(!rcu_sync_is_idle(&sem->rss))) ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */ preempt_enable(); /* * The barrier() from preempt_enable() prevents the compiler from * bleeding the critical section out. */ if (ret) rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_); return ret; } static inline void percpu_up_read(struct percpu_rw_semaphore *sem) { preempt_disable(); /* * Same as in percpu_down_read().
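 *
 * On the fast path (no writer active, rcu_sync idle) the unlock is just
 * a per-CPU decrement; otherwise __percpu_up_read() supplies the memory
 * barrier and wakes a possibly blocked writer.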
*/ if (likely(rcu_sync_is_idle(&sem->rss))) __this_cpu_dec(*sem->read_count); else __percpu_up_read(sem); /* Unconditional memory barrier */ preempt_enable(); rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); } extern void percpu_down_write(struct percpu_rw_semaphore *); extern void percpu_up_write(struct percpu_rw_semaphore *); extern int __percpu_init_rwsem(struct percpu_rw_semaphore *, const char *, struct lock_class_key *); extern void percpu_free_rwsem(struct percpu_rw_semaphore *); #define percpu_init_rwsem(sem) \ ({ \ static struct lock_class_key rwsem_key; \ __percpu_init_rwsem(sem, #sem, &rwsem_key); \ }) #define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem) #define percpu_rwsem_assert_held(sem) \ lockdep_assert_held(&(sem)->rw_sem) static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) { lock_release(&sem->rw_sem.dep_map, 1, ip); #ifdef CONFIG_RWSEM_SPIN_ON_OWNER if (!read) atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN); #endif } static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) { lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip); #ifdef CONFIG_RWSEM_SPIN_ON_OWNER if (!read) atomic_long_set(&sem->rw_sem.owner, (long)current); #endif } #endif of_pci.h 0000644 00000001647 14722070374 0006172 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __OF_PCI_H #define __OF_PCI_H #include <linux/pci.h> #include <linux/msi.h> struct pci_dev; struct of_phandle_args; struct device_node; #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI) struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn); int of_pci_get_devfn(struct device_node *np); void of_pci_check_probe_only(void); #else static inline struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn) { return NULL; } static inline int of_pci_get_devfn(struct device_node *np) { return -EINVAL; } static inline void of_pci_check_probe_only(void) { } #endif #if IS_ENABLED(CONFIG_OF_IRQ) int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); #else static inline int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) { return 0; } #endif #endif leds-pca9532.h 0000644 00000001542 14722070374 0006740 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * pca9532.h - platform data structure for pca9532 led controller * * Copyright (C) 2008 Riku Voipio <riku.voipio@movial.fi> * * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf */ #ifndef __LINUX_PCA9532_H #define __LINUX_PCA9532_H #include <linux/leds.h> #include <linux/workqueue.h> #include <dt-bindings/leds/leds-pca9532.h> enum pca9532_state { PCA9532_OFF = 0x0, PCA9532_ON = 0x1, PCA9532_PWM0 = 0x2, PCA9532_PWM1 = 0x3, PCA9532_KEEP = 0xff, }; struct pca9532_led { u8 id; struct i2c_client *client; const char *name; const char *default_trigger; struct led_classdev ldev; struct work_struct work; u32 type; enum pca9532_state state; }; struct pca9532_platform_data { struct pca9532_led leds[16]; u8 pwm[2]; u8 psc[2]; int gpio_base; }; #endif /* __LINUX_PCA9532_H */ acpi_pmtmr.h 0000644 00000001242 14722070374 0007055 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ACPI_PMTMR_H_ #define _ACPI_PMTMR_H_ #include <linux/clocksource.h> /* Number of PMTMR ticks expected during calibration run */ #define PMTMR_TICKS_PER_SEC 3579545 /* limit it to 24 bits */ #define ACPI_PM_MASK CLOCKSOURCE_MASK(24) /* Overrun value */ #define ACPI_PM_OVRRUN 
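/* 2^24 ticks at PMTMR_TICKS_PER_SEC (3579545 Hz): the 24-bit timer wraps roughly every 4.69 s */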
(1<<24) #ifdef CONFIG_X86_PM_TIMER extern u32 acpi_pm_read_verified(void); extern u32 pmtmr_ioport; static inline u32 acpi_pm_read_early(void) { if (!pmtmr_ioport) return 0; /* mask the output to 24 bits */ return acpi_pm_read_verified() & ACPI_PM_MASK; } #else static inline u32 acpi_pm_read_early(void) { return 0; } #endif #endif once.h 0000644 00000005456 14722070374 0005661 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ONCE_H #define _LINUX_ONCE_H #include <linux/types.h> #include <linux/jump_label.h> /* Helpers used from arbitrary contexts. * Hard irqs are blocked, be cautious. */ bool __do_once_start(bool *done, unsigned long *flags); void __do_once_done(bool *done, struct static_key_true *once_key, unsigned long *flags, struct module *mod); /* Variant for process contexts only. */ bool __do_once_slow_start(bool *done); void __do_once_slow_done(bool *done, struct static_key_true *once_key, struct module *mod); /* Call a function exactly once. The idea of DO_ONCE() is to perform * a function call such as initialization of random seeds, etc, only * once, where DO_ONCE() can live in the fast-path. After @func has * been called with the passed arguments, the static key will patch * out the condition into a nop. DO_ONCE() guarantees type safety of * arguments! * * Note that the following is not equivalent ... * * DO_ONCE(func, arg); * DO_ONCE(func, arg); * * ... to this version: * * void foo(void) * { * DO_ONCE(func, arg); * } * * foo(); * foo(); * * In case the one-time invocation could be triggered from multiple * places, then a common helper function must be defined, so that only * a single static key will be placed there! */ #define DO_ONCE(func, ...) \ ({ \ bool ___ret = false; \ static bool ___done = false; \ static DEFINE_STATIC_KEY_TRUE(___once_key); \ if (static_branch_unlikely(&___once_key)) { \ unsigned long ___flags; \ ___ret = __do_once_start(&___done, &___flags); \ if (unlikely(___ret)) { \ func(__VA_ARGS__); \ __do_once_done(&___done, &___once_key, \ &___flags, THIS_MODULE); \ } \ } \ ___ret; \ }) /* Variant of DO_ONCE() for process/sleepable contexts. */ #define DO_ONCE_SLOW(func, ...) \ ({ \ bool ___ret = false; \ static bool __section(.data.once) ___done = false; \ static DEFINE_STATIC_KEY_TRUE(___once_key); \ if (static_branch_unlikely(&___once_key)) { \ ___ret = __do_once_slow_start(&___done); \ if (unlikely(___ret)) { \ func(__VA_ARGS__); \ __do_once_slow_done(&___done, &___once_key, \ THIS_MODULE); \ } \ } \ ___ret; \ }) #define get_random_once(buf, nbytes) \ DO_ONCE(get_random_bytes, (buf), (nbytes)) #define get_random_once_wait(buf, nbytes) \ DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) #define get_random_slow_once(buf, nbytes) \ DO_ONCE_SLOW(get_random_bytes, (buf), (nbytes)) #endif /* _LINUX_ONCE_H */ isdn/capilli.h 0000644 00000006677 14722070374 0007305 0 ustar 00 /* $Id: capilli.h,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $ * * Kernel CAPI 2.0 Driver Interface for Linux * * Copyright 1999 by Carsten Paeth <calle@calle.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #ifndef __CAPILLI_H__ #define __CAPILLI_H__ #include <linux/kernel.h> #include <linux/list.h> #include <linux/capi.h> #include <linux/kernelcapi.h> typedef struct capiloaddatapart { int user; /* data in userspace ? (nonzero: @data is a user-space pointer)
*/ int len; unsigned char *data; } capiloaddatapart; typedef struct capiloaddata { capiloaddatapart firmware; capiloaddatapart configuration; } capiloaddata; typedef struct capicardparams { unsigned int port; unsigned irq; int cardtype; int cardnr; unsigned int membase; } capicardparams; struct capi_ctr { /* filled in before calling attach_capi_ctr */ struct module *owner; void *driverdata; /* driver specific */ char name[32]; /* name of controller */ char *driver_name; /* name of driver */ int (*load_firmware)(struct capi_ctr *, capiloaddata *); void (*reset_ctr)(struct capi_ctr *); void (*register_appl)(struct capi_ctr *, u16 appl, capi_register_params *); void (*release_appl)(struct capi_ctr *, u16 appl); u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb); char *(*procinfo)(struct capi_ctr *); int (*proc_show)(struct seq_file *, void *); /* filled in before calling ready callback */ u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */ capi_version version; /* CAPI_GET_VERSION */ capi_profile profile; /* CAPI_GET_PROFILE */ u8 serial[CAPI_SERIAL_LEN]; /* CAPI_GET_SERIAL */ /* management information for kcapi */ unsigned long nrecvctlpkt; unsigned long nrecvdatapkt; unsigned long nsentctlpkt; unsigned long nsentdatapkt; int cnr; /* controller number */ unsigned short state; /* controller state */ int blocked; /* output blocked */ int traceflag; /* capi trace */ wait_queue_head_t state_wait_queue; struct proc_dir_entry *procent; char procfn[128]; }; int attach_capi_ctr(struct capi_ctr *); int detach_capi_ctr(struct capi_ctr *); void capi_ctr_ready(struct capi_ctr * card); void capi_ctr_down(struct capi_ctr * card); void capi_ctr_suspend_output(struct capi_ctr * card); void capi_ctr_resume_output(struct capi_ctr * card); void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb); // --------------------------------------------------------------------------- // needed for AVM capi drivers struct capi_driver { char name[32]; /* driver name */ char revision[32]; int (*add_card)(struct capi_driver *driver, capicardparams *data); /* management information for kcapi */ struct list_head list; }; void register_capi_driver(struct capi_driver *driver); void unregister_capi_driver(struct capi_driver *driver); // --------------------------------------------------------------------------- // library functions for use by hardware controller drivers void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize); void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci); void capilib_release_appl(struct list_head *head, u16 applid); void capilib_release(struct list_head *head); void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid); u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid); #endif /* __CAPILLI_H__ */ isdn/capiutil.h 0000644 00000034314 14722070374 0007477 0 ustar 00 /* $Id: capiutil.h,v 1.5.6.2 2001/09/23 22:24:33 kai Exp $ * * CAPI 2.0 defines & types * * From CAPI 2.0 Development Kit AVM 1995 (msg.c) * Rewritten for Linux 1996 by Carsten Paeth <calle@calle.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. 
* */ #ifndef __CAPIUTIL_H__ #define __CAPIUTIL_H__ #include <asm/types.h> #define CAPIMSG_BASELEN 8 #define CAPIMSG_U8(m, off) (m[off]) #define CAPIMSG_U16(m, off) (m[off]|(m[(off)+1]<<8)) #define CAPIMSG_U32(m, off) (m[off]|(m[(off)+1]<<8)|(m[(off)+2]<<16)|(m[(off)+3]<<24)) #define CAPIMSG_LEN(m) CAPIMSG_U16(m,0) #define CAPIMSG_APPID(m) CAPIMSG_U16(m,2) #define CAPIMSG_COMMAND(m) CAPIMSG_U8(m,4) #define CAPIMSG_SUBCOMMAND(m) CAPIMSG_U8(m,5) #define CAPIMSG_CMD(m) (((m[4])<<8)|(m[5])) #define CAPIMSG_MSGID(m) CAPIMSG_U16(m,6) #define CAPIMSG_CONTROLLER(m) (m[8] & 0x7f) #define CAPIMSG_CONTROL(m) CAPIMSG_U32(m, 8) #define CAPIMSG_NCCI(m) CAPIMSG_CONTROL(m) #define CAPIMSG_DATALEN(m) CAPIMSG_U16(m,16) /* DATA_B3_REQ */ static inline void capimsg_setu8(void *m, int off, __u8 val) { ((__u8 *)m)[off] = val; } static inline void capimsg_setu16(void *m, int off, __u16 val) { ((__u8 *)m)[off] = val & 0xff; ((__u8 *)m)[off+1] = (val >> 8) & 0xff; } static inline void capimsg_setu32(void *m, int off, __u32 val) { ((__u8 *)m)[off] = val & 0xff; ((__u8 *)m)[off+1] = (val >> 8) & 0xff; ((__u8 *)m)[off+2] = (val >> 16) & 0xff; ((__u8 *)m)[off+3] = (val >> 24) & 0xff; } #define CAPIMSG_SETLEN(m, len) capimsg_setu16(m, 0, len) #define CAPIMSG_SETAPPID(m, applid) capimsg_setu16(m, 2, applid) #define CAPIMSG_SETCOMMAND(m,cmd) capimsg_setu8(m, 4, cmd) #define CAPIMSG_SETSUBCOMMAND(m, cmd) capimsg_setu8(m, 5, cmd) #define CAPIMSG_SETMSGID(m, msgid) capimsg_setu16(m, 6, msgid) #define CAPIMSG_SETCONTROL(m, contr) capimsg_setu32(m, 8, contr) #define CAPIMSG_SETDATALEN(m, len) capimsg_setu16(m, 16, len) /*----- basic-type definitions -----*/ typedef __u8 *_cstruct; typedef enum { CAPI_COMPOSE, CAPI_DEFAULT } _cmstruct; /* The _cmsg structure contains all possible CAPI 2.0 parameters. All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE assembles the parameters and builds CAPI 2.0 conformant messages. CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the parameters in the _cmsg structure */ typedef struct { /* Header */ __u16 ApplId; __u8 Command; __u8 Subcommand; __u16 Messagenumber; /* Parameter */ union { __u32 adrController; __u32 adrPLCI; __u32 adrNCCI; } adr; _cmstruct AdditionalInfo; _cstruct B1configuration; __u16 B1protocol; _cstruct B2configuration; __u16 B2protocol; _cstruct B3configuration; __u16 B3protocol; _cstruct BC; _cstruct BChannelinformation; _cmstruct BProtocol; _cstruct CalledPartyNumber; _cstruct CalledPartySubaddress; _cstruct CallingPartyNumber; _cstruct CallingPartySubaddress; __u32 CIPmask; __u32 CIPmask2; __u16 CIPValue; __u32 Class; _cstruct ConnectedNumber; _cstruct ConnectedSubaddress; __u32 Data; __u16 DataHandle; __u16 DataLength; _cstruct FacilityConfirmationParameter; _cstruct Facilitydataarray; _cstruct FacilityIndicationParameter; _cstruct FacilityRequestParameter; __u16 FacilitySelector; __u16 Flags; __u32 Function; _cstruct HLC; __u16 Info; _cstruct InfoElement; __u32 InfoMask; __u16 InfoNumber; _cstruct Keypadfacility; _cstruct LLC; _cstruct ManuData; __u32 ManuID; _cstruct NCPI; __u16 Reason; __u16 Reason_B3; __u16 Reject; _cstruct Useruserdata; /* intern */ unsigned l, p; unsigned char *par; __u8 *m; /* buffer to construct message */ __u8 buf[180]; } _cmsg; /* * capi_cmsg2message() assembles the parameters from _cmsg into a CAPI 2.0 * conformant message */ unsigned capi_cmsg2message(_cmsg * cmsg, __u8 * msg); /* * capi_message2cmsg disassembles a CAPI message and writes the parameters * into _cmsg for easy access */ unsigned capi_message2cmsg(_cmsg * cmsg, __u8 * msg); /* * capi_cmsg_header() fills the _cmsg structure with default values, so only * parameters with non-default values need to be changed before sending the * message.
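 *
 * A minimal sketch of the intended flow (applid, msgnum and ncci are
 * placeholder values; the fill helper calls capi_cmsg_header() itself):
 *
 *	_cmsg cmsg;
 *	__u8 msg[180];
 *
 *	capi_fill_CONNECT_B3_REQ(&cmsg, applid, msgnum, ncci, NULL);
 *	capi_cmsg2message(&cmsg, msg);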
*/ unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId, __u8 _Command, __u8 _Subcommand, __u16 _Messagenumber, __u32 _Controller); /*-----------------------------------------------------------------------*/ /* * Debugging / Tracing functions */ char *capi_cmd2str(__u8 cmd, __u8 subcmd); typedef struct { u_char *buf; u_char *p; size_t size; size_t pos; } _cdebbuf; #define CDEBUG_SIZE 1024 #define CDEBUG_GSIZE 4096 void cdebbuf_free(_cdebbuf *cdb); int cdebug_init(void); void cdebug_exit(void); _cdebbuf *capi_cmsg2str(_cmsg *cmsg); _cdebbuf *capi_message2str(__u8 *msg); /*-----------------------------------------------------------------------*/ static inline void capi_cmsg_answer(_cmsg * cmsg) { cmsg->Subcommand |= 0x01; } /*-----------------------------------------------------------------------*/ static inline void capi_fill_CONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, _cstruct NCPI) { capi_cmsg_header(cmsg, ApplId, 0x82, 0x80, Messagenumber, adr); cmsg->NCPI = NCPI; } static inline void capi_fill_FACILITY_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u16 FacilitySelector, _cstruct FacilityRequestParameter) { capi_cmsg_header(cmsg, ApplId, 0x80, 0x80, Messagenumber, adr); cmsg->FacilitySelector = FacilitySelector; cmsg->FacilityRequestParameter = FacilityRequestParameter; } static inline void capi_fill_INFO_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, _cstruct CalledPartyNumber, _cstruct BChannelinformation, _cstruct Keypadfacility, _cstruct Useruserdata, _cstruct Facilitydataarray) { capi_cmsg_header(cmsg, ApplId, 0x08, 0x80, Messagenumber, adr); cmsg->CalledPartyNumber = CalledPartyNumber; cmsg->BChannelinformation = BChannelinformation; cmsg->Keypadfacility = Keypadfacility; cmsg->Useruserdata = Useruserdata; cmsg->Facilitydataarray = Facilitydataarray; } static inline void capi_fill_LISTEN_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u32 InfoMask, __u32 CIPmask, __u32 CIPmask2, _cstruct CallingPartyNumber, _cstruct CallingPartySubaddress) { capi_cmsg_header(cmsg, ApplId, 0x05, 0x80, Messagenumber, adr); cmsg->InfoMask = InfoMask; cmsg->CIPmask = CIPmask; cmsg->CIPmask2 = CIPmask2; cmsg->CallingPartyNumber = CallingPartyNumber; cmsg->CallingPartySubaddress = CallingPartySubaddress; } static inline void capi_fill_ALERT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, _cstruct BChannelinformation, _cstruct Keypadfacility, _cstruct Useruserdata, _cstruct Facilitydataarray) { capi_cmsg_header(cmsg, ApplId, 0x01, 0x80, Messagenumber, adr); cmsg->BChannelinformation = BChannelinformation; cmsg->Keypadfacility = Keypadfacility; cmsg->Useruserdata = Useruserdata; cmsg->Facilitydataarray = Facilitydataarray; } static inline void capi_fill_CONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u16 CIPValue, _cstruct CalledPartyNumber, _cstruct CallingPartyNumber, _cstruct CalledPartySubaddress, _cstruct CallingPartySubaddress, __u16 B1protocol, __u16 B2protocol, __u16 B3protocol, _cstruct B1configuration, _cstruct B2configuration, _cstruct B3configuration, _cstruct BC, _cstruct LLC, _cstruct HLC, _cstruct BChannelinformation, _cstruct Keypadfacility, _cstruct Useruserdata, _cstruct Facilitydataarray) { capi_cmsg_header(cmsg, ApplId, 0x02, 0x80, Messagenumber, adr); cmsg->CIPValue = CIPValue; cmsg->CalledPartyNumber = CalledPartyNumber; cmsg->CallingPartyNumber = CallingPartyNumber; cmsg->CalledPartySubaddress = CalledPartySubaddress; cmsg->CallingPartySubaddress = 
CallingPartySubaddress; cmsg->B1protocol = B1protocol; cmsg->B2protocol = B2protocol; cmsg->B3protocol = B3protocol; cmsg->B1configuration = B1configuration; cmsg->B2configuration = B2configuration; cmsg->B3configuration = B3configuration; cmsg->BC = BC; cmsg->LLC = LLC; cmsg->HLC = HLC; cmsg->BChannelinformation = BChannelinformation; cmsg->Keypadfacility = Keypadfacility; cmsg->Useruserdata = Useruserdata; cmsg->Facilitydataarray = Facilitydataarray; } static inline void capi_fill_DATA_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u32 Data, __u16 DataLength, __u16 DataHandle, __u16 Flags) { capi_cmsg_header(cmsg, ApplId, 0x86, 0x80, Messagenumber, adr); cmsg->Data = Data; cmsg->DataLength = DataLength; cmsg->DataHandle = DataHandle; cmsg->Flags = Flags; } static inline void capi_fill_DISCONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, _cstruct BChannelinformation, _cstruct Keypadfacility, _cstruct Useruserdata, _cstruct Facilitydataarray) { capi_cmsg_header(cmsg, ApplId, 0x04, 0x80, Messagenumber, adr); cmsg->BChannelinformation = BChannelinformation; cmsg->Keypadfacility = Keypadfacility; cmsg->Useruserdata = Useruserdata; cmsg->Facilitydataarray = Facilitydataarray; } static inline void capi_fill_DISCONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, _cstruct NCPI) { capi_cmsg_header(cmsg, ApplId, 0x84, 0x80, Messagenumber, adr); cmsg->NCPI = NCPI; } static inline void capi_fill_MANUFACTURER_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u32 ManuID, __u32 Class, __u32 Function, _cstruct ManuData) { capi_cmsg_header(cmsg, ApplId, 0xff, 0x80, Messagenumber, adr); cmsg->ManuID = ManuID; cmsg->Class = Class; cmsg->Function = Function; cmsg->ManuData = ManuData; } static inline void capi_fill_RESET_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, _cstruct NCPI) { capi_cmsg_header(cmsg, ApplId, 0x87, 0x80, Messagenumber, adr); cmsg->NCPI = NCPI; } static inline void capi_fill_SELECT_B_PROTOCOL_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u16 B1protocol, __u16 B2protocol, __u16 B3protocol, _cstruct B1configuration, _cstruct B2configuration, _cstruct B3configuration) { capi_cmsg_header(cmsg, ApplId, 0x41, 0x80, Messagenumber, adr); cmsg->B1protocol = B1protocol; cmsg->B2protocol = B2protocol; cmsg->B3protocol = B3protocol; cmsg->B1configuration = B1configuration; cmsg->B2configuration = B2configuration; cmsg->B3configuration = B3configuration; } static inline void capi_fill_CONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u16 Reject, __u16 B1protocol, __u16 B2protocol, __u16 B3protocol, _cstruct B1configuration, _cstruct B2configuration, _cstruct B3configuration, _cstruct ConnectedNumber, _cstruct ConnectedSubaddress, _cstruct LLC, _cstruct BChannelinformation, _cstruct Keypadfacility, _cstruct Useruserdata, _cstruct Facilitydataarray) { capi_cmsg_header(cmsg, ApplId, 0x02, 0x83, Messagenumber, adr); cmsg->Reject = Reject; cmsg->B1protocol = B1protocol; cmsg->B2protocol = B2protocol; cmsg->B3protocol = B3protocol; cmsg->B1configuration = B1configuration; cmsg->B2configuration = B2configuration; cmsg->B3configuration = B3configuration; cmsg->ConnectedNumber = ConnectedNumber; cmsg->ConnectedSubaddress = ConnectedSubaddress; cmsg->LLC = LLC; cmsg->BChannelinformation = BChannelinformation; cmsg->Keypadfacility = Keypadfacility; cmsg->Useruserdata = Useruserdata; cmsg->Facilitydataarray = Facilitydataarray; } static inline void 
capi_fill_CONNECT_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr) { capi_cmsg_header(cmsg, ApplId, 0x03, 0x83, Messagenumber, adr); } static inline void capi_fill_CONNECT_B3_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr) { capi_cmsg_header(cmsg, ApplId, 0x83, 0x83, Messagenumber, adr); } static inline void capi_fill_CONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u16 Reject, _cstruct NCPI) { capi_cmsg_header(cmsg, ApplId, 0x82, 0x83, Messagenumber, adr); cmsg->Reject = Reject; cmsg->NCPI = NCPI; } static inline void capi_fill_CONNECT_B3_T90_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr) { capi_cmsg_header(cmsg, ApplId, 0x88, 0x83, Messagenumber, adr); } static inline void capi_fill_DATA_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u16 DataHandle) { capi_cmsg_header(cmsg, ApplId, 0x86, 0x83, Messagenumber, adr); cmsg->DataHandle = DataHandle; } static inline void capi_fill_DISCONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr) { capi_cmsg_header(cmsg, ApplId, 0x84, 0x83, Messagenumber, adr); } static inline void capi_fill_DISCONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr) { capi_cmsg_header(cmsg, ApplId, 0x04, 0x83, Messagenumber, adr); } static inline void capi_fill_FACILITY_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u16 FacilitySelector) { capi_cmsg_header(cmsg, ApplId, 0x80, 0x83, Messagenumber, adr); cmsg->FacilitySelector = FacilitySelector; } static inline void capi_fill_INFO_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr) { capi_cmsg_header(cmsg, ApplId, 0x08, 0x83, Messagenumber, adr); } static inline void capi_fill_MANUFACTURER_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr, __u32 ManuID, __u32 Class, __u32 Function, _cstruct ManuData) { capi_cmsg_header(cmsg, ApplId, 0xff, 0x83, Messagenumber, adr); cmsg->ManuID = ManuID; cmsg->Class = Class; cmsg->Function = Function; cmsg->ManuData = ManuData; } static inline void capi_fill_RESET_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber, __u32 adr) { capi_cmsg_header(cmsg, ApplId, 0x87, 0x83, Messagenumber, adr); } #endif /* __CAPIUTIL_H__ */ kasan.h 0000644 00000013246 14722070374 0006026 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H #include <linux/types.h> struct kmem_cache; struct page; struct vm_struct; struct task_struct; #ifdef CONFIG_KASAN #include <asm/kasan.h> #include <asm/pgtable.h> extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE]; extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD]; extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD]; extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; int kasan_populate_early_shadow(const void *shadow_start, const void *shadow_end); static inline void *kasan_mem_to_shadow(const void *addr) { return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET; } /* Enable reporting bugs after kasan_disable_current() */ extern void kasan_enable_current(void); /* Disable reporting bugs for current task */ extern void kasan_disable_current(void); void kasan_unpoison_shadow(const void *address, size_t size); void kasan_unpoison_task_stack(struct task_struct *task); void kasan_unpoison_stack_above_sp_to(const void *watermark); void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page *page, 
unsigned int order); void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags); void kasan_poison_slab(struct page *page); void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); void kasan_poison_object_data(struct kmem_cache *cache, void *object); void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, const void *object); void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); void kasan_kfree_large(void *ptr, unsigned long ip); void kasan_poison_kfree(void *ptr, unsigned long ip); void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags); void * __must_check kasan_krealloc(const void *object, size_t new_size, gfp_t flags); void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); struct kasan_cache { int alloc_meta_offset; int free_meta_offset; }; int kasan_module_alloc(void *addr, size_t size); void kasan_free_shadow(const struct vm_struct *vm); int kasan_add_zero_shadow(void *start, unsigned long size); void kasan_remove_zero_shadow(void *start, unsigned long size); size_t __ksize(const void *); static inline void kasan_unpoison_slab(const void *ptr) { kasan_unpoison_shadow(ptr, __ksize(ptr)); } size_t kasan_metadata_size(struct kmem_cache *cache); bool kasan_save_enable_multi_shot(void); void kasan_restore_multi_shot(bool enabled); #else /* CONFIG_KASAN */ static inline void kasan_unpoison_shadow(const void *address, size_t size) {} static inline void kasan_unpoison_task_stack(struct task_struct *task) {} static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {} static inline void kasan_enable_current(void) {} static inline void kasan_disable_current(void) {} static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) {} static inline void kasan_poison_object_data(struct kmem_cache *cache, void *object) {} static inline void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object) { return (void *)object; } static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) { return ptr; } static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, gfp_t flags) { return (void *)object; } static inline void *kasan_krealloc(const void *object, size_t new_size, gfp_t flags) { return (void *)object; } static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) { return object; } static inline bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip) { return false; } static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } static inline void kasan_free_shadow(const struct vm_struct *vm) {} static inline int kasan_add_zero_shadow(void *start, unsigned long size) { return 0; } static inline void kasan_remove_zero_shadow(void *start, unsigned long size) {} static inline void kasan_unpoison_slab(const void *ptr) { } static inline size_t 
kasan_metadata_size(struct kmem_cache *cache) { return 0; } #endif /* CONFIG_KASAN */ #ifdef CONFIG_KASAN_GENERIC #define KASAN_SHADOW_INIT 0 void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); #else /* CONFIG_KASAN_GENERIC */ static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} #endif /* CONFIG_KASAN_GENERIC */ #ifdef CONFIG_KASAN_SW_TAGS #define KASAN_SHADOW_INIT 0xFF void kasan_init_tags(void); void *kasan_reset_tag(const void *addr); void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); #else /* CONFIG_KASAN_SW_TAGS */ static inline void kasan_init_tags(void) { } static inline void *kasan_reset_tag(const void *addr) { return (void *)addr; } #endif /* CONFIG_KASAN_SW_TAGS */ #endif /* LINUX_KASAN_H */ rio_regs.h 0000644 00000046104 14722070374 0006541 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RapidIO register definitions * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> */ #ifndef LINUX_RIO_REGS_H #define LINUX_RIO_REGS_H /* * In RapidIO, each device has a 16MB configuration space that is * accessed via maintenance transactions. Portions of configuration * space are standardized and/or reserved. */ #define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO maintenance space */ #define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ #define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ #define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ #define RIO_ASM_ID_MASK 0xffff0000 /* [I] Asm ID Mask */ #define RIO_ASM_VEN_ID_MASK 0x0000ffff /* [I] Asm Vend Mask */ #define RIO_ASM_INFO_CAR 0x0c /* [I] Assembly Information CAR */ #define RIO_ASM_REV_MASK 0xffff0000 /* [I] Asm Rev Mask */ #define RIO_EXT_FTR_PTR_MASK 0x0000ffff /* [I] EF_PTR Mask */ #define RIO_PEF_CAR 0x10 /* [I] Processing Element Features CAR */ #define RIO_PEF_BRIDGE 0x80000000 /* [I] Bridge */ #define RIO_PEF_MEMORY 0x40000000 /* [I] MMIO */ #define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */ #define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */ #define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */ #define RIO_PEF_INB_MBOX 0x00f00000 /* [II, <= 1.2] Mailboxes */ #define RIO_PEF_INB_MBOX0 0x00800000 /* [II, <= 1.2] Mailbox 0 */ #define RIO_PEF_INB_MBOX1 0x00400000 /* [II, <= 1.2] Mailbox 1 */ #define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */ #define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */ #define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */ #define RIO_PEF_DEV32 0x00001000 /* [III] PE supports Common Transport Dev32 */ #define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ #define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ #define RIO_PEF_CTLS 0x00000010 /* [III] Common Transport Large System (< rev.3) */ #define RIO_PEF_DEV16 0x00000010 /* [III] PE Supports Common Transport Dev16 (rev.3) */ #define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */ #define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */ #define RIO_PEF_ADDR_50 0x00000002 /* [I] 50 bits */ #define RIO_PEF_ADDR_34 0x00000001 /* [I] 34 bits */ #define RIO_SWP_INFO_CAR 0x14 /* [I] Switch Port Information CAR */ #define RIO_SWP_INFO_PORT_TOTAL_MASK 0x0000ff00 /* [I] Total number of ports */ #define RIO_SWP_INFO_PORT_NUM_MASK 0x000000ff /* [I] Maintenance transaction port number */ #define RIO_GET_TOTAL_PORTS(x) ((x & RIO_SWP_INFO_PORT_TOTAL_MASK) >> 8) #define RIO_GET_PORT_NUM(x) (x & RIO_SWP_INFO_PORT_NUM_MASK) #define RIO_SRC_OPS_CAR 0x18 /* [I] Source Operations CAR */ #define RIO_SRC_OPS_READ 0x00008000 /* [I] Read op */ #define RIO_SRC_OPS_WRITE 0x00004000 /* [I] Write op */ #define RIO_SRC_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */ #define RIO_SRC_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */ #define RIO_SRC_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */ #define RIO_SRC_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */ #define RIO_SRC_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */ #define RIO_SRC_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */ #define RIO_SRC_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */ #define RIO_SRC_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */ #define RIO_SRC_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ #define RIO_SRC_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ #define RIO_DST_OPS_CAR 0x1c /* Destination Operations CAR */ #define RIO_DST_OPS_READ 0x00008000 /* [I] Read op */ #define RIO_DST_OPS_WRITE 0x00004000 /* [I] Write op */ #define RIO_DST_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */ #define RIO_DST_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */ #define RIO_DST_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */ #define RIO_DST_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */ #define RIO_DST_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */ #define RIO_DST_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */ #define RIO_DST_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */ #define RIO_DST_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */ #define RIO_DST_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ #define RIO_DST_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ #define RIO_OPS_READ 0x00008000 /* [I] Read op */ #define RIO_OPS_WRITE 0x00004000 /* [I] Write op */ #define RIO_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */ #define RIO_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */ #define RIO_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */ #define RIO_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */ #define RIO_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */ #define RIO_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */ #define RIO_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */ #define RIO_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */ #define RIO_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ #define RIO_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ /* 0x20-0x30 *//* Reserved */ #define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */ #define RIO_RT_MAX_DESTID 0x0000ffff #define RIO_MBOX_CSR 0x40 /* [II, <= 1.2] Mailbox CSR */ #define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ #define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */ #define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */ #define RIO_MBOX0_BUSY 0x10000000 /* [II] Mbox 0 busy */ #define RIO_MBOX0_FAIL 0x08000000 /* [II] Mbox 0 fail */ #define RIO_MBOX0_ERROR 0x04000000 /* [II] Mbox 0 error */ #define RIO_MBOX1_AVAIL 0x00800000 /* [II] Mbox 1 avail */ #define RIO_MBOX1_FULL 0x00400000 /* [II] Mbox 1 full */ #define RIO_MBOX1_EMPTY 0x00200000 /* [II] Mbox 1 empty */ #define RIO_MBOX1_BUSY 0x00100000 /* [II] Mbox 1 busy */ #define RIO_MBOX1_FAIL 0x00080000 /* [II] Mbox 1 fail */ #define RIO_MBOX1_ERROR 0x00040000 /* [II] Mbox 1 error */ #define RIO_MBOX2_AVAIL 0x00008000 /* [II] Mbox 2 avail */ #define RIO_MBOX2_FULL 0x00004000 /* [II] Mbox 2 full */ #define RIO_MBOX2_EMPTY
0x00002000 /* [II] Mbox 2 empty */ #define RIO_MBOX2_BUSY 0x00001000 /* [II] Mbox 2 busy */ #define RIO_MBOX2_FAIL 0x00000800 /* [II] Mbox 2 fail */ #define RIO_MBOX2_ERROR 0x00000400 /* [II] Mbox 2 error */ #define RIO_MBOX3_AVAIL 0x00000080 /* [II] Mbox 3 avail */ #define RIO_MBOX3_FULL 0x00000040 /* [II] Mbox 3 full */ #define RIO_MBOX3_EMPTY 0x00000020 /* [II] Mbox 3 empty */ #define RIO_MBOX3_BUSY 0x00000010 /* [II] Mbox 3 busy */ #define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */ #define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */ #define RIO_WRITE_PORT_CSR 0x44 /* [I, <= 1.2] Write Port CSR */ #define RIO_DOORBELL_CSR 0x44 /* [II, <= 1.2] Doorbell CSR */ #define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */ #define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */ #define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */ #define RIO_DOORBELL_BUSY 0x10000000 /* [II] Doorbell busy */ #define RIO_DOORBELL_FAILED 0x08000000 /* [II] Doorbell failed */ #define RIO_DOORBELL_ERROR 0x04000000 /* [II] Doorbell error */ #define RIO_WRITE_PORT_AVAILABLE 0x00000080 /* [I] Write Port Available */ #define RIO_WRITE_PORT_FULL 0x00000040 /* [I] Write Port Full */ #define RIO_WRITE_PORT_EMPTY 0x00000020 /* [I] Write Port Empty */ #define RIO_WRITE_PORT_BUSY 0x00000010 /* [I] Write Port Busy */ #define RIO_WRITE_PORT_FAILED 0x00000008 /* [I] Write Port Failed */ #define RIO_WRITE_PORT_ERROR 0x00000004 /* [I] Write Port Error */ /* 0x48 *//* Reserved */ #define RIO_PELL_CTRL_CSR 0x4c /* [I] PE Logical Layer Control CSR */ #define RIO_PELL_ADDR_66 0x00000004 /* [I] 66-bit addr */ #define RIO_PELL_ADDR_50 0x00000002 /* [I] 50-bit addr */ #define RIO_PELL_ADDR_34 0x00000001 /* [I] 34-bit addr */ /* 0x50-0x54 *//* Reserved */ #define RIO_LCSH_BA 0x58 /* [I] LCS High Base Address */ #define RIO_LCSL_BA 0x5c /* [I] LCS Base Address */ #define RIO_DID_CSR 0x60 /* [III] Base Device ID CSR */ /* 0x64 *//* Reserved */ #define RIO_HOST_DID_LOCK_CSR 0x68 /* [III] Host Base Device ID Lock CSR */ #define RIO_COMPONENT_TAG_CSR 0x6c /* [III] Component Tag CSR */ #define RIO_STD_RTE_CONF_DESTID_SEL_CSR 0x70 #define RIO_STD_RTE_CONF_EXTCFGEN 0x80000000 #define RIO_STD_RTE_CONF_PORT_SEL_CSR 0x74 #define RIO_STD_RTE_DEFAULT_PORT 0x78 /* 0x7c-0xf8 *//* Reserved */ /* 0x100-0xfff8 *//* [I] Extended Features Space */ /* 0x10000-0xfffff8 *//* [I] Implementation-defined Space */ /* * Extended Features Space is a configuration space area where * functionality is mapped into extended feature blocks via a * singly linked list of extended feature pointers (EFT_PTR). * * Each extended feature block can be identified/located in * Extended Features Space by walking the extended feature * list starting with the Extended Feature Pointer located * in the Assembly Information CAR. * * Extended Feature Blocks (EFBs) are identified with an assigned * EFB ID. Extended feature block offsets in the definitions are * relative to the offset of the EFB within the Extended Features * Space. 
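 *
 * A sketch of such a walk, using the helper macros defined below and
 * stopping at the Error Management EFB as an example (error handling
 * elided; rio_read_config_32() is assumed from the RapidIO driver API):
 *
 *	u32 hdr, off = first_efb_ptr;
 *
 *	while (off) {
 *		rio_read_config_32(rdev, off, &hdr);
 *		if (RIO_GET_BLOCK_ID(hdr) == RIO_EFB_ERR_MGMNT)
 *			break;
 *		off = RIO_GET_BLOCK_PTR(hdr);
 *	}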
*/ /* Helper macros to parse the Extended Feature Block header */ #define RIO_EFB_PTR_MASK 0xffff0000 #define RIO_EFB_ID_MASK 0x0000ffff #define RIO_GET_BLOCK_PTR(x) ((x & RIO_EFB_PTR_MASK) >> 16) #define RIO_GET_BLOCK_ID(x) (x & RIO_EFB_ID_MASK) /* Extended Feature Block IDs */ #define RIO_EFB_SER_EP_M1_ID 0x0001 /* [VI] LP-Serial EP Devices, Map I */ #define RIO_EFB_SER_EP_SW_M1_ID 0x0002 /* [VI] LP-Serial EP w SW Recovery Devices, Map I */ #define RIO_EFB_SER_EPF_M1_ID 0x0003 /* [VI] LP-Serial EP Free Devices, Map I */ #define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP-Serial EP Devices, RIO 1.2 */ #define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP-Serial EP w SW Recovery Devices, RIO 1.2 */ #define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP-Serial EP Free Devices, RIO 1.2 */ #define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */ #define RIO_EFB_SER_EPF_SW_M1_ID 0x0009 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map I */ #define RIO_EFB_SW_ROUTING_TBL 0x000E /* [III] Switch Routing Table Block */ #define RIO_EFB_SER_EP_M2_ID 0x0011 /* [VI] LP-Serial EP Devices, Map II */ #define RIO_EFB_SER_EP_SW_M2_ID 0x0012 /* [VI] LP-Serial EP w SW Recovery Devices, Map II */ #define RIO_EFB_SER_EPF_M2_ID 0x0013 /* [VI] LP-Serial EP Free Devices, Map II */ #define RIO_EFB_ERR_MGMNT_HS 0x0017 /* [VIII] Error Management Extensions, Hot-Swap only */ #define RIO_EFB_SER_EPF_SW_M2_ID 0x0019 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map II */ /* * Physical LP-Serial Registers Definitions * Parameters in register macros: * n - port number, m - Register Map Type (1 or 2) */ #define RIO_PORT_MNT_HEADER 0x0000 #define RIO_PORT_REQ_CTL_CSR 0x0020 #define RIO_PORT_RSP_CTL_CSR 0x0024 #define RIO_PORT_LINKTO_CTL_CSR 0x0020 #define RIO_PORT_RSPTO_CTL_CSR 0x0024 #define RIO_PORT_GEN_CTL_CSR 0x003c #define RIO_PORT_GEN_HOST 0x80000000 #define RIO_PORT_GEN_MASTER 0x40000000 #define RIO_PORT_GEN_DISCOVERED 0x20000000 #define RIO_PORT_N_MNT_REQ_CSR(n, m) (0x40 + (n) * (0x20 * (m))) #define RIO_MNT_REQ_CMD_RD 0x03 /* Reset-device command */ #define RIO_MNT_REQ_CMD_IS 0x04 /* Input-status command */ #define RIO_PORT_N_MNT_RSP_CSR(n, m) (0x44 + (n) * (0x20 * (m))) #define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */ #define RIO_PORT_N_MNT_RSP_ASTAT 0x000007e0 /* ackID Status */ #define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */ #define RIO_PORT_N_ACK_STS_CSR(n) (0x48 + (n) * 0x20) /* Only in RM-I */ #define RIO_PORT_N_ACK_CLEAR 0x80000000 #define RIO_PORT_N_ACK_INBOUND 0x3f000000 #define RIO_PORT_N_ACK_OUTSTAND 0x00003f00 #define RIO_PORT_N_ACK_OUTBOUND 0x0000003f #define RIO_PORT_N_CTL2_CSR(n, m) (0x54 + (n) * (0x20 * (m))) #define RIO_PORT_N_CTL2_SEL_BAUD 0xf0000000 #define RIO_PORT_N_ERR_STS_CSR(n, m) (0x58 + (n) * (0x20 * (m))) #define RIO_PORT_N_ERR_STS_OUT_ES 0x00010000 /* Output Error-stopped */ #define RIO_PORT_N_ERR_STS_INP_ES 0x00000100 /* Input Error-stopped */ #define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */ #define RIO_PORT_N_ERR_STS_PORT_UA 0x00000008 /* Port Unavailable */ #define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004 #define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002 #define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001 #define RIO_PORT_N_CTL_CSR(n, m) (0x5c + (n) * (0x20 * (m))) #define RIO_PORT_N_CTL_PWIDTH 0xc0000000 #define RIO_PORT_N_CTL_PWIDTH_1 0x00000000 #define RIO_PORT_N_CTL_PWIDTH_4 0x40000000 #define RIO_PORT_N_CTL_IPW 0x38000000 /* Initialized Port Width */ #define RIO_PORT_N_CTL_P_TYP_SER 0x00000001 #define RIO_PORT_N_CTL_LOCKOUT 
0x00000002 #define RIO_PORT_N_CTL_EN_RX 0x00200000 #define RIO_PORT_N_CTL_EN_TX 0x00400000 #define RIO_PORT_N_OB_ACK_CSR(n) (0x60 + (n) * 0x40) /* Only in RM-II */ #define RIO_PORT_N_OB_ACK_CLEAR 0x80000000 #define RIO_PORT_N_OB_ACK_OUTSTD 0x00fff000 #define RIO_PORT_N_OB_ACK_OUTBND 0x00000fff #define RIO_PORT_N_IB_ACK_CSR(n) (0x64 + (n) * 0x40) /* Only in RM-II */ #define RIO_PORT_N_IB_ACK_INBND 0x00000fff /* * Device-based helper macros for serial port register access. * d - pointer to rapidio device object, n - port number */ #define RIO_DEV_PORT_N_MNT_REQ_CSR(d, n) \ (d->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(n, d->phys_rmap)) #define RIO_DEV_PORT_N_MNT_RSP_CSR(d, n) \ (d->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(n, d->phys_rmap)) #define RIO_DEV_PORT_N_ACK_STS_CSR(d, n) \ (d->phys_efptr + RIO_PORT_N_ACK_STS_CSR(n)) #define RIO_DEV_PORT_N_CTL2_CSR(d, n) \ (d->phys_efptr + RIO_PORT_N_CTL2_CSR(n, d->phys_rmap)) #define RIO_DEV_PORT_N_ERR_STS_CSR(d, n) \ (d->phys_efptr + RIO_PORT_N_ERR_STS_CSR(n, d->phys_rmap)) #define RIO_DEV_PORT_N_CTL_CSR(d, n) \ (d->phys_efptr + RIO_PORT_N_CTL_CSR(n, d->phys_rmap)) #define RIO_DEV_PORT_N_OB_ACK_CSR(d, n) \ (d->phys_efptr + RIO_PORT_N_OB_ACK_CSR(n)) #define RIO_DEV_PORT_N_IB_ACK_CSR(d, n) \ (d->phys_efptr + RIO_PORT_N_IB_ACK_CSR(n)) /* * Error Management Extensions (RapidIO 1.3+, Part 8) * * Extended Features Block ID=0x0007 */ /* General EM Registers (Common for all Ports) */ #define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */ #define RIO_EM_EMHS_CAR 0x004 /* EM Functionality CAR */ #define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */ #define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */ #define REM_LTL_ERR_ILLTRAN 0x08000000 /* Illegal Transaction decode */ #define REM_LTL_ERR_UNSOLR 0x00800000 /* Unsolicited Response */ #define REM_LTL_ERR_UNSUPTR 0x00400000 /* Unsupported Transaction */ #define REM_LTL_ERR_IMPSPEC 0x000000ff /* Implementation Specific */ #define RIO_EM_LTL_HIADDR_CAP 0x010 /* Logical/Transport Layer High Address Capture CSR */ #define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */ #define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */ #define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */ #define RIO_EM_LTL_DID32_CAP 0x020 /* Logical/Transport Layer Dev32 DestID Capture CSR */ #define RIO_EM_LTL_SID32_CAP 0x024 /* Logical/Transport Layer Dev32 source ID Capture CSR */ #define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */ #define RIO_EM_PW_TGT_DEVID_D16M 0xff000000 /* Port-write Target DID16 MSB */ #define RIO_EM_PW_TGT_DEVID_D8 0x00ff0000 /* Port-write Target DID16 LSB or DID8 */ #define RIO_EM_PW_TGT_DEVID_DEV16 0x00008000 /* Port-write Target DID16 LSB or DID8 */ #define RIO_EM_PW_TGT_DEVID_DEV32 0x00004000 /* Port-write Target DID16 LSB or DID8 */ #define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */ #define RIO_EM_PKT_TTL_VAL 0xffff0000 /* Packet Time-to-live value */ #define RIO_EM_PW_TGT32_DEVID 0x030 /* Port-write Dev32 Target deviceID CSR */ #define RIO_EM_PW_TX_CTRL 0x034 /* Port-write Transmission Control CSR */ #define RIO_EM_PW_TX_CTRL_PW_DIS 0x00000001 /* Port-write Transmission Disable bit */ /* Per-Port EM Registers */ #define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */ #define REM_PED_IMPL_SPEC 0x80000000 #define REM_PED_LINK_OK2U 0x40000000 /* Link OK to Uninit transition */ #define REM_PED_LINK_UPDA 
0x20000000 /* Link Uninit Packet Discard Active */ #define REM_PED_LINK_U2OK 0x10000000 /* Link Uninit to OK transition */ #define REM_PED_LINK_TO 0x00000001 #define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */ #define RIO_EM_PN_ERRRATE_EN_OK2U 0x40000000 /* Enable notification for OK2U */ #define RIO_EM_PN_ERRRATE_EN_UPDA 0x20000000 /* Enable notification for UPDA */ #define RIO_EM_PN_ERRRATE_EN_U2OK 0x10000000 /* Enable notification for U2OK */ #define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */ #define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */ #define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */ #define RIO_EM_PN_PKT_CAP_2(x) (0x054 + x*0x40) /* Port N Packet Capture 2 CSR */ #define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */ #define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */ #define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */ #define RIO_EM_PN_LINK_UDT(x) (0x070 + x*0x40) /* Port N Link Uninit Discard Timer CSR */ #define RIO_EM_PN_LINK_UDT_TO 0xffffff00 /* Link Uninit Timeout value */ /* * Switch Routing Table Register Block ID=0x000E (RapidIO 3.0+, part 3) * Register offsets are defined from beginning of the block. */ /* Broadcast Routing Table Control CSR */ #define RIO_BC_RT_CTL_CSR 0x020 #define RIO_RT_CTL_THREE_LVL 0x80000000 #define RIO_RT_CTL_DEV32_RT_CTRL 0x40000000 #define RIO_RT_CTL_MC_MASK_SZ 0x03000000 /* 3.0+ Part 11: Multicast */ /* Broadcast Level 0 Info CSR */ #define RIO_BC_RT_LVL0_INFO_CSR 0x030 #define RIO_RT_L0I_NUM_GR 0xff000000 #define RIO_RT_L0I_GR_PTR 0x00fffc00 /* Broadcast Level 1 Info CSR */ #define RIO_BC_RT_LVL1_INFO_CSR 0x034 #define RIO_RT_L1I_NUM_GR 0xff000000 #define RIO_RT_L1I_GR_PTR 0x00fffc00 /* Broadcast Level 2 Info CSR */ #define RIO_BC_RT_LVL2_INFO_CSR 0x038 #define RIO_RT_L2I_NUM_GR 0xff000000 #define RIO_RT_L2I_GR_PTR 0x00fffc00 /* Per-Port Routing Table registers. * Register fields defined in the broadcast section above are * applicable to the corresponding registers below. */ #define RIO_SPx_RT_CTL_CSR(x) (0x040 + (0x20 * x)) #define RIO_SPx_RT_LVL0_INFO_CSR(x) (0x50 + (0x20 * x)) #define RIO_SPx_RT_LVL1_INFO_CSR(x) (0x54 + (0x20 * x)) #define RIO_SPx_RT_LVL2_INFO_CSR(x) (0x58 + (0x20 * x)) /* Register Formats for Routing Table Group entry. * Register offsets are calculated using GR_PTR field in the corresponding * table Level_N and group/entry numbers (see RapidIO 3.0+ Part 3). */ #define RIO_RT_Ln_ENTRY_IMPL_DEF 0xf0000000 #define RIO_RT_Ln_ENTRY_RTE_VAL 0x000003ff #define RIO_RT_ENTRY_DROP_PKT 0x300 #endif /* LINUX_RIO_REGS_H */ pata_arasan_cf_data.h 0000644 00000002342 14722070374 0010637 0 ustar 00 /* * include/linux/pata_arasan_cf_data.h * * Arasan Compact Flash host controller platform data header file * * Copyright (C) 2011 ST Microelectronics * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. 
*/ #ifndef _PATA_ARASAN_CF_DATA_H #define _PATA_ARASAN_CF_DATA_H #include <linux/platform_device.h> struct arasan_cf_pdata { u8 cf_if_clk; #define CF_IF_CLK_100M (0x0) #define CF_IF_CLK_75M (0x1) #define CF_IF_CLK_66M (0x2) #define CF_IF_CLK_50M (0x3) #define CF_IF_CLK_40M (0x4) #define CF_IF_CLK_33M (0x5) #define CF_IF_CLK_25M (0x6) #define CF_IF_CLK_125M (0x7) #define CF_IF_CLK_150M (0x8) #define CF_IF_CLK_166M (0x9) #define CF_IF_CLK_200M (0xA) /* * Platform-specific incapabilities of the CF controller are handled via * quirks */ u32 quirk; #define CF_BROKEN_PIO (1) #define CF_BROKEN_MWDMA (1 << 1) #define CF_BROKEN_UDMA (1 << 2) }; static inline void set_arasan_cf_pdata(struct platform_device *pdev, struct arasan_cf_pdata *data) { pdev->dev.platform_data = data; } #endif /* _PATA_ARASAN_CF_DATA_H */ i2c-pxa.h 0000644 00000000666 14722070374 0006176 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_I2C_ALGO_PXA_H #define _LINUX_I2C_ALGO_PXA_H typedef enum i2c_slave_event_e { I2C_SLAVE_EVENT_START_READ, I2C_SLAVE_EVENT_START_WRITE, I2C_SLAVE_EVENT_STOP } i2c_slave_event_t; struct i2c_slave_client { void *data; void (*event)(void *ptr, i2c_slave_event_t event); int (*read) (void *ptr); void (*write)(void *ptr, unsigned int val); }; #endif /* _LINUX_I2C_ALGO_PXA_H */ average.h 0000644 00000004661 14722070374 0006344 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_AVERAGE_H #define _LINUX_AVERAGE_H #include <linux/bug.h> #include <linux/compiler.h> #include <linux/log2.h> /* * Exponentially weighted moving average (EWMA) * * This implements a fixed-precision EWMA algorithm, with both the * precision and fall-off coefficient determined at compile-time * and built into the generated helper functions. * * The first argument to the macro is the name that will be used * for the struct and helper functions. * * The second argument, the precision, expresses how many bits are * used for the fractional part of the fixed-precision values. * * The third argument, the weight reciprocal, determines how the * new values will be weighed vs. the old state, new values will * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note * that this parameter must be a power of two for efficiency. */ #define DECLARE_EWMA(name, _precision, _weight_rcp) \ struct ewma_##name { \ unsigned long internal; \ }; \ static inline void ewma_##name##_init(struct ewma_##name *e) \ { \ BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ /* \ * Even if you want to feed it just 0/1 you should have \ * some bits for the non-fractional part... \ */ \ BUILD_BUG_ON((_precision) > 30); \ BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ e->internal = 0; \ } \ static inline unsigned long \ ewma_##name##_read(struct ewma_##name *e) \ { \ BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ BUILD_BUG_ON((_precision) > 30); \ BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ return e->internal >> (_precision); \ } \ static inline void ewma_##name##_add(struct ewma_##name *e, \ unsigned long val) \ { \ unsigned long internal = READ_ONCE(e->internal); \ unsigned long weight_rcp = ilog2(_weight_rcp); \ unsigned long precision = _precision; \ \ BUILD_BUG_ON(!__builtin_constant_p(_precision)); \ BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \ BUILD_BUG_ON((_precision) > 30); \ BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ \ WRITE_ONCE(e->internal, internal ?
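/* EWMA step: new = (old * (_weight_rcp - 1) + (val << _precision)) / _weight_rcp, in fixed point */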
\ (((internal << weight_rcp) - internal) + \ (val << precision)) >> weight_rcp : \ (val << precision)); \ } #endif /* _LINUX_AVERAGE_H */ kernel-page-flags.h 0000644 00000000771 14722070374 0010214 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_KERNEL_PAGE_FLAGS_H #define LINUX_KERNEL_PAGE_FLAGS_H #include <uapi/linux/kernel-page-flags.h> /* kernel hacking assistances * WARNING: subject to change, never rely on them! */ #define KPF_RESERVED 32 #define KPF_MLOCKED 33 #define KPF_MAPPEDTODISK 34 #define KPF_PRIVATE 35 #define KPF_PRIVATE_2 36 #define KPF_OWNER_PRIVATE 37 #define KPF_ARCH 38 #define KPF_UNCACHED 39 #define KPF_SOFTDIRTY 40 #endif /* LINUX_KERNEL_PAGE_FLAGS_H */ evm.h 0000644 00000005226 14722070374 0005517 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * evm.h * * Copyright (c) 2009 IBM Corporation * Author: Mimi Zohar <zohar@us.ibm.com> */ #ifndef _LINUX_EVM_H #define _LINUX_EVM_H #include <linux/integrity.h> #include <linux/xattr.h> struct integrity_iint_cache; #ifdef CONFIG_EVM extern int evm_set_key(void *key, size_t keylen); extern enum integrity_status evm_verifyxattr(struct dentry *dentry, const char *xattr_name, void *xattr_value, size_t xattr_value_len, struct integrity_iint_cache *iint); extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr); extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid); extern int evm_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size); extern void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len); extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name); extern void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name); extern int evm_inode_init_security(struct inode *inode, const struct xattr *xattr_array, struct xattr *evm); #ifdef CONFIG_FS_POSIX_ACL extern int posix_xattr_acl(const char *xattrname); #else static inline int posix_xattr_acl(const char *xattrname) { return 0; } #endif #else static inline int evm_set_key(void *key, size_t keylen) { return -EOPNOTSUPP; } #ifdef CONFIG_INTEGRITY static inline enum integrity_status evm_verifyxattr(struct dentry *dentry, const char *xattr_name, void *xattr_value, size_t xattr_value_len, struct integrity_iint_cache *iint) { return INTEGRITY_UNKNOWN; } #endif static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr) { return 0; } static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) { return; } static inline int evm_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size) { return 0; } static inline void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len) { return; } static inline int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name) { return 0; } static inline void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name) { return; } static inline int evm_inode_init_security(struct inode *inode, const struct xattr *xattr_array, struct xattr *evm) { return 0; } #endif /* CONFIG_EVM */ #endif /* LINUX_EVM_H */ serial.h 0000644 00000001166 14722070374 0006206 0 ustar 00 /* * include/linux/serial.h * * Copyright (C) 1992 by Theodore Ts'o. 
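*/
/*
 * Usage sketch for DECLARE_EWMA() above (the "demo_sig" name is
 * hypothetical).  With precision 6 and weight_rcp 8, each new sample
 * contributes 1/8 of the average; ewma_demo_sig_init() must run before
 * the first add.  Guarded out since it is an example only.
 */
#if 0
DECLARE_EWMA(demo_sig, 6, 8)    /* emits struct ewma_demo_sig + helpers */

static unsigned long demo_sig_update(struct ewma_demo_sig *avg,
                                     unsigned long sample)
{
        ewma_demo_sig_add(avg, sample); /* fold in the new sample */
        return ewma_demo_sig_read(avg); /* integer part of the average */
}
#endif
/*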
* * Redistribution of this file is permitted under the terms of the GNU * Public License (GPL) */ #ifndef _LINUX_SERIAL_H #define _LINUX_SERIAL_H #include <asm/page.h> #include <uapi/linux/serial.h> /* * Counters of the input lines (CTS, DSR, RI, CD) interrupts */ struct async_icount { __u32 cts, dsr, rng, dcd, tx, rx; __u32 frame, parity, overrun, brk; __u32 buf_overrun; }; /* * The size of the serial xmit buffer is 1 page, or 4096 bytes */ #define SERIAL_XMIT_SIZE PAGE_SIZE #include <linux/compiler.h> #endif /* _LINUX_SERIAL_H */ pm_wakeirq.h 0000644 00000002756 14722070374 0007074 0 ustar 00 /* * pm_wakeirq.h - Device wakeirq helper functions * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef _LINUX_PM_WAKEIRQ_H #define _LINUX_PM_WAKEIRQ_H #ifdef CONFIG_PM extern int dev_pm_set_wake_irq(struct device *dev, int irq); extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq); extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq); extern void dev_pm_clear_wake_irq(struct device *dev); extern void dev_pm_enable_wake_irq(struct device *dev); extern void dev_pm_disable_wake_irq(struct device *dev); #else /* !CONFIG_PM */ static inline int dev_pm_set_wake_irq(struct device *dev, int irq) { return 0; } static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) { return 0; } static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) { return 0; } static inline void dev_pm_clear_wake_irq(struct device *dev) { } static inline void dev_pm_enable_wake_irq(struct device *dev) { } static inline void dev_pm_disable_wake_irq(struct device *dev) { } #endif /* CONFIG_PM */ #endif /* _LINUX_PM_WAKEIRQ_H */ ras.h 0000644 00000002273 14722070374 0005514 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __RAS_H__ #define __RAS_H__ #include <asm/errno.h> #include <linux/uuid.h> #include <linux/cper.h> #ifdef CONFIG_DEBUG_FS int ras_userspace_consumers(void); void ras_debugfs_init(void); int ras_add_daemon_trace(void); #else static inline int ras_userspace_consumers(void) { return 0; } static inline void ras_debugfs_init(void) { } static inline int ras_add_daemon_trace(void) { return 0; } #endif #ifdef CONFIG_RAS_CEC void __init cec_init(void); int __init parse_cec_param(char *str); int cec_add_elem(u64 pfn); #else static inline void __init cec_init(void) { } static inline int cec_add_elem(u64 pfn) { return -ENODEV; } #endif #ifdef CONFIG_RAS void log_non_standard_event(const guid_t *sec_type, const guid_t *fru_id, const char *fru_text, const u8 sev, const u8 *err, const u32 len); void log_arm_hw_error(struct cper_sec_proc_arm *err); #else static inline void log_non_standard_event(const guid_t *sec_type, const guid_t *fru_id, const char *fru_text, const u8 sev, const u8 *err, const u32 len) { return; } static inline void log_arm_hw_error(struct cper_sec_proc_arm *err) { return; } #endif #endif /* __RAS_H__ */ pm2301_charger.h 0000644 00000002132 14722070374 0007336 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * PM2301 charger driver. 
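*/
/*
 * Usage sketch for the dev_pm_*_wake_irq() helpers declared in
 * pm_wakeirq.h above (the probe function and irq number are
 * hypothetical).  The set call in probe is normally paired with
 * dev_pm_clear_wake_irq() in the remove path.  Example only:
 */
#if 0
static int demo_probe(struct device *dev, int wakeirq)
{
        /* route the dedicated wake irq through the PM core */
        return dev_pm_set_dedicated_wake_irq(dev, wakeirq);
}
#endif
/*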
* * Copyright (C) 2012 ST Ericsson Corporation * * Contact: Olivier LAUNAY (olivier.launay@stericsson.com) */ #ifndef __LINUX_PM2301_H #define __LINUX_PM2301_H /** * struct pm2xxx_bm_charger_parameters - Charger specific parameters * @ac_volt_max: maximum allowed AC charger voltage in mV * @ac_curr_max: maximum allowed AC charger current in mA */ struct pm2xxx_bm_charger_parameters { int ac_volt_max; int ac_curr_max; }; /** * struct pm2xxx_bm_data - pm2xxx battery management data * @enable_overshoot: flag to enable VBAT overshoot control * @chg_params: charger parameters */ struct pm2xxx_bm_data { bool enable_overshoot; const struct pm2xxx_bm_charger_parameters *chg_params; }; struct pm2xxx_charger_platform_data { char **supplied_to; size_t num_supplicants; int i2c_bus; const char *label; int gpio_irq_number; unsigned int lpn_gpio; int irq_type; }; struct pm2xxx_platform_data { struct pm2xxx_charger_platform_data *wall_charger; struct pm2xxx_bm_data *battery; }; #endif /* __LINUX_PM2301_H */ spinlock.h 0000644 00000032703 14722070374 0006552 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SPINLOCK_H #define __LINUX_SPINLOCK_H /* * include/linux/spinlock.h - generic spinlock/rwlock declarations * * here's the role of the various spinlock/rwlock related include files: * * on SMP builds: * * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the * initializers * * linux/spinlock_types.h: * defines the generic type and initializers * * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel * implementations, mostly inline assembly code * * (also included on UP-debug builds:) * * linux/spinlock_api_smp.h: * contains the prototypes for the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs. * * on UP builds: * * linux/spinlock_types_up.h: * contains the generic, simplified UP spinlock type. * (which is an empty structure on non-debug builds) * * linux/spinlock_types.h: * defines the generic type and initializers * * linux/spinlock_up.h: * contains the arch_spin_*()/etc. version of UP * builds. (which are NOPs on non-debug, non-preempt * builds) * * (included on UP-non-debug builds:) * * linux/spinlock_api_up.h: * builds the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs.
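*/
/*
 * A short usage sketch of the spin_*() API this header builds (the
 * counter structure is hypothetical; guarded out as an example only):
 */
#if 0
struct demo_counter {
        spinlock_t lock;        /* protects @count */
        unsigned long count;
};

static void demo_counter_inc(struct demo_counter *c)
{
        spin_lock(&c->lock);    /* serialize writers on SMP */
        c->count++;
        spin_unlock(&c->lock);
}
#endif
/*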
*/ #include <linux/typecheck.h> #include <linux/preempt.h> #include <linux/linkage.h> #include <linux/compiler.h> #include <linux/irqflags.h> #include <linux/thread_info.h> #include <linux/kernel.h> #include <linux/stringify.h> #include <linux/bottom_half.h> #include <asm/barrier.h> #include <asm/mmiowb.h> /* * Must define these before including other files, inline functions need them */ #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME #define LOCK_SECTION_START(extra) \ ".subsection 1\n\t" \ extra \ ".ifndef " LOCK_SECTION_NAME "\n\t" \ LOCK_SECTION_NAME ":\n\t" \ ".endif\n" #define LOCK_SECTION_END \ ".previous\n\t" #define __lockfunc __attribute__((section(".spinlock.text"))) /* * Pull the arch_spinlock_t and arch_rwlock_t definitions: */ #include <linux/spinlock_types.h> /* * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them): */ #ifdef CONFIG_SMP # include <asm/spinlock.h> #else # include <linux/spinlock_up.h> #endif #ifdef CONFIG_DEBUG_SPINLOCK extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, struct lock_class_key *key); # define raw_spin_lock_init(lock) \ do { \ static struct lock_class_key __key; \ \ __raw_spin_lock_init((lock), #lock, &__key); \ } while (0) #else # define raw_spin_lock_init(lock) \ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) #endif #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) #ifdef arch_spin_is_contended #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) #else #define raw_spin_is_contended(lock) (((void)(lock), 0)) #endif /*arch_spin_is_contended*/ /* * smp_mb__after_spinlock() provides the equivalent of a full memory barrier * between program-order earlier lock acquisitions and program-order later * memory accesses. * * This guarantees that the following two properties hold: * * 1) Given the snippet: * * { X = 0; Y = 0; } * * CPU0 CPU1 * * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1); * spin_lock(S); smp_mb(); * smp_mb__after_spinlock(); r1 = READ_ONCE(X); * r0 = READ_ONCE(Y); * spin_unlock(S); * * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0) * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments * preceding the call to smp_mb__after_spinlock() in __schedule() and in * try_to_wake_up(). * * 2) Given the snippet: * * { X = 0; Y = 0; } * * CPU0 CPU1 CPU2 * * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y); * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb(); * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X); * WRITE_ONCE(Y, 1); * spin_unlock(S); * * it is forbidden that CPU0's critical section executes before CPU1's * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1) * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments * preceding the calls to smp_rmb() in try_to_wake_up() for similar * snippets but "projected" onto two CPUs. * * Property (2) upgrades the lock to an RCsc lock. * * Since most load-store architectures implement ACQUIRE with an smp_mb() after * the LL/SC loop, they need no further barriers. Similarly all our TSO * architectures imply an smp_mb() for each atomic instruction and equally don't * need more. * * Architectures that can implement ACQUIRE better need to take care. 
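*/
/*
 * Sketch of property (1) above in driver-shaped code (the demo_task
 * type, its fields and demo_do_wakeup() are hypothetical; example
 * only): the barrier keeps the READ_ONCE() from being reordered
 * before the program-order earlier store, in the style of the
 * try_to_wake_up() usage referenced above.
 */
#if 0
static int demo_try_wake(struct demo_task *t)
{
        int woken = 0;

        raw_spin_lock(&t->pi_lock);
        smp_mb__after_spinlock();       /* upgrade ACQUIRE to a full barrier */
        if (READ_ONCE(t->need_wake))
                woken = demo_do_wakeup(t);
        raw_spin_unlock(&t->pi_lock);
        return woken;
}
#endif
/*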
*/ #ifndef smp_mb__after_spinlock #define smp_mb__after_spinlock() do { } while (0) #endif #ifdef CONFIG_DEBUG_SPINLOCK extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) extern int do_raw_spin_trylock(raw_spinlock_t *lock); extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); #else static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) { __acquire(lock); arch_spin_lock(&lock->raw_lock); mmiowb_spin_lock(); } #ifndef arch_spin_lock_flags #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) #endif static inline void do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock) { __acquire(lock); arch_spin_lock_flags(&lock->raw_lock, *flags); mmiowb_spin_lock(); } static inline int do_raw_spin_trylock(raw_spinlock_t *lock) { int ret = arch_spin_trylock(&(lock)->raw_lock); if (ret) mmiowb_spin_lock(); return ret; } static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) { mmiowb_spin_unlock(); arch_spin_unlock(&lock->raw_lock); __release(lock); } #endif /* * Define the various spin_lock methods. Note we define these * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The * various methods are defined as nops in the case they are not * required. */ #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) #define raw_spin_lock(lock) _raw_spin_lock(lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock_nested(lock, subclass) # define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ } while (0) #else /* * Always evaluate the 'subclass' argument to avoid that the compiler * warns about set-but-not-used variables when building with * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. */ # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock(((void)(subclass), (lock))) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) #define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_spin_lock_irqsave(lock); \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ } while (0) #else #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_spin_lock_irqsave(lock); \ } while (0) #endif #else #define raw_spin_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _raw_spin_lock_irqsave(lock, flags); \ } while (0) #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ raw_spin_lock_irqsave(lock, flags) #endif #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) #define raw_spin_unlock(lock) _raw_spin_unlock(lock) #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) #define raw_spin_unlock_irqrestore(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _raw_spin_unlock_irqrestore(lock, flags); \ } while (0) #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) #define raw_spin_trylock_bh(lock) \ __cond_lock(lock, _raw_spin_trylock_bh(lock)) #define raw_spin_trylock_irq(lock) \ ({ \ local_irq_disable(); \ raw_spin_trylock(lock) ? 
\ 1 : ({ local_irq_enable(); 0; }); \ }) #define raw_spin_trylock_irqsave(lock, flags) \ ({ \ local_irq_save(flags); \ raw_spin_trylock(lock) ? \ 1 : ({ local_irq_restore(flags); 0; }); \ }) /* Include rwlock functions */ #include <linux/rwlock.h> /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: */ #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) # include <linux/spinlock_api_smp.h> #else # include <linux/spinlock_api_up.h> #endif /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock) { return &lock->rlock; } #define spin_lock_init(_lock) \ do { \ spinlock_check(_lock); \ raw_spin_lock_init(&(_lock)->rlock); \ } while (0) static __always_inline void spin_lock(spinlock_t *lock) { raw_spin_lock(&lock->rlock); } static __always_inline void spin_lock_bh(spinlock_t *lock) { raw_spin_lock_bh(&lock->rlock); } static __always_inline int spin_trylock(spinlock_t *lock) { return raw_spin_trylock(&lock->rlock); } #define spin_lock_nested(lock, subclass) \ do { \ raw_spin_lock_nested(spinlock_check(lock), subclass); \ } while (0) #define spin_lock_nest_lock(lock, nest_lock) \ do { \ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ } while (0) static __always_inline void spin_lock_irq(spinlock_t *lock) { raw_spin_lock_irq(&lock->rlock); } #define spin_lock_irqsave(lock, flags) \ do { \ raw_spin_lock_irqsave(spinlock_check(lock), flags); \ } while (0) #define spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ } while (0) static __always_inline void spin_unlock(spinlock_t *lock) { raw_spin_unlock(&lock->rlock); } static __always_inline void spin_unlock_bh(spinlock_t *lock) { raw_spin_unlock_bh(&lock->rlock); } static __always_inline void spin_unlock_irq(spinlock_t *lock) { raw_spin_unlock_irq(&lock->rlock); } static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) { raw_spin_unlock_irqrestore(&lock->rlock, flags); } static __always_inline int spin_trylock_bh(spinlock_t *lock) { return raw_spin_trylock_bh(&lock->rlock); } static __always_inline int spin_trylock_irq(spinlock_t *lock) { return raw_spin_trylock_irq(&lock->rlock); } #define spin_trylock_irqsave(lock, flags) \ ({ \ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ }) /** * spin_is_locked() - Check whether a spinlock is locked. * @lock: Pointer to the spinlock. * * This function is NOT required to provide any memory ordering * guarantees; it could be used for debugging purposes or, when * additional synchronization is needed, accompanied with other * constructs (memory barriers) enforcing the synchronization. * * Returns: 1 if @lock is locked, 0 otherwise. * * Note that the function only tells you that the spinlock is * seen to be locked, not that it is locked on your CPU. * * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n, * the return value is always 0 (see include/linux/spinlock_up.h). * Therefore you should not rely heavily on the return value. 
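*/
/*
 * Usage sketch for the irqsave variants defined above (demo_lock and
 * demo_list are hypothetical; example only).  The flags word must be
 * an unsigned long in the caller's frame:
 */
#if 0
static void demo_add_entry(struct list_head *entry)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);   /* also masks local IRQs */
        list_add_tail(entry, &demo_list);
        spin_unlock_irqrestore(&demo_lock, flags);
}
#endif
/*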
*/ static __always_inline int spin_is_locked(spinlock_t *lock) { return raw_spin_is_locked(&lock->rlock); } static __always_inline int spin_is_contended(spinlock_t *lock) { return raw_spin_is_contended(&lock->rlock); } #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) */ #include <linux/atomic.h> /** * atomic_dec_and_lock - lock on reaching reference count zero * @atomic: the atomic counter * @lock: the spinlock in question * * Decrements @atomic by 1. If the result is 0, returns true and locks * @lock. Returns false for all other cases. */ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock, unsigned long *flags); #define atomic_dec_and_lock_irqsave(atomic, lock, flags) \ __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))) int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, size_t max_size, unsigned int cpu_mult, gfp_t gfp, const char *name, struct lock_class_key *key); #define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) \ ({ \ static struct lock_class_key key; \ int ret; \ \ ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size, \ cpu_mult, gfp, #locks, &key); \ ret; \ }) void free_bucket_spinlocks(spinlock_t *locks); #endif /* __LINUX_SPINLOCK_H */ fbcon.h 0000644 00000004130 14722070374 0006010 0 ustar 00 #ifndef _LINUX_FBCON_H #define _LINUX_FBCON_H #ifdef CONFIG_FRAMEBUFFER_CONSOLE void __init fb_console_init(void); void __exit fb_console_exit(void); int fbcon_fb_registered(struct fb_info *info); void fbcon_fb_unregistered(struct fb_info *info); void fbcon_fb_unbind(struct fb_info *info); void fbcon_suspended(struct fb_info *info); void fbcon_resumed(struct fb_info *info); int fbcon_mode_deleted(struct fb_info *info, struct fb_videomode *mode); void fbcon_new_modelist(struct fb_info *info); void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps); void fbcon_fb_blanked(struct fb_info *info, int blank); int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var); void fbcon_update_vcs(struct fb_info *info, bool all); void fbcon_remap_all(struct fb_info *info); int fbcon_set_con2fb_map_ioctl(void __user *argp); int fbcon_get_con2fb_map_ioctl(void __user *argp); #else static inline void fb_console_init(void) {} static inline void fb_console_exit(void) {} static inline int fbcon_fb_registered(struct fb_info *info) { return 0; } static inline void fbcon_fb_unregistered(struct fb_info *info) {} static inline void fbcon_fb_unbind(struct fb_info *info) {} static inline void fbcon_suspended(struct fb_info *info) {} static inline void fbcon_resumed(struct fb_info *info) {} static inline int fbcon_mode_deleted(struct fb_info *info, struct fb_videomode *mode) { return 0; } static inline void fbcon_new_modelist(struct fb_info *info) {} static inline void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps) {} static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {} static inline int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var) { return 0; } static inline void fbcon_update_vcs(struct fb_info *info, bool all) {} static inline void fbcon_remap_all(struct fb_info *info) {} static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { 
return 0; } static inline int fbcon_get_con2fb_map_ioctl(void __user *argp) { return 0; } #endif #endif /* _LINUX_FBCON_H */ btf.h 0000644 00000004505 14722070374 0005502 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2018 Facebook */ #ifndef _LINUX_BTF_H #define _LINUX_BTF_H 1 #include <linux/types.h> struct btf; struct btf_member; struct btf_type; union bpf_attr; extern const struct file_operations btf_fops; void btf_put(struct btf *btf); int btf_new_fd(const union bpf_attr *attr); struct btf *btf_get_by_fd(int fd); int btf_get_info_by_fd(const struct btf *btf, const union bpf_attr *attr, union bpf_attr __user *uattr); /* Figure out the size of a type_id. If type_id is a modifier * (e.g. const), it will be resolved to find out the type with size. * * For example: * In describing "const void *", type_id is "const" and "const" * refers to "void *". The return type will be "void *". * * If type_id is a simple "int", then return type will be "int". * * @btf: struct btf object * @type_id: Find out the size of type_id. The type_id of the return * type is set to *type_id. * @ret_size: It can be NULL. If not NULL, the size of the return * type is set to *ret_size. * Return: The btf_type (resolved to another type with size info if needed). * NULL is returned if type_id itself does not have size info * (e.g. void) or it cannot be resolved to another type that * has size info. * *type_id and *ret_size will not be changed in the * NULL return case. */ const struct btf_type *btf_type_id_size(const struct btf *btf, u32 *type_id, u32 *ret_size); void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, struct seq_file *m); int btf_get_fd_by_id(u32 id); u32 btf_id(const struct btf *btf); bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t); bool btf_type_is_void(const struct btf_type *t); #ifdef CONFIG_BPF_SYSCALL const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); const char *btf_name_by_offset(const struct btf *btf, u32 offset); #else static inline const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) { return NULL; } static inline const char *btf_name_by_offset(const struct btf *btf, u32 offset) { return NULL; } #endif #endif nvme-fc-driver.h 0000644 00000112272 14722070374 0007554 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2016, Avago Technologies */ #ifndef _NVME_FC_DRIVER_H #define _NVME_FC_DRIVER_H 1 #include <linux/scatterlist.h> /* * ********************** LLDD FC-NVME Host API ******************** * * For FC LLDD's that are the NVME Host role. * * ****************************************************************** */ /** * struct nvme_fc_port_info - port-specific ids and FC connection-specific * data element used during NVME Host role * registrations * * Static fields describing the port being registered: * @node_name: FC WWNN for the port * @port_name: FC WWPN for the port * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx) * @dev_loss_tmo: maximum delay for reconnects to an association on * this device. Used only on a remoteport. * * Initialization values for dynamic port fields: * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must * be set to 0. 
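*/
/*
 * Sketch of an LLDD filling this structure before registering a
 * localport (the WWNN/WWPN/N_Port_ID values are hypothetical; see the
 * FC_PORT_ROLE_xxx values for @port_role; example only):
 */
#if 0
static const struct nvme_fc_port_info demo_pinfo = {
        .node_name      = 0x200000109b340001ULL,        /* FC WWNN */
        .port_name      = 0x100000109b340001ULL,        /* FC WWPN */
        .port_role      = FC_PORT_ROLE_NVME_INITIATOR,
        .port_id        = 0x010203,     /* upper 8 bits zero, per above */
};
#endif
/*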
*/ struct nvme_fc_port_info { u64 node_name; u64 port_name; u32 port_role; u32 port_id; u32 dev_loss_tmo; }; /** * struct nvmefc_ls_req - Request structure passed from NVME-FC transport * to LLDD in order to perform a NVME FC-4 LS * request and obtain a response. * * Values set by the NVME-FC layer prior to calling the LLDD ls_req * entrypoint. * @rqstaddr: pointer to request buffer * @rqstdma: PCI DMA address of request buffer * @rqstlen: Length, in bytes, of request buffer * @rspaddr: pointer to response buffer * @rspdma: PCI DMA address of response buffer * @rsplen: Length, in bytes, of response buffer * @timeout: Maximum amount of time, in seconds, to wait for the LS response. * If the timeout is exceeded, the LLDD is to abort the LS exchange and * complete the LS request with error status. * @private: pointer to memory allocated alongside the ls request structure * that is specifically for the LLDD to use while processing the * request. The length of the buffer corresponds to the * lsrqst_priv_sz value specified in the nvme_fc_port_template * supplied by the LLDD. * @done: The callback routine the LLDD is to invoke upon completion of * the LS request. req argument is the pointer to the original LS * request structure. Status argument must be 0 upon success, a * negative errno on failure (example: -ENXIO). */ struct nvmefc_ls_req { void *rqstaddr; dma_addr_t rqstdma; u32 rqstlen; void *rspaddr; dma_addr_t rspdma; u32 rsplen; u32 timeout; void *private; void (*done)(struct nvmefc_ls_req *req, int status); } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ enum nvmefc_fcp_datadir { NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */ NVMEFC_FCP_WRITE, NVMEFC_FCP_READ, }; /** * struct nvmefc_fcp_req - Request structure passed from NVME-FC transport * to LLDD in order to perform a NVME FCP IO operation. * * Values set by the NVME-FC layer prior to calling the LLDD fcp_io * entrypoint. * @cmdaddr: pointer to the FCP CMD IU buffer * @rspaddr: pointer to the FCP RSP IU buffer * @cmddma: PCI DMA address of the FCP CMD IU buffer * @rspdma: PCI DMA address of the FCP RSP IU buffer * @cmdlen: Length, in bytes, of the FCP CMD IU buffer * @rsplen: Length, in bytes, of the FCP RSP IU buffer * @payload_length: Length of DATA_IN or DATA_OUT payload data to transfer * @sg_table: scatter/gather structure for payload data * @first_sgl: memory for 1st scatter/gather list segment for payload data * @sg_cnt: number of elements in the scatter/gather list * @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx) * @sqid: The nvme SQID the command is being issued on * @done: The callback routine the LLDD is to invoke upon completion of * the FCP operation. req argument is the pointer to the original * FCP IO operation. * @private: pointer to memory allocated alongside the FCP operation * request structure that is specifically for the LLDD to use * while processing the operation. The length of the buffer * corresponds to the fcprqst_priv_sz value specified in the * nvme_fc_port_template supplied by the LLDD. * * Values set by the LLDD indicating completion status of the FCP operation. * Must be set prior to calling the done() callback. * @transferred_length: amount of payload data, in bytes, that was * transferred. Should equal payload_length on success. * @rcv_rsplen: length, in bytes, of the FCP RSP IU received. * @status: Completion status of the FCP operation. Must be 0 upon success, * negative errno value upon failure (ex: -EIO).
Note: this is * NOT a reflection of the NVME CQE completion status; it is only the * status of the FCP operation at the NVME-FC level. */ struct nvmefc_fcp_req { void *cmdaddr; void *rspaddr; dma_addr_t cmddma; dma_addr_t rspdma; u16 cmdlen; u16 rsplen; u32 payload_length; struct sg_table sg_table; struct scatterlist *first_sgl; int sg_cnt; enum nvmefc_fcp_datadir io_dir; __le16 sqid; void (*done)(struct nvmefc_fcp_req *req); void *private; u32 transferred_length; u16 rcv_rsplen; u32 status; } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ /* * Direct copy of fc_port_state enum. For later merging */ enum nvme_fc_obj_state { FC_OBJSTATE_UNKNOWN, FC_OBJSTATE_NOTPRESENT, FC_OBJSTATE_ONLINE, FC_OBJSTATE_OFFLINE, /* User has taken Port Offline */ FC_OBJSTATE_BLOCKED, FC_OBJSTATE_BYPASSED, FC_OBJSTATE_DIAGNOSTICS, FC_OBJSTATE_LINKDOWN, FC_OBJSTATE_ERROR, FC_OBJSTATE_LOOPBACK, FC_OBJSTATE_DELETED, }; /** * struct nvme_fc_local_port - structure used between NVME-FC transport and * a LLDD to reference a local NVME host port. * Allocated/created by the nvme_fc_register_localport() * transport interface. * * Fields with static values for the port. Initialized by the * port_info struct supplied to the registration call. * @port_num: NVME-FC transport host port number * @port_role: which NVME roles are supported on the port (see FC_PORT_ROLE_xxx) * @node_name: FC WWNN for the port * @port_name: FC WWPN for the port * @private: pointer to memory allocated alongside the local port * structure that is specifically for the LLDD to use. * The length of the buffer corresponds to the local_priv_sz * value specified in the nvme_fc_port_template supplied by * the LLDD. * @dev_loss_tmo: maximum delay for reconnects to an association on * this device. To modify, the LLDD must call * nvme_fc_set_remoteport_devloss(). * * Fields with dynamic values. Values may change based on link state. LLDD * may reference fields directly to change them. Initialized by the * port_info struct supplied to the registration call. * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must * be set to 0. * @port_state: Operational state of the port. */ struct nvme_fc_local_port { /* static/read-only fields */ u32 port_num; u32 port_role; u64 node_name; u64 port_name; void *private; /* dynamic fields */ u32 port_id; enum nvme_fc_obj_state port_state; } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ /** * struct nvme_fc_remote_port - structure used between NVME-FC transport and * a LLDD to reference a remote NVME subsystem port. * Allocated/created by the nvme_fc_register_remoteport() * transport interface. * * Fields with static values for the port. Initialized by the * port_info struct supplied to the registration call. * @port_num: NVME-FC transport remote subsystem port number * @port_role: which NVME roles are supported on the port (see FC_PORT_ROLE_xxx) * @node_name: FC WWNN for the port * @port_name: FC WWPN for the port * @localport: pointer to the NVME-FC local host port the subsystem is * connected to. * @private: pointer to memory allocated alongside the remote port * structure that is specifically for the LLDD to use. * The length of the buffer corresponds to the remote_priv_sz * value specified in the nvme_fc_port_template supplied by * the LLDD. * * Fields with dynamic values. Values may change based on link or login * state. LLDD may reference fields directly to change them. Initialized by * the port_info struct supplied to the registration call.
* @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must * be set to 0. * @port_state: Operational state of the remote port. Valid values are * ONLINE or UNKNOWN. */ struct nvme_fc_remote_port { /* static fields */ u32 port_num; u32 port_role; u64 node_name; u64 port_name; struct nvme_fc_local_port *localport; void *private; u32 dev_loss_tmo; /* dynamic fields */ u32 port_id; enum nvme_fc_obj_state port_state; } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ /** * struct nvme_fc_port_template - structure containing static entrypoints and * operational parameters for an LLDD that supports NVME host * behavior. Passed by reference in port registrations. * NVME-FC transport remembers template reference and may * access it during runtime operation. * * Host/Initiator Transport Entrypoints/Parameters: * * @localport_delete: The LLDD initiates deletion of a localport via * nvme_fc_deregister_localport(). However, the teardown is * asynchronous. This routine is called upon the completion of the * teardown to inform the LLDD that the localport has been deleted. * Entrypoint is Mandatory. * * @remoteport_delete: The LLDD initiates deletion of a remoteport via * nvme_fc_deregister_remoteport(). However, the teardown is * asynchronous. This routine is called upon the completion of the * teardown to inform the LLDD that the remoteport has been deleted. * Entrypoint is Mandatory. * * @create_queue: Upon creating a host<->controller association, queues are * created such that they can be affinitized to cpus/cores. This * callback is made into the LLDD to notify it that a controller queue is * being created. The LLDD may choose to allocate an associated hw queue * or map it onto a shared hw queue. Upon return from the call, the * LLDD specifies a handle that will be given back to it for any * command that is posted to the controller queue. The handle can * be used by the LLDD to map quickly to the proper hw queue for * command execution. The mask of cpu's that will map to this queue * at the block-level is also passed in. The LLDD should use the * queue id and/or cpu masks to ensure proper affinitization of the * controller queue to the hw queue. * Entrypoint is Optional. * * @delete_queue: This is the inverse of the create_queue. During * host<->controller association teardown, this routine is called * when a controller queue is being terminated. Any association with * a hw queue should be terminated. If there is a unique hw queue, the * hw queue should be torn down. * Entrypoint is Optional. * * @poll_queue: Called to poll for the completion of an io on a blk queue. * Entrypoint is Optional. * * @ls_req: Called to issue a FC-NVME FC-4 LS service request. * The nvme_fc_ls_req structure will fully describe the buffers for * the request payload and where to place the response payload. The * LLDD is to allocate an exchange, issue the LS request, obtain the * LS response, and call the "done" routine specified in the request * structure (argument to done is the ls request structure itself). * Entrypoint is Mandatory. * * @fcp_io: called to issue a FC-NVME I/O request. The I/O may be for * an admin queue or an i/o queue. The nvmefc_fcp_req structure will * fully describe the io: the buffer containing the FC-NVME CMD IU * (which contains the SQE), the sg list for the payload if applicable, * and the buffer to place the FC-NVME RSP IU into.
The LLDD will * complete the i/o, indicating the amount of data transferred or * any transport error, and call the "done" routine specified in the * request structure (argument to done is the fcp request structure * itself). * Entrypoint is Mandatory. * * @ls_abort: called to request the LLDD to abort the indicated ls request. * The call may return before the abort has completed. After aborting * the request, the LLDD must still call the ls request done routine * indicating an FC transport Aborted status. * Entrypoint is Mandatory. * * @fcp_abort: called to request the LLDD to abort the indicated fcp request. * The call may return before the abort has completed. After aborting * the request, the LLDD must still call the fcp request done routine * indicating an FC transport Aborted status. * Entrypoint is Mandatory. * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. * * @max_sgl_segments: indicates the maximum number of sgl segments supported * by the LLDD * Value is Mandatory. Must be at least 1. Recommend at least 256. * * @max_dif_sgl_segments: indicates the maximum number of sgl segments * supported by the LLDD for DIF operations. * Value is Mandatory. Must be at least 1. Recommend at least 256. * * @dma_boundary: indicates the dma address boundary where dma mappings * will be split across. * Value is Mandatory. Typical value is 0xFFFFFFFF to split across * 4Gig address boundaries. * * @local_priv_sz: The LLDD sets this field to the amount of additional * memory that it would like the fc nvme layer to allocate on the LLDD's * behalf whenever a localport is allocated. The additional memory * area is solely for the use of the LLDD, and its location is specified by * the localport->private pointer. * Value is Mandatory. Allowed to be zero. * * @remote_priv_sz: The LLDD sets this field to the amount of additional * memory that it would like the fc nvme layer to allocate on the LLDD's * behalf whenever a remoteport is allocated. The additional memory * area is solely for the use of the LLDD, and its location is specified by * the remoteport->private pointer. * Value is Mandatory. Allowed to be zero. * * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional * memory that it would like the fc nvme layer to allocate on the LLDD's * behalf whenever a ls request structure is allocated. The additional * memory area is solely for the use of the LLDD, and its location is * specified by the ls_request->private pointer. * Value is Mandatory. Allowed to be zero. * * @fcprqst_priv_sz: The LLDD sets this field to the amount of additional * memory that it would like the fc nvme layer to allocate on the LLDD's * behalf whenever a fcp request structure is allocated. The additional * memory area is solely for the use of the LLDD, and its location is * specified by the fcp_request->private pointer. * Value is Mandatory. Allowed to be zero.
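*/
/*
 * Sketch of the completion contract for @fcp_io described above: once
 * the exchange finishes, the LLDD fills the completion fields of the
 * nvmefc_fcp_req and invokes its done() callback (the demo_* wrapper
 * is hypothetical; guarded out as an example only):
 */
#if 0
static void demo_fcp_complete(struct nvmefc_fcp_req *req,
                              u32 xfrd, u16 rsp_len, int err)
{
        req->transferred_length = xfrd; /* should equal payload_length */
        req->rcv_rsplen = rsp_len;      /* FCP RSP IU bytes received */
        req->status = err;              /* 0 or negative errno, e.g. -EIO */
        req->done(req);                 /* hand the io back to nvme-fc */
}
#endif
/*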
*/ struct nvme_fc_port_template { /* initiator-based functions */ void (*localport_delete)(struct nvme_fc_local_port *); void (*remoteport_delete)(struct nvme_fc_remote_port *); int (*create_queue)(struct nvme_fc_local_port *, unsigned int qidx, u16 qsize, void **handle); void (*delete_queue)(struct nvme_fc_local_port *, unsigned int qidx, void *handle); int (*ls_req)(struct nvme_fc_local_port *, struct nvme_fc_remote_port *, struct nvmefc_ls_req *); int (*fcp_io)(struct nvme_fc_local_port *, struct nvme_fc_remote_port *, void *hw_queue_handle, struct nvmefc_fcp_req *); void (*ls_abort)(struct nvme_fc_local_port *, struct nvme_fc_remote_port *, struct nvmefc_ls_req *); void (*fcp_abort)(struct nvme_fc_local_port *, struct nvme_fc_remote_port *, void *hw_queue_handle, struct nvmefc_fcp_req *); u32 max_hw_queues; u16 max_sgl_segments; u16 max_dif_sgl_segments; u64 dma_boundary; /* sizes of additional private data for data structures */ u32 local_priv_sz; u32 remote_priv_sz; u32 lsrqst_priv_sz; u32 fcprqst_priv_sz; }; /* * Initiator/Host functions */ int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, struct nvme_fc_port_template *template, struct device *dev, struct nvme_fc_local_port **lport_p); int nvme_fc_unregister_localport(struct nvme_fc_local_port *localport); int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, struct nvme_fc_port_info *pinfo, struct nvme_fc_remote_port **rport_p); int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport); void nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport); int nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *remoteport, u32 dev_loss_tmo); /* * *************** LLDD FC-NVME Target/Subsystem API *************** * * For FC LLDD's that are the NVME Subsystem role * * ****************************************************************** */ /** * struct nvmet_fc_port_info - port-specific ids and FC connection-specific * data element used during NVME Subsystem role * registrations * * Static fields describing the port being registered: * @node_name: FC WWNN for the port * @port_name: FC WWPN for the port * * Initialization values for dynamic port fields: * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must * be set to 0. */ struct nvmet_fc_port_info { u64 node_name; u64 port_name; u32 port_id; }; /** * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC * layer to represent the exchange context for * a FC-NVME Link Service (LS). * * The structure is allocated by the LLDD whenever a LS Request is received * from the FC link. The address of the structure is passed to the nvmet-fc * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure * will be passed back to the LLDD when the response is to be transmit. * The LLDD is to use the address to map back to the LLDD exchange structure * which maintains information such as the targetport the LS was received * on, the remote FC NVME initiator that sent the LS, and any FC exchange * context. Upon completion of the LS response transmit, the address of the * structure will be passed back to the LS rsp done() routine, allowing the * nvmet-fc layer to release dma resources. Upon completion of the done() * routine, no further access will be made by the nvmet-fc layer and the * LLDD can de-allocate the structure. * * Field initialization: * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that * is valid in the structure. 
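*/
/*
 * Registration sketch for the initiator-side entrypoints declared
 * above (demo_tmpl, demo_dev and demo_lport are hypothetical; example
 * only).  The transport keeps referencing the template for the life
 * of the localport:
 */
#if 0
static struct nvme_fc_local_port *demo_lport;

static int demo_register_localport(struct nvme_fc_port_info *pinfo,
                                   struct nvme_fc_port_template *demo_tmpl,
                                   struct device *demo_dev)
{
        return nvme_fc_register_localport(pinfo, demo_tmpl, demo_dev,
                                          &demo_lport);
}
#endif
/*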
* * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc * layer will fully set the fields in order to specify the response * payload buffer and its length as well as the done routine to be called * upon completion of the transmit. The nvmet-fc layer will also set a * private pointer for its own use in the done routine. * * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp * entrypoint. * @rspbuf: pointer to the LS response buffer * @rspdma: PCI DMA address of the LS response buffer * @rsplen: Length, in bytes, of the LS response buffer * @done: The callback routine the LLDD is to invoke upon completion of * transmitting the LS response. req argument is the pointer to * the original ls request. * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used * as part of the NVMET-FC processing. The LLDD is not to access * this pointer. */ struct nvmefc_tgt_ls_req { void *rspbuf; dma_addr_t rspdma; u16 rsplen; void (*done)(struct nvmefc_tgt_ls_req *req); void *nvmet_fc_private; /* LLDD is not to access !! */ }; /* Operations that NVME-FC layer may request the LLDD to perform for FCP */ enum { NVMET_FCOP_READDATA = 1, /* xmt data to initiator */ NVMET_FCOP_WRITEDATA = 2, /* xmt data from initiator */ NVMET_FCOP_READDATA_RSP = 3, /* xmt data to initiator and send * rsp as well */ NVMET_FCOP_RSP = 4, /* send rsp frame */ }; /** * struct nvmefc_tgt_fcp_req - Structure used between LLDD and NVMET-FC * layer to represent the exchange context and * the specific FC-NVME IU operation(s) to perform * for a FC-NVME FCP IO. * * Structure used between LLDD and nvmet-fc layer to represent the exchange * context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related * memory transfers, and its associated cqe transfer). * * The structure is allocated by the LLDD whenever a FCP CMD IU is received * from the FC link. The address of the structure is passed to the nvmet-fc * layer via the nvmet_fc_rcv_fcp_req() call. The address of the structure * will be passed back to the LLDD for the data operations and transmit of * the response. The LLDD is to use the address to map back to the LLDD * exchange structure which maintains information such as the targetport * the FCP I/O was received on, the remote FC NVME initiator that sent the * FCP I/O, and any FC exchange context. Upon completion of the FCP target * operation, the address of the structure will be passed back to the FCP * op done() routine, allowing the nvmet-fc layer to release dma resources. * Upon completion of the done() routine for either RSP or ABORT ops, no * further access will be made by the nvmet-fc layer and the LLDD can * de-allocate the structure. * * Field initialization: * At the time of the nvmet_fc_rcv_fcp_req() call, there is no content that * is valid in the structure. * * When the structure is used for an FCP target operation, the nvmet-fc * layer will fully set the fields in order to specify the scatter/gather * list, the transfer length, as well as the done routine to be called * upon completion of the operation. The nvmet-fc layer will also set a * private pointer for its own use in the done routine. * * Values set by the NVMET-FC layer prior to calling the LLDD fcp_op * entrypoint. * @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx) * @hwqid: Specifies the hw queue index (0..N-1, where N is the * max_hw_queues value from the LLDD's nvmet_fc_target_template) * that the operation is to use.
* @offset: Indicates the DATA_OUT/DATA_IN payload offset to be transferred. * Field is only valid on WRITEDATA, READDATA, or READDATA_RSP ops. * @timeout: amount of time, in seconds, to wait for a response from the NVME * host. A value of 0 is an infinite wait. * Valid only for the following ops: * WRITEDATA: caps the wait for data reception * READDATA_RSP & RSP: caps wait for FCP_CONF reception (if used) * @transfer_length: the length, in bytes, of the DATA_OUT or DATA_IN payload * that is to be transferred. * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. * @ba_rjt: Contains the BA_RJT payload that is to be transferred. * Valid only for the NVMET_FCOP_BA_RJT op. * @sg: Scatter/gather list for the DATA_OUT/DATA_IN payload data. * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. * @sg_cnt: Number of valid entries in the scatter/gather list. * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops. * @rspaddr: pointer to the FCP RSP IU buffer to be transmitted * Used by RSP and READDATA_RSP ops * @rspdma: PCI DMA address of the FCP RSP IU buffer * Used by RSP and READDATA_RSP ops * @rsplen: Length, in bytes, of the FCP RSP IU buffer * Used by RSP and READDATA_RSP ops * @done: The callback routine the LLDD is to invoke upon completion of * the operation. req argument is the pointer to the original * FCP subsystem op request. * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used * as part of the NVMET-FC processing. The LLDD is not to * reference this field. * * Values set by the LLDD indicating completion status of the FCP operation. * Must be set prior to calling the done() callback. * @transferred_length: amount of DATA_OUT payload data received by * a WRITEDATA operation. If not a WRITEDATA operation, value must * be set to 0. Should equal transfer_length on success. * @fcp_error: status of the FCP operation. Must be 0 on success; on failure * must be a NVME_SC_FC_xxxx value. */ struct nvmefc_tgt_fcp_req { u8 op; u16 hwqid; u32 offset; u32 timeout; u32 transfer_length; struct fc_ba_rjt ba_rjt; struct scatterlist *sg; int sg_cnt; void *rspaddr; dma_addr_t rspdma; u16 rsplen; void (*done)(struct nvmefc_tgt_fcp_req *); void *nvmet_fc_private; /* LLDD is not to access !! */ u32 transferred_length; int fcp_error; }; /* Target Features (Bit fields) LLDD supports */ enum { NVMET_FCTGTFEAT_READDATA_RSP = (1 << 0), /* Bit 0: supports the NVMET_FCOP_READDATA_RSP op, which * sends (the last) Read Data sequence followed by the RSP * sequence in one LLDD operation. Errors during Data * sequence transmit must not allow RSP sequence to be sent. */ }; /** * struct nvmet_fc_target_port - structure used between NVME-FC transport and * a LLDD to reference a local NVME subsystem port. * Allocated/created by the nvmet_fc_register_targetport() * transport interface. * * Fields with static values for the port. Initialized by the * port_info struct supplied to the registration call. * @port_num: NVME-FC transport subsystem port number * @node_name: FC WWNN for the port * @port_name: FC WWPN for the port * @private: pointer to memory allocated alongside the local port * structure that is specifically for the LLDD to use. * The length of the buffer corresponds to the target_priv_sz * value specified in the nvme_fc_target_template supplied by * the LLDD. * * Fields with dynamic values. Values may change based on link state. LLDD * may reference fields directly to change them. Initialized by the * port_info struct supplied to the registration call.
* @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must * be set to 0. * @port_state: Operational state of the port. */ struct nvmet_fc_target_port { /* static/read-only fields */ u32 port_num; u64 node_name; u64 port_name; void *private; /* dynamic fields */ u32 port_id; enum nvme_fc_obj_state port_state; } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ /** * struct nvmet_fc_target_template - structure containing static entrypoints * and operational parameters for an LLDD that supports NVME * subsystem behavior. Passed by reference in port * registrations. NVME-FC transport remembers template * reference and may access it during runtime operation. * * Subsystem/Target Transport Entrypoints/Parameters: * * @targetport_delete: The LLDD initiates deletion of a targetport via * nvmet_fc_unregister_targetport(). However, the teardown is * asynchronous. This routine is called upon the completion of the * teardown to inform the LLDD that the targetport has been deleted. * Entrypoint is Mandatory. * * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service. * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange * structure specified in the nvmet_fc_rcv_ls_req() call made when * the LS request was received. The structure will fully describe * the buffers for the response payload and the dma address of the * payload. The LLDD is to transmit the response (or return a non-zero * errno status), and upon completion of the transmit, call the * "done" routine specified in the nvmefc_tgt_ls_req structure * (argument to done is the ls request structure itself). * After calling the done routine, the LLDD shall consider the * LS handling complete and the nvmefc_tgt_ls_req structure may * be freed/released. * Entrypoint is Mandatory. * * @fcp_op: Called to perform a data transfer or transmit a response. * The nvmefc_tgt_fcp_req structure is the same LLDD-supplied * exchange structure specified in the nvmet_fc_rcv_fcp_req() call * made when the FCP CMD IU was received. The op field in the * structure shall indicate the operation for the LLDD to perform * relative to the io. * NVMET_FCOP_READDATA operation: the LLDD is to send the * payload data (described by sglist) to the host in 1 or * more FC sequences (preferably 1). Note: the fc-nvme layer * may call the READDATA operation multiple times for longer * payloads. * NVMET_FCOP_WRITEDATA operation: the LLDD is to receive the * payload data (described by sglist) from the host via 1 or * more FC sequences (preferably 1). The LLDD is to generate * the XFER_RDY IU(s) corresponding to the data being requested. * Note: the FC-NVME layer may call the WRITEDATA operation * multiple times for longer payloads. * NVMET_FCOP_READDATA_RSP operation: the LLDD is to send the * payload data (described by sglist) to the host in 1 or * more FC sequences (preferably 1). If an error occurs during * payload data transmission, the LLDD is to set the * nvmefc_tgt_fcp_req fcp_error and transferred_length field, then * consider the operation complete. On error, the LLDD is to not * transmit the FCP_RSP iu. If all payload data is transferred * successfully, the LLDD is to update the nvmefc_tgt_fcp_req * transferred_length field and may subsequently transmit the * FCP_RSP iu payload (described by rspbuf, rspdma, rsplen). * If FCP_CONF is supported, the LLDD is to await FCP_CONF * reception to confirm the RSP reception by the host. The LLDD * may retransmit the FCP_RSP iu if necessary per FC-NVME.
Upon * transmission of the FCP_RSP iu if FCP_CONF is not supported, * or upon success/failure of FCP_CONF if it is supported, the * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and * consider the operation complete. * NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload * (described by rspbuf, rspdma, rsplen). If FCP_CONF is * supported, the LLDD is to await FCP_CONF reception to confirm * the RSP reception by the host. The LLDD may retransmit the * FCP_RSP iu if FCP_CONF is not received per FC-NVME. Upon * transmission of the FCP_RSP iu if FCP_CONF is not supported, * or upon success/failure of FCP_CONF if it is supported, the * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and * consider the operation complete. * Upon completing the indicated operation, the LLDD is to set the * status fields for the operation (transferred_length and fcp_error * status) in the request, then call the "done" routine * indicated in the fcp request. After the operation completes, * regardless of whether the FCP_RSP iu was successfully transmitted, * the LLDD-supplied exchange structure must remain valid until the * transport calls the fcp_req_release() callback to return ownership * of the exchange structure back to the LLDD so that it may be used * for another fcp command. * Note: when calling the done routine for READDATA or WRITEDATA * operations, the fc-nvme layer may immediately convert, in the same * thread and before returning to the LLDD, the fcp operation to * the next operation for the fcp io and call the LLDD's fcp_op * entrypoint again. If fields in the fcp request are to be accessed post * the done call, the LLDD should save their values prior to calling * the done routine, and inspect the saved values after the done * routine. * Returns 0 on success, -<errno> on failure (Ex: -EIO) * Entrypoint is Mandatory. * * @fcp_abort: Called by the transport to abort an active command. * The command may be in-between operations (nothing active in LLDD) * or may have an active WRITEDATA operation pending. The LLDD is to * initiate the ABTS process for the command and return from the * callback. The ABTS does not need to be complete on the command. * The fcp_abort callback inherently cannot fail. After the * fcp_abort() callback completes, the transport will wait for any * outstanding operation (if there was one) to complete, then will * call the fcp_req_release() callback to return the command's * exchange context back to the LLDD. * Entrypoint is Mandatory. * * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req * to the LLDD after all operations on the fcp operation are complete. * This may be due to the command completing or upon completion of * abort cleanup. * Entrypoint is Mandatory. * * @defer_rcv: Called by the transport to signal the LLDD that it has * begun processing of a previously received NVME CMD IU. The LLDD * is now free to re-use the rcv buffer associated with the * nvmefc_tgt_fcp_req. * Entrypoint is Optional. * * @discovery_event: Called by the transport to generate RSCN * change notifications to NVME initiators. The RSCN notifications * should cause the initiator to rescan the discovery controller * on the targetport. * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. * * @max_sgl_segments: indicates the maximum number of sgl segments supported * by the LLDD * Value is Mandatory. Must be at least 1. Recommend at least 256.
* * @max_dif_sgl_segments: indicates the maximum number of sgl segments * supported by the LLDD for DIF operations. * Value is Mandatory. Must be at least 1. Recommend at least 256. * * @dma_boundary: indicates the dma address boundary across which dma mappings * will be split. * Value is Mandatory. Typical value is 0xFFFFFFFF to split across * 4Gig address boundaries. * * @target_features: The LLDD sets bits in this field to correspond to * optional features that are supported by the LLDD. * Refer to the NVMET_FCTGTFEAT_xxx values. * Value is Mandatory. Allowed to be zero. * * @target_priv_sz: The LLDD sets this field to the amount of additional * memory that it would like the fc nvme layer to allocate on the LLDD's * behalf whenever a targetport is allocated. The additional memory * area is solely for the use of the LLDD and its location is specified by * the targetport->private pointer. * Value is Mandatory. Allowed to be zero. */ struct nvmet_fc_target_template { void (*targetport_delete)(struct nvmet_fc_target_port *tgtport); int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_ls_req *tls_req); int (*fcp_op)(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq); void (*fcp_abort)(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq); void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq); void (*defer_rcv)(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq); void (*discovery_event)(struct nvmet_fc_target_port *tgtport); u32 max_hw_queues; u16 max_sgl_segments; u16 max_dif_sgl_segments; u64 dma_boundary; u32 target_features; u32 target_priv_sz; }; int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo, struct nvmet_fc_target_template *template, struct device *dev, struct nvmet_fc_target_port **tgtport_p); int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport); int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_ls_req *lsreq, void *lsreqbuf, u32 lsreqbuf_len); int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq, void *cmdiubuf, u32 cmdiubuf_len); void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq); #endif /* _NVME_FC_DRIVER_H */ thermal.h 0000644 00000046534 14722070374 0006373 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * thermal.h ($Revision: 0 $) * * Copyright (C) 2008 Intel Corp * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com> * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com> */ #ifndef __THERMAL_H__ #define __THERMAL_H__ #include <linux/of.h> #include <linux/idr.h> #include <linux/device.h> #include <linux/sysfs.h> #include <linux/workqueue.h> #include <uapi/linux/thermal.h> #define THERMAL_TRIPS_NONE -1 #define THERMAL_MAX_TRIPS 12 /* invalid cooling state */ #define THERMAL_CSTATE_INVALID -1UL /* No upper/lower limit requirement */ #define THERMAL_NO_LIMIT ((u32)~0) /* Default weight of a bound cooling device */ #define THERMAL_WEIGHT_DEFAULT 0 /* use a value below 0K to indicate an invalid/uninitialized temperature */ #define THERMAL_TEMP_INVALID -274000 /* Unit conversion macros */ #define DECI_KELVIN_TO_CELSIUS(t) ({ \ long _t = (t); \ ((_t-2732 >= 0) ?
(_t-2732+5)/10 : (_t-2732-5)/10); \ }) #define CELSIUS_TO_DECI_KELVIN(t) ((t)*10+2732) #define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100) #define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732) #define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off)) #define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732) /* Default Thermal Governor */ #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) #define DEFAULT_THERMAL_GOVERNOR "step_wise" #elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE) #define DEFAULT_THERMAL_GOVERNOR "fair_share" #elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) #define DEFAULT_THERMAL_GOVERNOR "user_space" #elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) #define DEFAULT_THERMAL_GOVERNOR "power_allocator" #endif struct thermal_zone_device; struct thermal_cooling_device; struct thermal_instance; enum thermal_device_mode { THERMAL_DEVICE_DISABLED = 0, THERMAL_DEVICE_ENABLED, }; enum thermal_trip_type { THERMAL_TRIP_ACTIVE = 0, THERMAL_TRIP_PASSIVE, THERMAL_TRIP_HOT, THERMAL_TRIP_CRITICAL, }; enum thermal_trend { THERMAL_TREND_STABLE, /* temperature is stable */ THERMAL_TREND_RAISING, /* temperature is rising */ THERMAL_TREND_DROPPING, /* temperature is dropping */ THERMAL_TREND_RAISE_FULL, /* apply highest cooling action */ THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */ }; /* Thermal notification reason */ enum thermal_notify_event { THERMAL_EVENT_UNSPECIFIED, /* Unspecified event */ THERMAL_EVENT_TEMP_SAMPLE, /* New Temperature sample */ THERMAL_TRIP_VIOLATED, /* TRIP Point violation */ THERMAL_TRIP_CHANGED, /* TRIP Point temperature changed */ THERMAL_DEVICE_DOWN, /* Thermal device is down */ THERMAL_DEVICE_UP, /* Thermal device is up after a down event */ THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */ THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */ }; struct thermal_zone_device_ops { int (*bind) (struct thermal_zone_device *, struct thermal_cooling_device *); int (*unbind) (struct thermal_zone_device *, struct thermal_cooling_device *); int (*get_temp) (struct thermal_zone_device *, int *); int (*set_trips) (struct thermal_zone_device *, int, int); int (*get_mode) (struct thermal_zone_device *, enum thermal_device_mode *); int (*set_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*get_trip_type) (struct thermal_zone_device *, int, enum thermal_trip_type *); int (*get_trip_temp) (struct thermal_zone_device *, int, int *); int (*set_trip_temp) (struct thermal_zone_device *, int, int); int (*get_trip_hyst) (struct thermal_zone_device *, int, int *); int (*set_trip_hyst) (struct thermal_zone_device *, int, int); int (*get_crit_temp) (struct thermal_zone_device *, int *); int (*set_emul_temp) (struct thermal_zone_device *, int); int (*get_trend) (struct thermal_zone_device *, int, enum thermal_trend *); int (*notify) (struct thermal_zone_device *, int, enum thermal_trip_type); }; struct thermal_cooling_device_ops { int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); int (*get_requested_power)(struct thermal_cooling_device *, struct thermal_zone_device *, u32 *); int (*state2power)(struct thermal_cooling_device *, struct thermal_zone_device *, unsigned long, u32 *); int (*power2state)(struct thermal_cooling_device *, struct
thermal_zone_device *, u32, unsigned long *); }; struct thermal_cooling_device { int id; char type[THERMAL_NAME_LENGTH]; struct device device; struct device_node *np; void *devdata; void *stats; const struct thermal_cooling_device_ops *ops; bool updated; /* true if the cooling device does not need update */ struct mutex lock; /* protect thermal_instances list */ struct list_head thermal_instances; struct list_head node; }; struct thermal_attr { struct device_attribute attr; char name[THERMAL_NAME_LENGTH]; }; /** * struct thermal_zone_device - structure for a thermal zone * @id: unique id number for each thermal zone * @type: the thermal zone device type * @device: &struct device for this thermal zone * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature * @trip_type_attrs: attributes for trip points for sysfs: trip type * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis * @devdata: private pointer for device private data * @trips: number of trip points the thermal zone supports * @trips_disabled: bitmap for disabled trips * @passive_delay: number of milliseconds to wait between polls when * performing passive cooling. * @polling_delay: number of milliseconds to wait between polls when * checking whether trip points have been crossed (0 for * interrupt driven systems) * @temperature: current temperature. This is only for core code, * drivers should use thermal_zone_get_temp() to get the * current temperature * @last_temperature: previous temperature read * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION * @passive: 1 if you've crossed a passive trip point, 0 otherwise. * @prev_low_trip: the low current temperature if you've crossed a passive trip point. * @prev_high_trip: the above current temperature if you've crossed a passive trip point. * @forced_passive: If > 0, temperature at which to switch on all ACPI * processor cooling devices. Currently only used by the * step-wise governor. * @need_update: if equals 1, thermal_zone_device_update needs to be invoked.
* @ops: operations this &thermal_zone_device supports * @tzp: thermal zone parameters * @governor: pointer to the governor for this thermal zone * @governor_data: private pointer for governor data * @thermal_instances: list of &struct thermal_instance of this thermal zone * @ida: &struct ida to generate unique id for this zone's cooling * devices * @lock: lock to protect thermal_instances list * @node: node in thermal_tz_list (in thermal_core.c) * @poll_queue: delayed work for polling * @notify_event: Last notification event */ struct thermal_zone_device { int id; char type[THERMAL_NAME_LENGTH]; struct device device; struct attribute_group trips_attribute_group; struct thermal_attr *trip_temp_attrs; struct thermal_attr *trip_type_attrs; struct thermal_attr *trip_hyst_attrs; void *devdata; int trips; unsigned long trips_disabled; /* bitmap for disabled trips */ int passive_delay; int polling_delay; int temperature; int last_temperature; int emul_temperature; int passive; int prev_low_trip; int prev_high_trip; unsigned int forced_passive; atomic_t need_update; struct thermal_zone_device_ops *ops; struct thermal_zone_params *tzp; struct thermal_governor *governor; void *governor_data; struct list_head thermal_instances; struct ida ida; struct mutex lock; struct list_head node; struct delayed_work poll_queue; enum thermal_notify_event notify_event; }; /** * struct thermal_governor - structure that holds thermal governor information * @name: name of the governor * @bind_to_tz: callback called when binding to a thermal zone. If it * returns 0, the governor is bound to the thermal zone, * otherwise it fails. * @unbind_from_tz: callback called when a governor is unbound from a * thermal zone. * @throttle: callback called for every trip point even if temperature is * below the trip point temperature * @governor_list: node in thermal_governor_list (in thermal_core.c) */ struct thermal_governor { char name[THERMAL_NAME_LENGTH]; int (*bind_to_tz)(struct thermal_zone_device *tz); void (*unbind_from_tz)(struct thermal_zone_device *tz); int (*throttle)(struct thermal_zone_device *tz, int trip); struct list_head governor_list; }; /* Structure that holds binding parameters for a zone */ struct thermal_bind_params { struct thermal_cooling_device *cdev; /* * This is a measure of how effectively these devices can * cool 'this' thermal zone. It shall be determined by * platform characterization. This value is relative to the * rest of the weights so a cooling device whose weight is * double that of another cooling device is twice as * effective. See Documentation/driver-api/thermal/sysfs-api.rst for more * information. */ int weight; /* * This is a bit mask that gives the binding relation between this * thermal zone and cdev, for a particular trip point. * See Documentation/driver-api/thermal/sysfs-api.rst for more information. */ int trip_mask; /* * This is an array of cooling state limits. Must have exactly * 2 * thermal_zone.number_of_trip_points. It is an array consisting * of tuples <lower-state upper-state> of state limits. Each trip * will be associated with one state limit tuple when binding. * A NULL pointer means <THERMAL_NO_LIMIT THERMAL_NO_LIMIT> * on all trips. */ unsigned long *binding_limits; int (*match) (struct thermal_zone_device *tz, struct thermal_cooling_device *cdev); }; /* Structure to define Thermal Zone parameters */ struct thermal_zone_params { char governor_name[THERMAL_NAME_LENGTH]; /* * a boolean to indicate if the thermal to hwmon sysfs interface * is required.
When no_hwmon == false, a hwmon sysfs interface * will be created. When no_hwmon == true, nothing will be done. */ bool no_hwmon; int num_tbps; /* Number of tbp entries */ struct thermal_bind_params *tbp; /* * Sustainable power (heat) that this thermal zone can dissipate in * mW */ u32 sustainable_power; /* * Proportional parameter of the PID controller when * overshooting (i.e., when temperature is below the target) */ s32 k_po; /* * Proportional parameter of the PID controller when * undershooting */ s32 k_pu; /* Integral parameter of the PID controller */ s32 k_i; /* Derivative parameter of the PID controller */ s32 k_d; /* threshold below which the error is no longer accumulated */ s32 integral_cutoff; /* * @slope: slope of a linear temperature adjustment curve. * Used by thermal zone drivers. */ int slope; /* * @offset: offset of a linear temperature adjustment curve. * Used by thermal zone drivers (default 0). */ int offset; }; struct thermal_genl_event { u32 orig; enum events event; }; /** * struct thermal_zone_of_device_ops - callbacks for handling DT based zones * * Mandatory: * @get_temp: a pointer to a function that reads the sensor temperature. * * Optional: * @get_trend: a pointer to a function that reads the sensor temperature trend. * @set_trips: a pointer to a function that sets a temperature window. When * this window is left the driver must inform the thermal core via * thermal_zone_device_update. * @set_emul_temp: a pointer to a function that sets sensor emulated * temperature. * @set_trip_temp: a pointer to a function that sets the trip temperature on * hardware. */ struct thermal_zone_of_device_ops { int (*get_temp)(void *, int *); int (*get_trend)(void *, int, enum thermal_trend *); int (*set_trips)(void *, int, int); int (*set_emul_temp)(void *, int); int (*set_trip_temp)(void *, int, int); }; /** * struct thermal_trip - representation of a point in temperature domain * @np: pointer to struct device_node that this trip point was created from * @temperature: temperature value in millicelsius * @hysteresis: relative hysteresis in millicelsius * @type: trip point type */ struct thermal_trip { struct device_node *np; int temperature; int hysteresis; enum thermal_trip_type type; }; /* Function declarations */ #ifdef CONFIG_THERMAL_OF struct thermal_zone_device * thermal_zone_of_sensor_register(struct device *dev, int id, void *data, const struct thermal_zone_of_device_ops *ops); void thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *tz); struct thermal_zone_device *devm_thermal_zone_of_sensor_register( struct device *dev, int id, void *data, const struct thermal_zone_of_device_ops *ops); void devm_thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *tz); #else static inline struct thermal_zone_device * thermal_zone_of_sensor_register(struct device *dev, int id, void *data, const struct thermal_zone_of_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline void thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *tz) { } static inline struct thermal_zone_device *devm_thermal_zone_of_sensor_register( struct device *dev, int id, void *data, const struct thermal_zone_of_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline void devm_thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *tz) { } #endif #if IS_ENABLED(CONFIG_THERMAL) static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) { return cdev->ops->get_requested_power &&
cdev->ops->state2power && cdev->ops->power2state; } int power_actor_get_max_power(struct thermal_cooling_device *, struct thermal_zone_device *tz, u32 *max_power); int power_actor_get_min_power(struct thermal_cooling_device *, struct thermal_zone_device *tz, u32 *min_power); int power_actor_set_power(struct thermal_cooling_device *, struct thermal_instance *, u32); struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, struct thermal_zone_params *, int, int); void thermal_zone_device_unregister(struct thermal_zone_device *); int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *, unsigned long, unsigned long, unsigned int); int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); void thermal_zone_set_trips(struct thermal_zone_device *); struct thermal_cooling_device *thermal_cooling_device_register(const char *, void *, const struct thermal_cooling_device_ops *); struct thermal_cooling_device * thermal_of_cooling_device_register(struct device_node *np, const char *, void *, const struct thermal_cooling_device_ops *); struct thermal_cooling_device * devm_thermal_of_cooling_device_register(struct device *dev, struct device_node *np, char *type, void *devdata, const struct thermal_cooling_device_ops *ops); void thermal_cooling_device_unregister(struct thermal_cooling_device *); struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name); int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); int get_tz_trend(struct thermal_zone_device *, int); struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, struct thermal_cooling_device *, int); void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); #else static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) { return false; } static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, struct thermal_zone_device *tz, u32 *max_power) { return 0; } static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, struct thermal_zone_device *tz, u32 *min_power) { return -ENODEV; } static inline int power_actor_set_power(struct thermal_cooling_device *cdev, struct thermal_instance *tz, u32 power) { return 0; } static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, struct thermal_zone_params *tzp, int passive_delay, int polling_delay) { return ERR_PTR(-ENODEV); } static inline void thermal_zone_device_unregister( struct thermal_zone_device *tz) { } static inline int thermal_zone_bind_cooling_device( struct thermal_zone_device *tz, int trip, struct thermal_cooling_device *cdev, unsigned long upper, unsigned long lower, unsigned int weight) { return -ENODEV; } static inline int thermal_zone_unbind_cooling_device( struct thermal_zone_device *tz, int trip, struct thermal_cooling_device *cdev) { return -ENODEV; } static inline void thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) { } static inline void thermal_zone_set_trips(struct thermal_zone_device *tz) { } 
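/*
 * Illustrative sketch of how a sensor driver typically uses the OF
 * registration helpers declared above; it is not part of the original
 * header. The names my_sensor_read_mc(), my_sensor_ops, dev and drvdata
 * are hypothetical; only the thermal_zone_of_device_ops layout and the
 * devm_* helper come from this header.
 *
 *	static int my_sensor_get_temp(void *data, int *temp)
 *	{
 *		*temp = my_sensor_read_mc(data);	// millicelsius
 *		return 0;
 *	}
 *
 *	static const struct thermal_zone_of_device_ops my_sensor_ops = {
 *		.get_temp = my_sensor_get_temp,		// the only mandatory op
 *	};
 *
 *	tz = devm_thermal_zone_of_sensor_register(dev, 0, drvdata,
 *						   &my_sensor_ops);
 *	if (IS_ERR(tz))
 *		return PTR_ERR(tz);
 */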
static inline struct thermal_cooling_device * thermal_cooling_device_register(const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline struct thermal_cooling_device * thermal_of_cooling_device_register(struct device_node *np, const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline struct thermal_cooling_device * devm_thermal_of_cooling_device_register(struct device *dev, struct device_node *np, char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline void thermal_cooling_device_unregister( struct thermal_cooling_device *cdev) { } static inline struct thermal_zone_device *thermal_zone_get_zone_by_name( const char *name) { return ERR_PTR(-ENODEV); } static inline int thermal_zone_get_temp( struct thermal_zone_device *tz, int *temp) { return -ENODEV; } static inline int thermal_zone_get_slope( struct thermal_zone_device *tz) { return -ENODEV; } static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) { return -ENODEV; } static inline struct thermal_instance * get_thermal_instance(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev, int trip) { return ERR_PTR(-ENODEV); } static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) { } static inline void thermal_notify_framework(struct thermal_zone_device *tz, int trip) { } #endif /* CONFIG_THERMAL */ #if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL) extern int thermal_generate_netlink_event(struct thermal_zone_device *tz, enum events event); #else static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz, enum events event) { return 0; } #endif #endif /* __THERMAL_H__ */ list.h 0000644 00000065632 14722070374 0005712 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_LIST_H #define _LINUX_LIST_H #include <linux/types.h> #include <linux/stddef.h> #include <linux/poison.h> #include <linux/const.h> #include <linux/kernel.h> /* * Simple doubly linked list implementation. * * Some of the internal functions ("__xxx") are useful when * manipulating whole lists rather than single entries, as * sometimes we already know the next/prev entries and we can * generate better code by using them directly rather than * using the generic single-entry routines. */ #define LIST_HEAD_INIT(name) { &(name), &(name) } #define LIST_HEAD(name) \ struct list_head name = LIST_HEAD_INIT(name) static inline void INIT_LIST_HEAD(struct list_head *list) { WRITE_ONCE(list->next, list); list->prev = list; } #ifdef CONFIG_DEBUG_LIST extern bool __list_add_valid(struct list_head *new, struct list_head *prev, struct list_head *next); extern bool __list_del_entry_valid(struct list_head *entry); #else static inline bool __list_add_valid(struct list_head *new, struct list_head *prev, struct list_head *next) { return true; } static inline bool __list_del_entry_valid(struct list_head *entry) { return true; } #endif /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! 
*/ static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { if (!__list_add_valid(new, prev, next)) return; next->prev = new; new->next = next; new->prev = prev; WRITE_ONCE(prev->next, new); } /** * list_add - add a new entry * @new: new entry to be added * @head: list head to add it after * * Insert a new entry after the specified head. * This is good for implementing stacks. */ static inline void list_add(struct list_head *new, struct list_head *head) { __list_add(new, head, head->next); } /** * list_add_tail - add a new entry * @new: new entry to be added * @head: list head to add it before * * Insert a new entry before the specified head. * This is useful for implementing queues. */ static inline void list_add_tail(struct list_head *new, struct list_head *head) { __list_add(new, head->prev, head); } /* * Delete a list entry by making the prev/next entries * point to each other. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_del(struct list_head * prev, struct list_head * next) { next->prev = prev; WRITE_ONCE(prev->next, next); } /* * Delete a list entry and clear the 'prev' pointer. * * This is a special-purpose list clearing method used in the networking code * for lists allocated as per-cpu, where we don't want to incur the extra * WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this * needs to check the node 'prev' pointer instead of calling list_empty(). */ static inline void __list_del_clearprev(struct list_head *entry) { __list_del(entry->prev, entry->next); entry->prev = NULL; } /** * list_del - deletes entry from list. * @entry: the element to delete from the list. * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. */ static inline void __list_del_entry(struct list_head *entry) { if (!__list_del_entry_valid(entry)) return; __list_del(entry->prev, entry->next); } static inline void list_del(struct list_head *entry) { __list_del_entry(entry); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } /** * list_replace - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert * * If @old was empty, it will be overwritten. */ static inline void list_replace(struct list_head *old, struct list_head *new) { new->next = old->next; new->next->prev = new; new->prev = old->prev; new->prev->next = new; } static inline void list_replace_init(struct list_head *old, struct list_head *new) { list_replace(old, new); INIT_LIST_HEAD(old); } /** * list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position * @entry1: the location to place entry2 * @entry2: the location to place entry1 */ static inline void list_swap(struct list_head *entry1, struct list_head *entry2) { struct list_head *pos = entry2->prev; list_del(entry2); list_replace(entry1, entry2); if (pos == entry1) pos = entry2; list_add(entry1, pos); } /** * list_del_init - deletes entry from list and reinitialize it. * @entry: the element to delete from the list. 
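 *
 * Illustrative usage (struct item and the surrounding code are
 * hypothetical, not part of this header):
 *
 *	struct item {
 *		int value;
 *		struct list_head node;
 *	};
 *
 *	LIST_HEAD(pending);
 *
 *	list_add(&it->node, &pending);
 *	...
 *	list_del_init(&it->node);
 *	// it->node is now a valid empty list head and may be re-added;
 *	// list_empty(&it->node) returns true, unlike after list_del().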
*/ static inline void list_del_init(struct list_head *entry) { __list_del_entry(entry); INIT_LIST_HEAD(entry); } /** * list_move - delete from one list and add as another's head * @list: the entry to move * @head: the head that will precede our entry */ static inline void list_move(struct list_head *list, struct list_head *head) { __list_del_entry(list); list_add(list, head); } /** * list_move_tail - delete from one list and add as another's tail * @list: the entry to move * @head: the head that will follow our entry */ static inline void list_move_tail(struct list_head *list, struct list_head *head) { __list_del_entry(list); list_add_tail(list, head); } /** * list_bulk_move_tail - move a subsection of a list to its tail * @head: the head that will follow our entry * @first: first entry to move * @last: last entry to move, can be the same as first * * Move all entries between @first and including @last before @head. * All three entries must belong to the same linked list. */ static inline void list_bulk_move_tail(struct list_head *head, struct list_head *first, struct list_head *last) { first->prev->next = last->next; last->next->prev = first->prev; head->prev->next = first; first->prev = head->prev; last->next = head; head->prev = last; } /** * list_is_first -- tests whether @list is the first entry in list @head * @list: the entry to test * @head: the head of the list */ static inline int list_is_first(const struct list_head *list, const struct list_head *head) { return list->prev == head; } /** * list_is_last - tests whether @list is the last entry in list @head * @list: the entry to test * @head: the head of the list */ static inline int list_is_last(const struct list_head *list, const struct list_head *head) { return list->next == head; } /** * list_empty - tests whether a list is empty * @head: the list to test. */ static inline int list_empty(const struct list_head *head) { return READ_ONCE(head->next) == head; } /** * list_del_init_careful - deletes entry from list and reinitialize it. * @entry: the element to delete from the list. * * This is the same as list_del_init(), except designed to be used * together with list_empty_careful() in a way to guarantee ordering * of other memory operations. * * Any memory operations done before a list_del_init_careful() are * guaranteed to be visible after a list_empty_careful() test. */ static inline void list_del_init_careful(struct list_head *entry) { __list_del_entry(entry); entry->prev = entry; smp_store_release(&entry->next, entry); } /** * list_empty_careful - tests whether a list is empty and not being modified * @head: the list to test * * Description: * tests whether a list is empty _and_ checks that no other CPU might be * in the process of modifying either member (next or prev) * * NOTE: using list_empty_careful() without synchronization * can only be safe if the only activity that can happen * to the list entry is list_del_init(). Eg. it cannot be used * if another CPU could re-list_add() it. */ static inline int list_empty_careful(const struct list_head *head) { struct list_head *next = smp_load_acquire(&head->next); return (next == head) && (next == head->prev); } /** * list_rotate_left - rotate the list to the left * @head: the head of the list */ static inline void list_rotate_left(struct list_head *head) { struct list_head *first; if (!list_empty(head)) { first = head->next; list_move_tail(first, head); } } /** * list_rotate_to_front() - Rotate list to specific item. * @list: The desired new front of the list. 
* @head: The head of the list. * * Rotates list so that @list becomes the new front of the list. */ static inline void list_rotate_to_front(struct list_head *list, struct list_head *head) { /* * Deletes the list head from the list denoted by @head and * places it as the tail of @list, this effectively rotates the * list so that @list is at the front. */ list_move_tail(head, list); } /** * list_is_singular - tests whether a list has just one entry. * @head: the list to test. */ static inline int list_is_singular(const struct list_head *head) { return !list_empty(head) && (head->next == head->prev); } static inline void __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) { struct list_head *new_first = entry->next; list->next = head->next; list->next->prev = list; list->prev = entry; entry->next = list; head->next = new_first; new_first->prev = head; } /** * list_cut_position - cut a list into two * @list: a new list to add all removed entries * @head: a list with entries * @entry: an entry within head, could be the head itself * and if so we won't cut the list * * This helper moves the initial part of @head, up to and * including @entry, from @head to @list. You should * pass on @entry an element you know is on @head. @list * should be an empty list or a list you do not care about * losing its data. * */ static inline void list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) { if (list_empty(head)) return; if (list_is_singular(head) && (head->next != entry && head != entry)) return; if (entry == head) INIT_LIST_HEAD(list); else __list_cut_position(list, head, entry); } /** * list_cut_before - cut a list into two, before given entry * @list: a new list to add all removed entries * @head: a list with entries * @entry: an entry within head, could be the head itself * * This helper moves the initial part of @head, up to but * excluding @entry, from @head to @list. You should pass * in @entry an element you know is on @head. @list should * be an empty list or a list you do not care about losing * its data. * If @entry == @head, all entries on @head are moved to * @list. */ static inline void list_cut_before(struct list_head *list, struct list_head *head, struct list_head *entry) { if (head->next == entry) { INIT_LIST_HEAD(list); return; } list->next = head->next; list->next->prev = list; list->prev = entry->prev; list->prev->next = list; head->next = entry; entry->prev = head; } static inline void __list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next) { struct list_head *first = list->next; struct list_head *last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; } /** * list_splice - join two lists, this is designed for stacks * @list: the new list to add. * @head: the place to add it in the first list. */ static inline void list_splice(const struct list_head *list, struct list_head *head) { if (!list_empty(list)) __list_splice(list, head, head->next); } /** * list_splice_tail - join two lists, each list being a queue * @list: the new list to add. * @head: the place to add it in the first list. */ static inline void list_splice_tail(struct list_head *list, struct list_head *head) { if (!list_empty(list)) __list_splice(list, head->prev, head); } /** * list_splice_init - join two lists and reinitialise the emptied list. * @list: the new list to add. * @head: the place to add it in the first list. 
* * The list at @list is reinitialised */ static inline void list_splice_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { __list_splice(list, head, head->next); INIT_LIST_HEAD(list); } } /** * list_splice_tail_init - join two lists and reinitialise the emptied list * @list: the new list to add. * @head: the place to add it in the first list. * * Each of the lists is a queue. * The list at @list is reinitialised */ static inline void list_splice_tail_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { __list_splice(list, head->prev, head); INIT_LIST_HEAD(list); } } /** * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. */ #define list_entry(ptr, type, member) \ container_of(ptr, type, member) /** * list_first_entry - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) /** * list_last_entry - get the last element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ #define list_last_entry(ptr, type, member) \ list_entry((ptr)->prev, type, member) /** * list_first_entry_or_null - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. */ #define list_first_entry_or_null(ptr, type, member) ({ \ struct list_head *head__ = (ptr); \ struct list_head *pos__ = READ_ONCE(head__->next); \ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \ }) /** * list_next_entry - get the next element in list * @pos: the type * to cursor * @member: the name of the list_head within the struct. */ #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) /** * list_prev_entry - get the prev element in list * @pos: the type * to cursor * @member: the name of the list_head within the struct. */ #define list_prev_entry(pos, member) \ list_entry((pos)->member.prev, typeof(*(pos)), member) /** * list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. */ #define list_for_each(pos, head) \ for (pos = (head)->next; pos != (head); pos = pos->next) /** * list_for_each_continue - continue iteration over a list * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. * * Continue to iterate over a list, continuing after the current position. */ #define list_for_each_continue(pos, head) \ for (pos = pos->next; pos != (head); pos = pos->next) /** * list_for_each_prev - iterate over a list backwards * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. */ #define list_for_each_prev(pos, head) \ for (pos = (head)->prev; pos != (head); pos = pos->prev) /** * list_for_each_safe - iterate over a list safe against removal of list entry * @pos: the &struct list_head to use as a loop cursor. 
* @n: another &struct list_head to use as temporary storage * @head: the head for your list. */ #define list_for_each_safe(pos, n, head) \ for (pos = (head)->next, n = pos->next; pos != (head); \ pos = n, n = pos->next) /** * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry * @pos: the &struct list_head to use as a loop cursor. * @n: another &struct list_head to use as temporary storage * @head: the head for your list. */ #define list_for_each_prev_safe(pos, n, head) \ for (pos = (head)->prev, n = pos->prev; \ pos != (head); \ pos = n, n = pos->prev) /** * list_entry_is_head - test if the entry points to the head of the list * @pos: the type * to cursor * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_entry_is_head(pos, head, member) \ (&pos->member == (head)) /** * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** * list_for_each_entry_reverse - iterate backwards over list of given type. * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member); \ !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() * @pos: the type * to use as a start point * @head: the head of the list * @member: the name of the list_head within the struct. * * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). */ #define list_prepare_entry(pos, head, member) \ ((pos) ? : list_entry(head, typeof(*pos), member)) /** * list_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position. */ #define list_for_each_entry_continue(pos, head, member) \ for (pos = list_next_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** * list_for_each_entry_continue_reverse - iterate backwards from the given point * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * Start to iterate over list of given type backwards, continuing after * the current position. */ #define list_for_each_entry_continue_reverse(pos, head, member) \ for (pos = list_prev_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** * list_for_each_entry_from - iterate over list of given type from the current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing from current position. 
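 *
 * Illustrative use (hypothetical struct item list; 'it' was positioned
 * by an earlier loop and iteration resumes at 'it', inclusive):
 *
 *	list_for_each_entry(it, &items, node)
 *		if (it->value == key)
 *			break;
 *	...
 *	list_for_each_entry_from(it, &items, node)
 *		it->value = 0;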
*/ #define list_for_each_entry_from(pos, head, member) \ for (; !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** * list_for_each_entry_from_reverse - iterate backwards over list of given type * from the current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * Iterate backwards over list of given type, continuing from current position. */ #define list_for_each_entry_from_reverse(pos, head, member) \ for (; !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member), \ n = list_next_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** * list_for_each_entry_safe_continue - continue list iteration safe against removal * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing after current point, * safe against removal of list entry. */ #define list_for_each_entry_safe_continue(pos, n, head, member) \ for (pos = list_next_entry(pos, member), \ n = list_next_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** * list_for_each_entry_safe_from - iterate over list from current point safe against removal * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_head within the struct. * * Iterate over list of given type from current point, safe against * removal of list entry. */ #define list_for_each_entry_safe_from(pos, n, head, member) \ for (n = list_next_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_head within the struct. * * Iterate backwards over list of given type, safe against removal * of list entry. */ #define list_for_each_entry_safe_reverse(pos, n, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member), \ n = list_prev_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = n, n = list_prev_entry(n, member)) /** * list_safe_reset_next - reset a stale list_for_each_entry_safe loop * @pos: the loop cursor used in the list_for_each_entry_safe loop * @n: temporary storage used in list_for_each_entry_safe * @member: the name of the list_head within the struct. * * list_safe_reset_next is not safe to use in general if the list may be * modified concurrently (eg. the lock is dropped in the loop body). An * exception to this is if the cursor element (pos) is pinned in the list, * and list_safe_reset_next is called after re-taking the lock and before * completing the current iteration of the loop body. 
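 *
 * Illustrative pattern (queue_lock, queue and do_blocking_work() are
 * hypothetical; pos must remain on the list while the lock is dropped):
 *
 *	list_for_each_entry_safe(pos, n, &queue, node) {
 *		if (pos->needs_work) {
 *			spin_unlock(&queue_lock);
 *			do_blocking_work(pos);
 *			spin_lock(&queue_lock);
 *			list_safe_reset_next(pos, n, node);
 *		}
 *	}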
*/ #define list_safe_reset_next(pos, n, member) \ n = list_next_entry(pos, member) /* * Double linked lists with a single pointer list head. * Mostly useful for hash tables where the two pointer list head is * too wasteful. * You lose the ability to access the tail in O(1). */ #define HLIST_HEAD_INIT { .first = NULL } #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) static inline void INIT_HLIST_NODE(struct hlist_node *h) { h->next = NULL; h->pprev = NULL; } static inline int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; } static inline int hlist_empty(const struct hlist_head *h) { return !READ_ONCE(h->first); } static inline void __hlist_del(struct hlist_node *n) { struct hlist_node *next = n->next; struct hlist_node **pprev = n->pprev; WRITE_ONCE(*pprev, next); if (next) next->pprev = pprev; } static inline void hlist_del(struct hlist_node *n) { __hlist_del(n); n->next = LIST_POISON1; n->pprev = LIST_POISON2; } static inline void hlist_del_init(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); INIT_HLIST_NODE(n); } } static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; n->next = first; if (first) first->pprev = &n->next; WRITE_ONCE(h->first, n); n->pprev = &h->first; } /* next must be != NULL */ static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next) { n->pprev = next->pprev; n->next = next; next->pprev = &n->next; WRITE_ONCE(*(n->pprev), n); } static inline void hlist_add_behind(struct hlist_node *n, struct hlist_node *prev) { n->next = prev->next; prev->next = n; n->pprev = &prev->next; if (n->next) n->next->pprev = &n->next; } /* after that we'll appear to be on some hlist and hlist_del will work */ static inline void hlist_add_fake(struct hlist_node *n) { n->pprev = &n->next; } static inline bool hlist_fake(struct hlist_node *h) { return h->pprev == &h->next; } /* * Check whether the node is the only node of the head without * accessing head: */ static inline bool hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) { return !n->next && n->pprev == &h->first; } /* * Move a list from one list head to another. Fixup the pprev * reference of the first entry if it exists. */ static inline void hlist_move_list(struct hlist_head *old, struct hlist_head *new) { new->first = old->first; if (new->first) new->first->pprev = &new->first; old->first = NULL; } #define hlist_entry(ptr, type, member) container_of(ptr,type,member) #define hlist_for_each(pos, head) \ for (pos = (head)->first; pos ; pos = pos->next) #define hlist_for_each_safe(pos, n, head) \ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ pos = n) #define hlist_entry_safe(ptr, type, member) \ ({ typeof(ptr) ____ptr = (ptr); \ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ }) /** * hlist_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry(pos, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_continue - iterate over a hlist continuing after current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. 
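 *
 * Illustrative use (hypothetical hash bucket of struct item; assumes the
 * first loop left 'it' at a match, and the scan continues after it):
 *
 *	hlist_for_each_entry(it, &bucket, hnode)
 *		if (it->key == key)
 *			break;
 *	...
 *	hlist_for_each_entry_continue(it, hnode)
 *		if (it->key == key)
 *			count++;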
*/ #define hlist_for_each_entry_continue(pos, member) \ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_from - iterate over a hlist continuing from current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_from(pos, member) \ for (; pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. * @n: another &struct hlist_node to use as temporary storage * @head: the head for your list. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_safe(pos, n, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\ pos && ({ n = pos->member.next; 1; }); \ pos = hlist_entry_safe(n, typeof(*pos), member)) #endif kconfig.h 0000644 00000004661 14722070374 0006352 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_KCONFIG_H #define __LINUX_KCONFIG_H #include <generated/autoconf.h> #ifdef CONFIG_CPU_BIG_ENDIAN #define __BIG_ENDIAN 4321 #else #define __LITTLE_ENDIAN 1234 #endif #define __ARG_PLACEHOLDER_1 0, #define __take_second_arg(__ignored, val, ...) val /* * The use of "&&" / "||" is limited in certain expressions. * The following make it possible to calculate "and" / "or" with macro expansion only. */ #define __and(x, y) ___and(x, y) #define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y) #define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0) #define __or(x, y) ___or(x, y) #define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y) #define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y) /* * Helper macros to use CONFIG_ options in C/CPP expressions. Note that * these only work with boolean and tristate options. */ /* * Getting something that works in C and CPP for an arg that may or may * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1" * we match on the placeholder define, insert the "0," for arg1 and generate * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one). * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when * the last step cherry picks the 2nd arg, we get a zero. */ #define __is_defined(x) ___is_defined(x) #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val) #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0) /* * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 * otherwise. For boolean options, this is equivalent to * IS_ENABLED(CONFIG_FOO). */ #define IS_BUILTIN(option) __is_defined(option) /* * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 * otherwise. */ #define IS_MODULE(option) __is_defined(option##_MODULE) /* * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled * code can call a function defined in code compiled based on CONFIG_FOO. * This is similar to IS_ENABLED(), but returns false when invoked from * built-in code when CONFIG_FOO is set to 'm'. */ #define IS_REACHABLE(option) __or(IS_BUILTIN(option), \ __and(IS_MODULE(option), __is_defined(MODULE))) /* * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', * 0 otherwise.
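 *
 * Illustrative use (do_foo_setup() is hypothetical):
 *
 *	if (IS_ENABLED(CONFIG_FOO))
 *		do_foo_setup();
 *
 * Unlike an #ifdef block, the compiler still sees and type-checks the
 * disabled branch before it is removed as dead code.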
*/ #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) #endif /* __LINUX_KCONFIG_H */ srcu.h 0000644 00000015112 14722070374 0005677 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Sleepable Read-Copy Update mechanism for mutual exclusion * * Copyright (C) IBM Corporation, 2006 * Copyright (C) Fujitsu, 2012 * * Author: Paul McKenney <paulmck@linux.ibm.com> * Lai Jiangshan <laijs@cn.fujitsu.com> * * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU/ *.txt * */ #ifndef _LINUX_SRCU_H #define _LINUX_SRCU_H #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/workqueue.h> #include <linux/rcu_segcblist.h> struct srcu_struct; #ifdef CONFIG_DEBUG_LOCK_ALLOC int __init_srcu_struct(struct srcu_struct *ssp, const char *name, struct lock_class_key *key); #define init_srcu_struct(ssp) \ ({ \ static struct lock_class_key __srcu_key; \ \ __init_srcu_struct((ssp), #ssp, &__srcu_key); \ }) #define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name }, #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ int init_srcu_struct(struct srcu_struct *ssp); #define __SRCU_DEP_MAP_INIT(srcu_name) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #ifdef CONFIG_TINY_SRCU #include <linux/srcutiny.h> #elif defined(CONFIG_TREE_SRCU) #include <linux/srcutree.h> #elif defined(CONFIG_SRCU) #error "Unknown SRCU implementation specified to kernel configuration" #else /* Dummy definition for things like notifiers. Actual use gets link error. */ struct srcu_struct { }; #endif void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, void (*func)(struct rcu_head *head)); void cleanup_srcu_struct(struct srcu_struct *ssp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); void synchronize_srcu(struct srcu_struct *ssp); #ifdef CONFIG_DEBUG_LOCK_ALLOC /** * srcu_read_lock_held - might we be in SRCU read-side critical section? * @ssp: The srcu_struct structure to check * * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU * read-side critical section. In the absence of CONFIG_DEBUG_LOCK_ALLOC, * this assumes we are in an SRCU read-side critical section unless it can * prove otherwise. * * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot * and while lockdep is disabled. * * Note that SRCU is based on its own state machine and it doesn't * rely on normal RCU; it can be called from a CPU which, * from an RCU point of view, is in the idle loop or offline. */ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) { if (!debug_lockdep_rcu_enabled()) return 1; return lock_is_held(&ssp->dep_map); } #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) { return 1; } #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /** * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing * @p: the pointer to fetch and protect for later dereferencing * @ssp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. * @c: condition to check for update-side use * * If PROVE_RCU is enabled, invoking this outside of an RCU read-side * critical section will result in an RCU-lockdep splat, unless @c evaluates * to 1. The @c argument will normally be a logical expression containing * lockdep_is_held() calls.
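 *
 * Illustrative use (gp, my_srcu and my_lock are hypothetical; the
 * pointer may be fetched either inside an SRCU read-side critical
 * section or with the update-side lock held):
 *
 *	p = srcu_dereference_check(gp, &my_srcu,
 *				   lockdep_is_held(&my_lock));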
*/ #define srcu_dereference_check(p, ssp, c) \ __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu) /** * srcu_dereference - fetch SRCU-protected pointer for later dereferencing * @p: the pointer to fetch and protect for later dereferencing * @ssp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. * * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU * is enabled, invoking this outside of an RCU read-side critical * section will result in an RCU-lockdep splat. */ #define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0) /** * srcu_dereference_notrace - no tracing and no lockdep calls from here * @p: the pointer to fetch and protect for later dereferencing * @ssp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. */ #define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1) /** * srcu_read_lock - register a new reader for an SRCU-protected structure. * @ssp: srcu_struct in which to register the new reader. * * Enter an SRCU read-side critical section. Note that SRCU read-side * critical sections may be nested. However, it is illegal to * call anything that waits on an SRCU grace period for the same * srcu_struct, whether directly or indirectly. Please note that * one way to indirectly wait on an SRCU grace period is to acquire * a mutex that is held elsewhere while calling synchronize_srcu() or * synchronize_srcu_expedited(). * * Note that srcu_read_lock() and the matching srcu_read_unlock() must * occur in the same context, for example, it is illegal to invoke * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() * was invoked in process context. */ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp) { int retval; retval = __srcu_read_lock(ssp); rcu_lock_acquire(&(ssp)->dep_map); return retval; } /* Used by tracing, cannot be traced and cannot invoke lockdep. */ static inline notrace int srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) { int retval; retval = __srcu_read_lock(ssp); return retval; } /** * srcu_read_unlock - unregister an old reader from an SRCU-protected structure. * @ssp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock(). * * Exit an SRCU read-side critical section. */ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp) { WARN_ON_ONCE(idx & ~0x1); rcu_lock_release(&(ssp)->dep_map); __srcu_read_unlock(ssp, idx); } /* Used by tracing, cannot be traced and cannot call lockdep. */ static inline notrace void srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp) { __srcu_read_unlock(ssp, idx); } /** * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock * * Converts the preceding srcu_read_unlock into a two-way memory barrier. * * Call this after srcu_read_unlock, to guarantee that all memory operations * that occur after smp_mb__after_srcu_read_unlock will appear to happen after * the preceding srcu_read_unlock. */ static inline void smp_mb__after_srcu_read_unlock(void) { /* __srcu_read_unlock has smp_mb() internally so nothing to do here.
*/ } #endif netpoll.h 0000644 00000005276 14722070374 0006412 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Common code for low-level network console, dump, and debugger code * * Derived from netconsole, kgdb-over-ethernet, and netdump patches */ #ifndef _LINUX_NETPOLL_H #define _LINUX_NETPOLL_H #include <linux/netdevice.h> #include <linux/interrupt.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/refcount.h> union inet_addr { __u32 all[4]; __be32 ip; __be32 ip6[4]; struct in_addr in; struct in6_addr in6; }; struct netpoll { struct net_device *dev; char dev_name[IFNAMSIZ]; const char *name; union inet_addr local_ip, remote_ip; bool ipv6; u16 local_port, remote_port; u8 remote_mac[ETH_ALEN]; }; struct netpoll_info { refcount_t refcnt; struct semaphore dev_lock; struct sk_buff_head txq; struct delayed_work tx_work; struct netpoll *netpoll; struct rcu_head rcu; }; #ifdef CONFIG_NETPOLL void netpoll_poll_dev(struct net_device *dev); void netpoll_poll_disable(struct net_device *dev); void netpoll_poll_enable(struct net_device *dev); #else static inline void netpoll_poll_disable(struct net_device *dev) { return; } static inline void netpoll_poll_enable(struct net_device *dev) { return; } #endif void netpoll_send_udp(struct netpoll *np, const char *msg, int len); void netpoll_print_options(struct netpoll *np); int netpoll_parse_options(struct netpoll *np, char *opt); int __netpoll_setup(struct netpoll *np, struct net_device *ndev); int netpoll_setup(struct netpoll *np); void __netpoll_cleanup(struct netpoll *np); void __netpoll_free(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, struct net_device *dev); static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { unsigned long flags; local_irq_save(flags); netpoll_send_skb_on_dev(np, skb, np->dev); local_irq_restore(flags); } #ifdef CONFIG_NETPOLL static inline void *netpoll_poll_lock(struct napi_struct *napi) { struct net_device *dev = napi->dev; if (dev && dev->npinfo) { int owner = smp_processor_id(); while (cmpxchg(&napi->poll_owner, -1, owner) != -1) cpu_relax(); return napi; } return NULL; } static inline void netpoll_poll_unlock(void *have) { struct napi_struct *napi = have; if (napi) smp_store_release(&napi->poll_owner, -1); } static inline bool netpoll_tx_running(struct net_device *dev) { return irqs_disabled(); } #else static inline void *netpoll_poll_lock(struct napi_struct *napi) { return NULL; } static inline void netpoll_poll_unlock(void *have) { } static inline void netpoll_netdev_init(struct net_device *dev) { } static inline bool netpoll_tx_running(struct net_device *dev) { return false; } #endif #endif ioc3.h 0000644 00000006216 14722070374 0005565 0 ustar 00 /* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. 
* * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org> */ #ifndef _LINUX_IOC3_H #define _LINUX_IOC3_H #include <asm/sn/ioc3.h> #define IOC3_MAX_SUBMODULES 32 #define IOC3_CLASS_NONE 0 #define IOC3_CLASS_BASE_IP27 1 #define IOC3_CLASS_BASE_IP30 2 #define IOC3_CLASS_MENET_123 3 #define IOC3_CLASS_MENET_4 4 #define IOC3_CLASS_CADDUO 5 #define IOC3_CLASS_SERIAL 6 /* One of these per IOC3 */ struct ioc3_driver_data { struct list_head list; int id; /* IOC3 sequence number */ /* PCI mapping */ unsigned long pma; /* physical address */ struct ioc3 __iomem *vma; /* pointer to registers */ struct pci_dev *pdev; /* PCI device */ /* IRQ stuff */ int dual_irq; /* set if separate IRQs are used */ int irq_io, irq_eth; /* IRQ numbers */ /* GPIO magic */ spinlock_t gpio_lock; unsigned int gpdr_shadow; /* NIC identifiers */ char nic_part[32]; char nic_serial[16]; char nic_mac[6]; /* submodule set */ int class; void *data[IOC3_MAX_SUBMODULES]; /* for submodule use */ int active[IOC3_MAX_SUBMODULES]; /* set if probe succeeds */ /* ir_lock must be held while * modifying sio_ie values, so * we can be sure that sio_ie is * not changing when we read it * along with sio_ir. */ spinlock_t ir_lock; /* SIO_IE[SC] mod lock */ }; /* One per submodule */ struct ioc3_submodule { char *name; /* descriptive submodule name */ struct module *owner; /* owning kernel module */ int ethernet; /* set for ethernet drivers */ int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *); int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *); int id; /* assigned by IOC3, index for the "data" array */ /* IRQ stuff */ unsigned int irq_mask; /* IOC3 IRQ mask, leave clear for Ethernet */ int reset_mask; /* non-zero if you want the ioc3.c module to reset interrupts */ int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); /* private submodule data */ void *data; /* assigned by submodule */ }; /********************************** * Functions needed by submodules * **********************************/ #define IOC3_W_IES 0 #define IOC3_W_IEC 1 /* registers a submodule for all existing and future IOC3 chips */ extern int ioc3_register_submodule(struct ioc3_submodule *); /* unregisters a submodule */ extern void ioc3_unregister_submodule(struct ioc3_submodule *); /* enables IRQs indicated by irq_mask for a specified IOC3 chip */ extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); /* acknowledges specified IRQs */ extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); /* disables IRQs indicated by irq_mask for a specified IOC3 chip */ extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); /* atomically sets GPCR bits */ extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int); /* general ireg writer */ extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int reg); #endif kcov.h 0000644 00000002204 14722070374 0005663 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KCOV_H #define _LINUX_KCOV_H #include <uapi/linux/kcov.h> struct task_struct; #ifdef CONFIG_KCOV enum kcov_mode { /* Coverage collection is not enabled yet. */ KCOV_MODE_DISABLED = 0, /* KCOV was initialized, but tracing mode hasn't been chosen yet. */ KCOV_MODE_INIT = 1, /* * Tracing coverage collection mode. * Covered PCs are collected in a per-task buffer. */ KCOV_MODE_TRACE_PC = 2, /* Collecting comparison operands mode.
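 * (Comparison operands are exported through the same per-task buffer
 * as PCs.) As a rough sketch of how user space typically drives the
 * PC-tracing mode above through the uapi/linux/kcov.h interface
 * (COVER_SIZE is an arbitrary illustrative buffer size, in entries):
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL,
 *			COVER_SIZE * sizeof(unsigned long),
 *			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	// ... run the code under test; cover[0] holds the number of
 *	// collected PCs, cover[1..n] the PCs themselves ...
 *	ioctl(fd, KCOV_DISABLE, 0);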
*/ KCOV_MODE_TRACE_CMP = 3, }; #define KCOV_IN_CTXSW (1 << 30) void kcov_task_init(struct task_struct *t); void kcov_task_exit(struct task_struct *t); #define kcov_prepare_switch(t) \ do { \ (t)->kcov_mode |= KCOV_IN_CTXSW; \ } while (0) #define kcov_finish_switch(t) \ do { \ (t)->kcov_mode &= ~KCOV_IN_CTXSW; \ } while (0) #else static inline void kcov_task_init(struct task_struct *t) {} static inline void kcov_task_exit(struct task_struct *t) {} static inline void kcov_prepare_switch(struct task_struct *t) {} static inline void kcov_finish_switch(struct task_struct *t) {} #endif /* CONFIG_KCOV */ #endif /* _LINUX_KCOV_H */ vt_kern.h 0000644 00000014056 14722070374 0006401 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _VT_KERN_H #define _VT_KERN_H /* * this really is an extension of the vc_cons structure in console.c, but * with information needed by the vt package */ #include <linux/vt.h> #include <linux/kd.h> #include <linux/tty.h> #include <linux/mutex.h> #include <linux/console_struct.h> #include <linux/mm.h> #include <linux/consolemap.h> #include <linux/notifier.h> /* * Presently, a lot of graphics programs do not restore the contents of * the higher font pages. Defining this flag will avoid use of them, but * will lose support for PIO_FONTRESET. Note that many font operations are * not likely to work with these programs anyway; they need to be * fixed. The linux/Documentation directory includes a code snippet * to save and restore the text font. */ #ifdef CONFIG_VGA_CONSOLE #define BROKEN_GRAPHICS_PROGRAMS 1 #endif extern void kd_mksound(unsigned int hz, unsigned int ticks); extern int kbd_rate(struct kbd_repeat *rep); extern int fg_console, last_console, want_console; /* console.c */ int vc_allocate(unsigned int console); int vc_cons_allocated(unsigned int console); int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines); struct vc_data *vc_deallocate(unsigned int console); void reset_palette(struct vc_data *vc); void do_blank_screen(int entering_gfx); void do_unblank_screen(int leaving_gfx); void unblank_screen(void); void poke_blanked_console(void); int con_font_op(struct vc_data *vc, struct console_font_op *op); int con_set_cmap(unsigned char __user *cmap); int con_get_cmap(unsigned char __user *cmap); void scrollback(struct vc_data *vc); void scrollfront(struct vc_data *vc, int lines); void clear_buffer_attributes(struct vc_data *vc); void update_region(struct vc_data *vc, unsigned long start, int count); void redraw_screen(struct vc_data *vc, int is_switch); #define update_screen(x) redraw_screen(x, 0) #define switch_screen(x) redraw_screen(x, 1) struct tty_struct; int tioclinux(struct tty_struct *tty, unsigned long arg); #ifdef CONFIG_CONSOLE_TRANSLATIONS /* consolemap.c */ struct unipair; int con_set_trans_old(unsigned char __user * table); int con_get_trans_old(unsigned char __user * table); int con_set_trans_new(unsigned short __user * table); int con_get_trans_new(unsigned short __user * table); int con_clear_unimap(struct vc_data *vc); int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list); int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list); int con_set_default_unimap(struct vc_data *vc); void con_free_unimap(struct vc_data *vc); int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); #define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ ((vc)->vc_toggle_meta ? 
0x80 : 0)]) #else static inline int con_set_trans_old(unsigned char __user *table) { return 0; } static inline int con_get_trans_old(unsigned char __user *table) { return -EINVAL; } static inline int con_set_trans_new(unsigned short __user *table) { return 0; } static inline int con_get_trans_new(unsigned short __user *table) { return -EINVAL; } static inline int con_clear_unimap(struct vc_data *vc) { return 0; } static inline int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) { return 0; } static inline int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list) { return -EINVAL; } static inline int con_set_default_unimap(struct vc_data *vc) { return 0; } static inline void con_free_unimap(struct vc_data *vc) { } static inline void con_protect_unimap(struct vc_data *vc, int rdonly) { } static inline int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc) { return 0; } #define vc_translate(vc, c) (c) #endif /* vt.c */ void vt_event_post(unsigned int event, unsigned int old, unsigned int new); int vt_waitactive(int n); void change_console(struct vc_data *new_vc); void reset_vc(struct vc_data *vc, int mode); extern int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt); int vty_init(const struct file_operations *console_fops); extern bool vt_dont_switch; extern int default_utf8; extern int global_cursor_default; struct vt_spawn_console { spinlock_t lock; struct pid *pid; int sig; }; extern struct vt_spawn_console vt_spawn_con; extern int vt_move_to_console(unsigned int vt, int alloc); /* Interfaces for VC notification of character events (for accessibility etc) */ struct vt_notifier_param { struct vc_data *vc; /* VC on which the update happened */ unsigned int c; /* Printed char */ }; extern int register_vt_notifier(struct notifier_block *nb); extern int unregister_vt_notifier(struct notifier_block *nb); extern void hide_boot_cursor(bool hide); /* keyboard provided interfaces */ extern int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm); extern int vt_do_kdskbmode(int console, unsigned int arg); extern int vt_do_kdskbmeta(int console, unsigned int arg); extern int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc, int perm); extern int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, int console); extern int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm); extern int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm); extern int vt_do_kdgkbmode(int console); extern int vt_do_kdgkbmeta(int console); extern void vt_reset_unicode(int console); extern int vt_get_shift_state(void); extern void vt_reset_keyboard(int console); extern int vt_get_leds(int console, int flag); extern int vt_get_kbd_mode_bit(int console, int bit); extern void vt_set_kbd_mode_bit(int console, int bit); extern void vt_clr_kbd_mode_bit(int console, int bit); extern void vt_set_led_state(int console, int leds); extern void vt_kbd_con_start(int console); extern void vt_kbd_con_stop(int console); void vc_scrolldelta_helper(struct vc_data *c, int lines, unsigned int rolled_over, void *_base, unsigned int size); #endif /* _VT_KERN_H */ personality.h 0000644 00000000611 14722070374 0007272 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PERSONALITY_H #define _LINUX_PERSONALITY_H #include <uapi/linux/personality.h> /* * Return the base personality without flags.
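 * For instance (illustrative), since the flag bits live above PER_MASK,
 * a task whose personality is (PER_LINUX | STICKY_TIMEOUTS) still
 * satisfies:
 *
 *	personality(current->personality) == PER_LINUX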
*/ #define personality(pers) (pers & PER_MASK) /* * Change personality of the currently running process. */ #define set_personality(pers) (current->personality = (pers)) #endif /* _LINUX_PERSONALITY_H */ backing-dev.h 0000644 00000033657 14722070374 0007113 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/backing-dev.h * * low-level device information and state which is propagated up through * to high-level code. */ #ifndef _LINUX_BACKING_DEV_H #define _LINUX_BACKING_DEV_H #include <linux/kernel.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/blkdev.h> #include <linux/device.h> #include <linux/writeback.h> #include <linux/blk-cgroup.h> #include <linux/backing-dev-defs.h> #include <linux/slab.h> static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi) { kref_get(&bdi->refcnt); return bdi; } struct backing_dev_info *bdi_get_by_id(u64 id); void bdi_put(struct backing_dev_info *bdi); __printf(2, 3) int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...); __printf(2, 0) int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args); int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner); void bdi_unregister(struct backing_dev_info *bdi); struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id); static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask) { return bdi_alloc_node(gfp_mask, NUMA_NO_NODE); } void wb_start_background_writeback(struct bdi_writeback *wb); void wb_workfn(struct work_struct *work); void wb_wakeup_delayed(struct bdi_writeback *wb); void wb_wait_for_completion(struct wb_completion *done); extern spinlock_t bdi_lock; extern struct list_head bdi_list; extern struct workqueue_struct *bdi_wq; extern struct workqueue_struct *bdi_async_bio_wq; static inline bool wb_has_dirty_io(struct bdi_writeback *wb) { return test_bit(WB_has_dirty_io, &wb->state); } static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi) { /* * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are * any dirty wbs. See wb_update_write_bandwidth(). */ return atomic_long_read(&bdi->tot_write_bandwidth); } static inline void __add_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item, s64 amount) { percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH); } static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { __add_wb_stat(wb, item, 1); } static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { __add_wb_stat(wb, item, -1); } static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { return percpu_counter_read_positive(&wb->stat[item]); } static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item) { return percpu_counter_sum_positive(&wb->stat[item]); } extern void wb_writeout_inc(struct bdi_writeback *wb); /* * maximal error of a stat counter. 
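 * As a worked illustration (assuming, purely for the arithmetic, a
 * 4-CPU system and a WB_STAT_BATCH of 32): each per-CPU counter may lag
 * the global sum by up to one batch, so a wb_stat() reading can be off
 * by at most nr_cpu_ids * WB_STAT_BATCH = 4 * 32 = 128 on SMP, and by
 * at most 1 on UP.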
*/ static inline unsigned long wb_stat_error(void) { #ifdef CONFIG_SMP return nr_cpu_ids * WB_STAT_BATCH; #else return 1; #endif } int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio); int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); /* * Flags in backing_dev_info::capability * * The first three flags control whether dirty pages will contribute to the * VM's accounting and whether writepages() should be called for dirty pages * (something that would not, for example, be appropriate for ramfs) * * WARNING: these flags are closely related and should not normally be * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these * three flags into a single convenience macro. * * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting * BDI_CAP_NO_WRITEBACK: Don't write pages back * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. * * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback. * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be * inefficient. */ #define BDI_CAP_NO_ACCT_DIRTY 0x00000001 #define BDI_CAP_NO_WRITEBACK 0x00000002 #define BDI_CAP_NO_ACCT_WB 0x00000004 #define BDI_CAP_STABLE_WRITES 0x00000008 #define BDI_CAP_STRICTLIMIT 0x00000010 #define BDI_CAP_CGROUP_WRITEBACK 0x00000020 #define BDI_CAP_SYNCHRONOUS_IO 0x00000040 #define BDI_CAP_NO_ACCT_AND_WRITEBACK \ (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) extern struct backing_dev_info noop_backing_dev_info; /** * writeback_in_progress - determine whether there is writeback in progress * @wb: bdi_writeback of interest * * Determine whether there is writeback waiting to be handled against a * bdi_writeback. 
*/ static inline bool writeback_in_progress(struct bdi_writeback *wb) { return test_bit(WB_writeback_running, &wb->state); } static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) { struct super_block *sb; if (!inode) return &noop_backing_dev_info; sb = inode->i_sb; #ifdef CONFIG_BLOCK if (sb_is_blkdev_sb(sb)) return I_BDEV(inode)->bd_bdi; #endif return sb->s_bdi; } static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) { struct backing_dev_info *bdi = wb->bdi; if (bdi->congested_fn) return bdi->congested_fn(bdi->congested_data, cong_bits); return wb->congested->state & cong_bits; } long congestion_wait(int sync, long timeout); long wait_iff_congested(int sync, long timeout); static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi) { return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO; } static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi) { return bdi->capabilities & BDI_CAP_STABLE_WRITES; } static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi) { return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK); } static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi) { return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY); } static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi) { /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */ return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB | BDI_CAP_NO_WRITEBACK)); } static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) { return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host)); } static inline bool mapping_cap_account_dirty(struct address_space *mapping) { return bdi_cap_account_dirty(inode_to_bdi(mapping->host)); } static inline int bdi_sched_wait(void *word) { schedule(); return 0; } #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback_congested * wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp); void wb_congested_put(struct bdi_writeback_congested *congested); struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css); struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css, gfp_t gfp); void wb_memcg_offline(struct mem_cgroup *memcg); void wb_blkcg_offline(struct blkcg *blkcg); int inode_congested(struct inode *inode, int cong_bits); /** * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode * @inode: inode of interest * * cgroup writeback requires support from both the bdi and filesystem. * Also, both memcg and iocg have to be on the default hierarchy. Test * whether all conditions are met. * * Note that the test result may change dynamically on the same inode * depending on how memcg and iocg are configured. */ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); return cgroup_subsys_on_dfl(memory_cgrp_subsys) && cgroup_subsys_on_dfl(io_cgrp_subsys) && bdi_cap_account_dirty(bdi) && (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } /** * wb_find_current - find wb for %current on a bdi * @bdi: bdi of interest * * Find the wb of @bdi which matches both the memcg and blkcg of %current. * Must be called under rcu_read_lock() which protects the returned wb. * Returns NULL if not found.
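 * A minimal calling sketch (illustrative only):
 *
 *	struct bdi_writeback *wb;
 *
 *	rcu_read_lock();
 *	wb = wb_find_current(bdi);
 *	if (wb) {
 *		// ... use wb; it is only guaranteed to stay valid
 *		// while rcu_read_lock() is held ...
 *	}
 *	rcu_read_unlock();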
*/ static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { struct cgroup_subsys_state *memcg_css; struct bdi_writeback *wb; memcg_css = task_css(current, memory_cgrp_id); if (!memcg_css->parent) return &bdi->wb; wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); /* * %current's blkcg equals the effective blkcg of its memcg. No * need to use the relatively expensive cgroup_get_e_css(). */ if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id))) return wb; return NULL; } /** * wb_get_create_current - get or create wb for %current on a bdi * @bdi: bdi of interest * @gfp: allocation mask * * Equivalent to wb_get_create() on %current's memcg. This function is * called from a relatively hot path and optimizes the common cases using * wb_find_current(). */ static inline struct bdi_writeback * wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) { struct bdi_writeback *wb; rcu_read_lock(); wb = wb_find_current(bdi); if (wb && unlikely(!wb_tryget(wb))) wb = NULL; rcu_read_unlock(); if (unlikely(!wb)) { struct cgroup_subsys_state *memcg_css; memcg_css = task_get_css(current, memory_cgrp_id); wb = wb_get_create(bdi, memcg_css, gfp); css_put(memcg_css); } return wb; } /** * inode_to_wb_is_valid - test whether an inode has a wb associated * @inode: inode of interest * * Returns %true if @inode has a wb associated. May be called without any * locking. */ static inline bool inode_to_wb_is_valid(struct inode *inode) { return inode->i_wb; } /** * inode_to_wb - determine the wb of an inode * @inode: inode of interest * * Returns the wb @inode is currently associated with. The caller must be * holding either @inode->i_lock, the i_pages lock, or the * associated wb's list_lock. */ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&inode->i_lock) && !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && !lockdep_is_held(&inode->i_wb->list_lock))); #endif return inode->i_wb; } /** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode * @cookie: output param, to be passed to the end function * * The caller wants to access the wb associated with @inode but isn't * holding inode->i_lock, the i_pages lock or wb->list_lock. This * function determines the wb associated with @inode and ensures that the * association doesn't change until the transaction is finished with * unlocked_inode_to_wb_end(). * * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and * can't sleep during the transaction. IRQs may or may not be disabled on * return. */ static inline struct bdi_writeback * unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { rcu_read_lock(); /* * Paired with store_release in inode_switch_wbs_work_fn() and * ensures that we see the new wb if we see cleared I_WB_SWITCH. */ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; if (unlikely(cookie->locked)) xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); /* * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages * lock. inode_to_wb() will bark. Deref directly. 
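 * The full transaction described in the kernel-doc above then looks
 * roughly like (sketch only):
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	// ... access wb without sleeping ...
 *	unlocked_inode_to_wb_end(inode, &cookie);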
*/ return inode->i_wb; } /** * unlocked_inode_to_wb_end - end inode wb access transaction * @inode: target inode * @cookie: @cookie from unlocked_inode_to_wb_begin() */ static inline void unlocked_inode_to_wb_end(struct inode *inode, struct wb_lock_cookie *cookie) { if (unlikely(cookie->locked)) xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); rcu_read_unlock(); } #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) { return false; } static inline struct bdi_writeback_congested * wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) { refcount_inc(&bdi->wb_congested->refcnt); return bdi->wb_congested; } static inline void wb_congested_put(struct bdi_writeback_congested *congested) { if (refcount_dec_and_test(&congested->refcnt)) kfree(congested); } static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { return &bdi->wb; } static inline struct bdi_writeback * wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) { return &bdi->wb; } static inline bool inode_to_wb_is_valid(struct inode *inode) { return true; } static inline struct bdi_writeback *inode_to_wb(struct inode *inode) { return &inode_to_bdi(inode)->wb; } static inline struct bdi_writeback * unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { return inode_to_wb(inode); } static inline void unlocked_inode_to_wb_end(struct inode *inode, struct wb_lock_cookie *cookie) { } static inline void wb_memcg_offline(struct mem_cgroup *memcg) { } static inline void wb_blkcg_offline(struct blkcg *blkcg) { } static inline int inode_congested(struct inode *inode, int cong_bits) { return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); } #endif /* CONFIG_CGROUP_WRITEBACK */ static inline int inode_read_congested(struct inode *inode) { return inode_congested(inode, 1 << WB_sync_congested); } static inline int inode_write_congested(struct inode *inode) { return inode_congested(inode, 1 << WB_async_congested); } static inline int inode_rw_congested(struct inode *inode) { return inode_congested(inode, (1 << WB_sync_congested) | (1 << WB_async_congested)); } static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits) { return wb_congested(&bdi->wb, cong_bits); } static inline int bdi_read_congested(struct backing_dev_info *bdi) { return bdi_congested(bdi, 1 << WB_sync_congested); } static inline int bdi_write_congested(struct backing_dev_info *bdi) { return bdi_congested(bdi, 1 << WB_async_congested); } static inline int bdi_rw_congested(struct backing_dev_info *bdi) { return bdi_congested(bdi, (1 << WB_sync_congested) | (1 << WB_async_congested)); } const char *bdi_dev_name(struct backing_dev_info *bdi); #endif /* _LINUX_BACKING_DEV_H */ tifm.h 0000644 00000011243 14722070374 0005663 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * tifm.h - TI FlashMedia driver * * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com> */ #ifndef _TIFM_H #define _TIFM_H #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/workqueue.h> /* Host registers (relative to pci base address): */ enum { FM_SET_INTERRUPT_ENABLE = 0x008, FM_CLEAR_INTERRUPT_ENABLE = 0x00c, FM_INTERRUPT_STATUS = 0x014 }; /* Socket registers (relative to socket base address): */ enum { SOCK_CONTROL = 0x004, SOCK_PRESENT_STATE = 0x008, SOCK_DMA_ADDRESS = 0x00c, SOCK_DMA_CONTROL = 0x010, SOCK_DMA_FIFO_INT_ENABLE_SET = 0x014, SOCK_DMA_FIFO_INT_ENABLE_CLEAR = 0x018, 
SOCK_DMA_FIFO_STATUS = 0x020, SOCK_FIFO_CONTROL = 0x024, SOCK_FIFO_PAGE_SIZE = 0x028, SOCK_MMCSD_COMMAND = 0x104, SOCK_MMCSD_ARG_LOW = 0x108, SOCK_MMCSD_ARG_HIGH = 0x10c, SOCK_MMCSD_CONFIG = 0x110, SOCK_MMCSD_STATUS = 0x114, SOCK_MMCSD_INT_ENABLE = 0x118, SOCK_MMCSD_COMMAND_TO = 0x11c, SOCK_MMCSD_DATA_TO = 0x120, SOCK_MMCSD_DATA = 0x124, SOCK_MMCSD_BLOCK_LEN = 0x128, SOCK_MMCSD_NUM_BLOCKS = 0x12c, SOCK_MMCSD_BUFFER_CONFIG = 0x130, SOCK_MMCSD_SPI_CONFIG = 0x134, SOCK_MMCSD_SDIO_MODE_CONFIG = 0x138, SOCK_MMCSD_RESPONSE = 0x144, SOCK_MMCSD_SDIO_SR = 0x164, SOCK_MMCSD_SYSTEM_CONTROL = 0x168, SOCK_MMCSD_SYSTEM_STATUS = 0x16c, SOCK_MS_COMMAND = 0x184, SOCK_MS_DATA = 0x188, SOCK_MS_STATUS = 0x18c, SOCK_MS_SYSTEM = 0x190, SOCK_FIFO_ACCESS = 0x200 }; #define TIFM_CTRL_LED 0x00000040 #define TIFM_CTRL_FAST_CLK 0x00000100 #define TIFM_CTRL_POWER_MASK 0x00000007 #define TIFM_SOCK_STATE_OCCUPIED 0x00000008 #define TIFM_SOCK_STATE_POWERED 0x00000080 #define TIFM_FIFO_ENABLE 0x00000001 #define TIFM_FIFO_READY 0x00000001 #define TIFM_FIFO_MORE 0x00000008 #define TIFM_FIFO_INT_SETALL 0x0000ffff #define TIFM_FIFO_INTMASK 0x00000005 #define TIFM_DMA_RESET 0x00000002 #define TIFM_DMA_TX 0x00008000 #define TIFM_DMA_EN 0x00000001 #define TIFM_DMA_TSIZE 0x0000007f #define TIFM_TYPE_XD 1 #define TIFM_TYPE_MS 2 #define TIFM_TYPE_SD 3 struct tifm_device_id { unsigned char type; }; struct tifm_driver; struct tifm_dev { char __iomem *addr; spinlock_t lock; unsigned char type; unsigned int socket_id; void (*card_event)(struct tifm_dev *sock); void (*data_event)(struct tifm_dev *sock); struct device dev; }; struct tifm_driver { struct tifm_device_id *id_table; int (*probe)(struct tifm_dev *dev); void (*remove)(struct tifm_dev *dev); int (*suspend)(struct tifm_dev *dev, pm_message_t state); int (*resume)(struct tifm_dev *dev); struct device_driver driver; }; struct tifm_adapter { char __iomem *addr; spinlock_t lock; unsigned int irq_status; unsigned int socket_change_set; unsigned int id; unsigned int num_sockets; struct completion *finish_me; struct work_struct media_switcher; struct device dev; void (*eject)(struct tifm_adapter *fm, struct tifm_dev *sock); int (*has_ms_pif)(struct tifm_adapter *fm, struct tifm_dev *sock); struct tifm_dev *sockets[0]; }; struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, struct device *dev); int tifm_add_adapter(struct tifm_adapter *fm); void tifm_remove_adapter(struct tifm_adapter *fm); void tifm_free_adapter(struct tifm_adapter *fm); void tifm_free_device(struct device *dev); struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id, unsigned char type); int tifm_register_driver(struct tifm_driver *drv); void tifm_unregister_driver(struct tifm_driver *drv); void tifm_eject(struct tifm_dev *sock); int tifm_has_ms_pif(struct tifm_dev *sock); int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents, int direction); void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents, int direction); void tifm_queue_work(struct work_struct *work); static inline void *tifm_get_drvdata(struct tifm_dev *dev) { return dev_get_drvdata(&dev->dev); } static inline void tifm_set_drvdata(struct tifm_dev *dev, void *data) { dev_set_drvdata(&dev->dev, data); } #endif adfs_fs.h 0000644 00000001076 14722070374 0006334 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ADFS_FS_H #define _ADFS_FS_H #include <uapi/linux/adfs_fs.h> /* * Calculate the boot block checksum on an ADFS drive. 
Note that this will * appear to be correct if the sector contains all zeros, so also check that * the disk size is non-zero!!! */ static inline int adfs_checkbblk(unsigned char *ptr) { unsigned int result = 0; unsigned char *p = ptr + 511; do { result = (result & 0xff) + (result >> 8); result = result + *--p; } while (p != ptr); return (result & 0xff) != ptr[511]; } #endif smc91x.h 0000644 00000003104 14722070374 0006045 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SMC91X_H__ #define __SMC91X_H__ /* * These bits define which access sizes a platform can support, rather * than the maximal access size. So, if your platform can do 16-bit * and 32-bit accesses to the SMC91x device, but not 8-bit, set both * SMC91X_USE_16BIT and SMC91X_USE_32BIT. * * The SMC91x driver requires at least one of SMC91X_USE_8BIT or * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is * an invalid configuration. */ #define SMC91X_USE_8BIT (1 << 0) #define SMC91X_USE_16BIT (1 << 1) #define SMC91X_USE_32BIT (1 << 2) #define SMC91X_NOWAIT (1 << 3) /* two bits for IO_SHIFT, let's hope later designs will keep this sane */ #define SMC91X_IO_SHIFT_0 (0 << 4) #define SMC91X_IO_SHIFT_1 (1 << 4) #define SMC91X_IO_SHIFT_2 (2 << 4) #define SMC91X_IO_SHIFT_3 (3 << 4) #define SMC91X_IO_SHIFT(x) (((x) >> 4) & 0x3) #define SMC91X_USE_DMA (1 << 6) #define RPC_LED_100_10 (0x00) /* LED = 100Mbps OR's with 10Mbps link detect */ #define RPC_LED_RES (0x01) /* LED = Reserved */ #define RPC_LED_10 (0x02) /* LED = 10Mbps link detect */ #define RPC_LED_FD (0x03) /* LED = Full Duplex Mode */ #define RPC_LED_TX_RX (0x04) /* LED = TX or RX packet occurred */ #define RPC_LED_100 (0x05) /* LED = 100Mbps link detect */ #define RPC_LED_TX (0x06) /* LED = TX packet occurred */ #define RPC_LED_RX (0x07) /* LED = RX packet occurred */ struct smc91x_platdata { unsigned long flags; unsigned char leda; unsigned char ledb; bool pxa_u16_align4; /* PXA buggy u16 writes on 4*n+2 addresses */ }; #endif /* __SMC91X_H__ */ nfs_fs.h 0000644 00000042312 14722070374 0006203 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/nfs_fs.h * * Copyright (C) 1992 Rick Sladkey * * OS-specific nfs filesystem definitions and declarations */ #ifndef _LINUX_NFS_FS_H #define _LINUX_NFS_FS_H #include <uapi/linux/nfs_fs.h> /* * Enable dprintk() debugging support for nfs client. 
*/ #ifdef CONFIG_NFS_DEBUG # define NFS_DEBUG #endif #include <linux/in.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/rbtree.h> #include <linux/refcount.h> #include <linux/rwsem.h> #include <linux/wait.h> #include <linux/sunrpc/debug.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/clnt.h> #include <linux/nfs.h> #include <linux/nfs2.h> #include <linux/nfs3.h> #include <linux/nfs4.h> #include <linux/nfs_xdr.h> #include <linux/nfs_fs_sb.h> #include <linux/mempool.h> /* * These are the default flags for swap requests */ #define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) /* * NFSv3/v4 Access mode cache entry */ struct nfs_access_entry { struct rb_node rb_node; struct list_head lru; const struct cred * cred; u64 timestamp; __u32 mask; struct rcu_head rcu_head; }; struct nfs_lock_context { refcount_t count; struct list_head list; struct nfs_open_context *open_context; fl_owner_t lockowner; atomic_t io_count; struct rcu_head rcu_head; }; struct nfs4_state; struct nfs_open_context { struct nfs_lock_context lock_context; fl_owner_t flock_owner; struct dentry *dentry; const struct cred *cred; struct rpc_cred __rcu *ll_cred; /* low-level cred - use to check for expiry */ struct nfs4_state *state; fmode_t mode; unsigned long flags; #define NFS_CONTEXT_RESEND_WRITES (1) #define NFS_CONTEXT_BAD (2) #define NFS_CONTEXT_UNLOCK (3) #define NFS_CONTEXT_FILE_OPEN (4) int error; struct list_head list; struct nfs4_threshold *mdsthreshold; struct rcu_head rcu_head; }; struct nfs_open_dir_context { struct list_head list; const struct cred *cred; unsigned long attr_gencount; __u64 dir_cookie; __u64 dup_cookie; signed char duped; }; /* * NFSv4 delegation */ struct nfs_delegation; struct posix_acl; /* * nfs fs inode data in memory */ struct nfs_inode { /* * The 64bit 'inode number' */ __u64 fileid; /* * NFS file handle */ struct nfs_fh fh; /* * Various flags */ unsigned long flags; /* atomic bit ops */ unsigned long cache_validity; /* bit mask */ /* * read_cache_jiffies is when we started read-caching this inode. * attrtimeo is for how long the cached information is assumed * to be valid. A successful attribute revalidation doubles * attrtimeo (up to acregmax/acdirmax), a failure resets it to * acregmin/acdirmin. * * We need to revalidate the cached attrs for this inode if * * jiffies - read_cache_jiffies >= attrtimeo * * Please note the comparison is greater than or equal * so that zero timeout values can be specified. */ unsigned long read_cache_jiffies; unsigned long attrtimeo; unsigned long attrtimeo_timestamp; unsigned long attr_gencount; /* "Generation counter" for the attribute cache. This is * bumped whenever we update the metadata on the * server. 
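 * (As a concrete illustration of the attrtimeo back-off described in
 * the read_cache_jiffies comment above: with the traditional mount
 * defaults of acregmin=3s and acregmax=60s, successive successful
 * revalidations stretch attrtimeo 3s -> 6s -> 12s -> 24s -> 48s -> 60s,
 * capped at acregmax, while a single failed revalidation drops it
 * straight back to 3s.)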
*/ unsigned long cache_change_attribute; struct rb_root access_cache; struct list_head access_cache_entry_lru; struct list_head access_cache_inode_lru; /* * This is the cookie verifier used for NFSv3 readdir * operations */ __be32 cookieverf[2]; atomic_long_t nrequests; struct nfs_mds_commit_info commit_info; /* Open contexts for shared mmap writes */ struct list_head open_files; /* Readers: in-flight sillydelete RPC calls */ /* Writers: rmdir */ struct rw_semaphore rmdir_sem; struct mutex commit_mutex; #if IS_ENABLED(CONFIG_NFS_V4) struct nfs4_cached_acl *nfs4_acl; /* NFSv4 state */ struct list_head open_states; struct nfs_delegation __rcu *delegation; struct rw_semaphore rwsem; /* pNFS layout information */ struct pnfs_layout_hdr *layout; #endif /* CONFIG_NFS_V4*/ /* how many bytes have been written/read and how many bytes queued up */ __u64 write_io; __u64 read_io; #ifdef CONFIG_NFS_FSCACHE struct fscache_cookie *fscache; #endif struct inode vfs_inode; }; struct nfs4_copy_state { struct list_head copies; nfs4_stateid stateid; struct completion completion; uint64_t count; struct nfs_writeverf verf; int error; int flags; struct nfs4_state *parent_state; }; /* * Access bit flags */ #define NFS_ACCESS_READ 0x0001 #define NFS_ACCESS_LOOKUP 0x0002 #define NFS_ACCESS_MODIFY 0x0004 #define NFS_ACCESS_EXTEND 0x0008 #define NFS_ACCESS_DELETE 0x0010 #define NFS_ACCESS_EXECUTE 0x0020 /* * Cache validity bit flags */ #define NFS_INO_INVALID_DATA BIT(1) /* cached data is invalid */ #define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */ #define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */ #define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */ #define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */ #define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */ #define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */ #define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */ #define NFS_INO_INVALID_CTIME BIT(9) /* cached ctime is invalid */ #define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */ #define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */ #define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ #define NFS_INO_DATA_INVAL_DEFER \ BIT(13) /* Deferred cache invalidation */ #define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ | NFS_INO_INVALID_MTIME \ | NFS_INO_INVALID_SIZE \ | NFS_INO_INVALID_OTHER) /* inode metadata is invalid */ /* * Bit offsets in flags field */ #define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */ #define NFS_INO_STALE (1) /* possible stale inode */ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ #define NFS_INO_ODIRECT (12) /* I/O setting is O_DIRECT */ static inline struct nfs_inode *NFS_I(const struct inode *inode) { return container_of(inode, struct nfs_inode, vfs_inode); } static inline struct nfs_server *NFS_SB(const struct super_block *s) { return (struct nfs_server *)(s->s_fs_info); } static inline struct nfs_fh *NFS_FH(const struct inode 
*inode) { return &NFS_I(inode)->fh; } static inline struct nfs_server *NFS_SERVER(const struct inode *inode) { return NFS_SB(inode->i_sb); } static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode) { return NFS_SERVER(inode)->client; } static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode) { return NFS_SERVER(inode)->nfs_client->rpc_ops; } static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode) { struct nfs_server *nfss = NFS_SERVER(inode); return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin; } static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode) { struct nfs_server *nfss = NFS_SERVER(inode); return S_ISDIR(inode->i_mode) ? nfss->acdirmax : nfss->acregmax; } static inline int NFS_STALE(const struct inode *inode) { return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); } static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode) { #ifdef CONFIG_NFS_FSCACHE return NFS_I(inode)->fscache; #else return NULL; #endif } static inline __u64 NFS_FILEID(const struct inode *inode) { return NFS_I(inode)->fileid; } static inline void set_nfs_fileid(struct inode *inode, __u64 fileid) { NFS_I(inode)->fileid = fileid; } static inline void nfs_mark_for_revalidate(struct inode *inode) { struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME; if (S_ISDIR(inode->i_mode)) nfsi->cache_validity |= NFS_INO_INVALID_DATA; spin_unlock(&inode->i_lock); } static inline int nfs_server_capable(struct inode *inode, int cap) { return NFS_SERVER(inode)->caps & cap; } static inline void nfs_set_verifier(struct dentry *dentry, unsigned long verf) { dentry->d_time = verf; } /** * nfs_save_change_attribute - Returns the inode attribute change cookie * @dir: pointer to parent directory inode * The "change attribute" is updated every time we finish an operation * that will result in a metadata change on the server. */ static inline unsigned long nfs_save_change_attribute(struct inode *dir) { return NFS_I(dir)->cache_change_attribute; } /** * nfs_verify_change_attribute - Detects NFS remote directory changes * @dir: pointer to parent directory inode * @chattr: previously saved change attribute * Returns "false" if the verifier doesn't match the change attribute. * This would usually indicate that the directory contents have changed on * the server, and that any dentries need revalidating.
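 * The two helpers form a save/verify pair, roughly (sketch; locking
 * and error handling elided):
 *
 *	unsigned long chattr = nfs_save_change_attribute(dir);
 *
 *	// ... perform an operation that should leave dir unchanged ...
 *
 *	if (!nfs_verify_change_attribute(dir, chattr)) {
 *		// dir changed on the server; cached dentries need
 *		// revalidating
 *	}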
*/ static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr) { return chattr == NFS_I(dir)->cache_change_attribute; } /* * linux/fs/nfs/inode.c */ extern int nfs_sync_mapping(struct address_space *mapping); extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping); extern void nfs_zap_caches(struct inode *); extern void nfs_invalidate_atime(struct inode *); extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *); extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int); extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); extern int nfs_permission(struct inode *, int); extern int nfs_open(struct inode *, struct file *); extern int nfs_attribute_cache_expired(struct inode *inode); extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern bool nfs_mapping_need_revalidate_inode(struct inode *inode); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_revalidate_mapping_rcu(struct inode *inode); extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, struct nfs4_label *label); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode); extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp); extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); extern void nfs_file_clear_open_context(struct file *filp); extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); extern u64 nfs_compat_user_ino64(u64 fileid); extern void nfs_fattr_init(struct nfs_fattr *fattr); extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr); extern unsigned long nfs_inc_attr_generation_counter(void); extern struct nfs_fattr *nfs_alloc_fattr(void); static inline void nfs_free_fattr(const struct nfs_fattr *fattr) { kfree(fattr); } extern struct nfs_fh *nfs_alloc_fhandle(void); static inline void nfs_free_fhandle(const struct nfs_fh *fh) { kfree(fh); } #ifdef NFS_DEBUG extern u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh); static inline u32 nfs_display_fhandle_hash(const struct nfs_fh *fh) { return _nfs_display_fhandle_hash(fh); } extern void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption); #define nfs_display_fhandle(fh, caption) \ do { \ if (unlikely(nfs_debug & NFSDBG_FACILITY)) \
_nfs_display_fhandle(fh, caption); \ } while (0) #else static inline u32 nfs_display_fhandle_hash(const struct nfs_fh *fh) { return 0; } static inline void nfs_display_fhandle(const struct nfs_fh *fh, const char *caption) { } #endif /* * linux/fs/nfs/nfsroot.c */ extern int nfs_root_data(char **root_device, char **root_data); /*__init*/ /* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */ extern __be32 root_nfs_parse_addr(char *name); /*__init*/ /* * linux/fs/nfs/file.c */ extern const struct file_operations nfs_file_operations; #if IS_ENABLED(CONFIG_NFS_V4) extern const struct file_operations nfs4_file_operations; #endif /* CONFIG_NFS_V4 */ extern const struct address_space_operations nfs_file_aops; extern const struct address_space_operations nfs_dir_aops; static inline struct nfs_open_context *nfs_file_open_context(struct file *filp) { return filp->private_data; } static inline const struct cred *nfs_file_cred(struct file *file) { if (file != NULL) { struct nfs_open_context *ctx = nfs_file_open_context(file); if (ctx) return ctx->cred; } return NULL; } /* * linux/fs/nfs/direct.c */ extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *); ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, bool swap); ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, bool swap); /* * linux/fs/nfs/dir.c */ extern const struct file_operations nfs_dir_operations; extern const struct dentry_operations nfs_dentry_operations; extern void nfs_force_lookup_revalidate(struct inode *dir); extern struct dentry *nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label); extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label); extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); /* * linux/fs/nfs/symlink.c */ extern const struct inode_operations nfs_symlink_inode_operations; /* * linux/fs/nfs/sysctl.c */ #ifdef CONFIG_SYSCTL extern int nfs_register_sysctl(void); extern void nfs_unregister_sysctl(void); #else #define nfs_register_sysctl() 0 #define nfs_unregister_sysctl() do { } while(0) #endif /* * linux/fs/nfs/namespace.c */ extern const struct inode_operations nfs_mountpoint_inode_operations; extern const struct inode_operations nfs_referral_inode_operations; extern int nfs_mountpoint_expiry_timeout; extern void nfs_release_automount_timer(void); /* * linux/fs/nfs/unlink.c */ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); /* * linux/fs/nfs/write.c */ extern int nfs_congestion_kb; extern int nfs_writepage(struct page *page, struct writeback_control *wbc); extern int nfs_writepages(struct address_space *, struct writeback_control *); extern int nfs_flush_incompatible(struct file *file, struct page *page); extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); /* * Try to write back everything synchronously (but check the * return value!) 
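 * e.g. (illustrative; nfs_wb_all() is declared just below, and out_err
 * is a hypothetical error label):
 *
 *	int err = nfs_wb_all(inode);
 *	if (err)
 *		goto out_err;	// do not silently ignore the failure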
*/ extern int nfs_sync_inode(struct inode *inode); extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, struct page *page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); extern void nfs_commit_free(struct nfs_commit_data *data); static inline int nfs_have_writebacks(struct inode *inode) { return atomic_long_read(&NFS_I(inode)->nrequests) != 0; } /* * linux/fs/nfs/read.c */ extern int nfs_readpage(struct file *, struct page *); extern int nfs_readpages(struct file *, struct address_space *, struct list_head *, unsigned); extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, struct page *); /* * inline functions */ static inline loff_t nfs_size_to_loff_t(__u64 size) { return min_t(u64, size, OFFSET_MAX); } static inline ino_t nfs_fileid_to_ino_t(u64 fileid) { ino_t ino = (ino_t) fileid; if (sizeof(ino_t) < sizeof(u64)) ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8; return ino; } #define NFS_JUKEBOX_RETRY_TIME (5 * HZ) # undef ifdebug # ifdef NFS_DEBUG # define ifdebug(fac) if (unlikely(nfs_debug & NFSDBG_##fac)) # define NFS_IFDEBUG(x) x # else # define ifdebug(fac) if (0) # define NFS_IFDEBUG(x) # endif #endif dm9000.h 0000644 00000001733 14722070374 0005640 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* include/linux/dm9000.h * * Copyright (c) 2004 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * Header file for dm9000 platform data */ #ifndef __DM9000_PLATFORM_DATA #define __DM9000_PLATFORM_DATA __FILE__ #include <linux/if_ether.h> /* IO control flags */ #define DM9000_PLATF_8BITONLY (0x0001) #define DM9000_PLATF_16BITONLY (0x0002) #define DM9000_PLATF_32BITONLY (0x0004) #define DM9000_PLATF_EXT_PHY (0x0008) #define DM9000_PLATF_NO_EEPROM (0x0010) #define DM9000_PLATF_SIMPLE_PHY (0x0020) /* Use NSR to find LinkStatus */ /* platform data for platform device structure's platform_data field */ struct dm9000_plat_data { unsigned int flags; unsigned char dev_addr[ETH_ALEN]; /* allow replacement IO routines */ void (*inblk)(void __iomem *reg, void *data, int len); void (*outblk)(void __iomem *reg, void *data, int len); void (*dumpblk)(void __iomem *reg, int len); }; #endif /* __DM9000_PLATFORM_DATA */ zconf.h 0000644 00000003352 14722070374 0006045 0 ustar 00 /* zconf.h -- configuration of the zlib compression library * Copyright (C) 1995-1998 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #ifndef _ZCONF_H #define _ZCONF_H /* The memory requirements for deflate are (in bytes): (1 << (windowBits+2)) + (1 << (memLevel+9)) that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) plus a few kilobytes for small objects. For example, if you want to reduce the default memory requirements from 256K to 128K, compile with make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" Of course this will generally degrade compression (there's no free lunch). The memory requirements for inflate are (in bytes) 1 << windowBits that is, 32K for windowBits=15 (default value) plus a few kilobytes for small objects. */ /* Maximum value for memLevel in deflateInit2 */ #ifndef MAX_MEM_LEVEL # define MAX_MEM_LEVEL 8 #endif /* Maximum value for windowBits in deflateInit2 and inflateInit2. * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files * created by gzip. 
(Files created by minigzip can still be extracted by * gzip.) */ #ifndef MAX_WBITS # define MAX_WBITS 15 /* 32K LZ77 window */ #endif /* default windowBits for decompression. MAX_WBITS is for compression only */ #ifndef DEF_WBITS # define DEF_WBITS MAX_WBITS #endif /* default memLevel */ #if MAX_MEM_LEVEL >= 8 # define DEF_MEM_LEVEL 8 #else # define DEF_MEM_LEVEL MAX_MEM_LEVEL #endif /* Type declarations */ typedef unsigned char Byte; /* 8 bits */ typedef unsigned int uInt; /* 16 bits or more */ typedef unsigned long uLong; /* 32 bits or more */ typedef void *voidp; #endif /* _ZCONF_H */ rfkill.h 0000644 00000023554 14722070374 0006217 0 ustar 00 /* * Copyright (C) 2006 - 2007 Ivo van Doorn * Copyright (C) 2007 Dmitry Torokhov * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __RFKILL_H #define __RFKILL_H #include <uapi/linux/rfkill.h> /* don't allow anyone to use these in the kernel */ enum rfkill_user_states { RFKILL_USER_STATE_SOFT_BLOCKED = RFKILL_STATE_SOFT_BLOCKED, RFKILL_USER_STATE_UNBLOCKED = RFKILL_STATE_UNBLOCKED, RFKILL_USER_STATE_HARD_BLOCKED = RFKILL_STATE_HARD_BLOCKED, }; #undef RFKILL_STATE_SOFT_BLOCKED #undef RFKILL_STATE_UNBLOCKED #undef RFKILL_STATE_HARD_BLOCKED #include <linux/kernel.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/leds.h> #include <linux/err.h> struct device; /* this is opaque */ struct rfkill; /** * struct rfkill_ops - rfkill driver methods * * @poll: poll the rfkill block state(s) -- only assign this method * when you need polling. When called, simply call one of the * rfkill_set{,_hw,_sw}_state family of functions. If the hw * is getting unblocked you need to take into account the return * value of those functions to make sure the software block is * properly used. * @query: query the rfkill block state(s) and call exactly one of the * rfkill_set{,_hw,_sw}_state family of functions. Assign this * method if input events can cause hardware state changes to make * the rfkill core query your driver before setting a requested * block. * @set_block: turn the transmitter on (blocked == false) or off * (blocked == true) -- ignore and return 0 when hard blocked. * This callback must be assigned. */ struct rfkill_ops { void (*poll)(struct rfkill *rfkill, void *data); void (*query)(struct rfkill *rfkill, void *data); int (*set_block)(void *data, bool blocked); }; #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) /** * rfkill_alloc - Allocate rfkill structure * @name: name of the struct -- the string is not copied internally * @parent: device that has rf switch on it * @type: type of the switch (RFKILL_TYPE_*) * @ops: rfkill methods * @ops_data: data passed to each method * * This function should be called by the transmitter driver to allocate an * rfkill structure. Returns %NULL on failure. 
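 * A typical probe-path sequence built on the functions declared below
 * (a rough sketch; my_rfkill_ops and priv are hypothetical names):
 *
 *	struct rfkill *rfkill;
 *	int err;
 *
 *	rfkill = rfkill_alloc("my-wlan", dev, RFKILL_TYPE_WLAN,
 *			      &my_rfkill_ops, priv);
 *	if (!rfkill)
 *		return -ENOMEM;
 *	err = rfkill_register(rfkill);
 *	if (err) {
 *		rfkill_destroy(rfkill);
 *		return err;
 *	}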
*/ struct rfkill * __must_check rfkill_alloc(const char *name, struct device *parent, const enum rfkill_type type, const struct rfkill_ops *ops, void *ops_data); /** * rfkill_register - Register a rfkill structure. * @rfkill: rfkill structure to be registered * * This function should be called by the transmitter driver to register * the rfkill structure. Before calling this function the driver needs * to be ready to service method calls from rfkill. * * If rfkill_init_sw_state() is not called before registration, * set_block() will be called to initialize the software blocked state * to a default value. * * If the hardware blocked state is not set before registration, * it is assumed to be unblocked. */ int __must_check rfkill_register(struct rfkill *rfkill); /** * rfkill_pause_polling(struct rfkill *rfkill) * * Pause polling -- say transmitter is off for other reasons. * NOTE: not necessary for suspend/resume -- in that case the * core stops polling anyway (but will also correctly handle * the case of polling having been paused before suspend.) */ void rfkill_pause_polling(struct rfkill *rfkill); /** * rfkill_resume_polling(struct rfkill *rfkill) * * Resume polling * NOTE: not necessary for suspend/resume -- in that case the * core stops polling anyway */ void rfkill_resume_polling(struct rfkill *rfkill); /** * rfkill_unregister - Unregister a rfkill structure. * @rfkill: rfkill structure to be unregistered * * This function should be called by the network driver during device * teardown to destroy rfkill structure. Until it returns, the driver * needs to be able to service method calls. */ void rfkill_unregister(struct rfkill *rfkill); /** * rfkill_destroy - Free rfkill structure * @rfkill: rfkill structure to be destroyed * * Destroys the rfkill structure. */ void rfkill_destroy(struct rfkill *rfkill); /** * rfkill_set_hw_state - Set the internal rfkill hardware block state * @rfkill: pointer to the rfkill class to modify. * @blocked: the current hardware block state to set * * rfkill drivers that get events when the hard-blocked state changes * use this function to notify the rfkill core (and through that also * userspace) of the current state. They should also use this after * resume if the state could have changed. * * You need not (but may) call this function if poll_state is assigned. * * This function can be called in any context, even from within rfkill * callbacks. * * The function returns the combined block state (true if transmitter * should be blocked) so that drivers need not keep track of the soft * block state -- which they might not be able to. */ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); /** * rfkill_set_sw_state - Set the internal rfkill software block state * @rfkill: pointer to the rfkill class to modify. * @blocked: the current software block state to set * * rfkill drivers that get events when the soft-blocked state changes * (yes, some platforms directly act on input but allow changing again) * use this function to notify the rfkill core (and through that also * userspace) of the current state. * * Drivers should also call this function after resume if the state has * been changed by the user. This only makes sense for "persistent" * devices (see rfkill_init_sw_state()). * * This function can be called in any context, even from within rfkill * callbacks. * * The function returns the combined block state (true if transmitter * should be blocked). 
*/ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked); /** * rfkill_init_sw_state - Initialize persistent software block state * @rfkill: pointer to the rfkill class to modify. * @blocked: the current software block state to set * * rfkill drivers that preserve their software block state over power off * use this function to notify the rfkill core (and through that also * userspace) of their initial state. It should only be used before * registration. * * In addition, it marks the device as "persistent", an attribute which * can be read by userspace. Persistent devices are expected to preserve * their own state when suspended. */ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked); /** * rfkill_set_states - Set the internal rfkill block states * @rfkill: pointer to the rfkill class to modify. * @sw: the current software block state to set * @hw: the current hardware block state to set * * This function can be called in any context, even from within rfkill * callbacks. */ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); /** * rfkill_blocked - Query rfkill block state * * @rfkill: rfkill struct to query */ bool rfkill_blocked(struct rfkill *rfkill); /** * rfkill_find_type - Helper for finding rfkill type by name * @name: the name of the type * * Returns enum rfkill_type that corresponds to the name. */ enum rfkill_type rfkill_find_type(const char *name); #else /* !RFKILL */ static inline struct rfkill * __must_check rfkill_alloc(const char *name, struct device *parent, const enum rfkill_type type, const struct rfkill_ops *ops, void *ops_data) { return ERR_PTR(-ENODEV); } static inline int __must_check rfkill_register(struct rfkill *rfkill) { if (rfkill == ERR_PTR(-ENODEV)) return 0; return -EINVAL; } static inline void rfkill_pause_polling(struct rfkill *rfkill) { } static inline void rfkill_resume_polling(struct rfkill *rfkill) { } static inline void rfkill_unregister(struct rfkill *rfkill) { } static inline void rfkill_destroy(struct rfkill *rfkill) { } static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) { return blocked; } static inline bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) { return blocked; } static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) { } static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) { } static inline bool rfkill_blocked(struct rfkill *rfkill) { return false; } static inline enum rfkill_type rfkill_find_type(const char *name) { return RFKILL_TYPE_ALL; } #endif /* RFKILL || RFKILL_MODULE */ #ifdef CONFIG_RFKILL_LEDS /** * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED. * This function might return a NULL pointer if registering of the * LED trigger failed. Use this as "default_trigger" for the LED. */ const char *rfkill_get_led_trigger_name(struct rfkill *rfkill); /** * rfkill_set_led_trigger_name - Set the LED trigger name * @rfkill: rfkill struct * @name: LED trigger name * * This function sets the LED trigger name of the radio LED * trigger that rfkill creates. It is optional, but if called * must be called before rfkill_register() to be effective. 
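 *
 * For example (with a made-up trigger name):
 *
 *	rfkill_set_led_trigger_name(rfk, "example-wlan-led");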
*/ void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name); #else static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill) { return NULL; } static inline void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name) { } #endif #endif /* RFKILL_H */ tc.h 0000644 00000006716 14722070374 0005343 0 ustar 00 /* * Interface to the TURBOchannel related routines. * * Copyright (c) 1998 Harald Koerfgen * Copyright (c) 2005 James Simmons * Copyright (c) 2006 Maciej W. Rozycki * * Based on: * * "TURBOchannel Firmware Specification", EK-TCAAD-FS-004 * * from Digital Equipment Corporation. * * This file is subject to the terms and conditions of the GNU * General Public License. See the file "COPYING" in the main * directory of this archive for more details. */ #ifndef _LINUX_TC_H #define _LINUX_TC_H #include <linux/compiler.h> #include <linux/device.h> #include <linux/ioport.h> #include <linux/types.h> /* * Offsets for the ROM header locations for TURBOchannel cards. */ #define TC_OLDCARD 0x3c0000 #define TC_NEWCARD 0x000000 #define TC_ROM_WIDTH 0x3e0 #define TC_ROM_STRIDE 0x3e4 #define TC_ROM_SIZE 0x3e8 #define TC_SLOT_SIZE 0x3ec #define TC_PATTERN0 0x3f0 #define TC_PATTERN1 0x3f4 #define TC_PATTERN2 0x3f8 #define TC_PATTERN3 0x3fc #define TC_FIRM_VER 0x400 #define TC_VENDOR 0x420 #define TC_MODULE 0x440 #define TC_FIRM_TYPE 0x460 #define TC_FLAGS 0x470 #define TC_ROM_OBJECTS 0x480 /* * Information obtained through the get_tcinfo() PROM call. */ struct tcinfo { s32 revision; /* Hardware revision level. */ s32 clk_period; /* Clock period in nanoseconds. */ s32 slot_size; /* Slot size in megabytes. */ s32 io_timeout; /* I/O timeout in cycles. */ s32 dma_range; /* DMA address range in megabytes. */ s32 max_dma_burst; /* Maximum DMA burst length. */ s32 parity; /* System module supports TC parity. */ s32 reserved[4]; }; /* * TURBOchannel bus. */ struct tc_bus { struct list_head devices; /* List of devices on this bus. */ struct resource resource[2]; /* Address space routed to this bus. */ struct device dev; char name[13]; resource_size_t slot_base; resource_size_t ext_slot_base; resource_size_t ext_slot_size; int num_tcslots; struct tcinfo info; }; /* * TURBOchannel device. */ struct tc_dev { struct list_head node; /* Node in list of all TC devices. */ struct tc_bus *bus; /* Bus this device is on. */ struct tc_driver *driver; /* Which driver has allocated this device. */ struct device dev; /* Generic device interface. */ struct resource resource; /* Address space of this device. */ u64 dma_mask; /* DMA addressable range. */ char vendor[9]; char name[9]; char firmware[9]; int interrupt; int slot; }; #define to_tc_dev(n) container_of(n, struct tc_dev, dev) struct tc_device_id { char vendor[9]; char name[9]; }; /* * TURBOchannel driver. */ struct tc_driver { struct list_head node; const struct tc_device_id *id_table; struct device_driver driver; }; #define to_tc_driver(drv) container_of(drv, struct tc_driver, driver) /* * Return TURBOchannel clock frequency in Hz. 
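 *
 * For example, a clk_period of 40 ns yields
 * 100000 * (10000 / 40) = 25000000, i.e. a 25 MHz bus.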
*/ static inline unsigned long tc_get_speed(struct tc_bus *tbus) { return 100000 * (10000 / (unsigned long)tbus->info.clk_period); } #ifdef CONFIG_TC extern struct bus_type tc_bus_type; extern int tc_register_driver(struct tc_driver *tdrv); extern void tc_unregister_driver(struct tc_driver *tdrv); #else /* !CONFIG_TC */ static inline int tc_register_driver(struct tc_driver *tdrv) { return 0; } static inline void tc_unregister_driver(struct tc_driver *tdrv) { } #endif /* CONFIG_TC */ /* * These have to be provided by the architecture. */ extern int tc_preadb(u8 *valp, void __iomem *addr); extern int tc_bus_get_info(struct tc_bus *tbus); extern void tc_device_get_irq(struct tc_dev *tdev); #endif /* _LINUX_TC_H */ overflow.h 0000644 00000026375 14722070374 0006603 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 OR MIT */ #ifndef __LINUX_OVERFLOW_H #define __LINUX_OVERFLOW_H #include <linux/compiler.h> #include <linux/limits.h> /* * In the fallback code below, we need to compute the minimum and * maximum values representable in a given type. These macros may also * be useful elsewhere, so we provide them outside the * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. * * It would seem more obvious to do something like * * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) * * Unfortunately, the middle expressions, strictly speaking, have * undefined behaviour, and at least some versions of gcc warn about * the type_max expression (but not if -fsanitize=undefined is in * effect; in that case, the warning is deferred to runtime...). * * The slightly excessive casting in type_min is to make sure the * macros also produce sensible values for the exotic type _Bool. [The * overflow checkers only almost work for _Bool, but that's * a-feature-not-a-bug, since people shouldn't be doing arithmetic on * _Bools. Besides, the gcc builtins don't allow _Bool* as third * argument.] * * Idea stolen from * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - * credit to Christian Biere. */ #define is_signed_type(type) (((type)(-1)) < (type)1) #define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) #define type_min(T) ((T)((T)-type_max(T)-(T)1)) /* * Avoids triggering -Wtype-limits compilation warning, * while using unsigned data types to check a < 0. */ #define is_non_negative(a) ((a) > 0 || (a) == 0) #define is_negative(a) (!(is_non_negative(a))) #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW /* * For simplicity and code hygiene, the fallback code below insists on * a, b and *d having the same type (similar to the min() and max() * macros), whereas gcc's type-generic overflow checkers accept * different types. Hence we don't just make check_add_overflow an * alias for __builtin_add_overflow, but add type checks similar to * below. 
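 *
 * A typical call looks like this (a sketch; count and elem_size are
 * hypothetical variables of the same unsigned type as bytes):
 *
 *	u32 bytes;
 *
 *	if (check_mul_overflow(count, elem_size, &bytes))
 *		return -EOVERFLOW;
 *
 * The possibly-wrapped result is still written to the third argument;
 * the macro's value only says whether the operation overflowed.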
*/ #define check_add_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ __builtin_add_overflow(__a, __b, __d); \ }) #define check_sub_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ __builtin_sub_overflow(__a, __b, __d); \ }) #define check_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ __builtin_mul_overflow(__a, __b, __d); \ }) #else /* Checking for unsigned overflow is relatively easy without causing UB. */ #define __unsigned_add_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = __a + __b; \ *__d < __a; \ }) #define __unsigned_sub_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = __a - __b; \ __a < __b; \ }) /* * If one of a or b is a compile-time constant, this avoids a division. */ #define __unsigned_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = __a * __b; \ __builtin_constant_p(__b) ? \ __b > 0 && __a > type_max(typeof(__a)) / __b : \ __a > 0 && __b > type_max(typeof(__b)) / __a; \ }) /* * For signed types, detecting overflow is much harder, especially if * we want to avoid UB. But the interface of these macros is such that * we must provide a result in *d, and in fact we must produce the * result promised by gcc's builtins, which is simply the possibly * wrapped-around value. Fortunately, we can just formally do the * operations in the widest relevant unsigned type (u64) and then * truncate the result - gcc is smart enough to generate the same code * with and without the (u64) casts. */ /* * Adding two signed integers can overflow only if they have the same * sign, and overflow has happened iff the result has the opposite * sign. */ #define __signed_add_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = (u64)__a + (u64)__b; \ (((~(__a ^ __b)) & (*__d ^ __a)) \ & type_min(typeof(__a))) != 0; \ }) /* * Subtraction is similar, except that overflow can now happen only * when the signs are opposite. In this case, overflow has happened if * the result has the opposite sign of a. */ #define __signed_sub_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = (u64)__a - (u64)__b; \ ((((__a ^ __b)) & (*__d ^ __a)) \ & type_min(typeof(__a))) != 0; \ }) /* * Signed multiplication is rather hard. gcc always follows C99, so * division is truncated towards 0. This means that we can write the * overflow check like this: * * (a > 0 && (b > MAX/a || b < MIN/a)) || * (a < -1 && (b > MIN/a || b < MAX/a) || * (a == -1 && b == MIN) * * The redundant casts of -1 are to silence an annoying -Wtype-limits * (included in -Wextra) warning: When the type is u8 or u16, the * __b_c_e in check_mul_overflow obviously selects * __unsigned_mul_overflow, but unfortunately gcc still parses this * code and warns about the limited range of __b. 
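 *
 * As a concrete instance of the check above: for s8, a = 64 and b = 2
 * give b > 0 and a > MAX/b (64 > 127/2 = 63), so overflow is
 * reported, while *d receives the wrapped value -128.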
*/ #define __signed_mul_overflow(a, b, d) ({ \ typeof(a) __a = (a); \ typeof(b) __b = (b); \ typeof(d) __d = (d); \ typeof(a) __tmax = type_max(typeof(a)); \ typeof(a) __tmin = type_min(typeof(a)); \ (void) (&__a == &__b); \ (void) (&__a == __d); \ *__d = (u64)__a * (u64)__b; \ (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ (__b == (typeof(__b))-1 && __a == __tmin); \ }) #define check_add_overflow(a, b, d) \ __builtin_choose_expr(is_signed_type(typeof(a)), \ __signed_add_overflow(a, b, d), \ __unsigned_add_overflow(a, b, d)) #define check_sub_overflow(a, b, d) \ __builtin_choose_expr(is_signed_type(typeof(a)), \ __signed_sub_overflow(a, b, d), \ __unsigned_sub_overflow(a, b, d)) #define check_mul_overflow(a, b, d) \ __builtin_choose_expr(is_signed_type(typeof(a)), \ __signed_mul_overflow(a, b, d), \ __unsigned_mul_overflow(a, b, d)) #endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ /** check_shl_overflow() - Calculate a left-shifted value and check overflow * * @a: Value to be shifted * @s: How many bits left to shift * @d: Pointer to where to store the result * * Computes *@d = (@a << @s) * * Returns true if '*d' cannot hold the result or when 'a << s' doesn't * make sense. Example conditions: * - 'a << s' causes bits to be lost when stored in *d. * - 's' is garbage (e.g. negative) or so large that the result of * 'a << s' is guaranteed to be 0. * - 'a' is negative. * - 'a << s' sets the sign bit, if any, in '*d'. * * '*d' will hold the results of the attempted shift, but is not * considered "safe for use" if true is returned. */ #define check_shl_overflow(a, s, d) ({ \ typeof(a) _a = a; \ typeof(s) _s = s; \ typeof(d) _d = d; \ u64 _a_full = _a; \ unsigned int _to_shift = \ is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \ *_d = (_a_full << _to_shift); \ (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \ (*_d >> _to_shift) != _a); \ }) /** * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX * * @factor1: first factor * @factor2: second factor * * Returns: calculate @factor1 * @factor2, both promoted to size_t, * with any overflow causing the return value to be SIZE_MAX. The * lvalue must be size_t to avoid implicit type conversion. */ static inline size_t __must_check size_mul(size_t factor1, size_t factor2) { size_t bytes; if (check_mul_overflow(factor1, factor2, &bytes)) return SIZE_MAX; return bytes; } /** * size_add() - Calculate size_t addition with saturation at SIZE_MAX * * @addend1: first addend * @addend2: second addend * * Returns: calculate @addend1 + @addend2, both promoted to size_t, * with any overflow causing the return value to be SIZE_MAX. The * lvalue must be size_t to avoid implicit type conversion. */ static inline size_t __must_check size_add(size_t addend1, size_t addend2) { size_t bytes; if (check_add_overflow(addend1, addend2, &bytes)) return SIZE_MAX; return bytes; } /** * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX * * @minuend: value to subtract from * @subtrahend: value to subtract from @minuend * * Returns: calculate @minuend - @subtrahend, both promoted to size_t, * with any overflow causing the return value to be SIZE_MAX. For * composition with the size_add() and size_mul() helpers, neither * argument may be SIZE_MAX (or the result will be forced to SIZE_MAX). * The lvalue must be size_t to avoid implicit type conversion.
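 *
 * An illustrative sketch (total_len and hdr_len are hypothetical
 * size_t values):
 *
 *	size_t body = size_sub(total_len, hdr_len);
 *
 * If hdr_len > total_len, body becomes SIZE_MAX, so a subsequent
 * allocation of that size fails cleanly instead of using a
 * wrapped-around length.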
*/ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) { size_t bytes; if (minuend == SIZE_MAX || subtrahend == SIZE_MAX || check_sub_overflow(minuend, subtrahend, &bytes)) return SIZE_MAX; return bytes; } /** * array_size() - Calculate size of 2-dimensional array. * * @a: dimension one * @b: dimension two * * Calculates size of 2-dimensional array: @a * @b. * * Returns: number of bytes needed to represent the array or SIZE_MAX on * overflow. */ #define array_size(a, b) size_mul(a, b) /** * array3_size() - Calculate size of 3-dimensional array. * * @a: dimension one * @b: dimension two * @c: dimension three * * Calculates size of 3-dimensional array: @a * @b * @c. * * Returns: number of bytes needed to represent the array or SIZE_MAX on * overflow. */ #define array3_size(a, b, c) size_mul(size_mul(a, b), c) /** * flex_array_size() - Calculate size of a flexible array member * within an enclosing structure. * * @p: Pointer to the structure. * @member: Name of the flexible array member. * @count: Number of elements in the array. * * Calculates size of a flexible array of @count number of @member * elements, at the end of structure @p. * * Return: number of bytes needed or SIZE_MAX on overflow. */ #define flex_array_size(p, member, count) \ size_mul(count, \ sizeof(*(p)->member) + __must_be_array((p)->member)) /** * struct_size() - Calculate size of structure with trailing flexible array. * * @p: Pointer to the structure. * @member: Name of the array member. * @count: Number of elements in the array. * * Calculates size of memory needed for structure @p followed by an * array of @count number of @member elements. * * Return: number of bytes needed or SIZE_MAX on overflow. */ #define struct_size(p, member, count) \ size_add(sizeof(*(p)), flex_array_size(p, member, count)) #endif /* __LINUX_OVERFLOW_H */ latencytop.h 0000644 00000002267 14722070374 0007114 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * latencytop.h: Infrastructure for displaying latency * * (C) Copyright 2008 Intel Corporation * Author: Arjan van de Ven <arjan@linux.intel.com> * */ #ifndef _INCLUDE_GUARD_LATENCYTOP_H_ #define _INCLUDE_GUARD_LATENCYTOP_H_ #include <linux/compiler.h> struct task_struct; #ifdef CONFIG_LATENCYTOP #define LT_SAVECOUNT 32 #define LT_BACKTRACEDEPTH 12 struct latency_record { unsigned long backtrace[LT_BACKTRACEDEPTH]; unsigned int count; unsigned long time; unsigned long max; }; extern int latencytop_enabled; void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); static inline void account_scheduler_latency(struct task_struct *task, int usecs, int inter) { if (unlikely(latencytop_enabled)) __account_scheduler_latency(task, usecs, inter); } void clear_tsk_latency_tracing(struct task_struct *p); extern int sysctl_latencytop(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #else static inline void account_scheduler_latency(struct task_struct *task, int usecs, int inter) { } static inline void clear_tsk_latency_tracing(struct task_struct *p) { } #endif #endif irqchip/irq-madera.h 0000644 00000007241 14722070374 0010410 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Interrupt support for Cirrus Logic Madera codecs * * Copyright (C) 2016-2018 Cirrus Logic, Inc. and * Cirrus Logic International Semiconductor Ltd. 
*/ #ifndef IRQCHIP_MADERA_H #define IRQCHIP_MADERA_H #include <linux/interrupt.h> #include <linux/mfd/madera/core.h> #define MADERA_IRQ_FLL1_LOCK 0 #define MADERA_IRQ_FLL2_LOCK 1 #define MADERA_IRQ_FLL3_LOCK 2 #define MADERA_IRQ_FLLAO_LOCK 3 #define MADERA_IRQ_CLK_SYS_ERR 4 #define MADERA_IRQ_CLK_ASYNC_ERR 5 #define MADERA_IRQ_CLK_DSP_ERR 6 #define MADERA_IRQ_HPDET 7 #define MADERA_IRQ_MICDET1 8 #define MADERA_IRQ_MICDET2 9 #define MADERA_IRQ_JD1_RISE 10 #define MADERA_IRQ_JD1_FALL 11 #define MADERA_IRQ_JD2_RISE 12 #define MADERA_IRQ_JD2_FALL 13 #define MADERA_IRQ_MICD_CLAMP_RISE 14 #define MADERA_IRQ_MICD_CLAMP_FALL 15 #define MADERA_IRQ_DRC2_SIG_DET 16 #define MADERA_IRQ_DRC1_SIG_DET 17 #define MADERA_IRQ_ASRC1_IN1_LOCK 18 #define MADERA_IRQ_ASRC1_IN2_LOCK 19 #define MADERA_IRQ_ASRC2_IN1_LOCK 20 #define MADERA_IRQ_ASRC2_IN2_LOCK 21 #define MADERA_IRQ_DSP_IRQ1 22 #define MADERA_IRQ_DSP_IRQ2 23 #define MADERA_IRQ_DSP_IRQ3 24 #define MADERA_IRQ_DSP_IRQ4 25 #define MADERA_IRQ_DSP_IRQ5 26 #define MADERA_IRQ_DSP_IRQ6 27 #define MADERA_IRQ_DSP_IRQ7 28 #define MADERA_IRQ_DSP_IRQ8 29 #define MADERA_IRQ_DSP_IRQ9 30 #define MADERA_IRQ_DSP_IRQ10 31 #define MADERA_IRQ_DSP_IRQ11 32 #define MADERA_IRQ_DSP_IRQ12 33 #define MADERA_IRQ_DSP_IRQ13 34 #define MADERA_IRQ_DSP_IRQ14 35 #define MADERA_IRQ_DSP_IRQ15 36 #define MADERA_IRQ_DSP_IRQ16 37 #define MADERA_IRQ_HP1L_SC 38 #define MADERA_IRQ_HP1R_SC 39 #define MADERA_IRQ_HP2L_SC 40 #define MADERA_IRQ_HP2R_SC 41 #define MADERA_IRQ_HP3L_SC 42 #define MADERA_IRQ_HP3R_SC 43 #define MADERA_IRQ_SPKOUTL_SC 44 #define MADERA_IRQ_SPKOUTR_SC 45 #define MADERA_IRQ_HP1L_ENABLE_DONE 46 #define MADERA_IRQ_HP1R_ENABLE_DONE 47 #define MADERA_IRQ_HP2L_ENABLE_DONE 48 #define MADERA_IRQ_HP2R_ENABLE_DONE 49 #define MADERA_IRQ_HP3L_ENABLE_DONE 50 #define MADERA_IRQ_HP3R_ENABLE_DONE 51 #define MADERA_IRQ_SPKOUTL_ENABLE_DONE 52 #define MADERA_IRQ_SPKOUTR_ENABLE_DONE 53 #define MADERA_IRQ_SPK_SHUTDOWN 54 #define MADERA_IRQ_SPK_OVERHEAT 55 #define MADERA_IRQ_SPK_OVERHEAT_WARN 56 #define MADERA_IRQ_GPIO1 57 #define MADERA_IRQ_GPIO2 58 #define MADERA_IRQ_GPIO3 59 #define MADERA_IRQ_GPIO4 60 #define MADERA_IRQ_GPIO5 61 #define MADERA_IRQ_GPIO6 62 #define MADERA_IRQ_GPIO7 63 #define MADERA_IRQ_GPIO8 64 #define MADERA_IRQ_DSP1_BUS_ERR 65 #define MADERA_IRQ_DSP2_BUS_ERR 66 #define MADERA_IRQ_DSP3_BUS_ERR 67 #define MADERA_IRQ_DSP4_BUS_ERR 68 #define MADERA_IRQ_DSP5_BUS_ERR 69 #define MADERA_IRQ_DSP6_BUS_ERR 70 #define MADERA_IRQ_DSP7_BUS_ERR 71 #define MADERA_NUM_IRQ 72 /* * These wrapper functions are for use by other child drivers of the * same parent MFD. 
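 *
 * For example, a child driver might do (example_hpdet_handler and
 * priv are illustrative):
 *
 *	ret = madera_request_irq(madera, MADERA_IRQ_HPDET, "HPDET",
 *				 example_hpdet_handler, priv);
 *	if (ret)
 *		return ret;
 *	...
 *	madera_free_irq(madera, MADERA_IRQ_HPDET, priv);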
*/ static inline int madera_get_irq_mapping(struct madera *madera, int irq) { if (!madera->irq_dev) return -ENODEV; return regmap_irq_get_virq(madera->irq_data, irq); } static inline int madera_request_irq(struct madera *madera, int irq, const char *name, irq_handler_t handler, void *data) { irq = madera_get_irq_mapping(madera, irq); if (irq < 0) return irq; return request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT, name, data); } static inline void madera_free_irq(struct madera *madera, int irq, void *data) { irq = madera_get_irq_mapping(madera, irq); if (irq < 0) return; free_irq(irq, data); } static inline int madera_set_irq_wake(struct madera *madera, int irq, int on) { irq = madera_get_irq_mapping(madera, irq); if (irq < 0) return irq; return irq_set_irq_wake(irq, on); } #endif irqchip/mxs.h 0000644 00000000335 14722070374 0007172 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 Freescale Semiconductor, Inc. */ #ifndef __LINUX_IRQCHIP_MXS_H #define __LINUX_IRQCHIP_MXS_H extern void icoll_handle_irq(struct pt_regs *); #endif irqchip/irq-sa11x0.h 0000644 00000000544 14722070374 0010173 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic IRQ handling for the SA11x0. * * Copyright (C) 2015 Dmitry Eremin-Solenikov * Copyright (C) 1999-2001 Nicolas Pitre */ #ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H #define __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start); #endif irqchip/mmp.h 0000644 00000000233 14722070374 0007151 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IRQCHIP_MMP_H #define __IRQCHIP_MMP_H extern struct irq_chip icu_irq_chip; #endif /* __IRQCHIP_MMP_H */ irqchip/irq-ixp4xx.h 0000644 00000000424 14722070374 0010417 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IRQ_IXP4XX_H #define __IRQ_IXP4XX_H #include <linux/ioport.h> struct irq_domain; void ixp4xx_irq_init(resource_size_t irqbase, bool is_356); struct irq_domain *ixp4xx_get_irq_domain(void); #endif /* __IRQ_IXP4XX_H */ irqchip/arm-gic.h 0000644 00000012627 14722070374 0007711 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/irqchip/arm-gic.h * * Copyright (C) 2002 ARM Limited, All Rights Reserved. 
*/ #ifndef __LINUX_IRQCHIP_ARM_GIC_H #define __LINUX_IRQCHIP_ARM_GIC_H #define GIC_CPU_CTRL 0x00 #define GIC_CPU_PRIMASK 0x04 #define GIC_CPU_BINPOINT 0x08 #define GIC_CPU_INTACK 0x0c #define GIC_CPU_EOI 0x10 #define GIC_CPU_RUNNINGPRI 0x14 #define GIC_CPU_HIGHPRI 0x18 #define GIC_CPU_ALIAS_BINPOINT 0x1c #define GIC_CPU_ACTIVEPRIO 0xd0 #define GIC_CPU_IDENT 0xfc #define GIC_CPU_DEACTIVATE 0x1000 #define GICC_ENABLE 0x1 #define GICC_INT_PRI_THRESHOLD 0xf0 #define GIC_CPU_CTRL_EnableGrp0_SHIFT 0 #define GIC_CPU_CTRL_EnableGrp0 (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT) #define GIC_CPU_CTRL_EnableGrp1_SHIFT 1 #define GIC_CPU_CTRL_EnableGrp1 (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT) #define GIC_CPU_CTRL_AckCtl_SHIFT 2 #define GIC_CPU_CTRL_AckCtl (1 << GIC_CPU_CTRL_AckCtl_SHIFT) #define GIC_CPU_CTRL_FIQEn_SHIFT 3 #define GIC_CPU_CTRL_FIQEn (1 << GIC_CPU_CTRL_FIQEn_SHIFT) #define GIC_CPU_CTRL_CBPR_SHIFT 4 #define GIC_CPU_CTRL_CBPR (1 << GIC_CPU_CTRL_CBPR_SHIFT) #define GIC_CPU_CTRL_EOImodeNS_SHIFT 9 #define GIC_CPU_CTRL_EOImodeNS (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT) #define GICC_IAR_INT_ID_MASK 0x3ff #define GICC_INT_SPURIOUS 1023 #define GICC_DIS_BYPASS_MASK 0x1e0 #define GIC_DIST_CTRL 0x000 #define GIC_DIST_CTR 0x004 #define GIC_DIST_IIDR 0x008 #define GIC_DIST_IGROUP 0x080 #define GIC_DIST_ENABLE_SET 0x100 #define GIC_DIST_ENABLE_CLEAR 0x180 #define GIC_DIST_PENDING_SET 0x200 #define GIC_DIST_PENDING_CLEAR 0x280 #define GIC_DIST_ACTIVE_SET 0x300 #define GIC_DIST_ACTIVE_CLEAR 0x380 #define GIC_DIST_PRI 0x400 #define GIC_DIST_TARGET 0x800 #define GIC_DIST_CONFIG 0xc00 #define GIC_DIST_SOFTINT 0xf00 #define GIC_DIST_SGI_PENDING_CLEAR 0xf10 #define GIC_DIST_SGI_PENDING_SET 0xf20 #define GICD_ENABLE 0x1 #define GICD_DISABLE 0x0 #define GICD_INT_ACTLOW_LVLTRIG 0x0 #define GICD_INT_EN_CLR_X32 0xffffffff #define GICD_INT_EN_SET_SGI 0x0000ffff #define GICD_INT_EN_CLR_PPI 0xffff0000 #define GICD_IIDR_IMPLEMENTER_SHIFT 0 #define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) #define GICD_IIDR_REVISION_SHIFT 12 #define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) #define GICD_IIDR_VARIANT_SHIFT 16 #define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) #define GICD_IIDR_PRODUCT_ID_SHIFT 24 #define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) #define GICH_HCR 0x0 #define GICH_VTR 0x4 #define GICH_VMCR 0x8 #define GICH_MISR 0x10 #define GICH_EISR0 0x20 #define GICH_EISR1 0x24 #define GICH_ELRSR0 0x30 #define GICH_ELRSR1 0x34 #define GICH_APR 0xf0 #define GICH_LR0 0x100 #define GICH_HCR_EN (1 << 0) #define GICH_HCR_UIE (1 << 1) #define GICH_HCR_NPIE (1 << 3) #define GICH_LR_VIRTUALID (0x3ff << 0) #define GICH_LR_PHYSID_CPUID_SHIFT (10) #define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT) #define GICH_LR_PRIORITY_SHIFT 23 #define GICH_LR_STATE (3 << 28) #define GICH_LR_PENDING_BIT (1 << 28) #define GICH_LR_ACTIVE_BIT (1 << 29) #define GICH_LR_EOI (1 << 19) #define GICH_LR_GROUP1 (1 << 30) #define GICH_LR_HW (1 << 31) #define GICH_VMCR_ENABLE_GRP0_SHIFT 0 #define GICH_VMCR_ENABLE_GRP0_MASK (1 << GICH_VMCR_ENABLE_GRP0_SHIFT) #define GICH_VMCR_ENABLE_GRP1_SHIFT 1 #define GICH_VMCR_ENABLE_GRP1_MASK (1 << GICH_VMCR_ENABLE_GRP1_SHIFT) #define GICH_VMCR_ACK_CTL_SHIFT 2 #define GICH_VMCR_ACK_CTL_MASK (1 << GICH_VMCR_ACK_CTL_SHIFT) #define GICH_VMCR_FIQ_EN_SHIFT 3 #define GICH_VMCR_FIQ_EN_MASK (1 << GICH_VMCR_FIQ_EN_SHIFT) #define GICH_VMCR_CBPR_SHIFT 4 #define GICH_VMCR_CBPR_MASK (1 << GICH_VMCR_CBPR_SHIFT) #define GICH_VMCR_EOI_MODE_SHIFT 9 #define 
GICH_VMCR_EOI_MODE_MASK (1 << GICH_VMCR_EOI_MODE_SHIFT) #define GICH_VMCR_PRIMASK_SHIFT 27 #define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT) #define GICH_VMCR_BINPOINT_SHIFT 21 #define GICH_VMCR_BINPOINT_MASK (0x7 << GICH_VMCR_BINPOINT_SHIFT) #define GICH_VMCR_ALIAS_BINPOINT_SHIFT 18 #define GICH_VMCR_ALIAS_BINPOINT_MASK (0x7 << GICH_VMCR_ALIAS_BINPOINT_SHIFT) #define GICH_MISR_EOI (1 << 0) #define GICH_MISR_U (1 << 1) #define GICV_PMR_PRIORITY_SHIFT 3 #define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT) #ifndef __ASSEMBLY__ #include <linux/irqdomain.h> struct device_node; struct gic_chip_data; void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); int gic_cpu_if_down(unsigned int gic_nr); void gic_cpu_save(struct gic_chip_data *gic); void gic_cpu_restore(struct gic_chip_data *gic); void gic_dist_save(struct gic_chip_data *gic); void gic_dist_restore(struct gic_chip_data *gic); /* * Subdrivers that need some preparatory work can initialize their * chips and call this to register their GICs. */ int gic_of_init(struct device_node *node, struct device_node *parent); /* * Initialises and registers a non-root or child GIC chip. Memory for * the gic_chip_data structure is dynamically allocated. */ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq); /* * Legacy platforms not converted to DT yet must use this to init * their GIC */ void gic_init(void __iomem *dist, void __iomem *cpu); void gic_send_sgi(unsigned int cpu_id, unsigned int irq); int gic_get_cpu_id(unsigned int cpu); void gic_migrate_target(unsigned int new_cpu_id); unsigned long gic_get_sgir_physaddr(void); #endif /* __ASSEMBLY__ */ #endif irqchip/irq-davinci-cp-intc.h 0000644 00000001150 14722070374 0012120 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2019 Texas Instruments */ #ifndef _LINUX_IRQ_DAVINCI_CP_INTC_ #define _LINUX_IRQ_DAVINCI_CP_INTC_ #include <linux/ioport.h> /** * struct davinci_cp_intc_config - configuration data for davinci-cp-intc * driver. * * @reg: register range to map * @num_irqs: number of HW interrupts supported by the controller */ struct davinci_cp_intc_config { struct resource reg; unsigned int num_irqs; }; int davinci_cp_intc_init(const struct davinci_cp_intc_config *config); #endif /* _LINUX_IRQ_DAVINCI_CP_INTC_ */ irqchip/chained_irq.h 0000644 00000001643 14722070374 0010634 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Chained IRQ handlers support. * * Copyright (C) 2011 ARM Ltd. */ #ifndef __IRQCHIP_CHAINED_IRQ_H #define __IRQCHIP_CHAINED_IRQ_H #include <linux/irq.h> /* * Entry/exit functions for chained handlers where the primary IRQ chip * may implement either fasteoi or level-trigger flow control. */ static inline void chained_irq_enter(struct irq_chip *chip, struct irq_desc *desc) { /* FastEOI controllers require no action on entry. */ if (chip->irq_eoi) return; if (chip->irq_mask_ack) { chip->irq_mask_ack(&desc->irq_data); } else { chip->irq_mask(&desc->irq_data); if (chip->irq_ack) chip->irq_ack(&desc->irq_data); } } static inline void chained_irq_exit(struct irq_chip *chip, struct irq_desc *desc) { if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); else chip->irq_unmask(&desc->irq_data); } #endif /* __IRQCHIP_CHAINED_IRQ_H */ irqchip/xtensa-mx.h 0000644 00000000723 14722070374 0010310 0 ustar 00 /* * Xtensa MX interrupt distributor * * Copyright (C) 2002 - 2013 Tensilica, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License.
See the file "COPYING" in the main directory of this archive * for more details. */ #ifndef __LINUX_IRQCHIP_XTENSA_MX_H #define __LINUX_IRQCHIP_XTENSA_MX_H struct device_node; int xtensa_mx_init_legacy(struct device_node *interrupt_parent); #endif /* __LINUX_IRQCHIP_XTENSA_MX_H */ irqchip/irq-bcm2836.h 0000644 00000003623 14722070374 0010243 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Root interrupt controller for the BCM2836 (Raspberry Pi 2). * * Copyright 2015 Broadcom */ #define LOCAL_CONTROL 0x000 #define LOCAL_PRESCALER 0x008 /* * The low 2 bits identify the CPU that the GPU IRQ goes to, and the * next 2 bits identify the CPU that the GPU FIQ goes to. */ #define LOCAL_GPU_ROUTING 0x00c /* When setting bits 0-3, enables PMU interrupts on that CPU. */ #define LOCAL_PM_ROUTING_SET 0x010 /* When setting bits 0-3, disables PMU interrupts on that CPU. */ #define LOCAL_PM_ROUTING_CLR 0x014 /* * The low 4 bits of this are the CPU's timer IRQ enables, and the * next 4 bits are the CPU's timer FIQ enables (which override the IRQ * bits). */ #define LOCAL_TIMER_INT_CONTROL0 0x040 /* * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and * the next 4 bits are the CPU's per-mailbox FIQ enables (which * override the IRQ bits). */ #define LOCAL_MAILBOX_INT_CONTROL0 0x050 /* * The CPU's interrupt status register. Bits are defined by the the * LOCAL_IRQ_* bits below. */ #define LOCAL_IRQ_PENDING0 0x060 /* Same status bits as above, but for FIQ. */ #define LOCAL_FIQ_PENDING0 0x070 /* * Mailbox write-to-set bits. There are 16 mailboxes, 4 per CPU, and * these bits are organized by mailbox number and then CPU number. We * use mailbox 0 for IPIs. The mailbox's interrupt is raised while * any bit is set. */ #define LOCAL_MAILBOX0_SET0 0x080 #define LOCAL_MAILBOX3_SET0 0x08c /* Mailbox write-to-clear bits. */ #define LOCAL_MAILBOX0_CLR0 0x0c0 #define LOCAL_MAILBOX3_CLR0 0x0cc #define LOCAL_IRQ_CNTPSIRQ 0 #define LOCAL_IRQ_CNTPNSIRQ 1 #define LOCAL_IRQ_CNTHPIRQ 2 #define LOCAL_IRQ_CNTVIRQ 3 #define LOCAL_IRQ_MAILBOX0 4 #define LOCAL_IRQ_MAILBOX1 5 #define LOCAL_IRQ_MAILBOX2 6 #define LOCAL_IRQ_MAILBOX3 7 #define LOCAL_IRQ_GPU_FAST 8 #define LOCAL_IRQ_PMU_FAST 9 #define LAST_IRQ LOCAL_IRQ_PMU_FAST irqchip/irq-omap-intc.h 0000644 00000001066 14722070374 0011045 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /** * irq-omap-intc.h - INTC Idle Functions * * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com * * Author: Felipe Balbi <balbi@ti.com> */ #ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H #define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H int omap_irq_pending(void); void omap_intc_save_context(void); void omap_intc_restore_context(void); void omap3_intc_suspend(void); void omap3_intc_prepare_idle(void); void omap3_intc_resume_idle(void); #endif /* __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H */ irqchip/arm-gic-v4.h 0000644 00000005113 14722070374 0010230 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved. * Author: Marc Zyngier <marc.zyngier@arm.com> */ #ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H #define __LINUX_IRQCHIP_ARM_GIC_V4_H struct its_vpe; /* * Maximum number of ITTs when GITS_TYPER.VMOVP == 0, using the * ITSList mechanism to perform inter-ITS synchronization. 
*/ #define GICv4_ITS_LIST_MAX 16 /* Embedded in kvm.arch */ struct its_vm { struct fwnode_handle *fwnode; struct irq_domain *domain; struct page *vprop_page; struct its_vpe **vpes; int nr_vpes; irq_hw_number_t db_lpi_base; unsigned long *db_bitmap; int nr_db_lpis; u32 vlpi_count[GICv4_ITS_LIST_MAX]; }; /* Embedded in kvm_vcpu.arch */ struct its_vpe { struct page *vpt_page; struct its_vm *its_vm; /* Doorbell interrupt */ int irq; irq_hw_number_t vpe_db_lpi; /* VPE proxy mapping */ int vpe_proxy_event; /* * This collection ID is used to indirect the target * redistributor for this VPE. The ID itself isn't involved in * programming of the ITS. */ u16 col_idx; /* Unique (system-wide) VPE identifier */ u16 vpe_id; /* Implementation Defined Area Invalid */ bool idai; /* Pending VLPIs on schedule out? */ bool pending_last; }; /* * struct its_vlpi_map: structure describing the mapping of a * VLPI. Only to be interpreted in the context of a physical interrupt * it complements. To be used as the vcpu_info passed to * irq_set_vcpu_affinity(). * * @vm: Pointer to the GICv4 notion of a VM * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE) * @vintid: Virtual LPI number * @properties: Priority and enable bits (as written in the prop table) * @db_enabled: Is the VPE doorbell to be generated? */ struct its_vlpi_map { struct its_vm *vm; struct its_vpe *vpe; u32 vintid; u8 properties; bool db_enabled; }; enum its_vcpu_info_cmd_type { MAP_VLPI, GET_VLPI, PROP_UPDATE_VLPI, PROP_UPDATE_AND_INV_VLPI, SCHEDULE_VPE, DESCHEDULE_VPE, INVALL_VPE, }; struct its_cmd_info { enum its_vcpu_info_cmd_type cmd_type; union { struct its_vlpi_map *map; u8 config; }; }; int its_alloc_vcpu_irqs(struct its_vm *vm); void its_free_vcpu_irqs(struct its_vm *vm); int its_schedule_vpe(struct its_vpe *vpe, bool on); int its_invall_vpe(struct its_vpe *vpe); int its_map_vlpi(int irq, struct its_vlpi_map *map); int its_get_vlpi(int irq, struct its_vlpi_map *map); int its_unmap_vlpi(int irq); int its_prop_update_vlpi(int irq, u8 config, bool inv); struct irq_domain_ops; int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops); #endif irqchip/arm-gic-common.h 0000644 00000001726 14722070374 0011175 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/irqchip/arm-gic-common.h * * Copyright (C) 2016 ARM Limited, All Rights Reserved. */ #ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H #define __LINUX_IRQCHIP_ARM_GIC_COMMON_H #include <linux/types.h> #include <linux/ioport.h> #define GICD_INT_DEF_PRI 0xa0 #define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ (GICD_INT_DEF_PRI << 16) |\ (GICD_INT_DEF_PRI << 8) |\ GICD_INT_DEF_PRI) enum gic_type { GIC_V2, GIC_V3, }; struct gic_kvm_info { /* GIC type */ enum gic_type type; /* Virtual CPU interface */ struct resource vcpu; /* Interrupt number */ unsigned int maint_irq; /* Virtual control interface */ struct resource vctrl; /* vlpi support */ bool has_v4; }; const struct gic_kvm_info *gic_get_kvm_info(void); struct irq_domain; struct fwnode_handle; int gicv2m_init(struct fwnode_handle *parent_handle, struct irq_domain *parent); #endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */ irqchip/arm-gic-v3.h 0000644 00000057051 14722070374 0010237 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. * Author: Marc Zyngier <marc.zyngier@arm.com> */ #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H #define __LINUX_IRQCHIP_ARM_GIC_V3_H /* * Distributor registers. We assume we're running non-secure, with ARE * being set. 
Secure-only and non-ARE registers are not described. */ #define GICD_CTLR 0x0000 #define GICD_TYPER 0x0004 #define GICD_IIDR 0x0008 #define GICD_STATUSR 0x0010 #define GICD_SETSPI_NSR 0x0040 #define GICD_CLRSPI_NSR 0x0048 #define GICD_SETSPI_SR 0x0050 #define GICD_CLRSPI_SR 0x0058 #define GICD_SEIR 0x0068 #define GICD_IGROUPR 0x0080 #define GICD_ISENABLER 0x0100 #define GICD_ICENABLER 0x0180 #define GICD_ISPENDR 0x0200 #define GICD_ICPENDR 0x0280 #define GICD_ISACTIVER 0x0300 #define GICD_ICACTIVER 0x0380 #define GICD_IPRIORITYR 0x0400 #define GICD_ICFGR 0x0C00 #define GICD_IGRPMODR 0x0D00 #define GICD_NSACR 0x0E00 #define GICD_IGROUPRnE 0x1000 #define GICD_ISENABLERnE 0x1200 #define GICD_ICENABLERnE 0x1400 #define GICD_ISPENDRnE 0x1600 #define GICD_ICPENDRnE 0x1800 #define GICD_ISACTIVERnE 0x1A00 #define GICD_ICACTIVERnE 0x1C00 #define GICD_IPRIORITYRnE 0x2000 #define GICD_ICFGRnE 0x3000 #define GICD_IROUTER 0x6000 #define GICD_IROUTERnE 0x8000 #define GICD_IDREGS 0xFFD0 #define GICD_PIDR2 0xFFE8 #define ESPI_BASE_INTID 4096 /* * Those registers are actually from GICv2, but the spec demands that they * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). */ #define GICD_ITARGETSR 0x0800 #define GICD_SGIR 0x0F00 #define GICD_CPENDSGIR 0x0F10 #define GICD_SPENDSGIR 0x0F20 #define GICD_CTLR_RWP (1U << 31) #define GICD_CTLR_DS (1U << 6) #define GICD_CTLR_ARE_NS (1U << 4) #define GICD_CTLR_ENABLE_G1A (1U << 1) #define GICD_CTLR_ENABLE_G1 (1U << 0) #define GICD_IIDR_IMPLEMENTER_SHIFT 0 #define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) #define GICD_IIDR_REVISION_SHIFT 12 #define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) #define GICD_IIDR_VARIANT_SHIFT 16 #define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) #define GICD_IIDR_PRODUCT_ID_SHIFT 24 #define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) /* * In systems with a single security state (what we emulate in KVM) * the meaning of the interrupt group enable bits is slightly different */ #define GICD_CTLR_ENABLE_SS_G1 (1U << 1) #define GICD_CTLR_ENABLE_SS_G0 (1U << 0) #define GICD_TYPER_RSS (1U << 26) #define GICD_TYPER_LPIS (1U << 17) #define GICD_TYPER_MBIS (1U << 16) #define GICD_TYPER_ESPI (1U << 8) #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) #define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) #define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) #define GICD_TYPER_ESPIS(typer) \ (((typer) & GICD_TYPER_ESPI) ? 
GICD_TYPER_SPIS((typer) >> 27) : 0) #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) #define GIC_PIDR2_ARCH_MASK 0xf0 #define GIC_PIDR2_ARCH_GICv3 0x30 #define GIC_PIDR2_ARCH_GICv4 0x40 #define GIC_V3_DIST_SIZE 0x10000 /* * Re-Distributor registers, offsets from RD_base */ #define GICR_CTLR GICD_CTLR #define GICR_IIDR 0x0004 #define GICR_TYPER 0x0008 #define GICR_STATUSR GICD_STATUSR #define GICR_WAKER 0x0014 #define GICR_SETLPIR 0x0040 #define GICR_CLRLPIR 0x0048 #define GICR_SEIR GICD_SEIR #define GICR_PROPBASER 0x0070 #define GICR_PENDBASER 0x0078 #define GICR_INVLPIR 0x00A0 #define GICR_INVALLR 0x00B0 #define GICR_SYNCR 0x00C0 #define GICR_MOVLPIR 0x0100 #define GICR_MOVALLR 0x0110 #define GICR_IDREGS GICD_IDREGS #define GICR_PIDR2 GICD_PIDR2 #define GICR_CTLR_ENABLE_LPIS (1UL << 0) #define GICR_CTLR_RWP (1UL << 3) #define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) #define EPPI_BASE_INTID 1056 #define GICR_TYPER_NR_PPIS(r) \ ({ \ unsigned int __ppinum = ((r) >> 27) & 0x1f; \ unsigned int __nr_ppis = 16; \ if (__ppinum == 1 || __ppinum == 2) \ __nr_ppis += __ppinum * 32; \ \ __nr_ppis; \ }) #define GICR_WAKER_ProcessorSleep (1U << 1) #define GICR_WAKER_ChildrenAsleep (1U << 2) #define GIC_BASER_CACHE_nCnB 0ULL #define GIC_BASER_CACHE_SameAsInner 0ULL #define GIC_BASER_CACHE_nC 1ULL #define GIC_BASER_CACHE_RaWt 2ULL #define GIC_BASER_CACHE_RaWb 3ULL #define GIC_BASER_CACHE_WaWt 4ULL #define GIC_BASER_CACHE_WaWb 5ULL #define GIC_BASER_CACHE_RaWaWt 6ULL #define GIC_BASER_CACHE_RaWaWb 7ULL #define GIC_BASER_CACHE_MASK 7ULL #define GIC_BASER_NonShareable 0ULL #define GIC_BASER_InnerShareable 1ULL #define GIC_BASER_OuterShareable 2ULL #define GIC_BASER_SHAREABILITY_MASK 3ULL #define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) #define GIC_BASER_SHAREABILITY(reg, type) \ (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) /* encode a size field of width @w containing @n - 1 units */ #define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) #define GICR_PROPBASER_SHAREABILITY_SHIFT (10) #define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) #define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) #define GICR_PROPBASER_SHAREABILITY_MASK \ GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) #define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) #define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) #define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK #define GICR_PROPBASER_InnerShareable \ GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) #define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) #define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) #define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) #define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) #define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) #define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) #define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) #define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) #define GICR_PROPBASER_IDBITS_MASK (0x1f) #define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) #define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) 
#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) #define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) #define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) #define GICR_PENDBASER_SHAREABILITY_MASK \ GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) #define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) #define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) #define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK #define GICR_PENDBASER_InnerShareable \ GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) #define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) #define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) #define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) #define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) #define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) #define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) #define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) #define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) #define GICR_PENDBASER_PTZ BIT_ULL(62) /* * Re-Distributor registers, offsets from SGI_base */ #define GICR_IGROUPR0 GICD_IGROUPR #define GICR_ISENABLER0 GICD_ISENABLER #define GICR_ICENABLER0 GICD_ICENABLER #define GICR_ISPENDR0 GICD_ISPENDR #define GICR_ICPENDR0 GICD_ICPENDR #define GICR_ISACTIVER0 GICD_ISACTIVER #define GICR_ICACTIVER0 GICD_ICACTIVER #define GICR_IPRIORITYR0 GICD_IPRIORITYR #define GICR_ICFGR0 GICD_ICFGR #define GICR_IGRPMODR0 GICD_IGRPMODR #define GICR_NSACR GICD_NSACR #define GICR_TYPER_PLPIS (1U << 0) #define GICR_TYPER_VLPIS (1U << 1) #define GICR_TYPER_DirectLPIS (1U << 3) #define GICR_TYPER_LAST (1U << 4) #define GIC_V3_REDIST_SIZE 0x20000 #define LPI_PROP_GROUP1 (1 << 1) #define LPI_PROP_ENABLED (1 << 0) /* * Re-Distributor registers, offsets from VLPI_base */ #define GICR_VPROPBASER 0x0070 #define GICR_VPROPBASER_IDBITS_MASK 0x1f #define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) #define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) #define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) #define GICR_VPROPBASER_SHAREABILITY_MASK \ GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) #define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) #define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) #define GICR_VPROPBASER_CACHEABILITY_MASK \ GICR_VPROPBASER_INNER_CACHEABILITY_MASK #define GICR_VPROPBASER_InnerShareable \ GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) #define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) #define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) #define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) #define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb) #define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) #define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) #define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) #define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) #define GICR_VPENDBASER 0x0078 #define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) #define 
GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) #define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) #define GICR_VPENDBASER_SHAREABILITY_MASK \ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) #define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) #define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) #define GICR_VPENDBASER_CACHEABILITY_MASK \ GICR_VPENDBASER_INNER_CACHEABILITY_MASK #define GICR_VPENDBASER_NonShareable \ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) #define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) #define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) #define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) #define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb) #define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) #define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) #define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) #define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) #define GICR_VPENDBASER_Dirty (1ULL << 60) #define GICR_VPENDBASER_PendingLast (1ULL << 61) #define GICR_VPENDBASER_IDAI (1ULL << 62) #define GICR_VPENDBASER_Valid (1ULL << 63) /* * ITS registers, offsets from ITS_base */ #define GITS_CTLR 0x0000 #define GITS_IIDR 0x0004 #define GITS_TYPER 0x0008 #define GITS_CBASER 0x0080 #define GITS_CWRITER 0x0088 #define GITS_CREADR 0x0090 #define GITS_BASER 0x0100 #define GITS_IDREGS_BASE 0xffd0 #define GITS_PIDR0 0xffe0 #define GITS_PIDR1 0xffe4 #define GITS_PIDR2 GICR_PIDR2 #define GITS_PIDR4 0xffd0 #define GITS_CIDR0 0xfff0 #define GITS_CIDR1 0xfff4 #define GITS_CIDR2 0xfff8 #define GITS_CIDR3 0xfffc #define GITS_TRANSLATER 0x10040 #define GITS_CTLR_ENABLE (1U << 0) #define GITS_CTLR_ImDe (1U << 1) #define GITS_CTLR_ITS_NUMBER_SHIFT 4 #define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) #define GITS_CTLR_QUIESCENT (1U << 31) #define GITS_TYPER_PLPIS (1UL << 0) #define GITS_TYPER_VLPIS (1UL << 1) #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1) #define GITS_TYPER_IDBITS_SHIFT 8 #define GITS_TYPER_DEVBITS_SHIFT 13 #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) #define GITS_TYPER_PTA (1UL << 19) #define GITS_TYPER_HCC_SHIFT 24 #define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) #define GITS_TYPER_VMOVP (1ULL << 37) #define GITS_IIDR_REV_SHIFT 12 #define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) #define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) #define GITS_IIDR_PRODUCTID_SHIFT 24 #define GITS_CBASER_VALID (1ULL << 63) #define GITS_CBASER_SHAREABILITY_SHIFT (10) #define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) #define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) #define GITS_CBASER_SHAREABILITY_MASK \ GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) #define GITS_CBASER_INNER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) #define GITS_CBASER_OUTER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) #define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK #define GITS_CBASER_InnerShareable \ GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) #define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) 
#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) #define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) #define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) #define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) #define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) #define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) #define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) #define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12)) #define GITS_BASER_NR_REGS 8 #define GITS_BASER_VALID (1ULL << 63) #define GITS_BASER_INDIRECT (1ULL << 62) #define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) #define GITS_BASER_OUTER_CACHEABILITY_SHIFT (53) #define GITS_BASER_INNER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK) #define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK #define GITS_BASER_OUTER_CACHEABILITY_MASK \ GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK) #define GITS_BASER_SHAREABILITY_MASK \ GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK) #define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) #define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) #define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) #define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) #define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) #define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) #define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) #define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb) #define GITS_BASER_TYPE_SHIFT (56) #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) #define GITS_BASER_ENTRY_SIZE_SHIFT (48) #define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) #define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) #define GITS_BASER_PHYS_52_to_48(phys) \ (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) #define GITS_BASER_ADDR_48_to_52(baser) \ (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48) #define GITS_BASER_SHAREABILITY_SHIFT (10) #define GITS_BASER_InnerShareable \ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) #define GITS_BASER_PAGE_SIZE_SHIFT (8) #define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGES_MAX 256 #define GITS_BASER_PAGES_SHIFT (0) #define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) #define GITS_BASER_TYPE_NONE 0 #define GITS_BASER_TYPE_DEVICE 1 #define GITS_BASER_TYPE_VCPU 2 #define GITS_BASER_TYPE_RESERVED3 3 #define GITS_BASER_TYPE_COLLECTION 4 #define GITS_BASER_TYPE_RESERVED5 5 #define GITS_BASER_TYPE_RESERVED6 6 #define GITS_BASER_TYPE_RESERVED7 7 #define GITS_LVL1_ENTRY_SIZE (8UL) /* * ITS commands */ #define GITS_CMD_MAPD 0x08 #define GITS_CMD_MAPC 0x09 #define GITS_CMD_MAPTI 0x0a #define GITS_CMD_MAPI 0x0b #define GITS_CMD_MOVI 0x01 #define GITS_CMD_DISCARD 0x0f #define GITS_CMD_INV 0x0c #define GITS_CMD_MOVALL 0x0e #define GITS_CMD_INVALL 0x0d #define GITS_CMD_INT 0x03 #define GITS_CMD_CLEAR 0x04 #define GITS_CMD_SYNC 0x05 /* * GICv4 ITS specific commands */ #define GITS_CMD_GICv4(x) ((x) | 0x20) #define GITS_CMD_VINVALL 
GITS_CMD_GICv4(GITS_CMD_INVALL) #define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC) #define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) #define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) #define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) /* VMOVP is the odd one, as it doesn't have a physical counterpart */ #define GITS_CMD_VMOVP GITS_CMD_GICv4(2) /* * ITS error numbers */ #define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 #define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 #define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 #define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 #define E_ITS_MAPD_DEVICE_OOR 0x010801 #define E_ITS_MAPD_ITTSIZE_OOR 0x010802 #define E_ITS_MAPC_PROCNUM_OOR 0x010902 #define E_ITS_MAPC_COLLECTION_OOR 0x010903 #define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 #define E_ITS_MAPTI_ID_OOR 0x010a05 #define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 #define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 #define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 #define E_ITS_MOVALL_PROCNUM_OOR 0x010e01 #define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07 /* * CPU interface registers */ #define ICC_CTLR_EL1_EOImode_SHIFT (1) #define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT) #define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT) #define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT) #define ICC_CTLR_EL1_CBPR_SHIFT 0 #define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT) #define ICC_CTLR_EL1_PRI_BITS_SHIFT 8 #define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT) #define ICC_CTLR_EL1_ID_BITS_SHIFT 11 #define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT) #define ICC_CTLR_EL1_SEIS_SHIFT 14 #define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) #define ICC_CTLR_EL1_A3V_SHIFT 15 #define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) #define ICC_CTLR_EL1_RSS (0x1 << 18) #define ICC_CTLR_EL1_ExtRange (0x1 << 19) #define ICC_PMR_EL1_SHIFT 0 #define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) #define ICC_BPR0_EL1_SHIFT 0 #define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT) #define ICC_BPR1_EL1_SHIFT 0 #define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT) #define ICC_IGRPEN0_EL1_SHIFT 0 #define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) #define ICC_IGRPEN1_EL1_SHIFT 0 #define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) #define ICC_SRE_EL1_DIB (1U << 2) #define ICC_SRE_EL1_DFB (1U << 1) #define ICC_SRE_EL1_SRE (1U << 0) /* * Hypervisor interface registers (SRE only) */ #define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) #define ICH_LR_EOI (1ULL << 41) #define ICH_LR_GROUP (1ULL << 60) #define ICH_LR_HW (1ULL << 61) #define ICH_LR_STATE (3ULL << 62) #define ICH_LR_PENDING_BIT (1ULL << 62) #define ICH_LR_ACTIVE_BIT (1ULL << 63) #define ICH_LR_PHYS_ID_SHIFT 32 #define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) #define ICH_LR_PRIORITY_SHIFT 48 #define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT) /* These are for GICv2 emulation only */ #define GICH_LR_VIRTUALID (0x3ffUL << 0) #define GICH_LR_PHYSID_CPUID_SHIFT (10) #define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) #define ICH_MISR_EOI (1 << 0) #define ICH_MISR_U (1 << 1) #define ICH_HCR_EN (1 << 0) #define ICH_HCR_UIE (1 << 1) #define ICH_HCR_NPIE (1 << 3) #define ICH_HCR_TC (1 << 10) #define ICH_HCR_TALL0 (1 << 11) #define ICH_HCR_TALL1 (1 << 12) #define ICH_HCR_EOIcount_SHIFT 27 #define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) #define ICH_VMCR_ACK_CTL_SHIFT 2 #define ICH_VMCR_ACK_CTL_MASK (1 << 
ICH_VMCR_ACK_CTL_SHIFT) #define ICH_VMCR_FIQ_EN_SHIFT 3 #define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) #define ICH_VMCR_CBPR_SHIFT 4 #define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) #define ICH_VMCR_EOIM_SHIFT 9 #define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT) #define ICH_VMCR_BPR1_SHIFT 18 #define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) #define ICH_VMCR_BPR0_SHIFT 21 #define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) #define ICH_VMCR_PMR_SHIFT 24 #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) #define ICH_VMCR_ENG0_SHIFT 0 #define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT) #define ICH_VMCR_ENG1_SHIFT 1 #define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT) #define ICH_VTR_PRI_BITS_SHIFT 29 #define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT) #define ICH_VTR_ID_BITS_SHIFT 23 #define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT) #define ICH_VTR_SEIS_SHIFT 22 #define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) #define ICH_VTR_A3V_SHIFT 21 #define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) #define ICC_IAR1_EL1_SPURIOUS 0x3ff #define ICC_SRE_EL2_SRE (1 << 0) #define ICC_SRE_EL2_ENABLE (1 << 3) #define ICC_SGI1R_TARGET_LIST_SHIFT 0 #define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) #define ICC_SGI1R_AFFINITY_1_SHIFT 16 #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) #define ICC_SGI1R_SGI_ID_SHIFT 24 #define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) #define ICC_SGI1R_AFFINITY_2_SHIFT 32 #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 #define ICC_SGI1R_RS_SHIFT 44 #define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) #define ICC_SGI1R_AFFINITY_3_SHIFT 48 #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) #include <asm/arch_gicv3.h> #ifndef __ASSEMBLY__ /* * We need a value to serve as a irq-type for LPIs. Choose one that will * hopefully pique the interest of the reviewer. */ #define GIC_IRQ_TYPE_LPI 0xa110c8ed struct rdists { struct { void __iomem *rd_base; struct page *pend_page; phys_addr_t phys_base; bool lpi_enabled; } __percpu *rdist; phys_addr_t prop_table_pa; void *prop_table_va; u64 flags; u32 gicd_typer; bool has_vlpis; bool has_direct_lpi; }; struct irq_domain; struct fwnode_handle; int its_cpu_init(void); int its_init(struct fwnode_handle *handle, struct rdists *rdists, struct irq_domain *domain); int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent); static inline bool gic_enable_sre(void) { u32 val; val = gic_read_sre(); if (val & ICC_SRE_EL1_SRE) return true; val |= ICC_SRE_EL1_SRE; gic_write_sre(val); val = gic_read_sre(); return !!(val & ICC_SRE_EL1_SRE); } #endif #endif irqchip/ingenic.h 0000644 00000000527 14722070374 0010002 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> */ #ifndef __LINUX_IRQCHIP_INGENIC_H__ #define __LINUX_IRQCHIP_INGENIC_H__ #include <linux/irq.h> extern void ingenic_intc_irq_suspend(struct irq_data *data); extern void ingenic_intc_irq_resume(struct irq_data *data); #endif irqchip/irq-partition-percpu.h 0000644 00000002513 14722070374 0012461 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 ARM Limited, All Rights Reserved. 
* Author: Marc Zyngier <marc.zyngier@arm.com> */ #ifndef __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H #define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H #include <linux/fwnode.h> #include <linux/cpumask.h> #include <linux/irqdomain.h> struct partition_affinity { cpumask_t mask; void *partition_id; }; struct partition_desc; #ifdef CONFIG_PARTITION_PERCPU int partition_translate_id(struct partition_desc *desc, void *partition_id); struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode, struct partition_affinity *parts, int nr_parts, int chained_irq, const struct irq_domain_ops *ops); struct irq_domain *partition_get_domain(struct partition_desc *dsc); #else static inline int partition_translate_id(struct partition_desc *desc, void *partition_id) { return -EINVAL; } static inline struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode, struct partition_affinity *parts, int nr_parts, int chained_irq, const struct irq_domain_ops *ops) { return NULL; } static inline struct irq_domain *partition_get_domain(struct partition_desc *dsc) { return NULL; } #endif #endif /* __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H */ irqchip/irq-davinci-aintc.h 0000644 00000001242 14722070374 0011663 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2019 Texas Instruments */ #ifndef _LINUX_IRQ_DAVINCI_AINTC_ #define _LINUX_IRQ_DAVINCI_AINTC_ #include <linux/ioport.h> /** * struct davinci_aintc_config - configuration data for davinci-aintc driver. * * @reg: register range to map * @num_irqs: number of HW interrupts supported by the controller * @prios: an array of size num_irqs containing priority settings for * each interrupt */ struct davinci_aintc_config { struct resource reg; unsigned int num_irqs; u8 *prios; }; void davinci_aintc_init(const struct davinci_aintc_config *config); #endif /* _LINUX_IRQ_DAVINCI_AINTC_ */ irqchip/versatile-fpga.h 0000644 00000000541 14722070374 0011273 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef PLAT_FPGA_IRQ_H #define PLAT_FPGA_IRQ_H struct device_node; struct pt_regs; void fpga_handle_irq(struct pt_regs *regs); void fpga_irq_init(void __iomem *, const char *, int, int, u32, struct device_node *node); int fpga_irq_of_init(struct device_node *node, struct device_node *parent); #endif irqchip/arm-vic.h 0000644 00000001372 14722070374 0007723 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * arch/arm/include/asm/hardware/vic.h * * Copyright (c) ARM Limited 2003. All rights reserved. */ #ifndef __ASM_ARM_HARDWARE_VIC_H #define __ASM_ARM_HARDWARE_VIC_H #include <linux/types.h> #define VIC_RAW_STATUS 0x08 #define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */ #define VIC_INT_ENABLE_CLEAR 0x14 struct device_node; struct pt_regs; void __vic_init(void __iomem *base, int parent_irq, int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); int vic_init_cascaded(void __iomem *base, unsigned int parent_irq, u32 vic_sources, u32 resume_sources); #endif irqchip/xtensa-pic.h 0000644 00000001024 14722070374 0010432 0 ustar 00 /* * Xtensa built-in interrupt controller * * Copyright (C) 2002 - 2013 Tensilica, Inc. * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. 
*/ #ifndef __LINUX_IRQCHIP_XTENSA_PIC_H #define __LINUX_IRQCHIP_XTENSA_PIC_H struct device_node; int xtensa_pic_init_legacy(struct device_node *interrupt_parent); #endif /* __LINUX_IRQCHIP_XTENSA_PIC_H */ firmware-map.h 0000644 00000001677 14722070374 0007325 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/firmware-map.h: * Copyright (C) 2008 SUSE LINUX Products GmbH * by Bernhard Walle <bernhard.walle@gmx.de> */ #ifndef _LINUX_FIRMWARE_MAP_H #define _LINUX_FIRMWARE_MAP_H #include <linux/list.h> /* * provide a dummy interface if CONFIG_FIRMWARE_MEMMAP is disabled */ #ifdef CONFIG_FIRMWARE_MEMMAP int firmware_map_add_early(u64 start, u64 end, const char *type); int firmware_map_add_hotplug(u64 start, u64 end, const char *type); int firmware_map_remove(u64 start, u64 end, const char *type); #else /* CONFIG_FIRMWARE_MEMMAP */ static inline int firmware_map_add_early(u64 start, u64 end, const char *type) { return 0; } static inline int firmware_map_add_hotplug(u64 start, u64 end, const char *type) { return 0; } static inline int firmware_map_remove(u64 start, u64 end, const char *type) { return 0; } #endif /* CONFIG_FIRMWARE_MEMMAP */ #endif /* _LINUX_FIRMWARE_MAP_H */ parser.h 0000644 00000002050 14722070374 0006214 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/parser.h * * Header for lib/parser.c * Intended use of these functions is parsing filesystem argument lists, * but could potentially be used anywhere else that simple option=arg * parsing is required. */ /* associates an integer enumerator with a pattern string. */ struct match_token { int token; const char *pattern; }; typedef struct match_token match_table_t[]; /* Maximum number of arguments that match_token will find in a pattern */ enum {MAX_OPT_ARGS = 3}; /* Describe the location within a string of a substring */ typedef struct { char *from; char *to; } substring_t; int match_token(char *, const match_table_t table, substring_t args[]); int match_int(substring_t *, int *result); int match_u64(substring_t *, u64 *result); int match_octal(substring_t *, int *result); int match_hex(substring_t *, int *result); bool match_wildcard(const char *pattern, const char *str); size_t match_strlcpy(char *, const substring_t *, size_t); char *match_strdup(const substring_t *); maple.h 0000644 00000005327 14722070374 0006030 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MAPLE_H #define __LINUX_MAPLE_H #include <mach/maple.h> struct device; extern struct bus_type maple_bus_type; /* Maple Bus command and response codes */ enum maple_code { MAPLE_RESPONSE_FILEERR = -5, MAPLE_RESPONSE_AGAIN, /* retransmit */ MAPLE_RESPONSE_BADCMD, MAPLE_RESPONSE_BADFUNC, MAPLE_RESPONSE_NONE, /* unit didn't respond*/ MAPLE_COMMAND_DEVINFO = 1, MAPLE_COMMAND_ALLINFO, MAPLE_COMMAND_RESET, MAPLE_COMMAND_KILL, MAPLE_RESPONSE_DEVINFO, MAPLE_RESPONSE_ALLINFO, MAPLE_RESPONSE_OK, MAPLE_RESPONSE_DATATRF, MAPLE_COMMAND_GETCOND, MAPLE_COMMAND_GETMINFO, MAPLE_COMMAND_BREAD, MAPLE_COMMAND_BWRITE, MAPLE_COMMAND_BSYNC, MAPLE_COMMAND_SETCOND, MAPLE_COMMAND_MICCONTROL }; enum maple_file_errors { MAPLE_FILEERR_INVALID_PARTITION = 0x01000000, MAPLE_FILEERR_PHASE_ERROR = 0x02000000, MAPLE_FILEERR_INVALID_BLOCK = 0x04000000, MAPLE_FILEERR_WRITE_ERROR = 0x08000000, MAPLE_FILEERR_INVALID_WRITE_LENGTH = 0x10000000, MAPLE_FILEERR_BAD_CRC = 0x20000000 }; struct maple_buffer { char bufx[0x400]; void *buf; }; struct mapleq { struct list_head list; struct maple_device *dev; struct maple_buffer *recvbuf; void *sendbuf, 
*recvbuf_p2; unsigned char length; enum maple_code command; }; struct maple_devinfo { unsigned long function; unsigned long function_data[3]; unsigned char area_code; unsigned char connector_direction; char product_name[31]; char product_licence[61]; unsigned short standby_power; unsigned short max_power; }; struct maple_device { struct maple_driver *driver; struct mapleq *mq; void (*callback) (struct mapleq * mq); void (*fileerr_handler)(struct maple_device *mdev, void *recvbuf); int (*can_unload)(struct maple_device *mdev); unsigned long when, interval, function; struct maple_devinfo devinfo; unsigned char port, unit; char product_name[32]; char product_licence[64]; atomic_t busy; wait_queue_head_t maple_wait; struct device dev; }; struct maple_driver { unsigned long function; struct device_driver drv; }; void maple_getcond_callback(struct maple_device *dev, void (*callback) (struct mapleq * mq), unsigned long interval, unsigned long function); int maple_driver_register(struct maple_driver *); void maple_driver_unregister(struct maple_driver *); int maple_add_packet(struct maple_device *mdev, u32 function, u32 command, u32 length, void *data); void maple_clear_dev(struct maple_device *mdev); #define to_maple_dev(n) container_of(n, struct maple_device, dev) #define to_maple_driver(n) container_of(n, struct maple_driver, drv) #define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev) #define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) #endif /* __LINUX_MAPLE_H */ remoteproc/qcom_rproc.h 0000644 00000000654 14722070374 0011253 0 ustar 00 #ifndef __QCOM_RPROC_H__ #define __QCOM_RPROC_H__ struct notifier_block; #if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON) int qcom_register_ssr_notifier(struct notifier_block *nb); void qcom_unregister_ssr_notifier(struct notifier_block *nb); #else static inline int qcom_register_ssr_notifier(struct notifier_block *nb) { return 0; } static inline void qcom_unregister_ssr_notifier(struct notifier_block *nb) {} #endif #endif remoteproc/st_slim_rproc.h 0000644 00000002254 14722070374 0011764 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * SLIM core rproc driver header * * Copyright (C) 2016 STMicroelectronics * * Author: Peter Griffin <peter.griffin@linaro.org> */ #ifndef _ST_REMOTEPROC_SLIM_H #define _ST_REMOTEPROC_SLIM_H #define ST_SLIM_MEM_MAX 2 #define ST_SLIM_MAX_CLK 4 enum { ST_SLIM_DMEM, ST_SLIM_IMEM, }; /** * struct st_slim_mem - slim internal memory structure * @cpu_addr: MPU virtual address of the memory region * @bus_addr: Bus address used to access the memory region * @size: Size of the memory region */ struct st_slim_mem { void __iomem *cpu_addr; phys_addr_t bus_addr; size_t size; }; /** * struct st_slim_rproc - SLIM slim core * @rproc: rproc handle * @mem: slim memory information * @slimcore: slim slimcore regs * @peri: slim peripheral regs * @clks: slim clocks */ struct st_slim_rproc { struct rproc *rproc; struct st_slim_mem mem[ST_SLIM_MEM_MAX]; void __iomem *slimcore; void __iomem *peri; /* st_slim_rproc private */ struct clk *clks[ST_SLIM_MAX_CLK]; }; struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, char *fw_name); void st_slim_rproc_put(struct st_slim_rproc *slim_rproc); #endif error-injection.h 0000644 00000001137 14722070374 0010036 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ERROR_INJECTION_H #define _LINUX_ERROR_INJECTION_H #include <linux/compiler.h> #include <asm-generic/error-injection.h> #ifdef CONFIG_FUNCTION_ERROR_INJECTION extern bool within_error_injection_list(unsigned 
long addr); extern int get_injectable_error_type(unsigned long addr); #else /* !CONFIG_FUNCTION_ERROR_INJECTION */ static inline bool within_error_injection_list(unsigned long addr) { return false; } static inline int get_injectable_error_type(unsigned long addr) { return EI_ETYPE_NONE; } #endif #endif /* _LINUX_ERROR_INJECTION_H */ path.h 0000644 00000001074 14722070374 0005661 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PATH_H #define _LINUX_PATH_H struct dentry; struct vfsmount; struct path { struct vfsmount *mnt; struct dentry *dentry; } __randomize_layout; extern void path_get(const struct path *); extern void path_put(const struct path *); static inline int path_equal(const struct path *path1, const struct path *path2) { return path1->mnt == path2->mnt && path1->dentry == path2->dentry; } static inline void path_put_init(struct path *path) { path_put(path); *path = (struct path) { }; } #endif /* _LINUX_PATH_H */ pm_runtime.h 0000644 00000023071 14722070374 0007105 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * pm_runtime.h - Device run-time power management helper functions. * * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl> */ #ifndef _LINUX_PM_RUNTIME_H #define _LINUX_PM_RUNTIME_H #include <linux/device.h> #include <linux/notifier.h> #include <linux/pm.h> #include <linux/jiffies.h> /* Runtime PM flag argument bits */ #define RPM_ASYNC 0x01 /* Request is asynchronous */ #define RPM_NOWAIT 0x02 /* Don't wait for concurrent state change */ #define RPM_GET_PUT 0x04 /* Increment/decrement the usage_count */ #define RPM_AUTO 0x08 /* Use autosuspend_delay */ #ifdef CONFIG_PM extern struct workqueue_struct *pm_wq; static inline bool queue_pm_work(struct work_struct *work) { return queue_work(pm_wq, work); } extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); extern int pm_runtime_force_resume(struct device *dev); extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); extern int __pm_runtime_resume(struct device *dev, int rpmflags); extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count); extern int pm_schedule_suspend(struct device *dev, unsigned int delay); extern int __pm_runtime_set_status(struct device *dev, unsigned int status); extern int pm_runtime_barrier(struct device *dev); extern void pm_runtime_enable(struct device *dev); extern void __pm_runtime_disable(struct device *dev, bool check_resume); extern void pm_runtime_allow(struct device *dev); extern void pm_runtime_forbid(struct device *dev); extern void pm_runtime_no_callbacks(struct device *dev); extern void pm_runtime_irq_safe(struct device *dev); extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); extern u64 pm_runtime_autosuspend_expiration(struct device *dev); extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); extern void pm_runtime_get_suppliers(struct device *dev); extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); static inline int pm_runtime_get_if_in_use(struct device *dev) { return pm_runtime_get_if_active(dev, false); } extern int 
devm_pm_runtime_enable(struct device *dev); static inline void pm_suspend_ignore_children(struct device *dev, bool enable) { dev->power.ignore_children = enable; } static inline void pm_runtime_get_noresume(struct device *dev) { atomic_inc(&dev->power.usage_count); } static inline void pm_runtime_put_noidle(struct device *dev) { atomic_add_unless(&dev->power.usage_count, -1, 0); } static inline bool pm_runtime_suspended(struct device *dev) { return dev->power.runtime_status == RPM_SUSPENDED && !dev->power.disable_depth; } static inline bool pm_runtime_active(struct device *dev) { return dev->power.runtime_status == RPM_ACTIVE || dev->power.disable_depth; } static inline bool pm_runtime_status_suspended(struct device *dev) { return dev->power.runtime_status == RPM_SUSPENDED; } static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; } static inline bool pm_runtime_callbacks_present(struct device *dev) { return !dev->power.no_callbacks; } static inline void pm_runtime_mark_last_busy(struct device *dev) { WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns()); } static inline bool pm_runtime_is_irq_safe(struct device *dev) { return dev->power.irq_safe; } extern u64 pm_runtime_suspended_time(struct device *dev); #else /* !CONFIG_PM */ static inline bool queue_pm_work(struct work_struct *work) { return false; } static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } static inline int pm_runtime_force_resume(struct device *dev) { return 0; } static inline int __pm_runtime_idle(struct device *dev, int rpmflags) { return -ENOSYS; } static inline int __pm_runtime_suspend(struct device *dev, int rpmflags) { return -ENOSYS; } static inline int __pm_runtime_resume(struct device *dev, int rpmflags) { return 1; } static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) { return -ENOSYS; } static inline int pm_runtime_get_if_in_use(struct device *dev) { return -EINVAL; } static inline int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) { return -EINVAL; } static inline int __pm_runtime_set_status(struct device *dev, unsigned int status) { return 0; } static inline int pm_runtime_barrier(struct device *dev) { return 0; } static inline void pm_runtime_enable(struct device *dev) {} static inline void __pm_runtime_disable(struct device *dev, bool c) {} static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} static inline int devm_pm_runtime_enable(struct device *dev) { return 0; } static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} static inline bool pm_runtime_suspended(struct device *dev) { return false; } static inline bool pm_runtime_active(struct device *dev) { return true; } static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } static inline bool pm_runtime_enabled(struct device *dev) { return false; } static inline void pm_runtime_no_callbacks(struct device *dev) {} static inline void pm_runtime_irq_safe(struct device *dev) {} static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } static 
inline void pm_runtime_mark_last_busy(struct device *dev) {} static inline void __pm_runtime_use_autosuspend(struct device *dev, bool use) {} static inline void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) {} static inline u64 pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} static inline void pm_runtime_get_suppliers(struct device *dev) {} static inline void pm_runtime_put_suppliers(struct device *dev) {} static inline void pm_runtime_new_link(struct device *dev) {} static inline void pm_runtime_drop_link(struct device_link *link) {} #endif /* !CONFIG_PM */ static inline int pm_runtime_idle(struct device *dev) { return __pm_runtime_idle(dev, 0); } static inline int pm_runtime_suspend(struct device *dev) { return __pm_runtime_suspend(dev, 0); } static inline int pm_runtime_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_AUTO); } static inline int pm_runtime_resume(struct device *dev) { return __pm_runtime_resume(dev, 0); } static inline int pm_request_idle(struct device *dev) { return __pm_runtime_idle(dev, RPM_ASYNC); } static inline int pm_request_resume(struct device *dev) { return __pm_runtime_resume(dev, RPM_ASYNC); } static inline int pm_request_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); } static inline int pm_runtime_get(struct device *dev) { return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); } static inline int pm_runtime_get_sync(struct device *dev) { return __pm_runtime_resume(dev, RPM_GET_PUT); } /** * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it. * @dev: Target device. * * Resume @dev synchronously and if that is successful, increment its runtime * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been * incremented or a negative error code otherwise. */ static inline int pm_runtime_resume_and_get(struct device *dev) { int ret; ret = __pm_runtime_resume(dev, RPM_GET_PUT); if (ret < 0) { pm_runtime_put_noidle(dev); return ret; } return 0; } static inline int pm_runtime_put(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); } static inline int pm_runtime_put_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); } static inline int pm_runtime_put_sync(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT); } static inline int pm_runtime_put_sync_suspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT); } static inline int pm_runtime_put_sync_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); } static inline int pm_runtime_set_active(struct device *dev) { return __pm_runtime_set_status(dev, RPM_ACTIVE); } static inline int pm_runtime_set_suspended(struct device *dev) { return __pm_runtime_set_status(dev, RPM_SUSPENDED); } static inline void pm_runtime_disable(struct device *dev) { __pm_runtime_disable(dev, true); } /** * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend() * at driver exit time unless your driver initially enabled pm_runtime * with devm_pm_runtime_enable() (which handles it for you). 
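 *
 * A minimal probe()/remove() pairing, sketched with a generic "dev"
 * (illustrative only, not lifted from a real driver):
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// e.g. 2 s idle window
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *	...
 *	pm_runtime_disable(dev);
 *	pm_runtime_dont_use_autosuspend(dev);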
*/ static inline void pm_runtime_use_autosuspend(struct device *dev) { __pm_runtime_use_autosuspend(dev, true); } static inline void pm_runtime_dont_use_autosuspend(struct device *dev) { __pm_runtime_use_autosuspend(dev, false); } #endif bio.h 0000644 00000051325 14722070374 0005502 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2001 Jens Axboe <axboe@suse.de> */ #ifndef __LINUX_BIO_H #define __LINUX_BIO_H #include <linux/highmem.h> #include <linux/mempool.h> #include <linux/ioprio.h> #ifdef CONFIG_BLOCK /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include <linux/blk_types.h> #define BIO_DEBUG #ifdef BIO_DEBUG #define BIO_BUG_ON BUG_ON #else #define BIO_BUG_ON #endif #define BIO_MAX_PAGES 256 #define bio_prio(bio) (bio)->bi_ioprio #define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) #define bio_iter_iovec(bio, iter) \ bvec_iter_bvec((bio)->bi_io_vec, (iter)) #define bio_iter_page(bio, iter) \ bvec_iter_page((bio)->bi_io_vec, (iter)) #define bio_iter_len(bio, iter) \ bvec_iter_len((bio)->bi_io_vec, (iter)) #define bio_iter_offset(bio, iter) \ bvec_iter_offset((bio)->bi_io_vec, (iter)) #define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter) #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) #define bvec_iter_sectors(iter) ((iter).bi_size >> 9) #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter))) #define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter) #define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter) /* * Return the data direction, READ or WRITE. */ #define bio_data_dir(bio) \ (op_is_write(bio_op(bio)) ? WRITE : READ) /* * Check whether this bio carries any data or not. A NULL bio is allowed. 
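 *
 * Illustrative use, where process_segment() is a hypothetical per-segment
 * helper:
 *
 *	if (bio_has_data(bio))
 *		bio_for_each_segment(bv, bio, iter)
 *			process_segment(&bv);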
*/ static inline bool bio_has_data(struct bio *bio) { if (bio && bio->bi_iter.bi_size && bio_op(bio) != REQ_OP_DISCARD && bio_op(bio) != REQ_OP_SECURE_ERASE && bio_op(bio) != REQ_OP_WRITE_ZEROES) return true; return false; } static inline bool bio_no_advance_iter(struct bio *bio) { return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE || bio_op(bio) == REQ_OP_WRITE_SAME || bio_op(bio) == REQ_OP_WRITE_ZEROES; } static inline bool bio_mergeable(struct bio *bio) { if (bio->bi_opf & REQ_NOMERGE_FLAGS) return false; return true; } static inline unsigned int bio_cur_bytes(struct bio *bio) { if (bio_has_data(bio)) return bio_iovec(bio).bv_len; else /* dataless requests such as discard */ return bio->bi_iter.bi_size; } static inline void *bio_data(struct bio *bio) { if (bio_has_data(bio)) return page_address(bio_page(bio)) + bio_offset(bio); return NULL; } /** * bio_full - check if the bio is full * @bio: bio to check * @len: length of one segment to be added * * Return true if @bio is full and one segment with @len bytes can't be * added to the bio, otherwise return false */ static inline bool bio_full(struct bio *bio, unsigned len) { if (bio->bi_vcnt >= bio->bi_max_vecs) return true; if (bio->bi_iter.bi_size > UINT_MAX - len) return true; return false; } static inline bool bio_next_segment(const struct bio *bio, struct bvec_iter_all *iter) { if (iter->idx >= bio->bi_vcnt) return false; bvec_advance(&bio->bi_io_vec[iter->idx], iter); return true; } /* * drivers should _never_ use the all version - the bio may have been split * before it got to the driver and the driver won't own all of it */ #define bio_for_each_segment_all(bvl, bio, iter) \ for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); ) static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, unsigned bytes) { iter->bi_sector += bytes >> 9; if (bio_no_advance_iter(bio)) iter->bi_size -= bytes; else bvec_iter_advance(bio->bi_io_vec, iter, bytes); /* TODO: It is reasonable to complete bio with error here. */ } #define __bio_for_each_segment(bvl, bio, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bio_iter_iovec((bio), (iter))), 1); \ bio_advance_iter((bio), &(iter), (bvl).bv_len)) #define bio_for_each_segment(bvl, bio, iter) \ __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) #define __bio_for_each_bvec(bvl, bio, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \ bio_advance_iter((bio), &(iter), (bvl).bv_len)) /* iterate over multi-page bvec */ #define bio_for_each_bvec(bvl, bio, iter) \ __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter) #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) static inline unsigned bio_segments(struct bio *bio) { unsigned segs = 0; struct bio_vec bv; struct bvec_iter iter; /* * We special case discard/write same/write zeroes, because they * interpret bi_size differently: */ switch (bio_op(bio)) { case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: case REQ_OP_WRITE_ZEROES: return 0; case REQ_OP_WRITE_SAME: return 1; default: break; } bio_for_each_segment(bv, bio, iter) segs++; return segs; } /* * get a reference to a bio, so it won't disappear. the intended use is * something like: * * bio_get(bio); * submit_bio(rw, bio); * if (bio->bi_flags ...) * do_something * bio_put(bio); * * without the bio_get(), it could potentially complete I/O before submit_bio * returns. and then bio would be freed memory when if (bio->bi_flags ...) 
* runs */ static inline void bio_get(struct bio *bio) { bio->bi_flags |= (1 << BIO_REFFED); smp_mb__before_atomic(); atomic_inc(&bio->__bi_cnt); } static inline void bio_cnt_set(struct bio *bio, unsigned int count) { if (count != 1) { bio->bi_flags |= (1 << BIO_REFFED); smp_mb(); } atomic_set(&bio->__bi_cnt, count); } static inline bool bio_flagged(struct bio *bio, unsigned int bit) { return (bio->bi_flags & (1U << bit)) != 0; } static inline void bio_set_flag(struct bio *bio, unsigned int bit) { bio->bi_flags |= (1U << bit); } static inline void bio_clear_flag(struct bio *bio, unsigned int bit) { bio->bi_flags &= ~(1U << bit); } static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) { *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); } static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) { struct bvec_iter iter = bio->bi_iter; int idx; bio_get_first_bvec(bio, bv); if (bv->bv_len == bio->bi_iter.bi_size) return; /* this bio only has a single bvec */ bio_advance_iter(bio, &iter, iter.bi_size); if (!iter.bi_bvec_done) idx = iter.bi_idx - 1; else /* in the middle of bvec */ idx = iter.bi_idx; *bv = bio->bi_io_vec[idx]; /* * iter.bi_bvec_done records actual length of the last bvec * if this bio ends in the middle of one io vector */ if (iter.bi_bvec_done) bv->bv_len = iter.bi_bvec_done; } static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); return bio->bi_io_vec; } static inline struct page *bio_first_page_all(struct bio *bio) { return bio_first_bvec_all(bio)->bv_page; } static inline struct bio_vec *bio_last_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); return &bio->bi_io_vec[bio->bi_vcnt - 1]; } enum bip_flags { BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */ BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */ BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */ }; /* * bio integrity payload */ struct bio_integrity_payload { struct bio *bip_bio; /* parent bio */ struct bvec_iter bip_iter; unsigned short bip_slab; /* slab the bip came from */ unsigned short bip_vcnt; /* # of integrity bio_vecs */ unsigned short bip_max_vcnt; /* integrity bio_vec slots */ unsigned short bip_flags; /* control flags */ struct bvec_iter bio_iter; /* for rewinding parent bio */ struct work_struct bip_work; /* I/O completion */ struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ }; #if defined(CONFIG_BLK_DEV_INTEGRITY) static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) { if (bio->bi_opf & REQ_INTEGRITY) return bio->bi_integrity; return NULL; } static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) { struct bio_integrity_payload *bip = bio_integrity(bio); if (bip) return bip->bip_flags & flag; return false; } static inline sector_t bip_get_seed(struct bio_integrity_payload *bip) { return bip->bip_iter.bi_sector; } static inline void bip_set_seed(struct bio_integrity_payload *bip, sector_t seed) { bip->bip_iter.bi_sector = seed; } #endif /* CONFIG_BLK_DEV_INTEGRITY */ extern void bio_trim(struct bio *bio, int offset, int size); extern struct bio *bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs); /** * bio_next_split - get next @sectors from a bio, splitting if necessary * @bio: bio to split * @sectors: number of sectors to split from the 
front of @bio * @gfp: gfp mask * @bs: bio set to allocate from * * Returns a bio representing the next @sectors of @bio - if the bio is smaller * than @sectors, returns the original bio unchanged. */ static inline struct bio *bio_next_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs) { if (sectors >= bio_sectors(bio)) return bio; return bio_split(bio, sectors, gfp, bs); } enum { BIOSET_NEED_BVECS = BIT(0), BIOSET_NEED_RESCUER = BIT(1), }; extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); extern void bioset_exit(struct bio_set *); extern int biovec_init_pool(mempool_t *pool, int pool_entries); extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *); extern void bio_put(struct bio *); extern void __bio_clone_fast(struct bio *, struct bio *); extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); extern struct bio_set fs_bio_set; static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) { return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); } static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) { return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL); } extern blk_qc_t submit_bio(struct bio *); extern void bio_endio(struct bio *); static inline void bio_io_error(struct bio *bio) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); } static inline void bio_wouldblock_error(struct bio *bio) { bio->bi_status = BLK_STS_AGAIN; bio_endio(bio); } struct request_queue; extern int submit_bio_wait(struct bio *bio); extern void bio_advance(struct bio *, unsigned); extern void bio_init(struct bio *bio, struct bio_vec *table, unsigned short max_vecs); extern void bio_uninit(struct bio *); extern void bio_reset(struct bio *); void bio_chain(struct bio *, struct bio *); extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); bool __bio_try_merge_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off, bool *same_page); void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); void bio_release_pages(struct bio *bio, bool mark_dirty); struct rq_map_data; extern struct bio *bio_map_user_iov(struct request_queue *, struct iov_iter *, gfp_t); extern void bio_unmap_user(struct bio *); extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, gfp_t); extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, gfp_t, int); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); void generic_start_io_acct(struct request_queue *q, int op, unsigned long sectors, struct hd_struct *part); void generic_end_io_acct(struct request_queue *q, int op, struct hd_struct *part, unsigned long start_time); extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter); extern void bio_copy_data(struct bio *dst, struct bio *src); extern void bio_list_copy_data(struct bio *dst, struct bio *src); extern void bio_free_pages(struct bio *bio); extern struct bio *bio_copy_user_iov(struct request_queue *, struct rq_map_data *, struct iov_iter *, gfp_t); extern int bio_uncopy_user(struct bio *); void zero_fill_bio_iter(struct bio *bio, struct 
bvec_iter iter); void bio_truncate(struct bio *bio, unsigned new_size); static inline void zero_fill_bio(struct bio *bio) { zero_fill_bio_iter(bio, bio->bi_iter); } extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); extern unsigned int bvec_nr_vecs(unsigned short idx); extern const char *bio_devname(struct bio *bio, char *buffer); #define bio_set_dev(bio, bdev) \ do { \ if ((bio)->bi_disk != (bdev)->bd_disk) \ bio_clear_flag(bio, BIO_THROTTLED);\ (bio)->bi_disk = (bdev)->bd_disk; \ (bio)->bi_partno = (bdev)->bd_partno; \ bio_associate_blkg(bio); \ } while (0) #define bio_copy_dev(dst, src) \ do { \ (dst)->bi_disk = (src)->bi_disk; \ (dst)->bi_partno = (src)->bi_partno; \ bio_clone_blkg_association(dst, src); \ } while (0) #define bio_dev(bio) \ disk_devt((bio)->bi_disk) #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) void bio_associate_blkg_from_page(struct bio *bio, struct page *page); #else static inline void bio_associate_blkg_from_page(struct bio *bio, struct page *page) { } #endif #ifdef CONFIG_BLK_CGROUP void bio_disassociate_blkg(struct bio *bio); void bio_associate_blkg(struct bio *bio); void bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css); void bio_clone_blkg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ static inline void bio_disassociate_blkg(struct bio *bio) { } static inline void bio_associate_blkg(struct bio *bio) { } static inline void bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css) { } static inline void bio_clone_blkg_association(struct bio *dst, struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ #ifdef CONFIG_HIGHMEM /* * remember never ever reenable interrupts between a bvec_kmap_irq and * bvec_kunmap_irq! */ static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) { unsigned long addr; /* * might not be a highmem page, but the preempt/irq count * balancing is a lot nicer this way */ local_irq_save(*flags); addr = (unsigned long) kmap_atomic(bvec->bv_page); BUG_ON(addr & ~PAGE_MASK); return (char *) addr + bvec->bv_offset; } static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) { unsigned long ptr = (unsigned long) buffer & PAGE_MASK; kunmap_atomic((void *) ptr); local_irq_restore(*flags); } #else static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) { return page_address(bvec->bv_page) + bvec->bv_offset; } static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) { *flags = 0; } #endif /* * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. * * A bio_list anchors a singly-linked list of bios chained through the bi_next * member of the bio. The bio_list also caches the last list member to allow * fast access to the tail. 
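 *
 * Typical FIFO usage, as a sketch (the bios themselves come from whatever
 * context is doing the remapping):
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, bio);	// queue in arrival order
 *	...
 *	while ((bio = bio_list_pop(&list)))
 *		submit_bio(bio);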
*/ struct bio_list { struct bio *head; struct bio *tail; }; static inline int bio_list_empty(const struct bio_list *bl) { return bl->head == NULL; } static inline void bio_list_init(struct bio_list *bl) { bl->head = bl->tail = NULL; } #define BIO_EMPTY_LIST { NULL, NULL } #define bio_list_for_each(bio, bl) \ for (bio = (bl)->head; bio; bio = bio->bi_next) static inline unsigned bio_list_size(const struct bio_list *bl) { unsigned sz = 0; struct bio *bio; bio_list_for_each(bio, bl) sz++; return sz; } static inline void bio_list_add(struct bio_list *bl, struct bio *bio) { bio->bi_next = NULL; if (bl->tail) bl->tail->bi_next = bio; else bl->head = bio; bl->tail = bio; } static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) { bio->bi_next = bl->head; bl->head = bio; if (!bl->tail) bl->tail = bio; } static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) { if (!bl2->head) return; if (bl->tail) bl->tail->bi_next = bl2->head; else bl->head = bl2->head; bl->tail = bl2->tail; } static inline void bio_list_merge_head(struct bio_list *bl, struct bio_list *bl2) { if (!bl2->head) return; if (bl->head) bl2->tail->bi_next = bl->head; else bl->tail = bl2->tail; bl->head = bl2->head; } static inline struct bio *bio_list_peek(struct bio_list *bl) { return bl->head; } static inline struct bio *bio_list_pop(struct bio_list *bl) { struct bio *bio = bl->head; if (bio) { bl->head = bl->head->bi_next; if (!bl->head) bl->tail = NULL; bio->bi_next = NULL; } return bio; } static inline struct bio *bio_list_get(struct bio_list *bl) { struct bio *bio = bl->head; bl->head = bl->tail = NULL; return bio; } /* * Increment chain count for the bio. Make sure the CHAIN flag update * is visible before the raised count. */ static inline void bio_inc_remaining(struct bio *bio) { bio_set_flag(bio, BIO_CHAIN); smp_mb__before_atomic(); atomic_inc(&bio->__bi_remaining); } /* * bio_set is used to allow other portions of the IO system to * allocate their own private memory pools for bio and iovec structures. * These memory pools in turn all allocate from the bio_slab * and the bvec_slabs[]. */ #define BIO_POOL_SIZE 2 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; mempool_t bio_pool; mempool_t bvec_pool; #if defined(CONFIG_BLK_DEV_INTEGRITY) mempool_t bio_integrity_pool; mempool_t bvec_integrity_pool; #endif /* * Deadlock avoidance for stacking block drivers: see comments in * bio_alloc_bioset() for details */ spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; }; struct biovec_slab { int nr_vecs; char *name; struct kmem_cache *slab; }; static inline bool bioset_initialized(struct bio_set *bs) { return bs->bio_slab != NULL; } /* * a small number of entries is fine, not going to be performance critical. 
* basically we just need to survive */ #define BIO_SPLIT_ENTRIES 2 #if defined(CONFIG_BLK_DEV_INTEGRITY) #define bip_for_each_vec(bvl, bip, iter) \ for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) #define bio_for_each_integrity_vec(_bvl, _bio, _iter) \ for_each_bio(_bio) \ bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); extern bool bio_integrity_prep(struct bio *); extern void bio_integrity_advance(struct bio *, unsigned int); extern void bio_integrity_trim(struct bio *); extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); extern int bioset_integrity_create(struct bio_set *, int); extern void bioset_integrity_free(struct bio_set *); extern void bio_integrity_init(void); #else /* CONFIG_BLK_DEV_INTEGRITY */ static inline void *bio_integrity(struct bio *bio) { return NULL; } static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) { return 0; } static inline void bioset_integrity_free (struct bio_set *bs) { return; } static inline bool bio_integrity_prep(struct bio *bio) { return true; } static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask) { return 0; } static inline void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) { return; } static inline void bio_integrity_trim(struct bio *bio) { return; } static inline void bio_integrity_init(void) { return; } static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) { return false; } static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp, unsigned int nr) { return ERR_PTR(-EINVAL); } static inline int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { return 0; } #endif /* CONFIG_BLK_DEV_INTEGRITY */ /* * Mark a bio as polled. Note that for async polled IO, the caller must * expect -EWOULDBLOCK if we cannot allocate a request (or other resources). * We cannot block waiting for requests on polled IO, as those completions * must be found by the caller. This is different than IRQ driven IO, where * it's safe to wait for IO to complete. 
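 *
 * Sketch of the async submission side (kiocb as passed to ->read_iter();
 * the caller must then reap the completion itself, e.g. via blk_poll()):
 *
 *	bio_set_polled(bio, kiocb);
 *	submit_bio(bio);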
*/ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) { bio->bi_opf |= REQ_HIPRI; if (!is_sync_kiocb(kiocb)) bio->bi_opf |= REQ_NOWAIT; } #endif /* CONFIG_BLOCK */ #endif /* __LINUX_BIO_H */ livepatch.h 0000644 00000016671 14722070374 0006715 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * livepatch.h - Kernel Live Patching Core * * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com> * Copyright (C) 2014 SUSE */ #ifndef _LINUX_LIVEPATCH_H_ #define _LINUX_LIVEPATCH_H_ #include <linux/module.h> #include <linux/ftrace.h> #include <linux/completion.h> #include <linux/list.h> #if IS_ENABLED(CONFIG_LIVEPATCH) #include <asm/livepatch.h> /* task patch states */ #define KLP_UNDEFINED -1 #define KLP_UNPATCHED 0 #define KLP_PATCHED 1 /** * struct klp_func - function structure for live patching * @old_name: name of the function to be patched * @new_func: pointer to the patched function code * @old_sympos: a hint indicating which symbol position the old function * can be found (optional) * @old_func: pointer to the function being patched * @kobj: kobject for sysfs resources * @node: list node for klp_object func_list * @stack_node: list node for klp_ops func_stack list * @old_size: size of the old function * @new_size: size of the new function * @nop: temporary patch to use the original code again; dyn. allocated * @patched: the func has been added to the klp_ops list * @transition: the func is currently being applied or reverted * * The patched and transition variables define the func's patching state. When * patching, a func is always in one of the following states: * * patched=0 transition=0: unpatched * patched=0 transition=1: unpatched, temporary starting state * patched=1 transition=1: patched, may be visible to some tasks * patched=1 transition=0: patched, visible to all tasks * * And when unpatching, it goes in the reverse order: * * patched=1 transition=0: patched, visible to all tasks * patched=1 transition=1: patched, may be visible to some tasks * patched=0 transition=1: unpatched, temporary ending state * patched=0 transition=0: unpatched */ struct klp_func { /* external */ const char *old_name; void *new_func; /* * The old_sympos field is optional and can be used to resolve * duplicate symbol names in livepatch objects. If this field is zero, * it is expected the symbol is unique, otherwise patching fails. If * this value is greater than zero then that occurrence of the symbol * in kallsyms for the given object is used. */ unsigned long old_sympos; /* internal */ void *old_func; struct kobject kobj; struct list_head node; struct list_head stack_node; unsigned long old_size, new_size; bool nop; bool patched; bool transition; }; struct klp_object; /** * struct klp_callbacks - pre/post live-(un)patch callback structure * @pre_patch: executed before code patching * @post_patch: executed after code patching * @pre_unpatch: executed before code unpatching * @post_unpatch: executed after code unpatching * @post_unpatch_enabled: flag indicating if post-unpatch callback * should run * * All callbacks are optional. Only the pre-patch callback, if provided, * will be unconditionally executed. If the parent klp_object fails to * patch for any reason, including a non-zero error status returned from * the pre-patch callback, no further callbacks will be executed. 
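 *
 * Example initializer (my_pre_patch/my_post_unpatch stand in for real
 * callbacks with the signatures declared below):
 *
 *	static struct klp_callbacks callbacks = {
 *		.pre_patch = my_pre_patch,
 *		.post_unpatch = my_post_unpatch,
 *	};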
*/ struct klp_callbacks { int (*pre_patch)(struct klp_object *obj); void (*post_patch)(struct klp_object *obj); void (*pre_unpatch)(struct klp_object *obj); void (*post_unpatch)(struct klp_object *obj); bool post_unpatch_enabled; }; /** * struct klp_object - kernel object structure for live patching * @name: module name (or NULL for vmlinux) * @funcs: function entries for functions to be patched in the object * @callbacks: functions to be executed pre/post (un)patching * @kobj: kobject for sysfs resources * @func_list: dynamic list of the function entries * @node: list node for klp_patch obj_list * @mod: kernel module associated with the patched object * (NULL for vmlinux) * @dynamic: temporary object for nop functions; dynamically allocated * @patched: the object's funcs have been added to the klp_ops list */ struct klp_object { /* external */ const char *name; struct klp_func *funcs; struct klp_callbacks callbacks; /* internal */ struct kobject kobj; struct list_head func_list; struct list_head node; struct module *mod; bool dynamic; bool patched; }; /** * struct klp_patch - patch structure for live patching * @mod: reference to the live patch module * @objs: object entries for kernel objects to be patched * @replace: replace all actively used patches * @list: list node for global list of actively used patches * @kobj: kobject for sysfs resources * @obj_list: dynamic list of the object entries * @enabled: the patch is enabled (but operation may be incomplete) * @forced: was involved in a forced transition * @free_work: patch cleanup from workqueue-context * @finish: for waiting till it is safe to remove the patch module */ struct klp_patch { /* external */ struct module *mod; struct klp_object *objs; bool replace; /* internal */ struct list_head list; struct kobject kobj; struct list_head obj_list; bool enabled; bool forced; struct work_struct free_work; struct completion finish; }; #define klp_for_each_object_static(patch, obj) \ for (obj = patch->objs; obj->funcs || obj->name; obj++) #define klp_for_each_object_safe(patch, obj, tmp_obj) \ list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node) #define klp_for_each_object(patch, obj) \ list_for_each_entry(obj, &patch->obj_list, node) #define klp_for_each_func_static(obj, func) \ for (func = obj->funcs; \ func->old_name || func->new_func || func->old_sympos; \ func++) #define klp_for_each_func_safe(obj, func, tmp_func) \ list_for_each_entry_safe(func, tmp_func, &obj->func_list, node) #define klp_for_each_func(obj, func) \ list_for_each_entry(func, &obj->func_list, node) int klp_enable_patch(struct klp_patch *); void arch_klp_init_object_loaded(struct klp_patch *patch, struct klp_object *obj); /* Called from the module loader during module coming/going states */ int klp_module_coming(struct module *mod); void klp_module_going(struct module *mod); void klp_copy_process(struct task_struct *child); void klp_update_patch_state(struct task_struct *task); static inline bool klp_patch_pending(struct task_struct *task) { return test_tsk_thread_flag(task, TIF_PATCH_PENDING); } static inline bool klp_have_reliable_stack(void) { return IS_ENABLED(CONFIG_STACKTRACE) && IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); } typedef int (*klp_shadow_ctor_t)(void *obj, void *shadow_data, void *ctor_data); typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data); void *klp_shadow_get(void *obj, unsigned long id); void *klp_shadow_alloc(void *obj, unsigned long id, size_t size, gfp_t gfp_flags, klp_shadow_ctor_t ctor, void *ctor_data); void 
*klp_shadow_get_or_alloc(void *obj, unsigned long id, size_t size, gfp_t gfp_flags, klp_shadow_ctor_t ctor, void *ctor_data); void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor); void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); #else /* !CONFIG_LIVEPATCH */ static inline int klp_module_coming(struct module *mod) { return 0; } static inline void klp_module_going(struct module *mod) {} static inline bool klp_patch_pending(struct task_struct *task) { return false; } static inline void klp_update_patch_state(struct task_struct *task) {} static inline void klp_copy_process(struct task_struct *child) {} #endif /* CONFIG_LIVEPATCH */ #endif /* _LINUX_LIVEPATCH_H_ */ ipmi.h 0000644 00000025546 14722070374 0005675 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * ipmi.h * * MontaVista IPMI interface * * Author: MontaVista Software, Inc. * Corey Minyard <minyard@mvista.com> * source@mvista.com * * Copyright 2002 MontaVista Software Inc. * */ #ifndef __LINUX_IPMI_H #define __LINUX_IPMI_H #include <uapi/linux/ipmi.h> #include <linux/list.h> #include <linux/proc_fs.h> #include <linux/acpi.h> /* For acpi_handle */ struct module; struct device; /* * Opaque type for a IPMI message user. One of these is needed to * send and receive messages. */ struct ipmi_user; /* * Stuff coming from the receive interface comes as one of these. * They are allocated, the receiver must free them with * ipmi_free_recv_msg() when done with the message. The link is not * used after the message is delivered, so the upper layer may use the * link to build a linked list, if it likes. */ struct ipmi_recv_msg { struct list_head link; /* * The type of message as defined in the "Receive Types" * defines above. */ int recv_type; struct ipmi_user *user; struct ipmi_addr addr; long msgid; struct kernel_ipmi_msg msg; /* * The user_msg_data is the data supplied when a message was * sent, if this is a response to a sent message. If this is * not a response to a sent message, then user_msg_data will * be NULL. If the user above is NULL, then this will be the * intf. */ void *user_msg_data; /* * Call this when done with the message. It will presumably free * the message and do any other necessary cleanup. */ void (*done)(struct ipmi_recv_msg *msg); /* * Place-holder for the data, don't make any assumptions about * the size or existence of this, since it may change. */ unsigned char msg_data[IPMI_MAX_MSG_LENGTH]; }; /* Allocate and free the receive message. */ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg); struct ipmi_user_hndl { /* * Routine type to call when a message needs to be routed to * the upper layer. This will be called with some locks held, * the only IPMI routines that can be called are ipmi_request * and the alloc/free operations. The handler_data is the * variable supplied when the receive handler was registered. */ void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg, void *user_msg_data); /* * Called when the interface detects a watchdog pre-timeout. If * this is NULL, it will be ignored for the user. */ void (*ipmi_watchdog_pretimeout)(void *handler_data); /* * If not NULL, called at panic time after the interface has * been set up to handle run to completion. */ void (*ipmi_panic_handler)(void *handler_data); /* * Called when the interface has been removed. After this returns * the user handle will be invalid. The interface may or may * not be usable when this is called, but it will return errors * if it is not usable. 
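 *
 * A minimal consumer sketch, tying this structure to ipmi_create_user()
 * declared below (my_recv is a hypothetical handler; it must free each
 * message with ipmi_free_recv_msg()):
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);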
*/ void (*shutdown)(void *handler_data); }; /* Create a new user of the IPMI layer on the given interface number. */ int ipmi_create_user(unsigned int if_num, const struct ipmi_user_hndl *handler, void *handler_data, struct ipmi_user **user); /* * Destroy the given user of the IPMI layer. Note that after this * function returns, the system is guaranteed to not call any * callbacks for the user. Thus as long as you destroy all the users * before you unload a module, you will be safe. And if you destroy * the users before you destroy the callback structures, it should be * safe, too. */ int ipmi_destroy_user(struct ipmi_user *user); /* Get the IPMI version of the BMC we are talking to. */ int ipmi_get_version(struct ipmi_user *user, unsigned char *major, unsigned char *minor); /* * Set and get the slave address and LUN that we will use for our * source messages. Note that this affects the interface, not just * this user, so it will affect all users of this interface. This is * so some initialization code can come in and do the OEM-specific * things it takes to determine your address (if not the BMC) and set * it for everyone else. Note that each channel can have its own * address. */ int ipmi_set_my_address(struct ipmi_user *user, unsigned int channel, unsigned char address); int ipmi_get_my_address(struct ipmi_user *user, unsigned int channel, unsigned char *address); int ipmi_set_my_LUN(struct ipmi_user *user, unsigned int channel, unsigned char LUN); int ipmi_get_my_LUN(struct ipmi_user *user, unsigned int channel, unsigned char *LUN); /* * Like ipmi_request, but lets you specify the number of retries and * the retry time. The retries is the number of times the message * will be resent if no reply is received. If set to -1, the default * value will be used. The retry time is the time in milliseconds * between retries. If set to zero, the default value will be * used. * * Don't use this unless you *really* have to. It's primarily for the * IPMI over LAN converter; since the LAN stuff does its own retries, * it makes no sense to do it here. However, this can be used if you * have unusual requirements. */ int ipmi_request_settime(struct ipmi_user *user, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, int priority, int max_retries, unsigned int retry_time_ms); /* * Like ipmi_request, but with messages supplied. This will not * allocate any memory, and the messages may be statically allocated * (just make sure to do the "done" handling on them). Note that this * is primarily for the watchdog timer, since it should be able to * send messages even if no memory is available. This is subject to * change as the system changes, so don't use it unless you REALLY * have to. */ int ipmi_request_supply_msgs(struct ipmi_user *user, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, void *supplied_smi, struct ipmi_recv_msg *supplied_recv, int priority); /* * Poll the IPMI interface for the user. This causes the IPMI code to * do an immediate check for information from the driver and handle * anything that is immediately pending. This will not block in any * way. This is useful if you need to spin waiting for something to * happen in the IPMI driver. */ void ipmi_poll_interface(struct ipmi_user *user); /* * When commands come in to the SMS, the user can register to receive * them. Only one user can be listening on a specific netfn/cmd/chan tuple * at a time, you will get an EBUSY error if the command is already * registered. 
If a command is received that does not have a user * registered, the driver will automatically return the proper * error. Channels are specified as a bitfield; use IPMI_CHAN_ALL to * mean all channels. */ int ipmi_register_for_cmd(struct ipmi_user *user, unsigned char netfn, unsigned char cmd, unsigned int chans); int ipmi_unregister_for_cmd(struct ipmi_user *user, unsigned char netfn, unsigned char cmd, unsigned int chans); /* * Go into a mode where the driver will not autonomously attempt to do * things with the interface. It will still respond to attentions and * interrupts, and it will expect that commands will complete. It * will not automatically check for flags, events, or things of that * nature. * * This is primarily used for firmware upgrades. The idea is that * when you go into firmware upgrade mode, you do this operation * and the driver will not attempt to do anything but what you tell * it or what the BMC asks for. * * Note that if you send a command that resets the BMC, the driver * will still expect a response from that command. So the BMC should * reset itself *after* the response is sent. Resetting before the * response is just silly. * * If in auto maintenance mode, the driver will automatically go into * maintenance mode for 30 seconds if it sees a cold reset, a warm * reset, or a firmware NetFN. This means that code that uses only * firmware NetFN commands to do upgrades will work automatically * without change, assuming it sends a message every 30 seconds or * less. * * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means. */ int ipmi_get_maintenance_mode(struct ipmi_user *user); int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode); /* * When the user is created, it will not receive IPMI events by * default. The user must set this to TRUE to get incoming events. * The first user that sets this to TRUE will receive all events that * have been queued while no one was waiting for events. */ int ipmi_set_gets_events(struct ipmi_user *user, bool val); /* * Called when a new SMI is registered. This will also be called on * every existing interface when a new watcher is registered with * ipmi_smi_watcher_register(). */ struct ipmi_smi_watcher { struct list_head link; /* * You must set the owner to the current module, if you are in * a module (generally just set it to "THIS_MODULE"). */ struct module *owner; /* * These two are called with read locks held for the interface * and the watcher list. So you can add and remove users from the * IPMI interface, send messages, etc., but you cannot add * or remove SMI watchers or SMI interfaces. */ void (*new_smi)(int if_num, struct device *dev); void (*smi_gone)(int if_num); }; int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher); int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher); /* * The following are various helper functions for dealing with IPMI * addresses. */ /* Return the maximum length of an IPMI address given its type. */ unsigned int ipmi_addr_length(int addr_type); /* Check that the given IPMI address is valid. */ int ipmi_validate_addr(struct ipmi_addr *addr, int len); /* * How did the IPMI driver find out about the device?
*/ enum ipmi_addr_src { SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, SI_PCI, SI_DEVICETREE, SI_PLATFORM, SI_LAST }; const char *ipmi_addr_src_to_str(enum ipmi_addr_src src); union ipmi_smi_info_union { #ifdef CONFIG_ACPI /* * the acpi_info element is defined for the SI_ACPI * address type */ struct { acpi_handle acpi_handle; } acpi_info; #endif }; struct ipmi_smi_info { enum ipmi_addr_src addr_src; /* * Base device for the interface. Don't forget to put this when * you are done. */ struct device *dev; /* * The addr_info provides more detailed info for some IPMI * devices, depending on the addr_src. Currently only SI_ACPI * info is provided. */ union ipmi_smi_info_union addr_info; }; /* This is to get the private info of struct ipmi_smi */ extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data); #endif /* __LINUX_IPMI_H */ clockchips.h 0000644 00000016425 14722070374 0007055 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* linux/include/linux/clockchips.h * * This file contains the structure definitions for clockchips. * * If you are not a clockchip, or the time of day code, you should * not be including this file! */ #ifndef _LINUX_CLOCKCHIPS_H #define _LINUX_CLOCKCHIPS_H #ifdef CONFIG_GENERIC_CLOCKEVENTS # include <linux/clocksource.h> # include <linux/cpumask.h> # include <linux/ktime.h> # include <linux/notifier.h> struct clock_event_device; struct module; /* * Possible states of a clock event device. * * DETACHED: Device is not used by clockevents core. Initial state or can be * reached from SHUTDOWN. * SHUTDOWN: Device is powered-off. Can be reached from PERIODIC or ONESHOT. * PERIODIC: Device is programmed to generate events periodically. Can be * reached from DETACHED or SHUTDOWN. * ONESHOT: Device is programmed to generate event only once. Can be reached * from DETACHED or SHUTDOWN. * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily * stopped. */ enum clock_event_state { CLOCK_EVT_STATE_DETACHED, CLOCK_EVT_STATE_SHUTDOWN, CLOCK_EVT_STATE_PERIODIC, CLOCK_EVT_STATE_ONESHOT, CLOCK_EVT_STATE_ONESHOT_STOPPED, }; /* * Clock event features */ # define CLOCK_EVT_FEAT_PERIODIC 0x000001 # define CLOCK_EVT_FEAT_ONESHOT 0x000002 # define CLOCK_EVT_FEAT_KTIME 0x000004 /* * x86(64) specific (mis)features: * * - Clockevent source stops in C3 State and needs broadcast support. * - Local APIC timer is used as a dummy device. 
*/ # define CLOCK_EVT_FEAT_C3STOP 0x000008 # define CLOCK_EVT_FEAT_DUMMY 0x000010 /* * Core shall set the interrupt affinity dynamically in broadcast mode */ # define CLOCK_EVT_FEAT_DYNIRQ 0x000020 # define CLOCK_EVT_FEAT_PERCPU 0x000040 /* * Clockevent device is based on a hrtimer for broadcast */ # define CLOCK_EVT_FEAT_HRTIMER 0x000080 /** * struct clock_event_device - clock event device descriptor * @event_handler: Assigned by the framework to be called by the low * level handler of the event source * @set_next_event: set next event function using a clocksource delta * @set_next_ktime: set next event function using a direct ktime value * @next_event: local storage for the next event in oneshot mode * @max_delta_ns: maximum delta value in ns * @min_delta_ns: minimum delta value in ns * @mult: nanosecond to cycles multiplier * @shift: nanoseconds to cycles divisor (power of two) * @state_use_accessors:current state of the device, assigned by the core code * @features: features * @retries: number of forced programming retries * @set_state_periodic: switch state to periodic * @set_state_oneshot: switch state to oneshot * @set_state_oneshot_stopped: switch state to oneshot_stopped * @set_state_shutdown: switch state to shutdown * @tick_resume: resume clkevt device * @broadcast: function to broadcast events * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration * @name: ptr to clock event name * @rating: variable to rate clock event devices * @irq: IRQ number (only for non CPU local devices) * @bound_on: Bound on CPU * @cpumask: cpumask to indicate for which CPUs this device works * @list: list head for the management code * @owner: module reference */ struct clock_event_device { void (*event_handler)(struct clock_event_device *); int (*set_next_event)(unsigned long evt, struct clock_event_device *); int (*set_next_ktime)(ktime_t expires, struct clock_event_device *); ktime_t next_event; u64 max_delta_ns; u64 min_delta_ns; u32 mult; u32 shift; enum clock_event_state state_use_accessors; unsigned int features; unsigned long retries; int (*set_state_periodic)(struct clock_event_device *); int (*set_state_oneshot)(struct clock_event_device *); int (*set_state_oneshot_stopped)(struct clock_event_device *); int (*set_state_shutdown)(struct clock_event_device *); int (*tick_resume)(struct clock_event_device *); void (*broadcast)(const struct cpumask *mask); void (*suspend)(struct clock_event_device *); void (*resume)(struct clock_event_device *); unsigned long min_delta_ticks; unsigned long max_delta_ticks; const char *name; int rating; int irq; int bound_on; const struct cpumask *cpumask; struct list_head list; struct module *owner; } ____cacheline_aligned; /* Helpers to verify state of a clockevent device */ static inline bool clockevent_state_detached(struct clock_event_device *dev) { return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED; } static inline bool clockevent_state_shutdown(struct clock_event_device *dev) { return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN; } static inline bool clockevent_state_periodic(struct clock_event_device *dev) { return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC; } static inline bool clockevent_state_oneshot(struct clock_event_device *dev) { return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT; } static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev) { return dev->state_use_accessors == 
CLOCK_EVT_STATE_ONESHOT_STOPPED; } /* * Calculate a multiplication factor for scaled math, which is used to convert * nanoseconds based values to clock ticks: * * clock_ticks = (nanoseconds * factor) >> shift. * * div_sc is the rearranged equation to calculate a factor from a given clock * ticks / nanoseconds ratio: * * factor = (clock_ticks << shift) / nanoseconds */ static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec, int shift) { u64 tmp = ((u64)ticks) << shift; do_div(tmp, nsec); return (unsigned long) tmp; } /* Clock event layer functions */ extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt); extern void clockevents_register_device(struct clock_event_device *dev); extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu); extern void clockevents_config_and_register(struct clock_event_device *dev, u32 freq, unsigned long min_delta, unsigned long max_delta); extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); static inline void clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec) { return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec); } extern void clockevents_suspend(void); extern void clockevents_resume(void); # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST # ifdef CONFIG_ARCH_HAS_TICK_BROADCAST extern void tick_broadcast(const struct cpumask *mask); # else # define tick_broadcast NULL # endif extern int tick_receive_broadcast(void); # endif # if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) extern void tick_setup_hrtimer_broadcast(void); extern int tick_check_broadcast_expired(void); # else static inline int tick_check_broadcast_expired(void) { return 0; } static inline void tick_setup_hrtimer_broadcast(void) { } # endif #else /* !CONFIG_GENERIC_CLOCKEVENTS: */ static inline void clockevents_suspend(void) { } static inline void clockevents_resume(void) { } static inline int tick_check_broadcast_expired(void) { return 0; } static inline void tick_setup_hrtimer_broadcast(void) { } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ #endif /* _LINUX_CLOCKCHIPS_H */ dma-contiguous.h 0000644 00000011357 14722070374 0007670 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __LINUX_CMA_H #define __LINUX_CMA_H /* * Contiguous Memory Allocator for DMA mapping framework * Copyright (c) 2010-2011 by Samsung Electronics. * Written by: * Marek Szyprowski <m.szyprowski@samsung.com> * Michal Nazarewicz <mina86@mina86.com> */ /* * Contiguous Memory Allocator * * The Contiguous Memory Allocator (CMA) makes it possible to * allocate big contiguous chunks of memory after the system has * booted. * * Why is it needed? * * Various devices on embedded systems have no scatter-gather and/or * IO map support and require contiguous blocks of memory to * operate. They include devices such as cameras, hardware video * coders, etc. * * Such devices often require big memory buffers (a full HD frame * is, for instance, more than 2 megapixels large, i.e. more than 6 * MB of memory), which makes mechanisms such as kmalloc() or * alloc_page() ineffective. * * At the same time, a solution where a big memory region is * reserved for a device is suboptimal since often more memory is * reserved than strictly required and, moreover, the memory is * inaccessible to the page system even if device drivers don't use it. * * CMA tries to solve this issue by operating on memory regions * where only movable pages can be allocated from.
This way, the kernel * can use the memory for pagecache, and when a device driver requests * it, allocated pages can be migrated. * * Driver usage * * CMA should not be used by the device drivers directly. It is * only a helper framework for the dma-mapping subsystem. * * For more information, see kernel-docs in kernel/dma/contiguous.c */ #ifdef __KERNEL__ #include <linux/device.h> #include <linux/mm.h> struct cma; struct page; #ifdef CONFIG_DMA_CMA extern struct cma *dma_contiguous_default_area; static inline struct cma *dev_get_cma_area(struct device *dev) { if (dev && dev->cma_area) return dev->cma_area; return dma_contiguous_default_area; } static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { if (dev) dev->cma_area = cma; } static inline void dma_contiguous_set_default(struct cma *cma) { dma_contiguous_default_area = cma; } void dma_contiguous_reserve(phys_addr_t addr_limit); int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, phys_addr_t limit, struct cma **res_cma, bool fixed); /** * dma_declare_contiguous() - reserve area for contiguous memory handling * for particular device * @dev: Pointer to device structure. * @size: Size of the reserved memory. * @base: Start address of the reserved memory (optional, 0 for any). * @limit: End address of the reserved memory (optional, 0 for any). * * This function reserves memory for the specified device. It should be * called by board-specific code while the early allocator (memblock or bootmem) * is still active. */ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, phys_addr_t base, phys_addr_t limit) { struct cma *cma; int ret; ret = dma_contiguous_reserve_area(size, base, limit, &cma, true); if (ret == 0) dev_set_cma_area(dev, cma); return ret; } struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order, bool no_warn); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp); void dma_free_contiguous(struct device *dev, struct page *page, size_t size); #else static inline struct cma *dev_get_cma_area(struct device *dev) { return NULL; } static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { } static inline void dma_contiguous_set_default(struct cma *cma) { } static inline void dma_contiguous_reserve(phys_addr_t limit) { } static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, phys_addr_t limit, struct cma **res_cma, bool fixed) { return -ENOSYS; } static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, phys_addr_t base, phys_addr_t limit) { return -ENOSYS; } static inline struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order, bool no_warn) { return NULL; } static inline bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count) { return false; } /* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */ static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) { return NULL; } static inline void dma_free_contiguous(struct device *dev, struct page *page, size_t size) { __free_pages(page, get_order(size)); } #endif #endif #endif switchtec.h 0000644 00000020525 14722070374 0006724 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Microsemi Switchtec PCIe Driver * Copyright (c) 2017, Microsemi Corporation */ #ifndef _SWITCHTEC_H #define _SWITCHTEC_H #include <linux/pci.h>
#include <linux/cdev.h> #define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024 #define SWITCHTEC_MAX_PFF_CSR 255 #define SWITCHTEC_EVENT_OCCURRED BIT(0) #define SWITCHTEC_EVENT_CLEAR BIT(0) #define SWITCHTEC_EVENT_EN_LOG BIT(1) #define SWITCHTEC_EVENT_EN_CLI BIT(2) #define SWITCHTEC_EVENT_EN_IRQ BIT(3) #define SWITCHTEC_EVENT_FATAL BIT(4) #define SWITCHTEC_DMA_MRPC_EN BIT(0) enum { SWITCHTEC_GAS_MRPC_OFFSET = 0x0000, SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000, SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800, SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000, SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200, SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000, SWITCHTEC_GAS_NTB_OFFSET = 0x10000, SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000, }; struct mrpc_regs { u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; u32 cmd; u32 status; u32 ret_value; u32 dma_en; u64 dma_addr; u32 dma_vector; u32 dma_ver; } __packed; enum mrpc_status { SWITCHTEC_MRPC_STATUS_INPROGRESS = 1, SWITCHTEC_MRPC_STATUS_DONE = 2, SWITCHTEC_MRPC_STATUS_ERROR = 0xFF, SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100, }; struct sw_event_regs { u64 event_report_ctrl; u64 reserved1; u64 part_event_bitmap; u64 reserved2; u32 global_summary; u32 reserved3[3]; u32 stack_error_event_hdr; u32 stack_error_event_data; u32 reserved4[4]; u32 ppu_error_event_hdr; u32 ppu_error_event_data; u32 reserved5[4]; u32 isp_error_event_hdr; u32 isp_error_event_data; u32 reserved6[4]; u32 sys_reset_event_hdr; u32 reserved7[5]; u32 fw_exception_hdr; u32 reserved8[5]; u32 fw_nmi_hdr; u32 reserved9[5]; u32 fw_non_fatal_hdr; u32 reserved10[5]; u32 fw_fatal_hdr; u32 reserved11[5]; u32 twi_mrpc_comp_hdr; u32 twi_mrpc_comp_data; u32 reserved12[4]; u32 twi_mrpc_comp_async_hdr; u32 twi_mrpc_comp_async_data; u32 reserved13[4]; u32 cli_mrpc_comp_hdr; u32 cli_mrpc_comp_data; u32 reserved14[4]; u32 cli_mrpc_comp_async_hdr; u32 cli_mrpc_comp_async_data; u32 reserved15[4]; u32 gpio_interrupt_hdr; u32 gpio_interrupt_data; u32 reserved16[4]; u32 gfms_event_hdr; u32 gfms_event_data; u32 reserved17[4]; } __packed; enum { SWITCHTEC_CFG0_RUNNING = 0x04, SWITCHTEC_CFG1_RUNNING = 0x05, SWITCHTEC_IMG0_RUNNING = 0x03, SWITCHTEC_IMG1_RUNNING = 0x07, }; struct sys_info_regs { u32 device_id; u32 device_version; u32 firmware_version; u32 reserved1; u32 vendor_table_revision; u32 table_format_version; u32 partition_id; u32 cfg_file_fmt_version; u16 cfg_running; u16 img_running; u32 reserved2[57]; char vendor_id[8]; char product_id[16]; char product_revision[4]; char component_vendor[8]; u16 component_id; u8 component_revision; } __packed; struct flash_info_regs { u32 flash_part_map_upd_idx; struct active_partition_info { u32 address; u32 build_version; u32 build_string; } active_img; struct active_partition_info active_cfg; struct active_partition_info inactive_img; struct active_partition_info inactive_cfg; u32 flash_length; struct partition_info { u32 address; u32 length; } cfg0; struct partition_info cfg1; struct partition_info img0; struct partition_info img1; struct partition_info nvlog; struct partition_info vendor[8]; }; enum { SWITCHTEC_NTB_REG_INFO_OFFSET = 0x0000, SWITCHTEC_NTB_REG_CTRL_OFFSET = 0x4000, SWITCHTEC_NTB_REG_DBMSG_OFFSET = 0x64000, }; struct ntb_info_regs { u8 partition_count; u8 partition_id; u16 reserved1; u64 ep_map; u16 requester_id; u16 reserved2; u32 reserved3[4]; struct nt_partition_info { u32 xlink_enabled; u32 target_part_low; u32 target_part_high; u32 reserved; } ntp_info[48]; } __packed; struct part_cfg_regs { u32 status; u32 state; u32 port_cnt; u32 usp_port_mode; u32 
usp_pff_inst_id; u32 vep_pff_inst_id; u32 dsp_pff_inst_id[47]; u32 reserved1[11]; u16 vep_vector_number; u16 usp_vector_number; u32 port_event_bitmap; u32 reserved2[3]; u32 part_event_summary; u32 reserved3[3]; u32 part_reset_hdr; u32 part_reset_data[5]; u32 mrpc_comp_hdr; u32 mrpc_comp_data[5]; u32 mrpc_comp_async_hdr; u32 mrpc_comp_async_data[5]; u32 dyn_binding_hdr; u32 dyn_binding_data[5]; u32 reserved4[159]; } __packed; enum { NTB_CTRL_PART_OP_LOCK = 0x1, NTB_CTRL_PART_OP_CFG = 0x2, NTB_CTRL_PART_OP_RESET = 0x3, NTB_CTRL_PART_STATUS_NORMAL = 0x1, NTB_CTRL_PART_STATUS_LOCKED = 0x2, NTB_CTRL_PART_STATUS_LOCKING = 0x3, NTB_CTRL_PART_STATUS_CONFIGURING = 0x4, NTB_CTRL_PART_STATUS_RESETTING = 0x5, NTB_CTRL_BAR_VALID = 1 << 0, NTB_CTRL_BAR_DIR_WIN_EN = 1 << 4, NTB_CTRL_BAR_LUT_WIN_EN = 1 << 5, NTB_CTRL_REQ_ID_EN = 1 << 0, NTB_CTRL_LUT_EN = 1 << 0, NTB_PART_CTRL_ID_PROT_DIS = 1 << 0, }; struct ntb_ctrl_regs { u32 partition_status; u32 partition_op; u32 partition_ctrl; u32 bar_setup; u32 bar_error; u16 lut_table_entries; u16 lut_table_offset; u32 lut_error; u16 req_id_table_size; u16 req_id_table_offset; u32 req_id_error; u32 reserved1[7]; struct { u32 ctl; u32 win_size; u64 xlate_addr; } bar_entry[6]; struct { u32 win_size; u32 reserved[3]; } bar_ext_entry[6]; u32 reserved2[192]; u32 req_id_table[512]; u32 reserved3[256]; u64 lut_entry[512]; } __packed; #define NTB_DBMSG_IMSG_STATUS BIT_ULL(32) #define NTB_DBMSG_IMSG_MASK BIT_ULL(40) struct ntb_dbmsg_regs { u32 reserved1[1024]; u64 odb; u64 odb_mask; u64 idb; u64 idb_mask; u8 idb_vec_map[64]; u32 msg_map; u32 reserved2; struct { u32 msg; u32 status; } omsg[4]; struct { u32 msg; u8 status; u8 mask; u8 src; u8 reserved; } imsg[4]; u8 reserved3[3928]; u8 msix_table[1024]; u8 reserved4[3072]; u8 pba[24]; u8 reserved5[4072]; } __packed; enum { SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0, SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1, SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2, SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3, }; struct pff_csr_regs { u16 vendor_id; u16 device_id; u16 pcicmd; u16 pcists; u32 pci_class; u32 pci_opts; union { u32 pci_bar[6]; u64 pci_bar64[3]; }; u32 pci_cardbus; u32 pci_subsystem_id; u32 pci_expansion_rom; u32 pci_cap_ptr; u32 reserved1; u32 pci_irq; u32 pci_cap_region[48]; u32 pcie_cap_region[448]; u32 indirect_gas_window[128]; u32 indirect_gas_window_off; u32 reserved[127]; u32 pff_event_summary; u32 reserved2[3]; u32 aer_in_p2p_hdr; u32 aer_in_p2p_data[5]; u32 aer_in_vep_hdr; u32 aer_in_vep_data[5]; u32 dpc_hdr; u32 dpc_data[5]; u32 cts_hdr; u32 cts_data[5]; u32 reserved3[6]; u32 hotplug_hdr; u32 hotplug_data[5]; u32 ier_hdr; u32 ier_data[5]; u32 threshold_hdr; u32 threshold_data[5]; u32 power_mgmt_hdr; u32 power_mgmt_data[5]; u32 tlp_throttling_hdr; u32 tlp_throttling_data[5]; u32 force_speed_hdr; u32 force_speed_data[5]; u32 credit_timeout_hdr; u32 credit_timeout_data[5]; u32 link_state_hdr; u32 link_state_data[5]; u32 reserved4[174]; } __packed; struct switchtec_ntb; struct dma_mrpc_output { u32 status; u32 cmd_id; u32 rtn_code; u32 output_size; u8 data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; }; struct switchtec_dev { struct pci_dev *pdev; struct device dev; struct cdev cdev; int partition; int partition_count; int pff_csr_count; char pff_local[SWITCHTEC_MAX_PFF_CSR]; void __iomem *mmio; struct mrpc_regs __iomem *mmio_mrpc; struct sw_event_regs __iomem *mmio_sw_event; struct sys_info_regs __iomem *mmio_sys_info; struct flash_info_regs __iomem *mmio_flash_info; struct ntb_info_regs __iomem *mmio_ntb; struct part_cfg_regs 
__iomem *mmio_part_cfg; struct part_cfg_regs __iomem *mmio_part_cfg_all; struct pff_csr_regs __iomem *mmio_pff_csr; /* * The mrpc mutex must be held when accessing the other * mrpc_ fields, alive flag and stuser->state field */ struct mutex mrpc_mutex; struct list_head mrpc_queue; int mrpc_busy; struct work_struct mrpc_work; struct delayed_work mrpc_timeout; bool alive; wait_queue_head_t event_wq; atomic_t event_cnt; struct work_struct link_event_work; void (*link_notifier)(struct switchtec_dev *stdev); u8 link_event_count[SWITCHTEC_MAX_PFF_CSR]; struct switchtec_ntb *sndev; struct dma_mrpc_output *dma_mrpc; dma_addr_t dma_mrpc_dma_addr; }; static inline struct switchtec_dev *to_stdev(struct device *dev) { return container_of(dev, struct switchtec_dev, dev); } extern struct class *switchtec_class; #endif topology.h 0000644 00000013060 14722070374 0006577 0 ustar 00 /* * include/linux/topology.h * * Written by: Matthew Dobson, IBM Corporation * * Copyright (C) 2002, IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <colpatch@us.ibm.com> */ #ifndef _LINUX_TOPOLOGY_H #define _LINUX_TOPOLOGY_H #include <linux/arch_topology.h> #include <linux/cpumask.h> #include <linux/bitops.h> #include <linux/mmzone.h> #include <linux/smp.h> #include <linux/percpu.h> #include <asm/topology.h> #ifndef nr_cpus_node #define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) #endif #define for_each_node_with_cpus(node) \ for_each_online_node(node) \ if (nr_cpus_node(node)) int arch_update_cpu_topology(void); /* Conform to ACPI 2.0 SLIT distance definitions */ #define LOCAL_DISTANCE 10 #define REMOTE_DISTANCE 20 #define DISTANCE_BITS 8 #ifndef node_distance #define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE) #endif #ifndef RECLAIM_DISTANCE /* * If the distance between nodes in a system is larger than RECLAIM_DISTANCE * (in whatever arch specific measurement units returned by node_distance()) * and node_reclaim_mode is enabled then the VM will only call node_reclaim() * on nodes within this distance. */ #define RECLAIM_DISTANCE 30 #endif /* * The following tunable allows platforms to override the default node * reclaim distance (RECLAIM_DISTANCE) if remote memory accesses are * sufficiently fast that the default value actually hurts * performance. * * AMD EPYC machines use this because even though the 2-hop distance * is 32 (3.2x slower than a local memory access) performance actually * *improves* if allowed to reclaim memory and load balance tasks * between NUMA nodes 2-hops apart. */ extern int __read_mostly node_reclaim_distance; #ifndef PENALTY_FOR_NODE_WITH_CPUS #define PENALTY_FOR_NODE_WITH_CPUS (1) #endif #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID DECLARE_PER_CPU(int, numa_node); #ifndef numa_node_id /* Returns the number of the current Node. 
*/ static inline int numa_node_id(void) { return raw_cpu_read(numa_node); } #endif #ifndef cpu_to_node static inline int cpu_to_node(int cpu) { return per_cpu(numa_node, cpu); } #endif #ifndef set_numa_node static inline void set_numa_node(int node) { this_cpu_write(numa_node, node); } #endif #ifndef set_cpu_numa_node static inline void set_cpu_numa_node(int cpu, int node) { per_cpu(numa_node, cpu) = node; } #endif #else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */ /* Returns the number of the current Node. */ #ifndef numa_node_id static inline int numa_node_id(void) { return cpu_to_node(raw_smp_processor_id()); } #endif #endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */ #ifdef CONFIG_HAVE_MEMORYLESS_NODES /* * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). */ DECLARE_PER_CPU(int, _numa_mem_); extern int _node_numa_mem_[MAX_NUMNODES]; #ifndef set_numa_mem static inline void set_numa_mem(int node) { this_cpu_write(_numa_mem_, node); _node_numa_mem_[numa_node_id()] = node; } #endif #ifndef node_to_mem_node static inline int node_to_mem_node(int node) { return _node_numa_mem_[node]; } #endif #ifndef numa_mem_id /* Returns the number of the nearest Node with memory */ static inline int numa_mem_id(void) { return raw_cpu_read(_numa_mem_); } #endif #ifndef cpu_to_mem static inline int cpu_to_mem(int cpu) { return per_cpu(_numa_mem_, cpu); } #endif #ifndef set_cpu_numa_mem static inline void set_cpu_numa_mem(int cpu, int node) { per_cpu(_numa_mem_, cpu) = node; _node_numa_mem_[cpu_to_node(cpu)] = node; } #endif #else /* !CONFIG_HAVE_MEMORYLESS_NODES */ #ifndef numa_mem_id /* Returns the number of the nearest Node with memory */ static inline int numa_mem_id(void) { return numa_node_id(); } #endif #ifndef node_to_mem_node static inline int node_to_mem_node(int node) { return node; } #endif #ifndef cpu_to_mem static inline int cpu_to_mem(int cpu) { return cpu_to_node(cpu); } #endif #endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */ #ifndef topology_physical_package_id #define topology_physical_package_id(cpu) ((void)(cpu), -1) #endif #ifndef topology_die_id #define topology_die_id(cpu) ((void)(cpu), -1) #endif #ifndef topology_core_id #define topology_core_id(cpu) ((void)(cpu), 0) #endif #ifndef topology_sibling_cpumask #define topology_sibling_cpumask(cpu) cpumask_of(cpu) #endif #ifndef topology_core_cpumask #define topology_core_cpumask(cpu) cpumask_of(cpu) #endif #ifndef topology_die_cpumask #define topology_die_cpumask(cpu) cpumask_of(cpu) #endif #ifdef CONFIG_SCHED_SMT static inline const struct cpumask *cpu_smt_mask(int cpu) { return topology_sibling_cpumask(cpu); } #endif static inline const struct cpumask *cpu_cpu_mask(int cpu) { return cpumask_of_node(cpu_to_node(cpu)); } #endif /* _LINUX_TOPOLOGY_H */ mc146818rtc.h 0000644 00000011106 14722070374 0006526 0 ustar 00 /* mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM * Copyright Torsten Duwe <duwe@informatik.uni-erlangen.de> 1993 * derived from Data Sheet, Copyright Motorola 1984 (!). * It was written to be part of the Linux operating system. */ /* permission is hereby granted to copy, modify and redistribute this code * in terms of the GNU Library General Public License, Version 2 or later, * at your option. 
*/ #ifndef _MC146818RTC_H #define _MC146818RTC_H #include <asm/io.h> #include <linux/rtc.h> /* get the user-level API */ #include <asm/mc146818rtc.h> /* register access macros */ #include <linux/bcd.h> #include <linux/delay.h> #include <linux/pm-trace.h> #ifdef __KERNEL__ #include <linux/spinlock.h> /* spinlock_t */ extern spinlock_t rtc_lock; /* serialize CMOS RAM access */ /* Some RTCs extend the mc146818 register set to support alarms of more * than 24 hours in the future; or dates that include a century code. * This platform_data structure can pass this information to the driver. * * Also, some platforms need suspend()/resume() hooks to kick in special * handling of wake alarms, e.g. activating ACPI BIOS hooks or setting up * a separate wakeup alarm used by some almost-clone chips. */ struct cmos_rtc_board_info { void (*wake_on)(struct device *dev); void (*wake_off)(struct device *dev); u32 flags; #define CMOS_RTC_FLAGS_NOFREQ (1 << 0) int address_space; u8 rtc_day_alarm; /* zero, or register index */ u8 rtc_mon_alarm; /* zero, or register index */ u8 rtc_century; /* zero, or register index */ }; #endif /********************************************************************** * register summary **********************************************************************/ #define RTC_SECONDS 0 #define RTC_SECONDS_ALARM 1 #define RTC_MINUTES 2 #define RTC_MINUTES_ALARM 3 #define RTC_HOURS 4 #define RTC_HOURS_ALARM 5 /* RTC_*_alarm is always true if 2 MSBs are set */ # define RTC_ALARM_DONT_CARE 0xC0 #define RTC_DAY_OF_WEEK 6 #define RTC_DAY_OF_MONTH 7 #define RTC_MONTH 8 #define RTC_YEAR 9 /* control registers - Moto names */ #define RTC_REG_A 10 #define RTC_REG_B 11 #define RTC_REG_C 12 #define RTC_REG_D 13 /********************************************************************** * register details **********************************************************************/ #define RTC_FREQ_SELECT RTC_REG_A /* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus, * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete, * totalling to a max high interval of 2.228 ms. */ # define RTC_UIP 0x80 # define RTC_DIV_CTL 0x70 /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */ # define RTC_REF_CLCK_4MHZ 0x00 # define RTC_REF_CLCK_1MHZ 0x10 # define RTC_REF_CLCK_32KHZ 0x20 /* 2 values for divider stage reset, others for "testing purposes only" */ # define RTC_DIV_RESET1 0x60 # define RTC_DIV_RESET2 0x70 /* In AMD BKDG bit 5 and 6 are reserved, bit 4 is for select dv0 bank */ # define RTC_AMD_BANK_SELECT 0x10 /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */ # define RTC_RATE_SELECT 0x0F /**********************************************************************/ #define RTC_CONTROL RTC_REG_B # define RTC_SET 0x80 /* disable updates for clock setting */ # define RTC_PIE 0x40 /* periodic interrupt enable */ # define RTC_AIE 0x20 /* alarm interrupt enable */ # define RTC_UIE 0x10 /* update-finished interrupt enable */ # define RTC_SQWE 0x08 /* enable square-wave output */ # define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ # define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ # define RTC_DST_EN 0x01 /* auto switch DST - works f. 
USA only */ /**********************************************************************/ #define RTC_INTR_FLAGS RTC_REG_C /* caution - cleared by read */ # define RTC_IRQF 0x80 /* any of the following 3 is active */ # define RTC_PF 0x40 # define RTC_AF 0x20 # define RTC_UF 0x10 /**********************************************************************/ #define RTC_VALID RTC_REG_D # define RTC_VRT 0x80 /* valid RAM and time */ /**********************************************************************/ #ifndef ARCH_RTC_LOCATION /* Override by <asm/mc146818rtc.h>? */ #define RTC_IO_EXTENT 0x8 #define RTC_IO_EXTENT_USED 0x2 #define RTC_IOMAPPED 1 /* Default to I/O mapping. */ #else #define RTC_IO_EXTENT_USED RTC_IO_EXTENT #endif /* ARCH_RTC_LOCATION */ unsigned int mc146818_get_time(struct rtc_time *time); int mc146818_set_time(struct rtc_time *time); #endif /* _MC146818RTC_H */ crc32poly.h 0000644 00000001142 14722070374 0006541 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CRC32_POLY_H #define _LINUX_CRC32_POLY_H /* * There are multiple 16-bit CRC polynomials in common use, but this is * *the* standard CRC-32 polynomial, first popularized by Ethernet. * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0 */ #define CRC32_POLY_LE 0xedb88320 #define CRC32_POLY_BE 0x04c11db7 /* * This is the CRC32c polynomial, as outlined by Castagnoli. * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+ * x^8+x^6+x^0 */ #define CRC32C_POLY_LE 0x82F63B78 #endif /* _LINUX_CRC32_POLY_H */ indirect_call_wrapper.h 0000644 00000003036 14722070374 0011261 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_INDIRECT_CALL_WRAPPER_H #define _LINUX_INDIRECT_CALL_WRAPPER_H #ifdef CONFIG_RETPOLINE /* * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtins * @f: function pointer * @f$NR: builtin functions names, up to $NR of them * @__VA_ARGS__: arguments for @f * * Avoid retpoline overhead for known builtins, checking @f against each of * them and eventually invoking the builtin function directly. The functions * are checked in the given order. Fall back to the indirect call. */ #define INDIRECT_CALL_1(f, f1, ...) \ ({ \ likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \ }) #define INDIRECT_CALL_2(f, f2, f1, ...) \ ({ \ likely(f == f2) ? f2(__VA_ARGS__) : \ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ }) #define INDIRECT_CALLABLE_DECLARE(f) f #define INDIRECT_CALLABLE_SCOPE #else #define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) #define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__) #define INDIRECT_CALLABLE_DECLARE(f) #define INDIRECT_CALLABLE_SCOPE static #endif /* * We can use INDIRECT_CALL_$NR for ipv6 related functions only if ipv6 is * builtin; this macro simplifies dealing with indirect calls that have only * ipv4/ipv6 alternatives. */ #if IS_BUILTIN(CONFIG_IPV6) #define INDIRECT_CALL_INET(f, f2, f1, ...) \ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__) #elif IS_ENABLED(CONFIG_INET) #define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__) #else #define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__) #endif #endif irqdomain.h 0000644 00000050147 14722070374 0006715 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * irq_domain - IRQ translation domains * * Translation infrastructure between hw and linux irq numbers. This is * helpful for interrupt controllers to implement mapping between hardware * irq numbers and the Linux irq number space.
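 *
 * As a minimal usage sketch (illustrative only; np, my_ops and priv
 * are hypothetical driver variables), a controller with 32 interrupt
 * lines might be set up as:
 *
 *	domain = irq_domain_add_linear(np, 32, &my_ops, priv);
 *	virq = irq_create_mapping(domain, hwirq);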
* * irq_domains also have hooks for translating device tree or other * firmware interrupt representations into a hardware irq number that * can be mapped back to a Linux irq number without any extra platform * support code. * * Interrupt controller "domain" data structure. This could be defined as an * irq domain controller. That is, it handles the mapping between hardware * and virtual interrupt numbers for a given interrupt domain. The domain * structure is generally created by the PIC code for a given PIC instance * (though a domain can cover more than one PIC if they have a flat number * model). It's the domain callbacks that are responsible for setting the * irq_chip on a given irq_desc after it's been mapped. * * The host code and data structures use a fwnode_handle pointer to * identify the domain. In some cases, and in order to preserve source * code compatibility, this fwnode pointer is "upgraded" to a DT * device_node. For those firmware infrastructures that do not provide * a unique identifier for an interrupt controller, the irq_domain * code offers a fwnode allocator. */ #ifndef _LINUX_IRQDOMAIN_H #define _LINUX_IRQDOMAIN_H #include <linux/types.h> #include <linux/irqhandler.h> #include <linux/of.h> #include <linux/mutex.h> #include <linux/radix-tree.h> struct device_node; struct irq_domain; struct of_device_id; struct irq_chip; struct irq_data; struct cpumask; struct seq_file; struct irq_affinity_desc; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 #define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 /** * struct irq_fwspec - generic IRQ specifier structure * * @fwnode: Pointer to a firmware-specific descriptor * @param_count: Number of device-specific parameters * @param: Device-specific parameters * * This structure, directly modeled after of_phandle_args, is used to * pass a device-specific description of an interrupt. */ struct irq_fwspec { struct fwnode_handle *fwnode; int param_count; u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS]; }; /* * Should several domains have the same device node, but serve * different purposes (for example one domain is for PCI/MSI, and the * other for wired IRQs), they can be distinguished using a * bus-specific token. Most domains are expected to only carry * DOMAIN_BUS_ANY. */ enum irq_domain_bus_token { DOMAIN_BUS_ANY = 0, DOMAIN_BUS_WIRED, DOMAIN_BUS_GENERIC_MSI, DOMAIN_BUS_PCI_MSI, DOMAIN_BUS_PLATFORM_MSI, DOMAIN_BUS_NEXUS, DOMAIN_BUS_IPI, DOMAIN_BUS_FSL_MC_MSI, DOMAIN_BUS_TI_SCI_INTA_MSI, }; /** * struct irq_domain_ops - Methods for irq_domain objects * @match: Match an interrupt controller device node to a host, returns * 1 on a match * @map: Create or update a mapping between a virtual irq number and a hw * irq number. This is called only once for a given mapping. * @unmap: Dispose of such a mapping * @xlate: Given a device tree node and interrupt specifier, decode * the hardware irq number and linux irq type value. * * Functions below are provided by the driver and called whenever a new mapping * is created or an old mapping is disposed. The driver can then proceed to * whatever internal data structure management is required. It also needs * to set up the irq_desc when returning from map().
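 *
 * A minimal instance might look like the following sketch (my_irq_map
 * is a hypothetical driver callback; irq_domain_xlate_onecell is one
 * of the stock helpers declared later in this header):
 *
 *	static const struct irq_domain_ops my_ops = {
 *		.map	= my_irq_map,
 *		.xlate	= irq_domain_xlate_onecell,
 *	};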
*/ struct irq_domain_ops { int (*match)(struct irq_domain *d, struct device_node *node, enum irq_domain_bus_token bus_token); int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); void (*unmap)(struct irq_domain *d, unsigned int virq); int (*xlate)(struct irq_domain *d, struct device_node *node, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /* extended V2 interfaces to support hierarchy irq_domains */ int (*alloc)(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs, void *arg); void (*free)(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs); int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve); void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type); #endif #ifdef CONFIG_GENERIC_IRQ_DEBUGFS void (*debug_show)(struct seq_file *m, struct irq_domain *d, struct irq_data *irqd, int ind); #endif }; extern struct irq_domain_ops irq_generic_chip_ops; struct irq_domain_chip_generic; /** * struct irq_domain - Hardware interrupt number translation object * @link: Element in global irq_domain list. * @name: Name of interrupt domain * @ops: pointer to irq_domain methods * @host_data: private data pointer for use by owner. Not touched by irq_domain * core code. * @flags: host per irq_domain flags * @mapcount: The number of mapped interrupts * * Optional elements * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy * to swap it for the of_node via the irq_domain_get_of_node accessor * @gc: Pointer to a list of generic chips. There is a helper function for * setting up one or more generic chips for interrupt controller * drivers using the generic chip library which uses this pointer. * @dev: Pointer to a device that the domain represents, and that will be * used for power management purposes. * @parent: Pointer to parent irq_domain to support hierarchy irq_domains * @debugfs_file: dentry for the domain debugfs file * * Revmap data, used internally by irq_domain * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that * support direct mapping * @revmap_size: Size of the linear map table @linear_revmap[] * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map * @linear_revmap: Linear table of hwirq->virq reverse mappings */ struct irq_domain { struct list_head link; const char *name; const struct irq_domain_ops *ops; void *host_data; unsigned int flags; unsigned int mapcount; /* Optional data */ struct fwnode_handle *fwnode; enum irq_domain_bus_token bus_token; struct irq_domain_chip_generic *gc; struct device *dev; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY struct irq_domain *parent; #endif #ifdef CONFIG_GENERIC_IRQ_DEBUGFS struct dentry *debugfs_file; #endif /* reverse map data.
The linear map gets appended to the irq_domain */ irq_hw_number_t hwirq_max; unsigned int revmap_direct_max_irq; unsigned int revmap_size; struct radix_tree_root revmap_tree; struct mutex revmap_tree_mutex; unsigned int linear_revmap[]; }; /* Irq domain flags */ enum { /* Irq domain is hierarchical */ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), /* Irq domain name was allocated in __irq_domain_add() */ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1), /* Irq domain is an IPI domain with virq per cpu */ IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2), /* Irq domain is an IPI domain with single virq */ IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), /* Irq domain implements MSIs */ IRQ_DOMAIN_FLAG_MSI = (1 << 4), /* Irq domain implements MSI remapping */ IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5), /* * Quirk to handle MSI implementations which do not provide * masking. Currently known to affect x86, but partially * handled in core code. */ IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6), /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the * core code. */ IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), }; static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) { return to_of_node(d->fwnode); } static inline void irq_domain_set_pm_device(struct irq_domain *d, struct device *dev) { if (d) d->dev = dev; } #ifdef CONFIG_IRQ_DOMAIN struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, const char *name, phys_addr_t *pa); enum { IRQCHIP_FWNODE_REAL, IRQCHIP_FWNODE_NAMED, IRQCHIP_FWNODE_NAMED_ID, }; static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name) { return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL); } static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id) { return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name, NULL); } static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa) { return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa); } void irq_domain_free_fwnode(struct fwnode_handle *fwnode); struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size, irq_hw_number_t hwirq_max, int direct_max, const struct irq_domain_ops *ops, void *host_data); struct irq_domain *irq_domain_add_simple(struct device_node *of_node, unsigned int size, unsigned int first_irq, const struct irq_domain_ops *ops, void *host_data); struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, unsigned int size, unsigned int first_irq, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data); extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); extern bool irq_domain_check_msi_remap(void); extern void irq_set_default_host(struct irq_domain *host); extern struct irq_domain *irq_get_default_host(void); extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node, const struct irq_affinity_desc *affinity); static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) { return node ? 
&node->fwnode : NULL; } extern const struct fwnode_operations irqchip_fwnode_ops; static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode) { return fwnode && fwnode->ops == &irqchip_fwnode_ops; } extern void irq_domain_update_bus_token(struct irq_domain *domain, enum irq_domain_bus_token bus_token); static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) { struct irq_fwspec fwspec = { .fwnode = fwnode, }; return irq_find_matching_fwspec(&fwspec, bus_token); } static inline struct irq_domain *irq_find_matching_host(struct device_node *node, enum irq_domain_bus_token bus_token) { return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token); } static inline struct irq_domain *irq_find_host(struct device_node *node) { struct irq_domain *d; d = irq_find_matching_host(node, DOMAIN_BUS_WIRED); if (!d) d = irq_find_matching_host(node, DOMAIN_BUS_ANY); return d; } /** * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. * @size: Number of interrupts in the domain. * @ops: map/unmap domain callbacks * @host_data: Controller private data pointer */ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node, unsigned int size, const struct irq_domain_ops *ops, void *host_data) { return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); } static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, unsigned int max_irq, const struct irq_domain_ops *ops, void *host_data) { return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); } static inline struct irq_domain *irq_domain_add_legacy_isa( struct device_node *of_node, const struct irq_domain_ops *ops, void *host_data) { return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops, host_data); } static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, const struct irq_domain_ops *ops, void *host_data) { return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data); } static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode, unsigned int size, const struct irq_domain_ops *ops, void *host_data) { return __irq_domain_add(fwnode, size, size, 0, ops, host_data); } static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data) { return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data); } extern void irq_domain_remove(struct irq_domain *host); extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq); extern void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count); extern void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq); extern unsigned int irq_create_mapping_affinity(struct irq_domain *host, irq_hw_number_t hwirq, const struct irq_affinity_desc *affinity); extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); extern void irq_dispose_mapping(unsigned int virq); static inline unsigned int irq_create_mapping(struct irq_domain *host, irq_hw_number_t hwirq) { return irq_create_mapping_affinity(host, hwirq, NULL); } /** * irq_linear_revmap() - Find a linux irq from a hw irq number. 
* @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space * * This is a fast path alternative to irq_find_mapping() that can be * called directly by irq controller code to save a handful of * instructions. It is always safe to call, but won't find irqs mapped * using the radix tree. */ static inline unsigned int irq_linear_revmap(struct irq_domain *domain, irq_hw_number_t hwirq) { return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0; } extern unsigned int irq_find_mapping(struct irq_domain *host, irq_hw_number_t hwirq); extern unsigned int irq_create_direct_mapping(struct irq_domain *host); extern int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count); static inline int irq_create_identity_mapping(struct irq_domain *host, irq_hw_number_t hwirq) { return irq_create_strict_mappings(host, hwirq, hwirq, 1); } extern const struct irq_domain_ops irq_domain_simple_ops; /* stock xlate functions */ int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type); /* IPI functions */ int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest); int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); /* V2 interfaces to support hierarchy IRQ domains. 
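 *
 * In a hierarchy, a child domain is typically created with
 * irq_domain_create_hierarchy() and forwards its allocations to the
 * parent, e.g. (illustrative sketch; parent, fwnode, my_ops and priv
 * are hypothetical driver variables):
 *
 *	d = irq_domain_create_hierarchy(parent, 0, nr_irqs, fwnode,
 *					&my_ops, priv);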
*/ extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq); extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, unsigned int flags, unsigned int size, struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data); static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, unsigned int flags, unsigned int size, struct device_node *node, const struct irq_domain_ops *ops, void *host_data) { return irq_domain_create_hierarchy(parent, flags, size, of_node_to_fwnode(node), ops, host_data); } extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity); extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early); extern void irq_domain_deactivate_irq(struct irq_data *irq_data); static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, int node, void *arg) { return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, NULL); } extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg); extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, struct irq_chip *chip, void *chip_data); extern void irq_domain_reset_irq_data(struct irq_data *irq_data); extern void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); extern void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg); extern int irq_domain_pop_irq(struct irq_domain *domain, int virq); extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg); extern void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs); static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; } static inline bool irq_domain_is_ipi(struct irq_domain *domain) { return domain->flags & (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE); } static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_IPI_PER_CPU; } static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; } static inline bool irq_domain_is_msi(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_MSI; } static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP; } extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain); #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, int node, void *arg) { return -1; } static inline void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) { } static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return 
false; } static inline bool irq_domain_is_ipi(struct irq_domain *domain) { return false; } static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) { return false; } static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) { return false; } static inline bool irq_domain_is_msi(struct irq_domain *domain) { return false; } static inline bool irq_domain_is_msi_remap(struct irq_domain *domain) { return false; } static inline bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain) { return false; } #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #else /* CONFIG_IRQ_DOMAIN */ static inline void irq_dispose_mapping(unsigned int virq) { } static inline struct irq_domain *irq_find_matching_fwnode( struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) { return NULL; } static inline bool irq_domain_check_msi_remap(void) { return false; } #endif /* !CONFIG_IRQ_DOMAIN */ #endif /* _LINUX_IRQDOMAIN_H */ efi.h 0000644 00000153734 14722070374 0005473 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_EFI_H #define _LINUX_EFI_H /* * Extensible Firmware Interface * Based on 'Extensible Firmware Interface Specification' version 0.9, April 30, 1999 * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co. * David Mosberger-Tang <davidm@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com> */ #include <linux/init.h> #include <linux/string.h> #include <linux/time.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/rtc.h> #include <linux/ioport.h> #include <linux/pfn.h> #include <linux/pstore.h> #include <linux/range.h> #include <linux/reboot.h> #include <linux/uuid.h> #include <linux/screen_info.h> #include <asm/page.h> #define EFI_SUCCESS 0 #define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1))) #define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1))) #define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1))) #define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1))) #define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1))) #define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1))) #define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1))) #define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1))) #define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1))) #define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1))) #define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1))) #define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1))) #define EFI_IS_ERROR(x) ((x) & (1UL << (BITS_PER_LONG-1))) typedef unsigned long efi_status_t; typedef u8 efi_bool_t; typedef u16 efi_char16_t; /* UNICODE character */ typedef u64 efi_physical_addr_t; typedef void *efi_handle_t; /* * The UEFI spec and EDK2 reference implementation both define EFI_GUID as * struct { u32 a; u16 b; u16 c; u8 d[8]; }; and so the implied alignment * is 32 bits, not 8 bits like our guid_t. In some cases (i.e., on 32-bit ARM), * this means that firmware services invoked by the kernel may assume that * efi_guid_t* arguments are 32-bit aligned, and use memory accessors that * do not tolerate misalignment. So let's set the minimum alignment to 32 bits. * * Note that the UEFI spec as well as some comments in the EDK2 code base * suggest that EFI_GUID should be 64-bit aligned, but this appears to be * a mistake, given that no code seems to exist that actually enforces that * or relies on it. */ typedef guid_t efi_guid_t __aligned(__alignof__(u32)); #define EFI_GUID(a, b, c, d...)
(efi_guid_t){ { \ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ (b) & 0xff, ((b) >> 8) & 0xff, \ (c) & 0xff, ((c) >> 8) & 0xff, d } } /* * Generic EFI table header */ typedef struct { u64 signature; u32 revision; u32 headersize; u32 crc32; u32 reserved; } efi_table_hdr_t; /* * Memory map descriptor: */ /* Memory types: */ #define EFI_RESERVED_TYPE 0 #define EFI_LOADER_CODE 1 #define EFI_LOADER_DATA 2 #define EFI_BOOT_SERVICES_CODE 3 #define EFI_BOOT_SERVICES_DATA 4 #define EFI_RUNTIME_SERVICES_CODE 5 #define EFI_RUNTIME_SERVICES_DATA 6 #define EFI_CONVENTIONAL_MEMORY 7 #define EFI_UNUSABLE_MEMORY 8 #define EFI_ACPI_RECLAIM_MEMORY 9 #define EFI_ACPI_MEMORY_NVS 10 #define EFI_MEMORY_MAPPED_IO 11 #define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12 #define EFI_PAL_CODE 13 #define EFI_PERSISTENT_MEMORY 14 #define EFI_MAX_MEMORY_TYPE 15 /* Attribute values: */ #define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */ #define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */ #define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */ #define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */ #define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */ #define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ #define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ #define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ #define EFI_MEMORY_NV ((u64)0x0000000000008000ULL) /* non-volatile */ #define EFI_MEMORY_MORE_RELIABLE \ ((u64)0x0000000000010000ULL) /* higher reliability */ #define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */ #define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ #define EFI_MEMORY_DESCRIPTOR_VERSION 1 #define EFI_PAGE_SHIFT 12 #define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT) #define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT) typedef struct { u32 type; u32 pad; u64 phys_addr; u64 virt_addr; u64 num_pages; u64 attribute; } efi_memory_desc_t; typedef struct { efi_guid_t guid; u32 headersize; u32 flags; u32 imagesize; } efi_capsule_header_t; struct efi_boot_memmap { efi_memory_desc_t **map; unsigned long *map_size; unsigned long *desc_size; u32 *desc_ver; unsigned long *key_ptr; unsigned long *buff_size; }; /* * EFI capsule flags */ #define EFI_CAPSULE_PERSIST_ACROSS_RESET 0x00010000 #define EFI_CAPSULE_POPULATE_SYSTEM_TABLE 0x00020000 #define EFI_CAPSULE_INITIATE_RESET 0x00040000 struct capsule_info { efi_capsule_header_t header; efi_capsule_header_t *capsule; int reset_type; long index; size_t count; size_t total_size; struct page **pages; phys_addr_t *phys; size_t page_bytes_remain; }; int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff, size_t hdr_bytes); int __efi_capsule_setup_info(struct capsule_info *cap_info); /* * Allocation types for calls to boottime->allocate_pages. 
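 *
 * Illustrative sketch (not part of this header): a below-4GiB allocation
 * through the boot services pointer 'bs', where 'bs' and 'nr_pages' are
 * hypothetical:
 *
 *	efi_physical_addr_t base = U32_MAX;
 *	efi_status_t status;
 *
 *	status = bs->allocate_pages(EFI_ALLOCATE_MAX_ADDRESS,
 *				    EFI_LOADER_DATA, nr_pages, &base);
 *	if (status == EFI_SUCCESS)
 *		... 'base' now holds the physical address of the allocation ...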
*/ #define EFI_ALLOCATE_ANY_PAGES 0 #define EFI_ALLOCATE_MAX_ADDRESS 1 #define EFI_ALLOCATE_ADDRESS 2 #define EFI_MAX_ALLOCATE_TYPE 3 typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg); /* * Types and defines for Time Services */ #define EFI_TIME_ADJUST_DAYLIGHT 0x1 #define EFI_TIME_IN_DAYLIGHT 0x2 #define EFI_UNSPECIFIED_TIMEZONE 0x07ff typedef struct { u16 year; u8 month; u8 day; u8 hour; u8 minute; u8 second; u8 pad1; u32 nanosecond; s16 timezone; u8 daylight; u8 pad2; } efi_time_t; typedef struct { u32 resolution; u32 accuracy; u8 sets_to_zero; } efi_time_cap_t; typedef struct { efi_table_hdr_t hdr; u32 raise_tpl; u32 restore_tpl; u32 allocate_pages; u32 free_pages; u32 get_memory_map; u32 allocate_pool; u32 free_pool; u32 create_event; u32 set_timer; u32 wait_for_event; u32 signal_event; u32 close_event; u32 check_event; u32 install_protocol_interface; u32 reinstall_protocol_interface; u32 uninstall_protocol_interface; u32 handle_protocol; u32 __reserved; u32 register_protocol_notify; u32 locate_handle; u32 locate_device_path; u32 install_configuration_table; u32 load_image; u32 start_image; u32 exit; u32 unload_image; u32 exit_boot_services; u32 get_next_monotonic_count; u32 stall; u32 set_watchdog_timer; u32 connect_controller; u32 disconnect_controller; u32 open_protocol; u32 close_protocol; u32 open_protocol_information; u32 protocols_per_handle; u32 locate_handle_buffer; u32 locate_protocol; u32 install_multiple_protocol_interfaces; u32 uninstall_multiple_protocol_interfaces; u32 calculate_crc32; u32 copy_mem; u32 set_mem; u32 create_event_ex; } __packed efi_boot_services_32_t; typedef struct { efi_table_hdr_t hdr; u64 raise_tpl; u64 restore_tpl; u64 allocate_pages; u64 free_pages; u64 get_memory_map; u64 allocate_pool; u64 free_pool; u64 create_event; u64 set_timer; u64 wait_for_event; u64 signal_event; u64 close_event; u64 check_event; u64 install_protocol_interface; u64 reinstall_protocol_interface; u64 uninstall_protocol_interface; u64 handle_protocol; u64 __reserved; u64 register_protocol_notify; u64 locate_handle; u64 locate_device_path; u64 install_configuration_table; u64 load_image; u64 start_image; u64 exit; u64 unload_image; u64 exit_boot_services; u64 get_next_monotonic_count; u64 stall; u64 set_watchdog_timer; u64 connect_controller; u64 disconnect_controller; u64 open_protocol; u64 close_protocol; u64 open_protocol_information; u64 protocols_per_handle; u64 locate_handle_buffer; u64 locate_protocol; u64 install_multiple_protocol_interfaces; u64 uninstall_multiple_protocol_interfaces; u64 calculate_crc32; u64 copy_mem; u64 set_mem; u64 create_event_ex; } __packed efi_boot_services_64_t; /* * EFI Boot Services table */ typedef struct { efi_table_hdr_t hdr; void *raise_tpl; void *restore_tpl; efi_status_t (*allocate_pages)(int, int, unsigned long, efi_physical_addr_t *); efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long); efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *, unsigned long *, u32 *); efi_status_t (*allocate_pool)(int, unsigned long, void **); efi_status_t (*free_pool)(void *); void *create_event; void *set_timer; void *wait_for_event; void *signal_event; void *close_event; void *check_event; void *install_protocol_interface; void *reinstall_protocol_interface; void *uninstall_protocol_interface; efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **); void *__reserved; void *register_protocol_notify; efi_status_t (*locate_handle)(int, efi_guid_t *, void *, unsigned long *, efi_handle_t *); 
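	/*
	 * Illustrative note (not from the UEFI spec text): locate_handle()
	 * is typically called twice, first to learn the required buffer
	 * size, then to fill it. A hypothetical sketch, with 'boottime',
	 * 'proto' and 'handles' assumed:
	 *
	 *	unsigned long size = 0;
	 *
	 *	boottime->locate_handle(EFI_LOCATE_BY_PROTOCOL, &proto,
	 *				NULL, &size, NULL);
	 *	...allocate 'size' bytes for 'handles', then...
	 *	boottime->locate_handle(EFI_LOCATE_BY_PROTOCOL, &proto,
	 *				NULL, &size, handles);
	 */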
void *locate_device_path; efi_status_t (*install_configuration_table)(efi_guid_t *, void *); void *load_image; void *start_image; void *exit; void *unload_image; efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long); void *get_next_monotonic_count; void *stall; void *set_watchdog_timer; void *connect_controller; void *disconnect_controller; void *open_protocol; void *close_protocol; void *open_protocol_information; void *protocols_per_handle; void *locate_handle_buffer; efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **); void *install_multiple_protocol_interfaces; void *uninstall_multiple_protocol_interfaces; void *calculate_crc32; void *copy_mem; void *set_mem; void *create_event_ex; } efi_boot_services_t; typedef enum { EfiPciIoWidthUint8, EfiPciIoWidthUint16, EfiPciIoWidthUint32, EfiPciIoWidthUint64, EfiPciIoWidthFifoUint8, EfiPciIoWidthFifoUint16, EfiPciIoWidthFifoUint32, EfiPciIoWidthFifoUint64, EfiPciIoWidthFillUint8, EfiPciIoWidthFillUint16, EfiPciIoWidthFillUint32, EfiPciIoWidthFillUint64, EfiPciIoWidthMaximum } EFI_PCI_IO_PROTOCOL_WIDTH; typedef enum { EfiPciIoAttributeOperationGet, EfiPciIoAttributeOperationSet, EfiPciIoAttributeOperationEnable, EfiPciIoAttributeOperationDisable, EfiPciIoAttributeOperationSupported, EfiPciIoAttributeOperationMaximum } EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION; typedef struct { u32 read; u32 write; } efi_pci_io_protocol_access_32_t; typedef struct { u64 read; u64 write; } efi_pci_io_protocol_access_64_t; typedef struct { void *read; void *write; } efi_pci_io_protocol_access_t; typedef struct { u32 poll_mem; u32 poll_io; efi_pci_io_protocol_access_32_t mem; efi_pci_io_protocol_access_32_t io; efi_pci_io_protocol_access_32_t pci; u32 copy_mem; u32 map; u32 unmap; u32 allocate_buffer; u32 free_buffer; u32 flush; u32 get_location; u32 attributes; u32 get_bar_attributes; u32 set_bar_attributes; u64 romsize; u32 romimage; } efi_pci_io_protocol_32_t; typedef struct { u64 poll_mem; u64 poll_io; efi_pci_io_protocol_access_64_t mem; efi_pci_io_protocol_access_64_t io; efi_pci_io_protocol_access_64_t pci; u64 copy_mem; u64 map; u64 unmap; u64 allocate_buffer; u64 free_buffer; u64 flush; u64 get_location; u64 attributes; u64 get_bar_attributes; u64 set_bar_attributes; u64 romsize; u64 romimage; } efi_pci_io_protocol_64_t; typedef struct { void *poll_mem; void *poll_io; efi_pci_io_protocol_access_t mem; efi_pci_io_protocol_access_t io; efi_pci_io_protocol_access_t pci; void *copy_mem; void *map; void *unmap; void *allocate_buffer; void *free_buffer; void *flush; void *get_location; void *attributes; void *get_bar_attributes; void *set_bar_attributes; uint64_t romsize; void *romimage; } efi_pci_io_protocol_t; #define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001 #define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002 #define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004 #define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008 #define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010 #define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020 #define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040 #define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080 #define EFI_PCI_IO_ATTRIBUTE_IO 0x0100 #define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200 #define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400 #define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800 #define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000 #define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000 #define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000 #define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000 #define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000 #define 
EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000

typedef struct {
	u32 version;
	u32 get;
	u32 set;
	u32 del;
	u32 get_all;
} apple_properties_protocol_32_t;

typedef struct {
	u64 version;
	u64 get;
	u64 set;
	u64 del;
	u64 get_all;
} apple_properties_protocol_64_t;

typedef struct {
	u32 get_capability;
	u32 get_event_log;
	u32 hash_log_extend_event;
	u32 submit_command;
	u32 get_active_pcr_banks;
	u32 set_active_pcr_banks;
	u32 get_result_of_set_active_pcr_banks;
} efi_tcg2_protocol_32_t;

typedef struct {
	u64 get_capability;
	u64 get_event_log;
	u64 hash_log_extend_event;
	u64 submit_command;
	u64 get_active_pcr_banks;
	u64 set_active_pcr_banks;
	u64 get_result_of_set_active_pcr_banks;
} efi_tcg2_protocol_64_t;

typedef u32 efi_tcg2_event_log_format;

typedef struct {
	void *get_capability;
	efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format,
				      efi_physical_addr_t *,
				      efi_physical_addr_t *, efi_bool_t *);
	void *hash_log_extend_event;
	void *submit_command;
	void *get_active_pcr_banks;
	void *set_active_pcr_banks;
	void *get_result_of_set_active_pcr_banks;
} efi_tcg2_protocol_t;

/*
 * Types and defines for EFI ResetSystem
 */
#define EFI_RESET_COLD 0
#define EFI_RESET_WARM 1
#define EFI_RESET_SHUTDOWN 2

/*
 * EFI Runtime Services table
 */
#define EFI_RUNTIME_SERVICES_SIGNATURE	((u64)0x56524553544e5552ULL)	/* "RUNTSERV" */
#define EFI_RUNTIME_SERVICES_REVISION	0x00010000

typedef struct {
	efi_table_hdr_t hdr;
	u32 get_time;
	u32 set_time;
	u32 get_wakeup_time;
	u32 set_wakeup_time;
	u32 set_virtual_address_map;
	u32 convert_pointer;
	u32 get_variable;
	u32 get_next_variable;
	u32 set_variable;
	u32 get_next_high_mono_count;
	u32 reset_system;
	u32 update_capsule;
	u32 query_capsule_caps;
	u32 query_variable_info;
} efi_runtime_services_32_t;

typedef struct {
	efi_table_hdr_t hdr;
	u64 get_time;
	u64 set_time;
	u64 get_wakeup_time;
	u64 set_wakeup_time;
	u64 set_virtual_address_map;
	u64 convert_pointer;
	u64 get_variable;
	u64 get_next_variable;
	u64 set_variable;
	u64 get_next_high_mono_count;
	u64 reset_system;
	u64 update_capsule;
	u64 query_capsule_caps;
	u64 query_variable_info;
} efi_runtime_services_64_t;

typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
typedef efi_status_t efi_set_time_t (efi_time_t *tm);
typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
					    efi_time_t *tm);
typedef efi_status_t efi_set_wakeup_time_t (efi_bool_t enabled, efi_time_t *tm);
typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
					 unsigned long *data_size, void *data);
typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name,
					      efi_guid_t *vendor);
typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
					 u32 attr, unsigned long data_size,
					 void *data);
typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
typedef void efi_reset_system_t (int reset_type, efi_status_t status,
				 unsigned long data_size, efi_char16_t *data);
typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_size,
						unsigned long descriptor_size,
						u32 descriptor_version,
						efi_memory_desc_t *virtual_map);
typedef efi_status_t efi_query_variable_info_t(u32 attr, u64 *storage_space,
					       u64 *remaining_space,
					       u64 *max_variable_size);
typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **capsules,
					  unsigned long count,
					  unsigned long sg_list);
typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
					      unsigned long count,
					      u64 *max_size,
					      int *reset_type);
typedef
efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size, bool nonblocking); typedef struct { efi_table_hdr_t hdr; efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; efi_set_wakeup_time_t *set_wakeup_time; efi_set_virtual_address_map_t *set_virtual_address_map; void *convert_pointer; efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; efi_get_next_high_mono_count_t *get_next_high_mono_count; efi_reset_system_t *reset_system; efi_update_capsule_t *update_capsule; efi_query_capsule_caps_t *query_capsule_caps; efi_query_variable_info_t *query_variable_info; } efi_runtime_services_t; void efi_native_runtime_setup(void); /* * EFI Configuration Table and GUID definitions * * These are all defined in a single line to make them easier to * grep for and to see them at a glance - while still having a * similar structure to the definitions in the spec. * * Here's how they are structured: * * GUID: 12345678-1234-1234-1234-123456789012 * Spec: * #define EFI_SOME_PROTOCOL_GUID \ * {0x12345678,0x1234,0x1234,\ * {0x12,0x34,0x12,0x34,0x56,0x78,0x90,0x12}} * Here: * #define SOME_PROTOCOL_GUID EFI_GUID(0x12345678, 0x1234, 0x1234, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12) * ^ tabs ^extra space * * Note that the 'extra space' separates the values at the same place * where the UEFI SPEC breaks the line. */ #define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) #define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) #define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98) #define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) #define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) #define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) #define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0) #define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) #define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a) #define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39) #define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a) #define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) #define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) #define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 
0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) #define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) #define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) #define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) #define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61) #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) #define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f) #define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) #define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23) #define EFI_CERT_SHA256_GUID EFI_GUID(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28) #define EFI_CERT_X509_GUID EFI_GUID(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72) #define EFI_CERT_X509_SHA256_GUID EFI_GUID(0x3bd2a492, 0x96c0, 0x4079, 0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 0xed) /* * This GUID is used to pass to the kernel proper the struct screen_info * structure that was populated by the stub based on the GOP protocol instance * associated with ConOut */ #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) #define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25) #define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2) #define LINUX_EFI_MOK_VARIABLE_TABLE_GUID EFI_GUID(0xc451ed2b, 0x9694, 0x45d3, 0xba, 0xba, 0xed, 0x9f, 0x89, 0x88, 0xa3, 0x89) /* OEM GUIDs */ #define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55) typedef struct { efi_guid_t guid; u64 table; } efi_config_table_64_t; typedef struct { efi_guid_t guid; u32 table; } efi_config_table_32_t; typedef struct { efi_guid_t guid; unsigned long table; } efi_config_table_t; typedef struct { efi_guid_t guid; const char *name; unsigned long *ptr; } efi_config_table_type_t; #define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL) #define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30)) #define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20)) #define EFI_2_10_SYSTEM_TABLE_REVISION ((2 << 16) | (10)) #define EFI_2_00_SYSTEM_TABLE_REVISION ((2 << 16) | (00)) #define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | (10)) #define EFI_1_02_SYSTEM_TABLE_REVISION ((1 << 16) | (02)) typedef struct { efi_table_hdr_t hdr; u64 fw_vendor; /* 
physical addr of CHAR16 vendor string */ u32 fw_revision; u32 __pad1; u64 con_in_handle; u64 con_in; u64 con_out_handle; u64 con_out; u64 stderr_handle; u64 stderr; u64 runtime; u64 boottime; u32 nr_tables; u32 __pad2; u64 tables; } efi_system_table_64_t; typedef struct { efi_table_hdr_t hdr; u32 fw_vendor; /* physical addr of CHAR16 vendor string */ u32 fw_revision; u32 con_in_handle; u32 con_in; u32 con_out_handle; u32 con_out; u32 stderr_handle; u32 stderr; u32 runtime; u32 boottime; u32 nr_tables; u32 tables; } efi_system_table_32_t; typedef struct { efi_table_hdr_t hdr; unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */ u32 fw_revision; unsigned long con_in_handle; unsigned long con_in; unsigned long con_out_handle; unsigned long con_out; unsigned long stderr_handle; unsigned long stderr; efi_runtime_services_t *runtime; efi_boot_services_t *boottime; unsigned long nr_tables; unsigned long tables; } efi_system_table_t; /* * Architecture independent structure for describing a memory map for the * benefit of efi_memmap_init_early(), saving us the need to pass four * parameters. */ struct efi_memory_map_data { phys_addr_t phys_map; unsigned long size; unsigned long desc_version; unsigned long desc_size; }; struct efi_memory_map { phys_addr_t phys_map; void *map; void *map_end; int nr_map; unsigned long desc_version; unsigned long desc_size; bool late; }; struct efi_mem_range { struct range range; u64 attribute; }; struct efi_fdt_params { u64 system_table; u64 mmap; u32 mmap_size; u32 desc_size; u32 desc_ver; u32 secure_boot; }; typedef struct { u32 revision; u32 parent_handle; u32 system_table; u32 device_handle; u32 file_path; u32 reserved; u32 load_options_size; u32 load_options; u32 image_base; __aligned_u64 image_size; unsigned int image_code_type; unsigned int image_data_type; unsigned long unload; } efi_loaded_image_32_t; typedef struct { u32 revision; u64 parent_handle; u64 system_table; u64 device_handle; u64 file_path; u64 reserved; u32 load_options_size; u64 load_options; u64 image_base; __aligned_u64 image_size; unsigned int image_code_type; unsigned int image_data_type; unsigned long unload; } efi_loaded_image_64_t; typedef struct { u32 revision; void *parent_handle; efi_system_table_t *system_table; void *device_handle; void *file_path; void *reserved; u32 load_options_size; void *load_options; void *image_base; __aligned_u64 image_size; unsigned int image_code_type; unsigned int image_data_type; unsigned long unload; } efi_loaded_image_t; typedef struct { u64 size; u64 file_size; u64 phys_size; efi_time_t create_time; efi_time_t last_access_time; efi_time_t modification_time; __aligned_u64 attribute; efi_char16_t filename[1]; } efi_file_info_t; typedef struct { u64 revision; u32 open; u32 close; u32 delete; u32 read; u32 write; u32 get_position; u32 set_position; u32 get_info; u32 set_info; u32 flush; } efi_file_handle_32_t; typedef struct { u64 revision; u64 open; u64 close; u64 delete; u64 read; u64 write; u64 get_position; u64 set_position; u64 get_info; u64 set_info; u64 flush; } efi_file_handle_64_t; typedef struct _efi_file_handle { u64 revision; efi_status_t (*open)(struct _efi_file_handle *, struct _efi_file_handle **, efi_char16_t *, u64, u64); efi_status_t (*close)(struct _efi_file_handle *); void *delete; efi_status_t (*read)(struct _efi_file_handle *, unsigned long *, void *); void *write; void *get_position; void *set_position; efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *, unsigned long *, void *); void *set_info; void 
*flush; } efi_file_handle_t; typedef struct { u64 revision; u32 open_volume; } efi_file_io_interface_32_t; typedef struct { u64 revision; u64 open_volume; } efi_file_io_interface_64_t; typedef struct _efi_file_io_interface { u64 revision; int (*open_volume)(struct _efi_file_io_interface *, efi_file_handle_t **); } efi_file_io_interface_t; #define EFI_FILE_MODE_READ 0x0000000000000001 #define EFI_FILE_MODE_WRITE 0x0000000000000002 #define EFI_FILE_MODE_CREATE 0x8000000000000000 typedef struct { u32 version; u32 length; u64 memory_protection_attribute; } efi_properties_table_t; #define EFI_PROPERTIES_TABLE_VERSION 0x00010000 #define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1 #define EFI_INVALID_TABLE_ADDR (~0UL) typedef struct { u32 version; u32 num_entries; u32 desc_size; u32 reserved; efi_memory_desc_t entry[0]; } efi_memory_attributes_table_t; typedef struct { efi_guid_t signature_owner; u8 signature_data[]; } efi_signature_data_t; typedef struct { efi_guid_t signature_type; u32 signature_list_size; u32 signature_header_size; u32 signature_size; u8 signature_header[]; /* efi_signature_data_t signatures[][] */ } efi_signature_list_t; typedef u8 efi_sha256_hash_t[32]; typedef struct { efi_sha256_hash_t to_be_signed_hash; efi_time_t time_of_revocation; } efi_cert_x509_sha256_t; /* * All runtime access to EFI goes through this structure: */ extern struct efi { efi_system_table_t *systab; /* EFI system table */ unsigned int runtime_version; /* Runtime services version */ unsigned long mps; /* MPS table */ unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ unsigned long acpi20; /* ACPI table (ACPI 2.0) */ unsigned long smbios; /* SMBIOS table (32 bit entry point) */ unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ unsigned long boot_info; /* boot info table */ unsigned long hcdp; /* HCDP table */ unsigned long uga; /* UGA table */ unsigned long fw_vendor; /* fw_vendor */ unsigned long runtime; /* runtime table */ unsigned long config_table; /* config tables */ unsigned long esrt; /* ESRT table */ unsigned long properties_table; /* properties table */ unsigned long mem_attr_table; /* memory attributes table */ unsigned long rng_seed; /* UEFI firmware random seed */ unsigned long tpm_log; /* TPM2 Event Log table */ unsigned long tpm_final_log; /* TPM2 Final Events Log table */ unsigned long mokvar_table; /* MOK variable config table */ unsigned long mem_reserve; /* Linux EFI memreserve table */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; efi_set_wakeup_time_t *set_wakeup_time; efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; efi_set_variable_t *set_variable_nonblocking; efi_query_variable_info_t *query_variable_info; efi_query_variable_info_t *query_variable_info_nonblocking; efi_update_capsule_t *update_capsule; efi_query_capsule_caps_t *query_capsule_caps; efi_get_next_high_mono_count_t *get_next_high_mono_count; efi_reset_system_t *reset_system; efi_set_virtual_address_map_t *set_virtual_address_map; struct efi_memory_map memmap; unsigned long flags; } efi; extern struct mm_struct efi_mm; static inline int efi_guidcmp (efi_guid_t left, efi_guid_t right) { return memcmp(&left, &right, sizeof (efi_guid_t)); } static inline char * efi_guid_to_str(efi_guid_t *guid, char *out) { sprintf(out, "%pUl", guid->b); return out; } extern void efi_init (void); extern void *efi_get_pal_addr (void); extern void efi_map_pal_code (void); extern void 
efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); #ifdef CONFIG_EFI extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ #else static inline void efi_enter_virtual_mode (void) {} #endif #ifdef CONFIG_X86 extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, bool nonblocking); extern void efi_find_mirror(void); #else static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, bool nonblocking) { return EFI_SUCCESS; } #endif extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries); extern int __init efi_memmap_init_early(struct efi_memory_map_data *data); extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size); extern void __init efi_memmap_unmap(void); extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map); extern int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range); extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf, struct efi_mem_range *mem); extern int efi_config_init(efi_config_table_type_t *arch_tables); #ifdef CONFIG_EFI_ESRT extern void __init efi_esrt_init(void); #else static inline void efi_esrt_init(void) { } #endif extern int efi_config_parse_tables(void *config_tables, int count, int sz, efi_config_table_type_t *arch_tables); extern u64 efi_get_iobase (void); extern int efi_mem_type(unsigned long phys_addr); extern u64 efi_mem_attributes (unsigned long phys_addr); extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); extern int __init efi_uart_console_only (void); extern u64 efi_mem_desc_end(efi_memory_desc_t *md); extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); extern void efi_mem_reserve(phys_addr_t addr, u64 size); extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); extern int efi_get_fdt_params(struct efi_fdt_params *params); extern struct kobject *efi_kobj; extern int efi_reboot_quirk_mode; extern bool efi_poweroff_required(void); #ifdef CONFIG_EFI_FAKE_MEMMAP extern void __init efi_fake_memmap(void); #else static inline void efi_fake_memmap(void) { } #endif /* * efi_memattr_perm_setter - arch specific callback function passed into * efi_memattr_apply_permissions() that updates the * mapping permissions described by the second * argument in the page tables referred to by the * first argument. */ typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *); extern int efi_memattr_init(void); extern int efi_memattr_apply_permissions(struct mm_struct *mm, efi_memattr_perm_setter fn); /* * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor * @map: the start of efi memmap * @desc_size: the size of space for each EFI memmap descriptor * @n: the index of efi memmap descriptor * * EFI boot service provides the GetMemoryMap() function to get a copy of the * current memory map which is an array of memory descriptors, each of * which describes a contiguous block of memory. It also gets the size of the * map, and the size of each descriptor, etc. 
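 *
 * For example (an illustrative sketch; 'map', 'desc_size' and 'i' are
 * assumed to come from a prior GetMemoryMap() call):
 *
 *	efi_memory_desc_t *md = efi_early_memdesc_ptr(map, desc_size, i);
 *
 *	if (md->type == EFI_CONVENTIONAL_MEMORY)
 *		usable_pages += md->num_pages;
 *
 * where 'usable_pages' is a hypothetical accumulator.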
 *
 * Note that per section 6.2 of UEFI Spec 2.6 Errata A, the returned size of
 * each descriptor might not be equal to sizeof(efi_memory_desc_t),
 * since efi_memory_desc_t may be extended in the future. Thus the OS
 * MUST use the returned size of the descriptor to find the start of each
 * efi_memory_desc_t in the memory map array. This should only be used
 * during bootup since for_each_efi_memory_desc_xxx() is available after the
 * kernel initializes the EFI subsystem to set up struct efi_memory_map.
 */
#define efi_early_memdesc_ptr(map, desc_size, n)			\
	(efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size)))

/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md)				   \
	for ((md) = (m)->map;						   \
	     (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end;	   \
	     (md) = (void *)(md) + (m)->desc_size)

/**
 * for_each_efi_memory_desc - iterate over descriptors in efi.memmap
 * @md: the efi_memory_desc_t * iterator
 *
 * Once the loop finishes @md must not be accessed.
 */
#define for_each_efi_memory_desc(md) \
	for_each_efi_memory_desc_in_map(&efi.memmap, md)

/*
 * Format an EFI memory descriptor's type and attributes to a user-provided
 * character buffer, as per snprintf(), and return the buffer.
 */
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md);

typedef void (*efi_element_handler_t)(const char *source,
				      const void *element_data,
				      size_t element_size);
extern int __init parse_efi_signature_list(
	const char *source,
	const void *data, size_t size,
	efi_element_handler_t (*get_handler_for_guid)(const efi_guid_t *));

/**
 * efi_range_is_wc - check the WC bit on an address range
 * @start: starting kvirt address
 * @len: length of range
 *
 * Consult the EFI memory map and make sure it's ok to set this range WC.
 * Returns true or false.
 */
static inline int efi_range_is_wc(unsigned long start, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i += (1UL << EFI_PAGE_SHIFT)) {
		unsigned long paddr = __pa(start + i);
		if (!(efi_mem_attributes(paddr) & EFI_MEMORY_WC))
			return 0;
	}
	/* The range checked out */
	return 1;
}

#ifdef CONFIG_EFI_PCDP
extern int __init efi_setup_pcdp_console(char *);
#endif

/*
 * We play games with efi_enabled so that the compiler will, if
 * possible, remove EFI-related code altogether.
 */
#define EFI_BOOT		0	/* Were we booted from EFI? */
#define EFI_CONFIG_TABLES	2	/* Can we use EFI config tables? */
#define EFI_RUNTIME_SERVICES	3	/* Can we use runtime services? */
#define EFI_MEMMAP		4	/* Can we use EFI memory map? */
#define EFI_64BIT		5	/* Is the firmware 64-bit? */
#define EFI_PARAVIRT		6	/* Access is via a paravirt interface */
#define EFI_ARCH_1		7	/* First arch-specific bit */
#define EFI_DBG			8	/* Print additional debug info at runtime */
#define EFI_NX_PE_DATA		9	/* Can runtime data regions be mapped non-executable? */
#define EFI_MEM_ATTR		10	/* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
#define EFI_SECURE_BOOT		11	/* Are we in Secure Boot mode? */

enum efi_secureboot_mode {
	efi_secureboot_mode_unset,
	efi_secureboot_mode_unknown,
	efi_secureboot_mode_disabled,
	efi_secureboot_mode_enabled,
};

#ifdef CONFIG_EFI
/*
 * Test whether the above EFI_* bits are enabled.
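 *
 * For example (illustrative only; 'tm', 'cap' and 'status' are
 * hypothetical locals):
 *
 *	if (efi_enabled(EFI_RUNTIME_SERVICES))
 *		status = efi.get_time(&tm, &cap);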
 */
static inline bool efi_enabled(int feature)
{
	return test_bit(feature, &efi.flags) != 0;
}
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);

extern void __init efi_set_secure_boot(enum efi_secureboot_mode mode);
#else
static inline bool efi_enabled(int feature)
{
	return false;
}
static inline void
efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}

static inline bool efi_capsule_pending(int *reset_type)
{
	return false;
}

static inline void efi_set_secure_boot(enum efi_secureboot_mode mode) {}
#endif

extern int efi_status_to_err(efi_status_t status);
extern const char *efi_status_to_str(efi_status_t status);

/*
 * Variable Attributes
 */
#define EFI_VARIABLE_NON_VOLATILE	0x0000000000000001
#define EFI_VARIABLE_BOOTSERVICE_ACCESS	0x0000000000000002
#define EFI_VARIABLE_RUNTIME_ACCESS	0x0000000000000004
#define EFI_VARIABLE_HARDWARE_ERROR_RECORD	0x0000000000000008
#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS	0x0000000000000010
#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS	0x0000000000000020
#define EFI_VARIABLE_APPEND_WRITE	0x0000000000000040

#define EFI_VARIABLE_MASK	(EFI_VARIABLE_NON_VOLATILE | \
				EFI_VARIABLE_BOOTSERVICE_ACCESS | \
				EFI_VARIABLE_RUNTIME_ACCESS | \
				EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
				EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \
				EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \
				EFI_VARIABLE_APPEND_WRITE)

/*
 * Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"))
 * not including trailing NUL
 */
#define EFI_VARIABLE_GUID_LEN	UUID_STRING_LEN

/*
 * The type of search to perform when calling boottime->locate_handle
 */
#define EFI_LOCATE_ALL_HANDLES		0
#define EFI_LOCATE_BY_REGISTER_NOTIFY	1
#define EFI_LOCATE_BY_PROTOCOL		2

/*
 * EFI Device Path information
 */
#define EFI_DEV_HW			0x01
#define  EFI_DEV_PCI			 1
#define  EFI_DEV_PCCARD			 2
#define  EFI_DEV_MEM_MAPPED		 3
#define  EFI_DEV_VENDOR			 4
#define  EFI_DEV_CONTROLLER		 5
#define EFI_DEV_ACPI			0x02
#define  EFI_DEV_BASIC_ACPI		 1
#define  EFI_DEV_EXPANDED_ACPI		 2
#define EFI_DEV_MSG			0x03
#define  EFI_DEV_MSG_ATAPI		 1
#define  EFI_DEV_MSG_SCSI		 2
#define  EFI_DEV_MSG_FC			 3
#define  EFI_DEV_MSG_1394		 4
#define  EFI_DEV_MSG_USB		 5
#define  EFI_DEV_MSG_USB_CLASS		15
#define  EFI_DEV_MSG_I2O		 6
#define  EFI_DEV_MSG_MAC		11
#define  EFI_DEV_MSG_IPV4		12
#define  EFI_DEV_MSG_IPV6		13
#define  EFI_DEV_MSG_INFINIBAND		 9
#define  EFI_DEV_MSG_UART		14
#define  EFI_DEV_MSG_VENDOR		10
#define EFI_DEV_MEDIA			0x04
#define  EFI_DEV_MEDIA_HARD_DRIVE	 1
#define  EFI_DEV_MEDIA_CDROM		 2
#define  EFI_DEV_MEDIA_VENDOR		 3
#define  EFI_DEV_MEDIA_FILE		 4
#define  EFI_DEV_MEDIA_PROTOCOL		 5
#define EFI_DEV_BIOS_BOOT		0x05
#define EFI_DEV_END_PATH		0x7F
#define EFI_DEV_END_PATH2		0xFF
#define  EFI_DEV_END_INSTANCE		0x01
#define  EFI_DEV_END_ENTIRE		0xFF

struct efi_generic_dev_path {
	u8 type;
	u8 sub_type;
	u16 length;
} __attribute ((packed));

struct efi_dev_path {
	u8 type;	/* can be replaced with unnamed */
	u8 sub_type;	/* struct efi_generic_dev_path; */
	u16 length;	/* once we've moved to -std=c11 */
	union {
		struct {
			u32 hid;
			u32 uid;
		} acpi;
		struct {
			u8 fn;
			u8 dev;
		} pci;
	};
} __attribute ((packed));

#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER)
struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len);
#endif

static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
{
	*npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr);
	*addr &= PAGE_MASK;
}

/*
 * EFI Variable support.
 *
 * Different firmware drivers can expose their EFI-like variables using
 * the following.
 */
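/*
 * A minimal registration sketch (hypothetical driver code, not part of
 * this header): fill in efivar_operations with driver callbacks and
 * register them; the 'my_*' names are assumptions:
 *
 *	static const struct efivar_operations my_efivar_ops = {
 *		.get_variable		= my_get_variable,
 *		.get_next_variable	= my_get_next_variable,
 *		.set_variable		= my_set_variable,
 *	};
 *
 *	err = efivars_register(&my_efivars, &my_efivar_ops, my_kobject);
 */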
*/ struct efivar_operations { efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; efi_set_variable_t *set_variable_nonblocking; efi_query_variable_store_t *query_variable_store; }; struct efivars { struct kset *kset; struct kobject *kobject; const struct efivar_operations *ops; }; /* * The maximum size of VariableName + Data = 1024 * Therefore, it's reasonable to save that much * space in each part of the structure, * and we use a page for reading/writing. */ #define EFI_VAR_NAME_LEN 1024 struct efi_variable { efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; efi_guid_t VendorGuid; unsigned long DataSize; __u8 Data[1024]; efi_status_t Status; __u32 Attributes; } __attribute__((packed)); struct efivar_entry { struct efi_variable var; struct list_head list; struct kobject kobj; bool scanning; bool deleting; }; typedef struct { u32 reset; u32 output_string; u32 test_string; } efi_simple_text_output_protocol_32_t; typedef struct { u64 reset; u64 output_string; u64 test_string; } efi_simple_text_output_protocol_64_t; struct efi_simple_text_output_protocol { void *reset; efi_status_t (*output_string)(void *, void *); void *test_string; }; #define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0 #define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1 #define PIXEL_BIT_MASK 2 #define PIXEL_BLT_ONLY 3 #define PIXEL_FORMAT_MAX 4 struct efi_pixel_bitmask { u32 red_mask; u32 green_mask; u32 blue_mask; u32 reserved_mask; }; struct efi_graphics_output_mode_info { u32 version; u32 horizontal_resolution; u32 vertical_resolution; int pixel_format; struct efi_pixel_bitmask pixel_information; u32 pixels_per_scan_line; } __packed; struct efi_graphics_output_protocol_mode_32 { u32 max_mode; u32 mode; u32 info; u32 size_of_info; u64 frame_buffer_base; u32 frame_buffer_size; } __packed; struct efi_graphics_output_protocol_mode_64 { u32 max_mode; u32 mode; u64 info; u64 size_of_info; u64 frame_buffer_base; u64 frame_buffer_size; } __packed; struct efi_graphics_output_protocol_mode { u32 max_mode; u32 mode; unsigned long info; unsigned long size_of_info; u64 frame_buffer_base; unsigned long frame_buffer_size; } __packed; struct efi_graphics_output_protocol_32 { u32 query_mode; u32 set_mode; u32 blt; u32 mode; }; struct efi_graphics_output_protocol_64 { u64 query_mode; u64 set_mode; u64 blt; u64 mode; }; struct efi_graphics_output_protocol { unsigned long query_mode; unsigned long set_mode; unsigned long blt; struct efi_graphics_output_protocol_mode *mode; }; typedef efi_status_t (*efi_graphics_output_protocol_query_mode)( struct efi_graphics_output_protocol *, u32, unsigned long *, struct efi_graphics_output_mode_info **); extern struct list_head efivar_sysfs_list; static inline void efivar_unregister(struct efivar_entry *var) { kobject_put(&var->kobj); } int efivars_register(struct efivars *efivars, const struct efivar_operations *ops, struct kobject *kobject); int efivars_unregister(struct efivars *efivars); struct kobject *efivars_kobject(void); int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), void *data, bool duplicates, struct list_head *head); int efivar_entry_add(struct efivar_entry *entry, struct list_head *head); int efivar_entry_remove(struct efivar_entry *entry); int __efivar_entry_delete(struct efivar_entry *entry); int efivar_entry_delete(struct efivar_entry *entry); int efivar_entry_size(struct efivar_entry *entry, unsigned long *size); int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, unsigned long 
*size, void *data); int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, unsigned long *size, void *data); int efivar_entry_set(struct efivar_entry *entry, u32 attributes, unsigned long size, void *data, struct list_head *head); int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, unsigned long *size, void *data, bool *set); int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, bool block, unsigned long size, void *data); int efivar_entry_iter_begin(void); void efivar_entry_iter_end(void); int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *), struct list_head *head, void *data, struct efivar_entry **prev); int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), struct list_head *head, void *data); struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, struct list_head *head, bool remove); bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, unsigned long data_size); bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, size_t len); extern struct work_struct efivar_work; void efivar_run_worker(void); #if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE) int efivars_sysfs_init(void); #define EFIVARS_DATA_SIZE_MAX 1024 #endif /* CONFIG_EFI_VARS */ extern bool efi_capsule_pending(int *reset_type); extern int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset); extern int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages); #ifdef CONFIG_EFI_RUNTIME_MAP int efi_runtime_map_init(struct kobject *); int efi_get_runtime_map_size(void); int efi_get_runtime_map_desc_size(void); int efi_runtime_map_copy(void *buf, size_t bufsz); #else static inline int efi_runtime_map_init(struct kobject *kobj) { return 0; } static inline int efi_get_runtime_map_size(void) { return 0; } static inline int efi_get_runtime_map_desc_size(void) { return 0; } static inline int efi_runtime_map_copy(void *buf, size_t bufsz) { return 0; } #endif /* prototypes shared between arch specific and generic stub code */ void efi_printk(efi_system_table_t *sys_table_arg, char *str); void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long addr); char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, efi_loaded_image_t *image, int *cmd_line_len); efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, struct efi_boot_memmap *map); efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long align, unsigned long *addr, unsigned long min); static inline efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long align, unsigned long *addr) { /* * Don't allocate at 0x0. It will confuse code that * checks pointers against NULL. Skip the first 8 * bytes so we start at a nice even number. 
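 *
 * An illustrative caller (hypothetical; 'sys_table' and the error
 * message are assumptions):
 *
 *	unsigned long addr;
 *	efi_status_t status;
 *
 *	status = efi_low_alloc(sys_table, size, EFI_PAGE_SIZE, &addr);
 *	if (status != EFI_SUCCESS)
 *		efi_printk(sys_table, "Failed to allocate memory\n");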
 */
	return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8);
}

efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
			    unsigned long size, unsigned long align,
			    unsigned long *addr, unsigned long max);

efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
				 unsigned long *image_addr,
				 unsigned long image_size,
				 unsigned long alloc_size,
				 unsigned long preferred_addr,
				 unsigned long alignment,
				 unsigned long min_addr);

efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
				  efi_loaded_image_t *image,
				  char *cmd_line, char *option_string,
				  unsigned long max_addr,
				  unsigned long *load_addr,
				  unsigned long *load_size);

efi_status_t efi_parse_options(char const *cmdline);

efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
			   struct screen_info *si, efi_guid_t *proto,
			   unsigned long size);

#ifdef CONFIG_EFI
extern bool efi_runtime_disabled(void);
#else
static inline bool efi_runtime_disabled(void) { return true; }
#endif

extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
extern unsigned long efi_call_virt_save_flags(void);

enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);

#ifdef CONFIG_RESET_ATTACK_MITIGATION
void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg);
#else
static inline void
efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
#endif

void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);

/*
 * Arch code can implement the following three template macros, avoiding
 * repetition for the void/non-void return cases of {__,}efi_call_virt():
 *
 *  * arch_efi_call_virt_setup()
 *
 *    Sets up the environment for the call (e.g. switching page tables,
 *    allowing kernel-mode use of floating point, if required).
 *
 *  * arch_efi_call_virt()
 *
 *    Performs the call. The last expression in the macro must be the call
 *    itself, allowing the logic to be shared by the void and non-void
 *    cases.
 *
 *  * arch_efi_call_virt_teardown()
 *
 *    Restores the usual kernel environment once the call has returned.
 */

#define efi_call_virt_pointer(p, f, args...)				\
({									\
	efi_status_t __s;						\
	unsigned long __flags;						\
									\
	arch_efi_call_virt_setup();					\
									\
	__flags = efi_call_virt_save_flags();				\
	__s = arch_efi_call_virt(p, f, args);				\
	efi_call_virt_check_flags(__flags, __stringify(f));		\
									\
	arch_efi_call_virt_teardown();					\
									\
	__s;								\
})

#define __efi_call_virt_pointer(p, f, args...)				\
({									\
	unsigned long __flags;						\
									\
	arch_efi_call_virt_setup();					\
									\
	__flags = efi_call_virt_save_flags();				\
	arch_efi_call_virt(p, f, args);					\
	efi_call_virt_check_flags(__flags, __stringify(f));		\
									\
	arch_efi_call_virt_teardown();					\
})

typedef efi_status_t (*efi_exit_boot_map_processing)(
	efi_system_table_t *sys_table_arg,
	struct efi_boot_memmap *map,
	void *priv);

efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
				    void *handle,
				    struct efi_boot_memmap *map,
				    void *priv,
				    efi_exit_boot_map_processing priv_func);

#define EFI_RANDOM_SEED_SIZE		32U // BLAKE2S_HASH_SIZE

struct linux_efi_random_seed {
	u32	size;
	u8	bits[];
};

struct linux_efi_tpm_eventlog {
	u32	size;
	u32	final_events_preboot_size;
	u8	version;
	u8	log[];
};

extern int efi_tpm_eventlog_init(void);

struct efi_tcg2_final_events_table {
	u64 version;
	u64 nr_events;
	u8 events[];
};

extern int efi_tpm_final_log_size;

extern unsigned long rci2_table_phys;

/*
 * efi_runtime_service() function identifiers.
 * "NONE" is used by efi_recover_from_page_fault() to check if the page
 * fault happened while executing an efi runtime service.
 */
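/*
 * Illustrative sketch (not part of this header): an arch fault handler
 * might report the interrupted service like this:
 *
 *	if (efi_rts_work.efi_rts_id != EFI_NONE)
 *		pr_err("page fault in EFI runtime service %d\n",
 *		       efi_rts_work.efi_rts_id);
 */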
*/ enum efi_rts_ids { EFI_NONE, EFI_GET_TIME, EFI_SET_TIME, EFI_GET_WAKEUP_TIME, EFI_SET_WAKEUP_TIME, EFI_GET_VARIABLE, EFI_GET_NEXT_VARIABLE, EFI_SET_VARIABLE, EFI_QUERY_VARIABLE_INFO, EFI_GET_NEXT_HIGH_MONO_COUNT, EFI_RESET_SYSTEM, EFI_UPDATE_CAPSULE, EFI_QUERY_CAPSULE_CAPS, }; /* * efi_runtime_work: Details of EFI Runtime Service work * @arg<1-5>: EFI Runtime Service function arguments * @status: Status of executing EFI Runtime Service * @efi_rts_id: EFI Runtime Service function identifier * @efi_rts_comp: Struct used for handling completions */ struct efi_runtime_work { void *arg1; void *arg2; void *arg3; void *arg4; void *arg5; efi_status_t status; struct work_struct work; enum efi_rts_ids efi_rts_id; struct completion efi_rts_comp; }; extern struct efi_runtime_work efi_rts_work; /* Workqueue to queue EFI Runtime Services */ extern struct workqueue_struct *efi_rts_wq; struct linux_efi_memreserve { int size; // allocated size of the array atomic_t count; // number of entries used phys_addr_t next; // pa of next struct instance struct { phys_addr_t base; phys_addr_t size; } entry[0]; }; #define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \ (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0])) #define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) /* * The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table can be provided * to the kernel by an EFI boot loader. The table contains a packed * sequence of these entries, one for each named MOK variable. * The sequence is terminated by an entry with a completely NULL * name and 0 data size. */ struct efi_mokvar_table_entry { char name[256]; u64 data_size; u8 data[]; } __attribute((packed)); #ifdef CONFIG_LOAD_UEFI_KEYS extern void __init efi_mokvar_table_init(void); extern struct efi_mokvar_table_entry *efi_mokvar_entry_next( struct efi_mokvar_table_entry **mokvar_entry); extern struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name); #else static inline void efi_mokvar_table_init(void) { } static inline struct efi_mokvar_table_entry *efi_mokvar_entry_next( struct efi_mokvar_table_entry **mokvar_entry) { return NULL; } static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find( const char *name) { return NULL; } #endif #endif /* _LINUX_EFI_H */ lzo.h 0000644 00000003070 14722070374 0005527 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LZO_H__ #define __LZO_H__ /* * LZO Public Kernel Interface * A mini subset of the LZO real-time data compression library * * Copyright (C) 1996-2012 Markus F.X.J. 
Oberhumer <markus@oberhumer.com> * * The full LZO package can be found at: * http://www.oberhumer.com/opensource/lzo/ * * Changed for Linux kernel use by: * Nitin Gupta <nitingupta910@gmail.com> * Richard Purdie <rpurdie@openedhand.com> */ #define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short)) #define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2) /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ int lzo1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ int lzorle1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); /* safe decompression with overrun testing */ int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len); /* * Return values (< 0 = Error) */ #define LZO_E_OK 0 #define LZO_E_ERROR (-1) #define LZO_E_OUT_OF_MEMORY (-2) #define LZO_E_NOT_COMPRESSIBLE (-3) #define LZO_E_INPUT_OVERRUN (-4) #define LZO_E_OUTPUT_OVERRUN (-5) #define LZO_E_LOOKBEHIND_OVERRUN (-6) #define LZO_E_EOF_NOT_FOUND (-7) #define LZO_E_INPUT_NOT_CONSUMED (-8) #define LZO_E_NOT_YET_IMPLEMENTED (-9) #define LZO_E_INVALID_ARGUMENT (-10) #endif agp_backend.h 0000644 00000006716 14722070374 0007153 0 ustar 00 /* * AGPGART backend specific includes. Not for userspace consumption. * * Copyright (C) 2004 Silicon Graphics, Inc. * Copyright (C) 2002-2003 Dave Jones * Copyright (C) 1999 Jeff Hartmann * Copyright (C) 1999 Precision Insight, Inc. * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #ifndef _AGP_BACKEND_H #define _AGP_BACKEND_H 1 #include <linux/list.h> enum chipset_type { NOT_SUPPORTED, SUPPORTED, }; struct agp_version { u16 major; u16 minor; }; struct agp_kern_info { struct agp_version version; struct pci_dev *device; enum chipset_type chipset; unsigned long mode; unsigned long aper_base; size_t aper_size; int max_memory; /* In pages */ int current_memory; bool cant_use_aperture; unsigned long page_mask; const struct vm_operations_struct *vm_ops; }; /* * The agp_memory structure has information about the block of agp memory * allocated. A caller may manipulate the next and prev pointers to link * each allocated item into a list. These pointers are ignored by the backend. 
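 *
 * For instance, a caller could chain two allocations (illustrative
 * sketch; 'bridge' is a previously acquired agp_bridge_data and error
 * handling is omitted):
 *
 *	struct agp_memory *a = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	struct agp_memory *b = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *
 *	a->next = b;
 *	b->prev = a;
 *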
* Everything else should never be written to, but the caller may read any of * the items to determine the status of this block of agp memory. */ struct agp_bridge_data; struct agp_memory { struct agp_memory *next; struct agp_memory *prev; struct agp_bridge_data *bridge; struct page **pages; size_t page_count; int key; int num_scratch_pages; off_t pg_start; u32 type; u32 physical; bool is_bound; bool is_flushed; /* list of agp_memory mapped to the aperture */ struct list_head mapped_list; /* DMA-mapped addresses */ struct scatterlist *sg_list; int num_sg; }; #define AGP_NORMAL_MEMORY 0 #define AGP_USER_TYPES (1 << 16) #define AGP_USER_MEMORY (AGP_USER_TYPES) #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) extern struct agp_bridge_data *agp_bridge; extern struct list_head agp_bridges; extern struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *); extern void agp_free_memory(struct agp_memory *); extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, u32); extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); extern int agp_bind_memory(struct agp_memory *, off_t); extern int agp_unbind_memory(struct agp_memory *); extern void agp_enable(struct agp_bridge_data *, u32); extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); extern void agp_backend_release(struct agp_bridge_data *); #endif /* _AGP_BACKEND_H */ fsverity.h 0000644 00000013743 14722070374 0006606 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * fs-verity: read-only file-based authenticity protection * * This header declares the interface between the fs/verity/ support layer and * filesystems that support fs-verity. * * Copyright 2019 Google LLC */ #ifndef _LINUX_FSVERITY_H #define _LINUX_FSVERITY_H #include <linux/fs.h> #include <uapi/linux/fsverity.h> /* Verity operations for filesystems */ struct fsverity_operations { /** * Begin enabling verity on the given file. * * @filp: a readonly file descriptor for the file * * The filesystem must do any needed filesystem-specific preparations * for enabling verity, e.g. evicting inline data. It also must return * -EBUSY if verity is already being enabled on the given file. * * i_rwsem is held for write. * * Return: 0 on success, -errno on failure */ int (*begin_enable_verity)(struct file *filp); /** * End enabling verity on the given file. * * @filp: a readonly file descriptor for the file * @desc: the verity descriptor to write, or NULL on failure * @desc_size: size of verity descriptor, or 0 on failure * @merkle_tree_size: total bytes the Merkle tree took up * * If desc == NULL, then enabling verity failed and the filesystem only * must do any necessary cleanups. Else, it must also store the given * verity descriptor to a fs-specific location associated with the inode * and do any fs-specific actions needed to mark the inode as a verity * inode, e.g. setting a bit in the on-disk inode. The filesystem is * also responsible for setting the S_VERITY flag in the VFS inode. * * i_rwsem is held for write, but it may have been dropped between * ->begin_enable_verity() and ->end_enable_verity(). * * Return: 0 on success, -errno on failure */ int (*end_enable_verity)(struct file *filp, const void *desc, size_t desc_size, u64 merkle_tree_size); /** * Get the verity descriptor of the given inode. 
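 *
 * Illustrative calling pattern (not part of the original description;
 * 'ops', 'buf' and 'res' are hypothetical):
 *
 *	res = ops->get_verity_descriptor(inode, NULL, 0);
 *	if (res > 0)
 *		res = ops->get_verity_descriptor(inode, buf, res);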
* * @inode: an inode with the S_VERITY flag set * @buf: buffer in which to place the verity descriptor * @bufsize: size of @buf, or 0 to retrieve the size only * * If bufsize == 0, then the size of the verity descriptor is returned. * Otherwise the verity descriptor is written to 'buf' and its actual * size is returned; -ERANGE is returned if it's too large. This may be * called by multiple processes concurrently on the same inode. * * Return: the size on success, -errno on failure */ int (*get_verity_descriptor)(struct inode *inode, void *buf, size_t bufsize); /** * Read a Merkle tree page of the given inode. * * @inode: the inode * @index: 0-based index of the page within the Merkle tree * * This can be called at any time on an open verity file, as well as * between ->begin_enable_verity() and ->end_enable_verity(). It may be * called by multiple processes concurrently, even with the same page. * * Note that this must retrieve a *page*, not necessarily a *block*. * * Return: the page on success, ERR_PTR() on failure */ struct page *(*read_merkle_tree_page)(struct inode *inode, pgoff_t index); /** * Write a Merkle tree block to the given inode. * * @inode: the inode for which the Merkle tree is being built * @buf: block to write * @index: 0-based index of the block within the Merkle tree * @log_blocksize: log base 2 of the Merkle tree block size * * This is only called between ->begin_enable_verity() and * ->end_enable_verity(). * * Return: 0 on success, -errno on failure */ int (*write_merkle_tree_block)(struct inode *inode, const void *buf, u64 index, int log_blocksize); }; #ifdef CONFIG_FS_VERITY static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) { /* pairs with the cmpxchg() in fsverity_set_info() */ return READ_ONCE(inode->i_verity_info); } /* enable.c */ extern int fsverity_ioctl_enable(struct file *filp, const void __user *arg); /* measure.c */ extern int fsverity_ioctl_measure(struct file *filp, void __user *arg); /* open.c */ extern int fsverity_file_open(struct inode *inode, struct file *filp); extern int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr); extern void fsverity_cleanup_inode(struct inode *inode); /* verify.c */ extern bool fsverity_verify_page(struct page *page); extern void fsverity_verify_bio(struct bio *bio); extern void fsverity_enqueue_verify_work(struct work_struct *work); #else /* !CONFIG_FS_VERITY */ static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) { return NULL; } /* enable.c */ static inline int fsverity_ioctl_enable(struct file *filp, const void __user *arg) { return -EOPNOTSUPP; } /* measure.c */ static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } /* open.c */ static inline int fsverity_file_open(struct inode *inode, struct file *filp) { return IS_VERITY(inode) ? -EOPNOTSUPP : 0; } static inline int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr) { return IS_VERITY(d_inode(dentry)) ? -EOPNOTSUPP : 0; } static inline void fsverity_cleanup_inode(struct inode *inode) { } /* verify.c */ static inline bool fsverity_verify_page(struct page *page) { WARN_ON(1); return false; } static inline void fsverity_verify_bio(struct bio *bio) { WARN_ON(1); } static inline void fsverity_enqueue_verify_work(struct work_struct *work) { WARN_ON(1); } #endif /* !CONFIG_FS_VERITY */ /** * fsverity_active() - do reads from the inode need to go through fs-verity? * * This checks whether ->i_verity_info has been set. 
* * Filesystems call this from ->readpages() to check whether the pages need to * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to * a race condition where the file is being read concurrently with * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.) */ static inline bool fsverity_active(const struct inode *inode) { return fsverity_get_info(inode) != NULL; } #endif /* _LINUX_FSVERITY_H */ mpage.h 0000644 00000001371 14722070374 0006016 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/mpage.h * * Contains declarations related to preparing and submitting BIOS which contain * multiple pagecache pages. */ /* * (And no, it doesn't do the #ifdef __MPAGE_H thing, and it doesn't do * nested includes. Get it right in the .c file). */ #ifdef CONFIG_BLOCK struct writeback_control; int mpage_readpages(struct address_space *mapping, struct list_head *pages, unsigned nr_pages, get_block_t get_block); int mpage_readpage(struct page *page, get_block_t get_block); int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block); int mpage_writepage(struct page *page, get_block_t *get_block, struct writeback_control *wbc); #endif bvec.h 0000644 00000011337 14722070374 0005647 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * bvec iterator * * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com> */ #ifndef __LINUX_BVEC_ITER_H #define __LINUX_BVEC_ITER_H #include <linux/kernel.h> #include <linux/bug.h> #include <linux/errno.h> #include <linux/mm.h> /* * was unsigned short, but we might as well be ready for > 64kB I/O pages */ struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; }; struct bvec_iter { sector_t bi_sector; /* device address in 512 byte sectors */ unsigned int bi_size; /* residual I/O count */ unsigned int bi_idx; /* current index into bvl_vec */ unsigned int bi_bvec_done; /* number of bytes completed in current bvec */ }; struct bvec_iter_all { struct bio_vec bv; int idx; unsigned done; }; /* * various member access, note that bio_data should of course not be used * on highmem page vectors */ #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) /* multi-page (mp_bvec) helpers */ #define mp_bvec_iter_page(bvec, iter) \ (__bvec_iter_bvec((bvec), (iter))->bv_page) #define mp_bvec_iter_len(bvec, iter) \ min((iter).bi_size, \ __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) #define mp_bvec_iter_offset(bvec, iter) \ (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) #define mp_bvec_iter_page_idx(bvec, iter) \ (mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE) #define mp_bvec_iter_bvec(bvec, iter) \ ((struct bio_vec) { \ .bv_page = mp_bvec_iter_page((bvec), (iter)), \ .bv_len = mp_bvec_iter_len((bvec), (iter)), \ .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \ }) /* For building single-page bvec in flight */ #define bvec_iter_offset(bvec, iter) \ (mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE) #define bvec_iter_len(bvec, iter) \ min_t(unsigned, mp_bvec_iter_len((bvec), (iter)), \ PAGE_SIZE - bvec_iter_offset((bvec), (iter))) #define bvec_iter_page(bvec, iter) \ (mp_bvec_iter_page((bvec), (iter)) + \ mp_bvec_iter_page_idx((bvec), (iter))) #define bvec_iter_bvec(bvec, iter) \ ((struct bio_vec) { \ .bv_page = bvec_iter_page((bvec), (iter)), \ .bv_len = bvec_iter_len((bvec), (iter)), \ .bv_offset = bvec_iter_offset((bvec), (iter)), \ }) static inline bool bvec_iter_advance(const struct bio_vec *bv, struct bvec_iter 
*iter, unsigned bytes) { if (WARN_ONCE(bytes > iter->bi_size, "Attempted to advance past end of bvec iter\n")) { iter->bi_size = 0; return false; } while (bytes) { const struct bio_vec *cur = bv + iter->bi_idx; unsigned len = min3(bytes, iter->bi_size, cur->bv_len - iter->bi_bvec_done); bytes -= len; iter->bi_size -= len; iter->bi_bvec_done += len; if (iter->bi_bvec_done == cur->bv_len) { iter->bi_bvec_done = 0; iter->bi_idx++; } } return true; } static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) { iter->bi_bvec_done = 0; iter->bi_idx++; } #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \ (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) /* for iterating one bio from start to end */ #define BVEC_ITER_ALL_INIT (struct bvec_iter) \ { \ .bi_sector = 0, \ .bi_size = UINT_MAX, \ .bi_idx = 0, \ .bi_bvec_done = 0, \ } static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all) { iter_all->done = 0; iter_all->idx = 0; return &iter_all->bv; } static inline void bvec_advance(const struct bio_vec *bvec, struct bvec_iter_all *iter_all) { struct bio_vec *bv = &iter_all->bv; if (iter_all->done) { bv->bv_page++; bv->bv_offset = 0; } else { bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT); bv->bv_offset = bvec->bv_offset & ~PAGE_MASK; } bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, bvec->bv_len - iter_all->done); iter_all->done += bv->bv_len; if (iter_all->done == bvec->bv_len) { iter_all->idx++; iter_all->done = 0; } } /* * Get the last single-page segment from the multi-page bvec and store it * in @seg */ static inline void mp_bvec_last_segment(const struct bio_vec *bvec, struct bio_vec *seg) { unsigned total = bvec->bv_offset + bvec->bv_len; unsigned last_page = (total - 1) / PAGE_SIZE; seg->bv_page = bvec->bv_page + last_page; /* the whole segment is inside the last page */ if (bvec->bv_offset >= last_page * PAGE_SIZE) { seg->bv_offset = bvec->bv_offset % PAGE_SIZE; seg->bv_len = bvec->bv_len; } else { seg->bv_offset = 0; seg->bv_len = total - last_page * PAGE_SIZE; } } #endif /* __LINUX_BVEC_ITER_H */ thunderbolt.h 0000644 00000046362 14722070374 0007270 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Thunderbolt service API * * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com> * Copyright (C) 2017, Intel Corporation * Authors: Michael Jamet <michael.jamet@intel.com> * Mika Westerberg <mika.westerberg@linux.intel.com> */ #ifndef THUNDERBOLT_H_ #define THUNDERBOLT_H_ #include <linux/device.h> #include <linux/idr.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/mod_devicetable.h> #include <linux/pci.h> #include <linux/uuid.h> #include <linux/workqueue.h> enum tb_cfg_pkg_type { TB_CFG_PKG_READ = 1, TB_CFG_PKG_WRITE = 2, TB_CFG_PKG_ERROR = 3, TB_CFG_PKG_NOTIFY_ACK = 4, TB_CFG_PKG_EVENT = 5, TB_CFG_PKG_XDOMAIN_REQ = 6, TB_CFG_PKG_XDOMAIN_RESP = 7, TB_CFG_PKG_OVERRIDE = 8, TB_CFG_PKG_RESET = 9, TB_CFG_PKG_ICM_EVENT = 10, TB_CFG_PKG_ICM_CMD = 11, TB_CFG_PKG_ICM_RESP = 12, TB_CFG_PKG_PREPARE_TO_SLEEP = 13, }; /** * enum tb_security_level - Thunderbolt security level * @TB_SECURITY_NONE: No security, legacy mode * @TB_SECURITY_USER: User approval required at minimum * @TB_SECURITY_SECURE: One time saved key required at minimum * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB) * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected * 
Thunderbolt dock (and Display Port). All PCIe * links downstream of the dock are removed. */ enum tb_security_level { TB_SECURITY_NONE, TB_SECURITY_USER, TB_SECURITY_SECURE, TB_SECURITY_DPONLY, TB_SECURITY_USBONLY, }; /** * struct tb - main thunderbolt bus structure * @dev: Domain device * @lock: Big lock. Must be held when accessing any struct * tb_switch / struct tb_port. * @nhi: Pointer to the NHI structure * @ctl: Control channel for this domain * @wq: Ordered workqueue for all domain specific work * @root_switch: Root switch of this domain * @cm_ops: Connection manager specific operations vector * @index: Linux assigned domain number * @security_level: Current security level * @nboot_acl: Number of boot ACLs the domain supports * @privdata: Private connection manager specific data */ struct tb { struct device dev; struct mutex lock; struct tb_nhi *nhi; struct tb_ctl *ctl; struct workqueue_struct *wq; struct tb_switch *root_switch; const struct tb_cm_ops *cm_ops; int index; enum tb_security_level security_level; size_t nboot_acl; unsigned long privdata[0]; }; extern struct bus_type tb_bus_type; extern struct device_type tb_service_type; extern struct device_type tb_xdomain_type; #define TB_LINKS_PER_PHY_PORT 2 static inline unsigned int tb_phy_port_from_link(unsigned int link) { return (link - 1) / TB_LINKS_PER_PHY_PORT; } /** * struct tb_property_dir - XDomain property directory * @uuid: Directory UUID or %NULL if root directory * @properties: List of properties in this directory * * User needs to provide serialization if needed. */ struct tb_property_dir { const uuid_t *uuid; struct list_head properties; }; enum tb_property_type { TB_PROPERTY_TYPE_UNKNOWN = 0x00, TB_PROPERTY_TYPE_DIRECTORY = 0x44, TB_PROPERTY_TYPE_DATA = 0x64, TB_PROPERTY_TYPE_TEXT = 0x74, TB_PROPERTY_TYPE_VALUE = 0x76, }; #define TB_PROPERTY_KEY_SIZE 8 /** * struct tb_property - XDomain property * @list: Used to link properties together in a directory * @key: Key for the property (always terminated). * @type: Type of the property * @length: Length of the property data in dwords * @value: Property value * * Users use @type to determine which field in @value is filled. 
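 *
 * For example (an illustrative sketch, not part of the original header;
 * handle_text() and handle_value() stand in for caller code):
 *
 *	if (property->type == TB_PROPERTY_TYPE_TEXT)
 *		handle_text(property->value.text);
 *	else if (property->type == TB_PROPERTY_TYPE_VALUE)
 *		handle_value(property->value.immediate);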
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	size_t length;
	union {
		struct tb_property_dir *dir;
		u8 *data;
		char *text;
		u32 immediate;
	} value;
};

struct tb_property_dir *tb_property_parse_dir(const u32 *block,
					      size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
			      u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
			 const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
			 const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
			struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
				     const char *key,
				     enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
					 struct tb_property *prev);

#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))

int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);

/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (host)
 * @local_uuid: Cached local UUID
 * @route: Route string with which the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @is_unplugged: The XDomain is unplugged
 * @resume: The XDomain is being resumed
 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
 *		queried first
 * @transmit_path: HopID which the remote end expects us to transmit
 * @transmit_ring: Local ring (hop) where outgoing packets are pushed
 * @receive_path: HopID which we expect the remote end to transmit
 * @receive_ring: Local ring (hop) where incoming packets arrive
 * @service_ids: Used to generate IDs for the services
 * @properties: Properties exported by the remote domain
 * @property_block_gen: Generation of @properties
 * @properties_lock: Lock protecting @properties.
 * @get_uuid_work: Work used to retrieve @remote_uuid
 * @uuid_retries: Number of times left @remote_uuid is requested before
 *		  giving up
 * @get_properties_work: Work used to get remote domain properties
 * @properties_retries: Number of times left to read properties
 * @properties_changed_work: Work used to notify the remote domain that
 *			     our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *				changed notification
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain at which the remote domain is connected
 *	   (ICM only)
 *
 * This structure represents a connection across two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 *
 * Service drivers may access this structure if they need to enumerate
 * non-standard properties but they need to hold @lock when doing so
 * because properties can be changed asynchronously in response to
 * changes in the remote domain.
 */
struct tb_xdomain {
	struct device dev;
	struct tb *tb;
	uuid_t *remote_uuid;
	const uuid_t *local_uuid;
	u64 route;
	u16 vendor;
	u16 device;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	bool is_unplugged;
	bool resume;
	bool needs_uuid;
	u16 transmit_path;
	u16 transmit_ring;
	u16 receive_path;
	u16 receive_ring;
	struct ida service_ids;
	struct tb_property_dir *properties;
	u32 property_block_gen;
	struct delayed_work get_uuid_work;
	int uuid_retries;
	struct delayed_work get_properties_work;
	int properties_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
	u8 link;
	u8 depth;
};

int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);

static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, uuid);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
	if (xd)
		get_device(&xd->dev);
	return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
	if (xd)
		put_device(&xd->dev);
}

static inline bool tb_is_xdomain(const struct device *dev)
{
	return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
	if (tb_is_xdomain(dev))
		return container_of(dev, struct tb_xdomain, dev);
	return NULL;
}

int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);

/**
 * tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *	      here tells the XDomain core that the message was handled
 *	      by this handler and should not be forwarded to other
 *	      handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the XDomain
 * discovery protocol UUID cannot be registered since it is handled by
 * the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
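 *
 * A registration sketch (illustrative only; my_proto_uuid and
 * my_callback are assumed, not part of the original header):
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_callback,
 *	};
 *
 *	tb_register_protocol_handler(&my_handler);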
*/ struct tb_protocol_handler { const uuid_t *uuid; int (*callback)(const void *buf, size_t size, void *data); void *data; struct list_head list; }; int tb_register_protocol_handler(struct tb_protocol_handler *handler); void tb_unregister_protocol_handler(struct tb_protocol_handler *handler); /** * struct tb_service - Thunderbolt service * @dev: XDomain device * @id: ID of the service (shown in sysfs) * @key: Protocol key from the properties directory * @prtcid: Protocol ID from the properties directory * @prtcvers: Protocol version from the properties directory * @prtcrevs: Protocol software revision from the properties directory * @prtcstns: Protocol settings mask from the properties directory * * Each domain exposes set of services it supports as collection of * properties. For each service there will be one corresponding * &struct tb_service. Service drivers are bound to these. */ struct tb_service { struct device dev; int id; const char *key; u32 prtcid; u32 prtcvers; u32 prtcrevs; u32 prtcstns; }; static inline struct tb_service *tb_service_get(struct tb_service *svc) { if (svc) get_device(&svc->dev); return svc; } static inline void tb_service_put(struct tb_service *svc) { if (svc) put_device(&svc->dev); } static inline bool tb_is_service(const struct device *dev) { return dev->type == &tb_service_type; } static inline struct tb_service *tb_to_service(struct device *dev) { if (tb_is_service(dev)) return container_of(dev, struct tb_service, dev); return NULL; } /** * tb_service_driver - Thunderbolt service driver * @driver: Driver structure * @probe: Called when the driver is probed * @remove: Called when the driver is removed (optional) * @shutdown: Called at shutdown time to stop the service (optional) * @id_table: Table of service identifiers the driver supports */ struct tb_service_driver { struct device_driver driver; int (*probe)(struct tb_service *svc, const struct tb_service_id *id); void (*remove)(struct tb_service *svc); void (*shutdown)(struct tb_service *svc); const struct tb_service_id *id_table; }; #define TB_SERVICE(key, id) \ .match_flags = TBSVC_MATCH_PROTOCOL_KEY | \ TBSVC_MATCH_PROTOCOL_ID, \ .protocol_key = (key), \ .protocol_id = (id) int tb_register_service_driver(struct tb_service_driver *drv); void tb_unregister_service_driver(struct tb_service_driver *drv); static inline void *tb_service_get_drvdata(const struct tb_service *svc) { return dev_get_drvdata(&svc->dev); } static inline void tb_service_set_drvdata(struct tb_service *svc, void *data) { dev_set_drvdata(&svc->dev, data); } static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc) { return tb_to_xdomain(svc->dev.parent); } /** * struct tb_nhi - thunderbolt native host interface * @lock: Must be held during ring creation/destruction. Is acquired by * interrupt_work when dispatching interrupts to individual rings. * @pdev: Pointer to the PCI device * @ops: NHI specific optional ops * @iobase: MMIO space of the NHI * @tx_rings: All Tx rings available on this host controller * @rx_rings: All Rx rings available on this host controller * @msix_ida: Used to allocate MSI-X vectors for rings * @going_away: The host controller device is about to disappear so when * this flag is set, avoid touching the hardware anymore. * @interrupt_work: Work scheduled to handle ring interrupt when no * MSI-X is used. * @hop_count: Number of rings (end point hops) supported by NHI. 
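 *
 * A connection manager could, for instance, allocate a Tx ring on hop 0
 * of this NHI with tb_ring_alloc_tx() declared below (an illustrative
 * sketch only):
 *
 *	ring = tb_ring_alloc_tx(nhi, 0, 16, RING_FLAG_NO_SUSPEND);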
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	const struct tb_nhi_ops *ops;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with an NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *		polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};

/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,
	RING_DESC_CRC_ERROR = 0x1,
	RING_DESC_COMPLETED = 0x2,
	RING_DESC_POSTED = 0x4,
	RING_DESC_BUFFER_OVERRUN = 0x04,
	RING_DESC_INTERRUPT = 0x8,
};

/**
 * struct ring_frame - For use with ring_rx/ring_tx
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};

/* Minimum size for ring_rx */
#define TB_FRAME_SIZE		0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask,
				 u16 eof_mask, void (*start_poll)(void *),
				 void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy have to be set. The buffer must
 * contain at least %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy, @frame->size, @frame->eof and
 * @frame->sof have to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If ring_stop() is called after the packet has been enqueued @frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/* Used only when the ring is in polling mode */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);

/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
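 *
 * For example (illustrative; buf is assumed to be a driver-owned buffer
 * of at least %TB_FRAME_SIZE bytes):
 *
 *	frame->buffer_phy = dma_map_single(tb_ring_dma_device(ring), buf,
 *					   TB_FRAME_SIZE, DMA_FROM_DEVICE);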
*/ static inline struct device *tb_ring_dma_device(struct tb_ring *ring) { return &ring->nhi->pdev->dev; } #endif /* THUNDERBOLT_H_ */ textsearch_fsm.h 0000644 00000002276 14722070374 0007751 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_TEXTSEARCH_FSM_H #define __LINUX_TEXTSEARCH_FSM_H #include <linux/types.h> enum { TS_FSM_SPECIFIC, /* specific character */ TS_FSM_WILDCARD, /* any character */ TS_FSM_DIGIT, /* isdigit() */ TS_FSM_XDIGIT, /* isxdigit() */ TS_FSM_PRINT, /* isprint() */ TS_FSM_ALPHA, /* isalpha() */ TS_FSM_ALNUM, /* isalnum() */ TS_FSM_ASCII, /* isascii() */ TS_FSM_CNTRL, /* iscntrl() */ TS_FSM_GRAPH, /* isgraph() */ TS_FSM_LOWER, /* islower() */ TS_FSM_UPPER, /* isupper() */ TS_FSM_PUNCT, /* ispunct() */ TS_FSM_SPACE, /* isspace() */ __TS_FSM_TYPE_MAX, }; #define TS_FSM_TYPE_MAX (__TS_FSM_TYPE_MAX - 1) enum { TS_FSM_SINGLE, /* 1 occurrence */ TS_FSM_PERHAPS, /* 1 or 0 occurrence */ TS_FSM_ANY, /* 0..n occurrences */ TS_FSM_MULTI, /* 1..n occurrences */ TS_FSM_HEAD_IGNORE, /* 0..n ignored occurrences at head */ __TS_FSM_RECUR_MAX, }; #define TS_FSM_RECUR_MAX (__TS_FSM_RECUR_MAX - 1) /** * struct ts_fsm_token - state machine token (state) * @type: type of token * @recur: number of recurrences * @value: character value for TS_FSM_SPECIFIC */ struct ts_fsm_token { __u16 type; __u8 recur; __u8 value; }; #endif hid.h 0000644 00000111621 14722070374 0005471 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2001 Vojtech Pavlik * Copyright (c) 2006-2007 Jiri Kosina */ /* * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #ifndef __HID_H #define __HID_H #include <linux/bitops.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/mod_devicetable.h> /* hid_device_id */ #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/input.h> #include <linux/semaphore.h> #include <linux/mutex.h> #include <linux/power_supply.h> #include <uapi/linux/hid.h> /* * We parse each description item into this structure. Short items data * values are expanded to 32-bit signed int, long items contain a pointer * into the data area. 
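 *
 * For example (an illustrative sketch, not part of the original header),
 * a one-byte short item is read through the matching union member:
 *
 *	if (item->format == HID_ITEM_FORMAT_SHORT && item->size == 1)
 *		value = item->data.u8;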
*/ struct hid_item { unsigned format; __u8 size; __u8 type; __u8 tag; union { __u8 u8; __s8 s8; __u16 u16; __s16 s16; __u32 u32; __s32 s32; __u8 *longdata; } data; }; /* * HID report item format */ #define HID_ITEM_FORMAT_SHORT 0 #define HID_ITEM_FORMAT_LONG 1 /* * Special tag indicating long items */ #define HID_ITEM_TAG_LONG 15 /* * HID report descriptor item type (prefix bit 2,3) */ #define HID_ITEM_TYPE_MAIN 0 #define HID_ITEM_TYPE_GLOBAL 1 #define HID_ITEM_TYPE_LOCAL 2 #define HID_ITEM_TYPE_RESERVED 3 /* * HID report descriptor main item tags */ #define HID_MAIN_ITEM_TAG_INPUT 8 #define HID_MAIN_ITEM_TAG_OUTPUT 9 #define HID_MAIN_ITEM_TAG_FEATURE 11 #define HID_MAIN_ITEM_TAG_BEGIN_COLLECTION 10 #define HID_MAIN_ITEM_TAG_END_COLLECTION 12 /* * HID report descriptor main item contents */ #define HID_MAIN_ITEM_CONSTANT 0x001 #define HID_MAIN_ITEM_VARIABLE 0x002 #define HID_MAIN_ITEM_RELATIVE 0x004 #define HID_MAIN_ITEM_WRAP 0x008 #define HID_MAIN_ITEM_NONLINEAR 0x010 #define HID_MAIN_ITEM_NO_PREFERRED 0x020 #define HID_MAIN_ITEM_NULL_STATE 0x040 #define HID_MAIN_ITEM_VOLATILE 0x080 #define HID_MAIN_ITEM_BUFFERED_BYTE 0x100 /* * HID report descriptor collection item types */ #define HID_COLLECTION_PHYSICAL 0 #define HID_COLLECTION_APPLICATION 1 #define HID_COLLECTION_LOGICAL 2 /* * HID report descriptor global item tags */ #define HID_GLOBAL_ITEM_TAG_USAGE_PAGE 0 #define HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM 1 #define HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM 2 #define HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM 3 #define HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM 4 #define HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT 5 #define HID_GLOBAL_ITEM_TAG_UNIT 6 #define HID_GLOBAL_ITEM_TAG_REPORT_SIZE 7 #define HID_GLOBAL_ITEM_TAG_REPORT_ID 8 #define HID_GLOBAL_ITEM_TAG_REPORT_COUNT 9 #define HID_GLOBAL_ITEM_TAG_PUSH 10 #define HID_GLOBAL_ITEM_TAG_POP 11 /* * HID report descriptor local item tags */ #define HID_LOCAL_ITEM_TAG_USAGE 0 #define HID_LOCAL_ITEM_TAG_USAGE_MINIMUM 1 #define HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM 2 #define HID_LOCAL_ITEM_TAG_DESIGNATOR_INDEX 3 #define HID_LOCAL_ITEM_TAG_DESIGNATOR_MINIMUM 4 #define HID_LOCAL_ITEM_TAG_DESIGNATOR_MAXIMUM 5 #define HID_LOCAL_ITEM_TAG_STRING_INDEX 7 #define HID_LOCAL_ITEM_TAG_STRING_MINIMUM 8 #define HID_LOCAL_ITEM_TAG_STRING_MAXIMUM 9 #define HID_LOCAL_ITEM_TAG_DELIMITER 10 /* * HID usage tables */ #define HID_USAGE_PAGE 0xffff0000 #define HID_UP_UNDEFINED 0x00000000 #define HID_UP_GENDESK 0x00010000 #define HID_UP_SIMULATION 0x00020000 #define HID_UP_GENDEVCTRLS 0x00060000 #define HID_UP_KEYBOARD 0x00070000 #define HID_UP_LED 0x00080000 #define HID_UP_BUTTON 0x00090000 #define HID_UP_ORDINAL 0x000a0000 #define HID_UP_TELEPHONY 0x000b0000 #define HID_UP_CONSUMER 0x000c0000 #define HID_UP_DIGITIZER 0x000d0000 #define HID_UP_PID 0x000f0000 #define HID_UP_HPVENDOR 0xff7f0000 #define HID_UP_HPVENDOR2 0xff010000 #define HID_UP_MSVENDOR 0xff000000 #define HID_UP_CUSTOM 0x00ff0000 #define HID_UP_LOGIVENDOR 0xffbc0000 #define HID_UP_LOGIVENDOR2 0xff090000 #define HID_UP_LOGIVENDOR3 0xff430000 #define HID_UP_LNVENDOR 0xffa00000 #define HID_UP_SENSOR 0x00200000 #define HID_UP_ASUSVENDOR 0xff310000 #define HID_USAGE 0x0000ffff #define HID_GD_POINTER 0x00010001 #define HID_GD_MOUSE 0x00010002 #define HID_GD_JOYSTICK 0x00010004 #define HID_GD_GAMEPAD 0x00010005 #define HID_GD_KEYBOARD 0x00010006 #define HID_GD_KEYPAD 0x00010007 #define HID_GD_MULTIAXIS 0x00010008 /* * Microsoft Win8 Wireless Radio Controls extensions CA, see: * 
http://www.usb.org/developers/hidpage/HUTRR40RadioHIDUsagesFinal.pdf */ #define HID_GD_WIRELESS_RADIO_CTLS 0x0001000c /* * System Multi-Axis, see: * http://www.usb.org/developers/hidpage/HUTRR62_-_Generic_Desktop_CA_for_System_Multi-Axis_Controllers.txt */ #define HID_GD_SYSTEM_MULTIAXIS 0x0001000e #define HID_GD_X 0x00010030 #define HID_GD_Y 0x00010031 #define HID_GD_Z 0x00010032 #define HID_GD_RX 0x00010033 #define HID_GD_RY 0x00010034 #define HID_GD_RZ 0x00010035 #define HID_GD_SLIDER 0x00010036 #define HID_GD_DIAL 0x00010037 #define HID_GD_WHEEL 0x00010038 #define HID_GD_HATSWITCH 0x00010039 #define HID_GD_BUFFER 0x0001003a #define HID_GD_BYTECOUNT 0x0001003b #define HID_GD_MOTION 0x0001003c #define HID_GD_START 0x0001003d #define HID_GD_SELECT 0x0001003e #define HID_GD_VX 0x00010040 #define HID_GD_VY 0x00010041 #define HID_GD_VZ 0x00010042 #define HID_GD_VBRX 0x00010043 #define HID_GD_VBRY 0x00010044 #define HID_GD_VBRZ 0x00010045 #define HID_GD_VNO 0x00010046 #define HID_GD_FEATURE 0x00010047 #define HID_GD_RESOLUTION_MULTIPLIER 0x00010048 #define HID_GD_SYSTEM_CONTROL 0x00010080 #define HID_GD_UP 0x00010090 #define HID_GD_DOWN 0x00010091 #define HID_GD_RIGHT 0x00010092 #define HID_GD_LEFT 0x00010093 /* Microsoft Win8 Wireless Radio Controls CA usage codes */ #define HID_GD_RFKILL_BTN 0x000100c6 #define HID_GD_RFKILL_LED 0x000100c7 #define HID_GD_RFKILL_SWITCH 0x000100c8 #define HID_DC_BATTERYSTRENGTH 0x00060020 #define HID_CP_CONSUMER_CONTROL 0x000c0001 #define HID_CP_AC_PAN 0x000c0238 #define HID_DG_DIGITIZER 0x000d0001 #define HID_DG_PEN 0x000d0002 #define HID_DG_LIGHTPEN 0x000d0003 #define HID_DG_TOUCHSCREEN 0x000d0004 #define HID_DG_TOUCHPAD 0x000d0005 #define HID_DG_WHITEBOARD 0x000d0006 #define HID_DG_STYLUS 0x000d0020 #define HID_DG_PUCK 0x000d0021 #define HID_DG_FINGER 0x000d0022 #define HID_DG_TIPPRESSURE 0x000d0030 #define HID_DG_BARRELPRESSURE 0x000d0031 #define HID_DG_INRANGE 0x000d0032 #define HID_DG_TOUCH 0x000d0033 #define HID_DG_UNTOUCH 0x000d0034 #define HID_DG_TAP 0x000d0035 #define HID_DG_TABLETFUNCTIONKEY 0x000d0039 #define HID_DG_PROGRAMCHANGEKEY 0x000d003a #define HID_DG_BATTERYSTRENGTH 0x000d003b #define HID_DG_INVERT 0x000d003c #define HID_DG_TILT_X 0x000d003d #define HID_DG_TILT_Y 0x000d003e #define HID_DG_TWIST 0x000d0041 #define HID_DG_TIPSWITCH 0x000d0042 #define HID_DG_TIPSWITCH2 0x000d0043 #define HID_DG_BARRELSWITCH 0x000d0044 #define HID_DG_ERASER 0x000d0045 #define HID_DG_TABLETPICK 0x000d0046 #define HID_CP_CONSUMERCONTROL 0x000c0001 #define HID_CP_NUMERICKEYPAD 0x000c0002 #define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003 #define HID_CP_MICROPHONE 0x000c0004 #define HID_CP_HEADPHONE 0x000c0005 #define HID_CP_GRAPHICEQUALIZER 0x000c0006 #define HID_CP_FUNCTIONBUTTONS 0x000c0036 #define HID_CP_SELECTION 0x000c0080 #define HID_CP_MEDIASELECTION 0x000c0087 #define HID_CP_SELECTDISC 0x000c00ba #define HID_CP_VOLUMEUP 0x000c00e9 #define HID_CP_VOLUMEDOWN 0x000c00ea #define HID_CP_PLAYBACKSPEED 0x000c00f1 #define HID_CP_PROXIMITY 0x000c0109 #define HID_CP_SPEAKERSYSTEM 0x000c0160 #define HID_CP_CHANNELLEFT 0x000c0161 #define HID_CP_CHANNELRIGHT 0x000c0162 #define HID_CP_CHANNELCENTER 0x000c0163 #define HID_CP_CHANNELFRONT 0x000c0164 #define HID_CP_CHANNELCENTERFRONT 0x000c0165 #define HID_CP_CHANNELSIDE 0x000c0166 #define HID_CP_CHANNELSURROUND 0x000c0167 #define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168 #define HID_CP_CHANNELTOP 0x000c0169 #define HID_CP_CHANNELUNKNOWN 0x000c016a #define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180 #define 
HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200 #define HID_DG_DEVICECONFIG 0x000d000e #define HID_DG_DEVICESETTINGS 0x000d0023 #define HID_DG_AZIMUTH 0x000d003f #define HID_DG_CONFIDENCE 0x000d0047 #define HID_DG_WIDTH 0x000d0048 #define HID_DG_HEIGHT 0x000d0049 #define HID_DG_CONTACTID 0x000d0051 #define HID_DG_INPUTMODE 0x000d0052 #define HID_DG_DEVICEINDEX 0x000d0053 #define HID_DG_CONTACTCOUNT 0x000d0054 #define HID_DG_CONTACTMAX 0x000d0055 #define HID_DG_SCANTIME 0x000d0056 #define HID_DG_SURFACESWITCH 0x000d0057 #define HID_DG_BUTTONSWITCH 0x000d0058 #define HID_DG_BUTTONTYPE 0x000d0059 #define HID_DG_BARRELSWITCH2 0x000d005a #define HID_DG_TOOLSERIALNUMBER 0x000d005b #define HID_DG_LATENCYMODE 0x000d0060 #define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076 /* * HID report types --- Ouch! HID spec says 1 2 3! */ #define HID_INPUT_REPORT 0 #define HID_OUTPUT_REPORT 1 #define HID_FEATURE_REPORT 2 #define HID_REPORT_TYPES 3 /* * HID connect requests */ #define HID_CONNECT_HIDINPUT BIT(0) #define HID_CONNECT_HIDINPUT_FORCE BIT(1) #define HID_CONNECT_HIDRAW BIT(2) #define HID_CONNECT_HIDDEV BIT(3) #define HID_CONNECT_HIDDEV_FORCE BIT(4) #define HID_CONNECT_FF BIT(5) #define HID_CONNECT_DRIVER BIT(6) #define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ HID_CONNECT_HIDDEV|HID_CONNECT_FF) /* * HID device quirks. */ /* * Increase this if you need to configure more HID quirks at module load time */ #define MAX_USBHID_BOOT_QUIRKS 4 #define HID_QUIRK_INVERT BIT(0) #define HID_QUIRK_NOTOUCH BIT(1) #define HID_QUIRK_IGNORE BIT(2) #define HID_QUIRK_NOGET BIT(3) #define HID_QUIRK_HIDDEV_FORCE BIT(4) #define HID_QUIRK_BADPAD BIT(5) #define HID_QUIRK_MULTI_INPUT BIT(6) #define HID_QUIRK_HIDINPUT_FORCE BIT(7) /* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */ /* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */ #define HID_QUIRK_ALWAYS_POLL BIT(10) #define HID_QUIRK_INPUT_PER_APP BIT(11) #define HID_QUIRK_X_INVERT BIT(12) #define HID_QUIRK_Y_INVERT BIT(13) #define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16) #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17) #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18) #define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19) #define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20) #define HID_QUIRK_FULLSPEED_INTERVAL BIT(28) #define HID_QUIRK_NO_INIT_REPORTS BIT(29) #define HID_QUIRK_NO_IGNORE BIT(30) #define HID_QUIRK_NO_INPUT_SYNC BIT(31) /* * HID device groups * * Note: HID_GROUP_ANY is declared in linux/mod_devicetable.h * and has a value of 0x0000 */ #define HID_GROUP_GENERIC 0x0001 #define HID_GROUP_MULTITOUCH 0x0002 #define HID_GROUP_SENSOR_HUB 0x0003 #define HID_GROUP_MULTITOUCH_WIN_8 0x0004 /* * Vendor specific HID device groups */ #define HID_GROUP_RMI 0x0100 #define HID_GROUP_WACOM 0x0101 #define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 #define HID_GROUP_STEAM 0x0103 #define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104 /* * HID protocol status */ #define HID_REPORT_PROTOCOL 1 #define HID_BOOT_PROTOCOL 0 /* * This is the global environment of the parser. This information is * persistent for main-items. The global environment can be saved and * restored with PUSH/POP statements. */ struct hid_global { unsigned usage_page; __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; __s32 physical_maximum; __s32 unit_exponent; unsigned unit; unsigned report_id; unsigned report_size; unsigned report_count; }; /* * This is the local environment. It is persistent up the next main-item. 
*/ #define HID_MAX_USAGES 12288 #define HID_DEFAULT_NUM_COLLECTIONS 16 struct hid_local { unsigned usage[HID_MAX_USAGES]; /* usage array */ u8 usage_size[HID_MAX_USAGES]; /* usage size array */ unsigned collection_index[HID_MAX_USAGES]; /* collection index array */ unsigned usage_index; unsigned usage_minimum; unsigned delimiter_depth; unsigned delimiter_branch; }; /* * This is the collection stack. We climb up the stack to determine * application and function of each field. */ struct hid_collection { int parent_idx; /* device->collection */ unsigned type; unsigned usage; unsigned level; }; struct hid_usage { unsigned hid; /* hid usage code */ unsigned collection_index; /* index into collection array */ unsigned usage_index; /* index into usage array */ __s8 resolution_multiplier;/* Effective Resolution Multiplier (HUT v1.12, 4.3.1), default: 1 */ /* hidinput data */ __s8 wheel_factor; /* 120/resolution_multiplier */ __u16 code; /* input driver code */ __u8 type; /* input driver type */ __s8 hat_min; /* hat switch fun */ __s8 hat_max; /* ditto */ __s8 hat_dir; /* ditto */ __s16 wheel_accumulated; /* hi-res wheel */ }; struct hid_input; struct hid_field { unsigned physical; /* physical usage for this field */ unsigned logical; /* logical usage for this field */ unsigned application; /* application usage for this field */ struct hid_usage *usage; /* usage table for this function */ unsigned maxusage; /* maximum usage index */ unsigned flags; /* main-item flags (i.e. volatile,array,constant) */ unsigned report_offset; /* bit offset in the report */ unsigned report_size; /* size of this field in the report */ unsigned report_count; /* number of this field in the report */ unsigned report_type; /* (input,output,feature) */ __s32 *value; /* last known value(s) */ __s32 logical_minimum; __s32 logical_maximum; __s32 physical_minimum; __s32 physical_maximum; __s32 unit_exponent; unsigned unit; struct hid_report *report; /* associated report */ unsigned index; /* index into report->field[] */ /* hidinput data */ struct hid_input *hidinput; /* associated input structure */ __u16 dpad; /* dpad input code */ }; #define HID_MAX_FIELDS 256 struct hid_report { struct list_head list; struct list_head hidinput_list; unsigned int id; /* id of this report */ unsigned int type; /* report type */ unsigned int application; /* application usage for this report */ struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */ unsigned maxfield; /* maximum valid field index */ unsigned size; /* size of the report (bits) */ struct hid_device *device; /* associated device */ }; #define HID_MAX_IDS 256 struct hid_report_enum { unsigned numbered; struct list_head report_list; struct hid_report *report_id_hash[HID_MAX_IDS]; }; #define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */ #define HID_MAX_BUFFER_SIZE 8192 /* 8kb */ #define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */ #define HID_OUTPUT_FIFO_SIZE 64 struct hid_control_fifo { unsigned char dir; struct hid_report *report; char *raw_report; }; struct hid_output_fifo { struct hid_report *report; char *raw_report; }; #define HID_CLAIMED_INPUT BIT(0) #define HID_CLAIMED_HIDDEV BIT(1) #define HID_CLAIMED_HIDRAW BIT(2) #define HID_CLAIMED_DRIVER BIT(3) #define HID_STAT_ADDED BIT(0) #define HID_STAT_PARSED BIT(1) #define HID_STAT_DUP_DETECTED BIT(2) #define HID_STAT_REPROBED BIT(3) struct hid_input { struct list_head list; struct hid_report *report; struct input_dev *input; const char *name; bool registered; 
struct list_head reports; /* the list of reports */ unsigned int application; /* application usage for this input */ }; enum hid_type { HID_TYPE_OTHER = 0, HID_TYPE_USBMOUSE, HID_TYPE_USBNONE }; enum hid_battery_status { HID_BATTERY_UNKNOWN = 0, HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */ HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */ }; struct hid_driver; struct hid_ll_driver; struct hid_device { /* device report descriptor */ __u8 *dev_rdesc; unsigned dev_rsize; __u8 *rdesc; unsigned rsize; struct hid_collection *collection; /* List of HID collections */ unsigned collection_size; /* Number of allocated hid_collections */ unsigned maxcollection; /* Number of parsed collections */ unsigned maxapplication; /* Number of applications */ __u16 bus; /* BUS ID */ __u16 group; /* Report group */ __u32 vendor; /* Vendor ID */ __u32 product; /* Product ID */ __u32 version; /* HID version */ enum hid_type type; /* device type (mouse, kbd, ...) */ unsigned country; /* HID country */ struct hid_report_enum report_enum[HID_REPORT_TYPES]; struct work_struct led_work; /* delayed LED worker */ struct semaphore driver_input_lock; /* protects the current driver */ struct device dev; /* device */ struct hid_driver *driver; struct hid_ll_driver *ll_driver; struct mutex ll_open_lock; unsigned int ll_open_count; #ifdef CONFIG_HID_BATTERY_STRENGTH /* * Power supply information for HID devices which report * battery strength. power_supply was successfully registered if * battery is non-NULL. */ struct power_supply *battery; __s32 battery_capacity; __s32 battery_min; __s32 battery_max; __s32 battery_report_type; __s32 battery_report_id; enum hid_battery_status battery_status; bool battery_avoid_query; #endif unsigned long status; /* see STAT flags above */ unsigned claimed; /* Claimed by hidinput, hiddev? 
*/ unsigned quirks; /* Various quirks the device can pull on us */ bool io_started; /* If IO has started */ struct list_head inputs; /* The list of inputs */ void *hiddev; /* The hiddev structure */ void *hidraw; char name[128]; /* Device name */ char phys[64]; /* Device physical location */ char uniq[64]; /* Device unique identifier (serial #) */ void *driver_data; /* temporary hid_ff handling (until moved to the drivers) */ int (*ff_init)(struct hid_device *); /* hiddev event handler */ int (*hiddev_connect)(struct hid_device *, unsigned int); void (*hiddev_disconnect)(struct hid_device *); void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field, struct hid_usage *, __s32); void (*hiddev_report_event) (struct hid_device *, struct hid_report *); /* debugging support via debugfs */ unsigned short debug; struct dentry *debug_dir; struct dentry *debug_rdesc; struct dentry *debug_events; struct list_head debug_list; spinlock_t debug_list_lock; wait_queue_head_t debug_wait; struct kref ref; unsigned int id; /* system unique id */ }; void hiddev_free(struct kref *ref); #define to_hid_device(pdev) \ container_of(pdev, struct hid_device, dev) static inline void *hid_get_drvdata(struct hid_device *hdev) { return dev_get_drvdata(&hdev->dev); } static inline void hid_set_drvdata(struct hid_device *hdev, void *data) { dev_set_drvdata(&hdev->dev, data); } #define HID_GLOBAL_STACK_SIZE 4 #define HID_COLLECTION_STACK_SIZE 4 #define HID_SCAN_FLAG_MT_WIN_8 BIT(0) #define HID_SCAN_FLAG_VENDOR_SPECIFIC BIT(1) #define HID_SCAN_FLAG_GD_POINTER BIT(2) struct hid_parser { struct hid_global global; struct hid_global global_stack[HID_GLOBAL_STACK_SIZE]; unsigned int global_stack_ptr; struct hid_local local; unsigned int *collection_stack; unsigned int collection_stack_ptr; unsigned int collection_stack_size; struct hid_device *device; unsigned int scan_flags; }; struct hid_class_descriptor { __u8 bDescriptorType; __le16 wDescriptorLength; } __attribute__ ((packed)); struct hid_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdHID; __u8 bCountryCode; __u8 bNumDescriptors; struct hid_class_descriptor desc[1]; } __attribute__ ((packed)); #define HID_DEVICE(b, g, ven, prod) \ .bus = (b), .group = (g), .vendor = (ven), .product = (prod) #define HID_USB_DEVICE(ven, prod) \ .bus = BUS_USB, .vendor = (ven), .product = (prod) #define HID_BLUETOOTH_DEVICE(ven, prod) \ .bus = BUS_BLUETOOTH, .vendor = (ven), .product = (prod) #define HID_I2C_DEVICE(ven, prod) \ .bus = BUS_I2C, .vendor = (ven), .product = (prod) #define HID_REPORT_ID(rep) \ .report_type = (rep) #define HID_USAGE_ID(uhid, utype, ucode) \ .usage_hid = (uhid), .usage_type = (utype), .usage_code = (ucode) /* we don't want to catch types and codes equal to 0 */ #define HID_TERMINATOR (HID_ANY_ID - 1) struct hid_report_id { __u32 report_type; }; struct hid_usage_id { __u32 usage_hid; __u32 usage_type; __u32 usage_code; }; /** * struct hid_driver * @name: driver name (e.g. 
"Footech_bar-wheel") * @id_table: which devices is this driver for (must be non-NULL for probe * to be called) * @dyn_list: list of dynamically added device ids * @dyn_lock: lock protecting @dyn_list * @match: check if the given device is handled by this driver * @probe: new device inserted * @remove: device removed (NULL if not a hot-plug capable driver) * @report_table: on which reports to call raw_event (NULL means all) * @raw_event: if report in report_table, this hook is called (NULL means nop) * @usage_table: on which events to call event (NULL means all) * @event: if usage in usage_table, this hook is called (NULL means nop) * @report: this hook is called after parsing a report (NULL means nop) * @report_fixup: called before report descriptor parsing (NULL means nop) * @input_mapping: invoked on input registering before mapping an usage * @input_mapped: invoked on input registering after mapping an usage * @input_configured: invoked just before the device is registered * @feature_mapping: invoked on feature registering * @suspend: invoked on suspend (NULL means nop) * @resume: invoked on resume if device was not reset (NULL means nop) * @reset_resume: invoked on resume if device was reset (NULL means nop) * * probe should return -errno on error, or 0 on success. During probe, * input will not be passed to raw_event unless hid_device_io_start is * called. * * raw_event and event should return negative on error, any other value will * pass the event on to .event() typically return 0 for success. * * input_mapping shall return a negative value to completely ignore this usage * (e.g. doubled or invalid usage), zero to continue with parsing of this * usage by generic code (no special handling needed) or positive to skip * generic parsing (needed special handling which was done in the hook already) * input_mapped shall return negative to inform the layer that this usage * should not be considered for further processing or zero to notify that * no processing was performed and should be done in a generic manner * Both these functions may be NULL which means the same behavior as returning * zero from them. 
*/ struct hid_driver { char *name; const struct hid_device_id *id_table; struct list_head dyn_list; spinlock_t dyn_lock; bool (*match)(struct hid_device *dev, bool ignore_special_driver); int (*probe)(struct hid_device *dev, const struct hid_device_id *id); void (*remove)(struct hid_device *dev); const struct hid_report_id *report_table; int (*raw_event)(struct hid_device *hdev, struct hid_report *report, u8 *data, int size); const struct hid_usage_id *usage_table; int (*event)(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value); void (*report)(struct hid_device *hdev, struct hid_report *report); __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf, unsigned int *size); int (*input_mapping)(struct hid_device *hdev, struct hid_input *hidinput, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max); int (*input_mapped)(struct hid_device *hdev, struct hid_input *hidinput, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max); int (*input_configured)(struct hid_device *hdev, struct hid_input *hidinput); void (*feature_mapping)(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage); #ifdef CONFIG_PM int (*suspend)(struct hid_device *hdev, pm_message_t message); int (*resume)(struct hid_device *hdev); int (*reset_resume)(struct hid_device *hdev); #endif /* private: */ struct device_driver driver; }; #define to_hid_driver(pdrv) \ container_of(pdrv, struct hid_driver, driver) /** * hid_ll_driver - low level driver callbacks * @start: called on probe to start the device * @stop: called on remove * @open: called by input layer on open * @close: called by input layer on close * @power: request underlying hardware to enter requested power mode * @parse: this method is called only once to parse the device data, * shouldn't allocate anything to not leak memory * @request: send report request to device (e.g. feature report) * @wait: wait for buffered io to complete (send/recv reports) * @raw_request: send raw report request to device (e.g. 
feature report) * @output_report: send output report to device * @idle: send idle request to device * @max_buffer_size: over-ride maximum data buffer size (default: HID_MAX_BUFFER_SIZE) */ struct hid_ll_driver { int (*start)(struct hid_device *hdev); void (*stop)(struct hid_device *hdev); int (*open)(struct hid_device *hdev); void (*close)(struct hid_device *hdev); int (*power)(struct hid_device *hdev, int level); int (*parse)(struct hid_device *hdev); void (*request)(struct hid_device *hdev, struct hid_report *report, int reqtype); int (*wait)(struct hid_device *hdev); int (*raw_request) (struct hid_device *hdev, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype); int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len); int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype); unsigned int max_buffer_size; }; extern struct hid_ll_driver i2c_hid_ll_driver; extern struct hid_ll_driver hidp_hid_driver; extern struct hid_ll_driver uhid_hid_driver; extern struct hid_ll_driver usb_hid_driver; static inline bool hid_is_using_ll_driver(struct hid_device *hdev, struct hid_ll_driver *driver) { return hdev->ll_driver == driver; } static inline bool hid_is_usb(struct hid_device *hdev) { return hid_is_using_ll_driver(hdev, &usb_hid_driver); } #define PM_HINT_FULLON 1<<5 #define PM_HINT_NORMAL 1<<1 /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ /* We ignore a few input applications that are not widely used */ #define IS_INPUT_APPLICATION(a) \ (((a >= HID_UP_GENDESK) && (a <= HID_GD_MULTIAXIS)) \ || ((a >= HID_DG_PEN) && (a <= HID_DG_WHITEBOARD)) \ || (a == HID_GD_SYSTEM_CONTROL) || (a == HID_CP_CONSUMER_CONTROL) \ || (a == HID_GD_WIRELESS_RADIO_CTLS)) /* HID core API */ extern int hid_debug; extern bool hid_ignore(struct hid_device *); extern int hid_add_device(struct hid_device *); extern void hid_destroy_device(struct hid_device *); extern struct bus_type hid_bus_type; extern int __must_check __hid_register_driver(struct hid_driver *, struct module *, const char *mod_name); /* use a define to avoid include chaining to get THIS_MODULE & friends */ #define hid_register_driver(driver) \ __hid_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) extern void hid_unregister_driver(struct hid_driver *); /** * module_hid_driver() - Helper macro for registering a HID driver * @__hid_driver: hid_driver struct * * Helper macro for HID drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. 
Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_hid_driver(__hid_driver) \ module_driver(__hid_driver, hid_register_driver, \ hid_unregister_driver) extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct hid_usage *, __s32); extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report); extern int hidinput_connect(struct hid_device *hid, unsigned int force); extern void hidinput_disconnect(struct hid_device *); int hid_set_field(struct hid_field *, unsigned, __s32); int hid_input_report(struct hid_device *, int type, u8 *, u32, int); int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field); struct hid_field *hidinput_get_led_field(struct hid_device *hid); unsigned int hidinput_count_leds(struct hid_device *hid); __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code); void hid_output_report(struct hid_report *report, __u8 *data); int __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype); u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags); struct hid_device *hid_allocate_device(void); struct hid_report *hid_register_report(struct hid_device *device, unsigned int type, unsigned int id, unsigned int application); int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); struct hid_report *hid_validate_values(struct hid_device *hid, unsigned int type, unsigned int id, unsigned int field_index, unsigned int report_counts); void hid_setup_resolution_multiplier(struct hid_device *hid); int hid_open_report(struct hid_device *device); int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); void hid_disconnect(struct hid_device *hid); bool hid_match_one_id(const struct hid_device *hdev, const struct hid_device_id *id); const struct hid_device_id *hid_match_id(const struct hid_device *hdev, const struct hid_device_id *id); const struct hid_device_id *hid_match_device(struct hid_device *hdev, struct hid_driver *hdrv); bool hid_compare_device_paths(struct hid_device *hdev_a, struct hid_device *hdev_b, char separator); s32 hid_snto32(__u32 value, unsigned n); __u32 hid_field_extract(const struct hid_device *hid, __u8 *report, unsigned offset, unsigned n); /** * hid_device_io_start - enable HID input during probe, remove * * @hid - the device * * This should only be called during probe or remove and only be * called by the thread calling probe or remove. It will allow * incoming packets to be delivered to the driver. */ static inline void hid_device_io_start(struct hid_device *hid) { if (hid->io_started) { dev_warn(&hid->dev, "io already started\n"); return; } hid->io_started = true; up(&hid->driver_input_lock); } /** * hid_device_io_stop - disable HID input during probe, remove * * @hid - the device * * Should only be called after hid_device_io_start. It will prevent * incoming packets from going to the driver for the duration of * probe, remove. If called during probe, packets will still go to the * driver after probe is complete. This function should only be called * by the thread calling probe or remove. 
 */
static inline void hid_device_io_stop(struct hid_device *hid)
{
	if (!hid->io_started) {
		dev_warn(&hid->dev, "io already stopped\n");
		return;
	}

	hid->io_started = false;
	down(&hid->driver_input_lock);
}

/**
 * hid_map_usage - map usage input bits
 *
 * @hidinput: hidinput which we are interested in
 * @usage: usage to fill in
 * @bit: pointer to input->{}bit (out parameter)
 * @max: maximal valid usage->code to consider later (out parameter)
 * @type: input event type (EV_KEY, EV_REL, ...)
 * @c: code which corresponds to this usage and type
 *
 * The value pointed to by @bit will be set to NULL if either @type is
 * an unhandled event type, or if @c is out of range for @type. This
 * can be used as an error condition.
 */
static inline void hid_map_usage(struct hid_input *hidinput,
		struct hid_usage *usage, unsigned long **bit, int *max,
		__u8 type, unsigned int c)
{
	struct input_dev *input = hidinput->input;
	unsigned long *bmap = NULL;
	unsigned int limit = 0;

	switch (type) {
	case EV_ABS:
		bmap = input->absbit;
		limit = ABS_MAX;
		break;
	case EV_REL:
		bmap = input->relbit;
		limit = REL_MAX;
		break;
	case EV_KEY:
		bmap = input->keybit;
		limit = KEY_MAX;
		break;
	case EV_LED:
		bmap = input->ledbit;
		limit = LED_MAX;
		break;
	}

	if (unlikely(c > limit || !bmap)) {
		pr_warn_ratelimited("%s: Invalid code %d type %d\n",
				    input->name, c, type);
		*bit = NULL;
		return;
	}

	usage->type = type;
	usage->code = c;
	*max = limit;
	*bit = bmap;
}

/**
 * hid_map_usage_clear - map usage input bits and clear the input bit
 *
 * The same as hid_map_usage, except the @c bit is also cleared in supported
 * bits (@bit).
 */
static inline void hid_map_usage_clear(struct hid_input *hidinput,
		struct hid_usage *usage, unsigned long **bit, int *max,
		__u8 type, __u16 c)
{
	hid_map_usage(hidinput, usage, bit, max, type, c);
	if (*bit)
		clear_bit(usage->code, *bit);
}

/**
 * hid_parse - parse HW reports
 *
 * @hdev: hid device
 *
 * Call this from probe after you set up the device (if needed). Your
 * report_fixup will be called (if non-NULL) after reading raw report from
 * device before passing it to hid layer for real parsing.
 */
static inline int __must_check hid_parse(struct hid_device *hdev)
{
	return hid_open_report(hdev);
}

int __must_check hid_hw_start(struct hid_device *hdev,
			      unsigned int connect_mask);
void hid_hw_stop(struct hid_device *hdev);
int __must_check hid_hw_open(struct hid_device *hdev);
void hid_hw_close(struct hid_device *hdev);

/**
 * hid_hw_power - requests underlying HW to go into given power mode
 *
 * @hdev: hid device
 * @level: requested power level (one of %PM_HINT_* defines)
 *
 * This function requests underlying hardware to enter requested power
 * mode.
 */
static inline int hid_hw_power(struct hid_device *hdev, int level)
{
	return hdev->ll_driver->power ? hdev->ll_driver->power(hdev, level) : 0;
}

/**
 * hid_hw_request - send report request to device
 *
 * @hdev: hid device
 * @report: report to send
 * @reqtype: hid request type
 */
static inline void hid_hw_request(struct hid_device *hdev,
				  struct hid_report *report, int reqtype)
{
	if (hdev->ll_driver->request)
		return hdev->ll_driver->request(hdev, report, reqtype);

	__hid_request(hdev, report, reqtype);
}

/**
 * hid_hw_raw_request - send report request to device
 *
 * @hdev: hid device
 * @reportnum: report ID
 * @buf: in/out data to transfer
 * @len: length of buf
 * @rtype: HID report type
 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
 *
 * @return: count of data transferred, negative if error
 *
 * Same behavior as hid_hw_request, but with raw buffers instead.
 */
static inline int hid_hw_raw_request(struct hid_device *hdev,
				     unsigned char reportnum, __u8 *buf,
				     size_t len, unsigned char rtype,
				     int reqtype)
{
	if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
		return -EINVAL;

	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
					    rtype, reqtype);
}

/**
 * hid_hw_output_report - send output report to device
 *
 * @hdev: hid device
 * @buf: raw data to transfer
 * @len: length of buf
 *
 * @return: count of data transferred, negative if error
 */
static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf,
				       size_t len)
{
	if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
		return -EINVAL;

	if (hdev->ll_driver->output_report)
		return hdev->ll_driver->output_report(hdev, buf, len);

	return -ENOSYS;
}

/**
 * hid_hw_idle - send idle request to device
 *
 * @hdev: hid device
 * @report: report to control
 * @idle: idle state
 * @reqtype: hid request type
 */
static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
			      int reqtype)
{
	if (hdev->ll_driver->idle)
		return hdev->ll_driver->idle(hdev, report, idle, reqtype);

	return 0;
}

/**
 * hid_hw_wait - wait for buffered io to complete
 *
 * @hdev: hid device
 */
static inline void hid_hw_wait(struct hid_device *hdev)
{
	if (hdev->ll_driver->wait)
		hdev->ll_driver->wait(hdev);
}

/**
 * hid_report_len - calculate the report length
 *
 * @report: the report whose length we want to know
 */
static inline u32 hid_report_len(struct hid_report *report)
{
	return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
}

int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
			 int interrupt);

/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
int hid_quirks_init(char **quirks_param, __u16 bus, int count);
void hid_quirks_exit(__u16 bus);

#ifdef CONFIG_HID_PID
int hid_pidff_init(struct hid_device *hid);
#else
#define hid_pidff_init NULL
#endif
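/*
 * Usage sketch for the hid_hw_* helpers above (illustration only, not part
 * of the original header). The driver name, the 16-byte buffer size and the
 * feature report ID 0x01 are made-up assumptions; a real driver would also
 * provide an .id_table and include <linux/slab.h> for kzalloc()/kfree().
 */
#if 0
static int example_hid_probe(struct hid_device *hdev,
			     const struct hid_device_id *id)
{
	__u8 *buf;
	int ret;

	ret = hid_parse(hdev);	/* parse the device's report descriptor */
	if (ret)
		return ret;

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret)
		return ret;

	/* raw buffers must be heap-allocated, never on the stack */
	buf = kzalloc(16, GFP_KERNEL);
	if (!buf) {
		hid_hw_stop(hdev);
		return -ENOMEM;
	}

	/* fetch a (hypothetical) feature report 0x01 through the ll_driver */
	ret = hid_hw_raw_request(hdev, 0x01, buf, 16,
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	if (ret < 0)
		dev_warn(&hdev->dev, "feature report failed: %d\n", ret);

	kfree(buf);
	return 0;
}

static struct hid_driver example_hid_driver = {
	.name	= "example-hid",
	.probe	= example_hid_probe,
	/* .id_table = ..., omitted in this sketch */
};
module_hid_driver(example_hid_driver);
#endif

#define dbg_hid(fmt, ...)						\
do {									\
	if (hid_debug)							\
		printk(KERN_DEBUG "%s: " fmt, __FILE__, ##__VA_ARGS__);	\
} while (0)

#define hid_err(hid, fmt, ...)		\
	dev_err(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_notice(hid, fmt, ...)	\
	dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_warn(hid, fmt, ...)		\
	dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_info(hid, fmt, ...)		\
	dev_info(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_dbg(hid, fmt, ...)		\
	dev_dbg(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_err_once(hid, fmt, ...)	\
	dev_err_once(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_notice_once(hid, fmt, ...)	\
	dev_notice_once(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_warn_once(hid, fmt, ...)	\
	dev_warn_once(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_info_once(hid, fmt, ...)	\
	dev_info_once(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_dbg_once(hid, fmt, ...)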
\ dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__) #endif user_namespace.h 0000644 00000011234 14722070374 0007716 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_USER_NAMESPACE_H #define _LINUX_USER_NAMESPACE_H #include <linux/kref.h> #include <linux/nsproxy.h> #include <linux/ns_common.h> #include <linux/sched.h> #include <linux/workqueue.h> #include <linux/rwsem.h> #include <linux/sysctl.h> #include <linux/err.h> #define UID_GID_MAP_MAX_BASE_EXTENTS 5 #define UID_GID_MAP_MAX_EXTENTS 340 struct uid_gid_extent { u32 first; u32 lower_first; u32 count; }; struct uid_gid_map { /* 64 bytes -- 1 cache line */ u32 nr_extents; union { struct uid_gid_extent extent[UID_GID_MAP_MAX_BASE_EXTENTS]; struct { struct uid_gid_extent *forward; struct uid_gid_extent *reverse; }; }; }; #define USERNS_SETGROUPS_ALLOWED 1UL #define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED struct ucounts; enum ucount_type { UCOUNT_USER_NAMESPACES, UCOUNT_PID_NAMESPACES, UCOUNT_UTS_NAMESPACES, UCOUNT_IPC_NAMESPACES, UCOUNT_NET_NAMESPACES, UCOUNT_MNT_NAMESPACES, UCOUNT_CGROUP_NAMESPACES, #ifdef CONFIG_INOTIFY_USER UCOUNT_INOTIFY_INSTANCES, UCOUNT_INOTIFY_WATCHES, #endif UCOUNT_COUNTS, }; struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; int level; kuid_t owner; kgid_t group; struct ns_common ns; unsigned long flags; #ifdef CONFIG_KEYS /* List of joinable keyrings in this namespace. Modification access of * these pointers is controlled by keyring_sem. Once * user_keyring_register is set, it won't be changed, so it can be * accessed directly with READ_ONCE(). */ struct list_head keyring_name_list; struct key *user_keyring_register; struct rw_semaphore keyring_sem; #endif /* Register of per-UID persistent keyrings for this namespace */ #ifdef CONFIG_PERSISTENT_KEYRINGS struct key *persistent_keyring_register; #endif struct work_struct work; #ifdef CONFIG_SYSCTL struct ctl_table_set set; struct ctl_table_header *sysctls; #endif struct ucounts *ucounts; int ucount_max[UCOUNT_COUNTS]; } __randomize_layout; struct ucounts { struct hlist_node node; struct user_namespace *ns; kuid_t uid; int count; atomic_t ucount[UCOUNT_COUNTS]; }; extern struct user_namespace init_user_ns; bool setup_userns_sysctls(struct user_namespace *ns); void retire_userns_sysctls(struct user_namespace *ns); struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type); void dec_ucount(struct ucounts *ucounts, enum ucount_type type); #ifdef CONFIG_USER_NS static inline struct user_namespace *get_user_ns(struct user_namespace *ns) { if (ns) atomic_inc(&ns->count); return ns; } extern int create_user_ns(struct cred *new); extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred); extern void __put_user_ns(struct user_namespace *ns); static inline void put_user_ns(struct user_namespace *ns) { if (ns && atomic_dec_and_test(&ns->count)) __put_user_ns(ns); } struct seq_operations; extern const struct seq_operations proc_uid_seq_operations; extern const struct seq_operations proc_gid_seq_operations; extern const struct seq_operations proc_projid_seq_operations; extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t proc_setgroups_write(struct file *, const char __user *, 
				    size_t, loff_t *);
extern int proc_setgroups_show(struct seq_file *m, void *v);
extern bool userns_may_setgroups(const struct user_namespace *ns);
extern bool in_userns(const struct user_namespace *ancestor,
		      const struct user_namespace *child);
extern bool current_in_userns(const struct user_namespace *target_ns);

struct ns_common *ns_get_owner(struct ns_common *ns);
#else

static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
{
	return &init_user_ns;
}

static inline int create_user_ns(struct cred *new)
{
	return -EINVAL;
}

static inline int unshare_userns(unsigned long unshare_flags,
				 struct cred **new_cred)
{
	if (unshare_flags & CLONE_NEWUSER)
		return -EINVAL;
	return 0;
}

static inline void put_user_ns(struct user_namespace *ns)
{
}

static inline bool userns_may_setgroups(const struct user_namespace *ns)
{
	return true;
}

static inline bool in_userns(const struct user_namespace *ancestor,
			     const struct user_namespace *child)
{
	return true;
}

static inline bool current_in_userns(const struct user_namespace *target_ns)
{
	return true;
}

static inline struct ns_common *ns_get_owner(struct ns_common *ns)
{
	return ERR_PTR(-EPERM);
}
#endif

#endif /* _LINUX_USER_NAMESPACE_H */
frontswap.h 0000644 00000006061 14722070374 0006751 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FRONTSWAP_H
#define _LINUX_FRONTSWAP_H

#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/jump_label.h>

/*
 * Return code to denote that requested number of
 * frontswap pages are unused (moved to page cache).
 * Used in shmem_unuse and try_to_unuse.
 */
#define FRONTSWAP_PAGES_UNUSED	2

struct frontswap_ops {
	void (*init)(unsigned); /* this swap type was just swapon'ed */
	int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
	int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
	void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
	void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
	struct frontswap_ops *next; /* private pointer to next ops */
};

extern void frontswap_register_ops(struct frontswap_ops *ops);
extern void frontswap_shrink(unsigned long);
extern unsigned long frontswap_curr_pages(void);
extern void frontswap_writethrough(bool);
#define FRONTSWAP_HAS_EXCLUSIVE_GETS
extern void frontswap_tmem_exclusive_gets(bool);

extern bool __frontswap_test(struct swap_info_struct *, pgoff_t);
extern void __frontswap_init(unsigned type, unsigned long *map);
extern int __frontswap_store(struct page *page);
extern int __frontswap_load(struct page *page);
extern void __frontswap_invalidate_page(unsigned, pgoff_t);
extern void __frontswap_invalidate_area(unsigned);
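/*
 * Usage sketch (illustration only, not part of the original header): a
 * minimal frontswap backend registering the ops declared above. This toy
 * backend rejects every store, which is a legal no-op implementation; a
 * real backend would compress or copy the page instead. The "example_"
 * names are made up, and <linux/module.h> is assumed for module_init().
 */
#if 0
static void example_init(unsigned type)
{
	/* called when a swap area of this type is swapon'ed */
}

static int example_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* refuse: the page follows the normal swap path */
}

static int example_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* nothing was stored, so nothing can be loaded */
}

static void example_invalidate_page(unsigned type, pgoff_t offset)
{
}

static void example_invalidate_area(unsigned type)
{
}

static struct frontswap_ops example_ops = {
	.init			= example_init,
	.store			= example_store,
	.load			= example_load,
	.invalidate_page	= example_invalidate_page,
	.invalidate_area	= example_invalidate_area,
};

static int __init example_frontswap_init(void)
{
	frontswap_register_ops(&example_ops);
	return 0;
}
module_init(example_frontswap_init);
#endif

#ifdef CONFIG_FRONTSWAP
extern struct static_key_false frontswap_enabled_key;

static inline bool frontswap_enabled(void)
{
	return static_branch_unlikely(&frontswap_enabled_key);
}

static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
{
	return __frontswap_test(sis, offset);
}

static inline void frontswap_map_set(struct swap_info_struct *p,
				     unsigned long *map)
{
	p->frontswap_map = map;
}

static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
{
	return p->frontswap_map;
}
#else
/* all inline routines become no-ops and all externs are ignored */

static inline bool frontswap_enabled(void)
{
	return false;
}

static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
{
	return false;
}

static inline void frontswap_map_set(struct swap_info_struct *p,
				     unsigned long *map)
{
}

static inline unsigned long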
*frontswap_map_get(struct swap_info_struct *p) { return NULL; } #endif static inline int frontswap_store(struct page *page) { if (frontswap_enabled()) return __frontswap_store(page); return -1; } static inline int frontswap_load(struct page *page) { if (frontswap_enabled()) return __frontswap_load(page); return -1; } static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) { if (frontswap_enabled()) __frontswap_invalidate_page(type, offset); } static inline void frontswap_invalidate_area(unsigned type) { if (frontswap_enabled()) __frontswap_invalidate_area(type); } static inline void frontswap_init(unsigned type, unsigned long *map) { #ifdef CONFIG_FRONTSWAP __frontswap_init(type, map); #endif } #endif /* _LINUX_FRONTSWAP_H */ trace_events.h 0000644 00000051020 14722070374 0007403 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TRACE_EVENT_H #define _LINUX_TRACE_EVENT_H #include <linux/ring_buffer.h> #include <linux/trace_seq.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <linux/perf_event.h> #include <linux/tracepoint.h> struct trace_array; struct trace_buffer; struct tracer; struct dentry; struct bpf_prog; const char *trace_print_flags_seq(struct trace_seq *p, const char *delim, unsigned long flags, const struct trace_print_flags *flag_array); const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val, const struct trace_print_flags *symbol_array); #if BITS_PER_LONG == 32 const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim, unsigned long long flags, const struct trace_print_flags_u64 *flag_array); const char *trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val, const struct trace_print_flags_u64 *symbol_array); #endif const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, unsigned int bitmask_size); const char *trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int len, bool concatenate); const char *trace_print_array_seq(struct trace_seq *p, const void *buf, int count, size_t el_size); struct trace_iterator; struct trace_event; int trace_raw_output_prep(struct trace_iterator *iter, struct trace_event *event); /* * The trace entry - the most basic unit of tracing. This is what * is printed in the end as a single line in the trace output, such as: * * bash-15816 [01] 235.197585: idle_cpu <- irq_enter */ struct trace_entry { unsigned short type; unsigned char flags; unsigned char preempt_count; int pid; }; #define TRACE_EVENT_TYPE_MAX \ ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) /* * Trace iterator - used by printout routines who present trace * results to users and which routines might sleep, etc: */ struct trace_iterator { struct trace_array *tr; struct tracer *trace; struct trace_buffer *trace_buffer; void *private; int cpu_file; struct mutex mutex; struct ring_buffer_iter **buffer_iter; unsigned long iter_flags; /* trace_seq for __print_flags() and __print_symbolic() etc. 
*/ struct trace_seq tmp_seq; cpumask_var_t started; /* it's true when current open file is snapshot */ bool snapshot; /* The below is zeroed out in pipe_read */ struct trace_seq seq; struct trace_entry *ent; unsigned long lost_events; int leftover; int ent_size; int cpu; u64 ts; loff_t pos; long idx; /* All new field here will be zeroed out in pipe_read */ }; enum trace_iter_flags { TRACE_FILE_LAT_FMT = 1, TRACE_FILE_ANNOTATE = 2, TRACE_FILE_TIME_IN_NS = 4, }; typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, int flags, struct trace_event *event); struct trace_event_functions { trace_print_func trace; trace_print_func raw; trace_print_func hex; trace_print_func binary; }; struct trace_event { struct hlist_node node; struct list_head list; int type; struct trace_event_functions *funcs; }; extern int register_trace_event(struct trace_event *event); extern int unregister_trace_event(struct trace_event *event); /* Return values for print_line callback */ enum print_line_t { TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ TRACE_TYPE_HANDLED = 1, TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */ TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ }; enum print_line_t trace_handle_return(struct trace_seq *s); void tracing_generic_entry_update(struct trace_entry *entry, unsigned short type, unsigned long flags, int pc); struct trace_event_file; struct ring_buffer_event * trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer, struct trace_event_file *trace_file, int type, unsigned long len, unsigned long flags, int pc); #define TRACE_RECORD_CMDLINE BIT(0) #define TRACE_RECORD_TGID BIT(1) void tracing_record_taskinfo(struct task_struct *task, int flags); void tracing_record_taskinfo_sched_switch(struct task_struct *prev, struct task_struct *next, int flags); void tracing_record_cmdline(struct task_struct *task); void tracing_record_tgid(struct task_struct *task); int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...); struct event_filter; enum trace_reg { TRACE_REG_REGISTER, TRACE_REG_UNREGISTER, #ifdef CONFIG_PERF_EVENTS TRACE_REG_PERF_REGISTER, TRACE_REG_PERF_UNREGISTER, TRACE_REG_PERF_OPEN, TRACE_REG_PERF_CLOSE, /* * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a * custom action was taken and the default action is not to be * performed. 
*/ TRACE_REG_PERF_ADD, TRACE_REG_PERF_DEL, #endif }; struct trace_event_call; struct trace_event_class { const char *system; void *probe; #ifdef CONFIG_PERF_EVENTS void *perf_probe; #endif int (*reg)(struct trace_event_call *event, enum trace_reg type, void *data); int (*define_fields)(struct trace_event_call *); struct list_head *(*get_fields)(struct trace_event_call *); struct list_head fields; int (*raw_init)(struct trace_event_call *); }; extern int trace_event_reg(struct trace_event_call *event, enum trace_reg type, void *data); struct trace_event_buffer { struct ring_buffer *buffer; struct ring_buffer_event *event; struct trace_event_file *trace_file; void *entry; unsigned long flags; int pc; }; void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, struct trace_event_file *trace_file, unsigned long len); void trace_event_buffer_commit(struct trace_event_buffer *fbuffer); enum { TRACE_EVENT_FL_FILTERED_BIT, TRACE_EVENT_FL_CAP_ANY_BIT, TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, TRACE_EVENT_FL_TRACEPOINT_BIT, TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, }; /* * Event flags: * FILTERED - The event has a filter attached * CAP_ANY - Any user can enable for perf * NO_SET_FILTER - Set when filter has error and is to be ignored * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file * TRACEPOINT - Event is a tracepoint * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe */ enum { TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), }; #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE) struct trace_event_call { struct list_head list; struct trace_event_class *class; union { char *name; /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */ struct tracepoint *tp; }; struct trace_event event; char *print_fmt; struct event_filter *filter; void *mod; void *data; /* * bit 0: filter_active * bit 1: allow trace by non root (cap any) * bit 2: failed to apply filter * bit 3: trace internal event (do not enable) * bit 4: Event was enabled by module * bit 5: use call filter rather than file filter * bit 6: Event is a tracepoint */ int flags; /* static flags of different events */ #ifdef CONFIG_PERF_EVENTS int perf_refcount; struct hlist_head __percpu *perf_events; struct bpf_prog_array __rcu *prog_array; int (*perf_perm)(struct trace_event_call *, struct perf_event *); #endif }; #ifdef CONFIG_PERF_EVENTS static inline bool bpf_prog_array_valid(struct trace_event_call *call) { /* * This inline function checks whether call->prog_array * is valid or not. The function is called in various places, * outside rcu_read_lock/unlock, as a heuristic to speed up execution. * * If this function returns true, and later call->prog_array * becomes false inside rcu_read_lock/unlock region, * we bail out then. If this function return false, * there is a risk that we might miss a few events if the checking * were delayed until inside rcu_read_lock/unlock region and * call->prog_array happened to become non-NULL then. * * Here, READ_ONCE() is used instead of rcu_access_pointer(). 
* rcu_access_pointer() requires the actual definition of * "struct bpf_prog_array" while READ_ONCE() only needs * a declaration of the same type. */ return !!READ_ONCE(call->prog_array); } #endif static inline const char * trace_event_name(struct trace_event_call *call) { if (call->flags & TRACE_EVENT_FL_TRACEPOINT) return call->tp ? call->tp->name : NULL; else return call->name; } static inline struct list_head * trace_get_fields(struct trace_event_call *event_call) { if (!event_call->class->get_fields) return &event_call->class->fields; return event_call->class->get_fields(event_call); } struct trace_array; struct trace_subsystem_dir; enum { EVENT_FILE_FL_ENABLED_BIT, EVENT_FILE_FL_RECORDED_CMD_BIT, EVENT_FILE_FL_RECORDED_TGID_BIT, EVENT_FILE_FL_FILTERED_BIT, EVENT_FILE_FL_NO_SET_FILTER_BIT, EVENT_FILE_FL_SOFT_MODE_BIT, EVENT_FILE_FL_SOFT_DISABLED_BIT, EVENT_FILE_FL_TRIGGER_MODE_BIT, EVENT_FILE_FL_TRIGGER_COND_BIT, EVENT_FILE_FL_PID_FILTER_BIT, EVENT_FILE_FL_WAS_ENABLED_BIT, EVENT_FILE_FL_FREED_BIT, }; /* * Event file flags: * ENABLED - The event is enabled * RECORDED_CMD - The comms should be recorded at sched_switch * RECORDED_TGID - The tgids should be recorded at sched_switch * FILTERED - The event has a filter attached * NO_SET_FILTER - Set when filter has error and is to be ignored * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED * SOFT_DISABLED - When set, do not trace the event (even though its * tracepoint may be enabled) * TRIGGER_MODE - When set, invoke the triggers associated with the event * TRIGGER_COND - When set, one or more triggers has an associated filter * PID_FILTER - When set, the event is filtered based on pid * WAS_ENABLED - Set when enabled to know to clear trace on module removal * FREED - File descriptor is freed, all fields should be considered invalid */ enum { EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT), EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT), EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT), EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT), EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT), EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT), EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT), EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT), EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT), EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT), EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT), }; struct trace_event_file { struct list_head list; struct trace_event_call *event_call; struct event_filter __rcu *filter; struct dentry *dir; struct trace_array *tr; struct trace_subsystem_dir *system; struct list_head triggers; /* * 32 bit flags: * bit 0: enabled * bit 1: enabled cmd record * bit 2: enable/disable with the soft disable bit * bit 3: soft disabled * bit 4: trigger enabled * * Note: The bits must be set atomically to prevent races * from other writers. Reads of flags do not need to be in * sync as they occur in critical sections. But the way flags * is currently used, these changes do not affect the code * except that when a change is made, it may have a slight * delay in propagating the changes to other CPUs due to * caching and such. 
Which is mostly OK ;-) */ unsigned long flags; atomic_t ref; /* ref count for opened files */ atomic_t sm_ref; /* soft-mode reference counter */ atomic_t tm_ref; /* trigger-mode reference counter */ }; #define __TRACE_EVENT_FLAGS(name, value) \ static int __init trace_init_flags_##name(void) \ { \ event_##name.flags |= value; \ return 0; \ } \ early_initcall(trace_init_flags_##name); #define __TRACE_EVENT_PERF_PERM(name, expr...) \ static int perf_perm_##name(struct trace_event_call *tp_event, \ struct perf_event *p_event) \ { \ return ({ expr; }); \ } \ static int __init trace_init_perf_perm_##name(void) \ { \ event_##name.perf_perm = &perf_perm_##name; \ return 0; \ } \ early_initcall(trace_init_perf_perm_##name); #define PERF_MAX_TRACE_SIZE 8192 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ enum event_trigger_type { ETT_NONE = (0), ETT_TRACE_ONOFF = (1 << 0), ETT_SNAPSHOT = (1 << 1), ETT_STACKTRACE = (1 << 2), ETT_EVENT_ENABLE = (1 << 3), ETT_EVENT_HIST = (1 << 4), ETT_HIST_ENABLE = (1 << 5), }; extern int filter_match_preds(struct event_filter *filter, void *rec); extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, void *rec, struct ring_buffer_event *event); extern void event_triggers_post_call(struct trace_event_file *file, enum event_trigger_type tt); bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); /** * trace_trigger_soft_disabled - do triggers and test if soft disabled * @file: The file pointer of the event to test * * If any triggers without filters are attached to this event, they * will be called here. If the event is soft disabled and has no * triggers that require testing the fields, it will return true, * otherwise false. */ static inline bool trace_trigger_soft_disabled(struct trace_event_file *file) { unsigned long eflags = file->flags; if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { if (eflags & EVENT_FILE_FL_TRIGGER_MODE) event_triggers_call(file, NULL, NULL); if (eflags & EVENT_FILE_FL_SOFT_DISABLED) return true; if (eflags & EVENT_FILE_FL_PID_FILTER) return trace_event_ignore_this_pid(file); } return false; } #ifdef CONFIG_BPF_EVENTS unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); void perf_event_detach_bpf_prog(struct perf_event *event); int perf_event_query_prog_array(struct perf_event *event, void __user *info); int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog); struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name); void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { return 1; } static inline int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog) { return -EOPNOTSUPP; } static inline void perf_event_detach_bpf_prog(struct perf_event *event) { } static inline int perf_event_query_prog_array(struct perf_event *event, void __user *info) { return -EOPNOTSUPP; } static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p) { return -EOPNOTSUPP; } static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p) { return -EOPNOTSUPP; } static inline struct 
bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) { return NULL; } static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) { } static inline int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr) { return -EOPNOTSUPP; } #endif enum { FILTER_OTHER = 0, FILTER_STATIC_STRING, FILTER_DYN_STRING, FILTER_PTR_STRING, FILTER_TRACE_FN, FILTER_COMM, FILTER_CPU, }; extern int trace_event_raw_init(struct trace_event_call *call); extern int trace_define_field(struct trace_event_call *call, const char *type, const char *name, int offset, int size, int is_signed, int filter_type); extern int trace_add_event_call(struct trace_event_call *call); extern int trace_remove_event_call(struct trace_event_call *call); extern int trace_event_get_offsets(struct trace_event_call *call); #define is_signed_type(type) (((type)(-1)) < (type)1) int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); int trace_set_clr_event(const char *system, const char *event, int set); /* * The double __builtin_constant_p is because gcc will give us an error * if we try to allocate the static variable to fmt if it is not a * constant. Even with the outer if statement optimizing out. */ #define event_trace_printk(ip, fmt, args...) \ do { \ __trace_printk_check_format(fmt, ##args); \ tracing_record_cmdline(current); \ if (__builtin_constant_p(fmt)) { \ static const char *trace_printk_fmt \ __attribute__((section("__trace_printk_fmt"))) = \ __builtin_constant_p(fmt) ? fmt : NULL; \ \ __trace_bprintk(ip, trace_printk_fmt, ##args); \ } else \ __trace_printk(ip, fmt, ##args); \ } while (0) #ifdef CONFIG_PERF_EVENTS struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); extern int perf_trace_init(struct perf_event *event); extern void perf_trace_destroy(struct perf_event *event); extern int perf_trace_add(struct perf_event *event, int flags); extern void perf_trace_del(struct perf_event *event, int flags); #ifdef CONFIG_KPROBE_EVENTS extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe); extern void perf_kprobe_destroy(struct perf_event *event); extern int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type, const char **symbol, u64 *probe_offset, u64 *probe_addr, bool perf_type_tracepoint); #endif #ifdef CONFIG_UPROBE_EVENTS extern int perf_uprobe_init(struct perf_event *event, unsigned long ref_ctr_offset, bool is_retprobe); extern void perf_uprobe_destroy(struct perf_event *event); extern int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, const char **filename, u64 *probe_offset, u64 *probe_addr, bool perf_type_tracepoint); #endif extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); void perf_trace_buf_update(void *record, u16 type); void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); void bpf_trace_run1(struct bpf_prog *prog, u64 arg1); void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2); void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2, u64 arg3); void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2, u64 arg3, u64 arg4); void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5); void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6); void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2, u64 arg3, 
u64 arg4, u64 arg5, u64 arg6, u64 arg7); void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8); void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9); void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9, u64 arg10); void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9, u64 arg10, u64 arg11); void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12); void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, struct trace_event_call *call, u64 count, struct pt_regs *regs, struct hlist_head *head, struct task_struct *task); static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, u64 count, struct pt_regs *regs, void *head, struct task_struct *task) { perf_tp_event(type, count, raw_data, size, regs, head, rctx, task); } #endif #endif /* _LINUX_TRACE_EVENT_H */ greybus/greybus_id.h 0000644 00000001116 14722070374 0010536 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* FIXME * move this to include/linux/mod_devicetable.h when merging */ #ifndef __LINUX_GREYBUS_ID_H #define __LINUX_GREYBUS_ID_H #include <linux/types.h> #include <linux/mod_devicetable.h> struct greybus_bundle_id { __u16 match_flags; __u32 vendor; __u32 product; __u8 class; kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t)); }; /* Used to match the greybus_bundle_id */ #define GREYBUS_ID_MATCH_VENDOR BIT(0) #define GREYBUS_ID_MATCH_PRODUCT BIT(1) #define GREYBUS_ID_MATCH_CLASS BIT(2) #endif /* __LINUX_GREYBUS_ID_H */ greybus/operation.h 0000644 00000015036 14722070374 0010410 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Greybus operations * * Copyright 2014 Google Inc. * Copyright 2014 Linaro Ltd. */ #ifndef __OPERATION_H #define __OPERATION_H #include <linux/completion.h> #include <linux/kref.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> struct gb_host_device; struct gb_operation; /* The default amount of time a request is given to complete */ #define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */ /* * The top bit of the type in an operation message header indicates * whether the message is a request (bit clear) or response (bit set) */ #define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80) enum gb_operation_result { GB_OP_SUCCESS = 0x00, GB_OP_INTERRUPTED = 0x01, GB_OP_TIMEOUT = 0x02, GB_OP_NO_MEMORY = 0x03, GB_OP_PROTOCOL_BAD = 0x04, GB_OP_OVERFLOW = 0x05, GB_OP_INVALID = 0x06, GB_OP_RETRY = 0x07, GB_OP_NONEXISTENT = 0x08, GB_OP_UNKNOWN_ERROR = 0xfe, GB_OP_MALFUNCTION = 0xff, }; #define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr) #define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX /* * Protocol code should only examine the payload and payload_size fields, and * host-controller drivers may use the hcpriv field. All other fields are * intended to be private to the operations core code. 
*/ struct gb_message { struct gb_operation *operation; struct gb_operation_msg_hdr *header; void *payload; size_t payload_size; void *buffer; void *hcpriv; }; #define GB_OPERATION_FLAG_INCOMING BIT(0) #define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1) #define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2) #define GB_OPERATION_FLAG_CORE BIT(3) #define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \ GB_OPERATION_FLAG_UNIDIRECTIONAL) /* * A Greybus operation is a remote procedure call performed over a * connection between two UniPro interfaces. * * Every operation consists of a request message sent to the other * end of the connection coupled with a reply message returned to * the sender. Every operation has a type, whose interpretation is * dependent on the protocol associated with the connection. * * Only four things in an operation structure are intended to be * directly usable by protocol handlers: the operation's connection * pointer; the operation type; the request message payload (and * size); and the response message payload (and size). Note that a * message with a 0-byte payload has a null message payload pointer. * * In addition, every operation has a result, which is an errno * value. Protocol handlers access the operation result using * gb_operation_result(). */ typedef void (*gb_operation_callback)(struct gb_operation *); struct gb_operation { struct gb_connection *connection; struct gb_message *request; struct gb_message *response; unsigned long flags; u8 type; u16 id; int errno; /* Operation result */ struct work_struct work; gb_operation_callback callback; struct completion completion; struct timer_list timer; struct kref kref; atomic_t waiters; int active; struct list_head links; /* connection->operations */ void *private; }; static inline bool gb_operation_is_incoming(struct gb_operation *operation) { return operation->flags & GB_OPERATION_FLAG_INCOMING; } static inline bool gb_operation_is_unidirectional(struct gb_operation *operation) { return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL; } static inline bool gb_operation_short_response_allowed(struct gb_operation *operation) { return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE; } static inline bool gb_operation_is_core(struct gb_operation *operation) { return operation->flags & GB_OPERATION_FLAG_CORE; } void gb_connection_recv(struct gb_connection *connection, void *data, size_t size); int gb_operation_result(struct gb_operation *operation); size_t gb_operation_get_payload_size_max(struct gb_connection *connection); struct gb_operation * gb_operation_create_flags(struct gb_connection *connection, u8 type, size_t request_size, size_t response_size, unsigned long flags, gfp_t gfp); static inline struct gb_operation * gb_operation_create(struct gb_connection *connection, u8 type, size_t request_size, size_t response_size, gfp_t gfp) { return gb_operation_create_flags(connection, type, request_size, response_size, 0, gfp); } struct gb_operation * gb_operation_create_core(struct gb_connection *connection, u8 type, size_t request_size, size_t response_size, unsigned long flags, gfp_t gfp); void gb_operation_get(struct gb_operation *operation); void gb_operation_put(struct gb_operation *operation); bool gb_operation_response_alloc(struct gb_operation *operation, size_t response_size, gfp_t gfp); int gb_operation_request_send(struct gb_operation *operation, gb_operation_callback callback, unsigned int timeout, gfp_t gfp); int gb_operation_request_send_sync_timeout(struct gb_operation *operation, unsigned 
int timeout); static inline int gb_operation_request_send_sync(struct gb_operation *operation) { return gb_operation_request_send_sync_timeout(operation, GB_OPERATION_TIMEOUT_DEFAULT); } void gb_operation_cancel(struct gb_operation *operation, int errno); void gb_operation_cancel_incoming(struct gb_operation *operation, int errno); void greybus_message_sent(struct gb_host_device *hd, struct gb_message *message, int status); int gb_operation_sync_timeout(struct gb_connection *connection, int type, void *request, int request_size, void *response, int response_size, unsigned int timeout); int gb_operation_unidirectional_timeout(struct gb_connection *connection, int type, void *request, int request_size, unsigned int timeout); static inline int gb_operation_sync(struct gb_connection *connection, int type, void *request, int request_size, void *response, int response_size) { return gb_operation_sync_timeout(connection, type, request, request_size, response, response_size, GB_OPERATION_TIMEOUT_DEFAULT); } static inline int gb_operation_unidirectional(struct gb_connection *connection, int type, void *request, int request_size) { return gb_operation_unidirectional_timeout(connection, type, request, request_size, GB_OPERATION_TIMEOUT_DEFAULT); } static inline void *gb_operation_get_data(struct gb_operation *operation) { return operation->private; } static inline void gb_operation_set_data(struct gb_operation *operation, void *data) { operation->private = data; } int gb_operation_init(void); void gb_operation_exit(void); #endif /* !__OPERATION_H */ greybus/greybus_manifest.h 0000644 00000011450 14722070374 0011752 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Greybus manifest definition * * See "Greybus Application Protocol" document (version 0.1) for * details on these values and structures. * * Copyright 2014-2015 Google Inc. * Copyright 2014-2015 Linaro Ltd. * * Released under the GPLv2 and BSD licenses. */ #ifndef __GREYBUS_MANIFEST_H #define __GREYBUS_MANIFEST_H #include <linux/bits.h> #include <linux/types.h> enum greybus_descriptor_type { GREYBUS_TYPE_INVALID = 0x00, GREYBUS_TYPE_INTERFACE = 0x01, GREYBUS_TYPE_STRING = 0x02, GREYBUS_TYPE_BUNDLE = 0x03, GREYBUS_TYPE_CPORT = 0x04, }; enum greybus_protocol { GREYBUS_PROTOCOL_CONTROL = 0x00, /* 0x01 is unused */ GREYBUS_PROTOCOL_GPIO = 0x02, GREYBUS_PROTOCOL_I2C = 0x03, GREYBUS_PROTOCOL_UART = 0x04, GREYBUS_PROTOCOL_HID = 0x05, GREYBUS_PROTOCOL_USB = 0x06, GREYBUS_PROTOCOL_SDIO = 0x07, GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08, GREYBUS_PROTOCOL_PWM = 0x09, /* 0x0a is unused */ GREYBUS_PROTOCOL_SPI = 0x0b, GREYBUS_PROTOCOL_DISPLAY = 0x0c, GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d, GREYBUS_PROTOCOL_SENSOR = 0x0e, GREYBUS_PROTOCOL_LIGHTS = 0x0f, GREYBUS_PROTOCOL_VIBRATOR = 0x10, GREYBUS_PROTOCOL_LOOPBACK = 0x11, GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12, GREYBUS_PROTOCOL_AUDIO_DATA = 0x13, GREYBUS_PROTOCOL_SVC = 0x14, GREYBUS_PROTOCOL_BOOTROM = 0x15, GREYBUS_PROTOCOL_CAMERA_DATA = 0x16, GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17, GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18, GREYBUS_PROTOCOL_AUTHENTICATION = 0x19, GREYBUS_PROTOCOL_LOG = 0x1a, /* ... 
 */
	GREYBUS_PROTOCOL_RAW		= 0xfe,
	GREYBUS_PROTOCOL_VENDOR		= 0xff,
};

enum greybus_class_type {
	GREYBUS_CLASS_CONTROL		= 0x00,
	/* 0x01 is unused */
	/* 0x02 is unused */
	/* 0x03 is unused */
	/* 0x04 is unused */
	GREYBUS_CLASS_HID		= 0x05,
	/* 0x06 is unused */
	/* 0x07 is unused */
	GREYBUS_CLASS_POWER_SUPPLY	= 0x08,
	/* 0x09 is unused */
	GREYBUS_CLASS_BRIDGED_PHY	= 0x0a,
	/* 0x0b is unused */
	GREYBUS_CLASS_DISPLAY		= 0x0c,
	GREYBUS_CLASS_CAMERA		= 0x0d,
	GREYBUS_CLASS_SENSOR		= 0x0e,
	GREYBUS_CLASS_LIGHTS		= 0x0f,
	GREYBUS_CLASS_VIBRATOR		= 0x10,
	GREYBUS_CLASS_LOOPBACK		= 0x11,
	GREYBUS_CLASS_AUDIO		= 0x12,
	/* 0x13 is unused */
	/* 0x14 is unused */
	GREYBUS_CLASS_BOOTROM		= 0x15,
	GREYBUS_CLASS_FW_MANAGEMENT	= 0x16,
	GREYBUS_CLASS_LOG		= 0x17,
	/* ... */
	GREYBUS_CLASS_RAW		= 0xfe,
	GREYBUS_CLASS_VENDOR		= 0xff,
};

enum {
	GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
};

/*
 * The string in a string descriptor is not NUL-terminated. The
 * size of the descriptor will be rounded up to a multiple of 4
 * bytes, by padding the string with 0x00 bytes if necessary.
 */
struct greybus_descriptor_string {
	__u8	length;
	__u8	id;
	__u8	string[0];
} __packed;

/*
 * An interface descriptor describes information about an interface as a whole,
 * *not* the functions within it.
 */
struct greybus_descriptor_interface {
	__u8	vendor_stringid;
	__u8	product_stringid;
	__u8	features;
	__u8	pad;
} __packed;

/*
 * A bundle descriptor defines an identification number and a class for
 * each bundle.
 *
 * @id: Uniquely identifies a bundle within an interface; its sole purpose is
 * to allow CPort descriptors to specify which bundle they are associated
 * with. The first bundle will have id 0, the second will have 1, and so on.
 *
 * The largest CPort id associated with a bundle (defined by a
 * CPort descriptor in the manifest) is used to determine how to
 * encode the device id and module number in UniPro packets
 * that use the bundle.
 *
 * @class: Used by the kernel to know the functionality provided by the
 * bundle; it will be matched against a driver's functionality while probing
 * the greybus driver. It should contain one of the values defined in
 * 'enum greybus_class_type'.
 */
struct greybus_descriptor_bundle {
	__u8	id;	/* interface-relative id (0..) */
	__u8	class;
	__u8	pad[2];
} __packed;

/*
 * A CPort descriptor indicates the id of the bundle within the
 * module it's associated with, along with the CPort id used to
 * address the CPort. The protocol id defines the format of messages
 * exchanged using the CPort.
 */
struct greybus_descriptor_cport {
	__le16	id;
	__u8	bundle;
	__u8	protocol_id;	/* enum greybus_protocol */
} __packed;

struct greybus_descriptor_header {
	__le16	size;
	__u8	type;	/* enum greybus_descriptor_type */
	__u8	pad;
} __packed;

struct greybus_descriptor {
	struct greybus_descriptor_header	header;
	union {
		struct greybus_descriptor_string	string;
		struct greybus_descriptor_interface	interface;
		struct greybus_descriptor_bundle	bundle;
		struct greybus_descriptor_cport		cport;
	};
} __packed;

struct greybus_manifest_header {
	__le16	size;
	__u8	version_major;
	__u8	version_minor;
} __packed;

struct greybus_manifest {
	struct greybus_manifest_header		header;
	struct greybus_descriptor		descriptors[0];
} __packed;

#endif /* __GREYBUS_MANIFEST_H */
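/*
 * Usage sketch (illustration only, not part of the original header): walking
 * the descriptor list of a manifest blob using the layout above. Each
 * descriptor's header.size covers the header itself plus the payload, so it
 * is also the step to the next descriptor. "blob"/"blob_size" are made-up
 * parameters, and the bounds handling is reduced to the minimum.
 */
#if 0
static void example_walk_manifest(const void *blob, size_t blob_size)
{
	const struct greybus_manifest *m = blob;
	size_t total = le16_to_cpu(m->header.size);
	const u8 *p = (const u8 *)m->descriptors;
	const u8 *end = (const u8 *)blob +
			(total < blob_size ? total : blob_size);

	while (p + sizeof(struct greybus_descriptor_header) <= end) {
		const struct greybus_descriptor *desc = (const void *)p;
		u16 dsize = le16_to_cpu(desc->header.size);

		if (dsize < sizeof(struct greybus_descriptor_header) ||
		    p + dsize > end)
			break;	/* malformed descriptor: stop walking */

		switch (desc->header.type) {
		case GREYBUS_TYPE_BUNDLE:
			/* desc->bundle.id and desc->bundle.class are valid */
			break;
		case GREYBUS_TYPE_CPORT:
			/* desc->cport.id names the CPort, protocol_id its protocol */
			break;
		default:
			break;
		}

		p += dsize;
	}
}
#endif

greybus/greybus_protocols.h 0000644 00000166747 14722070374 0012204 0 ustar 00 /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
 * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.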
 */

#ifndef __GREYBUS_PROTOCOLS_H
#define __GREYBUS_PROTOCOLS_H

#include <linux/types.h>

/* Fixed IDs for control/svc protocols */

/* SVC switch-port device ids */
#define GB_SVC_DEVICE_ID_SVC		0
#define GB_SVC_DEVICE_ID_AP		1
#define GB_SVC_DEVICE_ID_MIN		2
#define GB_SVC_DEVICE_ID_MAX		31

#define GB_SVC_CPORT_ID			0
#define GB_CONTROL_BUNDLE_ID		0
#define GB_CONTROL_CPORT_ID		0

/*
 * All operation messages (both requests and responses) begin with
 * a header that encodes the size of the message (header included).
 * This header also contains a unique identifier, that associates a
 * response message with its operation. The header contains an
 * operation type field, whose interpretation is dependent on what
 * type of protocol is used over the connection. The high bit
 * (0x80) of the operation type field is used to indicate whether
 * the message is a request (clear) or a response (set).
 *
 * Response messages include an additional result byte, which
 * communicates the result of the corresponding request. A zero
 * result value means the operation completed successfully. Any
 * other value indicates an error; in this case, the payload of the
 * response message (if any) is ignored. The result byte must be
 * zero in the header for a request message.
 *
 * The wire format for all numeric fields in the header is little
 * endian. Any operation-specific data begins immediately after the
 * header.
 */
struct gb_operation_msg_hdr {
	__le16	size;		/* Size in bytes of header + payload */
	__le16	operation_id;	/* Operation unique id */
	__u8	type;		/* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	__u8	result;		/* Result of request (in responses only) */
	__u8	pad[2];		/* must be zero (ignore when read) */
} __packed;

/* Generic request types */
#define GB_REQUEST_TYPE_CPORT_SHUTDOWN	0x00
#define GB_REQUEST_TYPE_INVALID		0x7f

struct gb_cport_shutdown_request {
	__u8 phase;
} __packed;
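/*
 * Worked example (illustration only, not part of the original header): a
 * complete CPort shutdown request with operation id 1 is the 8-byte header
 * followed by the 1-byte phase payload, so header.size is 9. A matching
 * response would carry the same operation_id and type
 * 0x80 | GB_REQUEST_TYPE_CPORT_SHUTDOWN. The wrapper struct and phase value
 * are made up for the sketch.
 */
#if 0
struct example_cport_shutdown_msg {
	struct gb_operation_msg_hdr		hdr;
	struct gb_cport_shutdown_request	req;
} __packed;

static const struct example_cport_shutdown_msg example_msg = {
	.hdr = {
		.size		= cpu_to_le16(sizeof(struct example_cport_shutdown_msg)),
		.operation_id	= cpu_to_le16(1),
		.type		= GB_REQUEST_TYPE_CPORT_SHUTDOWN,
		.result		= 0,	/* must be zero in a request */
	},
	.req = {
		.phase		= 1,
	},
};
/* on the wire (little endian): 09 00 01 00 00 00 00 00 01 */
#endif

/* Control Protocol */

/* Greybus control request types */
#define GB_CONTROL_TYPE_VERSION			0x01
#define GB_CONTROL_TYPE_PROBE_AP		0x02
#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE	0x03
#define GB_CONTROL_TYPE_GET_MANIFEST		0x04
#define GB_CONTROL_TYPE_CONNECTED		0x05
#define GB_CONTROL_TYPE_DISCONNECTED		0x06
#define GB_CONTROL_TYPE_TIMESYNC_ENABLE		0x07
#define GB_CONTROL_TYPE_TIMESYNC_DISABLE	0x08
#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE	0x09
/* Unused 0x0a */
#define GB_CONTROL_TYPE_BUNDLE_VERSION		0x0b
#define GB_CONTROL_TYPE_DISCONNECTING		0x0c
#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT	0x0d
#define GB_CONTROL_TYPE_MODE_SWITCH		0x0e
#define GB_CONTROL_TYPE_BUNDLE_SUSPEND		0x0f
#define GB_CONTROL_TYPE_BUNDLE_RESUME		0x10
#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE	0x11
#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE		0x12
#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE	0x13
#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE	0x14
#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT	0x15

struct gb_control_version_request {
	__u8 major;
	__u8 minor;
} __packed;

struct gb_control_version_response {
	__u8 major;
	__u8 minor;
} __packed;

struct gb_control_bundle_version_request {
	__u8 bundle_id;
} __packed;

struct gb_control_bundle_version_response {
	__u8 major;
	__u8 minor;
} __packed;

/* Control protocol manifest get size request has no payload */
struct gb_control_get_manifest_size_response {
	__le16 size;
} __packed;

/* Control protocol manifest get request has no payload */
struct gb_control_get_manifest_response {
	__u8 data[0];
} __packed;

/* Control protocol [dis]connected request */
struct gb_control_connected_request {
	__le16 cport_id;
} __packed;

struct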
gb_control_disconnecting_request { __le16 cport_id; } __packed; /* disconnecting response has no payload */ struct gb_control_disconnected_request { __le16 cport_id; } __packed; /* Control protocol [dis]connected response has no payload */ /* * All Bundle power management operations use the same request and response * layout and status codes. */ #define GB_CONTROL_BUNDLE_PM_OK 0x00 #define GB_CONTROL_BUNDLE_PM_INVAL 0x01 #define GB_CONTROL_BUNDLE_PM_BUSY 0x02 #define GB_CONTROL_BUNDLE_PM_FAIL 0x03 #define GB_CONTROL_BUNDLE_PM_NA 0x04 struct gb_control_bundle_pm_request { __u8 bundle_id; } __packed; struct gb_control_bundle_pm_response { __u8 status; } __packed; /* * Interface Suspend Prepare and Deactivate Prepare operations use the same * response layout and error codes. Define a single response structure and reuse * it. Both operations have no payload. */ #define GB_CONTROL_INTF_PM_OK 0x00 #define GB_CONTROL_INTF_PM_BUSY 0x01 #define GB_CONTROL_INTF_PM_NA 0x02 struct gb_control_intf_pm_response { __u8 status; } __packed; /* APBridge protocol */ /* request APB1 log */ #define GB_APB_REQUEST_LOG 0x02 /* request to map a cport to bulk in and bulk out endpoints */ #define GB_APB_REQUEST_EP_MAPPING 0x03 /* request to get the number of cports available */ #define GB_APB_REQUEST_CPORT_COUNT 0x04 /* request to reset a cport state */ #define GB_APB_REQUEST_RESET_CPORT 0x05 /* request to time the latency of messages on a given cport */ #define GB_APB_REQUEST_LATENCY_TAG_EN 0x06 #define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07 /* request to control the CSI transmitter */ #define GB_APB_REQUEST_CSI_TX_CONTROL 0x08 /* request to control audio streaming */ #define GB_APB_REQUEST_AUDIO_CONTROL 0x09 /* TimeSync requests */ #define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d #define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e #define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f #define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10 /* requests to set Greybus CPort flags */ #define GB_APB_REQUEST_CPORT_FLAGS 0x11 /* ARPC request */ #define GB_APB_REQUEST_ARPC_RUN 0x12 struct gb_apb_request_cport_flags { __le32 flags; #define GB_APB_CPORT_FLAG_CONTROL 0x01 #define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02 } __packed; /* Firmware Download Protocol */ /* Request Types */ #define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01 #define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02 #define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03 #define GB_FIRMWARE_TAG_MAX_SIZE 10 /* firmware download find firmware request/response */ struct gb_fw_download_find_firmware_request { __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; } __packed; struct gb_fw_download_find_firmware_response { __u8 firmware_id; __le32 size; } __packed; /* firmware download fetch firmware request/response */ struct gb_fw_download_fetch_firmware_request { __u8 firmware_id; __le32 offset; __le32 size; } __packed; struct gb_fw_download_fetch_firmware_response { __u8 data[0]; } __packed; /* firmware download release firmware request */ struct gb_fw_download_release_firmware_request { __u8 firmware_id; } __packed; /* firmware download release firmware response has no payload */ /* Firmware Management Protocol */ /* Request Types */ #define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01 #define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02 #define GB_FW_MGMT_TYPE_LOADED_FW 0x03 #define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04 #define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05 #define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06 #define GB_FW_LOAD_METHOD_UNIPRO 0x01 #define GB_FW_LOAD_METHOD_INTERNAL 0x02 #define 
GB_FW_LOAD_STATUS_FAILED 0x00 #define GB_FW_LOAD_STATUS_UNVALIDATED 0x01 #define GB_FW_LOAD_STATUS_VALIDATED 0x02 #define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03 #define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01 #define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02 #define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03 #define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04 #define GB_FW_BACKEND_FW_STATUS_INT 0x05 #define GB_FW_BACKEND_FW_STATUS_RETRY 0x06 #define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07 #define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01 #define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02 #define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03 #define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04 #define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05 /* firmware management interface firmware version request has no payload */ struct gb_fw_mgmt_interface_fw_version_response { __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; __le16 major; __le16 minor; } __packed; /* firmware management load and validate firmware request/response */ struct gb_fw_mgmt_load_and_validate_fw_request { __u8 request_id; __u8 load_method; __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; } __packed; /* firmware management load and validate firmware response has no payload*/ /* firmware management loaded firmware request */ struct gb_fw_mgmt_loaded_fw_request { __u8 request_id; __u8 status; __le16 major; __le16 minor; } __packed; /* firmware management loaded firmware response has no payload */ /* firmware management backend firmware version request/response */ struct gb_fw_mgmt_backend_fw_version_request { __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; } __packed; struct gb_fw_mgmt_backend_fw_version_response { __le16 major; __le16 minor; __u8 status; } __packed; /* firmware management backend firmware update request */ struct gb_fw_mgmt_backend_fw_update_request { __u8 request_id; __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; } __packed; /* firmware management backend firmware update response has no payload */ /* firmware management backend firmware updated request */ struct gb_fw_mgmt_backend_fw_updated_request { __u8 request_id; __u8 status; } __packed; /* firmware management backend firmware updated response has no payload */ /* Component Authentication Protocol (CAP) */ /* Request Types */ #define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01 #define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02 #define GB_CAP_TYPE_AUTHENTICATE 0x03 /* CAP get endpoint uid request has no payload */ struct gb_cap_get_endpoint_uid_response { __u8 uid[8]; } __packed; /* CAP get endpoint ims certificate request/response */ struct gb_cap_get_ims_certificate_request { __le32 certificate_class; __le32 certificate_id; } __packed; struct gb_cap_get_ims_certificate_response { __u8 result_code; __u8 certificate[0]; } __packed; /* CAP authenticate request/response */ struct gb_cap_authenticate_request { __le32 auth_type; __u8 uid[8]; __u8 challenge[32]; } __packed; struct gb_cap_authenticate_response { __u8 result_code; __u8 response[64]; __u8 signature[0]; } __packed; /* Bootrom Protocol */ /* Version of the Greybus bootrom protocol we support */ #define GB_BOOTROM_VERSION_MAJOR 0x00 #define GB_BOOTROM_VERSION_MINOR 0x01 /* Greybus bootrom request types */ #define GB_BOOTROM_TYPE_VERSION 0x01 #define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02 #define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03 #define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04 #define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no-payload */ #define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no-payload */ /* Greybus bootrom boot stages 
*/ #define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */ #define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */ #define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */ /* Greybus bootrom ready to boot status */ #define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */ #define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */ #define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob is valid and secure */ /* Max bootrom data fetch size in bytes */ #define GB_BOOTROM_FETCH_MAX 2000 struct gb_bootrom_version_request { __u8 major; __u8 minor; } __packed; struct gb_bootrom_version_response { __u8 major; __u8 minor; } __packed; /* Bootrom protocol firmware size request/response */ struct gb_bootrom_firmware_size_request { __u8 stage; } __packed; struct gb_bootrom_firmware_size_response { __le32 size; } __packed; /* Bootrom protocol get firmware request/response */ struct gb_bootrom_get_firmware_request { __le32 offset; __le32 size; } __packed; struct gb_bootrom_get_firmware_response { __u8 data[0]; } __packed; /* Bootrom protocol Ready to boot request */ struct gb_bootrom_ready_to_boot_request { __u8 status; } __packed; /* Bootrom protocol Ready to boot response has no payload */ /* Bootrom protocol get VID/PID request has no payload */ struct gb_bootrom_get_vid_pid_response { __le32 vendor_id; __le32 product_id; } __packed; /* Power Supply */ /* Greybus power supply request types */ #define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02 #define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03 #define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04 #define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05 #define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06 #define GB_POWER_SUPPLY_TYPE_EVENT 0x07 /* Greybus power supply battery technologies types */ #define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000 #define GB_POWER_SUPPLY_TECH_NiMH 0x0001 #define GB_POWER_SUPPLY_TECH_LION 0x0002 #define GB_POWER_SUPPLY_TECH_LIPO 0x0003 #define GB_POWER_SUPPLY_TECH_LiFe 0x0004 #define GB_POWER_SUPPLY_TECH_NiCd 0x0005 #define GB_POWER_SUPPLY_TECH_LiMn 0x0006 /* Greybus power supply types */ #define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000 #define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001 #define GB_POWER_SUPPLY_UPS_TYPE 0x0002 #define GB_POWER_SUPPLY_MAINS_TYPE 0x0003 #define GB_POWER_SUPPLY_USB_TYPE 0x0004 #define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005 #define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006 #define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007 /* Greybus power supply health values */ #define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000 #define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001 #define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002 #define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003 #define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004 #define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005 #define GB_POWER_SUPPLY_HEALTH_COLD 0x0006 #define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007 #define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008 /* Greybus power supply status values */ #define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000 #define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001 #define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002 #define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003 #define GB_POWER_SUPPLY_STATUS_FULL 0x0004 /* Greybus power supply capacity level values */ #define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000 #define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001 #define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002 #define 
GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003 #define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004 #define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005 /* Greybus power supply scope values */ #define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000 #define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001 #define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002 struct gb_power_supply_get_supplies_response { __u8 supplies_count; } __packed; struct gb_power_supply_get_description_request { __u8 psy_id; } __packed; struct gb_power_supply_get_description_response { __u8 manufacturer[32]; __u8 model[32]; __u8 serial_number[32]; __le16 type; __u8 properties_count; } __packed; struct gb_power_supply_props_desc { __u8 property; #define GB_POWER_SUPPLY_PROP_STATUS 0x00 #define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01 #define GB_POWER_SUPPLY_PROP_HEALTH 0x02 #define GB_POWER_SUPPLY_PROP_PRESENT 0x03 #define GB_POWER_SUPPLY_PROP_ONLINE 0x04 #define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05 #define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06 #define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07 #define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08 #define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09 #define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A #define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B #define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C #define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D #define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E #define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F #define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10 #define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11 #define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12 #define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13 #define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14 #define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15 #define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16 #define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17 #define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18 #define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19 #define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A #define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B #define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C #define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D #define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E #define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F #define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20 #define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21 #define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22 #define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23 #define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24 #define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25 #define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26 #define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27 #define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28 #define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29 #define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A #define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B #define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C #define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D #define GB_POWER_SUPPLY_PROP_TEMP 0x2E #define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F #define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30 #define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31 #define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32 #define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33 #define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34 #define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35 #define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36 #define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37 #define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38 #define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39 #define GB_POWER_SUPPLY_PROP_TYPE 0x3A 
#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B
#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C
#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D
	__u8 is_writeable;
} __packed;

struct gb_power_supply_get_property_descriptors_request {
	__u8 psy_id;
} __packed;

struct gb_power_supply_get_property_descriptors_response {
	__u8 properties_count;
	struct gb_power_supply_props_desc props[];
} __packed;

struct gb_power_supply_get_property_request {
	__u8 psy_id;
	__u8 property;
} __packed;

struct gb_power_supply_get_property_response {
	__le32 prop_val;
};

struct gb_power_supply_set_property_request {
	__u8 psy_id;
	__u8 property;
	__le32 prop_val;
} __packed;

struct gb_power_supply_event_request {
	__u8 psy_id;
	__u8 event;
#define GB_POWER_SUPPLY_UPDATE 0x01
} __packed;

/* HID */

/* Greybus HID operation types */
#define GB_HID_TYPE_GET_DESC 0x02
#define GB_HID_TYPE_GET_REPORT_DESC 0x03
#define GB_HID_TYPE_PWR_ON 0x04
#define GB_HID_TYPE_PWR_OFF 0x05
#define GB_HID_TYPE_GET_REPORT 0x06
#define GB_HID_TYPE_SET_REPORT 0x07
#define GB_HID_TYPE_IRQ_EVENT 0x08

/* Report type */
#define GB_HID_INPUT_REPORT 0
#define GB_HID_OUTPUT_REPORT 1
#define GB_HID_FEATURE_REPORT 2

/* Different request/response structures */

/* HID get descriptor response */
struct gb_hid_desc_response {
	__u8 bLength;
	__le16 wReportDescLength;
	__le16 bcdHID;
	__le16 wProductID;
	__le16 wVendorID;
	__u8 bCountryCode;
} __packed;

/* HID get report request/response */
struct gb_hid_get_report_request {
	__u8 report_type;
	__u8 report_id;
} __packed;

/* HID set report request */
struct gb_hid_set_report_request {
	__u8 report_type;
	__u8 report_id;
	__u8 report[0];
} __packed;

/* HID input report request, via interrupt pipe */
struct gb_hid_input_report_request {
	__u8 report[0];
} __packed;

/* I2C */

/* Greybus i2c request types */
#define GB_I2C_TYPE_FUNCTIONALITY 0x02
#define GB_I2C_TYPE_TRANSFER 0x05

/* functionality request has no payload */
struct gb_i2c_functionality_response {
	__le32 functionality;
} __packed;

/*
 * Outgoing data immediately follows the op count and ops array.
 * The data for each write (master -> slave) op in the array is sent
 * in order, with no (e.g. pad) bytes separating them.
 *
 * Short reads cause the entire transfer request to fail, so the
 * response payload consists only of bytes read, and the number of
 * bytes is exactly what was specified in the corresponding op.  Like
 * outgoing data, the incoming data is in order and contiguous.
 */
struct gb_i2c_transfer_op {
	__le16 addr;
	__le16 flags;
	__le16 size;
} __packed;

struct gb_i2c_transfer_request {
	__le16 op_count;
	struct gb_i2c_transfer_op ops[0]; /* op_count of these */
} __packed;

struct gb_i2c_transfer_response {
	__u8 data[0]; /* inbound data */
} __packed;
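/*
 * Illustrative sketch, not part of the protocol definition: build the
 * payload for a common "write register address, then read it back" i2c
 * transaction using the structures above.  Assumes kernel context
 * (kzalloc(), cpu_to_le16()); the read flag value mirrors I2C_M_RD
 * (0x0001) from <uapi/linux/i2c.h>.  Names here are examples only.
 */
static inline struct gb_i2c_transfer_request *
gb_i2c_example_read_reg(u16 i2c_addr, u8 reg, u16 read_len,
			size_t *request_size)
{
	struct gb_i2c_transfer_request *request;
	u8 *wdata;

	/* Header, two ops, then the single outgoing write byte. */
	*request_size = sizeof(*request) +
			2 * sizeof(struct gb_i2c_transfer_op) + 1;
	request = kzalloc(*request_size, GFP_KERNEL);
	if (!request)
		return NULL;

	request->op_count = cpu_to_le16(2);

	/* Op 0: write one byte (the register address) to the device. */
	request->ops[0].addr = cpu_to_le16(i2c_addr);
	request->ops[0].flags = cpu_to_le16(0);
	request->ops[0].size = cpu_to_le16(1);

	/* Op 1: read back read_len bytes from the same device. */
	request->ops[1].addr = cpu_to_le16(i2c_addr);
	request->ops[1].flags = cpu_to_le16(0x0001); /* I2C_M_RD */
	request->ops[1].size = cpu_to_le16(read_len);

	/* Outgoing data immediately follows the ops array, unpadded. */
	wdata = (u8 *)&request->ops[2];
	wdata[0] = reg;

	return request;
}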
/* GPIO */

/* Greybus GPIO request types */
#define GB_GPIO_TYPE_LINE_COUNT 0x02
#define GB_GPIO_TYPE_ACTIVATE 0x03
#define GB_GPIO_TYPE_DEACTIVATE 0x04
#define GB_GPIO_TYPE_GET_DIRECTION 0x05
#define GB_GPIO_TYPE_DIRECTION_IN 0x06
#define GB_GPIO_TYPE_DIRECTION_OUT 0x07
#define GB_GPIO_TYPE_GET_VALUE 0x08
#define GB_GPIO_TYPE_SET_VALUE 0x09
#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a
#define GB_GPIO_TYPE_IRQ_TYPE 0x0b
#define GB_GPIO_TYPE_IRQ_MASK 0x0c
#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d
#define GB_GPIO_TYPE_IRQ_EVENT 0x0e

#define GB_GPIO_IRQ_TYPE_NONE 0x00
#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01
#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08

/* line count request has no payload */
struct gb_gpio_line_count_response {
	__u8 count;
} __packed;

struct gb_gpio_activate_request {
	__u8 which;
} __packed;
/* activate response has no payload */

struct gb_gpio_deactivate_request {
	__u8 which;
} __packed;
/* deactivate response has no payload */

struct gb_gpio_get_direction_request {
	__u8 which;
} __packed;

struct gb_gpio_get_direction_response {
	__u8 direction;
} __packed;

struct gb_gpio_direction_in_request {
	__u8 which;
} __packed;
/* direction in response has no payload */

struct gb_gpio_direction_out_request {
	__u8 which;
	__u8 value;
} __packed;
/* direction out response has no payload */

struct gb_gpio_get_value_request {
	__u8 which;
} __packed;

struct gb_gpio_get_value_response {
	__u8 value;
} __packed;

struct gb_gpio_set_value_request {
	__u8 which;
	__u8 value;
} __packed;
/* set value response has no payload */

struct gb_gpio_set_debounce_request {
	__u8 which;
	__le16 usec;
} __packed;
/* debounce response has no payload */

struct gb_gpio_irq_type_request {
	__u8 which;
	__u8 type;
} __packed;
/* irq type response has no payload */

struct gb_gpio_irq_mask_request {
	__u8 which;
} __packed;
/* irq mask response has no payload */

struct gb_gpio_irq_unmask_request {
	__u8 which;
} __packed;
/* irq unmask response has no payload */

/* irq event requests originate on another module and are handled on the AP */
struct gb_gpio_irq_event_request {
	__u8 which;
} __packed;
/* irq event has no response */

/* PWM */

/* Greybus PWM operation types */
#define GB_PWM_TYPE_PWM_COUNT 0x02
#define GB_PWM_TYPE_ACTIVATE 0x03
#define GB_PWM_TYPE_DEACTIVATE 0x04
#define GB_PWM_TYPE_CONFIG 0x05
#define GB_PWM_TYPE_POLARITY 0x06
#define GB_PWM_TYPE_ENABLE 0x07
#define GB_PWM_TYPE_DISABLE 0x08

/* pwm count request has no payload */
struct gb_pwm_count_response {
	__u8 count;
} __packed;

struct gb_pwm_activate_request {
	__u8 which;
} __packed;

struct gb_pwm_deactivate_request {
	__u8 which;
} __packed;

struct gb_pwm_config_request {
	__u8 which;
	__le32 duty;
	__le32 period;
} __packed;

struct gb_pwm_polarity_request {
	__u8 which;
	__u8 polarity;
} __packed;

struct gb_pwm_enable_request {
	__u8 which;
} __packed;

struct gb_pwm_disable_request {
	__u8 which;
} __packed;
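/*
 * Illustrative sketch, not part of the protocol definition: fill a PWM
 * config request for a 1 kHz signal at a 25% duty cycle.  Both fields
 * are assumed to be in nanoseconds, matching the Linux PWM framework's
 * convention; cpu_to_le32() assumes kernel context.
 */
static inline void gb_pwm_example_fill_config(struct gb_pwm_config_request *req,
					      u8 which)
{
	req->which = which;			/* PWM line, 0-based */
	req->period = cpu_to_le32(1000000);	/* 1 ms period -> 1 kHz */
	req->duty = cpu_to_le32(250000);	/* high for 25% of the period */
}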
/* SPI */

/* Should match up with modes in linux/spi/spi.h */
#define GB_SPI_MODE_CPHA 0x01 /* clock phase */
#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */
#define GB_SPI_MODE_MODE_0 (0 | 0) /* (original MicroWire) */
#define GB_SPI_MODE_MODE_1 (0 | GB_SPI_MODE_CPHA)
#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL | 0)
#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL | GB_SPI_MODE_CPHA)
#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */
#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */
#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */
#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */
#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */
#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */

/* Should match up with flags in linux/spi/spi.h */
#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */
#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */
#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */

/* Greybus spi operation types */
#define GB_SPI_TYPE_MASTER_CONFIG 0x02
#define GB_SPI_TYPE_DEVICE_CONFIG 0x03
#define GB_SPI_TYPE_TRANSFER 0x04

/* master config request has no payload */
struct gb_spi_master_config_response {
	__le32 bits_per_word_mask;
	__le32 min_speed_hz;
	__le32 max_speed_hz;
	__le16 mode;
	__le16 flags;
	__u8 num_chipselect;
} __packed;

struct gb_spi_device_config_request {
	__u8 chip_select;
} __packed;

struct gb_spi_device_config_response {
	__le16 mode;
	__u8 bits_per_word;
	__le32 max_speed_hz;
	__u8 device_type;
#define GB_SPI_SPI_DEV 0x00
#define GB_SPI_SPI_NOR 0x01
#define GB_SPI_SPI_MODALIAS 0x02
	__u8 name[32];
} __packed;

/**
 * struct gb_spi_transfer - a read/write buffer pair
 * @speed_hz: Select a speed other than the device default for this transfer.
 *	If 0 the default (from @spi_device) is used.
 * @len: size of rx and tx buffers (in bytes)
 * @delay_usecs: microseconds to delay after this transfer before (optionally)
 *	changing the chipselect status, then starting the next transfer or
 *	completing this spi_message.
 * @cs_change: affects chipselect after this transfer completes
 * @bits_per_word: select a bits_per_word other than the device default for
 *	this transfer. If 0 the default (from @spi_device) is used.
 */
struct gb_spi_transfer {
	__le32 speed_hz;
	__le32 len;
	__le16 delay_usecs;
	__u8 cs_change;
	__u8 bits_per_word;
	__u8 xfer_flags;
#define GB_SPI_XFER_READ 0x01
#define GB_SPI_XFER_WRITE 0x02
#define GB_SPI_XFER_INPROGRESS 0x04
} __packed;

struct gb_spi_transfer_request {
	__u8 chip_select; /* of the spi device */
	__u8 mode; /* of the spi device */
	__le16 count;
	struct gb_spi_transfer transfers[0]; /* count of these */
} __packed;

struct gb_spi_transfer_response {
	__u8 data[0]; /* inbound data */
} __packed;
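/*
 * Illustrative sketch, not part of the protocol definition: lay out a
 * request holding a single write transfer.  This assumes the outgoing
 * data is packed immediately after the transfers[] array, the same
 * contiguous layout the i2c transfer comment above describes, and
 * kernel context for kzalloc()/cpu_to_le16()/memcpy().
 */
static inline struct gb_spi_transfer_request *
gb_spi_example_single_write(u8 chip_select, const u8 *buf, u16 len,
			    size_t *request_size)
{
	struct gb_spi_transfer_request *request;

	*request_size = sizeof(*request) + sizeof(struct gb_spi_transfer) + len;
	request = kzalloc(*request_size, GFP_KERNEL);
	if (!request)
		return NULL;

	request->chip_select = chip_select;
	request->count = cpu_to_le16(1);
	request->transfers[0].len = cpu_to_le32(len);
	request->transfers[0].xfer_flags = GB_SPI_XFER_WRITE;

	/* tx data follows the descriptor array, no padding in between */
	memcpy(&request->transfers[1], buf, len);

	return request;
}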
/* Version of the Greybus SVC protocol we support */
#define GB_SVC_VERSION_MAJOR 0x00
#define GB_SVC_VERSION_MINOR 0x01

/* Greybus SVC request types */
#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01
#define GB_SVC_TYPE_SVC_HELLO 0x02
#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03
#define GB_SVC_TYPE_INTF_RESET 0x06
#define GB_SVC_TYPE_CONN_CREATE 0x07
#define GB_SVC_TYPE_CONN_DESTROY 0x08
#define GB_SVC_TYPE_DME_PEER_GET 0x09
#define GB_SVC_TYPE_DME_PEER_SET 0x0a
#define GB_SVC_TYPE_ROUTE_CREATE 0x0b
#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c
#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d
#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e
#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f
#define GB_SVC_TYPE_INTF_SET_PWRM 0x10
#define GB_SVC_TYPE_INTF_EJECT 0x11
#define GB_SVC_TYPE_PING 0x13
#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14
#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15
#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16
#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17
#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18
#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19
#define GB_SVC_TYPE_TIMESYNC_PING 0x1a
#define GB_SVC_TYPE_MODULE_INSERTED 0x1f
#define GB_SVC_TYPE_MODULE_REMOVED 0x20
#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21
#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22
#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23
#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24
#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25
#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26
#define GB_SVC_TYPE_INTF_ACTIVATE 0x27
#define GB_SVC_TYPE_INTF_RESUME 0x28
#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29
#define GB_SVC_TYPE_INTF_OOPS 0x2a

/* Greybus SVC protocol status values */
#define GB_SVC_OP_SUCCESS 0x00
#define GB_SVC_OP_UNKNOWN_ERROR 0x01
#define GB_SVC_INTF_NOT_DETECTED 0x02
#define GB_SVC_INTF_NO_UPRO_LINK 0x03
#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04
#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05
#define GB_SVC_INTF_NO_V_SYS 0x06
#define GB_SVC_INTF_V_CHG 0x07
#define GB_SVC_INTF_WAKE_BUSY 0x08
#define GB_SVC_INTF_NO_REFCLK 0x09
#define GB_SVC_INTF_RELEASING 0x0a
#define GB_SVC_INTF_NO_ORDER 0x0b
#define GB_SVC_INTF_MBOX_SET 0x0c
#define GB_SVC_INTF_BAD_MBOX 0x0d
#define GB_SVC_INTF_OP_TIMEOUT 0x0e
#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f

struct gb_svc_version_request {
	__u8 major;
	__u8 minor;
} __packed;

struct gb_svc_version_response {
	__u8 major;
	__u8 minor;
} __packed;

/* SVC protocol hello request */
struct gb_svc_hello_request {
	__le16 endo_id;
	__u8 interface_id;
} __packed;
/* hello response has no payload */

struct gb_svc_intf_device_id_request {
	__u8 intf_id;
	__u8 device_id;
} __packed;
/* device id response has no payload */

struct gb_svc_intf_reset_request {
	__u8 intf_id;
} __packed;
/* interface reset response has no payload */

struct gb_svc_intf_eject_request {
	__u8 intf_id;
} __packed;
/* interface eject response has no payload */

struct gb_svc_conn_create_request {
	__u8 intf1_id;
	__le16 cport1_id;
	__u8 intf2_id;
	__le16 cport2_id;
	__u8 tc;
	__u8 flags;
} __packed;
/* connection create response has no payload */

struct gb_svc_conn_destroy_request {
	__u8 intf1_id;
	__le16 cport1_id;
	__u8 intf2_id;
	__le16 cport2_id;
} __packed;
/* connection destroy response has no payload */

struct gb_svc_dme_peer_get_request {
	__u8 intf_id;
	__le16 attr;
	__le16 selector;
} __packed;

struct gb_svc_dme_peer_get_response {
	__le16 result_code;
	__le32 attr_value;
} __packed;

struct gb_svc_dme_peer_set_request {
	__u8 intf_id;
	__le16 attr;
	__le16 selector;
	__le32 value;
} __packed;

struct gb_svc_dme_peer_set_response {
	__le16 result_code;
} __packed;

/* Greybus init-status values, currently retrieved using DME peer gets. */
#define GB_INIT_SPI_BOOT_STARTED 0x02
#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03
#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04
#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06
#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09
#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D

struct gb_svc_route_create_request {
	__u8 intf1_id;
	__u8 dev1_id;
	__u8 intf2_id;
	__u8 dev2_id;
} __packed;
/* route create response has no payload */

struct gb_svc_route_destroy_request {
	__u8 intf1_id;
	__u8 intf2_id;
} __packed;
/* route destroy response has no payload */

/* used for svc_intf_vsys_{enable,disable} */
struct gb_svc_intf_vsys_request {
	__u8 intf_id;
} __packed;

struct gb_svc_intf_vsys_response {
	__u8 result_code;
#define GB_SVC_INTF_VSYS_OK 0x00
/* 0x01 is reserved */
#define GB_SVC_INTF_VSYS_FAIL 0x02
} __packed;

/* used for svc_intf_refclk_{enable,disable} */
struct gb_svc_intf_refclk_request {
	__u8 intf_id;
} __packed;

struct gb_svc_intf_refclk_response {
	__u8 result_code;
#define GB_SVC_INTF_REFCLK_OK 0x00
/* 0x01 is reserved */
#define GB_SVC_INTF_REFCLK_FAIL 0x02
} __packed;

/* used for svc_intf_unipro_{enable,disable} */
struct gb_svc_intf_unipro_request {
	__u8 intf_id;
} __packed;

struct gb_svc_intf_unipro_response {
	__u8 result_code;
#define GB_SVC_INTF_UNIPRO_OK 0x00
/* 0x01 is reserved */
#define GB_SVC_INTF_UNIPRO_FAIL 0x02
#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03
} __packed;

#define GB_SVC_UNIPRO_FAST_MODE 0x01
#define GB_SVC_UNIPRO_SLOW_MODE 0x02
#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04
#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05
#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07
#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11
#define GB_SVC_UNIPRO_OFF_MODE 0x12

#define GB_SVC_SMALL_AMPLITUDE 0x01
#define GB_SVC_LARGE_AMPLITUDE 0x02

#define GB_SVC_NO_DE_EMPHASIS 0x00
#define GB_SVC_SMALL_DE_EMPHASIS 0x01
#define GB_SVC_LARGE_DE_EMPHASIS 0x02

#define GB_SVC_PWRM_RXTERMINATION 0x01
#define GB_SVC_PWRM_TXTERMINATION 0x02
#define GB_SVC_PWRM_LINE_RESET 0x04
#define GB_SVC_PWRM_SCRAMBLING 0x20

#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001

#define GB_SVC_UNIPRO_HS_SERIES_A 0x01
#define GB_SVC_UNIPRO_HS_SERIES_B 0x02

#define GB_SVC_SETPWRM_PWR_OK 0x00
#define GB_SVC_SETPWRM_PWR_LOCAL 0x01
#define GB_SVC_SETPWRM_PWR_REMOTE 0x02
#define GB_SVC_SETPWRM_PWR_BUSY 0x03
#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04
#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05

struct gb_svc_l2_timer_cfg {
	__le16 tsb_fc0_protection_timeout;
	__le16 tsb_tc0_replay_timeout;
	__le16 tsb_afc0_req_timeout;
	__le16 tsb_fc1_protection_timeout;
	__le16 tsb_tc1_replay_timeout;
	__le16 tsb_afc1_req_timeout;
	__le16 reserved_for_tc2[3];
	__le16 reserved_for_tc3[3];
} __packed;

struct gb_svc_intf_set_pwrm_request {
	__u8 intf_id;
	__u8 hs_series;
	__u8 tx_mode;
	__u8 tx_gear;
	__u8 tx_nlanes;
	__u8 tx_amplitude;
	__u8 tx_hs_equalizer;
	__u8 rx_mode;
	__u8 rx_gear;
	__u8 rx_nlanes;
	__u8 flags;
	__le32 quirks;
	struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata;
} __packed;
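/*
 * Illustrative sketch, not part of the protocol definition: populate a
 * power-mode change request for Fast mode, gear 2, one lane in each
 * direction on HS series B.  The L2 timer blocks are left zeroed for
 * brevity; real callers supply tuned values (see
 * gb_svc_intf_set_power_mode()).  memset() assumes kernel context.
 */
static inline void
gb_svc_example_fill_set_pwrm(struct gb_svc_intf_set_pwrm_request *req,
			     u8 intf_id)
{
	memset(req, 0, sizeof(*req));	/* zeroes both l2 timer configs */
	req->intf_id = intf_id;
	req->hs_series = GB_SVC_UNIPRO_HS_SERIES_B;
	req->tx_mode = GB_SVC_UNIPRO_FAST_MODE;
	req->tx_gear = 2;
	req->tx_nlanes = 1;
	req->rx_mode = GB_SVC_UNIPRO_FAST_MODE;
	req->rx_gear = 2;
	req->rx_nlanes = 1;
}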
struct gb_svc_intf_set_pwrm_response { __u8 result_code; } __packed; struct gb_svc_key_event_request { __le16 key_code; #define GB_KEYCODE_ARA 0x00 __u8 key_event; #define GB_SVC_KEY_RELEASED 0x00 #define GB_SVC_KEY_PRESSED 0x01 } __packed; #define GB_SVC_PWRMON_MAX_RAIL_COUNT 254 struct gb_svc_pwrmon_rail_count_get_response { __u8 rail_count; } __packed; #define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE 32 struct gb_svc_pwrmon_rail_names_get_response { __u8 status; __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; } __packed; #define GB_SVC_PWRMON_TYPE_CURR 0x01 #define GB_SVC_PWRMON_TYPE_VOL 0x02 #define GB_SVC_PWRMON_TYPE_PWR 0x03 #define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00 #define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01 #define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02 #define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03 struct gb_svc_pwrmon_sample_get_request { __u8 rail_id; __u8 measurement_type; } __packed; struct gb_svc_pwrmon_sample_get_response { __u8 result; __le32 measurement; } __packed; struct gb_svc_pwrmon_intf_sample_get_request { __u8 intf_id; __u8 measurement_type; } __packed; struct gb_svc_pwrmon_intf_sample_get_response { __u8 result; __le32 measurement; } __packed; #define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001 struct gb_svc_module_inserted_request { __u8 primary_intf_id; __u8 intf_count; __le16 flags; } __packed; /* module_inserted response has no payload */ struct gb_svc_module_removed_request { __u8 primary_intf_id; } __packed; /* module_removed response has no payload */ struct gb_svc_intf_activate_request { __u8 intf_id; } __packed; #define GB_SVC_INTF_TYPE_UNKNOWN 0x00 #define GB_SVC_INTF_TYPE_DUMMY 0x01 #define GB_SVC_INTF_TYPE_UNIPRO 0x02 #define GB_SVC_INTF_TYPE_GREYBUS 0x03 struct gb_svc_intf_activate_response { __u8 status; __u8 intf_type; } __packed; struct gb_svc_intf_resume_request { __u8 intf_id; } __packed; struct gb_svc_intf_resume_response { __u8 status; } __packed; #define GB_SVC_INTF_MAILBOX_NONE 0x00 #define GB_SVC_INTF_MAILBOX_AP 0x01 #define GB_SVC_INTF_MAILBOX_GREYBUS 0x02 struct gb_svc_intf_mailbox_event_request { __u8 intf_id; __le16 result_code; __le32 mailbox; } __packed; /* intf_mailbox_event response has no payload */ struct gb_svc_intf_oops_request { __u8 intf_id; __u8 reason; } __packed; /* intf_oops response has no payload */ /* RAW */ /* Greybus raw request types */ #define GB_RAW_TYPE_SEND 0x02 struct gb_raw_send_request { __le32 len; __u8 data[0]; } __packed; /* UART */ /* Greybus UART operation types */ #define GB_UART_TYPE_SEND_DATA 0x02 #define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */ #define GB_UART_TYPE_SET_LINE_CODING 0x04 #define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05 #define GB_UART_TYPE_SEND_BREAK 0x06 #define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */ #define GB_UART_TYPE_RECEIVE_CREDITS 0x08 #define GB_UART_TYPE_FLUSH_FIFOS 0x09 /* Represents data from AP -> Module */ struct gb_uart_send_data_request { __le16 size; __u8 data[0]; } __packed; /* recv-data-request flags */ #define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */ #define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */ #define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */ #define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */ /* Represents data from Module -> AP */ struct gb_uart_recv_data_request { __le16 size; __u8 flags; __u8 data[0]; } __packed; struct gb_uart_receive_credits_request { __le16 count; } __packed; struct gb_uart_set_line_coding_request { __le32 rate; __u8 format; #define GB_SERIAL_1_STOP_BITS 0 #define GB_SERIAL_1_5_STOP_BITS 1 #define 
GB_SERIAL_2_STOP_BITS 2 __u8 parity; #define GB_SERIAL_NO_PARITY 0 #define GB_SERIAL_ODD_PARITY 1 #define GB_SERIAL_EVEN_PARITY 2 #define GB_SERIAL_MARK_PARITY 3 #define GB_SERIAL_SPACE_PARITY 4 __u8 data_bits; __u8 flow_control; #define GB_SERIAL_AUTO_RTSCTS_EN 0x1 } __packed; /* output control lines */ #define GB_UART_CTRL_DTR 0x01 #define GB_UART_CTRL_RTS 0x02 struct gb_uart_set_control_line_state_request { __u8 control; } __packed; struct gb_uart_set_break_request { __u8 state; } __packed; /* input control lines and line errors */ #define GB_UART_CTRL_DCD 0x01 #define GB_UART_CTRL_DSR 0x02 #define GB_UART_CTRL_RI 0x04 struct gb_uart_serial_state_request { __u8 control; } __packed; struct gb_uart_serial_flush_request { __u8 flags; #define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01 #define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02 } __packed; /* Loopback */ /* Greybus loopback request types */ #define GB_LOOPBACK_TYPE_PING 0x02 #define GB_LOOPBACK_TYPE_TRANSFER 0x03 #define GB_LOOPBACK_TYPE_SINK 0x04 /* * Loopback request/response header format should be identical * to simplify bandwidth and data movement analysis. */ struct gb_loopback_transfer_request { __le32 len; __le32 reserved0; __le32 reserved1; __u8 data[0]; } __packed; struct gb_loopback_transfer_response { __le32 len; __le32 reserved0; __le32 reserved1; __u8 data[0]; } __packed; /* SDIO */ /* Greybus SDIO operation types */ #define GB_SDIO_TYPE_GET_CAPABILITIES 0x02 #define GB_SDIO_TYPE_SET_IOS 0x03 #define GB_SDIO_TYPE_COMMAND 0x04 #define GB_SDIO_TYPE_TRANSFER 0x05 #define GB_SDIO_TYPE_EVENT 0x06 /* get caps response: request has no payload */ struct gb_sdio_get_caps_response { __le32 caps; #define GB_SDIO_CAP_NONREMOVABLE 0x00000001 #define GB_SDIO_CAP_4_BIT_DATA 0x00000002 #define GB_SDIO_CAP_8_BIT_DATA 0x00000004 #define GB_SDIO_CAP_MMC_HS 0x00000008 #define GB_SDIO_CAP_SD_HS 0x00000010 #define GB_SDIO_CAP_ERASE 0x00000020 #define GB_SDIO_CAP_1_2V_DDR 0x00000040 #define GB_SDIO_CAP_1_8V_DDR 0x00000080 #define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100 #define GB_SDIO_CAP_UHS_SDR12 0x00000200 #define GB_SDIO_CAP_UHS_SDR25 0x00000400 #define GB_SDIO_CAP_UHS_SDR50 0x00000800 #define GB_SDIO_CAP_UHS_SDR104 0x00001000 #define GB_SDIO_CAP_UHS_DDR50 0x00002000 #define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000 #define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000 #define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000 #define GB_SDIO_CAP_HS200_1_2V 0x00020000 #define GB_SDIO_CAP_HS200_1_8V 0x00040000 #define GB_SDIO_CAP_HS400_1_2V 0x00080000 #define GB_SDIO_CAP_HS400_1_8V 0x00100000 /* see possible values below at vdd */ __le32 ocr; __le32 f_min; __le32 f_max; __le16 max_blk_count; __le16 max_blk_size; } __packed; /* set ios request: response has no payload */ struct gb_sdio_set_ios_request { __le32 clock; __le32 vdd; #define GB_SDIO_VDD_165_195 0x00000001 #define GB_SDIO_VDD_20_21 0x00000002 #define GB_SDIO_VDD_21_22 0x00000004 #define GB_SDIO_VDD_22_23 0x00000008 #define GB_SDIO_VDD_23_24 0x00000010 #define GB_SDIO_VDD_24_25 0x00000020 #define GB_SDIO_VDD_25_26 0x00000040 #define GB_SDIO_VDD_26_27 0x00000080 #define GB_SDIO_VDD_27_28 0x00000100 #define GB_SDIO_VDD_28_29 0x00000200 #define GB_SDIO_VDD_29_30 0x00000400 #define GB_SDIO_VDD_30_31 0x00000800 #define GB_SDIO_VDD_31_32 0x00001000 #define GB_SDIO_VDD_32_33 0x00002000 #define GB_SDIO_VDD_33_34 0x00004000 #define GB_SDIO_VDD_34_35 0x00008000 #define GB_SDIO_VDD_35_36 0x00010000 __u8 bus_mode; #define GB_SDIO_BUSMODE_OPENDRAIN 0x00 #define GB_SDIO_BUSMODE_PUSHPULL 0x01 __u8 power_mode; #define GB_SDIO_POWER_OFF 
0x00 #define GB_SDIO_POWER_UP 0x01 #define GB_SDIO_POWER_ON 0x02 #define GB_SDIO_POWER_UNDEFINED 0x03 __u8 bus_width; #define GB_SDIO_BUS_WIDTH_1 0x00 #define GB_SDIO_BUS_WIDTH_4 0x02 #define GB_SDIO_BUS_WIDTH_8 0x03 __u8 timing; #define GB_SDIO_TIMING_LEGACY 0x00 #define GB_SDIO_TIMING_MMC_HS 0x01 #define GB_SDIO_TIMING_SD_HS 0x02 #define GB_SDIO_TIMING_UHS_SDR12 0x03 #define GB_SDIO_TIMING_UHS_SDR25 0x04 #define GB_SDIO_TIMING_UHS_SDR50 0x05 #define GB_SDIO_TIMING_UHS_SDR104 0x06 #define GB_SDIO_TIMING_UHS_DDR50 0x07 #define GB_SDIO_TIMING_MMC_DDR52 0x08 #define GB_SDIO_TIMING_MMC_HS200 0x09 #define GB_SDIO_TIMING_MMC_HS400 0x0A __u8 signal_voltage; #define GB_SDIO_SIGNAL_VOLTAGE_330 0x00 #define GB_SDIO_SIGNAL_VOLTAGE_180 0x01 #define GB_SDIO_SIGNAL_VOLTAGE_120 0x02 __u8 drv_type; #define GB_SDIO_SET_DRIVER_TYPE_B 0x00 #define GB_SDIO_SET_DRIVER_TYPE_A 0x01 #define GB_SDIO_SET_DRIVER_TYPE_C 0x02 #define GB_SDIO_SET_DRIVER_TYPE_D 0x03 } __packed; /* command request */ struct gb_sdio_command_request { __u8 cmd; __u8 cmd_flags; #define GB_SDIO_RSP_NONE 0x00 #define GB_SDIO_RSP_PRESENT 0x01 #define GB_SDIO_RSP_136 0x02 #define GB_SDIO_RSP_CRC 0x04 #define GB_SDIO_RSP_BUSY 0x08 #define GB_SDIO_RSP_OPCODE 0x10 __u8 cmd_type; #define GB_SDIO_CMD_AC 0x00 #define GB_SDIO_CMD_ADTC 0x01 #define GB_SDIO_CMD_BC 0x02 #define GB_SDIO_CMD_BCR 0x03 __le32 cmd_arg; __le16 data_blocks; __le16 data_blksz; } __packed; struct gb_sdio_command_response { __le32 resp[4]; } __packed; /* transfer request */ struct gb_sdio_transfer_request { __u8 data_flags; #define GB_SDIO_DATA_WRITE 0x01 #define GB_SDIO_DATA_READ 0x02 #define GB_SDIO_DATA_STREAM 0x04 __le16 data_blocks; __le16 data_blksz; __u8 data[0]; } __packed; struct gb_sdio_transfer_response { __le16 data_blocks; __le16 data_blksz; __u8 data[0]; } __packed; /* event request: generated by module and is defined as unidirectional */ struct gb_sdio_event_request { __u8 event; #define GB_SDIO_CARD_INSERTED 0x01 #define GB_SDIO_CARD_REMOVED 0x02 #define GB_SDIO_WP 0x04 } __packed; /* Camera */ /* Greybus Camera request types */ #define GB_CAMERA_TYPE_CAPABILITIES 0x02 #define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03 #define GB_CAMERA_TYPE_CAPTURE 0x04 #define GB_CAMERA_TYPE_FLUSH 0x05 #define GB_CAMERA_TYPE_METADATA 0x06 #define GB_CAMERA_MAX_STREAMS 4 #define GB_CAMERA_MAX_SETTINGS_SIZE 8192 /* Greybus Camera Configure Streams request payload */ struct gb_camera_stream_config_request { __le16 width; __le16 height; __le16 format; __le16 padding; } __packed; struct gb_camera_configure_streams_request { __u8 num_streams; __u8 flags; #define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01 __le16 padding; struct gb_camera_stream_config_request config[0]; } __packed; /* Greybus Camera Configure Streams response payload */ struct gb_camera_stream_config_response { __le16 width; __le16 height; __le16 format; __u8 virtual_channel; __u8 data_type[2]; __le16 max_pkt_size; __u8 padding; __le32 max_size; } __packed; struct gb_camera_configure_streams_response { __u8 num_streams; #define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01 __u8 flags; __u8 padding[2]; __le32 data_rate; struct gb_camera_stream_config_response config[0]; }; /* Greybus Camera Capture request payload - response has no payload */ struct gb_camera_capture_request { __le32 request_id; __u8 streams; __u8 padding; __le16 num_frames; __u8 settings[0]; } __packed; /* Greybus Camera Flush response payload - request has no payload */ struct gb_camera_flush_response { __le32 request_id; } __packed; /* Greybus Camera Metadata 
request payload - operation has no response */
struct gb_camera_metadata_request {
	__le32 request_id;
	__le16 frame_number;
	__u8 stream;
	__u8 padding;
	__u8 metadata[0];
} __packed;

/* Lights */

/* Greybus Lights request types */
#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02
#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03
#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04
#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05
#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06
#define GB_LIGHTS_TYPE_SET_BLINK 0x07
#define GB_LIGHTS_TYPE_SET_COLOR 0x08
#define GB_LIGHTS_TYPE_SET_FADE 0x09
#define GB_LIGHTS_TYPE_EVENT 0x0A
#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B
#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C
#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D
#define GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E

/* Greybus Light modes */

/*
 * if you add any specific mode below, update also the
 * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly
 */
#define GB_CHANNEL_MODE_NONE 0x00000000
#define GB_CHANNEL_MODE_BATTERY 0x00000001
#define GB_CHANNEL_MODE_POWER 0x00000002
#define GB_CHANNEL_MODE_WIRELESS 0x00000004
#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008
#define GB_CHANNEL_MODE_KEYBOARD 0x00000010
#define GB_CHANNEL_MODE_BUTTONS 0x00000020
#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040
#define GB_CHANNEL_MODE_ATTENTION 0x00000080
#define GB_CHANNEL_MODE_FLASH 0x00000100
#define GB_CHANNEL_MODE_TORCH 0x00000200
#define GB_CHANNEL_MODE_INDICATOR 0x00000400

/* Lights Mode valid bit values */
#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF
#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000

/* Greybus Light Channels Flags */
#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001
#define GB_LIGHT_CHANNEL_FADER 0x00000002
#define GB_LIGHT_CHANNEL_BLINK 0x00000004

/* get count of lights in module */
struct gb_lights_get_lights_response {
	__u8 lights_count;
} __packed;

/* light config request payload */
struct gb_lights_get_light_config_request {
	__u8 id;
} __packed;

/* light config response payload */
struct gb_lights_get_light_config_response {
	__u8 channel_count;
	__u8 name[32];
} __packed;

/* channel config request payload */
struct gb_lights_get_channel_config_request {
	__u8 light_id;
	__u8 channel_id;
} __packed;

/* channel flash config request payload */
struct gb_lights_get_channel_flash_config_request {
	__u8 light_id;
	__u8 channel_id;
} __packed;

/* channel config response payload */
struct gb_lights_get_channel_config_response {
	__u8 max_brightness;
	__le32 flags;
	__le32 color;
	__u8 color_name[32];
	__le32 mode;
	__u8 mode_name[32];
} __packed;

/* channel flash config response payload */
struct gb_lights_get_channel_flash_config_response {
	__le32 intensity_min_uA;
	__le32 intensity_max_uA;
	__le32 intensity_step_uA;
	__le32 timeout_min_us;
	__le32 timeout_max_us;
	__le32 timeout_step_us;
} __packed;

/* blink request payload: response has no payload */
struct gb_lights_blink_request {
	__u8 light_id;
	__u8 channel_id;
	__le16 time_on_ms;
	__le16 time_off_ms;
} __packed;

/* set brightness request payload: response has no payload */
struct gb_lights_set_brightness_request {
	__u8 light_id;
	__u8 channel_id;
	__u8 brightness;
} __packed;

/* set color request payload: response has no payload */
struct gb_lights_set_color_request {
	__u8 light_id;
	__u8 channel_id;
	__le32 color;
} __packed;

/* set fade request payload: response has no payload */
struct gb_lights_set_fade_request {
	__u8 light_id;
	__u8 channel_id;
	__u8 fade_in;
	__u8 fade_out;
} __packed;

/* event request: generated by module */
struct gb_lights_event_request {
	__u8 light_id;
	__u8 event;
#define GB_LIGHTS_LIGHT_CONFIG 0x01
} __packed;
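/*
 * Illustrative sketch, not part of the protocol definition: request a
 * 1 Hz blink with a 50% on-time on channel 0 of light 0, using the
 * blink request defined above.  Times are in milliseconds, per the
 * field names; cpu_to_le16() assumes kernel context.
 */
static inline void
gb_lights_example_fill_blink(struct gb_lights_blink_request *req)
{
	req->light_id = 0;
	req->channel_id = 0;
	req->time_on_ms = cpu_to_le16(500);	/* on for half the period */
	req->time_off_ms = cpu_to_le16(500);	/* off for the other half */
}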
/* set flash intensity request payload: response has no payload */
struct gb_lights_set_flash_intensity_request {
	__u8 light_id;
	__u8 channel_id;
	__le32 intensity_uA;
} __packed;

/* set flash strobe state request payload: response has no payload */
struct gb_lights_set_flash_strobe_request {
	__u8 light_id;
	__u8 channel_id;
	__u8 state;
} __packed;

/* set flash timeout request payload: response has no payload */
struct gb_lights_set_flash_timeout_request {
	__u8 light_id;
	__u8 channel_id;
	__le32 timeout_us;
} __packed;

/* get flash fault request payload */
struct gb_lights_get_flash_fault_request {
	__u8 light_id;
	__u8 channel_id;
} __packed;

/* get flash fault response payload */
struct gb_lights_get_flash_fault_response {
	__le32 fault;
#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000
#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001
#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002
#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004
#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008
#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010
#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020
#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040
#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080
} __packed;

/* Audio */
#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02
#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03
#define GB_AUDIO_TYPE_GET_CONTROL 0x04
#define GB_AUDIO_TYPE_SET_CONTROL 0x05
#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06
#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07
#define GB_AUDIO_TYPE_GET_PCM 0x08
#define GB_AUDIO_TYPE_SET_PCM 0x09
#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a
/* 0x0b unused */
#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c
#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d
#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e
/* 0x0f unused */
#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10
#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11
#define GB_AUDIO_TYPE_JACK_EVENT 0x12
#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13
#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14
#define GB_AUDIO_TYPE_SEND_DATA 0x15

/* Module must be able to buffer 10ms of audio data, minimum */
#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000

#define GB_AUDIO_PCM_NAME_MAX 32
#define AUDIO_DAI_NAME_MAX 32
#define AUDIO_CONTROL_NAME_MAX 32
#define AUDIO_CTL_ELEM_NAME_MAX 44
#define AUDIO_ENUM_NAME_MAX 64
#define AUDIO_WIDGET_NAME_MAX 32

/* See SNDRV_PCM_FMTBIT_* in Linux source */
#define GB_AUDIO_PCM_FMT_S8 BIT(0)
#define GB_AUDIO_PCM_FMT_U8 BIT(1)
#define GB_AUDIO_PCM_FMT_S16_LE BIT(2)
#define GB_AUDIO_PCM_FMT_S16_BE BIT(3)
#define GB_AUDIO_PCM_FMT_U16_LE BIT(4)
#define GB_AUDIO_PCM_FMT_U16_BE BIT(5)
#define GB_AUDIO_PCM_FMT_S24_LE BIT(6)
#define GB_AUDIO_PCM_FMT_S24_BE BIT(7)
#define GB_AUDIO_PCM_FMT_U24_LE BIT(8)
#define GB_AUDIO_PCM_FMT_U24_BE BIT(9)
#define GB_AUDIO_PCM_FMT_S32_LE BIT(10)
#define GB_AUDIO_PCM_FMT_S32_BE BIT(11)
#define GB_AUDIO_PCM_FMT_U32_LE BIT(12)
#define GB_AUDIO_PCM_FMT_U32_BE BIT(13)

/* See SNDRV_PCM_RATE_* in Linux source */
#define GB_AUDIO_PCM_RATE_5512 BIT(0)
#define GB_AUDIO_PCM_RATE_8000 BIT(1)
#define GB_AUDIO_PCM_RATE_11025 BIT(2)
#define GB_AUDIO_PCM_RATE_16000 BIT(3)
#define GB_AUDIO_PCM_RATE_22050 BIT(4)
#define GB_AUDIO_PCM_RATE_32000 BIT(5)
#define GB_AUDIO_PCM_RATE_44100 BIT(6)
#define GB_AUDIO_PCM_RATE_48000 BIT(7)
#define GB_AUDIO_PCM_RATE_64000 BIT(8)
#define GB_AUDIO_PCM_RATE_88200 BIT(9)
#define GB_AUDIO_PCM_RATE_96000 BIT(10)
#define GB_AUDIO_PCM_RATE_176400 BIT(11)
#define GB_AUDIO_PCM_RATE_192000 BIT(12)

#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1
#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2 #define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0) #define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1) /* See SNDRV_CTL_ELEM_TYPE_* in Linux source */ #define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01 #define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02 #define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03 #define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06 /* See SNDRV_CTL_ELEM_IFACE_* in Linux source */ #define GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00 #define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01 #define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02 #define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03 #define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04 #define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05 #define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06 /* SNDRV_CTL_ELEM_ACCESS_* in Linux source */ #define GB_AUDIO_ACCESS_READ BIT(0) #define GB_AUDIO_ACCESS_WRITE BIT(1) #define GB_AUDIO_ACCESS_VOLATILE BIT(2) #define GB_AUDIO_ACCESS_TIMESTAMP BIT(3) #define GB_AUDIO_ACCESS_TLV_READ BIT(4) #define GB_AUDIO_ACCESS_TLV_WRITE BIT(5) #define GB_AUDIO_ACCESS_TLV_COMMAND BIT(6) #define GB_AUDIO_ACCESS_INACTIVE BIT(7) #define GB_AUDIO_ACCESS_LOCK BIT(8) #define GB_AUDIO_ACCESS_OWNER BIT(9) /* enum snd_soc_dapm_type */ #define GB_AUDIO_WIDGET_TYPE_INPUT 0x0 #define GB_AUDIO_WIDGET_TYPE_OUTPUT 0x1 #define GB_AUDIO_WIDGET_TYPE_MUX 0x2 #define GB_AUDIO_WIDGET_TYPE_VIRT_MUX 0x3 #define GB_AUDIO_WIDGET_TYPE_VALUE_MUX 0x4 #define GB_AUDIO_WIDGET_TYPE_MIXER 0x5 #define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL 0x6 #define GB_AUDIO_WIDGET_TYPE_PGA 0x7 #define GB_AUDIO_WIDGET_TYPE_OUT_DRV 0x8 #define GB_AUDIO_WIDGET_TYPE_ADC 0x9 #define GB_AUDIO_WIDGET_TYPE_DAC 0xa #define GB_AUDIO_WIDGET_TYPE_MICBIAS 0xb #define GB_AUDIO_WIDGET_TYPE_MIC 0xc #define GB_AUDIO_WIDGET_TYPE_HP 0xd #define GB_AUDIO_WIDGET_TYPE_SPK 0xe #define GB_AUDIO_WIDGET_TYPE_LINE 0xf #define GB_AUDIO_WIDGET_TYPE_SWITCH 0x10 #define GB_AUDIO_WIDGET_TYPE_VMID 0x11 #define GB_AUDIO_WIDGET_TYPE_PRE 0x12 #define GB_AUDIO_WIDGET_TYPE_POST 0x13 #define GB_AUDIO_WIDGET_TYPE_SUPPLY 0x14 #define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY 0x15 #define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY 0x16 #define GB_AUDIO_WIDGET_TYPE_AIF_IN 0x17 #define GB_AUDIO_WIDGET_TYPE_AIF_OUT 0x18 #define GB_AUDIO_WIDGET_TYPE_SIGGEN 0x19 #define GB_AUDIO_WIDGET_TYPE_DAI_IN 0x1a #define GB_AUDIO_WIDGET_TYPE_DAI_OUT 0x1b #define GB_AUDIO_WIDGET_TYPE_DAI_LINK 0x1c #define GB_AUDIO_WIDGET_STATE_DISABLED 0x01 #define GB_AUDIO_WIDGET_STATE_ENAABLED 0x02 #define GB_AUDIO_JACK_EVENT_INSERTION 0x1 #define GB_AUDIO_JACK_EVENT_REMOVAL 0x2 #define GB_AUDIO_BUTTON_EVENT_PRESS 0x1 #define GB_AUDIO_BUTTON_EVENT_RELEASE 0x2 #define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED 0x1 #define GB_AUDIO_STREAMING_EVENT_HALT 0x2 #define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR 0x3 #define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR 0x4 #define GB_AUDIO_STREAMING_EVENT_FAILURE 0x5 #define GB_AUDIO_STREAMING_EVENT_UNDERRUN 0x6 #define GB_AUDIO_STREAMING_EVENT_OVERRUN 0x7 #define GB_AUDIO_STREAMING_EVENT_CLOCKING 0x8 #define GB_AUDIO_STREAMING_EVENT_DATA_LEN 0x9 #define GB_AUDIO_INVALID_INDEX 0xff /* enum snd_jack_types */ #define GB_AUDIO_JACK_HEADPHONE 0x0000001 #define GB_AUDIO_JACK_MICROPHONE 0x0000002 #define GB_AUDIO_JACK_HEADSET (GB_AUDIO_JACK_HEADPHONE | \ GB_AUDIO_JACK_MICROPHONE) #define GB_AUDIO_JACK_LINEOUT 0x0000004 #define GB_AUDIO_JACK_MECHANICAL 0x0000008 #define GB_AUDIO_JACK_VIDEOOUT 0x0000010 #define GB_AUDIO_JACK_AVOUT (GB_AUDIO_JACK_LINEOUT | \ GB_AUDIO_JACK_VIDEOOUT) #define GB_AUDIO_JACK_LINEIN 0x0000020 #define GB_AUDIO_JACK_OC_HPHL 0x0000040 #define 
GB_AUDIO_JACK_OC_HPHR 0x0000080 #define GB_AUDIO_JACK_MICROPHONE2 0x0000200 #define GB_AUDIO_JACK_ANC_HEADPHONE (GB_AUDIO_JACK_HEADPHONE | \ GB_AUDIO_JACK_MICROPHONE | \ GB_AUDIO_JACK_MICROPHONE2) /* Kept separate from switches to facilitate implementation */ #define GB_AUDIO_JACK_BTN_0 0x4000000 #define GB_AUDIO_JACK_BTN_1 0x2000000 #define GB_AUDIO_JACK_BTN_2 0x1000000 #define GB_AUDIO_JACK_BTN_3 0x0800000 struct gb_audio_pcm { __u8 stream_name[GB_AUDIO_PCM_NAME_MAX]; __le32 formats; /* GB_AUDIO_PCM_FMT_* */ __le32 rates; /* GB_AUDIO_PCM_RATE_* */ __u8 chan_min; __u8 chan_max; __u8 sig_bits; /* number of bits of content */ } __packed; struct gb_audio_dai { __u8 name[AUDIO_DAI_NAME_MAX]; __le16 data_cport; struct gb_audio_pcm capture; struct gb_audio_pcm playback; } __packed; struct gb_audio_integer { __le32 min; __le32 max; __le32 step; } __packed; struct gb_audio_integer64 { __le64 min; __le64 max; __le64 step; } __packed; struct gb_audio_enumerated { __le32 items; __le16 names_length; __u8 names[0]; } __packed; struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */ __u8 type; /* GB_AUDIO_CTL_ELEM_TYPE_* */ __le16 dimen[4]; union { struct gb_audio_integer integer; struct gb_audio_integer64 integer64; struct gb_audio_enumerated enumerated; } value; } __packed; struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */ __le64 timestamp; /* XXX needed? */ union { __le32 integer_value[2]; /* consider CTL_DOUBLE_xxx */ __le64 integer64_value[2]; __le32 enumerated_item[2]; } value; } __packed; struct gb_audio_control { __u8 name[AUDIO_CONTROL_NAME_MAX]; __u8 id; /* 0-63 */ __u8 iface; /* GB_AUDIO_IFACE_* */ __le16 data_cport; __le32 access; /* GB_AUDIO_ACCESS_* */ __u8 count; /* count of same elements */ __u8 count_values; /* count of values, max=2 for CTL_DOUBLE_xxx */ struct gb_audio_ctl_elem_info info; } __packed; struct gb_audio_widget { __u8 name[AUDIO_WIDGET_NAME_MAX]; __u8 sname[AUDIO_WIDGET_NAME_MAX]; __u8 id; __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */ __u8 state; /* GB_AUDIO_WIDGET_STATE_* */ __u8 ncontrols; struct gb_audio_control ctl[0]; /* 'ncontrols' entries */ } __packed; struct gb_audio_route { __u8 source_id; /* widget id */ __u8 destination_id; /* widget id */ __u8 control_id; /* 0-63 */ __u8 index; /* Selection within the control */ } __packed; struct gb_audio_topology { __u8 num_dais; __u8 num_controls; __u8 num_widgets; __u8 num_routes; __le32 size_dais; __le32 size_controls; __le32 size_widgets; __le32 size_routes; __le32 jack_type; /* * struct gb_audio_dai dai[num_dais]; * struct gb_audio_control controls[num_controls]; * struct gb_audio_widget widgets[num_widgets]; * struct gb_audio_route routes[num_routes]; */ __u8 data[0]; } __packed; struct gb_audio_get_topology_size_response { __le16 size; } __packed; struct gb_audio_get_topology_response { struct gb_audio_topology topology; } __packed; struct gb_audio_get_control_request { __u8 control_id; __u8 index; } __packed; struct gb_audio_get_control_response { struct gb_audio_ctl_elem_value value; } __packed; struct gb_audio_set_control_request { __u8 control_id; __u8 index; struct gb_audio_ctl_elem_value value; } __packed; struct gb_audio_enable_widget_request { __u8 widget_id; } __packed; struct gb_audio_disable_widget_request { __u8 widget_id; } __packed; struct gb_audio_get_pcm_request { __le16 data_cport; } __packed; struct gb_audio_get_pcm_response { __le32 format; __le32 rate; __u8 channels; __u8 sig_bits; } __packed; struct gb_audio_set_pcm_request { __le16 data_cport; __le32 
format; __le32 rate; __u8 channels; __u8 sig_bits; } __packed; struct gb_audio_set_tx_data_size_request { __le16 data_cport; __le16 size; } __packed; struct gb_audio_activate_tx_request { __le16 data_cport; } __packed; struct gb_audio_deactivate_tx_request { __le16 data_cport; } __packed; struct gb_audio_set_rx_data_size_request { __le16 data_cport; __le16 size; } __packed; struct gb_audio_activate_rx_request { __le16 data_cport; } __packed; struct gb_audio_deactivate_rx_request { __le16 data_cport; } __packed; struct gb_audio_jack_event_request { __u8 widget_id; __u8 jack_attribute; __u8 event; } __packed; struct gb_audio_button_event_request { __u8 widget_id; __u8 button_id; __u8 event; } __packed; struct gb_audio_streaming_event_request { __le16 data_cport; __u8 event; } __packed; struct gb_audio_send_data_request { __le64 timestamp; __u8 data[0]; } __packed; /* Log */ /* operations */ #define GB_LOG_TYPE_SEND_LOG 0x02 /* length */ #define GB_LOG_MAX_LEN 1024 struct gb_log_send_log_request { __le16 len; __u8 msg[0]; } __packed; #endif /* __GREYBUS_PROTOCOLS_H */ greybus/control.h 0000644 00000004317 14722070374 0010070 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Greybus CPort control protocol * * Copyright 2015 Google Inc. * Copyright 2015 Linaro Ltd. */ #ifndef __CONTROL_H #define __CONTROL_H #include <linux/types.h> #include <linux/device.h> struct gb_control { struct device dev; struct gb_interface *intf; struct gb_connection *connection; u8 protocol_major; u8 protocol_minor; bool has_bundle_activate; bool has_bundle_version; char *vendor_string; char *product_string; }; #define to_gb_control(d) container_of(d, struct gb_control, dev) struct gb_control *gb_control_create(struct gb_interface *intf); int gb_control_enable(struct gb_control *control); void gb_control_disable(struct gb_control *control); int gb_control_suspend(struct gb_control *control); int gb_control_resume(struct gb_control *control); int gb_control_add(struct gb_control *control); void gb_control_del(struct gb_control *control); struct gb_control *gb_control_get(struct gb_control *control); void gb_control_put(struct gb_control *control); int gb_control_get_bundle_versions(struct gb_control *control); int gb_control_connected_operation(struct gb_control *control, u16 cport_id); int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id); int gb_control_disconnecting_operation(struct gb_control *control, u16 cport_id); int gb_control_mode_switch_operation(struct gb_control *control); void gb_control_mode_switch_prepare(struct gb_control *control); void gb_control_mode_switch_complete(struct gb_control *control); int gb_control_get_manifest_size_operation(struct gb_interface *intf); int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest, size_t size); int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id); int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id); int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id); int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id); int gb_control_interface_suspend_prepare(struct gb_control *control); int gb_control_interface_deactivate_prepare(struct gb_control *control); int gb_control_interface_hibernate_abort(struct gb_control *control); #endif /* __CONTROL_H */ greybus/bundle.h 0000644 00000004020 14722070374 0007650 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Greybus bundles * * Copyright 2014 Google Inc. * Copyright 2014 Linaro Ltd. 
*/ #ifndef __BUNDLE_H #define __BUNDLE_H #include <linux/types.h> #include <linux/list.h> #include <linux/pm_runtime.h> #include <linux/device.h> #define BUNDLE_ID_NONE U8_MAX /* Greybus "public" definitions" */ struct gb_bundle { struct device dev; struct gb_interface *intf; u8 id; u8 class; u8 class_major; u8 class_minor; size_t num_cports; struct greybus_descriptor_cport *cport_desc; struct list_head connections; u8 *state; struct list_head links; /* interface->bundles */ }; #define to_gb_bundle(d) container_of(d, struct gb_bundle, dev) /* Greybus "private" definitions" */ struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id, u8 class); int gb_bundle_add(struct gb_bundle *bundle); void gb_bundle_destroy(struct gb_bundle *bundle); /* Bundle Runtime PM wrappers */ #ifdef CONFIG_PM static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle) { int retval; retval = pm_runtime_get_sync(&bundle->dev); if (retval < 0) { dev_err(&bundle->dev, "pm_runtime_get_sync failed: %d\n", retval); pm_runtime_put_noidle(&bundle->dev); return retval; } return 0; } static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle) { int retval; pm_runtime_mark_last_busy(&bundle->dev); retval = pm_runtime_put_autosuspend(&bundle->dev); return retval; } static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) { pm_runtime_get_noresume(&bundle->dev); } static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) { pm_runtime_put_noidle(&bundle->dev); } #else static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle) { return 0; } static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle) { return 0; } static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {} static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {} #endif #endif /* __BUNDLE_H */ greybus/svc.h 0000644 00000006331 14722070374 0007201 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Greybus SVC code * * Copyright 2015 Google Inc. * Copyright 2015 Linaro Ltd. 
*/ #ifndef __SVC_H #define __SVC_H #include <linux/types.h> #include <linux/device.h> struct gb_svc_l2_timer_cfg; #define GB_SVC_CPORT_FLAG_E2EFC BIT(0) #define GB_SVC_CPORT_FLAG_CSD_N BIT(1) #define GB_SVC_CPORT_FLAG_CSV_N BIT(2) enum gb_svc_state { GB_SVC_STATE_RESET, GB_SVC_STATE_PROTOCOL_VERSION, GB_SVC_STATE_SVC_HELLO, }; enum gb_svc_watchdog_bite { GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0, GB_SVC_WATCHDOG_BITE_PANIC_KERNEL, }; struct gb_svc_watchdog; struct svc_debugfs_pwrmon_rail { u8 id; struct gb_svc *svc; }; struct gb_svc { struct device dev; struct gb_host_device *hd; struct gb_connection *connection; enum gb_svc_state state; struct ida device_id_map; struct workqueue_struct *wq; u16 endo_id; u8 ap_intf_id; u8 protocol_major; u8 protocol_minor; struct gb_svc_watchdog *watchdog; enum gb_svc_watchdog_bite action; struct dentry *debugfs_dentry; struct svc_debugfs_pwrmon_rail *pwrmon_rails; }; #define to_gb_svc(d) container_of(d, struct gb_svc, dev) struct gb_svc *gb_svc_create(struct gb_host_device *hd); int gb_svc_add(struct gb_svc *svc); void gb_svc_del(struct gb_svc *svc); void gb_svc_put(struct gb_svc *svc); int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, u8 measurement_type, u32 *value); int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id); int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, u8 intf2_id, u8 dev2_id); void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id); int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, u8 intf2_id, u16 cport2_id, u8 cport_flags); void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, u8 intf2_id, u16 cport2_id); int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id); int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable); int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable); int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable); int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type); int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id); int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, u32 *value); int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, u32 value); int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, u8 tx_mode, u8 tx_gear, u8 tx_nlanes, u8 tx_amplitude, u8 tx_hs_equalizer, u8 rx_mode, u8 rx_gear, u8 rx_nlanes, u8 flags, u32 quirks, struct gb_svc_l2_timer_cfg *local, struct gb_svc_l2_timer_cfg *remote); int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id); int gb_svc_ping(struct gb_svc *svc); int gb_svc_watchdog_create(struct gb_svc *svc); void gb_svc_watchdog_destroy(struct gb_svc *svc); bool gb_svc_watchdog_enabled(struct gb_svc *svc); int gb_svc_watchdog_enable(struct gb_svc *svc); int gb_svc_watchdog_disable(struct gb_svc *svc); int gb_svc_protocol_init(void); void gb_svc_protocol_exit(void); #endif /* __SVC_H */ greybus/connection.h 0000644 00000007237 14722070374 0010553 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Greybus connections * * Copyright 2014 Google Inc. * Copyright 2014 Linaro Ltd. 
*/ #ifndef __CONNECTION_H #define __CONNECTION_H #include <linux/bits.h> #include <linux/list.h> #include <linux/kfifo.h> #include <linux/kref.h> #include <linux/workqueue.h> #define GB_CONNECTION_FLAG_CSD BIT(0) #define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1) #define GB_CONNECTION_FLAG_OFFLOADED BIT(2) #define GB_CONNECTION_FLAG_CDSI1 BIT(3) #define GB_CONNECTION_FLAG_CONTROL BIT(4) #define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5) #define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL enum gb_connection_state { GB_CONNECTION_STATE_DISABLED = 0, GB_CONNECTION_STATE_ENABLED_TX = 1, GB_CONNECTION_STATE_ENABLED = 2, GB_CONNECTION_STATE_DISCONNECTING = 3, }; struct gb_operation; typedef int (*gb_request_handler_t)(struct gb_operation *); struct gb_connection { struct gb_host_device *hd; struct gb_interface *intf; struct gb_bundle *bundle; struct kref kref; u16 hd_cport_id; u16 intf_cport_id; struct list_head hd_links; struct list_head bundle_links; gb_request_handler_t handler; unsigned long flags; struct mutex mutex; spinlock_t lock; enum gb_connection_state state; struct list_head operations; char name[16]; struct workqueue_struct *wq; atomic_t op_cycle; void *private; bool mode_switch; }; struct gb_connection *gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id, gb_request_handler_t handler); struct gb_connection *gb_connection_create_control(struct gb_interface *intf); struct gb_connection *gb_connection_create(struct gb_bundle *bundle, u16 cport_id, gb_request_handler_t handler); struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id, gb_request_handler_t handler, unsigned long flags); struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id, unsigned long flags); void gb_connection_destroy(struct gb_connection *connection); static inline bool gb_connection_is_static(struct gb_connection *connection) { return !connection->intf; } int gb_connection_enable(struct gb_connection *connection); int gb_connection_enable_tx(struct gb_connection *connection); void gb_connection_disable_rx(struct gb_connection *connection); void gb_connection_disable(struct gb_connection *connection); void gb_connection_disable_forced(struct gb_connection *connection); void gb_connection_mode_switch_prepare(struct gb_connection *connection); void gb_connection_mode_switch_complete(struct gb_connection *connection); void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, u8 *data, size_t length); void gb_connection_latency_tag_enable(struct gb_connection *connection); void gb_connection_latency_tag_disable(struct gb_connection *connection); static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection) { return !(connection->flags & GB_CONNECTION_FLAG_CSD); } static inline bool gb_connection_flow_control_disabled(struct gb_connection *connection) { return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL; } static inline bool gb_connection_is_offloaded(struct gb_connection *connection) { return connection->flags & GB_CONNECTION_FLAG_OFFLOADED; } static inline bool gb_connection_is_control(struct gb_connection *connection) { return connection->flags & GB_CONNECTION_FLAG_CONTROL; } static inline void *gb_connection_get_data(struct gb_connection *connection) { return connection->private; } static inline void gb_connection_set_data(struct gb_connection *connection, void *data) { connection->private = data; } #endif /* __CONNECTION_H */ greybus/module.h 0000644 00000001362 14722070374 0007672 0 ustar 00 /* 
SPDX-License-Identifier: GPL-2.0 */ /* * Greybus Module code * * Copyright 2016 Google Inc. * Copyright 2016 Linaro Ltd. */ #ifndef __MODULE_H #define __MODULE_H #include <linux/types.h> #include <linux/device.h> struct gb_module { struct device dev; struct gb_host_device *hd; struct list_head hd_node; u8 module_id; size_t num_interfaces; bool disconnected; struct gb_interface *interfaces[0]; }; #define to_gb_module(d) container_of(d, struct gb_module, dev) struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, size_t num_interfaces); int gb_module_add(struct gb_module *module); void gb_module_del(struct gb_module *module); void gb_module_put(struct gb_module *module); #endif /* __MODULE_H */ greybus/manifest.h 0000644 00000000513 14722070374 0010210 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Greybus manifest parsing * * Copyright 2014 Google Inc. * Copyright 2014 Linaro Ltd. */ #ifndef __MANIFEST_H #define __MANIFEST_H #include <linux/types.h> struct gb_interface; bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size); #endif /* __MANIFEST_H */ greybus/interface.h 0000644 00000004235 14722070374 0010347 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Greybus Interface Block code * * Copyright 2014 Google Inc. * Copyright 2014 Linaro Ltd. */ #ifndef __INTERFACE_H #define __INTERFACE_H #include <linux/types.h> #include <linux/device.h> enum gb_interface_type { GB_INTERFACE_TYPE_INVALID = 0, GB_INTERFACE_TYPE_UNKNOWN, GB_INTERFACE_TYPE_DUMMY, GB_INTERFACE_TYPE_UNIPRO, GB_INTERFACE_TYPE_GREYBUS, }; #define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0) #define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1) #define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2) #define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3) #define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4) #define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5) #define GB_INTERFACE_QUIRK_NO_PM BIT(6) struct gb_interface { struct device dev; struct gb_control *control; struct list_head bundles; struct list_head module_node; struct list_head manifest_descs; u8 interface_id; /* Physical location within the Endo */ u8 device_id; u8 features; /* Feature flags set in the manifest */ enum gb_interface_type type; u32 ddbl1_manufacturer_id; u32 ddbl1_product_id; u32 vendor_id; u32 product_id; u64 serial_number; struct gb_host_device *hd; struct gb_module *module; unsigned long quirks; struct mutex mutex; bool disconnected; bool ejected; bool removed; bool active; bool enabled; bool mode_switch; bool dme_read; struct work_struct mode_switch_work; struct completion mode_switch_completion; }; #define to_gb_interface(d) container_of(d, struct gb_interface, dev) struct gb_interface *gb_interface_create(struct gb_module *module, u8 interface_id); int gb_interface_activate(struct gb_interface *intf); void gb_interface_deactivate(struct gb_interface *intf); int gb_interface_enable(struct gb_interface *intf); void gb_interface_disable(struct gb_interface *intf); int gb_interface_add(struct gb_interface *intf); void gb_interface_del(struct gb_interface *intf); void gb_interface_put(struct gb_interface *intf); void gb_interface_mailbox_event(struct gb_interface *intf, u16 result, u32 mailbox); int gb_interface_request_mode_switch(struct gb_interface *intf); #endif /* __INTERFACE_H */ greybus/hd.h 0000644 00000005227 14722070374 0007004 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Greybus Host Device * * Copyright 2014-2015 Google Inc. * Copyright 2014-2015 Linaro Ltd. 
*/ #ifndef __HD_H #define __HD_H #include <linux/types.h> #include <linux/device.h> struct gb_host_device; struct gb_message; struct gb_hd_driver { size_t hd_priv_size; int (*cport_allocate)(struct gb_host_device *hd, int cport_id, unsigned long flags); void (*cport_release)(struct gb_host_device *hd, u16 cport_id); int (*cport_enable)(struct gb_host_device *hd, u16 cport_id, unsigned long flags); int (*cport_disable)(struct gb_host_device *hd, u16 cport_id); int (*cport_connected)(struct gb_host_device *hd, u16 cport_id); int (*cport_flush)(struct gb_host_device *hd, u16 cport_id); int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id, u8 phase, unsigned int timeout); int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id, size_t peer_space, unsigned int timeout); int (*cport_clear)(struct gb_host_device *hd, u16 cport_id); int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id, struct gb_message *message, gfp_t gfp_mask); void (*message_cancel)(struct gb_message *message); int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id); int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id); int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd, bool async); }; struct gb_host_device { struct device dev; int bus_id; const struct gb_hd_driver *driver; struct list_head modules; struct list_head connections; struct ida cport_id_map; /* Number of CPorts supported by the UniPro IP */ size_t num_cports; /* Host device buffer constraints */ size_t buffer_size_max; struct gb_svc *svc; /* Private data for the host driver */ unsigned long hd_priv[0] __aligned(sizeof(s64)); }; #define to_gb_host_device(d) container_of(d, struct gb_host_device, dev) int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id); void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id); int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id, unsigned long flags); void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id); struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver, struct device *parent, size_t buffer_size_max, size_t num_cports); int gb_hd_add(struct gb_host_device *hd); void gb_hd_del(struct gb_host_device *hd); void gb_hd_shutdown(struct gb_host_device *hd); void gb_hd_put(struct gb_host_device *hd); int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, bool in_irq); int gb_hd_init(void); void gb_hd_exit(void); #endif /* __HD_H */ bpf_verifier.h 0000644 00000036267 14722070374 0007403 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com */ #ifndef _LINUX_BPF_VERIFIER_H #define _LINUX_BPF_VERIFIER_H 1 #include <linux/bpf.h> /* for enum bpf_reg_type */ #include <linux/filter.h> /* for MAX_BPF_STACK */ #include <linux/tnum.h> /* Maximum variable offset umax_value permitted when resolving memory accesses. * In practice this is far bigger than any realistic pointer offset; this limit * ensures that umax_value + (int)off + (int)size cannot overflow a u64. */ #define BPF_MAX_VAR_OFF (1 << 29) /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures * that converting umax_value to int cannot overflow. */ #define BPF_MAX_VAR_SIZ (1 << 29) /* Liveness marks, used for registers and spilled-regs (in stack slots). * Read marks propagate upwards until they find a write mark; they record that * "one of this state's descendants read this reg" (and therefore the reg is * relevant for states_equal() checks). 
* Write marks collect downwards and do not propagate; they record that "the * straight-line code that reached this state (from its parent) wrote this reg" * (and therefore that reads propagated from this state or its descendants * should not propagate to its parent). * A state with a write mark can receive read marks; it just won't propagate * them to its parent, since the write mark is a property, not of the state, * but of the link between it and its parent. See mark_reg_read() and * mark_stack_slot_read() in kernel/bpf/verifier.c. */ enum bpf_reg_liveness { REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */ REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */ REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64, REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */ REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ }; struct bpf_reg_state { /* Ordering of fields matters. See states_equal() */ enum bpf_reg_type type; union { /* valid when type == PTR_TO_PACKET */ u16 range; /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL */ struct bpf_map *map_ptr; /* Max size from any of the above. */ unsigned long raw; }; /* Fixed part of pointer offset, pointer types only */ s32 off; /* For PTR_TO_PACKET, used to find other pointers with the same variable * offset, so they can share range knowledge. * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we * came from, when one is tested for != NULL. * For PTR_TO_SOCKET this is used to share which pointers retain the * same reference to the socket, to determine proper reference freeing. */ u32 id; /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned * from a pointer-cast helper such as bpf_sk_fullsock() or * bpf_tcp_sock(). * * Consider the following where "sk" is a reference-counted * pointer returned from "sk = bpf_sk_lookup_tcp();": * * 1: sk = bpf_sk_lookup_tcp(); * 2: if (!sk) { return 0; } * 3: fullsock = bpf_sk_fullsock(sk); * 4: if (!fullsock) { bpf_sk_release(sk); return 0; } * 5: tp = bpf_tcp_sock(fullsock); * 6: if (!tp) { bpf_sk_release(sk); return 0; } * 7: bpf_sk_release(sk); * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain * * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and * the "tp" ptr should be invalidated as well. In order to do that, * the regs holding "fullsock" and "sk" need to remember * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id * such that the verifier can reset all regs which have * ref_obj_id matching the sk_reg->id. * * sk_reg->ref_obj_id is set to sk_reg->id at line 1. * sk_reg->id is kept for NULL-marking purposes only. * After NULL-marking is done, sk_reg->id can be reset to 0. * * After "fullsock = bpf_sk_fullsock(sk);" at line 3, * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id. * * After "tp = bpf_tcp_sock(fullsock);" at line 5, * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id, * which is the same as sk_reg->ref_obj_id. * * From the verifier's perspective, if sk, fullsock and tp * are not NULL, they are the same ptr with different * reg->type. In particular, bpf_sk_release(tp) is also * allowed and has the same effect as bpf_sk_release(sk). */ u32 ref_obj_id; /* For scalar types (SCALAR_VALUE), this represents our knowledge of * the actual value.
* For pointer types, this represents the variable part of the offset * from the pointed-to object, and is shared with all bpf_reg_states * with the same id as us. */ struct tnum var_off; /* Used to determine if any memory access using this register will * result in a bad access. * These refer to the same value as var_off, not necessarily the actual * contents of the register. */ s64 smin_value; /* minimum possible (s64)value */ s64 smax_value; /* maximum possible (s64)value */ u64 umin_value; /* minimum possible (u64)value */ u64 umax_value; /* maximum possible (u64)value */ /* parentage chain for liveness checking */ struct bpf_reg_state *parent; /* Inside the callee two registers can be both PTR_TO_STACK like * R1=fp-8 and R2=fp-8, but one of them points to this function's stack * while the other points to the caller's stack. To differentiate them, * 'frameno' is used: an index into the bpf_verifier_state->frame[] array * pointing to a bpf_func_state. */ u32 frameno; /* Tracks subreg definition. The stored value is the insn_idx of the * writing insn. This is safe because subreg_def is used before any insn * patching, which only happens after main verification has finished. */ s32 subreg_def; enum bpf_reg_liveness live; /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */ bool precise; }; enum bpf_stack_slot_type { STACK_INVALID, /* nothing was stored in this stack slot */ STACK_SPILL, /* register spilled into stack */ STACK_MISC, /* BPF program wrote some data into this slot */ STACK_ZERO, /* BPF program wrote constant zero */ }; #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ struct bpf_stack_state { struct bpf_reg_state spilled_ptr; u8 slot_type[BPF_REG_SIZE]; }; struct bpf_reference_state { /* Track each reference created with a unique id, even if the same * instruction creates the reference multiple times (e.g. via CALL). */ int id; /* Instruction where the allocation of this reference occurred. This * is used purely to inform the user of a reference leak. */ int insn_idx; }; /* state of the program: * type of all registers and stack info */ struct bpf_func_state { struct bpf_reg_state regs[MAX_BPF_REG]; /* index of call instruction that called into this func */ int callsite; /* stack frame number of this function state from pov of * enclosing bpf_verifier_state. * 0 = main function, 1 = first callee. */ u32 frameno; /* subprog number == index within subprog_stack_depth * zero == main subprog */ u32 subprogno; /* The following fields should be last. See copy_func_state() */ int acquired_refs; struct bpf_reference_state *refs; int allocated_stack; struct bpf_stack_state *stack; }; struct bpf_idx_pair { u32 prev_idx; u32 idx; }; struct bpf_id_pair { u32 old; u32 cur; }; /* Maximum number of register states that can exist at once */ #define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) #define MAX_CALL_FRAMES 8 struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; struct bpf_verifier_state *parent; /* * 'branches' field is the number of branches left to explore: * 0 - all possible paths from this state reached bpf_exit or * were safely pruned * 1 - at least one path is being explored. * This state hasn't reached bpf_exit * 2 - at least two paths are being explored. * This state is an immediate parent of two children. * One is the fallthrough branch with branches==1 and the other * state is pushed into the stack (to be explored later), also with * branches==1. The parent of this state has branches==1.
* The verifier state tree connected via 'parent' pointer looks like: * 1 * 1 * 2 -> 1 (first 'if' pushed into stack) * 1 * 2 -> 1 (second 'if' pushed into stack) * 1 * 1 * 1 bpf_exit. * * Once do_check() reaches bpf_exit, it calls update_branch_counts() * and the verifier state tree will look like: * 1 * 1 * 2 -> 1 (first 'if' pushed into stack) * 1 * 1 -> 1 (second 'if' pushed into stack) * 0 * 0 * 0 bpf_exit. * After pop_stack(), do_check() will resume at the second 'if'. * * If is_state_visited() sees a state with branches > 0 it means * there is a loop. If such a state is exactly equal to the current state * it's an infinite loop. Note states_equal() checks for states * equivalency, so two states being 'states_equal' does not mean * infinite loop. The exact comparison is provided by the * states_maybe_looping() function. It's a stronger pre-check and * much faster than states_equal(). * * This algorithm may not find all possible infinite loops, or the * loop iteration count may be too high. * In such cases the BPF_COMPLEXITY_LIMIT_INSNS limit kicks in. */ u32 branches; u32 insn_idx; u32 curframe; u32 active_spin_lock; bool speculative; /* first and last insn idx of this verifier state */ u32 first_insn_idx; u32 last_insn_idx; /* jmp history recorded from first to last. * backtracking uses it to go from last to first. * For most states jmp_history_cnt is [0-3]. * For loops it can go up to ~40. */ struct bpf_idx_pair *jmp_history; u32 jmp_history_cnt; }; #define bpf_get_spilled_reg(slot, frame) \ (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ ? &frame->stack[slot].spilled_ptr : NULL) /* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */ #define bpf_for_each_spilled_reg(iter, frame, reg) \ for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ iter < frame->allocated_stack / BPF_REG_SIZE; \ iter++, reg = bpf_get_spilled_reg(iter, frame)) /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; struct bpf_verifier_state_list *next; int miss_cnt, hit_cnt; }; /* Possible states for alu_state member.
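 * These are bit flags OR'ed together into the alu_state field of struct
 * bpf_insn_aux_data (see below) and evaluated together with its alu_limit
 * field; e.g. an insn whose negative source operand needs masking would,
 * on this reading of the verifier, carry
 * BPF_ALU_SANITIZE_SRC | BPF_ALU_NEG_VALUE.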
*/ #define BPF_ALU_SANITIZE_SRC (1U << 0) #define BPF_ALU_SANITIZE_DST (1U << 1) #define BPF_ALU_NEG_VALUE (1U << 2) #define BPF_ALU_NON_POINTER (1U << 3) #define BPF_ALU_IMMEDIATE (1U << 4) #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ BPF_ALU_SANITIZE_DST) struct bpf_insn_aux_data { union { enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ unsigned long map_state; /* pointer/poison value for maps */ s32 call_imm; /* saved imm field of call insn */ u32 alu_limit; /* limit for add/sub register with pointer */ struct { u32 map_index; /* index into used_maps[] */ u32 map_off; /* offset from value base address */ }; }; int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ bool seen; /* this insn was processed by the verifier */ bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ bool zext_dst; /* this insn zero extends dst reg */ u8 alu_state; /* used in combination with alu_limit */ bool prune_point; unsigned int orig_idx; /* original instruction index */ }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ #define BPF_VERIFIER_TMP_LOG_SIZE 1024 struct bpf_verifier_log { u32 level; char kbuf[BPF_VERIFIER_TMP_LOG_SIZE]; char __user *ubuf; u32 len_used; u32 len_total; }; static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log) { return log->len_used >= log->len_total - 1; } #define BPF_LOG_LEVEL1 1 #define BPF_LOG_LEVEL2 2 #define BPF_LOG_STATS 4 #define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2) #define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS) static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) { return log->level && log->ubuf && !bpf_verifier_log_full(log); } #define BPF_MAX_SUBPROGS 256 struct bpf_subprog_info { u32 start; /* insn idx of function entry point */ u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ u16 stack_depth; /* max. 
stack depth used by this function */ bool has_tail_call; }; /* single container for all structs; * one verifier_env per bpf_check() call */ struct bpf_verifier_env { u32 insn_idx; u32 prev_insn_idx; struct bpf_prog *prog; /* eBPF program being verified */ const struct bpf_verifier_ops *ops; struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ int stack_size; /* number of states to be processed */ bool strict_alignment; /* perform strict pointer alignment checks */ bool test_state_freq; /* test verifier with different pruning frequency */ struct bpf_verifier_state *cur_state; /* current verifier state */ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ struct bpf_verifier_state_list *free_list; struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by the eBPF program */ u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ bool explore_alu_limits; bool allow_ptr_leaks; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE]; struct { int *insn_state; int *insn_stack; int cur_stack; } cfg; u32 subprog_cnt; /* number of instructions analyzed by the verifier */ u32 prev_insn_processed, insn_processed; /* number of jmps, calls, exits analyzed so far */ u32 prev_jmps_processed, jmps_processed; /* total verification time */ u64 verification_time; /* maximum number of verifier states kept in 'branching' instructions */ u32 max_states_per_insn; /* total number of allocated verifier states */ u32 total_states; /* some states are freed during program analysis; * this is the peak number of states, which dominates kernel * memory consumption during verification */ u32 peak_states; /* longest register parentage chain walked for liveness marking */ u32 longest_mark_read_walk; }; __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, va_list args); __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, const char *fmt, ...); static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) { struct bpf_verifier_state *cur = env->cur_state; return cur->frame[cur->curframe]; } static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) { return cur_func(env)->regs; } int bpf_prog_offload_verifier_prep(struct bpf_prog *prog); int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int bpf_prog_offload_finalize(struct bpf_verifier_env *env); void bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, struct bpf_insn *insn); void bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); #endif /* _LINUX_BPF_VERIFIER_H */ of_net.h 0000644 00000001320 14722070374 0006171 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * OF helpers for network devices.
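 *
 * Illustrative sketch (not part of the original header; "np" and "ndev"
 * are hypothetical):
 *
 *	const void *mac = of_get_mac_address(np);
 *
 *	if (!IS_ERR(mac))
 *		ether_addr_copy(ndev->dev_addr, mac);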
*/ #ifndef __LINUX_OF_NET_H #define __LINUX_OF_NET_H #ifdef CONFIG_OF_NET #include <linux/of.h> struct net_device; extern int of_get_phy_mode(struct device_node *np); extern const void *of_get_mac_address(struct device_node *np); extern struct net_device *of_find_net_device_by_node(struct device_node *np); #else static inline int of_get_phy_mode(struct device_node *np) { return -ENODEV; } static inline const void *of_get_mac_address(struct device_node *np) { return ERR_PTR(-ENODEV); } static inline struct net_device *of_find_net_device_by_node(struct device_node *np) { return NULL; } #endif #endif /* __LINUX_OF_NET_H */ interconnect.h 0000644 00000003265 14722070374 0007424 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2018-2019, Linaro Ltd. * Author: Georgi Djakov <georgi.djakov@linaro.org> */ #ifndef __LINUX_INTERCONNECT_H #define __LINUX_INTERCONNECT_H #include <linux/mutex.h> #include <linux/types.h> /* macros for converting to icc units */ #define Bps_to_icc(x) ((x) / 1000) #define kBps_to_icc(x) (x) #define MBps_to_icc(x) ((x) * 1000) #define GBps_to_icc(x) ((x) * 1000 * 1000) #define bps_to_icc(x) (1) #define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0)) #define Mbps_to_icc(x) ((x) * 1000 / 8) #define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8) struct icc_path; struct device; #if IS_ENABLED(CONFIG_INTERCONNECT) struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id); struct icc_path *of_icc_get(struct device *dev, const char *name); void icc_put(struct icc_path *path); int icc_enable(struct icc_path *path); int icc_disable(struct icc_path *path); int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); void icc_set_tag(struct icc_path *path, u32 tag); #else static inline struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id) { return NULL; } static inline struct icc_path *of_icc_get(struct device *dev, const char *name) { return NULL; } static inline void icc_put(struct icc_path *path) { } static inline int icc_enable(struct icc_path *path) { return 0; } static inline int icc_disable(struct icc_path *path) { return 0; } static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw) { return 0; } static inline void icc_set_tag(struct icc_path *path, u32 tag) { } #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_H */ sysfs.h 0000644 00000042202 14722070374 0006072 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * sysfs.h - definitions for the device driver filesystem * * Copyright (c) 2001,2002 Patrick Mochel * Copyright (c) 2004 Silicon Graphics, Inc. * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * * Please see Documentation/filesystems/sysfs.txt for more information. */ #ifndef _SYSFS_H_ #define _SYSFS_H_ #include <linux/kernfs.h> #include <linux/compiler.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/kobject_ns.h> #include <linux/stat.h> #include <linux/atomic.h> struct kobject; struct module; struct bin_attribute; enum kobj_ns_type; struct attribute { const char *name; umode_t mode; #ifdef CONFIG_DEBUG_LOCK_ALLOC bool ignore_lockdep:1; struct lock_class_key *key; struct lock_class_key skey; #endif }; /** * sysfs_attr_init - initialize a dynamically allocated sysfs attribute * @attr: struct attribute to initialize * * Initialize a dynamically allocated struct attribute so we can * make lockdep happy. 
This is a new requirement for attributes * and initially this is only needed when lockdep is enabled. * Lockdep gives a nice error when your attribute is added to * sysfs if you don't have this. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC #define sysfs_attr_init(attr) \ do { \ static struct lock_class_key __key; \ \ (attr)->key = &__key; \ } while (0) #else #define sysfs_attr_init(attr) do {} while (0) #endif /** * struct attribute_group - data structure used to declare an attribute group. * @name: Optional: Attribute group name * If specified, the attribute group will be created in * a new subdirectory with this name. * @is_visible: Optional: Function to return permissions associated with an * attribute of the group. Will be called repeatedly for each * non-binary attribute in the group. Only read/write * permissions as well as SYSFS_PREALLOC are accepted. Must * return 0 if an attribute is not visible. The returned value * will replace static permissions defined in struct attribute. * @is_bin_visible: * Optional: Function to return permissions associated with a * binary attribute of the group. Will be called repeatedly * for each binary attribute in the group. Only read/write * permissions as well as SYSFS_PREALLOC are accepted. Must * return 0 if a binary attribute is not visible. The returned * value will replace static permissions defined in * struct bin_attribute. * @attrs: Pointer to NULL-terminated list of attributes. * @bin_attrs: Pointer to NULL-terminated list of binary attributes. * Either attrs or bin_attrs or both must be provided. */ struct attribute_group { const char *name; umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); struct attribute **attrs; struct bin_attribute **bin_attrs; }; /* * Use these macros to make defining attributes easier. * See include/linux/device.h for examples.
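 *
 * Illustrative sketch (not from the original header; foo_show and foo_store
 * are hypothetical handlers):
 *
 *	static struct kobj_attribute foo_attr =
 *		__ATTR(foo, 0644, foo_show, foo_store);
 *
 * which, given the definitions below, is equivalent to __ATTR_RW(foo).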
*/ #define SYSFS_PREALLOC 010000 #define __ATTR(_name, _mode, _show, _store) { \ .attr = {.name = __stringify(_name), \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ .show = _show, \ .store = _store, \ } #define __ATTR_PREALLOC(_name, _mode, _show, _store) { \ .attr = {.name = __stringify(_name), \ .mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(_mode) },\ .show = _show, \ .store = _store, \ } #define __ATTR_RO(_name) { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .show = _name##_show, \ } #define __ATTR_RO_MODE(_name, _mode) { \ .attr = { .name = __stringify(_name), \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ .show = _name##_show, \ } #define __ATTR_WO(_name) { \ .attr = { .name = __stringify(_name), .mode = 0200 }, \ .store = _name##_store, \ } #define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store) #define __ATTR_NULL { .attr = { .name = NULL } } #ifdef CONFIG_DEBUG_LOCK_ALLOC #define __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) { \ .attr = {.name = __stringify(_name), .mode = _mode, \ .ignore_lockdep = true }, \ .show = _show, \ .store = _store, \ } #else #define __ATTR_IGNORE_LOCKDEP __ATTR #endif #define __ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group *_name##_groups[] = { \ &_name##_group, \ NULL, \ } #define ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group _name##_group = { \ .attrs = _name##_attrs, \ }; \ __ATTRIBUTE_GROUPS(_name) struct file; struct vm_area_struct; struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, struct vm_area_struct *vma); }; /** * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute * @attr: struct bin_attribute to initialize * * Initialize a dynamically allocated struct bin_attribute so we * can make lockdep happy. This is a new requirement for * attributes and initially this is only needed when lockdep is * enabled. Lockdep gives a nice error when your attribute is * added to sysfs if you don't have this. 
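 *
 * Illustrative sketch ("battr" is hypothetical):
 *
 *	struct bin_attribute *battr = kzalloc(sizeof(*battr), GFP_KERNEL);
 *
 *	if (battr)
 *		sysfs_bin_attr_init(battr);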
*/ #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr) /* macros to make defining static binary attributes easier */ #define __BIN_ATTR(_name, _mode, _read, _write, _size) { \ .attr = { .name = __stringify(_name), .mode = _mode }, \ .read = _read, \ .write = _write, \ .size = _size, \ } #define __BIN_ATTR_RO(_name, _size) { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .read = _name##_read, \ .size = _size, \ } #define __BIN_ATTR_WO(_name, _size) { \ .attr = { .name = __stringify(_name), .mode = 0200 }, \ .write = _name##_write, \ .size = _size, \ } #define __BIN_ATTR_RW(_name, _size) \ __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size) #define __BIN_ATTR_NULL __ATTR_NULL #define BIN_ATTR(_name, _mode, _read, _write, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR(_name, _mode, _read, \ _write, _size) #define BIN_ATTR_RO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size) #define BIN_ATTR_WO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size) #define BIN_ATTR_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size) struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); }; #ifdef CONFIG_SYSFS int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns); void sysfs_remove_dir(struct kobject *kobj); int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name, const void *new_ns); int __must_check sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj, const void *new_ns); int __must_check sysfs_create_mount_point(struct kobject *parent_kobj, const char *name); void sysfs_remove_mount_point(struct kobject *parent_kobj, const char *name); int __must_check sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); int __must_check sysfs_create_files(struct kobject *kobj, const struct attribute * const *attr); int __must_check sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode); struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, const struct attribute *attr); void sysfs_unbreak_active_protection(struct kernfs_node *kn); void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr); int __must_check sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr); void sysfs_remove_bin_file(struct kobject *kobj, const struct bin_attribute *attr); int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target, const char *name); int __must_check sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target, const char *name); void sysfs_remove_link(struct kobject *kobj, const char *name); int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *target, const char *old_name, const char *new_name, const void *new_ns); void sysfs_delete_link(struct kobject *dir, struct kobject *targ, const char *name); int __must_check sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp); int __must_check sysfs_create_groups(struct kobject *kobj, const struct attribute_group **groups); int __must_check sysfs_update_groups(struct kobject *kobj, const struct attribute_group **groups);
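/*
 * Illustrative sketch (not part of the original header): defining a static
 * attribute group and registering it on a kobject; "foo_attr" and "my_kobj"
 * are hypothetical.
 *
 *	static struct attribute *my_attrs[] = {
 *		&foo_attr.attr,
 *		NULL,
 *	};
 *	ATTRIBUTE_GROUPS(my);
 *
 *	ret = sysfs_create_group(my_kobj, &my_group);
 *	...
 *	sysfs_remove_group(my_kobj, &my_group);
 */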
int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_remove_groups(struct kobject *kobj, const struct attribute_group **groups); int sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr, const char *group); void sysfs_remove_file_from_group(struct kobject *kobj, const struct attribute *attr, const char *group); int sysfs_merge_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_unmerge_group(struct kobject *kobj, const struct attribute_group *grp); int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name); void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name); int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name); void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); int __must_check sysfs_init(void); static inline void sysfs_enable_ns(struct kernfs_node *kn) { return kernfs_enable_ns(kn); } int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid, kgid_t kgid); int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid); int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ, const char *name, kuid_t kuid, kgid_t kgid); int sysfs_groups_change_owner(struct kobject *kobj, const struct attribute_group **groups, kuid_t kuid, kgid_t kgid); int sysfs_group_change_owner(struct kobject *kobj, const struct attribute_group *groups, kuid_t kuid, kgid_t kgid); __printf(2, 3) int sysfs_emit(char *buf, const char *fmt, ...); __printf(3, 4) int sysfs_emit_at(char *buf, int at, const char *fmt, ...); #else /* CONFIG_SYSFS */ static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) { return 0; } static inline void sysfs_remove_dir(struct kobject *kobj) { } static inline int sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name, const void *new_ns) { return 0; } static inline int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj, const void *new_ns) { return 0; } static inline int sysfs_create_mount_point(struct kobject *parent_kobj, const char *name) { return 0; } static inline void sysfs_remove_mount_point(struct kobject *parent_kobj, const char *name) { } static inline int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) { return 0; } static inline int sysfs_create_files(struct kobject *kobj, const struct attribute * const *attr) { return 0; } static inline int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode) { return 0; } static inline struct kernfs_node * sysfs_break_active_protection(struct kobject *kobj, const struct attribute *attr) { return NULL; } static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn) { } static inline void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) { } static inline bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr) { return false; } static inline void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr) { } static inline int sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr) { return 0; } static inline void sysfs_remove_bin_file(struct kobject *kobj, const struct bin_attribute *attr) { } static inline 
int sysfs_create_link(struct kobject *kobj, struct kobject *target, const char *name) { return 0; } static inline int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target, const char *name) { return 0; } static inline void sysfs_remove_link(struct kobject *kobj, const char *name) { } static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t, const char *old_name, const char *new_name, const void *ns) { return 0; } static inline void sysfs_delete_link(struct kobject *k, struct kobject *t, const char *name) { } static inline int sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline int sysfs_create_groups(struct kobject *kobj, const struct attribute_group **groups) { return 0; } static inline int sysfs_update_groups(struct kobject *kobj, const struct attribute_group **groups) { return 0; } static inline int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline void sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp) { } static inline void sysfs_remove_groups(struct kobject *kobj, const struct attribute_group **groups) { } static inline int sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr, const char *group) { return 0; } static inline void sysfs_remove_file_from_group(struct kobject *kobj, const struct attribute *attr, const char *group) { } static inline int sysfs_merge_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline void sysfs_unmerge_group(struct kobject *kobj, const struct attribute_group *grp) { } static inline int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name) { return 0; } static inline void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name) { } static inline int __compat_only_sysfs_link_entry_to_kobj( struct kobject *kobj, struct kobject *target_kobj, const char *target_name) { return 0; } static inline void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) { } static inline int __must_check sysfs_init(void) { return 0; } static inline void sysfs_enable_ns(struct kernfs_node *kn) { } static inline int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ, const char *name, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_groups_change_owner(struct kobject *kobj, const struct attribute_group **groups, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_group_change_owner(struct kobject *kobj, const struct attribute_group *groups, kuid_t kuid, kgid_t kgid) { return 0; } __printf(2, 3) static inline int sysfs_emit(char *buf, const char *fmt, ...) { return 0; } __printf(3, 4) static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...) 
{ return 0; } #endif /* CONFIG_SYSFS */ static inline int __must_check sysfs_create_file(struct kobject *kobj, const struct attribute *attr) { return sysfs_create_file_ns(kobj, attr, NULL); } static inline void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr) { sysfs_remove_file_ns(kobj, attr, NULL); } static inline int sysfs_rename_link(struct kobject *kobj, struct kobject *target, const char *old_name, const char *new_name) { return sysfs_rename_link_ns(kobj, target, old_name, new_name, NULL); } static inline void sysfs_notify_dirent(struct kernfs_node *kn) { kernfs_notify(kn); } static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent, const char *name) { return kernfs_find_and_get(parent, name); } static inline struct kernfs_node *sysfs_get(struct kernfs_node *kn) { kernfs_get(kn); return kn; } static inline void sysfs_put(struct kernfs_node *kn) { kernfs_put(kn); } #endif /* _SYSFS_H_ */ uidgid.h 0000644 00000010113 14722070374 0006164 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UIDGID_H #define _LINUX_UIDGID_H /* * A set of types for the internal kernel types representing uids and gids. * * The types defined in this header allow distinguishing which uids and gids in * the kernel are values used by userspace and which uid and gid values are * the internal kernel values. With the addition of user namespaces the values * can be different. Using the type system makes it possible for the compiler * to detect when we overlook these differences. * */ #include <linux/types.h> #include <linux/highuid.h> struct user_namespace; extern struct user_namespace init_user_ns; typedef struct { uid_t val; } kuid_t; typedef struct { gid_t val; } kgid_t; #define KUIDT_INIT(value) (kuid_t){ value } #define KGIDT_INIT(value) (kgid_t){ value } #ifdef CONFIG_MULTIUSER static inline uid_t __kuid_val(kuid_t uid) { return uid.val; } static inline gid_t __kgid_val(kgid_t gid) { return gid.val; } #else static inline uid_t __kuid_val(kuid_t uid) { return 0; } static inline gid_t __kgid_val(kgid_t gid) { return 0; } #endif #define GLOBAL_ROOT_UID KUIDT_INIT(0) #define GLOBAL_ROOT_GID KGIDT_INIT(0) #define INVALID_UID KUIDT_INIT(-1) #define INVALID_GID KGIDT_INIT(-1) static inline bool uid_eq(kuid_t left, kuid_t right) { return __kuid_val(left) == __kuid_val(right); } static inline bool gid_eq(kgid_t left, kgid_t right) { return __kgid_val(left) == __kgid_val(right); } static inline bool uid_gt(kuid_t left, kuid_t right) { return __kuid_val(left) > __kuid_val(right); } static inline bool gid_gt(kgid_t left, kgid_t right) { return __kgid_val(left) > __kgid_val(right); } static inline bool uid_gte(kuid_t left, kuid_t right) { return __kuid_val(left) >= __kuid_val(right); } static inline bool gid_gte(kgid_t left, kgid_t right) { return __kgid_val(left) >= __kgid_val(right); } static inline bool uid_lt(kuid_t left, kuid_t right) { return __kuid_val(left) < __kuid_val(right); } static inline bool gid_lt(kgid_t left, kgid_t right) { return __kgid_val(left) < __kgid_val(right); } static inline bool uid_lte(kuid_t left, kuid_t right) { return __kuid_val(left) <= __kuid_val(right); } static inline bool gid_lte(kgid_t left, kgid_t right) { return __kgid_val(left) <= __kgid_val(right); } static inline bool uid_valid(kuid_t uid) { return __kuid_val(uid) != (uid_t) -1; } static inline bool gid_valid(kgid_t gid) { return __kgid_val(gid) != (gid_t) -1; } #ifdef CONFIG_USER_NS extern kuid_t make_kuid(struct user_namespace *from, uid_t uid); extern kgid_t 
make_kgid(struct user_namespace *from, gid_t gid); extern uid_t from_kuid(struct user_namespace *to, kuid_t uid); extern gid_t from_kgid(struct user_namespace *to, kgid_t gid); extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid); extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid); static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid) { return from_kuid(ns, uid) != (uid_t) -1; } static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) { return from_kgid(ns, gid) != (gid_t) -1; } #else static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid) { return KUIDT_INIT(uid); } static inline kgid_t make_kgid(struct user_namespace *from, gid_t gid) { return KGIDT_INIT(gid); } static inline uid_t from_kuid(struct user_namespace *to, kuid_t kuid) { return __kuid_val(kuid); } static inline gid_t from_kgid(struct user_namespace *to, kgid_t kgid) { return __kgid_val(kgid); } static inline uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid) { uid_t uid = from_kuid(to, kuid); if (uid == (uid_t)-1) uid = overflowuid; return uid; } static inline gid_t from_kgid_munged(struct user_namespace *to, kgid_t kgid) { gid_t gid = from_kgid(to, kgid); if (gid == (gid_t)-1) gid = overflowgid; return gid; } static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid) { return uid_valid(uid); } static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) { return gid_valid(gid); } #endif /* CONFIG_USER_NS */ #endif /* _LINUX_UIDGID_H */ mlx5/fs.h 0000644 00000016122 14722070374 0006222 0 ustar 00 /* * Copyright (c) 2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifndef _MLX5_FS_ #define _MLX5_FS_ #include <linux/mlx5/driver.h> #include <linux/mlx5/mlx5_ifc.h> #define MLX5_FS_DEFAULT_FLOW_TAG 0x0 enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18, }; enum { MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0), MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1), MLX5_FLOW_TABLE_TERMINATION = BIT(2), }; #define LEFTOVERS_RULE_NUM 2 static inline void build_leftovers_ft_param(int *priority, int *n_ent, int *n_grp) { *priority = 0; /* Priority of leftovers_prio-0 */ *n_ent = LEFTOVERS_RULE_NUM; *n_grp = LEFTOVERS_RULE_NUM; } enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_BYPASS, MLX5_FLOW_NAMESPACE_LAG, MLX5_FLOW_NAMESPACE_OFFLOADS, MLX5_FLOW_NAMESPACE_ETHTOOL, MLX5_FLOW_NAMESPACE_KERNEL, MLX5_FLOW_NAMESPACE_LEFTOVERS, MLX5_FLOW_NAMESPACE_ANCHOR, MLX5_FLOW_NAMESPACE_FDB, MLX5_FLOW_NAMESPACE_ESW_EGRESS, MLX5_FLOW_NAMESPACE_ESW_INGRESS, MLX5_FLOW_NAMESPACE_SNIFFER_RX, MLX5_FLOW_NAMESPACE_SNIFFER_TX, MLX5_FLOW_NAMESPACE_EGRESS, MLX5_FLOW_NAMESPACE_RDMA_RX, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, }; enum { FDB_BYPASS_PATH, FDB_FAST_PATH, FDB_SLOW_PATH, }; struct mlx5_pkt_reformat; struct mlx5_modify_hdr; struct mlx5_flow_table; struct mlx5_flow_group; struct mlx5_flow_namespace; struct mlx5_flow_handle; enum { FLOW_CONTEXT_HAS_TAG = BIT(0), }; struct mlx5_flow_context { u32 flags; u32 flow_tag; u32 flow_source; }; struct mlx5_flow_spec { u8 match_criteria_enable; u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; u32 match_value[MLX5_ST_SZ_DW(fte_match_param)]; struct mlx5_flow_context flow_context; }; enum { MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0), MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1), }; struct mlx5_flow_destination { enum mlx5_flow_destination_type type; union { u32 tir_num; u32 ft_num; struct mlx5_flow_table *ft; u32 counter_id; struct { u16 num; u16 vhca_id; struct mlx5_pkt_reformat *pkt_reformat; u8 flags; } vport; }; }; struct mod_hdr_tbl { struct mutex lock; /* protects hlist */ DECLARE_HASHTABLE(hlist, 8); }; struct mlx5_flow_namespace * mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n); struct mlx5_flow_namespace * mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type); struct mlx5_flow_namespace * mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type, int vport); struct mlx5_flow_table * mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, int max_num_groups, u32 level, u32 flags); struct mlx5_flow_table_attr { int prio; int max_fte; u32 level; u32 flags; }; struct mlx5_flow_table * mlx5_create_flow_table(struct mlx5_flow_namespace *ns, struct mlx5_flow_table_attr *ft_attr); struct mlx5_flow_table * mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, u32 level, u16 vport); struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( struct mlx5_flow_namespace *ns, int prio, u32 level); int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); /* inbox should be set with the following values: * start_flow_index * end_flow_index * match_criteria_enable * match_criteria */ struct mlx5_flow_group * mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); struct mlx5_fs_vlan { u16 ethtype; u16 vid; u8 prio; }; #define MLX5_FS_VLAN_DEPTH 2 enum { FLOW_ACT_NO_APPEND = BIT(0), }; struct mlx5_flow_act { u32 action; struct mlx5_modify_hdr 
*modify_hdr; struct mlx5_pkt_reformat *pkt_reformat; uintptr_t esp_id; u32 flags; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct ib_counters *counters; }; #define MLX5_DECLARE_FLOW_ACT(name) \ struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\ .flags = 0, } /* Single destination per rule. * Group ID is implied by the match criteria. */ struct mlx5_flow_handle * mlx5_add_flow_rules(struct mlx5_flow_table *ft, const struct mlx5_flow_spec *spec, struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest, int num_dest); void mlx5_del_flow_rules(struct mlx5_flow_handle *fr); int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, struct mlx5_flow_destination *new_dest, struct mlx5_flow_destination *old_dest); struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse); int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, u64 *packets, u64 *bytes); u32 mlx5_fc_id(struct mlx5_fc *counter); int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, u8 ns_type, u8 num_actions, void *modify_actions); void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, struct mlx5_modify_hdr *modify_hdr); struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, int reformat_type, size_t size, void *reformat_data, enum mlx5_flow_namespace_type ns_type); void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, struct mlx5_pkt_reformat *reformat); #endif mlx5/cq.h 0000644 00000013301 14722070374 0006211 0 ustar 00 /* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifndef MLX5_CORE_CQ_H #define MLX5_CORE_CQ_H #include <rdma/ib_verbs.h> #include <linux/mlx5/driver.h> #include <linux/refcount.h> struct mlx5_core_cq { u32 cqn; int cqe_sz; __be32 *set_ci_db; __be32 *arm_db; struct mlx5_uars_page *uar; refcount_t refcount; struct completion free; unsigned vector; unsigned int irqn; void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe); void (*event) (struct mlx5_core_cq *, enum mlx5_event); u32 cons_index; unsigned arm_sn; struct mlx5_rsc_debug *dbg; int pid; struct { struct list_head list; void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe); void *priv; } tasklet_ctx; int reset_notify_added; struct list_head reset_notify; struct mlx5_eq_comp *eq; u16 uid; }; enum { MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01, MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02, MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04, MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05, MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06, MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10, MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11, MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13, MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14, MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15, MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16, MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, }; enum { MLX5_CQE_OWNER_MASK = 1, MLX5_CQE_REQ = 0, MLX5_CQE_RESP_WR_IMM = 1, MLX5_CQE_RESP_SEND = 2, MLX5_CQE_RESP_SEND_IMM = 3, MLX5_CQE_RESP_SEND_INV = 4, MLX5_CQE_RESIZE_CQ = 5, MLX5_CQE_SIG_ERR = 12, MLX5_CQE_REQ_ERR = 13, MLX5_CQE_RESP_ERR = 14, MLX5_CQE_INVALID = 15, }; enum { MLX5_CQ_MODIFY_PERIOD = 1 << 0, MLX5_CQ_MODIFY_COUNT = 1 << 1, MLX5_CQ_MODIFY_OVERRUN = 1 << 2, }; enum { MLX5_CQ_OPMOD_RESIZE = 1, MLX5_MODIFY_CQ_MASK_LOG_SIZE = 1 << 0, MLX5_MODIFY_CQ_MASK_PG_OFFSET = 1 << 1, MLX5_MODIFY_CQ_MASK_PG_SIZE = 1 << 2, }; struct mlx5_cq_modify_params { int type; union { struct { u32 page_offset; u8 log_cq_size; } resize; struct { } moder; struct { } mapping; } params; }; enum { CQE_STRIDE_64 = 0, CQE_STRIDE_128 = 1, CQE_STRIDE_128_PAD = 2, }; #define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1) #define MLX5_MAX_CQ_COUNT (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1) static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en) { return padding_128_en ? CQE_STRIDE_128_PAD : size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128; } static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) { *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); } enum { MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24, MLX5_CQ_DB_REQ_NOT = 0 << 24 }; static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, void __iomem *uar_page, u32 cons_index) { __be32 doorbell[2]; u32 sn; u32 ci; sn = cq->arm_sn & 3; ci = cons_index & 0xffffff; *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); /* Make sure that the doorbell record in host memory is * written before ringing the doorbell via PCI MMIO. 
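 * The wmb() below orders the *cq->arm_db store above against the
 * mlx5_write64() MMIO doorbell write that follows.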
*/ wmb(); doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci); doorbell[1] = cpu_to_be32(cq->cqn); mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL); } static inline void mlx5_cq_hold(struct mlx5_core_cq *cq) { refcount_inc(&cq->refcount); } static inline void mlx5_cq_put(struct mlx5_core_cq *cq) { if (refcount_dec_and_test(&cq->refcount)) complete(&cq->free); } int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *out, int outlen); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen); int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u16 cq_period, u16 cq_max_count); static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev, struct mlx5_err_cqe *err_cqe) { print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe), false); } int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); #endif /* MLX5_CORE_CQ_H */ mlx5/accel.h 0000644 00000010126 14722070374 0006657 0 ustar 00 /* * Copyright (c) 2018 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #ifndef __MLX5_ACCEL_H__ #define __MLX5_ACCEL_H__ #include <linux/mlx5/driver.h> enum mlx5_accel_esp_aes_gcm_keymat_iv_algo { MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ, }; enum mlx5_accel_esp_flags { MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */ MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0, MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1, MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2, }; enum mlx5_accel_esp_action { MLX5_ACCEL_ESP_ACTION_DECRYPT, MLX5_ACCEL_ESP_ACTION_ENCRYPT, }; enum mlx5_accel_esp_keymats { MLX5_ACCEL_ESP_KEYMAT_AES_NONE, MLX5_ACCEL_ESP_KEYMAT_AES_GCM, }; enum mlx5_accel_esp_replay { MLX5_ACCEL_ESP_REPLAY_NONE, MLX5_ACCEL_ESP_REPLAY_BMP, }; struct aes_gcm_keymat { u64 seq_iv; enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo; u32 salt; u32 icv_len; u32 key_len; u32 aes_key[256 / 32]; }; struct mlx5_accel_esp_xfrm_attrs { enum mlx5_accel_esp_action action; u32 esn; u32 spi; u32 seq; u32 tfc_pad; u32 flags; u32 sa_handle; enum mlx5_accel_esp_replay replay_type; union { struct { u32 size; } bmp; } replay; enum mlx5_accel_esp_keymats keymat_type; union { struct aes_gcm_keymat aes_gcm; } keymat; }; struct mlx5_accel_esp_xfrm { struct mlx5_core_dev *mdev; struct mlx5_accel_esp_xfrm_attrs attrs; }; enum { MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0, }; enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1, MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2, MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3, MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4, MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5, MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6, MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, }; #ifdef CONFIG_MLX5_FPGA_IPSEC u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); struct mlx5_accel_esp_xfrm * mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags); void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, const struct mlx5_accel_esp_xfrm_attrs *attrs); #else static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } static inline struct mlx5_accel_esp_xfrm * mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags) { return ERR_PTR(-EOPNOTSUPP); } static inline void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {} static inline int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; } #endif #endif mlx5/port.h 0000644 00000016101 14722070374 0006573 0 ustar 00 /* * Copyright (c) 2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef __MLX5_PORT_H__ #define __MLX5_PORT_H__ #include <linux/mlx5/driver.h> enum mlx5_beacon_duration { MLX5_BEACON_DURATION_OFF = 0x0, MLX5_BEACON_DURATION_INF = 0xffff, }; enum mlx5_module_id { MLX5_MODULE_ID_SFP = 0x3, MLX5_MODULE_ID_QSFP = 0xC, MLX5_MODULE_ID_QSFP_PLUS = 0xD, MLX5_MODULE_ID_QSFP28 = 0x11, }; enum mlx5_an_status { MLX5_AN_UNAVAILABLE = 0, MLX5_AN_COMPLETE = 1, MLX5_AN_FAILED = 2, MLX5_AN_LINK_UP = 3, MLX5_AN_LINK_DOWN = 4, }; #define MLX5_EEPROM_MAX_BYTES 32 #define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff #define MLX5_I2C_ADDR_LOW 0x50 #define MLX5_I2C_ADDR_HIGH 0x51 #define MLX5_EEPROM_PAGE_LENGTH 256 #define MLX5_EEPROM_HIGH_PAGE_LENGTH 128 enum mlx5e_link_mode { MLX5E_1000BASE_CX_SGMII = 0, MLX5E_1000BASE_KX = 1, MLX5E_10GBASE_CX4 = 2, MLX5E_10GBASE_KX4 = 3, MLX5E_10GBASE_KR = 4, MLX5E_20GBASE_KR2 = 5, MLX5E_40GBASE_CR4 = 6, MLX5E_40GBASE_KR4 = 7, MLX5E_56GBASE_R4 = 8, MLX5E_10GBASE_CR = 12, MLX5E_10GBASE_SR = 13, MLX5E_10GBASE_ER = 14, MLX5E_40GBASE_SR4 = 15, MLX5E_40GBASE_LR4 = 16, MLX5E_50GBASE_SR2 = 18, MLX5E_100GBASE_CR4 = 20, MLX5E_100GBASE_SR4 = 21, MLX5E_100GBASE_KR4 = 22, MLX5E_100GBASE_LR4 = 23, MLX5E_100BASE_TX = 24, MLX5E_1000BASE_T = 25, MLX5E_10GBASE_T = 26, MLX5E_25GBASE_CR = 27, MLX5E_25GBASE_KR = 28, MLX5E_25GBASE_SR = 29, MLX5E_50GBASE_CR2 = 30, MLX5E_50GBASE_KR2 = 31, MLX5E_LINK_MODES_NUMBER, }; enum mlx5e_ext_link_mode { MLX5E_SGMII_100M = 0, MLX5E_1000BASE_X_SGMII = 1, MLX5E_5GBASE_R = 3, MLX5E_10GBASE_XFI_XAUI_1 = 4, MLX5E_40GBASE_XLAUI_4_XLPPI_4 = 5, MLX5E_25GAUI_1_25GBASE_CR_KR = 6, MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 = 7, MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8, MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9, MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10, MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12, MLX5E_400GAUI_8 = 15, MLX5E_EXT_LINK_MODES_NUMBER, }; enum mlx5e_connector_type { MLX5E_PORT_UNKNOWN = 0, MLX5E_PORT_NONE = 1, MLX5E_PORT_TP = 2, MLX5E_PORT_AUI = 3, MLX5E_PORT_BNC = 4, MLX5E_PORT_MII = 5, MLX5E_PORT_FIBRE = 6, MLX5E_PORT_DA = 7, MLX5E_PORT_OTHER = 8, MLX5E_CONNECTOR_TYPE_NUMBER, }; #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) #define MLX5_GET_ETH_PROTO(reg, out, ext, field) \ (ext ? 
MLX5_GET(reg, out, ext_##field) : \ MLX5_GET(reg, out, field)) int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, u8 *link_width_oper, u8 local_port); int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, u8 *proto_oper, u8 local_port); void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status); int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, u8 port); int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, u8 *vl_hw_cap, u8 local_port); int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 *rx_pause, u32 *tx_pause); int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx); int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev, u16 stall_critical_watermark, u16 stall_minor_watermark); int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev, u16 *stall_critical_watermark, u16 *stall_minor_watermark); int mlx5_max_tc(struct mlx5_core_dev *mdev); int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, u8 prio, u8 *tc); int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, u8 tc, u8 *tc_group); int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 tc, u8 *bw_pct); int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_unit); int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_unit); int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen); int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen); int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, bool *enabled); int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data); int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state); int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state); int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio); int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio); #endif /* __MLX5_PORT_H__ */ mlx5/eswitch.h 0000644 00000005060 14722070374 0007257 0 ustar 00 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ /* * Copyright (c) 2018 Mellanox Technologies. All rights reserved. 
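/* Editor's note: illustrative sketch (names hypothetical) of the port API
 * declared above: query the device's maximum MTU for a local port and
 * clamp the requested value before programming it. */
static inline int example_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu)
{
	u16 max_mtu;

	mlx5_query_port_max_mtu(dev, &max_mtu, 1);	/* local port 1 */
	return mlx5_set_port_mtu(dev, min(mtu, max_mtu), 1);
}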
*/ #ifndef _MLX5_ESWITCH_ #define _MLX5_ESWITCH_ #include <linux/mlx5/driver.h> #include <net/devlink.h> #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) enum { MLX5_ESWITCH_NONE, MLX5_ESWITCH_LEGACY, MLX5_ESWITCH_OFFLOADS }; enum { REP_ETH, REP_IB, NUM_REP_TYPES, }; enum { REP_UNREGISTERED, REP_REGISTERED, REP_LOADED, }; struct mlx5_eswitch_rep; struct mlx5_eswitch_rep_ops { int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep); void (*unload)(struct mlx5_eswitch_rep *rep); void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); }; struct mlx5_eswitch_rep_data { void *priv; atomic_t state; }; struct mlx5_eswitch_rep { struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES]; u16 vport; u16 vlan; /* Only IB rep is using vport_index */ u16 vport_index; u32 vlan_refcount; }; void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, const struct mlx5_eswitch_rep_ops *ops, u8 rep_type); void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type); void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, u16 vport_num, u8 rep_type); struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, u16 vport_num); void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport_num, u32 sqn); u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); #ifdef CONFIG_MLX5_ESWITCH enum devlink_eswitch_encap_mode mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev); bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, u16 vport_num); u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); #else /* CONFIG_MLX5_ESWITCH */ static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) { return MLX5_ESWITCH_NONE; } static inline enum devlink_eswitch_encap_mode mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) { return DEVLINK_ESWITCH_ENCAP_MODE_NONE; } static inline bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) { return false; }; static inline u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, int vport_num) { return 0; }; #endif /* CONFIG_MLX5_ESWITCH */ #endif mlx5/eq.h 0000644 00000003365 14722070374 0006224 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2018 Mellanox Technologies. */ #ifndef MLX5_CORE_EQ_H #define MLX5_CORE_EQ_H #define MLX5_IRQ_VEC_COMP_BASE 1 #define MLX5_NUM_CMD_EQE (32) #define MLX5_NUM_ASYNC_EQE (0x1000) #define MLX5_NUM_SPARE_EQE (0x80) struct mlx5_eq; struct mlx5_core_dev; struct mlx5_eq_param { u8 irq_index; int nent; u64 mask[4]; }; struct mlx5_eq * mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param); int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, struct notifier_block *nb); void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, struct notifier_block *nb); struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc); void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm); /* The HCA will think the queue has overflowed if we * don't tell it we've been processing events. We * create EQs with MLX5_NUM_SPARE_EQE extra entries, * so we must update our consumer index at * least that often. 
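/* Editor's note: minimal sketch (all example_* names hypothetical) of
 * wiring up the representor callbacks from mlx5/eswitch.h above and
 * registering them for Ethernet representors. */
static int example_rep_load(struct mlx5_core_dev *dev,
			    struct mlx5_eswitch_rep *rep)
{
	rep->rep_data[REP_ETH].priv = NULL;	/* per-representor state */
	return 0;
}

static void example_rep_unload(struct mlx5_eswitch_rep *rep)
{
}

static const struct mlx5_eswitch_rep_ops example_rep_ops = {
	.load   = example_rep_load,
	.unload = example_rep_unload,
};

/* usage: mlx5_eswitch_register_vport_reps(esw, &example_rep_ops, REP_ETH); */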
* * mlx5_eq_update_cc must be called on every EQE @EQ irq handler */ static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc) { if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) { mlx5_eq_update_ci(eq, cc, 0); cc = 0; } return cc; } struct mlx5_nb { struct notifier_block nb; u8 event_type; }; #define mlx5_nb_cof(ptr, type, member) \ (container_of(container_of(ptr, struct mlx5_nb, nb), type, member)) #define MLX5_NB_INIT(name, handler, event) do { \ (name)->nb.notifier_call = handler; \ (name)->event_type = MLX5_EVENT_TYPE_##event; \ } while (0) #endif /* MLX5_CORE_EQ_H */ mlx5/doorbell.h 0000644 00000004337 14722070374 0007421 0 ustar 00 /* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef MLX5_DOORBELL_H #define MLX5_DOORBELL_H #define MLX5_BF_OFFSET 0x800 #define MLX5_CQ_DOORBELL 0x20 /* Assume that we can just write a 64-bit doorbell atomically. s390 * actually doesn't have writeq() but S/390 systems don't even have * PCI so we won't worry about it. * * Note that the write is not atomic on 32-bit systems! In contrast to 64-bit * ones, it requires proper locking. mlx5_write64 doesn't do any locking, so use * it at your own discretion, protected by some kind of lock on 32 bits. * * TODO: use write{q,l}_relaxed() */ static inline void mlx5_write64(__be32 val[2], void __iomem *dest) { #if BITS_PER_LONG == 64 __raw_writeq(*(u64 *)val, dest); #else __raw_writel((__force u32) val[0], dest); __raw_writel((__force u32) val[1], dest + 4); #endif } #endif /* MLX5_DOORBELL_H */ mlx5/driver.h 0000644 00000076003 14722070374 0007111 0 ustar 00 /* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. 
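/* Editor's note: sketch of the consumer-index discipline described in
 * mlx5/eq.h above, modelled on in-tree users of this API: fold the credit
 * count through mlx5_eq_update_cc() so no more than MLX5_NUM_SPARE_EQE
 * entries go unacknowledged, then publish the CI and re-arm once the EQ
 * drains. */
static void example_poll_eq(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	u32 cc = 0;

	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
		/* ... dispatch on eqe->type here ... */
		cc = mlx5_eq_update_cc(eq, ++cc);
	}
	mlx5_eq_update_ci(eq, cc, true);	/* update CI and arm the EQ */
}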
* * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef MLX5_DRIVER_H #define MLX5_DRIVER_H #include <linux/kernel.h> #include <linux/completion.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/spinlock_types.h> #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/xarray.h> #include <linux/workqueue.h> #include <linux/mempool.h> #include <linux/interrupt.h> #include <linux/idr.h> #include <linux/notifier.h> #include <linux/refcount.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> #include <linux/mlx5/eq.h> #include <linux/timecounter.h> #include <linux/ptp_clock_kernel.h> #include <net/devlink.h> #define MLX5_ADEV_NAME "mlx5_core" enum { MLX5_BOARD_ID_LEN = 64, }; enum { /* one minute for the sake of bringup. Generally, commands must always * complete and we may need to increase this timeout value */ MLX5_CMD_TIMEOUT_MSEC = 60 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, }; enum { CMD_OWNER_SW = 0x0, CMD_OWNER_HW = 0x1, CMD_STATUS_SUCCESS = 0, }; enum mlx5_sqp_t { MLX5_SQP_SMI = 0, MLX5_SQP_GSI = 1, MLX5_SQP_IEEE_1588 = 2, MLX5_SQP_SNIFFER = 3, MLX5_SQP_SYNC_UMR = 4, }; enum { MLX5_MAX_PORTS = 2, }; enum { MLX5_ATOMIC_MODE_OFFSET = 16, MLX5_ATOMIC_MODE_IB_COMP = 1, MLX5_ATOMIC_MODE_CX = 2, MLX5_ATOMIC_MODE_8B = 3, MLX5_ATOMIC_MODE_16B = 4, MLX5_ATOMIC_MODE_32B = 5, MLX5_ATOMIC_MODE_64B = 6, MLX5_ATOMIC_MODE_128B = 7, MLX5_ATOMIC_MODE_256B = 8, }; enum { MLX5_REG_QPTS = 0x4002, MLX5_REG_QETCR = 0x4005, MLX5_REG_QTCT = 0x400a, MLX5_REG_QPDPM = 0x4013, MLX5_REG_QCAM = 0x4019, MLX5_REG_DCBX_PARAM = 0x4020, MLX5_REG_DCBX_APP = 0x4021, MLX5_REG_FPGA_CAP = 0x4022, MLX5_REG_FPGA_CTRL = 0x4023, MLX5_REG_FPGA_ACCESS_REG = 0x4024, MLX5_REG_CORE_DUMP = 0x402e, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, MLX5_REG_PAOS = 0x5006, MLX5_REG_PFCC = 0x5007, MLX5_REG_PPCNT = 0x5008, MLX5_REG_PPTB = 0x500b, MLX5_REG_PBMC = 0x500c, MLX5_REG_PMAOS = 0x5012, MLX5_REG_PUDE = 0x5009, MLX5_REG_PMPE = 0x5010, MLX5_REG_PELC = 0x500e, MLX5_REG_PVLC = 0x500f, MLX5_REG_PCMR = 0x5041, MLX5_REG_PMLP = 0x5002, MLX5_REG_PPLM = 0x5023, MLX5_REG_PCAM = 0x507f, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, MLX5_REG_MLCR = 0x902b, MLX5_REG_MTRC_CAP = 0x9040, MLX5_REG_MTRC_CONF = 0x9041, MLX5_REG_MTRC_STDB = 0x9042, MLX5_REG_MTRC_CTRL = 0x9043, MLX5_REG_MPEIN = 0x9050, MLX5_REG_MPCNT = 0x9051, MLX5_REG_MTPPS = 0x9053, MLX5_REG_MTPPSE = 0x9054, MLX5_REG_MPEGC = 0x9056, MLX5_REG_MCQS = 0x9060, MLX5_REG_MCQI = 0x9061, MLX5_REG_MCC = 0x9062, MLX5_REG_MCDA = 0x9063, MLX5_REG_MCAM = 0x907f, }; enum mlx5_qpts_trust_state { MLX5_QPTS_TRUST_PCP = 1, MLX5_QPTS_TRUST_DSCP = 2, }; enum mlx5_dcbx_oper_mode { MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0, MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, }; enum { MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, 
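/* Editor's note: hypothetical sketch of how the MLX5_REG_* ids above are
 * consumed. Port/management registers are read and written through
 * mlx5_core_access_reg() (declared later in this header); the pmtu_reg
 * layout and the MLX5_SET/MLX5_ST_SZ_DW helpers come from mlx5_ifc.h. */
static inline int example_query_pmtu(struct mlx5_core_dev *dev, u8 port,
				     u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {};

	MLX5_SET(pmtu_reg, in, local_port, port);
	return mlx5_core_access_reg(dev, in, sizeof(in), out, outlen,
				    MLX5_REG_PMTU, 0, 0);	/* 0 = read */
}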
MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2, MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3, }; enum mlx5_page_fault_resume_flags { MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0, MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1, MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2, MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7, }; enum dbg_rsc_type { MLX5_DBG_RSC_QP, MLX5_DBG_RSC_EQ, MLX5_DBG_RSC_CQ, }; enum port_state_policy { MLX5_POLICY_DOWN = 0, MLX5_POLICY_UP = 1, MLX5_POLICY_FOLLOW = 2, MLX5_POLICY_INVALID = 0xffffffff }; enum mlx5_coredev_type { MLX5_COREDEV_PF, MLX5_COREDEV_VF }; struct mlx5_field_desc { int i; }; struct mlx5_rsc_debug { struct mlx5_core_dev *dev; void *object; enum dbg_rsc_type type; struct dentry *root; struct mlx5_field_desc fields[0]; }; enum mlx5_dev_event { MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */ MLX5_DEV_EVENT_PORT_AFFINITY = 129, }; enum mlx5_port_status { MLX5_PORT_UP = 1, MLX5_PORT_DOWN = 2, }; struct mlx5_bfreg_info { u32 *sys_pages; int num_low_latency_bfregs; unsigned int *count; /* * protect bfreg allocation data structs */ struct mutex lock; u32 ver; bool lib_uar_4k; u32 num_sys_pages; u32 num_static_sys_pages; u32 total_num_bfregs; u32 num_dyn_bfregs; }; enum mlx5_cmdif_state { MLX5_CMDIF_STATE_UNINITIALIZED, MLX5_CMDIF_STATE_UP, MLX5_CMDIF_STATE_DOWN, }; struct mlx5_cmd_first { __be32 data[4]; }; struct mlx5_cmd_msg { struct list_head list; struct cmd_msg_cache *parent; u32 len; struct mlx5_cmd_first first; struct mlx5_cmd_mailbox *next; }; struct mlx5_cmd_debug { struct dentry *dbg_root; void *in_msg; void *out_msg; u8 status; u16 inlen; u16 outlen; }; struct cmd_msg_cache { /* protect block chain allocations */ spinlock_t lock; struct list_head head; unsigned int max_inbox_size; unsigned int num_ent; }; enum { MLX5_NUM_COMMAND_CACHES = 5, }; struct mlx5_cmd_stats { u64 sum; u64 n; struct dentry *root; /* protect command average calculations */ spinlock_t lock; }; struct mlx5_cmd { struct mlx5_nb nb; enum mlx5_cmdif_state state; void *cmd_alloc_buf; dma_addr_t alloc_dma; int alloc_size; void *cmd_buf; dma_addr_t dma; u16 cmdif_rev; u8 log_sz; u8 log_stride; int max_reg_cmds; int events; u32 __iomem *vector; /* protect command queue allocations */ spinlock_t alloc_lock; /* protect token allocations */ spinlock_t token_lock; u8 token; unsigned long bitmask; char wq_name[MLX5_CMD_WQ_MAX_NAME]; struct workqueue_struct *wq; struct semaphore sem; struct semaphore pages_sem; int mode; u16 allowed_opcode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; struct dma_pool *pool; struct mlx5_cmd_debug dbg; struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; int checksum_disabled; struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; }; struct mlx5_port_caps { int gid_table_len; int pkey_table_len; u8 ext_port_cap; bool has_smi; }; struct mlx5_cmd_mailbox { void *buf; dma_addr_t dma; struct mlx5_cmd_mailbox *next; }; struct mlx5_buf_list { void *buf; dma_addr_t map; }; struct mlx5_frag_buf { struct mlx5_buf_list *frags; int npages; int size; u8 page_shift; }; struct mlx5_frag_buf_ctrl { struct mlx5_buf_list *frags; u32 sz_m1; u16 frag_sz_m1; u16 strides_offset; u8 log_sz; u8 log_stride; u8 log_frag_strides; }; struct mlx5_core_psv { u32 psv_idx; struct psv_layout { u32 pd; u16 syndrome; u16 reserved; u16 bg; u16 app_tag; u32 ref_tag; } psv; }; struct mlx5_core_sig_ctx { struct mlx5_core_psv psv_memory; struct mlx5_core_psv psv_wire; struct ib_sig_err err_item; bool sig_status_checked; bool sig_err_exists; u32 sigerr_count; }; enum { MLX5_MKEY_MR = 1, MLX5_MKEY_MW, 
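/* Editor's note: sketch of the fragmented-buffer indexing that
 * mlx5_frag_buf_ctrl above describes: a queue of 2^log_sz strides of
 * 2^log_stride bytes spread over page-sized fragments. mlx5_init_fbc()
 * and mlx5_frag_buf_get_wqe() are defined later in this header; the
 * sizes below are illustrative. */
static inline void *example_get_stride(struct mlx5_frag_buf *buf,
				       struct mlx5_frag_buf_ctrl *fbc,
				       u32 ix)
{
	/* describe buf as 1024 strides of 64 bytes (log_stride=6, log_sz=10) */
	mlx5_init_fbc(buf->frags, 6, 10, fbc);
	/* translate a stride index into an address in the right fragment */
	return mlx5_frag_buf_get_wqe(fbc, ix);
}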
MLX5_MKEY_INDIRECT_DEVX, }; struct mlx5_core_mkey { u64 iova; u64 size; u32 key; u32 pd; u32 type; }; #define MLX5_24BIT_MASK ((1 << 24) - 1) enum mlx5_res_type { MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ, MLX5_RES_SRQ = 3, MLX5_RES_XSRQ = 4, MLX5_RES_XRQ = 5, MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT, }; struct mlx5_core_rsc_common { enum mlx5_res_type res; refcount_t refcount; struct completion free; }; struct mlx5_uars_page { void __iomem *map; bool wc; u32 index; struct list_head list; unsigned int bfregs; unsigned long *reg_bitmap; /* for non fast path bf regs */ unsigned long *fp_bitmap; unsigned int reg_avail; unsigned int fp_avail; struct kref ref_count; struct mlx5_core_dev *mdev; }; struct mlx5_bfreg_head { /* protect blue flame registers allocations */ struct mutex lock; struct list_head list; }; struct mlx5_bfreg_data { struct mlx5_bfreg_head reg_head; struct mlx5_bfreg_head wc_head; }; struct mlx5_sq_bfreg { void __iomem *map; struct mlx5_uars_page *up; bool wc; u32 index; unsigned int offset; }; struct mlx5_core_health { struct health_buffer __iomem *health; __be32 __iomem *health_counter; struct timer_list timer; u32 prev; int miss_counter; u8 synd; u32 fatal_error; u32 crdump_size; /* wq spinlock to synchronize draining */ spinlock_t wq_lock; struct workqueue_struct *wq; unsigned long flags; struct work_struct fatal_report_work; struct work_struct report_work; struct delayed_work recover_work; struct devlink_health_reporter *fw_reporter; struct devlink_health_reporter *fw_fatal_reporter; }; struct mlx5_qp_table { struct notifier_block nb; /* protect radix tree */ spinlock_t lock; struct radix_tree_root tree; }; struct mlx5_vf_context { int enabled; u64 port_guid; u64 node_guid; enum port_state_policy policy; }; struct mlx5_core_sriov { struct mlx5_vf_context *vfs_ctx; int num_vfs; u16 max_vfs; }; struct mlx5_fc_pool { struct mlx5_core_dev *dev; struct mutex pool_lock; /* protects pool lists */ struct list_head fully_used; struct list_head partially_used; struct list_head unused; int available_fcs; int used_fcs; int threshold; }; struct mlx5_fc_stats { spinlock_t counters_idr_lock; /* protects counters_idr */ struct idr counters_idr; struct list_head counters; struct llist_head addlist; struct llist_head dellist; struct workqueue_struct *wq; struct delayed_work work; unsigned long next_query; unsigned long sampling_interval; /* jiffies */ u32 *bulk_query_out; struct mlx5_fc_pool fc_pool; }; struct mlx5_events; struct mlx5_mpfs; struct mlx5_eswitch; struct mlx5_lag; struct mlx5_devcom; struct mlx5_eq_table; struct mlx5_irq_table; struct mlx5_rate_limit { u32 rate; u32 max_burst_sz; u16 typical_pkt_sz; }; struct mlx5_rl_entry { struct mlx5_rate_limit rl; u16 index; u16 refcount; }; struct mlx5_rl_table { /* protect rate limit table */ struct mutex rl_lock; u16 max_size; u32 max_rate; u32 min_rate; struct mlx5_rl_entry *rl_entry; }; struct mlx5_core_roce { struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct mlx5_flow_handle *allow_rule; }; struct mlx5_priv { /* IRQ table valid only for real pci devices PF or VF */ struct mlx5_irq_table *irq_table; struct mlx5_eq_table *eq_table; /* pages stuff */ struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; struct rb_root page_root; int fw_pages; atomic_t reg_pages; struct list_head free_list; int vfs_pages; int peer_pf_pages; struct mlx5_core_health health; /* start: qp stuff */ struct mlx5_qp_table qp_table; struct dentry *qp_debugfs; struct dentry
*eq_debugfs; struct dentry *cq_debugfs; struct dentry *cmdif_debugfs; /* end: qp stuff */ struct xarray mkey_table; /* start: alloc stuff */ /* protect buffer allocation according to numa node */ struct mutex alloc_mutex; int numa_node; struct mutex pgdir_mutex; struct list_head pgdir_list; /* end: alloc stuff */ struct dentry *dbg_root; /* protect mkey key part */ spinlock_t mkey_lock; u8 mkey_key; struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; struct mlx5_events *events; struct mlx5_flow_steering *steering; struct mlx5_mpfs *mpfs; struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; struct mlx5_lag *lag; struct mlx5_devcom *devcom; struct mlx5_core_roce roce; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; struct mlx5_bfreg_data bfregs; struct mlx5_uars_page *uar; }; enum mlx5_device_state { MLX5_DEVICE_STATE_UNINITIALIZED, MLX5_DEVICE_STATE_UP, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; enum mlx5_interface_state { MLX5_INTERFACE_STATE_UP = BIT(0), }; enum mlx5_pci_status { MLX5_PCI_STATUS_DISABLED, MLX5_PCI_STATUS_ENABLED, }; enum mlx5_pagefault_type_flags { MLX5_PFAULT_REQUESTOR = 1 << 0, MLX5_PFAULT_WRITE = 1 << 1, MLX5_PFAULT_RDMA = 1 << 2, }; struct mlx5_td { /* protects tirs list changes while tirs refresh */ struct mutex list_lock; struct list_head tirs_list; u32 tdn; }; struct mlx5e_resources { u32 pdn; struct mlx5_td td; struct mlx5_core_mkey mkey; struct mlx5_sq_bfreg bfreg; }; enum mlx5_sw_icm_type { MLX5_SW_ICM_TYPE_STEERING, MLX5_SW_ICM_TYPE_HEADER_MODIFY, }; #define MLX5_MAX_RESERVED_GIDS 8 struct mlx5_rsvd_gids { unsigned int start; unsigned int count; struct ida ida; }; #define MAX_PIN_NUM 8 struct mlx5_pps { u8 pin_caps[MAX_PIN_NUM]; struct work_struct out_work; u64 start[MAX_PIN_NUM]; u8 enabled; }; struct mlx5_clock { struct mlx5_core_dev *mdev; struct mlx5_nb pps_nb; seqlock_t lock; struct cyclecounter cycles; struct timecounter tc; struct hwtstamp_config hwtstamp_config; u32 nominal_c_mult; unsigned long overflow_period; struct delayed_work overflow_work; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; struct mlx5_pps pps_info; }; struct mlx5_dm; struct mlx5_fw_tracer; struct mlx5_vxlan; struct mlx5_geneve; struct mlx5_hv_vhca; #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) struct mlx5_core_dev { struct device *device; enum mlx5_coredev_type coredev_type; struct pci_dev *pdev; /* sync pci state */ struct mutex pci_status_mutex; enum mlx5_pci_status pci_status; u8 rev_id; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; struct { u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; u8 embedded_cpu; } caps; u64 sys_image_guid; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; phys_addr_t bar_addr; enum mlx5_device_state state; /* sync interface state */ struct mutex intf_state_mutex; unsigned long intf_state; struct mlx5_priv priv; struct mlx5_profile *profile; atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; struct mlx5_dm *dm; struct mlx5_vxlan *vxlan; struct mlx5_geneve *geneve; struct { struct mlx5_rsvd_gids reserved_gids; u32 roce_en; } roce; #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; #endif struct mlx5_clock
clock; struct mlx5_ib_clock_info *clock_info; struct mlx5_fw_tracer *tracer; u32 vsc_addr; struct mlx5_hv_vhca *hv_vhca; }; struct mlx5_db { __be32 *db; union { struct mlx5_db_pgdir *pgdir; struct mlx5_ib_user_db_page *user_page; } u; dma_addr_t dma; int index; }; enum { MLX5_COMP_EQ_SIZE = 1024, }; enum { MLX5_PTYS_IB = 1 << 0, MLX5_PTYS_EN = 1 << 2, }; typedef void (*mlx5_cmd_cbk_t)(int status, void *context); enum { MLX5_CMD_ENT_STATE_PENDING_COMP, }; struct mlx5_cmd_work_ent { unsigned long state; struct mlx5_cmd_msg *in; struct mlx5_cmd_msg *out; void *uout; int uout_size; mlx5_cmd_cbk_t callback; struct delayed_work cb_timeout_work; void *context; int idx; struct completion handling; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; struct mlx5_cmd_layout *lay; int ret; int page_queue; u8 status; u8 token; u64 ts1; u64 ts2; u16 op; bool polling; /* Track the max comp handlers */ refcount_t refcnt; }; struct mlx5_pas { u64 pa; u8 log_sz; }; enum phy_port_state { MLX5_AAA_111 }; struct mlx5_hca_vport_context { u32 field_select; bool sm_virt_aware; bool has_smi; bool has_raw; enum port_state_policy policy; enum phy_port_state phys_state; enum ib_port_state vport_state; u8 port_physical_state; u64 sys_image_guid; u64 port_guid; u64 node_guid; u32 cap_mask1; u32 cap_mask1_perm; u16 cap_mask2; u16 cap_mask2_perm; u16 lid; u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */ u8 lmc; u8 subnet_timeout; u16 sm_lid; u8 sm_sl; u16 qkey_violation_counter; u16 pkey_violation_counter; bool grh_required; }; static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset) { return buf->frags->buf + offset; } #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) { return pci_get_drvdata(pdev); } extern struct dentry *mlx5_debugfs_root; static inline u16 fw_rev_maj(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->fw_rev) & 0xffff; } static inline u16 fw_rev_min(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->fw_rev) >> 16; } static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; } static inline u16 cmdif_rev(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; } static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; } static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags, u8 log_stride, u8 log_sz, u16 strides_offset, struct mlx5_frag_buf_ctrl *fbc) { fbc->frags = frags; fbc->log_stride = log_stride; fbc->log_sz = log_sz; fbc->sz_m1 = (1 << fbc->log_sz) - 1; fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; fbc->strides_offset = strides_offset; } static inline void mlx5_init_fbc(struct mlx5_buf_list *frags, u8 log_stride, u8 log_sz, struct mlx5_frag_buf_ctrl *fbc) { mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc); } static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, u32 ix) { unsigned int frag; ix += fbc->strides_offset; frag = ix >> fbc->log_frag_strides; return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); } static inline u32 mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) { u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; return min_t(u32, 
last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); } enum { CMD_ALLOWED_OPCODE_ALL, }; int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_set_state(struct mlx5_core_dev *dev, enum mlx5_cmdif_state cmdif_state); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); struct mlx5_async_ctx { struct mlx5_core_dev *dev; atomic_t num_inflight; struct completion inflight_done; }; struct mlx5_async_work; typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); struct mlx5_async_work { struct mlx5_async_ctx *ctx; mlx5_async_cbk_t user_callback; }; void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, struct mlx5_async_ctx *ctx); void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx); int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, void *out, int out_size, mlx5_async_cbk_t callback, struct mlx5_async_work *work); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); void mlx5_health_flush(struct mlx5_core_dev *dev); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); void mlx5_init_mkey_table(struct mlx5_core_dev *dev); void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, struct mlx5_async_ctx *async_ctx, u32 *in, int inlen, u32 *out, int outlen, mlx5_async_cbk_t callback, struct mlx5_async_work *context); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *in, int inlen); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *out, int outlen); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); void mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct 
mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages, bool ec_function); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); const char *mlx5_command_str(int command); void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *odp_caps); int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out, size_t sz); int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, struct mlx5_rate_limit *rl); void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl); bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, struct mlx5_rate_limit *rl_1); int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, bool map_wc, bool fast_path); void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev); struct cpumask * mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector); unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, u8 roce_version, u8 roce_l3_type, const u8 *gid, const u8 *mac, bool vlan, u16 vlan_id, u8 port_num); static inline int fw_initializing(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->initializing) >> 31; } static inline u32 mlx5_mkey_to_idx(u32 mkey) { return mkey >> 8; } static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) { return mkey_idx << 8; } static inline u8 mlx5_mkey_variant(u32 mkey) { return mkey & 0xff; } enum { MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, }; enum { MR_CACHE_LAST_STD_ENTRY = 20, MLX5_IMR_MTT_CACHE_ENTRY, MLX5_IMR_KSM_CACHE_ENTRY, MAX_MR_CACHE_ENTRIES }; enum { MLX5_INTERFACE_PROTOCOL_IB = 0, MLX5_INTERFACE_PROTOCOL_ETH = 1, }; struct mlx5_interface { void * (*add)(struct mlx5_core_dev *dev); void (*remove)(struct mlx5_core_dev *dev, void *context); int (*attach)(struct mlx5_core_dev *dev, void *context); void 
(*detach)(struct mlx5_core_dev *dev, void *context); int protocol; struct list_head list; }; int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, size_t *offsets); struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id); int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, u64 length, u16 uid, phys_addr_t addr, u32 obj_id); #ifdef CONFIG_MLX5_CORE_IPOIB struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, struct ib_device *ibdev, const char *name, void (*setup)(struct net_device *)); #endif /* CONFIG_MLX5_CORE_IPOIB */ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, struct ib_device *device, struct rdma_netdev_alloc_params *params); struct mlx5_profile { u64 mask; u8 log_max_qp; struct { int size; int limit; } mr_cache[MAX_MR_CACHE_ENTRIES]; }; enum { MLX5_PCI_DEV_IS_VF = 1 << 0, }; static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev) { return dev->coredev_type == MLX5_COREDEV_PF; } static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) { return dev->caps.embedded_cpu; } static inline bool mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev) { return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager); } static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev) { return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists); } static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev) { return dev->priv.sriov.max_vfs; } static inline int mlx5_get_gid_table_len(u16 param) { if (param > 4) { pr_warn("gid table length is zero\n"); return 0; } return 8 * (1 << param); } static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) { return !!(dev->priv.rl_table.max_size); } static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev) { return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) && MLX5_CAP_GEN(dev, num_vhca_ports) <= 1; } static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev) { return MLX5_CAP_GEN(dev, num_vhca_ports) > 1; } static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev) { return mlx5_core_is_mp_slave(dev) || mlx5_core_is_mp_master(dev); } static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev) { if (!mlx5_core_mp_enabled(dev)) return 1; return MLX5_CAP_GEN(dev, native_port_num); } enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; static 
inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev) { struct devlink *devlink = priv_to_devlink(dev); union devlink_param_value val; devlink_param_driverinit_value_get(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, &val); return val.vbool; } #endif /* MLX5_DRIVER_H */ mlx5/fs_helpers.h 0000644 00000010507 14722070374 0007745 0 ustar 00 /* * Copyright (c) 2018, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef _MLX5_FS_HELPERS_ #define _MLX5_FS_HELPERS_ #include <linux/mlx5/mlx5_ifc.h> #define MLX5_FS_IPV4_VERSION 4 #define MLX5_FS_IPV6_VERSION 6 static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c) { void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); } static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c, const u32 *match_v, u8 match) { const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers); const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers); return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff && MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match; } static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c, const u32 *match_v) { return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP); } static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c, const u32 *match_v) { return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP); } static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c) { void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni); } static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev, const u32 *match_c, const u32 *match_v, int version) { int match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.outer_ip_version); const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers); const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers); if (!match_ipv) { u16 ethertype; switch (version) { case MLX5_FS_IPV4_VERSION: ethertype = ETH_P_IP; break; case MLX5_FS_IPV6_VERSION: ethertype = ETH_P_IPV6; break; default: return false; } return 
MLX5_GET(fte_match_set_lyr_2_4, headers_c, ethertype) == 0xffff && MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype) == ethertype; } return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0xf && MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version) == version; } static inline bool mlx5_fs_is_outer_ipv4_flow(struct mlx5_core_dev *mdev, const u32 *match_c, const u32 *match_v) { return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v, MLX5_FS_IPV4_VERSION); } static inline bool mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c, const u32 *match_v) { return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v, MLX5_FS_IPV6_VERSION); } static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c) { void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); } #endif mlx5/mlx5_ifc.h 0000644 00000732025 14722070374 0007327 0 ustar 00 /* * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
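/* Editor's note: illustrative composition (name hypothetical) of the
 * match-space predicates above: classify a flow-table entry as outer
 * IPv4 + UDP from its match criteria (match_c) and values (match_v). */
static inline bool example_is_outer_ipv4_udp(struct mlx5_core_dev *mdev,
					     const u32 *match_c,
					     const u32 *match_v)
{
	return mlx5_fs_is_outer_ipv4_flow(mdev, match_c, match_v) &&
	       mlx5_fs_is_outer_udp_flow(match_c, match_v);
}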
*/ #ifndef MLX5_IFC_H #define MLX5_IFC_H #include "mlx5_ifc_fpga.h" enum { MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0, MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1, MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2, MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3, MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13, MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14, MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c, MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d, MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4, MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5, MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7, MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc, MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10, MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11, MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12, MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8, MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9, MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19, MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a, MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b, MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f, MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa, MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb, MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20, MLX5_EVENT_TYPE_CODING_FPGA_QP_ERROR = 0x21 }; enum { MLX5_MODIFY_TIR_BITMASK_LRO = 0x0, MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1, MLX5_MODIFY_TIR_BITMASK_HASH = 0x2, MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3 }; enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, }; enum { MLX5_SHARED_RESOURCE_UID = 0xffff, }; enum { MLX5_OBJ_TYPE_SW_ICM = 0x0008, }; enum { MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM), MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11), }; enum { MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, MLX5_OBJ_TYPE_MKEY = 0xff01, MLX5_OBJ_TYPE_QP = 0xff02, MLX5_OBJ_TYPE_PSV = 0xff03, MLX5_OBJ_TYPE_RMP = 0xff04, MLX5_OBJ_TYPE_XRC_SRQ = 0xff05, MLX5_OBJ_TYPE_RQ = 0xff06, MLX5_OBJ_TYPE_SQ = 0xff07, MLX5_OBJ_TYPE_TIR = 0xff08, MLX5_OBJ_TYPE_TIS = 0xff09, MLX5_OBJ_TYPE_DCT = 0xff0a, MLX5_OBJ_TYPE_XRQ = 0xff0b, MLX5_OBJ_TYPE_RQT = 0xff0e, MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f, MLX5_OBJ_TYPE_CQ = 0xff10, }; enum { MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, MLX5_CMD_OP_QUERY_ADAPTER = 0x101, MLX5_CMD_OP_INIT_HCA = 0x102, MLX5_CMD_OP_TEARDOWN_HCA = 0x103, MLX5_CMD_OP_ENABLE_HCA = 0x104, MLX5_CMD_OP_DISABLE_HCA = 0x105, MLX5_CMD_OP_QUERY_PAGES = 0x107, MLX5_CMD_OP_MANAGE_PAGES = 0x108, MLX5_CMD_OP_SET_HCA_CAP = 0x109, MLX5_CMD_OP_QUERY_ISSI = 0x10a, MLX5_CMD_OP_SET_ISSI = 0x10b, MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111, MLX5_CMD_OP_ALLOC_SF = 0x113, MLX5_CMD_OP_DEALLOC_SF = 0x114, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, MLX5_CMD_OP_ALLOC_MEMIC = 0x205, MLX5_CMD_OP_DEALLOC_MEMIC = 0x206, MLX5_CMD_OP_CREATE_EQ = 0x301, MLX5_CMD_OP_DESTROY_EQ = 0x302, MLX5_CMD_OP_QUERY_EQ = 0x303, MLX5_CMD_OP_GEN_EQE = 0x304, MLX5_CMD_OP_CREATE_CQ = 0x400, MLX5_CMD_OP_DESTROY_CQ = 0x401, MLX5_CMD_OP_QUERY_CQ = 0x402, MLX5_CMD_OP_MODIFY_CQ = 0x403, MLX5_CMD_OP_CREATE_QP = 0x500, MLX5_CMD_OP_DESTROY_QP = 0x501, MLX5_CMD_OP_RST2INIT_QP = 0x502, MLX5_CMD_OP_INIT2RTR_QP = 0x503, 
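/* Editor's note: hypothetical sketch of how these opcodes are consumed.
 * A command is built with MLX5_SET() on its mlx5_ifc layout (the
 * enable_hca_in/out layouts are defined elsewhere in this file) and
 * posted through mlx5_cmd_exec() from mlx5/driver.h. */
static inline int example_enable_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}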
MLX5_CMD_OP_RTR2RTS_QP = 0x504, MLX5_CMD_OP_RTS2RTS_QP = 0x505, MLX5_CMD_OP_SQERR2RTS_QP = 0x506, MLX5_CMD_OP_2ERR_QP = 0x507, MLX5_CMD_OP_2RST_QP = 0x50a, MLX5_CMD_OP_QUERY_QP = 0x50b, MLX5_CMD_OP_SQD_RTS_QP = 0x50c, MLX5_CMD_OP_INIT2INIT_QP = 0x50e, MLX5_CMD_OP_CREATE_PSV = 0x600, MLX5_CMD_OP_DESTROY_PSV = 0x601, MLX5_CMD_OP_CREATE_SRQ = 0x700, MLX5_CMD_OP_DESTROY_SRQ = 0x701, MLX5_CMD_OP_QUERY_SRQ = 0x702, MLX5_CMD_OP_ARM_RQ = 0x703, MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705, MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706, MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707, MLX5_CMD_OP_ARM_XRC_SRQ = 0x708, MLX5_CMD_OP_CREATE_DCT = 0x710, MLX5_CMD_OP_DESTROY_DCT = 0x711, MLX5_CMD_OP_DRAIN_DCT = 0x712, MLX5_CMD_OP_QUERY_DCT = 0x713, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, MLX5_CMD_OP_CREATE_XRQ = 0x717, MLX5_CMD_OP_DESTROY_XRQ = 0x718, MLX5_CMD_OP_QUERY_XRQ = 0x719, MLX5_CMD_OP_ARM_XRQ = 0x71a, MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725, MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726, MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729, MLX5_CMD_OP_MODIFY_XRQ = 0x72a, MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740, MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760, MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763, MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f, MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, MLX5_CMD_OP_SET_MONITOR_COUNTER = 0x774, MLX5_CMD_OP_ARM_MONITOR_COUNTER = 0x775, MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780, MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783, MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784, MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785, MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786, MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787, MLX5_CMD_OP_ALLOC_PD = 0x800, MLX5_CMD_OP_DEALLOC_PD = 0x801, MLX5_CMD_OP_ALLOC_UAR = 0x802, MLX5_CMD_OP_DEALLOC_UAR = 0x803, MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, MLX5_CMD_OP_ACCESS_REG = 0x805, MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, MLX5_CMD_OP_MAD_IFC = 0x50d, MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c, MLX5_CMD_OP_NOP = 0x80d, MLX5_CMD_OP_ALLOC_XRCD = 0x80e, MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817, MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822, MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823, MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824, MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825, MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826, MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827, MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828, MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, MLX5_CMD_OP_SET_WOL_ROL = 0x830, MLX5_CMD_OP_QUERY_WOL_ROL = 0x831, MLX5_CMD_OP_CREATE_LAG = 0x840, MLX5_CMD_OP_MODIFY_LAG = 0x841, MLX5_CMD_OP_QUERY_LAG = 0x842, MLX5_CMD_OP_DESTROY_LAG = 0x843, MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844, MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845, 
MLX5_CMD_OP_CREATE_TIR = 0x900, MLX5_CMD_OP_MODIFY_TIR = 0x901, MLX5_CMD_OP_DESTROY_TIR = 0x902, MLX5_CMD_OP_QUERY_TIR = 0x903, MLX5_CMD_OP_CREATE_SQ = 0x904, MLX5_CMD_OP_MODIFY_SQ = 0x905, MLX5_CMD_OP_DESTROY_SQ = 0x906, MLX5_CMD_OP_QUERY_SQ = 0x907, MLX5_CMD_OP_CREATE_RQ = 0x908, MLX5_CMD_OP_MODIFY_RQ = 0x909, MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910, MLX5_CMD_OP_DESTROY_RQ = 0x90a, MLX5_CMD_OP_QUERY_RQ = 0x90b, MLX5_CMD_OP_CREATE_RMP = 0x90c, MLX5_CMD_OP_MODIFY_RMP = 0x90d, MLX5_CMD_OP_DESTROY_RMP = 0x90e, MLX5_CMD_OP_QUERY_RMP = 0x90f, MLX5_CMD_OP_CREATE_TIS = 0x912, MLX5_CMD_OP_MODIFY_TIS = 0x913, MLX5_CMD_OP_DESTROY_TIS = 0x914, MLX5_CMD_OP_QUERY_TIS = 0x915, MLX5_CMD_OP_CREATE_RQT = 0x916, MLX5_CMD_OP_MODIFY_RQT = 0x917, MLX5_CMD_OP_DESTROY_RQT = 0x918, MLX5_CMD_OP_QUERY_RQT = 0x919, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT = 0x92f, MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930, MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931, MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932, MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933, MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934, MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938, MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d, MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e, MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT = 0x93f, MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, MLX5_CMD_OP_FPGA_CREATE_QP = 0x960, MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961, MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963, MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964, MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01, MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03, MLX5_CMD_OP_CREATE_UCTX = 0xa04, MLX5_CMD_OP_DESTROY_UCTX = 0xa06, MLX5_CMD_OP_CREATE_UMEM = 0xa08, MLX5_CMD_OP_DESTROY_UMEM = 0xa0a, MLX5_CMD_OP_SYNC_STEERING = 0xb00, MLX5_CMD_OP_MAX }; /* Valid range for general commands that don't work over an object */ enum { MLX5_CMD_OP_GENERAL_START = 0xb00, MLX5_CMD_OP_GENERAL_END = 0xd00, }; struct mlx5_ifc_flow_table_fields_supported_bits { u8 outer_dmac[0x1]; u8 outer_smac[0x1]; u8 outer_ether_type[0x1]; u8 outer_ip_version[0x1]; u8 outer_first_prio[0x1]; u8 outer_first_cfi[0x1]; u8 outer_first_vid[0x1]; u8 outer_ipv4_ttl[0x1]; u8 outer_second_prio[0x1]; u8 outer_second_cfi[0x1]; u8 outer_second_vid[0x1]; u8 reserved_at_b[0x1]; u8 outer_sip[0x1]; u8 outer_dip[0x1]; u8 outer_frag[0x1]; u8 outer_ip_protocol[0x1]; u8 outer_ip_ecn[0x1]; u8 outer_ip_dscp[0x1]; u8 outer_udp_sport[0x1]; u8 outer_udp_dport[0x1]; u8 outer_tcp_sport[0x1]; u8 outer_tcp_dport[0x1]; u8 outer_tcp_flags[0x1]; u8 outer_gre_protocol[0x1]; u8 outer_gre_key[0x1]; u8 outer_vxlan_vni[0x1]; u8 outer_geneve_vni[0x1]; u8 outer_geneve_oam[0x1]; u8 outer_geneve_protocol_type[0x1]; u8 outer_geneve_opt_len[0x1]; u8 reserved_at_1e[0x1]; u8 source_eswitch_port[0x1]; u8 inner_dmac[0x1]; u8 inner_smac[0x1]; u8 inner_ether_type[0x1]; u8 inner_ip_version[0x1]; u8 inner_first_prio[0x1]; u8 inner_first_cfi[0x1]; u8 inner_first_vid[0x1]; u8 reserved_at_27[0x1]; u8 inner_second_prio[0x1]; u8 inner_second_cfi[0x1]; u8 inner_second_vid[0x1]; u8 reserved_at_2b[0x1]; u8 inner_sip[0x1]; u8 inner_dip[0x1]; u8 
inner_frag[0x1]; u8 inner_ip_protocol[0x1]; u8 inner_ip_ecn[0x1]; u8 inner_ip_dscp[0x1]; u8 inner_udp_sport[0x1]; u8 inner_udp_dport[0x1]; u8 inner_tcp_sport[0x1]; u8 inner_tcp_dport[0x1]; u8 inner_tcp_flags[0x1]; u8 reserved_at_37[0x9]; u8 geneve_tlv_option_0_data[0x1]; u8 reserved_at_41[0x4]; u8 outer_first_mpls_over_udp[0x4]; u8 outer_first_mpls_over_gre[0x4]; u8 inner_first_mpls[0x4]; u8 outer_first_mpls[0x4]; u8 reserved_at_55[0x2]; u8 outer_esp_spi[0x1]; u8 reserved_at_58[0x2]; u8 bth_dst_qp[0x1]; u8 reserved_at_5b[0x25]; }; struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; u8 reserved_at_1[0x1]; u8 flow_counter[0x1]; u8 flow_modify_en[0x1]; u8 modify_root[0x1]; u8 identified_miss_table_mode[0x1]; u8 flow_table_modify[0x1]; u8 reformat[0x1]; u8 decap[0x1]; u8 reserved_at_9[0x1]; u8 pop_vlan[0x1]; u8 push_vlan[0x1]; u8 reserved_at_c[0x1]; u8 pop_vlan_2[0x1]; u8 push_vlan_2[0x1]; u8 reformat_and_vlan_action[0x1]; u8 reserved_at_10[0x1]; u8 sw_owner[0x1]; u8 reformat_l3_tunnel_to_l2[0x1]; u8 reformat_l2_to_l3_tunnel[0x1]; u8 reformat_and_modify_action[0x1]; u8 reserved_at_15[0x2]; u8 table_miss_action_domain[0x1]; u8 termination_table[0x1]; u8 reserved_at_19[0x7]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; u8 log_max_modify_header_context[0x8]; u8 max_modify_header_actions[0x8]; u8 max_ft_level[0x8]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x18]; u8 log_max_ft_num[0x8]; u8 reserved_at_80[0x10]; u8 log_max_flow_counter[0x8]; u8 log_max_destination[0x8]; u8 reserved_at_a0[0x18]; u8 log_max_flow[0x8]; u8 reserved_at_c0[0x40]; struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support; }; struct mlx5_ifc_odp_per_transport_service_cap_bits { u8 send[0x1]; u8 receive[0x1]; u8 write[0x1]; u8 read[0x1]; u8 atomic[0x1]; u8 srq_receive[0x1]; u8 reserved_at_6[0x1a]; }; struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 smac_47_16[0x20]; u8 smac_15_0[0x10]; u8 ethertype[0x10]; u8 dmac_47_16[0x20]; u8 dmac_15_0[0x10]; u8 first_prio[0x3]; u8 first_cfi[0x1]; u8 first_vid[0xc]; u8 ip_protocol[0x8]; u8 ip_dscp[0x6]; u8 ip_ecn[0x2]; u8 cvlan_tag[0x1]; u8 svlan_tag[0x1]; u8 frag[0x1]; u8 ip_version[0x4]; u8 tcp_flags[0x9]; u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; u8 reserved_at_c0[0x18]; u8 ttl_hoplimit[0x8]; u8 udp_sport[0x10]; u8 udp_dport[0x10]; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; }; struct mlx5_ifc_nvgre_key_bits { u8 hi[0x18]; u8 lo[0x8]; }; union mlx5_ifc_gre_key_bits { struct mlx5_ifc_nvgre_key_bits nvgre; u8 key[0x20]; }; struct mlx5_ifc_fte_match_set_misc_bits { u8 gre_c_present[0x1]; u8 reserved_at_1[0x1]; u8 gre_k_present[0x1]; u8 gre_s_present[0x1]; u8 source_vhca_port[0x4]; u8 source_sqn[0x18]; u8 source_eswitch_owner_vhca_id[0x10]; u8 source_port[0x10]; u8 outer_second_prio[0x3]; u8 outer_second_cfi[0x1]; u8 outer_second_vid[0xc]; u8 inner_second_prio[0x3]; u8 inner_second_cfi[0x1]; u8 inner_second_vid[0xc]; u8 outer_second_cvlan_tag[0x1]; u8 inner_second_cvlan_tag[0x1]; u8 outer_second_svlan_tag[0x1]; u8 inner_second_svlan_tag[0x1]; u8 reserved_at_64[0xc]; u8 gre_protocol[0x10]; union mlx5_ifc_gre_key_bits gre_key; u8 vxlan_vni[0x18]; u8 reserved_at_b8[0x8]; u8 geneve_vni[0x18]; u8 reserved_at_d8[0x7]; u8 geneve_oam[0x1]; u8 reserved_at_e0[0xc]; u8 outer_ipv6_flow_label[0x14]; u8 reserved_at_100[0xc]; u8 inner_ipv6_flow_label[0x14]; u8 reserved_at_120[0xa]; u8 geneve_opt_len[0x6]; u8 
geneve_protocol_type[0x10]; u8 reserved_at_140[0x8]; u8 bth_dst_qp[0x18]; u8 reserved_at_160[0x20]; u8 outer_esp_spi[0x20]; u8 reserved_at_1a0[0x60]; }; struct mlx5_ifc_fte_match_mpls_bits { u8 mpls_label[0x14]; u8 mpls_exp[0x3]; u8 mpls_s_bos[0x1]; u8 mpls_ttl[0x8]; }; struct mlx5_ifc_fte_match_set_misc2_bits { struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls; struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls; struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre; struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp; u8 metadata_reg_c_7[0x20]; u8 metadata_reg_c_6[0x20]; u8 metadata_reg_c_5[0x20]; u8 metadata_reg_c_4[0x20]; u8 metadata_reg_c_3[0x20]; u8 metadata_reg_c_2[0x20]; u8 metadata_reg_c_1[0x20]; u8 metadata_reg_c_0[0x20]; u8 metadata_reg_a[0x20]; u8 metadata_reg_b[0x20]; u8 reserved_at_1c0[0x40]; }; struct mlx5_ifc_fte_match_set_misc3_bits { u8 inner_tcp_seq_num[0x20]; u8 outer_tcp_seq_num[0x20]; u8 inner_tcp_ack_num[0x20]; u8 outer_tcp_ack_num[0x20]; u8 reserved_at_80[0x8]; u8 outer_vxlan_gpe_vni[0x18]; u8 outer_vxlan_gpe_next_protocol[0x8]; u8 outer_vxlan_gpe_flags[0x8]; u8 reserved_at_b0[0x10]; u8 icmp_header_data[0x20]; u8 icmpv6_header_data[0x20]; u8 icmp_type[0x8]; u8 icmp_code[0x8]; u8 icmpv6_type[0x8]; u8 icmpv6_code[0x8]; u8 geneve_tlv_option_0_data[0x20]; u8 reserved_at_140[0xc0]; }; struct mlx5_ifc_cmd_pas_bits { u8 pa_h[0x20]; u8 pa_l[0x14]; u8 reserved_at_34[0xc]; }; struct mlx5_ifc_uint64_bits { u8 hi[0x20]; u8 lo[0x20]; }; enum { MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0, MLX5_ADS_STAT_RATE_2_5GBPS = 0x7, MLX5_ADS_STAT_RATE_10GBPS = 0x8, MLX5_ADS_STAT_RATE_30GBPS = 0x9, MLX5_ADS_STAT_RATE_5GBPS = 0xa, MLX5_ADS_STAT_RATE_20GBPS = 0xb, MLX5_ADS_STAT_RATE_40GBPS = 0xc, MLX5_ADS_STAT_RATE_60GBPS = 0xd, MLX5_ADS_STAT_RATE_80GBPS = 0xe, MLX5_ADS_STAT_RATE_120GBPS = 0xf, }; struct mlx5_ifc_ads_bits { u8 fl[0x1]; u8 free_ar[0x1]; u8 reserved_at_2[0xe]; u8 pkey_index[0x10]; u8 reserved_at_20[0x8]; u8 grh[0x1]; u8 mlid[0x7]; u8 rlid[0x10]; u8 ack_timeout[0x5]; u8 reserved_at_45[0x3]; u8 src_addr_index[0x8]; u8 reserved_at_50[0x4]; u8 stat_rate[0x4]; u8 hop_limit[0x8]; u8 reserved_at_60[0x4]; u8 tclass[0x8]; u8 flow_label[0x14]; u8 rgid_rip[16][0x8]; u8 reserved_at_100[0x4]; u8 f_dscp[0x1]; u8 f_ecn[0x1]; u8 reserved_at_106[0x1]; u8 f_eth_prio[0x1]; u8 ecn[0x2]; u8 dscp[0x6]; u8 udp_sport[0x10]; u8 dei_cfi[0x1]; u8 eth_prio[0x3]; u8 sl[0x4]; u8 vhca_port_num[0x8]; u8 rmac_47_32[0x10]; u8 rmac_31_0[0x20]; }; struct mlx5_ifc_flow_table_nic_cap_bits { u8 nic_rx_multi_path_tirs[0x1]; u8 nic_rx_multi_path_tirs_fts[0x1]; u8 allow_sniffer_and_nic_rx_shared_tir[0x1]; u8 reserved_at_3[0x1d]; u8 encap_general_header[0x1]; u8 reserved_at_21[0xa]; u8 log_max_packet_reformat_context[0x5]; u8 reserved_at_30[0x6]; u8 max_encap_header_size[0xa]; u8 reserved_at_40[0x1c0]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; u8 reserved_at_a00[0x200]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; u8 reserved_at_e00[0x1200]; u8 sw_steering_nic_rx_action_drop_icm_address[0x40]; u8 sw_steering_nic_tx_action_drop_icm_address[0x40]; u8 sw_steering_nic_tx_action_allow_icm_address[0x40]; u8 reserved_at_20c0[0x5f40]; }; enum { MLX5_FDB_TO_VPORT_REG_C_0 = 0x01, 
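	/*
	 * Note (added comment, semantics as understood from driver usage):
	 * the MLX5_FDB_TO_VPORT_REG_C_* values are single-bit flags, bit i
	 * selecting metadata register c_i. They are OR-ed together (e.g.
	 * MLX5_FDB_TO_VPORT_REG_C_0 | MLX5_FDB_TO_VPORT_REG_C_1) into the
	 * 8-bit fdb_to_vport_reg_c_id fields defined below.
	 */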
MLX5_FDB_TO_VPORT_REG_C_1 = 0x02, MLX5_FDB_TO_VPORT_REG_C_2 = 0x04, MLX5_FDB_TO_VPORT_REG_C_3 = 0x08, MLX5_FDB_TO_VPORT_REG_C_4 = 0x10, MLX5_FDB_TO_VPORT_REG_C_5 = 0x20, MLX5_FDB_TO_VPORT_REG_C_6 = 0x40, MLX5_FDB_TO_VPORT_REG_C_7 = 0x80, }; struct mlx5_ifc_flow_table_eswitch_cap_bits { u8 fdb_to_vport_reg_c_id[0x8]; u8 reserved_at_8[0xf]; u8 flow_source[0x1]; u8 reserved_at_18[0x2]; u8 multi_fdb_encap[0x1]; u8 reserved_at_1b[0x1]; u8 fdb_multi_path_to_table[0x1]; u8 reserved_at_1d[0x3]; u8 reserved_at_20[0x1e0]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; u8 reserved_at_800[0x1000]; u8 sw_steering_fdb_action_drop_icm_address_rx[0x40]; u8 sw_steering_fdb_action_drop_icm_address_tx[0x40]; u8 sw_steering_uplink_icm_address_rx[0x40]; u8 sw_steering_uplink_icm_address_tx[0x40]; u8 reserved_at_1900[0x6700]; }; enum { MLX5_COUNTER_SOURCE_ESWITCH = 0x0, MLX5_COUNTER_FLOW_ESWITCH = 0x1, }; struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; u8 reserved_at_5[0x3]; u8 esw_uplink_ingress_acl[0x1]; u8 reserved_at_9[0x10]; u8 esw_functions_changed[0x1]; u8 reserved_at_1a[0x1]; u8 ecpf_vport_exists[0x1]; u8 counter_eswitch_affinity[0x1]; u8 merged_eswitch[0x1]; u8 nic_vport_node_guid_modify[0x1]; u8 nic_vport_port_guid_modify[0x1]; u8 vxlan_encap_decap[0x1]; u8 nvgre_encap_decap[0x1]; u8 reserved_at_22[0x1]; u8 log_max_fdb_encap_uplink[0x5]; u8 reserved_at_21[0x3]; u8 log_max_packet_reformat_context[0x5]; u8 reserved_2b[0x6]; u8 max_encap_header_size[0xa]; u8 reserved_at_40[0xb]; u8 log_max_esw_sf[0x5]; u8 esw_sf_base_id[0x10]; u8 reserved_at_60[0x7a0]; }; struct mlx5_ifc_qos_cap_bits { u8 packet_pacing[0x1]; u8 esw_scheduling[0x1]; u8 esw_bw_share[0x1]; u8 esw_rate_limit[0x1]; u8 reserved_at_4[0x1]; u8 packet_pacing_burst_bound[0x1]; u8 packet_pacing_typical_size[0x1]; u8 reserved_at_7[0x19]; u8 reserved_at_20[0x20]; u8 packet_pacing_max_rate[0x20]; u8 packet_pacing_min_rate[0x20]; u8 reserved_at_80[0x10]; u8 packet_pacing_rate_table_size[0x10]; u8 esw_element_type[0x10]; u8 esw_tsar_type[0x10]; u8 reserved_at_c0[0x10]; u8 max_qos_para_vport[0x10]; u8 max_tsar_bw_share[0x20]; u8 reserved_at_100[0x700]; }; struct mlx5_ifc_debug_cap_bits { u8 core_dump_general[0x1]; u8 core_dump_qp[0x1]; u8 reserved_at_2[0x1e]; u8 reserved_at_20[0x2]; u8 stall_detect[0x1]; u8 reserved_at_23[0x1d]; u8 reserved_at_40[0x7c0]; }; struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 csum_cap[0x1]; u8 vlan_cap[0x1]; u8 lro_cap[0x1]; u8 lro_psh_flag[0x1]; u8 lro_time_stamp[0x1]; u8 reserved_at_5[0x2]; u8 wqe_vlan_insert[0x1]; u8 self_lb_en_modifiable[0x1]; u8 reserved_at_9[0x2]; u8 max_lso_cap[0x5]; u8 multi_pkt_send_wqe[0x2]; u8 wqe_inline_mode[0x2]; u8 rss_ind_tbl_cap[0x4]; u8 reg_umr_sq[0x1]; u8 scatter_fcs[0x1]; u8 enhanced_multi_pkt_send_wqe[0x1]; u8 tunnel_lso_const_out_ip_id[0x1]; u8 reserved_at_1c[0x2]; u8 tunnel_stateless_gre[0x1]; u8 tunnel_stateless_vxlan[0x1]; u8 swp[0x1]; u8 swp_csum[0x1]; u8 swp_lso[0x1]; u8 cqe_checksum_full[0x1]; u8 tunnel_stateless_geneve_tx[0x1]; u8 tunnel_stateless_mpls_over_udp[0x1]; u8 tunnel_stateless_mpls_over_gre[0x1]; u8 tunnel_stateless_vxlan_gpe[0x1]; u8 tunnel_stateless_ipv4_over_vxlan[0x1]; u8 tunnel_stateless_ip_over_ip[0x1]; u8 
reserved_at_2a[0x6];
	u8 max_vxlan_udp_ports[0x8];
	u8 reserved_at_38[0x6];
	u8 max_geneve_opt_len[0x1];
	u8 tunnel_stateless_geneve_rx[0x1];

	u8 reserved_at_40[0x10];
	u8 lro_min_mss_size[0x10];

	u8 reserved_at_60[0x120];

	u8 lro_timer_supported_periods[4][0x20];

	u8 reserved_at_200[0x600];
};

struct mlx5_ifc_roce_cap_bits {
	u8 roce_apm[0x1];
	u8 reserved_at_1[0x1f];

	u8 reserved_at_20[0x60];

	u8 reserved_at_80[0xc];
	u8 l3_type[0x4];
	u8 reserved_at_90[0x8];
	u8 roce_version[0x8];

	u8 reserved_at_a0[0x10];
	u8 r_roce_dest_udp_port[0x10];

	u8 r_roce_max_src_udp_port[0x10];
	u8 r_roce_min_src_udp_port[0x10];

	u8 reserved_at_e0[0x10];
	u8 roce_address_table_size[0x10];

	u8 reserved_at_100[0x700];
};

struct mlx5_ifc_sync_steering_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];

	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];

	u8 reserved_at_40[0xc0];
};

struct mlx5_ifc_sync_steering_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];

	u8 syndrome[0x20];

	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_device_mem_cap_bits {
	u8 memic[0x1];
	u8 reserved_at_1[0x1f];

	u8 reserved_at_20[0xb];
	u8 log_min_memic_alloc_size[0x5];
	u8 reserved_at_30[0x8];
	u8 log_max_memic_addr_alignment[0x8];

	u8 memic_bar_start_addr[0x40];

	u8 memic_bar_size[0x20];

	u8 max_memic_size[0x20];

	u8 steering_sw_icm_start_address[0x40];

	u8 reserved_at_100[0x8];
	u8 log_header_modify_sw_icm_size[0x8];
	u8 reserved_at_110[0x2];
	u8 log_sw_icm_alloc_granularity[0x6];
	u8 log_steering_sw_icm_size[0x8];

	u8 reserved_at_120[0x20];

	u8 header_modify_sw_icm_start_address[0x40];

	u8 reserved_at_180[0x680];
};

struct mlx5_ifc_device_event_cap_bits {
	u8 user_affiliated_events[4][0x40];

	u8 user_unaffiliated_events[4][0x40];
};

enum {
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100,
};

enum {
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80,
	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100,
};

struct mlx5_ifc_atomic_caps_bits {
	u8 reserved_at_0[0x40];

	u8 atomic_req_8B_endianness_mode[0x2];
	u8 reserved_at_42[0x4];
	u8 supported_atomic_req_8B_endianness_mode_1[0x1];
	u8 reserved_at_47[0x19];

	u8 reserved_at_60[0x20];

	u8 reserved_at_80[0x10];
	u8 atomic_operations[0x10];

	u8 reserved_at_a0[0x10];
	u8 atomic_size_qp[0x10];

	u8 reserved_at_c0[0x10];
	u8 atomic_size_dc[0x10];

	u8 reserved_at_e0[0x720];
};

struct mlx5_ifc_odp_cap_bits {
	u8 reserved_at_0[0x40];

	u8 sig[0x1];
	u8 reserved_at_41[0x1f];

	u8 reserved_at_60[0x20];

	struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;

	struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;

	struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;

	struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps;

	struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;

	u8 reserved_at_120[0x6E0];
};

struct mlx5_ifc_calc_op {
	u8 reserved_at_0[0x10];
	u8 reserved_at_10[0x9];
	u8 op_swap_endianness[0x1];
	u8 op_min[0x1];
	u8 op_xor[0x1];
	u8 op_or[0x1];
	u8 op_and[0x1];
	u8 op_max[0x1];
	u8 op_add[0x1];
};

struct mlx5_ifc_vector_calc_cap_bits {
	u8 calc_matrix[0x1];
	u8 reserved_at_1[0x1f];

	u8 reserved_at_20[0x8];
	u8 max_vec_count[0x8];
	u8 reserved_at_30[0xd];
	u8 max_chunk_size[0x3];

	struct mlx5_ifc_calc_op calc0;
	struct mlx5_ifc_calc_op calc1;
	struct mlx5_ifc_calc_op calc2;
	struct mlx5_ifc_calc_op calc3;

	u8 reserved_at_c0[0x720];
};

struct mlx5_ifc_tls_cap_bits {
	u8 tls_1_2_aes_gcm_128[0x1];
	u8 tls_1_3_aes_gcm_128[0x1];
	u8 tls_1_2_aes_gcm_256[0x1];
	u8 tls_1_3_aes_gcm_256[0x1];
	u8 reserved_at_4[0x1c];

	u8 reserved_at_20[0x7e0];
};

enum {
	MLX5_WQ_TYPE_LINKED_LIST = 0x0,
	MLX5_WQ_TYPE_CYCLIC = 0x1,
	MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
	MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3,
};

enum {
	MLX5_WQ_END_PAD_MODE_NONE = 0x0,
	MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
};

enum {
	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0,
	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1,
	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2,
	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3,
	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4,
};

enum {
	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0,
	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1,
	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2,
	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3,
	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4,
	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5,
};

enum {
	MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0,
	MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1,
};

enum {
	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0,
	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1,
	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3,
};

enum {
	MLX5_CAP_PORT_TYPE_IB = 0x0,
	MLX5_CAP_PORT_TYPE_ETH = 0x1,
};

enum {
	MLX5_CAP_UMR_FENCE_STRONG = 0x0,
	MLX5_CAP_UMR_FENCE_SMALL = 0x1,
	MLX5_CAP_UMR_FENCE_NONE = 0x2,
};

enum {
	MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
	MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
	MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
};

enum {
	MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
	MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
};

#define MLX5_FC_BULK_SIZE_FACTOR 128

enum mlx5_fc_bulk_alloc_bitmask {
	MLX5_FC_BULK_128 = (1 << 0),
	MLX5_FC_BULK_256 = (1 << 1),
	MLX5_FC_BULK_512 = (1 << 2),
	MLX5_FC_BULK_1024 = (1 << 3),
	MLX5_FC_BULK_2048 = (1 << 4),
	MLX5_FC_BULK_4096 = (1 << 5),
	MLX5_FC_BULK_8192 = (1 << 6),
	MLX5_FC_BULK_16384 = (1 << 7),
};

#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))

enum {
	MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
	MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
};

struct mlx5_ifc_cmd_hca_cap_bits {
	u8 reserved_at_0[0x30];
	u8 vhca_id[0x10];

	u8 reserved_at_40[0x40];

	u8 log_max_srq_sz[0x8];
	u8 log_max_qp_sz[0x8];
	u8 event_cap[0x1];
	u8 reserved_at_91[0x7];
	u8 prio_tag_required[0x1];
	u8 reserved_at_99[0x2];
	u8 log_max_qp[0x5];

	u8 reserved_at_a0[0xb];
	u8 log_max_srq[0x5];
	u8 reserved_at_b0[0x1];
	u8 uplink_follow[0x1];
	u8 reserved_at_b2[0xe];

	u8 reserved_at_c0[0x8];
	u8 log_max_cq_sz[0x8];
	u8 reserved_at_d0[0xb];
	u8 log_max_cq[0x5];

	u8 log_max_eq_sz[0x8];
	u8 reserved_at_e8[0x2];
	u8 log_max_mkey[0x6];
	u8 reserved_at_f0[0x8];
	u8 dump_fill_mkey[0x1];
	u8 reserved_at_f9[0x2];
	u8 fast_teardown[0x1];
	u8 log_max_eq[0x4];

	u8 max_indirection[0x8];
	u8 fixed_buffer_size[0x1];
	u8 log_max_mrw_sz[0x7];
	u8 force_teardown[0x1];
	u8 reserved_at_111[0x1];
	u8 log_max_bsf_list_size[0x6];
	u8 umr_extended_translation_offset[0x1];
	u8 null_mkey[0x1];
	u8 log_max_klm_list_size[0x6];

	u8 reserved_at_120[0xa];
	u8 log_max_ra_req_dc[0x6];
	u8 reserved_at_130[0xa];
	u8 log_max_ra_res_dc[0x6];
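	/*
	 * Reminder on the convention used throughout these layouts (added
	 * comment): "u8 name[0xN]" declares a big-endian field N *bits*
	 * wide, not a byte array, and "reserved_at_X" encodes the hex bit
	 * offset of the gap. Capability bits in this structure are read with
	 * the generated accessors; a sketch, assuming MLX5_GET() from
	 * <linux/mlx5/device.h> and "hca_caps" pointing at a queried copy of
	 * this layout:
	 *
	 *	u8 log_max_qp = MLX5_GET(cmd_hca_cap, hca_caps, log_max_qp);
	 */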
u8 reserved_at_140[0xa]; u8 log_max_ra_req_qp[0x6]; u8 reserved_at_150[0xa]; u8 log_max_ra_res_qp[0x6]; u8 end_pad[0x1]; u8 cc_query_allowed[0x1]; u8 cc_modify_allowed[0x1]; u8 start_pad[0x1]; u8 cache_line_128byte[0x1]; u8 reserved_at_165[0x4]; u8 rts2rts_qp_counters_set_id[0x1]; u8 reserved_at_16a[0x2]; u8 vnic_env_int_rq_oob[0x1]; u8 sbcam_reg[0x1]; u8 reserved_at_16e[0x1]; u8 qcam_reg[0x1]; u8 gid_table_size[0x10]; u8 out_of_seq_cnt[0x1]; u8 vport_counters[0x1]; u8 retransmission_q_counters[0x1]; u8 debug[0x1]; u8 modify_rq_counter_set_id[0x1]; u8 rq_delay_drop[0x1]; u8 max_qp_cnt[0xa]; u8 pkey_table_size[0x10]; u8 vport_group_manager[0x1]; u8 vhca_group_manager[0x1]; u8 ib_virt[0x1]; u8 eth_virt[0x1]; u8 vnic_env_queue_counters[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; u8 eswitch_manager[0x1]; u8 device_memory[0x1]; u8 mcam_reg[0x1]; u8 pcam_reg[0x1]; u8 local_ca_ack_delay[0x5]; u8 port_module_event[0x1]; u8 enhanced_error_q_counters[0x1]; u8 ports_check[0x1]; u8 reserved_at_1b3[0x1]; u8 disable_link_up[0x1]; u8 beacon_led[0x1]; u8 port_type[0x2]; u8 num_ports[0x8]; u8 reserved_at_1c0[0x1]; u8 pps[0x1]; u8 pps_modify[0x1]; u8 log_max_msg[0x5]; u8 reserved_at_1c8[0x4]; u8 max_tc[0x4]; u8 temp_warn_event[0x1]; u8 dcbx[0x1]; u8 general_notification_event[0x1]; u8 reserved_at_1d3[0x2]; u8 fpga[0x1]; u8 rol_s[0x1]; u8 rol_g[0x1]; u8 reserved_at_1d8[0x1]; u8 wol_s[0x1]; u8 wol_g[0x1]; u8 wol_a[0x1]; u8 wol_b[0x1]; u8 wol_m[0x1]; u8 wol_u[0x1]; u8 wol_p[0x1]; u8 stat_rate_support[0x10]; u8 reserved_at_1f0[0xc]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; u8 striding_rq[0x1]; u8 reserved_at_202[0x1]; u8 ipoib_enhanced_offloads[0x1]; u8 ipoib_basic_offloads[0x1]; u8 reserved_at_205[0x1]; u8 repeated_block_disabled[0x1]; u8 umr_modify_entity_size_disabled[0x1]; u8 umr_modify_atomic_disabled[0x1]; u8 umr_indirect_mkey_disabled[0x1]; u8 umr_fence[0x2]; u8 dc_req_scat_data_cqe[0x1]; u8 reserved_at_20d[0x2]; u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; u8 reserved_at_213[0x1]; u8 wq_signature[0x1]; u8 sctr_data_cqe[0x1]; u8 reserved_at_216[0x1]; u8 sho[0x1]; u8 tph[0x1]; u8 rf[0x1]; u8 dct[0x1]; u8 qos[0x1]; u8 eth_net_offloads[0x1]; u8 roce[0x1]; u8 atomic[0x1]; u8 reserved_at_21f[0x1]; u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 cq_moderation[0x1]; u8 reserved_at_223[0x3]; u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; u8 reserved_at_229[0x1]; u8 scqe_break_moderation[0x1]; u8 cq_period_start_from_cqe[0x1]; u8 cd[0x1]; u8 reserved_at_22d[0x1]; u8 apm[0x1]; u8 vector_calc[0x1]; u8 umr_ptr_rlky[0x1]; u8 imaicl[0x1]; u8 qp_packet_based[0x1]; u8 reserved_at_233[0x3]; u8 qkv[0x1]; u8 pkv[0x1]; u8 set_deth_sqpn[0x1]; u8 reserved_at_239[0x3]; u8 xrc[0x1]; u8 ud[0x1]; u8 uc[0x1]; u8 rc[0x1]; u8 uar_4k[0x1]; u8 reserved_at_241[0x9]; u8 uar_sz[0x6]; u8 reserved_at_250[0x8]; u8 log_pg_sz[0x8]; u8 bf[0x1]; u8 driver_version[0x1]; u8 pad_tx_eth_packet[0x1]; u8 reserved_at_263[0x8]; u8 log_bf_reg_size[0x5]; u8 reserved_at_270[0x8]; u8 lag_tx_port_affinity[0x1]; u8 reserved_at_279[0x2]; u8 lag_master[0x1]; u8 num_lag_ports[0x4]; u8 reserved_at_280[0x10]; u8 max_wqe_sz_sq[0x10]; u8 reserved_at_2a0[0x10]; u8 max_wqe_sz_rq[0x10]; u8 max_flow_counter_31_16[0x10]; u8 max_wqe_sz_sq_dc[0x10]; u8 reserved_at_2e0[0x7]; u8 max_qp_mcg[0x19]; u8 reserved_at_300[0x10]; u8 flow_counter_bulk_alloc[0x8]; u8 log_max_mcg[0x8]; u8 reserved_at_320[0x3]; u8 log_max_transport_domain[0x5]; u8 reserved_at_328[0x3]; u8 log_max_pd[0x5]; u8 reserved_at_330[0xb]; u8 log_max_xrcd[0x5]; u8 
nic_receive_steering_discard[0x1]; u8 receive_discard_vport_down[0x1]; u8 transmit_discard_vport_down[0x1]; u8 reserved_at_343[0x5]; u8 log_max_flow_counter_bulk[0x8]; u8 max_flow_counter_15_0[0x10]; u8 reserved_at_360[0x3]; u8 log_max_rq[0x5]; u8 reserved_at_368[0x3]; u8 log_max_sq[0x5]; u8 reserved_at_370[0x3]; u8 log_max_tir[0x5]; u8 reserved_at_378[0x3]; u8 log_max_tis[0x5]; u8 basic_cyclic_rcv_wqe[0x1]; u8 reserved_at_381[0x2]; u8 log_max_rmp[0x5]; u8 reserved_at_388[0x3]; u8 log_max_rqt[0x5]; u8 reserved_at_390[0x3]; u8 log_max_rqt_size[0x5]; u8 reserved_at_398[0x3]; u8 log_max_tis_per_sq[0x5]; u8 ext_stride_num_range[0x1]; u8 reserved_at_3a1[0x2]; u8 log_max_stride_sz_rq[0x5]; u8 reserved_at_3a8[0x3]; u8 log_min_stride_sz_rq[0x5]; u8 reserved_at_3b0[0x3]; u8 log_max_stride_sz_sq[0x5]; u8 reserved_at_3b8[0x3]; u8 log_min_stride_sz_sq[0x5]; u8 hairpin[0x1]; u8 reserved_at_3c1[0x2]; u8 log_max_hairpin_queues[0x5]; u8 reserved_at_3c8[0x3]; u8 log_max_hairpin_wq_data_sz[0x5]; u8 reserved_at_3d0[0x3]; u8 log_max_hairpin_num_packets[0x5]; u8 reserved_at_3d8[0x3]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; u8 disable_local_lb_uc[0x1]; u8 disable_local_lb_mc[0x1]; u8 log_min_hairpin_wq_data_sz[0x5]; u8 reserved_at_3e8[0x3]; u8 log_max_vlan_list[0x5]; u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; u8 reserved_at_3f8[0x3]; u8 log_max_current_uc_list[0x5]; u8 general_obj_types[0x40]; u8 reserved_at_440[0x4]; u8 steering_format_version[0x4]; u8 create_qp_start_hint[0x18]; u8 reserved_at_460[0x1]; u8 ats[0x1]; u8 reserved_at_462[0x1]; u8 log_max_uctx[0x5]; u8 reserved_at_468[0x3]; u8 log_max_umem[0x5]; u8 max_num_eqs[0x10]; u8 reserved_at_480[0x1]; u8 tls_tx[0x1]; u8 reserved_at_482[0x1]; u8 log_max_l2_table[0x5]; u8 reserved_at_488[0x8]; u8 log_uar_page_sz[0x10]; u8 reserved_at_4a0[0x20]; u8 device_frequency_mhz[0x20]; u8 device_frequency_khz[0x20]; u8 reserved_at_500[0x20]; u8 num_of_uars_per_page[0x20]; u8 flex_parser_protocols[0x20]; u8 max_geneve_tlv_options[0x8]; u8 reserved_at_568[0x3]; u8 max_geneve_tlv_option_data_len[0x5]; u8 reserved_at_570[0x10]; u8 reserved_at_580[0x33]; u8 log_max_dek[0x5]; u8 reserved_at_5b8[0x4]; u8 mini_cqe_resp_stride_index[0x1]; u8 cqe_128_always[0x1]; u8 cqe_compression_128[0x1]; u8 cqe_compression[0x1]; u8 cqe_compression_timeout[0x10]; u8 cqe_compression_max_num[0x10]; u8 reserved_at_5e0[0x10]; u8 tag_matching[0x1]; u8 rndv_offload_rc[0x1]; u8 rndv_offload_dc[0x1]; u8 log_tag_matching_list_sz[0x5]; u8 reserved_at_5f8[0x3]; u8 log_max_xrq[0x5]; u8 affiliate_nic_vport_criteria[0x8]; u8 native_port_num[0x8]; u8 num_vhca_ports[0x8]; u8 reserved_at_618[0x6]; u8 sw_owner_id[0x1]; u8 reserved_at_61f[0x1]; u8 max_num_of_monitor_counters[0x10]; u8 num_ppcnt_monitor_counters[0x10]; u8 reserved_at_640[0x10]; u8 num_q_monitor_counters[0x10]; u8 reserved_at_660[0x20]; u8 sf[0x1]; u8 sf_set_partition[0x1]; u8 reserved_at_682[0x1]; u8 log_max_sf[0x5]; u8 reserved_at_688[0x8]; u8 log_min_sf_size[0x8]; u8 max_num_sf_partitions[0x8]; u8 uctx_cap[0x20]; u8 reserved_at_6c0[0x4]; u8 flex_parser_id_geneve_tlv_option_0[0x4]; u8 flex_parser_id_icmp_dw1[0x4]; u8 flex_parser_id_icmp_dw0[0x4]; u8 flex_parser_id_icmpv6_dw1[0x4]; u8 flex_parser_id_icmpv6_dw0[0x4]; u8 flex_parser_id_outer_first_mpls_over_gre[0x4]; u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4]; u8 reserved_at_6e0[0x10]; u8 sf_base_id[0x10]; u8 reserved_at_700[0x80]; u8 vhca_tunnel_commands[0x40]; u8 reserved_at_7c0[0x40]; }; enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_VPORT 
= 0x0, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101, }; enum mlx5_flow_table_miss_action { MLX5_FLOW_TABLE_MISS_ACTION_DEF, MLX5_FLOW_TABLE_MISS_ACTION_FWD, MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, }; struct mlx5_ifc_dest_format_struct_bits { u8 destination_type[0x8]; u8 destination_id[0x18]; u8 destination_eswitch_owner_vhca_id_valid[0x1]; u8 packet_reformat[0x1]; u8 reserved_at_22[0xe]; u8 destination_eswitch_owner_vhca_id[0x10]; }; struct mlx5_ifc_flow_counter_list_bits { u8 flow_counter_id[0x20]; u8 reserved_at_20[0x20]; }; struct mlx5_ifc_extended_dest_format_bits { struct mlx5_ifc_dest_format_struct_bits destination_entry; u8 packet_reformat_id[0x20]; u8 reserved_at_60[0x20]; }; union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { struct mlx5_ifc_extended_dest_format_bits extended_dest_format; struct mlx5_ifc_flow_counter_list_bits flow_counter_list; }; struct mlx5_ifc_fte_match_param_bits { struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; struct mlx5_ifc_fte_match_set_misc_bits misc_parameters; struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2; struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3; u8 reserved_at_a00[0x600]; }; enum { MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3, MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4, }; struct mlx5_ifc_rx_hash_field_select_bits { u8 l3_prot_type[0x1]; u8 l4_prot_type[0x1]; u8 selected_fields[0x1e]; }; enum { MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0, MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1, }; enum { MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0, MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1, }; struct mlx5_ifc_wq_bits { u8 wq_type[0x4]; u8 wq_signature[0x1]; u8 end_padding_mode[0x2]; u8 cd_slave[0x1]; u8 reserved_at_8[0x18]; u8 hds_skip_first_sge[0x1]; u8 log2_hds_buf_size[0x3]; u8 reserved_at_24[0x7]; u8 page_offset[0x5]; u8 lwm[0x10]; u8 reserved_at_40[0x8]; u8 pd[0x18]; u8 reserved_at_60[0x8]; u8 uar_page[0x18]; u8 dbr_addr[0x40]; u8 hw_counter[0x20]; u8 sw_counter[0x20]; u8 reserved_at_100[0xc]; u8 log_wq_stride[0x4]; u8 reserved_at_110[0x3]; u8 log_wq_pg_sz[0x5]; u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; u8 dbr_umem_valid[0x1]; u8 wq_umem_valid[0x1]; u8 reserved_at_122[0x1]; u8 log_hairpin_num_packets[0x5]; u8 reserved_at_128[0x3]; u8 log_hairpin_data_sz[0x5]; u8 reserved_at_130[0x4]; u8 log_wqe_num_of_strides[0x4]; u8 two_byte_shift_en[0x1]; u8 reserved_at_139[0x4]; u8 log_wqe_stride_size[0x3]; u8 reserved_at_140[0x4c0]; struct mlx5_ifc_cmd_pas_bits pas[0]; }; struct mlx5_ifc_rq_num_bits { u8 reserved_at_0[0x8]; u8 rq_num[0x18]; }; struct mlx5_ifc_mac_address_layout_bits { u8 reserved_at_0[0x10]; u8 mac_addr_47_32[0x10]; u8 mac_addr_31_0[0x20]; }; struct mlx5_ifc_vlan_layout_bits { u8 reserved_at_0[0x14]; u8 vlan[0x0c]; u8 reserved_at_20[0x20]; }; struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { u8 reserved_at_0[0xa0]; u8 min_time_between_cnps[0x20]; u8 reserved_at_c0[0x12]; u8 cnp_dscp[0x6]; u8 reserved_at_d8[0x4]; u8 cnp_prio_mode[0x1]; u8 cnp_802p_prio[0x3]; u8 reserved_at_e0[0x720]; }; struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { u8 reserved_at_0[0x60]; u8 reserved_at_60[0x4]; u8 
clamp_tgt_rate[0x1]; u8 reserved_at_65[0x3]; u8 clamp_tgt_rate_after_time_inc[0x1]; u8 reserved_at_69[0x17]; u8 reserved_at_80[0x20]; u8 rpg_time_reset[0x20]; u8 rpg_byte_reset[0x20]; u8 rpg_threshold[0x20]; u8 rpg_max_rate[0x20]; u8 rpg_ai_rate[0x20]; u8 rpg_hai_rate[0x20]; u8 rpg_gd[0x20]; u8 rpg_min_dec_fac[0x20]; u8 rpg_min_rate[0x20]; u8 reserved_at_1c0[0xe0]; u8 rate_to_set_on_first_cnp[0x20]; u8 dce_tcp_g[0x20]; u8 dce_tcp_rtt[0x20]; u8 rate_reduce_monitor_period[0x20]; u8 reserved_at_320[0x20]; u8 initial_alpha_value[0x20]; u8 reserved_at_360[0x4a0]; }; struct mlx5_ifc_cong_control_802_1qau_rp_bits { u8 reserved_at_0[0x80]; u8 rppp_max_rps[0x20]; u8 rpg_time_reset[0x20]; u8 rpg_byte_reset[0x20]; u8 rpg_threshold[0x20]; u8 rpg_max_rate[0x20]; u8 rpg_ai_rate[0x20]; u8 rpg_hai_rate[0x20]; u8 rpg_gd[0x20]; u8 rpg_min_dec_fac[0x20]; u8 rpg_min_rate[0x20]; u8 reserved_at_1c0[0x640]; }; enum { MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1, MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2, MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4, }; struct mlx5_ifc_resize_field_select_bits { u8 resize_field_select[0x20]; }; enum { MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4, MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8, }; struct mlx5_ifc_modify_field_select_bits { u8 modify_field_select[0x20]; }; struct mlx5_ifc_field_select_r_roce_np_bits { u8 field_select_r_roce_np[0x20]; }; struct mlx5_ifc_field_select_r_roce_rp_bits { u8 field_select_r_roce_rp[0x20]; }; enum { MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400, MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800, }; struct mlx5_ifc_field_select_802_1qau_rp_bits { u8 field_select_8021qaurp[0x20]; }; struct mlx5_ifc_phys_layer_cntrs_bits { u8 time_since_last_clear_high[0x20]; u8 time_since_last_clear_low[0x20]; u8 symbol_errors_high[0x20]; u8 symbol_errors_low[0x20]; u8 sync_headers_errors_high[0x20]; u8 sync_headers_errors_low[0x20]; u8 edpl_bip_errors_lane0_high[0x20]; u8 edpl_bip_errors_lane0_low[0x20]; u8 edpl_bip_errors_lane1_high[0x20]; u8 edpl_bip_errors_lane1_low[0x20]; u8 edpl_bip_errors_lane2_high[0x20]; u8 edpl_bip_errors_lane2_low[0x20]; u8 edpl_bip_errors_lane3_high[0x20]; u8 edpl_bip_errors_lane3_low[0x20]; u8 fc_fec_corrected_blocks_lane0_high[0x20]; u8 fc_fec_corrected_blocks_lane0_low[0x20]; u8 fc_fec_corrected_blocks_lane1_high[0x20]; u8 fc_fec_corrected_blocks_lane1_low[0x20]; u8 fc_fec_corrected_blocks_lane2_high[0x20]; u8 fc_fec_corrected_blocks_lane2_low[0x20]; u8 fc_fec_corrected_blocks_lane3_high[0x20]; u8 fc_fec_corrected_blocks_lane3_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane0_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane0_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane1_high[0x20]; u8 
fc_fec_uncorrectable_blocks_lane1_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane2_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane2_low[0x20]; u8 fc_fec_uncorrectable_blocks_lane3_high[0x20]; u8 fc_fec_uncorrectable_blocks_lane3_low[0x20]; u8 rs_fec_corrected_blocks_high[0x20]; u8 rs_fec_corrected_blocks_low[0x20]; u8 rs_fec_uncorrectable_blocks_high[0x20]; u8 rs_fec_uncorrectable_blocks_low[0x20]; u8 rs_fec_no_errors_blocks_high[0x20]; u8 rs_fec_no_errors_blocks_low[0x20]; u8 rs_fec_single_error_blocks_high[0x20]; u8 rs_fec_single_error_blocks_low[0x20]; u8 rs_fec_corrected_symbols_total_high[0x20]; u8 rs_fec_corrected_symbols_total_low[0x20]; u8 rs_fec_corrected_symbols_lane0_high[0x20]; u8 rs_fec_corrected_symbols_lane0_low[0x20]; u8 rs_fec_corrected_symbols_lane1_high[0x20]; u8 rs_fec_corrected_symbols_lane1_low[0x20]; u8 rs_fec_corrected_symbols_lane2_high[0x20]; u8 rs_fec_corrected_symbols_lane2_low[0x20]; u8 rs_fec_corrected_symbols_lane3_high[0x20]; u8 rs_fec_corrected_symbols_lane3_low[0x20]; u8 link_down_events[0x20]; u8 successful_recovery_events[0x20]; u8 reserved_at_640[0x180]; }; struct mlx5_ifc_phys_layer_statistical_cntrs_bits { u8 time_since_last_clear_high[0x20]; u8 time_since_last_clear_low[0x20]; u8 phy_received_bits_high[0x20]; u8 phy_received_bits_low[0x20]; u8 phy_symbol_errors_high[0x20]; u8 phy_symbol_errors_low[0x20]; u8 phy_corrected_bits_high[0x20]; u8 phy_corrected_bits_low[0x20]; u8 phy_corrected_bits_lane0_high[0x20]; u8 phy_corrected_bits_lane0_low[0x20]; u8 phy_corrected_bits_lane1_high[0x20]; u8 phy_corrected_bits_lane1_low[0x20]; u8 phy_corrected_bits_lane2_high[0x20]; u8 phy_corrected_bits_lane2_low[0x20]; u8 phy_corrected_bits_lane3_high[0x20]; u8 phy_corrected_bits_lane3_low[0x20]; u8 reserved_at_200[0x5c0]; }; struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { u8 symbol_error_counter[0x10]; u8 link_error_recovery_counter[0x8]; u8 link_downed_counter[0x8]; u8 port_rcv_errors[0x10]; u8 port_rcv_remote_physical_errors[0x10]; u8 port_rcv_switch_relay_errors[0x10]; u8 port_xmit_discards[0x10]; u8 port_xmit_constraint_errors[0x8]; u8 port_rcv_constraint_errors[0x8]; u8 reserved_at_70[0x8]; u8 link_overrun_errors[0x8]; u8 reserved_at_80[0x10]; u8 vl_15_dropped[0x10]; u8 reserved_at_a0[0x80]; u8 port_xmit_wait[0x20]; }; struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits { u8 transmit_queue_high[0x20]; u8 transmit_queue_low[0x20]; u8 no_buffer_discard_uc_high[0x20]; u8 no_buffer_discard_uc_low[0x20]; u8 reserved_at_80[0x740]; }; struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits { u8 wred_discard_high[0x20]; u8 wred_discard_low[0x20]; u8 ecn_marked_tc_high[0x20]; u8 ecn_marked_tc_low[0x20]; u8 reserved_at_80[0x740]; }; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { u8 rx_octets_high[0x20]; u8 rx_octets_low[0x20]; u8 reserved_at_40[0xc0]; u8 rx_frames_high[0x20]; u8 rx_frames_low[0x20]; u8 tx_octets_high[0x20]; u8 tx_octets_low[0x20]; u8 reserved_at_180[0xc0]; u8 tx_frames_high[0x20]; u8 tx_frames_low[0x20]; u8 rx_pause_high[0x20]; u8 rx_pause_low[0x20]; u8 rx_pause_duration_high[0x20]; u8 rx_pause_duration_low[0x20]; u8 tx_pause_high[0x20]; u8 tx_pause_low[0x20]; u8 tx_pause_duration_high[0x20]; u8 tx_pause_duration_low[0x20]; u8 rx_pause_transition_high[0x20]; u8 rx_pause_transition_low[0x20]; u8 reserved_at_3c0[0x40]; u8 device_stall_minor_watermark_cnt_high[0x20]; u8 device_stall_minor_watermark_cnt_low[0x20]; u8 device_stall_critical_watermark_cnt_high[0x20]; u8 device_stall_critical_watermark_cnt_low[0x20]; u8 reserved_at_480[0x340]; }; 
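/*
 * The counter-group layouts above and below split every 64-bit counter into
 * adjacent _high/_low 32-bit halves. Because the halves are contiguous and
 * 64-bit aligned, a counter can be fetched in one MLX5_GET64() aimed at the
 * _high half; the driver's stats code uses a similar pattern for PPCNT
 * registers. An illustrative helper (the function name is an assumption for
 * the example, not part of this header):
 */
static inline u64 example_read_rx_octets(void *eth_per_prio_grp)
{
	/* rx_octets_high + rx_octets_low form one 64-bit octet counter */
	return MLX5_GET64(eth_per_prio_grp_data_layout, eth_per_prio_grp,
			  rx_octets_high);
}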
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { u8 port_transmit_wait_high[0x20]; u8 port_transmit_wait_low[0x20]; u8 reserved_at_40[0x100]; u8 rx_buffer_almost_full_high[0x20]; u8 rx_buffer_almost_full_low[0x20]; u8 rx_buffer_full_high[0x20]; u8 rx_buffer_full_low[0x20]; u8 rx_icrc_encapsulated_high[0x20]; u8 rx_icrc_encapsulated_low[0x20]; u8 reserved_at_200[0x5c0]; }; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { u8 dot3stats_alignment_errors_high[0x20]; u8 dot3stats_alignment_errors_low[0x20]; u8 dot3stats_fcs_errors_high[0x20]; u8 dot3stats_fcs_errors_low[0x20]; u8 dot3stats_single_collision_frames_high[0x20]; u8 dot3stats_single_collision_frames_low[0x20]; u8 dot3stats_multiple_collision_frames_high[0x20]; u8 dot3stats_multiple_collision_frames_low[0x20]; u8 dot3stats_sqe_test_errors_high[0x20]; u8 dot3stats_sqe_test_errors_low[0x20]; u8 dot3stats_deferred_transmissions_high[0x20]; u8 dot3stats_deferred_transmissions_low[0x20]; u8 dot3stats_late_collisions_high[0x20]; u8 dot3stats_late_collisions_low[0x20]; u8 dot3stats_excessive_collisions_high[0x20]; u8 dot3stats_excessive_collisions_low[0x20]; u8 dot3stats_internal_mac_transmit_errors_high[0x20]; u8 dot3stats_internal_mac_transmit_errors_low[0x20]; u8 dot3stats_carrier_sense_errors_high[0x20]; u8 dot3stats_carrier_sense_errors_low[0x20]; u8 dot3stats_frame_too_longs_high[0x20]; u8 dot3stats_frame_too_longs_low[0x20]; u8 dot3stats_internal_mac_receive_errors_high[0x20]; u8 dot3stats_internal_mac_receive_errors_low[0x20]; u8 dot3stats_symbol_errors_high[0x20]; u8 dot3stats_symbol_errors_low[0x20]; u8 dot3control_in_unknown_opcodes_high[0x20]; u8 dot3control_in_unknown_opcodes_low[0x20]; u8 dot3in_pause_frames_high[0x20]; u8 dot3in_pause_frames_low[0x20]; u8 dot3out_pause_frames_high[0x20]; u8 dot3out_pause_frames_low[0x20]; u8 reserved_at_400[0x3c0]; }; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { u8 ether_stats_drop_events_high[0x20]; u8 ether_stats_drop_events_low[0x20]; u8 ether_stats_octets_high[0x20]; u8 ether_stats_octets_low[0x20]; u8 ether_stats_pkts_high[0x20]; u8 ether_stats_pkts_low[0x20]; u8 ether_stats_broadcast_pkts_high[0x20]; u8 ether_stats_broadcast_pkts_low[0x20]; u8 ether_stats_multicast_pkts_high[0x20]; u8 ether_stats_multicast_pkts_low[0x20]; u8 ether_stats_crc_align_errors_high[0x20]; u8 ether_stats_crc_align_errors_low[0x20]; u8 ether_stats_undersize_pkts_high[0x20]; u8 ether_stats_undersize_pkts_low[0x20]; u8 ether_stats_oversize_pkts_high[0x20]; u8 ether_stats_oversize_pkts_low[0x20]; u8 ether_stats_fragments_high[0x20]; u8 ether_stats_fragments_low[0x20]; u8 ether_stats_jabbers_high[0x20]; u8 ether_stats_jabbers_low[0x20]; u8 ether_stats_collisions_high[0x20]; u8 ether_stats_collisions_low[0x20]; u8 ether_stats_pkts64octets_high[0x20]; u8 ether_stats_pkts64octets_low[0x20]; u8 ether_stats_pkts65to127octets_high[0x20]; u8 ether_stats_pkts65to127octets_low[0x20]; u8 ether_stats_pkts128to255octets_high[0x20]; u8 ether_stats_pkts128to255octets_low[0x20]; u8 ether_stats_pkts256to511octets_high[0x20]; u8 ether_stats_pkts256to511octets_low[0x20]; u8 ether_stats_pkts512to1023octets_high[0x20]; u8 ether_stats_pkts512to1023octets_low[0x20]; u8 ether_stats_pkts1024to1518octets_high[0x20]; u8 ether_stats_pkts1024to1518octets_low[0x20]; u8 ether_stats_pkts1519to2047octets_high[0x20]; u8 ether_stats_pkts1519to2047octets_low[0x20]; u8 ether_stats_pkts2048to4095octets_high[0x20]; u8 ether_stats_pkts2048to4095octets_low[0x20]; u8 ether_stats_pkts4096to8191octets_high[0x20]; u8 
ether_stats_pkts4096to8191octets_low[0x20]; u8 ether_stats_pkts8192to10239octets_high[0x20]; u8 ether_stats_pkts8192to10239octets_low[0x20]; u8 reserved_at_540[0x280]; }; struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { u8 if_in_octets_high[0x20]; u8 if_in_octets_low[0x20]; u8 if_in_ucast_pkts_high[0x20]; u8 if_in_ucast_pkts_low[0x20]; u8 if_in_discards_high[0x20]; u8 if_in_discards_low[0x20]; u8 if_in_errors_high[0x20]; u8 if_in_errors_low[0x20]; u8 if_in_unknown_protos_high[0x20]; u8 if_in_unknown_protos_low[0x20]; u8 if_out_octets_high[0x20]; u8 if_out_octets_low[0x20]; u8 if_out_ucast_pkts_high[0x20]; u8 if_out_ucast_pkts_low[0x20]; u8 if_out_discards_high[0x20]; u8 if_out_discards_low[0x20]; u8 if_out_errors_high[0x20]; u8 if_out_errors_low[0x20]; u8 if_in_multicast_pkts_high[0x20]; u8 if_in_multicast_pkts_low[0x20]; u8 if_in_broadcast_pkts_high[0x20]; u8 if_in_broadcast_pkts_low[0x20]; u8 if_out_multicast_pkts_high[0x20]; u8 if_out_multicast_pkts_low[0x20]; u8 if_out_broadcast_pkts_high[0x20]; u8 if_out_broadcast_pkts_low[0x20]; u8 reserved_at_340[0x480]; }; struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { u8 a_frames_transmitted_ok_high[0x20]; u8 a_frames_transmitted_ok_low[0x20]; u8 a_frames_received_ok_high[0x20]; u8 a_frames_received_ok_low[0x20]; u8 a_frame_check_sequence_errors_high[0x20]; u8 a_frame_check_sequence_errors_low[0x20]; u8 a_alignment_errors_high[0x20]; u8 a_alignment_errors_low[0x20]; u8 a_octets_transmitted_ok_high[0x20]; u8 a_octets_transmitted_ok_low[0x20]; u8 a_octets_received_ok_high[0x20]; u8 a_octets_received_ok_low[0x20]; u8 a_multicast_frames_xmitted_ok_high[0x20]; u8 a_multicast_frames_xmitted_ok_low[0x20]; u8 a_broadcast_frames_xmitted_ok_high[0x20]; u8 a_broadcast_frames_xmitted_ok_low[0x20]; u8 a_multicast_frames_received_ok_high[0x20]; u8 a_multicast_frames_received_ok_low[0x20]; u8 a_broadcast_frames_received_ok_high[0x20]; u8 a_broadcast_frames_received_ok_low[0x20]; u8 a_in_range_length_errors_high[0x20]; u8 a_in_range_length_errors_low[0x20]; u8 a_out_of_range_length_field_high[0x20]; u8 a_out_of_range_length_field_low[0x20]; u8 a_frame_too_long_errors_high[0x20]; u8 a_frame_too_long_errors_low[0x20]; u8 a_symbol_error_during_carrier_high[0x20]; u8 a_symbol_error_during_carrier_low[0x20]; u8 a_mac_control_frames_transmitted_high[0x20]; u8 a_mac_control_frames_transmitted_low[0x20]; u8 a_mac_control_frames_received_high[0x20]; u8 a_mac_control_frames_received_low[0x20]; u8 a_unsupported_opcodes_received_high[0x20]; u8 a_unsupported_opcodes_received_low[0x20]; u8 a_pause_mac_ctrl_frames_received_high[0x20]; u8 a_pause_mac_ctrl_frames_received_low[0x20]; u8 a_pause_mac_ctrl_frames_transmitted_high[0x20]; u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; u8 reserved_at_4c0[0x300]; }; struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits { u8 life_time_counter_high[0x20]; u8 life_time_counter_low[0x20]; u8 rx_errors[0x20]; u8 tx_errors[0x20]; u8 l0_to_recovery_eieos[0x20]; u8 l0_to_recovery_ts[0x20]; u8 l0_to_recovery_framing[0x20]; u8 l0_to_recovery_retrain[0x20]; u8 crc_error_dllp[0x20]; u8 crc_error_tlp[0x20]; u8 tx_overflow_buffer_pkt_high[0x20]; u8 tx_overflow_buffer_pkt_low[0x20]; u8 outbound_stalled_reads[0x20]; u8 outbound_stalled_writes[0x20]; u8 outbound_stalled_reads_events[0x20]; u8 outbound_stalled_writes_events[0x20]; u8 reserved_at_200[0x5c0]; }; struct mlx5_ifc_cmd_inter_comp_event_bits { u8 command_completion_vector[0x20]; u8 reserved_at_20[0xc0]; }; struct mlx5_ifc_stall_vl_event_bits { u8 reserved_at_0[0x18]; u8 
port_num[0x1]; u8 reserved_at_19[0x3]; u8 vl[0x4]; u8 reserved_at_20[0xa0]; }; struct mlx5_ifc_db_bf_congestion_event_bits { u8 event_subtype[0x8]; u8 reserved_at_8[0x8]; u8 congestion_level[0x8]; u8 reserved_at_18[0x8]; u8 reserved_at_20[0xa0]; }; struct mlx5_ifc_gpio_event_bits { u8 reserved_at_0[0x60]; u8 gpio_event_hi[0x20]; u8 gpio_event_lo[0x20]; u8 reserved_at_a0[0x40]; }; struct mlx5_ifc_port_state_change_event_bits { u8 reserved_at_0[0x40]; u8 port_num[0x4]; u8 reserved_at_44[0x1c]; u8 reserved_at_60[0x80]; }; struct mlx5_ifc_dropped_packet_logged_bits { u8 reserved_at_0[0xe0]; }; enum { MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, }; struct mlx5_ifc_cq_error_bits { u8 reserved_at_0[0x8]; u8 cqn[0x18]; u8 reserved_at_20[0x20]; u8 reserved_at_40[0x18]; u8 syndrome[0x8]; u8 reserved_at_60[0x80]; }; struct mlx5_ifc_rdma_page_fault_event_bits { u8 bytes_committed[0x20]; u8 r_key[0x20]; u8 reserved_at_40[0x10]; u8 packet_len[0x10]; u8 rdma_op_len[0x20]; u8 rdma_va[0x40]; u8 reserved_at_c0[0x5]; u8 rdma[0x1]; u8 write[0x1]; u8 requestor[0x1]; u8 qp_number[0x18]; }; struct mlx5_ifc_wqe_associated_page_fault_event_bits { u8 bytes_committed[0x20]; u8 reserved_at_20[0x10]; u8 wqe_index[0x10]; u8 reserved_at_40[0x10]; u8 len[0x10]; u8 reserved_at_60[0x60]; u8 reserved_at_c0[0x5]; u8 rdma[0x1]; u8 write_read[0x1]; u8 requestor[0x1]; u8 qpn[0x18]; }; struct mlx5_ifc_qp_events_bits { u8 reserved_at_0[0xa0]; u8 type[0x8]; u8 reserved_at_a8[0x18]; u8 reserved_at_c0[0x8]; u8 qpn_rqn_sqn[0x18]; }; struct mlx5_ifc_dct_events_bits { u8 reserved_at_0[0xc0]; u8 reserved_at_c0[0x8]; u8 dct_number[0x18]; }; struct mlx5_ifc_comp_event_bits { u8 reserved_at_0[0xc0]; u8 reserved_at_c0[0x8]; u8 cq_number[0x18]; }; enum { MLX5_QPC_STATE_RST = 0x0, MLX5_QPC_STATE_INIT = 0x1, MLX5_QPC_STATE_RTR = 0x2, MLX5_QPC_STATE_RTS = 0x3, MLX5_QPC_STATE_SQER = 0x4, MLX5_QPC_STATE_ERR = 0x6, MLX5_QPC_STATE_SQD = 0x7, MLX5_QPC_STATE_SUSPENDED = 0x9, }; enum { MLX5_QPC_ST_RC = 0x0, MLX5_QPC_ST_UC = 0x1, MLX5_QPC_ST_UD = 0x2, MLX5_QPC_ST_XRC = 0x3, MLX5_QPC_ST_DCI = 0x5, MLX5_QPC_ST_QP0 = 0x7, MLX5_QPC_ST_QP1 = 0x8, MLX5_QPC_ST_RAW_DATAGRAM = 0x9, MLX5_QPC_ST_REG_UMR = 0xc, }; enum { MLX5_QPC_PM_STATE_ARMED = 0x0, MLX5_QPC_PM_STATE_REARM = 0x1, MLX5_QPC_PM_STATE_RESERVED = 0x2, MLX5_QPC_PM_STATE_MIGRATED = 0x3, }; enum { MLX5_QPC_OFFLOAD_TYPE_RNDV = 0x1, }; enum { MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0, MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1, }; enum { MLX5_QPC_MTU_256_BYTES = 0x1, MLX5_QPC_MTU_512_BYTES = 0x2, MLX5_QPC_MTU_1K_BYTES = 0x3, MLX5_QPC_MTU_2K_BYTES = 0x4, MLX5_QPC_MTU_4K_BYTES = 0x5, MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7, }; enum { MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1, MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2, MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3, MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4, MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5, MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6, MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7, MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8, }; enum { MLX5_QPC_CS_REQ_DISABLE = 0x0, MLX5_QPC_CS_REQ_UP_TO_32B = 0x11, MLX5_QPC_CS_REQ_UP_TO_64B = 0x22, }; enum { MLX5_QPC_CS_RES_DISABLE = 0x0, MLX5_QPC_CS_RES_UP_TO_32B = 0x1, MLX5_QPC_CS_RES_UP_TO_64B = 0x2, }; struct mlx5_ifc_qpc_bits { u8 state[0x4]; u8 lag_tx_port_affinity[0x4]; u8 st[0x8]; u8 reserved_at_10[0x3]; u8 pm_state[0x2]; u8 reserved_at_15[0x1]; u8 req_e2e_credit_mode[0x2]; u8 offload_type[0x4]; u8 end_padding_mode[0x2]; u8 reserved_at_1e[0x2]; u8 wq_signature[0x1]; u8 block_lb_mc[0x1]; u8 
atomic_like_write_en[0x1]; u8 latency_sensitive[0x1]; u8 reserved_at_24[0x1]; u8 drain_sigerr[0x1]; u8 reserved_at_26[0x2]; u8 pd[0x18]; u8 mtu[0x3]; u8 log_msg_max[0x5]; u8 reserved_at_48[0x1]; u8 log_rq_size[0x4]; u8 log_rq_stride[0x3]; u8 no_sq[0x1]; u8 log_sq_size[0x4]; u8 reserved_at_55[0x6]; u8 rlky[0x1]; u8 ulp_stateless_offload_mode[0x4]; u8 counter_set_id[0x8]; u8 uar_page[0x18]; u8 reserved_at_80[0x8]; u8 user_index[0x18]; u8 reserved_at_a0[0x3]; u8 log_page_size[0x5]; u8 remote_qpn[0x18]; struct mlx5_ifc_ads_bits primary_address_path; struct mlx5_ifc_ads_bits secondary_address_path; u8 log_ack_req_freq[0x4]; u8 reserved_at_384[0x4]; u8 log_sra_max[0x3]; u8 reserved_at_38b[0x2]; u8 retry_count[0x3]; u8 rnr_retry[0x3]; u8 reserved_at_393[0x1]; u8 fre[0x1]; u8 cur_rnr_retry[0x3]; u8 cur_retry_count[0x3]; u8 reserved_at_39b[0x5]; u8 reserved_at_3a0[0x20]; u8 reserved_at_3c0[0x8]; u8 next_send_psn[0x18]; u8 reserved_at_3e0[0x8]; u8 cqn_snd[0x18]; u8 reserved_at_400[0x8]; u8 deth_sqpn[0x18]; u8 reserved_at_420[0x20]; u8 reserved_at_440[0x8]; u8 last_acked_psn[0x18]; u8 reserved_at_460[0x8]; u8 ssn[0x18]; u8 reserved_at_480[0x8]; u8 log_rra_max[0x3]; u8 reserved_at_48b[0x1]; u8 atomic_mode[0x4]; u8 rre[0x1]; u8 rwe[0x1]; u8 rae[0x1]; u8 reserved_at_493[0x1]; u8 page_offset[0x6]; u8 reserved_at_49a[0x3]; u8 cd_slave_receive[0x1]; u8 cd_slave_send[0x1]; u8 cd_master[0x1]; u8 reserved_at_4a0[0x3]; u8 min_rnr_nak[0x5]; u8 next_rcv_psn[0x18]; u8 reserved_at_4c0[0x8]; u8 xrcd[0x18]; u8 reserved_at_4e0[0x8]; u8 cqn_rcv[0x18]; u8 dbr_addr[0x40]; u8 q_key[0x20]; u8 reserved_at_560[0x5]; u8 rq_type[0x3]; u8 srqn_rmpn_xrqn[0x18]; u8 reserved_at_580[0x8]; u8 rmsn[0x18]; u8 hw_sq_wqebb_counter[0x10]; u8 sw_sq_wqebb_counter[0x10]; u8 hw_rq_counter[0x20]; u8 sw_rq_counter[0x20]; u8 reserved_at_600[0x20]; u8 reserved_at_620[0xf]; u8 cgs[0x1]; u8 cs_req[0x8]; u8 cs_res[0x8]; u8 dc_access_key[0x40]; u8 reserved_at_680[0x3]; u8 dbr_umem_valid[0x1]; u8 reserved_at_684[0xbc]; }; struct mlx5_ifc_roce_addr_layout_bits { u8 source_l3_address[16][0x8]; u8 reserved_at_80[0x3]; u8 vlan_valid[0x1]; u8 vlan_id[0xc]; u8 source_mac_47_32[0x10]; u8 source_mac_31_0[0x20]; u8 reserved_at_c0[0x14]; u8 roce_l3_type[0x4]; u8 roce_version[0x8]; u8 reserved_at_e0[0x20]; }; union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; struct mlx5_ifc_odp_cap_bits odp_cap; struct mlx5_ifc_atomic_caps_bits atomic_caps; struct mlx5_ifc_roce_cap_bits roce_cap; struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits qos_cap; struct mlx5_ifc_debug_cap_bits debug_cap; struct mlx5_ifc_fpga_cap_bits fpga_cap; struct mlx5_ifc_tls_cap_bits tls_cap; struct mlx5_ifc_device_mem_cap_bits device_mem_cap; u8 reserved_at_0[0x8000]; }; enum { MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1, MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10, MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20, MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400, MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, }; enum { 
MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT = 0x0, MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK = 0x1, MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2, }; struct mlx5_ifc_vlan_bits { u8 ethtype[0x10]; u8 prio[0x3]; u8 cfi[0x1]; u8 vid[0xc]; }; struct mlx5_ifc_flow_context_bits { struct mlx5_ifc_vlan_bits push_vlan; u8 group_id[0x20]; u8 reserved_at_40[0x8]; u8 flow_tag[0x18]; u8 reserved_at_60[0x10]; u8 action[0x10]; u8 extended_destination[0x1]; u8 reserved_at_81[0x1]; u8 flow_source[0x2]; u8 reserved_at_84[0x4]; u8 destination_list_size[0x18]; u8 reserved_at_a0[0x8]; u8 flow_counter_list_size[0x18]; u8 packet_reformat_id[0x20]; u8 modify_header_id[0x20]; struct mlx5_ifc_vlan_bits push_vlan_2; u8 reserved_at_120[0xe0]; struct mlx5_ifc_fte_match_param_bits match_value; u8 reserved_at_1200[0x600]; union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0]; }; enum { MLX5_XRC_SRQC_STATE_GOOD = 0x0, MLX5_XRC_SRQC_STATE_ERROR = 0x1, }; struct mlx5_ifc_xrc_srqc_bits { u8 state[0x4]; u8 log_xrc_srq_size[0x4]; u8 reserved_at_8[0x18]; u8 wq_signature[0x1]; u8 cont_srq[0x1]; u8 reserved_at_22[0x1]; u8 rlky[0x1]; u8 basic_cyclic_rcv_wqe[0x1]; u8 log_rq_stride[0x3]; u8 xrcd[0x18]; u8 page_offset[0x6]; u8 reserved_at_46[0x1]; u8 dbr_umem_valid[0x1]; u8 cqn[0x18]; u8 reserved_at_60[0x20]; u8 user_index_equal_xrc_srqn[0x1]; u8 reserved_at_81[0x1]; u8 log_page_size[0x6]; u8 user_index[0x18]; u8 reserved_at_a0[0x20]; u8 reserved_at_c0[0x8]; u8 pd[0x18]; u8 lwm[0x10]; u8 wqe_cnt[0x10]; u8 reserved_at_100[0x40]; u8 db_record_addr_h[0x20]; u8 db_record_addr_l[0x1e]; u8 reserved_at_17e[0x2]; u8 reserved_at_180[0x80]; }; struct mlx5_ifc_vnic_diagnostic_statistics_bits { u8 counter_error_queues[0x20]; u8 total_error_queues[0x20]; u8 send_queue_priority_update_flow[0x20]; u8 reserved_at_60[0x20]; u8 nic_receive_steering_discard[0x40]; u8 receive_discard_vport_down[0x40]; u8 transmit_discard_vport_down[0x40]; u8 reserved_at_140[0xa0]; u8 internal_rq_out_of_buffer[0x20]; u8 reserved_at_200[0xe00]; }; struct mlx5_ifc_traffic_counter_bits { u8 packets[0x40]; u8 octets[0x40]; }; struct mlx5_ifc_tisc_bits { u8 strict_lag_tx_port_affinity[0x1]; u8 tls_en[0x1]; u8 reserved_at_2[0x2]; u8 lag_tx_port_affinity[0x04]; u8 reserved_at_8[0x4]; u8 prio[0x4]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x100]; u8 reserved_at_120[0x8]; u8 transport_domain[0x18]; u8 reserved_at_140[0x8]; u8 underlay_qpn[0x18]; u8 reserved_at_160[0x8]; u8 pd[0x18]; u8 reserved_at_180[0x380]; }; enum { MLX5_TIRC_DISP_TYPE_DIRECT = 0x0, MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1, }; enum { MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, }; enum { MLX5_RX_HASH_FN_NONE = 0x0, MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1, MLX5_RX_HASH_FN_TOEPLITZ = 0x2, }; enum { MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1, MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2, }; struct mlx5_ifc_tirc_bits { u8 reserved_at_0[0x20]; u8 disp_type[0x4]; u8 reserved_at_24[0x1c]; u8 reserved_at_40[0x40]; u8 reserved_at_80[0x4]; u8 lro_timeout_period_usecs[0x10]; u8 lro_enable_mask[0x4]; u8 lro_max_ip_payload_size[0x8]; u8 reserved_at_a0[0x40]; u8 reserved_at_e0[0x8]; u8 inline_rqn[0x18]; u8 rx_hash_symmetric[0x1]; u8 reserved_at_101[0x1]; u8 tunneled_offload_en[0x1]; u8 reserved_at_103[0x5]; u8 indirect_table[0x18]; u8 rx_hash_fn[0x4]; u8 reserved_at_124[0x2]; u8 self_lb_block[0x2]; u8 transport_domain[0x18]; u8 rx_hash_toeplitz_key[10][0x20]; struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer; struct mlx5_ifc_rx_hash_field_select_bits 
rx_hash_field_selector_inner; u8 reserved_at_2c0[0x4c0]; }; enum { MLX5_SRQC_STATE_GOOD = 0x0, MLX5_SRQC_STATE_ERROR = 0x1, }; struct mlx5_ifc_srqc_bits { u8 state[0x4]; u8 log_srq_size[0x4]; u8 reserved_at_8[0x18]; u8 wq_signature[0x1]; u8 cont_srq[0x1]; u8 reserved_at_22[0x1]; u8 rlky[0x1]; u8 reserved_at_24[0x1]; u8 log_rq_stride[0x3]; u8 xrcd[0x18]; u8 page_offset[0x6]; u8 reserved_at_46[0x2]; u8 cqn[0x18]; u8 reserved_at_60[0x20]; u8 reserved_at_80[0x2]; u8 log_page_size[0x6]; u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x20]; u8 reserved_at_c0[0x8]; u8 pd[0x18]; u8 lwm[0x10]; u8 wqe_cnt[0x10]; u8 reserved_at_100[0x40]; u8 dbr_addr[0x40]; u8 reserved_at_180[0x80]; }; enum { MLX5_SQC_STATE_RST = 0x0, MLX5_SQC_STATE_RDY = 0x1, MLX5_SQC_STATE_ERR = 0x3, }; struct mlx5_ifc_sqc_bits { u8 rlky[0x1]; u8 cd_master[0x1]; u8 fre[0x1]; u8 flush_in_error_en[0x1]; u8 allow_multi_pkt_send_wqe[0x1]; u8 min_wqe_inline_mode[0x3]; u8 state[0x4]; u8 reg_umr[0x1]; u8 allow_swp[0x1]; u8 hairpin[0x1]; u8 reserved_at_f[0x11]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; u8 reserved_at_40[0x8]; u8 cqn[0x18]; u8 reserved_at_60[0x8]; u8 hairpin_peer_rq[0x18]; u8 reserved_at_80[0x10]; u8 hairpin_peer_vhca[0x10]; u8 reserved_at_a0[0x50]; u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; u8 reserved_at_110[0x10]; u8 reserved_at_120[0x40]; u8 reserved_at_160[0x8]; u8 tis_num_0[0x18]; struct mlx5_ifc_wq_bits wq; }; enum { SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2, SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, }; enum { ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0, ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1, ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, }; struct mlx5_ifc_scheduling_context_bits { u8 element_type[0x8]; u8 reserved_at_8[0x18]; u8 element_attributes[0x20]; u8 parent_element_id[0x20]; u8 reserved_at_60[0x40]; u8 bw_share[0x20]; u8 max_average_bw[0x20]; u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_rqtc_bits { u8 reserved_at_0[0xa0]; u8 reserved_at_a0[0x10]; u8 rqt_max_size[0x10]; u8 reserved_at_c0[0x10]; u8 rqt_actual_size[0x10]; u8 reserved_at_e0[0x6a0]; struct mlx5_ifc_rq_num_bits rq_num[0]; }; enum { MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1, }; enum { MLX5_RQC_STATE_RST = 0x0, MLX5_RQC_STATE_RDY = 0x1, MLX5_RQC_STATE_ERR = 0x3, }; struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; u8 delay_drop_en[0x1]; u8 scatter_fcs[0x1]; u8 vsd[0x1]; u8 mem_rq_type[0x4]; u8 state[0x4]; u8 reserved_at_c[0x1]; u8 flush_in_error_en[0x1]; u8 hairpin[0x1]; u8 reserved_at_f[0x11]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; u8 reserved_at_40[0x8]; u8 cqn[0x18]; u8 counter_set_id[0x8]; u8 reserved_at_68[0x18]; u8 reserved_at_80[0x8]; u8 rmpn[0x18]; u8 reserved_at_a0[0x8]; u8 hairpin_peer_sq[0x18]; u8 reserved_at_c0[0x10]; u8 hairpin_peer_vhca[0x10]; u8 reserved_at_e0[0xa0]; struct mlx5_ifc_wq_bits wq; }; enum { MLX5_RMPC_STATE_RDY = 0x1, MLX5_RMPC_STATE_ERR = 0x3, }; struct mlx5_ifc_rmpc_bits { u8 reserved_at_0[0x8]; u8 state[0x4]; u8 reserved_at_c[0x14]; u8 basic_cyclic_rcv_wqe[0x1]; u8 reserved_at_21[0x1f]; u8 reserved_at_40[0x140]; struct mlx5_ifc_wq_bits wq; }; struct mlx5_ifc_nic_vport_context_bits { u8 reserved_at_0[0x5]; u8 min_wqe_inline_mode[0x3]; u8 reserved_at_8[0x15]; u8 disable_mc_local_lb[0x1]; u8 disable_uc_local_lb[0x1]; u8 roce_en[0x1]; u8 arm_change_event[0x1]; u8 reserved_at_21[0x1a]; u8 event_on_mtu[0x1]; u8 
event_on_promisc_change[0x1]; u8 event_on_vlan_change[0x1]; u8 event_on_mc_address_change[0x1]; u8 event_on_uc_address_change[0x1]; u8 reserved_at_40[0xc]; u8 affiliation_criteria[0x4]; u8 affiliated_vhca_id[0x10]; u8 reserved_at_60[0xd0]; u8 mtu[0x10]; u8 system_image_guid[0x40]; u8 port_guid[0x40]; u8 node_guid[0x40]; u8 reserved_at_200[0x140]; u8 qkey_violation_counter[0x10]; u8 reserved_at_350[0x430]; u8 promisc_uc[0x1]; u8 promisc_mc[0x1]; u8 promisc_all[0x1]; u8 reserved_at_783[0x2]; u8 allowed_list_type[0x3]; u8 reserved_at_788[0xc]; u8 allowed_list_size[0xc]; struct mlx5_ifc_mac_address_layout_bits permanent_address; u8 reserved_at_7e0[0x20]; u8 current_uc_mac_address[0][0x40]; }; enum { MLX5_MKC_ACCESS_MODE_PA = 0x0, MLX5_MKC_ACCESS_MODE_MTT = 0x1, MLX5_MKC_ACCESS_MODE_KLMS = 0x2, MLX5_MKC_ACCESS_MODE_KSM = 0x3, MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4, MLX5_MKC_ACCESS_MODE_MEMIC = 0x5, }; struct mlx5_ifc_mkc_bits { u8 reserved_at_0[0x1]; u8 free[0x1]; u8 reserved_at_2[0x1]; u8 access_mode_4_2[0x3]; u8 reserved_at_6[0x7]; u8 relaxed_ordering_write[0x1]; u8 reserved_at_e[0x1]; u8 small_fence_on_rdma_read_response[0x1]; u8 umr_en[0x1]; u8 a[0x1]; u8 rw[0x1]; u8 rr[0x1]; u8 lw[0x1]; u8 lr[0x1]; u8 access_mode_1_0[0x2]; u8 reserved_at_18[0x2]; u8 ma_tranlation_mode[0x2]; u8 reserved_at_1c[0x4]; u8 qpn[0x18]; u8 mkey_7_0[0x8]; u8 reserved_at_40[0x20]; u8 length64[0x1]; u8 bsf_en[0x1]; u8 sync_umr[0x1]; u8 reserved_at_63[0x2]; u8 expected_sigerr_count[0x1]; u8 reserved_at_66[0x1]; u8 en_rinval[0x1]; u8 pd[0x18]; u8 start_addr[0x40]; u8 len[0x40]; u8 bsf_octword_size[0x20]; u8 reserved_at_120[0x80]; u8 translations_octword_size[0x20]; u8 reserved_at_1c0[0x1b]; u8 log_page_size[0x5]; u8 reserved_at_1e0[0x20]; }; struct mlx5_ifc_pkey_bits { u8 reserved_at_0[0x10]; u8 pkey[0x10]; }; struct mlx5_ifc_array128_auto_bits { u8 array128_auto[16][0x8]; }; struct mlx5_ifc_hca_vport_context_bits { u8 field_select[0x20]; u8 reserved_at_20[0xe0]; u8 sm_virt_aware[0x1]; u8 has_smi[0x1]; u8 has_raw[0x1]; u8 grh_required[0x1]; u8 reserved_at_104[0xc]; u8 port_physical_state[0x4]; u8 vport_state_policy[0x4]; u8 port_state[0x4]; u8 vport_state[0x4]; u8 reserved_at_120[0x20]; u8 system_image_guid[0x40]; u8 port_guid[0x40]; u8 node_guid[0x40]; u8 cap_mask1[0x20]; u8 cap_mask1_field_select[0x20]; u8 cap_mask2[0x20]; u8 cap_mask2_field_select[0x20]; u8 reserved_at_280[0x80]; u8 lid[0x10]; u8 reserved_at_310[0x4]; u8 init_type_reply[0x4]; u8 lmc[0x3]; u8 subnet_timeout[0x5]; u8 sm_lid[0x10]; u8 sm_sl[0x4]; u8 reserved_at_334[0xc]; u8 qkey_violation_counter[0x10]; u8 pkey_violation_counter[0x10]; u8 reserved_at_360[0xca0]; }; struct mlx5_ifc_esw_vport_context_bits { u8 fdb_to_vport_reg_c[0x1]; u8 reserved_at_1[0x2]; u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert[0x2]; u8 fdb_to_vport_reg_c_id[0x8]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x20]; u8 svlan_cfi[0x1]; u8 svlan_pcp[0x3]; u8 svlan_id[0xc]; u8 cvlan_cfi[0x1]; u8 cvlan_pcp[0x3]; u8 cvlan_id[0xc]; u8 reserved_at_60[0x720]; u8 sw_steering_vport_icm_address_rx[0x40]; u8 sw_steering_vport_icm_address_tx[0x40]; }; enum { MLX5_EQC_STATUS_OK = 0x0, MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa, }; enum { MLX5_EQC_ST_ARMED = 0x9, MLX5_EQC_ST_FIRED = 0xa, }; struct mlx5_ifc_eqc_bits { u8 status[0x4]; u8 reserved_at_4[0x9]; u8 ec[0x1]; u8 oi[0x1]; u8 reserved_at_f[0x5]; u8 st[0x4]; u8 reserved_at_18[0x8]; u8 reserved_at_20[0x20]; u8 reserved_at_40[0x14]; u8 page_offset[0x6]; u8 reserved_at_5a[0x6]; u8 
enum {
	MLX5_EQC_STATUS_OK = 0x0,
	MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
};

enum {
	MLX5_EQC_ST_ARMED = 0x9,
	MLX5_EQC_ST_FIRED = 0xa,
};

struct mlx5_ifc_eqc_bits {
	u8 status[0x4];
	u8 reserved_at_4[0x9];
	u8 ec[0x1];
	u8 oi[0x1];
	u8 reserved_at_f[0x5];
	u8 st[0x4];
	u8 reserved_at_18[0x8];
	u8 reserved_at_20[0x20];
	u8 reserved_at_40[0x14];
	u8 page_offset[0x6];
	u8 reserved_at_5a[0x6];
	u8 reserved_at_60[0x3];
	u8 log_eq_size[0x5];
	u8 uar_page[0x18];
	u8 reserved_at_80[0x20];
	u8 reserved_at_a0[0x18];
	u8 intr[0x8];
	u8 reserved_at_c0[0x3];
	u8 log_page_size[0x5];
	u8 reserved_at_c8[0x18];
	u8 reserved_at_e0[0x60];
	u8 reserved_at_140[0x8];
	u8 consumer_counter[0x18];
	u8 reserved_at_160[0x8];
	u8 producer_counter[0x18];
	u8 reserved_at_180[0x80];
};

enum {
	MLX5_DCTC_STATE_ACTIVE = 0x0,
	MLX5_DCTC_STATE_DRAINING = 0x1,
	MLX5_DCTC_STATE_DRAINED = 0x2,
};

enum {
	MLX5_DCTC_CS_RES_DISABLE = 0x0,
	MLX5_DCTC_CS_RES_NA = 0x1,
	MLX5_DCTC_CS_RES_UP_TO_64B = 0x2,
};

enum {
	MLX5_DCTC_MTU_256_BYTES = 0x1,
	MLX5_DCTC_MTU_512_BYTES = 0x2,
	MLX5_DCTC_MTU_1K_BYTES = 0x3,
	MLX5_DCTC_MTU_2K_BYTES = 0x4,
	MLX5_DCTC_MTU_4K_BYTES = 0x5,
};

struct mlx5_ifc_dctc_bits {
	u8 reserved_at_0[0x4];
	u8 state[0x4];
	u8 reserved_at_8[0x18];
	u8 reserved_at_20[0x8];
	u8 user_index[0x18];
	u8 reserved_at_40[0x8];
	u8 cqn[0x18];
	u8 counter_set_id[0x8];
	u8 atomic_mode[0x4];
	u8 rre[0x1];
	u8 rwe[0x1];
	u8 rae[0x1];
	u8 atomic_like_write_en[0x1];
	u8 latency_sensitive[0x1];
	u8 rlky[0x1];
	u8 free_ar[0x1];
	u8 reserved_at_73[0xd];
	u8 reserved_at_80[0x8];
	u8 cs_res[0x8];
	u8 reserved_at_90[0x3];
	u8 min_rnr_nak[0x5];
	u8 reserved_at_98[0x8];
	u8 reserved_at_a0[0x8];
	u8 srqn_xrqn[0x18];
	u8 reserved_at_c0[0x8];
	u8 pd[0x18];
	u8 tclass[0x8];
	u8 reserved_at_e8[0x4];
	u8 flow_label[0x14];
	u8 dc_access_key[0x40];
	u8 reserved_at_140[0x5];
	u8 mtu[0x3];
	u8 port[0x8];
	u8 pkey_index[0x10];
	u8 reserved_at_160[0x8];
	u8 my_addr_index[0x8];
	u8 reserved_at_170[0x8];
	u8 hop_limit[0x8];
	u8 dc_access_key_violation_count[0x20];
	u8 reserved_at_1a0[0x14];
	u8 dei_cfi[0x1];
	u8 eth_prio[0x3];
	u8 ecn[0x2];
	u8 dscp[0x6];
	u8 reserved_at_1c0[0x40];
};

enum {
	MLX5_CQC_STATUS_OK = 0x0,
	MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9,
	MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa,
};

enum {
	MLX5_CQC_CQE_SZ_64_BYTES = 0x0,
	MLX5_CQC_CQE_SZ_128_BYTES = 0x1,
};

enum {
	MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6,
	MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9,
	MLX5_CQC_ST_FIRED = 0xa,
};

enum {
	MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
	MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
	MLX5_CQ_PERIOD_NUM_MODES
};

struct mlx5_ifc_cqc_bits {
	u8 status[0x4];
	u8 reserved_at_4[0x2];
	u8 dbr_umem_valid[0x1];
	u8 reserved_at_7[0x1];
	u8 cqe_sz[0x3];
	u8 cc[0x1];
	u8 reserved_at_c[0x1];
	u8 scqe_break_moderation_en[0x1];
	u8 oi[0x1];
	u8 cq_period_mode[0x2];
	u8 cqe_comp_en[0x1];
	u8 mini_cqe_res_format[0x2];
	u8 st[0x4];
	u8 reserved_at_18[0x8];
	u8 reserved_at_20[0x20];
	u8 reserved_at_40[0x14];
	u8 page_offset[0x6];
	u8 reserved_at_5a[0x6];
	u8 reserved_at_60[0x3];
	u8 log_cq_size[0x5];
	u8 uar_page[0x18];
	u8 reserved_at_80[0x4];
	u8 cq_period[0xc];
	u8 cq_max_count[0x10];
	u8 reserved_at_a0[0x18];
	u8 c_eqn[0x8];
	u8 reserved_at_c0[0x3];
	u8 log_page_size[0x5];
	u8 reserved_at_c8[0x18];
	u8 reserved_at_e0[0x20];
	u8 reserved_at_100[0x8];
	u8 last_notified_index[0x18];
	u8 reserved_at_120[0x8];
	u8 last_solicit_index[0x18];
	u8 reserved_at_140[0x8];
	u8 consumer_counter[0x18];
	u8 reserved_at_160[0x8];
	u8 producer_counter[0x18];
	u8 reserved_at_180[0x40];
	u8 dbr_addr[0x40];
};

union mlx5_ifc_cong_control_roce_ecn_auto_bits {
	struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
	struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
	struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
	u8 reserved_at_0[0x800];
};
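/*
 * Editorial note: the *_auto_bits unions in this file are overlays over one
 * buffer; the member that is valid follows from the request that produced
 * it. For the union above, which cong_control_* view applies appears to be
 * determined by the cong_protocol value carried in the QUERY/MODIFY
 * congestion-parameter commands defined later in this file.
 */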
struct mlx5_ifc_query_adapter_param_block_bits {
	u8 reserved_at_0[0xc0];
	u8 reserved_at_c0[0x8];
	u8 ieee_vendor_id[0x18];
	u8 reserved_at_e0[0x10];
	u8 vsd_vendor_id[0x10];
	u8 vsd[208][0x8];
	u8 vsd_contd_psid[16][0x8];
};

enum {
	MLX5_XRQC_STATE_GOOD = 0x0,
	MLX5_XRQC_STATE_ERROR = 0x1,
};

enum {
	MLX5_XRQC_TOPOLOGY_NO_SPECIAL_TOPOLOGY = 0x0,
	MLX5_XRQC_TOPOLOGY_TAG_MATCHING = 0x1,
};

enum {
	MLX5_XRQC_OFFLOAD_RNDV = 0x1,
};

struct mlx5_ifc_tag_matching_topology_context_bits {
	u8 log_matching_list_sz[0x4];
	u8 reserved_at_4[0xc];
	u8 append_next_index[0x10];
	u8 sw_phase_cnt[0x10];
	u8 hw_phase_cnt[0x10];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_xrqc_bits {
	u8 state[0x4];
	u8 rlkey[0x1];
	u8 reserved_at_5[0xf];
	u8 topology[0x4];
	u8 reserved_at_18[0x4];
	u8 offload[0x4];
	u8 reserved_at_20[0x8];
	u8 user_index[0x18];
	u8 reserved_at_40[0x8];
	u8 cqn[0x18];
	u8 reserved_at_60[0xa0];
	struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
	u8 reserved_at_180[0x280];
	struct mlx5_ifc_wq_bits wq;
};

union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
	struct mlx5_ifc_modify_field_select_bits modify_field_select;
	struct mlx5_ifc_resize_field_select_bits resize_field_select;
	u8 reserved_at_0[0x20];
};

union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
	struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
	struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
	struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
	u8 reserved_at_0[0x20];
};

union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
	struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
	struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
	struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
	struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
	struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
	struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
	struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout;
	struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout;
	struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
	struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
	struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
	u8 reserved_at_0[0x7c0];
};

union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
	struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
	u8 reserved_at_0[0x7c0];
};

union mlx5_ifc_event_auto_bits {
	struct mlx5_ifc_comp_event_bits comp_event;
	struct mlx5_ifc_dct_events_bits dct_events;
	struct mlx5_ifc_qp_events_bits qp_events;
	struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
	struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
	struct mlx5_ifc_cq_error_bits cq_error;
	struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
	struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
	struct mlx5_ifc_gpio_event_bits gpio_event;
	struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
	struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
	struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
	u8 reserved_at_0[0xe0];
};

struct mlx5_ifc_health_buffer_bits {
	u8 reserved_at_0[0x100];
	u8 assert_existptr[0x20];
	u8 assert_callra[0x20];
	u8 reserved_at_140[0x40];
	u8 fw_version[0x20];
	u8 hw_id[0x20];
	u8 reserved_at_1c0[0x20];
	u8 irisc_index[0x8];
	u8 synd[0x8];
	u8 ext_synd[0x10];
};
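/*
 * Editorial note: the health buffer above is not a command layout; it
 * describes the firmware health area that mlx5 drivers poll. synd/ext_synd
 * carry the failure syndrome and assert_existptr/assert_callra point at
 * the failing firmware assert. This is a summary of common driver usage;
 * the file itself only defines the layout.
 */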
struct mlx5_ifc_register_loopback_control_bits {
	u8 no_lb[0x1];
	u8 reserved_at_1[0x7];
	u8 port[0x8];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x60];
};

struct mlx5_ifc_vport_tc_element_bits {
	u8 traffic_class[0x4];
	u8 reserved_at_4[0xc];
	u8 vport_number[0x10];
};

struct mlx5_ifc_vport_element_bits {
	u8 reserved_at_0[0x10];
	u8 vport_number[0x10];
};

enum {
	TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
	TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
	TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
};

struct mlx5_ifc_tsar_element_bits {
	u8 reserved_at_0[0x8];
	u8 tsar_type[0x8];
	u8 reserved_at_10[0x10];
};

enum {
	MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0,
	MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1,
};

struct mlx5_ifc_teardown_hca_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x3f];
	u8 state[0x1];
};

enum {
	MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
	MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
	MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2,
};

struct mlx5_ifc_teardown_hca_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x10];
	u8 profile[0x10];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_sqerr2rts_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_sqerr2rts_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_sqd2rts_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_sqd2rts_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_set_roce_address_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_set_roce_address_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 roce_address_index[0x10];
	u8 reserved_at_50[0xc];
	u8 vhca_port_num[0x4];
	u8 reserved_at_60[0x20];
	struct mlx5_ifc_roce_addr_layout_bits roce_address;
};

struct mlx5_ifc_set_mad_demux_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

enum {
	MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0,
	MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2,
};

struct mlx5_ifc_set_mad_demux_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x20];
	u8 reserved_at_60[0x6];
	u8 demux_mode[0x2];
	u8 reserved_at_68[0x18];
};

struct mlx5_ifc_set_l2_table_entry_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_set_l2_table_entry_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x60];
	u8 reserved_at_a0[0x8];
	u8 table_index[0x18];
	u8 reserved_at_c0[0x20];
	u8 reserved_at_e0[0x13];
	u8 vlan_valid[0x1];
	u8 vlan[0xc];
	struct mlx5_ifc_mac_address_layout_bits mac_address;
	u8 reserved_at_140[0xc0];
};

struct mlx5_ifc_set_issi_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_issi_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x10];
	u8 current_issi[0x10];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_set_hca_cap_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_set_hca_cap_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
	union mlx5_ifc_hca_cap_union_bits capability;
};

enum {
	MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
	MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
	MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
	MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
};

struct mlx5_ifc_set_fte_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_set_fte_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xf];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x20];
	u8 table_type[0x8];
	u8 reserved_at_88[0x18];
	u8 reserved_at_a0[0x8];
	u8 table_id[0x18];
	u8 reserved_at_c0[0x18];
	u8 modify_enable_mask[0x8];
	u8 reserved_at_e0[0x20];
	u8 flow_index[0x20];
	u8 reserved_at_120[0xe0];
	struct mlx5_ifc_flow_context_bits flow_context;
};

struct mlx5_ifc_rts2rts_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_rts2rts_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_rtr2rts_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_rtr2rts_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_rst2init_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_rst2init_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_query_xrq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_xrqc_bits xrq_context;
};

struct mlx5_ifc_query_xrq_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 xrqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_xrc_srq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
	u8 reserved_at_280[0x600];
	u8 pas[0][0x40];
};

struct mlx5_ifc_query_xrc_srq_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 xrc_srqn[0x18];
	u8 reserved_at_60[0x20];
};

enum {
	MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0,
	MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1,
};

struct mlx5_ifc_query_vport_state_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x20];
	u8 reserved_at_60[0x18];
	u8 admin_state[0x4];
	u8 state[0x4];
};
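/*
 * Editorial note on the command convention visible above and below: each
 * firmware command is described by an <name>_in_bits/<name>_out_bits pair.
 * The input starts with opcode/op_mod (plus uid where the object supports
 * it); the output starts with an 8-bit status and a 32-bit syndrome that
 * identify the failure when status is non-zero. Trailing zero-length
 * arrays such as pas[0][0x40] are variable-length tails (e.g. the list of
 * physical page addresses that follows a context).
 */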
enum {
	MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
	MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
	MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2,
};

struct mlx5_ifc_arm_monitor_counter_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x20];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_arm_monitor_counter_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

enum {
	MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT = 0x0,
	MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1,
};

enum mlx5_monitor_counter_ppcnt {
	MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0x0,
	MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0x1,
	MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0x2,
	MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3,
	MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0x4,
	MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0x5,
};

enum {
	MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4,
};

struct mlx5_ifc_monitor_counter_output_bits {
	u8 reserved_at_0[0x4];
	u8 type[0x4];
	u8 reserved_at_8[0x8];
	u8 counter[0x10];
	u8 counter_group_id[0x20];
};

#define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6)
#define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1 (1)
#define MLX5_CMD_SET_MONITOR_NUM_COUNTER (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\
					  MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1)

struct mlx5_ifc_set_monitor_counter_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x10];
	u8 num_of_counters[0x10];
	u8 reserved_at_60[0x20];
	struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER];
};

struct mlx5_ifc_set_monitor_counter_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_query_vport_state_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xf];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_vnic_env_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env;
};

enum {
	MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0,
};

struct mlx5_ifc_query_vnic_env_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xf];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_vport_counter_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_traffic_counter_bits received_errors;
	struct mlx5_ifc_traffic_counter_bits transmit_errors;
	struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
	struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
	struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
	struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
	struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
	struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
	struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
	struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
	struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
	struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
	u8 reserved_at_680[0xa00];
};

enum {
	MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0,
};
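/*
 * Illustrative usage sketch (editorial, not part of the original file):
 * filling query_vport_state_in and executing it through mlx5_cmd_exec(),
 * which mlx5_core exports for a struct mlx5_core_dev *dev. The opcode
 * constant MLX5_CMD_OP_QUERY_VPORT_STATE is defined elsewhere in this
 * header.
 *
 *	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
 *	int err;
 *
 *	MLX5_SET(query_vport_state_in, in, opcode,
 *		 MLX5_CMD_OP_QUERY_VPORT_STATE);
 *	MLX5_SET(query_vport_state_in, in, op_mod,
 *		 MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *	if (!err)
 *		state = MLX5_GET(query_vport_state_out, out, state);
 */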
struct mlx5_ifc_query_vport_counter_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xb];
	u8 port_num[0x4];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x60];
	u8 clear[0x1];
	u8 reserved_at_c1[0x1f];
	u8 reserved_at_e0[0x20];
};

struct mlx5_ifc_query_tis_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_tisc_bits tis_context;
};

struct mlx5_ifc_query_tis_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 tisn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_tir_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0xc0];
	struct mlx5_ifc_tirc_bits tir_context;
};

struct mlx5_ifc_query_tir_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 tirn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_srq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_srqc_bits srq_context_entry;
	u8 reserved_at_280[0x600];
	u8 pas[0][0x40];
};

struct mlx5_ifc_query_srq_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 srqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_sq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0xc0];
	struct mlx5_ifc_sqc_bits sq_context;
};

struct mlx5_ifc_query_sq_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 sqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_special_contexts_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 dump_fill_mkey[0x20];
	u8 resd_lkey[0x20];
	u8 null_mkey[0x20];
	u8 reserved_at_a0[0x60];
};

struct mlx5_ifc_query_special_contexts_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_query_scheduling_element_out_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0xc0];
	struct mlx5_ifc_scheduling_context_bits scheduling_context;
	u8 reserved_at_300[0x100];
};

enum {
	SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
};

struct mlx5_ifc_query_scheduling_element_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 scheduling_hierarchy[0x8];
	u8 reserved_at_48[0x18];
	u8 scheduling_element_id[0x20];
	u8 reserved_at_80[0x180];
};

struct mlx5_ifc_query_rqt_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0xc0];
	struct mlx5_ifc_rqtc_bits rqt_context;
};

struct mlx5_ifc_query_rqt_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 rqtn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_rq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0xc0];
	struct mlx5_ifc_rqc_bits rq_context;
};

struct mlx5_ifc_query_rq_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 rqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_roce_address_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_roce_addr_layout_bits roce_address;
};
struct mlx5_ifc_query_roce_address_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 roce_address_index[0x10];
	u8 reserved_at_50[0xc];
	u8 vhca_port_num[0x4];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_rmp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0xc0];
	struct mlx5_ifc_rmpc_bits rmp_context;
};

struct mlx5_ifc_query_rmp_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 rmpn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
	u8 pas[0][0x40];
};

struct mlx5_ifc_query_qp_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_q_counter_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	u8 rx_write_requests[0x20];
	u8 reserved_at_a0[0x20];
	u8 rx_read_requests[0x20];
	u8 reserved_at_e0[0x20];
	u8 rx_atomic_requests[0x20];
	u8 reserved_at_120[0x20];
	u8 rx_dct_connect[0x20];
	u8 reserved_at_160[0x20];
	u8 out_of_buffer[0x20];
	u8 reserved_at_1a0[0x20];
	u8 out_of_sequence[0x20];
	u8 reserved_at_1e0[0x20];
	u8 duplicate_request[0x20];
	u8 reserved_at_220[0x20];
	u8 rnr_nak_retry_err[0x20];
	u8 reserved_at_260[0x20];
	u8 packet_seq_err[0x20];
	u8 reserved_at_2a0[0x20];
	u8 implied_nak_seq_err[0x20];
	u8 reserved_at_2e0[0x20];
	u8 local_ack_timeout_err[0x20];
	u8 reserved_at_320[0xa0];
	u8 resp_local_length_error[0x20];
	u8 req_local_length_error[0x20];
	u8 resp_local_qp_error[0x20];
	u8 local_operation_error[0x20];
	u8 resp_local_protection[0x20];
	u8 req_local_protection[0x20];
	u8 resp_cqe_error[0x20];
	u8 req_cqe_error[0x20];
	u8 req_mw_binding[0x20];
	u8 req_bad_response[0x20];
	u8 req_remote_invalid_request[0x20];
	u8 resp_remote_invalid_request[0x20];
	u8 req_remote_access_errors[0x20];
	u8 resp_remote_access_errors[0x20];
	u8 req_remote_operation_errors[0x20];
	u8 req_transport_retries_exceeded[0x20];
	u8 cq_overflow[0x20];
	u8 resp_cqe_flush_error[0x20];
	u8 req_cqe_flush_error[0x20];
	u8 reserved_at_620[0x1e0];
};

struct mlx5_ifc_query_q_counter_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x80];
	u8 clear[0x1];
	u8 reserved_at_c1[0x1f];
	u8 reserved_at_e0[0x18];
	u8 counter_set_id[0x8];
};

struct mlx5_ifc_query_pages_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 embedded_cpu_function[0x1];
	u8 reserved_at_41[0xf];
	u8 function_id[0x10];
	u8 num_pages[0x20];
};

enum {
	MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1,
	MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2,
	MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3,
};

struct mlx5_ifc_query_pages_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 embedded_cpu_function[0x1];
	u8 reserved_at_41[0xf];
	u8 function_id[0x10];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_nic_vport_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
};

struct mlx5_ifc_query_nic_vport_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xf];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x5];
	u8 allowed_list_type[0x3];
	u8 reserved_at_68[0x18];
};
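/*
 * Editorial note: in query_nic_vport_context_in above, allowed_list_type
 * appears to select which address list the returned nic_vport_context
 * carries in its current_uc_mac_address[] tail (unicast MAC, multicast
 * MAC or VLAN list, as used by the mlx5 vport code), with the context's
 * allowed_list_size giving the number of valid entries.
 */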
struct mlx5_ifc_query_mkey_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
	u8 reserved_at_280[0x600];
	u8 bsf0_klm0_pas_mtt0_1[16][0x8];
	u8 bsf1_klm1_pas_mtt2_3[16][0x8];
};

struct mlx5_ifc_query_mkey_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 mkey_index[0x18];
	u8 pg_access[0x1];
	u8 reserved_at_61[0x1f];
};

struct mlx5_ifc_query_mad_demux_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	u8 mad_dumux_parameters_block[0x20];
};

struct mlx5_ifc_query_mad_demux_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_query_l2_table_entry_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0xa0];
	u8 reserved_at_e0[0x13];
	u8 vlan_valid[0x1];
	u8 vlan[0xc];
	struct mlx5_ifc_mac_address_layout_bits mac_address;
	u8 reserved_at_140[0xc0];
};

struct mlx5_ifc_query_l2_table_entry_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x60];
	u8 reserved_at_a0[0x8];
	u8 table_index[0x18];
	u8 reserved_at_c0[0x140];
};

struct mlx5_ifc_query_issi_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x10];
	u8 current_issi[0x10];
	u8 reserved_at_60[0xa0];
	u8 reserved_at_100[76][0x8];
	u8 supported_issi_dw0[0x20];
};

struct mlx5_ifc_query_issi_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_set_driver_version_out_bits {
	u8 status[0x8];
	u8 reserved_0[0x18];
	u8 syndrome[0x20];
	u8 reserved_1[0x40];
};

struct mlx5_ifc_set_driver_version_in_bits {
	u8 opcode[0x10];
	u8 reserved_0[0x10];
	u8 reserved_1[0x10];
	u8 op_mod[0x10];
	u8 reserved_2[0x40];
	u8 driver_version[64][0x8];
};

struct mlx5_ifc_query_hca_vport_pkey_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_pkey_bits pkey[0];
};

struct mlx5_ifc_query_hca_vport_pkey_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xb];
	u8 port_num[0x4];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x10];
	u8 pkey_index[0x10];
};

enum {
	MLX5_HCA_VPORT_SEL_PORT_GUID = 1 << 0,
	MLX5_HCA_VPORT_SEL_NODE_GUID = 1 << 1,
	MLX5_HCA_VPORT_SEL_STATE_POLICY = 1 << 2,
};

struct mlx5_ifc_query_hca_vport_gid_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x20];
	u8 gids_num[0x10];
	u8 reserved_at_70[0x10];
	struct mlx5_ifc_array128_auto_bits gid[0];
};

struct mlx5_ifc_query_hca_vport_gid_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xb];
	u8 port_num[0x4];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x10];
	u8 gid_index[0x10];
};

struct mlx5_ifc_query_hca_vport_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
};

struct mlx5_ifc_query_hca_vport_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xb];
	u8 port_num[0x4];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_hca_cap_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	union mlx5_ifc_hca_cap_union_bits capability;
};

struct mlx5_ifc_query_hca_cap_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_function[0x1];
	u8 reserved_at_41[0xf];
	u8 function_id[0x10];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_other_hca_cap_bits {
	u8 roce[0x1];
	u8 reserved_at_1[0x27f];
};

struct mlx5_ifc_query_other_hca_cap_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_other_hca_cap_bits other_capability;
};

struct mlx5_ifc_query_other_hca_cap_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x10];
	u8 function_id[0x10];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_modify_other_hca_cap_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_modify_other_hca_cap_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x10];
	u8 function_id[0x10];
	u8 field_select[0x20];
	struct mlx5_ifc_other_hca_cap_bits other_capability;
};

struct mlx5_ifc_flow_table_context_bits {
	u8 reformat_en[0x1];
	u8 decap_en[0x1];
	u8 sw_owner[0x1];
	u8 termination_table[0x1];
	u8 table_miss_action[0x4];
	u8 level[0x8];
	u8 reserved_at_10[0x8];
	u8 log_size[0x8];
	u8 reserved_at_20[0x8];
	u8 table_miss_id[0x18];
	u8 reserved_at_40[0x8];
	u8 lag_master_next_table_id[0x18];
	u8 reserved_at_60[0x60];
	u8 sw_owner_icm_root_1[0x40];
	u8 sw_owner_icm_root_0[0x40];
};

struct mlx5_ifc_query_flow_table_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x80];
	struct mlx5_ifc_flow_table_context_bits flow_table_context;
};

struct mlx5_ifc_query_flow_table_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
	u8 table_type[0x8];
	u8 reserved_at_88[0x18];
	u8 reserved_at_a0[0x8];
	u8 table_id[0x18];
	u8 reserved_at_c0[0x140];
};

struct mlx5_ifc_query_fte_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x1c0];
	struct mlx5_ifc_flow_context_bits flow_context;
};

struct mlx5_ifc_query_fte_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
	u8 table_type[0x8];
	u8 reserved_at_88[0x18];
	u8 reserved_at_a0[0x8];
	u8 table_id[0x18];
	u8 reserved_at_c0[0x40];
	u8 flow_index[0x20];
	u8 reserved_at_120[0xe0];
};

enum {
	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
	MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
	MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
};

struct mlx5_ifc_query_flow_group_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0xa0];
	u8 start_flow_index[0x20];
	u8 reserved_at_100[0x20];
	u8 end_flow_index[0x20];
	u8 reserved_at_140[0xa0];
	u8 reserved_at_1e0[0x18];
	u8 match_criteria_enable[0x8];
	struct mlx5_ifc_fte_match_param_bits match_criteria;
	u8 reserved_at_1200[0xe00];
};

struct mlx5_ifc_query_flow_group_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
	u8 table_type[0x8];
	u8 reserved_at_88[0x18];
	u8 reserved_at_a0[0x8];
	u8 table_id[0x18];
	u8 group_id[0x20];
	u8 reserved_at_e0[0x120];
};
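/*
 * Editorial note: match_criteria_enable in the flow group layouts above is
 * a bitmask derived from the MLX5_..._MATCH_CRITERIA_ENABLE_* values, i.e.
 * bit (1 << value) selects which section of fte_match_param (outer
 * headers, misc parameters, inner headers, ...) participates in matching
 * for the flow entries of that group.
 */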
struct mlx5_ifc_query_flow_counter_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
};

struct mlx5_ifc_query_flow_counter_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x80];
	u8 clear[0x1];
	u8 reserved_at_c1[0xf];
	u8 num_of_counters[0x10];
	u8 flow_counter_id[0x20];
};

struct mlx5_ifc_query_esw_vport_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
};

struct mlx5_ifc_query_esw_vport_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xf];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_modify_esw_vport_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_esw_vport_context_fields_select_bits {
	u8 reserved_at_0[0x1b];
	u8 fdb_to_vport_reg_c_id[0x1];
	u8 vport_cvlan_insert[0x1];
	u8 vport_svlan_insert[0x1];
	u8 vport_cvlan_strip[0x1];
	u8 vport_svlan_strip[0x1];
};

struct mlx5_ifc_modify_esw_vport_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xf];
	u8 vport_number[0x10];
	struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
	struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
};

struct mlx5_ifc_query_eq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_eqc_bits eq_context_entry;
	u8 reserved_at_280[0x40];
	u8 event_bitmask[0x40];
	u8 reserved_at_300[0x580];
	u8 pas[0][0x40];
};

struct mlx5_ifc_query_eq_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x18];
	u8 eq_number[0x8];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_packet_reformat_context_in_bits {
	u8 reserved_at_0[0x5];
	u8 reformat_type[0x3];
	u8 reserved_at_8[0xe];
	u8 reformat_data_size[0xa];
	u8 reserved_at_20[0x10];
	u8 reformat_data[2][0x8];
	u8 more_reformat_data[0][0x8];
};

struct mlx5_ifc_query_packet_reformat_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0xa0];
	struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[0];
};

struct mlx5_ifc_query_packet_reformat_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 packet_reformat_id[0x20];
	u8 reserved_at_60[0xa0];
};

struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 packet_reformat_id[0x20];
	u8 reserved_at_60[0x20];
};

enum mlx5_reformat_ctx_type {
	MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
	MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
	MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
	MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
	MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
};

struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0xa0];
	struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context;
};

struct mlx5_ifc_dealloc_packet_reformat_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};
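/*
 * Editorial note: packet_reformat_context_in above is the variable-length
 * description of an encap/decap transformation. reformat_type selects one
 * of the MLX5_REFORMAT_TYPE_* values, and the header bytes themselves are
 * carried in reformat_data[]/more_reformat_data[], with their total length
 * given by reformat_data_size.
 */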
struct mlx5_ifc_dealloc_packet_reformat_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_20[0x10];
	u8 op_mod[0x10];
	u8 packet_reformat_id[0x20];
	u8 reserved_60[0x20];
};

struct mlx5_ifc_set_action_in_bits {
	u8 action_type[0x4];
	u8 field[0xc];
	u8 reserved_at_10[0x3];
	u8 offset[0x5];
	u8 reserved_at_18[0x3];
	u8 length[0x5];
	u8 data[0x20];
};

struct mlx5_ifc_add_action_in_bits {
	u8 action_type[0x4];
	u8 field[0xc];
	u8 reserved_at_10[0x10];
	u8 data[0x20];
};

union mlx5_ifc_set_action_in_add_action_in_auto_bits {
	struct mlx5_ifc_set_action_in_bits set_action_in;
	struct mlx5_ifc_add_action_in_bits add_action_in;
	u8 reserved_at_0[0x40];
};

enum {
	MLX5_ACTION_TYPE_SET = 0x1,
	MLX5_ACTION_TYPE_ADD = 0x2,
};

enum {
	MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16 = 0x1,
	MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0 = 0x2,
	MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE = 0x3,
	MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16 = 0x4,
	MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0 = 0x5,
	MLX5_ACTION_IN_FIELD_OUT_IP_DSCP = 0x6,
	MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS = 0x7,
	MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT = 0x8,
	MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT = 0x9,
	MLX5_ACTION_IN_FIELD_OUT_IP_TTL = 0xa,
	MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT = 0xb,
	MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT = 0xc,
	MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96 = 0xd,
	MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64 = 0xe,
	MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32 = 0xf,
	MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0 = 0x10,
	MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96 = 0x11,
	MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64 = 0x12,
	MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32 = 0x13,
	MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14,
	MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15,
	MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16,
	MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17,
	MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47,
	MLX5_ACTION_IN_FIELD_METADATA_REG_A = 0x49,
	MLX5_ACTION_IN_FIELD_METADATA_REG_B = 0x50,
	MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51,
	MLX5_ACTION_IN_FIELD_METADATA_REG_C_1 = 0x52,
	MLX5_ACTION_IN_FIELD_METADATA_REG_C_2 = 0x53,
	MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54,
	MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55,
	MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56,
	MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
	MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
};

struct mlx5_ifc_alloc_modify_header_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 modify_header_id[0x20];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_alloc_modify_header_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x20];
	u8 table_type[0x8];
	u8 reserved_at_68[0x10];
	u8 num_of_actions[0x8];
	union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0];
};

struct mlx5_ifc_dealloc_modify_header_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_dealloc_modify_header_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 modify_header_id[0x20];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_dct_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_dctc_bits dct_context_entry;
	u8 reserved_at_280[0x180];
};

struct mlx5_ifc_query_dct_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 dctn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_cq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_cqc_bits cq_context;
	u8 reserved_at_280[0x600];
	u8 pas[0][0x40];
};
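/*
 * Editorial note: set_action_in/add_action_in above each encode one
 * modify-header action. field names a MLX5_ACTION_IN_FIELD_* target; for
 * MLX5_ACTION_TYPE_SET the offset/length pair appears to select the bit
 * range within that field which is overwritten with data, while
 * MLX5_ACTION_TYPE_ADD adds data to the field (summary of how the mlx5
 * modify-header code uses these layouts).
 */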
struct mlx5_ifc_query_cq_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 cqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_cong_status_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x20];
	u8 enable[0x1];
	u8 tag_enable[0x1];
	u8 reserved_at_62[0x1e];
};

struct mlx5_ifc_query_cong_status_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x18];
	u8 priority[0x4];
	u8 cong_protocol[0x4];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_cong_statistics_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	u8 rp_cur_flows[0x20];
	u8 sum_flows[0x20];
	u8 rp_cnp_ignored_high[0x20];
	u8 rp_cnp_ignored_low[0x20];
	u8 rp_cnp_handled_high[0x20];
	u8 rp_cnp_handled_low[0x20];
	u8 reserved_at_140[0x100];
	u8 time_stamp_high[0x20];
	u8 time_stamp_low[0x20];
	u8 accumulators_period[0x20];
	u8 np_ecn_marked_roce_packets_high[0x20];
	u8 np_ecn_marked_roce_packets_low[0x20];
	u8 np_cnp_sent_high[0x20];
	u8 np_cnp_sent_low[0x20];
	u8 reserved_at_320[0x560];
};

struct mlx5_ifc_query_cong_statistics_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 clear[0x1];
	u8 reserved_at_41[0x1f];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_cong_params_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
};

struct mlx5_ifc_query_cong_params_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x1c];
	u8 cong_protocol[0x4];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_query_adapter_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
};

struct mlx5_ifc_query_adapter_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_qp_2rst_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_qp_2rst_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_qp_2err_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_qp_2err_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_page_fault_resume_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_page_fault_resume_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 error[0x1];
	u8 reserved_at_41[0x4];
	u8 page_fault_type[0x3];
	u8 wq_number[0x18];
	u8 reserved_at_60[0x8];
	u8 token[0x18];
};

struct mlx5_ifc_nop_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_nop_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_modify_vport_state_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_vport_state_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xf];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x18];
	u8 admin_state[0x4];
	u8 reserved_at_7c[0x4];
};

struct mlx5_ifc_modify_tis_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_modify_tis_bitmask_bits {
	u8 reserved_at_0[0x20];
	u8 reserved_at_20[0x1d];
	u8 lag_tx_port_affinity[0x1];
	u8 strict_lag_tx_port_affinity[0x1];
	u8 prio[0x1];
};

struct mlx5_ifc_modify_tis_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 tisn[0x18];
	u8 reserved_at_60[0x20];
	struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
	u8 reserved_at_c0[0x40];
	struct mlx5_ifc_tisc_bits ctx;
};

struct mlx5_ifc_modify_tir_bitmask_bits {
	u8 reserved_at_0[0x20];
	u8 reserved_at_20[0x1b];
	u8 self_lb_en[0x1];
	u8 reserved_at_3c[0x1];
	u8 hash[0x1];
	u8 reserved_at_3e[0x1];
	u8 lro[0x1];
};

struct mlx5_ifc_modify_tir_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_modify_tir_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 tirn[0x18];
	u8 reserved_at_60[0x20];
	struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
	u8 reserved_at_c0[0x40];
	struct mlx5_ifc_tirc_bits ctx;
};

struct mlx5_ifc_modify_sq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_modify_sq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 sq_state[0x4];
	u8 reserved_at_44[0x4];
	u8 sqn[0x18];
	u8 reserved_at_60[0x20];
	u8 modify_bitmask[0x40];
	u8 reserved_at_c0[0x40];
	struct mlx5_ifc_sqc_bits ctx;
};

struct mlx5_ifc_modify_scheduling_element_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x1c0];
};

enum {
	MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1,
	MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2,
};

struct mlx5_ifc_modify_scheduling_element_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 scheduling_hierarchy[0x8];
	u8 reserved_at_48[0x18];
	u8 scheduling_element_id[0x20];
	u8 reserved_at_80[0x20];
	u8 modify_bitmask[0x20];
	u8 reserved_at_c0[0x40];
	struct mlx5_ifc_scheduling_context_bits scheduling_context;
	u8 reserved_at_300[0x100];
};

struct mlx5_ifc_modify_rqt_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_rqt_bitmask_bits {
	u8 reserved_at_0[0x20];
	u8 reserved_at_20[0x1f];
	u8 rqn_list[0x1];
};

struct mlx5_ifc_modify_rqt_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 rqtn[0x18];
	u8 reserved_at_60[0x20];
	struct mlx5_ifc_rqt_bitmask_bits bitmask;
	u8 reserved_at_c0[0x40];
	struct mlx5_ifc_rqtc_bits ctx;
};

struct mlx5_ifc_modify_rq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

enum {
	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,
	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,
};

struct mlx5_ifc_modify_rq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 rq_state[0x4];
	u8 reserved_at_44[0x4];
	u8 rqn[0x18];
	u8 reserved_at_60[0x20];
	u8 modify_bitmask[0x40];
	u8 reserved_at_c0[0x40];
	struct mlx5_ifc_rqc_bits ctx;
};
struct mlx5_ifc_modify_rmp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_rmp_bitmask_bits {
	u8 reserved_at_0[0x20];
	u8 reserved_at_20[0x1f];
	u8 lwm[0x1];
};

struct mlx5_ifc_modify_rmp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 rmp_state[0x4];
	u8 reserved_at_44[0x4];
	u8 rmpn[0x18];
	u8 reserved_at_60[0x20];
	struct mlx5_ifc_rmp_bitmask_bits bitmask;
	u8 reserved_at_c0[0x40];
	struct mlx5_ifc_rmpc_bits ctx;
};

struct mlx5_ifc_modify_nic_vport_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_modify_nic_vport_field_select_bits {
	u8 reserved_at_0[0x12];
	u8 affiliation[0x1];
	u8 reserved_at_13[0x1];
	u8 disable_uc_local_lb[0x1];
	u8 disable_mc_local_lb[0x1];
	u8 node_guid[0x1];
	u8 port_guid[0x1];
	u8 min_inline[0x1];
	u8 mtu[0x1];
	u8 change_event[0x1];
	u8 promisc[0x1];
	u8 permanent_address[0x1];
	u8 addresses_list[0x1];
	u8 roce_en[0x1];
	u8 reserved_at_1f[0x1];
};

struct mlx5_ifc_modify_nic_vport_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xf];
	u8 vport_number[0x10];
	struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
	u8 reserved_at_80[0x780];
	struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
};

struct mlx5_ifc_modify_hca_vport_context_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_modify_hca_vport_context_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xb];
	u8 port_num[0x4];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x20];
	struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
};

struct mlx5_ifc_modify_cq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

enum {
	MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0,
	MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1,
};

struct mlx5_ifc_modify_cq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 cqn[0x18];
	union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
	struct mlx5_ifc_cqc_bits cq_context;
	u8 reserved_at_280[0x60];
	u8 cq_umem_valid[0x1];
	u8 reserved_at_2e1[0x1f];
	u8 reserved_at_300[0x580];
	u8 pas[0][0x40];
};

struct mlx5_ifc_modify_cong_status_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_modify_cong_status_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x18];
	u8 priority[0x4];
	u8 cong_protocol[0x4];
	u8 enable[0x1];
	u8 tag_enable[0x1];
	u8 reserved_at_62[0x1e];
};

struct mlx5_ifc_modify_cong_params_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_modify_cong_params_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x1c];
	u8 cong_protocol[0x4];
	union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
	u8 reserved_at_80[0x80];
	union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
};

struct mlx5_ifc_manage_pages_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 output_num_entries[0x20];
	u8 reserved_at_60[0x20];
	u8 pas[0][0x40];
};
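/*
 * Editorial note: MANAGE_PAGES is the page-handing protocol between the
 * driver and firmware. The driver supplies pages in the pas[] tail with
 * input_num_entries (op_mod reporting allocation success or failure), and
 * firmware hands pages back through output_num_entries/pas[] when asked
 * via MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES below. QUERY_PAGES
 * (earlier in this file) tells the driver how many pages each init stage
 * still needs.
 */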
enum {
	MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0,
	MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1,
	MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2,
};

struct mlx5_ifc_manage_pages_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 embedded_cpu_function[0x1];
	u8 reserved_at_41[0xf];
	u8 function_id[0x10];
	u8 input_num_entries[0x20];
	u8 pas[0][0x40];
};

struct mlx5_ifc_mad_ifc_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	u8 response_mad_packet[256][0x8];
};

struct mlx5_ifc_mad_ifc_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 remote_lid[0x10];
	u8 reserved_at_50[0x8];
	u8 port[0x8];
	u8 reserved_at_60[0x20];
	u8 mad[256][0x8];
};

struct mlx5_ifc_init_hca_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_init_hca_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
	u8 sw_owner_id[4][0x20];
};

struct mlx5_ifc_init2rtr_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_init2rtr_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_init2init_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_init2init_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 opt_param_mask[0x20];
	u8 reserved_at_a0[0x20];
	struct mlx5_ifc_qpc_bits qpc;
	u8 reserved_at_800[0x80];
};

struct mlx5_ifc_get_dropped_packet_log_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
	u8 packet_headers_log[128][0x8];
	u8 packet_syndrome[64][0x8];
};

struct mlx5_ifc_get_dropped_packet_log_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_gen_eqe_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x18];
	u8 eq_number[0x8];
	u8 reserved_at_60[0x20];
	u8 eqe[64][0x8];
};

struct mlx5_ifc_gen_eq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_enable_hca_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x20];
};

struct mlx5_ifc_enable_hca_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 embedded_cpu_function[0x1];
	u8 reserved_at_41[0xf];
	u8 function_id[0x10];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_drain_dct_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_drain_dct_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 dctn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_disable_hca_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x20];
};

struct mlx5_ifc_disable_hca_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 embedded_cpu_function[0x1];
	u8 reserved_at_41[0xf];
	u8 function_id[0x10];
	u8 reserved_at_60[0x20];
};
struct mlx5_ifc_detach_from_mcg_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_detach_from_mcg_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
	u8 multicast_gid[16][0x8];
};

struct mlx5_ifc_destroy_xrq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_xrq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 xrqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_xrc_srq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_xrc_srq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 xrc_srqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_tis_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_tis_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 tisn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_tir_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_tir_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 tirn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_srq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_srq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 srqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_sq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_sq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 sqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_scheduling_element_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x1c0];
};

struct mlx5_ifc_destroy_scheduling_element_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 scheduling_hierarchy[0x8];
	u8 reserved_at_48[0x18];
	u8 scheduling_element_id[0x20];
	u8 reserved_at_80[0x180];
};

struct mlx5_ifc_destroy_rqt_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_rqt_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 rqtn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_rq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_rq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 rqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_set_delay_drop_params_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x20];
	u8 reserved_at_60[0x10];
	u8 delay_drop_timeout[0x10];
};

struct mlx5_ifc_set_delay_drop_params_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_rmp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_rmp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 rmpn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_qp_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_qp_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 qpn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_psv_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_psv_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 psvn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_mkey_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_mkey_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 mkey_index[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_flow_table_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_flow_table_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xf];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x20];
	u8 table_type[0x8];
	u8 reserved_at_88[0x18];
	u8 reserved_at_a0[0x8];
	u8 table_id[0x18];
	u8 reserved_at_c0[0x140];
};

struct mlx5_ifc_destroy_flow_group_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_flow_group_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 other_vport[0x1];
	u8 reserved_at_41[0xf];
	u8 vport_number[0x10];
	u8 reserved_at_60[0x20];
	u8 table_type[0x8];
	u8 reserved_at_88[0x18];
	u8 reserved_at_a0[0x8];
	u8 table_id[0x18];
	u8 group_id[0x20];
	u8 reserved_at_e0[0x120];
};

struct mlx5_ifc_destroy_eq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_eq_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x18];
	u8 eq_number[0x8];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_dct_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_dct_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 dctn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_destroy_cq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_destroy_cq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 cqn[0x18];
	u8 reserved_at_60[0x20];
};

struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x20];
	u8 reserved_at_60[0x10];
	u8 vxlan_udp_port[0x10];
};
reserved_at_40[0x40]; }; struct mlx5_ifc_delete_l2_table_entry_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x60]; u8 reserved_at_a0[0x8]; u8 table_index[0x18]; u8 reserved_at_c0[0x140]; }; struct mlx5_ifc_delete_fte_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_delete_fte_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_at_41[0xf]; u8 vport_number[0x10]; u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x8]; u8 table_id[0x18]; u8 reserved_at_c0[0x40]; u8 flow_index[0x20]; u8 reserved_at_120[0xe0]; }; struct mlx5_ifc_dealloc_xrcd_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_xrcd_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 xrcd[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_dealloc_uar_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_uar_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 uar[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_dealloc_transport_domain_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_transport_domain_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 transport_domain[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_dealloc_q_counter_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_q_counter_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x18]; u8 counter_set_id[0x8]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_dealloc_pd_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_pd_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 pd[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_dealloc_flow_counter_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_flow_counter_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 flow_counter_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_xrq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 xrqn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_xrq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_xrqc_bits xrq_context; }; struct mlx5_ifc_create_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 xrc_srqn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_xrc_srq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; u8 reserved_at_280[0x60]; u8 xrc_srq_umem_valid[0x1]; u8 reserved_at_2e1[0x1f]; u8 reserved_at_300[0x580]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_tis_out_bits { u8 
status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 tisn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_tis_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0xc0]; struct mlx5_ifc_tisc_bits ctx; }; struct mlx5_ifc_create_tir_out_bits { u8 status[0x8]; u8 icm_address_63_40[0x18]; u8 syndrome[0x20]; u8 icm_address_39_32[0x8]; u8 tirn[0x18]; u8 icm_address_31_0[0x20]; }; struct mlx5_ifc_create_tir_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0xc0]; struct mlx5_ifc_tirc_bits ctx; }; struct mlx5_ifc_create_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 srqn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_srq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_srqc_bits srq_context_entry; u8 reserved_at_280[0x600]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_sq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 sqn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_sq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0xc0]; struct mlx5_ifc_sqc_bits ctx; }; struct mlx5_ifc_create_scheduling_element_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; u8 scheduling_element_id[0x20]; u8 reserved_at_a0[0x160]; }; struct mlx5_ifc_create_scheduling_element_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 scheduling_hierarchy[0x8]; u8 reserved_at_48[0x18]; u8 reserved_at_60[0xa0]; struct mlx5_ifc_scheduling_context_bits scheduling_context; u8 reserved_at_300[0x100]; }; struct mlx5_ifc_create_rqt_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 rqtn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_rqt_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0xc0]; struct mlx5_ifc_rqtc_bits rqt_context; }; struct mlx5_ifc_create_rq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 rqn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_rq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0xc0]; struct mlx5_ifc_rqc_bits ctx; }; struct mlx5_ifc_create_rmp_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 rmpn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_rmp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0xc0]; struct mlx5_ifc_rmpc_bits ctx; }; struct mlx5_ifc_create_qp_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 qpn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_qp_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; u8 opt_param_mask[0x20]; u8 reserved_at_a0[0x20]; struct mlx5_ifc_qpc_bits qpc; u8 reserved_at_800[0x60]; u8 wq_umem_valid[0x1]; u8 reserved_at_861[0x1f]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_psv_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; u8 reserved_at_80[0x8]; u8 psv0_index[0x18]; u8 reserved_at_a0[0x8]; u8 psv1_index[0x18]; u8 
reserved_at_c0[0x8]; u8 psv2_index[0x18]; u8 reserved_at_e0[0x8]; u8 psv3_index[0x18]; }; struct mlx5_ifc_create_psv_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 num_psv[0x4]; u8 reserved_at_44[0x4]; u8 pd[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_mkey_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 mkey_index[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_mkey_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x20]; u8 pg_access[0x1]; u8 mkey_umem_valid[0x1]; u8 reserved_at_62[0x1e]; struct mlx5_ifc_mkc_bits memory_key_mkey_entry; u8 reserved_at_280[0x80]; u8 translations_octword_actual_size[0x20]; u8 reserved_at_320[0x560]; u8 klm_pas_mtt[0][0x20]; }; enum { MLX5_FLOW_TABLE_TYPE_NIC_RX = 0x0, MLX5_FLOW_TABLE_TYPE_NIC_TX = 0x1, MLX5_FLOW_TABLE_TYPE_ESW_EGRESS_ACL = 0x2, MLX5_FLOW_TABLE_TYPE_ESW_INGRESS_ACL = 0x3, MLX5_FLOW_TABLE_TYPE_FDB = 0X4, MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 0X5, MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 0X6, }; struct mlx5_ifc_create_flow_table_out_bits { u8 status[0x8]; u8 icm_address_63_40[0x18]; u8 syndrome[0x20]; u8 icm_address_39_32[0x8]; u8 table_id[0x18]; u8 icm_address_31_0[0x20]; }; struct mlx5_ifc_create_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_at_41[0xf]; u8 vport_number[0x10]; u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x20]; struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_create_flow_group_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 group_id[0x18]; u8 reserved_at_60[0x20]; }; enum { MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, }; struct mlx5_ifc_create_flow_group_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_at_41[0xf]; u8 vport_number[0x10]; u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x8]; u8 table_id[0x18]; u8 source_eswitch_owner_vhca_id_valid[0x1]; u8 reserved_at_c1[0x1f]; u8 start_flow_index[0x20]; u8 reserved_at_100[0x20]; u8 end_flow_index[0x20]; u8 reserved_at_140[0xa0]; u8 reserved_at_1e0[0x18]; u8 match_criteria_enable[0x8]; struct mlx5_ifc_fte_match_param_bits match_criteria; u8 reserved_at_1200[0xe00]; }; struct mlx5_ifc_create_eq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x18]; u8 eq_number[0x8]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_eq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_eqc_bits eq_context_entry; u8 reserved_at_280[0x40]; u8 event_bitmask[4][0x40]; u8 reserved_at_3c0[0x4c0]; u8 pas[0][0x40]; }; struct mlx5_ifc_create_dct_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 dctn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_dct_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_dctc_bits dct_context_entry; u8 
reserved_at_280[0x180]; }; struct mlx5_ifc_create_cq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 cqn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_cq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_cqc_bits cq_context; u8 reserved_at_280[0x60]; u8 cq_umem_valid[0x1]; u8 reserved_at_2e1[0x59f]; u8 pas[0][0x40]; }; struct mlx5_ifc_config_int_moderation_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x4]; u8 min_delay[0xc]; u8 int_vector[0x10]; u8 reserved_at_60[0x20]; }; enum { MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0, MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1, }; struct mlx5_ifc_config_int_moderation_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x4]; u8 min_delay[0xc]; u8 int_vector[0x10]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_attach_to_mcg_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_attach_to_mcg_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 qpn[0x18]; u8 reserved_at_60[0x20]; u8 multicast_gid[16][0x8]; }; struct mlx5_ifc_arm_xrq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_arm_xrq_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 xrqn[0x18]; u8 reserved_at_60[0x10]; u8 lwm[0x10]; }; struct mlx5_ifc_arm_xrc_srq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; enum { MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1, }; struct mlx5_ifc_arm_xrc_srq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 xrc_srqn[0x18]; u8 reserved_at_60[0x10]; u8 lwm[0x10]; }; struct mlx5_ifc_arm_rq_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; enum { MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1, MLX5_ARM_RQ_IN_OP_MOD_XRQ = 0x2, }; struct mlx5_ifc_arm_rq_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 srq_number[0x18]; u8 reserved_at_60[0x10]; u8 lwm[0x10]; }; struct mlx5_ifc_arm_dct_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_arm_dct_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 dct_number[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_xrcd_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 xrcd[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_xrcd_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_uar_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 uar[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_uar_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_transport_domain_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 transport_domain[0x18]; u8 reserved_at_60[0x20]; }; struct 
mlx5_ifc_alloc_transport_domain_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_q_counter_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x18]; u8 counter_set_id[0x8]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_q_counter_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_pd_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 pd[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_pd_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_flow_counter_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 flow_counter_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_flow_counter_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x38]; u8 flow_counter_bulk[0x8]; }; struct mlx5_ifc_add_vxlan_udp_dport_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_add_vxlan_udp_dport_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x20]; u8 reserved_at_60[0x10]; u8 vxlan_udp_port[0x10]; }; struct mlx5_ifc_set_pp_rate_limit_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_pp_rate_limit_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x10]; u8 rate_limit_index[0x10]; u8 reserved_at_60[0x20]; u8 rate_limit[0x20]; u8 burst_upper_bound[0x20]; u8 reserved_at_c0[0x10]; u8 typical_packet_size[0x10]; u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_access_register_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; u8 register_data[0][0x20]; }; enum { MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0, MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1, }; struct mlx5_ifc_access_register_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x10]; u8 register_id[0x10]; u8 argument[0x20]; u8 register_data[0][0x20]; }; struct mlx5_ifc_sltp_reg_bits { u8 status[0x4]; u8 version[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_at_12[0x2]; u8 lane[0x4]; u8 reserved_at_18[0x8]; u8 reserved_at_20[0x20]; u8 reserved_at_40[0x7]; u8 polarity[0x1]; u8 ob_tap0[0x8]; u8 ob_tap1[0x8]; u8 ob_tap2[0x8]; u8 reserved_at_60[0xc]; u8 ob_preemp_mode[0x4]; u8 ob_reg[0x8]; u8 ob_bias[0x8]; u8 reserved_at_80[0x20]; }; struct mlx5_ifc_slrg_reg_bits { u8 status[0x4]; u8 version[0x4]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_at_12[0x2]; u8 lane[0x4]; u8 reserved_at_18[0x8]; u8 time_to_link_up[0x10]; u8 reserved_at_30[0xc]; u8 grade_lane_speed[0x4]; u8 grade_version[0x8]; u8 grade[0x18]; u8 reserved_at_60[0x4]; u8 height_grade_type[0x4]; u8 height_grade[0x18]; u8 height_dz[0x10]; u8 height_dv[0x10]; u8 reserved_at_a0[0x10]; u8 height_sigma[0x10]; u8 reserved_at_c0[0x20]; u8 reserved_at_e0[0x4]; u8 phase_grade_type[0x4]; u8 phase_grade[0x18]; u8 reserved_at_100[0x8]; u8 phase_eo_pos[0x8]; u8 reserved_at_110[0x8]; u8 phase_eo_neg[0x8]; u8 ffe_set_tested[0x10]; u8 test_errors_per_lane[0x10]; }; struct mlx5_ifc_pvlc_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 
reserved_at_10[0x10]; u8 reserved_at_20[0x1c]; u8 vl_hw_cap[0x4]; u8 reserved_at_40[0x1c]; u8 vl_admin[0x4]; u8 reserved_at_60[0x1c]; u8 vl_operational[0x4]; }; struct mlx5_ifc_pude_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x4]; u8 admin_status[0x4]; u8 reserved_at_18[0x4]; u8 oper_status[0x4]; u8 reserved_at_20[0x60]; }; struct mlx5_ifc_ptys_reg_bits { u8 reserved_at_0[0x1]; u8 an_disable_admin[0x1]; u8 an_disable_cap[0x1]; u8 reserved_at_3[0x5]; u8 local_port[0x8]; u8 reserved_at_10[0xd]; u8 proto_mask[0x3]; u8 an_status[0x4]; u8 reserved_at_24[0x1c]; u8 ext_eth_proto_capability[0x20]; u8 eth_proto_capability[0x20]; u8 ib_link_width_capability[0x10]; u8 ib_proto_capability[0x10]; u8 ext_eth_proto_admin[0x20]; u8 eth_proto_admin[0x20]; u8 ib_link_width_admin[0x10]; u8 ib_proto_admin[0x10]; u8 ext_eth_proto_oper[0x20]; u8 eth_proto_oper[0x20]; u8 ib_link_width_oper[0x10]; u8 ib_proto_oper[0x10]; u8 reserved_at_160[0x1c]; u8 connector_type[0x4]; u8 eth_proto_lp_advertise[0x20]; u8 reserved_at_1a0[0x60]; }; struct mlx5_ifc_mlcr_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x20]; u8 beacon_duration[0x10]; u8 reserved_at_40[0x10]; u8 beacon_remain[0x10]; }; struct mlx5_ifc_ptas_reg_bits { u8 reserved_at_0[0x20]; u8 algorithm_options[0x10]; u8 reserved_at_30[0x4]; u8 repetitions_mode[0x4]; u8 num_of_repetitions[0x8]; u8 grade_version[0x8]; u8 height_grade_type[0x4]; u8 phase_grade_type[0x4]; u8 height_grade_weight[0x8]; u8 phase_grade_weight[0x8]; u8 gisim_measure_bits[0x10]; u8 adaptive_tap_measure_bits[0x10]; u8 ber_bath_high_error_threshold[0x10]; u8 ber_bath_mid_error_threshold[0x10]; u8 ber_bath_low_error_threshold[0x10]; u8 one_ratio_high_threshold[0x10]; u8 one_ratio_high_mid_threshold[0x10]; u8 one_ratio_low_mid_threshold[0x10]; u8 one_ratio_low_threshold[0x10]; u8 ndeo_error_threshold[0x10]; u8 mixer_offset_step_size[0x10]; u8 reserved_at_110[0x8]; u8 mix90_phase_for_voltage_bath[0x8]; u8 mixer_offset_start[0x10]; u8 mixer_offset_end[0x10]; u8 reserved_at_140[0x15]; u8 ber_test_time[0xb]; }; struct mlx5_ifc_pspa_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 sub_port[0x8]; u8 reserved_at_18[0x8]; u8 reserved_at_20[0x20]; }; struct mlx5_ifc_pqdr_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x5]; u8 prio[0x3]; u8 reserved_at_18[0x6]; u8 mode[0x2]; u8 reserved_at_20[0x20]; u8 reserved_at_40[0x10]; u8 min_threshold[0x10]; u8 reserved_at_60[0x10]; u8 max_threshold[0x10]; u8 reserved_at_80[0x10]; u8 mark_probability_denominator[0x10]; u8 reserved_at_a0[0x60]; }; struct mlx5_ifc_ppsc_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x60]; u8 reserved_at_80[0x1c]; u8 wrps_admin[0x4]; u8 reserved_at_a0[0x1c]; u8 wrps_status[0x4]; u8 reserved_at_c0[0x8]; u8 up_threshold[0x8]; u8 reserved_at_d0[0x8]; u8 down_threshold[0x8]; u8 reserved_at_e0[0x20]; u8 reserved_at_100[0x1c]; u8 srps_admin[0x4]; u8 reserved_at_120[0x1c]; u8 srps_status[0x4]; u8 reserved_at_140[0x40]; }; struct mlx5_ifc_pplr_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x8]; u8 lb_cap[0x8]; u8 reserved_at_30[0x8]; u8 lb_en[0x8]; }; struct mlx5_ifc_pplm_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x20]; u8 port_profile_mode[0x8]; u8 static_port_profile[0x8]; u8 active_port_profile[0x8]; u8 reserved_at_58[0x8]; u8 retransmission_active[0x8]; u8 fec_mode_active[0x18]; u8 rs_fec_correction_bypass_cap[0x4]; u8 
reserved_at_84[0x8]; u8 fec_override_cap_56g[0x4]; u8 fec_override_cap_100g[0x4]; u8 fec_override_cap_50g[0x4]; u8 fec_override_cap_25g[0x4]; u8 fec_override_cap_10g_40g[0x4]; u8 rs_fec_correction_bypass_admin[0x4]; u8 reserved_at_a4[0x8]; u8 fec_override_admin_56g[0x4]; u8 fec_override_admin_100g[0x4]; u8 fec_override_admin_50g[0x4]; u8 fec_override_admin_25g[0x4]; u8 fec_override_admin_10g_40g[0x4]; }; struct mlx5_ifc_ppcnt_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 pnat[0x2]; u8 reserved_at_12[0x8]; u8 grp[0x6]; u8 clr[0x1]; u8 reserved_at_21[0x1c]; u8 prio_tc[0x3]; union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; }; struct mlx5_ifc_mpein_reg_bits { u8 reserved_at_0[0x2]; u8 depth[0x6]; u8 pcie_index[0x8]; u8 node[0x8]; u8 reserved_at_18[0x8]; u8 capability_mask[0x20]; u8 reserved_at_40[0x8]; u8 link_width_enabled[0x8]; u8 link_speed_enabled[0x10]; u8 lane0_physical_position[0x8]; u8 link_width_active[0x8]; u8 link_speed_active[0x10]; u8 num_of_pfs[0x10]; u8 num_of_vfs[0x10]; u8 bdf0[0x10]; u8 reserved_at_b0[0x10]; u8 max_read_request_size[0x4]; u8 max_payload_size[0x4]; u8 reserved_at_c8[0x5]; u8 pwr_status[0x3]; u8 port_type[0x4]; u8 reserved_at_d4[0xb]; u8 lane_reversal[0x1]; u8 reserved_at_e0[0x14]; u8 pci_power[0xc]; u8 reserved_at_100[0x20]; u8 device_status[0x10]; u8 port_state[0x8]; u8 reserved_at_138[0x8]; u8 reserved_at_140[0x10]; u8 receiver_detect_result[0x10]; u8 reserved_at_160[0x20]; }; struct mlx5_ifc_mpcnt_reg_bits { u8 reserved_at_0[0x8]; u8 pcie_index[0x8]; u8 reserved_at_10[0xa]; u8 grp[0x6]; u8 clr[0x1]; u8 reserved_at_21[0x1f]; union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set; }; struct mlx5_ifc_ppad_reg_bits { u8 reserved_at_0[0x3]; u8 single_mac[0x1]; u8 reserved_at_4[0x4]; u8 local_port[0x8]; u8 mac_47_32[0x10]; u8 mac_31_0[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_pmtu_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 max_mtu[0x10]; u8 reserved_at_30[0x10]; u8 admin_mtu[0x10]; u8 reserved_at_50[0x10]; u8 oper_mtu[0x10]; u8 reserved_at_70[0x10]; }; struct mlx5_ifc_pmpr_reg_bits { u8 reserved_at_0[0x8]; u8 module[0x8]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x18]; u8 attenuation_5g[0x8]; u8 reserved_at_40[0x18]; u8 attenuation_7g[0x8]; u8 reserved_at_60[0x18]; u8 attenuation_12g[0x8]; }; struct mlx5_ifc_pmpe_reg_bits { u8 reserved_at_0[0x8]; u8 module[0x8]; u8 reserved_at_10[0xc]; u8 module_status[0x4]; u8 reserved_at_20[0x60]; }; struct mlx5_ifc_pmpc_reg_bits { u8 module_state_updated[32][0x8]; }; struct mlx5_ifc_pmlpn_reg_bits { u8 reserved_at_0[0x4]; u8 mlpn_status[0x4]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 e[0x1]; u8 reserved_at_21[0x1f]; }; struct mlx5_ifc_pmlp_reg_bits { u8 rxtx[0x1]; u8 reserved_at_1[0x7]; u8 local_port[0x8]; u8 reserved_at_10[0x8]; u8 width[0x8]; u8 lane0_module_mapping[0x20]; u8 lane1_module_mapping[0x20]; u8 lane2_module_mapping[0x20]; u8 lane3_module_mapping[0x20]; u8 reserved_at_a0[0x160]; }; struct mlx5_ifc_pmaos_reg_bits { u8 reserved_at_0[0x8]; u8 module[0x8]; u8 reserved_at_10[0x4]; u8 admin_status[0x4]; u8 reserved_at_18[0x4]; u8 oper_status[0x4]; u8 ase[0x1]; u8 ee[0x1]; u8 reserved_at_22[0x1c]; u8 e[0x2]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_plpc_reg_bits { u8 reserved_at_0[0x4]; u8 profile_id[0xc]; u8 reserved_at_10[0x4]; u8 proto_mask[0x4]; u8 reserved_at_18[0x8]; u8 reserved_at_20[0x10]; u8 lane_speed[0x10]; u8 reserved_at_40[0x17]; u8 lpbf[0x1]; u8 fec_mode_policy[0x8]; u8 retransmission_capability[0x8]; u8 
fec_mode_capability[0x18]; u8 retransmission_support_admin[0x8]; u8 fec_mode_support_admin[0x18]; u8 retransmission_request_admin[0x8]; u8 fec_mode_request_admin[0x18]; u8 reserved_at_c0[0x80]; }; struct mlx5_ifc_plib_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x8]; u8 ib_port[0x8]; u8 reserved_at_20[0x60]; }; struct mlx5_ifc_plbf_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0xd]; u8 lbf_mode[0x3]; u8 reserved_at_20[0x20]; }; struct mlx5_ifc_pipg_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 dic[0x1]; u8 reserved_at_21[0x19]; u8 ipg[0x4]; u8 reserved_at_3e[0x2]; }; struct mlx5_ifc_pifr_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0xe0]; u8 port_filter[8][0x20]; u8 port_filter_update_en[8][0x20]; }; struct mlx5_ifc_pfcc_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0xb]; u8 ppan_mask_n[0x1]; u8 minor_stall_mask[0x1]; u8 critical_stall_mask[0x1]; u8 reserved_at_1e[0x2]; u8 ppan[0x4]; u8 reserved_at_24[0x4]; u8 prio_mask_tx[0x8]; u8 reserved_at_30[0x8]; u8 prio_mask_rx[0x8]; u8 pptx[0x1]; u8 aptx[0x1]; u8 pptx_mask_n[0x1]; u8 reserved_at_43[0x5]; u8 pfctx[0x8]; u8 reserved_at_50[0x10]; u8 pprx[0x1]; u8 aprx[0x1]; u8 pprx_mask_n[0x1]; u8 reserved_at_63[0x5]; u8 pfcrx[0x8]; u8 reserved_at_70[0x10]; u8 device_stall_minor_watermark[0x10]; u8 device_stall_critical_watermark[0x10]; u8 reserved_at_a0[0x60]; }; struct mlx5_ifc_pelc_reg_bits { u8 op[0x4]; u8 reserved_at_4[0x4]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 op_admin[0x8]; u8 op_capability[0x8]; u8 op_request[0x8]; u8 op_active[0x8]; u8 admin[0x40]; u8 capability[0x40]; u8 request[0x40]; u8 active[0x40]; u8 reserved_at_140[0x80]; }; struct mlx5_ifc_peir_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0xc]; u8 error_count[0x4]; u8 reserved_at_30[0x10]; u8 reserved_at_40[0xc]; u8 lane[0x4]; u8 reserved_at_50[0x8]; u8 error_type[0x8]; }; struct mlx5_ifc_mpegc_reg_bits { u8 reserved_at_0[0x30]; u8 field_select[0x10]; u8 tx_overflow_sense[0x1]; u8 mark_cqe[0x1]; u8 mark_cnp[0x1]; u8 reserved_at_43[0x1b]; u8 tx_lossy_overflow_oper[0x2]; u8 reserved_at_60[0x100]; }; struct mlx5_ifc_pcam_enhanced_features_bits { u8 reserved_at_0[0x6d]; u8 rx_icrc_encapsulated_counter[0x1]; u8 reserved_at_6e[0x4]; u8 ptys_extended_ethernet[0x1]; u8 reserved_at_73[0x3]; u8 pfcc_mask[0x1]; u8 reserved_at_77[0x3]; u8 per_lane_error_counters[0x1]; u8 rx_buffer_fullness_counters[0x1]; u8 ptys_connector_type[0x1]; u8 reserved_at_7d[0x1]; u8 ppcnt_discard_group[0x1]; u8 ppcnt_statistical_group[0x1]; }; struct mlx5_ifc_pcam_regs_5000_to_507f_bits { u8 port_access_reg_cap_mask_127_to_96[0x20]; u8 port_access_reg_cap_mask_95_to_64[0x20]; u8 port_access_reg_cap_mask_63_to_36[0x1c]; u8 pplm[0x1]; u8 port_access_reg_cap_mask_34_to_32[0x3]; u8 port_access_reg_cap_mask_31_to_13[0x13]; u8 pbmc[0x1]; u8 pptb[0x1]; u8 port_access_reg_cap_mask_10_to_09[0x2]; u8 ppcnt[0x1]; u8 port_access_reg_cap_mask_07_to_00[0x8]; }; struct mlx5_ifc_pcam_reg_bits { u8 reserved_at_0[0x8]; u8 feature_group[0x8]; u8 reserved_at_10[0x8]; u8 access_reg_group[0x8]; u8 reserved_at_20[0x20]; union { struct mlx5_ifc_pcam_regs_5000_to_507f_bits regs_5000_to_507f; u8 reserved_at_0[0x80]; } port_access_reg_cap_mask; u8 reserved_at_c0[0x80]; union { struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features; u8 reserved_at_0[0x80]; } feature_cap_mask; u8 reserved_at_1c0[0xc0]; }; struct 
mlx5_ifc_mcam_enhanced_features_bits { u8 reserved_at_0[0x6e]; u8 pci_status_and_power[0x1]; u8 reserved_at_6f[0x5]; u8 mark_tx_action_cnp[0x1]; u8 mark_tx_action_cqe[0x1]; u8 dynamic_tx_overflow[0x1]; u8 reserved_at_77[0x4]; u8 pcie_outbound_stalled[0x1]; u8 tx_overflow_buffer_pkt[0x1]; u8 mtpps_enh_out_per_adj[0x1]; u8 mtpps_fs[0x1]; u8 pcie_performance_group[0x1]; }; struct mlx5_ifc_mcam_access_reg_bits { u8 reserved_at_0[0x1c]; u8 mcda[0x1]; u8 mcc[0x1]; u8 mcqi[0x1]; u8 mcqs[0x1]; u8 regs_95_to_87[0x9]; u8 mpegc[0x1]; u8 regs_85_to_68[0x12]; u8 tracer_registers[0x4]; u8 regs_63_to_32[0x20]; u8 regs_31_to_0[0x20]; }; struct mlx5_ifc_mcam_reg_bits { u8 reserved_at_0[0x8]; u8 feature_group[0x8]; u8 reserved_at_10[0x8]; u8 access_reg_group[0x8]; u8 reserved_at_20[0x20]; union { struct mlx5_ifc_mcam_access_reg_bits access_regs; u8 reserved_at_0[0x80]; } mng_access_reg_cap_mask; u8 reserved_at_c0[0x80]; union { struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features; u8 reserved_at_0[0x80]; } mng_feature_cap_mask; u8 reserved_at_1c0[0x80]; }; struct mlx5_ifc_qcam_access_reg_cap_mask { u8 qcam_access_reg_cap_mask_127_to_20[0x6C]; u8 qpdpm[0x1]; u8 qcam_access_reg_cap_mask_18_to_4[0x0F]; u8 qdpm[0x1]; u8 qpts[0x1]; u8 qcap[0x1]; u8 qcam_access_reg_cap_mask_0[0x1]; }; struct mlx5_ifc_qcam_qos_feature_cap_mask { u8 qcam_qos_feature_cap_mask_127_to_1[0x7F]; u8 qpts_trust_both[0x1]; }; struct mlx5_ifc_qcam_reg_bits { u8 reserved_at_0[0x8]; u8 feature_group[0x8]; u8 reserved_at_10[0x8]; u8 access_reg_group[0x8]; u8 reserved_at_20[0x20]; union { struct mlx5_ifc_qcam_access_reg_cap_mask reg_cap; u8 reserved_at_0[0x80]; } qos_access_reg_cap_mask; u8 reserved_at_c0[0x80]; union { struct mlx5_ifc_qcam_qos_feature_cap_mask feature_cap; u8 reserved_at_0[0x80]; } qos_feature_cap_mask; u8 reserved_at_1c0[0x80]; }; struct mlx5_ifc_core_dump_reg_bits { u8 reserved_at_0[0x18]; u8 core_dump_type[0x8]; u8 reserved_at_20[0x30]; u8 vhca_id[0x10]; u8 reserved_at_60[0x8]; u8 qpn[0x18]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_pcap_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 port_capability_mask[4][0x20]; }; struct mlx5_ifc_paos_reg_bits { u8 swid[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x4]; u8 admin_status[0x4]; u8 reserved_at_18[0x4]; u8 oper_status[0x4]; u8 ase[0x1]; u8 ee[0x1]; u8 reserved_at_22[0x1c]; u8 e[0x2]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_pamp_reg_bits { u8 reserved_at_0[0x8]; u8 opamp_group[0x8]; u8 reserved_at_10[0xc]; u8 opamp_group_type[0x4]; u8 start_index[0x10]; u8 reserved_at_30[0x4]; u8 num_of_indices[0xc]; u8 index_data[18][0x10]; }; struct mlx5_ifc_pcmr_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 entropy_force_cap[0x1]; u8 entropy_calc_cap[0x1]; u8 entropy_gre_calc_cap[0x1]; u8 reserved_at_23[0xf]; u8 rx_ts_over_crc_cap[0x1]; u8 reserved_at_33[0xb]; u8 fcs_cap[0x1]; u8 reserved_at_3f[0x1]; u8 entropy_force[0x1]; u8 entropy_calc[0x1]; u8 entropy_gre_calc[0x1]; u8 reserved_at_43[0xf]; u8 rx_ts_over_crc[0x1]; u8 reserved_at_53[0xb]; u8 fcs_chk[0x1]; u8 reserved_at_5f[0x1]; }; struct mlx5_ifc_lane_2_module_mapping_bits { u8 reserved_at_0[0x6]; u8 rx_lane[0x2]; u8 reserved_at_8[0x6]; u8 tx_lane[0x2]; u8 reserved_at_10[0x8]; u8 module[0x8]; }; struct mlx5_ifc_bufferx_reg_bits { u8 reserved_at_0[0x6]; u8 lossy[0x1]; u8 epsb[0x1]; u8 reserved_at_8[0x8]; u8 size[0x10]; u8 xoff_threshold[0x10]; u8 xon_threshold[0x10]; }; struct mlx5_ifc_set_node_in_bits { u8 node_description[64][0x8]; }; struct 
mlx5_ifc_register_power_settings_bits { u8 reserved_at_0[0x18]; u8 power_settings_level[0x8]; u8 reserved_at_20[0x60]; }; struct mlx5_ifc_register_host_endianness_bits { u8 he[0x1]; u8 reserved_at_1[0x1f]; u8 reserved_at_20[0x60]; }; struct mlx5_ifc_umr_pointer_desc_argument_bits { u8 reserved_at_0[0x20]; u8 mkey[0x20]; u8 addressh_63_32[0x20]; u8 addressl_31_0[0x20]; }; struct mlx5_ifc_ud_adrs_vector_bits { u8 dc_key[0x40]; u8 ext[0x1]; u8 reserved_at_41[0x7]; u8 destination_qp_dct[0x18]; u8 static_rate[0x4]; u8 sl_eth_prio[0x4]; u8 fl[0x1]; u8 mlid[0x7]; u8 rlid_udp_sport[0x10]; u8 reserved_at_80[0x20]; u8 rmac_47_16[0x20]; u8 rmac_15_0[0x10]; u8 tclass[0x8]; u8 hop_limit[0x8]; u8 reserved_at_e0[0x1]; u8 grh[0x1]; u8 reserved_at_e2[0x2]; u8 src_addr_index[0x8]; u8 flow_label[0x14]; u8 rgid_rip[16][0x8]; }; struct mlx5_ifc_pages_req_event_bits { u8 reserved_at_0[0x10]; u8 function_id[0x10]; u8 num_pages[0x20]; u8 reserved_at_40[0xa0]; }; struct mlx5_ifc_eqe_bits { u8 reserved_at_0[0x8]; u8 event_type[0x8]; u8 reserved_at_10[0x8]; u8 event_sub_type[0x8]; u8 reserved_at_20[0xe0]; union mlx5_ifc_event_auto_bits event_data; u8 reserved_at_1e0[0x10]; u8 signature[0x8]; u8 reserved_at_1f8[0x7]; u8 owner[0x1]; }; enum { MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7, }; struct mlx5_ifc_cmd_queue_entry_bits { u8 type[0x8]; u8 reserved_at_8[0x18]; u8 input_length[0x20]; u8 input_mailbox_pointer_63_32[0x20]; u8 input_mailbox_pointer_31_9[0x17]; u8 reserved_at_77[0x9]; u8 command_input_inline_data[16][0x8]; u8 command_output_inline_data[16][0x8]; u8 output_mailbox_pointer_63_32[0x20]; u8 output_mailbox_pointer_31_9[0x17]; u8 reserved_at_1b7[0x9]; u8 output_length[0x20]; u8 token[0x8]; u8 signature[0x8]; u8 reserved_at_1f0[0x8]; u8 status[0x7]; u8 ownership[0x1]; }; struct mlx5_ifc_cmd_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 command_output[0x20]; }; struct mlx5_ifc_cmd_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 command[0][0x20]; }; struct mlx5_ifc_cmd_if_box_bits { u8 mailbox_data[512][0x8]; u8 reserved_at_1000[0x180]; u8 next_pointer_63_32[0x20]; u8 next_pointer_31_10[0x16]; u8 reserved_at_11b6[0xa]; u8 block_number[0x20]; u8 reserved_at_11e0[0x8]; u8 token[0x8]; u8 ctrl_signature[0x8]; u8 signature[0x8]; }; struct mlx5_ifc_mtt_bits { u8 ptag_63_32[0x20]; u8 ptag_31_8[0x18]; u8 reserved_at_38[0x6]; u8 wr_en[0x1]; u8 rd_en[0x1]; }; struct mlx5_ifc_query_wol_rol_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x10]; u8 rol_mode[0x8]; u8 wol_mode[0x8]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_query_wol_rol_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_wol_rol_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_wol_rol_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 rol_mode_valid[0x1]; u8 wol_mode_valid[0x1]; u8 reserved_at_42[0xe]; u8 rol_mode[0x8]; u8 wol_mode[0x8]; u8 reserved_at_60[0x20]; }; enum { MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2, }; enum { MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0, MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1, MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2, }; enum { 
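	/*
	 * Descriptive note (added): these syndromes are reported through the
	 * health_syndrome field of the initial segment (see
	 * mlx5_ifc_initial_seg_bits below) together with the health_buffer
	 * contents when the device flags a fatal condition.
	 */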
MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1, MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7, MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8, MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9, MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa, MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb, MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc, MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd, MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe, MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf, MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10, }; struct mlx5_ifc_initial_seg_bits { u8 fw_rev_minor[0x10]; u8 fw_rev_major[0x10]; u8 cmd_interface_rev[0x10]; u8 fw_rev_subminor[0x10]; u8 reserved_at_40[0x40]; u8 cmdq_phy_addr_63_32[0x20]; u8 cmdq_phy_addr_31_12[0x14]; u8 reserved_at_b4[0x2]; u8 nic_interface[0x2]; u8 log_cmdq_size[0x4]; u8 log_cmdq_stride[0x4]; u8 command_doorbell_vector[0x20]; u8 reserved_at_e0[0xf00]; u8 initializing[0x1]; u8 reserved_at_fe1[0x4]; u8 nic_interface_supported[0x3]; u8 embedded_cpu[0x1]; u8 reserved_at_fe9[0x17]; struct mlx5_ifc_health_buffer_bits health_buffer; u8 no_dram_nic_offset[0x20]; u8 reserved_at_1220[0x6e40]; u8 reserved_at_8060[0x1f]; u8 clear_int[0x1]; u8 health_syndrome[0x8]; u8 health_counter[0x18]; u8 reserved_at_80a0[0x17fc0]; }; struct mlx5_ifc_mtpps_reg_bits { u8 reserved_at_0[0xc]; u8 cap_number_of_pps_pins[0x4]; u8 reserved_at_10[0x4]; u8 cap_max_num_of_pps_in_pins[0x4]; u8 reserved_at_18[0x4]; u8 cap_max_num_of_pps_out_pins[0x4]; u8 reserved_at_20[0x24]; u8 cap_pin_3_mode[0x4]; u8 reserved_at_48[0x4]; u8 cap_pin_2_mode[0x4]; u8 reserved_at_50[0x4]; u8 cap_pin_1_mode[0x4]; u8 reserved_at_58[0x4]; u8 cap_pin_0_mode[0x4]; u8 reserved_at_60[0x4]; u8 cap_pin_7_mode[0x4]; u8 reserved_at_68[0x4]; u8 cap_pin_6_mode[0x4]; u8 reserved_at_70[0x4]; u8 cap_pin_5_mode[0x4]; u8 reserved_at_78[0x4]; u8 cap_pin_4_mode[0x4]; u8 field_select[0x20]; u8 reserved_at_a0[0x60]; u8 enable[0x1]; u8 reserved_at_101[0xb]; u8 pattern[0x4]; u8 reserved_at_110[0x4]; u8 pin_mode[0x4]; u8 pin[0x8]; u8 reserved_at_120[0x20]; u8 time_stamp[0x40]; u8 out_pulse_duration[0x10]; u8 out_periodic_adjustment[0x10]; u8 enhanced_out_periodic_adjustment[0x20]; u8 reserved_at_1c0[0x20]; }; struct mlx5_ifc_mtppse_reg_bits { u8 reserved_at_0[0x18]; u8 pin[0x8]; u8 event_arm[0x1]; u8 reserved_at_21[0x1b]; u8 event_generation_mode[0x4]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_mcqs_reg_bits { u8 last_index_flag[0x1]; u8 reserved_at_1[0x7]; u8 fw_device[0x8]; u8 component_index[0x10]; u8 reserved_at_20[0x10]; u8 identifier[0x10]; u8 reserved_at_40[0x17]; u8 component_status[0x5]; u8 component_update_state[0x4]; u8 last_update_state_changer_type[0x4]; u8 last_update_state_changer_host_id[0x4]; u8 reserved_at_68[0x18]; }; struct mlx5_ifc_mcqi_cap_bits { u8 supported_info_bitmask[0x20]; u8 component_size[0x20]; u8 max_component_size[0x20]; u8 log_mcda_word_size[0x4]; u8 reserved_at_64[0xc]; u8 mcda_max_write_size[0x10]; u8 rd_en[0x1]; u8 reserved_at_81[0x1]; u8 match_chip_id[0x1]; u8 match_psid[0x1]; u8 check_user_timestamp[0x1]; u8 match_base_guid_mac[0x1]; u8 reserved_at_86[0x1a]; }; struct mlx5_ifc_mcqi_version_bits { u8 reserved_at_0[0x2]; u8 build_time_valid[0x1]; u8 user_defined_time_valid[0x1]; u8 reserved_at_4[0x14]; u8 version_string_length[0x8]; u8 version[0x20]; u8 build_time[0x40]; u8 user_defined_time[0x40]; u8 build_tool_version[0x20]; u8 reserved_at_e0[0x20]; u8 version_string[92][0x8]; }; struct mlx5_ifc_mcqi_activation_method_bits { 
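	/*
	 * Descriptive note (added): each pending_* bit below reports which
	 * action (AC/DC power cycle, server reboot, firmware reset) is still
	 * required before a staged firmware component becomes active; this is
	 * presumed to be consumed by the MCQS/MCQI/MCC/MCDA component-update
	 * register flow whose layouts follow.
	 */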
u8 pending_server_ac_power_cycle[0x1]; u8 pending_server_dc_power_cycle[0x1]; u8 pending_server_reboot[0x1]; u8 pending_fw_reset[0x1]; u8 auto_activate[0x1]; u8 all_hosts_sync[0x1]; u8 device_hw_reset[0x1]; u8 reserved_at_7[0x19]; }; union mlx5_ifc_mcqi_reg_data_bits { struct mlx5_ifc_mcqi_cap_bits mcqi_caps; struct mlx5_ifc_mcqi_version_bits mcqi_version; struct mlx5_ifc_mcqi_activation_method_bits mcqi_activation_mathod; }; struct mlx5_ifc_mcqi_reg_bits { u8 read_pending_component[0x1]; u8 reserved_at_1[0xf]; u8 component_index[0x10]; u8 reserved_at_20[0x20]; u8 reserved_at_40[0x1b]; u8 info_type[0x5]; u8 info_size[0x20]; u8 offset[0x20]; u8 reserved_at_a0[0x10]; u8 data_size[0x10]; union mlx5_ifc_mcqi_reg_data_bits data[0]; }; struct mlx5_ifc_mcc_reg_bits { u8 reserved_at_0[0x4]; u8 time_elapsed_since_last_cmd[0xc]; u8 reserved_at_10[0x8]; u8 instruction[0x8]; u8 reserved_at_20[0x10]; u8 component_index[0x10]; u8 reserved_at_40[0x8]; u8 update_handle[0x18]; u8 handle_owner_type[0x4]; u8 handle_owner_host_id[0x4]; u8 reserved_at_68[0x1]; u8 control_progress[0x7]; u8 error_code[0x8]; u8 reserved_at_78[0x4]; u8 control_state[0x4]; u8 component_size[0x20]; u8 reserved_at_a0[0x60]; }; struct mlx5_ifc_mcda_reg_bits { u8 reserved_at_0[0x8]; u8 update_handle[0x18]; u8 offset[0x20]; u8 reserved_at_40[0x10]; u8 size[0x10]; u8 reserved_at_60[0x20]; u8 data[0][0x20]; }; union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout; struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout; struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping; struct mlx5_ifc_pamp_reg_bits pamp_reg; struct mlx5_ifc_paos_reg_bits paos_reg; struct mlx5_ifc_pcap_reg_bits pcap_reg; struct mlx5_ifc_peir_reg_bits peir_reg; struct mlx5_ifc_pelc_reg_bits pelc_reg; struct mlx5_ifc_pfcc_reg_bits pfcc_reg; struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; struct mlx5_ifc_pifr_reg_bits pifr_reg; struct mlx5_ifc_pipg_reg_bits pipg_reg; struct mlx5_ifc_plbf_reg_bits plbf_reg; struct mlx5_ifc_plib_reg_bits plib_reg; struct mlx5_ifc_plpc_reg_bits plpc_reg; struct mlx5_ifc_pmaos_reg_bits pmaos_reg; struct mlx5_ifc_pmlp_reg_bits pmlp_reg; struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg; struct mlx5_ifc_pmpc_reg_bits pmpc_reg; struct mlx5_ifc_pmpe_reg_bits pmpe_reg; struct mlx5_ifc_pmpr_reg_bits pmpr_reg; struct mlx5_ifc_pmtu_reg_bits pmtu_reg; struct mlx5_ifc_ppad_reg_bits ppad_reg; struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; struct mlx5_ifc_mpein_reg_bits mpein_reg; struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg; struct mlx5_ifc_pplm_reg_bits pplm_reg; struct mlx5_ifc_pplr_reg_bits pplr_reg; struct mlx5_ifc_ppsc_reg_bits ppsc_reg; struct mlx5_ifc_pqdr_reg_bits pqdr_reg; struct mlx5_ifc_pspa_reg_bits pspa_reg; struct mlx5_ifc_ptas_reg_bits ptas_reg; struct mlx5_ifc_ptys_reg_bits 
ptys_reg; struct mlx5_ifc_mlcr_reg_bits mlcr_reg; struct mlx5_ifc_pude_reg_bits pude_reg; struct mlx5_ifc_pvlc_reg_bits pvlc_reg; struct mlx5_ifc_slrg_reg_bits slrg_reg; struct mlx5_ifc_sltp_reg_bits sltp_reg; struct mlx5_ifc_mtpps_reg_bits mtpps_reg; struct mlx5_ifc_mtppse_reg_bits mtppse_reg; struct mlx5_ifc_fpga_access_reg_bits fpga_access_reg; struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits; struct mlx5_ifc_fpga_cap_bits fpga_cap_bits; struct mlx5_ifc_mcqi_reg_bits mcqi_reg; struct mlx5_ifc_mcc_reg_bits mcc_reg; struct mlx5_ifc_mcda_reg_bits mcda_reg; u8 reserved_at_0[0x60e0]; }; union mlx5_ifc_debug_enhancements_document_bits { struct mlx5_ifc_health_buffer_bits health_buffer; u8 reserved_at_0[0x200]; }; union mlx5_ifc_uplink_pci_interface_document_bits { struct mlx5_ifc_initial_seg_bits initial_seg; u8 reserved_at_0[0x20060]; }; struct mlx5_ifc_set_flow_table_root_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_set_flow_table_root_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_at_41[0xf]; u8 vport_number[0x10]; u8 reserved_at_60[0x20]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x8]; u8 table_id[0x18]; u8 reserved_at_c0[0x8]; u8 underlay_qpn[0x18]; u8 reserved_at_e0[0x120]; }; enum { MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = (1UL << 0), MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15), }; struct mlx5_ifc_modify_flow_table_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_flow_table_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 other_vport[0x1]; u8 reserved_at_41[0xf]; u8 vport_number[0x10]; u8 reserved_at_60[0x10]; u8 modify_field_select[0x10]; u8 table_type[0x8]; u8 reserved_at_88[0x18]; u8 reserved_at_a0[0x8]; u8 table_id[0x18]; struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_ets_tcn_config_reg_bits { u8 g[0x1]; u8 b[0x1]; u8 r[0x1]; u8 reserved_at_3[0x9]; u8 group[0x4]; u8 reserved_at_10[0x9]; u8 bw_allocation[0x7]; u8 reserved_at_20[0xc]; u8 max_bw_units[0x4]; u8 reserved_at_30[0x8]; u8 max_bw_value[0x8]; }; struct mlx5_ifc_ets_global_config_reg_bits { u8 reserved_at_0[0x2]; u8 r[0x1]; u8 reserved_at_3[0x1d]; u8 reserved_at_20[0xc]; u8 max_bw_units[0x4]; u8 reserved_at_30[0x8]; u8 max_bw_value[0x8]; }; struct mlx5_ifc_qetc_reg_bits { u8 reserved_at_0[0x8]; u8 port_number[0x8]; u8 reserved_at_10[0x30]; struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8]; struct mlx5_ifc_ets_global_config_reg_bits global_configuration; }; struct mlx5_ifc_qpdpm_dscp_reg_bits { u8 e[0x1]; u8 reserved_at_01[0x0b]; u8 prio[0x04]; }; struct mlx5_ifc_qpdpm_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; struct mlx5_ifc_qpdpm_dscp_reg_bits dscp[64]; }; struct mlx5_ifc_qpts_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x2d]; u8 trust_state[0x3]; }; struct mlx5_ifc_pptb_reg_bits { u8 reserved_at_0[0x2]; u8 mm[0x2]; u8 reserved_at_4[0x4]; u8 local_port[0x8]; u8 reserved_at_10[0x6]; u8 cm[0x1]; u8 um[0x1]; u8 pm[0x8]; u8 prio_x_buff[0x20]; u8 pm_msb[0x8]; u8 reserved_at_48[0x10]; u8 ctrl_buff[0x4]; u8 untagged_buff[0x4]; }; struct mlx5_ifc_pbmc_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; u8 reserved_at_10[0x10]; u8 xoff_timer_value[0x10]; u8 xoff_refresh[0x10]; u8 reserved_at_40[0x9]; u8 fullness_threshold[0x7]; u8 
port_buffer_size[0x10]; struct mlx5_ifc_bufferx_reg_bits buffer[10]; u8 reserved_at_2e0[0x80]; }; struct mlx5_ifc_qtct_reg_bits { u8 reserved_at_0[0x8]; u8 port_number[0x8]; u8 reserved_at_10[0xd]; u8 prio[0x3]; u8 reserved_at_20[0x1d]; u8 tclass[0x3]; }; struct mlx5_ifc_mcia_reg_bits { u8 l[0x1]; u8 reserved_at_1[0x7]; u8 module[0x8]; u8 reserved_at_10[0x8]; u8 status[0x8]; u8 i2c_device_address[0x8]; u8 page_number[0x8]; u8 device_address[0x10]; u8 reserved_at_40[0x10]; u8 size[0x10]; u8 reserved_at_60[0x20]; u8 dword_0[0x20]; u8 dword_1[0x20]; u8 dword_2[0x20]; u8 dword_3[0x20]; u8 dword_4[0x20]; u8 dword_5[0x20]; u8 dword_6[0x20]; u8 dword_7[0x20]; u8 dword_8[0x20]; u8 dword_9[0x20]; u8 dword_10[0x20]; u8 dword_11[0x20]; }; struct mlx5_ifc_dcbx_param_bits { u8 dcbx_cee_cap[0x1]; u8 dcbx_ieee_cap[0x1]; u8 dcbx_standby_cap[0x1]; u8 reserved_at_3[0x5]; u8 port_number[0x8]; u8 reserved_at_10[0xa]; u8 max_application_table_size[6]; u8 reserved_at_20[0x15]; u8 version_oper[0x3]; u8 reserved_at_38[5]; u8 version_admin[0x3]; u8 willing_admin[0x1]; u8 reserved_at_41[0x3]; u8 pfc_cap_oper[0x4]; u8 reserved_at_48[0x4]; u8 pfc_cap_admin[0x4]; u8 reserved_at_50[0x4]; u8 num_of_tc_oper[0x4]; u8 reserved_at_58[0x4]; u8 num_of_tc_admin[0x4]; u8 remote_willing[0x1]; u8 reserved_at_61[3]; u8 remote_pfc_cap[4]; u8 reserved_at_68[0x14]; u8 remote_num_of_tc[0x4]; u8 reserved_at_80[0x18]; u8 error[0x8]; u8 reserved_at_a0[0x160]; }; struct mlx5_ifc_lagc_bits { u8 reserved_at_0[0x1d]; u8 lag_state[0x3]; u8 reserved_at_20[0x14]; u8 tx_remap_affinity_2[0x4]; u8 reserved_at_38[0x4]; u8 tx_remap_affinity_1[0x4]; }; struct mlx5_ifc_create_lag_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_create_lag_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; struct mlx5_ifc_lagc_bits ctx; }; struct mlx5_ifc_modify_lag_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_modify_lag_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x20]; u8 field_select[0x20]; struct mlx5_ifc_lagc_bits ctx; }; struct mlx5_ifc_query_lag_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; struct mlx5_ifc_lagc_bits ctx; }; struct mlx5_ifc_query_lag_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_lag_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_lag_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_create_vport_lag_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_create_vport_lag_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_vport_lag_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_destroy_vport_lag_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_memic_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_30[0x20]; u8 reserved_at_40[0x18]; u8 
log_memic_addr_alignment[0x8]; u8 range_start_addr[0x40]; u8 range_size[0x20]; u8 memic_size[0x20]; }; struct mlx5_ifc_alloc_memic_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 memic_start_addr[0x40]; }; struct mlx5_ifc_dealloc_memic_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; u8 memic_start_addr[0x40]; u8 memic_size[0x20]; u8 reserved_at_e0[0x20]; }; struct mlx5_ifc_dealloc_memic_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_general_obj_in_cmd_hdr_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 vhca_tunnel_id[0x10]; u8 obj_type[0x10]; u8 obj_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_general_obj_out_cmd_hdr_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 obj_id[0x20]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_umem_bits { u8 reserved_at_0[0x80]; u8 ats[0x1]; u8 reserved_at_81[0x1a]; u8 log_page_size[0x5]; u8 page_offset[0x20]; u8 num_of_mtt[0x40]; struct mlx5_ifc_mtt_bits mtt[0]; }; struct mlx5_ifc_uctx_bits { u8 cap[0x20]; u8 reserved_at_20[0x160]; }; struct mlx5_ifc_sw_icm_bits { u8 modify_field_select[0x40]; u8 reserved_at_40[0x18]; u8 log_sw_icm_size[0x8]; u8 reserved_at_60[0x20]; u8 sw_icm_start_addr[0x40]; u8 reserved_at_c0[0x140]; }; struct mlx5_ifc_geneve_tlv_option_bits { u8 modify_field_select[0x40]; u8 reserved_at_40[0x18]; u8 geneve_option_fte_index[0x8]; u8 option_class[0x10]; u8 option_type[0x8]; u8 reserved_at_78[0x3]; u8 option_data_length[0x5]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_create_umem_in_bits { u8 opcode[0x10]; u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_umem_bits umem; }; struct mlx5_ifc_create_uctx_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_uctx_bits uctx; }; struct mlx5_ifc_destroy_uctx_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x10]; u8 uid[0x10]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_create_sw_icm_in_bits { struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; struct mlx5_ifc_sw_icm_bits sw_icm; }; struct mlx5_ifc_create_geneve_tlv_option_in_bits { struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; struct mlx5_ifc_geneve_tlv_option_bits geneve_tlv_opt; }; struct mlx5_ifc_mtrc_string_db_param_bits { u8 string_db_base_address[0x20]; u8 reserved_at_20[0x8]; u8 string_db_size[0x18]; }; struct mlx5_ifc_mtrc_cap_bits { u8 trace_owner[0x1]; u8 trace_to_memory[0x1]; u8 reserved_at_2[0x4]; u8 trc_ver[0x2]; u8 reserved_at_8[0x14]; u8 num_string_db[0x4]; u8 first_string_trace[0x8]; u8 num_string_trace[0x8]; u8 reserved_at_30[0x28]; u8 log_max_trace_buffer_size[0x8]; u8 reserved_at_60[0x20]; struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8]; u8 reserved_at_280[0x180]; }; struct mlx5_ifc_mtrc_conf_bits { u8 reserved_at_0[0x1c]; u8 trace_mode[0x4]; u8 reserved_at_20[0x18]; u8 log_trace_buffer_size[0x8]; u8 trace_mkey[0x20]; u8 reserved_at_60[0x3a0]; }; struct mlx5_ifc_mtrc_stdb_bits { u8 string_db_index[0x4]; u8 reserved_at_4[0x4]; u8 read_size[0x18]; u8 start_offset[0x20]; u8 string_db_data[0]; }; struct mlx5_ifc_mtrc_ctrl_bits { u8 trace_status[0x2]; u8 reserved_at_2[0x2]; u8 arm_event[0x1]; u8 reserved_at_5[0xb]; u8 modify_field_select[0x10]; u8 reserved_at_20[0x2b]; u8 current_timestamp52_32[0x15]; u8 current_timestamp31_0[0x20]; u8 
reserved_at_80[0x180]; }; struct mlx5_ifc_host_params_context_bits { u8 host_number[0x8]; u8 reserved_at_8[0x7]; u8 host_pf_disabled[0x1]; u8 host_num_of_vfs[0x10]; u8 host_total_vfs[0x10]; u8 host_pci_bus[0x10]; u8 reserved_at_40[0x10]; u8 host_pci_device[0x10]; u8 reserved_at_60[0x10]; u8 host_pci_function[0x10]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_query_esw_functions_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_query_esw_functions_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; struct mlx5_ifc_host_params_context_bits host_params_context; u8 reserved_at_280[0x180]; u8 host_sf_enable[0][0x40]; }; struct mlx5_ifc_sf_partition_bits { u8 reserved_at_0[0x10]; u8 log_num_sf[0x8]; u8 log_sf_bar_size[0x8]; }; struct mlx5_ifc_query_sf_partitions_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x18]; u8 num_sf_partitions[0x8]; u8 reserved_at_60[0x20]; struct mlx5_ifc_sf_partition_bits sf_partition[0]; }; struct mlx5_ifc_query_sf_partitions_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_sf_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_dealloc_sf_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x10]; u8 function_id[0x10]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_alloc_sf_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_alloc_sf_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x10]; u8 function_id[0x10]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_affiliated_event_header_bits { u8 reserved_at_0[0x10]; u8 obj_type[0x10]; u8 obj_id[0x20]; }; enum { MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), }; enum { MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, }; struct mlx5_ifc_encryption_key_obj_bits { u8 modify_field_select[0x40]; u8 reserved_at_40[0x14]; u8 key_size[0x4]; u8 reserved_at_58[0x4]; u8 key_type[0x4]; u8 reserved_at_60[0x8]; u8 pd[0x18]; u8 reserved_at_80[0x180]; u8 key[8][0x20]; u8 reserved_at_300[0x500]; }; struct mlx5_ifc_create_encryption_key_in_bits { struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; struct mlx5_ifc_encryption_key_obj_bits encryption_key_object; }; enum { MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0, MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1, }; enum { MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1, }; struct mlx5_ifc_tls_static_params_bits { u8 const_2[0x2]; u8 tls_version[0x4]; u8 const_1[0x2]; u8 reserved_at_8[0x14]; u8 encryption_standard[0x4]; u8 reserved_at_20[0x20]; u8 initial_record_number[0x40]; u8 resync_tcp_sn[0x20]; u8 gcm_iv[0x20]; u8 implicit_iv[0x40]; u8 reserved_at_100[0x8]; u8 dek_index[0x18]; u8 reserved_at_120[0xe0]; }; struct mlx5_ifc_tls_progress_params_bits { u8 reserved_at_0[0x8]; u8 tisn[0x18]; u8 next_record_tcp_sn[0x20]; u8 hw_resync_tcp_sn[0x20]; u8 record_tracker_state[0x2]; u8 auth_state[0x2]; u8 reserved_at_64[0x4]; u8 hw_offset_record_number[0x18]; }; #endif /* MLX5_IFC_H */ mlx5/device.h 0000644 00000102534 14722070374 0007054 0 ustar 00 /* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif

/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))

/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
	u32 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
	MLX5_SET(typ, p, fld[idx], v); \
} while (0)

#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))

#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

#define __MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld, v); \
} while (0)

#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET64_PR(typ, p, fld) ({ \
	u64 ___t = MLX5_GET64(typ, p, fld); \
	pr_debug(#fld " = 0x%llx\n", ___t); \
	___t; \
})

#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
__mlx5_mask16(typ, fld))

#define MLX5_SET16(typ, p, fld, v) do { \
	u16 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
		     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
		     << __mlx5_16_bit_off(typ, fld))); \
} while (0)

/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
	__mlx5_64_off(typ, fld)))

#define MLX5_GET_BE(type_t, typ, p, fld) ({ \
		type_t tmp; \
		switch (sizeof(tmp)) { \
		case sizeof(u8): \
			tmp = (__force type_t)MLX5_GET(typ, p, fld); \
			break; \
		case sizeof(u16): \
			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
			break; \
		case sizeof(u32): \
			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
			break; \
		case sizeof(u64): \
			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
			break; \
		} \
		tmp; \
		})

enum mlx5_inline_modes {
	MLX5_INLINE_MODE_NONE,
	MLX5_INLINE_MODE_L2,
	MLX5_INLINE_MODE_IP,
	MLX5_INLINE_MODE_TCP_UDP,
};

enum {
	MLX5_MAX_COMMANDS		= 32,
	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
	MLX5_PCI_CMD_XPORT		= 7,
	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
	MLX5_MAX_PSVS			= 4,
};

enum {
	MLX5_EXTENDED_UD_AV		= 0x80000000,
};

enum {
	MLX5_CQ_STATE_ARMED		= 9,
	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
	MLX5_CQ_STATE_FIRED		= 0xa,
};

enum {
	MLX5_STAT_RATE_OFFSET	= 5,
};

enum {
	MLX5_INLINE_SEG = 0x80000000,
};

enum {
	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
};

enum {
	MLX5_MIN_PKEY_TABLE_SIZE = 128,
	MLX5_MAX_LOG_PKEY_TABLE  = 5,
};

enum {
	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
};

enum {
	MLX5_PFAULT_SUBTYPE_WQE = 0,
	MLX5_PFAULT_SUBTYPE_RDMA = 1,
};

enum wqe_page_fault_type {
	MLX5_WQE_PF_TYPE_RMP = 0,
	MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
	MLX5_WQE_PF_TYPE_RESP = 2,
	MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
};

enum {
	MLX5_PERM_LOCAL_READ	= 1 << 2,
	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
	MLX5_PERM_REMOTE_READ	= 1 << 4,
	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
	MLX5_PERM_ATOMIC	= 1 << 6,
	MLX5_PERM_UMR_EN	= 1 << 7,
};

enum {
	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING =
1 << 2, MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3, MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6, MLX5_PCIE_CTRL_TPH_MASK = 3 << 4, }; enum { MLX5_EN_RD = (u64)1, MLX5_EN_WR = (u64)2 }; enum { MLX5_ADAPTER_PAGE_SHIFT = 12, MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, }; enum { MLX5_BFREGS_PER_UAR = 4, MLX5_MAX_UARS = 1 << 8, MLX5_NON_FP_BFREGS_PER_UAR = 2, MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR - MLX5_NON_FP_BFREGS_PER_UAR, MLX5_MAX_BFREGS = MLX5_MAX_UARS * MLX5_NON_FP_BFREGS_PER_UAR, MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE, MLX5_MIN_DYN_BFREGS = 512, MLX5_MAX_DYN_BFREGS = 1024, }; enum { MLX5_MKEY_MASK_LEN = 1ull << 0, MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1, MLX5_MKEY_MASK_START_ADDR = 1ull << 6, MLX5_MKEY_MASK_PD = 1ull << 7, MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8, MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9, MLX5_MKEY_MASK_BSF_EN = 1ull << 12, MLX5_MKEY_MASK_KEY = 1ull << 13, MLX5_MKEY_MASK_QPN = 1ull << 14, MLX5_MKEY_MASK_LR = 1ull << 17, MLX5_MKEY_MASK_LW = 1ull << 18, MLX5_MKEY_MASK_RR = 1ull << 19, MLX5_MKEY_MASK_RW = 1ull << 20, MLX5_MKEY_MASK_A = 1ull << 21, MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, MLX5_MKEY_MASK_FREE = 1ull << 29, }; enum { MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4), MLX5_UMR_CHECK_NOT_FREE = (1 << 5), MLX5_UMR_CHECK_FREE = (2 << 5), MLX5_UMR_INLINE = (1 << 7), }; #define MLX5_UMR_MTT_ALIGNMENT 0x40 #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1) #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8) enum { MLX5_EVENT_QUEUE_TYPE_QP = 0, MLX5_EVENT_QUEUE_TYPE_RQ = 1, MLX5_EVENT_QUEUE_TYPE_SQ = 2, MLX5_EVENT_QUEUE_TYPE_DCT = 6, }; /* mlx5 components can subscribe to any one of these events via * mlx5_eq_notifier_register API. 
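 *
 * A minimal registration sketch (hypothetical consumer code; struct mlx5_nb,
 * MLX5_NB_INIT() and mlx5_eq_notifier_register() are assumed to come from
 * linux/mlx5/driver.h):
 *
 *	static int port_change_cb(struct notifier_block *nb,
 *				  unsigned long type, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	struct mlx5_nb port_nb;
 *
 *	MLX5_NB_INIT(&port_nb, port_change_cb, PORT_CHANGE);
 *	mlx5_eq_notifier_register(mdev, &port_nb);
 *
 * The handler receives the raw struct mlx5_eqe through the data pointer.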
*/ enum mlx5_event { /* Special value to subscribe to any event */ MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0, /* HW events enum start: comp events are not subscribable */ MLX5_EVENT_TYPE_COMP = 0x0, /* HW Async events enum start: subscribable events */ MLX5_EVENT_TYPE_PATH_MIG = 0x01, MLX5_EVENT_TYPE_COMM_EST = 0x02, MLX5_EVENT_TYPE_SQ_DRAINED = 0x03, MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13, MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14, MLX5_EVENT_TYPE_CQ_ERROR = 0x04, MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07, MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08, MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, MLX5_EVENT_TYPE_XRQ_ERROR = 0x18, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24, MLX5_EVENT_TYPE_PPS_EVENT = 0x25, MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, MLX5_EVENT_TYPE_CMD = 0x0a, MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, MLX5_EVENT_TYPE_MAX = 0x100, }; enum { MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0, MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1, }; enum { MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1, MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5, }; enum { MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1, MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4, MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5, MLX5_PORT_CHANGE_SUBTYPE_LID = 6, MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7, MLX5_PORT_CHANGE_SUBTYPE_GUID = 8, MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9, }; enum { MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, MLX5_DEV_CAP_FLAG_APM = 1LL << 17, MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, }; enum { MLX5_ROCE_VERSION_1 = 0, MLX5_ROCE_VERSION_2 = 2, }; enum { MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1, MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2, }; enum { MLX5_ROCE_L3_TYPE_IPV4 = 0, MLX5_ROCE_L3_TYPE_IPV6 = 1, }; enum { MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1, MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2, }; enum { MLX5_OPCODE_NOP = 0x00, MLX5_OPCODE_SEND_INVAL = 0x01, MLX5_OPCODE_RDMA_WRITE = 0x08, MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, MLX5_OPCODE_SEND = 0x0a, MLX5_OPCODE_SEND_IMM = 0x0b, MLX5_OPCODE_LSO = 0x0e, MLX5_OPCODE_RDMA_READ = 0x10, MLX5_OPCODE_ATOMIC_CS = 0x11, MLX5_OPCODE_ATOMIC_FA = 0x12, MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14, MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15, MLX5_OPCODE_BIND_MW = 0x18, MLX5_OPCODE_CONFIG_CMD = 0x1f, MLX5_OPCODE_ENHANCED_MPSW = 0x29, MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, MLX5_RECV_OPCODE_SEND = 0x01, MLX5_RECV_OPCODE_SEND_IMM = 0x02, MLX5_RECV_OPCODE_SEND_INVAL = 0x03, MLX5_CQE_OPCODE_ERROR = 0x1e, MLX5_CQE_OPCODE_RESIZE = 0x16, MLX5_OPCODE_SET_PSV = 0x20, MLX5_OPCODE_GET_PSV = 0x21, MLX5_OPCODE_CHECK_PSV = 0x22, 
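/* The SET/GET/CHECK_PSV opcodes above (and the RGET/RCHECK variants below)
 * are used by the signature hand-over offload path; the matching WQE segment
 * layouts are mlx5_seg_set_psv, mlx5_seg_get_psv and mlx5_seg_check_psv in
 * mlx5/qp.h further down in this archive. */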
MLX5_OPCODE_DUMP = 0x23, MLX5_OPCODE_RGET_PSV = 0x26, MLX5_OPCODE_RCHECK_PSV = 0x27, MLX5_OPCODE_UMR = 0x25, }; enum { MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1, }; enum { MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1, }; enum { MLX5_SET_PORT_RESET_QKEY = 0, MLX5_SET_PORT_GUID0 = 16, MLX5_SET_PORT_NODE_GUID = 17, MLX5_SET_PORT_SYS_GUID = 18, MLX5_SET_PORT_GID_TABLE = 19, MLX5_SET_PORT_PKEY_TABLE = 20, }; enum { MLX5_BW_NO_LIMIT = 0, MLX5_100_MBPS_UNIT = 3, MLX5_GBPS_UNIT = 4, }; enum { MLX5_MAX_PAGE_SHIFT = 31 }; enum { MLX5_CAP_OFF_CMDIF_CSUM = 46, }; enum { /* * Max wqe size for rdma read is 512 bytes, so this * limits our max_sge_rd as the wqe needs to fit: * - ctrl segment (16 bytes) * - rdma segment (16 bytes) * - scatter elements (16 bytes each) */ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 }; enum mlx5_odp_transport_cap_bits { MLX5_ODP_SUPPORT_SEND = 1 << 31, MLX5_ODP_SUPPORT_RECV = 1 << 30, MLX5_ODP_SUPPORT_WRITE = 1 << 29, MLX5_ODP_SUPPORT_READ = 1 << 28, }; struct mlx5_odp_caps { char reserved[0x10]; struct { __be32 rc_odp_caps; __be32 uc_odp_caps; __be32 ud_odp_caps; } per_transport_caps; char reserved2[0xe4]; }; struct mlx5_cmd_layout { u8 type; u8 rsvd0[3]; __be32 inlen; __be64 in_ptr; __be32 in[4]; __be32 out[4]; __be64 out_ptr; __be32 outlen; u8 token; u8 sig; u8 rsvd1; u8 status_own; }; enum mlx5_fatal_assert_bit_offsets { MLX5_RFR_OFFSET = 31, }; struct health_buffer { __be32 assert_var[5]; __be32 rsvd0[3]; __be32 assert_exit_ptr; __be32 assert_callra; __be32 rsvd1[2]; __be32 fw_ver; __be32 hw_id; __be32 rfr; u8 irisc_index; u8 synd; __be16 ext_synd; }; enum mlx5_initializing_bit_offsets { MLX5_FW_RESET_SUPPORTED_OFFSET = 30, }; enum mlx5_cmd_addr_l_sz_offset { MLX5_NIC_IFC_OFFSET = 8, }; struct mlx5_init_seg { __be32 fw_rev; __be32 cmdif_rev_fw_sub; __be32 rsvd0[2]; __be32 cmdq_addr_h; __be32 cmdq_addr_l_sz; __be32 cmd_dbell; __be32 rsvd1[120]; __be32 initializing; struct health_buffer health; __be32 rsvd2[880]; __be32 internal_timer_h; __be32 internal_timer_l; __be32 rsvd3[2]; __be32 health_counter; __be32 rsvd4[1019]; __be64 ieee1588_clk; __be32 ieee1588_clk_type; __be32 clr_intx; }; struct mlx5_eqe_comp { __be32 reserved[6]; __be32 cqn; }; struct mlx5_eqe_qp_srq { __be32 reserved1[5]; u8 type; u8 reserved2[3]; __be32 qp_srq_n; }; struct mlx5_eqe_cq_err { __be32 cqn; u8 reserved1[7]; u8 syndrome; }; struct mlx5_eqe_xrq_err { __be32 reserved1[5]; __be32 type_xrqn; __be32 reserved2; }; struct mlx5_eqe_port_state { u8 reserved0[8]; u8 port; }; struct mlx5_eqe_gpio { __be32 reserved0[2]; __be64 gpio_event; }; struct mlx5_eqe_congestion { u8 type; u8 rsvd0; u8 congestion_level; }; struct mlx5_eqe_stall_vl { u8 rsvd0[3]; u8 port_vl; }; struct mlx5_eqe_cmd { __be32 vector; __be32 rsvd[6]; }; struct mlx5_eqe_page_req { __be16 ec_function; __be16 func_id; __be32 num_pages; __be32 rsvd1[5]; }; struct mlx5_eqe_page_fault { __be32 bytes_committed; union { struct { u16 reserved1; __be16 wqe_index; u16 reserved2; __be16 packet_length; __be32 token; u8 reserved4[8]; __be32 pftype_wq; } __packed wqe; struct { __be32 r_key; u16 reserved1; __be16 packet_length; __be32 rdma_op_len; __be64 rdma_va; __be32 pftype_token; } __packed rdma; } __packed; } __packed; struct mlx5_eqe_vport_change { u8 rsvd0[2]; __be16 vport_num; __be32 rsvd1[6]; } __packed; struct mlx5_eqe_port_module { u8 reserved_at_0[1]; u8 module; u8 reserved_at_2[1]; u8 module_status; u8 reserved_at_4[2]; u8 error_type; } __packed; struct mlx5_eqe_pps { u8 rsvd0[3]; u8 pin; u8 rsvd1[4]; union { struct { __be32 time_sec; __be32 
time_nsec; }; struct { __be64 time_stamp; }; }; u8 rsvd2[12]; } __packed; struct mlx5_eqe_dct { __be32 reserved[6]; __be32 dctn; }; struct mlx5_eqe_temp_warning { __be64 sensor_warning_msb; __be64 sensor_warning_lsb; } __packed; union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; struct mlx5_eqe_comp comp; struct mlx5_eqe_qp_srq qp_srq; struct mlx5_eqe_cq_err cq_err; struct mlx5_eqe_port_state port; struct mlx5_eqe_gpio gpio; struct mlx5_eqe_congestion cong; struct mlx5_eqe_stall_vl stall_vl; struct mlx5_eqe_page_req req_pages; struct mlx5_eqe_page_fault page_fault; struct mlx5_eqe_vport_change vport_change; struct mlx5_eqe_port_module port_module; struct mlx5_eqe_pps pps; struct mlx5_eqe_dct dct; struct mlx5_eqe_temp_warning temp_warning; struct mlx5_eqe_xrq_err xrq_err; } __packed; struct mlx5_eqe { u8 rsvd0; u8 type; u8 rsvd1; u8 sub_type; __be32 rsvd2[7]; union ev_data data; __be16 rsvd3; u8 signature; u8 owner; } __packed; struct mlx5_cmd_prot_block { u8 data[MLX5_CMD_DATA_BLOCK_SIZE]; u8 rsvd0[48]; __be64 next; __be32 block_num; u8 rsvd1; u8 token; u8 ctrl_sig; u8 sig; }; enum { MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5, }; struct mlx5_err_cqe { u8 rsvd0[32]; __be32 srqn; u8 rsvd1[18]; u8 vendor_err_synd; u8 syndrome; __be32 s_wqe_opcode_qpn; __be16 wqe_counter; u8 signature; u8 op_own; }; struct mlx5_cqe64 { u8 outer_l3_tunneled; u8 rsvd0; __be16 wqe_id; u8 lro_tcppsh_abort_dupack; u8 lro_min_ttl; __be16 lro_tcp_win; __be32 lro_ack_seq_num; __be32 rss_hash_result; u8 rss_hash_type; u8 ml_path; u8 rsvd20[2]; __be16 check_sum; __be16 slid; __be32 flags_rqpn; u8 hds_ip_ext; u8 l4_l3_hdr_type; __be16 vlan_info; __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ __be32 imm_inval_pkey; u8 rsvd40[4]; __be32 byte_cnt; __be32 timestamp_h; __be32 timestamp_l; __be32 sop_drop_qpn; __be16 wqe_counter; u8 signature; u8 op_own; }; struct mlx5_mini_cqe8 { union { __be32 rx_hash_result; struct { __be16 checksum; __be16 rsvd; }; struct { __be16 wqe_counter; u8 s_wqe_opcode; u8 reserved; } s_wqe_info; }; __be32 byte_cnt; }; enum { MLX5_NO_INLINE_DATA, MLX5_INLINE_DATA32_SEG, MLX5_INLINE_DATA64_SEG, MLX5_COMPRESSED, }; enum { MLX5_CQE_FORMAT_CSUM = 0x1, }; #define MLX5_MINI_CQE_ARRAY_SIZE 8 static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) { return (cqe->op_own >> 2) & 0x3; } static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe) { return cqe->op_own >> 4; } static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; } static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) { return (cqe->l4_l3_hdr_type >> 4) & 0x7; } static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) { return (cqe->l4_l3_hdr_type >> 2) & 0x3; } static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) { return cqe->outer_l3_tunneled & 0x1; } static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe) { return cqe->l4_l3_hdr_type & 0x1; } static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) { u32 hi, lo; hi = be32_to_cpu(cqe->timestamp_h); lo = be32_to_cpu(cqe->timestamp_l); return (u64)lo | ((u64)hi << 32); } #define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9) #define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6) struct mpwrq_cqe_bc { __be16 filler_consumed_strides; __be16 byte_cnt; }; static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe) { struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; return be16_to_cpu(bc->byte_cnt); } static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc) { return 0x7fff & 
be16_to_cpu(bc->filler_consumed_strides); } static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe) { struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; return mpwrq_get_cqe_bc_consumed_strides(bc); } static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe) { struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; return 0x8000 & be16_to_cpu(bc->filler_consumed_strides); } static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe) { return be16_to_cpu(cqe->wqe_counter); } enum { CQE_L4_HDR_TYPE_NONE = 0x0, CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, CQE_L4_HDR_TYPE_UDP = 0x2, CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3, CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4, }; enum { CQE_RSS_HTYPE_IP = 0x3 << 2, /* cqe->rss_hash_type[3:2] - IP destination selected for hash * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved) */ CQE_RSS_HTYPE_L4 = 0x3 << 6, /* cqe->rss_hash_type[7:6] - L4 destination selected for hash * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI) */ }; enum { MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0, MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1, MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2, }; enum { CQE_L2_OK = 1 << 0, CQE_L3_OK = 1 << 1, CQE_L4_OK = 1 << 2, }; struct mlx5_sig_err_cqe { u8 rsvd0[16]; __be32 expected_trans_sig; __be32 actual_trans_sig; __be32 expected_reftag; __be32 actual_reftag; __be16 syndrome; u8 rsvd22[2]; __be32 mkey; __be64 err_offset; u8 rsvd30[8]; __be32 qpn; u8 rsvd38[2]; u8 signature; u8 op_own; }; struct mlx5_wqe_srq_next_seg { u8 rsvd0[2]; __be16 next_wqe_index; u8 signature; u8 rsvd1[11]; }; union mlx5_ext_cqe { struct ib_grh grh; u8 inl[64]; }; struct mlx5_cqe128 { union mlx5_ext_cqe inl_grh; struct mlx5_cqe64 cqe64; }; enum { MLX5_MKEY_STATUS_FREE = 1 << 6, }; enum { MLX5_MKEY_REMOTE_INVAL = 1 << 24, MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, MLX5_MKEY_BSF_EN = 1 << 30, MLX5_MKEY_LEN64 = 1 << 31, }; struct mlx5_mkey_seg { /* This is a two-bit field occupying bits 31-30.
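 * (These are the two most significant bits of the "status" byte that opens
 * the structure; compare MLX5_MKEY_STATUS_FREE above.)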
* bit 31 is always 0, * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation */ u8 status; u8 pcie_control; u8 flags; u8 version; __be32 qpn_mkey7_0; u8 rsvd1[4]; __be32 flags_pd; __be64 start_addr; __be64 len; __be32 bsfs_octo_size; u8 rsvd2[16]; __be32 xlt_oct_size; u8 rsvd3[3]; u8 log2_page_size; u8 rsvd4[4]; }; #define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) enum { MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 }; enum { VPORT_STATE_DOWN = 0x0, VPORT_STATE_UP = 0x1, }; enum { MLX5_VPORT_ADMIN_STATE_DOWN = 0x0, MLX5_VPORT_ADMIN_STATE_UP = 0x1, MLX5_VPORT_ADMIN_STATE_AUTO = 0x2, }; enum { MLX5_L3_PROT_TYPE_IPV4 = 0, MLX5_L3_PROT_TYPE_IPV6 = 1, }; enum { MLX5_L4_PROT_TYPE_TCP = 0, MLX5_L4_PROT_TYPE_UDP = 1, }; enum { MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0, MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1, MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2, MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3, MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4, }; enum { MLX5_MATCH_OUTER_HEADERS = 1 << 0, MLX5_MATCH_MISC_PARAMETERS = 1 << 1, MLX5_MATCH_INNER_HEADERS = 1 << 2, MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3, MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4, }; enum { MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0, MLX5_FLOW_TABLE_TYPE_ESWITCH = 4, }; enum { MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0, MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1, MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2, }; enum mlx5_list_type { MLX5_NVPRT_LIST_TYPE_UC = 0x0, MLX5_NVPRT_LIST_TYPE_MC = 0x1, MLX5_NVPRT_LIST_TYPE_VLAN = 0x2, }; enum { MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, }; enum mlx5_wol_mode { MLX5_WOL_DISABLE = 0, MLX5_WOL_SECURED_MAGIC = 1 << 1, MLX5_WOL_MAGIC = 1 << 2, MLX5_WOL_ARP = 1 << 3, MLX5_WOL_BROADCAST = 1 << 4, MLX5_WOL_MULTICAST = 1 << 5, MLX5_WOL_UNICAST = 1 << 6, MLX5_WOL_PHY_ACTIVITY = 1 << 7, }; enum mlx5_mpls_supported_fields { MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0, MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1, MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2, MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3 }; enum mlx5_flex_parser_protos { MLX5_FLEX_PROTO_GENEVE = 1 << 3, MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4, MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5, }; /* MLX5 DEV CAPs */ /* TODO: EAT.ME */ enum mlx5_cap_mode { HCA_CAP_OPMOD_GET_MAX = 0, HCA_CAP_OPMOD_GET_CUR = 1, }; enum mlx5_cap_type { MLX5_CAP_GENERAL = 0, MLX5_CAP_ETHERNET_OFFLOADS, MLX5_CAP_ODP, MLX5_CAP_ATOMIC, MLX5_CAP_ROCE, MLX5_CAP_IPOIB_OFFLOADS, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS, MLX5_CAP_FLOW_TABLE, MLX5_CAP_ESWITCH_FLOW_TABLE, MLX5_CAP_ESWITCH, MLX5_CAP_RESERVED, MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, MLX5_CAP_DEBUG, MLX5_CAP_RESERVED_14, MLX5_CAP_DEV_MEM, MLX5_CAP_RESERVED_16, MLX5_CAP_TLS, MLX5_CAP_DEV_EVENT = 0x14, /* NUM OF CAP Types */ MLX5_CAP_NUM }; enum mlx5_pcam_reg_groups { MLX5_PCAM_REGS_5000_TO_507F = 0x0, }; enum mlx5_pcam_feature_groups { MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0, }; enum mlx5_mcam_reg_groups { MLX5_MCAM_REGS_FIRST_128 = 0x0, }; enum mlx5_mcam_feature_groups { MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0, }; enum mlx5_qcam_reg_groups { MLX5_QCAM_REGS_FIRST_128 = 0x0, }; enum mlx5_qcam_feature_groups { MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0, }; /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_GEN_64(mdev, cap) \ MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_GEN_MAX(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) #define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\ mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_ETH_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap) #define MLX5_CAP_ROCE(mdev, cap) \ MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ROCE_MAX(mdev, cap) \ MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap) #define MLX5_CAP_ATOMIC(mdev, cap) \ MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap) #define MLX5_CAP_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP64_FLOWTABLE(mdev, cap) \ MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) #define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap) #define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap) #define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap) #define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap) #define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap) #define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap) #define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap) #define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap) #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap) #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap) #define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap) #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap) #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap) #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET64(flow_table_eswitch_cap, \ 
(mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) #define MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap) #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) #define MLX5_CAP_ODP_MAX(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap) #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ MLX5_GET(vector_calc_cap, \ mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) #define MLX5_CAP_QOS(mdev, cap)\ MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap) #define MLX5_CAP_DEBUG(mdev, cap)\ MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap) #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) #define MLX5_CAP_PCAM_REG(mdev, reg) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg) #define MLX5_CAP_MCAM_REG(mdev, reg) \ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg) #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) #define MLX5_CAP_QCAM_REG(mdev, fld) \ MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld) #define MLX5_CAP_QCAM_FEATURE(mdev, fld) \ MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld) #define MLX5_CAP_FPGA(mdev, cap) \ MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap) #define MLX5_CAP64_FPGA(mdev, cap) \ MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) #define MLX5_CAP_DEV_MEM(mdev, cap)\ MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) #define MLX5_CAP64_DEV_MEM(mdev, cap)\ MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) #define MLX5_CAP_TLS(mdev, cap) \ MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap) #define MLX5_CAP_DEV_EVENT(mdev, cap)\ MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, MLX5_CMD_STAT_BAD_OP_ERR = 0x2, MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, MLX5_CMD_STAT_BAD_RES_ERR = 0x5, MLX5_CMD_STAT_RES_BUSY = 0x6, MLX5_CMD_STAT_LIM_ERR = 0x8, MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, MLX5_CMD_STAT_IX_ERR = 0xa, MLX5_CMD_STAT_NO_RES_ERR = 0xf, MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, }; enum { MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0, MLX5_RFC_2863_COUNTERS_GROUP = 0x1, MLX5_RFC_2819_COUNTERS_GROUP = 0x2, MLX5_RFC_3635_COUNTERS_GROUP = 0x3, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; enum { MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0, }; static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) { if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) return 0; return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16 #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 #define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\ MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\ MLX5_BY_PASS_NUM_MULTICAST_PRIOS) #endif /* MLX5_DEVICE_H */ mlx5/mlx5_ifc_fpga.h 0000644 00000034166 14722070374 0010325 0 ustar 00 /* * 
Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef MLX5_IFC_FPGA_H #define MLX5_IFC_FPGA_H struct mlx5_ifc_ipv4_layout_bits { u8 reserved_at_0[0x60]; u8 ipv4[0x20]; }; struct mlx5_ifc_ipv6_layout_bits { u8 ipv6[16][0x8]; }; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { struct mlx5_ifc_ipv6_layout_bits ipv6_layout; struct mlx5_ifc_ipv4_layout_bits ipv4_layout; u8 reserved_at_0[0x80]; }; enum { MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9, }; enum { MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2, MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3, }; struct mlx5_ifc_fpga_shell_caps_bits { u8 max_num_qps[0x10]; u8 reserved_at_10[0x8]; u8 total_rcv_credits[0x8]; u8 reserved_at_20[0xe]; u8 qp_type[0x2]; u8 reserved_at_30[0x5]; u8 rae[0x1]; u8 rwe[0x1]; u8 rre[0x1]; u8 reserved_at_38[0x4]; u8 dc[0x1]; u8 ud[0x1]; u8 uc[0x1]; u8 rc[0x1]; u8 reserved_at_40[0x1a]; u8 log_ddr_size[0x6]; u8 max_fpga_qp_msg_size[0x20]; u8 reserved_at_80[0x180]; }; struct mlx5_ifc_fpga_cap_bits { u8 fpga_id[0x8]; u8 fpga_device[0x18]; u8 register_file_ver[0x20]; u8 fpga_ctrl_modify[0x1]; u8 reserved_at_41[0x5]; u8 access_reg_query_mode[0x2]; u8 reserved_at_48[0x6]; u8 access_reg_modify_mode[0x2]; u8 reserved_at_50[0x10]; u8 reserved_at_60[0x20]; u8 image_version[0x20]; u8 image_date[0x20]; u8 image_time[0x20]; u8 shell_version[0x20]; u8 reserved_at_100[0x80]; struct mlx5_ifc_fpga_shell_caps_bits shell_caps; u8 reserved_at_380[0x8]; u8 ieee_vendor_id[0x18]; u8 sandbox_product_version[0x10]; u8 sandbox_product_id[0x10]; u8 sandbox_basic_caps[0x20]; u8 reserved_at_3e0[0x10]; u8 sandbox_extended_caps_len[0x10]; u8 sandbox_extended_caps_addr[0x40]; u8 fpga_ddr_start_addr[0x40]; u8 fpga_cr_space_start_addr[0x40]; u8 fpga_ddr_size[0x20]; u8 fpga_cr_space_size[0x20]; u8 reserved_at_500[0x300]; }; enum { MLX5_FPGA_CTRL_OPERATION_LOAD = 0x1, MLX5_FPGA_CTRL_OPERATION_RESET = 0x2, MLX5_FPGA_CTRL_OPERATION_FLASH_SELECT = 0x3, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON = 0x4, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_OFF = 0x5, MLX5_FPGA_CTRL_OPERATION_RESET_SANDBOX = 0x6, }; struct mlx5_ifc_fpga_ctrl_bits { u8 reserved_at_0[0x8]; u8 operation[0x8]; u8 reserved_at_10[0x8]; u8 status[0x8]; u8 reserved_at_20[0x8]; u8 flash_select_admin[0x8]; u8 
reserved_at_30[0x8]; u8 flash_select_oper[0x8]; u8 reserved_at_40[0x40]; }; enum { MLX5_FPGA_ERROR_EVENT_SYNDROME_CORRUPTED_DDR = 0x1, MLX5_FPGA_ERROR_EVENT_SYNDROME_FLASH_TIMEOUT = 0x2, MLX5_FPGA_ERROR_EVENT_SYNDROME_INTERNAL_LINK_ERROR = 0x3, MLX5_FPGA_ERROR_EVENT_SYNDROME_WATCHDOG_FAILURE = 0x4, MLX5_FPGA_ERROR_EVENT_SYNDROME_I2C_FAILURE = 0x5, MLX5_FPGA_ERROR_EVENT_SYNDROME_IMAGE_CHANGED = 0x6, MLX5_FPGA_ERROR_EVENT_SYNDROME_TEMPERATURE_CRITICAL = 0x7, }; struct mlx5_ifc_fpga_error_event_bits { u8 reserved_at_0[0x40]; u8 reserved_at_40[0x18]; u8 syndrome[0x8]; u8 reserved_at_60[0x80]; }; #define MLX5_FPGA_ACCESS_REG_SIZE_MAX 64 struct mlx5_ifc_fpga_access_reg_bits { u8 reserved_at_0[0x20]; u8 reserved_at_20[0x10]; u8 size[0x10]; u8 address[0x40]; u8 data[0][0x8]; }; enum mlx5_ifc_fpga_qp_state { MLX5_FPGA_QPC_STATE_INIT = 0x0, MLX5_FPGA_QPC_STATE_ACTIVE = 0x1, MLX5_FPGA_QPC_STATE_ERROR = 0x2, }; enum mlx5_ifc_fpga_qp_type { MLX5_FPGA_QPC_QP_TYPE_SHELL_QP = 0x0, MLX5_FPGA_QPC_QP_TYPE_SANDBOX_QP = 0x1, }; enum mlx5_ifc_fpga_qp_service_type { MLX5_FPGA_QPC_ST_RC = 0x0, }; struct mlx5_ifc_fpga_qpc_bits { u8 state[0x4]; u8 reserved_at_4[0x1b]; u8 qp_type[0x1]; u8 reserved_at_20[0x4]; u8 st[0x4]; u8 reserved_at_28[0x10]; u8 traffic_class[0x8]; u8 ether_type[0x10]; u8 prio[0x3]; u8 dei[0x1]; u8 vid[0xc]; u8 reserved_at_60[0x20]; u8 reserved_at_80[0x8]; u8 next_rcv_psn[0x18]; u8 reserved_at_a0[0x8]; u8 next_send_psn[0x18]; u8 reserved_at_c0[0x10]; u8 pkey[0x10]; u8 reserved_at_e0[0x8]; u8 remote_qpn[0x18]; u8 reserved_at_100[0x15]; u8 rnr_retry[0x3]; u8 reserved_at_118[0x5]; u8 retry_count[0x3]; u8 reserved_at_120[0x20]; u8 reserved_at_140[0x10]; u8 remote_mac_47_32[0x10]; u8 remote_mac_31_0[0x20]; u8 remote_ip[16][0x8]; u8 reserved_at_200[0x40]; u8 reserved_at_240[0x10]; u8 fpga_mac_47_32[0x10]; u8 fpga_mac_31_0[0x20]; u8 fpga_ip[16][0x8]; }; struct mlx5_ifc_fpga_create_qp_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x40]; struct mlx5_ifc_fpga_qpc_bits fpga_qpc; }; struct mlx5_ifc_fpga_create_qp_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x8]; u8 fpga_qpn[0x18]; u8 reserved_at_60[0x20]; struct mlx5_ifc_fpga_qpc_bits fpga_qpc; }; struct mlx5_ifc_fpga_modify_qp_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 fpga_qpn[0x18]; u8 field_select[0x20]; struct mlx5_ifc_fpga_qpc_bits fpga_qpc; }; struct mlx5_ifc_fpga_modify_qp_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_fpga_query_qp_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 fpga_qpn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_fpga_query_qp_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; struct mlx5_ifc_fpga_qpc_bits fpga_qpc; }; struct mlx5_ifc_fpga_query_qp_counters_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 clear[0x1]; u8 reserved_at_41[0x7]; u8 fpga_qpn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_fpga_query_qp_counters_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; u8 rx_ack_packets[0x40]; u8 rx_send_packets[0x40]; u8 tx_ack_packets[0x40]; u8 tx_send_packets[0x40]; u8 rx_total_drop[0x40]; u8 reserved_at_1c0[0x1c0]; }; struct mlx5_ifc_fpga_destroy_qp_in_bits { 
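	/* Input layout for the FPGA destroy-QP command: the same opcode/op_mod
	 * header as the query command above, followed by the target fpga_qpn. */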
u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; u8 reserved_at_40[0x8]; u8 fpga_qpn[0x18]; u8 reserved_at_60[0x20]; }; struct mlx5_ifc_fpga_destroy_qp_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; u8 reserved_at_40[0x40]; }; struct mlx5_ifc_tls_extended_cap_bits { u8 aes_gcm_128[0x1]; u8 aes_gcm_256[0x1]; u8 reserved_at_2[0x1e]; u8 reserved_at_20[0x20]; u8 context_capacity_total[0x20]; u8 context_capacity_rx[0x20]; u8 context_capacity_tx[0x20]; u8 reserved_at_a0[0x10]; u8 tls_counter_size[0x10]; u8 tls_counters_addr_low[0x20]; u8 tls_counters_addr_high[0x20]; u8 rx[0x1]; u8 tx[0x1]; u8 tls_v12[0x1]; u8 tls_v13[0x1]; u8 lro[0x1]; u8 ipv6[0x1]; u8 reserved_at_106[0x1a]; }; struct mlx5_ifc_ipsec_extended_cap_bits { u8 encapsulation[0x20]; u8 reserved_0[0x12]; u8 v2_command[0x1]; u8 udp_encap[0x1]; u8 rx_no_trailer[0x1]; u8 ipv4_fragment[0x1]; u8 ipv6[0x1]; u8 esn[0x1]; u8 lso[0x1]; u8 transport_and_tunnel_mode[0x1]; u8 tunnel_mode[0x1]; u8 transport_mode[0x1]; u8 ah_esp[0x1]; u8 esp[0x1]; u8 ah[0x1]; u8 ipv4_options[0x1]; u8 auth_alg[0x20]; u8 enc_alg[0x20]; u8 sa_cap[0x20]; u8 reserved_1[0x10]; u8 number_of_ipsec_counters[0x10]; u8 ipsec_counters_addr_low[0x20]; u8 ipsec_counters_addr_high[0x20]; }; struct mlx5_ifc_ipsec_counters_bits { u8 dec_in_packets[0x40]; u8 dec_out_packets[0x40]; u8 dec_bypass_packets[0x40]; u8 enc_in_packets[0x40]; u8 enc_out_packets[0x40]; u8 enc_bypass_packets[0x40]; u8 drop_dec_packets[0x40]; u8 failed_auth_dec_packets[0x40]; u8 drop_enc_packets[0x40]; u8 success_add_sa[0x40]; u8 fail_add_sa[0x40]; u8 success_delete_sa[0x40]; u8 fail_delete_sa[0x40]; u8 dropped_cmd[0x40]; }; enum { MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1, MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2, }; struct mlx5_ifc_fpga_qp_error_event_bits { u8 reserved_at_0[0x40]; u8 reserved_at_40[0x18]; u8 syndrome[0x8]; u8 reserved_at_60[0x60]; u8 reserved_at_c0[0x8]; u8 fpga_qpn[0x18]; }; enum mlx5_ifc_fpga_ipsec_response_syndrome { MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0, MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1, MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2, MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3, }; struct mlx5_ifc_fpga_ipsec_cmd_resp { __be32 syndrome; union { __be32 sw_sa_handle; __be32 flags; }; u8 reserved[24]; } __packed; enum mlx5_ifc_fpga_ipsec_cmd_opcode { MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0, MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1, MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2, MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3, MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4, MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5, }; enum mlx5_ifc_fpga_ipsec_cap { MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0), }; struct mlx5_ifc_fpga_ipsec_cmd_cap { __be32 cmd; __be32 flags; u8 reserved[24]; } __packed; enum mlx5_ifc_fpga_ipsec_sa_flags { MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0), MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1), MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2), MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3), MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4), MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5), MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6), MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7), }; enum mlx5_ifc_fpga_ipsec_sa_enc_mode { MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0, MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1, MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3, }; struct mlx5_ifc_fpga_ipsec_sa_v1 { __be32 cmd; u8 key_enc[32]; u8 key_auth[32]; __be32 sip[4]; __be32 dip[4]; union { struct { __be32 reserved; u8 salt_iv[8]; __be32 salt; } __packed gcm; struct { u8 salt[16]; } __packed cbc; }; __be32 spi; __be32 
sw_sa_handle; __be16 tfclen; u8 enc_mode; u8 reserved1[2]; u8 flags; u8 reserved2[2]; }; struct mlx5_ifc_fpga_ipsec_sa { struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1; __be16 udp_sp; __be16 udp_dp; u8 reserved1[4]; __be32 esn; __be16 vid; /* only 12 bits, rest is reserved */ __be16 reserved2; } __packed; enum fpga_tls_cmds { CMD_SETUP_STREAM = 0x1001, CMD_TEARDOWN_STREAM = 0x1002, CMD_RESYNC_RX = 0x1003, }; #define MLX5_TLS_1_2 (0) #define MLX5_TLS_ALG_AES_GCM_128 (0) #define MLX5_TLS_ALG_AES_GCM_256 (1) struct mlx5_ifc_tls_cmd_bits { u8 command_type[0x20]; u8 ipv6[0x1]; u8 direction_sx[0x1]; u8 tls_version[0x2]; u8 reserved[0x1c]; u8 swid[0x20]; u8 src_port[0x10]; u8 dst_port[0x10]; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; u8 tls_rcd_sn[0x40]; u8 tcp_sn[0x20]; u8 tls_implicit_iv[0x20]; u8 tls_xor_iv[0x40]; u8 encryption_key[0x100]; u8 alg[4]; u8 reserved2[0x1c]; u8 reserved3[0x4a0]; }; struct mlx5_ifc_tls_resp_bits { u8 syndrome[0x20]; u8 stream_id[0x20]; u8 reserverd[0x40]; }; #define MLX5_TLS_COMMAND_SIZE (0x100) #endif /* MLX5_IFC_FPGA_H */ mlx5/qp.h 0000644 00000035262 14722070374 0006240 0 ustar 00 /* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifndef MLX5_QP_H #define MLX5_QP_H #include <linux/mlx5/device.h> #include <linux/mlx5/driver.h> #define MLX5_INVALID_LKEY 0x100 /* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */ #define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8) #define MLX5_DIF_SIZE 8 #define MLX5_STRIDE_BLOCK_OP 0x400 #define MLX5_CPY_GRD_MASK 0xc0 #define MLX5_CPY_APP_MASK 0x30 #define MLX5_CPY_REF_MASK 0x0f #define MLX5_BSF_INC_REFTAG (1 << 6) #define MLX5_BSF_INL_VALID (1 << 15) #define MLX5_BSF_REFRESH_DIF (1 << 14) #define MLX5_BSF_REPEAT_BLOCK (1 << 7) #define MLX5_BSF_APPTAG_ESCAPE 0x1 #define MLX5_BSF_APPREF_ESCAPE 0x2 enum mlx5_qp_optpar { MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, MLX5_QP_OPTPAR_RRE = 1 << 1, MLX5_QP_OPTPAR_RAE = 1 << 2, MLX5_QP_OPTPAR_RWE = 1 << 3, MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4, MLX5_QP_OPTPAR_Q_KEY = 1 << 5, MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, MLX5_QP_OPTPAR_SRA_MAX = 1 << 8, MLX5_QP_OPTPAR_RRA_MAX = 1 << 9, MLX5_QP_OPTPAR_PM_STATE = 1 << 10, MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12, MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13, MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, MLX5_QP_OPTPAR_PRI_PORT = 1 << 16, MLX5_QP_OPTPAR_SRQN = 1 << 18, MLX5_QP_OPTPAR_CQN_RCV = 1 << 19, MLX5_QP_OPTPAR_DC_HS = 1 << 20, MLX5_QP_OPTPAR_DC_KEY = 1 << 21, MLX5_QP_OPTPAR_COUNTER_SET_ID = 1 << 25, }; enum mlx5_qp_state { MLX5_QP_STATE_RST = 0, MLX5_QP_STATE_INIT = 1, MLX5_QP_STATE_RTR = 2, MLX5_QP_STATE_RTS = 3, MLX5_QP_STATE_SQER = 4, MLX5_QP_STATE_SQD = 5, MLX5_QP_STATE_ERR = 6, MLX5_QP_STATE_SQ_DRAINING = 7, MLX5_QP_STATE_SUSPENDED = 9, MLX5_QP_NUM_STATE, MLX5_QP_STATE, MLX5_QP_STATE_BAD, }; enum { MLX5_SQ_STATE_NA = MLX5_SQC_STATE_ERR + 1, MLX5_SQ_NUM_STATE = MLX5_SQ_STATE_NA + 1, MLX5_RQ_STATE_NA = MLX5_RQC_STATE_ERR + 1, MLX5_RQ_NUM_STATE = MLX5_RQ_STATE_NA + 1, }; enum { MLX5_QP_ST_RC = 0x0, MLX5_QP_ST_UC = 0x1, MLX5_QP_ST_UD = 0x2, MLX5_QP_ST_XRC = 0x3, MLX5_QP_ST_MLX = 0x4, MLX5_QP_ST_DCI = 0x5, MLX5_QP_ST_DCT = 0x6, MLX5_QP_ST_QP0 = 0x7, MLX5_QP_ST_QP1 = 0x8, MLX5_QP_ST_RAW_ETHERTYPE = 0x9, MLX5_QP_ST_RAW_IPV6 = 0xa, MLX5_QP_ST_SNIFFER = 0xb, MLX5_QP_ST_SYNC_UMR = 0xe, MLX5_QP_ST_PTP_1588 = 0xd, MLX5_QP_ST_REG_UMR = 0xc, MLX5_QP_ST_MAX }; enum { MLX5_QP_PM_MIGRATED = 0x3, MLX5_QP_PM_ARMED = 0x0, MLX5_QP_PM_REARM = 0x1 }; enum { MLX5_NON_ZERO_RQ = 0x0, MLX5_SRQ_RQ = 0x1, MLX5_CRQ_RQ = 0x2, MLX5_ZERO_LEN_RQ = 0x3 }; /* TODO REM */ enum { /* params1 */ MLX5_QP_BIT_SRE = 1 << 15, MLX5_QP_BIT_SWE = 1 << 14, MLX5_QP_BIT_SAE = 1 << 13, /* params2 */ MLX5_QP_BIT_RRE = 1 << 15, MLX5_QP_BIT_RWE = 1 << 14, MLX5_QP_BIT_RAE = 1 << 13, MLX5_QP_BIT_RIC = 1 << 4, MLX5_QP_BIT_CC_SLAVE_RECV = 1 << 2, MLX5_QP_BIT_CC_SLAVE_SEND = 1 << 1, MLX5_QP_BIT_CC_MASTER = 1 << 0 }; enum { MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2, MLX5_WQE_CTRL_SOLICITED = 1 << 1, }; enum { MLX5_SEND_WQE_DS = 16, MLX5_SEND_WQE_BB = 64, }; #define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS) enum { MLX5_SEND_WQE_MAX_WQEBBS = 16, }; enum { MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27, MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29, MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30, MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31 }; enum { MLX5_FENCE_MODE_NONE = 0 << 5, MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, MLX5_FENCE_MODE_FENCE = 2 << 5, MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, }; enum { MLX5_RCV_DBR = 0, MLX5_SND_DBR = 1, }; enum { MLX5_FLAGS_INLINE = 1<<7, MLX5_FLAGS_CHECK_FREE = 1<<5, }; struct 
mlx5_wqe_fmr_seg { __be32 flags; __be32 mem_key; __be64 buf_list; __be64 start_addr; __be64 reg_len; __be32 offset; __be32 page_size; u32 reserved[2]; }; struct mlx5_wqe_ctrl_seg { __be32 opmod_idx_opcode; __be32 qpn_ds; u8 signature; u8 rsvd[2]; u8 fm_ce_se; union { __be32 general_id; __be32 imm; __be32 umr_mkey; __be32 tisn; }; }; #define MLX5_WQE_CTRL_DS_MASK 0x3f #define MLX5_WQE_CTRL_QPN_MASK 0xffffff00 #define MLX5_WQE_CTRL_QPN_SHIFT 8 #define MLX5_WQE_DS_UNITS 16 #define MLX5_WQE_CTRL_OPCODE_MASK 0xff #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 enum { MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5, MLX5_ETH_WQE_L3_CSUM = 1 << 6, MLX5_ETH_WQE_L4_CSUM = 1 << 7, }; enum { MLX5_ETH_WQE_SVLAN = 1 << 0, MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, }; enum { MLX5_ETH_WQE_SWP_INNER_L3_IPV6 = 1 << 0, MLX5_ETH_WQE_SWP_INNER_L4_UDP = 1 << 1, MLX5_ETH_WQE_SWP_OUTER_L3_IPV6 = 1 << 4, MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5, }; struct mlx5_wqe_eth_seg { u8 swp_outer_l4_offset; u8 swp_outer_l3_offset; u8 swp_inner_l4_offset; u8 swp_inner_l3_offset; u8 cs_flags; u8 swp_flags; __be16 mss; __be32 rsvd2; union { struct { __be16 sz; union { u8 start[2]; DECLARE_FLEX_ARRAY(u8, data); }; } inline_hdr; struct { __be16 type; __be16 vlan_tci; } insert; }; }; struct mlx5_wqe_xrc_seg { __be32 xrc_srqn; u8 rsvd[12]; }; struct mlx5_wqe_masked_atomic_seg { __be64 swap_add; __be64 compare; __be64 swap_add_mask; __be64 compare_mask; }; struct mlx5_base_av { union { struct { __be32 qkey; __be32 reserved; } qkey; __be64 dc_key; } key; __be32 dqp_dct; u8 stat_rate_sl; u8 fl_mlid; union { __be16 rlid; __be16 udp_sport; }; }; struct mlx5_av { union { struct { __be32 qkey; __be32 reserved; } qkey; __be64 dc_key; } key; __be32 dqp_dct; u8 stat_rate_sl; u8 fl_mlid; union { __be16 rlid; __be16 udp_sport; }; u8 reserved0[4]; u8 rmac[6]; u8 tclass; u8 hop_limit; __be32 grh_gid_fl; u8 rgid[16]; }; struct mlx5_ib_ah { struct ib_ah ibah; struct mlx5_av av; }; static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah) { return container_of(ibah, struct mlx5_ib_ah, ibah); } struct mlx5_wqe_datagram_seg { struct mlx5_av av; }; struct mlx5_wqe_raddr_seg { __be64 raddr; __be32 rkey; u32 reserved; }; struct mlx5_wqe_atomic_seg { __be64 swap_add; __be64 compare; }; struct mlx5_wqe_data_seg { __be32 byte_count; __be32 lkey; __be64 addr; }; struct mlx5_wqe_umr_ctrl_seg { u8 flags; u8 rsvd0[3]; __be16 xlt_octowords; union { __be16 xlt_offset; __be16 bsf_octowords; }; __be64 mkey_mask; __be32 xlt_offset_47_16; u8 rsvd1[28]; }; struct mlx5_seg_set_psv { __be32 psv_num; __be16 syndrome; __be16 status; __be32 transient_sig; __be32 ref_tag; }; struct mlx5_seg_get_psv { u8 rsvd[19]; u8 num_psv; __be32 l_key; __be64 va; __be32 psv_index[4]; }; struct mlx5_seg_check_psv { u8 rsvd0[2]; __be16 err_coalescing_op; u8 rsvd1[2]; __be16 xport_err_op; u8 rsvd2[2]; __be16 xport_err_mask; u8 rsvd3[7]; u8 num_psv; __be32 l_key; __be64 va; __be32 psv_index[4]; }; struct mlx5_rwqe_sig { u8 rsvd0[4]; u8 signature; u8 rsvd1[11]; }; struct mlx5_wqe_signature_seg { u8 rsvd0[4]; u8 signature; u8 rsvd1[11]; }; #define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff struct mlx5_wqe_inline_seg { __be32 byte_count; __be32 data[0]; }; enum mlx5_sig_type { MLX5_DIF_CRC = 0x1, MLX5_DIF_IPCS = 0x2, }; struct mlx5_bsf_inl { __be16 vld_refresh; __be16 dif_apptag; __be32 dif_reftag; u8 sig_type; u8 rp_inv_seed; u8 rsvd[3]; u8 dif_inc_ref_guard_check; __be16 dif_app_bitmask_check; }; struct mlx5_bsf { struct 
mlx5_bsf_basic { u8 bsf_size_sbs; u8 check_byte_mask; union { u8 copy_byte_mask; u8 bs_selector; u8 rsvd_wflags; } wire; union { u8 bs_selector; u8 rsvd_mflags; } mem; __be32 raw_data_size; __be32 w_bfs_psv; __be32 m_bfs_psv; } basic; struct mlx5_bsf_ext { __be32 t_init_gen_pro_size; __be32 rsvd_epi_size; __be32 w_tfs_psv; __be32 m_tfs_psv; } ext; struct mlx5_bsf_inl w_inl; struct mlx5_bsf_inl m_inl; }; struct mlx5_mtt { __be64 ptag; }; struct mlx5_klm { __be32 bcount; __be32 key; __be64 va; }; struct mlx5_stride_block_entry { __be16 stride; __be16 bcount; __be32 key; __be64 va; }; struct mlx5_stride_block_ctrl_seg { __be32 bcount_per_cycle; __be32 op; __be32 repeat_count; u16 rsvd; __be16 num_entries; }; struct mlx5_core_qp { struct mlx5_core_rsc_common common; /* must be first */ void (*event) (struct mlx5_core_qp *, int); int qpn; struct mlx5_rsc_debug *dbg; int pid; u16 uid; }; struct mlx5_core_dct { struct mlx5_core_qp mqp; struct completion drained; }; struct mlx5_qp_path { u8 fl_free_ar; u8 rsvd3; __be16 pkey_index; u8 rsvd0; u8 grh_mlid; __be16 rlid; u8 ackto_lt; u8 mgid_index; u8 static_rate; u8 hop_limit; __be32 tclass_flowlabel; union { u8 rgid[16]; u8 rip[16]; }; u8 f_dscp_ecn_prio; u8 ecn_dscp; __be16 udp_sport; u8 dci_cfi_prio_sl; u8 port; u8 rmac[6]; }; /* FIXME: use mlx5_ifc.h qpc */ struct mlx5_qp_context { __be32 flags; __be32 flags_pd; u8 mtu_msgmax; u8 rq_size_stride; __be16 sq_crq_size; __be32 qp_counter_set_usr_page; __be32 wire_qpn; __be32 log_pg_sz_remote_qpn; struct mlx5_qp_path pri_path; struct mlx5_qp_path alt_path; __be32 params1; u8 reserved2[4]; __be32 next_send_psn; __be32 cqn_send; __be32 deth_sqpn; u8 reserved3[4]; __be32 last_acked_psn; __be32 ssn; __be32 params2; __be32 rnr_nextrecvpsn; __be32 xrcd; __be32 cqn_recv; __be64 db_rec_addr; __be32 qkey; __be32 rq_type_srqn; __be32 rmsn; __be16 hw_sq_wqe_counter; __be16 sw_sq_wqe_counter; __be16 hw_rcyclic_byte_counter; __be16 hw_rq_counter; __be16 sw_rcyclic_byte_counter; __be16 sw_rq_counter; u8 rsvd0[5]; u8 cgs; u8 cs_req; u8 cs_res; __be64 dc_access_key; u8 rsvd1[24]; }; static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) { return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); } int mlx5_core_create_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *qp, u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, u32 *in, int inlen); int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, u32 opt_param_mask, void *qpc, struct mlx5_core_qp *qp); int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct); int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, u32 *out, int outlen); int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, u32 *out, int outlen); int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, u32 timeout_usec); int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); void mlx5_init_qp_table(struct mlx5_core_dev *dev); void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *rq); void mlx5_core_destroy_rq_tracked(struct 
mlx5_core_dev *dev, struct mlx5_core_qp *rq); int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, struct mlx5_core_qp *sq); void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, struct mlx5_core_qp *sq); int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id); int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, int reset, void *out, int out_size); struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev, int res_num, enum mlx5_res_type res_type); void mlx5_core_res_put(struct mlx5_core_rsc_common *res); static inline const char *mlx5_qp_type_str(int type) { switch (type) { case MLX5_QP_ST_RC: return "RC"; case MLX5_QP_ST_UC: return "UC"; case MLX5_QP_ST_UD: return "UD"; case MLX5_QP_ST_XRC: return "XRC"; case MLX5_QP_ST_MLX: return "MLX"; case MLX5_QP_ST_QP0: return "QP0"; case MLX5_QP_ST_QP1: return "QP1"; case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE"; case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6"; case MLX5_QP_ST_SNIFFER: return "SNIFFER"; case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR"; case MLX5_QP_ST_PTP_1588: return "PTP_1588"; case MLX5_QP_ST_REG_UMR: return "REG_UMR"; default: return "Invalid transport type"; } } static inline const char *mlx5_qp_state_str(int state) { switch (state) { case MLX5_QP_STATE_RST: return "RST"; case MLX5_QP_STATE_INIT: return "INIT"; case MLX5_QP_STATE_RTR: return "RTR"; case MLX5_QP_STATE_RTS: return "RTS"; case MLX5_QP_STATE_SQER: return "SQER"; case MLX5_QP_STATE_SQD: return "SQD"; case MLX5_QP_STATE_ERR: return "ERR"; case MLX5_QP_STATE_SQ_DRAINING: return "SQ_DRAINING"; case MLX5_QP_STATE_SUSPENDED: return "SUSPENDED"; default: return "Invalid QP state"; } } #endif /* MLX5_QP_H */ mlx5/transobj.h 0000644 00000007264 14722070374 0007443 0 ustar 00 /* * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/ #ifndef __TRANSOBJ_H__ #define __TRANSOBJ_H__ #include <linux/mlx5/driver.h> int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn); void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn); int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn); int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out); int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn); int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out); int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state); int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn); int mlx5_core_create_tir_out(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, int inlen); void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn); int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, int inlen); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn); int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, int inlen); void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); struct mlx5_hairpin_params { u8 log_data_size; u8 log_num_packets; u16 q_counter; int num_channels; }; struct mlx5_hairpin { struct mlx5_core_dev *func_mdev; struct mlx5_core_dev *peer_mdev; int num_channels; u32 *rqn; u32 *sqn; bool peer_gone; }; struct mlx5_hairpin * mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev, struct mlx5_core_dev *peer_mdev, struct mlx5_hairpin_params *params); void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair); void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp); #endif /* __TRANSOBJ_H__ */ mlx5/cmd.h 0000644 00000003225 14722070374 0006355 0 ustar 00 /* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef MLX5_CMD_H #define MLX5_CMD_H #include <linux/types.h> struct manage_pages_layout { u64 ptr; u32 reserved; u16 num_entries; u16 func_id; }; struct mlx5_cmd_alloc_uar_imm_out { u32 rsvd[3]; u32 uarn; }; #endif /* MLX5_CMD_H */ mlx5/vport.h 0000644 00000013471 14722070374 0006770 0 ustar 00 /* * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifndef __MLX5_VPORT_H__ #define __MLX5_VPORT_H__ #include <linux/mlx5/driver.h> #include <linux/mlx5/device.h> #define MLX5_VPORT_PF_PLACEHOLDER (1u) #define MLX5_VPORT_UPLINK_PLACEHOLDER (1u) #define MLX5_VPORT_ECPF_PLACEHOLDER(mdev) (mlx5_ecpf_vport_exists(mdev)) #define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER + \ MLX5_VPORT_UPLINK_PLACEHOLDER + \ MLX5_VPORT_ECPF_PLACEHOLDER(mdev)) #define MLX5_VPORT_MANAGER(mdev) \ (MLX5_CAP_GEN(mdev, vport_group_manager) && \ (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ mlx5_core_is_pf(mdev)) enum { MLX5_CAP_INLINE_MODE_L2, MLX5_CAP_INLINE_MODE_VPORT_CONTEXT, MLX5_CAP_INLINE_MODE_NOT_REQUIRED, }; /* The vport number for each function must remain unchanged */ enum { MLX5_VPORT_PF = 0x0, MLX5_VPORT_FIRST_VF = 0x1, MLX5_VPORT_ECPF = 0xfffe, MLX5_VPORT_UPLINK = 0xffff }; u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 other_vport, u8 state); int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, bool other, u8 *addr); int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr); int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 *min_inline); void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, u16 vport, u8 *addr); int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, u64 *system_image_guid); int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, u16 vport, u64 node_guid); int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, u16 *qkey_viol_cntr); int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, u16 vf_num, u16 gid_index, union ib_gid *gid); int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, u16 vf_num, u16 pkey_index, u16 *pkey); int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, u16 vf_num, struct mlx5_hca_vport_context *rep); int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, u64 *sys_image_guid); int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid); int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, u16 vport, enum mlx5_list_type list_type, u8 addr_list[][ETH_ALEN], int *list_size); int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, enum mlx5_list_type list_type, u8 addr_list[][ETH_ALEN], int list_size); int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, u16 vport, int *promisc_uc, int *promisc_mc, int *promisc_all); int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, int promisc_uc, int promisc_mc, int promisc_all); int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, u16 vlans[], int list_size); int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev); int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev); int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, u8 other_vport, u64 *rx_discard_vport_down, u64 *tx_discard_vport_down); int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, int vf,
u8 port_num, void *out, size_t out_sz); int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, int vf, struct mlx5_hca_vport_context *req); int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable); int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status); int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, struct mlx5_core_dev *port_mdev); int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev); u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev); #endif /* __MLX5_VPORT_H__ */ tpm_command.h 0000644 00000001517 14722070374 0007225 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_TPM_COMMAND_H__ #define __LINUX_TPM_COMMAND_H__ /* * TPM Command constants from specifications at * http://www.trustedcomputinggroup.org */ /* Command TAGS */ #define TPM_TAG_RQU_COMMAND 193 #define TPM_TAG_RQU_AUTH1_COMMAND 194 #define TPM_TAG_RQU_AUTH2_COMMAND 195 #define TPM_TAG_RSP_COMMAND 196 #define TPM_TAG_RSP_AUTH1_COMMAND 197 #define TPM_TAG_RSP_AUTH2_COMMAND 198 /* Command Ordinals */ #define TPM_ORD_GETRANDOM 70 #define TPM_ORD_OSAP 11 #define TPM_ORD_OIAP 10 #define TPM_ORD_SEAL 23 #define TPM_ORD_UNSEAL 24 /* Other constants */ #define SRKHANDLE 0x40000000 #define TPM_NONCE_SIZE 20 #endif signalfd.h 0000644 00000001461 14722070374 0006514 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/signalfd.h * * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> * */ #ifndef _LINUX_SIGNALFD_H #define _LINUX_SIGNALFD_H #include <uapi/linux/signalfd.h> #include <linux/sched/signal.h> #ifdef CONFIG_SIGNALFD /* * Deliver the signal to listening signalfd. */ static inline void signalfd_notify(struct task_struct *tsk, int sig) { if (unlikely(waitqueue_active(&tsk->sighand->signalfd_wqh))) wake_up(&tsk->sighand->signalfd_wqh); } extern void signalfd_cleanup(struct sighand_struct *sighand); #else /* CONFIG_SIGNALFD */ static inline void signalfd_notify(struct task_struct *tsk, int sig) { } static inline void signalfd_cleanup(struct sighand_struct *sighand) { } #endif /* CONFIG_SIGNALFD */ #endif /* _LINUX_SIGNALFD_H */ io-64-nonatomic-lo-hi.h 0000644 00000004630 14722070374 0010557 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IO_64_NONATOMIC_LO_HI_H_ #define _LINUX_IO_64_NONATOMIC_LO_HI_H_ #include <linux/io.h> #include <asm-generic/int-ll64.h> static inline __u64 lo_hi_readq(const volatile void __iomem *addr) { const volatile u32 __iomem *p = addr; u32 low, high; low = readl(p); high = readl(p + 1); return low + ((u64)high << 32); } static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr) { writel(val, addr); writel(val >> 32, addr + 4); } static inline __u64 lo_hi_readq_relaxed(const volatile void __iomem *addr) { const volatile u32 __iomem *p = addr; u32 low, high; low = readl_relaxed(p); high = readl_relaxed(p + 1); return low + ((u64)high << 32); } static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) { writel_relaxed(val, addr); writel_relaxed(val >> 32, addr + 4); } #ifndef readq #define readq lo_hi_readq #endif #ifndef writeq #define writeq lo_hi_writeq #endif #ifndef readq_relaxed #define readq_relaxed lo_hi_readq_relaxed #endif #ifndef writeq_relaxed #define writeq_relaxed lo_hi_writeq_relaxed #endif #ifndef ioread64_lo_hi #define ioread64_lo_hi ioread64_lo_hi static inline u64 ioread64_lo_hi(void __iomem *addr) { u32 low, high; low = ioread32(addr); high = ioread32(addr + 
sizeof(u32)); return low + ((u64)high << 32); } #endif #ifndef iowrite64_lo_hi #define iowrite64_lo_hi iowrite64_lo_hi static inline void iowrite64_lo_hi(u64 val, void __iomem *addr) { iowrite32(val, addr); iowrite32(val >> 32, addr + sizeof(u32)); } #endif #ifndef ioread64be_lo_hi #define ioread64be_lo_hi ioread64be_lo_hi static inline u64 ioread64be_lo_hi(void __iomem *addr) { u32 low, high; low = ioread32be(addr + sizeof(u32)); high = ioread32be(addr); return low + ((u64)high << 32); } #endif #ifndef iowrite64be_lo_hi #define iowrite64be_lo_hi iowrite64be_lo_hi static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr) { iowrite32be(val, addr + sizeof(u32)); iowrite32be(val >> 32, addr); } #endif #ifndef ioread64 #define ioread64_is_nonatomic #define ioread64 ioread64_lo_hi #endif #ifndef iowrite64 #define iowrite64_is_nonatomic #define iowrite64 iowrite64_lo_hi #endif #ifndef ioread64be #define ioread64be_is_nonatomic #define ioread64be ioread64be_lo_hi #endif #ifndef iowrite64be #define iowrite64be_is_nonatomic #define iowrite64be iowrite64be_lo_hi #endif #endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ page-flags-layout.h 0000644 00000006437 14722070374 0010256 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef PAGE_FLAGS_LAYOUT_H #define PAGE_FLAGS_LAYOUT_H #include <linux/numa.h> #include <generated/bounds.h> /* * When a memory allocation must conform to specific limitations (such * as being suitable for DMA) the caller will pass in hints to the * allocator in the gfp_mask, in the zone modifier bits. These bits * are used to select a priority ordered list of memory zones which * match the requested limits. See gfp_zone() in include/linux/gfp.h */ #if MAX_NR_ZONES < 2 #define ZONES_SHIFT 0 #elif MAX_NR_ZONES <= 2 #define ZONES_SHIFT 1 #elif MAX_NR_ZONES <= 4 #define ZONES_SHIFT 2 #elif MAX_NR_ZONES <= 8 #define ZONES_SHIFT 3 #else #error ZONES_SHIFT -- too many zones configured adjust calculation #endif #ifdef CONFIG_SPARSEMEM #include <asm/sparsemem.h> /* SECTION_SHIFT #bits space required to store a section # */ #define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) #endif /* CONFIG_SPARSEMEM */ #ifndef BUILD_VDSO32_64 /* * page->flags layout: * * There are five possibilities for how page->flags get laid out. The first * pair is for the normal case without sparsemem. The second pair is for * sparsemem when there is plenty of space for node and section information. * The last is when there is insufficient space in page->flags and a separate * lookup is necessary. * * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS | * " plus space for last_cpupid: | NODE | ZONE | LAST_CPUPID ... | FLAGS | * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS | * " plus space for last_cpupid: | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS | * classic sparse no space for node: | SECTION | ZONE | ... 
| FLAGS | */ #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define SECTIONS_WIDTH SECTIONS_SHIFT #else #define SECTIONS_WIDTH 0 #endif #define ZONES_WIDTH ZONES_SHIFT #if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS #define NODES_WIDTH NODES_SHIFT #else #ifdef CONFIG_SPARSEMEM_VMEMMAP #error "Vmemmap: No space for nodes field in page flags" #endif #define NODES_WIDTH 0 #endif #ifdef CONFIG_NUMA_BALANCING #define LAST__PID_SHIFT 8 #define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1) #define LAST__CPU_SHIFT NR_CPUS_BITS #define LAST__CPU_MASK ((1 << LAST__CPU_SHIFT)-1) #define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT) #else #define LAST_CPUPID_SHIFT 0 #endif #ifdef CONFIG_KASAN_SW_TAGS #define KASAN_TAG_WIDTH 8 #else #define KASAN_TAG_WIDTH 0 #endif #if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \ <= BITS_PER_LONG - NR_PAGEFLAGS #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT #else #define LAST_CPUPID_WIDTH 0 #endif #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \ > BITS_PER_LONG - NR_PAGEFLAGS #error "Not enough bits in page flags" #endif /* * We are going to use the flags for the page to node mapping if it's in * there. This includes the case where there is no node, so it is implicit. */ #if !(NODES_WIDTH > 0 || NODES_SHIFT == 0) #define NODE_NOT_IN_PAGE_FLAGS #endif #if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0 #define LAST_CPUPID_NOT_IN_PAGE_FLAGS #endif #endif #endif /* PAGE_FLAGS_LAYOUT_H */ dw_apb_timer.h 0000644 00000003075 14722070374 0007364 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * (C) Copyright 2009 Intel Corporation * Author: Jacob Pan (jacob.jun.pan@intel.com) * * Shared with ARM platforms, Jamie Iles, Picochip 2011 * * Support for the Synopsys DesignWare APB Timers.
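 *
 * A minimal usage sketch (illustrative only; "tmr_base", "TMR_IRQ" and
 * "TMR_FREQ" below are hypothetical placeholders, not part of this
 * header):
 *
 *	struct dw_apb_clock_event_device *ced;
 *
 *	ced = dw_apb_clockevent_init(0, "dw_apb_timer", 300, tmr_base,
 *				     TMR_IRQ, TMR_FREQ);
 *	if (ced)
 *		dw_apb_clockevent_register(ced);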
*/ #ifndef __DW_APB_TIMER_H__ #define __DW_APB_TIMER_H__ #include <linux/clockchips.h> #include <linux/clocksource.h> #include <linux/interrupt.h> #define APBTMRS_REG_SIZE 0x14 struct dw_apb_timer { void __iomem *base; unsigned long freq; int irq; }; struct dw_apb_clock_event_device { struct clock_event_device ced; struct dw_apb_timer timer; struct irqaction irqaction; void (*eoi)(struct dw_apb_timer *); }; struct dw_apb_clocksource { struct dw_apb_timer timer; struct clocksource cs; }; void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced); void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced); void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced); void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced); struct dw_apb_clock_event_device * dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, void __iomem *base, int irq, unsigned long freq); struct dw_apb_clocksource * dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, unsigned long freq); void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); #endif /* __DW_APB_TIMER_H__ */ wm97xx.h 0000644 00000025154 14722070374 0006115 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Register bits and API for Wolfson WM97xx series of codecs */ #ifndef _LINUX_WM97XX_H #define _LINUX_WM97XX_H #include <sound/core.h> #include <sound/pcm.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #include <linux/types.h> #include <linux/list.h> #include <linux/input.h> /* Input device layer */ #include <linux/platform_device.h> /* * WM97xx variants */ #define WM97xx_GENERIC 0x0000 #define WM97xx_WM1613 0x1613 /* * WM97xx AC97 Touchscreen registers */ #define AC97_WM97XX_DIGITISER1 0x76 #define AC97_WM97XX_DIGITISER2 0x78 #define AC97_WM97XX_DIGITISER_RD 0x7a #define AC97_WM9713_DIG1 0x74 #define AC97_WM9713_DIG2 AC97_WM97XX_DIGITISER1 #define AC97_WM9713_DIG3 AC97_WM97XX_DIGITISER2 /* * WM97xx register bits */ #define WM97XX_POLL 0x8000 /* initiate a polling measurement */ #define WM97XX_ADCSEL_X 0x1000 /* x coord measurement */ #define WM97XX_ADCSEL_Y 0x2000 /* y coord measurement */ #define WM97XX_ADCSEL_PRES 0x3000 /* pressure measurement */ #define WM97XX_AUX_ID1 0x4000 #define WM97XX_AUX_ID2 0x5000 #define WM97XX_AUX_ID3 0x6000 #define WM97XX_AUX_ID4 0x7000 #define WM97XX_ADCSEL_MASK 0x7000 /* ADC selection mask */ #define WM97XX_COO 0x0800 /* enable coordinate mode */ #define WM97XX_CTC 0x0400 /* enable continuous mode */ #define WM97XX_CM_RATE_93 0x0000 /* 93.75Hz continuous rate */ #define WM97XX_CM_RATE_187 0x0100 /* 187.5Hz continuous rate */ #define WM97XX_CM_RATE_375 0x0200 /* 375Hz continuous rate */ #define WM97XX_CM_RATE_750 0x0300 /* 750Hz continuous rate */ #define WM97XX_CM_RATE_8K 0x00f0 /* 8kHz continuous rate */ #define WM97XX_CM_RATE_12K 0x01f0 /* 12kHz continuous rate */ #define WM97XX_CM_RATE_24K 0x02f0 /* 24kHz continuous rate */ #define WM97XX_CM_RATE_48K 0x03f0 /* 48kHz continuous rate */ #define WM97XX_CM_RATE_MASK 0x03f0 #define WM97XX_RATE(i) (((i & 3) << 8) | ((i & 4) ? 
0xf0 : 0)) #define WM97XX_DELAY(i) ((i << 4) & 0x00f0) /* sample delay times */ #define WM97XX_DELAY_MASK 0x00f0 #define WM97XX_SLEN 0x0008 /* slot read back enable */ #define WM97XX_SLT(i) ((i - 5) & 0x7) /* panel slot (5-11) */ #define WM97XX_SLT_MASK 0x0007 #define WM97XX_PRP_DETW 0x4000 /* detect on, digitise off, wake */ #define WM97XX_PRP_DET 0x8000 /* detect on, digitise off, no wake */ #define WM97XX_PRP_DET_DIG 0xc000 /* detect on, digitise on */ #define WM97XX_RPR 0x2000 /* wake up on pen down */ #define WM97XX_PEN_DOWN 0x8000 /* pen is down */ /* WM9712 Bits */ #define WM9712_45W 0x1000 /* set for 5-wire touchscreen */ #define WM9712_PDEN 0x0800 /* measure only when pen down */ #define WM9712_WAIT 0x0200 /* wait until adc is read before next sample */ #define WM9712_PIL 0x0100 /* current used for pressure measurement. set 400uA else 200uA */ #define WM9712_MASK_HI 0x0040 /* hi on mask pin (47) stops conversions */ #define WM9712_MASK_EDGE 0x0080 /* rising/falling edge on pin delays sample */ #define WM9712_MASK_SYNC 0x00c0 /* rising/falling edge on mask initiates sample */ #define WM9712_RPU(i) (i&0x3f) /* internal pull up on pen detect (64k / rpu) */ #define WM9712_PD(i) (0x1 << i) /* power management */ /* WM9712 Registers */ #define AC97_WM9712_POWER 0x24 #define AC97_WM9712_REV 0x58 /* WM9705 Bits */ #define WM9705_PDEN 0x1000 /* measure only when pen is down */ #define WM9705_PINV 0x0800 /* inverts sense of pen down output */ #define WM9705_BSEN 0x0400 /* BUSY flag enable, pin47 is 1 when busy */ #define WM9705_BINV 0x0200 /* invert BUSY (pin47) output */ #define WM9705_WAIT 0x0100 /* wait until adc is read before next sample */ #define WM9705_PIL 0x0080 /* current used for pressure measurement. set 400uA else 200uA */ #define WM9705_PHIZ 0x0040 /* set PHONE and PCBEEP inputs to high impedance */ #define WM9705_MASK_HI 0x0010 /* hi on mask stops conversions */ #define WM9705_MASK_EDGE 0x0020 /* rising/falling edge on pin delays sample */ #define WM9705_MASK_SYNC 0x0030 /* rising/falling edge on mask initiates sample */ #define WM9705_PDD(i) (i & 0x000f) /* pen detect comparator threshold */ /* WM9713 Bits */ #define WM9713_PDPOL 0x0400 /* Pen down polarity */ #define WM9713_POLL 0x0200 /* initiate a polling measurement */ #define WM9713_CTC 0x0100 /* enable continuous mode */ #define WM9713_ADCSEL_X 0x0002 /* X measurement */ #define WM9713_ADCSEL_Y 0x0004 /* Y measurement */ #define WM9713_ADCSEL_PRES 0x0008 /* Pressure measurement */ #define WM9713_COO 0x0001 /* enable coordinate mode */ #define WM9713_45W 0x1000 /* set for 5 wire panel */ #define WM9713_PDEN 0x0800 /* measure only when pen down */ #define WM9713_ADCSEL_MASK 0x00fe /* ADC selection mask */ #define WM9713_WAIT 0x0200 /* coordinate wait */ /* AUX ADC ID's */ #define TS_COMP1 0x0 #define TS_COMP2 0x1 #define TS_BMON 0x2 #define TS_WIPER 0x3 /* ID numbers */ #define WM97XX_ID1 0x574d #define WM9712_ID2 0x4c12 #define WM9705_ID2 0x4c05 #define WM9713_ID2 0x4c13 /* Codec GPIO's */ #define WM97XX_MAX_GPIO 16 #define WM97XX_GPIO_1 (1 << 1) #define WM97XX_GPIO_2 (1 << 2) #define WM97XX_GPIO_3 (1 << 3) #define WM97XX_GPIO_4 (1 << 4) #define WM97XX_GPIO_5 (1 << 5) #define WM97XX_GPIO_6 (1 << 6) #define WM97XX_GPIO_7 (1 << 7) #define WM97XX_GPIO_8 (1 << 8) #define WM97XX_GPIO_9 (1 << 9) #define WM97XX_GPIO_10 (1 << 10) #define WM97XX_GPIO_11 (1 << 11) #define WM97XX_GPIO_12 (1 << 12) #define WM97XX_GPIO_13 (1 << 13) #define WM97XX_GPIO_14 (1 << 14) #define WM97XX_GPIO_15 (1 << 15) #define AC97_LINK_FRAME 21 /* time
in uS for AC97 link frame */ /*---------------- Return codes from sample reading functions ---------------*/ /* More data is available; call the sample gathering function again */ #define RC_AGAIN 0x00000001 /* The returned sample is valid */ #define RC_VALID 0x00000002 /* The pen is up (the first RC_VALID without RC_PENUP means pen is down) */ #define RC_PENUP 0x00000004 /* The pen is down (RC_VALID implies RC_PENDOWN, but sometimes it is helpful to tell the handler that the pen is down but we don't yet know its coords, so the handler should not sleep or wait for pendown irq) */ #define RC_PENDOWN 0x00000008 /* * The wm97xx driver provides a private API for writing platform-specific * drivers. */ /* The structure used to return arch specific sampled data into */ struct wm97xx_data { int x; int y; int p; }; /* * Codec GPIO status */ enum wm97xx_gpio_status { WM97XX_GPIO_HIGH, WM97XX_GPIO_LOW }; /* * Codec GPIO direction */ enum wm97xx_gpio_dir { WM97XX_GPIO_IN, WM97XX_GPIO_OUT }; /* * Codec GPIO polarity */ enum wm97xx_gpio_pol { WM97XX_GPIO_POL_HIGH, WM97XX_GPIO_POL_LOW }; /* * Codec GPIO sticky */ enum wm97xx_gpio_sticky { WM97XX_GPIO_STICKY, WM97XX_GPIO_NOTSTICKY }; /* * Codec GPIO wake */ enum wm97xx_gpio_wake { WM97XX_GPIO_WAKE, WM97XX_GPIO_NOWAKE }; /* * Digitiser ioctl commands */ #define WM97XX_DIG_START 0x1 #define WM97XX_DIG_STOP 0x2 #define WM97XX_PHY_INIT 0x3 #define WM97XX_AUX_PREPARE 0x4 #define WM97XX_DIG_RESTORE 0x5 struct wm97xx; extern struct wm97xx_codec_drv wm9705_codec; extern struct wm97xx_codec_drv wm9712_codec; extern struct wm97xx_codec_drv wm9713_codec; /* * Codec driver interface - allows mapping to WM9705/12/13 and newer codecs */ struct wm97xx_codec_drv { u16 id; char *name; /* read 1 sample */ int (*poll_sample) (struct wm97xx *, int adcsel, int *sample); /* read X,Y,[P] in poll */ int (*poll_touch) (struct wm97xx *, struct wm97xx_data *); int (*acc_enable) (struct wm97xx *, int enable); void (*phy_init) (struct wm97xx *); void (*dig_enable) (struct wm97xx *, int enable); void (*dig_restore) (struct wm97xx *); void (*aux_prepare) (struct wm97xx *); }; /* Machine specific and accelerated touch operations */ struct wm97xx_mach_ops { /* accelerated touch readback - coords are transmitted on AC97 link */ int acc_enabled; void (*acc_pen_up) (struct wm97xx *); int (*acc_pen_down) (struct wm97xx *); int (*acc_startup) (struct wm97xx *); void (*acc_shutdown) (struct wm97xx *); /* interrupt mask control - required for accelerated operation */ void (*irq_enable) (struct wm97xx *, int enable); /* GPIO pin used for accelerated operation */ int irq_gpio; /* pre and post sample - can be used to minimise any analog noise */ void (*pre_sample) (int); /* function to run before sampling */ void (*post_sample) (int); /* function to run after sampling */ }; struct wm97xx { u16 dig[3], id, gpio[6], misc; /* Cached codec registers */ u16 dig_save[3]; /* saved during aux reading */ struct wm97xx_codec_drv *codec; /* attached codec driver*/ struct input_dev *input_dev; /* touchscreen input device */ struct snd_ac97 *ac97; /* ALSA codec access */ struct device *dev; /* ALSA device */ struct platform_device *battery_dev; struct platform_device *touch_dev; struct wm97xx_mach_ops *mach_ops; struct mutex codec_mutex; struct delayed_work ts_reader; /* Used to poll touchscreen */ unsigned long ts_reader_interval; /* Current interval for timer */ unsigned long ts_reader_min_interval; /* Minimum interval */ unsigned int pen_irq; /* Pen IRQ number in use */ struct workqueue_struct *ts_workq;
struct work_struct pen_event_work; u16 acc_slot; /* AC97 slot used for acc touch data */ u16 acc_rate; /* acc touch data rate */ unsigned pen_is_down:1; /* Pen is down */ unsigned aux_waiting:1; /* aux measurement waiting */ unsigned pen_probably_down:1; /* used in polling mode */ u16 variant; /* WM97xx chip variant */ u16 suspend_mode; /* PRP in suspend mode */ }; struct wm97xx_batt_pdata { int batt_aux; int temp_aux; int charge_gpio; int min_voltage; int max_voltage; int batt_div; int batt_mult; int temp_div; int temp_mult; int batt_tech; char *batt_name; }; struct wm97xx_pdata { struct wm97xx_batt_pdata *batt_pdata; /* battery data */ }; /* * Codec GPIO access (not supported on WM9705) * This can be used to set/get codec GPIO and Virtual GPIO status. */ enum wm97xx_gpio_status wm97xx_get_gpio(struct wm97xx *wm, u32 gpio); void wm97xx_set_gpio(struct wm97xx *wm, u32 gpio, enum wm97xx_gpio_status status); void wm97xx_config_gpio(struct wm97xx *wm, u32 gpio, enum wm97xx_gpio_dir dir, enum wm97xx_gpio_pol pol, enum wm97xx_gpio_sticky sticky, enum wm97xx_gpio_wake wake); void wm97xx_set_suspend_mode(struct wm97xx *wm, u16 mode); /* codec AC97 IO access */ int wm97xx_reg_read(struct wm97xx *wm, u16 reg); void wm97xx_reg_write(struct wm97xx *wm, u16 reg, u16 val); /* aux adc readback */ int wm97xx_read_aux_adc(struct wm97xx *wm, u16 adcsel); /* machine ops */ int wm97xx_register_mach_ops(struct wm97xx *, struct wm97xx_mach_ops *); void wm97xx_unregister_mach_ops(struct wm97xx *); #endif ns_common.h 0000644 00000000353 14722070374 0006714 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NS_COMMON_H #define _LINUX_NS_COMMON_H struct proc_ns_operations; struct ns_common { atomic_long_t stashed; const struct proc_ns_operations *ops; unsigned int inum; }; #endif atm.h 0000644 00000000437 14722070374 0005510 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* atm.h - general ATM declarations */ #ifndef _LINUX_ATM_H #define _LINUX_ATM_H #include <uapi/linux/atm.h> #ifdef CONFIG_COMPAT #include <linux/compat.h> struct compat_atmif_sioc { int number; int length; compat_uptr_t arg; }; #endif #endif w1.h 0000644 00000021600 14722070374 0005251 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> */ #ifndef __LINUX_W1_H #define __LINUX_W1_H #include <linux/device.h> /** * struct w1_reg_num - broken out slave device id * * @family: identifies the type of device * @id: along with family is the unique device id * @crc: checksum of the other bytes */ struct w1_reg_num { #if defined(__LITTLE_ENDIAN_BITFIELD) __u64 family:8, id:48, crc:8; #elif defined(__BIG_ENDIAN_BITFIELD) __u64 crc:8, id:48, family:8; #else #error "Please fix <asm/byteorder.h>" #endif }; #ifdef __KERNEL__ #define W1_MAXNAMELEN 32 #define W1_SEARCH 0xF0 #define W1_ALARM_SEARCH 0xEC #define W1_CONVERT_TEMP 0x44 #define W1_SKIP_ROM 0xCC #define W1_COPY_SCRATCHPAD 0x48 #define W1_WRITE_SCRATCHPAD 0x4E #define W1_READ_SCRATCHPAD 0xBE #define W1_READ_ROM 0x33 #define W1_READ_PSUPPLY 0xB4 #define W1_MATCH_ROM 0x55 #define W1_RESUME_CMD 0xA5 /** * struct w1_slave - holds a single slave device on the bus * * @owner: Points to the one wire "wire" kernel module. * @name: Device id is ascii. 
* @w1_slave_entry: data for the linked list * @reg_num: the slave id in binary * @refcnt: reference count, delete when 0 * @flags: bit flags for W1_SLAVE_ACTIVE W1_SLAVE_DETACH * @ttl: decrement per search this slave isn't found, detach at 0 * @master: bus which this slave is on * @family: module for device family type * @family_data: pointer for use by the family module * @dev: kernel device identifier * @hwmon: pointer to hwmon device * */ struct w1_slave { struct module *owner; unsigned char name[W1_MAXNAMELEN]; struct list_head w1_slave_entry; struct w1_reg_num reg_num; atomic_t refcnt; int ttl; unsigned long flags; struct w1_master *master; struct w1_family *family; void *family_data; struct device dev; struct device *hwmon; }; typedef void (*w1_slave_found_callback)(struct w1_master *, u64); /** * struct w1_bus_master - operations available on a bus master * * @data: the first parameter in all the functions below * * @read_bit: Sample the line level @return the level read (0 or 1) * * @write_bit: Sets the line level * * @touch_bit: the lowest-level function for devices that really support the * 1-wire protocol. * touch_bit(0) = write-0 cycle * touch_bit(1) = write-1 / read cycle * @return the bit read (0 or 1) * * @read_byte: Reads a byte. Same as 8 touch_bit(1) calls. * @return the byte read * * @write_byte: Writes a byte. Same as 8 touch_bit(x) calls. * * @read_block: Same as a series of read_byte() calls * @return the number of bytes read * * @write_block: Same as a series of write_byte() calls * * @triplet: Combines two reads and a smart write for ROM searches * @return bit0=Id bit1=comp_id bit2=dir_taken * * @reset_bus: long write-0 with a read for the presence pulse detection * @return -1=Error, 0=Device present, 1=No device present * * @set_pullup: Put out a strong pull-up pulse of the specified duration. * @return -1=Error, 0=completed * * @search: Really nice hardware can handle the different types of ROM search * w1_master* is passed to the slave found callback. * u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH * * @dev_id: Optional device id string, which w1 slaves could use for * creating names, which then give a connection to the w1 master * * Note: read_bit and write_bit are very low level functions and should only * be used with hardware that doesn't really support 1-wire operations, * like a parallel/serial port. * Either define read_bit and write_bit OR define, at minimum, touch_bit and * reset_bus.
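 *
 * A minimal sketch of a bit-banged master that provides only touch_bit
 * and reset_bus (illustrative; my_gpio, my_touch_bit and my_reset_bus
 * are hypothetical helpers, not part of this header):
 *
 *	static struct w1_bus_master my_master = {
 *		.data		= &my_gpio,
 *		.touch_bit	= my_touch_bit,
 *		.reset_bus	= my_reset_bus,
 *	};
 *
 *	err = w1_add_master_device(&my_master);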
* */ struct w1_bus_master { void *data; u8 (*read_bit)(void *); void (*write_bit)(void *, u8); u8 (*touch_bit)(void *, u8); u8 (*read_byte)(void *); void (*write_byte)(void *, u8); u8 (*read_block)(void *, u8 *, int); void (*write_block)(void *, const u8 *, int); u8 (*triplet)(void *, u8); u8 (*reset_bus)(void *); u8 (*set_pullup)(void *, int); void (*search)(void *, struct w1_master *, u8, w1_slave_found_callback); char *dev_id; }; /** * enum w1_master_flags - bitfields used in w1_master.flags * @W1_ABORT_SEARCH: abort searching early on shutdown * @W1_WARN_MAX_COUNT: limit warning when the maximum count is reached */ enum w1_master_flags { W1_ABORT_SEARCH = 0, W1_WARN_MAX_COUNT = 1, }; /** * struct w1_master - one per bus master * @w1_master_entry: master linked list * @owner: module owner * @name: dynamically allocated bus name * @list_mutex: protect slist and async_list * @slist: linked list of slaves * @async_list: linked list of netlink commands to execute * @max_slave_count: maximum number of slaves to search for at a time * @slave_count: current number of slaves known * @attempts: number of searches run * @slave_ttl: number of searches before a slave is timed out * @initialized: prevent init/removal race conditions * @id: w1 bus number * @search_count: number of automatic searches to run, -1 unlimited * @search_id: allows continuing a search * @refcnt: reference count * @priv: private data storage * @enable_pullup: allows a strong pullup * @pullup_duration: time for the next strong pullup * @flags: one of w1_master_flags * @thread: thread for bus search and netlink commands * @mutex: protect most of w1_master * @bus_mutex: protect concurrent bus access * @driver: sysfs driver * @dev: sysfs device * @bus_master: io operations available * @seq: sequence number used for netlink broadcasts */ struct w1_master { struct list_head w1_master_entry; struct module *owner; unsigned char name[W1_MAXNAMELEN]; /* list_mutex protects just slist and async_list so slaves can be * searched for and async commands added while the master has * w1_master.mutex locked and is operating on the bus. * lock order w1_mlock, w1_master.mutex, w1_master.list_mutex */ struct mutex list_mutex; struct list_head slist; struct list_head async_list; int max_slave_count, slave_count; unsigned long attempts; int slave_ttl; int initialized; u32 id; int search_count; /* id to start searching on, to continue a search or 0 to restart */ u64 search_id; atomic_t refcnt; void *priv; /** 5V strong pullup enabled flag, 1 enabled, zero disabled. */ int enable_pullup; /** 5V strong pullup duration in milliseconds, zero disabled. */ int pullup_duration; long flags; struct task_struct *thread; struct mutex mutex; struct mutex bus_mutex; struct device_driver *driver; struct device dev; struct w1_bus_master *bus_master; u32 seq; }; int w1_add_master_device(struct w1_bus_master *master); void w1_remove_master_device(struct w1_bus_master *master); /** * struct w1_family_ops - operations for a family type * @add_slave: add_slave * @remove_slave: remove_slave * @groups: sysfs group * @chip_info: pointer to struct hwmon_chip_info */ struct w1_family_ops { int (*add_slave)(struct w1_slave *sl); void (*remove_slave)(struct w1_slave *sl); const struct attribute_group **groups; const struct hwmon_chip_info *chip_info; }; /** * struct w1_family - reference counted family structure.
* @family_entry: family linked list * @fid: 8 bit family identifier * @fops: operations for this family * @refcnt: reference counter */ struct w1_family { struct list_head family_entry; u8 fid; struct w1_family_ops *fops; const struct of_device_id *of_match_table; atomic_t refcnt; }; int w1_register_family(struct w1_family *family); void w1_unregister_family(struct w1_family *family); /** * module_w1_family() - Helper macro for registering a 1-Wire family * @__w1_family: w1_family struct * * Helper macro for 1-Wire families which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_w1_family(__w1_family) \ module_driver(__w1_family, w1_register_family, \ w1_unregister_family) u8 w1_triplet(struct w1_master *dev, int bdir); u8 w1_touch_bit(struct w1_master *dev, int bit); void w1_write_8(struct w1_master *, u8); u8 w1_read_8(struct w1_master *); int w1_reset_bus(struct w1_master *); u8 w1_calc_crc8(u8 *, int); void w1_write_block(struct w1_master *, const u8 *, int); void w1_touch_block(struct w1_master *, u8 *, int); u8 w1_read_block(struct w1_master *, u8 *, int); int w1_reset_select_slave(struct w1_slave *sl); int w1_reset_resume_command(struct w1_master *); void w1_next_pullup(struct w1_master *, int); static inline struct w1_slave* dev_to_w1_slave(struct device *dev) { return container_of(dev, struct w1_slave, dev); } static inline struct w1_slave* kobj_to_w1_slave(struct kobject *kobj) { return dev_to_w1_slave(container_of(kobj, struct device, kobj)); } static inline struct w1_master* dev_to_w1_master(struct device *dev) { return container_of(dev, struct w1_master, dev); } #endif /* __KERNEL__ */ #endif /* __LINUX_W1_H */ lockdep.h 0000644 00000050357 14722070374 0006356 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Runtime locking correctness validator * * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * * see Documentation/locking/lockdep-design.rst for more details. */ #ifndef __LINUX_LOCKDEP_H #define __LINUX_LOCKDEP_H struct task_struct; struct lockdep_map; /* for sysctl */ extern int prove_locking; extern int lock_stat; #define MAX_LOCKDEP_SUBCLASSES 8UL #include <linux/types.h> #ifdef CONFIG_LOCKDEP #include <linux/linkage.h> #include <linux/list.h> #include <linux/debug_locks.h> #include <linux/stacktrace.h> /* * We'd rather not expose kernel/lockdep_states.h this wide, but we do need * the total number of states... :-( */ #define XXX_LOCK_USAGE_STATES (1+2*4) /* * NR_LOCKDEP_CACHING_CLASSES ... Number of classes * cached in the instance of lockdep_map * * Currently main class (subclass == 0) and single depth subclass * are cached in lockdep_map. This optimization is mainly targeting * rq->lock. double_rq_lock() acquires this highly competitive with * single depth. */ #define NR_LOCKDEP_CACHING_CLASSES 2 /* * A lockdep key is associated with each lock object. For static locks we use * the lock address itself as the key. Dynamically allocated lock objects can * have a statically or dynamically allocated key. Dynamically allocated lock * keys must be registered before being used and must be unregistered before * the key memory is freed. */ struct lockdep_subclass_key { char __one_byte; } __attribute__ ((__packed__)); /* hash_entry is used to keep track of dynamically allocated keys.
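 *
 * A brief sketch of the registration rule described above (illustrative;
 * "obj", with an embedded lock and key, is hypothetical):
 *
 *	lockdep_register_key(&obj->key);
 *	lockdep_set_class(&obj->lock, &obj->key);
 *	...
 *	lockdep_unregister_key(&obj->key);
 *	kfree(obj);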
*/ struct lock_class_key { union { struct hlist_node hash_entry; struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; }; }; extern struct lock_class_key __lockdep_no_validate__; struct lock_trace; #define LOCKSTAT_POINTS 4 /* * The lock-class itself. The order of the structure members matters. * reinit_class() zeroes the key member and all subsequent members. */ struct lock_class { /* * class-hash: */ struct hlist_node hash_entry; /* * Entry in all_lock_classes when in use. Entry in free_lock_classes * when not in use. Instances that are being freed are on one of the * zapped_classes lists. */ struct list_head lock_entry; /* * These fields represent a directed graph of lock dependencies, * to every node we attach a list of "forward" and a list of * "backward" graph nodes. */ struct list_head locks_after, locks_before; const struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; /* * IRQ/softirq usage tracking bits: */ unsigned long usage_mask; const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES]; /* * Generation counter, when doing certain classes of graph walking, * to ensure that we check one node only once: */ int name_version; const char *name; #ifdef CONFIG_LOCK_STAT unsigned long contention_point[LOCKSTAT_POINTS]; unsigned long contending_point[LOCKSTAT_POINTS]; #endif } __no_randomize_layout; #ifdef CONFIG_LOCK_STAT struct lock_time { s64 min; s64 max; s64 total; unsigned long nr; }; enum bounce_type { bounce_acquired_write, bounce_acquired_read, bounce_contended_write, bounce_contended_read, nr_bounce_types, bounce_acquired = bounce_acquired_write, bounce_contended = bounce_contended_write, }; struct lock_class_stats { unsigned long contention_point[LOCKSTAT_POINTS]; unsigned long contending_point[LOCKSTAT_POINTS]; struct lock_time read_waittime; struct lock_time write_waittime; struct lock_time read_holdtime; struct lock_time write_holdtime; unsigned long bounces[nr_bounce_types]; }; struct lock_class_stats lock_stats(struct lock_class *class); void clear_lock_stats(struct lock_class *class); #endif /* * Map the lock object (the lock instance) to the lock-class object. * This is embedded into specific lock instances: */ struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; const char *name; #ifdef CONFIG_LOCK_STAT int cpu; unsigned long ip; #endif }; static inline void lockdep_copy_map(struct lockdep_map *to, struct lockdep_map *from) { int i; *to = *from; /* * Since the class cache can be modified concurrently we could observe * half pointers (64bit arch using 32bit copy insns). Therefore clear * the caches and take the performance hit. * * XXX it doesn't work well with lockdep_set_class_and_subclass(), since * that relies on cache abuse. */ for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) to->class_cache[i] = NULL; } /* * Every lock has a list of other locks that were taken after it. * We only grow the list, never remove from it: */ struct lock_list { struct list_head entry; struct lock_class *class; struct lock_class *links_to; const struct lock_trace *trace; int distance; /* * The parent field is used to implement breadth-first search, and the * bit 0 is reused to indicate if the lock has been accessed in BFS. 
*/ struct lock_list *parent; }; /** * struct lock_chain - lock dependency chain record * * @irq_context: the same as irq_context in held_lock below * @depth: the number of held locks in this chain * @base: the index in chain_hlocks for this chain * @entry: the collided lock chains in lock_chain hash list * @chain_key: the hash key of this lock_chain */ struct lock_chain { /* see BUILD_BUG_ON()s in add_chain_cache() */ unsigned int irq_context : 2, depth : 6, base : 24; /* 4 byte hole */ struct hlist_node entry; u64 chain_key; }; #define MAX_LOCKDEP_KEYS_BITS 13 #define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) #define INITIAL_CHAIN_KEY -1 struct held_lock { /* * One-way hash of the dependency chain up to this point. We * hash the hashes step by step as the dependency chain grows. * * We use it for dependency-caching and we skip detection * passes and dependency-updates if there is a cache-hit, so * it is absolutely critical for 100% coverage of the validator * to have a unique key value for every unique dependency path * that can occur in the system, to make a unique hash value * as likely as possible - hence the 64-bit width. * * The task struct holds the current hash value (initialized * with zero), here we store the previous hash value: */ u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; struct lockdep_map *nest_lock; #ifdef CONFIG_LOCK_STAT u64 waittime_stamp; u64 holdtime_stamp; #endif /* * class_idx is zero-indexed; it points to the element in * lock_classes this held lock instance belongs to. class_idx is in * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. */ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt * contexts nest on top of process context chains, but we 'separate' * the hashes by starting with 0 if we cross into an interrupt * context, and we also do not add cross-context lock * dependencies - the lock usage graph walking covers that area * anyway, and we'd just unnecessarily increase the number of * dependencies otherwise. [Note: hardirq and softirq contexts * are separated from each other too.]
* * The following field is used to detect when we cross into an * interrupt context: */ unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ unsigned int trylock:1; /* 16 bits */ unsigned int read:2; /* see lock_acquire() comment */ unsigned int check:1; /* see lock_acquire() comment */ unsigned int hardirqs_off:1; unsigned int references:12; /* 32 bits */ unsigned int pin_count; }; /* * Initialization, self-test and debugging-output methods: */ extern void lockdep_init(void); extern void lockdep_reset(void); extern void lockdep_reset_lock(struct lockdep_map *lock); extern void lockdep_free_key_range(void *start, unsigned long size); extern asmlinkage void lockdep_sys_exit(void); extern void lockdep_set_selftest_task(struct task_struct *task); extern void lockdep_init_task(struct task_struct *task); extern void lockdep_off(void); extern void lockdep_on(void); extern void lockdep_register_key(struct lock_class_key *key); extern void lockdep_unregister_key(struct lock_class_key *key); /* * These methods are used by specific locking variants (spinlocks, * rwlocks, mutexes and rwsems) to pass init/acquire/release events * to lockdep: */ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, struct lock_class_key *key, int subclass); /* * Reinitialize a lock key - for cases where there is special locking or * special initialization of locks so that the validator gets the scope * of dependencies wrong: they are either too broad (they need a class-split) * or they are too narrow (they suffer from a false class-split): */ #define lockdep_set_class(lock, key) \ lockdep_init_map(&(lock)->dep_map, #key, key, 0) #define lockdep_set_class_and_name(lock, key, name) \ lockdep_init_map(&(lock)->dep_map, name, key, 0) #define lockdep_set_class_and_subclass(lock, key, sub) \ lockdep_init_map(&(lock)->dep_map, #key, key, sub) #define lockdep_set_subclass(lock, sub) \ lockdep_init_map(&(lock)->dep_map, #lock, \ (lock)->dep_map.key, sub) #define lockdep_set_novalidate_class(lock) \ lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) /* * Compare locking classes */ #define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key) static inline int lockdep_match_key(struct lockdep_map *lock, struct lock_class_key *key) { return lock->key == key; } struct lock_class *lockdep_hlock_class(struct held_lock *hlock); /* * Acquire a lock. * * Values for "read": * * 0: exclusive (write) acquire * 1: read-acquire (no recursion allowed) * 2: read-acquire with same-instance recursion allowed * * Values for check: * * 0: simple checks (freeing, held-at-exit-time, etc.) * 1: full validation */ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, struct lockdep_map *nest_lock, unsigned long ip); extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); /* * Same "read" as for lock_acquire(), except -1 means any. 
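 *
 * E.g. (illustrative; "my_sem" is a hypothetical lock with a dep_map):
 *
 *	lock_is_held_type(&my_sem.dep_map, 0);	write acquisition held?
 *	lock_is_held_type(&my_sem.dep_map, 1);	read acquisition held?
 *	lock_is_held(&my_sem.dep_map);		held in any mode (read == -1)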
*/ extern int lock_is_held_type(const struct lockdep_map *lock, int read); static inline int lock_is_held(const struct lockdep_map *lock) { return lock_is_held_type(lock, -1); } #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) #define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r)) extern void lock_set_class(struct lockdep_map *lock, const char *name, struct lock_class_key *key, unsigned int subclass, unsigned long ip); static inline void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, unsigned long ip) { lock_set_class(lock, lock->name, lock->key, subclass, ip); } extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip); struct pin_cookie { unsigned int val; }; #define NIL_COOKIE (struct pin_cookie){ .val = 0U, } extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock); extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie); extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) #define lockdep_assert_held(l) do { \ WARN_ON(debug_locks && !lockdep_is_held(l)); \ } while (0) #define lockdep_assert_held_write(l) do { \ WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ } while (0) #define lockdep_assert_held_read(l) do { \ WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \ } while (0) #define lockdep_assert_held_once(l) do { \ WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ } while (0) #define lockdep_assert_none_held_once() do { \ WARN_ON_ONCE(debug_locks && current->lockdep_depth); \ } while (0) #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) #define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) #define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c)) #define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c)) #else /* !CONFIG_LOCKDEP */ static inline void lockdep_init_task(struct task_struct *task) { } static inline void lockdep_off(void) { } static inline void lockdep_on(void) { } static inline void lockdep_set_selftest_task(struct task_struct *task) { } # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) # define lock_downgrade(l, i) do { } while (0) # define lock_set_class(l, n, k, s, i) do { } while (0) # define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_init_map(lock, name, key, sub) \ do { (void)(name); (void)(key); } while (0) # define lockdep_set_class(lock, key) do { (void)(key); } while (0) # define lockdep_set_class_and_name(lock, key, name) \ do { (void)(key); (void)(name); } while (0) #define lockdep_set_class_and_subclass(lock, key, sub) \ do { (void)(key); } while (0) #define lockdep_set_subclass(lock, sub) do { } while (0) #define lockdep_set_novalidate_class(lock) do { } while (0) /* * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP * case since the result is not well defined and the caller should rather * #ifdef the call himself. 
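 *
 * E.g. (illustrative; "q" and "my_key" are hypothetical):
 *
 *	#ifdef CONFIG_LOCKDEP
 *		WARN_ON(!lockdep_match_class(&q->lock, &my_key));
 *	#endif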
*/ # define lockdep_reset() do { debug_locks = 1; } while (0) # define lockdep_free_key_range(start, size) do { } while (0) # define lockdep_sys_exit() do { } while (0) /* * The class key takes no space if lockdep is disabled: */ struct lock_class_key { }; static inline void lockdep_register_key(struct lock_class_key *key) { } static inline void lockdep_unregister_key(struct lock_class_key *key) { } /* * The lockdep_map takes no space if lockdep is disabled: */ struct lockdep_map { }; #define lockdep_depth(tsk) (0) #define lockdep_is_held(lock) (1) #define lockdep_is_held_type(l, r) (1) #define lockdep_assert_held(l) do { (void)(l); } while (0) #define lockdep_assert_held_write(l) do { (void)(l); } while (0) #define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) #define lockdep_assert_none_held_once() do { } while (0) #define lockdep_recursing(tsk) (0) struct pin_cookie { }; #define NIL_COOKIE (struct pin_cookie){ } #define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) #endif /* !LOCKDEP */ enum xhlock_context_t { XHLOCK_HARD, XHLOCK_SOFT, XHLOCK_CTX_NR, }; #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) /* * To initialize a lockdep_map statically use this macro. * Note that _name must not be NULL. */ #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ { .name = (_name), .key = (void *)(_key), } static inline void lockdep_invariant_state(bool force) {} static inline void lockdep_free_task(struct task_struct *task) {} #ifdef CONFIG_LOCK_STAT extern void lock_contended(struct lockdep_map *lock, unsigned long ip); extern void lock_acquired(struct lockdep_map *lock, unsigned long ip); #define LOCK_CONTENDED(_lock, try, lock) \ do { \ if (!try(_lock)) { \ lock_contended(&(_lock)->dep_map, _RET_IP_); \ lock(_lock); \ } \ lock_acquired(&(_lock)->dep_map, _RET_IP_); \ } while (0) #define LOCK_CONTENDED_RETURN(_lock, try, lock) \ ({ \ int ____err = 0; \ if (!try(_lock)) { \ lock_contended(&(_lock)->dep_map, _RET_IP_); \ ____err = lock(_lock); \ } \ if (!____err) \ lock_acquired(&(_lock)->dep_map, _RET_IP_); \ ____err; \ }) #else /* CONFIG_LOCK_STAT */ #define lock_contended(lockdep_map, ip) do {} while (0) #define lock_acquired(lockdep_map, ip) do {} while (0) #define LOCK_CONTENDED(_lock, try, lock) \ lock(_lock) #define LOCK_CONTENDED_RETURN(_lock, try, lock) \ lock(_lock) #endif /* CONFIG_LOCK_STAT */ #ifdef CONFIG_LOCKDEP /* * On lockdep we don't want the hand-coded irq-enable of * _raw_*_lock_flags() code, because lockdep assumes * that interrupts are not re-enabled during lock-acquire: */ #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ LOCK_CONTENDED((_lock), (try), (lock)) #else /* CONFIG_LOCKDEP */ #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ lockfl((_lock), (flags)) #endif /* CONFIG_LOCKDEP */ #ifdef CONFIG_PROVE_LOCKING extern void print_irqtrace_events(struct task_struct *curr); #else static inline void print_irqtrace_events(struct task_struct *curr) { } #endif /* * For trivial one-depth nesting of a lock-class, the following * global define can be used. (Subsystems with multiple levels * of nesting should define their own lock-nesting subclasses.)
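 *
 * E.g. (illustrative; "parent" and "child" are hypothetical objects
 * whose mutexes share a lock class and are always taken in this order):
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);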
*/ #define SINGLE_DEPTH_NESTING 1 /* * Map the dependency ops to NOP or to real lockdep ops, depending * on the per lock-class debug mode: */ #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i) #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i) #define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) #define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) #define spin_release(l, n, i) lock_release(l, n, i) #define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) #define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) #define rwlock_release(l, n, i) lock_release(l, n, i) #define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) #define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) #define seqcount_release(l, n, i) lock_release(l, n, i) #define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) #define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) #define mutex_release(l, n, i) lock_release(l, n, i) #define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) #define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) #define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) #define rwsem_release(l, n, i) lock_release(l, n, i) #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) #define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) #define lock_map_release(l) lock_release(l, 1, _THIS_IP_) #ifdef CONFIG_PROVE_LOCKING # define might_lock(lock) \ do { \ typecheck(struct lockdep_map *, &(lock)->dep_map); \ lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ } while (0) # define might_lock_read(lock) \ do { \ typecheck(struct lockdep_map *, &(lock)->dep_map); \ lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ } while (0) #define lockdep_assert_irqs_enabled() do { \ WARN_ONCE(debug_locks && !current->lockdep_recursion && \ !current->hardirqs_enabled, \ "IRQs not enabled as expected\n"); \ } while (0) #define lockdep_assert_irqs_disabled() do { \ WARN_ONCE(debug_locks && !current->lockdep_recursion && \ current->hardirqs_enabled, \ "IRQs not disabled as expected\n"); \ } while (0) #define lockdep_assert_in_irq() do { \ WARN_ONCE(debug_locks && !current->lockdep_recursion && \ !current->hardirq_context, \ "Not in hardirq as expected\n"); \ } while (0) #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) # define lockdep_assert_irqs_enabled() do { } while (0) # define lockdep_assert_irqs_disabled() do { } while (0) # define lockdep_assert_in_irq() do { } while (0) #endif #ifdef CONFIG_LOCKDEP void lockdep_rcu_suspicious(const char *file, const int line, const char *s); #else static inline void lockdep_rcu_suspicious(const char *file, const int line, const char *s) { } #endif #endif /* __LINUX_LOCKDEP_H */ etherdevice.h 0000644 00000041742 14722070374 0007222 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. 
NET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the Ethernet handlers. * * Version: @(#)eth.h 1.0.4 05/13/93 * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Relocated to include/linux where it belongs by Alan Cox * <gw4pts@gw4pts.ampr.org> */ #ifndef _LINUX_ETHERDEVICE_H #define _LINUX_ETHERDEVICE_H #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/random.h> #include <asm/unaligned.h> #include <asm/bitsperlong.h> #ifdef __KERNEL__ struct device; int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); unsigned char *arch_get_platform_mac_address(void); int nvmem_get_mac_address(struct device *dev, void *addrbuf); u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr); __be16 eth_header_parse_protocol(const struct sk_buff *skb); int eth_prepare_mac_addr_change(struct net_device *dev, void *p); void eth_commit_mac_addr_change(struct net_device *dev, void *p); int eth_mac_addr(struct net_device *dev, void *p); int eth_change_mtu(struct net_device *dev, int new_mtu); int eth_validate_addr(struct net_device *dev); struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, unsigned int rxqs); #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, unsigned int txqs, unsigned int rxqs); #define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1) struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb); int eth_gro_complete(struct sk_buff *skb, int nhoff); /* Reserved Ethernet Addresses per IEEE 802.1Q */ static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; #define eth_stp_addr eth_reserved_addr_base /** * is_link_local_ether_addr - Determine if given Ethernet address is link-local * @addr: Pointer to a six-byte array containing the Ethernet address * * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per * IEEE 802.1Q 8.6.3 Frame filtering. * * Please note: addr must be aligned to u16. */ static inline bool is_link_local_ether_addr(const u8 *addr) { __be16 *a = (__be16 *)addr; static const __be16 *b = (const __be16 *)eth_reserved_addr_base; static const __be16 m = cpu_to_be16(0xfff0); #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) return (((*(const u32 *)addr) ^ (*(const u32 *)b)) | (__force int)((a[2] ^ b[2]) & m)) == 0; #else return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; #endif } /** * is_zero_ether_addr - Determine if the given Ethernet address is all zeros. * @addr: Pointer to a six-byte array containing the Ethernet address * * Return true if the address is all zeroes. * * Please note: addr must be aligned to u16.
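 *
 * E.g. (illustrative) a zeroed, u16-aligned buffer tests true:
 *
 *	u8 mac[ETH_ALEN] __aligned(2) = { 0 };
 *
 *	WARN_ON(!is_zero_ether_addr(mac));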
*/ static inline bool is_zero_ether_addr(const u8 *addr) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0; #else return (*(const u16 *)(addr + 0) | *(const u16 *)(addr + 2) | *(const u16 *)(addr + 4)) == 0; #endif } /** * is_multicast_ether_addr - Determine if the Ethernet address is a multicast. * @addr: Pointer to a six-byte array containing the Ethernet address * * Return true if the address is a multicast address. * By definition the broadcast address is also a multicast address. */ static inline bool is_multicast_ether_addr(const u8 *addr) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) u32 a = *(const u32 *)addr; #else u16 a = *(const u16 *)addr; #endif #ifdef __BIG_ENDIAN return 0x01 & (a >> ((sizeof(a) * 8) - 8)); #else return 0x01 & a; #endif } static inline bool is_multicast_ether_addr_64bits(const u8 *addr) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 #ifdef __BIG_ENDIAN return 0x01 & ((*(const u64 *)addr) >> 56); #else return 0x01 & (*(const u64 *)addr); #endif #else return is_multicast_ether_addr(addr); #endif } /** * is_local_ether_addr - Determine if the Ethernet address is a locally-assigned one (IEEE 802). * @addr: Pointer to a six-byte array containing the Ethernet address * * Return true if the address is a local address. */ static inline bool is_local_ether_addr(const u8 *addr) { return 0x02 & addr[0]; } /** * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast * @addr: Pointer to a six-byte array containing the Ethernet address * * Return true if the address is the broadcast address. * * Please note: addr must be aligned to u16. */ static inline bool is_broadcast_ether_addr(const u8 *addr) { return (*(const u16 *)(addr + 0) & *(const u16 *)(addr + 2) & *(const u16 *)(addr + 4)) == 0xffff; } /** * is_unicast_ether_addr - Determine if the Ethernet address is unicast * @addr: Pointer to a six-byte array containing the Ethernet address * * Return true if the address is a unicast address. */ static inline bool is_unicast_ether_addr(const u8 *addr) { return !is_multicast_ether_addr(addr); } /** * is_valid_ether_addr - Determine if the given Ethernet address is valid * @addr: Pointer to a six-byte array containing the Ethernet address * * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not * a multicast address, and is not FF:FF:FF:FF:FF:FF. * * Return true if the address is valid. * * Please note: addr must be aligned to u16. */ static inline bool is_valid_ether_addr(const u8 *addr) { /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to * explicitly check for it here. */ return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr); } /** * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol * @proto: Ethertype/length value to be tested * * Check that the value from the Ethertype/length field is a valid Ethertype. * * Return true if the value is an 802.3 supported Ethertype.
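 *
 * For instance (illustrative; ETH_P_802_3_MIN is 0x0600, so anything
 * below it is an 802.3 length field rather than an Ethertype):
 *
 *	eth_proto_is_802_3(htons(ETH_P_IP));	/* true: 0x0800 */
 *	eth_proto_is_802_3(htons(0x0100));	/* false: a length */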
*/ static inline bool eth_proto_is_802_3(__be16 proto) { #ifndef __BIG_ENDIAN /* if CPU is little endian mask off bits representing LSB */ proto &= htons(0xFF00); #endif /* cast both to u16 and compare since LSB can be ignored */ return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN); } /** * eth_random_addr - Generate software assigned random Ethernet address * @addr: Pointer to a six-byte array containing the Ethernet address * * Generate a random Ethernet address (MAC) that is not multicast * and has the local assigned bit set. */ static inline void eth_random_addr(u8 *addr) { get_random_bytes(addr, ETH_ALEN); addr[0] &= 0xfe; /* clear multicast bit */ addr[0] |= 0x02; /* set local assignment bit (IEEE802) */ } #define random_ether_addr(addr) eth_random_addr(addr) /** * eth_broadcast_addr - Assign broadcast address * @addr: Pointer to a six-byte array containing the Ethernet address * * Assign the broadcast address to the given address array. */ static inline void eth_broadcast_addr(u8 *addr) { memset(addr, 0xff, ETH_ALEN); } /** * eth_zero_addr - Assign zero address * @addr: Pointer to a six-byte array containing the Ethernet address * * Assign the zero address to the given address array. */ static inline void eth_zero_addr(u8 *addr) { memset(addr, 0x00, ETH_ALEN); } /** * eth_hw_addr_random - Generate software assigned random Ethernet and * set device flag * @dev: pointer to net_device structure * * Generate a random Ethernet address (MAC) to be used by a net device * and set addr_assign_type so the state can be read by sysfs and be * used by userspace. */ static inline void eth_hw_addr_random(struct net_device *dev) { dev->addr_assign_type = NET_ADDR_RANDOM; eth_random_addr(dev->dev_addr); } /** * ether_addr_copy - Copy an Ethernet address * @dst: Pointer to a six-byte array Ethernet address destination * @src: Pointer to a six-byte array Ethernet address source * * Please note: dst & src must both be aligned to u16. */ static inline void ether_addr_copy(u8 *dst, const u8 *src) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) *(u32 *)dst = *(const u32 *)src; *(u16 *)(dst + 4) = *(const u16 *)(src + 4); #else u16 *a = (u16 *)dst; const u16 *b = (const u16 *)src; a[0] = b[0]; a[1] = b[1]; a[2] = b[2]; #endif } /** * eth_hw_addr_set - Assign Ethernet address to a net_device * @dev: pointer to net_device structure * @addr: address to assign * * Assign given address to the net_device, addr_assign_type is not changed. */ static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) { ether_addr_copy(dev->dev_addr, addr); } /** * eth_hw_addr_inherit - Copy dev_addr from another net_device * @dst: pointer to net_device to copy dev_addr to * @src: pointer to net_device to copy dev_addr from * * Copy the Ethernet address from one net_device to another along with * the address attributes (addr_assign_type). */ static inline void eth_hw_addr_inherit(struct net_device *dst, struct net_device *src) { dst->addr_assign_type = src->addr_assign_type; ether_addr_copy(dst->dev_addr, src->dev_addr); } /** * ether_addr_equal - Compare two Ethernet addresses * @addr1: Pointer to a six-byte array containing the Ethernet address * @addr2: Pointer other six-byte array containing the Ethernet address * * Compare two Ethernet addresses, returns true if equal * * Please note: addr1 & addr2 must both be aligned to u16. 
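 *
 * A sketch of typical use (hypothetical stack buffers):
 *
 *	u8 a[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	u8 b[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	bool same = ether_addr_equal(a, b);	/* true */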
*/ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) | ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4))); return fold == 0; #else const u16 *a = (const u16 *)addr1; const u16 *b = (const u16 *)addr2; return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0; #endif } /** * ether_addr_equal_64bits - Compare two Ethernet addresses * @addr1: Pointer to an array of 8 bytes * @addr2: Pointer to an other array of 8 bytes * * Compare two Ethernet addresses, returns true if equal, false otherwise. * * The function doesn't need any conditional branches and possibly uses * word memory accesses on CPU allowing cheap unaligned memory reads. * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 } * * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits. */ static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2); #ifdef __BIG_ENDIAN return (fold >> 16) == 0; #else return (fold << 16) == 0; #endif #else return ether_addr_equal(addr1, addr2); #endif } /** * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses * @addr1: Pointer to a six-byte array containing the Ethernet address * @addr2: Pointer other six-byte array containing the Ethernet address * * Compare two Ethernet addresses, returns true if equal * * Please note: Use only when any Ethernet address may not be u16 aligned. */ static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) return ether_addr_equal(addr1, addr2); #else return memcmp(addr1, addr2, ETH_ALEN) == 0; #endif } /** * ether_addr_equal_masked - Compare two Ethernet addresses with a mask * @addr1: Pointer to a six-byte array containing the 1st Ethernet address * @addr2: Pointer to a six-byte array containing the 2nd Ethernet address * @mask: Pointer to a six-byte array containing the Ethernet address bitmask * * Compare two Ethernet addresses with a mask, returns true if for every bit * set in the bitmask the equivalent bits in the ethernet addresses are equal. * Using a mask with all bits set is a slower ether_addr_equal. */ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, const u8 *mask) { int i; for (i = 0; i < ETH_ALEN; i++) { if ((addr1[i] ^ addr2[i]) & mask[i]) return false; } return true; } /** * ether_addr_to_u64 - Convert an Ethernet address into a u64 value. * @addr: Pointer to a six-byte array containing the Ethernet address * * Return a u64 value of the address */ static inline u64 ether_addr_to_u64(const u8 *addr) { u64 u = 0; int i; for (i = 0; i < ETH_ALEN; i++) u = u << 8 | addr[i]; return u; } /** * u64_to_ether_addr - Convert a u64 to an Ethernet address. * @u: u64 to convert to an Ethernet MAC address * @addr: Pointer to a six-byte array to contain the Ethernet address */ static inline void u64_to_ether_addr(u64 u, u8 *addr) { int i; for (i = ETH_ALEN - 1; i >= 0; i--) { addr[i] = u & 0xff; u = u >> 8; } } /** * eth_addr_dec - Decrement the given MAC address * * @addr: Pointer to a six-byte array containing Ethernet address to decrement */ static inline void eth_addr_dec(u8 *addr) { u64 u = ether_addr_to_u64(addr); u--; u64_to_ether_addr(u, addr); } /** * eth_addr_inc() - Increment the given MAC address. 
* @addr: Pointer to a six-byte array containing Ethernet address to increment. */ static inline void eth_addr_inc(u8 *addr) { u64 u = ether_addr_to_u64(addr); u++; u64_to_ether_addr(u, addr); } /** * is_etherdev_addr - Tell if given Ethernet address belongs to the device. * @dev: Pointer to a device structure * @addr: Pointer to a six-byte array containing the Ethernet address * * Compare passed address with all addresses of the device. Return true if the * address is one of the device addresses. * * Note that this function calls ether_addr_equal_64bits() so take care of * the right padding. */ static inline bool is_etherdev_addr(const struct net_device *dev, const u8 addr[6 + 2]) { struct netdev_hw_addr *ha; bool res = false; rcu_read_lock(); for_each_dev_addr(dev, ha) { res = ether_addr_equal_64bits(addr, ha->addr); if (res) break; } rcu_read_unlock(); return res; } #endif /* __KERNEL__ */ /** * compare_ether_header - Compare two Ethernet headers * @a: Pointer to Ethernet header * @b: Pointer to Ethernet header * * Compare two Ethernet headers, returns 0 if equal. * This assumes that the network header (i.e., IP header) is 4-byte * aligned OR the platform can handle unaligned access. This is the * case for all packets coming into netif_receive_skb or similar * entry points. */ static inline unsigned long compare_ether_header(const void *a, const void *b) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 unsigned long fold; /* * We want to compare 14 bytes: * [a0 ... a13] ^ [b0 ... b13] * Use two long XOR, ORed together, with an overlap of two bytes. * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] | * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13] * This means the [a6 a7] ^ [b6 b7] part is done two times. */ fold = *(unsigned long *)a ^ *(unsigned long *)b; fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6); return fold; #else u32 *a32 = (u32 *)((u8 *)a + 2); u32 *b32 = (u32 *)((u8 *)b + 2); return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); #endif } /** * eth_skb_pkt_type - Assign packet type if destination address does not match * @skb: Assigned a packet type if address does not match @dev address * @dev: Network device used to compare packet address against * * If the destination MAC address of the packet does not match the network * device address, assign an appropriate packet type. */ static inline void eth_skb_pkt_type(struct sk_buff *skb, const struct net_device *dev) { const struct ethhdr *eth = eth_hdr(skb); if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) { if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) { if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; } else { skb->pkt_type = PACKET_OTHERHOST; } } } /** * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame * @skb: Buffer to pad * * An Ethernet frame should have a minimum size of 60 bytes. This function * takes short frames and pads them with zeros up to the 60 byte limit.
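 *
 * A transmit path might use it like this (sketch; note that the
 * underlying skb_put_padto() frees the skb on failure):
 *
 *	if (eth_skb_pad(skb))
 *		return NETDEV_TX_OK;	/* skb already consumed */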
*/ static inline int eth_skb_pad(struct sk_buff *skb) { return skb_put_padto(skb, ETH_ZLEN); } #endif /* _LINUX_ETHERDEVICE_H */ linux_logo.h 0000644 00000003644 14722070374 0007111 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_LINUX_LOGO_H #define _LINUX_LINUX_LOGO_H /* * Linux logo to be displayed on boot * * Copyright (C) 1996 Larry Ewing (lewing@isc.tamu.edu) * Copyright (C) 1996,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 2001 Greg Banks <gnb@alphalink.com.au> * Copyright (C) 2001 Jan-Benedict Glaw <jbglaw@lug-owl.de> * Copyright (C) 2003 Geert Uytterhoeven <geert@linux-m68k.org> * * Serial_console ascii image can be any size, * but should contain %s to display the version */ #include <linux/init.h> #define LINUX_LOGO_MONO 1 /* monochrome black/white */ #define LINUX_LOGO_VGA16 2 /* 16 colors VGA text palette */ #define LINUX_LOGO_CLUT224 3 /* 224 colors */ #define LINUX_LOGO_GRAY256 4 /* 256 levels grayscale */ struct linux_logo { int type; /* one of LINUX_LOGO_* */ unsigned int width; unsigned int height; unsigned int clutsize; /* LINUX_LOGO_CLUT224 only */ const unsigned char *clut; /* LINUX_LOGO_CLUT224 only */ const unsigned char *data; }; extern const struct linux_logo logo_linux_mono; extern const struct linux_logo logo_linux_vga16; extern const struct linux_logo logo_linux_clut224; extern const struct linux_logo logo_dec_clut224; extern const struct linux_logo logo_mac_clut224; extern const struct linux_logo logo_parisc_clut224; extern const struct linux_logo logo_sgi_clut224; extern const struct linux_logo logo_sun_clut224; extern const struct linux_logo logo_superh_mono; extern const struct linux_logo logo_superh_vga16; extern const struct linux_logo logo_superh_clut224; extern const struct linux_logo logo_spe_clut224; extern const struct linux_logo *fb_find_logo(int depth); #ifdef CONFIG_FB_LOGO_EXTRA extern void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n); #else static inline void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n) {} #endif #endif /* _LINUX_LINUX_LOGO_H */ stackprotector.h 0000644 00000000501 14722070374 0007766 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_STACKPROTECTOR_H #define _LINUX_STACKPROTECTOR_H 1 #include <linux/compiler.h> #include <linux/sched.h> #include <linux/random.h> #ifdef CONFIG_STACKPROTECTOR # include <asm/stackprotector.h> #else static inline void boot_init_stack_canary(void) { } #endif #endif energy_model.h 0000644 00000014742 14722070374 0007404 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ENERGY_MODEL_H #define _LINUX_ENERGY_MODEL_H #include <linux/cpumask.h> #include <linux/jump_label.h> #include <linux/kobject.h> #include <linux/rcupdate.h> #include <linux/sched/cpufreq.h> #include <linux/sched/topology.h> #include <linux/types.h> #ifdef CONFIG_ENERGY_MODEL /** * em_cap_state - Capacity state of a performance domain * @frequency: The CPU frequency in KHz, for consistency with CPUFreq * @power: The power consumed by 1 CPU at this level, in milli-watts * @cost: The cost coefficient associated with this level, used during * energy calculation. 
Equal to: power * max_frequency / frequency */ struct em_cap_state { unsigned long frequency; unsigned long power; unsigned long cost; }; /** * em_perf_domain - Performance domain * @table: List of capacity states, in ascending order * @nr_cap_states: Number of capacity states * @cpus: Cpumask covering the CPUs of the domain * * A "performance domain" represents a group of CPUs whose performance is * scaled together. All CPUs of a performance domain must have the same * micro-architecture. Performance domains often have a 1-to-1 mapping with * CPUFreq policies. */ struct em_perf_domain { struct em_cap_state *table; int nr_cap_states; unsigned long cpus[0]; }; #define EM_CPU_MAX_POWER 0xFFFF /* * Increase resolution of energy estimation calculations for 64-bit * architectures. The extra resolution improves decision made by EAS for the * task placement when two Performance Domains might provide similar energy * estimation values (w/o better resolution the values could be equal). * * We increase resolution only if we have enough bits to allow this increased * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit * are pretty high and the returns do not justify the increased costs. */ #ifdef CONFIG_64BIT #define em_scale_power(p) ((p) * 1000) #else #define em_scale_power(p) (p) #endif struct em_data_callback { /** * active_power() - Provide power at the next capacity state of a CPU * @power : Active power at the capacity state in mW (modified) * @freq : Frequency at the capacity state in kHz (modified) * @cpu : CPU for which we do this operation * * active_power() must find the lowest capacity state of 'cpu' above * 'freq' and update 'power' and 'freq' to the matching active power * and frequency. * * The power is the one of a single CPU in the domain, expressed in * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER] * range. * * Return 0 on success. */ int (*active_power)(unsigned long *power, unsigned long *freq, int cpu); }; #define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb } struct em_perf_domain *em_cpu_get(int cpu); int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, struct em_data_callback *cb); /** * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain * @pd : performance domain for which energy has to be estimated * @max_util : highest utilization among CPUs of the domain * @sum_util : sum of the utilization of all CPUs in the domain * * Return: the sum of the energy consumed by the CPUs of the domain assuming * a capacity state satisfying the max utilization of the domain. */ static inline unsigned long em_pd_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { unsigned long freq, scale_cpu; struct em_cap_state *cs; int i, cpu; /* * In order to predict the capacity state, map the utilization of the * most utilized CPU of the performance domain to a requested frequency, * like schedutil. */ cpu = cpumask_first(to_cpumask(pd->cpus)); scale_cpu = arch_scale_cpu_capacity(cpu); cs = &pd->table[pd->nr_cap_states - 1]; freq = map_util_freq(max_util, cs->frequency, scale_cpu); /* * Find the lowest capacity state of the Energy Model above the * requested frequency. 
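	 * The table is sorted by ascending frequency, so the first state
	 * with cs->frequency >= freq is the one to use; if the request
	 * exceeds every entry, the scan falls through to the highest state.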
*/ for (i = 0; i < pd->nr_cap_states; i++) { cs = &pd->table[i]; if (cs->frequency >= freq) break; } /* * The capacity of a CPU in the domain at that capacity state (cs) * can be computed as: * * cs->freq * scale_cpu * cs->cap = -------------------- (1) * cpu_max_freq * * So, ignoring the costs of idle states (which are not available in * the EM), the energy consumed by this CPU at that capacity state is * estimated as: * * cs->power * cpu_util * cpu_nrg = -------------------- (2) * cs->cap * * since 'cpu_util / cs->cap' represents its percentage of busy time. * * NOTE: Although the result of this computation actually is in * units of power, it can be manipulated as an energy value * over a scheduling period, since it is assumed to be * constant during that interval. * * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product * of two terms: * * cs->power * cpu_max_freq cpu_util * cpu_nrg = ------------------------ * --------- (3) * cs->freq scale_cpu * * The first term is static, and is stored in the em_cap_state struct * as 'cs->cost'. * * Since all CPUs of the domain have the same micro-architecture, they * share the same 'cs->cost', and the same CPU capacity. Hence, the * total energy of the domain (which is the simple sum of the energy of * all of its CPUs) can be factorized as: * * cs->cost * \Sum cpu_util * pd_nrg = ------------------------ (4) * scale_cpu */ return cs->cost * sum_util / scale_cpu; } /** * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain * @pd : performance domain for which this must be done * * Return: the number of capacity states in the performance domain table */ static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) { return pd->nr_cap_states; } #else struct em_perf_domain {}; struct em_data_callback {}; #define EM_DATA_CB(_active_power_cb) { } static inline int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, struct em_data_callback *cb) { return -EINVAL; } static inline struct em_perf_domain *em_cpu_get(int cpu) { return NULL; } static inline unsigned long em_pd_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { return 0; } static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) { return 0; } #endif #endif psi_types.h 0000644 00000007033 14722070374 0006745 0 ustar 00 #ifndef _LINUX_PSI_TYPES_H #define _LINUX_PSI_TYPES_H #include <linux/kthread.h> #include <linux/seqlock.h> #include <linux/types.h> #include <linux/kref.h> #include <linux/wait.h> #ifdef CONFIG_PSI /* Tracked task states */ enum psi_task_count { NR_IOWAIT, NR_MEMSTALL, NR_RUNNING, NR_PSI_TASK_COUNTS = 3, }; /* Task state bitmasks */ #define TSK_IOWAIT (1 << NR_IOWAIT) #define TSK_MEMSTALL (1 << NR_MEMSTALL) #define TSK_RUNNING (1 << NR_RUNNING) /* Resources that workloads could be stalled on */ enum psi_res { PSI_IO, PSI_MEM, PSI_CPU, NR_PSI_RESOURCES = 3, }; /* * Pressure states for each resource: * * SOME: Stalled tasks & working tasks * FULL: Stalled tasks & no working tasks */ enum psi_states { PSI_IO_SOME, PSI_IO_FULL, PSI_MEM_SOME, PSI_MEM_FULL, PSI_CPU_SOME, /* Only per-CPU, to weigh the CPU in the global average: */ PSI_NONIDLE, NR_PSI_STATES = 6, }; enum psi_aggregators { PSI_AVGS = 0, PSI_POLL, NR_PSI_AGGREGATORS, }; struct psi_group_cpu { /* 1st cacheline updated by the scheduler */ /* Aggregator needs to know of concurrent changes */ seqcount_t seq ____cacheline_aligned_in_smp; /* States of the tasks belonging to this group */ unsigned int tasks[NR_PSI_TASK_COUNTS]; /* 
Aggregate pressure state derived from the tasks */ u32 state_mask; /* Period time sampling buckets for each state of interest (ns) */ u32 times[NR_PSI_STATES]; /* Time of last task change in this group (rq_clock) */ u64 state_start; /* 2nd cacheline updated by the aggregator */ /* Delta detection against the sampling buckets */ u32 times_prev[NR_PSI_AGGREGATORS][NR_PSI_STATES] ____cacheline_aligned_in_smp; }; /* PSI growth tracking window */ struct psi_window { /* Window size in ns */ u64 size; /* Start time of the current window in ns */ u64 start_time; /* Value at the start of the window */ u64 start_value; /* Value growth in the previous window */ u64 prev_growth; }; struct psi_trigger { /* PSI state being monitored by the trigger */ enum psi_states state; /* User-specified threshold in ns */ u64 threshold; /* List node inside triggers list */ struct list_head node; /* Backpointer needed during trigger destruction */ struct psi_group *group; /* Wait queue for polling */ wait_queue_head_t event_wait; /* Pending event flag */ int event; /* Tracking window */ struct psi_window win; /* * Time last event was generated. Used for rate-limiting * events to one per window */ u64 last_event_time; }; struct psi_group { /* Protects data used by the aggregator */ struct mutex avgs_lock; /* Per-cpu task state & time tracking */ struct psi_group_cpu __percpu *pcpu; /* Running pressure averages */ u64 avg_total[NR_PSI_STATES - 1]; u64 avg_last_update; u64 avg_next_update; /* Aggregator work control */ struct delayed_work avgs_work; /* Total stall times and sampled pressure averages */ u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1]; unsigned long avg[NR_PSI_STATES - 1][3]; /* Monitor work control */ atomic_t poll_scheduled; struct kthread_worker __rcu *poll_kworker; struct kthread_delayed_work poll_work; /* Protects data used by the monitor */ struct mutex trigger_lock; /* Configured polling triggers */ struct list_head triggers; u32 nr_triggers[NR_PSI_STATES - 1]; u32 poll_states; u64 poll_min_period; /* Total stall times at the start of monitor activation */ u64 polling_total[NR_PSI_STATES - 1]; u64 polling_next_update; u64 polling_until; }; #else /* CONFIG_PSI */ struct psi_group { }; #endif /* CONFIG_PSI */ #endif /* _LINUX_PSI_TYPES_H */ ww_mutex.h 0000644 00000031434 14722070374 0006607 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance * * Original mutex implementation started by Ingo Molnar: * * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * * Wait/Die implementation: * Copyright (C) 2013 Canonical Ltd. * Choice of algorithm: * Copyright (C) 2018 WMWare Inc. * * This file contains the main data structure and API definitions.
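 *
 * Rough shape of an acquire/release cycle under this API (a sketch only:
 * the -EDEADLK backoff and -EALREADY handling a real multi-lock loop
 * needs are elided, and the names are hypothetical):
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ww_mutex_lock(&obj->lock, &ctx);	/* repeat per object */
 *	ww_acquire_done(&ctx);
 *	/* ... operate on the locked objects ... */
 *	ww_mutex_unlock(&obj->lock);		/* repeat per object */
 *	ww_acquire_fini(&ctx);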
*/ #ifndef __LINUX_WW_MUTEX_H #define __LINUX_WW_MUTEX_H #include <linux/mutex.h> struct ww_class { atomic_long_t stamp; struct lock_class_key acquire_key; struct lock_class_key mutex_key; const char *acquire_name; const char *mutex_name; unsigned int is_wait_die; }; struct ww_acquire_ctx { struct task_struct *task; unsigned long stamp; unsigned int acquired; unsigned short wounded; unsigned short is_wait_die; #ifdef CONFIG_DEBUG_MUTEXES unsigned int done_acquire; struct ww_class *ww_class; struct ww_mutex *contending_lock; #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH unsigned int deadlock_inject_interval; unsigned int deadlock_inject_countdown; #endif }; struct ww_mutex { struct mutex base; struct ww_acquire_ctx *ctx; #ifdef CONFIG_DEBUG_MUTEXES struct ww_class *ww_class; #endif }; #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \ , .ww_class = class #else # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) #endif #define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \ { .stamp = ATOMIC_LONG_INIT(0) \ , .acquire_name = #ww_class "_acquire" \ , .mutex_name = #ww_class "_mutex" \ , .is_wait_die = _is_wait_die } #define __WW_MUTEX_INITIALIZER(lockname, class) \ { .base = __MUTEX_INITIALIZER(lockname.base) \ __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } #define DEFINE_WD_CLASS(classname) \ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1) #define DEFINE_WW_CLASS(classname) \ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0) #define DEFINE_WW_MUTEX(mutexname, ww_class) \ struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) /** * ww_mutex_init - initialize the w/w mutex * @lock: the mutex to be initialized * @ww_class: the w/w class the mutex should belong to * * Initialize the w/w mutex to unlocked state and associate it with the given * class. * * It is not allowed to initialize an already locked mutex. */ static inline void ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) { __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); lock->ctx = NULL; #ifdef CONFIG_DEBUG_MUTEXES lock->ww_class = ww_class; #endif } /** * ww_acquire_init - initialize a w/w acquire context * @ctx: w/w acquire context to initialize * @ww_class: w/w class of the context * * Initializes a context to acquire multiple mutexes of the given w/w class. * * Context-based w/w mutex acquiring can be done in any order whatsoever within * a given lock class. Deadlocks will be detected and handled with the * wait/die logic. * * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can * result in undetected deadlocks and is hence forbidden. Mixing different contexts * for the same w/w class when acquiring mutexes can also result in undetected * deadlocks, and is hence also forbidden. Both types of abuse will be caught by * enabling CONFIG_PROVE_LOCKING. * * Nesting of acquire contexts for _different_ w/w classes is possible, subject * to the usual locking rules between different lock classes. * * An acquire context must be released with ww_acquire_fini by the same task * before the memory is freed. It is recommended to allocate the context itself * on the stack.
*/ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) { ctx->task = current; ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp); ctx->acquired = 0; ctx->wounded = false; ctx->is_wait_die = ww_class->is_wait_die; #ifdef CONFIG_DEBUG_MUTEXES ctx->ww_class = ww_class; ctx->done_acquire = 0; ctx->contending_lock = NULL; #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC debug_check_no_locks_freed((void *)ctx, sizeof(*ctx)); lockdep_init_map(&ctx->dep_map, ww_class->acquire_name, &ww_class->acquire_key, 0); mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_); #endif #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH ctx->deadlock_inject_interval = 1; ctx->deadlock_inject_countdown = ctx->stamp & 0xf; #endif } /** * ww_acquire_done - marks the end of the acquire phase * @ctx: the acquire context * * Marks the end of the acquire phase, any further w/w mutex lock calls using * this context are forbidden. * * Calling this function is optional, it is just useful to document w/w mutex * code and clearly designated the acquire phase from actually using the locked * data structures. */ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) { #ifdef CONFIG_DEBUG_MUTEXES lockdep_assert_held(ctx); DEBUG_LOCKS_WARN_ON(ctx->done_acquire); ctx->done_acquire = 1; #endif } /** * ww_acquire_fini - releases a w/w acquire context * @ctx: the acquire context to free * * Releases a w/w acquire context. This must be called _after_ all acquired w/w * mutexes have been released with ww_mutex_unlock. */ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) { #ifdef CONFIG_DEBUG_MUTEXES mutex_release(&ctx->dep_map, 0, _THIS_IP_); DEBUG_LOCKS_WARN_ON(ctx->acquired); if (!IS_ENABLED(CONFIG_PROVE_LOCKING)) /* * lockdep will normally handle this, * but fail without anyway */ ctx->done_acquire = 1; if (!IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC)) /* ensure ww_acquire_fini will still fail if called twice */ ctx->acquired = ~0U; #endif } /** * ww_mutex_lock - acquire the w/w mutex * @lock: the mutex to be acquired * @ctx: w/w acquire context, or NULL to acquire only a single lock. * * Lock the w/w mutex exclusively for this task. * * Deadlocks within a given w/w class of locks are detected and handled with the * wait/die algorithm. If the lock isn't immediately available this function * will either sleep until it is (wait case). Or it selects the current context * for backing off by returning -EDEADLK (die case). Trying to acquire the * same lock with the same context twice is also detected and signalled by * returning -EALREADY. Returns 0 if the mutex was successfully acquired. * * In the die case the caller must release all currently held w/w mutexes for * the given context and then wait for this contending lock to be available by * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this * lock and proceed with trying to acquire further w/w mutexes (e.g. when * scanning through lru lists trying to free resources). * * The mutex must later on be released by the same task that * acquired it. The task may not exit without first unlocking the mutex. Also, * kernel memory where the mutex resides must not be freed with the mutex still * locked. The mutex must first be initialized (or statically defined) before it * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be * of the same w/w lock class as was used to initialize the acquire context. * * A mutex acquired with this function must be released with ww_mutex_unlock. 
*/ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx); /** * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible * @lock: the mutex to be acquired * @ctx: w/w acquire context * * Lock the w/w mutex exclusively for this task. * * Deadlocks within a given w/w class of locks are detected and handled with the * wait/die algorithm. If the lock isn't immediately available this function * will either sleep until it is (wait case). Or it selects the current context * for backing off by returning -EDEADLK (die case). Trying to acquire the * same lock with the same context twice is also detected and signalled by * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a * signal arrives while waiting for the lock then this function returns -EINTR. * * In the die case the caller must release all currently held w/w mutexes for * the given context and then wait for this contending lock to be available by * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to * not acquire this lock and proceed with trying to acquire further w/w mutexes * (e.g. when scanning through lru lists trying to free resources). * * The mutex must later on be released by the same task that * acquired it. The task may not exit without first unlocking the mutex. Also, * kernel memory where the mutex resides must not be freed with the mutex still * locked. The mutex must first be initialized (or statically defined) before it * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be * of the same w/w lock class as was used to initialize the acquire context. * * A mutex acquired with this function must be released with ww_mutex_unlock. */ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx); /** * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex * @lock: the mutex to be acquired * @ctx: w/w acquire context * * Acquires a w/w mutex with the given context after a die case. This function * will sleep until the lock becomes available. * * The caller must have released all w/w mutexes already acquired with the * context and then call this function on the contended lock. * * Afterwards the caller may continue to (re)acquire the other w/w mutexes it * needs with ww_mutex_lock. Note that the -EALREADY return code from * ww_mutex_lock can be used to avoid locking this contended mutex twice. * * It is forbidden to call this function with any other w/w mutexes associated * with the context held. It is forbidden to call this on anything else than the * contending mutex. * * Note that the slowpath lock acquiring can also be done by calling * ww_mutex_lock directly. This function here is simply to help w/w mutex * locking code readability by clearly denoting the slowpath. */ static inline void ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { int ret; #ifdef CONFIG_DEBUG_MUTEXES DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); #endif ret = ww_mutex_lock(lock, ctx); (void)ret; } /** * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible * @lock: the mutex to be acquired * @ctx: w/w acquire context * * Acquires a w/w mutex with the given context after a die case. This function * will sleep until the lock becomes available and returns 0 when the lock has * been acquired. If a signal arrives while waiting for the lock then this * function returns -EINTR. 
* * The caller must have released all w/w mutexes already acquired with the * context and then call this function on the contended lock. * * Afterwards the caller may continue to (re)acquire the other w/w mutexes it * needs with ww_mutex_lock. Note that the -EALREADY return code from * ww_mutex_lock can be used to avoid locking this contended mutex twice. * * It is forbidden to call this function with any other w/w mutexes associated * with the given context held. It is forbidden to call this on anything else * than the contending mutex. * * Note that the slowpath lock acquiring can also be done by calling * ww_mutex_lock_interruptible directly. This function here is simply to help * w/w mutex locking code readability by clearly denoting the slowpath. */ static inline int __must_check ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { #ifdef CONFIG_DEBUG_MUTEXES DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); #endif return ww_mutex_lock_interruptible(lock, ctx); } extern void ww_mutex_unlock(struct ww_mutex *lock); /** * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context * @lock: mutex to lock * * Trylocks a mutex without acquire context, so no deadlock detection is * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. */ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) { return mutex_trylock(&lock->base); } /** * ww_mutex_destroy - mark a w/w mutex unusable * @lock: the mutex to be destroyed * * This function marks the mutex uninitialized, and any subsequent * use of the mutex is forbidden. The mutex must not be locked when * this function is called. */ static inline void ww_mutex_destroy(struct ww_mutex *lock) { mutex_destroy(&lock->base); } /** * ww_mutex_is_locked - is the w/w mutex locked * @lock: the mutex to be queried * * Returns 1 if the mutex is locked, 0 if unlocked. */ static inline bool ww_mutex_is_locked(struct ww_mutex *lock) { return mutex_is_locked(&lock->base); } #endif power_supply.h 0000644 00000040101 14722070374 0007467 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Universal power supply monitor class * * Copyright © 2007 Anton Vorontsov <cbou@mail.ru> * Copyright © 2004 Szabolcs Gyurko * Copyright © 2003 Ian Molton <spyro@f2s.com> * * Modified: 2004, Oct Szabolcs Gyurko */ #ifndef __LINUX_POWER_SUPPLY_H__ #define __LINUX_POWER_SUPPLY_H__ #include <linux/device.h> #include <linux/workqueue.h> #include <linux/leds.h> #include <linux/spinlock.h> #include <linux/notifier.h> /* * All voltages, currents, charges, energies, time and temperatures are in µV, * µA, µAh, µWh, seconds and tenths of degree Celsius unless otherwise * stated. It's the driver's job to convert its raw values to units in which * this class operates. */ /* * For systems where the charger determines the maximum battery capacity * the min and max fields should be used to present these values to user * space. Unused/unknown fields will not appear in sysfs. */ enum { POWER_SUPPLY_STATUS_UNKNOWN = 0, POWER_SUPPLY_STATUS_CHARGING, POWER_SUPPLY_STATUS_DISCHARGING, POWER_SUPPLY_STATUS_NOT_CHARGING, POWER_SUPPLY_STATUS_FULL, }; /* What algorithm is the charger using? 
*/ enum { POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, POWER_SUPPLY_CHARGE_TYPE_NONE, POWER_SUPPLY_CHARGE_TYPE_TRICKLE, /* slow speed */ POWER_SUPPLY_CHARGE_TYPE_FAST, /* fast speed */ POWER_SUPPLY_CHARGE_TYPE_STANDARD, /* normal speed */ POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */ POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */ }; enum { POWER_SUPPLY_HEALTH_UNKNOWN = 0, POWER_SUPPLY_HEALTH_GOOD, POWER_SUPPLY_HEALTH_OVERHEAT, POWER_SUPPLY_HEALTH_DEAD, POWER_SUPPLY_HEALTH_OVERVOLTAGE, POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, POWER_SUPPLY_HEALTH_COLD, POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_OVERCURRENT, }; enum { POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, POWER_SUPPLY_TECHNOLOGY_NiMH, POWER_SUPPLY_TECHNOLOGY_LION, POWER_SUPPLY_TECHNOLOGY_LIPO, POWER_SUPPLY_TECHNOLOGY_LiFe, POWER_SUPPLY_TECHNOLOGY_NiCd, POWER_SUPPLY_TECHNOLOGY_LiMn, }; enum { POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL, POWER_SUPPLY_CAPACITY_LEVEL_LOW, POWER_SUPPLY_CAPACITY_LEVEL_NORMAL, POWER_SUPPLY_CAPACITY_LEVEL_HIGH, POWER_SUPPLY_CAPACITY_LEVEL_FULL, }; enum { POWER_SUPPLY_SCOPE_UNKNOWN = 0, POWER_SUPPLY_SCOPE_SYSTEM, POWER_SUPPLY_SCOPE_DEVICE, }; enum power_supply_property { /* Properties of type `int' */ POWER_SUPPLY_PROP_STATUS = 0, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_AUTHENTIC, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_CYCLE_COUNT, POWER_SUPPLY_PROP_VOLTAGE_MAX, POWER_SUPPLY_PROP_VOLTAGE_MIN, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_VOLTAGE_OCV, POWER_SUPPLY_PROP_VOLTAGE_BOOT, POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CURRENT_BOOT, POWER_SUPPLY_PROP_POWER_NOW, POWER_SUPPLY_PROP_POWER_AVG, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_EMPTY, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_CHARGE_AVG, POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD, /* in percents! */ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD, /* in percents! */ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT, POWER_SUPPLY_PROP_INPUT_POWER_LIMIT, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, POWER_SUPPLY_PROP_ENERGY_FULL, POWER_SUPPLY_PROP_ENERGY_EMPTY, POWER_SUPPLY_PROP_ENERGY_NOW, POWER_SUPPLY_PROP_ENERGY_AVG, POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! 
*/ POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MAX, POWER_SUPPLY_PROP_TEMP_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MAX, POWER_SUPPLY_PROP_TEMP_AMBIENT, POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX, POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */ POWER_SUPPLY_PROP_USB_TYPE, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_PRECHARGE_CURRENT, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, POWER_SUPPLY_PROP_CALIBRATE, /* Properties of type `const char *' */ POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_SERIAL_NUMBER, }; enum power_supply_type { POWER_SUPPLY_TYPE_UNKNOWN = 0, POWER_SUPPLY_TYPE_BATTERY, POWER_SUPPLY_TYPE_UPS, POWER_SUPPLY_TYPE_MAINS, POWER_SUPPLY_TYPE_USB, /* Standard Downstream Port */ POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */ POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */ POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */ POWER_SUPPLY_TYPE_USB_TYPE_C, /* Type C Port */ POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */ POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */ POWER_SUPPLY_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */ }; enum power_supply_usb_type { POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, POWER_SUPPLY_USB_TYPE_SDP, /* Standard Downstream Port */ POWER_SUPPLY_USB_TYPE_DCP, /* Dedicated Charging Port */ POWER_SUPPLY_USB_TYPE_CDP, /* Charging Downstream Port */ POWER_SUPPLY_USB_TYPE_ACA, /* Accessory Charger Adapters */ POWER_SUPPLY_USB_TYPE_C, /* Type C Port */ POWER_SUPPLY_USB_TYPE_PD, /* Power Delivery Port */ POWER_SUPPLY_USB_TYPE_PD_DRP, /* PD Dual Role Port */ POWER_SUPPLY_USB_TYPE_PD_PPS, /* PD Programmable Power Supply */ POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */ }; enum power_supply_notifier_events { PSY_EVENT_PROP_CHANGED, }; union power_supply_propval { int intval; const char *strval; }; struct device_node; struct power_supply; /* Run-time specific power supply configuration */ struct power_supply_config { struct device_node *of_node; struct fwnode_handle *fwnode; /* Driver private data */ void *drv_data; /* Device specific sysfs attributes */ const struct attribute_group **attr_grp; char **supplied_to; size_t num_supplicants; }; /* Description of power supply */ struct power_supply_desc { const char *name; enum power_supply_type type; enum power_supply_usb_type *usb_types; size_t num_usb_types; enum power_supply_property *properties; size_t num_properties; /* * Functions for drivers implementing power supply class. * These shouldn't be called directly by other drivers for accessing * this power supply. Instead use power_supply_*() functions (for * example power_supply_get_property()). */ int (*get_property)(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val); int (*set_property)(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val); /* * property_is_writeable() will be called during registration * of power supply. If this happens during device probe then it must * not access internal data of device (because probe did not end). 
int (*property_is_writeable)(struct power_supply *psy, enum power_supply_property psp); void (*external_power_changed)(struct power_supply *psy); void (*set_charged)(struct power_supply *psy); /* * Set if thermal zone should not be created for this power supply. * For example for virtual supplies forwarding calls to actual * sensors or other supplies. */ bool no_thermal; /* For APM emulation, think legacy userspace. */ int use_for_apm; }; struct power_supply { const struct power_supply_desc *desc; char **supplied_to; size_t num_supplicants; char **supplied_from; size_t num_supplies; struct device_node *of_node; /* Driver private data */ void *drv_data; /* private */ struct device dev; struct work_struct changed_work; struct delayed_work deferred_register_work; spinlock_t changed_lock; bool changed; bool initialized; bool removing; atomic_t use_cnt; #ifdef CONFIG_THERMAL struct thermal_zone_device *tzd; struct thermal_cooling_device *tcd; #endif #ifdef CONFIG_LEDS_TRIGGERS struct led_trigger *charging_full_trig; char *charging_full_trig_name; struct led_trigger *charging_trig; char *charging_trig_name; struct led_trigger *full_trig; char *full_trig_name; struct led_trigger *online_trig; char *online_trig_name; struct led_trigger *charging_blink_full_solid_trig; char *charging_blink_full_solid_trig_name; #endif }; /* * This is the recommended structure for specifying static power supply * parameters. It is a generic one, parametrizable for different power * supplies. The power supply class itself does not use it, but it is what * most platform drivers implementing power supplies should try to reuse * for consistency. */ struct power_supply_info { const char *name; int technology; int voltage_max_design; int voltage_min_design; int charge_full_design; int charge_empty_design; int energy_full_design; int energy_empty_design; int use_for_apm; }; struct power_supply_battery_ocv_table { int ocv; /* microVolts */ int capacity; /* percent */ }; #define POWER_SUPPLY_OCV_TEMP_MAX 20 /* * This is the recommended struct to manage static battery parameters, * populated by power_supply_get_battery_info(). Most platform drivers should * use these for consistency. * Its field names must correspond to elements in enum power_supply_property. * The default field value is -EINVAL. * Power supply class itself doesn't use this. 
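 *
 * A fuel-gauge or charger driver might consume it like this (sketch;
 * error handling elided, set_design_capacity() is hypothetical):
 *
 *	struct power_supply_battery_info info;
 *
 *	if (!power_supply_get_battery_info(psy, &info) &&
 *	    info.charge_full_design_uah >= 0)
 *		set_design_capacity(info.charge_full_design_uah);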
*/ struct power_supply_battery_info { int energy_full_design_uwh; /* microWatt-hours */ int charge_full_design_uah; /* microAmp-hours */ int voltage_min_design_uv; /* microVolts */ int voltage_max_design_uv; /* microVolts */ int precharge_current_ua; /* microAmps */ int charge_term_current_ua; /* microAmps */ int constant_charge_current_max_ua; /* microAmps */ int constant_charge_voltage_max_uv; /* microVolts */ int factory_internal_resistance_uohm; /* microOhms */ int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */ struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX]; int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; }; extern struct atomic_notifier_head power_supply_notifier; extern int power_supply_reg_notifier(struct notifier_block *nb); extern void power_supply_unreg_notifier(struct notifier_block *nb); extern struct power_supply *power_supply_get_by_name(const char *name); extern void power_supply_put(struct power_supply *psy); #ifdef CONFIG_OF extern struct power_supply *power_supply_get_by_phandle(struct device_node *np, const char *property); extern struct power_supply *devm_power_supply_get_by_phandle( struct device *dev, const char *property); #else /* !CONFIG_OF */ static inline struct power_supply * power_supply_get_by_phandle(struct device_node *np, const char *property) { return NULL; } static inline struct power_supply * devm_power_supply_get_by_phandle(struct device *dev, const char *property) { return NULL; } #endif /* CONFIG_OF */ extern int power_supply_get_battery_info(struct power_supply *psy, struct power_supply_battery_info *info); extern void power_supply_put_battery_info(struct power_supply *psy, struct power_supply_battery_info *info); extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, int table_len, int ocv); extern struct power_supply_battery_ocv_table * power_supply_find_ocv2cap_table(struct power_supply_battery_info *info, int temp, int *table_len); extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, int ocv, int temp); extern void power_supply_changed(struct power_supply *psy); extern int power_supply_am_i_supplied(struct power_supply *psy); int power_supply_get_property_from_supplier(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val); extern int power_supply_set_battery_charged(struct power_supply *psy); #ifdef CONFIG_POWER_SUPPLY extern int power_supply_is_system_supplied(void); #else static inline int power_supply_is_system_supplied(void) { return -ENOSYS; } #endif extern int power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val); extern int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val); extern int power_supply_property_is_writeable(struct power_supply *psy, enum power_supply_property psp); extern void power_supply_external_power_changed(struct power_supply *psy); extern struct power_supply *__must_check power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg); extern struct power_supply *__must_check power_supply_register_no_ws(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg); extern struct power_supply *__must_check devm_power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg); extern struct power_supply *__must_check 
devm_power_supply_register_no_ws(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg); extern void power_supply_unregister(struct power_supply *psy); extern int power_supply_powers(struct power_supply *psy, struct device *dev); #define to_power_supply(device) container_of(device, struct power_supply, dev) extern void *power_supply_get_drvdata(struct power_supply *psy); /* For APM emulation, think legacy userspace. */ extern struct class *power_supply_class; static inline bool power_supply_is_amp_property(enum power_supply_property psp) { switch (psp) { case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN: case POWER_SUPPLY_PROP_CHARGE_FULL: case POWER_SUPPLY_PROP_CHARGE_EMPTY: case POWER_SUPPLY_PROP_CHARGE_NOW: case POWER_SUPPLY_PROP_CHARGE_AVG: case POWER_SUPPLY_PROP_CHARGE_COUNTER: case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: case POWER_SUPPLY_PROP_CURRENT_MAX: case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_CURRENT_AVG: case POWER_SUPPLY_PROP_CURRENT_BOOT: return 1; default: break; } return 0; } static inline bool power_supply_is_watt_property(enum power_supply_property psp) { switch (psp) { case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: case POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN: case POWER_SUPPLY_PROP_ENERGY_FULL: case POWER_SUPPLY_PROP_ENERGY_EMPTY: case POWER_SUPPLY_PROP_ENERGY_NOW: case POWER_SUPPLY_PROP_ENERGY_AVG: case POWER_SUPPLY_PROP_VOLTAGE_MAX: case POWER_SUPPLY_PROP_VOLTAGE_MIN: case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: case POWER_SUPPLY_PROP_VOLTAGE_NOW: case POWER_SUPPLY_PROP_VOLTAGE_AVG: case POWER_SUPPLY_PROP_VOLTAGE_OCV: case POWER_SUPPLY_PROP_VOLTAGE_BOOT: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: case POWER_SUPPLY_PROP_POWER_NOW: return 1; default: break; } return 0; } #ifdef CONFIG_POWER_SUPPLY_HWMON int power_supply_add_hwmon_sysfs(struct power_supply *psy); void power_supply_remove_hwmon_sysfs(struct power_supply *psy); #else static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy) { return 0; } static inline void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {} #endif #endif /* __LINUX_POWER_SUPPLY_H__ */ pci-epf.h 0000644 00000011404 14722070374 0006246 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /** * PCI Endpoint *Function* (EPF) header file * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <kishon@ti.com> */ #ifndef __LINUX_PCI_EPF_H #define __LINUX_PCI_EPF_H #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/pci.h> struct pci_epf; enum pci_barno { BAR_0, BAR_1, BAR_2, BAR_3, BAR_4, BAR_5, }; /** * struct pci_epf_header - represents standard configuration header * @vendorid: identifies device manufacturer * @deviceid: identifies a particular device * @revid: specifies a device-specific revision identifier * @progif_code: identifies a specific register-level programming interface * @subclass_code: identifies more specifically the function of the device * @baseclass_code: broadly classifies the type of function the device performs * @cache_line_size: specifies the system cacheline size in units of DWORDs * @subsys_vendor_id: vendor of the add-in card or subsystem * @subsys_id: id specific to vendor * @interrupt_pin: interrupt pin the device (or device 
function) uses */ struct pci_epf_header { u16 vendorid; u16 deviceid; u8 revid; u8 progif_code; u8 subclass_code; u8 baseclass_code; u8 cache_line_size; u16 subsys_vendor_id; u16 subsys_id; enum pci_interrupt_pin interrupt_pin; }; /** * struct pci_epf_ops - set of function pointers for performing EPF operations * @bind: ops to perform when a EPC device has been bound to EPF device * @unbind: ops to perform when a binding has been lost between a EPC device * and EPF device * @linkup: ops to perform when the EPC device has established a connection with * a host system */ struct pci_epf_ops { int (*bind)(struct pci_epf *epf); void (*unbind)(struct pci_epf *epf); void (*linkup)(struct pci_epf *epf); }; /** * struct pci_epf_driver - represents the PCI EPF driver * @probe: ops to perform when a new EPF device has been bound to the EPF driver * @remove: ops to perform when the binding between the EPF device and EPF * driver is broken * @driver: PCI EPF driver * @ops: set of function pointers for performing EPF operations * @owner: the owner of the module that registers the PCI EPF driver * @epf_group: list of configfs group corresponding to the PCI EPF driver * @id_table: identifies EPF devices for probing */ struct pci_epf_driver { int (*probe)(struct pci_epf *epf); int (*remove)(struct pci_epf *epf); struct device_driver driver; struct pci_epf_ops *ops; struct module *owner; struct list_head epf_group; const struct pci_epf_device_id *id_table; }; #define to_pci_epf_driver(drv) (container_of((drv), struct pci_epf_driver, \ driver)) /** * struct pci_epf_bar - represents the BAR of EPF device * @phys_addr: physical address that should be mapped to the BAR * @size: the size of the address space present in BAR */ struct pci_epf_bar { dma_addr_t phys_addr; size_t size; enum pci_barno barno; int flags; }; /** * struct pci_epf - represents the PCI EPF device * @dev: the PCI EPF device * @name: the name of the PCI EPF device * @header: represents standard configuration header * @bar: represents the BAR of EPF device * @msi_interrupts: number of MSI interrupts required by this function * @func_no: unique function number within this endpoint device * @epc: the EPC device to which this EPF device is bound * @driver: the EPF driver to which this EPF device is bound * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc */ struct pci_epf { struct device dev; const char *name; struct pci_epf_header *header; struct pci_epf_bar bar[6]; u8 msi_interrupts; u16 msix_interrupts; u8 func_no; struct pci_epc *epc; struct pci_epf_driver *driver; struct list_head list; }; #define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev) #define pci_epf_register_driver(driver) \ __pci_epf_register_driver((driver), THIS_MODULE) static inline void epf_set_drvdata(struct pci_epf *epf, void *data) { dev_set_drvdata(&epf->dev, data); } static inline void *epf_get_drvdata(struct pci_epf *epf) { return dev_get_drvdata(&epf->dev); } const struct pci_epf_device_id * pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf); struct pci_epf *pci_epf_create(const char *name); void pci_epf_destroy(struct pci_epf *epf); int __pci_epf_register_driver(struct pci_epf_driver *driver, struct module *owner); void pci_epf_unregister_driver(struct pci_epf_driver *driver); void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, size_t align); void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar); int pci_epf_bind(struct pci_epf *epf); void 
pci_epf_unbind(struct pci_epf *epf); void pci_epf_linkup(struct pci_epf *epf); #endif /* __LINUX_PCI_EPF_H */ intel-pti.h 0000644 00000002265 14722070374 0006635 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) Intel 2011 * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * The PTI (Parallel Trace Interface) driver directs trace data routed from * various parts in the system out through the Intel Penwell PTI port and * out of the mobile device for analysis with a debugging tool * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, * compact JTAG, standard. * * This header file will allow other parts of the OS to use the * interface to write out its contents for debugging a mobile system. */ #ifndef LINUX_INTEL_PTI_H_ #define LINUX_INTEL_PTI_H_ /* offset for last dword of any PTI message. Part of MIPI P1149.7 */ #define PTI_LASTDWORD_DTS 0x30 /* basic structure used as a write address to the PTI HW */ struct pti_masterchannel { u8 master; u8 channel; }; /* the following functions are defined in misc/pti.c */ void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count); struct pti_masterchannel *pti_request_masterchannel(u8 type, const char *thread_name); void pti_release_masterchannel(struct pti_masterchannel *mc); #endif /* LINUX_INTEL_PTI_H_ */ mvebu-pmsu.h 0000644 00000001010 14722070374 0007015 0 ustar 00 /* * Copyright (C) 2012 Marvell * * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #ifndef __MVEBU_PMSU_H__ #define __MVEBU_PMSU_H__ #ifdef CONFIG_MACH_MVEBU_V7 int mvebu_pmsu_dfs_request(int cpu); #else static inline int mvebu_pmsu_dfs_request(int cpu) { return -ENODEV; } #endif #endif /* __MVEBU_PMSU_H__ */ oom.h 0000644 00000006415 14722070374 0005523 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __INCLUDE_LINUX_OOM_H #define __INCLUDE_LINUX_OOM_H #include <linux/sched/signal.h> #include <linux/types.h> #include <linux/nodemask.h> #include <uapi/linux/oom.h> #include <linux/sched/coredump.h> /* MMF_* */ #include <linux/mm.h> /* VM_FAULT* */ struct zonelist; struct notifier_block; struct mem_cgroup; struct task_struct; enum oom_constraint { CONSTRAINT_NONE, CONSTRAINT_CPUSET, CONSTRAINT_MEMORY_POLICY, CONSTRAINT_MEMCG, }; /* * Details of the page allocation that triggered the oom killer, used to * determine what should be killed. */ struct oom_control { /* Used to determine cpuset */ struct zonelist *zonelist; /* Used to determine mempolicy */ nodemask_t *nodemask; /* Memory cgroup in which oom is invoked, or NULL for global oom */ struct mem_cgroup *memcg; /* Used to determine cpuset and node locality requirement */ const gfp_t gfp_mask; /* * order == -1 means the oom kill was requested via sysrq; otherwise the * order is used only for display purposes. */ const int order; /* Used by oom implementation, do not set */ unsigned long totalpages; struct task_struct *chosen; long chosen_points; /* Used to print the constraint info.
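 * (one of the enum oom_constraint values above; filled in by the oom
 * killer itself rather than by the caller)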
*/ enum oom_constraint constraint; }; extern struct mutex oom_lock; extern struct mutex oom_adj_mutex; static inline void set_current_oom_origin(void) { current->signal->oom_flag_origin = true; } static inline void clear_current_oom_origin(void) { current->signal->oom_flag_origin = false; } static inline bool oom_task_origin(const struct task_struct *p) { return p->signal->oom_flag_origin; } static inline bool tsk_is_oom_victim(struct task_struct *tsk) { return tsk->signal->oom_mm; } /* * Use this helper if tsk->mm != mm and the victim mm needs a special * handling. This is guaranteed to stay true once set. */ static inline bool mm_is_oom_victim(struct mm_struct *mm) { return test_bit(MMF_OOM_VICTIM, &mm->flags); } /* * Checks whether a page fault on the given mm is still reliable. * This is no longer true if the oom reaper started to reap the * address space which is reflected by MMF_UNSTABLE flag set in * the mm. At that moment any !shared mapping would lose the content * and could cause a memory corruption (zero pages instead of the * original content). * * User should call this before establishing a page table entry for * a !shared mapping and under the proper page table lock. * * Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise. */ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm) { if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags))) return VM_FAULT_SIGBUS; return 0; } bool __oom_reap_task_mm(struct mm_struct *mm); long oom_badness(struct task_struct *p, unsigned long totalpages); extern bool out_of_memory(struct oom_control *oc); extern void exit_oom_victim(void); extern int register_oom_notifier(struct notifier_block *nb); extern int unregister_oom_notifier(struct notifier_block *nb); extern bool oom_killer_disable(signed long timeout); extern void oom_killer_enable(void); extern struct task_struct *find_lock_task_mm(struct task_struct *p); /* sysctls */ extern int sysctl_oom_dump_tasks; extern int sysctl_oom_kill_allocating_task; extern int sysctl_panic_on_oom; #endif /* __INCLUDE_LINUX_OOM_H */ vgaarb.h 0000644 00000012370 14722070374 0006170 0 ustar 00 /* * The VGA arbiter manages VGA space routing and VGA resource decode to * allow multiple VGA devices to be used in a system in a safe way. * * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #ifndef LINUX_VGA_H #define LINUX_VGA_H #include <video/vga.h> /* Legacy VGA regions */ #define VGA_RSRC_NONE 0x00 #define VGA_RSRC_LEGACY_IO 0x01 #define VGA_RSRC_LEGACY_MEM 0x02 #define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM) /* Non-legacy access */ #define VGA_RSRC_NORMAL_IO 0x04 #define VGA_RSRC_NORMAL_MEM 0x08 /* Pass this instead of a pci_dev to use the system "default" * device, that is, the one used by vgacon. Archs will probably * have to provide their own vga_default_device(); */ #define VGA_DEFAULT_DEVICE (NULL) struct pci_dev; /* For use by clients */ /** * vga_set_legacy_decoding * * @pdev: pci device of the VGA card * @decodes: bit mask of what legacy regions the card decodes * * Indicates to the arbiter if the card decodes legacy VGA IOs, * legacy VGA Memory, both, or none. All cards default to both, * the card driver (fbdev for example) should tell the arbiter * if it has disabled legacy decoding, so the card can be left * out of the arbitration process (and can be safe to take * interrupts at any time). */ #if defined(CONFIG_VGA_ARB) extern void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes); #else static inline void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes) { }; #endif #if defined(CONFIG_VGA_ARB) extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); #else static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; } #endif /** * vga_get_interruptible - shortcut to vga_get() * @pdev: pci device of the VGA card or NULL for the system default * @rsrc: bit mask of resources to acquire and lock * * Shortcut to vga_get with interruptible set to true. * * On success, release the VGA resource again with vga_put().
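 *
 * A minimal usage sketch (error handling elided; @pdev is assumed to be a
 * valid PCI VGA device):
 *
 *	if (vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO) == 0) {
 *		...access the legacy VGA IO range...
 *		vga_put(pdev, VGA_RSRC_LEGACY_IO);
 *	}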
*/ static inline int vga_get_uninterruptible(struct pci_dev *pdev, unsigned int rsrc) { return vga_get(pdev, rsrc, 0); } #if defined(CONFIG_VGA_ARB) extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); #else static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; } #endif #if defined(CONFIG_VGA_ARB) extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); #else #define vga_put(pdev, rsrc) #endif #ifdef CONFIG_VGA_ARB extern struct pci_dev *vga_default_device(void); extern void vga_set_default_device(struct pci_dev *pdev); extern int vga_remove_vgacon(struct pci_dev *pdev); #else static inline struct pci_dev *vga_default_device(void) { return NULL; }; static inline void vga_set_default_device(struct pci_dev *pdev) { }; static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; }; #endif /* * Architectures should define this if they have several * independent PCI domains that can afford concurrent VGA * decoding */ #ifndef __ARCH_HAS_VGA_CONFLICT static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2) { return 1; } #endif #if defined(CONFIG_VGA_ARB) int vga_client_register(struct pci_dev *pdev, void *cookie, void (*irq_set_state)(void *cookie, bool state), unsigned int (*set_vga_decode)(void *cookie, bool state)); #else static inline int vga_client_register(struct pci_dev *pdev, void *cookie, void (*irq_set_state)(void *cookie, bool state), unsigned int (*set_vga_decode)(void *cookie, bool state)) { return 0; } #endif #endif /* LINUX_VGA_H */ mtd/nand_bch.h 0000644 00000002726 14722070374 0007252 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> * * This file is the header for the NAND BCH ECC implementation. */ #ifndef __MTD_NAND_BCH_H__ #define __MTD_NAND_BCH_H__ struct mtd_info; struct nand_chip; struct nand_bch_control; #if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH) static inline int mtd_nand_has_bch(void) { return 1; } /* * Calculate BCH ecc code */ int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat, u_char *ecc_code); /* * Detect and correct bit errors */ int nand_bch_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc, u_char *calc_ecc); /* * Initialize BCH encoder/decoder */ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd); /* * Release BCH encoder/decoder resources */ void nand_bch_free(struct nand_bch_control *nbc); #else /* !CONFIG_MTD_NAND_ECC_SW_BCH */ static inline int mtd_nand_has_bch(void) { return 0; } static inline int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat, u_char *ecc_code) { return -1; } static inline int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf, unsigned char *read_ecc, unsigned char *calc_ecc) { return -ENOTSUPP; } static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) { return NULL; } static inline void nand_bch_free(struct nand_bch_control *nbc) {} #endif /* CONFIG_MTD_NAND_ECC_SW_BCH */ #endif /* __MTD_NAND_BCH_H__ */ mtd/doc2000.h 0000644 00000012601 14722070374 0006556 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux driver for Disk-On-Chip devices * * Copyright © 1999 Machine Vision Holdings, Inc. 
* Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> * Copyright © 2002-2003 Greg Ungerer <gerg@snapgear.com> * Copyright © 2002-2003 SnapGear Inc */ #ifndef __MTD_DOC2000_H__ #define __MTD_DOC2000_H__ #include <linux/mtd/mtd.h> #include <linux/mutex.h> #define DoC_Sig1 0 #define DoC_Sig2 1 #define DoC_ChipID 0x1000 #define DoC_DOCStatus 0x1001 #define DoC_DOCControl 0x1002 #define DoC_FloorSelect 0x1003 #define DoC_CDSNControl 0x1004 #define DoC_CDSNDeviceSelect 0x1005 #define DoC_ECCConf 0x1006 #define DoC_2k_ECCStatus 0x1007 #define DoC_CDSNSlowIO 0x100d #define DoC_ECCSyndrome0 0x1010 #define DoC_ECCSyndrome1 0x1011 #define DoC_ECCSyndrome2 0x1012 #define DoC_ECCSyndrome3 0x1013 #define DoC_ECCSyndrome4 0x1014 #define DoC_ECCSyndrome5 0x1015 #define DoC_AliasResolution 0x101b #define DoC_ConfigInput 0x101c #define DoC_ReadPipeInit 0x101d #define DoC_WritePipeTerm 0x101e #define DoC_LastDataRead 0x101f #define DoC_NOP 0x1020 #define DoC_Mil_CDSN_IO 0x0800 #define DoC_2k_CDSN_IO 0x1800 #define DoC_Mplus_NOP 0x1002 #define DoC_Mplus_AliasResolution 0x1004 #define DoC_Mplus_DOCControl 0x1006 #define DoC_Mplus_AccessStatus 0x1008 #define DoC_Mplus_DeviceSelect 0x1008 #define DoC_Mplus_Configuration 0x100a #define DoC_Mplus_OutputControl 0x100c #define DoC_Mplus_FlashControl 0x1020 #define DoC_Mplus_FlashSelect 0x1022 #define DoC_Mplus_FlashCmd 0x1024 #define DoC_Mplus_FlashAddress 0x1026 #define DoC_Mplus_FlashData0 0x1028 #define DoC_Mplus_FlashData1 0x1029 #define DoC_Mplus_ReadPipeInit 0x102a #define DoC_Mplus_LastDataRead 0x102c #define DoC_Mplus_LastDataRead1 0x102d #define DoC_Mplus_WritePipeTerm 0x102e #define DoC_Mplus_ECCSyndrome0 0x1040 #define DoC_Mplus_ECCSyndrome1 0x1041 #define DoC_Mplus_ECCSyndrome2 0x1042 #define DoC_Mplus_ECCSyndrome3 0x1043 #define DoC_Mplus_ECCSyndrome4 0x1044 #define DoC_Mplus_ECCSyndrome5 0x1045 #define DoC_Mplus_ECCConf 0x1046 #define DoC_Mplus_Toggle 0x1046 #define DoC_Mplus_DownloadStatus 0x1074 #define DoC_Mplus_CtrlConfirm 0x1076 #define DoC_Mplus_Power 0x1fff /* How to access the device? * On ARM, it'll be mmap'd directly with 32-bit wide accesses. * On PPC, it's mmap'd and 16-bit wide. 
* Others use readb/writeb */ #if defined(__arm__) static inline u8 ReadDOC_(u32 __iomem *addr, unsigned long reg) { return __raw_readl(addr + reg); } static inline void WriteDOC_(u8 data, u32 __iomem *addr, unsigned long reg) { __raw_writel(data, addr + reg); wmb(); } #define DOC_IOREMAP_LEN 0x8000 #elif defined(__ppc__) static inline u8 ReadDOC_(u16 __iomem *addr, unsigned long reg) { return __raw_readw(addr + reg); } static inline void WriteDOC_(u8 data, u16 __iomem *addr, unsigned long reg) { __raw_writew(data, addr + reg); wmb(); } #define DOC_IOREMAP_LEN 0x4000 #else #define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg)) #define WriteDOC_(d, adr, reg) writeb(d, (void __iomem *)(adr) + (reg)) #define DOC_IOREMAP_LEN 0x2000 #endif #if defined(__i386__) || defined(__x86_64__) #define USE_MEMCPY #endif /* These are provided to directly use the DoC_xxx defines */ #define ReadDOC(adr, reg) ReadDOC_(adr,DoC_##reg) #define WriteDOC(d, adr, reg) WriteDOC_(d,adr,DoC_##reg) #define DOC_MODE_RESET 0 #define DOC_MODE_NORMAL 1 #define DOC_MODE_RESERVED1 2 #define DOC_MODE_RESERVED2 3 #define DOC_MODE_CLR_ERR 0x80 #define DOC_MODE_RST_LAT 0x10 #define DOC_MODE_BDECT 0x08 #define DOC_MODE_MDWREN 0x04 #define DOC_ChipID_Doc2k 0x20 #define DOC_ChipID_Doc2kTSOP 0x21 /* internal number for MTD */ #define DOC_ChipID_DocMil 0x30 #define DOC_ChipID_DocMilPlus32 0x40 #define DOC_ChipID_DocMilPlus16 0x41 #define CDSN_CTRL_FR_B 0x80 #define CDSN_CTRL_FR_B0 0x40 #define CDSN_CTRL_FR_B1 0x80 #define CDSN_CTRL_ECC_IO 0x20 #define CDSN_CTRL_FLASH_IO 0x10 #define CDSN_CTRL_WP 0x08 #define CDSN_CTRL_ALE 0x04 #define CDSN_CTRL_CLE 0x02 #define CDSN_CTRL_CE 0x01 #define DOC_ECC_RESET 0 #define DOC_ECC_ERROR 0x80 #define DOC_ECC_RW 0x20 #define DOC_ECC__EN 0x08 #define DOC_TOGGLE_BIT 0x04 #define DOC_ECC_RESV 0x02 #define DOC_ECC_IGNORE 0x01 #define DOC_FLASH_CE 0x80 #define DOC_FLASH_WP 0x40 #define DOC_FLASH_BANK 0x02 /* We have to also set the reserved bit 1 for enable */ #define DOC_ECC_EN (DOC_ECC__EN | DOC_ECC_RESV) #define DOC_ECC_DIS (DOC_ECC_RESV) struct Nand { char floor, chip; unsigned long curadr; unsigned char curmode; /* Also some erase/write/pipeline info when we get that far */ }; #define MAX_FLOORS 4 #define MAX_CHIPS 4 #define MAX_FLOORS_MIL 1 #define MAX_CHIPS_MIL 1 #define MAX_FLOORS_MPLUS 2 #define MAX_CHIPS_MPLUS 1 #define ADDR_COLUMN 1 #define ADDR_PAGE 2 #define ADDR_COLUMN_PAGE 3 struct DiskOnChip { unsigned long physadr; void __iomem *virtadr; unsigned long totlen; unsigned char ChipID; /* Type of DiskOnChip */ int ioreg; unsigned long mfr; /* Flash IDs - only one type of flash per device */ unsigned long id; int chipshift; char page256; char pageadrlen; char interleave; /* Internal interleaving - Millennium Plus style */ unsigned long erasesize; int curfloor; int curchip; int numchips; struct Nand *chips; struct mtd_info *nextdoc; struct mutex lock; }; int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]); #endif /* __MTD_DOC2000_H__ */ mtd/hyperbus.h 0000644 00000004747 14722070374 0007364 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 * * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ */ #ifndef __LINUX_MTD_HYPERBUS_H__ #define __LINUX_MTD_HYPERBUS_H__ #include <linux/mtd/map.h> enum hyperbus_memtype { HYPERFLASH, HYPERRAM, }; /** * struct hyperbus_device - struct representing HyperBus slave device * @map: map_info struct for accessing MMIO HyperBus flash memory * @np: pointer to HyperBus slave device node * @mtd: pointer to MTD struct * 
@ctlr: pointer to HyperBus controller struct * @memtype: type of memory device: HyperFlash or HyperRAM */ struct hyperbus_device { struct map_info map; struct device_node *np; struct mtd_info *mtd; struct hyperbus_ctlr *ctlr; enum hyperbus_memtype memtype; }; /** * struct hyperbus_ops - struct representing custom HyperBus operations * @read16: read 16 bit of data from flash in a single burst. Used to read * from non default address space, such as ID/CFI space * @write16: write 16 bit of data to flash in a single burst. Used to * send cmd to flash or write single 16 bit word at a time. * @copy_from: copy data from flash memory * @copy_to: copy data to flash memory * @calibrate: calibrate HyperBus controller */ struct hyperbus_ops { u16 (*read16)(struct hyperbus_device *hbdev, unsigned long addr); void (*write16)(struct hyperbus_device *hbdev, unsigned long addr, u16 val); void (*copy_from)(struct hyperbus_device *hbdev, void *to, unsigned long from, ssize_t len); void (*copy_to)(struct hyperbus_device *dev, unsigned long to, const void *from, ssize_t len); int (*calibrate)(struct hyperbus_device *dev); }; /** * struct hyperbus_ctlr - struct representing HyperBus controller * @dev: pointer to HyperBus controller device * @calibrated: flag to indicate ctlr calibration sequence is complete * @ops: HyperBus controller ops */ struct hyperbus_ctlr { struct device *dev; bool calibrated; const struct hyperbus_ops *ops; }; /** * hyperbus_register_device - probe and register a HyperBus slave memory device * @hbdev: hyperbus_device struct with dev, np and ctlr field populated * * Return: 0 for success, others for failure. */ int hyperbus_register_device(struct hyperbus_device *hbdev); /** * hyperbus_unregister_device - deregister HyperBus slave memory device * @hbdev: hyperbus_device to be unregistered * * Return: 0 for success, others for failure. */ int hyperbus_unregister_device(struct hyperbus_device *hbdev); #endif /* __LINUX_MTD_HYPERBUS_H__ */ mtd/spi-nor.h 0000644 00000055675 14722070374 0007120 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2014 Freescale Semiconductor, Inc. */ #ifndef __LINUX_MTD_SPI_NOR_H #define __LINUX_MTD_SPI_NOR_H #include <linux/bitops.h> #include <linux/mtd/cfi.h> #include <linux/mtd/mtd.h> #include <linux/spi/spi-mem.h> /* * Manufacturer IDs * * The first byte returned from the flash after sending opcode SPINOR_OP_RDID. * Sometimes these are the same as CFI IDs, but sometimes they aren't. */ #define SNOR_MFR_ATMEL CFI_MFR_ATMEL #define SNOR_MFR_GIGADEVICE 0xc8 #define SNOR_MFR_INTEL CFI_MFR_INTEL #define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */ #define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */ #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX #define SNOR_MFR_SPANSION CFI_MFR_AMD #define SNOR_MFR_SST CFI_MFR_SST #define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */ /* * Note on opcode nomenclature: some opcodes have a format like * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number * of I/O lines used for the opcode, address, and data (respectively). The * FUNCTION has an optional suffix of '4', to represent an opcode which * requires a 4-byte (32-bit) address. */ /* Flash opcodes. 
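 * (Per the nomenclature note above: SPINOR_OP_READ_1_4_4, for instance,
 * transfers the opcode on one I/O line and the address and data on four
 * I/O lines each.)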
*/ #define SPINOR_OP_WREN 0x06 /* Write enable */ #define SPINOR_OP_RDSR 0x05 /* Read status register */ #define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ #define SPINOR_OP_RDSR2 0x3f /* Read status register 2 */ #define SPINOR_OP_WRSR2 0x3e /* Write status register 2 */ #define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */ #define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */ #define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */ #define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */ #define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */ #define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */ #define SPINOR_OP_READ_1_1_8 0x8b /* Read data bytes (Octal Output SPI) */ #define SPINOR_OP_READ_1_8_8 0xcb /* Read data bytes (Octal I/O SPI) */ #define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ #define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */ #define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */ #define SPINOR_OP_PP_1_1_8 0x82 /* Octal page program */ #define SPINOR_OP_PP_1_8_8 0xc2 /* Octal page program */ #define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ #define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ #define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ #define SPINOR_OP_CHIP_ERASE 0xc7 /* Erase whole flash chip */ #define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */ #define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */ #define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */ #define SPINOR_OP_RDCR 0x35 /* Read configuration register */ #define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ #define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ #define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */ #define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */ /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ #define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ #define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */ #define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */ #define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */ #define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ #define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ #define SPINOR_OP_READ_1_1_8_4B 0x7c /* Read data bytes (Octal Output SPI) */ #define SPINOR_OP_READ_1_8_8_4B 0xcc /* Read data bytes (Octal I/O SPI) */ #define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ #define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */ #define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */ #define SPINOR_OP_PP_1_1_8_4B 0x84 /* Octal page program */ #define SPINOR_OP_PP_1_8_8_4B 0x8e /* Octal page program */ #define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */ #define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */ #define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ /* Double Transfer Rate opcodes - defined in JEDEC JESD216B. */ #define SPINOR_OP_READ_1_1_1_DTR 0x0d #define SPINOR_OP_READ_1_2_2_DTR 0xbd #define SPINOR_OP_READ_1_4_4_DTR 0xed #define SPINOR_OP_READ_1_1_1_DTR_4B 0x0e #define SPINOR_OP_READ_1_2_2_DTR_4B 0xbe #define SPINOR_OP_READ_1_4_4_DTR_4B 0xee /* Used for SST flashes only. 
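 * (Instead of a full page program, SST parts write one byte via SPINOR_OP_BP,
 * or two bytes per auto-address-increment cycle via SPINOR_OP_AAI_WP.)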
*/ #define SPINOR_OP_BP 0x02 /* Byte program */ #define SPINOR_OP_WRDI 0x04 /* Write disable */ #define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ /* Used for S3AN flashes only */ #define SPINOR_OP_XSE 0x50 /* Sector erase */ #define SPINOR_OP_XPP 0x82 /* Page program */ #define SPINOR_OP_XRDSR 0xd7 /* Read status register */ #define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */ #define XSR_RDY BIT(7) /* Ready */ /* Used for Macronix and Winbond flashes. */ #define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ #define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ /* Used for Spansion flashes only. */ #define SPINOR_OP_BRWR 0x17 /* Bank register write */ #define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ /* Used for Micron flashes only. */ #define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ /* Status Register bits. */ #define SR_WIP BIT(0) /* Write in progress */ #define SR_WEL BIT(1) /* Write enable latch */ /* meaning of other SR_* bits may differ between vendors */ #define SR_BP0 BIT(2) /* Block protect 0 */ #define SR_BP1 BIT(3) /* Block protect 1 */ #define SR_BP2 BIT(4) /* Block protect 2 */ #define SR_TB BIT(5) /* Top/Bottom protect */ #define SR_SRWD BIT(7) /* SR write protect */ /* Spansion/Cypress specific status bits */ #define SR_E_ERR BIT(5) #define SR_P_ERR BIT(6) #define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */ /* Enhanced Volatile Configuration Register bits */ #define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ /* Flag Status Register bits */ #define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */ #define FSR_E_ERR BIT(5) /* Erase operation status */ #define FSR_P_ERR BIT(4) /* Program operation status */ #define FSR_PT_ERR BIT(1) /* Protection error bit */ /* Configuration Register bits. */ #define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ /* Status Register 2 bits. 
*/ #define SR2_QUAD_EN_BIT7 BIT(7) /* Supported SPI protocols */ #define SNOR_PROTO_INST_MASK GENMASK(23, 16) #define SNOR_PROTO_INST_SHIFT 16 #define SNOR_PROTO_INST(_nbits) \ ((((unsigned long)(_nbits)) << SNOR_PROTO_INST_SHIFT) & \ SNOR_PROTO_INST_MASK) #define SNOR_PROTO_ADDR_MASK GENMASK(15, 8) #define SNOR_PROTO_ADDR_SHIFT 8 #define SNOR_PROTO_ADDR(_nbits) \ ((((unsigned long)(_nbits)) << SNOR_PROTO_ADDR_SHIFT) & \ SNOR_PROTO_ADDR_MASK) #define SNOR_PROTO_DATA_MASK GENMASK(7, 0) #define SNOR_PROTO_DATA_SHIFT 0 #define SNOR_PROTO_DATA(_nbits) \ ((((unsigned long)(_nbits)) << SNOR_PROTO_DATA_SHIFT) & \ SNOR_PROTO_DATA_MASK) #define SNOR_PROTO_IS_DTR BIT(24) /* Double Transfer Rate */ #define SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits) \ (SNOR_PROTO_INST(_inst_nbits) | \ SNOR_PROTO_ADDR(_addr_nbits) | \ SNOR_PROTO_DATA(_data_nbits)) #define SNOR_PROTO_DTR(_inst_nbits, _addr_nbits, _data_nbits) \ (SNOR_PROTO_IS_DTR | \ SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits)) enum spi_nor_protocol { SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1), SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2), SNOR_PROTO_1_1_4 = SNOR_PROTO_STR(1, 1, 4), SNOR_PROTO_1_1_8 = SNOR_PROTO_STR(1, 1, 8), SNOR_PROTO_1_2_2 = SNOR_PROTO_STR(1, 2, 2), SNOR_PROTO_1_4_4 = SNOR_PROTO_STR(1, 4, 4), SNOR_PROTO_1_8_8 = SNOR_PROTO_STR(1, 8, 8), SNOR_PROTO_2_2_2 = SNOR_PROTO_STR(2, 2, 2), SNOR_PROTO_4_4_4 = SNOR_PROTO_STR(4, 4, 4), SNOR_PROTO_8_8_8 = SNOR_PROTO_STR(8, 8, 8), SNOR_PROTO_1_1_1_DTR = SNOR_PROTO_DTR(1, 1, 1), SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2), SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4), SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8), }; static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto) { return !!(proto & SNOR_PROTO_IS_DTR); } static inline u8 spi_nor_get_protocol_inst_nbits(enum spi_nor_protocol proto) { return ((unsigned long)(proto & SNOR_PROTO_INST_MASK)) >> SNOR_PROTO_INST_SHIFT; } static inline u8 spi_nor_get_protocol_addr_nbits(enum spi_nor_protocol proto) { return ((unsigned long)(proto & SNOR_PROTO_ADDR_MASK)) >> SNOR_PROTO_ADDR_SHIFT; } static inline u8 spi_nor_get_protocol_data_nbits(enum spi_nor_protocol proto) { return ((unsigned long)(proto & SNOR_PROTO_DATA_MASK)) >> SNOR_PROTO_DATA_SHIFT; } static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto) { return spi_nor_get_protocol_data_nbits(proto); } enum spi_nor_ops { SPI_NOR_OPS_READ = 0, SPI_NOR_OPS_WRITE, SPI_NOR_OPS_ERASE, SPI_NOR_OPS_LOCK, SPI_NOR_OPS_UNLOCK, }; enum spi_nor_option_flags { SNOR_F_USE_FSR = BIT(0), SNOR_F_HAS_SR_TB = BIT(1), SNOR_F_NO_OP_CHIP_ERASE = BIT(2), SNOR_F_READY_XSR_RDY = BIT(3), SNOR_F_USE_CLSR = BIT(4), SNOR_F_BROKEN_RESET = BIT(5), SNOR_F_4B_OPCODES = BIT(6), SNOR_F_HAS_4BAIT = BIT(7), SNOR_F_HAS_LOCK = BIT(8), }; /** * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type * @size: the size of the sector/block erased by the erase type. * JEDEC JESD216B imposes erase sizes to be a power of 2. * @size_shift: @size is a power of 2, the shift is stored in * @size_shift. * @size_mask: the size mask based on @size_shift. * @opcode: the SPI command op code to erase the sector/block. * @idx: Erase Type index as sorted in the Basic Flash Parameter * Table. It will be used to synchronize the supported * Erase Types with the ones identified in the SFDP * optional tables. 
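 *
 * For example, a 4 KiB erase type has @size = 4096, @size_shift = 12 and
 * @size_mask = GENMASK(11, 0), since JESD216B guarantees that erase sizes
 * are powers of 2.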
*/ struct spi_nor_erase_type { u32 size; u32 size_shift; u32 size_mask; u8 opcode; u8 idx; }; /** * struct spi_nor_erase_command - Used for non-uniform erases * The structure is used to describe a list of erase commands to be executed * once we validate that the erase can be performed. The elements in the list * are run-length encoded. * @list: for inclusion into the list of erase commands. * @count: how many times the same erase command should be * consecutively used. * @size: the size of the sector/block erased by the command. * @opcode: the SPI command op code to erase the sector/block. */ struct spi_nor_erase_command { struct list_head list; u32 count; u32 size; u8 opcode; }; /** * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region * @offset: the offset in the data array of erase region start. * LSB bits are used as a bitmask encoding flags to * determine if this region is overlaid, if this region is * the last in the SPI NOR flash memory and to indicate * all the supported erase commands inside this region. * The erase types are sorted in ascending order with the * smallest Erase Type size being at BIT(0). * @size: the size of the region in bytes. */ struct spi_nor_erase_region { u64 offset; u64 size; }; #define SNOR_ERASE_TYPE_MAX 4 #define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0) #define SNOR_LAST_REGION BIT(4) #define SNOR_OVERLAID_REGION BIT(5) #define SNOR_ERASE_FLAGS_MAX 6 #define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0) /** * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map * @regions: array of erase regions. The regions are consecutive in * address space. Walking through the regions is done * incrementally. * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform * sector size (legacy implementation). * @erase_type: an array of erase types shared by all the regions. * The erase types are sorted in ascending order, with the * smallest Erase Type size being the first member in the * erase_type array. * @uniform_erase_type: bitmask encoding erase types that can erase the * entire memory. This member is completed at init by * uniform and non-uniform SPI NOR flash memories if they * support at least one erase type that can erase the * entire memory. */ struct spi_nor_erase_map { struct spi_nor_erase_region *regions; struct spi_nor_erase_region uniform_region; struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX]; u8 uniform_erase_type; }; /** * struct spi_nor_hwcaps - Structure for describing the hardware capabilities * supported by the SPI controller (bus master). * @mask: the bitmask listing all the supported hw capabilities */ struct spi_nor_hwcaps { u32 mask; }; /* * (Fast) Read capabilities. * MUST be ordered by priority: the higher bit position, the higher priority. * As a matter of performance, it is preferable to use Octal SPI protocols first, * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly * (Slow) Read.
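 *
 * A sketch of the selection idea: if a controller advertises both
 * SNOR_HWCAPS_READ_FAST (BIT(1)) and SNOR_HWCAPS_READ_1_1_4 (BIT(7)),
 * the core prefers the quad variant, e.g. via best = fls(mask) - 1.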
*/ #define SNOR_HWCAPS_READ_MASK GENMASK(14, 0) #define SNOR_HWCAPS_READ BIT(0) #define SNOR_HWCAPS_READ_FAST BIT(1) #define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2) #define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3) #define SNOR_HWCAPS_READ_1_1_2 BIT(3) #define SNOR_HWCAPS_READ_1_2_2 BIT(4) #define SNOR_HWCAPS_READ_2_2_2 BIT(5) #define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6) #define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7) #define SNOR_HWCAPS_READ_1_1_4 BIT(7) #define SNOR_HWCAPS_READ_1_4_4 BIT(8) #define SNOR_HWCAPS_READ_4_4_4 BIT(9) #define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) #define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11) #define SNOR_HWCAPS_READ_1_1_8 BIT(11) #define SNOR_HWCAPS_READ_1_8_8 BIT(12) #define SNOR_HWCAPS_READ_8_8_8 BIT(13) #define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14) /* * Page Program capabilities. * MUST be ordered by priority: the higher bit position, the higher priority. * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the * legacy SPI 1-1-1 protocol. * Note that Dual Page Programs are not supported because there is no existing * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory * implements such commands. */ #define SNOR_HWCAPS_PP_MASK GENMASK(22, 16) #define SNOR_HWCAPS_PP BIT(16) #define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17) #define SNOR_HWCAPS_PP_1_1_4 BIT(17) #define SNOR_HWCAPS_PP_1_4_4 BIT(18) #define SNOR_HWCAPS_PP_4_4_4 BIT(19) #define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20) #define SNOR_HWCAPS_PP_1_1_8 BIT(20) #define SNOR_HWCAPS_PP_1_8_8 BIT(21) #define SNOR_HWCAPS_PP_8_8_8 BIT(22) #define SNOR_HWCAPS_X_X_X (SNOR_HWCAPS_READ_2_2_2 | \ SNOR_HWCAPS_READ_4_4_4 | \ SNOR_HWCAPS_READ_8_8_8 | \ SNOR_HWCAPS_PP_4_4_4 | \ SNOR_HWCAPS_PP_8_8_8) #define SNOR_HWCAPS_DTR (SNOR_HWCAPS_READ_1_1_1_DTR | \ SNOR_HWCAPS_READ_1_2_2_DTR | \ SNOR_HWCAPS_READ_1_4_4_DTR | \ SNOR_HWCAPS_READ_1_8_8_DTR) #define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \ SNOR_HWCAPS_PP_MASK) struct spi_nor_read_command { u8 num_mode_clocks; u8 num_wait_states; u8 opcode; enum spi_nor_protocol proto; }; struct spi_nor_pp_command { u8 opcode; enum spi_nor_protocol proto; }; enum spi_nor_read_command_index { SNOR_CMD_READ, SNOR_CMD_READ_FAST, SNOR_CMD_READ_1_1_1_DTR, /* Dual SPI */ SNOR_CMD_READ_1_1_2, SNOR_CMD_READ_1_2_2, SNOR_CMD_READ_2_2_2, SNOR_CMD_READ_1_2_2_DTR, /* Quad SPI */ SNOR_CMD_READ_1_1_4, SNOR_CMD_READ_1_4_4, SNOR_CMD_READ_4_4_4, SNOR_CMD_READ_1_4_4_DTR, /* Octal SPI */ SNOR_CMD_READ_1_1_8, SNOR_CMD_READ_1_8_8, SNOR_CMD_READ_8_8_8, SNOR_CMD_READ_1_8_8_DTR, SNOR_CMD_READ_MAX }; enum spi_nor_pp_command_index { SNOR_CMD_PP, /* Quad SPI */ SNOR_CMD_PP_1_1_4, SNOR_CMD_PP_1_4_4, SNOR_CMD_PP_4_4_4, /* Octal SPI */ SNOR_CMD_PP_1_1_8, SNOR_CMD_PP_1_8_8, SNOR_CMD_PP_8_8_8, SNOR_CMD_PP_MAX }; /* Forward declaration that will be used in 'struct spi_nor_flash_parameter' */ struct spi_nor; /** * struct spi_nor_locking_ops - SPI NOR locking methods * @lock: lock a region of the SPI NOR. * @unlock: unlock a region of the SPI NOR. * @is_locked: check if a region of the SPI NOR is completely locked */ struct spi_nor_locking_ops { int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); }; /** * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings. * Includes legacy flash parameters and settings that can be overwritten * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216 * Serial Flash Discoverable Parameters (SFDP) tables. 
* * @size: the flash memory density in bytes. * @page_size: the page size of the SPI NOR flash memory. * @hwcaps: describes the read and page program hardware * capabilities. * @reads: read capabilities ordered by priority: the higher index * in the array, the higher priority. * @page_programs: page program capabilities ordered by priority: the * higher index in the array, the higher priority. * @erase_map: the erase map parsed from the SFDP Sector Map Parameter * Table. * @quad_enable: enables SPI NOR quad mode. * @set_4byte: puts the SPI NOR in 4 byte addressing mode. * @convert_addr: converts an absolute address into something the flash * will understand. Particularly useful when pagesize is * not a power-of-2. * @setup: configures the SPI NOR memory. Useful for SPI NOR * flashes that have peculiarities to the SPI NOR standard * e.g. different opcodes, specific address calculation, * page size, etc. * @locking_ops: SPI NOR locking methods. */ struct spi_nor_flash_parameter { u64 size; u32 page_size; struct spi_nor_hwcaps hwcaps; struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; struct spi_nor_erase_map erase_map; int (*quad_enable)(struct spi_nor *nor); int (*set_4byte)(struct spi_nor *nor, bool enable); u32 (*convert_addr)(struct spi_nor *nor, u32 addr); int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); const struct spi_nor_locking_ops *locking_ops; }; /** * struct flash_info - Forward declaration of a structure used internally by * spi_nor_scan() */ struct flash_info; /** * struct spi_nor - Structure for defining the SPI NOR layer * @mtd: pointer to an mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: pointer to a spi device, or a spi nor controller device. * @spimem: pointer to the spi mem device * @bouncebuf: bounce buffer used when the buffer passed by the MTD * layer is not DMA-able * @bouncebuf_size: size of the bounce buffer * @info: spi-nor part JEDEC MFR id and other info * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector * @read_opcode: the read opcode * @read_dummy: the dummy needed by the read operation * @program_opcode: the program opcode * @sst_write_second: used by the SST write operation * @flags: flag options for the current SPI-NOR (SNOR_F_*) * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations * @prepare: [OPTIONAL] do some preparations for the * read/write/erase/lock/unlock operations * @unprepare: [OPTIONAL] do some post work after the * read/write/erase/lock/unlock operations * @read_reg: [DRIVER-SPECIFIC] read out the register * @write_reg: [DRIVER-SPECIFIC] write data to the register * @read: [DRIVER-SPECIFIC] read data from the SPI NOR * @write: [DRIVER-SPECIFIC] write data to the SPI NOR * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR * at the offset @offs; if not provided by the driver, * spi-nor will send the erase opcode via write_reg() * @clear_sr_bp: [FLASH-SPECIFIC] clears the Block Protection Bits from * the SPI NOR Status Register. * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings. * The structure includes legacy flash parameters and * settings that can be overwritten by the spi_nor_fixups * hooks, or dynamically when parsing the SFDP tables.
* @priv: the private data */ struct spi_nor { struct mtd_info mtd; struct mutex lock; struct device *dev; struct spi_mem *spimem; u8 *bouncebuf; size_t bouncebuf_size; const struct flash_info *info; u32 page_size; u8 addr_width; u8 erase_opcode; u8 read_opcode; u8 read_dummy; u8 program_opcode; enum spi_nor_protocol read_proto; enum spi_nor_protocol write_proto; enum spi_nor_protocol reg_proto; bool sst_write_second; u32 flags; int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); ssize_t (*read)(struct spi_nor *nor, loff_t from, size_t len, u_char *read_buf); ssize_t (*write)(struct spi_nor *nor, loff_t to, size_t len, const u_char *write_buf); int (*erase)(struct spi_nor *nor, loff_t offs); int (*clear_sr_bp)(struct spi_nor *nor); struct spi_nor_flash_parameter params; void *priv; }; static u64 __maybe_unused spi_nor_region_is_last(const struct spi_nor_erase_region *region) { return region->offset & SNOR_LAST_REGION; } static u64 __maybe_unused spi_nor_region_end(const struct spi_nor_erase_region *region) { return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size; } static void __maybe_unused spi_nor_region_mark_end(struct spi_nor_erase_region *region) { region->offset |= SNOR_LAST_REGION; } static void __maybe_unused spi_nor_region_mark_overlay(struct spi_nor_erase_region *region) { region->offset |= SNOR_OVERLAID_REGION; } static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor) { return !!nor->params.erase_map.uniform_erase_type; } static inline void spi_nor_set_flash_node(struct spi_nor *nor, struct device_node *np) { mtd_set_of_node(&nor->mtd, np); } static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) { return mtd_get_of_node(&nor->mtd); } /** * spi_nor_scan() - scan the SPI NOR * @nor: the spi_nor structure * @name: the chip type name * @hwcaps: the hardware capabilities supported by the controller driver * * The drivers can use this function to scan the SPI NOR. * In the scanning, it will try to get all the necessary information to * fill the mtd_info{} and the spi_nor{}. * * The chip type name can be provided through the @name parameter. * * Return: 0 for success, others for failure. */ int spi_nor_scan(struct spi_nor *nor, const char *name, const struct spi_nor_hwcaps *hwcaps); /** * spi_nor_restore() - restore the status of SPI NOR * @nor: the spi_nor structure */ void spi_nor_restore(struct spi_nor *nor); #endif mtd/ftl.h 0000644 00000004767 14722070374 0006306 0 ustar 00 /* * Derived from (and probably identical to): * ftl.h 1.7 1999/10/25 20:23:17 * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
* * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use * your version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. */ #ifndef _LINUX_FTL_H #define _LINUX_FTL_H typedef struct erase_unit_header_t { uint8_t LinkTargetTuple[5]; uint8_t DataOrgTuple[10]; uint8_t NumTransferUnits; uint32_t EraseCount; uint16_t LogicalEUN; uint8_t BlockSize; uint8_t EraseUnitSize; uint16_t FirstPhysicalEUN; uint16_t NumEraseUnits; uint32_t FormattedSize; uint32_t FirstVMAddress; uint16_t NumVMPages; uint8_t Flags; uint8_t Code; uint32_t SerialNumber; uint32_t AltEUHOffset; uint32_t BAMOffset; uint8_t Reserved[12]; uint8_t EndTuple[2]; } erase_unit_header_t; /* Flags in erase_unit_header_t */ #define HIDDEN_AREA 0x01 #define REVERSE_POLARITY 0x02 #define DOUBLE_BAI 0x04 /* Definitions for block allocation information */ #define BLOCK_FREE(b) ((b) == 0xffffffff) #define BLOCK_DELETED(b) (((b) == 0) || ((b) == 0xfffffffe)) #define BLOCK_TYPE(b) ((b) & 0x7f) #define BLOCK_ADDRESS(b) ((b) & ~0x7f) #define BLOCK_NUMBER(b) ((b) >> 9) #define BLOCK_CONTROL 0x30 #define BLOCK_DATA 0x40 #define BLOCK_REPLACEMENT 0x60 #define BLOCK_BAD 0x70 #endif /* _LINUX_FTL_H */ mtd/lpc32xx_slc.h 0000644 00000000601 14722070374 0007650 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for LPC32xx SoC SLC NAND controller * * Copyright © 2012 Roland Stigge */ #ifndef __LINUX_MTD_LPC32XX_SLC_H #define __LINUX_MTD_LPC32XX_SLC_H #include <linux/dmaengine.h> struct lpc32xx_slc_platform_data { bool (*dma_filter)(struct dma_chan *chan, void *filter_param); }; #endif /* __LINUX_MTD_LPC32XX_SLC_H */ mtd/onfi.h 0000644 00000010660 14722070374 0006445 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> * Steven J. 
Hill <sjhill@realitydiluted.com> * Thomas Gleixner <tglx@linutronix.de> * * Contains all ONFI related definitions */ #ifndef __LINUX_MTD_ONFI_H #define __LINUX_MTD_ONFI_H #include <linux/types.h> /* ONFI version bits */ #define ONFI_VERSION_1_0 BIT(1) #define ONFI_VERSION_2_0 BIT(2) #define ONFI_VERSION_2_1 BIT(3) #define ONFI_VERSION_2_2 BIT(4) #define ONFI_VERSION_2_3 BIT(5) #define ONFI_VERSION_3_0 BIT(6) #define ONFI_VERSION_3_1 BIT(7) #define ONFI_VERSION_3_2 BIT(8) #define ONFI_VERSION_4_0 BIT(9) /* ONFI features */ #define ONFI_FEATURE_16_BIT_BUS (1 << 0) #define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) /* ONFI timing mode, used in both asynchronous and synchronous mode */ #define ONFI_TIMING_MODE_0 (1 << 0) #define ONFI_TIMING_MODE_1 (1 << 1) #define ONFI_TIMING_MODE_2 (1 << 2) #define ONFI_TIMING_MODE_3 (1 << 3) #define ONFI_TIMING_MODE_4 (1 << 4) #define ONFI_TIMING_MODE_5 (1 << 5) #define ONFI_TIMING_MODE_UNKNOWN (1 << 6) /* ONFI feature number/address */ #define ONFI_FEATURE_NUMBER 256 #define ONFI_FEATURE_ADDR_TIMING_MODE 0x1 /* Vendor-specific feature address (Micron) */ #define ONFI_FEATURE_ADDR_READ_RETRY 0x89 #define ONFI_FEATURE_ON_DIE_ECC 0x90 #define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3) /* ONFI subfeature parameters length */ #define ONFI_SUBFEATURE_PARAM_LEN 4 /* ONFI optional commands SET/GET FEATURES supported? */ #define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2) struct nand_onfi_params { /* rev info and features block */ /* 'O' 'N' 'F' 'I' */ u8 sig[4]; __le16 revision; __le16 features; __le16 opt_cmd; u8 reserved0[2]; __le16 ext_param_page_length; /* since ONFI 2.1 */ u8 num_of_param_pages; /* since ONFI 2.1 */ u8 reserved1[17]; /* manufacturer information block */ char manufacturer[12]; char model[20]; u8 jedec_id; __le16 date_code; u8 reserved2[13]; /* memory organization block */ __le32 byte_per_page; __le16 spare_bytes_per_page; __le32 data_bytes_per_ppage; __le16 spare_bytes_per_ppage; __le32 pages_per_block; __le32 blocks_per_lun; u8 lun_count; u8 addr_cycles; u8 bits_per_cell; __le16 bb_per_lun; __le16 block_endurance; u8 guaranteed_good_blocks; __le16 guaranteed_block_endurance; u8 programs_per_page; u8 ppage_attr; u8 ecc_bits; u8 interleaved_bits; u8 interleaved_ops; u8 reserved3[13]; /* electrical parameter block */ u8 io_pin_capacitance_max; __le16 async_timing_mode; __le16 program_cache_timing_mode; __le16 t_prog; __le16 t_bers; __le16 t_r; __le16 t_ccs; __le16 src_sync_timing_mode; u8 src_ssync_features; __le16 clk_pin_capacitance_typ; __le16 io_pin_capacitance_typ; __le16 input_pin_capacitance_typ; u8 input_pin_capacitance_max; u8 driver_strength_support; __le16 t_int_r; __le16 t_adl; u8 reserved4[8]; /* vendor */ __le16 vendor_revision; u8 vendor[88]; __le16 crc; } __packed; #define ONFI_CRC_BASE 0x4F4E /* Extended ECC information Block Definition (since ONFI 2.1) */ struct onfi_ext_ecc_info { u8 ecc_bits; u8 codeword_size; __le16 bb_per_lun; __le16 block_endurance; u8 reserved[2]; } __packed; #define ONFI_SECTION_TYPE_0 0 /* Unused section. */ #define ONFI_SECTION_TYPE_1 1 /* for additional sections. */ #define ONFI_SECTION_TYPE_2 2 /* for ECC information. */ struct onfi_ext_section { u8 type; u8 length; } __packed; #define ONFI_EXT_SECTION_MAX 8 /* Extended Parameter Page Definition (since ONFI 2.1) */ struct onfi_ext_param_page { __le16 crc; u8 sig[4]; /* 'E' 'P' 'P' 'S' */ u8 reserved0[10]; struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX]; /* * The actual size of the Extended Parameter Page is in * @ext_param_page_length of nand_onfi_params{}. 
* The following are the variable length sections. * So we do not add any fields below. Please see the ONFI spec. */ } __packed; /** * struct onfi_params - ONFI specific parameters that will be reused * @version: ONFI version (BCD encoded), 0 if ONFI is not supported * @tPROG: Page program time * @tBERS: Block erase time * @tR: Page read time * @tCCS: Change column setup time * @async_timing_mode: Supported asynchronous timing mode * @vendor_revision: Vendor specific revision number * @vendor: Vendor specific data */ struct onfi_params { int version; u16 tPROG; u16 tBERS; u16 tR; u16 tCCS; u16 async_timing_mode; u16 vendor_revision; u8 vendor[88]; }; #endif /* __LINUX_MTD_ONFI_H */ mtd/physmap.h 0000644 00000001450 14722070374 0007170 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * For boards with physically mapped flash and using * drivers/mtd/maps/physmap.c mapping driver. * * Copyright (C) 2003 MontaVista Software Inc. * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net */ #ifndef __LINUX_MTD_PHYSMAP__ #define __LINUX_MTD_PHYSMAP__ #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> struct map_info; struct platform_device; struct physmap_flash_data { unsigned int width; int (*init)(struct platform_device *); void (*exit)(struct platform_device *); void (*set_vpp)(struct platform_device *, int); unsigned int nr_parts; unsigned int pfow_base; char *probe_type; struct mtd_partition *parts; const char * const *part_probe_types; }; #endif /* __LINUX_MTD_PHYSMAP__ */ mtd/spinand.h 0000644 00000034765 14722070374 0007162 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2016-2017 Micron Technology, Inc. * * Authors: * Peter Pan <peterpandong@micron.com> */ #ifndef __LINUX_MTD_SPINAND_H #define __LINUX_MTD_SPINAND_H #include <linux/mutex.h> #include <linux/bitops.h> #include <linux/device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> /** * Standard SPI NAND flash operations */ #define SPINAND_RESET_OP \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \ SPI_MEM_OP_NO_ADDR, \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) #define SPINAND_WR_EN_DIS_OP(enable) \ SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \ SPI_MEM_OP_NO_ADDR, \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) #define SPINAND_READID_OP(ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \ SPI_MEM_OP_NO_ADDR, \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1)) #define SPINAND_SET_FEATURE_OP(reg, valptr) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \ SPI_MEM_OP_ADDR(1, reg, 1), \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_DATA_OUT(1, valptr, 1)) #define SPINAND_GET_FEATURE_OP(reg, valptr) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \ SPI_MEM_OP_ADDR(1, reg, 1), \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_DATA_IN(1, valptr, 1)) #define SPINAND_BLK_ERASE_OP(addr) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) #define SPINAND_PAGE_READ_OP(addr) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) #define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1)) #define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 
0x0b : 0x03, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1)) #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ SPI_MEM_OP_ADDR(2, addr, 2), \ SPI_MEM_OP_DUMMY(ndummy, 2), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ SPI_MEM_OP_ADDR(3, addr, 2), \ SPI_MEM_OP_DUMMY(ndummy, 2), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ SPI_MEM_OP_ADDR(2, addr, 4), \ SPI_MEM_OP_DUMMY(ndummy, 4), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ SPI_MEM_OP_ADDR(3, addr, 4), \ SPI_MEM_OP_DUMMY(ndummy, 4), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) #define SPINAND_PROG_EXEC_OP(addr) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) #define SPINAND_PROG_LOAD(reset, addr, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_DATA_OUT(len, buf, 1)) #define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_DATA_OUT(len, buf, 4)) /** * Standard SPI NAND flash commands */ #define SPINAND_CMD_PROG_LOAD_X4 0x32 #define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34 /* feature register */ #define REG_BLOCK_LOCK 0xa0 #define BL_ALL_UNLOCKED 0x00 /* configuration register */ #define REG_CFG 0xb0 #define CFG_OTP_ENABLE BIT(6) #define CFG_ECC_ENABLE BIT(4) #define CFG_QUAD_ENABLE BIT(0) /* status register */ #define REG_STATUS 0xc0 #define STATUS_BUSY BIT(0) #define STATUS_ERASE_FAILED BIT(2) #define STATUS_PROG_FAILED BIT(3) #define STATUS_ECC_MASK GENMASK(5, 4) #define STATUS_ECC_NO_BITFLIPS (0 << 4) #define STATUS_ECC_HAS_BITFLIPS (1 << 4) #define STATUS_ECC_UNCOR_ERROR (2 << 4) struct spinand_op; struct spinand_device; #define SPINAND_MAX_ID_LEN 4 /** * struct spinand_id - SPI NAND id structure * @data: buffer containing the id bytes. Currently 4 bytes large, but can * be extended if required * @len: ID length * * struct_spinand_id->data contains all bytes returned after a READ_ID command, * including dummy bytes if the chip does not emit ID bytes right after the * READ_ID command. The responsibility to extract real ID bytes is left to * struct_manufacurer_ops->detect(). 
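 *
 * For example, on a part that clocks out one dummy byte before the ID,
 * the manufacturer ID lands in data[1] and ->detect() must skip data[0].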
*/ struct spinand_id { u8 data[SPINAND_MAX_ID_LEN]; int len; }; /** * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed * the core calls the spinand_manufacturer_ops->detect() hook of each * registered manufacturer until one of them returns 1. Note that * the first thing to check in this hook is that the manufacturer ID * in struct spinand_device->id matches the manufacturer whose * ->detect() hook has been called. Should return 1 if there's a * match, 0 if the manufacturer ID does not match and a negative * error code otherwise. When 1 is returned, the core assumes * that properties of the NAND chip (spinand->base.memorg and * spinand->base.eccreq) have been filled. * @init: initialize a SPI NAND device * @cleanup: cleanup a SPI NAND device * * Each SPI NAND manufacturer driver should implement this interface so that * NAND chips coming from this vendor can be detected and initialized properly. */ struct spinand_manufacturer_ops { int (*detect)(struct spinand_device *spinand); int (*init)(struct spinand_device *spinand); void (*cleanup)(struct spinand_device *spinand); }; /** * struct spinand_manufacturer - SPI NAND manufacturer instance * @id: manufacturer ID * @name: manufacturer name * @ops: manufacturer operations */ struct spinand_manufacturer { u8 id; char *name; const struct spinand_manufacturer_ops *ops; }; /* SPI NAND manufacturers */ extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; extern const struct spinand_manufacturer paragon_spinand_manufacturer; extern const struct spinand_manufacturer toshiba_spinand_manufacturer; extern const struct spinand_manufacturer winbond_spinand_manufacturer; /** * struct spinand_op_variants - SPI NAND operation variants * @ops: the list of variants for a given operation * @nops: the number of variants * * Some operations like read-from-cache/write-to-cache have several variants * depending on the number of IO lines you use to transfer data or address * cycles. This structure is a way to describe the different variants supported * by a chip and let the core pick the best one based on the SPI mem controller * capabilities. */ struct spinand_op_variants { const struct spi_mem_op *ops; unsigned int nops; }; #define SPINAND_OP_VARIANTS(name, ...) \ const struct spinand_op_variants name = { \ .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \ .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \ sizeof(struct spi_mem_op), \ } /** * spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND * chip * @get_status: get the ECC status. Should return a positive number encoding * the number of corrected bitflips if correction was possible or * -EBADMSG if there are uncorrectable errors.
* @ooblayout: the OOB layout used by the on-die ECC implementation */ struct spinand_ecc_info { int (*get_status)(struct spinand_device *spinand, u8 status); const struct mtd_ooblayout_ops *ooblayout; }; #define SPINAND_HAS_QE_BIT BIT(0) /** * struct spinand_info - Structure used to describe SPI NAND chips * @model: model name * @devid: device ID * @flags: OR-ing of the SPINAND_XXX flags * @memorg: memory organization * @eccreq: ECC requirements * @eccinfo: on-die ECC info * @op_variants: operation variants * @op_variants.read_cache: variants of the read-cache operation * @op_variants.write_cache: variants of the write-cache operation * @op_variants.update_cache: variants of the update-cache operation * @select_target: function used to select a target/die. Required only for * multi-die chips * * Each SPI NAND manufacturer driver should have a spinand_info table * describing all the chips supported by the driver. */ struct spinand_info { const char *model; u16 devid; u32 flags; struct nand_memory_organization memorg; struct nand_ecc_req eccreq; struct spinand_ecc_info eccinfo; struct { const struct spinand_op_variants *read_cache; const struct spinand_op_variants *write_cache; const struct spinand_op_variants *update_cache; } op_variants; int (*select_target)(struct spinand_device *spinand, unsigned int target); }; #define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \ { \ .read_cache = __read, \ .write_cache = __write, \ .update_cache = __update, \ } #define SPINAND_ECCINFO(__ooblayout, __get_status) \ .eccinfo = { \ .ooblayout = __ooblayout, \ .get_status = __get_status, \ } #define SPINAND_SELECT_TARGET(__func) \ .select_target = __func, #define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \ __flags, ...) \ { \ .model = __model, \ .devid = __id, \ .memorg = __memorg, \ .eccreq = __eccreq, \ .op_variants = __op_variants, \ .flags = __flags, \ __VA_ARGS__ \ } struct spinand_dirmap { struct spi_mem_dirmap_desc *wdesc; struct spi_mem_dirmap_desc *rdesc; };
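/*
 * Editor's illustrative sketch (not part of the original header): how the
 * helpers above combine in a manufacturer driver's chip table. Every "foo"
 * identifier and every geometry/ECC number is a hypothetical placeholder,
 * not a real chip description; NAND_MEMORG() and NAND_ECCREQ() come from
 * <linux/mtd/nand.h>, not from this header.
 *
 *	static SPINAND_OP_VARIANTS(read_cache_variants,
 *			SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
 *			SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0));
 *
 *	static SPINAND_OP_VARIANTS(write_cache_variants,
 *			SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
 *			SPINAND_PROG_LOAD(true, 0, NULL, 0));
 *
 *	static SPINAND_OP_VARIANTS(update_cache_variants,
 *			SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
 *			SPINAND_PROG_LOAD(false, 0, NULL, 0));
 *
 *	static const struct spinand_info foo_spinand_table[] = {
 *		SPINAND_INFO("FOO1GQ4", 0x51,
 *			     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
 *			     NAND_ECCREQ(8, 512),
 *			     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 *						      &write_cache_variants,
 *						      &update_cache_variants),
 *			     SPINAND_HAS_QE_BIT,
 *			     SPINAND_ECCINFO(&foo_ooblayout_ops,
 *					     foo_ecc_get_status)),
 *	};
 */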
/** * struct spinand_device - SPI NAND device instance * @base: NAND device instance * @spimem: pointer to the SPI mem object * @lock: lock used to serialize accesses to the NAND * @id: NAND ID as returned by READ_ID * @flags: NAND flags * @op_templates: various SPI mem op templates * @op_templates.read_cache: read cache op template * @op_templates.write_cache: write cache op template * @op_templates.update_cache: update cache op template * @dirmaps: direct mapping descriptors * @select_target: select a specific target/die. Usually called before sending * a command addressing a page or an eraseblock embedded in * this die. Only required if your chip exposes several dies * @cur_target: currently selected target/die * @eccinfo: on-die ECC information * @cfg_cache: config register cache. One entry per die * @databuf: bounce buffer for data * @oobbuf: bounce buffer for OOB data * @scratchbuf: buffer used for everything but page accesses. This is needed * because the spi-mem interface explicitly requests that buffers * passed in spi_mem_op be DMA-able, so we can't base the buffers on * the stack * @manufacturer: SPI NAND manufacturer information * @priv: manufacturer private data */ struct spinand_device { struct nand_device base; struct spi_mem *spimem; struct mutex lock; struct spinand_id id; u32 flags; struct { const struct spi_mem_op *read_cache; const struct spi_mem_op *write_cache; const struct spi_mem_op *update_cache; } op_templates; struct spinand_dirmap *dirmaps; int (*select_target)(struct spinand_device *spinand, unsigned int target); unsigned int cur_target; struct spinand_ecc_info eccinfo; u8 *cfg_cache; u8 *databuf; u8 *oobbuf; u8 *scratchbuf; const struct spinand_manufacturer *manufacturer; void *priv; }; /** * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance * @mtd: MTD instance * * Return: the SPI NAND device attached to @mtd. */ static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd) { return container_of(mtd_to_nanddev(mtd), struct spinand_device, base); } /** * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device * @spinand: SPI NAND device * * Return: the MTD device embedded in @spinand. */ static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand) { return nanddev_to_mtd(&spinand->base); } /** * nand_to_spinand() - Get the SPI NAND device embedding a NAND object * @nand: NAND object * * Return: the SPI NAND device embedding @nand. */ static inline struct spinand_device *nand_to_spinand(struct nand_device *nand) { return container_of(nand, struct spinand_device, base); } /** * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object * @spinand: SPI NAND device * * Return: the NAND device embedded in @spinand. */ static inline struct nand_device * spinand_to_nand(struct spinand_device *spinand) { return &spinand->base; } /** * spinand_set_of_node - Attach a DT node to a SPI NAND device * @spinand: SPI NAND device * @np: DT node * * Attach a DT node to a SPI NAND device.
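 *
 * Editor's note: an illustrative call site (hypothetical, in the style of a
 * spi-mem probe path forwarding its DT node) would be:
 *
 *	spinand_set_of_node(spinand, mem->spi->dev.of_node);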
*/ static inline void spinand_set_of_node(struct spinand_device *spinand, struct device_node *np) { nanddev_set_of_node(&spinand->base, np); } int spinand_match_and_init(struct spinand_device *dev, const struct spinand_info *table, unsigned int table_size, u16 devid); int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); int spinand_select_target(struct spinand_device *spinand, unsigned int target); #endif /* __LINUX_MTD_SPINAND_H */ mtd/qinfo.h 0000644 00000004743 14722070374 0006633 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MTD_QINFO_H #define __LINUX_MTD_QINFO_H #include <linux/mtd/map.h> #include <linux/wait.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/flashchip.h> #include <linux/mtd/partitions.h> /* lpddr_private describes lpddr flash chip in memory map * @ManufactId - Chip Manufacturer ID * @DevId - Chip Device ID * @qinfo - pointer to qinfo records describing the chip * @numchips - number of chips including virtual RWW partitions * @chipshift - Chip/partition size 2^chipshift * @chips - per-chip data structure */ struct lpddr_private { uint16_t ManufactId; uint16_t DevId; struct qinfo_chip *qinfo; int numchips; unsigned long chipshift; struct flchip chips[0]; }; /* qinfo_query_info structure contains request information for * each qinfo record * @major - major number of qinfo record * @minor - minor number of qinfo record * @id_str - descriptive string to access the record * @desc - detailed description for the qinfo record */ struct qinfo_query_info { uint8_t major; uint8_t minor; char *id_str; char *desc; }; /* * qinfo_chip structure contains necessary qinfo records data * @DevSizeShift - Device size 2^n bytes * @BufSizeShift - Program buffer size 2^n bytes * @TotalBlocksNum - Total number of blocks * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes * @HWPartsNum - Number of hardware partitions * @SuspEraseSupp - Suspend erase supported * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec */ struct qinfo_chip { /* General device info */ uint16_t DevSizeShift; uint16_t BufSizeShift; /* Erase block information */ uint16_t TotalBlocksNum; uint16_t UniformBlockSizeShift; /* Partition information */ uint16_t HWPartsNum; /* Optional features */ uint16_t SuspEraseSupp; /* Operation typical time */ uint16_t SingleWordProgTime; uint16_t ProgBufferTime; uint16_t BlockEraseTime; }; /* defines for fixup usage */ #define LPDDR_MFR_ANY 0xffff #define LPDDR_ID_ANY 0xffff #define NUMONYX_MFGR_ID 0x0089 #define R18_DEVICE_ID_1G 0x893c static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map) { map_word val = { {0} }; val.x[0] = cmd; return val; } #define CMD(x) lpddr_build_cmd(x, map) #define CMDVAL(cmd) cmd.x[0] struct mtd_info *lpddr_cmdset(struct map_info *); #endif mtd/bbm.h 0000644 00000011331 14722070374 0006246 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * NAND family Bad Block Management (BBM) header file * - Bad Block Table (BBT) implementation * * Copyright © 2005 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> * * Copyright © 2000-2005 * Thomas Gleixner <tglx@linutronix.de> */ #ifndef __LINUX_MTD_BBM_H #define __LINUX_MTD_BBM_H /* The maximum number of NAND chips in an array */ #define NAND_MAX_CHIPS 8 /** * struct nand_bbt_descr - bad block table descriptor * @options: options for
this descriptor * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE; * when the bbt is searched, the pages where bbts were found are * stored here. It's an array and supports up to 8 chips now * @offs: offset of the pattern in the oob area of the page * @veroffs: offset of the bbt version counter in the oob area of the page * @version: version read from the bbt page during scan * @len: length of the pattern, if 0 no pattern check is performed * @maxblocks: maximum number of blocks to search for a bbt. This number of * blocks is reserved at the end of the device where the tables are * written. * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than * bad) block in the stored bbt * @pattern: pattern to identify bad block table or factory marked good / * bad blocks, can be NULL, if len = 0 * * Descriptor for the bad block table marker and the descriptor for the * pattern which identifies good and bad blocks. The assumption is made * that the pattern and the version count are always located in the oob area * of the first block. */ struct nand_bbt_descr { int options; int pages[NAND_MAX_CHIPS]; int offs; int veroffs; uint8_t version[NAND_MAX_CHIPS]; int len; int maxblocks; int reserved_block_code; uint8_t *pattern; }; /* Options for the bad block table descriptors */ /* The number of bits used per block in the bbt on the device */ #define NAND_BBT_NRBITS_MSK 0x0000000F #define NAND_BBT_1BIT 0x00000001 #define NAND_BBT_2BIT 0x00000002 #define NAND_BBT_4BIT 0x00000004 #define NAND_BBT_8BIT 0x00000008 /* The bad block table is in the last good block of the device */ #define NAND_BBT_LASTBLOCK 0x00000010 /* The bbt is at the given page, else we must scan for the bbt */ #define NAND_BBT_ABSPAGE 0x00000020 /* bbt is stored per chip on multichip devices */ #define NAND_BBT_PERCHIP 0x00000080 /* bbt has a version counter at offset veroffs */ #define NAND_BBT_VERSION 0x00000100 /* Create a bbt if none exists */ #define NAND_BBT_CREATE 0x00000200 /* * Create an empty BBT with no vendor information. Vendor's information may be * unavailable, for example, if the NAND controller has a different data and OOB * layout or if this information is already purged. Must be used in conjunction * with NAND_BBT_CREATE. */ #define NAND_BBT_CREATE_EMPTY 0x00000400 /* Write bbt if necessary */ #define NAND_BBT_WRITE 0x00002000 /* Read and write back block contents when writing bbt */ #define NAND_BBT_SAVECONTENT 0x00004000 /* * Use a flash based bad block table. By default, OOB identifier is saved in * OOB area. This option is passed to the default bad block table function. */ #define NAND_BBT_USE_FLASH 0x00020000 /* * Do not store flash based bad block table marker in the OOB area; store it * in-band. */ #define NAND_BBT_NO_OOB 0x00040000 /* * Do not write new bad block markers to OOB; useful, e.g., when ECC covers * entire spare area. Must be used with NAND_BBT_USE_FLASH. */ #define NAND_BBT_NO_OOB_BBM 0x00080000 /* * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr * was allocated dynamically and must be freed in nand_release(). Has no meaning * in nand_chip.bbt_options.
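 *
 * Editor's illustrative sketch (not part of the original header): a typical
 * flash-based main BBT descriptor combining several of the flags above,
 * modeled on the classic "Bbt0" layout; the offsets and scan pattern are
 * examples only:
 *
 *	static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
 *
 *	static struct nand_bbt_descr bbt_main_descr = {
 *		.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE |
 *			   NAND_BBT_WRITE | NAND_BBT_2BIT |
 *			   NAND_BBT_VERSION | NAND_BBT_PERCHIP,
 *		.offs = 8,
 *		.len = 4,
 *		.veroffs = 12,
 *		.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
 *		.pattern = bbt_pattern,
 *	};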
*/ #define NAND_BBT_DYNAMICSTRUCT 0x80000000 /* The maximum number of blocks to scan for a bbt */ #define NAND_BBT_SCAN_MAXBLOCKS 4 /* * Bad block scanning errors */ #define ONENAND_BBT_READ_ERROR 1 #define ONENAND_BBT_READ_ECC_ERROR 2 #define ONENAND_BBT_READ_FATAL_ERROR 4 /** * struct bbm_info - [GENERIC] Bad Block Table data structure * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry * @options: options for this descriptor * @bbt: [INTERN] bad block table pointer * @isbad_bbt: function to determine if a block is bad * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for * initial bad block scan * @priv: [OPTIONAL] pointer to private bbm date */ struct bbm_info { int bbt_erase_shift; int options; uint8_t *bbt; int (*isbad_bbt)(struct mtd_info *mtd, loff_t ofs, int allowbbt); /* TODO Add more NAND specific fileds */ struct nand_bbt_descr *badblock_pattern; void *priv; }; /* OneNAND BBT interface */ extern int onenand_default_bbt(struct mtd_info *mtd); #endif /* __LINUX_MTD_BBM_H */ mtd/lpc32xx_mlc.h 0000644 00000000601 14722070374 0007642 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for LPC32xx SoC MLC NAND controller * * Copyright © 2012 Roland Stigge */ #ifndef __LINUX_MTD_LPC32XX_MLC_H #define __LINUX_MTD_LPC32XX_MLC_H #include <linux/dmaengine.h> struct lpc32xx_mlc_platform_data { bool (*dma_filter)(struct dma_chan *chan, void *filter_param); }; #endif /* __LINUX_MTD_LPC32XX_MLC_H */ mtd/rawnand.h 0000644 00000130743 14722070374 0007151 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> * Steven J. Hill <sjhill@realitydiluted.com> * Thomas Gleixner <tglx@linutronix.de> * * Info: * Contains standard defines and IDs for NAND flash devices * * Changelog: * See git changelog. */ #ifndef __LINUX_MTD_RAWNAND_H #define __LINUX_MTD_RAWNAND_H #include <linux/mtd/mtd.h> #include <linux/mtd/flashchip.h> #include <linux/mtd/bbm.h> #include <linux/mtd/jedec.h> #include <linux/mtd/nand.h> #include <linux/mtd/onfi.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/types.h> struct nand_chip; /* The maximum number of NAND chips in an array */ #define NAND_MAX_CHIPS 8 /* * Constants for hardware specific CLE/ALE/NCE function * * These are bits which can be or'ed to set/clear multiple * bits in one go. 
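 *
 * Editor's note: as an illustrative sketch, a legacy driver issuing a
 * command byte would typically assert nCE and CLE together by OR-ing the
 * constants defined below:
 *
 *	chip->legacy.cmd_ctrl(chip, NAND_CMD_RESET,
 *			      NAND_CTRL_CLE | NAND_CTRL_CHANGE);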
*/ /* Select the chip by setting nCE to low */ #define NAND_NCE 0x01 /* Select the command latch by setting CLE to high */ #define NAND_CLE 0x02 /* Select the address latch by setting ALE to high */ #define NAND_ALE 0x04 #define NAND_CTRL_CLE (NAND_NCE | NAND_CLE) #define NAND_CTRL_ALE (NAND_NCE | NAND_ALE) #define NAND_CTRL_CHANGE 0x80 /* * Standard NAND flash commands */ #define NAND_CMD_READ0 0 #define NAND_CMD_READ1 1 #define NAND_CMD_RNDOUT 5 #define NAND_CMD_PAGEPROG 0x10 #define NAND_CMD_READOOB 0x50 #define NAND_CMD_ERASE1 0x60 #define NAND_CMD_STATUS 0x70 #define NAND_CMD_SEQIN 0x80 #define NAND_CMD_RNDIN 0x85 #define NAND_CMD_READID 0x90 #define NAND_CMD_ERASE2 0xd0 #define NAND_CMD_PARAM 0xec #define NAND_CMD_GET_FEATURES 0xee #define NAND_CMD_SET_FEATURES 0xef #define NAND_CMD_RESET 0xff /* Extended commands for large page devices */ #define NAND_CMD_READSTART 0x30 #define NAND_CMD_RNDOUTSTART 0xE0 #define NAND_CMD_CACHEDPROG 0x15 #define NAND_CMD_NONE -1 /* Status bits */ #define NAND_STATUS_FAIL 0x01 #define NAND_STATUS_FAIL_N1 0x02 #define NAND_STATUS_TRUE_READY 0x20 #define NAND_STATUS_READY 0x40 #define NAND_STATUS_WP 0x80 #define NAND_DATA_IFACE_CHECK_ONLY -1 /* * Constants for ECC_MODES */ typedef enum { NAND_ECC_NONE, NAND_ECC_SOFT, NAND_ECC_HW, NAND_ECC_HW_SYNDROME, NAND_ECC_HW_OOB_FIRST, NAND_ECC_ON_DIE, } nand_ecc_modes_t; enum nand_ecc_algo { NAND_ECC_UNKNOWN, NAND_ECC_HAMMING, NAND_ECC_BCH, NAND_ECC_RS, }; /* * Constants for Hardware ECC */ /* Reset Hardware ECC for read */ #define NAND_ECC_READ 0 /* Reset Hardware ECC for write */ #define NAND_ECC_WRITE 1 /* Enable Hardware ECC before syndrome is read back from flash */ #define NAND_ECC_READSYN 2 /* * Enable generic NAND 'page erased' check. This check is only done when * ecc.correct() returns -EBADMSG. * Set this flag if your implementation does not fix bitflips in erased * pages and you want to rely on the default implementation. */ #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) #define NAND_ECC_MAXIMIZE BIT(1) /* * When using software implementation of Hamming, we can specify which byte * ordering should be used. */ #define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2) /* * Option constants for bizarre disfunctionality and real * features. */ /* Buswidth is 16 bit */ #define NAND_BUSWIDTH_16 0x00000002 /* Chip has cache program function */ #define NAND_CACHEPRG 0x00000008 /* * Chip requires ready check on read (for auto-incremented sequential read). * True only for small page devices; large page devices do not support * autoincrement. */ #define NAND_NEED_READRDY 0x00000100 /* Chip does not allow subpage writes */ #define NAND_NO_SUBPAGE_WRITE 0x00000200 /* Device is one of 'new' xD cards that expose fake nand command set */ #define NAND_BROKEN_XD 0x00000400 /* Device behaves just like nand, but is readonly */ #define NAND_ROM 0x00000800 /* Device supports subpage reads */ #define NAND_SUBPAGE_READ 0x00001000 /* * Some MLC NANDs need data scrambling to limit bitflips caused by repeated * patterns. */ #define NAND_NEED_SCRAMBLING 0x00002000 /* Device needs 3rd row address cycle */ #define NAND_ROW_ADDR_3 0x00004000 /* Options valid for Samsung large page devices */ #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG /* Macros to identify the above */ #define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) /* * There are different places where the manufacturer stores the factory bad * block markers. * * Position within the block: Each of these pages needs to be checked for a * bad block marking pattern. 
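 *
 * For example (editor's note, an illustrative sketch): a manufacturer init
 * hook for a chip whose factory markers may live in either of the first two
 * pages of a block would set, using the flags defined below:
 *
 *	chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;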
*/ #define NAND_BBM_FIRSTPAGE 0x01000000 #define NAND_BBM_SECONDPAGE 0x02000000 #define NAND_BBM_LASTPAGE 0x04000000 /* Position within the OOB data of the page */ #define NAND_BBM_POS_SMALL 5 #define NAND_BBM_POS_LARGE 0 /* Non chip related options */ /* This option skips the bbt scan during initialization. */ #define NAND_SKIP_BBTSCAN 0x00010000 /* Chip may not exist, so silence any errors in scan */ #define NAND_SCAN_SILENT_NODEV 0x00040000 /* * Autodetect nand buswidth with readid/onfi. * This suppose the driver will configure the hardware in 8 bits mode * when calling nand_scan_ident, and update its configuration * before calling nand_scan_tail. */ #define NAND_BUSWIDTH_AUTO 0x00080000 /* * This option could be defined by controller drivers to protect against * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers */ #define NAND_USE_BOUNCE_BUFFER 0x00100000 /* * In case your controller is implementing ->legacy.cmd_ctrl() and is relying * on the default ->cmdfunc() implementation, you may want to let the core * handle the tCCS delay which is required when a column change (RNDIN or * RNDOUT) is requested. * If your controller already takes care of this delay, you don't need to set * this flag. */ #define NAND_WAIT_TCCS 0x00200000 /* * Whether the NAND chip is a boot medium. Drivers might use this information * to select ECC algorithms supported by the boot ROM or similar restrictions. */ #define NAND_IS_BOOT_MEDIUM 0x00400000 /* * Do not try to tweak the timings at runtime. This is needed when the * controller initializes the timings on itself or when it relies on * configuration done by the bootloader. */ #define NAND_KEEP_TIMINGS 0x00800000 /* Cell info constants */ #define NAND_CI_CHIPNR_MSK 0x03 #define NAND_CI_CELLTYPE_MSK 0x0C #define NAND_CI_CELLTYPE_SHIFT 2 /** * struct nand_parameters - NAND generic parameters from the parameter page * @model: Model name * @supports_set_get_features: The NAND chip supports setting/getting features * @set_feature_list: Bitmap of features that can be set * @get_feature_list: Bitmap of features that can be get * @onfi: ONFI specific parameters */ struct nand_parameters { /* Generic parameters */ const char *model; bool supports_set_get_features; DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER); DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER); /* ONFI parameters */ struct onfi_params *onfi; }; /* The maximum expected count of bytes in the NAND ID sequence */ #define NAND_MAX_ID_LEN 8 /** * struct nand_id - NAND id structure * @data: buffer containing the id bytes. * @len: ID length. */ struct nand_id { u8 data[NAND_MAX_ID_LEN]; int len; }; /** * struct nand_ecc_step_info - ECC step information of ECC engine * @stepsize: data bytes per ECC step * @strengths: array of supported strengths * @nstrengths: number of supported strengths */ struct nand_ecc_step_info { int stepsize; const int *strengths; int nstrengths; }; /** * struct nand_ecc_caps - capability of ECC engine * @stepinfos: array of ECC step information * @nstepinfos: number of ECC step information * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step */ struct nand_ecc_caps { const struct nand_ecc_step_info *stepinfos; int nstepinfos; int (*calc_ecc_bytes)(int step_size, int strength); }; /* a shorthand to generate struct nand_ecc_caps with only one ECC stepsize */ #define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...) 
\ static const int __name##_strengths[] = { __VA_ARGS__ }; \ static const struct nand_ecc_step_info __name##_stepinfo = { \ .stepsize = __step, \ .strengths = __name##_strengths, \ .nstrengths = ARRAY_SIZE(__name##_strengths), \ }; \ static const struct nand_ecc_caps __name = { \ .stepinfos = &__name##_stepinfo, \ .nstepinfos = 1, \ .calc_ecc_bytes = __calc, \ } /** * struct nand_ecc_ctrl - Control structure for ECC * @mode: ECC mode * @algo: ECC algorithm * @steps: number of ECC steps per page * @size: data bytes per ECC step * @bytes: ECC bytes per step * @strength: max number of correctible bits per ECC step * @total: total number of ECC bytes per page * @prepad: padding information for syndrome based ECC generators * @postpad: padding information for syndrome based ECC generators * @options: ECC specific options (see NAND_ECC_XXX flags defined above) * @priv: pointer to private ECC control data * @calc_buf: buffer for calculated ECC, size is oobsize. * @code_buf: buffer for ECC read from flash, size is oobsize. * @hwctl: function to control hardware ECC generator. Must only * be provided if an hardware ECC is available * @calculate: function for ECC calculation or readback from ECC hardware * @correct: function for ECC correction, matching to ECC generator (sw/hw). * Should return a positive number representing the number of * corrected bitflips, -EBADMSG if the number of bitflips exceed * ECC strength, or any other error code if the error is not * directly related to correction. * If -EBADMSG is returned the input buffers should be left * untouched. * @read_page_raw: function to read a raw page without ECC. This function * should hide the specific layout used by the ECC * controller and always return contiguous in-band and * out-of-band data even if they're not stored * contiguously on the NAND chip (e.g. * NAND_ECC_HW_SYNDROME interleaves in-band and * out-of-band data). * @write_page_raw: function to write a raw page without ECC. This function * should hide the specific layout used by the ECC * controller and consider the passed data as contiguous * in-band and out-of-band data. ECC controller is * responsible for doing the appropriate transformations * to adapt to its specific layout (e.g. * NAND_ECC_HW_SYNDROME interleaves in-band and * out-of-band data). * @read_page: function to read a page according to the ECC generator * requirements; returns maximum number of bitflips corrected in * any single ECC step, -EIO hw error * @read_subpage: function to read parts of the page covered by ECC; * returns same as read_page() * @write_subpage: function to write parts of the page covered by ECC. * @write_page: function to write a page according to the ECC generator * requirements. 
* @write_oob_raw: function to write chip OOB data without ECC * @read_oob_raw: function to read chip OOB data without ECC * @read_oob: function to read chip OOB data * @write_oob: function to write chip OOB data */ struct nand_ecc_ctrl { nand_ecc_modes_t mode; enum nand_ecc_algo algo; int steps; int size; int bytes; int total; int strength; int prepad; int postpad; unsigned int options; void *priv; u8 *calc_buf; u8 *code_buf; void (*hwctl)(struct nand_chip *chip, int mode); int (*calculate)(struct nand_chip *chip, const uint8_t *dat, uint8_t *ecc_code); int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc); int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf, int oob_required, int page); int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page); int (*read_page)(struct nand_chip *chip, uint8_t *buf, int oob_required, int page); int (*read_subpage)(struct nand_chip *chip, uint32_t offs, uint32_t len, uint8_t *buf, int page); int (*write_subpage)(struct nand_chip *chip, uint32_t offset, uint32_t data_len, const uint8_t *data_buf, int oob_required, int page); int (*write_page)(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page); int (*write_oob_raw)(struct nand_chip *chip, int page); int (*read_oob_raw)(struct nand_chip *chip, int page); int (*read_oob)(struct nand_chip *chip, int page); int (*write_oob)(struct nand_chip *chip, int page); }; /** * struct nand_sdr_timings - SDR NAND chip timings * * This struct defines the timing requirements of a SDR NAND chip. * These information can be found in every NAND datasheets and the timings * meaning are described in the ONFI specifications: * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing * Parameters) * * All these timings are expressed in picoseconds. * * @tBERS_max: Block erase time * @tCCS_min: Change column setup time * @tPROG_max: Page program time * @tR_max: Page read time * @tALH_min: ALE hold time * @tADL_min: ALE to data loading time * @tALS_min: ALE setup time * @tAR_min: ALE to RE# delay * @tCEA_max: CE# access time * @tCEH_min: CE# high hold time * @tCH_min: CE# hold time * @tCHZ_max: CE# high to output hi-Z * @tCLH_min: CLE hold time * @tCLR_min: CLE to RE# delay * @tCLS_min: CLE setup time * @tCOH_min: CE# high to output hold * @tCS_min: CE# setup time * @tDH_min: Data hold time * @tDS_min: Data setup time * @tFEAT_max: Busy time for Set Features and Get Features * @tIR_min: Output hi-Z to RE# low * @tITC_max: Interface and Timing Mode Change time * @tRC_min: RE# cycle time * @tREA_max: RE# access time * @tREH_min: RE# high hold time * @tRHOH_min: RE# high to output hold * @tRHW_min: RE# high to WE# low * @tRHZ_max: RE# high to output hi-Z * @tRLOH_min: RE# low to output hold * @tRP_min: RE# pulse width * @tRR_min: Ready to RE# low (data only) * @tRST_max: Device reset time, measured from the falling edge of R/B# to the * rising edge of R/B#. 
* @tWB_max: WE# high to SR[6] low * @tWC_min: WE# cycle time * @tWH_min: WE# high hold time * @tWHR_min: WE# high to RE# low * @tWP_min: WE# pulse width * @tWW_min: WP# transition to WE# low */ struct nand_sdr_timings { u64 tBERS_max; u32 tCCS_min; u64 tPROG_max; u64 tR_max; u32 tALH_min; u32 tADL_min; u32 tALS_min; u32 tAR_min; u32 tCEA_max; u32 tCEH_min; u32 tCH_min; u32 tCHZ_max; u32 tCLH_min; u32 tCLR_min; u32 tCLS_min; u32 tCOH_min; u32 tCS_min; u32 tDH_min; u32 tDS_min; u32 tFEAT_max; u32 tIR_min; u32 tITC_max; u32 tRC_min; u32 tREA_max; u32 tREH_min; u32 tRHOH_min; u32 tRHW_min; u32 tRHZ_max; u32 tRLOH_min; u32 tRP_min; u32 tRR_min; u64 tRST_max; u32 tWB_max; u32 tWC_min; u32 tWH_min; u32 tWHR_min; u32 tWP_min; u32 tWW_min; }; /** * enum nand_data_interface_type - NAND interface timing type * @NAND_SDR_IFACE: Single Data Rate interface */ enum nand_data_interface_type { NAND_SDR_IFACE, }; /** * struct nand_data_interface - NAND interface timing * @type: type of the timing * @timings: The timing, type according to @type * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. */ struct nand_data_interface { enum nand_data_interface_type type; union { struct nand_sdr_timings sdr; } timings; }; /** * nand_get_sdr_timings - get SDR timing from data interface * @conf: The data interface */ static inline const struct nand_sdr_timings * nand_get_sdr_timings(const struct nand_data_interface *conf) { if (conf->type != NAND_SDR_IFACE) return ERR_PTR(-EINVAL); return &conf->timings.sdr; } /** * struct nand_op_cmd_instr - Definition of a command instruction * @opcode: the command to issue in one cycle */ struct nand_op_cmd_instr { u8 opcode; }; /** * struct nand_op_addr_instr - Definition of an address instruction * @naddrs: length of the @addrs array * @addrs: array containing the address cycles to issue */ struct nand_op_addr_instr { unsigned int naddrs; const u8 *addrs; }; /** * struct nand_op_data_instr - Definition of a data instruction * @len: number of data bytes to move * @buf: buffer to fill * @buf.in: buffer to fill when reading from the NAND chip * @buf.out: buffer to read from when writing to the NAND chip * @force_8bit: force 8-bit access * * Please note that "in" and "out" are inverted from the ONFI specification * and are from the controller perspective, so a "in" is a read from the NAND * chip while a "out" is a write to the NAND chip. */ struct nand_op_data_instr { unsigned int len; union { void *in; const void *out; } buf; bool force_8bit; }; /** * struct nand_op_waitrdy_instr - Definition of a wait ready instruction * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms */ struct nand_op_waitrdy_instr { unsigned int timeout_ms; }; /** * enum nand_op_instr_type - Definition of all instruction types * @NAND_OP_CMD_INSTR: command instruction * @NAND_OP_ADDR_INSTR: address instruction * @NAND_OP_DATA_IN_INSTR: data in instruction * @NAND_OP_DATA_OUT_INSTR: data out instruction * @NAND_OP_WAITRDY_INSTR: wait ready instruction */ enum nand_op_instr_type { NAND_OP_CMD_INSTR, NAND_OP_ADDR_INSTR, NAND_OP_DATA_IN_INSTR, NAND_OP_DATA_OUT_INSTR, NAND_OP_WAITRDY_INSTR, }; /** * struct nand_op_instr - Instruction object * @type: the instruction type * @ctx: extra data associated to the instruction. 
You'll have to use the * appropriate element depending on @type * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR * or %NAND_OP_DATA_OUT_INSTR * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR * @delay_ns: delay the controller should apply after the instruction has been * issued on the bus. Most modern controllers have internal timings * control logic, and in this case, the controller driver can ignore * this field. */ struct nand_op_instr { enum nand_op_instr_type type; union { struct nand_op_cmd_instr cmd; struct nand_op_addr_instr addr; struct nand_op_data_instr data; struct nand_op_waitrdy_instr waitrdy; } ctx; unsigned int delay_ns; }; /* * Special handling must be done for the WAITRDY timeout parameter as it usually * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or * tBERS (during an erase) which all of them are u64 values that cannot be * divided by usual kernel macros and must be handled with the special * DIV_ROUND_UP_ULL() macro. * * Cast to type of dividend is needed here to guarantee that the result won't * be an unsigned long long when the dividend is an unsigned long (or smaller), * which is what the compiler does when it sees ternary operator with 2 * different return types (picks the largest type to make sure there's no * loss). */ #define __DIVIDE(dividend, divisor) ({ \ (__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \ DIV_ROUND_UP(dividend, divisor) : \ DIV_ROUND_UP_ULL(dividend, divisor)); \ }) #define PSEC_TO_NSEC(x) __DIVIDE(x, 1000) #define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000) #define NAND_OP_CMD(id, ns) \ { \ .type = NAND_OP_CMD_INSTR, \ .ctx.cmd.opcode = id, \ .delay_ns = ns, \ } #define NAND_OP_ADDR(ncycles, cycles, ns) \ { \ .type = NAND_OP_ADDR_INSTR, \ .ctx.addr = { \ .naddrs = ncycles, \ .addrs = cycles, \ }, \ .delay_ns = ns, \ } #define NAND_OP_DATA_IN(l, b, ns) \ { \ .type = NAND_OP_DATA_IN_INSTR, \ .ctx.data = { \ .len = l, \ .buf.in = b, \ .force_8bit = false, \ }, \ .delay_ns = ns, \ } #define NAND_OP_DATA_OUT(l, b, ns) \ { \ .type = NAND_OP_DATA_OUT_INSTR, \ .ctx.data = { \ .len = l, \ .buf.out = b, \ .force_8bit = false, \ }, \ .delay_ns = ns, \ } #define NAND_OP_8BIT_DATA_IN(l, b, ns) \ { \ .type = NAND_OP_DATA_IN_INSTR, \ .ctx.data = { \ .len = l, \ .buf.in = b, \ .force_8bit = true, \ }, \ .delay_ns = ns, \ } #define NAND_OP_8BIT_DATA_OUT(l, b, ns) \ { \ .type = NAND_OP_DATA_OUT_INSTR, \ .ctx.data = { \ .len = l, \ .buf.out = b, \ .force_8bit = true, \ }, \ .delay_ns = ns, \ } #define NAND_OP_WAIT_RDY(tout_ms, ns) \ { \ .type = NAND_OP_WAITRDY_INSTR, \ .ctx.waitrdy.timeout_ms = tout_ms, \ .delay_ns = ns, \ } /** * struct nand_subop - a sub operation * @instrs: array of instructions * @ninstrs: length of the @instrs array * @first_instr_start_off: offset to start from for the first instruction * of the sub-operation * @last_instr_end_off: offset to end at (excluded) for the last instruction * of the sub-operation * * Both @first_instr_start_off and @last_instr_end_off only apply to data or * address instructions. * * When an operation cannot be handled as is by the NAND controller, it will * be split by the parser into sub-operations which will be passed to the * controller driver. 
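 *
 * Editor's illustrative sketch (not part of the original header): the kind
 * of full operation the parser may have to split, here a large-page READ
 * PAGE built from the NAND_OP_*() macros above. addrs[], buf/len and the
 * sdr timings are placeholders the caller fills in, and NAND_OPERATION()
 * is defined further below:
 *
 *	const struct nand_op_instr instrs[] = {
 *		NAND_OP_CMD(NAND_CMD_READ0, 0),
 *		NAND_OP_ADDR(5, addrs, 0),
 *		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
 *		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
 *				 PSEC_TO_NSEC(sdr->tRR_min)),
 *		NAND_OP_DATA_IN(len, buf, 0),
 *	};
 *	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);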
*/ struct nand_subop { const struct nand_op_instr *instrs; unsigned int ninstrs; unsigned int first_instr_start_off; unsigned int last_instr_end_off; }; unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop, unsigned int op_id); unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop, unsigned int op_id); unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop, unsigned int op_id); unsigned int nand_subop_get_data_len(const struct nand_subop *subop, unsigned int op_id); /** * struct nand_op_parser_addr_constraints - Constraints for address instructions * @maxcycles: maximum number of address cycles the controller can issue in a * single step */ struct nand_op_parser_addr_constraints { unsigned int maxcycles; }; /** * struct nand_op_parser_data_constraints - Constraints for data instructions * @maxlen: maximum data length that the controller can handle in a single step */ struct nand_op_parser_data_constraints { unsigned int maxlen; }; /** * struct nand_op_parser_pattern_elem - One element of a pattern * @type: the instructuction type * @optional: whether this element of the pattern is optional or mandatory * @ctx: address or data constraint * @ctx.addr: address constraint (number of cycles) * @ctx.data: data constraint (data length) */ struct nand_op_parser_pattern_elem { enum nand_op_instr_type type; bool optional; union { struct nand_op_parser_addr_constraints addr; struct nand_op_parser_data_constraints data; } ctx; }; #define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \ { \ .type = NAND_OP_CMD_INSTR, \ .optional = _opt, \ } #define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \ { \ .type = NAND_OP_ADDR_INSTR, \ .optional = _opt, \ .ctx.addr.maxcycles = _maxcycles, \ } #define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \ { \ .type = NAND_OP_DATA_IN_INSTR, \ .optional = _opt, \ .ctx.data.maxlen = _maxlen, \ } #define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \ { \ .type = NAND_OP_DATA_OUT_INSTR, \ .optional = _opt, \ .ctx.data.maxlen = _maxlen, \ } #define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \ { \ .type = NAND_OP_WAITRDY_INSTR, \ .optional = _opt, \ } /** * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor * @elems: array of pattern elements * @nelems: number of pattern elements in @elems array * @exec: the function that will issue a sub-operation * * A pattern is a list of elements, each element reprensenting one instruction * with its constraints. The pattern itself is used by the core to match NAND * chip operation with NAND controller operations. * Once a match between a NAND controller operation pattern and a NAND chip * operation (or a sub-set of a NAND operation) is found, the pattern ->exec() * hook is called so that the controller driver can issue the operation on the * bus. * * Controller drivers should declare as many patterns as they support and pass * this list of patterns (created with the help of the following macro) to * the nand_op_parser_exec_op() helper. */ struct nand_op_parser_pattern { const struct nand_op_parser_pattern_elem *elems; unsigned int nelems; int (*exec)(struct nand_chip *chip, const struct nand_subop *subop); }; #define NAND_OP_PARSER_PATTERN(_exec, ...) 
\ { \ .exec = _exec, \ .elems = (const struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \ .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \ sizeof(struct nand_op_parser_pattern_elem), \ } /** * struct nand_op_parser - NAND controller operation parser descriptor * @patterns: array of supported patterns * @npatterns: length of the @patterns array * * The parser descriptor is just an array of supported patterns which will be * iterated by nand_op_parser_exec_op() everytime it tries to execute an * NAND operation (or tries to determine if a specific operation is supported). * * It is worth mentioning that patterns will be tested in their declaration * order, and the first match will be taken, so it's important to order patterns * appropriately so that simple/inefficient patterns are placed at the end of * the list. Usually, this is where you put single instruction patterns. */ struct nand_op_parser { const struct nand_op_parser_pattern *patterns; unsigned int npatterns; }; #define NAND_OP_PARSER(...) \ { \ .patterns = (const struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \ .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \ sizeof(struct nand_op_parser_pattern), \ } /** * struct nand_operation - NAND operation descriptor * @cs: the CS line to select for this NAND operation * @instrs: array of instructions to execute * @ninstrs: length of the @instrs array * * The actual operation structure that will be passed to chip->exec_op(). */ struct nand_operation { unsigned int cs; const struct nand_op_instr *instrs; unsigned int ninstrs; }; #define NAND_OPERATION(_cs, _instrs) \ { \ .cs = _cs, \ .instrs = _instrs, \ .ninstrs = ARRAY_SIZE(_instrs), \ } int nand_op_parser_exec_op(struct nand_chip *chip, const struct nand_op_parser *parser, const struct nand_operation *op, bool check_only); static inline void nand_op_trace(const char *prefix, const struct nand_op_instr *instr) { #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) switch (instr->type) { case NAND_OP_CMD_INSTR: pr_debug("%sCMD [0x%02x]\n", prefix, instr->ctx.cmd.opcode); break; case NAND_OP_ADDR_INSTR: pr_debug("%sADDR [%d cyc: %*ph]\n", prefix, instr->ctx.addr.naddrs, instr->ctx.addr.naddrs < 64 ? instr->ctx.addr.naddrs : 64, instr->ctx.addr.addrs); break; case NAND_OP_DATA_IN_INSTR: pr_debug("%sDATA_IN [%d B%s]\n", prefix, instr->ctx.data.len, instr->ctx.data.force_8bit ? ", force 8-bit" : ""); break; case NAND_OP_DATA_OUT_INSTR: pr_debug("%sDATA_OUT [%d B%s]\n", prefix, instr->ctx.data.len, instr->ctx.data.force_8bit ? ", force 8-bit" : ""); break; case NAND_OP_WAITRDY_INSTR: pr_debug("%sWAITRDY [max %d ms]\n", prefix, instr->ctx.waitrdy.timeout_ms); break; } #endif } /** * struct nand_controller_ops - Controller operations * * @attach_chip: this method is called after the NAND detection phase after * flash ID and MTD fields such as erase size, page size and OOB * size have been set up. ECC requirements are available if * provided by the NAND chip or device tree. Typically used to * choose the appropriate ECC configuration and allocate * associated resources. * This hook is optional. * @detach_chip: free all resources allocated/claimed in * nand_controller_ops->attach_chip(). * This hook is optional. * @exec_op: controller specific method to execute NAND operations. * This method replaces chip->legacy.cmdfunc(), * chip->legacy.{read,write}_{buf,byte,word}(), * chip->legacy.dev_ready() and chip->legacy.waifunc(). * @setup_data_interface: setup the data interface and timing. 
If * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this * means the configuration should not be applied but * only checked. * This hook is optional. */ struct nand_controller_ops { int (*attach_chip)(struct nand_chip *chip); void (*detach_chip)(struct nand_chip *chip); int (*exec_op)(struct nand_chip *chip, const struct nand_operation *op, bool check_only); int (*setup_data_interface)(struct nand_chip *chip, int chipnr, const struct nand_data_interface *conf); }; /** * struct nand_controller - Structure used to describe a NAND controller * * @lock: lock used to serialize accesses to the NAND controller * @ops: NAND controller operations. */ struct nand_controller { struct mutex lock; const struct nand_controller_ops *ops; }; static inline void nand_controller_init(struct nand_controller *nfc) { mutex_init(&nfc->lock); } /** * struct nand_legacy - NAND chip legacy fields/hooks * @IO_ADDR_R: address to read the 8 I/O lines of the flash device * @IO_ADDR_W: address to write the 8 I/O lines of the flash device * @select_chip: select/deselect a specific target/die * @read_byte: read one byte from the chip * @write_byte: write a single byte to the chip on the low 8 I/O lines * @write_buf: write data from the buffer to the chip * @read_buf: read data from the chip into the buffer * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used * to write command and address * @cmdfunc: hardware specific function for writing commands to the chip. * @dev_ready: hardware specific function for accessing device ready/busy line. * If set to NULL no access to ready/busy is available and the * ready/busy information is read from the chip status register. * @waitfunc: hardware specific function for wait on ready. * @block_bad: check if a block is bad, using OOB markers * @block_markbad: mark a block bad * @set_features: set the NAND chip features * @get_features: get the NAND chip features * @chip_delay: chip dependent delay for transferring data from array to read * regs (tR). * @dummy_controller: dummy controller implementation for drivers that can * only control a single chip * * If you look at this structure you're already wrong. These fields/hooks are * all deprecated. */ struct nand_legacy { void __iomem *IO_ADDR_R; void __iomem *IO_ADDR_W; void (*select_chip)(struct nand_chip *chip, int cs); u8 (*read_byte)(struct nand_chip *chip); void (*write_byte)(struct nand_chip *chip, u8 byte); void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len); void (*read_buf)(struct nand_chip *chip, u8 *buf, int len); void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl); void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column, int page_addr); int (*dev_ready)(struct nand_chip *chip); int (*waitfunc)(struct nand_chip *chip); int (*block_bad)(struct nand_chip *chip, loff_t ofs); int (*block_markbad)(struct nand_chip *chip, loff_t ofs); int (*set_features)(struct nand_chip *chip, int feature_addr, u8 *subfeature_para); int (*get_features)(struct nand_chip *chip, int feature_addr, u8 *subfeature_para); int chip_delay; struct nand_controller dummy_controller; }; /** * struct nand_chip - NAND Private Flash Chip Data * @base: Inherit from the generic NAND device * @legacy: All legacy fields/hooks. If you develop a new driver, * don't even try to use any of these fields/hooks, and if * you're modifying an existing driver that is using those * fields/hooks, you should consider reworking the driver * avoid using them. 
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for * setting the read-retry mode. Mostly needed for MLC NAND. * @ecc: [BOARDSPECIFIC] ECC control structure * @buf_align: minimum buffer alignment required by a platform * @oob_poi: "poison value buffer," used for laying out OOB data * before writing * @page_shift: [INTERN] number of address bits in a page (column * address bits). * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry * @chip_shift: [INTERN] number of address bits in one chip * @options: [BOARDSPECIFIC] various chip options. They can partly * be set to inform nand_scan about special functionality. * See the defines for further explanation. * @bbt_options: [INTERN] bad block specific options. All options used * here must come from bbm.h. By default, these options * will be copied to the appropriate nand_bbt_descr's. * @badblockpos: [INTERN] position of the bad block marker in the oob * area. * @badblockbits: [INTERN] minimum number of set bits in a good block's * bad block marker position; i.e., BBM == 11110111b is * not bad when badblockbits == 7 * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is * set to the actually used ONFI mode if the chip is * ONFI compliant or deduced from the datasheet if * the NAND chip is not ONFI compliant. * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 * @data_buf: [INTERN] buffer for data, size is (page size + oobsize). * @pagecache: Structure containing page cache related fields * @pagecache.bitflips: Number of bitflips of the cached page * @pagecache.page: Page number currently in the cache. -1 means no page is * currently cached * @subpagesize: [INTERN] holds the subpagesize * @id: [INTERN] holds NAND ID * @parameters: [INTERN] holds generic parameters under an easily * readable form. * @data_interface: [INTERN] NAND interface timing information * @cur_cs: currently selected target. -1 means no target selected, * otherwise we should always have cur_cs >= 0 && * cur_cs < nanddev_ntargets(). NAND Controller drivers * should not modify this value, but they're allowed to * read it. * @read_retries: [INTERN] the number of read retry modes supported * @lock: lock protecting the suspended field. Also used to * serialize accesses to the NAND device. * @suspended: set to 1 when the device is suspended, 0 when it's not. * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash * lookup. * @bbt_md: [REPLACEABLE] bad block table mirror descriptor * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial * bad block scan. * @controller: [REPLACEABLE] a pointer to a hardware controller * structure which is shared among multiple independent * devices. 
* @priv: [OPTIONAL] pointer to private chip data * @manufacturer: [INTERN] Contains manufacturer information * @manufacturer.desc: [INTERN] Contains manufacturer's description * @manufacturer.priv: [INTERN] Contains manufacturer private information */ struct nand_chip { struct nand_device base; struct nand_legacy legacy; int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); unsigned int options; unsigned int bbt_options; int page_shift; int phys_erase_shift; int bbt_erase_shift; int chip_shift; int pagemask; u8 *data_buf; struct { unsigned int bitflips; int page; } pagecache; int subpagesize; int onfi_timing_mode_default; unsigned int badblockpos; int badblockbits; struct nand_id id; struct nand_parameters parameters; struct nand_data_interface data_interface; int cur_cs; int read_retries; struct mutex lock; unsigned int suspended : 1; uint8_t *oob_poi; struct nand_controller *controller; struct nand_ecc_ctrl ecc; unsigned long buf_align; uint8_t *bbt; struct nand_bbt_descr *bbt_td; struct nand_bbt_descr *bbt_md; struct nand_bbt_descr *badblock_pattern; void *priv; struct { const struct nand_manufacturer *desc; void *priv; } manufacturer; }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd) { return container_of(mtd, struct nand_chip, base.mtd); } static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip) { return &chip->base.mtd; } static inline void *nand_get_controller_data(struct nand_chip *chip) { return chip->priv; } static inline void nand_set_controller_data(struct nand_chip *chip, void *priv) { chip->priv = priv; } static inline void nand_set_manufacturer_data(struct nand_chip *chip, void *priv) { chip->manufacturer.priv = priv; } static inline void *nand_get_manufacturer_data(struct nand_chip *chip) { return chip->manufacturer.priv; } static inline void nand_set_flash_node(struct nand_chip *chip, struct device_node *np) { mtd_set_of_node(nand_to_mtd(chip), np); } static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) { return mtd_get_of_node(nand_to_mtd(chip)); } /* * A helper for defining older NAND chips where the second ID byte fully * defined the chip, including the geometry (chip size, eraseblock size, page * size). All these chips have 512 bytes NAND page size. */ #define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \ { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \ .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) } /* * A helper for defining newer chips which report their page size and * eraseblock size via the extended ID bytes. * * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the * device ID now only represented a particular total chip size (and voltage, * buswidth), and the page size, eraseblock size, and OOB size could vary while * using the same device ID. 
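 *
 * Editor's illustrative sketch: table entries in the style of nand_ids.c
 * (SZ_8K, SP_OPTIONS and LP_OPTIONS are convenience macros defined outside
 * this header; the entries are examples, not guaranteed to match any
 * shipped table):
 *
 *	LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
 *	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS),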
*/ #define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \ { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \ .options = (opts) } #define NAND_ECC_INFO(_strength, _step) \ { .strength_ds = (_strength), .step_ds = (_step) } #define NAND_ECC_STRENGTH(type) ((type)->ecc.strength_ds) #define NAND_ECC_STEP(type) ((type)->ecc.step_ds) /** * struct nand_flash_dev - NAND Flash Device ID Structure * @name: a human-readable name of the NAND chip * @mfr_id: manufacturer ID part of the full chip ID array (refers to the same * memory address as ``id[0]``) * @dev_id: device ID part of the full chip ID array (refers to the same memory * address as ``id[1]``) * @id: full device ID array * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as * well as the eraseblock size) is determined from the extended NAND * chip ID array * @chipsize: total chip size in MiB * @erasesize: eraseblock size in bytes (determined from the extended ID if 0) * @options: stores various chip bit options * @id_len: The valid length of the @id. * @oobsize: OOB size * @ecc: ECC correctability and step information from the datasheet. * @ecc.strength_ds: The ECC correctability from the datasheet, same as the * @ecc_strength_ds in nand_chip{}. * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the * @ecc_step_ds in nand_chip{}, also from the datasheet. * For example, the "4bit ECC for each 512Byte" can be set with * NAND_ECC_INFO(4, 512). * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND * reset. Should be deduced from timings described * in the datasheet. * */ struct nand_flash_dev { char *name; union { struct { uint8_t mfr_id; uint8_t dev_id; }; uint8_t id[NAND_MAX_ID_LEN]; }; unsigned int pagesize; unsigned int chipsize; unsigned int erasesize; unsigned int options; uint16_t id_len; uint16_t oobsize; struct { uint16_t strength_ds; uint16_t step_ds; } ecc; int onfi_timing_mode_default; }; int nand_create_bbt(struct nand_chip *chip); /* * Check if it is an SLC NAND. * !nand_is_slc() can be used to check for MLC/TLC NAND chips. * We do not distinguish between MLC and TLC for now.
*/ static inline bool nand_is_slc(struct nand_chip *chip) { WARN(nanddev_bits_per_cell(&chip->base) == 0, "chip->bits_per_cell is used uninitialized\n"); return nanddev_bits_per_cell(&chip->base) == 1; } /** * nand_opcode_8bits() - Check if the opcode's address should be sent only on * the lower 8 bits * @command: opcode to check */ static inline int nand_opcode_8bits(unsigned int command) { switch (command) { case NAND_CMD_READID: case NAND_CMD_PARAM: case NAND_CMD_GET_FEATURES: case NAND_CMD_SET_FEATURES: return 1; default: break; } return 0; } int nand_check_erased_ecc_chunk(void *data, int datalen, void *ecc, int ecclen, void *extraoob, int extraooblen, int threshold); int nand_ecc_choose_conf(struct nand_chip *chip, const struct nand_ecc_caps *caps, int oobavail); /* Default write_oob implementation */ int nand_write_oob_std(struct nand_chip *chip, int page); /* Default read_oob implementation */ int nand_read_oob_std(struct nand_chip *chip, int page); /* Stub used by drivers that do not support GET/SET FEATURES operations */ int nand_get_set_features_notsupp(struct nand_chip *chip, int addr, u8 *subfeature_param); /* Default read_page_raw implementation */ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, int page); /* Default write_page_raw implementation */ int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page); /* Reset and initialize a NAND device */ int nand_reset(struct nand_chip *chip, int chipnr); /* NAND operation helpers */ int nand_reset_op(struct nand_chip *chip); int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, unsigned int len); int nand_status_op(struct nand_chip *chip, u8 *status); int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock); int nand_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, void *buf, unsigned int len); int nand_change_read_column_op(struct nand_chip *chip, unsigned int offset_in_page, void *buf, unsigned int len, bool force_8bit); int nand_read_oob_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, void *buf, unsigned int len); int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, const void *buf, unsigned int len); int nand_prog_page_end_op(struct nand_chip *chip); int nand_prog_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, const void *buf, unsigned int len); int nand_change_write_column_op(struct nand_chip *chip, unsigned int offset_in_page, const void *buf, unsigned int len, bool force_8bit); int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, bool force_8bit); int nand_write_data_op(struct nand_chip *chip, const void *buf, unsigned int len, bool force_8bit); /* Scan and identify a NAND device */ int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips, struct nand_flash_dev *ids); static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips) { return nand_scan_with_ids(chip, max_chips, NULL); } /* Internal helper for board drivers which need to override command function */ void nand_wait_ready(struct nand_chip *chip); /* * Free resources held by the NAND device, must be called on error after a * successful nand_scan().
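 *
 * Editor's illustrative sketch of the intended probe error handling (the
 * surrounding probe function is hypothetical; mtd_device_register() comes
 * from <linux/mtd/mtd.h>):
 *
 *	ret = nand_scan(chip, 1);
 *	if (ret)
 *		return ret;
 *
 *	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
 *	if (ret)
 *		nand_cleanup(chip);
 *
 *	return ret;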
*/ void nand_cleanup(struct nand_chip *chip); /* Unregister the MTD device and calls nand_cleanup() */ void nand_release(struct nand_chip *chip); /* * External helper for controller drivers that have to implement the WAITRDY * instruction and have no physical pin to check it. */ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms); struct gpio_desc; int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, unsigned long timeout_ms); /* Select/deselect a NAND target. */ void nand_select_target(struct nand_chip *chip, unsigned int cs); void nand_deselect_target(struct nand_chip *chip); /** * nand_get_data_buf() - Get the internal page buffer * @chip: NAND chip object * * Returns the pre-allocated page buffer after invalidating the cache. This * function should be used by drivers that do not want to allocate their own * bounce buffer and still need such a buffer for specific operations (most * commonly when reading OOB data only). * * Be careful to never call this function in the write/write_oob path, because * the core may have placed the data to be written out in this buffer. * * Return: pointer to the page cache buffer */ static inline void *nand_get_data_buf(struct nand_chip *chip) { chip->pagecache.page = -1; return chip->data_buf; } #endif /* __LINUX_MTD_RAWNAND_H */ mtd/nftl.h 0000644 00000003306 14722070374 0006454 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> */ #ifndef __MTD_NFTL_H__ #define __MTD_NFTL_H__ #include <linux/mtd/mtd.h> #include <linux/mtd/blktrans.h> #include <mtd/nftl-user.h> /* these info are used in ReplUnitTable */ #define BLOCK_NIL 0xffff /* last block of a chain */ #define BLOCK_FREE 0xfffe /* free block */ #define BLOCK_NOTEXPLORED 0xfffd /* non explored block, only used during mounting */ #define BLOCK_RESERVED 0xfffc /* bios block or bad block */ struct NFTLrecord { struct mtd_blktrans_dev mbd; __u16 MediaUnit, SpareMediaUnit; __u32 EraseSize; struct NFTLMediaHeader MediaHdr; int usecount; unsigned char heads; unsigned char sectors; unsigned short cylinders; __u16 numvunits; __u16 lastEUN; /* should be suppressed */ __u16 numfreeEUNs; __u16 LastFreeEUN; /* To speed up finding a free EUN */ int head,sect,cyl; __u16 *EUNtable; /* [numvunits]: First EUN for each virtual unit */ __u16 *ReplUnitTable; /* [numEUNs]: ReplUnitNumber for each */ unsigned int nb_blocks; /* number of physical blocks */ unsigned int nb_boot_blocks; /* number of blocks used by the bios */ struct erase_info instr; }; int NFTL_mount(struct NFTLrecord *s); int NFTL_formatblock(struct NFTLrecord *s, int block); int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf); int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf); #ifndef NFTL_MAJOR #define NFTL_MAJOR 93 #endif #define MAX_NFTLS 16 #define MAX_SECTORS_PER_UNIT 64 #define NFTL_PARTN_BITS 4 #endif /* __MTD_NFTL_H__ */ mtd/jedec.h 0000644 00000003561 14722070374 0006566 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> * Steven J. 
Hill <sjhill@realitydiluted.com> * Thomas Gleixner <tglx@linutronix.de> * * Contains all JEDEC related definitions */ #ifndef __LINUX_MTD_JEDEC_H #define __LINUX_MTD_JEDEC_H struct jedec_ecc_info { u8 ecc_bits; u8 codeword_size; __le16 bb_per_lun; __le16 block_endurance; u8 reserved[2]; } __packed; /* JEDEC features */ #define JEDEC_FEATURE_16_BIT_BUS (1 << 0) struct nand_jedec_params { /* rev info and features block */ /* 'J' 'E' 'S' 'D' */ u8 sig[4]; __le16 revision; __le16 features; u8 opt_cmd[3]; __le16 sec_cmd; u8 num_of_param_pages; u8 reserved0[18]; /* manufacturer information block */ char manufacturer[12]; char model[20]; u8 jedec_id[6]; u8 reserved1[10]; /* memory organization block */ __le32 byte_per_page; __le16 spare_bytes_per_page; u8 reserved2[6]; __le32 pages_per_block; __le32 blocks_per_lun; u8 lun_count; u8 addr_cycles; u8 bits_per_cell; u8 programs_per_page; u8 multi_plane_addr; u8 multi_plane_op_attr; u8 reserved3[38]; /* electrical parameter block */ __le16 async_sdr_speed_grade; __le16 toggle_ddr_speed_grade; __le16 sync_ddr_speed_grade; u8 async_sdr_features; u8 toggle_ddr_features; u8 sync_ddr_features; __le16 t_prog; __le16 t_bers; __le16 t_r; __le16 t_r_multi_plane; __le16 t_ccs; __le16 io_pin_capacitance_typ; __le16 input_pin_capacitance_typ; __le16 clk_pin_capacitance_typ; u8 driver_strength_support; __le16 t_adl; u8 reserved4[36]; /* ECC and endurance block */ u8 guaranteed_good_blocks; __le16 guaranteed_block_endurance; struct jedec_ecc_info ecc_info[4]; u8 reserved5[29]; /* reserved */ u8 reserved6[148]; /* vendor */ __le16 vendor_rev_num; u8 reserved7[88]; /* CRC for Parameter Page */ __le16 crc; } __packed; #endif /* __LINUX_MTD_JEDEC_H */ mtd/concat.h 0000644 00000000755 14722070374 0006765 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * MTD device concatenation layer definitions * * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de> */ #ifndef MTD_CONCAT_H #define MTD_CONCAT_H struct mtd_info *mtd_concat_create( struct mtd_info *subdev[], /* subdevices to concatenate */ int num_devs, /* number of subdevices */ const char *name); /* name for the new device */ void mtd_concat_destroy(struct mtd_info *mtd); #endif mtd/super.h 0000644 00000001102 14722070374 0006637 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* MTD-based superblock handling * * Copyright © 2006 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) */ #ifndef __MTD_SUPER_H__ #define __MTD_SUPER_H__ #ifdef __KERNEL__ #include <linux/mtd/mtd.h> #include <linux/fs.h> #include <linux/mount.h> extern int get_tree_mtd(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); extern void kill_mtd_super(struct super_block *sb); #endif /* __KERNEL__ */ #endif /* __MTD_SUPER_H__ */ mtd/onenand.h 0000644 00000017446 14722070374 0007141 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mtd/onenand.h * * Copyright © 2005-2009 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> */ #ifndef __LINUX_MTD_ONENAND_H #define __LINUX_MTD_ONENAND_H #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/mtd/flashchip.h> #include <linux/mtd/onenand_regs.h> #include <linux/mtd/bbm.h> #define MAX_DIES 2 #define MAX_BUFFERRAM 2 /* Scan and identify a OneNAND device */ extern int onenand_scan(struct mtd_info *mtd, int max_chips); /* Free resources held by the OneNAND device */ extern void onenand_release(struct mtd_info *mtd); /** * struct onenand_bufferram - OneNAND BufferRAM Data * @blockpage: block & page address in BufferRAM */ struct onenand_bufferram { int blockpage; }; /** * struct onenand_chip - OneNAND Private Flash Chip Data * @base: [BOARDSPECIFIC] address to access OneNAND * @dies: [INTERN][FLEX-ONENAND] number of dies on chip * @boundary: [INTERN][FLEX-ONENAND] Boundary of the dies * @diesize: [INTERN][FLEX-ONENAND] Size of the dies * @chipsize: [INTERN] the size of one chip for multichip arrays * FIXME For Flex-OneNAND, chipsize holds maximum possible * device size i.e. when all blocks are considered MLC * @device_id: [INTERN] device ID * @density_mask: chip density, used for DDP devices * @version_id: [INTERN] version ID * @options: [BOARDSPECIFIC] various chip options.
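 *
 * (Illustrative aside, not part of the original kerneldoc: a minimal board
 * driver allocates and maps the chip before scanning it, e.g.
 *
 *	this = kzalloc(sizeof(struct onenand_chip), GFP_KERNEL);
 *	this->base = ioremap(res->start, resource_size(res));
 *	mtd->priv = this;
 *	err = onenand_scan(mtd, 1);
 *
 * where "this", "mtd" and "res" are hypothetical driver locals, and a
 * probe failure after a successful scan is undone with onenand_release().)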
They can * partly be set to inform onenand_scan about * @erase_shift: [INTERN] number of address bits in a block * @page_shift: [INTERN] number of address bits in a page * @page_mask: [INTERN] a page per block mask * @writesize: [INTERN] a real page size * @bufferram_index: [INTERN] BufferRAM index * @bufferram: [INTERN] BufferRAM info * @readw: [REPLACEABLE] hardware specific function for read short * @writew: [REPLACEABLE] hardware specific function for write short * @command: [REPLACEABLE] hardware specific function for writing * commands to the chip * @wait: [REPLACEABLE] hardware specific function for wait on ready * @bbt_wait: [REPLACEABLE] hardware specific function for bbt wait on ready * @unlock_all: [REPLACEABLE] hardware specific function for unlock all * @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area * @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area * @read_word: [REPLACEABLE] hardware specific function for read * register of OneNAND * @write_word: [REPLACEABLE] hardware specific function for write * register of OneNAND * @mmcontrol: sync burst read function * @chip_probe: [REPLACEABLE] hardware specific function for chip probe * @block_markbad: function to mark a block as bad * @scan_bbt: [REPLACEABLE] hardware specific function for scanning * Bad block Table * @chip_lock: [INTERN] spinlock used to protect access to this * structure and the chip * @wq: [INTERN] wait queue to sleep on if a OneNAND * operation is in progress * @state: [INTERN] the current state of the OneNAND device * @page_buf: [INTERN] page main data buffer * @oob_buf: [INTERN] page oob data buffer * @subpagesize: [INTERN] holds the subpagesize * @bbm: [REPLACEABLE] pointer to Bad Block Management * @priv: [OPTIONAL] pointer to private chip data */ struct onenand_chip { void __iomem *base; unsigned dies; unsigned boundary[MAX_DIES]; loff_t diesize[MAX_DIES]; unsigned int chipsize; unsigned int device_id; unsigned int version_id; unsigned int technology; unsigned int density_mask; unsigned int options; unsigned int badblockpos; unsigned int erase_shift; unsigned int page_shift; unsigned int page_mask; unsigned int writesize; unsigned int bufferram_index; struct onenand_bufferram bufferram[MAX_BUFFERRAM]; int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len); int (*wait)(struct mtd_info *mtd, int state); int (*bbt_wait)(struct mtd_info *mtd, int state); void (*unlock_all)(struct mtd_info *mtd); int (*read_bufferram)(struct mtd_info *mtd, int area, unsigned char *buffer, int offset, size_t count); int (*write_bufferram)(struct mtd_info *mtd, int area, const unsigned char *buffer, int offset, size_t count); unsigned short (*read_word)(void __iomem *addr); void (*write_word)(unsigned short value, void __iomem *addr); void (*mmcontrol)(struct mtd_info *mtd, int sync_read); int (*chip_probe)(struct mtd_info *mtd); int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); int (*scan_bbt)(struct mtd_info *mtd); int (*enable)(struct mtd_info *mtd); int (*disable)(struct mtd_info *mtd); struct completion complete; int irq; spinlock_t chip_lock; wait_queue_head_t wq; flstate_t state; unsigned char *page_buf; unsigned char *oob_buf; #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE unsigned char *verify_buf; #endif int subpagesize; void *bbm; void *priv; /* * Shows that the current operation is composed * of a sequence of commands, for example cache program. * The OnGo status bit of such a command is checked at the end of the * sequence.
*/ unsigned int ongoing; }; /* * Helper macros */ #define ONENAND_PAGES_PER_BLOCK (1<<6) #define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index) #define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1) #define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1) #define ONENAND_SET_PREV_BUFFERRAM(this) (this->bufferram_index ^= 1) #define ONENAND_SET_BUFFERRAM0(this) (this->bufferram_index = 0) #define ONENAND_SET_BUFFERRAM1(this) (this->bufferram_index = 1) #define FLEXONENAND(this) \ (this->device_id & DEVICE_IS_FLEXONENAND) #define ONENAND_GET_SYS_CFG1(this) \ (this->read_word(this->base + ONENAND_REG_SYS_CFG1)) #define ONENAND_SET_SYS_CFG1(v, this) \ (this->write_word(v, this->base + ONENAND_REG_SYS_CFG1)) #define ONENAND_IS_DDP(this) \ (this->device_id & ONENAND_DEVICE_IS_DDP) #define ONENAND_IS_MLC(this) \ (this->technology & ONENAND_TECHNOLOGY_IS_MLC) #ifdef CONFIG_MTD_ONENAND_2X_PROGRAM #define ONENAND_IS_2PLANE(this) \ (this->options & ONENAND_HAS_2PLANE) #else #define ONENAND_IS_2PLANE(this) (0) #endif #define ONENAND_IS_CACHE_PROGRAM(this) \ (this->options & ONENAND_HAS_CACHE_PROGRAM) #define ONENAND_IS_NOP_1(this) \ (this->options & ONENAND_HAS_NOP_1) /* Check byte access in OneNAND */ #define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1) #define ONENAND_BADBLOCK_POS 0 /* * Options bits */ #define ONENAND_HAS_CONT_LOCK (0x0001) #define ONENAND_HAS_UNLOCK_ALL (0x0002) #define ONENAND_HAS_2PLANE (0x0004) #define ONENAND_HAS_4KB_PAGE (0x0008) #define ONENAND_HAS_CACHE_PROGRAM (0x0010) #define ONENAND_HAS_NOP_1 (0x0020) #define ONENAND_SKIP_UNLOCK_CHECK (0x0100) #define ONENAND_PAGEBUF_ALLOC (0x1000) #define ONENAND_OOBBUF_ALLOC (0x2000) #define ONENAND_SKIP_INITIAL_UNLOCKING (0x4000) #define ONENAND_IS_4KB_PAGE(this) \ (this->options & ONENAND_HAS_4KB_PAGE) /* * OneNAND Flash Manufacturer ID Codes */ #define ONENAND_MFR_SAMSUNG 0xec #define ONENAND_MFR_NUMONYX 0x20 /** * struct onenand_manufacturers - NAND Flash Manufacturer ID Structure * @name: Manufacturer name * @id: manufacturer ID code of device. */ struct onenand_manufacturers { int id; char *name; }; int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops); unsigned onenand_block(struct onenand_chip *this, loff_t addr); loff_t onenand_addr(struct onenand_chip *this, int block); int flexonenand_region(struct mtd_info *mtd, loff_t addr); struct mtd_partition; struct onenand_platform_data { void (*mmcontrol)(struct mtd_info *mtd, int sync_read); int (*read_bufferram)(struct mtd_info *mtd, int area, unsigned char *buffer, int offset, size_t count); struct mtd_partition *parts; unsigned int nr_parts; }; #endif /* __LINUX_MTD_ONENAND_H */ mtd/mtd.h 0000644 00000046310 14722070374 0006277 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al. */ #ifndef __MTD_MTD_H__ #define __MTD_MTD_H__ #include <linux/types.h> #include <linux/uio.h> #include <linux/notifier.h> #include <linux/device.h> #include <linux/of.h> #include <linux/nvmem-provider.h> #include <mtd/mtd-abi.h> #include <asm/div64.h> #define MTD_FAIL_ADDR_UNKNOWN -1LL struct mtd_info; /* * If the erase fails, fail_addr might indicate exactly which block failed. If * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level * or was not specific to any particular block. 
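 *
 * Illustrative sketch (not part of the original comment), using the
 * struct erase_info defined just below and the mtd_erase() wrapper
 * declared later in this header:
 *
 *	struct erase_info ei = {
 *		.addr = 0,
 *		.len = mtd->erasesize,
 *	};
 *
 *	err = mtd_erase(mtd, &ei);
 *	if (err && ei.fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 *		pr_err("erase failed at 0x%llx\n",
 *		       (unsigned long long)ei.fail_addr);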
*/ struct erase_info { uint64_t addr; uint64_t len; uint64_t fail_addr; }; struct mtd_erase_region_info { uint64_t offset; /* At which this region starts, from the beginning of the MTD */ uint32_t erasesize; /* For this region */ uint32_t numblocks; /* Number of blocks of erasesize in this region */ unsigned long *lockmap; /* If keeping bitmap of locks */ }; /** * struct mtd_oob_ops - oob operation operands * @mode: operation mode * * @len: number of data bytes to write/read * * @retlen: number of data bytes written/read * * @ooblen: number of oob bytes to write/read * @oobretlen: number of oob bytes written/read * @ooboffs: offset of oob data in the oob area (only relevant when * mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW) * @datbuf: data buffer - if NULL only oob data are read/written * @oobbuf: oob data buffer * * Note, some MTD drivers do not allow you to write more than one OOB area at * one go. If you try to do that on such an MTD device, -EINVAL will be * returned. If you want to make your implementation portable on all kind of MTD * devices you should split the write request into several sub-requests when the * request crosses a page boundary. */ struct mtd_oob_ops { unsigned int mode; size_t len; size_t retlen; size_t ooblen; size_t oobretlen; uint32_t ooboffs; uint8_t *datbuf; uint8_t *oobbuf; }; #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 #define MTD_MAX_ECCPOS_ENTRIES_LARGE 640 /** * struct mtd_oob_region - oob region definition * @offset: region offset * @length: region length * * This structure describes a region of the OOB area, and is used * to retrieve ECC or free bytes sections. * Each section is defined by an offset within the OOB area and a * length. */ struct mtd_oob_region { u32 offset; u32 length; }; /* * struct mtd_ooblayout_ops - NAND OOB layout operations * @ecc: function returning an ECC region in the OOB area. * Should return -ERANGE if %section exceeds the total number of * ECC sections. * @free: function returning a free region in the OOB area. * Should return -ERANGE if %section exceeds the total number of * free sections. */ struct mtd_ooblayout_ops { int (*ecc)(struct mtd_info *mtd, int section, struct mtd_oob_region *oobecc); int (*free)(struct mtd_info *mtd, int section, struct mtd_oob_region *oobfree); }; /** * struct mtd_pairing_info - page pairing information * * @pair: pair id * @group: group id * * The term "pair" is used here, even though TLC NANDs might group pages by 3 * (3 bits in a single cell). A pair should regroup all pages that are sharing * the same cell. Pairs are then indexed in ascending order. * * @group is defining the position of a page in a given pair. It can also be * seen as the bit position in the cell: page attached to bit 0 belongs to * group 0, page attached to bit 1 belongs to group 1, etc. * * Example: * The H27UCG8T2BTR-BC datasheet describes the following pairing scheme: * * group-0 group-1 * * pair-0 page-0 page-4 * pair-1 page-1 page-5 * pair-2 page-2 page-8 * ... * pair-127 page-251 page-255 * * * Note that the "group" and "pair" terms were extracted from Samsung and * Hynix datasheets, and might be referenced under other names in other * datasheets (Micron is describing this concept as "shared pages"). */ struct mtd_pairing_info { int pair; int group; }; /** * struct mtd_pairing_scheme - page pairing scheme description * * @ngroups: number of groups. Should be related to the number of bits * per cell. * @get_info: converts a write-unit (page number within an erase block) into * mtd_pairing information (pair + group). 
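 *
 * (Illustrative aside, not from the original kerneldoc: for a hypothetical
 * MLC chip whose pages simply alternate between two groups, the callbacks
 * could be as trivial as
 *
 *	static int toy_get_info(struct mtd_info *mtd, int wunit,
 *				struct mtd_pairing_info *info)
 *	{
 *		info->pair = wunit / 2;
 *		info->group = wunit % 2;
 *		return 0;
 *	}
 *
 *	static int toy_get_wunit(struct mtd_info *mtd,
 *				 const struct mtd_pairing_info *info)
 *	{
 *		return (info->pair * 2) + info->group;
 *	}
 *
 * real schemes, such as the H27UCG8T2BTR one quoted above, are less
 * regular.)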
This function should * fill the info parameter based on the wunit index or return * -EINVAL if the wunit parameter is invalid. * @get_wunit: converts pairing information into a write-unit (page) number. * This function should return the wunit index pointed by the * pairing information described in the info argument. It should * return -EINVAL, if there's no wunit corresponding to the * passed pairing information. * * See mtd_pairing_info documentation for a detailed explanation of the * pair and group concepts. * * The mtd_pairing_scheme structure provides a generic solution to represent * NAND page pairing scheme. Instead of exposing two big tables to do the * write-unit <-> (pair + group) conversions, we ask the MTD drivers to * implement the ->get_info() and ->get_wunit() functions. * * MTD users will then be able to query these information by using the * mtd_pairing_info_to_wunit() and mtd_wunit_to_pairing_info() helpers. * * @ngroups is here to help MTD users iterating over all the pages in a * given pair. This value can be retrieved by MTD users using the * mtd_pairing_groups() helper. * * Examples are given in the mtd_pairing_info_to_wunit() and * mtd_wunit_to_pairing_info() documentation. */ struct mtd_pairing_scheme { int ngroups; int (*get_info)(struct mtd_info *mtd, int wunit, struct mtd_pairing_info *info); int (*get_wunit)(struct mtd_info *mtd, const struct mtd_pairing_info *info); }; struct module; /* only needed for owner field in mtd_info */ /** * struct mtd_debug_info - debugging information for an MTD device. * * @dfs_dir: direntry object of the MTD device debugfs directory */ struct mtd_debug_info { struct dentry *dfs_dir; const char *partname; const char *partid; }; struct mtd_info { u_char type; uint32_t flags; uint32_t orig_flags; /* Flags as before running mtd checks */ uint64_t size; // Total size of the MTD /* "Major" erase size for the device. Naïve users may take this * to be the only erase size available, or may use the more detailed * information below if they desire */ uint32_t erasesize; /* Minimal writable flash unit size. In case of NOR flash it is 1 (even * though individual bits can be cleared), in case of NAND flash it is * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR * it is of ECC block size, etc. It is illegal to have writesize = 0. * Any driver registering a struct mtd_info must ensure a writesize of * 1 or larger. */ uint32_t writesize; /* * Size of the write buffer used by the MTD. MTD devices having a write * buffer can write multiple writesize chunks at a time. E.g. while * writing 4 * writesize bytes to a device with 2 * writesize bytes * buffer the MTD driver can (but doesn't have to) do 2 writesize * operations, but not 4. Currently, all NANDs have writebufsize * equivalent to writesize (NAND page size). Some NOR flashes do have * writebufsize greater than writesize. */ uint32_t writebufsize; uint32_t oobsize; // Amount of OOB data per block (e.g. 16) uint32_t oobavail; // Available OOB bytes per block /* * If erasesize is a power of 2 then the shift is stored in * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize. */ unsigned int erasesize_shift; unsigned int writesize_shift; /* Masks based on erasesize_shift and writesize_shift */ unsigned int erasesize_mask; unsigned int writesize_mask; /* * read ops return -EUCLEAN if max number of bitflips corrected on any * one region comprising an ecc step equals or exceeds this value. * Settable by driver, else defaults to ecc_strength. 
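 *
 * (Illustrative aside, not part of the original comment: callers usually
 * treat -EUCLEAN as "data is good but the block should be scrubbed", e.g.
 *
 *	err = mtd_read(mtd, from, len, &retlen, buf);
 *	if (mtd_is_bitflip(err))
 *		schedule_scrub(from);
 *	else if (err)
 *		return err;
 *
 * where schedule_scrub() is a hypothetical wear-management hook and
 * mtd_is_bitflip() is defined near the end of this header.)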
User can override * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed; * see Documentation/ABI/testing/sysfs-class-mtd for more detail. */ unsigned int bitflip_threshold; /* Kernel-only stuff starts here. */ const char *name; int index; /* OOB layout description */ const struct mtd_ooblayout_ops *ooblayout; /* NAND pairing scheme, only provided for MLC/TLC NANDs */ const struct mtd_pairing_scheme *pairing; /* the ecc step size. */ unsigned int ecc_step_size; /* max number of correctible bit errors per ecc step */ unsigned int ecc_strength; /* Data for variable erase regions. If numeraseregions is zero, * it means that the whole device has erasesize as given above. */ int numeraseregions; struct mtd_erase_region_info *eraseregions; /* * Do not call via these pointers, use corresponding mtd_*() * wrappers instead. */ int (*_erase) (struct mtd_info *mtd, struct erase_info *instr); int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys); int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int (*_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); int (*_read_oob) (struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops); int (*_write_oob) (struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops); int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf); int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf); int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, u_char *buf); int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len); int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); void (*_sync) (struct mtd_info *mtd); int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len); int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs); int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len); int (*_suspend) (struct mtd_info *mtd); void (*_resume) (struct mtd_info *mtd); void (*_reboot) (struct mtd_info *mtd); /* * If the driver is something smart, like UBI, it may need to maintain * its own reference counting. The below functions are only for driver. 
*/ int (*_get_device) (struct mtd_info *mtd); void (*_put_device) (struct mtd_info *mtd); /* * flag indicates a panic write, low level drivers can take appropriate * action if required to ensure writes go through */ bool oops_panic_write; struct notifier_block reboot_notifier; /* default mode before reboot */ /* ECC status information */ struct mtd_ecc_stats ecc_stats; /* Subpage shift (NAND) */ int subpage_sft; void *priv; struct module *owner; struct device dev; int usecount; struct mtd_debug_info dbg; struct nvmem_device *nvmem; }; int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobecc); int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, int *section, struct mtd_oob_region *oobregion); int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, const u8 *oobbuf, int start, int nbytes); int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, u8 *oobbuf, int start, int nbytes); int mtd_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *oobfree); int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, const u8 *oobbuf, int start, int nbytes); int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, u8 *oobbuf, int start, int nbytes); int mtd_ooblayout_count_freebytes(struct mtd_info *mtd); int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd); static inline void mtd_set_ooblayout(struct mtd_info *mtd, const struct mtd_ooblayout_ops *ooblayout) { mtd->ooblayout = ooblayout; } static inline void mtd_set_pairing_scheme(struct mtd_info *mtd, const struct mtd_pairing_scheme *pairing) { mtd->pairing = pairing; } static inline void mtd_set_of_node(struct mtd_info *mtd, struct device_node *np) { mtd->dev.of_node = np; if (!mtd->name) of_property_read_string(np, "label", &mtd->name); } static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) { return dev_of_node(&mtd->dev); } static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) { return ops->mode == MTD_OPS_AUTO_OOB ? 
mtd->oobavail : mtd->oobsize; } static inline int mtd_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len) { if (!mtd->_max_bad_blocks) return -ENOTSUPP; if (mtd->size < (len + ofs) || ofs < 0) return -EINVAL; return mtd->_max_bad_blocks(mtd, ofs, len); } int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, struct mtd_pairing_info *info); int mtd_pairing_info_to_wunit(struct mtd_info *mtd, const struct mtd_pairing_info *info); int mtd_pairing_groups(struct mtd_info *mtd); int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys); int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len); unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, unsigned long offset, unsigned long flags); int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops); int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops); int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf); int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf); int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, u_char *buf); int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len); int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); static inline void mtd_sync(struct mtd_info *mtd) { if (mtd->_sync) mtd->_sync(mtd); } int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs); int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs); int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs); static inline int mtd_suspend(struct mtd_info *mtd) { return mtd->_suspend ? mtd->_suspend(mtd) : 0; } static inline void mtd_resume(struct mtd_info *mtd) { if (mtd->_resume) mtd->_resume(mtd); } static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) { if (mtd->erasesize_shift) return sz >> mtd->erasesize_shift; do_div(sz, mtd->erasesize); return sz; } static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd) { if (mtd->erasesize_shift) return sz & mtd->erasesize_mask; return do_div(sz, mtd->erasesize); } /** * mtd_align_erase_req - Adjust an erase request to align things on eraseblock * boundaries. * @mtd: the MTD device this erase request applies on * @req: the erase request to adjust * * This function will adjust @req->addr and @req->len to align them on * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0. 
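 *
 * Worked example (added for illustration): with @mtd->erasesize = 0x20000,
 * a request of @req->addr = 0x21000 and @req->len = 0x1000 is first widened
 * to @req->addr = 0x20000, @req->len = 0x2000, and then rounded up to
 * @req->len = 0x20000, i.e. exactly the eraseblock containing the original
 * range.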
*/ static inline void mtd_align_erase_req(struct mtd_info *mtd, struct erase_info *req) { u32 mod; if (WARN_ON(!mtd->erasesize)) return; mod = mtd_mod_by_eb(req->addr, mtd); if (mod) { req->addr -= mod; req->len += mod; } mod = mtd_mod_by_eb(req->addr + req->len, mtd); if (mod) req->len += mtd->erasesize - mod; } static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) { if (mtd->writesize_shift) return sz >> mtd->writesize_shift; do_div(sz, mtd->writesize); return sz; } static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) { if (mtd->writesize_shift) return sz & mtd->writesize_mask; return do_div(sz, mtd->writesize); } static inline int mtd_wunit_per_eb(struct mtd_info *mtd) { return mtd->erasesize / mtd->writesize; } static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs) { return mtd_div_by_ws(mtd_mod_by_eb(offs, mtd), mtd); } static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base, int wunit) { return base + (wunit * mtd->writesize); } static inline int mtd_has_oob(const struct mtd_info *mtd) { return mtd->_read_oob && mtd->_write_oob; } static inline int mtd_type_is_nand(const struct mtd_info *mtd) { return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH; } static inline int mtd_can_have_bb(const struct mtd_info *mtd) { return !!mtd->_block_isbad; } /* Kernel-side ioctl definitions */ struct mtd_partition; struct mtd_part_parser_data; extern int mtd_device_parse_register(struct mtd_info *mtd, const char * const *part_probe_types, struct mtd_part_parser_data *parser_data, const struct mtd_partition *defparts, int defnr_parts); #define mtd_device_register(master, parts, nr_parts) \ mtd_device_parse_register(master, NULL, NULL, parts, nr_parts) extern int mtd_device_unregister(struct mtd_info *master); extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num); extern int __get_mtd_device(struct mtd_info *mtd); extern void __put_mtd_device(struct mtd_info *mtd); extern struct mtd_info *get_mtd_device_nm(const char *name); extern void put_mtd_device(struct mtd_info *mtd); struct mtd_notifier { void (*add)(struct mtd_info *mtd); void (*remove)(struct mtd_info *mtd); struct list_head list; }; extern void register_mtd_user (struct mtd_notifier *new); extern int unregister_mtd_user (struct mtd_notifier *old); void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size); static inline int mtd_is_bitflip(int err) { return err == -EUCLEAN; } static inline int mtd_is_eccerr(int err) { return err == -EBADMSG; } static inline int mtd_is_bitflip_or_eccerr(int err) { return mtd_is_bitflip(err) || mtd_is_eccerr(err); } unsigned mtd_mmap_capabilities(struct mtd_info *mtd); #endif /* __MTD_MTD_H__ */ mtd/ubi.h 0000644 00000023272 14722070374 0006274 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) International Business Machines Corp., 2006 * * Author: Artem Bityutskiy (Битюцкий Артём) */ #ifndef __LINUX_UBI_H__ #define __LINUX_UBI_H__ #include <linux/ioctl.h> #include <linux/types.h> #include <linux/scatterlist.h> #include <mtd/ubi-user.h> /* All volumes/LEBs */ #define UBI_ALL -1 /* * Maximum number of scatter gather list entries, * we use only 64 to have a lower memory footprint. */ #define UBI_MAX_SG_COUNT 64 /* * enum ubi_open_mode - UBI volume open mode constants. * * UBI_READONLY: read-only mode * UBI_READWRITE: read-write mode * UBI_EXCLUSIVE: exclusive mode * UBI_METAONLY: modify only the volume meta-data, * i.e.
the data stored in the volume table, but not in any of the volume LEBs. */ enum { UBI_READONLY = 1, UBI_READWRITE, UBI_EXCLUSIVE, UBI_METAONLY }; /** * struct ubi_volume_info - UBI volume description data structure. * @vol_id: volume ID * @ubi_num: UBI device number this volume belongs to * @size: how many physical eraseblocks are reserved for this volume * @used_bytes: how many bytes of data this volume contains * @used_ebs: how many physical eraseblocks of this volume actually contain any * data * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) * @corrupted: non-zero if the volume is corrupted (static volumes only) * @upd_marker: non-zero if the volume has update marker set * @alignment: volume alignment * @usable_leb_size: how many bytes are available in logical eraseblocks of * this volume * @name_len: volume name length * @name: volume name * @cdev: UBI volume character device major and minor numbers * * The @corrupted flag is only relevant to static volumes and is always zero * for dynamic ones. This is because UBI does not care about dynamic volume * data protection and only cares about protecting static volume data. * * The @upd_marker flag is set if the volume update operation was interrupted. * Before touching the volume data during the update operation, UBI first sets * the update marker flag for this volume. If the volume update operation was * further interrupted, the update marker indicates this. If the update marker * is set, the contents of the volume are certainly damaged and a new volume * update operation has to be started. * * To put it differently, @corrupted and @upd_marker fields have different * semantics: * o the @corrupted flag means that this static volume is corrupted for some * reason, but not because of an interrupted volume update * o the @upd_marker field means that the volume is damaged because of an * interrupted update operation. * * I.e., the @corrupted flag is never set if the @upd_marker flag is set. * * The @used_bytes and @used_ebs fields are only really needed for static * volumes and contain the number of bytes stored in this static volume and how * many eraseblocks this data occupies. In case of dynamic volumes, the * @used_bytes field is equivalent to @size*@usable_leb_size, and the @used_ebs * field is equivalent to @size. * * In general, logical eraseblock size is a property of the UBI device, not * of the UBI volume. Indeed, the logical eraseblock size depends on the * physical eraseblock size and on how many bytes UBI headers consume. But * because of the volume alignment (@alignment), the usable size of logical * eraseblocks of a volume may be less. The following equation is true: * @usable_leb_size = LEB size - (LEB size mod @alignment), * where LEB size is the logical eraseblock size defined by the UBI device. * * The alignment is a multiple of the minimal flash input/output unit size or %1 * if all the available space is used. * * To put this differently, alignment may be considered as a way to change * volume logical eraseblock sizes. */ struct ubi_volume_info { int ubi_num; int vol_id; int size; long long used_bytes; int used_ebs; int vol_type; int corrupted; int upd_marker; int alignment; int usable_leb_size; int name_len; const char *name; dev_t cdev; }; /** * struct ubi_sgl - UBI scatter gather list data structure.
* @list_pos: current position in @sg[] * @page_pos: current position in @sg[@list_pos] * @sg: the scatter gather list itself * * ubi_sgl is a wrapper around a scatter list which keeps track of the * current position in the list and the current list item such that * it can be used across multiple ubi_leb_read_sg() calls. */ struct ubi_sgl { int list_pos; int page_pos; struct scatterlist sg[UBI_MAX_SG_COUNT]; }; /** * ubi_sgl_init - initialize an UBI scatter gather list data structure. * @usgl: the UBI scatter gather struct itself * * Please note that you still have to use sg_init_table() or any adequate * function to initialize the underlying struct scatterlist. */ static inline void ubi_sgl_init(struct ubi_sgl *usgl) { usgl->list_pos = 0; usgl->page_pos = 0; } /** * struct ubi_device_info - UBI device description data structure. * @ubi_num: ubi device number * @leb_size: logical eraseblock size on this UBI device * @leb_start: starting offset of logical eraseblocks within physical * eraseblocks * @min_io_size: minimal I/O unit size * @max_write_size: maximum amount of bytes the underlying flash can write at a * time (MTD write buffer size) * @ro_mode: if this device is in read-only mode * @cdev: UBI character device major and minor numbers * * Note, @leb_size is the logical eraseblock size offered by the UBI device. * Volumes of this UBI device may have smaller logical eraseblock size if their * alignment is not equivalent to %1. * * The @max_write_size field describes the flash's maximum write unit. For * example, NOR flash allows for changing individual bytes, so @min_io_size is * %1. However, it does not mean that NOR flash has to write data byte-by-byte. * Instead, CFI NOR flashes have a write-buffer of, e.g., 64 bytes, and when * writing large chunks of data, they write 64-bytes at a time. Obviously, this * improves write throughput. * * Also, the MTD device may have N interleaved (striped) flash chips * underneath, in which case @min_io_size can be the physical min. I/O size of a * single flash chip, while @max_write_size can be N * @min_io_size. * * The @max_write_size field is always greater than or equal to @min_io_size. * E.g., some NOR flashes may have (@min_io_size = 1, @max_write_size = 64). In * contrast, NAND flashes usually have @min_io_size = @max_write_size = NAND * page size. */ struct ubi_device_info { int ubi_num; int leb_size; int leb_start; int min_io_size; int max_write_size; int ro_mode; dev_t cdev; }; /* * Volume notification types. * @UBI_VOLUME_ADDED: a volume has been added (an UBI device was attached or a * volume was created) * @UBI_VOLUME_REMOVED: a volume has been removed (an UBI device was detached * or a volume was removed) * @UBI_VOLUME_RESIZED: a volume has been re-sized * @UBI_VOLUME_RENAMED: a volume has been re-named * @UBI_VOLUME_UPDATED: data has been written to a volume * * These constants define which type of event has happened when a volume * notification function is invoked. */ enum { UBI_VOLUME_ADDED, UBI_VOLUME_REMOVED, UBI_VOLUME_RESIZED, UBI_VOLUME_RENAMED, UBI_VOLUME_UPDATED, }; /* * struct ubi_notification - UBI notification description structure. * @di: UBI device description object * @vi: UBI volume description object * * UBI notifiers are called with a pointer to an object of this type. The * object describes the notification. Namely, it provides a description of the * UBI device and UBI volume the notification informs about.
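 *
 * Illustrative sketch (not part of the original comment): a client could
 * watch for new volumes roughly as follows, where my_vol_added() is a
 * hypothetical handler:
 *
 *	static int my_notify(struct notifier_block *nb, unsigned long type,
 *			     void *data)
 *	{
 *		struct ubi_notification *nt = data;
 *
 *		if (type == UBI_VOLUME_ADDED)
 *			my_vol_added(&nt->di, &nt->vi);
 *		return NOTIFY_OK;
 *	}
 *
 * registered with ubi_register_volume_notifier(), declared below.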
*/ struct ubi_notification { struct ubi_device_info di; struct ubi_volume_info vi; }; /* UBI descriptor given to users when they open UBI volumes */ struct ubi_volume_desc; int ubi_get_device_info(int ubi_num, struct ubi_device_info *di); void ubi_get_volume_info(struct ubi_volume_desc *desc, struct ubi_volume_info *vi); struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode); struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, int mode); struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode); int ubi_register_volume_notifier(struct notifier_block *nb, int ignore_existing); int ubi_unregister_volume_notifier(struct notifier_block *nb); void ubi_close_volume(struct ubi_volume_desc *desc); int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, int len, int check); int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl, int offset, int len, int check); int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, int offset, int len); int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, int len); int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum); int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); int ubi_leb_map(struct ubi_volume_desc *desc, int lnum); int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); int ubi_sync(int ubi_num); int ubi_flush(int ubi_num, int vol_id, int lnum); /* * This function is the same as the 'ubi_leb_read()' function, but it does not * provide the checking capability. */ static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, int len) { return ubi_leb_read(desc, lnum, buf, offset, len, 0); } /* * This function is the same as the 'ubi_leb_read_sg()' function, but it does * not provide the checking capability. 
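 *
 * Illustrative usage sketch (not from the original comment), with a
 * hypothetical UBI device 0, volume 3 and buffer:
 *
 *	struct ubi_volume_desc *desc;
 *
 *	desc = ubi_open_volume(0, 3, UBI_READONLY);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	err = ubi_read(desc, 0, buf, 0, len);
 *	ubi_close_volume(desc);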
*/ static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl, int offset, int len) { return ubi_leb_read_sg(desc, lnum, sgl, offset, len, 0); } #endif /* !__LINUX_UBI_H__ */ mtd/cfi_endian.h 0000644 00000002752 14722070374 0007574 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> */ #include <asm/byteorder.h> #define CFI_HOST_ENDIAN 1 #define CFI_LITTLE_ENDIAN 2 #define CFI_BIG_ENDIAN 3 #if !defined(CONFIG_MTD_CFI_ADV_OPTIONS) || defined(CONFIG_MTD_CFI_NOSWAP) #define CFI_DEFAULT_ENDIAN CFI_HOST_ENDIAN #elif defined(CONFIG_MTD_CFI_LE_BYTE_SWAP) #define CFI_DEFAULT_ENDIAN CFI_LITTLE_ENDIAN #elif defined(CONFIG_MTD_CFI_BE_BYTE_SWAP) #define CFI_DEFAULT_ENDIAN CFI_BIG_ENDIAN #else #error No CFI endianness defined #endif #define cfi_default(s) ((s)?:CFI_DEFAULT_ENDIAN) #define cfi_be(s) (cfi_default(s) == CFI_BIG_ENDIAN) #define cfi_le(s) (cfi_default(s) == CFI_LITTLE_ENDIAN) #define cfi_host(s) (cfi_default(s) == CFI_HOST_ENDIAN) #define cpu_to_cfi8(map, x) (x) #define cfi8_to_cpu(map, x) (x) #define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x)) #define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x)) #define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x)) #define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x)) #define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x)) #define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x)) #define _cpu_to_cfi(w, s, x) (cfi_host(s)?(x):_swap_to_cfi(w, s, x)) #define _cfi_to_cpu(w, s, x) (cfi_host(s)?(x):_swap_to_cpu(w, s, x)) #define _swap_to_cfi(w, s, x) (cfi_be(s)?cpu_to_be##w(x):cpu_to_le##w(x)) #define _swap_to_cpu(w, s, x) (cfi_be(s)?be##w##_to_cpu(x):le##w##_to_cpu(x)) mtd/pismo.h 0000644 00000000417 14722070374 0006640 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * PISMO memory driver - http://www.pismoworld.org/ */ #ifndef __LINUX_MTD_PISMO_H #define __LINUX_MTD_PISMO_H struct pismo_pdata { void (*set_vpp)(void *, int); void *vpp_data; phys_addr_t cs_addrs[5]; }; #endif mtd/cfi.h 0000644 00000023643 14722070374 0006260 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al. 
*/ #ifndef __MTD_CFI_H__ #define __MTD_CFI_H__ #include <linux/delay.h> #include <linux/types.h> #include <linux/bug.h> #include <linux/interrupt.h> #include <linux/mtd/flashchip.h> #include <linux/mtd/map.h> #include <linux/mtd/cfi_endian.h> #include <linux/mtd/xip.h> #ifdef CONFIG_MTD_CFI_I1 #define cfi_interleave(cfi) 1 #define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1) #else #define cfi_interleave_is_1(cfi) (0) #endif #ifdef CONFIG_MTD_CFI_I2 # ifdef cfi_interleave # undef cfi_interleave # define cfi_interleave(cfi) ((cfi)->interleave) # else # define cfi_interleave(cfi) 2 # endif #define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2) #else #define cfi_interleave_is_2(cfi) (0) #endif #ifdef CONFIG_MTD_CFI_I4 # ifdef cfi_interleave # undef cfi_interleave # define cfi_interleave(cfi) ((cfi)->interleave) # else # define cfi_interleave(cfi) 4 # endif #define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4) #else #define cfi_interleave_is_4(cfi) (0) #endif #ifdef CONFIG_MTD_CFI_I8 # ifdef cfi_interleave # undef cfi_interleave # define cfi_interleave(cfi) ((cfi)->interleave) # else # define cfi_interleave(cfi) 8 # endif #define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8) #else #define cfi_interleave_is_8(cfi) (0) #endif #ifndef cfi_interleave #warning No CONFIG_MTD_CFI_Ix selected. No NOR chip support can work. static inline int cfi_interleave(void *cfi) { BUG(); return 0; } #endif static inline int cfi_interleave_supported(int i) { switch (i) { #ifdef CONFIG_MTD_CFI_I1 case 1: #endif #ifdef CONFIG_MTD_CFI_I2 case 2: #endif #ifdef CONFIG_MTD_CFI_I4 case 4: #endif #ifdef CONFIG_MTD_CFI_I8 case 8: #endif return 1; default: return 0; } } /* NB: these values must represent the number of bytes needed to meet the * device type (x8, x16, x32). E.g. a 32 bit device is 4 x 8 bits. * These numbers are used in calculations. */ #define CFI_DEVICETYPE_X8 (8 / 8) #define CFI_DEVICETYPE_X16 (16 / 8) #define CFI_DEVICETYPE_X32 (32 / 8) #define CFI_DEVICETYPE_X64 (64 / 8) /* Device Interface Code Assignments from the "Common Flash Memory Interface * Publication 100" dated December 1, 2001. */ #define CFI_INTERFACE_X8_ASYNC 0x0000 #define CFI_INTERFACE_X16_ASYNC 0x0001 #define CFI_INTERFACE_X8_BY_X16_ASYNC 0x0002 #define CFI_INTERFACE_X32_ASYNC 0x0003 #define CFI_INTERFACE_X16_BY_X32_ASYNC 0x0005 #define CFI_INTERFACE_NOT_ALLOWED 0xffff /* NB: We keep these structures in memory in HOST byteorder, except * where individually noted.
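 *
 * Illustrative sketch (not part of the original comment): each 32-bit
 * EraseRegionInfo entry of struct cfi_ident below packs, per the CFI
 * specification, the region's block count minus one into its low 16 bits
 * and the block size in 256-byte units into its high 16 bits, so chip
 * drivers typically decode it as
 *
 *	ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
 *	ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) *
 *		 cfi_interleave(cfi);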
*/ /* Basic Query Structure */ struct cfi_ident { uint8_t qry[3]; uint16_t P_ID; uint16_t P_ADR; uint16_t A_ID; uint16_t A_ADR; uint8_t VccMin; uint8_t VccMax; uint8_t VppMin; uint8_t VppMax; uint8_t WordWriteTimeoutTyp; uint8_t BufWriteTimeoutTyp; uint8_t BlockEraseTimeoutTyp; uint8_t ChipEraseTimeoutTyp; uint8_t WordWriteTimeoutMax; uint8_t BufWriteTimeoutMax; uint8_t BlockEraseTimeoutMax; uint8_t ChipEraseTimeoutMax; uint8_t DevSize; uint16_t InterfaceDesc; uint16_t MaxBufWriteSize; uint8_t NumEraseRegions; uint32_t EraseRegionInfo[0]; /* Not host ordered */ } __packed; /* Extended Query Structure for both PRI and ALT */ struct cfi_extquery { uint8_t pri[3]; uint8_t MajorVersion; uint8_t MinorVersion; } __packed; /* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */ struct cfi_pri_intelext { uint8_t pri[3]; uint8_t MajorVersion; uint8_t MinorVersion; uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature block follows - FIXME - not currently supported */ uint8_t SuspendCmdSupport; uint16_t BlkStatusRegMask; uint8_t VccOptimal; uint8_t VppOptimal; uint8_t NumProtectionFields; uint16_t ProtRegAddr; uint8_t FactProtRegSize; uint8_t UserProtRegSize; uint8_t extra[0]; } __packed; struct cfi_intelext_otpinfo { uint32_t ProtRegAddr; uint16_t FactGroups; uint8_t FactProtRegSize; uint16_t UserGroups; uint8_t UserProtRegSize; } __packed; struct cfi_intelext_blockinfo { uint16_t NumIdentBlocks; uint16_t BlockSize; uint16_t MinBlockEraseCycles; uint8_t BitsPerCell; uint8_t BlockCap; } __packed; struct cfi_intelext_regioninfo { uint16_t NumIdentPartitions; uint8_t NumOpAllowed; uint8_t NumOpAllowedSimProgMode; uint8_t NumOpAllowedSimEraMode; uint8_t NumBlockTypes; struct cfi_intelext_blockinfo BlockTypes[1]; } __packed; struct cfi_intelext_programming_regioninfo { uint8_t ProgRegShift; uint8_t Reserved1; uint8_t ControlValid; uint8_t Reserved2; uint8_t ControlInvalid; uint8_t Reserved3; } __packed; /* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */ struct cfi_pri_amdstd { uint8_t pri[3]; uint8_t MajorVersion; uint8_t MinorVersion; uint8_t SiliconRevision; /* bits 1-0: Address Sensitive Unlock */ uint8_t EraseSuspend; uint8_t BlkProt; uint8_t TmpBlkUnprotect; uint8_t BlkProtUnprot; uint8_t SimultaneousOps; uint8_t BurstMode; uint8_t PageMode; uint8_t VppMin; uint8_t VppMax; uint8_t TopBottom; /* Below field are added from version 1.5 */ uint8_t ProgramSuspend; uint8_t UnlockBypass; uint8_t SecureSiliconSector; uint8_t SoftwareFeatures; #define CFI_POLL_STATUS_REG BIT(0) #define CFI_POLL_DQ BIT(1) } __packed; /* Vendor-Specific PRI for Atmel chips (command set 0x0002) */ struct cfi_pri_atmel { uint8_t pri[3]; uint8_t MajorVersion; uint8_t MinorVersion; uint8_t Features; uint8_t BottomBoot; uint8_t BurstMode; uint8_t PageMode; } __packed; struct cfi_pri_query { uint8_t NumFields; uint32_t ProtField[1]; /* Not host ordered */ } __packed; struct cfi_bri_query { uint8_t PageModeReadCap; uint8_t NumFields; uint32_t ConfField[1]; /* Not host ordered */ } __packed; #define P_ID_NONE 0x0000 #define P_ID_INTEL_EXT 0x0001 #define P_ID_AMD_STD 0x0002 #define P_ID_INTEL_STD 0x0003 #define P_ID_AMD_EXT 0x0004 #define P_ID_WINBOND 0x0006 #define P_ID_ST_ADV 0x0020 #define P_ID_MITSUBISHI_STD 0x0100 #define P_ID_MITSUBISHI_EXT 0x0101 #define P_ID_SST_PAGE 0x0102 #define P_ID_SST_OLD 0x0701 #define P_ID_INTEL_PERFORMANCE 0x0200 #define P_ID_INTEL_DATA 0x0210 #define P_ID_RESERVED 0xffff #define CFI_MODE_CFI 1 #define CFI_MODE_JEDEC 0 struct cfi_private 
{ uint16_t cmdset; void *cmdset_priv; int interleave; int device_type; int cfi_mode; /* Are we a JEDEC device pretending to be CFI? */ int addr_unlock1; int addr_unlock2; struct mtd_info *(*cmdset_setup)(struct map_info *); struct cfi_ident *cfiq; /* For now only one. We insist that all devs must be of the same type. */ int mfr, id; int numchips; map_word sector_erase_cmd; unsigned long chipshift; /* Because they're of the same type */ const char *im_name; /* inter_module name for cmdset_setup */ unsigned long quirks; struct flchip chips[0]; /* per-chip data structure for each chip */ }; uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, struct map_info *map, struct cfi_private *cfi); map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi); #define CMD(x) cfi_build_cmd((x), map, cfi) unsigned long cfi_merge_status(map_word val, struct map_info *map, struct cfi_private *cfi); #define MERGESTATUS(x) cfi_merge_status((x), map, cfi) uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base, struct map_info *map, struct cfi_private *cfi, int type, map_word *prev_val); static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) { map_word val = map_read(map, addr); if (map_bankwidth_is_1(map)) { return val.x[0]; } else if (map_bankwidth_is_2(map)) { return cfi16_to_cpu(map, val.x[0]); } else { /* No point in a 64-bit byteswap since that would just be swapping the responses from different chips, and we are only interested in one chip (a representative sample) */ return cfi32_to_cpu(map, val.x[0]); } } static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr) { map_word val = map_read(map, addr); if (map_bankwidth_is_1(map)) { return val.x[0] & 0xff; } else if (map_bankwidth_is_2(map)) { return cfi16_to_cpu(map, val.x[0]); } else { /* No point in a 64-bit byteswap since that would just be swapping the responses from different chips, and we are only interested in one chip (a representative sample) */ return cfi32_to_cpu(map, val.x[0]); } } void cfi_udelay(int us); int __xipram cfi_qry_present(struct map_info *map, __u32 base, struct cfi_private *cfi); int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map, struct cfi_private *cfi); void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map, struct cfi_private *cfi); struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size, const char* name); struct cfi_fixup { uint16_t mfr; uint16_t id; void (*fixup)(struct mtd_info *mtd); }; #define CFI_MFR_ANY 0xFFFF #define CFI_ID_ANY 0xFFFF #define CFI_MFR_CONTINUATION 0x007F #define CFI_MFR_AMD 0x0001 #define CFI_MFR_AMIC 0x0037 #define CFI_MFR_ATMEL 0x001F #define CFI_MFR_EON 0x001C #define CFI_MFR_FUJITSU 0x0004 #define CFI_MFR_HYUNDAI 0x00AD #define CFI_MFR_INTEL 0x0089 #define CFI_MFR_MACRONIX 0x00C2 #define CFI_MFR_NEC 0x0010 #define CFI_MFR_PMC 0x009D #define CFI_MFR_SAMSUNG 0x00EC #define CFI_MFR_SHARP 0x00B0 #define CFI_MFR_SST 0x00BF #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ #define CFI_MFR_MICRON 0x002C /* Micron */ #define CFI_MFR_TOSHIBA 0x0098 #define CFI_MFR_WINBOND 0x00DA void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk); int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob, loff_t ofs, size_t len, void *thunk); #endif /* __MTD_CFI_H__ */ mtd/nand.h 0000644 00000052670 14722070374 0006441 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* 
* Copyright 2017 - Free Electrons * * Authors: * Boris Brezillon <boris.brezillon@free-electrons.com> * Peter Pan <peterpandong@micron.com> */ #ifndef __LINUX_MTD_NAND_H #define __LINUX_MTD_NAND_H #include <linux/mtd/mtd.h> /** * struct nand_memory_organization - Memory organization structure * @bits_per_cell: number of bits per NAND cell * @pagesize: page size * @oobsize: OOB area size * @pages_per_eraseblock: number of pages per eraseblock * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number) * @max_bad_eraseblocks_per_lun: maximum number of eraseblocks per LUN * @planes_per_lun: number of planes per LUN * @luns_per_target: number of LUN per target (target is a synonym for die) * @ntargets: total number of targets exposed by the NAND device */ struct nand_memory_organization { unsigned int bits_per_cell; unsigned int pagesize; unsigned int oobsize; unsigned int pages_per_eraseblock; unsigned int eraseblocks_per_lun; unsigned int max_bad_eraseblocks_per_lun; unsigned int planes_per_lun; unsigned int luns_per_target; unsigned int ntargets; }; #define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt) \ { \ .bits_per_cell = (bpc), \ .pagesize = (ps), \ .oobsize = (os), \ .pages_per_eraseblock = (ppe), \ .eraseblocks_per_lun = (epl), \ .max_bad_eraseblocks_per_lun = (mbb), \ .planes_per_lun = (ppl), \ .luns_per_target = (lpt), \ .ntargets = (nt), \ } /** * struct nand_row_converter - Information needed to convert an absolute offset * into a row address * @lun_addr_shift: position of the LUN identifier in the row address * @eraseblock_addr_shift: position of the eraseblock identifier in the row * address */ struct nand_row_converter { unsigned int lun_addr_shift; unsigned int eraseblock_addr_shift; }; /** * struct nand_pos - NAND position object * @target: the NAND target/die * @lun: the LUN identifier * @plane: the plane within the LUN * @eraseblock: the eraseblock within the LUN * @page: the page within the LUN * * These information are usually used by specific sub-layers to select the * appropriate target/die and generate a row address to pass to the device. */ struct nand_pos { unsigned int target; unsigned int lun; unsigned int plane; unsigned int eraseblock; unsigned int page; }; /** * struct nand_page_io_req - NAND I/O request object * @pos: the position this I/O request is targeting * @dataoffs: the offset within the page * @datalen: number of data bytes to read from/write to this page * @databuf: buffer to store data in or get data from * @ooboffs: the OOB offset within the page * @ooblen: the number of OOB bytes to read from/write to this page * @oobbuf: buffer to store OOB data in or get OOB data from * @mode: one of the %MTD_OPS_XXX mode * * This object is used to pass per-page I/O requests to NAND sub-layers. This * way all useful information are already formatted in a useful way and * specific NAND layers can focus on translating these information into * specific commands/operations. 
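 *
 * (Illustrative aside, not from the original kerneldoc: for the structure
 * defined just below, a read fills @databuf.in while a write consumes
 * @databuf.out, e.g.
 *
 *	req.pos = pos;
 *	req.dataoffs = 0;
 *	req.datalen = nanddev_page_size(nand);
 *	req.databuf.in = buf;
 *	req.mode = MTD_OPS_RAW;
 *
 * where "req", "pos", "nand" and "buf" are hypothetical locals and
 * nanddev_page_size() is defined later in this header.)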
*/ struct nand_page_io_req { struct nand_pos pos; unsigned int dataoffs; unsigned int datalen; union { const void *out; void *in; } databuf; unsigned int ooboffs; unsigned int ooblen; union { const void *out; void *in; } oobbuf; int mode; }; /** * struct nand_ecc_req - NAND ECC requirements * @strength: ECC strength * @step_size: ECC step/block size */ struct nand_ecc_req { unsigned int strength; unsigned int step_size; }; #define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) } /** * struct nand_bbt - bad block table object * @cache: in memory BBT cache */ struct nand_bbt { unsigned long *cache; }; struct nand_device; /** * struct nand_ops - NAND operations * @erase: erase a specific block. No need to check if the block is bad before * erasing, this has been taken care of by the generic NAND layer * @markbad: mark a specific block bad. No need to check if the block is * already marked bad, this has been taken care of by the generic * NAND layer. This method should just write the BBM (Bad Block * Marker) so that future calls to struct_nand_ops->isbad() return * true * @isbad: check whether a block is bad or not. This method should just read * the BBM and return whether the block is bad or not based on what it * reads * * These are all low level operations that should be implemented by specialized * NAND layers (SPI NAND, raw NAND, ...). */ struct nand_ops { int (*erase)(struct nand_device *nand, const struct nand_pos *pos); int (*markbad)(struct nand_device *nand, const struct nand_pos *pos); bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos); }; /** * struct nand_device - NAND device * @mtd: MTD instance attached to the NAND device * @memorg: memory layout * @eccreq: ECC requirements * @rowconv: position to row address converter * @bbt: bad block table info * @ops: NAND operations attached to the NAND device * * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND) * should declare their own NAND object embedding a nand_device struct (that's * how inheritance is done). * struct_nand_device->memorg and struct_nand_device->eccreq should be filled * at device detection time to reflect the NAND device * capabilities/requirements. Once this is done nanddev_init() can be called. * It will take care of converting NAND information into MTD ones, which means * the specialized NAND layers should never manually tweak * struct_nand_device->mtd except for the ->_read/write() hooks. */ struct nand_device { struct mtd_info mtd; struct nand_memory_organization memorg; struct nand_ecc_req eccreq; struct nand_row_converter rowconv; struct nand_bbt bbt; const struct nand_ops *ops; }; /** * struct nand_io_iter - NAND I/O iterator * @req: current I/O request * @oobbytes_per_page: maximum number of OOB bytes per page * @dataleft: remaining number of data bytes to read/write * @oobleft: remaining number of OOB bytes to read/write * * Can be used by specialized NAND layers to iterate over all pages covered * by an MTD I/O request, which should greatly simplify the boiler-plate * code needed to read/write data from/to a NAND device. */ struct nand_io_iter { struct nand_page_io_req req; unsigned int oobbytes_per_page; unsigned int dataleft; unsigned int oobleft; }; /** * mtd_to_nanddev() - Get the NAND device attached to the MTD instance * @mtd: MTD instance * * Return: the NAND device embedding @mtd.
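 *
 * Illustrative sketch (not from the original kerneldoc): a specialized
 * layer embeds struct nand_device and chains container_of() conversions,
 * e.g. with a hypothetical struct my_nand:
 *
 *	struct my_nand {
 *		struct nand_device base;
 *		void __iomem *regs;
 *	};
 *
 *	static inline struct my_nand *mtd_to_my_nand(struct mtd_info *mtd)
 *	{
 *		return container_of(mtd_to_nanddev(mtd), struct my_nand,
 *				    base);
 *	}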
*/ static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd) { return container_of(mtd, struct nand_device, mtd); } /** * nanddev_to_mtd() - Get the MTD device attached to a NAND device * @nand: NAND device * * Return: the MTD device embedded in @nand. */ static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand) { return &nand->mtd; } /** * nanddev_bits_per_cell() - Get the number of bits per cell * @nand: NAND device * * Return: the number of bits per cell. */ static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand) { return nand->memorg.bits_per_cell; } /** * nanddev_page_size() - Get NAND page size * @nand: NAND device * * Return: the page size. */ static inline size_t nanddev_page_size(const struct nand_device *nand) { return nand->memorg.pagesize; } /** * nanddev_per_page_oobsize() - Get NAND OOB size * @nand: NAND device * * Return: the OOB size. */ static inline unsigned int nanddev_per_page_oobsize(const struct nand_device *nand) { return nand->memorg.oobsize; } /** * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock * @nand: NAND device * * Return: the number of pages per eraseblock. */ static inline unsigned int nanddev_pages_per_eraseblock(const struct nand_device *nand) { return nand->memorg.pages_per_eraseblock; } /** * nanddev_pages_per_target() - Get the number of pages per target * @nand: NAND device * * Return: the number of pages per target. */ static inline unsigned int nanddev_pages_per_target(const struct nand_device *nand) { return nand->memorg.pages_per_eraseblock * nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target; } /** * nanddev_eraseblock_size() - Get NAND eraseblock size * @nand: NAND device * * Return: the eraseblock size. */ static inline size_t nanddev_eraseblock_size(const struct nand_device *nand) { return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock; } /** * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN * @nand: NAND device * * Return: the number of eraseblocks per LUN. */ static inline unsigned int nanddev_eraseblocks_per_lun(const struct nand_device *nand) { return nand->memorg.eraseblocks_per_lun; } /** * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target * @nand: NAND device * * Return: the number of eraseblocks per target. */ static inline unsigned int nanddev_eraseblocks_per_target(const struct nand_device *nand) { return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target; } /** * nanddev_target_size() - Get the total size provided by a single target/die * @nand: NAND device * * Return: the total size exposed by a single target/die in bytes. */ static inline u64 nanddev_target_size(const struct nand_device *nand) { return (u64)nand->memorg.luns_per_target * nand->memorg.eraseblocks_per_lun * nand->memorg.pages_per_eraseblock * nand->memorg.pagesize; } /** * nanddev_ntargets() - Get the total number of targets * @nand: NAND device * * Return: the number of targets/dies exposed by @nand. */ static inline unsigned int nanddev_ntargets(const struct nand_device *nand) { return nand->memorg.ntargets; } /** * nanddev_neraseblocks() - Get the total number of eraseblocks * @nand: NAND device * * Return: the total number of eraseblocks exposed by @nand.
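 *
 * For example, a hypothetical device with 2 targets, 2 LUNs per target and
 * 1024 eraseblocks per LUN exposes 2 * 2 * 1024 = 4096 eraseblocks.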
*/ static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) { return nand->memorg.ntargets * nand->memorg.luns_per_target * nand->memorg.eraseblocks_per_lun; } /** * nanddev_size() - Get NAND size * @nand: NAND device * * Return: the total size (in bytes) exposed by @nand. */ static inline u64 nanddev_size(const struct nand_device *nand) { return nanddev_target_size(nand) * nanddev_ntargets(nand); } /** * nanddev_get_memorg() - Extract memory organization info from a NAND device * @nand: NAND device * * This can be used by the upper layer to fill the memorg info before calling * nanddev_init(). * * Return: the memorg object embedded in the NAND device. */ static inline struct nand_memory_organization * nanddev_get_memorg(struct nand_device *nand) { return &nand->memorg; } int nanddev_init(struct nand_device *nand, const struct nand_ops *ops, struct module *owner); void nanddev_cleanup(struct nand_device *nand); /** * nanddev_register() - Register a NAND device * @nand: NAND device * * Register a NAND device. * This function is just a wrapper around mtd_device_register() * registering the MTD device embedded in @nand. * * Return: 0 in case of success, a negative error code otherwise. */ static inline int nanddev_register(struct nand_device *nand) { return mtd_device_register(&nand->mtd, NULL, 0); } /** * nanddev_unregister() - Unregister a NAND device * @nand: NAND device * * Unregister a NAND device. * This function is just a wrapper around mtd_device_unregister() * unregistering the MTD device embedded in @nand. * * Return: 0 in case of success, a negative error code otherwise. */ static inline int nanddev_unregister(struct nand_device *nand) { return mtd_device_unregister(&nand->mtd); } /** * nanddev_set_of_node() - Attach a DT node to a NAND device * @nand: NAND device * @np: DT node * * Attach a DT node to a NAND device. */ static inline void nanddev_set_of_node(struct nand_device *nand, struct device_node *np) { mtd_set_of_node(&nand->mtd, np); } /** * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device * @nand: NAND device * * Return: the DT node attached to @nand. */ static inline struct device_node *nanddev_get_of_node(struct nand_device *nand) { return mtd_get_of_node(&nand->mtd); } /** * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position * @nand: NAND device * @offs: absolute NAND offset (usually passed by the MTD layer) * @pos: a NAND position object to fill in * * Converts @offs into a nand_pos representation. * * Return: the offset within the NAND page pointed by @pos. */ static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand, loff_t offs, struct nand_pos *pos) { unsigned int pageoffs; u64 tmp = offs; pageoffs = do_div(tmp, nand->memorg.pagesize); pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock); pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun); pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; pos->lun = do_div(tmp, nand->memorg.luns_per_target); pos->target = tmp; return pageoffs; } /** * nanddev_pos_cmp() - Compare two NAND positions * @a: First NAND position * @b: Second NAND position * * Compares two NAND positions. * * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b. */ static inline int nanddev_pos_cmp(const struct nand_pos *a, const struct nand_pos *b) { if (a->target != b->target) return a->target < b->target ? -1 : 1; if (a->lun != b->lun) return a->lun < b->lun ? 
-1 : 1; if (a->eraseblock != b->eraseblock) return a->eraseblock < b->eraseblock ? -1 : 1; if (a->page != b->page) return a->page < b->page ? -1 : 1; return 0; } /** * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset * @nand: NAND device * @pos: the NAND position to convert * * Converts the @pos NAND position into an absolute offset. * * Return: the absolute offset. Note that @pos points to the beginning of a * page; if one wants to point to a specific offset within this page, * the returned offset has to be adjusted manually. */ static inline loff_t nanddev_pos_to_offs(struct nand_device *nand, const struct nand_pos *pos) { unsigned int npages; npages = pos->page + ((pos->eraseblock + (pos->lun + (pos->target * nand->memorg.luns_per_target)) * nand->memorg.eraseblocks_per_lun) * nand->memorg.pages_per_eraseblock); return (loff_t)npages * nand->memorg.pagesize; } /** * nanddev_pos_to_row() - Extract a row address from a NAND position * @nand: NAND device * @pos: the position to convert * * Converts a NAND position into a row address that can then be passed to the * device. * * Return: the row address extracted from @pos. */ static inline unsigned int nanddev_pos_to_row(struct nand_device *nand, const struct nand_pos *pos) { return (pos->lun << nand->rowconv.lun_addr_shift) | (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) | pos->page; } /** * nanddev_pos_next_target() - Move a position to the next target/die * @nand: NAND device * @pos: the position to update * * Updates @pos to point to the start of the next target/die. Useful when you * want to iterate over all targets/dies of a NAND device. */ static inline void nanddev_pos_next_target(struct nand_device *nand, struct nand_pos *pos) { pos->page = 0; pos->plane = 0; pos->eraseblock = 0; pos->lun = 0; pos->target++; } /** * nanddev_pos_next_lun() - Move a position to the next LUN * @nand: NAND device * @pos: the position to update * * Updates @pos to point to the start of the next LUN. Useful when you want to * iterate over all LUNs of a NAND device. */ static inline void nanddev_pos_next_lun(struct nand_device *nand, struct nand_pos *pos) { if (pos->lun >= nand->memorg.luns_per_target - 1) return nanddev_pos_next_target(nand, pos); pos->lun++; pos->page = 0; pos->plane = 0; pos->eraseblock = 0; } /** * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock * @nand: NAND device * @pos: the position to update * * Updates @pos to point to the start of the next eraseblock. Useful when you * want to iterate over all eraseblocks of a NAND device. */ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand, struct nand_pos *pos) { if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1) return nanddev_pos_next_lun(nand, pos); pos->eraseblock++; pos->page = 0; pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; } /** * nanddev_pos_next_page() - Move a position to the next page * @nand: NAND device * @pos: the position to update * * Updates @pos to point to the start of the next page. Useful when you want to * iterate over all pages of a NAND device.
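 *
 * A hedged sketch iterating over every page of a device with the position
 * helpers above (the per-page work is elided):
 *
 *	struct nand_pos pos = { };
 *
 *	while (nanddev_pos_to_offs(nand, &pos) < nanddev_size(nand)) {
 *		... operate on the page described by pos ...
 *		nanddev_pos_next_page(nand, &pos);
 *	}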
*/ static inline void nanddev_pos_next_page(struct nand_device *nand, struct nand_pos *pos) { if (pos->page >= nand->memorg.pages_per_eraseblock - 1) return nanddev_pos_next_eraseblock(nand, pos); pos->page++; } /** * nanddev_io_iter_init - Initialize a NAND I/O iterator * @nand: NAND device * @offs: absolute offset * @req: MTD request * @iter: NAND I/O iterator * * Initializes a NAND iterator based on the information passed by the MTD * layer. */ static inline void nanddev_io_iter_init(struct nand_device *nand, loff_t offs, struct mtd_oob_ops *req, struct nand_io_iter *iter) { struct mtd_info *mtd = nanddev_to_mtd(nand); iter->req.mode = req->mode; iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos); iter->req.ooboffs = req->ooboffs; iter->oobbytes_per_page = mtd_oobavail(mtd, req); iter->dataleft = req->len; iter->oobleft = req->ooblen; iter->req.databuf.in = req->datbuf; iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize - iter->req.dataoffs, iter->dataleft); iter->req.oobbuf.in = req->oobbuf; iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page - iter->req.ooboffs, iter->oobleft); } /** * nanddev_io_iter_next_page - Move to the next page * @nand: NAND device * @iter: NAND I/O iterator * * Updates @iter to point to the next page. */ static inline void nanddev_io_iter_next_page(struct nand_device *nand, struct nand_io_iter *iter) { nanddev_pos_next_page(nand, &iter->req.pos); iter->dataleft -= iter->req.datalen; iter->req.databuf.in += iter->req.datalen; iter->oobleft -= iter->req.ooblen; iter->req.oobbuf.in += iter->req.ooblen; iter->req.dataoffs = 0; iter->req.ooboffs = 0; iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize, iter->dataleft); iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page, iter->oobleft); } /** * nanddev_io_iter_end - Check whether the end of the iteration has been * reached * @nand: NAND device * @iter: NAND I/O iterator * * Check whether @iter has reached the end of the NAND portion it was asked to * iterate over. * * Return: true if @iter has reached the end of the iteration request, false * otherwise. */ static inline bool nanddev_io_iter_end(struct nand_device *nand, const struct nand_io_iter *iter) { if (iter->dataleft || iter->oobleft) return false; return true; } /** * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD * I/O request * @nand: NAND device * @start: start address to read/write from * @req: MTD I/O request * @iter: NAND I/O iterator * * Should be used to iterate over all pages contained in an MTD request.
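 *
 * Typical (hedged) usage, with a hypothetical per-page helper and an
 * mtd_oob_ops request coming from the MTD layer:
 *
 *	struct nand_io_iter iter;
 *	int ret = 0;
 *
 *	nanddev_io_for_each_page(nand, start, req, &iter) {
 *		ret = my_handle_page(nand, &iter.req);
 *		if (ret)
 *			break;
 *	}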
*/ #define nanddev_io_for_each_page(nand, start, req, iter) \ for (nanddev_io_iter_init(nand, start, req, iter); \ !nanddev_io_iter_end(nand, iter); \ nanddev_io_iter_next_page(nand, iter)) bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos); bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos); int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos); int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos); /* BBT related functions */ enum nand_bbt_block_status { NAND_BBT_BLOCK_STATUS_UNKNOWN, NAND_BBT_BLOCK_GOOD, NAND_BBT_BLOCK_WORN, NAND_BBT_BLOCK_RESERVED, NAND_BBT_BLOCK_FACTORY_BAD, NAND_BBT_BLOCK_NUM_STATUS, }; int nanddev_bbt_init(struct nand_device *nand); void nanddev_bbt_cleanup(struct nand_device *nand); int nanddev_bbt_update(struct nand_device *nand); int nanddev_bbt_get_block_status(const struct nand_device *nand, unsigned int entry); int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, enum nand_bbt_block_status status); int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block); /** * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry * @nand: NAND device * @pos: the NAND position we want to get BBT entry for * * Return the BBT entry used to store information about the eraseblock pointed * by @pos. * * Return: the BBT entry storing information about eraseblock pointed by @pos. */ static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand, const struct nand_pos *pos) { return pos->eraseblock + ((pos->lun + (pos->target * nand->memorg.luns_per_target)) * nand->memorg.eraseblocks_per_lun); } /** * nanddev_bbt_is_initialized() - Check if the BBT has been initialized * @nand: NAND device * * Return: true if the BBT has been initialized, false otherwise. */ static inline bool nanddev_bbt_is_initialized(struct nand_device *nand) { return !!nand->bbt.cache; } /* MTD -> NAND helper functions. */ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo); int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len); #endif /* __LINUX_MTD_NAND_H */ mtd/inftl.h 0000644 00000003077 14722070374 0006632 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * inftl.h -- defines to support the Inverse NAND Flash Translation Layer * * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) */ #ifndef __MTD_INFTL_H__ #define __MTD_INFTL_H__ #ifndef __KERNEL__ #error This is a kernel header. Perhaps include nftl-user.h instead? 
#endif #include <linux/mtd/blktrans.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nftl.h> #include <mtd/inftl-user.h> #ifndef INFTL_MAJOR #define INFTL_MAJOR 96 #endif #define INFTL_PARTN_BITS 4 #ifdef __KERNEL__ struct INFTLrecord { struct mtd_blktrans_dev mbd; __u16 MediaUnit; __u32 EraseSize; struct INFTLMediaHeader MediaHdr; int usecount; unsigned char heads; unsigned char sectors; unsigned short cylinders; __u16 numvunits; __u16 firstEUN; __u16 lastEUN; __u16 numfreeEUNs; __u16 LastFreeEUN; /* To speed up finding a free EUN */ int head, sect, cyl; __u16 *PUtable; /* Physical Unit Table */ __u16 *VUtable; /* Virtual Unit Table */ unsigned int nb_blocks; /* number of physical blocks */ unsigned int nb_boot_blocks; /* number of blocks used by the BIOS */ struct erase_info instr; }; int INFTL_mount(struct INFTLrecord *s); int INFTL_formatblock(struct INFTLrecord *s, int block); void INFTL_dumptables(struct INFTLrecord *s); void INFTL_dumpVUchains(struct INFTLrecord *s); int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf); int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, size_t *retlen, uint8_t *buf); #endif /* __KERNEL__ */ #endif /* __MTD_INFTL_H__ */ mtd/spear_smi.h 0000644 00000003262 14722070374 0007474 0 ustar 00 /* * Copyright © 2010 ST Microelectronics * Shiraz Hashim <shiraz.linux.kernel@gmail.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #ifndef __MTD_SPEAR_SMI_H #define __MTD_SPEAR_SMI_H #include <linux/types.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/platform_device.h> #include <linux/of.h> /* max possible slots for serial NOR flash chips in the SMI controller */ #define MAX_NUM_FLASH_CHIP 4 /* macro to define partitions for flash devices */ #define DEFINE_PARTS(n, of, s) \ { \ .name = n, \ .offset = of, \ .size = s, \ } /** * struct spear_smi_flash_info - platform structure for passing flash * information * * name: name of the serial NOR flash for identification * mem_base: the memory base on which the flash is mapped * size: size of the flash in bytes * partitions: partition details * nr_partitions: number of partitions * fast_mode: whether flash supports fast mode */ struct spear_smi_flash_info { char *name; unsigned long mem_base; unsigned long size; struct mtd_partition *partitions; int nr_partitions; u8 fast_mode; }; /** * struct spear_smi_plat_data - platform structure for configuring SMI * * clk_rate: clk rate at which SMI must operate * num_flashes: number of flashes present on board * board_flash_info: specific details of each flash present on board */ struct spear_smi_plat_data { unsigned long clk_rate; int num_flashes; struct spear_smi_flash_info *board_flash_info; struct device_node *np[MAX_NUM_FLASH_CHIP]; }; #endif /* __MTD_SPEAR_SMI_H */ mtd/latch-addr-flash.h 0000644 00000001314 14722070374 0010604 0 ustar 00 /* * Interface for NOR flash drivers whose high address lines are latched * * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied.
*/ #ifndef __LATCH_ADDR_FLASH__ #define __LATCH_ADDR_FLASH__ struct map_info; struct mtd_partition; struct latch_addr_flash_data { unsigned int width; unsigned int size; int (*init)(void *data, int cs); void (*done)(void *data); void (*set_window)(unsigned long offset, void *data); void *data; unsigned int nr_parts; struct mtd_partition *parts; }; #endif mtd/pfow.h 0000644 00000013074 14722070374 0006467 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* Primary function overlay window definitions * and service functions used by LPDDR chips */ #ifndef __LINUX_MTD_PFOW_H #define __LINUX_MTD_PFOW_H #include <linux/mtd/qinfo.h> /* PFOW registers addressing */ /* Address of symbol "P" */ #define PFOW_QUERY_STRING_P 0x0000 /* Address of symbol "F" */ #define PFOW_QUERY_STRING_F 0x0002 /* Address of symbol "O" */ #define PFOW_QUERY_STRING_O 0x0004 /* Address of symbol "W" */ #define PFOW_QUERY_STRING_W 0x0006 /* Identification info for LPDDR chip */ #define PFOW_MANUFACTURER_ID 0x0020 #define PFOW_DEVICE_ID 0x0022 /* Address in PFOW where prog buffer can be found */ #define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 /* Size of program buffer in words */ #define PFOW_PROGRAM_BUFFER_SIZE 0x0042 /* Address command code register */ #define PFOW_COMMAND_CODE 0x0080 /* command data register */ #define PFOW_COMMAND_DATA 0x0084 /* command address register lower address bits */ #define PFOW_COMMAND_ADDRESS_L 0x0088 /* command address register upper address bits */ #define PFOW_COMMAND_ADDRESS_H 0x008a /* number of bytes to be programmed, lower address bits */ #define PFOW_DATA_COUNT_L 0x0090 /* number of bytes to be programmed, higher address bits */ #define PFOW_DATA_COUNT_H 0x0092 /* command execution register, the only possible value is 0x01 */ #define PFOW_COMMAND_EXECUTE 0x00c0 /* 0x01 should be written at this address to clear buffer */ #define PFOW_CLEAR_PROGRAM_BUFFER 0x00c4 /* device program/erase suspend register */ #define PFOW_PROGRAM_ERASE_SUSPEND 0x00c8 /* device status register */ #define PFOW_DSR 0x00cc /* LPDDR memory device command codes */ /* They are possible values of PFOW command code register */ #define LPDDR_WORD_PROGRAM 0x0041 #define LPDDR_BUFF_PROGRAM 0x00E9 #define LPDDR_BLOCK_ERASE 0x0020 #define LPDDR_LOCK_BLOCK 0x0061 #define LPDDR_UNLOCK_BLOCK 0x0062 #define LPDDR_READ_BLOCK_LOCK_STATUS 0x0065 #define LPDDR_INFO_QUERY 0x0098 #define LPDDR_READ_OTP 0x0097 #define LPDDR_PROG_OTP 0x00C0 #define LPDDR_RESUME 0x00D0 /* Defines possible value of PFOW command execution register */ #define LPDDR_START_EXECUTION 0x0001 /* Defines possible value of PFOW program/erase suspend register */ #define LPDDR_SUSPEND 0x0001 /* Possible values of PFOW device status register */ /* access R - read; RC read & clearable */ #define DSR_DPS (1<<1) /* RC; device protect status * 0 - not protected 1 - locked */ #define DSR_PSS (1<<2) /* R; program suspend status; * 0-prog in progress/completed, * 1- prog suspended */ #define DSR_VPPS (1<<3) /* RC; 0-Vpp OK, * 1-Vpp low */ #define DSR_PROGRAM_STATUS (1<<4) /* RC; 0-successful, 1-error */ #define DSR_ERASE_STATUS (1<<5) /* RC; erase or blank check status; * 0-success erase/blank check, * 1 blank check error */ #define DSR_ESS (1<<6) /* R; erase suspend status; * 0-erase in progress/complete, * 1 erase suspended */ #define DSR_READY_STATUS (1<<7) /* R; Device status * 0-busy, * 1-ready */ #define DSR_RPS (0x3<<8) /* RC; region program status * 00 - Success, * 01-re-program attempt in region with * object mode data, * 10-object mode program attempt in *
region with control mode data * 11-attempt to program invalid half * with 0x41 command */ #define DSR_AOS (1<<12) /* RC; 1- AO related failure */ #define DSR_AVAILABLE (1<<15) /* R; Device availability * 1 - Device available * 0 - not available */ /* The superset of all possible error bits in DSR */ #define DSR_ERR 0x133A static inline void send_pfow_command(struct map_info *map, unsigned long cmd_code, unsigned long adr, unsigned long len, map_word *datum) { int bits_per_chip = map_bankwidth(map) * 8; map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE); map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)), map->pfow_base + PFOW_COMMAND_ADDRESS_L); map_write(map, CMD(adr>>bits_per_chip), map->pfow_base + PFOW_COMMAND_ADDRESS_H); if (len) { map_write(map, CMD(len & ((1<<bits_per_chip) - 1)), map->pfow_base + PFOW_DATA_COUNT_L); map_write(map, CMD(len>>bits_per_chip), map->pfow_base + PFOW_DATA_COUNT_H); } if (datum) map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA); /* Command execution start */ map_write(map, CMD(LPDDR_START_EXECUTION), map->pfow_base + PFOW_COMMAND_EXECUTE); } static inline void print_drs_error(unsigned dsr) { int prog_status = (dsr & DSR_RPS) >> 8; if (!(dsr & DSR_AVAILABLE)) printk(KERN_NOTICE"DSR.15: (0) Device not Available\n"); if ((prog_status & 0x03) == 0x03) printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid " "half with 41h command\n"); else if (prog_status & 0x02) printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt " "in region with Control Mode data\n"); else if (prog_status & 0x01) printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region " "with Object Mode data\n"); if (!(dsr & DSR_READY_STATUS)) printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n"); if (dsr & DSR_ESS) printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n"); if (dsr & DSR_ERASE_STATUS) printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n"); if (dsr & DSR_PROGRAM_STATUS) printk(KERN_NOTICE"DSR.4: (1) Program Error\n"); if (dsr & DSR_VPPS) printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation " "aborted\n"); if (dsr & DSR_PSS) printk(KERN_NOTICE"DSR.2: (1) Program suspended\n"); if (dsr & DSR_DPS) printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt " "on locked block\n"); } #endif /* __LINUX_MTD_PFOW_H */ mtd/ndfc.h 0000644 00000004026 14722070374 0006423 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de> * * Info: * Contains defines and data structures for the NDFC NAND controller */ #ifndef __LINUX_MTD_NDFC_H #define __LINUX_MTD_NDFC_H /* NDFC Register definitions */ #define NDFC_CMD 0x00 #define NDFC_ALE 0x04 #define NDFC_DATA 0x08 #define NDFC_ECC 0x10 #define NDFC_BCFG0 0x30 #define NDFC_BCFG1 0x34 #define NDFC_BCFG2 0x38 #define NDFC_BCFG3 0x3c #define NDFC_CCR 0x40 #define NDFC_STAT 0x44 #define NDFC_HWCTL 0x48 #define NDFC_REVID 0x50 #define NDFC_STAT_IS_READY 0x01000000 #define NDFC_CCR_RESET_CE 0x80000000 /* CE Reset */ #define NDFC_CCR_RESET_ECC 0x40000000 /* ECC Reset */ #define NDFC_CCR_RIE 0x20000000 /* Interrupt Enable on Device Rdy */ #define NDFC_CCR_REN 0x10000000 /* Enable wait for Rdy in LinearR */ #define NDFC_CCR_ROMEN 0x08000000 /* Enable ROM In LinearR */ #define NDFC_CCR_ARE 0x04000000 /* Auto-Read Enable */ #define NDFC_CCR_BS(x) (((x) & 0x3) << 24) /* Select Bank on CE[x] */ #define NDFC_CCR_BS_MASK 0x03000000 /* Select Bank */ #define NDFC_CCR_ARAC0 0x00000000 /* 3 Addr, 1 Col 2 Row 512b page */ #define NDFC_CCR_ARAC1 0x00001000 /* 4 Addr, 1 Col 3 Row 512b page */
#define NDFC_CCR_ARAC2 0x00002000 /* 4 Addr, 2 Col 2 Row 2K page */ #define NDFC_CCR_ARAC3 0x00003000 /* 5 Addr, 2 Col 3 Row 2K page */ #define NDFC_CCR_ARAC_MASK 0x00003000 /* Auto-Read mode Addr Cycles */ #define NDFC_CCR_RPG 0x0000C000 /* Auto-Read Page */ #define NDFC_CCR_EBCC 0x00000004 /* EBC Configuration Completed */ #define NDFC_CCR_DHC 0x00000002 /* Direct Hardware Control Enable */ #define NDFC_BxCFG_EN 0x80000000 /* Bank Enable */ #define NDFC_BxCFG_CED 0x40000000 /* nCE Style */ #define NDFC_BxCFG_SZ_MASK 0x08000000 /* Bank Size */ #define NDFC_BxCFG_SZ_8BIT 0x00000000 /* 8bit */ #define NDFC_BxCFG_SZ_16BIT 0x08000000 /* 16bit */ #define NDFC_MAX_BANKS 4 struct ndfc_controller_settings { uint32_t ccr_settings; uint64_t ndfc_erpn; }; struct ndfc_chip_settings { uint32_t bank_settings; }; #endif mtd/sharpsl.h 0000644 00000001005 14722070374 0007157 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * SharpSL NAND support * * Copyright (C) 2008 Dmitry Baryshkov */ #ifndef _MTD_SHARPSL_H #define _MTD_SHARPSL_H #include <linux/mtd/rawnand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> struct sharpsl_nand_platform_data { struct nand_bbt_descr *badblock_pattern; const struct mtd_ooblayout_ops *ecc_layout; struct mtd_partition *partitions; unsigned int nr_partitions; const char *const *part_parsers; }; #endif /* _MTD_SHARPSL_H */ mtd/gen_probe.h 0000644 00000001147 14722070374 0007452 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2001 Red Hat UK Limited * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> */ #ifndef __LINUX_MTD_GEN_PROBE_H__ #define __LINUX_MTD_GEN_PROBE_H__ #include <linux/mtd/flashchip.h> #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> #include <linux/bitops.h> struct chip_probe { char *name; int (*probe_chip)(struct map_info *map, __u32 base, unsigned long *chip_map, struct cfi_private *cfi); }; struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp); #endif /* __LINUX_MTD_GEN_PROBE_H__ */ mtd/nand_ecc.h 0000644 00000002027 14722070374 0007242 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com> * David Woodhouse <dwmw2@infradead.org> * Thomas Gleixner <tglx@linutronix.de> * * This file is the header for the ECC algorithm. 
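 *
 * A hedged usage sketch of the 256/512 byte helpers declared below
 * (ecc_calc and ecc_read are hypothetical local buffers, error handling
 * elided):
 *
 *	nand_calculate_ecc(chip, dat, ecc_calc);
 *	... read the ECC bytes stored on flash into ecc_read ...
 *	ret = nand_correct_data(chip, dat, ecc_read, ecc_calc);
 *	if (ret < 0)
 *		... the block is uncorrectable ...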
*/ #ifndef __MTD_NAND_ECC_H__ #define __MTD_NAND_ECC_H__ struct nand_chip; /* * Calculate 3 byte ECC code for eccsize byte block */ void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize, u_char *ecc_code, bool sm_order); /* * Calculate 3 byte ECC code for 256/512 byte block */ int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat, u_char *ecc_code); /* * Detect and correct a 1 bit error for eccsize byte block */ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, unsigned int eccsize, bool sm_order); /* * Detect and correct a 1 bit error for 256/512 byte block */ int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc, u_char *calc_ecc); #endif /* __MTD_NAND_ECC_H__ */ mtd/blktrans.h 0000644 00000004204 14722070374 0007327 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org> */ #ifndef __MTD_TRANS_H__ #define __MTD_TRANS_H__ #include <linux/mutex.h> #include <linux/kref.h> #include <linux/sysfs.h> struct hd_geometry; struct mtd_info; struct mtd_blktrans_ops; struct file; struct inode; struct mtd_blktrans_dev { struct mtd_blktrans_ops *tr; struct list_head list; struct mtd_info *mtd; struct mutex lock; int devnum; bool bg_stop; unsigned long size; int readonly; int open; struct kref ref; struct gendisk *disk; struct attribute_group *disk_attributes; struct request_queue *rq; struct list_head rq_list; struct blk_mq_tag_set *tag_set; spinlock_t queue_lock; void *priv; fmode_t file_mode; }; struct mtd_blktrans_ops { char *name; int major; int part_bits; int blksize; int blkshift; /* Access functions */ int (*readsect)(struct mtd_blktrans_dev *dev, unsigned long block, char *buffer); int (*writesect)(struct mtd_blktrans_dev *dev, unsigned long block, char *buffer); int (*discard)(struct mtd_blktrans_dev *dev, unsigned long block, unsigned nr_blocks); void (*background)(struct mtd_blktrans_dev *dev); /* Block layer ioctls */ int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo); int (*flush)(struct mtd_blktrans_dev *dev); /* Called with mtd_table_mutex held; no race with add/remove */ int (*open)(struct mtd_blktrans_dev *dev); void (*release)(struct mtd_blktrans_dev *dev); /* Called on {de,}registration and on subsequent addition/removal of devices, with mtd_table_mutex held. */ void (*add_mtd)(struct mtd_blktrans_ops *tr, struct mtd_info *mtd); void (*remove_dev)(struct mtd_blktrans_dev *dev); struct list_head devs; struct list_head list; struct module *owner; }; extern int register_mtd_blktrans(struct mtd_blktrans_ops *tr); extern int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr); extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev); extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev); extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev); #endif /* __MTD_TRANS_H__ */ mtd/platnand.h 0000644 00000004761 14722070374 0007320 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> * Steven J. Hill <sjhill@realitydiluted.com> * Thomas Gleixner <tglx@linutronix.de> * * Contains all platform NAND related definitions. */ #ifndef __LINUX_MTD_PLATNAND_H #define __LINUX_MTD_PLATNAND_H #include <linux/mtd/partitions.h> #include <linux/mtd/rawnand.h> #include <linux/platform_device.h> /** * struct platform_nand_chip - chip level device structure * @nr_chips: max. 
number of chips to scan for * @chip_offset: chip number offset * @nr_partitions: number of partitions pointed to by partitions (or zero) * @partitions: mtd partition list * @chip_delay: R/B delay value in us * @options: Option flags, e.g. 16bit buswidth * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH * @part_probe_types: NULL-terminated array of probe types */ struct platform_nand_chip { int nr_chips; int chip_offset; int nr_partitions; struct mtd_partition *partitions; int chip_delay; unsigned int options; unsigned int bbt_options; const char **part_probe_types; }; /** * struct platform_nand_ctrl - controller level device structure * @probe: platform specific function to probe/setup hardware * @remove: platform specific function to remove/teardown hardware * @dev_ready: platform specific function to read ready/busy pin * @select_chip: platform specific chip select function * @cmd_ctrl: platform specific function for controlling * ALE/CLE/nCE. Also used to write command and address * @write_buf: platform specific function for write buffer * @read_buf: platform specific function for read buffer * @priv: private data to transport driver specific settings * * All fields are optional and depend on the hardware driver requirements */ struct platform_nand_ctrl { int (*probe)(struct platform_device *pdev); void (*remove)(struct platform_device *pdev); int (*dev_ready)(struct nand_chip *chip); void (*select_chip)(struct nand_chip *chip, int cs); void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl); void (*write_buf)(struct nand_chip *chip, const uint8_t *buf, int len); void (*read_buf)(struct nand_chip *chip, uint8_t *buf, int len); void *priv; }; /** * struct platform_nand_data - container structure for platform-specific data * @chip: chip level chip structure * @ctrl: controller level device structure */ struct platform_nand_data { struct platform_nand_chip chip; struct platform_nand_ctrl ctrl; }; #endif /* __LINUX_MTD_PLATNAND_H */ mtd/flashchip.h 0000644 00000004655 14722070374 0007462 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2000 Red Hat UK Limited * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> */ #ifndef __MTD_FLASHCHIP_H__ #define __MTD_FLASHCHIP_H__ /* For spinlocks. sched.h includes spinlock.h from whichever directory it * happens to be in - so we don't have to care whether we're on 2.2, which * has asm/spinlock.h, or 2.4, which has linux/spinlock.h */ #include <linux/sched.h> #include <linux/mutex.h> typedef enum { FL_READY, FL_STATUS, FL_CFI_QUERY, FL_JEDEC_QUERY, FL_ERASING, FL_ERASE_SUSPENDING, FL_ERASE_SUSPENDED, FL_WRITING, FL_WRITING_TO_BUFFER, FL_OTP_WRITE, FL_WRITE_SUSPENDING, FL_WRITE_SUSPENDED, FL_PM_SUSPENDED, FL_SYNCING, FL_UNLOADING, FL_LOCKING, FL_UNLOCKING, FL_POINT, FL_XIP_WHILE_ERASING, FL_XIP_WHILE_WRITING, FL_SHUTDOWN, /* These 2 come from nand_state_t, which has been unified here */ FL_READING, FL_CACHEDPRG, /* These 4 come from onenand_state_t, which has been unified here */ FL_RESETING, FL_OTPING, FL_PREPARING_ERASE, FL_VERIFYING_ERASE, FL_UNKNOWN } flstate_t; /* NOTE: confusingly, this can be used to refer to more than one chip at a time, if they're interleaved. This can even refer to individual partitions on the same physical chip when present. */ struct flchip { unsigned long start; /* Offset within the map */ // unsigned long len; /* We omit len for now, because when we group them together we insist that they're all of the same size, and the chip size is held in the next level up. 
If we get more versatile later, it'll make it a damn sight harder to find which chip we want from a given offset, and we'll want to add the per-chip length field back in. */ int ref_point_counter; flstate_t state; flstate_t oldstate; unsigned int write_suspended:1; unsigned int erase_suspended:1; unsigned long in_progress_block_addr; unsigned long in_progress_block_mask; struct mutex mutex; wait_queue_head_t wq; /* Wait on here when we're waiting for the chip to be ready */ int word_write_time; int buffer_write_time; int erase_time; int word_write_time_max; int buffer_write_time_max; int erase_time_max; void *priv; }; /* This is used to handle contention on write/erase operations between partitions of the same physical chip. */ struct flchip_shared { struct mutex lock; struct flchip *writing; struct flchip *erasing; }; #endif /* __MTD_FLASHCHIP_H__ */ mtd/partitions.h 0000644 00000007452 14722070374 0007903 0 ustar 00 /* * MTD partitioning layer definitions * * (C) 2000 Nicolas Pitre <nico@fluxnic.net> * * This code is GPL */ #ifndef MTD_PARTITIONS_H #define MTD_PARTITIONS_H #include <linux/types.h> /* * Partition definition structure: * * An array of struct mtd_partition is passed along with an MTD object to * mtd_device_register() to create them. * * For each partition, these fields are available: * name: string that will be used to label the partition's MTD device. * types: some partitions can be containers using specific format to describe * embedded subpartitions / volumes. E.g. many home routers use "firmware" * partition that contains at least kernel and rootfs. In such case an * extra parser is needed that will detect these dynamic partitions and * report them to the MTD subsystem. If set this property stores an array * of parser names to use when looking for subpartitions. * size: the partition size; if defined as MTDPART_SIZ_FULL, the partition * will extend to the end of the master MTD device. * offset: absolute starting position within the master MTD device; if * defined as MTDPART_OFS_APPEND, the partition will start where the * previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block; * if MTDPART_OFS_RETAIN, consume as much as possible, leaving size * after the end of partition. * mask_flags: contains flags that have to be masked (removed) from the * master MTD flag set for the corresponding MTD partition. * For example, to force a read-only partition, simply adding * MTD_WRITEABLE to the mask_flags will do the trick. * * Note: writeable partitions require their size and offset be * erasesize aligned (e.g. use MTDPART_OFS_NXTBLK). */ struct mtd_partition { const char *name; /* identifier string */ const char *const *types; /* names of parsers to use if any */ uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ struct device_node *of_node; }; #define MTDPART_OFS_RETAIN (-3) #define MTDPART_OFS_NXTBLK (-2) #define MTDPART_OFS_APPEND (-1) #define MTDPART_SIZ_FULL (0) struct mtd_info; struct device_node; /** * struct mtd_part_parser_data - used to pass data to MTD partition parsers.
* @origin: for RedBoot, start address of MTD device */ struct mtd_part_parser_data { unsigned long origin; }; /* * Functions dealing with the various ways of partitioning the space */ struct mtd_part_parser { struct list_head list; struct module *owner; const char *name; const struct of_device_id *of_match_table; int (*parse_fn)(struct mtd_info *, const struct mtd_partition **, struct mtd_part_parser_data *); void (*cleanup)(const struct mtd_partition *pparts, int nr_parts); }; /* Container for passing around a set of parsed partitions */ struct mtd_partitions { const struct mtd_partition *parts; int nr_parts; const struct mtd_part_parser *parser; }; extern int __register_mtd_parser(struct mtd_part_parser *parser, struct module *owner); #define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE) extern void deregister_mtd_parser(struct mtd_part_parser *parser); /* * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't * do anything special in module init/exit. Each driver may only use this macro * once, and calling it replaces module_init() and module_exit(). */ #define module_mtd_part_parser(__mtd_part_parser) \ module_driver(__mtd_part_parser, register_mtd_parser, \ deregister_mtd_parser) int mtd_is_partition(const struct mtd_info *mtd); int mtd_add_partition(struct mtd_info *master, const char *name, long long offset, long long length); int mtd_del_partition(struct mtd_info *master, int partno); uint64_t mtd_get_device_size(const struct mtd_info *mtd); #endif mtd/xip.h 0000644 00000005021 14722070374 0006305 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * MTD primitives for XIP support * * Author: Nicolas Pitre * Created: Nov 2, 2004 * Copyright: (C) 2004 MontaVista Software, Inc. * * This XIP support for MTD has been loosely inspired * by an earlier patch authored by David Woodhouse. */ #ifndef __LINUX_MTD_XIP_H__ #define __LINUX_MTD_XIP_H__ #ifdef CONFIG_MTD_XIP /* * We really don't want gcc to guess anything. * We absolutely _need_ proper inlining. */ #include <linux/compiler.h> /* * Functions that modify the flash state away from array mode must * obviously not be running from flash. The __xipram annotation therefore * marks those functions so they get relocated to RAM. */ #ifdef CONFIG_XIP_KERNEL #define __xipram noinline __attribute__ ((__section__ (".xiptext"))) #endif /* * Each architecture has to provide the following macros. They must access * the hardware directly and not rely on any other (XIP) functions since they * won't be available when used (flash not in array mode). * * xip_irqpending() * * return non-zero when any hardware interrupt is pending. * * xip_currtime() * * return a platform specific time reference to be used with * xip_elapsed_since(). * * xip_elapsed_since(x) * * return in usecs the elapsed time between now and the reference x as * returned by xip_currtime(). * * note 1: conversion to usec can be approximated, as long as the * returned value is <= the real elapsed time. * note 2: this should be able to cope with a few seconds without * overflowing. * * xip_iprefetch() * * Macro to fill instruction prefetch * e.g.
a series of nops: asm volatile (".rep 8; nop; .endr"); */ #include <asm/mtd-xip.h> #ifndef xip_irqpending #warning "missing IRQ and timer primitives for XIP MTD support" #warning "some of the XIP MTD support code will be disabled" #warning "your system will therefore be unresponsive when writing or erasing flash" #define xip_irqpending() (0) #define xip_currtime() (0) #define xip_elapsed_since(x) (0) #endif #ifndef xip_iprefetch #define xip_iprefetch() do { } while (0) #endif /* * xip_cpu_idle() is used when waiting for a delay equal to or larger than * the system timer tick period. This should put the CPU into idle mode * to save power and to be woken up only when some interrupts are pending. * This should not rely upon standard kernel code. */ #ifndef xip_cpu_idle #define xip_cpu_idle() do { } while (0) #endif #endif /* CONFIG_MTD_XIP */ #ifndef __xipram #define __xipram #endif #endif /* __LINUX_MTD_XIP_H__ */ mtd/mtdram.h 0000644 00000000401 14722070374 0006770 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __MTD_MTDRAM_H__ #define __MTD_MTDRAM_H__ #include <linux/mtd/mtd.h> int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, unsigned long size, const char *name); #endif /* __MTD_MTDRAM_H__ */ mtd/sh_flctl.h 0000644 00000013447 14722070374 0007314 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 * * SuperH FLCTL nand controller * * Copyright © 2008 Renesas Solutions Corp. */ #ifndef __SH_FLCTL_H__ #define __SH_FLCTL_H__ #include <linux/completion.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> #include <linux/mtd/partitions.h> #include <linux/pm_qos.h> /* FLCTL registers */ #define FLCMNCR(f) (f->reg + 0x0) #define FLCMDCR(f) (f->reg + 0x4) #define FLCMCDR(f) (f->reg + 0x8) #define FLADR(f) (f->reg + 0xC) #define FLADR2(f) (f->reg + 0x3C) #define FLDATAR(f) (f->reg + 0x10) #define FLDTCNTR(f) (f->reg + 0x14) #define FLINTDMACR(f) (f->reg + 0x18) #define FLBSYTMR(f) (f->reg + 0x1C) #define FLBSYCNT(f) (f->reg + 0x20) #define FLDTFIFO(f) (f->reg + 0x24) #define FLECFIFO(f) (f->reg + 0x28) #define FLTRCR(f) (f->reg + 0x2C) #define FLHOLDCR(f) (f->reg + 0x38) #define FL4ECCRESULT0(f) (f->reg + 0x80) #define FL4ECCRESULT1(f) (f->reg + 0x84) #define FL4ECCRESULT2(f) (f->reg + 0x88) #define FL4ECCRESULT3(f) (f->reg + 0x8C) #define FL4ECCCR(f) (f->reg + 0x90) #define FL4ECCCNT(f) (f->reg + 0x94) #define FLERRADR(f) (f->reg + 0x98) /* FLCMNCR control bits */ #define _4ECCCNTEN (0x1 << 24) #define _4ECCEN (0x1 << 23) #define _4ECCCORRECT (0x1 << 22) #define SHBUSSEL (0x1 << 20) #define SEL_16BIT (0x1 << 19) #define SNAND_E (0x1 << 18) /* SNAND (0=512 1=2048)*/ #define QTSEL_E (0x1 << 17) #define ENDIAN (0x1 << 16) /* 1 = little endian */ #define FCKSEL_E (0x1 << 15) #define ACM_SACCES_MODE (0x01 << 10) #define NANWF_E (0x1 << 9) #define SE_D (0x1 << 8) /* Spare area disable */ #define CE1_ENABLE (0x1 << 4) /* Chip Enable 1 */ #define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */ #define TYPESEL_SET (0x1 << 0) /* * Clock settings using the PULSEx registers from FLCMNCR * * Some hardware uses bits called PULSEx instead of FCKSEL_E and QTSEL_E * to control the clock divider used between the High-Speed Peripheral Clock * and the FLCTL internal clock. If so, use CLK_8B_xxx for connecting 8 bit * and CLK_16B_xxx for connecting 16 bit bus bandwidth NAND chips. For the 16 * bit version the divider is separate for the pulse width of high and low * signals.
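 *
 * A hedged example (the divider actually required depends on the SoC
 * manual): a board wiring an 8 bit chip could OR one of the CLK_8B_x
 * values below into the FLCMNCR value it hands over via platform data:
 *
 *	.flcmncr_val = TYPESEL_SET | NANWF_E | CLK_8B_4,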
*/ #define PULSE3 (0x1 << 27) #define PULSE2 (0x1 << 17) #define PULSE1 (0x1 << 15) #define PULSE0 (0x1 << 9) #define CLK_8B_0_5 PULSE1 #define CLK_8B_1 0x0 #define CLK_8B_1_5 (PULSE1 | PULSE2) #define CLK_8B_2 PULSE0 #define CLK_8B_3 (PULSE0 | PULSE1 | PULSE2) #define CLK_8B_4 (PULSE0 | PULSE2) #define CLK_16B_6L_2H PULSE0 #define CLK_16B_9L_3H (PULSE0 | PULSE1 | PULSE2) #define CLK_16B_12L_4H (PULSE0 | PULSE2) /* FLCMDCR control bits */ #define ADRCNT2_E (0x1 << 31) /* 5byte address enable */ #define ADRMD_E (0x1 << 26) /* Sector address access */ #define CDSRC_E (0x1 << 25) /* Data buffer selection */ #define DOSR_E (0x1 << 24) /* Status read check */ #define SELRW (0x1 << 21) /* 0:read 1:write */ #define DOADR_E (0x1 << 20) /* Address stage execute */ #define ADRCNT_1 (0x00 << 18) /* Address data bytes: 1byte */ #define ADRCNT_2 (0x01 << 18) /* Address data bytes: 2byte */ #define ADRCNT_3 (0x02 << 18) /* Address data bytes: 3byte */ #define ADRCNT_4 (0x03 << 18) /* Address data bytes: 4byte */ #define DOCMD2_E (0x1 << 17) /* 2nd cmd stage execute */ #define DOCMD1_E (0x1 << 16) /* 1st cmd stage execute */ /* FLINTDMACR control bits */ #define ESTERINTE (0x1 << 24) /* ECC error interrupt enable */ #define AC1CLR (0x1 << 19) /* ECC FIFO clear */ #define AC0CLR (0x1 << 18) /* Data FIFO clear */ #define DREQ0EN (0x1 << 16) /* FLDTFIFODMA Request Enable */ #define ECERB (0x1 << 9) /* ECC error */ #define STERB (0x1 << 8) /* Status error */ #define STERINTE (0x1 << 4) /* Status error enable */ /* FLTRCR control bits */ #define TRSTRT (0x1 << 0) /* translation start */ #define TREND (0x1 << 1) /* translation end */ /* * FLHOLDCR control bits * * HOLDEN: Bus Occupancy Enable (inverted) * Enable this bit when the external bus might be used in between transfers. * If not set and the bus gets used by other modules, a deadlock occurs. 
*/ #define HOLDEN (0x1 << 0) /* FL4ECCCR control bits */ #define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */ #define _4ECCEND (0x1 << 1) /* 4 symbols end */ #define _4ECCEXST (0x1 << 0) /* 4 symbols exist */ #define LOOP_TIMEOUT_MAX 0x00010000 enum flctl_ecc_res_t { FL_SUCCESS, FL_REPAIRABLE, FL_ERROR, FL_TIMEOUT }; struct dma_chan; struct sh_flctl { struct nand_chip chip; struct platform_device *pdev; struct dev_pm_qos_request pm_qos; void __iomem *reg; resource_size_t fifo; uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ int read_bytes; unsigned int index; int seqin_column; /* column in SEQIN cmd */ int seqin_page_addr; /* page_addr in SEQIN cmd */ uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */ int erase1_page_addr; /* page_addr in ERASE1 cmd */ uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */ uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */ uint32_t flcmncr_base; /* base value of FLCMNCR */ uint32_t flintdmacr_base; /* irq enable bits */ unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */ unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */ unsigned qos_request:1; /* QoS request to prevent deep power shutdown */ /* DMA related objects */ struct dma_chan *chan_fifo0_rx; struct dma_chan *chan_fifo0_tx; struct completion dma_complete; }; struct sh_flctl_platform_data { struct mtd_partition *parts; int nr_parts; unsigned long flcmncr_val; unsigned has_hwecc:1; unsigned use_holden:1; unsigned int slave_id_fifo0_tx; unsigned int slave_id_fifo0_rx; }; static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) { return container_of(mtd_to_nand(mtdinfo), struct sh_flctl, chip); } #endif /* __SH_FLCTL_H__ */ mtd/map.h 0000644 00000031330 14722070374 0006264 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al. 
*/ /* Overhauled routines for dealing with different mmap regions of flash */ #ifndef __LINUX_MTD_MAP_H__ #define __LINUX_MTD_MAP_H__ #include <linux/types.h> #include <linux/list.h> #include <linux/string.h> #include <linux/bug.h> #include <linux/kernel.h> #include <linux/io.h> #include <asm/unaligned.h> #include <asm/barrier.h> #ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 #define map_bankwidth(map) 1 #define map_bankwidth_is_1(map) (map_bankwidth(map) == 1) #define map_bankwidth_is_large(map) (0) #define map_words(map) (1) #define MAX_MAP_BANKWIDTH 1 #else #define map_bankwidth_is_1(map) (0) #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_2 # ifdef map_bankwidth # undef map_bankwidth # define map_bankwidth(map) ((map)->bankwidth) # else # define map_bankwidth(map) 2 # define map_bankwidth_is_large(map) (0) # define map_words(map) (1) # endif #define map_bankwidth_is_2(map) (map_bankwidth(map) == 2) #undef MAX_MAP_BANKWIDTH #define MAX_MAP_BANKWIDTH 2 #else #define map_bankwidth_is_2(map) (0) #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_4 # ifdef map_bankwidth # undef map_bankwidth # define map_bankwidth(map) ((map)->bankwidth) # else # define map_bankwidth(map) 4 # define map_bankwidth_is_large(map) (0) # define map_words(map) (1) # endif #define map_bankwidth_is_4(map) (map_bankwidth(map) == 4) #undef MAX_MAP_BANKWIDTH #define MAX_MAP_BANKWIDTH 4 #else #define map_bankwidth_is_4(map) (0) #endif /* ensure we never evaluate anything shorter than an unsigned long * to zero, and ensure we'll never miss the end of a comparison (bjd) */ #define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long)) #ifdef CONFIG_MTD_MAP_BANK_WIDTH_8 # ifdef map_bankwidth # undef map_bankwidth # define map_bankwidth(map) ((map)->bankwidth) # if BITS_PER_LONG < 64 # undef map_bankwidth_is_large # define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) # undef map_words # define map_words(map) map_calc_words(map) # endif # else # define map_bankwidth(map) 8 # define map_bankwidth_is_large(map) (BITS_PER_LONG < 64) # define map_words(map) map_calc_words(map) # endif #define map_bankwidth_is_8(map) (map_bankwidth(map) == 8) #undef MAX_MAP_BANKWIDTH #define MAX_MAP_BANKWIDTH 8 #else #define map_bankwidth_is_8(map) (0) #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_16 # ifdef map_bankwidth # undef map_bankwidth # define map_bankwidth(map) ((map)->bankwidth) # undef map_bankwidth_is_large # define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) # undef map_words # define map_words(map) map_calc_words(map) # else # define map_bankwidth(map) 16 # define map_bankwidth_is_large(map) (1) # define map_words(map) map_calc_words(map) # endif #define map_bankwidth_is_16(map) (map_bankwidth(map) == 16) #undef MAX_MAP_BANKWIDTH #define MAX_MAP_BANKWIDTH 16 #else #define map_bankwidth_is_16(map) (0) #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 /* always use indirect access for 256-bit to preserve kernel stack */ # undef map_bankwidth # define map_bankwidth(map) ((map)->bankwidth) # undef map_bankwidth_is_large # define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) # undef map_words # define map_words(map) map_calc_words(map) #define map_bankwidth_is_32(map) (map_bankwidth(map) == 32) #undef MAX_MAP_BANKWIDTH #define MAX_MAP_BANKWIDTH 32 #else #define map_bankwidth_is_32(map) (0) #endif #ifndef map_bankwidth #ifdef CONFIG_MTD #warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected.
No NOR chip support can work" #endif static inline int map_bankwidth(void *map) { BUG(); return 0; } #define map_bankwidth_is_large(map) (0) #define map_words(map) (0) #define MAX_MAP_BANKWIDTH 1 #endif static inline int map_bankwidth_supported(int w) { switch (w) { #ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 case 1: #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_2 case 2: #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_4 case 4: #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_8 case 8: #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_16 case 16: #endif #ifdef CONFIG_MTD_MAP_BANK_WIDTH_32 case 32: #endif return 1; default: return 0; } } #define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG) typedef union { unsigned long x[MAX_MAP_LONGS]; } map_word; /* The map stuff is very simple. You fill in your struct map_info with a handful of routines for accessing the device, making sure they handle paging etc. correctly if your device needs it. Then you pass it off to a chip probe routine -- either JEDEC or CFI probe or both -- via do_map_probe(). If a chip is recognised, the probe code will invoke the appropriate chip driver (if present) and return a struct mtd_info. At which point, you fill in the mtd->module with your own module address, and register it with the MTD core code. Or you could partition it and register the partitions instead, or keep it for your own private use; whatever. The mtd->priv field will point to the struct map_info, and any further private data required by the chip driver is linked from the mtd->priv->fldrv_priv field. This allows the map driver to get at the destructor function map->fldrv_destroy() when it's tired of living. */ struct map_info { const char *name; unsigned long size; resource_size_t phys; #define NO_XIP (-1UL) void __iomem *virt; void *cached; int swap; /* this mapping's byte-swapping requirement */ int bankwidth; /* in octets. This isn't necessarily the width of actual bus cycles -- it's the repeat interval in bytes, before you are talking to the first chip again. */ #ifdef CONFIG_MTD_COMPLEX_MAPPINGS map_word (*read)(struct map_info *, unsigned long); void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t); void (*write)(struct map_info *, const map_word, unsigned long); void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t); /* We can perhaps put in 'point' and 'unpoint' methods, if we really want to enable XIP for non-linear mappings. Not yet though. */ #endif /* It's possible for the map driver to use cached memory in its copy_from implementation (and _only_ with copy_from). However, when the chip driver knows some flash area has changed contents, it will signal it to the map driver through this routine to let the map driver invalidate the corresponding cache as needed. If there is no cache to care about this can be set to NULL. */ void (*inval_cache)(struct map_info *, unsigned long, ssize_t); /* This will be called with 1 as parameter when the first map user * needs VPP, and called with 0 when the last user exits. The map * core maintains a reference counter, and assumes that VPP is a * global resource applying to all mapped flash chips on the system. 
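 *
 * A hedged usage sketch: chip drivers typically bracket a program/erase
 * sequence with the ENABLE_VPP()/DISABLE_VPP() helpers defined below,
 * which degrade to no-ops when no ->set_vpp() method is provided:
 *
 *	ENABLE_VPP(map);
 *	... issue the program/erase command sequence ...
 *	DISABLE_VPP(map);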
*/ void (*set_vpp)(struct map_info *, int); unsigned long pfow_base; unsigned long map_priv_1; unsigned long map_priv_2; struct device_node *device_node; void *fldrv_priv; struct mtd_chip_driver *fldrv; }; struct mtd_chip_driver { struct mtd_info *(*probe)(struct map_info *map); void (*destroy)(struct mtd_info *); struct module *module; char *name; struct list_head list; }; void register_mtd_chip_driver(struct mtd_chip_driver *); void unregister_mtd_chip_driver(struct mtd_chip_driver *); struct mtd_info *do_map_probe(const char *name, struct map_info *map); void map_destroy(struct mtd_info *mtd); #define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0) #define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0) #define INVALIDATE_CACHED_RANGE(map, from, size) \ do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) #define map_word_equal(map, val1, val2) \ ({ \ int i, ret = 1; \ for (i = 0; i < map_words(map); i++) \ if ((val1).x[i] != (val2).x[i]) { \ ret = 0; \ break; \ } \ ret; \ }) #define map_word_and(map, val1, val2) \ ({ \ map_word r; \ int i; \ for (i = 0; i < map_words(map); i++) \ r.x[i] = (val1).x[i] & (val2).x[i]; \ r; \ }) #define map_word_clr(map, val1, val2) \ ({ \ map_word r; \ int i; \ for (i = 0; i < map_words(map); i++) \ r.x[i] = (val1).x[i] & ~(val2).x[i]; \ r; \ }) #define map_word_or(map, val1, val2) \ ({ \ map_word r; \ int i; \ for (i = 0; i < map_words(map); i++) \ r.x[i] = (val1).x[i] | (val2).x[i]; \ r; \ }) #define map_word_andequal(map, val1, val2, val3) \ ({ \ int i, ret = 1; \ for (i = 0; i < map_words(map); i++) { \ if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \ ret = 0; \ break; \ } \ } \ ret; \ }) #define map_word_bitsset(map, val1, val2) \ ({ \ int i, ret = 0; \ for (i = 0; i < map_words(map); i++) { \ if ((val1).x[i] & (val2).x[i]) { \ ret = 1; \ break; \ } \ } \ ret; \ }) static inline map_word map_word_load(struct map_info *map, const void *ptr) { map_word r; if (map_bankwidth_is_1(map)) r.x[0] = *(unsigned char *)ptr; else if (map_bankwidth_is_2(map)) r.x[0] = get_unaligned((uint16_t *)ptr); else if (map_bankwidth_is_4(map)) r.x[0] = get_unaligned((uint32_t *)ptr); #if BITS_PER_LONG >= 64 else if (map_bankwidth_is_8(map)) r.x[0] = get_unaligned((uint64_t *)ptr); #endif else if (map_bankwidth_is_large(map)) memcpy(r.x, ptr, map->bankwidth); else BUG(); return r; } static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len) { int i; if (map_bankwidth_is_large(map)) { char *dest = (char *)&orig; memcpy(dest+start, buf, len); } else { for (i = start; i < start+len; i++) { int bitpos; #ifdef __LITTLE_ENDIAN bitpos = i * 8; #else /* __BIG_ENDIAN */ bitpos = (map_bankwidth(map) - 1 - i) * 8; #endif orig.x[0] &= ~(0xff << bitpos); orig.x[0] |= (unsigned long)buf[i-start] << bitpos; } } return orig; } #if BITS_PER_LONG < 64 #define MAP_FF_LIMIT 4 #else #define MAP_FF_LIMIT 8 #endif static inline map_word map_word_ff(struct map_info *map) { map_word r; int i; if (map_bankwidth(map) < MAP_FF_LIMIT) { int bw = 8 * map_bankwidth(map); r.x[0] = (1UL << bw) - 1; } else { for (i = 0; i < map_words(map); i++) r.x[i] = ~0UL; } return r; } static inline map_word inline_map_read(struct map_info *map, unsigned long ofs) { map_word r; if (map_bankwidth_is_1(map)) r.x[0] = __raw_readb(map->virt + ofs); else if (map_bankwidth_is_2(map)) r.x[0] = __raw_readw(map->virt + ofs); else if (map_bankwidth_is_4(map)) r.x[0] = __raw_readl(map->virt 
+ ofs); #if BITS_PER_LONG >= 64 else if (map_bankwidth_is_8(map)) r.x[0] = __raw_readq(map->virt + ofs); #endif else if (map_bankwidth_is_large(map)) memcpy_fromio(r.x, map->virt + ofs, map->bankwidth); else BUG(); return r; } static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs) { if (map_bankwidth_is_1(map)) __raw_writeb(datum.x[0], map->virt + ofs); else if (map_bankwidth_is_2(map)) __raw_writew(datum.x[0], map->virt + ofs); else if (map_bankwidth_is_4(map)) __raw_writel(datum.x[0], map->virt + ofs); #if BITS_PER_LONG >= 64 else if (map_bankwidth_is_8(map)) __raw_writeq(datum.x[0], map->virt + ofs); #endif else if (map_bankwidth_is_large(map)) memcpy_toio(map->virt+ofs, datum.x, map->bankwidth); else BUG(); mb(); } static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { if (map->cached) memcpy(to, (char *)map->cached + from, len); else memcpy_fromio(to, map->virt + from, len); } static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) { memcpy_toio(map->virt + to, from, len); } #ifdef CONFIG_MTD_COMPLEX_MAPPINGS #define map_read(map, ofs) (map)->read(map, ofs) #define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len) #define map_write(map, datum, ofs) (map)->write(map, datum, ofs) #define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len) extern void simple_map_init(struct map_info *); #define map_is_linear(map) (map->phys != NO_XIP) #else #define map_read(map, ofs) inline_map_read(map, ofs) #define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len) #define map_write(map, datum, ofs) inline_map_write(map, datum, ofs) #define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len) #define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth)) #define map_is_linear(map) ({ (void)(map); 1; }) #endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */ #endif /* __LINUX_MTD_MAP_H__ */ mtd/onenand_regs.h 0000644 00000016061 14722070374 0010155 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mtd/onenand_regs.h * * OneNAND Register header file * * Copyright (C) 2005-2007 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> */ #ifndef __ONENAND_REG_H #define __ONENAND_REG_H /* Memory Address Map Translation (Word order) */ #define ONENAND_MEMORY_MAP(x) ((x) << 1) /* * External BufferRAM area */ #define ONENAND_BOOTRAM ONENAND_MEMORY_MAP(0x0000) #define ONENAND_DATARAM ONENAND_MEMORY_MAP(0x0200) #define ONENAND_SPARERAM ONENAND_MEMORY_MAP(0x8010) /* * OneNAND Registers */ #define ONENAND_REG_MANUFACTURER_ID ONENAND_MEMORY_MAP(0xF000) #define ONENAND_REG_DEVICE_ID ONENAND_MEMORY_MAP(0xF001) #define ONENAND_REG_VERSION_ID ONENAND_MEMORY_MAP(0xF002) #define ONENAND_REG_DATA_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF003) #define ONENAND_REG_BOOT_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF004) #define ONENAND_REG_NUM_BUFFERS ONENAND_MEMORY_MAP(0xF005) #define ONENAND_REG_TECHNOLOGY ONENAND_MEMORY_MAP(0xF006) #define ONENAND_REG_START_ADDRESS1 ONENAND_MEMORY_MAP(0xF100) #define ONENAND_REG_START_ADDRESS2 ONENAND_MEMORY_MAP(0xF101) #define ONENAND_REG_START_ADDRESS3 ONENAND_MEMORY_MAP(0xF102) #define ONENAND_REG_START_ADDRESS4 ONENAND_MEMORY_MAP(0xF103) #define ONENAND_REG_START_ADDRESS5 ONENAND_MEMORY_MAP(0xF104) #define ONENAND_REG_START_ADDRESS6 ONENAND_MEMORY_MAP(0xF105) #define ONENAND_REG_START_ADDRESS7 ONENAND_MEMORY_MAP(0xF106) #define 
ONENAND_REG_START_ADDRESS8 ONENAND_MEMORY_MAP(0xF107) #define ONENAND_REG_START_BUFFER ONENAND_MEMORY_MAP(0xF200) #define ONENAND_REG_COMMAND ONENAND_MEMORY_MAP(0xF220) #define ONENAND_REG_SYS_CFG1 ONENAND_MEMORY_MAP(0xF221) #define ONENAND_REG_SYS_CFG2 ONENAND_MEMORY_MAP(0xF222) #define ONENAND_REG_CTRL_STATUS ONENAND_MEMORY_MAP(0xF240) #define ONENAND_REG_INTERRUPT ONENAND_MEMORY_MAP(0xF241) #define ONENAND_REG_START_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24C) #define ONENAND_REG_END_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24D) #define ONENAND_REG_WP_STATUS ONENAND_MEMORY_MAP(0xF24E) #define ONENAND_REG_ECC_STATUS ONENAND_MEMORY_MAP(0xFF00) #define ONENAND_REG_ECC_M0 ONENAND_MEMORY_MAP(0xFF01) #define ONENAND_REG_ECC_S0 ONENAND_MEMORY_MAP(0xFF02) #define ONENAND_REG_ECC_M1 ONENAND_MEMORY_MAP(0xFF03) #define ONENAND_REG_ECC_S1 ONENAND_MEMORY_MAP(0xFF04) #define ONENAND_REG_ECC_M2 ONENAND_MEMORY_MAP(0xFF05) #define ONENAND_REG_ECC_S2 ONENAND_MEMORY_MAP(0xFF06) #define ONENAND_REG_ECC_M3 ONENAND_MEMORY_MAP(0xFF07) #define ONENAND_REG_ECC_S3 ONENAND_MEMORY_MAP(0xFF08) /* * Device ID Register F001h (R) */ #define DEVICE_IS_FLEXONENAND (1 << 9) #define FLEXONENAND_PI_MASK (0x3ff) #define FLEXONENAND_PI_UNLOCK_SHIFT (14) #define ONENAND_DEVICE_DENSITY_MASK (0xf) #define ONENAND_DEVICE_DENSITY_SHIFT (4) #define ONENAND_DEVICE_IS_DDP (1 << 3) #define ONENAND_DEVICE_IS_DEMUX (1 << 2) #define ONENAND_DEVICE_VCC_MASK (0x3) #define ONENAND_DEVICE_DENSITY_512Mb (0x002) #define ONENAND_DEVICE_DENSITY_1Gb (0x003) #define ONENAND_DEVICE_DENSITY_2Gb (0x004) #define ONENAND_DEVICE_DENSITY_4Gb (0x005) #define ONENAND_DEVICE_DENSITY_8Gb (0x006) /* * Version ID Register F002h (R) */ #define ONENAND_VERSION_PROCESS_SHIFT (8) /* * Technology Register F006h (R) */ #define ONENAND_TECHNOLOGY_IS_MLC (1 << 0) /* * Start Address 1 F100h (R/W) & Start Address 2 F101h (R/W) */ #define ONENAND_DDP_SHIFT (15) #define ONENAND_DDP_CHIP0 (0) #define ONENAND_DDP_CHIP1 (1 << ONENAND_DDP_SHIFT) /* * Start Address 8 F107h (R/W) */ /* Note: It's actually 0x3f in case of SLC */ #define ONENAND_FPA_MASK (0x7f) #define ONENAND_FPA_SHIFT (2) #define ONENAND_FSA_MASK (0x03) /* * Start Buffer Register F200h (R/W) */ #define ONENAND_BSA_MASK (0x03) #define ONENAND_BSA_SHIFT (8) #define ONENAND_BSA_BOOTRAM (0 << 2) #define ONENAND_BSA_DATARAM0 (2 << 2) #define ONENAND_BSA_DATARAM1 (3 << 2) /* Note: It's actually 0x03 in case of SLC */ #define ONENAND_BSC_MASK (0x07) /* * Command Register F220h (R/W) */ #define ONENAND_CMD_READ (0x00) #define ONENAND_CMD_READOOB (0x13) #define ONENAND_CMD_PROG (0x80) #define ONENAND_CMD_PROGOOB (0x1A) #define ONENAND_CMD_2X_PROG (0x7D) #define ONENAND_CMD_2X_CACHE_PROG (0x7F) #define ONENAND_CMD_UNLOCK (0x23) #define ONENAND_CMD_LOCK (0x2A) #define ONENAND_CMD_LOCK_TIGHT (0x2C) #define ONENAND_CMD_UNLOCK_ALL (0x27) #define ONENAND_CMD_ERASE (0x94) #define ONENAND_CMD_MULTIBLOCK_ERASE (0x95) #define ONENAND_CMD_ERASE_VERIFY (0x71) #define ONENAND_CMD_RESET (0xF0) #define ONENAND_CMD_OTP_ACCESS (0x65) #define ONENAND_CMD_READID (0x90) #define FLEXONENAND_CMD_PI_UPDATE (0x05) #define FLEXONENAND_CMD_PI_ACCESS (0x66) #define FLEXONENAND_CMD_RECOVER_LSB (0x05) /* NOTE: Those are not *REAL* commands */ #define ONENAND_CMD_BUFFERRAM (0x1978) #define FLEXONENAND_CMD_READ_PI (0x1985) /* * System Configuration 1 Register F221h (R, R/W) */ #define ONENAND_SYS_CFG1_SYNC_READ (1 << 15) #define ONENAND_SYS_CFG1_BRL_7 (7 << 12) #define ONENAND_SYS_CFG1_BRL_6 (6 << 12) #define ONENAND_SYS_CFG1_BRL_5 (5 << 12) #define 
ONENAND_SYS_CFG1_BRL_4 (4 << 12) #define ONENAND_SYS_CFG1_BRL_3 (3 << 12) #define ONENAND_SYS_CFG1_BRL_10 (2 << 12) #define ONENAND_SYS_CFG1_BRL_9 (1 << 12) #define ONENAND_SYS_CFG1_BRL_8 (0 << 12) #define ONENAND_SYS_CFG1_BRL_SHIFT (12) #define ONENAND_SYS_CFG1_BL_32 (4 << 9) #define ONENAND_SYS_CFG1_BL_16 (3 << 9) #define ONENAND_SYS_CFG1_BL_8 (2 << 9) #define ONENAND_SYS_CFG1_BL_4 (1 << 9) #define ONENAND_SYS_CFG1_BL_CONT (0 << 9) #define ONENAND_SYS_CFG1_BL_SHIFT (9) #define ONENAND_SYS_CFG1_NO_ECC (1 << 8) #define ONENAND_SYS_CFG1_RDY (1 << 7) #define ONENAND_SYS_CFG1_INT (1 << 6) #define ONENAND_SYS_CFG1_IOBE (1 << 5) #define ONENAND_SYS_CFG1_RDY_CONF (1 << 4) #define ONENAND_SYS_CFG1_VHF (1 << 3) #define ONENAND_SYS_CFG1_HF (1 << 2) #define ONENAND_SYS_CFG1_SYNC_WRITE (1 << 1) /* * Controller Status Register F240h (R) */ #define ONENAND_CTRL_ONGO (1 << 15) #define ONENAND_CTRL_LOCK (1 << 14) #define ONENAND_CTRL_LOAD (1 << 13) #define ONENAND_CTRL_PROGRAM (1 << 12) #define ONENAND_CTRL_ERASE (1 << 11) #define ONENAND_CTRL_ERROR (1 << 10) #define ONENAND_CTRL_RSTB (1 << 7) #define ONENAND_CTRL_OTP_L (1 << 6) #define ONENAND_CTRL_OTP_BL (1 << 5) /* * Interrupt Status Register F241h (R) */ #define ONENAND_INT_MASTER (1 << 15) #define ONENAND_INT_READ (1 << 7) #define ONENAND_INT_WRITE (1 << 6) #define ONENAND_INT_ERASE (1 << 5) #define ONENAND_INT_RESET (1 << 4) #define ONENAND_INT_CLEAR (0 << 0) /* * NAND Flash Write Protection Status Register F24Eh (R) */ #define ONENAND_WP_US (1 << 2) #define ONENAND_WP_LS (1 << 1) #define ONENAND_WP_LTS (1 << 0) /* * ECC Status Register FF00h (R) */ #define ONENAND_ECC_1BIT (1 << 0) #define ONENAND_ECC_1BIT_ALL (0x5555) #define ONENAND_ECC_2BIT (1 << 1) #define ONENAND_ECC_2BIT_ALL (0xAAAA) #define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010) #define ONENAND_ECC_3BIT (1 << 2) #define ONENAND_ECC_4BIT (1 << 3) #define ONENAND_ECC_4BIT_UNCORRECTABLE (0x1010) /* * One-Time Programmable (OTP) */ #define FLEXONENAND_OTP_LOCK_OFFSET (2048) #define ONENAND_OTP_LOCK_OFFSET (14) #endif /* __ONENAND_REG_H */ mtd/nand-gpio.h 0000644 00000000512 14722070374 0007361 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MTD_NAND_GPIO_H #define __LINUX_MTD_NAND_GPIO_H #include <linux/mtd/rawnand.h> struct gpio_nand_platdata { void (*adjust_parts)(struct gpio_nand_platdata *, size_t); struct mtd_partition *parts; unsigned int num_parts; unsigned int options; int chip_delay; }; #endif mtd/plat-ram.h 0000644 00000001234 14722070374 0007224 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* linux/include/linux/mtd/plat-ram.h * * (c) 2004 Simtec Electronics * http://www.simtec.co.uk/products/SWLINUX/ * Ben Dooks <ben@simtec.co.uk> * * Generic platform device based RAM map */ #ifndef __LINUX_MTD_PLATRAM_H #define __LINUX_MTD_PLATRAM_H __FILE__ #define PLATRAM_RO (0) #define PLATRAM_RW (1) struct platdata_mtd_ram { const char *mapname; const char * const *map_probes; const char * const *probes; struct mtd_partition *partitions; int nr_partitions; int bankwidth; /* control callbacks */ void (*set_rw)(struct device *dev, int to); }; #endif /* __LINUX_MTD_PLATRAM_H */ flex_proportions.h 0000644 00000005501 14722070374 0010340 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Floating proportions with flexible aging period * * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz> */ #ifndef _LINUX_FLEX_PROPORTIONS_H #define _LINUX_FLEX_PROPORTIONS_H #include <linux/percpu_counter.h> #include <linux/spinlock.h> #include <linux/seqlock.h> #include <linux/gfp.h>
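/*
 * (Illustrative sketch, not part of the original header; it relies only
 * on the declarations that follow below.) Typical use of the SINGLE
 * flavour: initialise the global state and one local counter per event
 * type, count events, age the proportions once per period, then read
 * the fraction back:
 *
 *	struct fprop_global g;
 *	struct fprop_local_single ev;
 *	unsigned long num, den;
 *
 *	fprop_global_init(&g, GFP_KERNEL);
 *	fprop_local_init_single(&ev);
 *	fprop_inc_single(&g, &ev);
 *	fprop_new_period(&g, 1);
 *	fprop_fraction_single(&g, &ev, &num, &den);
 *
 * num/den then approximates this event type's share of all events seen
 * in the current aging window.
 */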
/* * When maximum proportion of some event type is specified, this is the * precision with which we allow limiting. Note that this creates an upper * bound on the number of events per period like * ULLONG_MAX >> FPROP_FRAC_SHIFT. */ #define FPROP_FRAC_SHIFT 10 #define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT) /* * ---- Global proportion definitions ---- */ struct fprop_global { /* Number of events in the current period */ struct percpu_counter events; /* Current period */ unsigned int period; /* Synchronization with period transitions */ seqcount_t sequence; }; int fprop_global_init(struct fprop_global *p, gfp_t gfp); void fprop_global_destroy(struct fprop_global *p); bool fprop_new_period(struct fprop_global *p, int periods); /* * ---- SINGLE ---- */ struct fprop_local_single { /* the local events counter */ unsigned long events; /* Period in which we last updated events */ unsigned int period; raw_spinlock_t lock; /* Protect period and numerator */ }; #define INIT_FPROP_LOCAL_SINGLE(name) \ { .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ } int fprop_local_init_single(struct fprop_local_single *pl); void fprop_local_destroy_single(struct fprop_local_single *pl); void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl); void fprop_fraction_single(struct fprop_global *p, struct fprop_local_single *pl, unsigned long *numerator, unsigned long *denominator); static inline void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl) { unsigned long flags; local_irq_save(flags); __fprop_inc_single(p, pl); local_irq_restore(flags); } /* * ---- PERCPU ---- */ struct fprop_local_percpu { /* the local events counter */ struct percpu_counter events; /* Period in which we last updated events */ unsigned int period; raw_spinlock_t lock; /* Protect period and numerator */ }; int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp); void fprop_local_destroy_percpu(struct fprop_local_percpu *pl); void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl); void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl, int max_frac); void fprop_fraction_percpu(struct fprop_global *p, struct fprop_local_percpu *pl, unsigned long *numerator, unsigned long *denominator); static inline void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl) { unsigned long flags; local_irq_save(flags); __fprop_inc_percpu(p, pl); local_irq_restore(flags); } #endif tty_driver.h 0000644 00000037163 14722070374 0007124 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_DRIVER_H #define _LINUX_TTY_DRIVER_H /* * This structure defines the interface between the low-level tty * driver and the tty routines. The following routines can be * defined; unless noted otherwise, they are optional, and can be * filled in with a null pointer. * * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx) * * Return the tty device corresponding to idx, NULL if there is not * one currently in use and an ERR_PTR value on error. Called under * tty_mutex (for now!) * * Optional method. Default behaviour is to use the ttys array * * int (*install)(struct tty_driver *self, struct tty_struct *tty) * * Install a new tty into the tty driver internal tables. Used in * conjunction with lookup and remove methods. * * Optional method. Default behaviour is to use the ttys array * * void (*remove)(struct tty_driver *self, struct tty_struct *tty) * * Remove a closed tty from the tty driver internal tables.
Used in * conjunction with lookup and remove methods. * * Optional method. Default behaviour is to use the ttys array * * int (*open)(struct tty_struct * tty, struct file * filp); * * This routine is called when a particular tty device is opened. * This routine is mandatory; if this routine is not filled in, * the attempted open will fail with ENODEV. * * Required method. Called with tty lock held. * * void (*close)(struct tty_struct * tty, struct file * filp); * * This routine is called when a particular tty device is closed. * Note: called even if the corresponding open() failed. * * Required method. Called with tty lock held. * * void (*shutdown)(struct tty_struct * tty); * * This routine is called under the tty lock when a particular tty device * is closed for the last time. It executes before the tty resources * are freed so may execute while another function holds a tty kref. * * void (*cleanup)(struct tty_struct * tty); * * This routine is called asynchronously when a particular tty device * is closed for the last time freeing up the resources. This is * actually the second part of shutdown for routines that might sleep. * * * int (*write)(struct tty_struct * tty, * const unsigned char *buf, int count); * * This routine is called by the kernel to write a series of * characters to the tty device. The characters may come from * user space or kernel space. This routine will return the * number of characters actually accepted for writing. * * Optional: Required for writable devices. * * int (*put_char)(struct tty_struct *tty, unsigned char ch); * * This routine is called by the kernel to write a single * character to the tty device. If the kernel uses this routine, * it must call the flush_chars() routine (if defined) when it is * done stuffing characters into the driver. If there is no room * in the queue, the character is ignored. * * Optional: Kernel will use the write method if not provided. * * Note: Do not call this function directly, call tty_put_char * * void (*flush_chars)(struct tty_struct *tty); * * This routine is called by the kernel after it has written a * series of characters to the tty device using put_char(). * * Optional: * * Note: Do not call this function directly, call tty_driver_flush_chars * * int (*write_room)(struct tty_struct *tty); * * This routine returns the number of characters the tty driver * will accept for queuing to be written. This number is subject * to change as output buffers get emptied, or if the output flow * control is acted on. * * Required if write method is provided else not needed. * * Note: Do not call this function directly, call tty_write_room * * int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); * * This routine allows the tty driver to implement * device-specific ioctls. If the ioctl number passed in cmd * is not recognized by the driver, it should return ENOIOCTLCMD. * * Optional * * long (*compat_ioctl)(struct tty_struct *tty, * unsigned int cmd, unsigned long arg); * * Implement ioctl processing for 32 bit processes on 64 bit systems * * Optional * * void (*set_termios)(struct tty_struct *tty, struct ktermios * old); * * This routine allows the tty driver to be notified when * the device's termios settings have changed. * * Optional: Called under the termios lock * * * void (*set_ldisc)(struct tty_struct *tty); * * This routine allows the tty driver to be notified when the * device's line discipline has changed.
* * Optional: Called under BKL (currently) * * void (*throttle)(struct tty_struct * tty); * * This routine notifies the tty driver that input buffers for * the line discipline are close to full, and it should somehow * signal that no more characters should be sent to the tty. * * Optional: Always invoke via tty_throttle(), called under the * termios lock. * * void (*unthrottle)(struct tty_struct * tty); * * This routine notifies the tty driver that it should signal * that characters can now be sent to the tty without fear of * overrunning the input buffers of the line disciplines. * * Optional: Always invoke via tty_unthrottle(), called under the * termios lock. * * void (*stop)(struct tty_struct *tty); * * This routine notifies the tty driver that it should stop * outputting characters to the tty device. * * Called with ->flow_lock held. Serialized with start() method. * * Optional: * * Note: Call stop_tty not this method. * * void (*start)(struct tty_struct *tty); * * This routine notifies the tty driver that it should resume sending * characters to the tty device. * * Called with ->flow_lock held. Serialized with stop() method. * * Optional: * * Note: Call start_tty not this method. * * void (*hangup)(struct tty_struct *tty); * * This routine notifies the tty driver that it should hang up the * tty device. * * Optional: * * Called with tty lock held. * * int (*break_ctl)(struct tty_struct *tty, int state); * * This optional routine requests the tty driver to turn on or * off BREAK status on the RS-232 port. If state is -1, * then the BREAK status should be turned on; if state is 0, then * BREAK should be turned off. * * If this routine is implemented, the high-level tty driver will * handle the following ioctls: TCSBRK, TCSBRKP, TIOCSBRK, * TIOCCBRK. * * If the driver sets TTY_DRIVER_HARDWARE_BREAK then the interface * will also be called with actual times and the hardware is expected * to do the delay work itself. 0 and -1 are still used for on/off. * * Optional: Required for TCSBRK/BRKP/etc handling. * * void (*wait_until_sent)(struct tty_struct *tty, int timeout); * * This routine waits until the device has written out all of the * characters in its transmitter FIFO. * * Optional: If not provided the device is assumed to have no FIFO * * Note: Usually correct to call tty_wait_until_sent * * void (*send_xchar)(struct tty_struct *tty, char ch); * * This routine is used to send a high-priority XON/XOFF * character to the device. * * Optional: If not provided then the write method is called under * the atomic write lock to keep it serialized with the ldisc. * * int (*resize)(struct tty_struct *tty, struct winsize *ws) * * Called when a termios request is issued which changes the * requested terminal geometry. * * Optional: the default action is to update the termios structure * without error. This is usually the correct behaviour. Drivers should * not force errors here if they are not resizable objects (eg a serial * line). See tty_do_resize() if you need to wrap the standard method * in your own logic - the usual case. * * void (*set_termiox)(struct tty_struct *tty, struct termiox *new); * * Called when the device receives a termiox based ioctl. Passes down * the requested data from user space. This method will not be invoked * unless the tty also has a valid tty->termiox pointer. * * Optional: Called under the termios lock * * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount); * * Called when the device receives a TIOCGICOUNT ioctl.
Passed a kernel * structure to complete. This method is optional and will only be called * if provided (otherwise ENOTTY will be returned). */ #include <linux/export.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/cdev.h> #include <linux/termios.h> #include <linux/seq_file.h> struct tty_struct; struct tty_driver; struct serial_icounter_struct; struct serial_struct; struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *driver, struct file *filp, int idx); int (*install)(struct tty_driver *driver, struct tty_struct *tty); void (*remove)(struct tty_driver *driver, struct tty_struct *tty); int (*open)(struct tty_struct * tty, struct file * filp); void (*close)(struct tty_struct * tty, struct file * filp); void (*shutdown)(struct tty_struct *tty); void (*cleanup)(struct tty_struct *tty); int (*write)(struct tty_struct * tty, const unsigned char *buf, int count); int (*put_char)(struct tty_struct *tty, unsigned char ch); void (*flush_chars)(struct tty_struct *tty); int (*write_room)(struct tty_struct *tty); int (*chars_in_buffer)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); long (*compat_ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); void (*set_termios)(struct tty_struct *tty, struct ktermios * old); void (*throttle)(struct tty_struct * tty); void (*unthrottle)(struct tty_struct * tty); void (*stop)(struct tty_struct *tty); void (*start)(struct tty_struct *tty); void (*hangup)(struct tty_struct *tty); int (*break_ctl)(struct tty_struct *tty, int state); void (*flush_buffer)(struct tty_struct *tty); void (*set_ldisc)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, int timeout); void (*send_xchar)(struct tty_struct *tty, char ch); int (*tiocmget)(struct tty_struct *tty); int (*tiocmset)(struct tty_struct *tty, unsigned int set, unsigned int clear); int (*resize)(struct tty_struct *tty, struct winsize *ws); int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); int (*get_icount)(struct tty_struct *tty, struct serial_icounter_struct *icount); int (*get_serial)(struct tty_struct *tty, struct serial_struct *p); int (*set_serial)(struct tty_struct *tty, struct serial_struct *p); void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct tty_driver *driver, int line, char *options); int (*poll_get_char)(struct tty_driver *driver, int line); void (*poll_put_char)(struct tty_driver *driver, int line, char ch); #endif int (*proc_show)(struct seq_file *, void *); } __randomize_layout; struct tty_driver { int magic; /* magic number for this structure */ struct kref kref; /* Reference management */ struct cdev **cdevs; struct module *owner; const char *driver_name; const char *name; int name_base; /* offset of printed name */ int major; /* major device number */ int minor_start; /* start of minor device number */ unsigned int num; /* number of devices allocated */ short type; /* type of tty driver */ short subtype; /* subtype of tty driver */ struct ktermios init_termios; /* Initial termios */ unsigned long flags; /* tty driver flags */ struct proc_dir_entry *proc_entry; /* /proc fs entry */ struct tty_driver *other; /* only used for the PTY driver */ /* * Pointer to the tty data structures */ struct tty_struct **ttys; struct tty_port **ports; struct ktermios **termios; void *driver_state; /* * Driver methods */ const struct tty_operations *ops; struct list_head tty_drivers; } __randomize_layout; extern 
struct list_head tty_drivers; extern struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner, unsigned long flags); extern void put_tty_driver(struct tty_driver *driver); extern void tty_set_operations(struct tty_driver *driver, const struct tty_operations *op); extern struct tty_driver *tty_find_polling_driver(char *name, int *line); extern void tty_driver_kref_put(struct tty_driver *driver); /* Use TTY_DRIVER_* flags below */ #define tty_alloc_driver(lines, flags) \ __tty_alloc_driver(lines, THIS_MODULE, flags) /* * DEPRECATED Do not use this in new code, use tty_alloc_driver instead. * (And change the return value checks.) */ static inline struct tty_driver *alloc_tty_driver(unsigned int lines) { struct tty_driver *ret = tty_alloc_driver(lines, 0); if (IS_ERR(ret)) return NULL; return ret; } static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) { kref_get(&d->kref); return d; } /* tty driver magic number */ #define TTY_DRIVER_MAGIC 0x5402 /* * tty driver flags * * TTY_DRIVER_RESET_TERMIOS --- requests the tty layer to reset the * termios setting when the last process has closed the device. * Used for PTY's, in particular. * * TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will * guarantee never to set any special character handling * flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || * !INPCK)). That is, if there is no reason for the driver to * send notifications of parity and break characters up to the * line driver, it won't do so. This allows the line driver to * optimize for this case if this flag is set. (Note that there * is also a promise, if the above case is true, not to signal * overruns, either.) * * TTY_DRIVER_DYNAMIC_DEV --- if set, the individual tty devices need * to be registered with a call to tty_register_device() when the * device is found in the system and unregistered with a call to * tty_unregister_device() so the devices will show up * properly in sysfs. If not set, driver->num entries will be * created by the tty core in sysfs when tty_register_driver() is * called. This is to be used by drivers that have tty devices * that can appear and disappear while the main tty driver is * registered with the tty core. * * TTY_DRIVER_DEVPTS_MEM -- don't use the standard arrays, instead * use dynamic memory keyed through the devpts filesystem. This * is only applicable to the pty driver. * * TTY_DRIVER_HARDWARE_BREAK -- hardware handles break signals. Pass * the requested timeout to the caller instead of using a simple * on/off interface. * * TTY_DRIVER_DYNAMIC_ALLOC -- do not allocate structures which are * needed per line for this driver as it would waste memory. * The driver will take care. * * TTY_DRIVER_UNNUMBERED_NODE -- do not create numbered /dev nodes. In * other words create /dev/ttyprintk and not /dev/ttyprintk0. * Applicable only when a driver for a single tty device is * being allocated.
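 *
 * (Illustrative, not part of the original header; "drv" is a made-up
 * local variable.) Drivers normally OR the flags they need into the
 * tty_alloc_driver() call declared above, e.g.:
 *
 *	struct tty_driver *drv;
 *
 *	drv = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW |
 *				  TTY_DRIVER_DYNAMIC_DEV);
 *	if (IS_ERR(drv))
 *		return PTR_ERR(drv);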
*/ #define TTY_DRIVER_INSTALLED 0x0001 #define TTY_DRIVER_RESET_TERMIOS 0x0002 #define TTY_DRIVER_REAL_RAW 0x0004 #define TTY_DRIVER_DYNAMIC_DEV 0x0008 #define TTY_DRIVER_DEVPTS_MEM 0x0010 #define TTY_DRIVER_HARDWARE_BREAK 0x0020 #define TTY_DRIVER_DYNAMIC_ALLOC 0x0040 #define TTY_DRIVER_UNNUMBERED_NODE 0x0080 /* tty driver types */ #define TTY_DRIVER_TYPE_SYSTEM 0x0001 #define TTY_DRIVER_TYPE_CONSOLE 0x0002 #define TTY_DRIVER_TYPE_SERIAL 0x0003 #define TTY_DRIVER_TYPE_PTY 0x0004 #define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */ #define TTY_DRIVER_TYPE_SYSCONS 0x0006 /* system subtypes (magic, used by tty_io.c) */ #define SYSTEM_TYPE_TTY 0x0001 #define SYSTEM_TYPE_CONSOLE 0x0002 #define SYSTEM_TYPE_SYSCONS 0x0003 #define SYSTEM_TYPE_SYSPTMX 0x0004 /* pty subtypes (magic, used by tty_io.c) */ #define PTY_TYPE_MASTER 0x0001 #define PTY_TYPE_SLAVE 0x0002 /* serial subtype definitions */ #define SERIAL_TYPE_NORMAL 1 #endif /* #ifdef _LINUX_TTY_DRIVER_H */ mmc/slot-gpio.h 0000644 00000001730 14722070374 0007415 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic GPIO card-detect helper header * * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> */ #ifndef MMC_SLOT_GPIO_H #define MMC_SLOT_GPIO_H #include <linux/types.h> #include <linux/irqreturn.h> struct mmc_host; int mmc_gpio_get_ro(struct mmc_host *host); int mmc_gpio_get_cd(struct mmc_host *host); int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, unsigned int debounce, bool *gpio_invert); int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, unsigned int idx, unsigned int debounce, bool *gpio_invert); void mmc_gpio_set_cd_isr(struct mmc_host *host, irqreturn_t (*isr)(int irq, void *dev_id)); int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on); void mmc_gpiod_request_cd_irq(struct mmc_host *host); bool mmc_can_gpio_cd(struct mmc_host *host); bool mmc_can_gpio_ro(struct mmc_host *host); #endif mmc/card.h 0000644 00000025652 14722070374 0006422 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mmc/card.h * * Card driver specific definitions. 
*/ #ifndef LINUX_MMC_CARD_H #define LINUX_MMC_CARD_H #include <linux/device.h> #include <linux/mod_devicetable.h> struct mmc_cid { unsigned int manfid; char prod_name[8]; unsigned char prv; unsigned int serial; unsigned short oemid; unsigned short year; unsigned char hwrev; unsigned char fwrev; unsigned char month; }; struct mmc_csd { unsigned char structure; unsigned char mmca_vsn; unsigned short cmdclass; unsigned short taac_clks; unsigned int taac_ns; unsigned int c_size; unsigned int r2w_factor; unsigned int max_dtr; unsigned int erase_size; /* In sectors */ unsigned int read_blkbits; unsigned int write_blkbits; unsigned int capacity; unsigned int read_partial:1, read_misalign:1, write_partial:1, write_misalign:1, dsr_imp:1; }; struct mmc_ext_csd { u8 rev; u8 erase_group_def; u8 sec_feature_support; u8 rel_sectors; u8 rel_param; u8 part_config; u8 cache_ctrl; u8 rst_n_function; u8 max_packed_writes; u8 max_packed_reads; u8 packed_event_en; unsigned int part_time; /* Units: ms */ unsigned int sa_timeout; /* Units: 100ns */ unsigned int generic_cmd6_time; /* Units: 10ms */ unsigned int power_off_longtime; /* Units: ms */ u8 power_off_notification; /* state */ unsigned int hs_max_dtr; unsigned int hs200_max_dtr; #define MMC_HIGH_26_MAX_DTR 26000000 #define MMC_HIGH_52_MAX_DTR 52000000 #define MMC_HIGH_DDR_MAX_DTR 52000000 #define MMC_HS200_MAX_DTR 200000000 unsigned int sectors; unsigned int hc_erase_size; /* In sectors */ unsigned int hc_erase_timeout; /* In milliseconds */ unsigned int sec_trim_mult; /* Secure trim multiplier */ unsigned int sec_erase_mult; /* Secure erase multiplier */ unsigned int trim_timeout; /* In milliseconds */ bool partition_setting_completed; /* enable bit */ unsigned long long enhanced_area_offset; /* Units: Byte */ unsigned int enhanced_area_size; /* Units: KB */ unsigned int cache_size; /* Units: KB */ bool hpi_en; /* HPI enablebit */ bool hpi; /* HPI support bit */ unsigned int hpi_cmd; /* cmd used as HPI */ bool bkops; /* background support bit */ bool man_bkops_en; /* manual bkops enable bit */ bool auto_bkops_en; /* auto bkops enable bit */ unsigned int data_sector_size; /* 512 bytes or 4KB */ unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ unsigned int boot_ro_lock; /* ro lock support */ bool boot_ro_lockable; bool ffu_capable; /* Firmware upgrade support */ bool cmdq_en; /* Command Queue enabled */ bool cmdq_support; /* Command Queue supported */ unsigned int cmdq_depth; /* Command Queue depth */ #define MMC_FIRMWARE_LEN 8 u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */ u8 raw_exception_status; /* 54 */ u8 raw_partition_support; /* 160 */ u8 raw_rpmb_size_mult; /* 168 */ u8 raw_erased_mem_count; /* 181 */ u8 strobe_support; /* 184 */ u8 raw_ext_csd_structure; /* 194 */ u8 raw_card_type; /* 196 */ u8 raw_driver_strength; /* 197 */ u8 out_of_int_time; /* 198 */ u8 raw_pwr_cl_52_195; /* 200 */ u8 raw_pwr_cl_26_195; /* 201 */ u8 raw_pwr_cl_52_360; /* 202 */ u8 raw_pwr_cl_26_360; /* 203 */ u8 raw_s_a_timeout; /* 217 */ u8 raw_hc_erase_gap_size; /* 221 */ u8 raw_erase_timeout_mult; /* 223 */ u8 raw_hc_erase_grp_size; /* 224 */ u8 raw_sec_trim_mult; /* 229 */ u8 raw_sec_erase_mult; /* 230 */ u8 raw_sec_feature_support;/* 231 */ u8 raw_trim_mult; /* 232 */ u8 raw_pwr_cl_200_195; /* 236 */ u8 raw_pwr_cl_200_360; /* 237 */ u8 raw_pwr_cl_ddr_52_195; /* 238 */ u8 raw_pwr_cl_ddr_52_360; /* 239 */ u8 raw_pwr_cl_ddr_200_360; /* 253 */ u8 raw_bkops_status; /* 246 */ u8 raw_sectors[4]; /* 212 - 4 bytes */ u8 pre_eol_info; /* 267 */ u8 device_life_time_est_typ_a; 
/* 268 */ u8 device_life_time_est_typ_b; /* 269 */ unsigned int feature_support; #define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */ }; struct sd_scr { unsigned char sda_vsn; unsigned char sda_spec3; unsigned char sda_spec4; unsigned char sda_specx; unsigned char bus_widths; #define SD_SCR_BUS_WIDTH_1 (1<<0) #define SD_SCR_BUS_WIDTH_4 (1<<2) unsigned char cmds; #define SD_SCR_CMD20_SUPPORT (1<<0) #define SD_SCR_CMD23_SUPPORT (1<<1) }; struct sd_ssr { unsigned int au; /* In sectors */ unsigned int erase_timeout; /* In milliseconds */ unsigned int erase_offset; /* In milliseconds */ }; struct sd_switch_caps { unsigned int hs_max_dtr; unsigned int uhs_max_dtr; #define HIGH_SPEED_MAX_DTR 50000000 #define UHS_SDR104_MAX_DTR 208000000 #define UHS_SDR50_MAX_DTR 100000000 #define UHS_DDR50_MAX_DTR 50000000 #define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR #define UHS_SDR12_MAX_DTR 25000000 #define DEFAULT_SPEED_MAX_DTR UHS_SDR12_MAX_DTR unsigned int sd3_bus_mode; #define UHS_SDR12_BUS_SPEED 0 #define HIGH_SPEED_BUS_SPEED 1 #define UHS_SDR25_BUS_SPEED 1 #define UHS_SDR50_BUS_SPEED 2 #define UHS_SDR104_BUS_SPEED 3 #define UHS_DDR50_BUS_SPEED 4 #define SD_MODE_HIGH_SPEED (1 << HIGH_SPEED_BUS_SPEED) #define SD_MODE_UHS_SDR12 (1 << UHS_SDR12_BUS_SPEED) #define SD_MODE_UHS_SDR25 (1 << UHS_SDR25_BUS_SPEED) #define SD_MODE_UHS_SDR50 (1 << UHS_SDR50_BUS_SPEED) #define SD_MODE_UHS_SDR104 (1 << UHS_SDR104_BUS_SPEED) #define SD_MODE_UHS_DDR50 (1 << UHS_DDR50_BUS_SPEED) unsigned int sd3_drv_type; #define SD_DRIVER_TYPE_B 0x01 #define SD_DRIVER_TYPE_A 0x02 #define SD_DRIVER_TYPE_C 0x04 #define SD_DRIVER_TYPE_D 0x08 unsigned int sd3_curr_limit; #define SD_SET_CURRENT_LIMIT_200 0 #define SD_SET_CURRENT_LIMIT_400 1 #define SD_SET_CURRENT_LIMIT_600 2 #define SD_SET_CURRENT_LIMIT_800 3 #define SD_SET_CURRENT_NO_CHANGE (-1) #define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200) #define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400) #define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600) #define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800) }; struct sdio_cccr { unsigned int sdio_vsn; unsigned int sd_vsn; unsigned int multi_block:1, low_speed:1, wide_bus:1, high_power:1, high_speed:1, disable_cd:1; }; struct sdio_cis { unsigned short vendor; unsigned short device; unsigned short blksize; unsigned int max_dtr; }; struct mmc_host; struct sdio_func; struct sdio_func_tuple; struct mmc_queue_req; #define SDIO_MAX_FUNCS 7 /* The number of MMC physical partitions. These consist of: * boot partitions (2), general purpose partitions (4) and * RPMB partition (1) in MMC v4.4. 
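 * For example: 2 boot + 4 general purpose + 1 RPMB = 7, which is the
 * MMC_NUM_PHY_PARTITION total defined below.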
*/ #define MMC_NUM_BOOT_PARTITION 2 #define MMC_NUM_GP_PARTITION 4 #define MMC_NUM_PHY_PARTITION 7 #define MAX_MMC_PART_NAME_LEN 20 /* * MMC Physical partitions */ struct mmc_part { u64 size; /* partition size (in bytes) */ unsigned int part_cfg; /* partition type */ char name[MAX_MMC_PART_NAME_LEN]; bool force_ro; /* to make boot parts RO by default */ unsigned int area_type; #define MMC_BLK_DATA_AREA_MAIN (1<<0) #define MMC_BLK_DATA_AREA_BOOT (1<<1) #define MMC_BLK_DATA_AREA_GP (1<<2) #define MMC_BLK_DATA_AREA_RPMB (1<<3) }; /* * MMC device */ struct mmc_card { struct mmc_host *host; /* the host this device belongs to */ struct device dev; /* the device */ u32 ocr; /* the current OCR setting */ unsigned int rca; /* relative card address of device */ unsigned int type; /* card type */ #define MMC_TYPE_MMC 0 /* MMC card */ #define MMC_TYPE_SD 1 /* SD card */ #define MMC_TYPE_SDIO 2 /* SDIO card */ #define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */ unsigned int state; /* (our) card state */ unsigned int quirks; /* card quirks */ unsigned int quirk_max_rate; /* max rate set by quirks */ #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ /* for byte mode */ #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ /* (missing CIA registers) */ #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */ #define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ /* byte mode */ #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ #define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ #define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */ #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */ bool reenable_cmdq; /* Re-enable Command Queue */ unsigned int erase_size; /* erase size in sectors */ unsigned int erase_shift; /* if erase unit is power 2 */ unsigned int pref_erase; /* in sectors */ unsigned int eg_boundary; /* don't cross erase-group boundaries */ unsigned int erase_arg; /* erase / trim / discard */ u8 erased_byte; /* value of erased bytes */ u32 raw_cid[4]; /* raw card CID */ u32 raw_csd[4]; /* raw card CSD */ u32 raw_scr[2]; /* raw card SCR */ u32 raw_ssr[16]; /* raw card SSR */ struct mmc_cid cid; /* card identification */ struct mmc_csd csd; /* card specific */ struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */ struct sd_scr scr; /* extra SD information */ struct sd_ssr ssr; /* yet more SD information */ struct sd_switch_caps sw_caps; /* switch (CMD6) caps */ unsigned int sdio_funcs; /* number of SDIO functions */ atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */ struct sdio_cccr cccr; /* common card info */ struct sdio_cis cis; /* common tuple info */ struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */ struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */ unsigned num_info; /* number of info strings */ const char **info; /* info strings */ struct sdio_func_tuple *tuples; /* unknown common tuples */ unsigned int sd_bus_speed; /* Bus Speed Mode 
set for the card */ unsigned int mmc_avail_type; /* supported device type by both host and card */ unsigned int drive_strength; /* for UHS-I, HS200 or HS400 */ struct dentry *debugfs_root; struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ unsigned int nr_parts; unsigned int bouncesz; /* Bounce buffer size */ struct workqueue_struct *complete_wq; /* Private workqueue */ }; static inline bool mmc_large_sector(struct mmc_card *card) { return card->ext_csd.data_sector_size == 4096; } bool mmc_card_is_blockaddr(struct mmc_card *card); #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) #define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) #define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO) #endif /* LINUX_MMC_CARD_H */ mmc/sdio_func.h 0000644 00000012621 14722070374 0007452 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mmc/sdio_func.h * * Copyright 2007-2008 Pierre Ossman */ #ifndef LINUX_MMC_SDIO_FUNC_H #define LINUX_MMC_SDIO_FUNC_H #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/mmc/pm.h> struct mmc_card; struct sdio_func; typedef void (sdio_irq_handler_t)(struct sdio_func *); /* * SDIO function CIS tuple (unknown to the core) */ struct sdio_func_tuple { struct sdio_func_tuple *next; unsigned char code; unsigned char size; unsigned char data[0]; }; /* * SDIO function devices */ struct sdio_func { struct mmc_card *card; /* the card this device belongs to */ struct device dev; /* the device */ sdio_irq_handler_t *irq_handler; /* IRQ callback */ unsigned int num; /* function number */ unsigned char class; /* standard interface class */ unsigned short vendor; /* vendor id */ unsigned short device; /* device id */ unsigned max_blksize; /* maximum block size */ unsigned cur_blksize; /* current block size */ unsigned enable_timeout; /* max enable timeout in msec */ unsigned int state; /* function state */ #define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */ u8 *tmpbuf; /* DMA:able scratch buffer */ unsigned num_info; /* number of info strings */ const char **info; /* info strings */ struct sdio_func_tuple *tuples; }; #define sdio_func_present(f) ((f)->state & SDIO_STATE_PRESENT) #define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT) #define sdio_func_id(f) (dev_name(&(f)->dev)) #define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev) #define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d) #define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) /* * SDIO function device driver */ struct sdio_driver { char *name; const struct sdio_device_id *id_table; int (*probe)(struct sdio_func *, const struct sdio_device_id *); void (*remove)(struct sdio_func *); struct device_driver drv; }; /** * SDIO_DEVICE - macro used to describe a specific SDIO device * @vend: the 16 bit manufacturer code * @dev: the 16 bit function id * * This macro is used to create a struct sdio_device_id that matches a * specific device. The class field will be set to SDIO_ANY_ID. */ #define SDIO_DEVICE(vend,dev) \ .class = SDIO_ANY_ID, \ .vendor = (vend), .device = (dev) /** * SDIO_DEVICE_CLASS - macro used to describe a specific SDIO device class * @dev_class: the 8 bit standard interface code * * This macro is used to create a struct sdio_device_id that matches a * specific standard SDIO function type. The vendor and device fields will * be set to SDIO_ANY_ID. 
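 *
 * (Illustrative sketch, not part of the original header; "wifi_ids" is
 * a made-up name.) Both this macro and SDIO_DEVICE() above are used as
 * initialisers in a driver's match table, terminated by an empty entry:
 *
 *	static const struct sdio_device_id wifi_ids[] = {
 *		{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
 *			      SDIO_DEVICE_ID_BROADCOM_4329) },
 *		{ SDIO_DEVICE_CLASS(SDIO_CLASS_WLAN) },
 *		{ },
 *	};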
*/ #define SDIO_DEVICE_CLASS(dev_class) \ .class = (dev_class), \ .vendor = SDIO_ANY_ID, .device = SDIO_ANY_ID extern int sdio_register_driver(struct sdio_driver *); extern void sdio_unregister_driver(struct sdio_driver *); /** * module_sdio_driver() - Helper macro for registering a SDIO driver * @__sdio_driver: sdio_driver struct * * Helper macro for SDIO drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_sdio_driver(__sdio_driver) \ module_driver(__sdio_driver, sdio_register_driver, \ sdio_unregister_driver) /* * SDIO I/O operations */ extern void sdio_claim_host(struct sdio_func *func); extern void sdio_release_host(struct sdio_func *func); extern int sdio_enable_func(struct sdio_func *func); extern int sdio_disable_func(struct sdio_func *func); extern int sdio_set_block_size(struct sdio_func *func, unsigned blksz); extern int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler); extern int sdio_release_irq(struct sdio_func *func); extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz); extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret); extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret); extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret); extern int sdio_memcpy_fromio(struct sdio_func *func, void *dst, unsigned int addr, int count); extern int sdio_readsb(struct sdio_func *func, void *dst, unsigned int addr, int count); extern void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret); extern void sdio_writew(struct sdio_func *func, u16 b, unsigned int addr, int *err_ret); extern void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret); extern u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte, unsigned int addr, int *err_ret); extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr, void *src, int count); extern int sdio_writesb(struct sdio_func *func, unsigned int addr, void *src, int count); extern unsigned char sdio_f0_readb(struct sdio_func *func, unsigned int addr, int *err_ret); extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr, int *err_ret); extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func); extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags); extern void sdio_retune_crc_disable(struct sdio_func *func); extern void sdio_retune_crc_enable(struct sdio_func *func); extern void sdio_retune_hold_now(struct sdio_func *func); extern void sdio_retune_release(struct sdio_func *func); #endif /* LINUX_MMC_SDIO_FUNC_H */ mmc/sdio.h 0000644 00000013757 14722070374 0006452 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mmc/sdio.h * * Copyright 2006-2007 Pierre Ossman */ #ifndef LINUX_MMC_SDIO_H #define LINUX_MMC_SDIO_H /* SDIO commands type argument response */ #define SD_IO_SEND_OP_COND 5 /* bcr [23:0] OCR R4 */ #define SD_IO_RW_DIRECT 52 /* ac [31:0] See below R5 */ #define SD_IO_RW_EXTENDED 53 /* adtc [31:0] See below R5 */ /* * SD_IO_RW_DIRECT argument format: * * [31] R/W flag * [30:28] Function number * [27] RAW flag * [25:9] Register address * [7:0] Data */ /* * SD_IO_RW_EXTENDED argument format: * * [31] R/W flag * [30:28] Function number * [27] Block mode * [26] Increment address * [25:9] Register address * [8:0] Byte/block 
count */ #define R4_18V_PRESENT (1<<24) #define R4_MEMORY_PRESENT (1 << 27) /* SDIO status in R5 Type e : error bit s : status bit r : detected and set for the actual command response x : detected and set during command execution. the host must poll the card by sending status command in order to read these bits. Clear condition a : according to the card state b : always related to the previous command. Reception of a valid command will clear it (with a delay of one command) c : clear by read */ #define R5_COM_CRC_ERROR (1 << 15) /* er, b */ #define R5_ILLEGAL_COMMAND (1 << 14) /* er, b */ #define R5_ERROR (1 << 11) /* erx, c */ #define R5_FUNCTION_NUMBER (1 << 9) /* er, c */ #define R5_OUT_OF_RANGE (1 << 8) /* er, c */ #define R5_STATUS(x) (x & 0xCB00) #define R5_IO_CURRENT_STATE(x) ((x & 0x3000) >> 12) /* s, b */ /* * Card Common Control Registers (CCCR) */ #define SDIO_CCCR_CCCR 0x00 #define SDIO_CCCR_REV_1_00 0 /* CCCR/FBR Version 1.00 */ #define SDIO_CCCR_REV_1_10 1 /* CCCR/FBR Version 1.10 */ #define SDIO_CCCR_REV_1_20 2 /* CCCR/FBR Version 1.20 */ #define SDIO_CCCR_REV_3_00 3 /* CCCR/FBR Version 3.00 */ #define SDIO_SDIO_REV_1_00 0 /* SDIO Spec Version 1.00 */ #define SDIO_SDIO_REV_1_10 1 /* SDIO Spec Version 1.10 */ #define SDIO_SDIO_REV_1_20 2 /* SDIO Spec Version 1.20 */ #define SDIO_SDIO_REV_2_00 3 /* SDIO Spec Version 2.00 */ #define SDIO_SDIO_REV_3_00 4 /* SDIO Spec Version 3.00 */ #define SDIO_CCCR_SD 0x01 #define SDIO_SD_REV_1_01 0 /* SD Physical Spec Version 1.01 */ #define SDIO_SD_REV_1_10 1 /* SD Physical Spec Version 1.10 */ #define SDIO_SD_REV_2_00 2 /* SD Physical Spec Version 2.00 */ #define SDIO_SD_REV_3_00 3 /* SD Physical Spev Version 3.00 */ #define SDIO_CCCR_IOEx 0x02 #define SDIO_CCCR_IORx 0x03 #define SDIO_CCCR_IENx 0x04 /* Function/Master Interrupt Enable */ #define SDIO_CCCR_INTx 0x05 /* Function Interrupt Pending */ #define SDIO_CCCR_ABORT 0x06 /* function abort/card reset */ #define SDIO_CCCR_IF 0x07 /* bus interface controls */ #define SDIO_BUS_WIDTH_MASK 0x03 /* data bus width setting */ #define SDIO_BUS_WIDTH_1BIT 0x00 #define SDIO_BUS_WIDTH_RESERVED 0x01 #define SDIO_BUS_WIDTH_4BIT 0x02 #define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */ #define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */ #define SDIO_BUS_ASYNC_INT 0x20 #define SDIO_BUS_CD_DISABLE 0x80 /* disable pull-up on DAT3 (pin 1) */ #define SDIO_CCCR_CAPS 0x08 #define SDIO_CCCR_CAP_SDC 0x01 /* can do CMD52 while data transfer */ #define SDIO_CCCR_CAP_SMB 0x02 /* can do multi-block xfers (CMD53) */ #define SDIO_CCCR_CAP_SRW 0x04 /* supports read-wait protocol */ #define SDIO_CCCR_CAP_SBS 0x08 /* supports suspend/resume */ #define SDIO_CCCR_CAP_S4MI 0x10 /* interrupt during 4-bit CMD53 */ #define SDIO_CCCR_CAP_E4MI 0x20 /* enable ints during 4-bit CMD53 */ #define SDIO_CCCR_CAP_LSC 0x40 /* low speed card */ #define SDIO_CCCR_CAP_4BLS 0x80 /* 4 bit low speed card */ #define SDIO_CCCR_CIS 0x09 /* common CIS pointer (3 bytes) */ /* Following 4 regs are valid only if SBS is set */ #define SDIO_CCCR_SUSPEND 0x0c #define SDIO_CCCR_SELx 0x0d #define SDIO_CCCR_EXECx 0x0e #define SDIO_CCCR_READYx 0x0f #define SDIO_CCCR_BLKSIZE 0x10 #define SDIO_CCCR_POWER 0x12 #define SDIO_POWER_SMPC 0x01 /* Supports Master Power Control */ #define SDIO_POWER_EMPC 0x02 /* Enable Master Power Control */ #define SDIO_CCCR_SPEED 0x13 #define SDIO_SPEED_SHS 0x01 /* Supports High-Speed mode */ #define SDIO_SPEED_BSS_SHIFT 1 #define SDIO_SPEED_BSS_MASK (7<<SDIO_SPEED_BSS_SHIFT) #define SDIO_SPEED_SDR12 
(0<<SDIO_SPEED_BSS_SHIFT) #define SDIO_SPEED_SDR25 (1<<SDIO_SPEED_BSS_SHIFT) #define SDIO_SPEED_SDR50 (2<<SDIO_SPEED_BSS_SHIFT) #define SDIO_SPEED_SDR104 (3<<SDIO_SPEED_BSS_SHIFT) #define SDIO_SPEED_DDR50 (4<<SDIO_SPEED_BSS_SHIFT) #define SDIO_SPEED_EHS SDIO_SPEED_SDR25 /* Enable High-Speed */ #define SDIO_CCCR_UHS 0x14 #define SDIO_UHS_SDR50 0x01 #define SDIO_UHS_SDR104 0x02 #define SDIO_UHS_DDR50 0x04 #define SDIO_CCCR_DRIVE_STRENGTH 0x15 #define SDIO_SDTx_MASK 0x07 #define SDIO_DRIVE_SDTA (1<<0) #define SDIO_DRIVE_SDTC (1<<1) #define SDIO_DRIVE_SDTD (1<<2) #define SDIO_DRIVE_DTSx_MASK 0x03 #define SDIO_DRIVE_DTSx_SHIFT 4 #define SDIO_DTSx_SET_TYPE_B (0 << SDIO_DRIVE_DTSx_SHIFT) #define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT) #define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT) #define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT) /* * Function Basic Registers (FBR) */ #define SDIO_FBR_BASE(f) ((f) * 0x100) /* base of function f's FBRs */ #define SDIO_FBR_STD_IF 0x00 #define SDIO_FBR_SUPPORTS_CSA 0x40 /* supports Code Storage Area */ #define SDIO_FBR_ENABLE_CSA 0x80 /* enable Code Storage Area */ #define SDIO_FBR_STD_IF_EXT 0x01 #define SDIO_FBR_POWER 0x02 #define SDIO_FBR_POWER_SPS 0x01 /* Supports Power Selection */ #define SDIO_FBR_POWER_EPS 0x02 /* Enable (low) Power Selection */ #define SDIO_FBR_CIS 0x09 /* CIS pointer (3 bytes) */ #define SDIO_FBR_CSA 0x0C /* CSA pointer (3 bytes) */ #define SDIO_FBR_CSA_DATA 0x0F #define SDIO_FBR_BLKSIZE 0x10 /* block size (2 bytes) */ #endif /* LINUX_MMC_SDIO_H */ mmc/pm.h 0000644 00000001604 14722070374 0006114 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mmc/pm.h * * Author: Nicolas Pitre * Copyright: (C) 2009 Marvell Technology Group Ltd. */ #ifndef LINUX_MMC_PM_H #define LINUX_MMC_PM_H /* * These flags are used to describe power management features that * some cards (typically SDIO cards) might wish to benefit from when * the host system is being suspended. There are several layers of * abstractions involved, from the host controller driver, to the MMC core * code, to the SDIO core code, to finally get to the actual SDIO function * driver. This file is therefore used for common definitions shared across * all those layers. */ typedef unsigned int mmc_pm_flag_t; #define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */ #define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */ #endif /* LINUX_MMC_PM_H */ mmc/sdio_ids.h 0000644 00000005666 14722070374 0007311 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * SDIO Classes, Interface Types, Manufacturer IDs, etc. */ #ifndef LINUX_MMC_SDIO_IDS_H #define LINUX_MMC_SDIO_IDS_H /* * Standard SDIO Function Interfaces */ #define SDIO_CLASS_NONE 0x00 /* Not a SDIO standard interface */ #define SDIO_CLASS_UART 0x01 /* standard UART interface */ #define SDIO_CLASS_BT_A 0x02 /* Type-A BlueTooth std interface */ #define SDIO_CLASS_BT_B 0x03 /* Type-B BlueTooth std interface */ #define SDIO_CLASS_GPS 0x04 /* GPS standard interface */ #define SDIO_CLASS_CAMERA 0x05 /* Camera standard interface */ #define SDIO_CLASS_PHS 0x06 /* PHS standard interface */ #define SDIO_CLASS_WLAN 0x07 /* WLAN interface */ #define SDIO_CLASS_ATA 0x08 /* Embedded SDIO-ATA std interface */ #define SDIO_CLASS_BT_AMP 0x09 /* Type-A Bluetooth AMP interface */ /* * Vendors and devices. Sort key: vendor first, device next. 
*/ #define SDIO_VENDOR_ID_BROADCOM 0x02d0 #define SDIO_DEVICE_ID_BROADCOM_43143 0xa887 #define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 #define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 #define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 #define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 #define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c #define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 #define SDIO_DEVICE_ID_BROADCOM_4339 0x4339 #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 #define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 #define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 #define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 #define SDIO_DEVICE_ID_CYPRESS_4373 0x4373 #define SDIO_DEVICE_ID_CYPRESS_43012 43012 #define SDIO_VENDOR_ID_INTEL 0x0089 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403 #define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 #define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405 #define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407 #define SDIO_VENDOR_ID_MARVELL 0x02df #define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 #define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104 #define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105 #define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128 #define SDIO_DEVICE_ID_MARVELL_8887WLAN 0x9134 #define SDIO_VENDOR_ID_MEDIATEK 0x037a #define SDIO_VENDOR_ID_SIANO 0x039a #define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 #define SDIO_DEVICE_ID_SIANO_NICE 0x0202 #define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300 #define SDIO_DEVICE_ID_SIANO_VENICE 0x0301 #define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100 #define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347 #define SDIO_VENDOR_ID_TI 0x0097 #define SDIO_DEVICE_ID_TI_WL1271 0x4076 #define SDIO_VENDOR_ID_TI_WL1251 0x104c #define SDIO_DEVICE_ID_TI_WL1251 0x9066 #define SDIO_VENDOR_ID_STE 0x0020 #define SDIO_DEVICE_ID_STE_CW1200 0x2280 #endif /* LINUX_MMC_SDIO_IDS_H */ mmc/mmc.h 0000644 00000041340 14722070374 0006255 0 ustar 00 /* * Header for MultiMediaCard (MMC) * * Copyright 2002 Hewlett-Packard Company * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. * * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS * FITNESS FOR ANY PARTICULAR PURPOSE. * * Many thanks to Alessandro Rubini and Jonathan Corbet! 
* * Based strongly on code by: * * Author: Yong-iL Joh <tolkien@mizi.com> * * Author: Andrew Christian * 15 May 2002 */ #ifndef LINUX_MMC_MMC_H #define LINUX_MMC_MMC_H #include <linux/types.h> /* Standard MMC commands (4.1) type argument response */ /* class 1 */ #define MMC_GO_IDLE_STATE 0 /* bc */ #define MMC_SEND_OP_COND 1 /* bcr [31:0] OCR R3 */ #define MMC_ALL_SEND_CID 2 /* bcr R2 */ #define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */ #define MMC_SET_DSR 4 /* bc [31:16] RCA */ #define MMC_SLEEP_AWAKE 5 /* ac [31:16] RCA 15:flg R1b */ #define MMC_SWITCH 6 /* ac [31:0] See below R1b */ #define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */ #define MMC_SEND_EXT_CSD 8 /* adtc R1 */ #define MMC_SEND_CSD 9 /* ac [31:16] RCA R2 */ #define MMC_SEND_CID 10 /* ac [31:16] RCA R2 */ #define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */ #define MMC_STOP_TRANSMISSION 12 /* ac R1b */ #define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */ #define MMC_BUS_TEST_R 14 /* adtc R1 */ #define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */ #define MMC_BUS_TEST_W 19 /* adtc R1 */ #define MMC_SPI_READ_OCR 58 /* spi spi_R3 */ #define MMC_SPI_CRC_ON_OFF 59 /* spi [0:0] flag spi_R1 */ /* class 2 */ #define MMC_SET_BLOCKLEN 16 /* ac [31:0] block len R1 */ #define MMC_READ_SINGLE_BLOCK 17 /* adtc [31:0] data addr R1 */ #define MMC_READ_MULTIPLE_BLOCK 18 /* adtc [31:0] data addr R1 */ #define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */ #define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */ /* class 3 */ #define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ /* class 4 */ #define MMC_SET_BLOCK_COUNT 23 /* adtc [31:0] data addr R1 */ #define MMC_WRITE_BLOCK 24 /* adtc [31:0] data addr R1 */ #define MMC_WRITE_MULTIPLE_BLOCK 25 /* adtc R1 */ #define MMC_PROGRAM_CID 26 /* adtc R1 */ #define MMC_PROGRAM_CSD 27 /* adtc R1 */ /* class 6 */ #define MMC_SET_WRITE_PROT 28 /* ac [31:0] data addr R1b */ #define MMC_CLR_WRITE_PROT 29 /* ac [31:0] data addr R1b */ #define MMC_SEND_WRITE_PROT 30 /* adtc [31:0] wpdata addr R1 */ /* class 5 */ #define MMC_ERASE_GROUP_START 35 /* ac [31:0] data addr R1 */ #define MMC_ERASE_GROUP_END 36 /* ac [31:0] data addr R1 */ #define MMC_ERASE 38 /* ac R1b */ /* class 9 */ #define MMC_FAST_IO 39 /* ac <Complex> R4 */ #define MMC_GO_IRQ_STATE 40 /* bcr R5 */ /* class 7 */ #define MMC_LOCK_UNLOCK 42 /* adtc R1b */ /* class 8 */ #define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ #define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ /* class 11 */ #define MMC_QUE_TASK_PARAMS 44 /* ac [20:16] task id R1 */ #define MMC_QUE_TASK_ADDR 45 /* ac [31:0] data addr R1 */ #define MMC_EXECUTE_READ_TASK 46 /* adtc [20:16] task id R1 */ #define MMC_EXECUTE_WRITE_TASK 47 /* adtc [20:16] task id R1 */ #define MMC_CMDQ_TASK_MGMT 48 /* ac [20:16] task id R1b */ static inline bool mmc_op_multi(u32 opcode) { return opcode == MMC_WRITE_MULTIPLE_BLOCK || opcode == MMC_READ_MULTIPLE_BLOCK; } /* * MMC_SWITCH argument format: * * [31:26] Always 0 * [25:24] Access Mode * [23:16] Location of target Byte in EXT_CSD * [15:08] Value Byte * [07:03] Always 0 * [02:00] Command Set */ /* MMC status in R1, for native mode (SPI bits are different) Type e : error bit s : status bit r : detected and set for the actual command response x : detected and set during command execution. the host must poll the card by sending status command in order to read these bits. Clear condition a : according to the card state b : always related to the previous command. 
Reception of a valid command will clear it (with a delay of one command) c : clear by read */ #define R1_OUT_OF_RANGE (1 << 31) /* er, c */ #define R1_ADDRESS_ERROR (1 << 30) /* erx, c */ #define R1_BLOCK_LEN_ERROR (1 << 29) /* er, c */ #define R1_ERASE_SEQ_ERROR (1 << 28) /* er, c */ #define R1_ERASE_PARAM (1 << 27) /* ex, c */ #define R1_WP_VIOLATION (1 << 26) /* erx, c */ #define R1_CARD_IS_LOCKED (1 << 25) /* sx, a */ #define R1_LOCK_UNLOCK_FAILED (1 << 24) /* erx, c */ #define R1_COM_CRC_ERROR (1 << 23) /* er, b */ #define R1_ILLEGAL_COMMAND (1 << 22) /* er, b */ #define R1_CARD_ECC_FAILED (1 << 21) /* ex, c */ #define R1_CC_ERROR (1 << 20) /* erx, c */ #define R1_ERROR (1 << 19) /* erx, c */ #define R1_UNDERRUN (1 << 18) /* ex, c */ #define R1_OVERRUN (1 << 17) /* ex, c */ #define R1_CID_CSD_OVERWRITE (1 << 16) /* erx, c, CID/CSD overwrite */ #define R1_WP_ERASE_SKIP (1 << 15) /* sx, c */ #define R1_CARD_ECC_DISABLED (1 << 14) /* sx, a */ #define R1_ERASE_RESET (1 << 13) /* sr, c */ #define R1_STATUS(x) (x & 0xFFF9A000) #define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ #define R1_READY_FOR_DATA (1 << 8) /* sx, a */ #define R1_SWITCH_ERROR (1 << 7) /* sx, c */ #define R1_EXCEPTION_EVENT (1 << 6) /* sr, a */ #define R1_APP_CMD (1 << 5) /* sr, c */ #define R1_STATE_IDLE 0 #define R1_STATE_READY 1 #define R1_STATE_IDENT 2 #define R1_STATE_STBY 3 #define R1_STATE_TRAN 4 #define R1_STATE_DATA 5 #define R1_STATE_RCV 6 #define R1_STATE_PRG 7 #define R1_STATE_DIS 8 /* * MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS * R1 is the low order byte; R2 is the next highest byte, when present. */ #define R1_SPI_IDLE (1 << 0) #define R1_SPI_ERASE_RESET (1 << 1) #define R1_SPI_ILLEGAL_COMMAND (1 << 2) #define R1_SPI_COM_CRC (1 << 3) #define R1_SPI_ERASE_SEQ (1 << 4) #define R1_SPI_ADDRESS (1 << 5) #define R1_SPI_PARAMETER (1 << 6) /* R1 bit 7 is always zero */ #define R2_SPI_CARD_LOCKED (1 << 8) #define R2_SPI_WP_ERASE_SKIP (1 << 9) /* or lock/unlock fail */ #define R2_SPI_LOCK_UNLOCK_FAIL R2_SPI_WP_ERASE_SKIP #define R2_SPI_ERROR (1 << 10) #define R2_SPI_CC_ERROR (1 << 11) #define R2_SPI_CARD_ECC_ERROR (1 << 12) #define R2_SPI_WP_VIOLATION (1 << 13) #define R2_SPI_ERASE_PARAM (1 << 14) #define R2_SPI_OUT_OF_RANGE (1 << 15) /* or CSD overwrite */ #define R2_SPI_CSD_OVERWRITE R2_SPI_OUT_OF_RANGE /* * OCR bits are mostly in host.h */ #define MMC_CARD_BUSY 0x80000000 /* Card Power up status bit */ /* * Card Command Classes (CCC) */ #define CCC_BASIC (1<<0) /* (0) Basic protocol functions */ /* (CMD0,1,2,3,4,7,9,10,12,13,15) */ /* (and for SPI, CMD58,59) */ #define CCC_STREAM_READ (1<<1) /* (1) Stream read commands */ /* (CMD11) */ #define CCC_BLOCK_READ (1<<2) /* (2) Block read commands */ /* (CMD16,17,18) */ #define CCC_STREAM_WRITE (1<<3) /* (3) Stream write commands */ /* (CMD20) */ #define CCC_BLOCK_WRITE (1<<4) /* (4) Block write commands */ /* (CMD16,24,25,26,27) */ #define CCC_ERASE (1<<5) /* (5) Ability to erase blocks */ /* (CMD32,33,34,35,36,37,38,39) */ #define CCC_WRITE_PROT (1<<6) /* (6) Able to write protect blocks */ /* (CMD28,29,30) */ #define CCC_LOCK_CARD (1<<7) /* (7) Able to lock down card */ /* (CMD16,CMD42) */ #define CCC_APP_SPEC (1<<8) /* (8) Application specific */ /* (CMD55,56,57,ACMD*) */ #define CCC_IO_MODE (1<<9) /* (9) I/O mode */ /* (CMD5,39,40,52,53) */ #define CCC_SWITCH (1<<10) /* (10) High speed switch */ /* (CMD6,34,35,36,37,50) */ /* (11) Reserved */ /* (CMD?) 
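) */

/*
 * Illustrative sketch, not part of the original header: how the native-mode
 * R1 helpers above are typically combined, e.g. when polling CMD13
 * (SEND_STATUS) until the card is back in TRAN state and can take new data.
 */
static inline bool example_r1_card_ready(u32 status)
{
	return R1_CURRENT_STATE(status) == R1_STATE_TRAN &&
	       (status & R1_READY_FOR_DATA);
}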
/* * CSD field definitions */ #define CSD_STRUCT_VER_1_0 0 /* Valid for system specification 1.0 - 1.2 */ #define CSD_STRUCT_VER_1_1 1 /* Valid for system specification 1.4 - 2.2 */ #define CSD_STRUCT_VER_1_2 2 /* Valid for system specification 3.1 - 3.2 - 3.31 - 4.0 - 4.1 */ #define CSD_STRUCT_EXT_CSD 3 /* Version is coded in CSD_STRUCTURE in EXT_CSD */ #define CSD_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.2 */ #define CSD_SPEC_VER_1 1 /* Implements system specification 1.4 */ #define CSD_SPEC_VER_2 2 /* Implements system specification 2.0 - 2.2 */ #define CSD_SPEC_VER_3 3 /* Implements system specification 3.1 - 3.2 - 3.31 */ #define CSD_SPEC_VER_4 4 /* Implements system specification 4.0 - 4.1 */
/* * EXT_CSD fields */ #define EXT_CSD_CMDQ_MODE_EN 15 /* R/W */ #define EXT_CSD_FLUSH_CACHE 32 /* W */ #define EXT_CSD_CACHE_CTRL 33 /* R/W */ #define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */ #define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */ #define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */ #define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */ #define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */ #define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ #define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ #define EXT_CSD_PARTITION_SETTING_COMPLETED 155 /* R/W */ #define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ #define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ #define EXT_CSD_HPI_MGMT 161 /* R/W */ #define EXT_CSD_RST_N_FUNCTION 162 /* R/W */ #define EXT_CSD_BKOPS_EN 163 /* R/W */ #define EXT_CSD_BKOPS_START 164 /* W */ #define EXT_CSD_SANITIZE_START 165 /* W */ #define EXT_CSD_WR_REL_PARAM 166 /* RO */ #define EXT_CSD_RPMB_MULT 168 /* RO */ #define EXT_CSD_FW_CONFIG 169 /* R/W */ #define EXT_CSD_BOOT_WP 173 /* R/W */ #define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ #define EXT_CSD_PART_CONFIG 179 /* R/W */ #define EXT_CSD_ERASED_MEM_CONT 181 /* RO */ #define EXT_CSD_BUS_WIDTH 183 /* R/W */ #define EXT_CSD_STROBE_SUPPORT 184 /* RO */ #define EXT_CSD_HS_TIMING 185 /* R/W */ #define EXT_CSD_POWER_CLASS 187 /* R/W */ #define EXT_CSD_REV 192 /* RO */ #define EXT_CSD_STRUCTURE 194 /* RO */ #define EXT_CSD_CARD_TYPE 196 /* RO */ #define EXT_CSD_DRIVER_STRENGTH 197 /* RO */ #define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */ #define EXT_CSD_PART_SWITCH_TIME 199 /* RO */ #define EXT_CSD_PWR_CL_52_195 200 /* RO */ #define EXT_CSD_PWR_CL_26_195 201 /* RO */ #define EXT_CSD_PWR_CL_52_360 202 /* RO */ #define EXT_CSD_PWR_CL_26_360 203 /* RO */ #define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ #define EXT_CSD_S_A_TIMEOUT 217 /* RO */ #define EXT_CSD_REL_WR_SEC_C 222 /* RO */ #define EXT_CSD_HC_WP_GRP_SIZE 221 /* RO */ #define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */ #define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */ #define EXT_CSD_BOOT_MULT 226 /* RO */ #define EXT_CSD_SEC_TRIM_MULT 229 /* RO */ #define EXT_CSD_SEC_ERASE_MULT 230 /* RO */ #define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */ #define EXT_CSD_TRIM_MULT 232 /* RO */ #define EXT_CSD_PWR_CL_200_195 236 /* RO */ #define EXT_CSD_PWR_CL_200_360 237 /* RO */ #define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */ #define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */ #define EXT_CSD_BKOPS_STATUS 246 /* RO */ #define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */ #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ #define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ #define EXT_CSD_PRE_EOL_INFO 267 /* RO */ #define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A 268 /* RO */ #define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B 269 /* RO */ #define EXT_CSD_CMDQ_DEPTH 307 /* RO */ #define EXT_CSD_CMDQ_SUPPORT 308 /* RO */ #define EXT_CSD_SUPPORTED_MODE 493 /* RO */ #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ #define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ #define EXT_CSD_MAX_PACKED_READS 501 /* RO */ #define EXT_CSD_BKOPS_SUPPORT 502 /* RO */ #define EXT_CSD_HPI_FEATURES 503 /* RO */
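/*
 * Illustrative sketch, not part of the original header: multi-byte EXT_CSD
 * fields are stored little-endian, so the 4-byte sector count at
 * EXT_CSD_SEC_CNT is assembled like this. "ext_csd" is assumed to point at
 * the 512-byte buffer returned by CMD8 (SEND_EXT_CSD).
 */
static inline u32 example_ext_csd_sec_cnt(const u8 *ext_csd)
{
	return (u32)ext_csd[EXT_CSD_SEC_CNT] |
	       (u32)ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
	       (u32)ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
	       (u32)ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
}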
/* * EXT_CSD field definitions */ #define EXT_CSD_WR_REL_PARAM_EN (1<<2) #define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40) #define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10) #define EXT_CSD_BOOT_WP_B_PERM_WP_EN (0x04) #define EXT_CSD_BOOT_WP_B_PWR_WP_EN (0x01) #define EXT_CSD_PART_CONFIG_ACC_MASK (0x7) #define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1) #define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3) #define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4) #define EXT_CSD_PART_SETTING_COMPLETED (0x1) #define EXT_CSD_PART_SUPPORT_PART_EN (0x1) #define EXT_CSD_CMD_SET_NORMAL (1<<0) #define EXT_CSD_CMD_SET_SECURE (1<<1) #define EXT_CSD_CMD_SET_CPSECURE (1<<2) #define EXT_CSD_CARD_TYPE_HS_26 (1<<0) /* Card can run at 26MHz */ #define EXT_CSD_CARD_TYPE_HS_52 (1<<1) /* Card can run at 52MHz */ #define EXT_CSD_CARD_TYPE_HS (EXT_CSD_CARD_TYPE_HS_26 | \ EXT_CSD_CARD_TYPE_HS_52) #define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */ /* DDR mode @1.8V or 3V I/O */ #define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */ /* DDR mode @1.2V I/O */ #define EXT_CSD_CARD_TYPE_DDR_52 (EXT_CSD_CARD_TYPE_DDR_1_8V \ | EXT_CSD_CARD_TYPE_DDR_1_2V) #define EXT_CSD_CARD_TYPE_HS200_1_8V (1<<4) /* Card can run at 200MHz */ #define EXT_CSD_CARD_TYPE_HS200_1_2V (1<<5) /* Card can run at 200MHz */ /* SDR mode @1.2V I/O */ #define EXT_CSD_CARD_TYPE_HS200 (EXT_CSD_CARD_TYPE_HS200_1_8V | \ EXT_CSD_CARD_TYPE_HS200_1_2V) #define EXT_CSD_CARD_TYPE_HS400_1_8V (1<<6) /* Card can run at 200MHz DDR, 1.8V */ #define EXT_CSD_CARD_TYPE_HS400_1_2V (1<<7) /* Card can run at 200MHz DDR, 1.2V */ #define EXT_CSD_CARD_TYPE_HS400 (EXT_CSD_CARD_TYPE_HS400_1_8V | \ EXT_CSD_CARD_TYPE_HS400_1_2V) #define EXT_CSD_CARD_TYPE_HS400ES (1<<8) /* Card can run at HS400ES */ #define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ #define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ #define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */ #define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */ #define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */ #define EXT_CSD_BUS_WIDTH_STROBE BIT(7) /* Enhanced strobe mode */ #define EXT_CSD_TIMING_BC 0 /* Backwards compatibility */ #define EXT_CSD_TIMING_HS 1 /* High speed */ #define EXT_CSD_TIMING_HS200 2 /* HS200 */ #define EXT_CSD_TIMING_HS400 3 /* HS400 */ #define EXT_CSD_DRV_STR_SHIFT 4 /* Driver Strength shift */ #define EXT_CSD_SEC_ER_EN BIT(0) #define EXT_CSD_SEC_BD_BLK_EN BIT(2) #define EXT_CSD_SEC_GB_CL_EN BIT(4) #define EXT_CSD_SEC_SANITIZE BIT(6) /* v4.5 only */ #define EXT_CSD_RST_N_EN_MASK 0x3 #define EXT_CSD_RST_N_ENABLED 1 /* RST_n is enabled on card */ #define EXT_CSD_NO_POWER_NOTIFICATION 0 #define EXT_CSD_POWER_ON 1 #define EXT_CSD_POWER_OFF_SHORT 2 #define EXT_CSD_POWER_OFF_LONG 3 #define EXT_CSD_PWR_CL_8BIT_MASK 0xF0 /* 8 bit PWR CLS */ #define EXT_CSD_PWR_CL_4BIT_MASK 0x0F /* 4 bit PWR CLS */ #define EXT_CSD_PWR_CL_8BIT_SHIFT 4 #define EXT_CSD_PWR_CL_4BIT_SHIFT 0 #define EXT_CSD_PACKED_EVENT_EN BIT(3)
/* * EXCEPTION_EVENT_STATUS field */ #define EXT_CSD_URGENT_BKOPS BIT(0) #define EXT_CSD_DYNCAP_NEEDED BIT(1) #define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2) #define EXT_CSD_PACKED_FAILURE BIT(3) #define EXT_CSD_PACKED_GENERIC_ERROR BIT(0) #define EXT_CSD_PACKED_INDEXED_ERROR BIT(1)
/* * BKOPS status level */ #define EXT_CSD_BKOPS_LEVEL_2 0x2
/* * BKOPS modes */ #define EXT_CSD_MANUAL_BKOPS_MASK 0x01 #define EXT_CSD_AUTO_BKOPS_MASK 0x02
/* * Command Queue */ #define EXT_CSD_CMDQ_MODE_ENABLED BIT(0) #define EXT_CSD_CMDQ_DEPTH_MASK GENMASK(4, 0) #define EXT_CSD_CMDQ_SUPPORTED BIT(0)
/* * MMC_SWITCH access modes */ #define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */ #define MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */ #define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */ #define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
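/*
 * Illustrative sketch, not part of the original header: packing a MMC_SWITCH
 * (CMD6) argument per the format documented earlier in this file -
 * [25:24] access mode, [23:16] EXT_CSD byte index, [15:8] value. The
 * command-set bits [2:0] are left at zero here for brevity. For example,
 * example_mmc_switch_arg(EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_8) requests
 * an 8-bit data bus.
 */
static inline u32 example_mmc_switch_arg(u8 index, u8 value)
{
	return ((u32)MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	       ((u32)index << 16) |
	       ((u32)value << 8);
}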
/* * Erase/trim/discard */ #define MMC_ERASE_ARG 0x00000000 #define MMC_SECURE_ERASE_ARG 0x80000000 #define MMC_TRIM_ARG 0x00000001 #define MMC_DISCARD_ARG 0x00000003 #define MMC_SECURE_TRIM1_ARG 0x80000001 #define MMC_SECURE_TRIM2_ARG 0x80008000 #define MMC_SECURE_ARGS 0x80000000 #define MMC_TRIM_OR_DISCARD_ARGS 0x00008003 #define mmc_driver_type_mask(n) (1 << (n)) #endif /* LINUX_MMC_MMC_H */ mmc/sdhci-pci-data.h 0000644 00000000741 14722070374 0010253 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_MMC_SDHCI_PCI_DATA_H #define LINUX_MMC_SDHCI_PCI_DATA_H struct pci_dev; struct sdhci_pci_data { struct pci_dev *pdev; int slotno; int rst_n_gpio; /* Set to -EINVAL if unused */ int cd_gpio; /* Set to -EINVAL if unused */ int (*setup)(struct sdhci_pci_data *data); void (*cleanup)(struct sdhci_pci_data *data); }; extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno); #endif mmc/sd.h 0000644 00000005100 14722070374 0006101 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mmc/sd.h * * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. */ #ifndef LINUX_MMC_SD_H #define LINUX_MMC_SD_H /* SD commands type argument response */ /* class 0 */ /* This is basically the same command as for MMC with some quirks.
*/ #define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */ #define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */ #define SD_SWITCH_VOLTAGE 11 /* ac R1 */ /* class 10 */ #define SD_SWITCH 6 /* adtc [31:0] See below R1 */ /* class 5 */ #define SD_ERASE_WR_BLK_START 32 /* ac [31:0] data addr R1 */ #define SD_ERASE_WR_BLK_END 33 /* ac [31:0] data addr R1 */ /* Application commands */ #define SD_APP_SET_BUS_WIDTH 6 /* ac [1:0] bus width R1 */ #define SD_APP_SD_STATUS 13 /* adtc R1 */ #define SD_APP_SEND_NUM_WR_BLKS 22 /* adtc R1 */ #define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ #define SD_APP_SEND_SCR 51 /* adtc R1 */ /* OCR bit definitions */ #define SD_OCR_S18R (1 << 24) /* 1.8V switching request */ #define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */ #define SD_OCR_XPC (1 << 28) /* SDXC power control */ #define SD_OCR_CCS (1 << 30) /* Card Capacity Status */ /* * SD_SWITCH argument format: * * [31] Check (0) or switch (1) * [30:24] Reserved (0) * [23:20] Function group 6 * [19:16] Function group 5 * [15:12] Function group 4 * [11:8] Function group 3 * [7:4] Function group 2 * [3:0] Function group 1 */ /* * SD_SEND_IF_COND argument format: * * [31:12] Reserved (0) * [11:8] Host Voltage Supply Flags * [7:0] Check Pattern (0xAA) */ /* * SCR field definitions */ #define SCR_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.01 */ #define SCR_SPEC_VER_1 1 /* Implements system specification 1.10 */ #define SCR_SPEC_VER_2 2 /* Implements system specification 2.00-3.0X */ /* * SD bus widths */ #define SD_BUS_WIDTH_1 0 #define SD_BUS_WIDTH_4 2 /* * SD_SWITCH mode */ #define SD_SWITCH_CHECK 0 #define SD_SWITCH_SET 1 /* * SD_SWITCH function groups */ #define SD_SWITCH_GRP_ACCESS 0 /* * SD_SWITCH access modes */ #define SD_SWITCH_ACCESS_DEF 0 #define SD_SWITCH_ACCESS_HS 1 /* * Erase/discard */ #define SD_ERASE_ARG 0x00000000 #define SD_DISCARD_ARG 0x00000001 #endif /* LINUX_MMC_SD_H */ mmc/host.h 0000644 00000050427 14722070374 0006464 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mmc/host.h * * Host driver specific definitions. */ #ifndef LINUX_MMC_HOST_H #define LINUX_MMC_HOST_H #include <linux/sched.h> #include <linux/device.h> #include <linux/fault-inject.h> #include <linux/mmc/core.h> #include <linux/mmc/card.h> #include <linux/mmc/pm.h> #include <linux/dma-direction.h> struct mmc_ios { unsigned int clock; /* clock rate */ unsigned short vdd; unsigned int power_delay_ms; /* waiting for stable power */ /* vdd stores the bit number of the selected voltage range from below. 
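   The numbering follows the OCR bit positions, i.e. the MMC_VDD_* masks
   defined in struct mmc_host below: vdd == 20, for example, selects the
   3.2 ~ 3.3 volt range (MMC_VDD_32_33).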
*/ unsigned char bus_mode; /* command output mode */ #define MMC_BUSMODE_OPENDRAIN 1 #define MMC_BUSMODE_PUSHPULL 2 unsigned char chip_select; /* SPI chip select */ #define MMC_CS_DONTCARE 0 #define MMC_CS_HIGH 1 #define MMC_CS_LOW 2 unsigned char power_mode; /* power supply mode */ #define MMC_POWER_OFF 0 #define MMC_POWER_UP 1 #define MMC_POWER_ON 2 #define MMC_POWER_UNDEFINED 3 unsigned char bus_width; /* data bus width */ #define MMC_BUS_WIDTH_1 0 #define MMC_BUS_WIDTH_4 2 #define MMC_BUS_WIDTH_8 3 unsigned char timing; /* timing specification used */ #define MMC_TIMING_LEGACY 0 #define MMC_TIMING_MMC_HS 1 #define MMC_TIMING_SD_HS 2 #define MMC_TIMING_UHS_SDR12 3 #define MMC_TIMING_UHS_SDR25 4 #define MMC_TIMING_UHS_SDR50 5 #define MMC_TIMING_UHS_SDR104 6 #define MMC_TIMING_UHS_DDR50 7 #define MMC_TIMING_MMC_DDR52 8 #define MMC_TIMING_MMC_HS200 9 #define MMC_TIMING_MMC_HS400 10 unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ #define MMC_SIGNAL_VOLTAGE_330 0 #define MMC_SIGNAL_VOLTAGE_180 1 #define MMC_SIGNAL_VOLTAGE_120 2 unsigned char drv_type; /* driver type (A, B, C, D) */ #define MMC_SET_DRIVER_TYPE_B 0 #define MMC_SET_DRIVER_TYPE_A 1 #define MMC_SET_DRIVER_TYPE_C 2 #define MMC_SET_DRIVER_TYPE_D 3 bool enhanced_strobe; /* hs400es selection */ }; struct mmc_host; struct mmc_host_ops { /* * It is optional for the host to implement pre_req and post_req in * order to support double buffering of requests (prepare one * request while another request is active). * pre_req() must always be followed by a post_req(). * To undo a call made to pre_req(), call post_req() with * a nonzero err condition. */ void (*post_req)(struct mmc_host *host, struct mmc_request *req, int err); void (*pre_req)(struct mmc_host *host, struct mmc_request *req); void (*request)(struct mmc_host *host, struct mmc_request *req); /* * Avoid calling the next three functions too often or in a "fast * path", since the underlying controller might implement them in an * expensive and/or slow way. Also note that these functions might * sleep, so don't call them in atomic context! */ /* * Notes to the set_ios callback: * ios->clock might be 0. For some controllers, setting 0Hz * works like any other frequency. However, some controllers * explicitly need to disable the clock. Otherwise e.g. voltage * switching might fail because the SDCLK is not really quiet. */ void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); /* * Return values for the get_ro callback should be: * 0 for a read/write card * 1 for a read-only card * -ENOSYS when not supported (equal to NULL callback) * or a negative errno value when something bad happened */ int (*get_ro)(struct mmc_host *host); /* * Return values for the get_cd callback should be: * 0 for an absent card * 1 for a present card * -ENOSYS when not supported (equal to NULL callback) * or a negative errno value when something bad happened */ int (*get_cd)(struct mmc_host *host); void (*enable_sdio_irq)(struct mmc_host *host, int enable); /* Mandatory callback when using MMC_CAP2_SDIO_IRQ_NOTHREAD.
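   The core invokes it once pending card interrupts have been processed,
   so the host controller can re-arm its SDIO interrupt.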
*/ void (*ack_sdio_irq)(struct mmc_host *host); /* optional callback for HC quirks */ void (*init_card)(struct mmc_host *host, struct mmc_card *card); int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios); /* Check if the card is pulling dat[0:3] low */ int (*card_busy)(struct mmc_host *host); /* The tuning command opcode value is different for SD and eMMC cards */ int (*execute_tuning)(struct mmc_host *host, u32 opcode); /* Prepare HS400 target operating frequency depending on the host driver */ int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios); /* Prepare switch to DDR during the HS400 init sequence */ int (*hs400_prepare_ddr)(struct mmc_host *host); /* Prepare for switching from HS400 to HS200 */ void (*hs400_downgrade)(struct mmc_host *host); /* Complete selection of HS400 */ void (*hs400_complete)(struct mmc_host *host); /* Prepare enhanced strobe depending on the host driver */ void (*hs400_enhanced_strobe)(struct mmc_host *host, struct mmc_ios *ios); int (*select_drive_strength)(struct mmc_card *card, unsigned int max_dtr, int host_drv, int card_drv, int *drv_type); void (*hw_reset)(struct mmc_host *host); void (*card_event)(struct mmc_host *host); /* * Optional callback to support controllers with HW issues for multiple * I/O. Returns the number of supported blocks for the request. */ int (*multi_io_quirk)(struct mmc_card *card, unsigned int direction, int blk_size); }; struct mmc_cqe_ops { /* Allocate resources, and make the CQE operational */ int (*cqe_enable)(struct mmc_host *host, struct mmc_card *card); /* Free resources, and make the CQE non-operational */ void (*cqe_disable)(struct mmc_host *host); /* * Issue a read, write or DCMD request to the CQE. Also deal with the * effect of ->cqe_off(). */ int (*cqe_request)(struct mmc_host *host, struct mmc_request *mrq); /* Free resources (e.g. DMA mapping) associated with the request */ void (*cqe_post_req)(struct mmc_host *host, struct mmc_request *mrq); /* * Prepare the CQE and host controller to accept non-CQ commands. There * is no corresponding ->cqe_on(), instead ->cqe_request() is required * to deal with that. */ void (*cqe_off)(struct mmc_host *host); /* * Wait for all CQE tasks to complete. Return an error if recovery * becomes necessary. */ int (*cqe_wait_for_idle)(struct mmc_host *host); /* * Notify CQE that a request has timed out. Return false if the request * completed, or true if a timeout happened; in that case, indicate * whether recovery is needed. */ bool (*cqe_timeout)(struct mmc_host *host, struct mmc_request *mrq, bool *recovery_needed); /* * Stop all CQE activity and prepare the CQE and host controller to * accept recovery commands. */ void (*cqe_recovery_start)(struct mmc_host *host); /* * Clear the queue and call mmc_cqe_request_done() on all requests. * Requests that errored will have the error set on the mmc_request * (data->error or cmd->error for DCMD). Requests that did not error * will have zero data bytes transferred. */ void (*cqe_recovery_finish)(struct mmc_host *host); }; struct mmc_async_req { /* active mmc request */ struct mmc_request *mrq; /* * Check error status of completed mmc request. * Returns 0 on success, otherwise non-zero. */ enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *); }; /** * struct mmc_slot - MMC slot functions * * @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL * @handler_priv: MMC/SD-card slot context * * Some MMC/SD host controllers implement slot-functions like card and * write-protect detection natively.
However, a large number of controllers * leave these functions to the CPU. This struct provides a hook to attach * such slot-function drivers. */ struct mmc_slot { int cd_irq; bool cd_wake_enabled; void *handler_priv; }; /** * mmc_context_info - synchronization details for mmc context * @is_done_rcv wake up reason was done request * @is_new_req wake up reason was new request * @is_waiting_last_req mmc context waiting for single running request * @wait wait queue */ struct mmc_context_info { bool is_done_rcv; bool is_new_req; bool is_waiting_last_req; wait_queue_head_t wait; }; struct regulator; struct mmc_pwrseq; struct mmc_supply { struct regulator *vmmc; /* Card power supply */ struct regulator *vqmmc; /* Optional Vccq supply */ }; struct mmc_ctx { struct task_struct *task; }; struct mmc_host { struct device *parent; struct device class_dev; int index; const struct mmc_host_ops *ops; struct mmc_pwrseq *pwrseq; unsigned int f_min; unsigned int f_max; unsigned int f_init; u32 ocr_avail; u32 ocr_avail_sdio; /* SDIO-specific OCR */ u32 ocr_avail_sd; /* SD-specific OCR */ u32 ocr_avail_mmc; /* MMC-specific OCR */ u32 max_current_330; u32 max_current_300; u32 max_current_180; #define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ #define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ #define MMC_VDD_21_22 0x00000200 /* VDD voltage 2.1 ~ 2.2 */ #define MMC_VDD_22_23 0x00000400 /* VDD voltage 2.2 ~ 2.3 */ #define MMC_VDD_23_24 0x00000800 /* VDD voltage 2.3 ~ 2.4 */ #define MMC_VDD_24_25 0x00001000 /* VDD voltage 2.4 ~ 2.5 */ #define MMC_VDD_25_26 0x00002000 /* VDD voltage 2.5 ~ 2.6 */ #define MMC_VDD_26_27 0x00004000 /* VDD voltage 2.6 ~ 2.7 */ #define MMC_VDD_27_28 0x00008000 /* VDD voltage 2.7 ~ 2.8 */ #define MMC_VDD_28_29 0x00010000 /* VDD voltage 2.8 ~ 2.9 */ #define MMC_VDD_29_30 0x00020000 /* VDD voltage 2.9 ~ 3.0 */ #define MMC_VDD_30_31 0x00040000 /* VDD voltage 3.0 ~ 3.1 */ #define MMC_VDD_31_32 0x00080000 /* VDD voltage 3.1 ~ 3.2 */ #define MMC_VDD_32_33 0x00100000 /* VDD voltage 3.2 ~ 3.3 */ #define MMC_VDD_33_34 0x00200000 /* VDD voltage 3.3 ~ 3.4 */ #define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */ #define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */ u32 caps; /* Host capabilities */ #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ #define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */ #define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */ #define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */ #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ #define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */ #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. 
eMMC */ #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ #define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */ #define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */ #define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */ #define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */ #define MMC_CAP_POWER_OFF_CARD (1 << 14) /* Can power off after boot */ #define MMC_CAP_BUS_WIDTH_TEST (1 << 15) /* CMD14/CMD19 bus width ok */ #define MMC_CAP_UHS_SDR12 (1 << 16) /* Host supports UHS SDR12 mode */ #define MMC_CAP_UHS_SDR25 (1 << 17) /* Host supports UHS SDR25 mode */ #define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */ #define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */ #define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */ #define MMC_CAP_UHS (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | \ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \ MMC_CAP_UHS_DDR50) #define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */ #define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. */ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ #define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */ #define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */ #define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */ #define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */ #define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */ u32 caps2; /* More host capabilities */ #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ #define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ MMC_CAP2_HS200_1_2V_SDR) #define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */ #define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ #define MMC_CAP2_HS400_1_8V (1 << 15) /* Can support HS400 1.8V */ #define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ #define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ MMC_CAP2_HS400_1_2V) #define MMC_CAP2_HSX00_1_8V (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V) #define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V) #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) #define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */ #define MMC_CAP2_NO_SDIO (1 << 19) /* Do not send SDIO commands during initialization */ #define MMC_CAP2_HS400_ES (1 << 20) /* Host supports enhanced strobe */ #define MMC_CAP2_NO_SD (1 << 21) /* Do not send SD commands during initialization */ #define MMC_CAP2_NO_MMC (1 << 22) /* Do not send (e)MMC commands during initialization */ #define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */ #define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */ #define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */ #define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */ int fixed_drv_type; /* fixed driver type for non-removable media */ 
mmc_pm_flag_t pm_caps; /* supported pm features */ /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ unsigned short max_segs; /* see blk_queue_max_segments */ unsigned short unused; unsigned int max_req_size; /* maximum number of bytes in one req */ unsigned int max_blk_size; /* maximum size of one mmc block */ unsigned int max_blk_count; /* maximum number of blocks in one req */ unsigned int max_busy_timeout; /* max busy timeout in ms */ /* private data */ spinlock_t lock; /* lock for claim and bus ops */ struct mmc_ios ios; /* current io bus settings */ /* group bitfields together to minimize padding */ unsigned int use_spi_crc:1; unsigned int claimed:1; /* host exclusively claimed */ unsigned int bus_dead:1; /* bus has been released */ unsigned int can_retune:1; /* re-tuning can be used */ unsigned int doing_retune:1; /* re-tuning in progress */ unsigned int retune_now:1; /* do re-tuning at next req */ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ unsigned int use_blk_mq:1; /* use blk-mq */ unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ unsigned int can_dma_map_merge:1; /* merging can be used */ unsigned int vqmmc_enabled:1; /* vqmmc regulator is enabled */ int rescan_disable; /* disable card detection */ int rescan_entered; /* used with nonremovable devices */ int need_retune; /* re-tuning is needed */ int hold_retune; /* hold off re-tuning */ unsigned int retune_period; /* re-tuning period in secs */ struct timer_list retune_timer; /* for periodic re-tuning */ bool trigger_card_event; /* card_event necessary */ struct mmc_card *card; /* device attached to this host */ wait_queue_head_t wq; struct mmc_ctx *claimer; /* context that has host claimed */ int claim_cnt; /* "claim" nesting count */ struct mmc_ctx default_ctx; /* default context */ struct delayed_work detect; int detect_change; /* card detect flag */ struct mmc_slot slot; const struct mmc_bus_ops *bus_ops; /* current bus driver */ unsigned int bus_refs; /* reference counter */ unsigned int sdio_irqs; struct task_struct *sdio_irq_thread; struct delayed_work sdio_irq_work; bool sdio_irq_pending; atomic_t sdio_irq_thread_abort; mmc_pm_flag_t pm_flags; /* requested pm features */ struct led_trigger *led; /* activity led */ #ifdef CONFIG_REGULATOR bool regulator_enabled; /* regulator state */ #endif struct mmc_supply supply; struct dentry *debugfs_root; /* Ongoing data transfer that allows commands during transfer */ struct mmc_request *ongoing_mrq; #ifdef CONFIG_FAIL_MMC_REQUEST struct fault_attr fail_mmc_request; #endif unsigned int actual_clock; /* Actual HC clock rate */ unsigned int slotno; /* used for sdio acpi binding */ int dsr_req; /* DSR value is valid */ u32 dsr; /* optional driver stage (DSR) value */ /* Command Queue Engine (CQE) support */ const struct mmc_cqe_ops *cqe_ops; void *cqe_private; int cqe_qdepth; bool cqe_enabled; bool cqe_on; unsigned long private[0] ____cacheline_aligned; }; struct device_node; struct mmc_host *mmc_alloc_host(int extra, struct device *); int mmc_add_host(struct mmc_host *); void mmc_remove_host(struct mmc_host *); void mmc_free_host(struct mmc_host *); int mmc_of_parse(struct mmc_host *host); int mmc_of_parse_voltage(struct device_node *np, u32 *mask); static inline void *mmc_priv(struct mmc_host *host) { return (void *)host->private; } static inline struct mmc_host *mmc_from_priv(void *priv) { return container_of(priv, struct mmc_host, private); } #define mmc_host_is_spi(host) ((host)->caps & 
MMC_CAP_SPI) #define mmc_dev(x) ((x)->parent) #define mmc_classdev(x) (&(x)->class_dev) #define mmc_hostname(x) (dev_name(&(x)->class_dev)) void mmc_detect_change(struct mmc_host *, unsigned long delay); void mmc_request_done(struct mmc_host *, struct mmc_request *); void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq); void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq); /* * May be called from host driver's system/runtime suspend/resume callbacks, * to know if SDIO IRQs have been claimed. */ static inline bool sdio_irq_claimed(struct mmc_host *host) { return host->sdio_irqs > 0; } static inline void mmc_signal_sdio_irq(struct mmc_host *host) { host->ops->enable_sdio_irq(host, 0); host->sdio_irq_pending = true; if (host->sdio_irq_thread) wake_up_process(host->sdio_irq_thread); } void sdio_signal_irq(struct mmc_host *host); #ifdef CONFIG_REGULATOR int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit); int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); #else static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit) { return 0; } static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios) { return -EINVAL; } #endif int mmc_regulator_get_supply(struct mmc_host *mmc); int mmc_regulator_enable_vqmmc(struct mmc_host *mmc); void mmc_regulator_disable_vqmmc(struct mmc_host *mmc); static inline int mmc_card_is_removable(struct mmc_host *host) { return !(host->caps & MMC_CAP_NONREMOVABLE); } static inline int mmc_card_keep_power(struct mmc_host *host) { return host->pm_flags & MMC_PM_KEEP_POWER; } static inline int mmc_card_wake_sdio_irq(struct mmc_host *host) { return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ; } /* TODO: Move to private header */ static inline int mmc_card_hs(struct mmc_card *card) { return card->host->ios.timing == MMC_TIMING_SD_HS || card->host->ios.timing == MMC_TIMING_MMC_HS; } /* TODO: Move to private header */ static inline int mmc_card_uhs(struct mmc_card *card) { return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 && card->host->ios.timing <= MMC_TIMING_UHS_DDR50; } void mmc_retune_timer_stop(struct mmc_host *host); static inline void mmc_retune_needed(struct mmc_host *host) { if (host->can_retune) host->need_retune = 1; } static inline bool mmc_can_retune(struct mmc_host *host) { return host->can_retune == 1; } static inline bool mmc_doing_retune(struct mmc_host *host) { return host->doing_retune == 1; } static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data) { return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE; } int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); int mmc_abort_tuning(struct mmc_host *host, u32 opcode); #endif /* LINUX_MMC_HOST_H */ mmc/sh_mmcif.h 0000644 00000012717 14722070374 0007274 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/mmc/sh_mmcif.h * * platform data for eMMC driver * * Copyright (C) 2010 Renesas Solutions Corp.
*/ #ifndef LINUX_MMC_SH_MMCIF_H #define LINUX_MMC_SH_MMCIF_H #include <linux/io.h> #include <linux/platform_device.h> /* * MMCIF : CE_CLK_CTRL [19:16] * 1000 : Peripheral clock / 512 * 0111 : Peripheral clock / 256 * 0110 : Peripheral clock / 128 * 0101 : Peripheral clock / 64 * 0100 : Peripheral clock / 32 * 0011 : Peripheral clock / 16 * 0010 : Peripheral clock / 8 * 0001 : Peripheral clock / 4 * 0000 : Peripheral clock / 2 * 1111 : Peripheral clock (sup_pclk set '1') */ struct sh_mmcif_plat_data { unsigned int slave_id_tx; /* embedded slave_id_[tr]x */ unsigned int slave_id_rx; u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */ unsigned long caps; u32 ocr; }; #define MMCIF_CE_CMD_SET 0x00000000 #define MMCIF_CE_ARG 0x00000008 #define MMCIF_CE_ARG_CMD12 0x0000000C #define MMCIF_CE_CMD_CTRL 0x00000010 #define MMCIF_CE_BLOCK_SET 0x00000014 #define MMCIF_CE_CLK_CTRL 0x00000018 #define MMCIF_CE_BUF_ACC 0x0000001C #define MMCIF_CE_RESP3 0x00000020 #define MMCIF_CE_RESP2 0x00000024 #define MMCIF_CE_RESP1 0x00000028 #define MMCIF_CE_RESP0 0x0000002C #define MMCIF_CE_RESP_CMD12 0x00000030 #define MMCIF_CE_DATA 0x00000034 #define MMCIF_CE_INT 0x00000040 #define MMCIF_CE_INT_MASK 0x00000044 #define MMCIF_CE_HOST_STS1 0x00000048 #define MMCIF_CE_HOST_STS2 0x0000004C #define MMCIF_CE_CLK_CTRL2 0x00000070 #define MMCIF_CE_VERSION 0x0000007C /* CE_BUF_ACC */ #define BUF_ACC_DMAWEN (1 << 25) #define BUF_ACC_DMAREN (1 << 24) #define BUF_ACC_BUSW_32 (0 << 17) #define BUF_ACC_BUSW_16 (1 << 17) #define BUF_ACC_ATYP (1 << 16) /* CE_CLK_CTRL */ #define CLK_ENABLE (1 << 24) /* 1: output mmc clock */ #define CLK_CLEAR (0xf << 16) #define CLK_SUP_PCLK (0xf << 16) #define CLKDIV_4 (1 << 16) /* mmc clock frequency. * n: bus clock/(2^(n+1)) */ #define CLKDIV_256 (7 << 16) /* mmc clock frequency. 
(see above) */ #define SRSPTO_256 (2 << 12) /* resp timeout */ #define SRBSYTO_29 (0xf << 8) /* resp busy timeout */ #define SRWDTO_29 (0xf << 4) /* read/write timeout */ #define SCCSTO_29 (0xf << 0) /* ccs timeout */ /* CE_VERSION */ #define SOFT_RST_ON (1 << 31) #define SOFT_RST_OFF 0 static inline u32 sh_mmcif_readl(void __iomem *addr, int reg) { return __raw_readl(addr + reg); } static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val) { __raw_writel(val, addr + reg); } #define SH_MMCIF_BBS 512 /* boot block size */ static inline void sh_mmcif_boot_cmd_send(void __iomem *base, unsigned long cmd, unsigned long arg) { sh_mmcif_writel(base, MMCIF_CE_INT, 0); sh_mmcif_writel(base, MMCIF_CE_ARG, arg); sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd); } static inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask) { unsigned long tmp; int cnt; for (cnt = 0; cnt < 1000000; cnt++) { tmp = sh_mmcif_readl(base, MMCIF_CE_INT); if (tmp & mask) { sh_mmcif_writel(base, MMCIF_CE_INT, tmp & ~mask); return 0; } } return -1; } static inline int sh_mmcif_boot_cmd(void __iomem *base, unsigned long cmd, unsigned long arg) { sh_mmcif_boot_cmd_send(base, cmd, arg); return sh_mmcif_boot_cmd_poll(base, 0x00010000); } static inline int sh_mmcif_boot_do_read_single(void __iomem *base, unsigned int block_nr, unsigned long *buf) { int k; /* CMD13 - Status */ sh_mmcif_boot_cmd(base, 0x0d400000, 0x00010000); if (sh_mmcif_readl(base, MMCIF_CE_RESP0) != 0x0900) return -1; /* CMD17 - Read */ sh_mmcif_boot_cmd(base, 0x11480000, block_nr * SH_MMCIF_BBS); if (sh_mmcif_boot_cmd_poll(base, 0x00100000) < 0) return -1; for (k = 0; k < (SH_MMCIF_BBS / 4); k++) buf[k] = sh_mmcif_readl(base, MMCIF_CE_DATA); return 0; } static inline int sh_mmcif_boot_do_read(void __iomem *base, unsigned long first_block, unsigned long nr_blocks, void *buf) { unsigned long k; int ret = 0; /* In data transfer mode: Set clock to Bus clock/4 (about 20 MHz) */ sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, CLK_ENABLE | CLKDIV_4 | SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); /* CMD9 - Get CSD */ sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000); /* CMD7 - Select the card */ sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000); /* CMD16 - Set the block size */ sh_mmcif_boot_cmd(base, 0x10400000, SH_MMCIF_BBS); for (k = 0; !ret && k < nr_blocks; k++) ret = sh_mmcif_boot_do_read_single(base, first_block + k, buf + (k * SH_MMCIF_BBS)); return ret; } static inline void sh_mmcif_boot_init(void __iomem *base) { /* reset */ sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_ON); sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_OFF); /* byte swap */ sh_mmcif_writel(base, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); /* Set block size in MMCIF hardware */ sh_mmcif_writel(base, MMCIF_CE_BLOCK_SET, SH_MMCIF_BBS); /* Enable the clock, set it to Bus clock/256 (about 325 kHz).
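   That keeps the interface below the 400 kHz limit that applies during
   card identification.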
*/ sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, CLK_ENABLE | CLKDIV_256 | SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); /* CMD0 */ sh_mmcif_boot_cmd(base, 0x00000040, 0); /* CMD1 - Get OCR */ do { sh_mmcif_boot_cmd(base, 0x01405040, 0x40300000); /* CMD1 */ } while ((sh_mmcif_readl(base, MMCIF_CE_RESP0) & 0x80000000) != 0x80000000); /* CMD2 - Get CID */ sh_mmcif_boot_cmd(base, 0x02806040, 0); /* CMD3 - Set card relative address */ sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000); } #endif /* LINUX_MMC_SH_MMCIF_H */ mmc/core.h 0000644 00000013577 14722070374 0006444 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/mmc/core.h */ #ifndef LINUX_MMC_CORE_H #define LINUX_MMC_CORE_H #include <linux/completion.h> #include <linux/types.h> struct mmc_data; struct mmc_request; enum mmc_blk_status { MMC_BLK_SUCCESS = 0, MMC_BLK_PARTIAL, MMC_BLK_CMD_ERR, MMC_BLK_RETRY, MMC_BLK_ABORT, MMC_BLK_DATA_ERR, MMC_BLK_ECC_ERR, MMC_BLK_NOMEDIUM, MMC_BLK_NEW_REQUEST, }; struct mmc_command { u32 opcode; u32 arg; #define MMC_CMD23_ARG_REL_WR (1 << 31) #define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30)) #define MMC_CMD23_ARG_TAG_REQ (1 << 29) u32 resp[4]; unsigned int flags; /* expected response type */ #define MMC_RSP_PRESENT (1 << 0) #define MMC_RSP_136 (1 << 1) /* 136 bit response */ #define MMC_RSP_CRC (1 << 2) /* expect valid crc */ #define MMC_RSP_BUSY (1 << 3) /* card may send busy */ #define MMC_RSP_OPCODE (1 << 4) /* response contains opcode */ #define MMC_CMD_MASK (3 << 5) /* non-SPI command type */ #define MMC_CMD_AC (0 << 5) #define MMC_CMD_ADTC (1 << 5) #define MMC_CMD_BC (2 << 5) #define MMC_CMD_BCR (3 << 5) #define MMC_RSP_SPI_S1 (1 << 7) /* one status byte */ #define MMC_RSP_SPI_S2 (1 << 8) /* second byte */ #define MMC_RSP_SPI_B4 (1 << 9) /* four data bytes */ #define MMC_RSP_SPI_BUSY (1 << 10) /* card may send busy */ /* * These are the native response types, and correspond to valid bit * patterns of the above flags. One additional valid pattern * is all zeros, which means we don't expect a response. */ #define MMC_RSP_NONE (0) #define MMC_RSP_R1 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) #define MMC_RSP_R1B (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY) #define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC) #define MMC_RSP_R3 (MMC_RSP_PRESENT) #define MMC_RSP_R4 (MMC_RSP_PRESENT) #define MMC_RSP_R5 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) #define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) #define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) /* Can be used by core to poll after switch to MMC HS mode */ #define MMC_RSP_R1_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE) #define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE)) /* * These are the SPI response types for MMC, SD, and SDIO cards. * Commands return R1, with maybe more info. Zero is an error type; * callers must always provide the appropriate MMC_RSP_SPI_Rx flags. */ #define MMC_RSP_SPI_R1 (MMC_RSP_SPI_S1) #define MMC_RSP_SPI_R1B (MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY) #define MMC_RSP_SPI_R2 (MMC_RSP_SPI_S1|MMC_RSP_SPI_S2) #define MMC_RSP_SPI_R3 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4) #define MMC_RSP_SPI_R4 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4) #define MMC_RSP_SPI_R5 (MMC_RSP_SPI_S1|MMC_RSP_SPI_S2) #define MMC_RSP_SPI_R7 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4) #define mmc_spi_resp_type(cmd) ((cmd)->flags & \ (MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY|MMC_RSP_SPI_S2|MMC_RSP_SPI_B4)) /* * These are the command types. 
*/ #define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK) unsigned int retries; /* max number of retries */ int error; /* command error */ /* * Standard errno values are used for errors, but some have specific * meaning in the MMC layer: * * ETIMEDOUT Card took too long to respond * EILSEQ Basic format problem with the received or sent data * (e.g. CRC check failed, incorrect opcode in response * or bad end bit) * EINVAL Request cannot be performed because of restrictions * in hardware and/or the driver * ENOMEDIUM Host can determine that the slot is empty and is * actively failing requests */ unsigned int busy_timeout; /* busy detect timeout in ms */ /* Set this flag only for blocking sanitize request */ bool sanitize_busy; struct mmc_data *data; /* data segment associated with cmd */ struct mmc_request *mrq; /* associated request */ }; struct mmc_data { unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */ unsigned int timeout_clks; /* data timeout (in clocks) */ unsigned int blksz; /* data block size */ unsigned int blocks; /* number of blocks */ unsigned int blk_addr; /* block address */ int error; /* data error */ unsigned int flags; #define MMC_DATA_WRITE BIT(8) #define MMC_DATA_READ BIT(9) /* Extra flags used by CQE */ #define MMC_DATA_QBR BIT(10) /* CQE queue barrier */ #define MMC_DATA_PRIO BIT(11) /* CQE high priority */ #define MMC_DATA_REL_WR BIT(12) /* Reliable write */ #define MMC_DATA_DAT_TAG BIT(13) /* Tag request */ #define MMC_DATA_FORCED_PRG BIT(14) /* Forced programming */ unsigned int bytes_xfered; struct mmc_command *stop; /* stop command */ struct mmc_request *mrq; /* associated request */ unsigned int sg_len; /* size of scatter list */ int sg_count; /* mapped sg entries */ struct scatterlist *sg; /* I/O scatter list */ s32 host_cookie; /* host private data */ }; struct mmc_host; struct mmc_request { struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */ struct mmc_command *cmd; struct mmc_data *data; struct mmc_command *stop; struct completion completion; struct completion cmd_completion; void (*done)(struct mmc_request *);/* completion function */ /* * Notify upper layers (e.g. mmc block driver) that recovery is needed * due to an error associated with the mmc_request. Currently used only * by CQE. */ void (*recovery_notifier)(struct mmc_request *); struct mmc_host *host; /* Allow other commands during this ongoing data transfer or busy wait */ bool cap_cmd_during_tfr; int tag; }; struct mmc_card; void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq); int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries); int mmc_hw_reset(struct mmc_host *host); int mmc_sw_reset(struct mmc_host *host); void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card); #endif /* LINUX_MMC_CORE_H */ fsl_ifc.h 0000644 00000060747 14722070374 0006336 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Freescale Integrated Flash Controller * * Copyright 2011 Freescale Semiconductor, Inc * * Author: Dipen Dudhat <dipen.dudhat@freescale.com> */ #ifndef __ASM_FSL_IFC_H #define __ASM_FSL_IFC_H #include <linux/compiler.h> #include <linux/types.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/interrupt.h> /* * The actual number of banks implemented depends on the IFC version * - IFC version 1.0 implements 4 banks. * - IFC version 1.1 onward implements 8 banks.
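 * Drivers can tell the two layouts apart by masking the controller's IP
 * revision register with FSL_IFC_VERSION_MASK (defined below).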
*/ #define FSL_IFC_BANK_COUNT 8 #define FSL_IFC_VERSION_MASK 0x0F0F0000 #define FSL_IFC_VERSION_1_0_0 0x01000000 #define FSL_IFC_VERSION_1_1_0 0x01010000 #define FSL_IFC_VERSION_2_0_0 0x02000000 #define PGOFFSET_64K (64*1024) #define PGOFFSET_4K (4*1024)
/* * CSPR - Chip Select Property Register */ #define CSPR_BA 0xFFFF0000 #define CSPR_BA_SHIFT 16 #define CSPR_PORT_SIZE 0x00000180 #define CSPR_PORT_SIZE_SHIFT 7 /* Port Size 8 bit */ #define CSPR_PORT_SIZE_8 0x00000080 /* Port Size 16 bit */ #define CSPR_PORT_SIZE_16 0x00000100 /* Port Size 32 bit */ #define CSPR_PORT_SIZE_32 0x00000180 /* Write Protect */ #define CSPR_WP 0x00000040 #define CSPR_WP_SHIFT 6 /* Machine Select */ #define CSPR_MSEL 0x00000006 #define CSPR_MSEL_SHIFT 1 /* NOR */ #define CSPR_MSEL_NOR 0x00000000 /* NAND */ #define CSPR_MSEL_NAND 0x00000002 /* GPCM */ #define CSPR_MSEL_GPCM 0x00000004 /* Bank Valid */ #define CSPR_V 0x00000001 #define CSPR_V_SHIFT 0
/* * Address Mask Register */ #define IFC_AMASK_MASK 0xFFFF0000 #define IFC_AMASK_SHIFT 16 #define IFC_AMASK(n) (IFC_AMASK_MASK << \ (__ilog2(n) - IFC_AMASK_SHIFT))
/* * Chip Select Option Register IFC_NAND Machine */ /* Enable ECC Encoder */ #define CSOR_NAND_ECC_ENC_EN 0x80000000 #define CSOR_NAND_ECC_MODE_MASK 0x30000000 /* 4 bit correction per 520 Byte sector */ #define CSOR_NAND_ECC_MODE_4 0x00000000 /* 8 bit correction per 528 Byte sector */ #define CSOR_NAND_ECC_MODE_8 0x10000000 /* Enable ECC Decoder */ #define CSOR_NAND_ECC_DEC_EN 0x04000000 /* Row Address Length */ #define CSOR_NAND_RAL_MASK 0x01800000 #define CSOR_NAND_RAL_SHIFT 20 #define CSOR_NAND_RAL_1 0x00000000 #define CSOR_NAND_RAL_2 0x00800000 #define CSOR_NAND_RAL_3 0x01000000 #define CSOR_NAND_RAL_4 0x01800000 /* Page Size 512b, 2k, 4k */ #define CSOR_NAND_PGS_MASK 0x00180000 #define CSOR_NAND_PGS_SHIFT 16 #define CSOR_NAND_PGS_512 0x00000000 #define CSOR_NAND_PGS_2K 0x00080000 #define CSOR_NAND_PGS_4K 0x00100000 #define CSOR_NAND_PGS_8K 0x00180000 /* Spare region Size */ #define CSOR_NAND_SPRZ_MASK 0x0000E000 #define CSOR_NAND_SPRZ_SHIFT 13 #define CSOR_NAND_SPRZ_16 0x00000000 #define CSOR_NAND_SPRZ_64 0x00002000 #define CSOR_NAND_SPRZ_128 0x00004000 #define CSOR_NAND_SPRZ_210 0x00006000 #define CSOR_NAND_SPRZ_218 0x00008000 #define CSOR_NAND_SPRZ_224 0x0000A000 #define CSOR_NAND_SPRZ_CSOR_EXT 0x0000C000 /* Pages Per Block */ #define CSOR_NAND_PB_MASK 0x00000700 #define CSOR_NAND_PB_SHIFT 8 #define CSOR_NAND_PB(n) ((__ilog2(n) - 5) << CSOR_NAND_PB_SHIFT) /* Time for Read Enable High to Output High Impedance */ #define CSOR_NAND_TRHZ_MASK 0x0000001C #define CSOR_NAND_TRHZ_SHIFT 2 #define CSOR_NAND_TRHZ_20 0x00000000 #define CSOR_NAND_TRHZ_40 0x00000004 #define CSOR_NAND_TRHZ_60 0x00000008 #define CSOR_NAND_TRHZ_80 0x0000000C #define CSOR_NAND_TRHZ_100 0x00000010 /* Buffer control disable */ #define CSOR_NAND_BCTLD 0x00000001
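/*
 * Illustrative sketch, not part of the original header: how the CSOR_NAND_*
 * fields above combine into a single chip-select option value. The part
 * described - 2K pages, 64 pages per block, 64-byte spare, 8-bit ECC - is
 * hypothetical.
 */
#define EXAMPLE_CSOR_NAND_2K	(CSOR_NAND_ECC_ENC_EN	/* ECC encode on */ \
				| CSOR_NAND_ECC_DEC_EN	/* ECC decode on */ \
				| CSOR_NAND_ECC_MODE_8	/* 8-bit correction */ \
				| CSOR_NAND_PGS_2K	/* 2K page size */ \
				| CSOR_NAND_SPRZ_64	/* 64-byte spare */ \
				| CSOR_NAND_PB(64)	/* 64 pages per block */ \
				| CSOR_NAND_TRHZ_20)	/* RE high to Hi-Z time */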
/* * Chip Select Option Register - NOR Flash Mode */ /* Enable Address shift Mode */ #define CSOR_NOR_ADM_SHFT_MODE_EN 0x80000000 /* Page Read Enable from NOR device */ #define CSOR_NOR_PGRD_EN 0x10000000 /* AVD Toggle Enable during Burst Program */ #define CSOR_NOR_AVD_TGL_PGM_EN 0x01000000 /* Address Data Multiplexing Shift */ #define CSOR_NOR_ADM_MASK 0x0003E000 #define CSOR_NOR_ADM_SHIFT_SHIFT 13 #define CSOR_NOR_ADM_SHIFT(n) ((n) << CSOR_NOR_ADM_SHIFT_SHIFT) /* Type of the NOR device hooked */ #define CSOR_NOR_NOR_MODE_AYSNC_NOR 0x00000000 #define CSOR_NOR_NOR_MODE_AVD_NOR 0x00000020 /* Time for Read Enable High to Output High Impedance */ #define CSOR_NOR_TRHZ_MASK 0x0000001C #define CSOR_NOR_TRHZ_SHIFT 2 #define CSOR_NOR_TRHZ_20 0x00000000 #define CSOR_NOR_TRHZ_40 0x00000004 #define CSOR_NOR_TRHZ_60 0x00000008 #define CSOR_NOR_TRHZ_80 0x0000000C #define CSOR_NOR_TRHZ_100 0x00000010 /* Buffer control disable */ #define CSOR_NOR_BCTLD 0x00000001
/* * Chip Select Option Register - GPCM Mode */ /* GPCM Mode - Normal */ #define CSOR_GPCM_GPMODE_NORMAL 0x00000000 /* GPCM Mode - GenericASIC */ #define CSOR_GPCM_GPMODE_ASIC 0x80000000 /* Parity Mode odd/even */ #define CSOR_GPCM_PARITY_EVEN 0x40000000 /* Parity Checking enable/disable */ #define CSOR_GPCM_PAR_EN 0x20000000 /* GPCM Timeout Count */ #define CSOR_GPCM_GPTO_MASK 0x0F000000 #define CSOR_GPCM_GPTO_SHIFT 24 #define CSOR_GPCM_GPTO(n) ((__ilog2(n) - 8) << CSOR_GPCM_GPTO_SHIFT) /* GPCM External Access Termination mode for read access */ #define CSOR_GPCM_RGETA_EXT 0x00080000 /* GPCM External Access Termination mode for write access */ #define CSOR_GPCM_WGETA_EXT 0x00040000 /* Address Data Multiplexing Shift */ #define CSOR_GPCM_ADM_MASK 0x0003E000 #define CSOR_GPCM_ADM_SHIFT_SHIFT 13 #define CSOR_GPCM_ADM_SHIFT(n) ((n) << CSOR_GPCM_ADM_SHIFT_SHIFT) /* Generic ASIC Parity error indication delay */ #define CSOR_GPCM_GAPERRD_MASK 0x00000180 #define CSOR_GPCM_GAPERRD_SHIFT 7 #define CSOR_GPCM_GAPERRD(n) (((n) - 1) << CSOR_GPCM_GAPERRD_SHIFT) /* Time for Read Enable High to Output High Impedance */ #define CSOR_GPCM_TRHZ_MASK 0x0000001C #define CSOR_GPCM_TRHZ_20 0x00000000 #define CSOR_GPCM_TRHZ_40 0x00000004 #define CSOR_GPCM_TRHZ_60 0x00000008 #define CSOR_GPCM_TRHZ_80 0x0000000C #define CSOR_GPCM_TRHZ_100 0x00000010 /* Buffer control disable */ #define CSOR_GPCM_BCTLD 0x00000001
/* * Ready Busy Status Register (RB_STAT) */ /* CSn is READY */ #define IFC_RB_STAT_READY_CS0 0x80000000 #define IFC_RB_STAT_READY_CS1 0x40000000 #define IFC_RB_STAT_READY_CS2 0x20000000 #define IFC_RB_STAT_READY_CS3 0x10000000
/* * General Control Register (GCR) */ #define IFC_GCR_MASK 0x8000F800 /* reset all IFC hardware */ #define IFC_GCR_SOFT_RST_ALL 0x80000000 /* Turnaround Time of external buffer */ #define IFC_GCR_TBCTL_TRN_TIME 0x0000F800 #define IFC_GCR_TBCTL_TRN_TIME_SHIFT 11
/* * Common Event and Error Status Register (CM_EVTER_STAT) */ /* Chip select error */ #define IFC_CM_EVTER_STAT_CSER 0x80000000
/* * Common Event and Error Enable Register (CM_EVTER_EN) */ /* Chip select error checking enable */ #define IFC_CM_EVTER_EN_CSEREN 0x80000000
/* * Common Event and Error Interrupt Enable Register (CM_EVTER_INTR_EN) */ /* Chip select error interrupt enable */ #define IFC_CM_EVTER_INTR_EN_CSERIREN 0x80000000
/* * Common Transfer Error Attribute Register-0 (CM_ERATTR0) */ /* transaction type of error Read/Write */ #define IFC_CM_ERATTR0_ERTYP_READ 0x80000000 #define IFC_CM_ERATTR0_ERAID 0x0FF00000 #define IFC_CM_ERATTR0_ERAID_SHIFT 20 #define IFC_CM_ERATTR0_ESRCID 0x0000FF00 #define IFC_CM_ERATTR0_ESRCID_SHIFT 8
/* * Clock Control Register (CCR) */ #define IFC_CCR_MASK 0x0F0F8800 /* Clock division ratio */ #define IFC_CCR_CLK_DIV_MASK 0x0F000000 #define IFC_CCR_CLK_DIV_SHIFT 24 #define IFC_CCR_CLK_DIV(n) ((n-1) << IFC_CCR_CLK_DIV_SHIFT) /* IFC Clock Delay */ #define IFC_CCR_CLK_DLY_MASK 0x000F0000 #define IFC_CCR_CLK_DLY_SHIFT 16 #define IFC_CCR_CLK_DLY(n) ((n) << IFC_CCR_CLK_DLY_SHIFT) /* Invert IFC clock before sending out */ #define IFC_CCR_INV_CLK_EN 0x00008000 /* Feedback IFC Clock */ #define IFC_CCR_FB_IFC_CLK_SEL 0x00000800
/* * Clock Status Register (CSR) */ /* Clk is stable */ #define IFC_CSR_CLK_STAT_STABLE
0x80000000 /* * IFC_NAND Machine Specific Registers */ /* * NAND Configuration Register (NCFGR) */ /* Auto Boot Mode */ #define IFC_NAND_NCFGR_BOOT 0x80000000 /* SRAM Initialization */ #define IFC_NAND_NCFGR_SRAM_INIT_EN 0x20000000 /* Addressing Mode-ROW0+n/COL0 */ #define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000 /* Addressing Mode-ROW0+n/COL0+n */ #define IFC_NAND_NCFGR_ADDR_MODE_RC1 0x00400000 /* Number of loop iterations of FIR sequences for multi page operations */ #define IFC_NAND_NCFGR_NUM_LOOP_MASK 0x0000F000 #define IFC_NAND_NCFGR_NUM_LOOP_SHIFT 12 #define IFC_NAND_NCFGR_NUM_LOOP(n) ((n) << IFC_NAND_NCFGR_NUM_LOOP_SHIFT) /* Number of wait cycles */ #define IFC_NAND_NCFGR_NUM_WAIT_MASK 0x000000FF #define IFC_NAND_NCFGR_NUM_WAIT_SHIFT 0 /* * NAND Flash Command Registers (NAND_FCR0/NAND_FCR1) */ /* General purpose FCM flash command bytes CMD0-CMD7 */ #define IFC_NAND_FCR0_CMD0 0xFF000000 #define IFC_NAND_FCR0_CMD0_SHIFT 24 #define IFC_NAND_FCR0_CMD1 0x00FF0000 #define IFC_NAND_FCR0_CMD1_SHIFT 16 #define IFC_NAND_FCR0_CMD2 0x0000FF00 #define IFC_NAND_FCR0_CMD2_SHIFT 8 #define IFC_NAND_FCR0_CMD3 0x000000FF #define IFC_NAND_FCR0_CMD3_SHIFT 0 #define IFC_NAND_FCR1_CMD4 0xFF000000 #define IFC_NAND_FCR1_CMD4_SHIFT 24 #define IFC_NAND_FCR1_CMD5 0x00FF0000 #define IFC_NAND_FCR1_CMD5_SHIFT 16 #define IFC_NAND_FCR1_CMD6 0x0000FF00 #define IFC_NAND_FCR1_CMD6_SHIFT 8 #define IFC_NAND_FCR1_CMD7 0x000000FF #define IFC_NAND_FCR1_CMD7_SHIFT 0 /* * Flash ROW and COL Address Register (ROWn, COLn) */ /* Main/spare region locator */ #define IFC_NAND_COL_MS 0x80000000 /* Column Address */ #define IFC_NAND_COL_CA_MASK 0x00000FFF /* * NAND Flash Byte Count Register (NAND_BC) */ /* Byte Count for read/Write */ #define IFC_NAND_BC 0x000001FF /* * NAND Flash Instruction Registers (NAND_FIR0/NAND_FIR1/NAND_FIR2) */ /* NAND Machine specific opcodes OP0-OP14*/ #define IFC_NAND_FIR0_OP0 0xFC000000 #define IFC_NAND_FIR0_OP0_SHIFT 26 #define IFC_NAND_FIR0_OP1 0x03F00000 #define IFC_NAND_FIR0_OP1_SHIFT 20 #define IFC_NAND_FIR0_OP2 0x000FC000 #define IFC_NAND_FIR0_OP2_SHIFT 14 #define IFC_NAND_FIR0_OP3 0x00003F00 #define IFC_NAND_FIR0_OP3_SHIFT 8 #define IFC_NAND_FIR0_OP4 0x000000FC #define IFC_NAND_FIR0_OP4_SHIFT 2 #define IFC_NAND_FIR1_OP5 0xFC000000 #define IFC_NAND_FIR1_OP5_SHIFT 26 #define IFC_NAND_FIR1_OP6 0x03F00000 #define IFC_NAND_FIR1_OP6_SHIFT 20 #define IFC_NAND_FIR1_OP7 0x000FC000 #define IFC_NAND_FIR1_OP7_SHIFT 14 #define IFC_NAND_FIR1_OP8 0x00003F00 #define IFC_NAND_FIR1_OP8_SHIFT 8 #define IFC_NAND_FIR1_OP9 0x000000FC #define IFC_NAND_FIR1_OP9_SHIFT 2 #define IFC_NAND_FIR2_OP10 0xFC000000 #define IFC_NAND_FIR2_OP10_SHIFT 26 #define IFC_NAND_FIR2_OP11 0x03F00000 #define IFC_NAND_FIR2_OP11_SHIFT 20 #define IFC_NAND_FIR2_OP12 0x000FC000 #define IFC_NAND_FIR2_OP12_SHIFT 14 #define IFC_NAND_FIR2_OP13 0x00003F00 #define IFC_NAND_FIR2_OP13_SHIFT 8 #define IFC_NAND_FIR2_OP14 0x000000FC #define IFC_NAND_FIR2_OP14_SHIFT 2 /* * Instruction opcodes to be programmed * in FIR registers- 6bits */ enum ifc_nand_fir_opcodes { IFC_FIR_OP_NOP, IFC_FIR_OP_CA0, IFC_FIR_OP_CA1, IFC_FIR_OP_CA2, IFC_FIR_OP_CA3, IFC_FIR_OP_RA0, IFC_FIR_OP_RA1, IFC_FIR_OP_RA2, IFC_FIR_OP_RA3, IFC_FIR_OP_CMD0, IFC_FIR_OP_CMD1, IFC_FIR_OP_CMD2, IFC_FIR_OP_CMD3, IFC_FIR_OP_CMD4, IFC_FIR_OP_CMD5, IFC_FIR_OP_CMD6, IFC_FIR_OP_CMD7, IFC_FIR_OP_CW0, IFC_FIR_OP_CW1, IFC_FIR_OP_CW2, IFC_FIR_OP_CW3, IFC_FIR_OP_CW4, IFC_FIR_OP_CW5, IFC_FIR_OP_CW6, IFC_FIR_OP_CW7, IFC_FIR_OP_WBCD, IFC_FIR_OP_RBCD, IFC_FIR_OP_BTRD, IFC_FIR_OP_RDSTAT, IFC_FIR_OP_NWAIT, 
IFC_FIR_OP_WFR, IFC_FIR_OP_SBRD, IFC_FIR_OP_UA, IFC_FIR_OP_RB, }; /* * NAND Chip Select Register (NAND_CSEL) */ #define IFC_NAND_CSEL 0x0C000000 #define IFC_NAND_CSEL_SHIFT 26 #define IFC_NAND_CSEL_CS0 0x00000000 #define IFC_NAND_CSEL_CS1 0x04000000 #define IFC_NAND_CSEL_CS2 0x08000000 #define IFC_NAND_CSEL_CS3 0x0C000000 /* * NAND Operation Sequence Start (NANDSEQ_STRT) */ /* NAND Flash Operation Start */ #define IFC_NAND_SEQ_STRT_FIR_STRT 0x80000000 /* Automatic Erase */ #define IFC_NAND_SEQ_STRT_AUTO_ERS 0x00800000 /* Automatic Program */ #define IFC_NAND_SEQ_STRT_AUTO_PGM 0x00100000 /* Automatic Copyback */ #define IFC_NAND_SEQ_STRT_AUTO_CPB 0x00020000 /* Automatic Read Operation */ #define IFC_NAND_SEQ_STRT_AUTO_RD 0x00004000 /* Automatic Status Read */ #define IFC_NAND_SEQ_STRT_AUTO_STAT_RD 0x00000800 /* * NAND Event and Error Status Register (NAND_EVTER_STAT) */ /* Operation Complete */ #define IFC_NAND_EVTER_STAT_OPC 0x80000000 /* Flash Timeout Error */ #define IFC_NAND_EVTER_STAT_FTOER 0x08000000 /* Write Protect Error */ #define IFC_NAND_EVTER_STAT_WPER 0x04000000 /* ECC Error */ #define IFC_NAND_EVTER_STAT_ECCER 0x02000000 /* RCW Load Done */ #define IFC_NAND_EVTER_STAT_RCW_DN 0x00008000 /* Boot Loader Done */ #define IFC_NAND_EVTER_STAT_BOOT_DN 0x00004000 /* Bad Block Indicator search select */ #define IFC_NAND_EVTER_STAT_BBI_SRCH_SE 0x00000800 /* * NAND Flash Page Read Completion Event Status Register * (PGRDCMPL_EVT_STAT) */ #define PGRDCMPL_EVT_STAT_MASK 0xFFFF0000 /* Small Page 0-15 Done */ #define PGRDCMPL_EVT_STAT_SECTION_SP(n) (1 << (31 - (n))) /* Large Page(2K) 0-3 Done */ #define PGRDCMPL_EVT_STAT_LP_2K(n) (0xF << (28 - (n)*4)) /* Large Page(4K) 0-1 Done */ #define PGRDCMPL_EVT_STAT_LP_4K(n) (0xFF << (24 - (n)*8)) /* * NAND Event and Error Enable Register (NAND_EVTER_EN) */ /* Operation complete event enable */ #define IFC_NAND_EVTER_EN_OPC_EN 0x80000000 /* Page read complete event enable */ #define IFC_NAND_EVTER_EN_PGRDCMPL_EN 0x20000000 /* Flash Timeout error enable */ #define IFC_NAND_EVTER_EN_FTOER_EN 0x08000000 /* Write Protect error enable */ #define IFC_NAND_EVTER_EN_WPER_EN 0x04000000 /* ECC error logging enable */ #define IFC_NAND_EVTER_EN_ECCER_EN 0x02000000 /* * NAND Event and Error Interrupt Enable Register (NAND_EVTER_INTR_EN) */ /* Enable interrupt for operation complete */ #define IFC_NAND_EVTER_INTR_OPCIR_EN 0x80000000 /* Enable interrupt for Page read complete */ #define IFC_NAND_EVTER_INTR_PGRDCMPLIR_EN 0x20000000 /* Enable interrupt for Flash timeout error */ #define IFC_NAND_EVTER_INTR_FTOERIR_EN 0x08000000 /* Enable interrupt for Write protect error */ #define IFC_NAND_EVTER_INTR_WPERIR_EN 0x04000000 /* Enable interrupt for ECC error */ #define IFC_NAND_EVTER_INTR_ECCERIR_EN 0x02000000 /* * NAND Transfer Error Attribute Register-0 (NAND_ERATTR0) */ #define IFC_NAND_ERATTR0_MASK 0x0C080000 /* Error on CS0-3 for NAND */ #define IFC_NAND_ERATTR0_ERCS_CS0 0x00000000 #define IFC_NAND_ERATTR0_ERCS_CS1 0x04000000 #define IFC_NAND_ERATTR0_ERCS_CS2 0x08000000 #define IFC_NAND_ERATTR0_ERCS_CS3 0x0C000000 /* Transaction type of error Read/Write */ #define IFC_NAND_ERATTR0_ERTTYPE_READ 0x00080000 /* * NAND Flash Status Register (NAND_FSR) */ /* First byte of data read from read status op */ #define IFC_NAND_NFSR_RS0 0xFF000000 /* Second byte of data read from read status op */ #define IFC_NAND_NFSR_RS1 0x00FF0000 /* * ECC Error Status Registers (ECCSTAT0-ECCSTAT3) */ /* Number of ECC errors on sector n (n = 0-15) */ #define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_MASK 0x0F000000
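/*
 * Example (an illustrative sketch, not part of the original header): with
 * the per-sector masks and shifts below, the ECC error count for sector s
 * (0-15) of a page read through a struct fsl_ifc_runtime __iomem *ifc is
 *	(ifc_in32(&ifc->ifc_nand.nand_eccstat[s / 4]) >> ((3 - s % 4) * 8)) & 0xF
 */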
#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_SHIFT 24 #define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_MASK 0x000F0000 #define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_SHIFT 16 #define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_MASK 0x00000F00 #define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_SHIFT 8 #define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_MASK 0x0000000F #define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_SHIFT 0 #define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_MASK 0x0F000000 #define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_SHIFT 24 #define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_MASK 0x000F0000 #define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_SHIFT 16 #define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_MASK 0x00000F00 #define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_SHIFT 8 #define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_MASK 0x0000000F #define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_SHIFT 0 #define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_MASK 0x0F000000 #define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_SHIFT 24 #define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_MASK 0x000F0000 #define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_SHIFT 16 #define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_MASK 0x00000F00 #define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_SHIFT 8 #define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_MASK 0x0000000F #define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_SHIFT 0 #define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_MASK 0x0F000000 #define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_SHIFT 24 #define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_MASK 0x000F0000 #define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_SHIFT 16 #define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_MASK 0x00000F00 #define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_SHIFT 8 #define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_MASK 0x0000000F #define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_SHIFT 0 /* * NAND Control Register (NANDCR) */ #define IFC_NAND_NCR_FTOCNT_MASK 0x1E000000 #define IFC_NAND_NCR_FTOCNT_SHIFT 25 #define IFC_NAND_NCR_FTOCNT(n) ((__ilog2(n) - 8) << IFC_NAND_NCR_FTOCNT_SHIFT) /* * NAND_AUTOBOOT_TRGR */ /* Trigger RCW load */ #define IFC_NAND_AUTOBOOT_TRGR_RCW_LD 0x80000000 /* Trigger Auto Boot */ #define IFC_NAND_AUTOBOOT_TRGR_BOOT_LD 0x20000000 /* * NAND_MDR */ /* 1st read data byte when opcode SBRD */ #define IFC_NAND_MDR_RDATA0 0xFF000000 /* 2nd read data byte when opcode SBRD */ #define IFC_NAND_MDR_RDATA1 0x00FF0000 /* * NOR Machine Specific Registers */ /* * NOR Event and Error Status Register (NOR_EVTER_STAT) */ /* NOR Command Sequence Operation Complete */ #define IFC_NOR_EVTER_STAT_OPC_NOR 0x80000000 /* Write Protect Error */ #define IFC_NOR_EVTER_STAT_WPER 0x04000000 /* Command Sequence Timeout Error */ #define IFC_NOR_EVTER_STAT_STOER 0x01000000 /* * NOR Event and Error Enable Register (NOR_EVTER_EN) */ /* NOR Command Seq complete event enable */ #define IFC_NOR_EVTER_EN_OPCEN_NOR 0x80000000 /* Write Protect Error Checking Enable */ #define IFC_NOR_EVTER_EN_WPEREN 0x04000000 /* Timeout Error Enable */ #define IFC_NOR_EVTER_EN_STOEREN 0x01000000 /* * NOR Event and Error Interrupt Enable Register (NOR_EVTER_INTR_EN) */ /* Enable interrupt for OPC complete */ #define IFC_NOR_EVTER_INTR_OPCEN_NOR 0x80000000 /* Enable interrupt for write protect error */ #define IFC_NOR_EVTER_INTR_WPEREN 0x04000000 /* Enable interrupt for timeout error */ #define IFC_NOR_EVTER_INTR_STOEREN 0x01000000 /* * NOR Transfer Error Attribute Register-0 (NOR_ERATTR0) */ /* Source ID for error transaction */ #define IFC_NOR_ERATTR0_ERSRCID 0xFF000000 /* AXI ID for error transaction */ #define IFC_NOR_ERATTR0_ERAID 0x000FF000 /* Chip select corresponds to NOR error */ #define IFC_NOR_ERATTR0_ERCS_CS0 0x00000000 #define IFC_NOR_ERATTR0_ERCS_CS1
0x00000010 #define IFC_NOR_ERATTR0_ERCS_CS2 0x00000020 #define IFC_NOR_ERATTR0_ERCS_CS3 0x00000030 /* Type of transaction read/write */ #define IFC_NOR_ERATTR0_ERTYPE_READ 0x00000001 /* * NOR Transfer Error Attribute Register-2 (NOR_ERATTR2) */ #define IFC_NOR_ERATTR2_ER_NUM_PHASE_EXP 0x000F0000 #define IFC_NOR_ERATTR2_ER_NUM_PHASE_PER 0x00000F00 /* * NOR Control Register (NORCR) */ #define IFC_NORCR_MASK 0x0F0F0000 /* No. of Address/Data Phase */ #define IFC_NORCR_NUM_PHASE_MASK 0x0F000000 #define IFC_NORCR_NUM_PHASE_SHIFT 24 #define IFC_NORCR_NUM_PHASE(n) ((n-1) << IFC_NORCR_NUM_PHASE_SHIFT) /* Sequence Timeout Count */ #define IFC_NORCR_STOCNT_MASK 0x000F0000 #define IFC_NORCR_STOCNT_SHIFT 16 #define IFC_NORCR_STOCNT(n) ((__ilog2(n) - 8) << IFC_NORCR_STOCNT_SHIFT) /* * GPCM Machine specific registers */ /* * GPCM Event and Error Status Register (GPCM_EVTER_STAT) */ /* Timeout error */ #define IFC_GPCM_EVTER_STAT_TOER 0x04000000 /* Parity error */ #define IFC_GPCM_EVTER_STAT_PER 0x01000000 /* * GPCM Event and Error Enable Register (GPCM_EVTER_EN) */ /* Timeout error enable */ #define IFC_GPCM_EVTER_EN_TOER_EN 0x04000000 /* Parity error enable */ #define IFC_GPCM_EVTER_EN_PER_EN 0x01000000 /* * GPCM Event and Error Interrupt Enable Register (GPCM_EVTER_INTR_EN) */ /* Enable Interrupt for timeout error */ #define IFC_GPCM_EEIER_TOERIR_EN 0x04000000 /* Enable Interrupt for Parity error */ #define IFC_GPCM_EEIER_PERIR_EN 0x01000000 /* * GPCM Transfer Error Attribute Register-0 (GPCM_ERATTR0) */ /* Source ID for error transaction */ #define IFC_GPCM_ERATTR0_ERSRCID 0xFF000000 /* AXI ID for error transaction */ #define IFC_GPCM_ERATTR0_ERAID 0x000FF000 /* Chip select corresponds to GPCM error */ #define IFC_GPCM_ERATTR0_ERCS_CS0 0x00000000 #define IFC_GPCM_ERATTR0_ERCS_CS1 0x00000040 #define IFC_GPCM_ERATTR0_ERCS_CS2 0x00000080 #define IFC_GPCM_ERATTR0_ERCS_CS3 0x000000C0 /* Type of transaction read/Write */ #define IFC_GPCM_ERATTR0_ERTYPE_READ 0x00000001 /* * GPCM Transfer Error Attribute Register-2 (GPCM_ERATTR2) */ /* On which beat of address/data parity error is observed */ #define IFC_GPCM_ERATTR2_PERR_BEAT 0x00000C00 /* Parity Error on byte */ #define IFC_GPCM_ERATTR2_PERR_BYTE 0x000000F0 /* Parity Error reported in addr or data phase */ #define IFC_GPCM_ERATTR2_PERR_DATA_PHASE 0x00000001 /* * GPCM Status Register (GPCM_STAT) */ #define IFC_GPCM_STAT_BSY 0x80000000 /* GPCM is busy */ /* * IFC Controller NAND Machine registers */ struct fsl_ifc_nand { __be32 ncfgr; u32 res1[0x4]; __be32 nand_fcr0; __be32 nand_fcr1; u32 res2[0x8]; __be32 row0; u32 res3; __be32 col0; u32 res4; __be32 row1; u32 res5; __be32 col1; u32 res6; __be32 row2; u32 res7; __be32 col2; u32 res8; __be32 row3; u32 res9; __be32 col3; u32 res10[0x24]; __be32 nand_fbcr; u32 res11; __be32 nand_fir0; __be32 nand_fir1; __be32 nand_fir2; u32 res12[0x10]; __be32 nand_csel; u32 res13; __be32 nandseq_strt; u32 res14; __be32 nand_evter_stat; u32 res15; __be32 pgrdcmpl_evt_stat; u32 res16[0x2]; __be32 nand_evter_en; u32 res17[0x2]; __be32 nand_evter_intr_en; __be32 nand_vol_addr_stat; u32 res18; __be32 nand_erattr0; __be32 nand_erattr1; u32 res19[0x10]; __be32 nand_fsr; u32 res20; __be32 nand_eccstat[8]; u32 res21[0x1c]; __be32 nanndcr; u32 res22[0x2]; __be32 nand_autoboot_trgr; u32 res23; __be32 nand_mdr; u32 res24[0x1C]; __be32 nand_dll_lowcfg0; __be32 nand_dll_lowcfg1; u32 res25; __be32 nand_dll_lowstat; u32 res26[0x3c]; }; /* * IFC controller NOR Machine registers */ struct fsl_ifc_nor { __be32 nor_evter_stat; u32 res1[0x2]; 
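/*
 * (Added note: the res* members in these register blocks are reserved
 * padding that keeps each register at its hardware-documented offset.)
 */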
__be32 nor_evter_en; u32 res2[0x2]; __be32 nor_evter_intr_en; u32 res3[0x2]; __be32 nor_erattr0; __be32 nor_erattr1; __be32 nor_erattr2; u32 res4[0x4]; __be32 norcr; u32 res5[0xEF]; }; /* * IFC controller GPCM Machine registers */ struct fsl_ifc_gpcm { __be32 gpcm_evter_stat; u32 res1[0x2]; __be32 gpcm_evter_en; u32 res2[0x2]; __be32 gpcm_evter_intr_en; u32 res3[0x2]; __be32 gpcm_erattr0; __be32 gpcm_erattr1; __be32 gpcm_erattr2; __be32 gpcm_stat; }; /* * IFC Controller Registers */ struct fsl_ifc_global { __be32 ifc_rev; u32 res1[0x2]; struct { __be32 cspr_ext; __be32 cspr; u32 res2; } cspr_cs[FSL_IFC_BANK_COUNT]; u32 res3[0xd]; struct { __be32 amask; u32 res4[0x2]; } amask_cs[FSL_IFC_BANK_COUNT]; u32 res5[0xc]; struct { __be32 csor; __be32 csor_ext; u32 res6; } csor_cs[FSL_IFC_BANK_COUNT]; u32 res7[0xc]; struct { __be32 ftim[4]; u32 res8[0x8]; } ftim_cs[FSL_IFC_BANK_COUNT]; u32 res9[0x30]; __be32 rb_stat; __be32 rb_map; __be32 wb_map; __be32 ifc_gcr; u32 res10[0x2]; __be32 cm_evter_stat; u32 res11[0x2]; __be32 cm_evter_en; u32 res12[0x2]; __be32 cm_evter_intr_en; u32 res13[0x2]; __be32 cm_erattr0; __be32 cm_erattr1; u32 res14[0x2]; __be32 ifc_ccr; __be32 ifc_csr; __be32 ddr_ccr_low; }; struct fsl_ifc_runtime { struct fsl_ifc_nand ifc_nand; struct fsl_ifc_nor ifc_nor; struct fsl_ifc_gpcm ifc_gpcm; }; extern unsigned int convert_ifc_address(phys_addr_t addr_base); extern int fsl_ifc_find(phys_addr_t addr_base); /* overview of the fsl ifc controller */ struct fsl_ifc_ctrl { /* device info */ struct device *dev; struct fsl_ifc_global __iomem *gregs; struct fsl_ifc_runtime __iomem *rregs; int irq; int nand_irq; spinlock_t lock; void *nand; int version; int banks; u32 nand_stat; wait_queue_head_t nand_wait; bool little_endian; }; extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; static inline u32 ifc_in32(void __iomem *addr) { u32 val; if (fsl_ifc_ctrl_dev->little_endian) val = ioread32(addr); else val = ioread32be(addr); return val; } static inline u16 ifc_in16(void __iomem *addr) { u16 val; if (fsl_ifc_ctrl_dev->little_endian) val = ioread16(addr); else val = ioread16be(addr); return val; } static inline u8 ifc_in8(void __iomem *addr) { return ioread8(addr); } static inline void ifc_out32(u32 val, void __iomem *addr) { if (fsl_ifc_ctrl_dev->little_endian) iowrite32(val, addr); else iowrite32be(val, addr); } static inline void ifc_out16(u16 val, void __iomem *addr) { if (fsl_ifc_ctrl_dev->little_endian) iowrite16(val, addr); else iowrite16be(val, addr); } static inline void ifc_out8(u8 val, void __iomem *addr) { iowrite8(val, addr); } #endif /* __ASM_FSL_IFC_H */ stringhash.h 0000644 00000005233 14722070374 0007100 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_STRINGHASH_H #define __LINUX_STRINGHASH_H #include <linux/compiler.h> /* For __pure */ #include <linux/types.h> /* For u32, u64 */ #include <linux/hash.h> /* * Routines for hashing strings of bytes to a 32-bit hash value. * * These hash functions are NOT GUARANTEED STABLE between kernel * versions, architectures, or even repeated boots of the same kernel. * (E.g. they may depend on boot-time hardware detection or be * deliberately randomized.) * * They are also not intended to be secure against collisions caused by * malicious inputs; much slower hash functions are required for that. * * They are optimized for pathname components, meaning short strings. * Even if a majority of files have longer names, the dynamic profile of * pathname components skews short due to short directory names. * (E.g. 
/usr/lib/libsesquipedalianism.so.3.141.) */ /* * Version 1: one byte at a time. Example of use: * * unsigned long hash = init_name_hash; * while (*p) * hash = partial_name_hash(tolower(*p++), hash); * hash = end_name_hash(hash); * * Although this is designed for bytes, fs/hfsplus/unicode.c * abuses it to hash 16-bit values. */ /* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ #define init_name_hash(salt) (unsigned long)(salt) /* partial hash update function. Assume roughly 4 bits per character */ static inline unsigned long partial_name_hash(unsigned long c, unsigned long prevhash) { return (prevhash + (c << 4) + (c >> 4)) * 11; } /* * Finally: cut down the number of bits to an int value (and try to avoid * losing bits). This also has the property (wanted by the dcache) * that the msbits make a good hash table index. */ static inline unsigned int end_name_hash(unsigned long hash) { return hash_long(hash, 32); } /* * Version 2: One word (32 or 64 bits) at a time. * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h> * exists, which describes major Linux platforms like x86 and ARM), then * this computes a different hash function much faster. * * If not set, this falls back to a wrapper around the preceding. */ extern unsigned int __pure full_name_hash(const void *salt, const char *, unsigned int); /* * A hash_len is a u64 with the hash of a string in the low * half and the length in the high half. */ #define hashlen_hash(hashlen) ((u32)(hashlen)) #define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) #define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash)) /* Return the "hash_len" (hash and length) of a null-terminated string */ extern u64 __pure hashlen_string(const void *salt, const char *name); #endif /* __LINUX_STRINGHASH_H */ inetdevice.h 0000644 00000021233 14722070374 0007043 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_INETDEVICE_H #define _LINUX_INETDEVICE_H #ifdef __KERNEL__ #include <linux/bitmap.h> #include <linux/if.h> #include <linux/ip.h> #include <linux/netdevice.h> #include <linux/rcupdate.h> #include <linux/timer.h> #include <linux/sysctl.h> #include <linux/rtnetlink.h> #include <linux/refcount.h> struct ipv4_devconf { void *sysctl; int data[IPV4_DEVCONF_MAX]; DECLARE_BITMAP(state, IPV4_DEVCONF_MAX); }; #define MC_HASH_SZ_LOG 9 struct in_device { struct net_device *dev; refcount_t refcnt; int dead; struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */ struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */ struct ip_mc_list __rcu * __rcu *mc_hash; int mc_count; /* Number of installed mcasts */ spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; unsigned long mr_v1_seen; unsigned long mr_v2_seen; unsigned long mr_maxdelay; unsigned long mr_qi; /* Query Interval */ unsigned long mr_qri; /* Query Response Interval */ unsigned char mr_qrv; /* Query Robustness Variable */ unsigned char mr_gq_running; u32 mr_ifc_count; struct timer_list mr_gq_timer; /* general query timer */ struct timer_list mr_ifc_timer; /* interface change timer */ struct neigh_parms *arp_parms; struct ipv4_devconf cnf; struct rcu_head rcu_head; }; #define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1]) #define IPV4_DEVCONF_ALL(net, attr) \ IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr) static inline int ipv4_devconf_get(struct in_device *in_dev, int index) { index--; return in_dev->cnf.data[index]; } static inline void ipv4_devconf_set(struct in_device *in_dev, int index, int val) { index--; set_bit(index,
in_dev->cnf.state); in_dev->cnf.data[index] = val; } static inline void ipv4_devconf_setall(struct in_device *in_dev) { bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX); } #define IN_DEV_CONF_GET(in_dev, attr) \ ipv4_devconf_get((in_dev), IPV4_DEVCONF_ ## attr) #define IN_DEV_CONF_SET(in_dev, attr, val) \ ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val)) #define IN_DEV_ANDCONF(in_dev, attr) \ (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \ IN_DEV_CONF_GET((in_dev), attr)) #define IN_DEV_NET_ORCONF(in_dev, net, attr) \ (IPV4_DEVCONF_ALL(net, attr) || \ IN_DEV_CONF_GET((in_dev), attr)) #define IN_DEV_ORCONF(in_dev, attr) \ IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr) #define IN_DEV_MAXCONF(in_dev, attr) \ (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \ IN_DEV_CONF_GET((in_dev), attr))) #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) #define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), BC_FORWARDING) #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) #define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ ACCEPT_SOURCE_ROUTE) #define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL) #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) #define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS) #define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP) #define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN) #define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA) #define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS) #define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \ SECURE_REDIRECTS) #define IN_DEV_IDTAG(in_dev) IN_DEV_CONF_GET(in_dev, TAG) #define IN_DEV_MEDIUM_ID(in_dev) IN_DEV_CONF_GET(in_dev, MEDIUM_ID) #define IN_DEV_PROMOTE_SECONDARIES(in_dev) \ IN_DEV_ORCONF((in_dev), \ PROMOTE_SECONDARIES) #define IN_DEV_ROUTE_LOCALNET(in_dev) IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET) #define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net) \ IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET) #define IN_DEV_RX_REDIRECTS(in_dev) \ ((IN_DEV_FORWARD(in_dev) && \ IN_DEV_ANDCONF((in_dev), ACCEPT_REDIRECTS)) \ || (!IN_DEV_FORWARD(in_dev) && \ IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) #define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \ IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN) #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) #define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT) #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) #define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) #define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) struct in_ifaddr { struct hlist_node hash; struct in_ifaddr __rcu *ifa_next; struct in_device *ifa_dev; struct rcu_head rcu_head; __be32 ifa_local; __be32 ifa_address; __be32 ifa_mask; __u32 ifa_rt_priority; __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; __u32 ifa_flags; char ifa_label[IFNAMSIZ]; /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. 
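For example (illustrative arithmetic, not from the original header): with HZ == 1000 and ifa_valid_lft == 600, the address stays valid for 600 * 1000 = 600000 jiffies after ifa_tstamp, i.e. ten minutes.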
*/ __u32 ifa_valid_lft; __u32 ifa_preferred_lft; unsigned long ifa_cstamp; /* created timestamp */ unsigned long ifa_tstamp; /* updated timestamp */ }; struct in_validator_info { __be32 ivi_addr; struct in_device *ivi_dev; struct netlink_ext_ack *extack; }; int register_inetaddr_notifier(struct notifier_block *nb); int unregister_inetaddr_notifier(struct notifier_block *nb); int register_inetaddr_validator_notifier(struct notifier_block *nb); int unregister_inetaddr_validator_notifier(struct notifier_block *nb); void inet_netconf_notify_devconf(struct net *net, int event, int type, int ifindex, struct ipv4_devconf *devconf); struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref); static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) { return __ip_dev_find(net, addr, true); } int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope); struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr); static inline bool inet_ifa_match(__be32 addr, const struct in_ifaddr *ifa) { return !((addr^ifa->ifa_address)&ifa->ifa_mask); } /* * Check if a mask is acceptable. */ static __inline__ bool bad_mask(__be32 mask, __be32 addr) { __u32 hmask; if (addr & (mask = ~mask)) return true; hmask = ntohl(mask); if (hmask & (hmask+1)) return true; return false; } #define in_dev_for_each_ifa_rtnl(ifa, in_dev) \ for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \ ifa = rtnl_dereference(ifa->ifa_next)) #define in_dev_for_each_ifa_rcu(ifa, in_dev) \ for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \ ifa = rcu_dereference(ifa->ifa_next)) static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) { return rcu_dereference(dev->ip_ptr); } static inline struct in_device *in_dev_get(const struct net_device *dev) { struct in_device *in_dev; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (in_dev) refcount_inc(&in_dev->refcnt); rcu_read_unlock(); return in_dev; } static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) { return rtnl_dereference(dev->ip_ptr); } /* called with rcu_read_lock or rtnl held */ static inline bool ip_ignore_linkdown(const struct net_device *dev) { struct in_device *in_dev; bool rc = false; in_dev = rcu_dereference_rtnl(dev->ip_ptr); if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) rc = true; return rc; } static inline struct neigh_parms *__in_dev_arp_parms_get_rcu(const struct net_device *dev) { struct in_device *in_dev = __in_dev_get_rcu(dev); return in_dev ? 
in_dev->arp_parms : NULL; } void in_dev_finish_destroy(struct in_device *idev); static inline void in_dev_put(struct in_device *idev) { if (refcount_dec_and_test(&idev->refcnt)) in_dev_finish_destroy(idev); } #define __in_dev_put(idev) refcount_dec(&(idev)->refcnt) #define in_dev_hold(idev) refcount_inc(&(idev)->refcnt) #endif /* __KERNEL__ */ static __inline__ __be32 inet_make_mask(int logmask) { if (logmask) return htonl(~((1U<<(32-logmask))-1)); return 0; } static __inline__ int inet_mask_len(__be32 mask) { __u32 hmask = ntohl(mask); if (!hmask) return 0; return 32 - ffz(~hmask); } #endif /* _LINUX_INETDEVICE_H */ bcm963xx_nvram.h 0000644 00000005734 14722070374 0007522 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BCM963XX_NVRAM_H__ #define __LINUX_BCM963XX_NVRAM_H__ #include <linux/crc32.h> #include <linux/if_ether.h> #include <linux/sizes.h> #include <linux/types.h> /* * Broadcom BCM963xx SoC board nvram data structure. * * The nvram structure varies in size depending on the SoC board version. Use * the appropriate minimum BCM963XX_NVRAM_*_SIZE define for the information * you need instead of sizeof(struct bcm963xx_nvram) as this may change. */ #define BCM963XX_NVRAM_V4_SIZE 300 #define BCM963XX_NVRAM_V5_SIZE (1 * SZ_1K) #define BCM963XX_DEFAULT_PSI_SIZE 64 enum bcm963xx_nvram_nand_part { BCM963XX_NVRAM_NAND_PART_BOOT = 0, BCM963XX_NVRAM_NAND_PART_ROOTFS_1, BCM963XX_NVRAM_NAND_PART_ROOTFS_2, BCM963XX_NVRAM_NAND_PART_DATA, BCM963XX_NVRAM_NAND_PART_BBT, __BCM963XX_NVRAM_NAND_NR_PARTS }; struct bcm963xx_nvram { u32 version; char bootline[256]; char name[16]; u32 main_tp_number; u32 psi_size; u32 mac_addr_count; u8 mac_addr_base[ETH_ALEN]; u8 __reserved1[2]; u32 checksum_v4; u8 __reserved2[292]; u32 nand_part_offset[__BCM963XX_NVRAM_NAND_NR_PARTS]; u32 nand_part_size[__BCM963XX_NVRAM_NAND_NR_PARTS]; u8 __reserved3[388]; u32 checksum_v5; }; #define BCM963XX_NVRAM_NAND_PART_OFFSET(nvram, part) \ bcm963xx_nvram_nand_part_offset(nvram, BCM963XX_NVRAM_NAND_PART_ ##part) static inline u64 __pure bcm963xx_nvram_nand_part_offset( const struct bcm963xx_nvram *nvram, enum bcm963xx_nvram_nand_part part) { return nvram->nand_part_offset[part] * SZ_1K; } #define BCM963XX_NVRAM_NAND_PART_SIZE(nvram, part) \ bcm963xx_nvram_nand_part_size(nvram, BCM963XX_NVRAM_NAND_PART_ ##part) static inline u64 __pure bcm963xx_nvram_nand_part_size( const struct bcm963xx_nvram *nvram, enum bcm963xx_nvram_nand_part part) { return nvram->nand_part_size[part] * SZ_1K; } /* * bcm963xx_nvram_checksum - Verify nvram checksum * * @nvram: pointer to full size nvram data structure * @expected_out: optional pointer to store expected checksum value * @actual_out: optional pointer to store actual checksum value * * Return: 0 if the checksum is valid, otherwise -EINVAL */ static int __maybe_unused bcm963xx_nvram_checksum( const struct bcm963xx_nvram *nvram, u32 *expected_out, u32 *actual_out) { u32 expected, actual; size_t len; if (nvram->version <= 4) { expected = nvram->checksum_v4; len = BCM963XX_NVRAM_V4_SIZE - sizeof(u32); } else { expected = nvram->checksum_v5; len = BCM963XX_NVRAM_V5_SIZE - sizeof(u32); } /* * Calculate the CRC32 value for the nvram with a checksum value * of 0 without modifying or copying the nvram by combining: * - The CRC32 of the nvram without the checksum value * - The CRC32 of a zero checksum value (which is also 0) */ actual = crc32_le_combine( crc32_le(~0, (u8 *)nvram, len), 0, sizeof(u32)); if (expected_out) *expected_out = expected; if (actual_out) *actual_out = actual; return 
expected == actual ? 0 : -EINVAL; }; #endif /* __LINUX_BCM963XX_NVRAM_H__ */ rcuwait.h 0000644 00000002355 14722070374 0006406 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RCUWAIT_H_ #define _LINUX_RCUWAIT_H_ #include <linux/rcupdate.h> /* * rcuwait provides a way of blocking and waking up a single * task in an rcu-safe manner. * * The only time @task is non-nil is when a user is blocked (or * checking if it needs to) on a condition, and reset as soon as we * know that the condition has succeeded and are awoken. */ struct rcuwait { struct task_struct __rcu *task; }; #define __RCUWAIT_INITIALIZER(name) \ { .task = NULL, } static inline void rcuwait_init(struct rcuwait *w) { w->task = NULL; } extern void rcuwait_wake_up(struct rcuwait *w); /* * The caller is responsible for locking around rcuwait_wait_event(), * such that writes to @task are properly serialized. */ #define rcuwait_wait_event(w, condition) \ ({ \ rcu_assign_pointer((w)->task, current); \ for (;;) { \ /* \ * Implicit barrier (A) pairs with (B) in \ * rcuwait_wake_up(). \ */ \ set_current_state(TASK_UNINTERRUPTIBLE); \ if (condition) \ break; \ \ schedule(); \ } \ \ WRITE_ONCE((w)->task, NULL); \ __set_current_state(TASK_RUNNING); \ }) #endif /* _LINUX_RCUWAIT_H_ */ types.h 0000644 00000012645 14722070374 0006077 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TYPES_H #define _LINUX_TYPES_H #define __EXPORTED_HEADERS__ #include <uapi/linux/types.h> #ifndef __ASSEMBLY__ #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] typedef u32 __kernel_dev_t; typedef __kernel_fd_set fd_set; typedef __kernel_dev_t dev_t; typedef __kernel_ino_t ino_t; typedef __kernel_mode_t mode_t; typedef unsigned short umode_t; typedef u32 nlink_t; typedef __kernel_off_t off_t; typedef __kernel_pid_t pid_t; typedef __kernel_daddr_t daddr_t; typedef __kernel_key_t key_t; typedef __kernel_suseconds_t suseconds_t; typedef __kernel_timer_t timer_t; typedef __kernel_clockid_t clockid_t; typedef __kernel_mqd_t mqd_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_uid16_t uid16_t; typedef __kernel_gid16_t gid16_t; typedef unsigned long uintptr_t; #ifdef CONFIG_HAVE_UID16 /* This is defined by include/asm-{arch}/posix_types.h */ typedef __kernel_old_uid_t old_uid_t; typedef __kernel_old_gid_t old_gid_t; #endif /* CONFIG_UID16 */ #if defined(__GNUC__) typedef __kernel_loff_t loff_t; #endif /* * The following typedefs are also protected by individual ifdefs for * historical reasons: */ #ifndef _SIZE_T #define _SIZE_T typedef __kernel_size_t size_t; #endif #ifndef _SSIZE_T #define _SSIZE_T typedef __kernel_ssize_t ssize_t; #endif #ifndef _PTRDIFF_T #define _PTRDIFF_T typedef __kernel_ptrdiff_t ptrdiff_t; #endif #ifndef _TIME_T #define _TIME_T typedef __kernel_time_t time_t; #endif #ifndef _CLOCK_T #define _CLOCK_T typedef __kernel_clock_t clock_t; #endif #ifndef _CADDR_T #define _CADDR_T typedef __kernel_caddr_t caddr_t; #endif /* bsd */ typedef unsigned char u_char; typedef unsigned short u_short; typedef unsigned int u_int; typedef unsigned long u_long; /* sysv */ typedef unsigned char unchar; typedef unsigned short ushort; typedef unsigned int uint; typedef unsigned long ulong; #ifndef __BIT_TYPES_DEFINED__ #define __BIT_TYPES_DEFINED__ typedef u8 u_int8_t; typedef s8 int8_t; typedef u16 u_int16_t; typedef s16 int16_t; typedef u32 u_int32_t; typedef s32 int32_t; #endif /* !(__BIT_TYPES_DEFINED__) */ typedef u8 uint8_t; typedef u16 uint16_t; typedef 
u32 uint32_t; #if defined(__GNUC__) typedef u64 uint64_t; typedef u64 u_int64_t; typedef s64 int64_t; #endif /* this is a special 64bit data type that is 8-byte aligned */ #define aligned_u64 __aligned_u64 #define aligned_be64 __aligned_be64 #define aligned_le64 __aligned_le64 /** * The type used for indexing onto a disc or disc partition. * * Linux always considers sectors to be 512 bytes long independently * of the device's real block size. * * blkcnt_t is the type of the inode's block count. */ typedef u64 sector_t; typedef u64 blkcnt_t; /* * The type of an index into the pagecache. */ #define pgoff_t unsigned long /* * A dma_addr_t can hold any valid DMA address, i.e., any address returned * by the DMA API. * * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32 * bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits, * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses, * so they don't care about the size of the actual bus addresses. */ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT typedef u64 dma_addr_t; #else typedef u32 dma_addr_t; #endif typedef unsigned int __bitwise gfp_t; typedef unsigned int __bitwise slab_flags_t; typedef unsigned int __bitwise fmode_t; #ifdef CONFIG_PHYS_ADDR_T_64BIT typedef u64 phys_addr_t; #else typedef u32 phys_addr_t; #endif typedef phys_addr_t resource_size_t; /* * This type is the placeholder for a hardware interrupt number. It has to be * big enough to enclose whatever representation is used by a given platform. */ typedef unsigned long irq_hw_number_t; typedef struct { int counter; } atomic_t; #ifdef CONFIG_64BIT typedef struct { s64 counter; } atomic64_t; #endif struct list_head { struct list_head *next, *prev; }; struct hlist_head { struct hlist_node *first; }; struct hlist_node { struct hlist_node *next, **pprev; }; struct ustat { __kernel_daddr_t f_tfree; __kernel_ino_t f_tinode; char f_fname[6]; char f_fpack[6]; }; /** * struct callback_head - callback structure for use with RCU and task_work * @next: next update requests in a list * @func: actual update function to call after the grace period. * * The struct is aligned to size of pointer. On most architectures it happens * naturally due to ABI requirements, but some architectures (like CRIS) have * a weird ABI and we need to ask for it explicitly. * * The alignment is required to guarantee that bit 0 of @next will be * clear under normal conditions -- as long as we use call_rcu() or * call_srcu() to queue the callback. * * This guarantee is important for a few reasons: * - future call_rcu_lazy() will make use of lower bits in the pointer; * - the structure shares storage space in struct page with @compound_head, * which encodes PageTail() in bit 0. The guarantee is needed to avoid * false-positive PageTail().
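*
* As a concrete illustration (an added note, assuming 64-bit pointers):
* sizeof(void *) == 8, so the aligned attribute below places every
* callback_head at an address that is a multiple of 8, leaving bits 0-2
* of any pointer to it clear and bit 0 free to carry a flag.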
*/ struct callback_head { struct callback_head *next; void (*func)(struct callback_head *head); } __attribute__((aligned(sizeof(void *)))); #define rcu_head callback_head typedef void (*rcu_callback_t)(struct rcu_head *head); typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); #endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ page_idle.h 0000644 00000005173 14722070374 0006642 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MM_PAGE_IDLE_H #define _LINUX_MM_PAGE_IDLE_H #include <linux/bitops.h> #include <linux/page-flags.h> #include <linux/page_ext.h> #ifdef CONFIG_IDLE_PAGE_TRACKING #ifdef CONFIG_64BIT static inline bool page_is_young(struct page *page) { return PageYoung(page); } static inline void set_page_young(struct page *page) { SetPageYoung(page); } static inline bool test_and_clear_page_young(struct page *page) { return TestClearPageYoung(page); } static inline bool page_is_idle(struct page *page) { return PageIdle(page); } static inline void set_page_idle(struct page *page) { SetPageIdle(page); } static inline void clear_page_idle(struct page *page) { ClearPageIdle(page); } #else /* !CONFIG_64BIT */ /* * If there is not enough space to store Idle and Young bits in page flags, use * page ext flags instead. */ extern struct page_ext_operations page_idle_ops; static inline bool page_is_young(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); if (unlikely(!page_ext)) return false; return test_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline void set_page_young(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); if (unlikely(!page_ext)) return; set_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool test_and_clear_page_young(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); if (unlikely(!page_ext)) return false; return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool page_is_idle(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); if (unlikely(!page_ext)) return false; return test_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void set_page_idle(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); if (unlikely(!page_ext)) return; set_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void clear_page_idle(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); if (unlikely(!page_ext)) return; clear_bit(PAGE_EXT_IDLE, &page_ext->flags); } #endif /* CONFIG_64BIT */ #else /* !CONFIG_IDLE_PAGE_TRACKING */ static inline bool page_is_young(struct page *page) { return false; } static inline void set_page_young(struct page *page) { } static inline bool test_and_clear_page_young(struct page *page) { return false; } static inline bool page_is_idle(struct page *page) { return false; } static inline void set_page_idle(struct page *page) { } static inline void clear_page_idle(struct page *page) { } #endif /* CONFIG_IDLE_PAGE_TRACKING */ #endif /* _LINUX_MM_PAGE_IDLE_H */ ioprio.h 0000644 00000004640 14722070374 0006230 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef IOPRIO_H #define IOPRIO_H #include <linux/sched.h> #include <linux/sched/rt.h> #include <linux/iocontext.h> /* * Gives us 8 prio classes with 13-bits of data for each class */ #define IOPRIO_CLASS_SHIFT (13) #define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) #define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT) #define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK) #define IOPRIO_PRIO_VALUE(class, 
data) (((class) << IOPRIO_CLASS_SHIFT) | data) #define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE) /* * These are the io priority groups as implemented by CFQ. RT is the realtime * class, it always gets premium service. BE is the best-effort scheduling * class, the default for any process. IDLE is the idle scheduling class, it * is only served when no one else is using the disk. */ enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE, }; /* * 8 best effort priority levels are supported */ #define IOPRIO_BE_NR (8) enum { IOPRIO_WHO_PROCESS = 1, IOPRIO_WHO_PGRP, IOPRIO_WHO_USER, }; /* * Fallback BE priority */ #define IOPRIO_NORM (4) /* * if process has set io priority explicitly, use that. if not, convert * the cpu scheduler nice value to an io priority */ static inline int task_nice_ioprio(struct task_struct *task) { return (task_nice(task) + 20) / 5; } /* * This is for the case where the task hasn't asked for a specific IO class. * Check for idle and rt task process, and return appropriate IO class. */ static inline int task_nice_ioclass(struct task_struct *task) { if (task->policy == SCHED_IDLE) return IOPRIO_CLASS_IDLE; else if (task_is_realtime(task)) return IOPRIO_CLASS_RT; else return IOPRIO_CLASS_BE; } /* * If the calling process has set an I/O priority, use that. Otherwise, return * the default I/O priority. */ static inline int get_current_ioprio(void) { struct io_context *ioc = current->io_context; if (ioc) return ioc->ioprio; return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); } /* * For inheritance, return the highest of the two given priorities */ extern int ioprio_best(unsigned short aprio, unsigned short bprio); extern int set_task_ioprio(struct task_struct *task, int ioprio); #ifdef CONFIG_BLOCK extern int ioprio_check_cap(int ioprio); #else static inline int ioprio_check_cap(int ioprio) { return -ENOTBLK; } #endif /* CONFIG_BLOCK */ #endif cnt32_to_63.h 0000644 00000006731 14722070374 0006675 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Extend a 32-bit counter to 63 bits * * Author: Nicolas Pitre * Created: December 3, 2006 * Copyright: MontaVista Software, Inc. */ #ifndef __LINUX_CNT32_TO_63_H__ #define __LINUX_CNT32_TO_63_H__ #include <linux/compiler.h> #include <linux/types.h> #include <asm/byteorder.h> /* this is used only to give gcc a clue about good code generation */ union cnt32_to_63 { struct { #if defined(__LITTLE_ENDIAN) u32 lo, hi; #elif defined(__BIG_ENDIAN) u32 hi, lo; #endif }; u64 val; }; /** * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter * @cnt_lo: The low part of the counter * * Many hardware clock counters are only 32 bits wide and therefore have * a relatively short period making wrap-arounds rather frequent. This * is a problem when implementing sched_clock() for example, where a 64-bit * non-wrapping monotonic value is expected to be returned. * * To overcome that limitation, let's extend a 32-bit counter to 63 bits * in a completely lock free fashion. Bits 0 to 31 of the clock are provided * by the hardware while bits 32 to 62 are stored in memory. The top bit in * memory is used to synchronize with the hardware clock half-period. When * the top bit of both counters (hardware and in memory) differ then the * memory is updated with a new value, incrementing it when the hardware * counter wraps around. 
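*
* A minimal usage sketch (illustrative only; read_hw_counter() and
* NSEC_PER_CYC are hypothetical, not part of this header):
*
*	u64 my_sched_clock(void)
*	{
*		u64 cyc = cnt32_to_63(read_hw_counter()) & ~(1ULL << 63);
*		return cyc * NSEC_PER_CYC;
*	}
*
* The mask drops the synchronization bit (bit 63), as required by the
* note at the end of this comment.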
* * Because a word store in memory is atomic, the incremented value will * always be in sync with the top bit indicating to any potential concurrent * reader if the value in memory is up to date or not with regards to the * needed increment. And any race in updating the value in memory is harmless * as the same value would simply be stored more than once. * * The restrictions for the algorithm to work properly are: * * 1) this code must be called at least once per each half period of the * 32-bit counter; * * 2) this code must not be preempted for a duration longer than the * 32-bit counter half period minus the longest period between two * calls to this code; * * Those requirements ensure proper update to the state bit in memory. * This is usually not a problem in practice, but if it is then a kernel * timer should be scheduled to ensure this code is executed often * enough. * * And finally: * * 3) the cnt_lo argument must be seen as a globally incrementing value, * meaning that it should be a direct reference to the counter data which * can be evaluated according to a specific ordering within the macro, * and not the result of a previous evaluation stored in a variable. * * For example, this is wrong: * * u32 partial = get_hw_count(); * u64 full = cnt32_to_63(partial); * return full; * * This is fine: * * u64 full = cnt32_to_63(get_hw_count()); * return full; * * Note that the top bit (bit 63) in the returned value should be considered * as garbage. It is not cleared here because callers are likely to use a * multiplier on the returned value which can get rid of the top bit * implicitly by making the multiplier even, therefore saving on a runtime * clear-bit instruction. Otherwise the caller must remember to clear the top * bit explicitly. */ #define cnt32_to_63(cnt_lo) \ ({ \ static u32 __m_cnt_hi; \ union cnt32_to_63 __x; \ __x.hi = __m_cnt_hi; \ smp_rmb(); \ __x.lo = (cnt_lo); \ if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ __x.val; \ }) #endif bcd.h 0000644 00000001057 14722070374 0005456 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCD_H #define _BCD_H #include <linux/compiler.h> #define bcd2bin(x) \ (__builtin_constant_p((u8 )(x)) ? \ const_bcd2bin(x) : \ _bcd2bin(x)) #define bin2bcd(x) \ (__builtin_constant_p((u8 )(x)) ? \ const_bin2bcd(x) : \ _bin2bcd(x)) #define const_bcd2bin(x) (((x) & 0x0f) + ((x) >> 4) * 10) #define const_bin2bcd(x) ((((x) / 10) << 4) + (x) % 10) unsigned _bcd2bin(unsigned char val) __attribute_const__; unsigned char _bin2bcd(unsigned val) __attribute_const__; #endif /* _BCD_H */ user.h 0000644 00000000026 14722070374 0005677 0 ustar 00 #include <asm/user.h> perf_event.h 0000644 00000125051 14722070374 0007064 0 ustar 00 /* * Performance events: * * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra * * Data type definitions, declarations, prototypes.
* * Started by: Thomas Gleixner and Ingo Molnar * * For licensing details see kernel-base/COPYING */ #ifndef _LINUX_PERF_EVENT_H #define _LINUX_PERF_EVENT_H #include <uapi/linux/perf_event.h> #include <uapi/linux/bpf_perf_event.h> /* * Kernel-internal data types and definitions: */ #ifdef CONFIG_PERF_EVENTS # include <asm/perf_event.h> # include <asm/local64.h> #endif struct perf_guest_info_callbacks { int (*is_in_guest)(void); int (*is_user_mode)(void); unsigned long (*get_guest_ip)(void); void (*handle_intel_pt_intr)(void); }; #ifdef CONFIG_HAVE_HW_BREAKPOINT #include <asm/hw_breakpoint.h> #endif #include <linux/list.h> #include <linux/mutex.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/spinlock.h> #include <linux/hrtimer.h> #include <linux/fs.h> #include <linux/pid_namespace.h> #include <linux/workqueue.h> #include <linux/ftrace.h> #include <linux/cpu.h> #include <linux/irq_work.h> #include <linux/static_key.h> #include <linux/jump_label_ratelimit.h> #include <linux/atomic.h> #include <linux/sysfs.h> #include <linux/perf_regs.h> #include <linux/cgroup.h> #include <linux/refcount.h> #include <asm/local.h> struct perf_callchain_entry { __u64 nr; __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */ }; struct perf_callchain_entry_ctx { struct perf_callchain_entry *entry; u32 max_stack; u32 nr; short contexts; bool contexts_maxed; }; typedef unsigned long (*perf_copy_f)(void *dst, const void *src, unsigned long off, unsigned long len); struct perf_raw_frag { union { struct perf_raw_frag *next; unsigned long pad; }; perf_copy_f copy; void *data; u32 size; } __packed; struct perf_raw_record { struct perf_raw_frag frag; u32 size; }; /* * branch stack layout: * nr: number of taken branches stored in entries[] * * Note that nr can vary from sample to sample * branches (to, from) are stored from most recent * to least recent, i.e., entries[0] contains the most * recent branch. */ struct perf_branch_stack { __u64 nr; struct perf_branch_entry entries[0]; }; struct task_struct; /* * extra PMU register associated with an event */ struct hw_perf_event_extra { u64 config; /* register value */ unsigned int reg; /* register address or index */ int alloc; /* extra register already allocated */ int idx; /* index in shared_regs->regs[] */ }; /** * struct hw_perf_event - performance event hardware details: */ struct hw_perf_event { #ifdef CONFIG_PERF_EVENTS union { struct { /* hardware */ u64 config; u64 last_tag; unsigned long config_base; unsigned long event_base; int event_base_rdpmc; int idx; int last_cpu; int flags; struct hw_perf_event_extra extra_reg; struct hw_perf_event_extra branch_reg; }; struct { /* software */ struct hrtimer hrtimer; }; struct { /* tracepoint */ /* for tp_event->class */ struct list_head tp_list; }; struct { /* amd_power */ u64 pwr_acc; u64 ptsc; }; #ifdef CONFIG_HAVE_HW_BREAKPOINT struct { /* breakpoint */ /* * Crufty hack to avoid the chicken and egg * problem hw_breakpoint has with context * creation and event initialization. */ struct arch_hw_breakpoint info; struct list_head bp_list; }; #endif struct { /* amd_iommu */ u8 iommu_bank; u8 iommu_cntr; u16 padding; u64 conf; u64 conf1; }; }; /* * If the event is a per task event, this will point to the task in * question. See the comment in perf_event_alloc(). */ struct task_struct *target; /* * PMU would store hardware filter configuration * here.
*/ void *addr_filters; /* Last sync'ed generation of filters */ unsigned long addr_filters_gen; /* * hw_perf_event::state flags; used to track the PERF_EF_* state. */ #define PERF_HES_STOPPED 0x01 /* the counter is stopped */ #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ #define PERF_HES_ARCH 0x04 int state; /* * The last observed hardware counter value, updated with a * local64_cmpxchg() such that pmu::read() can be called nested. */ local64_t prev_count; /* * The period to start the next sample with. */ u64 sample_period; /* * The period we started this sample with. */ u64 last_period; /* * However much is left of the current period; note that this is * a full 64bit value and allows for generation of periods longer * than hardware might allow. */ local64_t period_left; /* * State for throttling the event, see __perf_event_overflow() and * perf_adjust_freq_unthr_context(). */ u64 interrupts_seq; u64 interrupts; /* * State for freq target events, see __perf_event_overflow() and * perf_adjust_freq_unthr_context(). */ u64 freq_time_stamp; u64 freq_count_stamp; #endif }; struct perf_event; /* * Common implementation detail of pmu::{start,commit,cancel}_txn */ #define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */ #define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */ /** * pmu::capabilities flags */ #define PERF_PMU_CAP_NO_INTERRUPT 0x01 #define PERF_PMU_CAP_NO_NMI 0x02 #define PERF_PMU_CAP_AUX_NO_SG 0x04 #define PERF_PMU_CAP_EXTENDED_REGS 0x08 #define PERF_PMU_CAP_EXCLUSIVE 0x10 #define PERF_PMU_CAP_ITRACE 0x20 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 #define PERF_PMU_CAP_NO_EXCLUDE 0x80 #define PERF_PMU_CAP_AUX_OUTPUT 0x100 /** * struct pmu - generic performance monitoring unit */ struct pmu { struct list_head entry; struct module *module; struct device *dev; const struct attribute_group **attr_groups; const struct attribute_group **attr_update; const char *name; int type; /* * various common per-pmu feature flags */ int capabilities; int __percpu *pmu_disable_count; struct perf_cpu_context __percpu *pmu_cpu_context; atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */ int task_ctx_nr; int hrtimer_interval_ms; /* number of address filters this PMU can do */ unsigned int nr_addr_filters; /* * Fully disable/enable this PMU, can be used to protect from the PMI * as well as for lazy/batch writing of the MSRs. */ void (*pmu_enable) (struct pmu *pmu); /* optional */ void (*pmu_disable) (struct pmu *pmu); /* optional */ /* * Try and initialize the event for this PMU. * * Returns: * -ENOENT -- @event is not for this PMU * * -ENODEV -- @event is for this PMU but PMU not present * -EBUSY -- @event is for this PMU but PMU temporarily unavailable * -EINVAL -- @event is for this PMU but @event is not valid * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported * -EACCES -- @event is for this PMU, @event is valid, but no privileges * * 0 -- @event is for this PMU and valid * * Other error return values are allowed. */ int (*event_init) (struct perf_event *event); /* * Notification that the event was mapped or unmapped. Called * in the context of the mapping task. */ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ /* * Flags for ->add()/->del()/ ->start()/->stop(). There are * matching hw_perf_event::state flags. 
*/ #define PERF_EF_START 0x01 /* start the counter when adding */ #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ /* * Adds/Removes a counter to/from the PMU, can be done inside a * transaction, see the ->*_txn() methods. * * The add/del callbacks will reserve all hardware resources required * to service the event, this includes any counter constraint * scheduling etc. * * Called with IRQs disabled and the PMU disabled on the CPU the event * is on. * * ->add() called without PERF_EF_START should result in the same state * as ->add() followed by ->stop(). * * ->del() must always PERF_EF_UPDATE stop an event. If it calls * ->stop() that must deal with already being stopped without * PERF_EF_UPDATE. */ int (*add) (struct perf_event *event, int flags); void (*del) (struct perf_event *event, int flags); /* * Starts/Stops a counter present on the PMU. * * The PMI handler should stop the counter when perf_event_overflow() * returns !0. ->start() will be used to continue. * * Also used to change the sample period. * * Called with IRQs disabled and the PMU disabled on the CPU the event * is on -- will be called from NMI context when the PMU generates * NMIs. * * ->stop() with PERF_EF_UPDATE will read the counter and update * period/count values like ->read() would. * * ->start() with PERF_EF_RELOAD will reprogram the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. */ void (*start) (struct perf_event *event, int flags); void (*stop) (struct perf_event *event, int flags); /* * Updates the counter value of the event. * * For sampling capable PMUs this will also update the software period * hw_perf_event::period_left field. */ void (*read) (struct perf_event *event); /* * Group events scheduling is treated as a transaction, add * group events as a whole and perform one schedulability test. * If the test fails, roll back the whole group. * * Start the transaction, after this ->add() doesn't need to * do schedulability tests. * * Optional. */ void (*start_txn) (struct pmu *pmu, unsigned int txn_flags); /* * If ->start_txn() disabled the ->add() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. * * Optional. */ int (*commit_txn) (struct pmu *pmu); /* * Will cancel the transaction, assumes ->del() is called * for each successful ->add() during the transaction. * * Optional. */ void (*cancel_txn) (struct pmu *pmu); /* * Will return the value for perf_event_mmap_page::index for this event, * if no implementation is provided it will default to: event->hw.idx + 1. */ int (*event_idx) (struct perf_event *event); /* optional */ /* * context-switches callback */ void (*sched_task) (struct perf_event_context *ctx, bool sched_in); /* * PMU specific data size */ size_t task_ctx_size; /* * Set up pmu-private data structures for an AUX area */ void *(*setup_aux) (struct perf_event *event, void **pages, int nr_pages, bool overwrite); /* optional */ /* * Free pmu-private AUX data structures */ void (*free_aux) (void *aux); /* optional */ /* * Validate address range filters: make sure the HW supports the * requested configuration and number of filters; return 0 if the * supplied filters are valid, -errno otherwise. * * Runs in the context of the ioctl()ing process and is not serialized * with the rest of the PMU callbacks.
*/ int (*addr_filters_validate) (struct list_head *filters); /* optional */ /* * Synchronize address range filter configuration: * translate hw-agnostic filters into hardware configuration in * event::hw::addr_filters. * * Runs as a part of filter sync sequence that is done in ->start() * callback by calling perf_event_addr_filters_sync(). * * May (and should) traverse event::addr_filters::list, for which its * caller provides necessary serialization. */ void (*addr_filters_sync) (struct perf_event *event); /* optional */ /* * Check if event can be used for aux_output purposes for * events of this PMU. * * Runs from perf_event_open(). Should return 0 for "no match" * or non-zero for "match". */ int (*aux_output_match) (struct perf_event *event); /* optional */ /* * Filter events for PMU-specific reasons. */ int (*filter_match) (struct perf_event *event); /* optional */ /* * Check period value for PERF_EVENT_IOC_PERIOD ioctl. */ int (*check_period) (struct perf_event *event, u64 value); /* optional */ }; enum perf_addr_filter_action_t { PERF_ADDR_FILTER_ACTION_STOP = 0, PERF_ADDR_FILTER_ACTION_START, PERF_ADDR_FILTER_ACTION_FILTER, }; /** * struct perf_addr_filter - address range filter definition * @entry: event's filter list linkage * @path: object file's path for file-based filters * @offset: filter range offset * @size: filter range size (size==0 means single address trigger) * @action: filter/start/stop * * This is a hardware-agnostic filter configuration as specified by the user. */ struct perf_addr_filter { struct list_head entry; struct path path; unsigned long offset; unsigned long size; enum perf_addr_filter_action_t action; }; /** * struct perf_addr_filters_head - container for address range filters * @list: list of filters for this event * @lock: spinlock that serializes accesses to the @list and event's * (and its children's) filter generations. * @nr_file_filters: number of file-based filters * * A child event will use parent's @list (and therefore @lock), so they are * bundled together; see perf_event_addr_filters(). */ struct perf_addr_filters_head { struct list_head list; raw_spinlock_t lock; unsigned int nr_file_filters; }; struct perf_addr_filter_range { unsigned long start; unsigned long size; }; /** * enum perf_event_state - the states of an event: */ enum perf_event_state { PERF_EVENT_STATE_DEAD = -4, PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, PERF_EVENT_STATE_ACTIVE = 1, }; struct file; struct perf_sample_data; typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *regs); /* * Event capabilities. For event_caps and groups caps. * * PERF_EV_CAP_SOFTWARE: Is a software event. * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read * from any CPU in the package where it is active. 
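*
* (Added note, illustrative: a package-scoped uncore PMU can set
* PERF_EV_CAP_READ_ACTIVE_PKG in its event_init() so that reads may be
* satisfied on any CPU of that package rather than via an IPI to the
* event's owner CPU.)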
*/ #define PERF_EV_CAP_SOFTWARE BIT(0) #define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1) #define SWEVENT_HLIST_BITS 8 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) struct swevent_hlist { struct hlist_head heads[SWEVENT_HLIST_SIZE]; struct rcu_head rcu_head; }; #define PERF_ATTACH_CONTEXT 0x01 #define PERF_ATTACH_GROUP 0x02 #define PERF_ATTACH_TASK 0x04 #define PERF_ATTACH_TASK_DATA 0x08 #define PERF_ATTACH_ITRACE 0x10 struct perf_cgroup; struct ring_buffer; struct pmu_event_list { raw_spinlock_t lock; struct list_head list; }; #define for_each_sibling_event(sibling, event) \ if ((event)->group_leader == (event)) \ list_for_each_entry((sibling), &(event)->sibling_list, sibling_list) /** * struct perf_event - performance event kernel representation: */ struct perf_event { #ifdef CONFIG_PERF_EVENTS /* * entry onto perf_event_context::event_list; * modifications require ctx->lock; * iterations are RCU safe. */ struct list_head event_entry; /* * Locked for modification by both ctx->mutex and ctx->lock; holding * either suffices for read. */ struct list_head sibling_list; struct list_head active_list; /* * Node on the pinned or flexible tree located at the event context; */ struct rb_node group_node; u64 group_index; /* * We need storage to track the entries in perf_pmu_migrate_context; we * cannot use the event_entry because of RCU and we want to keep the * group intact, which avoids us using the other two entries. */ struct list_head migrate_entry; struct hlist_node hlist_entry; struct list_head active_entry; int nr_siblings; /* Not serialized. Only written during event initialization. */ int event_caps; /* The cumulative AND of all event_caps for events in this group. */ int group_caps; unsigned int group_generation; struct perf_event *group_leader; struct pmu *pmu; void *pmu_private; enum perf_event_state state; unsigned int attach_state; local64_t count; atomic64_t child_count; /* * These are the total time in nanoseconds that the event * has been enabled (i.e. eligible to run, and the task has * been scheduled in, if this is a per-task event) * and running (scheduled onto the CPU), respectively. */ u64 total_time_enabled; u64 total_time_running; u64 tstamp; /* * timestamp shadows the actual context timing but it can * be safely used in NMI interrupt context. It reflects the * context time as it was when the event was last scheduled in. * * ctx_time already accounts for ctx->timestamp. Therefore to * compute ctx_time for a sample, simply add perf_clock(). */ u64 shadow_ctx_time; struct perf_event_attr attr; u16 header_size; u16 id_header_size; u16 read_size; struct hw_perf_event hw; struct perf_event_context *ctx; atomic_long_t refcount; /* * These accumulate total time (in nanoseconds) that children * events have been enabled and running, respectively.
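 *
 * For example, the total count reported for an event is, roughly, the sum
 * of its own count and what its children accumulated (compare
 * perf_event_count() in kernel/events/core.c):
 *
 *	total = local64_read(&event->count) +
 *		atomic64_read(&event->child_count);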
*/ atomic64_t child_total_time_enabled; atomic64_t child_total_time_running; /* * Protect attach/detach and child_list: */ struct mutex child_mutex; struct list_head child_list; struct perf_event *parent; int oncpu; int cpu; struct list_head owner_entry; struct task_struct *owner; /* mmap bits */ struct mutex mmap_mutex; atomic_t mmap_count; struct ring_buffer *rb; struct list_head rb_entry; unsigned long rcu_batches; int rcu_pending; /* poll related */ wait_queue_head_t waitq; struct fasync_struct *fasync; /* delayed work for NMIs and such */ int pending_wakeup; int pending_kill; int pending_disable; struct irq_work pending; atomic_t event_limit; /* address range filters */ struct perf_addr_filters_head addr_filters; /* vma address array for file-based filters */ struct perf_addr_filter_range *addr_filter_ranges; unsigned long addr_filters_gen; /* for aux_output events */ struct perf_event *aux_event; void (*destroy)(struct perf_event *); struct rcu_head rcu_head; struct pid_namespace *ns; u64 id; atomic64_t lost_samples; u64 (*clock)(void); perf_overflow_handler_t overflow_handler; void *overflow_handler_context; #ifdef CONFIG_BPF_SYSCALL perf_overflow_handler_t orig_overflow_handler; struct bpf_prog *prog; #endif #ifdef CONFIG_EVENT_TRACING struct trace_event_call *tp_event; struct event_filter *filter; #ifdef CONFIG_FUNCTION_TRACER struct ftrace_ops ftrace_ops; #endif #endif #ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; /* cgroup the event is attached to */ #endif struct list_head sb_list; #endif /* CONFIG_PERF_EVENTS */ }; struct perf_event_groups { struct rb_root tree; u64 index; }; /** * struct perf_event_context - event context structure * * Used as a container for task events and CPU events as well: */ struct perf_event_context { struct pmu *pmu; /* * Protect the states of the events in the list, * nr_active, and the list: */ raw_spinlock_t lock; /* * Protect the list of events. Locking either mutex or lock * is sufficient to ensure the list doesn't change; to change * the list you need to lock both the mutex and the spinlock. */ struct mutex mutex; struct list_head active_ctx_list; struct perf_event_groups pinned_groups; struct perf_event_groups flexible_groups; struct list_head event_list; struct list_head pinned_active; struct list_head flexible_active; int nr_events; int nr_active; int is_active; int nr_stat; int nr_freq; int rotate_disable; /* * Set when nr_events != nr_active, except that it tolerates events that * need not be active due to scheduling constraints, such as cgroups. */ int rotate_necessary; refcount_t refcount; struct task_struct *task; /* * Context clock, runs when context enabled. */ u64 time; u64 timestamp; /* * These fields let us detect when two contexts have both * been cloned (inherited) from a common ancestor. */ struct perf_event_context *parent_ctx; u64 parent_gen; u64 generation; int pin_count; #ifdef CONFIG_CGROUP_PERF int nr_cgroups; /* cgroup evts */ #endif void *task_ctx_data; /* pmu specific data */ struct rcu_head rcu_head; }; /* * Number of contexts where an event can trigger: * task, softirq, hardirq, nmi.
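 *
 * The software-event code, for example, keeps one recursion counter per
 * such context (an int recursion[PERF_NR_CONTEXTS] array), and
 * perf_swevent_get_recursion_context() returns the index for the context
 * it is called from (see kernel/events/core.c).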
*/ #define PERF_NR_CONTEXTS 4 /** * struct perf_cpu_context - per cpu event context structure */ struct perf_cpu_context { struct perf_event_context ctx; struct perf_event_context *task_ctx; int active_oncpu; int exclusive; raw_spinlock_t hrtimer_lock; struct hrtimer hrtimer; ktime_t hrtimer_interval; unsigned int hrtimer_active; #ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; struct list_head cgrp_cpuctx_entry; #endif struct list_head sched_cb_entry; int sched_cb_usage; int online; }; struct perf_output_handle { struct perf_event *event; struct ring_buffer *rb; unsigned long wakeup; unsigned long size; u64 aux_flags; union { void *addr; unsigned long head; }; int page; }; struct bpf_perf_event_data_kern { bpf_user_pt_regs_t *regs; struct perf_sample_data *data; struct perf_event *event; }; #ifdef CONFIG_CGROUP_PERF /* * perf_cgroup_info keeps track of time_enabled for a cgroup. * This is a per-cpu dynamically allocated data structure. */ struct perf_cgroup_info { u64 time; u64 timestamp; }; struct perf_cgroup { struct cgroup_subsys_state css; struct perf_cgroup_info __percpu *info; }; /* * Must ensure cgroup is pinned (css_get) before calling * this function. In other words, we cannot call this function * if there is no cgroup event for the current CPU context. */ static inline struct perf_cgroup * perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx) { return container_of(task_css_check(task, perf_event_cgrp_id, ctx ? lockdep_is_held(&ctx->lock) : true), struct perf_cgroup, css); } #endif /* CONFIG_CGROUP_PERF */ #ifdef CONFIG_PERF_EVENTS extern void *perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event); extern void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size); extern int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size); extern void *perf_get_aux(struct perf_output_handle *handle); extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags); extern void perf_event_itrace_started(struct perf_event *event); extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); extern void perf_pmu_unregister(struct pmu *pmu); extern int perf_num_counters(void); extern const char *perf_pmu_name(void); extern void __perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task); extern void __perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next); extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); extern void perf_event_delayed_put(struct task_struct *task); extern struct file *perf_event_get(unsigned int fd); extern const struct perf_event *perf_get_event(struct file *file); extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event); extern void perf_event_print_debug(void); extern void perf_pmu_disable(struct pmu *pmu); extern void perf_pmu_enable(struct pmu *pmu); extern void perf_sched_cb_dec(struct pmu *pmu); extern void perf_sched_cb_inc(struct pmu *pmu); extern int perf_event_task_disable(void); extern int perf_event_task_enable(void); extern void perf_pmu_resched(struct pmu *pmu); extern int perf_event_refresh(struct perf_event *event, int refresh); extern void perf_event_update_userpage(struct perf_event *event); extern int perf_event_release_kernel(struct perf_event *event); extern struct perf_event * perf_event_create_kernel_counter(struct
perf_event_attr *attr, int cpu, struct task_struct *task, perf_overflow_handler_t callback, void *context); extern void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); int perf_event_read_local(struct perf_event *event, u64 *value, u64 *enabled, u64 *running); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); struct perf_sample_data { /* * Fields set by perf_sample_data_init(), grouped so as to * minimize the cachelines touched. */ u64 addr; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; u64 period; u64 weight; u64 txn; union perf_mem_data_src data_src; /* * The other fields, optionally {set,used} by * perf_{prepare,output}_sample(). */ u64 type; u64 ip; struct { u32 pid; u32 tid; } tid_entry; u64 time; u64 id; u64 stream_id; struct { u32 cpu; u32 reserved; } cpu_entry; struct perf_callchain_entry *callchain; /* * regs_user may point to task_pt_regs or to regs_user_copy, depending * on arch details. */ struct perf_regs regs_user; struct pt_regs regs_user_copy; struct perf_regs regs_intr; u64 stack_user_size; u64 phys_addr; } ____cacheline_aligned; /* default value for data source */ #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ PERF_MEM_S(LVL, NA) |\ PERF_MEM_S(SNOOP, NA) |\ PERF_MEM_S(LOCK, NA) |\ PERF_MEM_S(TLB, NA)) static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr, u64 period) { /* remaining struct members initialized in perf_prepare_sample() */ data->addr = addr; data->raw = NULL; data->br_stack = NULL; data->period = period; data->weight = 0; data->data_src.val = PERF_MEM_NA; data->txn = 0; } extern void perf_output_sample(struct perf_output_handle *handle, struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event); extern void perf_prepare_sample(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs); extern int perf_event_overflow(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); extern void perf_event_output_forward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); extern void perf_event_output_backward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); extern int perf_event_output(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); static inline bool __is_default_overflow_handler(perf_overflow_handler_t overflow_handler) { if (likely(overflow_handler == perf_event_output_forward)) return true; if (unlikely(overflow_handler == perf_event_output_backward)) return true; return false; } #define is_default_overflow_handler(event) \ __is_default_overflow_handler((event)->overflow_handler) #ifdef CONFIG_BPF_SYSCALL static inline bool uses_default_overflow_handler(struct perf_event *event) { if (likely(is_default_overflow_handler(event))) return true; return __is_default_overflow_handler(event->orig_overflow_handler); } #else #define uses_default_overflow_handler(event) \ is_default_overflow_handler(event) #endif extern void perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event); extern void perf_event__output_id_sample(struct perf_event *event, struct perf_output_handle *handle, struct perf_sample_data *sample); extern void perf_log_lost_samples(struct perf_event *event, u64 lost); static inline bool event_has_any_exclude_flag(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; return
attr->exclude_idle || attr->exclude_user || attr->exclude_kernel || attr->exclude_hv || attr->exclude_guest || attr->exclude_host; } static inline bool is_sampling_event(struct perf_event *event) { return event->attr.sample_period != 0; } /* * Return 1 for a software event, 0 for a hardware event */ static inline int is_software_event(struct perf_event *event) { return event->event_caps & PERF_EV_CAP_SOFTWARE; } /* * Return 1 for event in sw context, 0 for event in hw context */ static inline int in_software_context(struct perf_event *event) { return event->ctx->pmu->task_ctx_nr == perf_sw_context; } static inline int is_exclusive_pmu(struct pmu *pmu) { return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; } extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); #ifndef perf_arch_fetch_caller_regs static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } #endif /* * When generating a perf sample in-line, instead of from an interrupt / * exception, we lack a pt_regs. This is typically used from software events * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints. * * We typically don't need a full set, but (for x86) do require: * - ip for PERF_SAMPLE_IP * - cs for user_mode() tests * - sp for PERF_SAMPLE_CALLCHAIN * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs()) * * NOTE: assumes @regs is otherwise already 0 filled; this is important for * things like PERF_SAMPLE_REGS_INTR. */ static inline void perf_fetch_caller_regs(struct pt_regs *regs) { perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); } static __always_inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { if (static_key_false(&perf_swevent_enabled[event_id])) __perf_sw_event(event_id, nr, regs, addr); } DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]); /* * 'Special' version for the scheduler: it hard-assumes no recursion, * which is guaranteed by us not actually scheduling inside other swevents * because those disable preemption.
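 *
 * For reference, a typical caller of the generic perf_sw_event() above is
 * an architecture's page fault path, roughly:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);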
*/ static __always_inline void perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { if (static_key_false(&perf_swevent_enabled[event_id])) { struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); perf_fetch_caller_regs(regs); ___perf_sw_event(event_id, nr, regs, addr); } } extern struct static_key_false perf_sched_events; static __always_inline bool perf_sw_migrate_enabled(void) { if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS])) return true; return false; } static inline void perf_event_task_migrate(struct task_struct *task) { if (perf_sw_migrate_enabled()) task->sched_migrated = 1; } static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_in(prev, task); if (perf_sw_migrate_enabled() && task->sched_migrated) { struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); perf_fetch_caller_regs(regs); ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0); task->sched_migrated = 0; } } static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_out(prev, next); } extern void perf_event_mmap(struct vm_area_struct *vma); extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, const char *sym); extern void perf_event_bpf_event(struct bpf_prog *prog, enum perf_bpf_event_type type, u16 flags); extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs; static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void) { /* * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading * the callbacks between a !NULL check and dereferences, to ensure * pending stores/changes to the callback pointers are visible before a * non-NULL perf_guest_cbs is visible to readers, and to prevent a * module from unloading callbacks while readers are active. 
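 *
 * A reader therefore looks roughly like this (sketch; is_in_guest() and
 * get_guest_ip() are members of struct perf_guest_info_callbacks):
 *
 *	struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
 *
 *	if (guest_cbs && guest_cbs->is_in_guest())
 *		ip = guest_cbs->get_guest_ip();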
*/ return rcu_dereference(perf_guest_cbs); } extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern void perf_event_exec(void); extern void perf_event_comm(struct task_struct *tsk, bool exec); extern void perf_event_namespaces(struct task_struct *tsk); extern void perf_event_fork(struct task_struct *tsk); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); extern struct perf_callchain_entry * get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark); extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); extern int sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_contexts_per_stack; static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip) { if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { struct perf_callchain_entry *entry = ctx->entry; entry->ip[entry->nr++] = ip; ++ctx->contexts; return 0; } else { ctx->contexts_maxed = true; return -1; /* no more room, stop walking the stack */ } } static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip) { if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { struct perf_callchain_entry *entry = ctx->entry; entry->ip[entry->nr++] = ip; ++ctx->nr; return 0; } else { return -1; /* no more room, stop walking the stack */ } } extern int sysctl_perf_event_paranoid; extern int sysctl_perf_event_mlock; extern int sysctl_perf_event_sample_rate; extern int sysctl_perf_cpu_time_max_percent; extern void perf_sample_event_took(u64 sample_len_ns); extern int perf_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); int perf_event_max_stack_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); static inline bool perf_paranoid_any(void) { return sysctl_perf_event_paranoid > 2; } static inline bool perf_paranoid_tracepoint_raw(void) { return sysctl_perf_event_paranoid > -1; } static inline bool perf_paranoid_cpu(void) { return sysctl_perf_event_paranoid > 0; } static inline bool perf_paranoid_kernel(void) { return sysctl_perf_event_paranoid > 1; } extern void perf_event_init(void); extern void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx, struct task_struct *task); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags # define perf_misc_flags(regs) \ (user_mode(regs) ? 
PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) # define perf_instruction_pointer(regs) instruction_pointer(regs) #endif #ifndef perf_arch_bpf_user_pt_regs # define perf_arch_bpf_user_pt_regs(regs) regs #endif static inline bool has_branch_stack(struct perf_event *event) { return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; } static inline bool needs_branch_stack(struct perf_event *event) { return event->attr.branch_sample_type != 0; } static inline bool has_aux(struct perf_event *event) { return event->pmu->setup_aux; } static inline bool is_write_backward(struct perf_event *event) { return !!event->attr.write_backward; } static inline bool has_addr_filter(struct perf_event *event) { return event->pmu->nr_addr_filters; } /* * An inherited event uses parent's filters */ static inline struct perf_addr_filters_head * perf_event_addr_filters(struct perf_event *event) { struct perf_addr_filters_head *ifh = &event->addr_filters; if (event->parent) ifh = &event->parent->addr_filters; return ifh; } extern void perf_event_addr_filters_sync(struct perf_event *event); extern int perf_output_begin(struct perf_output_handle *handle, struct perf_event *event, unsigned int size); extern int perf_output_begin_forward(struct perf_output_handle *handle, struct perf_event *event, unsigned int size); extern int perf_output_begin_backward(struct perf_output_handle *handle, struct perf_event *event, unsigned int size); extern void perf_output_end(struct perf_output_handle *handle); extern unsigned int perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len); extern unsigned int perf_output_skip(struct perf_output_handle *handle, unsigned int len); extern int perf_swevent_get_recursion_context(void); extern void perf_swevent_put_recursion_context(int rctx); extern u64 perf_swevent_set_period(struct perf_event *event); extern void perf_event_enable(struct perf_event *event); extern void perf_event_disable(struct perf_event *event); extern void perf_event_disable_local(struct perf_event *event); extern void perf_event_disable_inatomic(struct perf_event *event); extern void perf_event_task_tick(void); extern int perf_event_account_interrupt(struct perf_event *event); #else /* !CONFIG_PERF_EVENTS: */ static inline void * perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event) { return NULL; } static inline void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) { } static inline int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size) { return -EINVAL; } static inline void * perf_get_aux(struct perf_output_handle *handle) { return NULL; } static inline void perf_event_task_migrate(struct task_struct *task) { } static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { } static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { } static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_delayed_put(struct task_struct *task) { } static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); } static inline const struct perf_event *perf_get_event(struct file *file) { return ERR_PTR(-EINVAL); } static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event) { return ERR_PTR(-EINVAL); } static 
inline int perf_event_read_local(struct perf_event *event, u64 *value, u64 *enabled, u64 *running) { return -EINVAL; } static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } static inline int perf_event_refresh(struct perf_event *event, int refresh) { return -EINVAL; } static inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } static inline void perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { } static inline void perf_bp_event(struct perf_event *event, void *data) { } static inline int perf_register_guest_info_callbacks (struct perf_guest_info_callbacks *callbacks) { return 0; } static inline int perf_unregister_guest_info_callbacks (struct perf_guest_info_callbacks *callbacks) { return 0; } static inline void perf_event_mmap(struct vm_area_struct *vma) { } typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data); static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, const char *sym) { } static inline void perf_event_bpf_event(struct bpf_prog *prog, enum perf_bpf_event_type type, u16 flags) { } static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } static inline void perf_event_namespaces(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } static inline void perf_event_init(void) { } static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; } static inline void perf_event_enable(struct perf_event *event) { } static inline void perf_event_disable(struct perf_event *event) { } static inline int __perf_event_disable(void *info) { return -1; } static inline void perf_event_task_tick(void) { } static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } #endif #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) extern void perf_restore_debug_store(void); #else static inline void perf_restore_debug_store(void) { } #endif static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag) { return frag->pad < sizeof(u64); } #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) struct perf_pmu_events_attr { struct device_attribute attr; u64 id; const char *event_str; }; struct perf_pmu_events_ht_attr { struct device_attribute attr; u64 id; const char *event_str_ht; const char *event_str_noht; }; ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); #define PMU_EVENT_ATTR(_name, _var, _id, _show) \ static struct perf_pmu_events_attr _var = { \ .attr = __ATTR(_name, 0444, _show, NULL), \ .id = _id, \ }; #define PMU_EVENT_ATTR_STRING(_name, _var, _str) \ static struct perf_pmu_events_attr _var = { \ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \ .id = 0, \ .event_str = _str, \ }; #define PMU_FORMAT_ATTR(_name, _format) \ static ssize_t \ _name##_show(struct device *dev, \ struct device_attribute *attr, \ char *page) \ { \ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ return sprintf(page, _format "\n"); \ } \ \ static struct device_attribute format_attr_##_name = __ATTR_RO(_name) /* Performance counter hotplug functions */ #ifdef CONFIG_PERF_EVENTS int perf_event_init_cpu(unsigned int cpu); 
int perf_event_exit_cpu(unsigned int cpu); #else #define perf_event_init_cpu NULL #define perf_event_exit_cpu NULL #endif #endif /* _LINUX_PERF_EVENT_H */ sdla.h 0000644 00000015307 14722070374 0005654 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Global definitions for the Frame relay interface. * * Version: @(#)if_ifrad.h 0.20 13 Apr 96 * * Author: Mike McLagan <mike.mclagan@linux.org> * * Changes: * 0.15 Mike McLagan Structure packing * * 0.20 Mike McLagan New flags for S508 buffer handling */ #ifndef SDLA_H #define SDLA_H #include <uapi/linux/sdla.h> /* important Z80 window addresses */ #define SDLA_CONTROL_WND 0xE000 #define SDLA_502_CMD_BUF 0xEF60 #define SDLA_502_RCV_BUF 0xA900 #define SDLA_502_TXN_AVAIL 0xFFF1 #define SDLA_502_RCV_AVAIL 0xFFF2 #define SDLA_502_EVENT_FLAGS 0xFFF3 #define SDLA_502_MDM_STATUS 0xFFF4 #define SDLA_502_IRQ_INTERFACE 0xFFFD #define SDLA_502_IRQ_PERMISSION 0xFFFE #define SDLA_502_DATA_OFS 0x0010 #define SDLA_508_CMD_BUF 0xE000 #define SDLA_508_TXBUF_INFO 0xF100 #define SDLA_508_RXBUF_INFO 0xF120 #define SDLA_508_EVENT_FLAGS 0xF003 #define SDLA_508_MDM_STATUS 0xF004 #define SDLA_508_IRQ_INTERFACE 0xF010 #define SDLA_508_IRQ_PERMISSION 0xF011 #define SDLA_508_TSE_OFFSET 0xF012 /* Event flags */ #define SDLA_EVENT_STATUS 0x01 #define SDLA_EVENT_DLCI_STATUS 0x02 #define SDLA_EVENT_BAD_DLCI 0x04 #define SDLA_EVENT_LINK_DOWN 0x40 /* IRQ Trigger flags */ #define SDLA_INTR_RX 0x01 #define SDLA_INTR_TX 0x02 #define SDLA_INTR_MODEM 0x04 #define SDLA_INTR_COMPLETE 0x08 #define SDLA_INTR_STATUS 0x10 #define SDLA_INTR_TIMER 0x20 /* DLCI status bits */ #define SDLA_DLCI_DELETED 0x01 #define SDLA_DLCI_ACTIVE 0x02 #define SDLA_DLCI_WAITING 0x04 #define SDLA_DLCI_NEW 0x08 #define SDLA_DLCI_INCLUDED 0x40 /* valid command codes */ #define SDLA_INFORMATION_WRITE 0x01 #define SDLA_INFORMATION_READ 0x02 #define SDLA_ISSUE_IN_CHANNEL_SIGNAL 0x03 #define SDLA_SET_DLCI_CONFIGURATION 0x10 #define SDLA_READ_DLCI_CONFIGURATION 0x11 #define SDLA_DISABLE_COMMUNICATIONS 0x12 #define SDLA_ENABLE_COMMUNICATIONS 0x13 #define SDLA_READ_DLC_STATUS 0x14 #define SDLA_READ_DLC_STATISTICS 0x15 #define SDLA_FLUSH_DLC_STATISTICS 0x16 #define SDLA_LIST_ACTIVE_DLCI 0x17 #define SDLA_FLUSH_INFORMATION_BUFFERS 0x18 #define SDLA_ADD_DLCI 0x20 #define SDLA_DELETE_DLCI 0x21 #define SDLA_ACTIVATE_DLCI 0x22 #define SDLA_DEACTIVATE_DLCI 0x23 #define SDLA_READ_MODEM_STATUS 0x30 #define SDLA_SET_MODEM_STATUS 0x31 #define SDLA_READ_COMMS_ERR_STATS 0x32 #define SDLA_FLUSH_COMMS_ERR_STATS 0x33 #define SDLA_READ_CODE_VERSION 0x40 #define SDLA_SET_IRQ_TRIGGER 0x50 #define SDLA_GET_IRQ_TRIGGER 0x51 /* In channel signal types */ #define SDLA_ICS_LINK_VERIFY 0x02 #define SDLA_ICS_STATUS_ENQ 0x03 /* modem status flags */ #define SDLA_MODEM_DTR_HIGH 0x01 #define SDLA_MODEM_RTS_HIGH 0x02 #define SDLA_MODEM_DCD_HIGH 0x08 #define SDLA_MODEM_CTS_HIGH 0x20 /* used for RET_MODEM interpretation */ #define SDLA_MODEM_DCD_LOW 0x01 #define SDLA_MODEM_CTS_LOW 0x02 /* return codes */ #define SDLA_RET_OK 0x00 #define SDLA_RET_COMMUNICATIONS 0x01 #define SDLA_RET_CHANNEL_INACTIVE 0x02 #define SDLA_RET_DLCI_INACTIVE 0x03 #define SDLA_RET_DLCI_CONFIG 0x04 #define SDLA_RET_BUF_TOO_BIG 0x05 #define SDLA_RET_NO_DATA 0x05 #define SDLA_RET_BUF_OVERSIZE 0x06 #define SDLA_RET_CIR_OVERFLOW 0x07 #define SDLA_RET_NO_BUFS 0x08 #define 
SDLA_RET_TIMEOUT 0x0A #define SDLA_RET_MODEM 0x10 #define SDLA_RET_CHANNEL_OFF 0x11 #define SDLA_RET_CHANNEL_ON 0x12 #define SDLA_RET_DLCI_STATUS 0x13 #define SDLA_RET_DLCI_UNKNOWN 0x14 #define SDLA_RET_COMMAND_INVALID 0x1F /* Configuration flags */ #define SDLA_DIRECT_RECV 0x0080 #define SDLA_TX_NO_EXCEPT 0x0020 #define SDLA_NO_ICF_MSGS 0x1000 #define SDLA_TX50_RX50 0x0000 #define SDLA_TX70_RX30 0x2000 #define SDLA_TX30_RX70 0x4000 /* IRQ selection flags */ #define SDLA_IRQ_RECEIVE 0x01 #define SDLA_IRQ_TRANSMIT 0x02 #define SDLA_IRQ_MODEM_STAT 0x04 #define SDLA_IRQ_COMMAND 0x08 #define SDLA_IRQ_CHANNEL 0x10 #define SDLA_IRQ_TIMER 0x20 /* definitions for PC memory mapping */ #define SDLA_8K_WINDOW 0x01 #define SDLA_S502_SEG_A 0x10 #define SDLA_S502_SEG_C 0x20 #define SDLA_S502_SEG_D 0x00 #define SDLA_S502_SEG_E 0x30 #define SDLA_S507_SEG_A 0x00 #define SDLA_S507_SEG_B 0x40 #define SDLA_S507_SEG_C 0x80 #define SDLA_S507_SEG_E 0xC0 #define SDLA_S508_SEG_A 0x00 #define SDLA_S508_SEG_C 0x10 #define SDLA_S508_SEG_D 0x08 #define SDLA_S508_SEG_E 0x18 /* SDLA adapter port constants */ #define SDLA_IO_EXTENTS 0x04 #define SDLA_REG_CONTROL 0x00 #define SDLA_REG_PC_WINDOW 0x01 /* offset for PC window select latch */ #define SDLA_REG_Z80_WINDOW 0x02 /* offset for Z80 window select latch */ #define SDLA_REG_Z80_CONTROL 0x03 /* offset for Z80 control latch */ #define SDLA_S502_STS 0x00 /* status reg for 502, 502E, 507 */ #define SDLA_S508_GNRL 0x00 /* general purp. reg for 508 */ #define SDLA_S508_STS 0x01 /* status reg for 508 */ #define SDLA_S508_IDR 0x02 /* ID reg for 508 */ /* control register flags */ #define SDLA_S502A_START 0x00 /* start the CPU */ #define SDLA_S502A_INTREQ 0x02 #define SDLA_S502A_INTEN 0x04 #define SDLA_S502A_HALT 0x08 /* halt the CPU */ #define SDLA_S502A_NMI 0x10 /* issue an NMI to the CPU */ #define SDLA_S502E_CPUEN 0x01 #define SDLA_S502E_ENABLE 0x02 #define SDLA_S502E_INTACK 0x04 #define SDLA_S507_ENABLE 0x01 #define SDLA_S507_IRQ3 0x00 #define SDLA_S507_IRQ4 0x20 #define SDLA_S507_IRQ5 0x40 #define SDLA_S507_IRQ7 0x60 #define SDLA_S507_IRQ10 0x80 #define SDLA_S507_IRQ11 0xA0 #define SDLA_S507_IRQ12 0xC0 #define SDLA_S507_IRQ15 0xE0 #define SDLA_HALT 0x00 #define SDLA_CPUEN 0x02 #define SDLA_MEMEN 0x04 #define SDLA_S507_EPROMWR 0x08 #define SDLA_S507_EPROMCLK 0x10 #define SDLA_S508_INTRQ 0x08 #define SDLA_S508_INTEN 0x10 struct sdla_cmd { char opp_flag; char cmd; short length; char retval; short dlci; char flags; short rxlost_int; long rxlost_app; char reserve[2]; char data[SDLA_MAX_DATA]; /* transfer data buffer */ } __attribute__((packed)); struct intr_info { char flags; short txlen; char irq; char flags2; short timeout; } __attribute__((packed)); /* found in the 508's control window at RXBUF_INFO */ struct buf_info { unsigned short rse_num; unsigned long rse_base; unsigned long rse_next; unsigned long buf_base; unsigned short reserved; unsigned long buf_top; } __attribute__((packed)); /* structure pointed to by rse_base in RXBUF_INFO struct */ struct buf_entry { char opp_flag; short length; short dlci; char flags; short timestamp; short reserved[2]; long buf_addr; } __attribute__((packed)); #endif transport_class.h 0000644 00000005011 14722070374 0010141 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * transport_class.h - a generic container for all transport classes * * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> */ #ifndef _TRANSPORT_CLASS_H_ #define _TRANSPORT_CLASS_H_ #include <linux/device.h> #include <linux/bug.h> #include 
<linux/attribute_container.h> struct transport_container; struct transport_class { struct class class; int (*setup)(struct transport_container *, struct device *, struct device *); int (*configure)(struct transport_container *, struct device *, struct device *); int (*remove)(struct transport_container *, struct device *, struct device *); }; #define DECLARE_TRANSPORT_CLASS(cls, nm, su, rm, cfg) \ struct transport_class cls = { \ .class = { \ .name = nm, \ }, \ .setup = su, \ .remove = rm, \ .configure = cfg, \ } struct anon_transport_class { struct transport_class tclass; struct attribute_container container; }; #define DECLARE_ANON_TRANSPORT_CLASS(cls, mtch, cfg) \ struct anon_transport_class cls = { \ .tclass = { \ .configure = cfg, \ }, \ .container = { \ .match = mtch, \ }, \ } #define class_to_transport_class(x) \ container_of(x, struct transport_class, class) struct transport_container { struct attribute_container ac; const struct attribute_group *statistics; }; #define attribute_container_to_transport_container(x) \ container_of(x, struct transport_container, ac) void transport_remove_device(struct device *); void transport_add_device(struct device *); void transport_setup_device(struct device *); void transport_configure_device(struct device *); void transport_destroy_device(struct device *); static inline void transport_register_device(struct device *dev) { transport_setup_device(dev); transport_add_device(dev); } static inline void transport_unregister_device(struct device *dev) { transport_remove_device(dev); transport_destroy_device(dev); } static inline int transport_container_register(struct transport_container *tc) { return attribute_container_register(&tc->ac); } static inline void transport_container_unregister(struct transport_container *tc) { if (unlikely(attribute_container_unregister(&tc->ac))) BUG(); } int transport_class_register(struct transport_class *); int anon_transport_class_register(struct anon_transport_class *); void transport_class_unregister(struct transport_class *); void anon_transport_class_unregister(struct anon_transport_class *); #endif pagemap.h 0000644 00000045414 14722070374 0006345 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGEMAP_H #define _LINUX_PAGEMAP_H /* * Copyright 1995 Linus Torvalds */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/highmem.h> #include <linux/compiler.h> #include <linux/uaccess.h> #include <linux/gfp.h> #include <linux/bitops.h> #include <linux/hardirq.h> /* for in_interrupt() */ #include <linux/hugetlb_inline.h> struct pagevec; /* * Bits in mapping->flags. */ enum mapping_flags { AS_EIO = 0, /* IO error on async write */ AS_ENOSPC = 1, /* ENOSPC on async write */ AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */ AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */ AS_EXITING = 4, /* final truncate in progress */ /* writeback related tags are not used */ AS_NO_WRITEBACK_TAGS = 5, }; /** * mapping_set_error - record a writeback error in the address_space * @mapping: the mapping in which an error should be set * @error: the error to set in the mapping * * When writeback fails in some way, we must record that error so that * userspace can be informed when fsync and the like are called. We endeavor * to report errors on any file that was open at the time of the error. Some * internal callers also need to know when writeback errors have occurred.
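 *
 * A typical call site (illustrative, not from this file) is a writeback
 * end-io path:
 *
 *	if (err)
 *		mapping_set_error(page->mapping, err);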
* * When a writeback error occurs, most filesystems will want to call * mapping_set_error to record the error in the mapping so that it can be * reported when the application calls fsync(2). */ static inline void mapping_set_error(struct address_space *mapping, int error) { if (likely(!error)) return; /* Record in wb_err for checkers using errseq_t based tracking */ filemap_set_wb_err(mapping, error); /* Record it in flags for now, for legacy callers */ if (error == -ENOSPC) set_bit(AS_ENOSPC, &mapping->flags); else set_bit(AS_EIO, &mapping->flags); } static inline void mapping_set_unevictable(struct address_space *mapping) { set_bit(AS_UNEVICTABLE, &mapping->flags); } static inline void mapping_clear_unevictable(struct address_space *mapping) { clear_bit(AS_UNEVICTABLE, &mapping->flags); } static inline int mapping_unevictable(struct address_space *mapping) { if (mapping) return test_bit(AS_UNEVICTABLE, &mapping->flags); return !!mapping; } static inline void mapping_set_exiting(struct address_space *mapping) { set_bit(AS_EXITING, &mapping->flags); } static inline int mapping_exiting(struct address_space *mapping) { return test_bit(AS_EXITING, &mapping->flags); } static inline void mapping_set_no_writeback_tags(struct address_space *mapping) { set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); } static inline int mapping_use_writeback_tags(struct address_space *mapping) { return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); } static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { return mapping->gfp_mask; } /* Restricts the given gfp_mask to what the mapping allows. */ static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, gfp_t gfp_mask) { return mapping_gfp_mask(mapping) & gfp_mask; } /* * This is non-atomic. Only to be used before the mapping is activated. * Probably needs a barrier... */ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) { m->gfp_mask = mask; } void release_pages(struct page **pages, int nr); /* * speculatively take a reference to a page. * If the page is free (_refcount == 0), then _refcount is untouched, and 0 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned. * * This function must be called inside the same rcu_read_lock() section as has * been used to lookup the page in the pagecache radix-tree (or page table): * this allows allocators to use a synchronize_rcu() to stabilize _refcount. * * Unless an RCU grace period has passed, the count of all pages coming out * of the allocator must be considered unstable. page_count may return higher * than expected, and put_page must be able to do the right thing when the * page has been finished with, no matter what it is subsequently allocated * for (because put_page is what is used here to drop an invalid speculative * reference). * * This is the interesting part of the lockless pagecache (and lockless * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) * has the following pattern: * 1. find page in radix tree * 2. conditionally increment refcount * 3. check the page is still in pagecache (if no, goto 1) * * Remove-side that cares about stability of _refcount (eg. reclaim) has the * following (with the i_pages lock held): * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) * B. remove page from pagecache * C. 
free the page * * There are 2 critical interleavings that matter: * - 2 runs before A: in this case, A sees elevated refcount and bails out * - A runs before 2: in this case, 2 sees zero refcount and retries; * subsequently, B will complete and 1 will find no page, causing the * lookup to return NULL. * * It is possible that between 1 and 2, the page is removed and then the exact * same page is inserted into the same position in pagecache. That's OK: the * old find_get_page using a lock could equally have run before or after * such a re-insertion, depending on the order in which locks are granted. * * Lookups racing against pagecache insertion aren't a big problem: either 1 * will find the page or it will not. Likewise, the old find_get_page could run * either before the insertion or afterwards, depending on timing. */ static inline int __page_cache_add_speculative(struct page *page, int count) { #ifdef CONFIG_TINY_RCU # ifdef CONFIG_PREEMPT_COUNT VM_BUG_ON(!in_atomic() && !irqs_disabled()); # endif /* * Preempt must be disabled here - we rely on rcu_read_lock doing * this for us. * * Pagecache won't be truncated from interrupt context, so if we have * found a page in the radix tree here, we have pinned its refcount by * disabling preempt, and hence no need for the "speculative get" that * SMP requires. */ VM_BUG_ON_PAGE(page_count(page) == 0, page); page_ref_add(page, count); #else if (unlikely(!page_ref_add_unless(page, count, 0))) { /* * Either the page has been freed, or will be freed. * In either case, retry here and the caller should * do the right thing (see comments above). */ return 0; } #endif VM_BUG_ON_PAGE(PageTail(page), page); return 1; } static inline int page_cache_get_speculative(struct page *page) { return __page_cache_add_speculative(page, 1); } static inline int page_cache_add_speculative(struct page *page, int count) { return __page_cache_add_speculative(page, count); } #ifdef CONFIG_NUMA extern struct page *__page_cache_alloc(gfp_t gfp); #else static inline struct page *__page_cache_alloc(gfp_t gfp) { return alloc_pages(gfp, 0); } #endif static inline struct page *page_cache_alloc(struct address_space *x) { return __page_cache_alloc(mapping_gfp_mask(x)); } static inline gfp_t readahead_gfp_mask(struct address_space *x) { return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN; } typedef int filler_t(void *, struct page *); pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); #define FGP_ACCESSED 0x00000001 #define FGP_LOCK 0x00000002 #define FGP_CREAT 0x00000004 #define FGP_WRITE 0x00000008 #define FGP_NOFS 0x00000010 #define FGP_NOWAIT 0x00000020 #define FGP_FOR_MMAP 0x00000040 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, int fgp_flags, gfp_t cache_gfp_mask); /** * find_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index * * Looks up the page cache slot at @mapping & @offset. If there is a * page cache page, it is returned with an increased refcount. * * Otherwise, %NULL is returned.
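 *
 * The reference must be dropped again with put_page(), so a typical
 * lookup is (sketch):
 *
 *	page = find_get_page(mapping, offset);
 *	if (page) {
 *		... use the page ...
 *		put_page(page);
 *	}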
*/ static inline struct page *find_get_page(struct address_space *mapping, pgoff_t offset) { return pagecache_get_page(mapping, offset, 0, 0); } static inline struct page *find_get_page_flags(struct address_space *mapping, pgoff_t offset, int fgp_flags) { return pagecache_get_page(mapping, offset, fgp_flags, 0); } /** * find_lock_page - locate, pin and lock a pagecache page * @mapping: the address_space to search * @offset: the page index * * Looks up the page cache slot at @mapping & @offset. If there is a * page cache page, it is returned locked and with an increased * refcount. * * Otherwise, %NULL is returned. * * find_lock_page() may sleep. */ static inline struct page *find_lock_page(struct address_space *mapping, pgoff_t offset) { return pagecache_get_page(mapping, offset, FGP_LOCK, 0); } /** * find_or_create_page - locate or add a pagecache page * @mapping: the page's address_space * @offset: the page's index into the mapping * @gfp_mask: page allocation mode * * Looks up the page cache slot at @mapping & @offset. If there is a * page cache page, it is returned locked and with an increased * refcount. * * If the page is not present, a new page is allocated using @gfp_mask * and added to the page cache and the VM's LRU list. The page is * returned locked and with an increased refcount. * * On memory exhaustion, %NULL is returned. * * find_or_create_page() may sleep, even if @gfp_mask specifies an * atomic allocation! */ static inline struct page *find_or_create_page(struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { return pagecache_get_page(mapping, offset, FGP_LOCK|FGP_ACCESSED|FGP_CREAT, gfp_mask); } /** * grab_cache_page_nowait - returns locked page at given index in given cache * @mapping: target address_space * @index: the page index * * Same as grab_cache_page(), but do not wait if the page is unavailable. * This is intended for speculative data generators, where the data can * be regenerated if the page couldn't be grabbed. This routine should * be safe to call while holding the lock for another page. * * Clear __GFP_FS when allocating the page to avoid recursion into the fs * and deadlock against the caller's locked page.
*/ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) { return pagecache_get_page(mapping, index, FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, mapping_gfp_mask(mapping)); } static inline struct page *find_subpage(struct page *page, pgoff_t offset) { if (PageHuge(page)) return page; VM_BUG_ON_PAGE(PageTail(page), page); return page + (offset & (compound_nr(page) - 1)); } struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); unsigned find_get_entries(struct address_space *mapping, pgoff_t start, unsigned int nr_entries, struct page **entries, pgoff_t *indices); unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, pgoff_t end, unsigned int nr_pages, struct page **pages); static inline unsigned find_get_pages(struct address_space *mapping, pgoff_t *start, unsigned int nr_pages, struct page **pages) { return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages, pages); } unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages); unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, pgoff_t end, xa_mark_t tag, unsigned int nr_pages, struct page **pages); static inline unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, xa_mark_t tag, unsigned int nr_pages, struct page **pages) { return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, nr_pages, pages); } struct page *grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index, unsigned flags); /* * Returns locked page at given index in given cache, creating it if needed. */ static inline struct page *grab_cache_page(struct address_space *mapping, pgoff_t index) { return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); } extern struct page * read_cache_page(struct address_space *mapping, pgoff_t index, filler_t *filler, void *data); extern struct page * read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); extern int read_cache_pages(struct address_space *mapping, struct list_head *pages, filler_t *filler, void *data); static inline struct page *read_mapping_page(struct address_space *mapping, pgoff_t index, void *data) { return read_cache_page(mapping, index, NULL, data); } /* * Get index of the page within radix-tree (but not for hugetlb pages). * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE) */ static inline pgoff_t page_to_index(struct page *page) { pgoff_t pgoff; if (likely(!PageTransTail(page))) return page->index; /* * We don't initialize ->index for tail pages: calculate based on * head page */ pgoff = compound_head(page)->index; pgoff += page - compound_head(page); return pgoff; } extern pgoff_t hugetlb_basepage_index(struct page *page); /* * Get the offset in PAGE_SIZE (even for hugetlb pages). * (TODO: hugetlb pages should have ->index in PAGE_SIZE) */ static inline pgoff_t page_to_pgoff(struct page *page) { if (unlikely(PageHuge(page))) return hugetlb_basepage_index(page); return page_to_index(page); } /* * Return byte-offset into filesystem object for page.
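 *
 * For example, with 4 KiB pages (PAGE_SHIFT == 12) the page at
 * page->index == 3 covers file bytes 0x3000..0x3fff, so page_offset()
 * returns 0x3000.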
*/ static inline loff_t page_offset(struct page *page) { return ((loff_t)page->index) << PAGE_SHIFT; } static inline loff_t page_file_offset(struct page *page) { return ((loff_t)page_index(page)) << PAGE_SHIFT; } extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, unsigned long address); static inline pgoff_t linear_page_index(struct vm_area_struct *vma, unsigned long address) { pgoff_t pgoff; if (unlikely(is_vm_hugetlb_page(vma))) return linear_hugepage_index(vma, address); pgoff = (address - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; return pgoff; } extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags); extern void unlock_page(struct page *page); /* * Return true if the page was successfully locked */ static inline int trylock_page(struct page *page) { page = compound_head(page); return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); } /* * lock_page may only be called if we have the page's inode pinned. */ static inline void lock_page(struct page *page) { might_sleep(); if (!trylock_page(page)) __lock_page(page); } /* * lock_page_killable is like lock_page but can be interrupted by fatal * signals. It returns 0 if it locked the page and -EINTR if it was * killed while waiting. */ static inline int lock_page_killable(struct page *page) { might_sleep(); if (!trylock_page(page)) return __lock_page_killable(page); return 0; } /* * lock_page_or_retry - Lock the page, unless this would block and the * caller indicated that it can handle a retry. * * Return value and mmap_sem implications depend on flags; see * __lock_page_or_retry(). */ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags) { might_sleep(); return trylock_page(page) || __lock_page_or_retry(page, mm, flags); } /* * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc., * and should not be used directly. */ extern void wait_on_page_bit(struct page *page, int bit_nr); extern int wait_on_page_bit_killable(struct page *page, int bit_nr); /* * Wait for a page to be unlocked. * * This must be called with the caller "holding" the page, * i.e. with an increased "page->count" so that the page won't * go away during the wait. */ static inline void wait_on_page_locked(struct page *page) { if (PageLocked(page)) wait_on_page_bit(compound_head(page), PG_locked); } static inline int wait_on_page_locked_killable(struct page *page) { if (!PageLocked(page)) return 0; return wait_on_page_bit_killable(compound_head(page), PG_locked); } extern void put_and_wait_on_page_locked(struct page *page); void wait_on_page_writeback(struct page *page); extern void end_page_writeback(struct page *page); void wait_for_stable_page(struct page *page); void page_endio(struct page *page, bool is_write, int err); /* * Add an arbitrary waiter to a page's wait queue */ extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter); /* * Fault in everything in the given userspace address range. */ static inline int fault_in_pages_writeable(char __user *uaddr, int size) { char __user *end = uaddr + size - 1; if (unlikely(size == 0)) return 0; if (unlikely(uaddr > end)) return -EFAULT; /* * Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it.
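 *
 * For example, probing a 10000-byte range that starts on a page boundary
 * (with PAGE_SIZE == 4096) writes one zero byte at offsets 0, 4096 and
 * 8192 -- one per page touched -- while the trailing check below handles
 * ranges whose final byte lands in a page that the loop's last stride
 * stepped over.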
*/ do { if (unlikely(__put_user(0, uaddr) != 0)) return -EFAULT; uaddr += PAGE_SIZE; } while (uaddr <= end); /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & PAGE_MASK) == ((unsigned long)end & PAGE_MASK)) return __put_user(0, end); return 0; } static inline int fault_in_pages_readable(const char __user *uaddr, int size) { volatile char c; const char __user *end = uaddr + size - 1; if (unlikely(size == 0)) return 0; if (unlikely(uaddr > end)) return -EFAULT; do { if (unlikely(__get_user(c, uaddr) != 0)) return -EFAULT; uaddr += PAGE_SIZE; } while (uaddr <= end); /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & PAGE_MASK) == ((unsigned long)end & PAGE_MASK)) { return __get_user(c, end); } (void)c; return 0; } int add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); extern void delete_from_page_cache(struct page *page); extern void __delete_from_page_cache(struct page *page, void *shadow); int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); void delete_from_page_cache_batch(struct address_space *mapping, struct pagevec *pvec); /* * Like add_to_page_cache_locked, but used to add newly allocated pages: * the page is new, so we can just run __SetPageLocked() against it. */ static inline int add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { int error; __SetPageLocked(page); error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); if (unlikely(error)) __ClearPageLocked(page); return error; } static inline unsigned long dir_pages(struct inode *inode) { return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; } #endif /* _LINUX_PAGEMAP_H */ pps-gpio.h 0000644 00000000613 14722070374 0006461 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * pps-gpio.h -- PPS client for GPIOs * * Copyright (C) 2011 James Nuss <jamesnuss@nanometrics.ca> */ #ifndef _PPS_GPIO_H #define _PPS_GPIO_H struct pps_gpio_platform_data { struct gpio_desc *gpio_pin; struct gpio_desc *echo_pin; bool assert_falling_edge; bool capture_clear; unsigned int echo_active_ms; }; #endif /* _PPS_GPIO_H */ scx200.h 0000644 00000003507 14722070374 0005747 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* linux/include/linux/scx200.h Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> Defines for the National Semiconductor SCx200 Processors */ /* Interesting stuff for the National Semiconductor SCx200 CPU */ extern unsigned scx200_cb_base; #define scx200_cb_present() (scx200_cb_base!=0) /* F0 PCI Header/Bridge Configuration Registers */ #define SCx200_DOCCS_BASE 0x78 /* DOCCS Base Address Register */ #define SCx200_DOCCS_CTRL 0x7c /* DOCCS Control Register */ /* GPIO Register Block */ #define SCx200_GPIO_SIZE 0x2c /* Size of GPIO register block */ /* General Configuration Block */ #define SCx200_CB_BASE_FIXED 0x9000 /* Base fixed at 0x9000 according to errata? 
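 *
 * A driver typically locates its register block relative to this base,
 * e.g. (illustrative):
 *
 *	if (scx200_cb_present())
 *		wdt_base = scx200_cb_base + SCx200_WDT_OFFSET;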
*/ /* Watchdog Timer */ #define SCx200_WDT_OFFSET 0x00 /* offset within configuration block */ #define SCx200_WDT_SIZE 0x05 /* size */ #define SCx200_WDT_WDTO 0x00 /* Time-Out Register */ #define SCx200_WDT_WDCNFG 0x02 /* Configuration Register */ #define SCx200_WDT_WDSTS 0x04 /* Status Register */ #define SCx200_WDT_WDSTS_WDOVF (1<<0) /* Overflow bit */ /* High Resolution Timer */ #define SCx200_TIMER_OFFSET 0x08 #define SCx200_TIMER_SIZE 0x06 /* Clock Generators */ #define SCx200_CLOCKGEN_OFFSET 0x10 #define SCx200_CLOCKGEN_SIZE 0x10 /* Pin Multiplexing and Miscellaneous Configuration Registers */ #define SCx200_MISC_OFFSET 0x30 #define SCx200_MISC_SIZE 0x10 #define SCx200_PMR 0x30 /* Pin Multiplexing Register */ #define SCx200_MCR 0x34 /* Miscellaneous Configuration Register */ #define SCx200_INTSEL 0x38 /* Interrupt Selection Register */ #define SCx200_IID 0x3c /* IA On a Chip Identification Number Reg */ #define SCx200_REV 0x3d /* Revision Register */ #define SCx200_CBA 0x3e /* Configuration Base Address Register */ #define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */ dma-iommu.h 0000644 00000003702 14722070374 0006612 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014-2015 ARM Ltd. */ #ifndef __DMA_IOMMU_H #define __DMA_IOMMU_H #include <linux/errno.h> #include <linux/types.h> #ifdef CONFIG_IOMMU_DMA #include <linux/dma-mapping.h> #include <linux/iommu.h> #include <linux/msi.h> /* Domain management interface for IOMMU drivers */ int iommu_get_dma_cookie(struct iommu_domain *domain); int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); void iommu_put_dma_cookie(struct iommu_domain *domain); /* Setup call for arch DMA mapping code */ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size); /* The DMA API isn't _quite_ the whole story, though... */ /* * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device * * The MSI page will be stored in @desc. * * Return: 0 on success otherwise an error describing the failure. */ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr); /* Update the MSI message if required. */ void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg); void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); #else /* CONFIG_IOMMU_DMA */ struct iommu_domain; struct msi_desc; struct msi_msg; struct device; static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size) { } static inline int iommu_get_dma_cookie(struct iommu_domain *domain) { return -ENODEV; } static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) { return -ENODEV; } static inline void iommu_put_dma_cookie(struct iommu_domain *domain) { } static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) { return 0; } static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg) { } static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) { } #endif /* CONFIG_IOMMU_DMA */ #endif /* __DMA_IOMMU_H */ ppp_defs.h 0000644 00000000461 14722070374 0006524 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * ppp_defs.h - PPP definitions. * * Copyright 1994-2000 Paul Mackerras. 
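 *
 * PPP_FCS() below folds one byte into a CCITT CRC, so a frame check
 * sequence is accumulated as (sketch; PPP_INITFCS and PPP_GOODFCS come
 * from uapi/linux/ppp_defs.h):
 *
 *	u16 fcs = PPP_INITFCS;
 *
 *	while (len--)
 *		fcs = PPP_FCS(fcs, *buf++);
 *
 * and a received frame is intact when fcs ends up equal to PPP_GOODFCS
 * after the received FCS bytes themselves have been folded in.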
*/ #ifndef _PPP_DEFS_H_ #define _PPP_DEFS_H_ #include <linux/crc-ccitt.h> #include <uapi/linux/ppp_defs.h> #define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c) #endif /* _PPP_DEFS_H_ */ firmware.h 0000644 00000005237 14722070374 0006546 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FIRMWARE_H #define _LINUX_FIRMWARE_H #include <linux/types.h> #include <linux/compiler.h> #include <linux/gfp.h> #define FW_ACTION_NOHOTPLUG 0 #define FW_ACTION_HOTPLUG 1 struct firmware { size_t size; const u8 *data; struct page **pages; /* firmware loader private fields */ void *priv; }; struct module; struct device; struct builtin_fw { char *name; void *data; unsigned long size; }; /* We have to play tricks here much like stringify() to get the __COUNTER__ macro to be expanded as we want it */ #define __fw_concat1(x, y) x##y #define __fw_concat(x, y) __fw_concat1(x, y) #define DECLARE_BUILTIN_FIRMWARE(name, blob) \ DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob)) #define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \ static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \ __used __section(.builtin_fw) = { name, blob, size } #if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) int request_firmware(const struct firmware **fw, const char *name, struct device *device); int firmware_request_nowarn(const struct firmware **fw, const char *name, struct device *device); int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)); int request_firmware_direct(const struct firmware **fw, const char *name, struct device *device); int request_firmware_into_buf(const struct firmware **firmware_p, const char *name, struct device *device, void *buf, size_t size); void release_firmware(const struct firmware *fw); #else static inline int request_firmware(const struct firmware **fw, const char *name, struct device *device) { return -EINVAL; } static inline int firmware_request_nowarn(const struct firmware **fw, const char *name, struct device *device) { return -EINVAL; } static inline int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) { return -EINVAL; } static inline void release_firmware(const struct firmware *fw) { } static inline int request_firmware_direct(const struct firmware **fw, const char *name, struct device *device) { return -EINVAL; } static inline int request_firmware_into_buf(const struct firmware **firmware_p, const char *name, struct device *device, void *buf, size_t size) { return -EINVAL; } #endif int firmware_request_cache(struct device *device, const char *name); #endif if_ltalk.h 0000644 00000000274 14722070374 0006513 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_LTALK_H #define __LINUX_LTALK_H #include <uapi/linux/if_ltalk.h> extern struct net_device *alloc_ltalkdev(int sizeof_priv); #endif stackdepot.h 0000644 00000001033 14722070374 0007061 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * A generic stack depot implementation * * Author: Alexander Potapenko <glider@google.com> * Copyright (C) 2016 Google, Inc. * * Based on code by Dmitry Chernenkov. 
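*/

/*
 * Usage sketch (assumption, not from the original header): capture the
 * current call stack with stack_trace_save() from <linux/stacktrace.h>
 * and deposit it, the way users such as KASAN do. depot_stack_handle_t
 * is the typedef declared just below; the helper name and the depth of
 * 16 entries are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/gfp.h>

static inline depot_stack_handle_t save_current_stack_sketch(void)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr, GFP_NOWAIT);	/* 0 on failure */
}

/*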
*/ #ifndef _LINUX_STACKDEPOT_H #define _LINUX_STACKDEPOT_H typedef u32 depot_stack_handle_t; depot_stack_handle_t stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t gfp_flags); unsigned int stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries); #endif timekeeper_internal.h 0000644 00000012517 14722070374 0010757 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * You SHOULD NOT be including this unless you're vsyscall * handling code or timekeeping internal code! */ #ifndef _LINUX_TIMEKEEPER_INTERNAL_H #define _LINUX_TIMEKEEPER_INTERNAL_H #include <linux/clocksource.h> #include <linux/jiffies.h> #include <linux/time.h> /** * struct tk_read_base - base structure for timekeeping readout * @clock: Current clocksource used for timekeeping. * @mask: Bitmask for two's complement subtraction of non 64bit clocks * @cycle_last: @clock cycle value at last update * @mult: (NTP adjusted) multiplier for scaled math conversion * @shift: Shift value for scaled math conversion * @xtime_nsec: Shifted (fractional) nano seconds offset for readout * @base: ktime_t (nanoseconds) base time for readout * @base_real: Nanoseconds base value for clock REALTIME readout * * This struct has size 56 byte on 64 bit. Together with a seqcount it * occupies a single 64byte cache line. * * The struct is separate from struct timekeeper as it is also used * for a fast NMI safe accessors. * * @base_real is for the fast NMI safe accessor to allow reading clock * realtime from any context. */ struct tk_read_base { struct clocksource *clock; u64 mask; u64 cycle_last; u32 mult; u32 shift; u64 xtime_nsec; ktime_t base; u64 base_real; }; /** * struct timekeeper - Structure holding internal timekeeping values. * @tkr_mono: The readout base structure for CLOCK_MONOTONIC * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW * @xtime_sec: Current CLOCK_REALTIME time in seconds * @ktime_sec: Current CLOCK_MONOTONIC time in seconds * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset * @offs_real: Offset clock monotonic -> clock realtime * @offs_boot: Offset clock monotonic -> clock boottime * @offs_tai: Offset clock monotonic -> clock tai * @tai_offset: The current UTC to TAI offset in seconds * @clock_was_set_seq: The sequence number of clock was set events * @cs_was_changed_seq: The sequence number of clocksource change events * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset * @cycle_interval: Number of clock cycles in one NTP interval * @xtime_interval: Number of clock shifted nano seconds in one NTP * interval. * @xtime_remainder: Shifted nano seconds left over when rounding * @cycle_interval * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. * @ntp_error: Difference between accumulated time and NTP time in ntp * shifted nano seconds. * @ntp_error_shift: Shift conversion between clock shifted nano seconds and * ntp shifted nano seconds. * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING) * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING) * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING) * * Note: For timespec(64) based interfaces wall_to_monotonic is what * we need to add to xtime (or xtime corrected for sub jiffie times) * to get to monotonic time. 
Monotonic is pegged at zero at system * boot time, so wall_to_monotonic will be negative, however, we will * ALWAYS keep the tv_nsec part positive so we can use the usual * normalization. * * wall_to_monotonic is moved after resume from suspend for the * monotonic time not to jump. We need to add total_sleep_time to * wall_to_monotonic to get the real boot based time offset. * * wall_to_monotonic is no longer the boot time, getboottime must be * used instead. * * @monotonic_to_boottime is a timespec64 representation of @offs_boot to * accelerate the VDSO update for CLOCK_BOOTTIME. */ struct timekeeper { struct tk_read_base tkr_mono; struct tk_read_base tkr_raw; u64 xtime_sec; unsigned long ktime_sec; struct timespec64 wall_to_monotonic; ktime_t offs_real; ktime_t offs_boot; ktime_t offs_tai; s32 tai_offset; unsigned int clock_was_set_seq; u8 cs_was_changed_seq; ktime_t next_leap_ktime; u64 raw_sec; struct timespec64 monotonic_to_boot; /* The following members are for timekeeping internal use */ u64 cycle_interval; u64 xtime_interval; s64 xtime_remainder; u64 raw_interval; /* The ntp_tick_length() value currently being used. * This cached copy ensures we consistently apply the tick * length for an entire tick, as ntp_tick_length may change * mid-tick, and we don't want to apply that new value to * the tick in progress. */ u64 ntp_tick; /* Difference between accumulated time and NTP time in ntp * shifted nano seconds. */ s64 ntp_error; u32 ntp_error_shift; u32 ntp_err_mult; /* Flag used to avoid updating NTP twice with same second */ u32 skip_second_overflow; #ifdef CONFIG_DEBUG_TIMEKEEPING long last_warning; /* * These simple flag variables are managed * without locks, which is racy, but they are * ok since we don't really care about being * super precise about how many events were * seen, just that a problem was observed. */ int underflow_seen; int overflow_seen; #endif }; #ifdef CONFIG_GENERIC_TIME_VSYSCALL extern void update_vsyscall(struct timekeeper *tk); extern void update_vsyscall_tz(void); #else static inline void update_vsyscall(struct timekeeper *tk) { } static inline void update_vsyscall_tz(void) { } #endif #endif /* _LINUX_TIMEKEEPER_INTERNAL_H */ vt_buffer.h 0000644 00000002772 14722070374 0006715 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/vt_buffer.h -- Access to VT screen buffer * * (c) 1998 Martin Mares <mj@ucw.cz> * * This is a set of macros and functions which are used in the * console driver and related code to access the screen buffer. * In most cases the console works with simple in-memory buffer, * but when handling hardware text mode consoles, we store * the foreground console directly in video memory. 
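*/

/*
 * Usage sketch (assumption, not part of the original header): clearing
 * one 80-column text row to blanks through the scr_*() accessors defined
 * below. 0x0720 is attribute 0x07 with a space character; note that
 * scr_memsetw() takes a byte count, not a cell count.
 */
#include <linux/types.h>

static inline void vt_clear_row_sketch(u16 *row)
{
	scr_memsetw(row, 0x0720, 80 * 2);	/* 80 cells of 2 bytes each */
}

/*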
*/ #ifndef _LINUX_VT_BUFFER_H_ #define _LINUX_VT_BUFFER_H_ #include <linux/string.h> #if IS_ENABLED(CONFIG_VGA_CONSOLE) || IS_ENABLED(CONFIG_MDA_CONSOLE) #include <asm/vga.h> #endif #ifndef VT_BUF_HAVE_RW #define scr_writew(val, addr) (*(addr) = (val)) #define scr_readw(addr) (*(addr)) #endif #ifndef VT_BUF_HAVE_MEMSETW static inline void scr_memsetw(u16 *s, u16 c, unsigned int count) { #ifdef VT_BUF_HAVE_RW count /= 2; while (count--) scr_writew(c, s++); #else memset16(s, c, count / 2); #endif } #endif #ifndef VT_BUF_HAVE_MEMCPYW static inline void scr_memcpyw(u16 *d, const u16 *s, unsigned int count) { #ifdef VT_BUF_HAVE_RW count /= 2; while (count--) scr_writew(scr_readw(s++), d++); #else memcpy(d, s, count); #endif } #endif #ifndef VT_BUF_HAVE_MEMMOVEW static inline void scr_memmovew(u16 *d, const u16 *s, unsigned int count) { #ifdef VT_BUF_HAVE_RW if (d < s) scr_memcpyw(d, s, count); else { count /= 2; d += count; s += count; while (count--) scr_writew(scr_readw(--s), --d); } #else memmove(d, s, count); #endif } #endif #endif pm_qos.h 0000644 00000023522 14722070374 0006225 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PM_QOS_H #define _LINUX_PM_QOS_H /* interface for the pm_qos_power infrastructure of the linux kernel. * * Mark Gross <mgross@linux.intel.com> */ #include <linux/plist.h> #include <linux/notifier.h> #include <linux/device.h> #include <linux/workqueue.h> enum { PM_QOS_RESERVED = 0, PM_QOS_CPU_DMA_LATENCY, /* insert new class ID */ PM_QOS_NUM_CLASSES, }; enum pm_qos_flags_status { PM_QOS_FLAGS_UNDEFINED = -1, PM_QOS_FLAGS_NONE, PM_QOS_FLAGS_SOME, PM_QOS_FLAGS_ALL, }; #define PM_QOS_DEFAULT_VALUE (-1) #define PM_QOS_LATENCY_ANY S32_MAX #define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC) #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) struct pm_qos_request { struct plist_node node; int pm_qos_class; struct delayed_work work; /* for pm_qos_update_request_timeout */ }; struct pm_qos_flags_request { struct list_head node; s32 flags; /* Do not change to 64 bit */ }; enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_LATENCY_TOLERANCE, DEV_PM_QOS_FLAGS, }; struct dev_pm_qos_request { enum dev_pm_qos_req_type type; union { struct plist_node pnode; struct pm_qos_flags_request flr; } data; struct device *dev; }; enum pm_qos_type { PM_QOS_UNITIALIZED, PM_QOS_MAX, /* return the largest value */ PM_QOS_MIN, /* return the smallest value */ PM_QOS_SUM /* return the sum */ }; /* * Note: The lockless read path depends on the CPU accessing target_value * or effective_flags atomically. 
Atomic access is only guaranteed on all CPU * types linux supports for 32 bit quantites */ struct pm_qos_constraints { struct plist_head list; s32 target_value; /* Do not change to 64 bit */ s32 default_value; s32 no_constraint_value; enum pm_qos_type type; struct blocking_notifier_head *notifiers; }; struct pm_qos_flags { struct list_head list; s32 effective_flags; /* Do not change to 64 bit */ }; struct dev_pm_qos { struct pm_qos_constraints resume_latency; struct pm_qos_constraints latency_tolerance; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; }; /* Action requested to pm_qos_update_target */ enum pm_qos_req_action { PM_QOS_ADD_REQ, /* Add a new request */ PM_QOS_UPDATE_REQ, /* Update an existing request */ PM_QOS_REMOVE_REQ /* Remove an existing request */ }; static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) { return req->dev != NULL; } int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, enum pm_qos_req_action action, int value); bool pm_qos_update_flags(struct pm_qos_flags *pqf, struct pm_qos_flags_request *req, enum pm_qos_req_action action, s32 val); void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, s32 value); void pm_qos_update_request(struct pm_qos_request *req, s32 new_value); void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value, unsigned long timeout_us); void pm_qos_remove_request(struct pm_qos_request *req); int pm_qos_request(int pm_qos_class); int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); int pm_qos_request_active(struct pm_qos_request *req); s32 pm_qos_read_value(struct pm_qos_constraints *c); #ifdef CONFIG_PM enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); s32 __dev_pm_qos_resume_latency(struct device *dev); s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type); int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value); int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier, enum dev_pm_qos_req_type type); int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier, enum dev_pm_qos_req_type type); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value); int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); void dev_pm_qos_hide_latency_limit(struct device *dev); int dev_pm_qos_expose_flags(struct device *dev, s32 value); void dev_pm_qos_hide_flags(struct device *dev); int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); int dev_pm_qos_expose_latency_tolerance(struct device *dev); void dev_pm_qos_hide_latency_tolerance(struct device *dev); static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 
dev->power.qos->resume_latency_req->data.pnode.prio; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return dev->power.qos->flags_req->data.flr.flags; } static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) { return IS_ERR_OR_NULL(dev->power.qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT : pm_qos_read_value(&dev->power.qos->resume_latency); } #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) { return PM_QOS_FLAGS_UNDEFINED; } static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) { return PM_QOS_FLAGS_UNDEFINED; } static inline s32 __dev_pm_qos_resume_latency(struct device *dev) { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } static inline s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type) { switch (type) { case DEV_PM_QOS_RESUME_LATENCY: return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; default: WARN_ON(1); return 0; } } static inline int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) { return 0; } static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) { return 0; } static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) { return 0; } static inline int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier, enum dev_pm_qos_req_type type) { return 0; } static inline int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier, enum dev_pm_qos_req_type type) { return 0; } static inline void dev_pm_qos_constraints_init(struct device *dev) { dev->power.power_state = PMSG_ON; } static inline void dev_pm_qos_constraints_destroy(struct device *dev) { dev->power.power_state = PMSG_INVALID; } static inline int dev_pm_qos_add_ancestor_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) { return 0; } static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) { return 0; } static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value) { return 0; } static inline void dev_pm_qos_hide_flags(struct device *dev) {} static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) { return 0; } static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) { return 0; } static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev) { return 0; } static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } #endif #define FREQ_QOS_MIN_DEFAULT_VALUE 0 #define FREQ_QOS_MAX_DEFAULT_VALUE S32_MAX enum freq_qos_req_type { FREQ_QOS_MIN = 1, FREQ_QOS_MAX, }; struct freq_constraints { struct pm_qos_constraints min_freq; struct blocking_notifier_head min_freq_notifiers; struct pm_qos_constraints max_freq; struct blocking_notifier_head max_freq_notifiers; }; struct freq_qos_request { enum freq_qos_req_type type; struct plist_node pnode; struct freq_constraints *qos; }; static 
inline int freq_qos_request_active(struct freq_qos_request *req) { return !IS_ERR_OR_NULL(req->qos); } void freq_constraints_init(struct freq_constraints *qos); s32 freq_qos_read_value(struct freq_constraints *qos, enum freq_qos_req_type type); int freq_qos_add_request(struct freq_constraints *qos, struct freq_qos_request *req, enum freq_qos_req_type type, s32 value); int freq_qos_update_request(struct freq_qos_request *req, s32 new_value); int freq_qos_remove_request(struct freq_qos_request *req); int freq_qos_add_notifier(struct freq_constraints *qos, enum freq_qos_req_type type, struct notifier_block *notifier); int freq_qos_remove_notifier(struct freq_constraints *qos, enum freq_qos_req_type type, struct notifier_block *notifier); #endif fcdevice.h 0000644 00000001406 14722070374 0006474 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. NET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the Fibre Channel handlers. * * Version: @(#)fcdevice.h 1.0.0 09/26/98 * * Authors: Vineet Abraham <vma@iol.unh.edu> * * Relocated to include/linux where it belongs by Alan Cox * <gw4pts@gw4pts.ampr.org> * * WARNING: This move may well be temporary. This file will get merged with others RSN. */ #ifndef _LINUX_FCDEVICE_H #define _LINUX_FCDEVICE_H #include <linux/if_fc.h> #ifdef __KERNEL__ struct net_device *alloc_fcdev(int sizeof_priv); #endif #endif /* _LINUX_FCDEVICE_H */ trace_clock.h 0000644 00000001233 14722070374 0007173 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TRACE_CLOCK_H #define _LINUX_TRACE_CLOCK_H /* * 3 trace clock variants, with differing scalability/precision * tradeoffs: * * - local: CPU-local trace clock * - medium: scalable global clock with some jitter * - global: globally monotonic, serialized clock */ #include <linux/compiler.h> #include <linux/types.h> #include <asm/trace_clock.h> extern u64 notrace trace_clock_local(void); extern u64 notrace trace_clock(void); extern u64 notrace trace_clock_jiffies(void); extern u64 notrace trace_clock_global(void); extern u64 notrace trace_clock_counter(void); #endif /* _LINUX_TRACE_CLOCK_H */ isapnp.h 0000644 00000006220 14722070374 0006215 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ISA Plug & Play support * Copyright (c) by Jaroslav Kysela <perex@suse.cz> */ #ifndef LINUX_ISAPNP_H #define LINUX_ISAPNP_H #include <linux/errno.h> #include <linux/pnp.h> /* * */ #define ISAPNP_VENDOR(a,b,c) (((((a)-'A'+1)&0x3f)<<2)|\ ((((b)-'A'+1)&0x18)>>3)|((((b)-'A'+1)&7)<<13)|\ ((((c)-'A'+1)&0x1f)<<8)) #define ISAPNP_DEVICE(x) ((((x)&0xf000)>>8)|\ (((x)&0x0f00)>>8)|\ (((x)&0x00f0)<<8)|\ (((x)&0x000f)<<8)) #define ISAPNP_FUNCTION(x) ISAPNP_DEVICE(x) /* * */ #ifdef __KERNEL__ #include <linux/mod_devicetable.h> #define DEVICE_COUNT_COMPATIBLE 4 #define ISAPNP_CARD_DEVS 8 #define ISAPNP_CARD_ID(_va, _vb, _vc, _device) \ .card_vendor = ISAPNP_VENDOR(_va, _vb, _vc), .card_device = ISAPNP_DEVICE(_device) #define ISAPNP_CARD_END \ .card_vendor = 0, .card_device = 0 #define ISAPNP_DEVICE_ID(_va, _vb, _vc, _function) \ { .vendor = ISAPNP_VENDOR(_va, _vb, _vc), .function = ISAPNP_FUNCTION(_function) } struct isapnp_card_id { unsigned long driver_data; /* data private to the driver */ unsigned short card_vendor, card_device; struct { unsigned short vendor, function; } devs[ISAPNP_CARD_DEVS]; /* logical devices */ }; #define ISAPNP_DEVICE_SINGLE(_cva, _cvb, _cvc, 
_cdevice, _dva, _dvb, _dvc, _dfunction) \ .card_vendor = ISAPNP_VENDOR(_cva, _cvb, _cvc), .card_device = ISAPNP_DEVICE(_cdevice), \ .vendor = ISAPNP_VENDOR(_dva, _dvb, _dvc), .function = ISAPNP_FUNCTION(_dfunction) #define ISAPNP_DEVICE_SINGLE_END \ .card_vendor = 0, .card_device = 0 #if defined(CONFIG_ISAPNP) || (defined(CONFIG_ISAPNP_MODULE) && defined(MODULE)) #define __ISAPNP__ /* lowlevel configuration */ int isapnp_present(void); int isapnp_cfg_begin(int csn, int device); int isapnp_cfg_end(void); unsigned char isapnp_read_byte(unsigned char idx); void isapnp_write_byte(unsigned char idx, unsigned char val); #ifdef CONFIG_PROC_FS int isapnp_proc_init(void); int isapnp_proc_done(void); #else static inline int isapnp_proc_init(void) { return 0; } static inline int isapnp_proc_done(void) { return 0; } #endif /* compat */ struct pnp_card *pnp_find_card(unsigned short vendor, unsigned short device, struct pnp_card *from); struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor, unsigned short function, struct pnp_dev *from); #else /* !CONFIG_ISAPNP */ /* lowlevel configuration */ static inline int isapnp_present(void) { return 0; } static inline int isapnp_cfg_begin(int csn, int device) { return -ENODEV; } static inline int isapnp_cfg_end(void) { return -ENODEV; } static inline unsigned char isapnp_read_byte(unsigned char idx) { return 0xff; } static inline void isapnp_write_byte(unsigned char idx, unsigned char val) { ; } static inline struct pnp_card *pnp_find_card(unsigned short vendor, unsigned short device, struct pnp_card *from) { return NULL; } static inline struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor, unsigned short function, struct pnp_dev *from) { return NULL; } #endif /* CONFIG_ISAPNP */ #endif /* __KERNEL__ */ #endif /* LINUX_ISAPNP_H */ license.h 0000644 00000000566 14722070374 0006354 0 ustar 00 #ifndef __LICENSE_H #define __LICENSE_H static inline int license_is_gpl_compatible(const char *license) { return (strcmp(license, "GPL") == 0 || strcmp(license, "GPL v2") == 0 || strcmp(license, "GPL and additional rights") == 0 || strcmp(license, "Dual BSD/GPL") == 0 || strcmp(license, "Dual MIT/GPL") == 0 || strcmp(license, "Dual MPL/GPL") == 0); } #endif hsi/hsi.h 0000644 00000026677 14722070374 0006313 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * HSI core header file. * * Copyright (C) 2010 Nokia Corporation. All rights reserved. 
* * Contact: Carlos Chinea <carlos.chinea@nokia.com> */ #ifndef __LINUX_HSI_H__ #define __LINUX_HSI_H__ #include <linux/device.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/list.h> #include <linux/module.h> #include <linux/notifier.h> /* HSI message ttype */ #define HSI_MSG_READ 0 #define HSI_MSG_WRITE 1 /* HSI configuration values */ enum { HSI_MODE_STREAM = 1, HSI_MODE_FRAME, }; enum { HSI_FLOW_SYNC, /* Synchronized flow */ HSI_FLOW_PIPE, /* Pipelined flow */ }; enum { HSI_ARB_RR, /* Round-robin arbitration */ HSI_ARB_PRIO, /* Channel priority arbitration */ }; #define HSI_MAX_CHANNELS 16 /* HSI message status codes */ enum { HSI_STATUS_COMPLETED, /* Message transfer is completed */ HSI_STATUS_PENDING, /* Message pending to be read/write (POLL) */ HSI_STATUS_PROCEEDING, /* Message transfer is ongoing */ HSI_STATUS_QUEUED, /* Message waiting to be served */ HSI_STATUS_ERROR, /* Error when message transfer was ongoing */ }; /* HSI port event codes */ enum { HSI_EVENT_START_RX, HSI_EVENT_STOP_RX, }; /** * struct hsi_channel - channel resource used by the hsi clients * @id: Channel number * @name: Channel name */ struct hsi_channel { unsigned int id; const char *name; }; /** * struct hsi_config - Configuration for RX/TX HSI modules * @mode: Bit transmission mode (STREAM or FRAME) * @channels: Channel resources used by the client * @num_channels: Number of channel resources * @num_hw_channels: Number of channels the transceiver is configured for [1..16] * @speed: Max bit transmission speed (Kbit/s) * @flow: RX flow type (SYNCHRONIZED or PIPELINE) * @arb_mode: Arbitration mode for TX frame (Round robin, priority) */ struct hsi_config { unsigned int mode; struct hsi_channel *channels; unsigned int num_channels; unsigned int num_hw_channels; unsigned int speed; union { unsigned int flow; /* RX only */ unsigned int arb_mode; /* TX only */ }; }; /** * struct hsi_board_info - HSI client board info * @name: Name for the HSI device * @hsi_id: HSI controller id where the client sits * @port: Port number in the controller where the client sits * @tx_cfg: HSI TX configuration * @rx_cfg: HSI RX configuration * @platform_data: Platform related data * @archdata: Architecture-dependent device data */ struct hsi_board_info { const char *name; unsigned int hsi_id; unsigned int port; struct hsi_config tx_cfg; struct hsi_config rx_cfg; void *platform_data; struct dev_archdata *archdata; }; #ifdef CONFIG_HSI_BOARDINFO extern int hsi_register_board_info(struct hsi_board_info const *info, unsigned int len); #else static inline int hsi_register_board_info(struct hsi_board_info const *info, unsigned int len) { return 0; } #endif /* CONFIG_HSI_BOARDINFO */ /** * struct hsi_client - HSI client attached to an HSI port * @device: Driver model representation of the device * @tx_cfg: HSI TX configuration * @rx_cfg: HSI RX configuration */ struct hsi_client { struct device device; struct hsi_config tx_cfg; struct hsi_config rx_cfg; /* private: */ void (*ehandler)(struct hsi_client *, unsigned long); unsigned int pclaimed:1; struct notifier_block nb; }; #define to_hsi_client(dev) container_of(dev, struct hsi_client, device) static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data) { dev_set_drvdata(&cl->device, data); } static inline void *hsi_client_drvdata(struct hsi_client *cl) { return dev_get_drvdata(&cl->device); } int hsi_register_port_event(struct hsi_client *cl, void (*handler)(struct hsi_client *, unsigned long)); int hsi_unregister_port_event(struct hsi_client *cl); 
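
/*
 * Usage sketch (assumption, not part of the original header): a client
 * claims its port and registers a handler for the HSI_EVENT_* codes
 * defined above. hsi_claim_port() is declared further down in this
 * header and is redeclared here so the sketch stands alone; the demo_*
 * names are hypothetical.
 */
int hsi_claim_port(struct hsi_client *cl, unsigned int share);

static void demo_port_event(struct hsi_client *cl, unsigned long event)
{
	switch (event) {
	case HSI_EVENT_START_RX:
		/* peer started transmitting: queue RX messages here */
		break;
	case HSI_EVENT_STOP_RX:
		/* peer stopped transmitting */
		break;
	}
}

static int demo_attach(struct hsi_client *cl)
{
	int err = hsi_claim_port(cl, 0);	/* 0: exclusive claim */

	if (err < 0)
		return err;
	return hsi_register_port_event(cl, demo_port_event);
}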
/** * struct hsi_client_driver - Driver associated to an HSI client * @driver: Driver model representation of the driver */ struct hsi_client_driver { struct device_driver driver; }; #define to_hsi_client_driver(drv) container_of(drv, struct hsi_client_driver,\ driver) int hsi_register_client_driver(struct hsi_client_driver *drv); static inline void hsi_unregister_client_driver(struct hsi_client_driver *drv) { driver_unregister(&drv->driver); } /** * struct hsi_msg - HSI message descriptor * @link: Free to use by the current descriptor owner * @cl: HSI device client that issues the transfer * @sgt: Head of the scatterlist array * @context: Client context data associated to the transfer * @complete: Transfer completion callback * @destructor: Destructor to free resources when flushing * @status: Status of the transfer when completed * @actual_len: Actual length of data transferred on completion * @channel: Channel were to TX/RX the message * @ttype: Transfer type (TX if set, RX otherwise) * @break_frame: if true HSI will send/receive a break frame. Data buffers are * ignored in the request. */ struct hsi_msg { struct list_head link; struct hsi_client *cl; struct sg_table sgt; void *context; void (*complete)(struct hsi_msg *msg); void (*destructor)(struct hsi_msg *msg); int status; unsigned int actual_len; unsigned int channel; unsigned int ttype:1; unsigned int break_frame:1; }; struct hsi_msg *hsi_alloc_msg(unsigned int n_frag, gfp_t flags); void hsi_free_msg(struct hsi_msg *msg); /** * struct hsi_port - HSI port device * @device: Driver model representation of the device * @tx_cfg: Current TX path configuration * @rx_cfg: Current RX path configuration * @num: Port number * @shared: Set when port can be shared by different clients * @claimed: Reference count of clients which claimed the port * @lock: Serialize port claim * @async: Asynchronous transfer callback * @setup: Callback to set the HSI client configuration * @flush: Callback to clean the HW state and destroy all pending transfers * @start_tx: Callback to inform that a client wants to TX data * @stop_tx: Callback to inform that a client no longer wishes to TX data * @release: Callback to inform that a client no longer uses the port * @n_head: Notifier chain for signaling port events to the clients. 
*/ struct hsi_port { struct device device; struct hsi_config tx_cfg; struct hsi_config rx_cfg; unsigned int num; unsigned int shared:1; int claimed; struct mutex lock; int (*async)(struct hsi_msg *msg); int (*setup)(struct hsi_client *cl); int (*flush)(struct hsi_client *cl); int (*start_tx)(struct hsi_client *cl); int (*stop_tx)(struct hsi_client *cl); int (*release)(struct hsi_client *cl); /* private */ struct blocking_notifier_head n_head; }; #define to_hsi_port(dev) container_of(dev, struct hsi_port, device) #define hsi_get_port(cl) to_hsi_port((cl)->device.parent) int hsi_event(struct hsi_port *port, unsigned long event); int hsi_claim_port(struct hsi_client *cl, unsigned int share); void hsi_release_port(struct hsi_client *cl); static inline int hsi_port_claimed(struct hsi_client *cl) { return cl->pclaimed; } static inline void hsi_port_set_drvdata(struct hsi_port *port, void *data) { dev_set_drvdata(&port->device, data); } static inline void *hsi_port_drvdata(struct hsi_port *port) { return dev_get_drvdata(&port->device); } /** * struct hsi_controller - HSI controller device * @device: Driver model representation of the device * @owner: Pointer to the module owning the controller * @id: HSI controller ID * @num_ports: Number of ports in the HSI controller * @port: Array of HSI ports */ struct hsi_controller { struct device device; struct module *owner; unsigned int id; unsigned int num_ports; struct hsi_port **port; }; #define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device) struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags); void hsi_put_controller(struct hsi_controller *hsi); int hsi_register_controller(struct hsi_controller *hsi); void hsi_unregister_controller(struct hsi_controller *hsi); struct hsi_client *hsi_new_client(struct hsi_port *port, struct hsi_board_info *info); int hsi_remove_client(struct device *dev, void *data); void hsi_port_unregister_clients(struct hsi_port *port); #ifdef CONFIG_OF void hsi_add_clients_from_dt(struct hsi_port *port, struct device_node *clients); #else static inline void hsi_add_clients_from_dt(struct hsi_port *port, struct device_node *clients) { return; } #endif static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi, void *data) { dev_set_drvdata(&hsi->device, data); } static inline void *hsi_controller_drvdata(struct hsi_controller *hsi) { return dev_get_drvdata(&hsi->device); } static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi, unsigned int num) { return (num < hsi->num_ports) ? hsi->port[num] : NULL; } /* * API for HSI clients */ int hsi_async(struct hsi_client *cl, struct hsi_msg *msg); int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name); /** * hsi_id - Get HSI controller ID associated to a client * @cl: Pointer to a HSI client * * Return the controller id where the client is attached to */ static inline unsigned int hsi_id(struct hsi_client *cl) { return to_hsi_controller(cl->device.parent->parent)->id; } /** * hsi_port_id - Gets the port number a client is attached to * @cl: Pointer to HSI client * * Return the port number associated to the client */ static inline unsigned int hsi_port_id(struct hsi_client *cl) { return to_hsi_port(cl->device.parent)->num; } /** * hsi_setup - Configure the client's port * @cl: Pointer to the HSI client * * When sharing ports, clients should either relay on a single * client setup or have the same setup for all of them. 
* * Return -errno on failure, 0 on success */ static inline int hsi_setup(struct hsi_client *cl) { if (!hsi_port_claimed(cl)) return -EACCES; return hsi_get_port(cl)->setup(cl); } /** * hsi_flush - Flush all pending transactions on the client's port * @cl: Pointer to the HSI client * * This function will destroy all pending hsi_msg in the port and reset * the HW port so it is ready to receive and transmit from a clean state. * * Return -errno on failure, 0 on success */ static inline int hsi_flush(struct hsi_client *cl) { if (!hsi_port_claimed(cl)) return -EACCES; return hsi_get_port(cl)->flush(cl); } /** * hsi_async_read - Submit a read transfer * @cl: Pointer to the HSI client * @msg: HSI message descriptor of the transfer * * Return -errno on failure, 0 on success */ static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg) { msg->ttype = HSI_MSG_READ; return hsi_async(cl, msg); } /** * hsi_async_write - Submit a write transfer * @cl: Pointer to the HSI client * @msg: HSI message descriptor of the transfer * * Return -errno on failure, 0 on success */ static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg) { msg->ttype = HSI_MSG_WRITE; return hsi_async(cl, msg); } /** * hsi_start_tx - Signal the port that the client wants to start a TX * @cl: Pointer to the HSI client * * Return -errno on failure, 0 on success */ static inline int hsi_start_tx(struct hsi_client *cl) { if (!hsi_port_claimed(cl)) return -EACCES; return hsi_get_port(cl)->start_tx(cl); } /** * hsi_stop_tx - Signal the port that the client no longer wants to transmit * @cl: Pointer to the HSI client * * Return -errno on failure, 0 on success */ static inline int hsi_stop_tx(struct hsi_client *cl) { if (!hsi_port_claimed(cl)) return -EACCES; return hsi_get_port(cl)->stop_tx(cl); } #endif /* __LINUX_HSI_H__ */ hsi/ssi_protocol.h 0000644 00000001274 14722070374 0010231 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * ssip_slave.h * * SSIP slave support header file * * Copyright (C) 2010 Nokia Corporation. All rights reserved. * * Contact: Carlos Chinea <carlos.chinea@nokia.com> */ #ifndef __LINUX_SSIP_SLAVE_H__ #define __LINUX_SSIP_SLAVE_H__ #include <linux/hsi/hsi.h> static inline void ssip_slave_put_master(struct hsi_client *master) { } struct hsi_client *ssip_slave_get_master(struct hsi_client *slave); int ssip_slave_start_tx(struct hsi_client *master); int ssip_slave_stop_tx(struct hsi_client *master); void ssip_reset_event(struct hsi_client *master); int ssip_slave_running(struct hsi_client *master); #endif /* __LINUX_SSIP_SLAVE_H__ */ rhashtable.h 0000644 00000113505 14722070374 0007045 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Resizable, Scalable, Concurrent Hash Table * * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au> * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> * * Code partially derived from nft_hash * Rewritten with rehash code from br_multicast plus single list * pointer as suggested by Josh Triplett * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
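*/

/*
 * Usage sketch (assumption, mirroring the common rhashtable pattern
 * rather than anything specific to this header): an object with an
 * embedded rhash_head, keyed by a u32 id. The demo_* names are
 * hypothetical.
 */
#include <linux/stddef.h>
#include <linux/rhashtable.h>

struct demo_obj {
	u32 id;				/* lookup key */
	struct rhash_head node;		/* hash chain linkage */
};

static const struct rhashtable_params demo_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct demo_obj, id),
	.head_offset	= offsetof(struct demo_obj, node),
	.automatic_shrinking = true,
};

static int demo_add(struct rhashtable *ht, struct demo_obj *obj)
{
	/* takes the per-bucket bit lock; safe from atomic context */
	return rhashtable_insert_fast(ht, &obj->node, demo_params);
}

/*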
*/ #ifndef _LINUX_RHASHTABLE_H #define _LINUX_RHASHTABLE_H #include <linux/err.h> #include <linux/errno.h> #include <linux/jhash.h> #include <linux/list_nulls.h> #include <linux/workqueue.h> #include <linux/rculist.h> #include <linux/bit_spinlock.h> #include <linux/rhashtable-types.h> /* * Objects in an rhashtable have an embedded struct rhash_head * which is linked into as hash chain from the hash table - or one * of two or more hash tables when the rhashtable is being resized. * The end of the chain is marked with a special nulls marks which has * the least significant bit set but otherwise stores the address of * the hash bucket. This allows us to be be sure we've found the end * of the right list. * The value stored in the hash bucket has BIT(0) used as a lock bit. * This bit must be atomically set before any changes are made to * the chain. To avoid dereferencing this pointer without clearing * the bit first, we use an opaque 'struct rhash_lock_head *' for the * pointer stored in the bucket. This struct needs to be defined so * that rcu_dereference() works on it, but it has no content so a * cast is needed for it to be useful. This ensures it isn't * used by mistake with clearing the lock bit first. */ struct rhash_lock_head {}; /* Maximum chain length before rehash * * The maximum (not average) chain length grows with the size of the hash * table, at a rate of (log N)/(log log N). * * The value of 16 is selected so that even if the hash table grew to * 2^32 you would not expect the maximum chain length to exceed it * unless we are under attack (or extremely unlucky). * * As this limit is only to detect attacks, we don't need to set it to a * lower value as you'd need the chain length to vastly exceed 16 to have * any real effect on the system. */ #define RHT_ELASTICITY 16u /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets * @nest: Number of bits of first-level nested table. * @rehash: Current bucket being rehashed * @hash_rnd: Random seed to fold into hash * @walkers: List of active walkers * @rcu: RCU structure for freeing the table * @future_tbl: Table under construction during rehashing * @ntbl: Nested table used when out of memory. * @buckets: size * hash buckets */ struct bucket_table { unsigned int size; unsigned int nest; u32 hash_rnd; struct list_head walkers; struct rcu_head rcu; struct bucket_table __rcu *future_tbl; struct lockdep_map dep_map; struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp; }; /* * NULLS_MARKER() expects a hash value with the low * bits mostly likely to be significant, and it discards * the msb. * We give it an address, in which the bottom bit is * always 0, and the msb might be significant. * So we shift the address down one bit to align with * expectations and avoid losing a significant bit. * * We never store the NULLS_MARKER in the hash table * itself as we need the lsb for locking. 
* Instead we store a NULL */ #define RHT_NULLS_MARKER(ptr) \ ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1)) #define INIT_RHT_NULLS_HEAD(ptr) \ ((ptr) = NULL) static inline bool rht_is_a_nulls(const struct rhash_head *ptr) { return ((unsigned long) ptr & 1); } static inline void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) { return (char *)he - ht->p.head_offset; } static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, unsigned int hash) { return hash & (tbl->size - 1); } static inline unsigned int rht_key_get_hash(struct rhashtable *ht, const void *key, const struct rhashtable_params params, unsigned int hash_rnd) { unsigned int hash; /* params must be equal to ht->p if it isn't constant. */ if (!__builtin_constant_p(params.key_len)) hash = ht->p.hashfn(key, ht->key_len, hash_rnd); else if (params.key_len) { unsigned int key_len = params.key_len; if (params.hashfn) hash = params.hashfn(key, key_len, hash_rnd); else if (key_len & (sizeof(u32) - 1)) hash = jhash(key, key_len, hash_rnd); else hash = jhash2(key, key_len / sizeof(u32), hash_rnd); } else { unsigned int key_len = ht->p.key_len; if (params.hashfn) hash = params.hashfn(key, key_len, hash_rnd); else hash = jhash(key, key_len, hash_rnd); } return hash; } static inline unsigned int rht_key_hashfn( struct rhashtable *ht, const struct bucket_table *tbl, const void *key, const struct rhashtable_params params) { unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); return rht_bucket_index(tbl, hash); } static inline unsigned int rht_head_hashfn( struct rhashtable *ht, const struct bucket_table *tbl, const struct rhash_head *he, const struct rhashtable_params params) { const char *ptr = rht_obj(ht, he); return likely(params.obj_hashfn) ? 
rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?: ht->p.key_len, tbl->hash_rnd)) : rht_key_hashfn(ht, tbl, ptr + params.key_offset, params); } /** * rht_grow_above_75 - returns true if nelems > 0.75 * table-size * @ht: hash table * @tbl: current table */ static inline bool rht_grow_above_75(const struct rhashtable *ht, const struct bucket_table *tbl) { /* Expand table when exceeding 75% load */ return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) && (!ht->p.max_size || tbl->size < ht->p.max_size); } /** * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size * @ht: hash table * @tbl: current table */ static inline bool rht_shrink_below_30(const struct rhashtable *ht, const struct bucket_table *tbl) { /* Shrink table beneath 30% load */ return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) && tbl->size > ht->p.min_size; } /** * rht_grow_above_100 - returns true if nelems > table-size * @ht: hash table * @tbl: current table */ static inline bool rht_grow_above_100(const struct rhashtable *ht, const struct bucket_table *tbl) { return atomic_read(&ht->nelems) > tbl->size && (!ht->p.max_size || tbl->size < ht->p.max_size); } /** * rht_grow_above_max - returns true if table is above maximum * @ht: hash table * @tbl: current table */ static inline bool rht_grow_above_max(const struct rhashtable *ht, const struct bucket_table *tbl) { return atomic_read(&ht->nelems) >= ht->max_elems; } #ifdef CONFIG_PROVE_LOCKING int lockdep_rht_mutex_is_held(struct rhashtable *ht); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); #else static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht) { return 1; } static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) { return 1; } #endif /* CONFIG_PROVE_LOCKING */ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj); void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU); static inline void rhashtable_walk_start(struct rhashtable_iter *iter) { (void)rhashtable_walk_start_check(iter); } void *rhashtable_walk_next(struct rhashtable_iter *iter); void *rhashtable_walk_peek(struct rhashtable_iter *iter); void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); void rhashtable_free_and_destroy(struct rhashtable *ht, void (*free_fn)(void *ptr, void *arg), void *arg); void rhashtable_destroy(struct rhashtable *ht); struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, unsigned int hash); struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, unsigned int hash); struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash); #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) #define rht_dereference_rcu(p, ht) \ rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht)) #define rht_dereference_bucket(p, tbl, hash) \ rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash)) #define rht_dereference_bucket_rcu(p, tbl, hash) \ rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash)) #define rht_entry(tpos, pos, member) \ ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) static inline struct rhash_lock_head *const *rht_bucket( const struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? 
rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } static inline struct rhash_lock_head **rht_bucket_var( struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } static inline struct rhash_lock_head **rht_bucket_insert( struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : &tbl->buckets[hash]; } /* * We lock a bucket by setting BIT(0) in the pointer - this is always * zero in real pointers. The NULLS mark is never stored in the bucket, * rather we store NULL if the bucket is empty. * bit_spin_locks do not handle contention well, but the whole point * of the hashtable design is to achieve minimum per-bucket contention. * A nested hash table might not have a bucket pointer. In that case * we cannot get a lock. For remove and replace the bucket cannot be * interesting and doesn't need locking. * For insert we allocate the bucket if this is the last bucket_table, * and then take the lock. * Sometimes we unlock a bucket by writing a new pointer there. In that * case we don't need to unlock, but we do need to reset state such as * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer() * provides the same release semantics that bit_spin_unlock() provides, * this is safe. * When we write to a bucket without unlocking, we use rht_assign_locked(). */ static inline void rht_lock(struct bucket_table *tbl, struct rhash_lock_head **bkt) { local_bh_disable(); bit_spin_lock(0, (unsigned long *)bkt); lock_map_acquire(&tbl->dep_map); } static inline void rht_lock_nested(struct bucket_table *tbl, struct rhash_lock_head **bucket, unsigned int subclass) { local_bh_disable(); bit_spin_lock(0, (unsigned long *)bucket); lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_); } static inline void rht_unlock(struct bucket_table *tbl, struct rhash_lock_head **bkt) { lock_map_release(&tbl->dep_map); bit_spin_unlock(0, (unsigned long *)bkt); local_bh_enable(); } static inline struct rhash_head *__rht_ptr( struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) { return (struct rhash_head *) ((unsigned long)p & ~BIT(0) ?: (unsigned long)RHT_NULLS_MARKER(bkt)); } /* * Where 'bkt' is a bucket and might be locked: * rht_ptr_rcu() dereferences that pointer and clears the lock bit. * rht_ptr() dereferences in a context where the bucket is locked. * rht_ptr_exclusive() dereferences in a context where exclusive * access is guaranteed, such as when destroying the table. 
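*/

/*
 * Usage sketch (assumption, not part of the original header): the
 * locking discipline described above applied to a single bucket; tbl
 * and hash are assumed valid, and the helper name is hypothetical.
 */
static inline void demo_lock_bucket(struct bucket_table *tbl,
				    unsigned int hash)
{
	struct rhash_lock_head **bkt = rht_bucket_var(tbl, hash);

	if (!bkt)			/* nested table without this bucket */
		return;
	rht_lock(tbl, bkt);		/* disables BH, sets BIT(0) */
	/* ... update the chain with rht_assign_locked() ... */
	rht_unlock(tbl, bkt);		/* clears BIT(0), re-enables BH */
}

/*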
*/ static inline struct rhash_head *rht_ptr_rcu( struct rhash_lock_head *const *p) { struct rhash_lock_head __rcu *const *bkt = (void *)p; return __rht_ptr(rcu_dereference(*bkt), bkt); } static inline struct rhash_head *rht_ptr( struct rhash_lock_head *const *p, struct bucket_table *tbl, unsigned int hash) { struct rhash_lock_head __rcu *const *bkt = (void *)p; return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); } static inline struct rhash_head *rht_ptr_exclusive( struct rhash_lock_head *const *p) { struct rhash_lock_head __rcu *const *bkt = (void *)p; return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); } static inline void rht_assign_locked(struct rhash_lock_head **bkt, struct rhash_head *obj) { struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; if (rht_is_a_nulls(obj)) obj = NULL; rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0))); } static inline void rht_assign_unlock(struct bucket_table *tbl, struct rhash_lock_head **bkt, struct rhash_head *obj) { struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; if (rht_is_a_nulls(obj)) obj = NULL; lock_map_release(&tbl->dep_map); rcu_assign_pointer(*p, obj); preempt_enable(); __release(bitlock); local_bh_enable(); } /** * rht_for_each_from - iterate over hash chain from given head * @pos: the &struct rhash_head to use as a loop cursor. * @head: the &struct rhash_head to start from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index */ #define rht_for_each_from(pos, head, tbl, hash) \ for (pos = head; \ !rht_is_a_nulls(pos); \ pos = rht_dereference_bucket((pos)->next, tbl, hash)) /** * rht_for_each - iterate over hash chain * @pos: the &struct rhash_head to use as a loop cursor. * @tbl: the &struct bucket_table * @hash: the hash value / bucket index */ #define rht_for_each(pos, tbl, hash) \ rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ tbl, hash) /** * rht_for_each_entry_from - iterate over hash chain from given head * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. * @head: the &struct rhash_head to start from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. */ #define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \ for (pos = head; \ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = rht_dereference_bucket((pos)->next, tbl, hash)) /** * rht_for_each_entry - iterate over hash chain of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. */ #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ rht_for_each_entry_from(tpos, pos, \ rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ tbl, hash, member) /** * rht_for_each_entry_safe - safely iterate over hash chain of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. * @next: the &struct rhash_head to use as next in loop cursor. * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive allows for the looped code to * remove the loop cursor from the list. 
*/ #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \ next = !rht_is_a_nulls(pos) ? \ rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = next, \ next = !rht_is_a_nulls(pos) ? \ rht_dereference_bucket(pos->next, tbl, hash) : NULL) /** * rht_for_each_rcu_from - iterate over rcu hash chain from given head * @pos: the &struct rhash_head to use as a loop cursor. * @head: the &struct rhash_head to start from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * * This hash chain list-traversal primitive may safely run concurrently with * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ #define rht_for_each_rcu_from(pos, head, tbl, hash) \ for (({barrier(); }), \ pos = head; \ !rht_is_a_nulls(pos); \ pos = rcu_dereference_raw(pos->next)) /** * rht_for_each_rcu - iterate over rcu hash chain * @pos: the &struct rhash_head to use as a loop cursor. * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * * This hash chain list-traversal primitive may safely run concurrently with * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ #define rht_for_each_rcu(pos, tbl, hash) \ for (({barrier(); }), \ pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \ !rht_is_a_nulls(pos); \ pos = rcu_dereference_raw(pos->next)) /** * rht_for_each_entry_rcu_from - iterated over rcu hash chain from given head * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. * @head: the &struct rhash_head to start from * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive may safely run concurrently with * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ #define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \ for (({barrier(); }), \ pos = head; \ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ pos = rht_dereference_bucket_rcu(pos->next, tbl, hash)) /** * rht_for_each_entry_rcu - iterate over rcu hash chain of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct rhash_head to use as a loop cursor. * @tbl: the &struct bucket_table * @hash: the hash value / bucket index * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive may safely run concurrently with * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ rht_for_each_entry_rcu_from(tpos, pos, \ rht_ptr_rcu(rht_bucket(tbl, hash)), \ tbl, hash, member) /** * rhl_for_each_rcu - iterate over rcu hash table list * @pos: the &struct rlist_head to use as a loop cursor. * @list: the head of the list * * This hash chain list-traversal primitive should be used on the * list returned by rhltable_lookup. */ #define rhl_for_each_rcu(pos, list) \ for (pos = list; pos; pos = rcu_dereference_raw(pos->next)) /** * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct rlist_head to use as a loop cursor. 
* @list: the head of the list * @member: name of the &struct rlist_head within the hashable struct. * * This hash chain list-traversal primitive should be used on the * list returned by rhltable_lookup. */ #define rhl_for_each_entry_rcu(tpos, pos, list, member) \ for (pos = list; pos && rht_entry(tpos, pos, member); \ pos = rcu_dereference_raw(pos->next)) static inline int rhashtable_compare(struct rhashtable_compare_arg *arg, const void *obj) { struct rhashtable *ht = arg->ht; const char *ptr = obj; return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len); } /* Internal function, do not use. */ static inline struct rhash_head *__rhashtable_lookup( struct rhashtable *ht, const void *key, const struct rhashtable_params params) { struct rhashtable_compare_arg arg = { .ht = ht, .key = key, }; struct rhash_lock_head *const *bkt; struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; tbl = rht_dereference_rcu(ht->tbl, ht); restart: hash = rht_key_hashfn(ht, tbl, key, params); bkt = rht_bucket(tbl, hash); do { rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) { if (params.obj_cmpfn ? params.obj_cmpfn(&arg, rht_obj(ht, he)) : rhashtable_compare(&arg, rht_obj(ht, he))) continue; return he; } /* An object might have been moved to a different hash chain, * while we walk along it - better check and retry. */ } while (he != RHT_NULLS_MARKER(bkt)); /* Ensure we see any new tables. */ smp_rmb(); tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (unlikely(tbl)) goto restart; return NULL; } /** * rhashtable_lookup - search hash table * @ht: hash table * @key: the pointer to the key * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking * for a entry with an identical key. The first matching entry is returned. * * This must only be called under the RCU read lock. * * Returns the first entry on which the compare function returned true. */ static inline void *rhashtable_lookup( struct rhashtable *ht, const void *key, const struct rhashtable_params params) { struct rhash_head *he = __rhashtable_lookup(ht, key, params); return he ? rht_obj(ht, he) : NULL; } /** * rhashtable_lookup_fast - search hash table, without RCU read lock * @ht: hash table * @key: the pointer to the key * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking * for a entry with an identical key. The first matching entry is returned. * * Only use this function when you have other mechanisms guaranteeing * that the object won't go away after the RCU read lock is released. * * Returns the first entry on which the compare function returned true. */ static inline void *rhashtable_lookup_fast( struct rhashtable *ht, const void *key, const struct rhashtable_params params) { void *obj; rcu_read_lock(); obj = rhashtable_lookup(ht, key, params); rcu_read_unlock(); return obj; } /** * rhltable_lookup - search hash list table * @hlt: hash table * @key: the pointer to the key * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking * for a entry with an identical key. All matching entries are returned * in a list. * * This must only be called under the RCU read lock. * * Returns the list of entries that match the given key. */ static inline struct rhlist_head *rhltable_lookup( struct rhltable *hlt, const void *key, const struct rhashtable_params params) { struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params); return he ? 
container_of(he, struct rhlist_head, rhead) : NULL; } /* Internal function, please use rhashtable_insert_fast() instead. This * function returns the existing element already in hashes if there is a clash, * otherwise it returns an error via ERR_PTR(). */ static inline void *__rhashtable_insert_fast( struct rhashtable *ht, const void *key, struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { struct rhashtable_compare_arg arg = { .ht = ht, .key = key, }; struct rhash_lock_head **bkt; struct rhash_head __rcu **pprev; struct bucket_table *tbl; struct rhash_head *head; unsigned int hash; int elasticity; void *data; rcu_read_lock(); tbl = rht_dereference_rcu(ht->tbl, ht); hash = rht_head_hashfn(ht, tbl, obj, params); elasticity = RHT_ELASTICITY; bkt = rht_bucket_insert(ht, tbl, hash); data = ERR_PTR(-ENOMEM); if (!bkt) goto out; pprev = NULL; rht_lock(tbl, bkt); if (unlikely(rcu_access_pointer(tbl->future_tbl))) { slow_path: rht_unlock(tbl, bkt); rcu_read_unlock(); return rhashtable_insert_slow(ht, key, obj); } rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { struct rhlist_head *plist; struct rhlist_head *list; elasticity--; if (!key || (params.obj_cmpfn ? params.obj_cmpfn(&arg, rht_obj(ht, head)) : rhashtable_compare(&arg, rht_obj(ht, head)))) { pprev = &head->next; continue; } data = rht_obj(ht, head); if (!rhlist) goto out_unlock; list = container_of(obj, struct rhlist_head, rhead); plist = container_of(head, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, plist); head = rht_dereference_bucket(head->next, tbl, hash); RCU_INIT_POINTER(list->rhead.next, head); if (pprev) { rcu_assign_pointer(*pprev, obj); rht_unlock(tbl, bkt); } else rht_assign_unlock(tbl, bkt, obj); data = NULL; goto out; } if (elasticity <= 0) goto slow_path; data = ERR_PTR(-E2BIG); if (unlikely(rht_grow_above_max(ht, tbl))) goto out_unlock; if (unlikely(rht_grow_above_100(ht, tbl))) goto slow_path; /* Inserting at head of list makes unlocking free. */ head = rht_ptr(bkt, tbl, hash); RCU_INIT_POINTER(obj->next, head); if (rhlist) { struct rhlist_head *list; list = container_of(obj, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, NULL); } atomic_inc(&ht->nelems); rht_assign_unlock(tbl, bkt, obj); if (rht_grow_above_75(ht, tbl)) schedule_work(&ht->run_work); data = NULL; out: rcu_read_unlock(); return data; out_unlock: rht_unlock(tbl, bkt); goto out; } /** * rhashtable_insert_fast - insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object * @params: hash table parameters * * Will take the per bucket bitlock to protect against mutual mutations * on the same bucket. Multiple insertions may occur in parallel unless * they map to the same bucket. * * It is safe to call this function from atomic context. * * Will trigger an automatic deferred table resizing if residency in the * table grows beyond 70%. */ static inline int rhashtable_insert_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params) { void *ret; ret = __rhashtable_insert_fast(ht, NULL, obj, params, false); if (IS_ERR(ret)) return PTR_ERR(ret); return ret == NULL ? 0 : -EEXIST; } /** * rhltable_insert_key - insert object into hash list table * @hlt: hash list table * @key: the pointer to the key * @list: pointer to hash list head inside object * @params: hash table parameters * * Will take the per bucket bitlock to protect against mutual mutations * on the same bucket. Multiple insertions may occur in parallel unless * they map to the same bucket.
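 *
 * A minimal call site looks like this (a sketch; the embedding object
 * layout, its "key" and "list_node" members, and the rhashtable_params
 * value "my_params" are assumptions of the example, not part of this API):
 *
 *	err = rhltable_insert_key(&hlt, &obj->key, &obj->list_node,
 *				  my_params);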
* * It is safe to call this function from atomic context. * * Will trigger an automatic deferred table resizing if residency in the * table grows beyond 70%. */ static inline int rhltable_insert_key( struct rhltable *hlt, const void *key, struct rhlist_head *list, const struct rhashtable_params params) { return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead, params, true)); } /** * rhltable_insert - insert object into hash list table * @hlt: hash list table * @list: pointer to hash list head inside object * @params: hash table parameters * * Will take the per bucket bitlock to protect against mutual mutations * on the same bucket. Multiple insertions may occur in parallel unless * they map to the same bucket. * * It is safe to call this function from atomic context. * * Will trigger an automatic deferred table resizing if residency in the * table grows beyond 70%. */ static inline int rhltable_insert( struct rhltable *hlt, struct rhlist_head *list, const struct rhashtable_params params) { const char *key = rht_obj(&hlt->ht, &list->rhead); key += params.key_offset; return rhltable_insert_key(hlt, key, list, params); } /** * rhashtable_lookup_insert_fast - lookup and insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object * @params: hash table parameters * * This lookup function may only be used for fixed key hash table (key_len * parameter set). It will BUG() if used inappropriately. * * It is safe to call this function from atomic context. * * Will trigger an automatic deferred table resizing if residency in the * table grows beyond 70%. */ static inline int rhashtable_lookup_insert_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params) { const char *key = rht_obj(ht, obj); void *ret; BUG_ON(ht->p.obj_hashfn); ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, false); if (IS_ERR(ret)) return PTR_ERR(ret); return ret == NULL ? 0 : -EEXIST; } /** * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object * @params: hash table parameters * * Just like rhashtable_lookup_insert_fast(), but this function returns the * object if it exists, NULL if it did not and the insertion was successful, * and an ERR_PTR otherwise. */ static inline void *rhashtable_lookup_get_insert_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params) { const char *key = rht_obj(ht, obj); BUG_ON(ht->p.obj_hashfn); return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, false); } /** * rhashtable_lookup_insert_key - search and insert object to hash table * with explicit key * @ht: hash table * @key: key * @obj: pointer to hash head inside object * @params: hash table parameters * * Lookups may occur in parallel with hashtable mutations and resizing. * * Will trigger an automatic deferred table resizing if residency in the * table grows beyond 70%. * * Returns zero on success. */ static inline int rhashtable_lookup_insert_key( struct rhashtable *ht, const void *key, struct rhash_head *obj, const struct rhashtable_params params) { void *ret; BUG_ON(!ht->p.obj_hashfn || !key); ret = __rhashtable_insert_fast(ht, key, obj, params, false); if (IS_ERR(ret)) return PTR_ERR(ret); return ret == NULL ? 
0 : -EEXIST; } /** * rhashtable_lookup_get_insert_key - lookup and insert object into hash table * @ht: hash table * @key: the pointer to the key * @obj: pointer to hash head inside object * @params: hash table parameters * * Just like rhashtable_lookup_insert_key(), but this function returns the * object if it exists, NULL if it does not and the insertion was successful, * and an ERR_PTR otherwise. */ static inline void *rhashtable_lookup_get_insert_key( struct rhashtable *ht, const void *key, struct rhash_head *obj, const struct rhashtable_params params) { BUG_ON(!ht->p.obj_hashfn || !key); return __rhashtable_insert_fast(ht, key, obj, params, false); } /* Internal function, please use rhashtable_remove_fast() instead */ static inline int __rhashtable_remove_fast_one( struct rhashtable *ht, struct bucket_table *tbl, struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { struct rhash_lock_head **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; unsigned int hash; int err = -ENOENT; hash = rht_head_hashfn(ht, tbl, obj, params); bkt = rht_bucket_var(tbl, hash); if (!bkt) return -ENOENT; pprev = NULL; rht_lock(tbl, bkt); rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { struct rhlist_head *list; list = container_of(he, struct rhlist_head, rhead); if (he != obj) { struct rhlist_head __rcu **lpprev; pprev = &he->next; if (!rhlist) continue; do { lpprev = &list->next; list = rht_dereference_bucket(list->next, tbl, hash); } while (list && obj != &list->rhead); if (!list) continue; list = rht_dereference_bucket(list->next, tbl, hash); RCU_INIT_POINTER(*lpprev, list); err = 0; break; } obj = rht_dereference_bucket(obj->next, tbl, hash); err = 1; if (rhlist) { list = rht_dereference_bucket(list->next, tbl, hash); if (list) { RCU_INIT_POINTER(list->rhead.next, obj); obj = &list->rhead; err = 0; } } if (pprev) { rcu_assign_pointer(*pprev, obj); rht_unlock(tbl, bkt); } else { rht_assign_unlock(tbl, bkt, obj); } goto unlocked; } rht_unlock(tbl, bkt); unlocked: if (err > 0) { atomic_dec(&ht->nelems); if (unlikely(ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))) schedule_work(&ht->run_work); err = 0; } return err; } /* Internal function, please use rhashtable_remove_fast() instead */ static inline int __rhashtable_remove_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { struct bucket_table *tbl; int err; rcu_read_lock(); tbl = rht_dereference_rcu(ht->tbl, ht); /* Because we have already taken (and released) the bucket * lock in old_tbl, if we find that future_tbl is not yet * visible then that guarantees the entry to still be in * the old tbl if it exists. */ while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params, rhlist)) && (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) ; rcu_read_unlock(); return err; } /** * rhashtable_remove_fast - remove object from hash table * @ht: hash table * @obj: pointer to hash head inside object * @params: hash table parameters * * Since the hash chain is single linked, the removal operation needs to * walk the bucket chain upon removal. The removal operation is thus * considerably slow if the hash table is not correctly sized. * * Will automatically shrink the table if permitted when residency drops * below 30%. * * Returns zero on success, -ENOENT if the entry could not be found.
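 *
 * A typical call site pairs the removal with an RCU-deferred free (a
 * sketch; "node" is the object's rhash_head member and "rcu" an
 * rcu_head member, both assumed to exist in the caller's struct):
 *
 *	if (rhashtable_remove_fast(&ht, &obj->node, my_params) == 0)
 *		kfree_rcu(obj, rcu);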
*/ static inline int rhashtable_remove_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params) { return __rhashtable_remove_fast(ht, obj, params, false); } /** * rhltable_remove - remove object from hash list table * @hlt: hash list table * @list: pointer to hash list head inside object * @params: hash table parameters * * Since the hash chain is single linked, the removal operation needs to * walk the bucket chain upon removal. The removal operation is thus * considerably slow if the hash table is not correctly sized. * * Will automatically shrink the table if permitted when residency drops * below 30%. * * Returns zero on success, -ENOENT if the entry could not be found. */ static inline int rhltable_remove( struct rhltable *hlt, struct rhlist_head *list, const struct rhashtable_params params) { return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true); } /* Internal function, please use rhashtable_replace_fast() instead */ static inline int __rhashtable_replace_fast( struct rhashtable *ht, struct bucket_table *tbl, struct rhash_head *obj_old, struct rhash_head *obj_new, const struct rhashtable_params params) { struct rhash_lock_head **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; unsigned int hash; int err = -ENOENT; /* Minimally, the old and new objects must have same hash * (which should mean identifiers are the same). */ hash = rht_head_hashfn(ht, tbl, obj_old, params); if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) return -EINVAL; bkt = rht_bucket_var(tbl, hash); if (!bkt) return -ENOENT; pprev = NULL; rht_lock(tbl, bkt); rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { if (he != obj_old) { pprev = &he->next; continue; } rcu_assign_pointer(obj_new->next, obj_old->next); if (pprev) { rcu_assign_pointer(*pprev, obj_new); rht_unlock(tbl, bkt); } else { rht_assign_unlock(tbl, bkt, obj_new); } err = 0; goto unlocked; } rht_unlock(tbl, bkt); unlocked: return err; } /** * rhashtable_replace_fast - replace an object in hash table * @ht: hash table * @obj_old: pointer to hash head inside object being replaced * @obj_new: pointer to hash head inside object which is new * @params: hash table parameters * * Replacing an object doesn't affect the number of elements in the hash table * or bucket, so we don't need to worry about shrinking or expanding the * table here. * * Returns zero on success, -ENOENT if the entry could not be found, * -EINVAL if hash is not the same for the old and new objects. */ static inline int rhashtable_replace_fast( struct rhashtable *ht, struct rhash_head *obj_old, struct rhash_head *obj_new, const struct rhashtable_params params) { struct bucket_table *tbl; int err; rcu_read_lock(); tbl = rht_dereference_rcu(ht->tbl, ht); /* Because we have already taken (and released) the bucket * lock in old_tbl, if we find that future_tbl is not yet * visible then that guarantees the entry to still be in * the old tbl if it exists. */ while ((err = __rhashtable_replace_fast(ht, tbl, obj_old, obj_new, params)) && (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) ; rcu_read_unlock(); return err; } /** * rhltable_walk_enter - Initialise an iterator * @hlt: Table to walk over * @iter: Hash table Iterator * * This function prepares a hash table walk. * * Note that if you restart a walk after rhashtable_walk_stop you * may see the same object twice. Also, you may miss objects if * there are removals in between rhashtable_walk_stop and the next * call to rhashtable_walk_start.
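 *
 * A typical walk therefore looks like this (a sketch; the object type
 * and per-object processing are caller-specific):
 *
 *	rhltable_walk_enter(&hlt, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;	/* -EAGAIN: the table was resized */
 *		/* process obj */
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);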
* * For a completely stable walk you should construct your own data * structure outside the hash table. * * This function may be called from any process context, including * non-preemptable context, but cannot be called from softirq or * hardirq context. * * You must call rhashtable_walk_exit after this function returns. */ static inline void rhltable_walk_enter(struct rhltable *hlt, struct rhashtable_iter *iter) { return rhashtable_walk_enter(&hlt->ht, iter); } /** * rhltable_free_and_destroy - free elements and destroy hash list table * @hlt: the hash list table to destroy * @free_fn: callback to release resources of element * @arg: pointer passed to free_fn * * See documentation for rhashtable_free_and_destroy. */ static inline void rhltable_free_and_destroy(struct rhltable *hlt, void (*free_fn)(void *ptr, void *arg), void *arg) { return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg); } static inline void rhltable_destroy(struct rhltable *hlt) { return rhltable_free_and_destroy(hlt, NULL, NULL); } #endif /* _LINUX_RHASHTABLE_H */ rculist.h 0000644 00000062651 14722070374 0006422 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RCULIST_H #define _LINUX_RCULIST_H #ifdef __KERNEL__ /* * RCU-protected list version */ #include <linux/list.h> #include <linux/rcupdate.h> /* * Why is there no list_empty_rcu()? Because list_empty() serves this * purpose. The list_empty() function fetches the RCU-protected pointer * and compares it to the address of the list head, but neither dereferences * this pointer itself nor provides this pointer to the caller. Therefore, * it is not necessary to use rcu_dereference(), so that list_empty() can * be used anywhere you would want to use a list_empty_rcu(). */ /* * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers * @list: list to be initialized * * You should instead use INIT_LIST_HEAD() for normal initialization and * cleanup tasks, when readers have no access to the list being initialized. * However, if the list being initialized is visible to readers, you * need to keep the compiler from being too mischievous. */ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) { WRITE_ONCE(list->next, list); WRITE_ONCE(list->prev, list); } /* * return the ->next pointer of a list_head in an rcu safe * way, we must not access it directly */ #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) /* * Check during list traversal that we are within an RCU reader */ #define check_arg_count_one(dummy) #ifdef CONFIG_PROVE_RCU_LIST #define __list_check_rcu(dummy, cond, extra...) \ ({ \ check_arg_count_one(extra); \ RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(), \ "RCU-list traversed in non-reader section!"); \ }) #else #define __list_check_rcu(dummy, cond, extra...) \ ({ check_arg_count_one(extra); }) #endif /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_add_rcu(struct list_head *new, struct list_head *prev, struct list_head *next) { if (!__list_add_valid(new, prev, next)) return; new->next = next; new->prev = prev; rcu_assign_pointer(list_next_rcu(prev), new); next->prev = new; } /** * list_add_rcu - add a new entry to rcu-protected list * @new: new entry to be added * @head: list head to add it after * * Insert a new entry after the specified head. * This is good for implementing stacks. 
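 *
 * A typical writer therefore does (a sketch; "my_lock" and "my_head"
 * stand in for whatever lock and list head the caller owns):
 *
 *	spin_lock(&my_lock);
 *	list_add_rcu(&new->list, &my_head);
 *	spin_unlock(&my_lock);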
* * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as list_add_rcu() * or list_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). */ static inline void list_add_rcu(struct list_head *new, struct list_head *head) { __list_add_rcu(new, head, head->next); } /** * list_add_tail_rcu - add a new entry to rcu-protected list * @new: new entry to be added * @head: list head to add it before * * Insert a new entry before the specified head. * This is useful for implementing queues. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as list_add_tail_rcu() * or list_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). */ static inline void list_add_tail_rcu(struct list_head *new, struct list_head *head) { __list_add_rcu(new, head->prev, head); } /** * list_del_rcu - deletes entry from list without re-initialization * @entry: the element to delete from the list. * * Note: list_empty() on entry does not return true after this, * the entry is in an undefined state. It is useful for RCU based * lockfree traversal. * * In particular, it means that we can not poison the forward * pointers that may still be used for walking the list. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as list_del_rcu() * or list_add_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). * * Note that the caller is not permitted to immediately free * the newly deleted entry. Instead, either synchronize_rcu() * or call_rcu() must be used to defer freeing until an RCU * grace period has elapsed. */ static inline void list_del_rcu(struct list_head *entry) { __list_del_entry(entry); entry->prev = LIST_POISON2; } /** * hlist_del_init_rcu - deletes entry from hash list with re-initialization * @n: the element to delete from the hash list. * * Note: list_unhashed() on the node returns true after this. It is * useful for RCU based read lockfree traversal if the writer side * must know if the list entry is still hashed or already unhashed. * * In particular, it means that we can not poison the forward pointers * that may still be used for walking the hash list and we can only * zero the pprev pointer so list_unhashed() will return true after * this. * * The caller must take whatever precautions are necessary (such as * holding appropriate locks) to avoid racing with another * list-mutation primitive, such as hlist_add_head_rcu() or * hlist_del_rcu(), running on this same list. However, it is * perfectly legal to run concurrently with the _rcu list-traversal * primitives, such as hlist_for_each_entry_rcu(). */ static inline void hlist_del_init_rcu(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); n->pprev = NULL; } } /** * list_replace_rcu - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert * * The @old entry will be replaced with the @new entry atomically. * Note: @old should not be empty.
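 *
 * A typical replace-and-free sequence is (a sketch; call_rcu() with an
 * rcu_head member works just as well as synchronize_rcu() here):
 *
 *	list_replace_rcu(&old->list, &new->list);
 *	synchronize_rcu();
 *	kfree(old);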
*/ static inline void list_replace_rcu(struct list_head *old, struct list_head *new) { new->next = old->next; new->prev = old->prev; rcu_assign_pointer(list_next_rcu(new->prev), new); new->next->prev = new; old->prev = LIST_POISON2; } /** * __list_splice_init_rcu - join an RCU-protected list into an existing list. * @list: the RCU-protected list to splice * @prev: points to the last element of the existing list * @next: points to the first element of the existing list * @sync: synchronize_rcu, synchronize_rcu_expedited, ... * * The list pointed to by @prev and @next can be RCU-read traversed * concurrently with this function. * * Note that this function blocks. * * Important note: the caller must take whatever action is necessary to prevent * any other updates to the existing list. In principle, it is possible to * modify the list as soon as sync() begins execution. If this sort of thing * becomes necessary, an alternative version based on call_rcu() could be * created. But only if -really- needed -- there is no shortage of RCU API * members. */ static inline void __list_splice_init_rcu(struct list_head *list, struct list_head *prev, struct list_head *next, void (*sync)(void)) { struct list_head *first = list->next; struct list_head *last = list->prev; /* * "first" and "last" tracking list, so initialize it. RCU readers * have access to this list, so we must use INIT_LIST_HEAD_RCU() * instead of INIT_LIST_HEAD(). */ INIT_LIST_HEAD_RCU(list); /* * At this point, the list body still points to the source list. * Wait for any readers to finish using the list before splicing * the list body into the new list. Any new readers will see * an empty list. */ sync(); /* * Readers are finished with the source list, so perform splice. * The order is important if the new list is global and accessible * to concurrent RCU readers. Note that RCU readers are not * permitted to traverse the prev pointers without excluding * this function. */ last->next = next; rcu_assign_pointer(list_next_rcu(prev), first); first->prev = prev; next->prev = last; } /** * list_splice_init_rcu - splice an RCU-protected list into an existing list, * designed for stacks. * @list: the RCU-protected list to splice * @head: the place in the existing list to splice the first list into * @sync: synchronize_rcu, synchronize_rcu_expedited, ... */ static inline void list_splice_init_rcu(struct list_head *list, struct list_head *head, void (*sync)(void)) { if (!list_empty(list)) __list_splice_init_rcu(list, head, head->next, sync); } /** * list_splice_tail_init_rcu - splice an RCU-protected list into an existing * list, designed for queues. * @list: the RCU-protected list to splice * @head: the place in the existing list to splice the first list into * @sync: synchronize_rcu, synchronize_rcu_expedited, ... */ static inline void list_splice_tail_init_rcu(struct list_head *list, struct list_head *head, void (*sync)(void)) { if (!list_empty(list)) __list_splice_init_rcu(list, head->prev, head, sync); } /** * list_entry_rcu - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_entry_rcu(ptr, type, member) \ container_of(READ_ONCE(ptr), type, member) /* * Where are list_empty_rcu() and list_first_entry_rcu()? 
* * Implementing those functions following their counterparts list_empty() and * list_first_entry() is not advisable because they lead to subtle race * conditions as the following snippet shows: * * if (!list_empty_rcu(mylist)) { * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); * do_something(bar); * } * * The list may not be empty when list_empty_rcu checks it, but it may be when * list_first_entry_rcu rereads the ->next pointer. * * Rereading the ->next pointer is not a problem for list_empty() and * list_first_entry() because they would be protected by a lock that blocks * writers. * * See list_first_or_null_rcu for an alternative. */ /** * list_first_or_null_rcu - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_first_or_null_rcu(ptr, type, member) \ ({ \ struct list_head *__ptr = (ptr); \ struct list_head *__next = READ_ONCE(__ptr->next); \ likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ }) /** * list_next_or_null_rcu - get the next element from a list * @head: the head for the list. * @ptr: the list head to take the next element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note that if the ptr is at the end of the list, NULL is returned. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_next_or_null_rcu(head, ptr, type, member) \ ({ \ struct list_head *__head = (head); \ struct list_head *__ptr = (ptr); \ struct list_head *__next = READ_ONCE(__ptr->next); \ likely(__next != __head) ? list_entry_rcu(__next, type, \ member) : NULL; \ }) /** * list_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ #define list_for_each_entry_rcu(pos, head, member, cond...) \ for (__list_check_rcu(dummy, ## cond, 0), \ pos = list_entry_rcu((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** * list_entry_lockless - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * This primitive may safely run concurrently with the _rcu * list-mutation primitives such as list_add_rcu(), but requires some * implicit RCU read-side guarding. One example is running within a special * exception-time environment where preemption is disabled and where lockdep * cannot be invoked. Another example is when items are added to the list, * but never deleted.
*/ #define list_entry_lockless(ptr, type, member) \ container_of((typeof(ptr))READ_ONCE(ptr), type, member) /** * list_for_each_entry_lockless - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * This primitive may safely run concurrently with the _rcu * list-mutation primitives such as list_add_rcu(), but requires some * implicit RCU read-side guarding. One example is running within a special * exception-time environment where preemption is disabled and where lockdep * cannot be invoked. Another example is when items are added to the list, * but never deleted. */ #define list_for_each_entry_lockless(pos, head, member) \ for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_lockless(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_continue_rcu - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position which must have been in the list when the RCU read * lock was taken. * This would typically require either that you obtained the node from a * previous walk of the list in the same RCU read-side critical section, or * that you held some sort of non-RCU reference (such as a reference count) * to keep the node alive *and* in the list. * * This iterator is similar to list_for_each_entry_from_rcu() except * this starts after the given position and that one starts at the given * position. */ #define list_for_each_entry_continue_rcu(pos, head, member) \ for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_from_rcu - iterate over a list from current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_node within the struct. * * Iterate over the tail of a list starting from a given position, * which must have been in the list when the RCU read lock was taken. * This would typically require either that you obtained the node from a * previous walk of the list in the same RCU read-side critical section, or * that you held some sort of non-RCU reference (such as a reference count) * to keep the node alive *and* in the list. * * This iterator is similar to list_for_each_entry_continue_rcu() except * this starts from the given position and that one starts from the position * after the given position. */ #define list_for_each_entry_from_rcu(pos, head, member) \ for (; &(pos)->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member)) /** * hlist_del_rcu - deletes entry from hash list without re-initialization * @n: the element to delete from the hash list. * * Note: list_unhashed() on entry does not return true after this, * the entry is in an undefined state. It is useful for RCU based * lockfree traversal. * * In particular, it means that we can not poison the forward * pointers that may still be used for walking the hash list. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(). */ static inline void hlist_del_rcu(struct hlist_node *n) { __hlist_del(n); n->pprev = LIST_POISON2; } /** * hlist_replace_rcu - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert * * The @old entry will be replaced with the @new entry atomically. */ static inline void hlist_replace_rcu(struct hlist_node *old, struct hlist_node *new) { struct hlist_node *next = old->next; new->next = next; new->pprev = old->pprev; rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); if (next) new->next->pprev = &new->next; old->pprev = LIST_POISON2; } /* * return the first or the next element in an RCU protected hlist */ #define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) #define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next))) #define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) /** * hlist_add_head_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_add_head_rcu(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; n->next = first; n->pprev = &h->first; rcu_assign_pointer(hlist_first_rcu(h), n); if (first) first->pprev = &n->next; } /** * hlist_add_tail_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_add_tail_rcu(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *i, *last = NULL; /* Note: write side code, so rcu accessors are not needed. */ for (i = h->first; i; i = i->next) last = i; if (last) { n->next = last->next; n->pprev = &last->next; rcu_assign_pointer(hlist_next_rcu(last), n); } else { hlist_add_head_rcu(n, h); } } /** * hlist_add_before_rcu * @n: the new element to add to the hash list. * @next: the existing element to add the new element before. * * Description: * Adds the specified element to the specified hlist * before the specified node while permitting racing traversals.
* * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. */ static inline void hlist_add_before_rcu(struct hlist_node *n, struct hlist_node *next) { n->pprev = next->pprev; n->next = next; rcu_assign_pointer(hlist_pprev_rcu(n), n); next->pprev = &n->next; } /** * hlist_add_behind_rcu * @n: the new element to add to the hash list. * @prev: the existing element to add the new element after. * * Description: * Adds the specified element to the specified hlist * after the specified node while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. */ static inline void hlist_add_behind_rcu(struct hlist_node *n, struct hlist_node *prev) { n->next = prev->next; n->pprev = &prev->next; rcu_assign_pointer(hlist_next_rcu(prev), n); if (n->next) n->next->pprev = &n->next; } #define __hlist_for_each_rcu(pos, head) \ for (pos = rcu_dereference(hlist_first_rcu(head)); \ pos; \ pos = rcu_dereference(hlist_next_rcu(pos))) /** * hlist_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ #define hlist_for_each_entry_rcu(pos, head, member, cond...) \ for (__list_check_rcu(dummy, ## cond, 0), \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing) * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). * * This is the same as hlist_for_each_entry_rcu() except that it does * not do any RCU debugging or tracing. */ #define hlist_for_each_entry_rcu_notrace(pos, head, member) \ for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. 
* * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ #define hlist_for_each_entry_rcu_bh(pos, head, member) \ for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue_rcu(pos, member) \ for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue_rcu_bh(pos, member) \ for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_from_rcu(pos, member) \ for (; pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member)) #endif /* __KERNEL__ */ #endif pr.h 0000644 00000001066 14722070374 0005347 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_PR_H #define LINUX_PR_H #include <uapi/linux/pr.h> struct pr_ops { int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key, u32 flags); int (*pr_reserve)(struct block_device *bdev, u64 key, enum pr_type type, u32 flags); int (*pr_release)(struct block_device *bdev, u64 key, enum pr_type type); int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key, enum pr_type type, bool abort); int (*pr_clear)(struct block_device *bdev, u64 key); }; #endif /* LINUX_PR_H */ firewire.h 0000644 00000032626 14722070374 0006550 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FIREWIRE_H #define _LINUX_FIREWIRE_H #include <linux/completion.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/sysfs.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/atomic.h> #include <asm/byteorder.h> #define CSR_REGISTER_BASE 0xfffff0000000ULL /* register offsets are relative to CSR_REGISTER_BASE */ #define CSR_STATE_CLEAR 0x0 #define CSR_STATE_SET 0x4 #define CSR_NODE_IDS 0x8 #define CSR_RESET_START 0xc #define CSR_SPLIT_TIMEOUT_HI 0x18 #define CSR_SPLIT_TIMEOUT_LO 0x1c #define CSR_CYCLE_TIME 0x200 #define CSR_BUS_TIME 0x204 #define CSR_BUSY_TIMEOUT 0x210 #define CSR_PRIORITY_BUDGET 0x218 #define CSR_BUS_MANAGER_ID 0x21c #define CSR_BANDWIDTH_AVAILABLE 0x220 #define CSR_CHANNELS_AVAILABLE 0x224 #define CSR_CHANNELS_AVAILABLE_HI 0x224 #define CSR_CHANNELS_AVAILABLE_LO 0x228 #define CSR_MAINT_UTILITY 0x230 
#define CSR_BROADCAST_CHANNEL 0x234 #define CSR_CONFIG_ROM 0x400 #define CSR_CONFIG_ROM_END 0x800 #define CSR_OMPR 0x900 #define CSR_OPCR(i) (0x904 + (i) * 4) #define CSR_IMPR 0x980 #define CSR_IPCR(i) (0x984 + (i) * 4) #define CSR_FCP_COMMAND 0xB00 #define CSR_FCP_RESPONSE 0xD00 #define CSR_FCP_END 0xF00 #define CSR_TOPOLOGY_MAP 0x1000 #define CSR_TOPOLOGY_MAP_END 0x1400 #define CSR_SPEED_MAP 0x2000 #define CSR_SPEED_MAP_END 0x3000 #define CSR_OFFSET 0x40 #define CSR_LEAF 0x80 #define CSR_DIRECTORY 0xc0 #define CSR_DESCRIPTOR 0x01 #define CSR_VENDOR 0x03 #define CSR_HARDWARE_VERSION 0x04 #define CSR_UNIT 0x11 #define CSR_SPECIFIER_ID 0x12 #define CSR_VERSION 0x13 #define CSR_DEPENDENT_INFO 0x14 #define CSR_MODEL 0x17 #define CSR_DIRECTORY_ID 0x20 struct fw_csr_iterator { const u32 *p; const u32 *end; }; void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p); int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value); int fw_csr_string(const u32 *directory, int key, char *buf, size_t size); extern struct bus_type fw_bus_type; struct fw_card_driver; struct fw_node; struct fw_card { const struct fw_card_driver *driver; struct device *device; struct kref kref; struct completion done; int node_id; int generation; int current_tlabel; u64 tlabel_mask; struct list_head transaction_list; u64 reset_jiffies; u32 split_timeout_hi; u32 split_timeout_lo; unsigned int split_timeout_cycles; unsigned int split_timeout_jiffies; unsigned long long guid; unsigned max_receive; int link_speed; int config_rom_generation; spinlock_t lock; /* Take this lock when handling the lists in * this struct. */ struct fw_node *local_node; struct fw_node *root_node; struct fw_node *irm_node; u8 color; /* must be u8 to match the definition in struct fw_node */ int gap_count; bool beta_repeaters_present; int index; struct list_head link; struct list_head phy_receiver_list; struct delayed_work br_work; /* bus reset job */ bool br_short; struct delayed_work bm_work; /* bus manager job */ int bm_retries; int bm_generation; int bm_node_id; bool bm_abdicate; bool priority_budget_implemented; /* controller feature */ bool broadcast_channel_auto_allocated; /* controller feature */ bool broadcast_channel_allocated; u32 broadcast_channel; __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; __be32 maint_utility_register; }; static inline struct fw_card *fw_card_get(struct fw_card *card) { kref_get(&card->kref); return card; } void fw_card_release(struct kref *kref); static inline void fw_card_put(struct fw_card *card) { kref_put(&card->kref, fw_card_release); } struct fw_attribute_group { struct attribute_group *groups[2]; struct attribute_group group; struct attribute *attrs[13]; }; enum fw_device_state { FW_DEVICE_INITIALIZING, FW_DEVICE_RUNNING, FW_DEVICE_GONE, FW_DEVICE_SHUTDOWN, }; /* * Note, fw_device.generation always has to be read before fw_device.node_id. * Use SMP memory barriers to ensure this. Otherwise requests will be sent * to an outdated node_id if the generation was updated in the meantime due * to a bus reset. * * Likewise, fw-core will take care to update .node_id before .generation so * that whenever fw_device.generation is current WRT the actual bus generation, * fw_device.node_id is guaranteed to be current too. * * The same applies to fw_device.card->node_id vs. fw_device.generation. 
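 *
 * A reader therefore typically does (a sketch):
 *
 *	generation = device->generation;
 *	smp_rmb();	/* pairs with fw-core updating node_id before generation */
 *	node_id = device->node_id;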
* * fw_device.config_rom and fw_device.config_rom_length may be accessed during * the lifetime of any fw_unit belonging to the fw_device, before device_del() * was called on the last fw_unit. Alternatively, they may be accessed while * holding fw_device_rwsem. */ struct fw_device { atomic_t state; struct fw_node *node; int node_id; int generation; unsigned max_speed; struct fw_card *card; struct device device; struct mutex client_list_mutex; struct list_head client_list; const u32 *config_rom; size_t config_rom_length; int config_rom_retries; unsigned is_local:1; unsigned max_rec:4; unsigned cmc:1; unsigned irmc:1; unsigned bc_implemented:2; work_func_t workfn; struct delayed_work work; struct fw_attribute_group attribute_group; }; static inline struct fw_device *fw_device(struct device *dev) { return container_of(dev, struct fw_device, device); } static inline int fw_device_is_shutdown(struct fw_device *device) { return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; } int fw_device_enable_phys_dma(struct fw_device *device); /* * fw_unit.directory must not be accessed after device_del(&fw_unit.device). */ struct fw_unit { struct device device; const u32 *directory; struct fw_attribute_group attribute_group; }; static inline struct fw_unit *fw_unit(struct device *dev) { return container_of(dev, struct fw_unit, device); } static inline struct fw_unit *fw_unit_get(struct fw_unit *unit) { get_device(&unit->device); return unit; } static inline void fw_unit_put(struct fw_unit *unit) { put_device(&unit->device); } static inline struct fw_device *fw_parent_device(struct fw_unit *unit) { return fw_device(unit->device.parent); } struct ieee1394_device_id; struct fw_driver { struct device_driver driver; int (*probe)(struct fw_unit *unit, const struct ieee1394_device_id *id); /* Called when the parent device sits through a bus reset. */ void (*update)(struct fw_unit *unit); void (*remove)(struct fw_unit *unit); const struct ieee1394_device_id *id_table; }; struct fw_packet; struct fw_request; typedef void (*fw_packet_callback_t)(struct fw_packet *packet, struct fw_card *card, int status); typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, void *data, size_t length, void *callback_data); /* * This callback handles an inbound request subaction. It is called in * RCU read-side context, therefore must not sleep. * * The callback should not initiate outbound request subactions directly. * Otherwise there is a danger of recursion of inbound and outbound * transactions from and to the local node. * * The callback is responsible that either fw_send_response() or kfree() * is called on the @request, except for FCP registers for which the core * takes care of that. */ typedef void (*fw_address_callback_t)(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, unsigned long long offset, void *data, size_t length, void *callback_data); struct fw_packet { int speed; int generation; u32 header[4]; size_t header_length; void *payload; size_t payload_length; dma_addr_t payload_bus; bool payload_mapped; u32 timestamp; /* * This callback is called when the packet transmission has completed. * For successful transmission, the status code is the ack received * from the destination. Otherwise it is one of the juju-specific * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK. * The callback can be called from tasklet context and thus * must never block. 
*/ fw_packet_callback_t callback; int ack; struct list_head link; void *driver_data; }; struct fw_transaction { int node_id; /* The generation is implied; it is always the current. */ int tlabel; struct list_head link; struct fw_card *card; bool is_split_transaction; struct timer_list split_timeout_timer; struct fw_packet packet; /* * The data passed to the callback is valid only during the * callback. */ fw_transaction_callback_t callback; void *callback_data; }; struct fw_address_handler { u64 offset; u64 length; fw_address_callback_t address_callback; void *callback_data; struct list_head link; }; struct fw_address_region { u64 start; u64 end; }; extern const struct fw_address_region fw_high_memory_region; int fw_core_add_address_handler(struct fw_address_handler *handler, const struct fw_address_region *region); void fw_core_remove_address_handler(struct fw_address_handler *handler); void fw_send_response(struct fw_card *card, struct fw_request *request, int rcode); int fw_get_request_speed(struct fw_request *request); void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, unsigned long long offset, void *payload, size_t length, fw_transaction_callback_t callback, void *callback_data); int fw_cancel_transaction(struct fw_card *card, struct fw_transaction *transaction); int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, int generation, int speed, unsigned long long offset, void *payload, size_t length); const char *fw_rcode_string(int rcode); static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) { return tag << 14 | channel << 8 | sy; } void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset); struct fw_descriptor { struct list_head link; size_t length; u32 immediate; u32 key; const u32 *data; }; int fw_core_add_descriptor(struct fw_descriptor *desc); void fw_core_remove_descriptor(struct fw_descriptor *desc); /* * The iso packet format allows for an immediate header/payload part * stored in 'header' immediately after the packet info plus an * indirect payload part that is pointed to by the 'payload' field. * Applications can use one or the other or both to implement simple * low-bandwidth streaming (e.g. audio) or more advanced * scatter-gather streaming (e.g. assembling video frames automatically). */ struct fw_iso_packet { u16 payload_length; /* Length of indirect payload */ u32 interrupt:1; /* Generate interrupt on this packet */ u32 skip:1; /* tx: Set to not send packet at all */ /* rx: Sync bit, wait for matching sy */ u32 tag:2; /* tx: Tag in packet header */ u32 sy:4; /* tx: Sy in packet header */ u32 header_length:8; /* Length of immediate header */ u32 header[0]; /* tx: Top of 1394 isoch. data_block */ }; #define FW_ISO_CONTEXT_TRANSMIT 0 #define FW_ISO_CONTEXT_RECEIVE 1 #define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 #define FW_ISO_CONTEXT_MATCH_TAG0 1 #define FW_ISO_CONTEXT_MATCH_TAG1 2 #define FW_ISO_CONTEXT_MATCH_TAG2 4 #define FW_ISO_CONTEXT_MATCH_TAG3 8 #define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15 /* * An iso buffer is just a set of pages mapped for DMA in the * specified direction. Since the pages are to be used for DMA, they * are not mapped into the kernel virtual address space. We store the * DMA address in the page private. The helper function * fw_iso_buffer_map() will map the pages into a given vma.
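 *
 * For example (a sketch; the page count of 16 is arbitrary):
 *
 *	struct fw_iso_buffer buffer;
 *	int err;
 *
 *	err = fw_iso_buffer_init(&buffer, card, 16, DMA_FROM_DEVICE);
 *	if (err < 0)
 *		return err;
 */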
*/ struct fw_iso_buffer { enum dma_data_direction direction; struct page **pages; int page_count; int page_count_mapped; }; int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, int page_count, enum dma_data_direction direction); void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed); struct fw_iso_context; typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data); typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context, dma_addr_t completed, void *data); struct fw_iso_context { struct fw_card *card; int type; int channel; int speed; bool drop_overflow_headers; size_t header_size; union { fw_iso_callback_t sc; fw_iso_mc_callback_t mc; } callback; void *callback_data; }; struct fw_iso_context *fw_iso_context_create(struct fw_card *card, int type, int channel, int speed, size_t header_size, fw_iso_callback_t callback, void *callback_data); int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); int fw_iso_context_queue(struct fw_iso_context *ctx, struct fw_iso_packet *packet, struct fw_iso_buffer *buffer, unsigned long payload); void fw_iso_context_queue_flush(struct fw_iso_context *ctx); int fw_iso_context_flush_completions(struct fw_iso_context *ctx); int fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags); int fw_iso_context_stop(struct fw_iso_context *ctx); void fw_iso_context_destroy(struct fw_iso_context *ctx); void fw_iso_resource_manage(struct fw_card *card, int generation, u64 channels_mask, int *channel, int *bandwidth, bool allocate); extern struct workqueue_struct *fw_workqueue; #endif /* _LINUX_FIREWIRE_H */ mnt_namespace.h 0000644 00000001151 14722070374 0007533 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NAMESPACE_H_ #define _NAMESPACE_H_ #ifdef __KERNEL__ struct mnt_namespace; struct fs_struct; struct user_namespace; struct vfsmount; extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, struct user_namespace *, struct fs_struct *); extern void put_mnt_ns(struct mnt_namespace *ns); extern int is_current_mnt_ns(struct vfsmount *mnt); extern const struct file_operations proc_mounts_operations; extern const struct file_operations proc_mountinfo_operations; extern const struct file_operations proc_mountstats_operations; #endif #endif fs_context.h 0000644 00000017470 14722070374 0007110 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Filesystem superblock creation and reconfiguration context. * * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_FS_CONTEXT_H #define _LINUX_FS_CONTEXT_H #include <linux/kernel.h> #include <linux/refcount.h> #include <linux/errno.h> #include <linux/security.h> #include <linux/mutex.h> struct cred; struct dentry; struct file_operations; struct file_system_type; struct mnt_namespace; struct net; struct pid_namespace; struct super_block; struct user_namespace; struct vfsmount; struct path; enum fs_context_purpose { FS_CONTEXT_FOR_MOUNT, /* New superblock for explicit mount */ FS_CONTEXT_FOR_SUBMOUNT, /* New superblock for automatic submount */ FS_CONTEXT_FOR_RECONFIGURE, /* Superblock reconfiguration (remount) */ }; /* * Userspace usage phase for fsopen/fspick. 
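 *
 * From userspace these phases are driven roughly like this (a sketch
 * of the fsopen()/fsconfig()/fsmount() syscall sequence; filesystem
 * and device names chosen arbitrarily):
 *
 *	fd = fsopen("ext4", FSOPEN_CLOEXEC);	/* CREATE_PARAMS */
 *	fsconfig(fd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0); /* CREATING */
 *	mfd = fsmount(fd, FSMOUNT_CLOEXEC, 0);	/* AWAITING_MOUNT */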
*/ enum fs_context_phase { FS_CONTEXT_CREATE_PARAMS, /* Loading params for sb creation */ FS_CONTEXT_CREATING, /* A superblock is being created */ FS_CONTEXT_AWAITING_MOUNT, /* Superblock created, awaiting fsmount() */ FS_CONTEXT_AWAITING_RECONF, /* Awaiting initialisation for reconfiguration */ FS_CONTEXT_RECONF_PARAMS, /* Loading params for reconfiguration */ FS_CONTEXT_RECONFIGURING, /* Reconfiguring the superblock */ FS_CONTEXT_FAILED, /* Failed to correctly transition a context */ }; /* * Type of parameter value. */ enum fs_value_type { fs_value_is_undefined, fs_value_is_flag, /* Value not given a value */ fs_value_is_string, /* Value is a string */ fs_value_is_blob, /* Value is a binary blob */ fs_value_is_filename, /* Value is a filename* + dirfd */ fs_value_is_filename_empty, /* Value is a filename* + dirfd + AT_EMPTY_PATH */ fs_value_is_file, /* Value is a file* */ }; /* * Configuration parameter. */ struct fs_parameter { const char *key; /* Parameter name */ enum fs_value_type type:8; /* The type of value here */ union { char *string; void *blob; struct filename *name; struct file *file; }; size_t size; int dirfd; }; /* * Filesystem context for holding the parameters used in the creation or * reconfiguration of a superblock. * * Superblock creation fills in ->root whereas reconfiguration begins with this * already set. * * See Documentation/filesystems/mount_api.txt */ struct fs_context { const struct fs_context_operations *ops; struct mutex uapi_mutex; /* Userspace access mutex */ struct file_system_type *fs_type; void *fs_private; /* The filesystem's context */ void *sget_key; struct dentry *root; /* The root and superblock */ struct user_namespace *user_ns; /* The user namespace for this mount */ struct net *net_ns; /* The network namespace for this mount */ const struct cred *cred; /* The mounter's credentials */ struct fc_log *log; /* Logging buffer */ const char *source; /* The source name (eg. dev path) */ void *security; /* Linux S&M options */ void *s_fs_info; /* Proposed s_fs_info */ unsigned int sb_flags; /* Proposed superblock flags (SB_*) */ unsigned int sb_flags_mask; /* Superblock flags that were changed */ unsigned int s_iflags; /* OR'd with sb->s_iflags */ unsigned int lsm_flags; /* Information flags from the fs to the LSM */ enum fs_context_purpose purpose:8; enum fs_context_phase phase:8; /* The phase the context is in */ bool need_free:1; /* Need to call ops->free() */ bool global:1; /* Goes into &init_user_ns */ }; struct fs_context_operations { void (*free)(struct fs_context *fc); int (*dup)(struct fs_context *fc, struct fs_context *src_fc); int (*parse_param)(struct fs_context *fc, struct fs_parameter *param); int (*parse_monolithic)(struct fs_context *fc, void *data); int (*get_tree)(struct fs_context *fc); int (*reconfigure)(struct fs_context *fc); }; /* * fs_context manipulation functions. 
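 *
 * A typical in-kernel mount sequence built on these helpers is (a
 * sketch; error handling elided):
 *
 *	fc = fs_context_for_mount(fs_type, sb_flags);
 *	vfs_parse_fs_string(fc, "source", dev_name, strlen(dev_name));
 *	vfs_get_tree(fc);	/* on success, fc->root holds the root dentry */
 *	put_fs_context(fc);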
*/ extern struct fs_context *fs_context_for_mount(struct file_system_type *fs_type, unsigned int sb_flags); extern struct fs_context *fs_context_for_reconfigure(struct dentry *dentry, unsigned int sb_flags, unsigned int sb_flags_mask); extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_type, struct dentry *reference); extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc); extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param); extern int vfs_parse_fs_string(struct fs_context *fc, const char *key, const char *value, size_t v_size); extern int generic_parse_monolithic(struct fs_context *fc, void *data); extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); extern void fc_drop_locked(struct fs_context *fc); int reconfigure_single(struct super_block *s, int flags, void *data); /* * sget() wrappers to be called from the ->get_tree() op. */ enum vfs_get_super_keying { vfs_get_single_super, /* Only one such superblock may exist */ vfs_get_single_reconf_super, /* As above, but reconfigure if it exists */ vfs_get_keyed_super, /* Superblocks with different s_fs_info keys may exist */ vfs_get_independent_super, /* Multiple independent superblocks may exist */ }; extern int vfs_get_super(struct fs_context *fc, enum vfs_get_super_keying keying, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); extern int get_tree_nodev(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); extern int get_tree_single(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); extern int get_tree_single_reconf(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); extern int get_tree_keyed(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc), void *key); extern int get_tree_bdev(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); extern const struct file_operations fscontext_fops; /* * Mount error, warning and informational message logging. This structure is * shareable between a mount and a subordinate mount. */ struct fc_log { refcount_t usage; u8 head; /* Insertion index in buffer[] */ u8 tail; /* Removal index in buffer[] */ u8 need_free; /* Mask of kfree'able items in buffer[] */ struct module *owner; /* Owner module for strings that don't then need freeing */ char *buffer[8]; }; extern __attribute__((format(printf, 2, 3))) void logfc(struct fs_context *fc, const char *fmt, ...); /** * infof - Store supplementary informational message * @fc: The context in which to log the informational message * @fmt: The format string * * Store the supplementary informational message for the process if the process * has enabled the facility. */ #define infof(fc, fmt, ...) ({ logfc(fc, "i "fmt, ## __VA_ARGS__); }) /** * warnf - Store supplementary warning message * @fc: The context in which to log the error message * @fmt: The format string * * Store the supplementary warning message for the process if the process has * enabled the facility. */ #define warnf(fc, fmt, ...) ({ logfc(fc, "w "fmt, ## __VA_ARGS__); }) /** * errorf - Store supplementary error message * @fc: The context in which to log the error message * @fmt: The format string * * Store the supplementary error message for the process if the process has * enabled the facility. */ #define errorf(fc, fmt, ...) 
({ logfc(fc, "e "fmt, ## __VA_ARGS__); }) /** * invalf - Store supplementary invalid argument error message * @fc: The context in which to log the error message * @fmt: The format string * * Store the supplementary error message for the process if the process has * enabled the facility and return -EINVAL. */ #define invalf(fc, fmt, ...) ({ errorf(fc, fmt, ## __VA_ARGS__); -EINVAL; }) #endif /* _LINUX_FS_CONTEXT_H */ init.h 0000644 00000023062 14722070374 0005671 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_INIT_H #define _LINUX_INIT_H #include <linux/compiler.h> #include <linux/types.h> /* Built-in __init functions needn't be compiled with retpoline */ #if defined(__noretpoline) && !defined(MODULE) #define __noinitretpoline __noretpoline #else #define __noinitretpoline #endif /* These macros are used to mark some functions or * initialized data (doesn't apply to uninitialized data) * as `initialization' functions. The kernel can take this * as hint that the function is used only during the initialization * phase and free up used memory resources after * * Usage: * For functions: * * You should add __init immediately before the function name, like: * * static void __init initme(int x, int y) * { * extern int z; z = x * y; * } * * If the function has a prototype somewhere, you can also add * __init between closing brace of the prototype and semicolon: * * extern int initialize_foobar_device(int, int, int) __init; * * For initialized data: * You should insert __initdata or __initconst between the variable name * and equal sign followed by value, e.g.: * * static int init_variable __initdata = 0; * static const char linux_logo[] __initconst = { 0x32, 0x36, ... }; * * Don't forget to initialize data not at file scope, i.e. within a function, * as gcc otherwise puts the data into the bss section and not into the init * section. */ /* These are for everybody (although not all archs will actually discard it in modules) */ #define __init __section(.init.text) __cold __latent_entropy __noinitretpoline #define __initdata __section(.init.data) #define __initconst __section(.init.rodata) #define __exitdata __section(.exit.data) #define __exit_call __used __section(.exitcall.exit) /* * modpost check for section mismatches during the kernel build. * A section mismatch happens when there are references from a * code or data section to an init section (both code or data). * The init sections are (for most archs) discarded by the kernel * when early init has completed so all such references are potential bugs. * For exit sections the same issue exists. * * The following markers are used for the cases where the reference to * the *init / *exit section (code or data) is valid and will teach * modpost not to issue a warning. Intended semantics is that a code or * data tagged __ref* can reference code or data from init section without * producing a warning (of course, no warning does not mean code is * correct, so optimally document why the __ref is needed and why it's OK). * * The markers follow same syntax rules as __init / __initdata. 
*/ #define __ref __section(.ref.text) noinline #define __refdata __section(.ref.data) #define __refconst __section(.ref.rodata) #ifdef MODULE #define __exitused #else #define __exitused __used #endif #define __exit __section(.exit.text) __exitused __cold notrace /* Used for MEMORY_HOTPLUG */ #define __meminit __section(.meminit.text) __cold notrace \ __latent_entropy #define __meminitdata __section(.meminit.data) #define __meminitconst __section(.meminit.rodata) #define __memexit __section(.memexit.text) __exitused __cold notrace #define __memexitdata __section(.memexit.data) #define __memexitconst __section(.memexit.rodata) /* For assembly routines */ #define __HEAD .section ".head.text","ax" #define __INIT .section ".init.text","ax" #define __FINIT .previous #define __INITDATA .section ".init.data","aw",%progbits #define __INITRODATA .section ".init.rodata","a",%progbits #define __FINITDATA .previous #define __MEMINIT .section ".meminit.text", "ax" #define __MEMINITDATA .section ".meminit.data", "aw" #define __MEMINITRODATA .section ".meminit.rodata", "a" /* silence warnings when references are OK */ #define __REF .section ".ref.text", "ax" #define __REFDATA .section ".ref.data", "aw" #define __REFCONST .section ".ref.rodata", "a" #ifndef __ASSEMBLY__ /* * Used for initialization calls.. */ typedef int (*initcall_t)(void); typedef void (*exitcall_t)(void); #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS typedef int initcall_entry_t; static inline initcall_t initcall_from_entry(initcall_entry_t *entry) { return offset_to_ptr(entry); } #else typedef initcall_t initcall_entry_t; static inline initcall_t initcall_from_entry(initcall_entry_t *entry) { return *entry; } #endif extern initcall_entry_t __con_initcall_start[], __con_initcall_end[]; /* Used for constructor calls. */ typedef void (*ctor_fn_t)(void); struct file_system_type; /* Defined in init/main.c */ extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; extern char *saved_command_line; extern unsigned int reset_devices; /* used by init/main.c */ void setup_arch(char **); void prepare_namespace(void); void __init init_rootfs(void); extern struct file_system_type rootfs_fs_type; #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX) extern bool rodata_enabled; #endif #ifdef CONFIG_STRICT_KERNEL_RWX void mark_rodata_ro(void); #endif extern void (*late_time_init)(void); extern bool initcall_debug; #endif #ifndef MODULE #ifndef __ASSEMBLY__ /* * initcalls are now grouped by functionality into separate * subsections. Ordering inside the subsections is determined * by link order. * For backwards compatibility, initcall() puts the call in * the device init subsection. * * The `id' arg to __define_initcall() is needed so that multiple initcalls * can point at the same handler without causing duplicate-symbol build errors. * * Initcalls are run by placing pointers in initcall sections that the * kernel iterates at runtime. The linker can do dead code / data elimination * and remove that completely, so the initcall sections have to be marked * as KEEP() in the linker script. */ #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS #define ___define_initcall(fn, id, __sec) \ __ADDRESSABLE(fn) \ asm(".section \"" #__sec ".init\", \"a\" \n" \ "__initcall_" #fn #id ": \n" \ ".long " #fn " - . 
\n" \ ".previous \n"); #else #define ___define_initcall(fn, id, __sec) \ static initcall_t __initcall_##fn##id __used \ __attribute__((__section__(#__sec ".init"))) = fn; #endif #define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id) /* * Early initcalls run before initializing SMP. * * Only for built-in code, not modules. */ #define early_initcall(fn) __define_initcall(fn, early) /* * A "pure" initcall has no dependencies on anything else, and purely * initializes variables that couldn't be statically initialized. * * This only exists for built-in code, not for modules. * Keep main.c:initcall_level_names[] in sync. */ #define pure_initcall(fn) __define_initcall(fn, 0) #define core_initcall(fn) __define_initcall(fn, 1) #define core_initcall_sync(fn) __define_initcall(fn, 1s) #define postcore_initcall(fn) __define_initcall(fn, 2) #define postcore_initcall_sync(fn) __define_initcall(fn, 2s) #define arch_initcall(fn) __define_initcall(fn, 3) #define arch_initcall_sync(fn) __define_initcall(fn, 3s) #define subsys_initcall(fn) __define_initcall(fn, 4) #define subsys_initcall_sync(fn) __define_initcall(fn, 4s) #define fs_initcall(fn) __define_initcall(fn, 5) #define fs_initcall_sync(fn) __define_initcall(fn, 5s) #define rootfs_initcall(fn) __define_initcall(fn, rootfs) #define device_initcall(fn) __define_initcall(fn, 6) #define device_initcall_sync(fn) __define_initcall(fn, 6s) #define late_initcall(fn) __define_initcall(fn, 7) #define late_initcall_sync(fn) __define_initcall(fn, 7s) #define __initcall(fn) device_initcall(fn) #define __exitcall(fn) \ static exitcall_t __exitcall_##fn __exit_call = fn #define console_initcall(fn) ___define_initcall(fn,, .con_initcall) struct obs_kernel_param { const char *str; int (*setup_func)(char *); int early; }; /* * Only for really core code. See moduleparam.h for the normal way. * * Force the alignment so the compiler doesn't space elements of the * obs_kernel_param "array" too far apart in .init.setup. */ #define __setup_param(str, unique_id, fn, early) \ static const char __setup_str_##unique_id[] __initconst \ __aligned(1) = str; \ static struct obs_kernel_param __setup_##unique_id \ __used __section(.init.setup) \ __attribute__((aligned((sizeof(long))))) \ = { __setup_str_##unique_id, fn, early } #define __setup(str, fn) \ __setup_param(str, fn, fn, 0) /* * NOTE: fn is as per module_param, not __setup! * Emits warning if fn returns non-zero. 
*/ #define early_param(str, fn) \ __setup_param(str, fn, fn, 1) #define early_param_on_off(str_on, str_off, var, config) \ \ int var = IS_ENABLED(config); \ \ static int __init parse_##var##_on(char *arg) \ { \ var = 1; \ return 0; \ } \ __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \ \ static int __init parse_##var##_off(char *arg) \ { \ var = 0; \ return 0; \ } \ __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1) /* Relies on boot_command_line being set */ void __init parse_early_param(void); void __init parse_early_options(char *cmdline); #endif /* __ASSEMBLY__ */ #else /* MODULE */ #define __setup_param(str, unique_id, fn) /* nothing */ #define __setup(str, func) /* nothing */ #endif /* Data marked not to be saved by software suspend */ #define __nosavedata __section(.data..nosave) #ifdef MODULE #define __exit_p(x) x #else #define __exit_p(x) NULL #endif #endif /* _LINUX_INIT_H */ bpf-cgroup.h 0000644 00000033474 14722070374 0007002 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BPF_CGROUP_H #define _BPF_CGROUP_H #include <linux/bpf.h> #include <linux/errno.h> #include <linux/jump_label.h> #include <linux/percpu.h> #include <linux/percpu-refcount.h> #include <linux/rbtree.h> #include <uapi/linux/bpf.h> struct sock; struct sockaddr; struct cgroup; struct sk_buff; struct bpf_map; struct bpf_prog; struct bpf_sock_ops_kern; struct bpf_cgroup_storage; struct ctl_table; struct ctl_table_header; #ifdef CONFIG_CGROUP_BPF extern struct static_key_false cgroup_bpf_enabled_key; #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) DECLARE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); #define for_each_cgroup_storage_type(stype) \ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) struct bpf_cgroup_storage_map; struct bpf_storage_buffer { struct rcu_head rcu; char data[0]; }; struct bpf_cgroup_storage { union { struct bpf_storage_buffer *buf; void __percpu *percpu_buf; }; struct bpf_cgroup_storage_map *map; struct bpf_cgroup_storage_key key; struct list_head list; struct rb_node node; struct rcu_head rcu; }; struct bpf_prog_list { struct list_head node; struct bpf_prog *prog; struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; }; struct bpf_prog_array; struct cgroup_bpf { /* array of effective progs in this cgroup */ struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE]; /* attached progs to this cgroup and attach flags * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will * have either zero or one element * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS */ struct list_head progs[MAX_BPF_ATTACH_TYPE]; u32 flags[MAX_BPF_ATTACH_TYPE]; /* temp storage for effective prog array used by prog_attach/detach */ struct bpf_prog_array *inactive; /* reference counter used to detach bpf programs after cgroup removal */ struct percpu_ref refcnt; /* cgroup_bpf is released using a work queue */ struct work_struct release_work; }; int cgroup_bpf_inherit(struct cgroup *cgrp); void cgroup_bpf_offline(struct cgroup *cgrp); int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type, u32 flags); int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type); int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); /* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */ int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, enum 
bpf_attach_type type, u32 flags); int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type, u32 flags); int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); int __cgroup_bpf_run_filter_skb(struct sock *sk, struct sk_buff *skb, enum bpf_attach_type type); int __cgroup_bpf_run_filter_sk(struct sock *sk, enum bpf_attach_type type); int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, struct sockaddr *uaddr, enum bpf_attach_type type, void *t_ctx); int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, struct bpf_sock_ops_kern *sock_ops, enum bpf_attach_type type); int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, short access, enum bpf_attach_type type); int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, struct ctl_table *table, int write, void __user *buf, size_t *pcount, loff_t *ppos, void **new_buf, enum bpf_attach_type type); int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, int *optname, char __user *optval, int *optlen, char **kernel_optval); int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen, int max_optlen, int retval); static inline enum bpf_cgroup_storage_type cgroup_storage_type( struct bpf_map *map) { if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) return BPF_CGROUP_STORAGE_PERCPU; return BPF_CGROUP_STORAGE_SHARED; } static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { enum bpf_cgroup_storage_type stype; for_each_cgroup_storage_type(stype) this_cpu_write(bpf_cgroup_storage[stype], storage[stype]); } struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, enum bpf_cgroup_storage_type stype); void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, struct cgroup *cgroup, enum bpf_attach_type type); void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, void *value, u64 flags); /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. 
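 *
 * Illustrative call site (a sketch of how the network stack consumes
 * these statement-expression wrappers on the ingress path):
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}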
*/ #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled) \ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ BPF_CGROUP_INET_INGRESS); \ \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled && sk && sk == skb->sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ if (sk_fullsock(__sk)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ BPF_CGROUP_INET_EGRESS); \ } \ __ret; \ }) #define BPF_CGROUP_RUN_SK_PROG(sk, type) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled) { \ __ret = __cgroup_bpf_run_filter_sk(sk, type); \ } \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND) #define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled) \ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ NULL); \ __ret; \ }) #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled) { \ lock_sock(sk); \ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ t_ctx); \ release_sock(sk); \ } \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND) #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \ sk->sk_prot->pre_connect) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL) #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx) #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx) #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL) #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled && (sock_ops)->sk) { \ typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \ if (__sk && sk_fullsock(__sk)) \ __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \ sock_ops, \ BPF_CGROUP_SOCK_OPS); \ } \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled) \ __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \ access, \ BPF_CGROUP_DEVICE); \ \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled) \ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ buf, count, pos, nbuf, \ BPF_CGROUP_SYSCTL); \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ 
kernel_optval) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled) \ __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ optname, optval, \ optlen, \ kernel_optval); \ __ret; \ }) #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled) \ get_user(__ret, optlen); \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \ max_optlen, retval) \ ({ \ int __ret = retval; \ if (cgroup_bpf_enabled) \ __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \ optname, optval, \ optlen, max_optlen, \ retval); \ __ret; \ }) int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, struct bpf_prog *prog); int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); #else struct bpf_prog; struct cgroup_bpf {}; static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } static inline void cgroup_bpf_offline(struct cgroup *cgrp) {} static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, struct bpf_prog *prog) { return -EINVAL; } static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) { return -EINVAL; } static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { return -EINVAL; } static inline void bpf_cgroup_storage_set( struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map) { return 0; } static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map) {} static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; } static inline void bpf_cgroup_storage_free( struct bpf_cgroup_storage *storage) {} static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value) { return 0; } static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, void *value, u64 flags) { return 0; } #define cgroup_bpf_enabled (0) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ optlen, max_optlen, 
retval) ({ retval; }) #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ kernel_optval) ({ 0; }) #define for_each_cgroup_storage_type(stype) for (; false; ) #endif /* CONFIG_CGROUP_BPF */ #endif /* _BPF_CGROUP_H */ netfilter_bridge/ebtables.h 0000644 00000007777 14722070374 0012036 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * ebtables * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * ebtables.c,v 2.0, April, 2002 * * This code is strongly inspired by the iptables code which is * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling */ #ifndef __LINUX_BRIDGE_EFF_H #define __LINUX_BRIDGE_EFF_H #include <linux/if.h> #include <linux/if_ether.h> #include <uapi/linux/netfilter_bridge/ebtables.h> struct ebt_match { struct list_head list; const char name[EBT_FUNCTION_MAXNAMELEN]; bool (*match)(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct xt_match *match, const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop); bool (*checkentry)(const char *table, const void *entry, const struct xt_match *match, void *matchinfo, unsigned int hook_mask); void (*destroy)(const struct xt_match *match, void *matchinfo); unsigned int matchsize; u_int8_t revision; u_int8_t family; struct module *me; }; struct ebt_watcher { struct list_head list; const char name[EBT_FUNCTION_MAXNAMELEN]; unsigned int (*target)(struct sk_buff *skb, const struct net_device *in, const struct net_device *out, unsigned int hook_num, const struct xt_target *target, const void *targinfo); bool (*checkentry)(const char *table, const void *entry, const struct xt_target *target, void *targinfo, unsigned int hook_mask); void (*destroy)(const struct xt_target *target, void *targinfo); unsigned int targetsize; u_int8_t revision; u_int8_t family; struct module *me; }; struct ebt_target { struct list_head list; const char name[EBT_FUNCTION_MAXNAMELEN]; /* returns one of the standard EBT_* verdicts */ unsigned int (*target)(struct sk_buff *skb, const struct net_device *in, const struct net_device *out, unsigned int hook_num, const struct xt_target *target, const void *targinfo); bool (*checkentry)(const char *table, const void *entry, const struct xt_target *target, void *targinfo, unsigned int hook_mask); void (*destroy)(const struct xt_target *target, void *targinfo); unsigned int targetsize; u_int8_t revision; u_int8_t family; struct module *me; }; /* used for jumping from and into user defined chains (udc) */ struct ebt_chainstack { struct ebt_entries *chaininfo; /* pointer to chain data */ struct ebt_entry *e; /* pointer to entry data */ unsigned int n; /* n'th entry */ }; struct ebt_table_info { /* total size of the entries */ unsigned int entries_size; unsigned int nentries; /* pointers to the start of the chains */ struct ebt_entries *hook_entry[NF_BR_NUMHOOKS]; /* room to maintain the stack used for jumping from and into udc */ struct ebt_chainstack **chainstack; char *entries; struct ebt_counter counters[0] ____cacheline_aligned; }; struct ebt_table { struct list_head list; char name[EBT_TABLE_MAXNAMELEN]; struct ebt_replace_kernel *table; unsigned int valid_hooks; rwlock_t lock; /* the data used by the kernel */ struct ebt_table_info *private; struct module *me; }; #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ ~(__alignof__(struct _xt_align)-1)) extern int ebt_register_table(struct net *net, const struct ebt_table *table, const struct nf_hook_ops *ops, struct ebt_table **res); extern void 
ebt_unregister_table(struct net *net, struct ebt_table *table); void ebt_unregister_table_pre_exit(struct net *net, const char *tablename, const struct nf_hook_ops *ops); extern unsigned int ebt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct ebt_table *table); /* True if the hook mask denotes that the rule is in a base chain, * used in the check() functions */ #define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS)) /* Clear the bit in the hook mask that tells if the rule is on a base chain */ #define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS)) static inline bool ebt_invalid_target(int target) { return (target < -NUM_STANDARD_TARGETS || target >= 0); } #endif gameport.h 0000644 00000012655 14722070374 0006552 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 1999-2002 Vojtech Pavlik */ #ifndef _GAMEPORT_H #define _GAMEPORT_H #include <asm/io.h> #include <linux/types.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/timer.h> #include <linux/slab.h> #include <uapi/linux/gameport.h> struct gameport { void *port_data; /* Private pointer for gameport drivers */ char name[32]; char phys[32]; int io; int speed; int fuzz; void (*trigger)(struct gameport *); unsigned char (*read)(struct gameport *); int (*cooked_read)(struct gameport *, int *, int *); int (*calibrate)(struct gameport *, int *, int *); int (*open)(struct gameport *, int); void (*close)(struct gameport *); struct timer_list poll_timer; unsigned int poll_interval; /* in msecs */ spinlock_t timer_lock; unsigned int poll_cnt; void (*poll_handler)(struct gameport *); struct gameport *parent, *child; struct gameport_driver *drv; struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */ struct device dev; struct list_head node; }; #define to_gameport_port(d) container_of(d, struct gameport, dev) struct gameport_driver { const char *description; int (*connect)(struct gameport *, struct gameport_driver *drv); int (*reconnect)(struct gameport *); void (*disconnect)(struct gameport *); struct device_driver driver; bool ignore; }; #define to_gameport_driver(d) container_of(d, struct gameport_driver, driver) int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode); void gameport_close(struct gameport *gameport); #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) void __gameport_register_port(struct gameport *gameport, struct module *owner); /* use a define to avoid include chaining to get THIS_MODULE */ #define gameport_register_port(gameport) \ __gameport_register_port(gameport, THIS_MODULE) void gameport_unregister_port(struct gameport *gameport); __printf(2, 3) void gameport_set_phys(struct gameport *gameport, const char *fmt, ...); #else static inline void gameport_register_port(struct gameport *gameport) { return; } static inline void gameport_unregister_port(struct gameport *gameport) { return; } static inline __printf(2, 3) void gameport_set_phys(struct gameport *gameport, const char *fmt, ...) 
{ return; } #endif static inline struct gameport *gameport_allocate_port(void) { struct gameport *gameport = kzalloc(sizeof(struct gameport), GFP_KERNEL); return gameport; } static inline void gameport_free_port(struct gameport *gameport) { kfree(gameport); } static inline void gameport_set_name(struct gameport *gameport, const char *name) { strlcpy(gameport->name, name, sizeof(gameport->name)); } /* * Use the following functions to manipulate gameport's per-port * driver-specific data. */ static inline void *gameport_get_drvdata(struct gameport *gameport) { return dev_get_drvdata(&gameport->dev); } static inline void gameport_set_drvdata(struct gameport *gameport, void *data) { dev_set_drvdata(&gameport->dev, data); } /* * Use the following functions to pin gameport's driver in process context */ static inline int gameport_pin_driver(struct gameport *gameport) { return mutex_lock_interruptible(&gameport->drv_mutex); } static inline void gameport_unpin_driver(struct gameport *gameport) { mutex_unlock(&gameport->drv_mutex); } int __must_check __gameport_register_driver(struct gameport_driver *drv, struct module *owner, const char *mod_name); /* use a define to avoid include chaining to get THIS_MODULE & friends */ #define gameport_register_driver(drv) \ __gameport_register_driver(drv, THIS_MODULE, KBUILD_MODNAME) void gameport_unregister_driver(struct gameport_driver *drv); /** * module_gameport_driver() - Helper macro for registering a gameport driver * @__gameport_driver: gameport_driver struct * * Helper macro for gameport drivers which do not do anything special in * module init/exit. This eliminates a lot of boilerplate. Each module may * only use this macro once, and calling it replaces module_init() and * module_exit(). */ #define module_gameport_driver(__gameport_driver) \ module_driver(__gameport_driver, gameport_register_driver, \ gameport_unregister_driver) static inline void gameport_trigger(struct gameport *gameport) { if (gameport->trigger) gameport->trigger(gameport); else outb(0xff, gameport->io); } static inline unsigned char gameport_read(struct gameport *gameport) { if (gameport->read) return gameport->read(gameport); else return inb(gameport->io); } static inline int gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons) { if (gameport->cooked_read) return gameport->cooked_read(gameport, axes, buttons); else return -1; } static inline int gameport_calibrate(struct gameport *gameport, int *axes, int *max) { if (gameport->calibrate) return gameport->calibrate(gameport, axes, max); else return -1; } static inline int gameport_time(struct gameport *gameport, int time) { return (time * gameport->speed) / 1000; } static inline void gameport_set_poll_handler(struct gameport *gameport, void (*handler)(struct gameport *)) { gameport->poll_handler = handler; } static inline void gameport_set_poll_interval(struct gameport *gameport, unsigned int msecs) { gameport->poll_interval = msecs; } void gameport_start_polling(struct gameport *gameport); void gameport_stop_polling(struct gameport *gameport); #endif swapops.h 0000644 00000021101 14722070374 0006412 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SWAPOPS_H #define _LINUX_SWAPOPS_H #include <linux/radix-tree.h> #include <linux/bug.h> #include <linux/mm_types.h> #ifdef CONFIG_MMU /* * swapcache pages are stored in the swapper_space radix tree. We want to * get good packing density in that tree, so the index should be dense in * the low-order bits. 
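 *
 * A worked example of the encoding implemented below (a sketch assuming
 * a 64-bit kernel: BITS_PER_XA_VALUE == 63, MAX_SWAPFILES_SHIFT == 5,
 * so SWP_TYPE_SHIFT == 58):
 *
 *	swp_entry_t e = swp_entry(2, 0x1234);
 *	// e.val == (2UL << 58) | 0x1234
 *	swp_type(e);	// == 2
 *	swp_offset(e);	// == 0x1234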
* * We arrange the `type' and `offset' fields so that `type' is at the seven * high-order bits of the swp_entry_t and `offset' is right-aligned in the * remaining bits. Although `type' itself needs only five bits, we allow for * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry(). * * swp_entry_t's are *never* stored anywhere in their arch-dependent format. */ #define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT) #define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1) /* * Store a type+offset into a swp_entry_t in an arch-independent format */ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) { swp_entry_t ret; ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK); return ret; } /* * Extract the `type' field from a swp_entry_t. The swp_entry_t is in * arch-independent format */ static inline unsigned swp_type(swp_entry_t entry) { return (entry.val >> SWP_TYPE_SHIFT); } /* * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in * arch-independent format */ static inline pgoff_t swp_offset(swp_entry_t entry) { return entry.val & SWP_OFFSET_MASK; } /* check whether a pte points to a swap entry */ static inline int is_swap_pte(pte_t pte) { return !pte_none(pte) && !pte_present(pte); } /* * Convert the arch-dependent pte representation of a swp_entry_t into an * arch-independent swp_entry_t. */ static inline swp_entry_t pte_to_swp_entry(pte_t pte) { swp_entry_t arch_entry; if (pte_swp_soft_dirty(pte)) pte = pte_swp_clear_soft_dirty(pte); arch_entry = __pte_to_swp_entry(pte); return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); } /* * Convert the arch-independent representation of a swp_entry_t into the * arch-dependent pte representation. */ static inline pte_t swp_entry_to_pte(swp_entry_t entry) { swp_entry_t arch_entry; arch_entry = __swp_entry(swp_type(entry), swp_offset(entry)); return __swp_entry_to_pte(arch_entry); } static inline swp_entry_t radix_to_swp_entry(void *arg) { swp_entry_t entry; entry.val = xa_to_value(arg); return entry; } static inline void *swp_to_radix_entry(swp_entry_t entry) { return xa_mk_value(entry.val); } #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) static inline swp_entry_t make_device_private_entry(struct page *page, bool write) { return swp_entry(write ? 
SWP_DEVICE_WRITE : SWP_DEVICE_READ, page_to_pfn(page)); } static inline bool is_device_private_entry(swp_entry_t entry) { int type = swp_type(entry); return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE; } static inline void make_device_private_entry_read(swp_entry_t *entry) { *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry)); } static inline bool is_write_device_private_entry(swp_entry_t entry) { return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); } static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry) { return swp_offset(entry); } static inline struct page *device_private_entry_to_page(swp_entry_t entry) { return pfn_to_page(swp_offset(entry)); } #else /* CONFIG_DEVICE_PRIVATE */ static inline swp_entry_t make_device_private_entry(struct page *page, bool write) { return swp_entry(0, 0); } static inline void make_device_private_entry_read(swp_entry_t *entry) { } static inline bool is_device_private_entry(swp_entry_t entry) { return false; } static inline bool is_write_device_private_entry(swp_entry_t entry) { return false; } static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry) { return 0; } static inline struct page *device_private_entry_to_page(swp_entry_t entry) { return NULL; } #endif /* CONFIG_DEVICE_PRIVATE */ #ifdef CONFIG_MIGRATION static inline swp_entry_t make_migration_entry(struct page *page, int write) { BUG_ON(!PageLocked(compound_head(page))); return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ, page_to_pfn(page)); } static inline int is_migration_entry(swp_entry_t entry) { return unlikely(swp_type(entry) == SWP_MIGRATION_READ || swp_type(entry) == SWP_MIGRATION_WRITE); } static inline int is_write_migration_entry(swp_entry_t entry) { return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); } static inline unsigned long migration_entry_to_pfn(swp_entry_t entry) { return swp_offset(entry); } static inline struct page *migration_entry_to_page(swp_entry_t entry) { struct page *p = pfn_to_page(swp_offset(entry)); /* * Any use of migration entries may only occur while the * corresponding page is locked */ BUG_ON(!PageLocked(compound_head(p))); return p; } static inline void make_migration_entry_read(swp_entry_t *entry) { *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry)); } extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, spinlock_t *ptl); extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address); extern void migration_entry_wait_huge(struct vm_area_struct *vma, struct mm_struct *mm, pte_t *pte); #else #define make_migration_entry(page, write) swp_entry(0, 0) static inline int is_migration_entry(swp_entry_t swp) { return 0; } static inline unsigned long migration_entry_to_pfn(swp_entry_t entry) { return 0; } static inline struct page *migration_entry_to_page(swp_entry_t entry) { return NULL; } static inline void make_migration_entry_read(swp_entry_t *entryp) { } static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, spinlock_t *ptl) { } static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } static inline void migration_entry_wait_huge(struct vm_area_struct *vma, struct mm_struct *mm, pte_t *pte) { } static inline int is_write_migration_entry(swp_entry_t entry) { return 0; } #endif struct page_vma_mapped_walk; #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, struct page *page); extern void remove_migration_pmd(struct 
page_vma_mapped_walk *pvmw, struct page *new); extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd); static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd) { swp_entry_t arch_entry; if (pmd_swp_soft_dirty(pmd)) pmd = pmd_swp_clear_soft_dirty(pmd); arch_entry = __pmd_to_swp_entry(pmd); return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); } static inline pmd_t swp_entry_to_pmd(swp_entry_t entry) { swp_entry_t arch_entry; arch_entry = __swp_entry(swp_type(entry), swp_offset(entry)); return __swp_entry_to_pmd(arch_entry); } static inline int is_pmd_migration_entry(pmd_t pmd) { return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd)); } #else static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, struct page *page) { BUILD_BUG(); } static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) { BUILD_BUG(); } static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { } static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd) { return swp_entry(0, 0); } static inline pmd_t swp_entry_to_pmd(swp_entry_t entry) { return __pmd(0); } static inline int is_pmd_migration_entry(pmd_t pmd) { return 0; } #endif #ifdef CONFIG_MEMORY_FAILURE extern atomic_long_t num_poisoned_pages __read_mostly; /* * Support for hardware poisoned pages */ static inline swp_entry_t make_hwpoison_entry(struct page *page) { BUG_ON(!PageLocked(page)); return swp_entry(SWP_HWPOISON, page_to_pfn(page)); } static inline int is_hwpoison_entry(swp_entry_t entry) { return swp_type(entry) == SWP_HWPOISON; } static inline void num_poisoned_pages_inc(void) { atomic_long_inc(&num_poisoned_pages); } static inline void num_poisoned_pages_dec(void) { atomic_long_dec(&num_poisoned_pages); } #else static inline swp_entry_t make_hwpoison_entry(struct page *page) { return swp_entry(0, 0); } static inline int is_hwpoison_entry(swp_entry_t swp) { return 0; } static inline void num_poisoned_pages_inc(void) { } #endif #if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \ defined(CONFIG_DEVICE_PRIVATE) static inline int non_swap_entry(swp_entry_t entry) { return swp_type(entry) >= MAX_SWAPFILES; } #else static inline int non_swap_entry(swp_entry_t entry) { return 0; } #endif #endif /* CONFIG_MMU */ #endif /* _LINUX_SWAPOPS_H */ vmalloc.h 0000644 00000016323 14722070374 0006365 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_VMALLOC_H #define _LINUX_VMALLOC_H #include <linux/spinlock.h> #include <linux/init.h> #include <linux/list.h> #include <linux/llist.h> #include <asm/page.h> /* pgprot_t */ #include <linux/rbtree.h> #include <linux/overflow.h> struct vm_area_struct; /* vma defining user mapping in mm_types.h */ struct notifier_block; /* in notifier.h */ /* bits in flags of vmalloc's vm_struct below */ #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ #define VM_ALLOC 0x00000002 /* vmalloc() */ #define VM_MAP 0x00000004 /* vmap()ed pages */ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ #define VM_DMA_COHERENT 0x00000010 /* dma_alloc_coherent */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ /* * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with * vfree_atomic(). 
#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */ /* bits [20..32] reserved for arch specific ioremap internals */ /* * Maximum alignment for ioremap() regions. * Can be overridden by arch-specific value. */ #ifndef IOREMAP_MAX_ORDER #define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */ #endif struct vm_struct { struct vm_struct *next; void *addr; unsigned long size; unsigned long flags; struct page **pages; unsigned int nr_pages; phys_addr_t phys_addr; const void *caller; }; struct vmap_area { unsigned long va_start; unsigned long va_end; struct rb_node rb_node; /* address sorted rbtree */ struct list_head list; /* address sorted list */ /* * The following three variables can be packed, because * a vmap_area object is always one of the three states: * 1) in "free" tree (root is free_vmap_area_root) * 2) in "busy" tree (root is vmap_area_root) * 3) in purge list (head is vmap_purge_list) */ union { unsigned long subtree_max_size; /* in "free" tree */ struct vm_struct *vm; /* in "busy" tree */ struct llist_node purge_list; /* in purge list */ }; }; /* * Highlevel APIs for driver use */ extern void vm_unmap_ram(const void *mem, unsigned int count); extern void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot); extern void vm_unmap_aliases(void); #ifdef CONFIG_MMU extern void __init vmalloc_init(void); extern unsigned long vmalloc_nr_pages(void); #else static inline void vmalloc_init(void) { } static inline unsigned long vmalloc_nr_pages(void) { return 0; } #endif extern void *vmalloc(unsigned long size); extern void *vzalloc(unsigned long size); extern void *vmalloc_user(unsigned long size); extern void *vmalloc_node(unsigned long size, int node); extern void *vzalloc_node(unsigned long size, int node); extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); extern void *vmalloc_32_user(unsigned long size); extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); extern void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller); #ifndef CONFIG_MMU extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags); static inline void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, void *caller) { return __vmalloc_node_flags(size, node, flags); } #else extern void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, void *caller); #endif extern void vfree(const void *addr); extern void vfree_atomic(const void *addr); extern void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot); extern void vunmap(const void *addr); extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, unsigned long pgoff, unsigned long size); extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); void vmalloc_sync_mappings(void); void vmalloc_sync_unmappings(void); /* * Lowlevel-APIs (not for driver use!) 
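 *
 * (Illustrative note: a 4-page vmalloc() allocation is backed by a
 * vm_struct with area->size == 5 * PAGE_SIZE because of the implicit
 * guard page; get_vm_area_size() below returns the usable 4 * PAGE_SIZE
 * unless VM_NO_GUARD suppressed the guard page.)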
*/ static inline size_t get_vm_area_size(const struct vm_struct *area) { if (!(area->flags & VM_NO_GUARD)) /* return actual size without guard page */ return area->size - PAGE_SIZE; else return area->size; } extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); extern struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller); extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end); extern struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, const void *caller); extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); extern int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages); #ifdef CONFIG_MMU extern int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size); static inline void set_vm_flush_reset_perms(void *addr) { struct vm_struct *vm = find_vm_area(addr); if (vm) vm->flags |= VM_FLUSH_RESET_PERMS; } #else static inline int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) { return size >> PAGE_SHIFT; } static inline void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) { } static inline void unmap_kernel_range(unsigned long addr, unsigned long size) { } static inline void set_vm_flush_reset_perms(void *addr) { } #endif /* Allocate/destroy a 'vmalloc' VM area. */ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes); extern void free_vm_area(struct vm_struct *area); /* for /dev/kmem */ extern long vread(char *buf, char *addr, unsigned long count); extern long vwrite(char *buf, char *addr, unsigned long count); /* * Internals. Don't use.. */ extern struct list_head vmap_area_list; extern __init void vm_area_add_early(struct vm_struct *vm); extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); #ifdef CONFIG_SMP # ifdef CONFIG_MMU struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align); void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); # else static inline struct vm_struct ** pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align) { return NULL; } static inline void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) { } # endif #endif #ifdef CONFIG_MMU #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) #else #define VMALLOC_TOTAL 0UL #endif int register_vmap_purge_notifier(struct notifier_block *nb); int unregister_vmap_purge_notifier(struct notifier_block *nb); #endif /* _LINUX_VMALLOC_H */ seq_file_net.h 0000644 00000001332 14722070374 0007359 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SEQ_FILE_NET_H__ #define __SEQ_FILE_NET_H__ #include <linux/seq_file.h> struct net; extern struct net init_net; struct seq_net_private { #ifdef CONFIG_NET_NS struct net *net; #endif }; static inline struct net *seq_file_net(struct seq_file *seq) { #ifdef CONFIG_NET_NS return ((struct seq_net_private *)seq->private)->net; #else return &init_net; #endif } /* * This one is needed for proc_create_net_single since net is stored directly * in private not as a struct i.e. seq_file_net can't be used. 
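 *
 * (Illustrative: a show handler registered with proc_create_net_single()
 * recovers its namespace via seq_file_single_net(seq), while iterators
 * registered with proc_create_net() embed a struct seq_net_private and
 * must use seq_file_net() above.)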
*/ static inline struct net *seq_file_single_net(struct seq_file *seq) { #ifdef CONFIG_NET_NS return (struct net *)seq->private; #else return &init_net; #endif } #endif powercap.h 0000644 00000030047 14722070374 0006547 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * powercap.h: Data types and headers for sysfs power capping interface * Copyright (c) 2013, Intel Corporation. */ #ifndef __POWERCAP_H__ #define __POWERCAP_H__ #include <linux/device.h> #include <linux/idr.h> /* * A power cap class device can contain multiple powercap control_types. * Each control_type can have multiple power zones, which can be independently * controlled. Each power zone can have one or more constraints. */ struct powercap_control_type; struct powercap_zone; struct powercap_zone_constraint; /** * struct powercap_control_type_ops - Define control type callbacks * @set_enable: Enable/Disable whole control type. * Default is enabled. But this callback allows all zones * to be in disable state and remove any applied power * limits. If disabled power zone can only be monitored * not controlled. * @get_enable: get Enable/Disable status. * @release: Callback to inform that last reference to this * control type is closed. So it is safe to free data * structure associated with this control type. * This callback is mandatory if the client own memory * for the control type. * * This structure defines control type callbacks to be implemented by client * drivers */ struct powercap_control_type_ops { int (*set_enable) (struct powercap_control_type *, bool mode); int (*get_enable) (struct powercap_control_type *, bool *mode); int (*release) (struct powercap_control_type *); }; /** * struct powercap_control_type- Defines a powercap control_type * @name: name of control_type * @dev: device for this control_type * @idr: idr to have unique id for its child * @root_node: Root holding power zones for this control_type * @ops: Pointer to callback struct * @node_lock: mutex for control type * @allocated: This is possible that client owns the memory * used by this structure. In this case * this flag is set to false by framework to * prevent deallocation during release process. * Otherwise this flag is set to true. * @ctrl_inst: link to the control_type list * * Defines powercap control_type. This acts as a container for power * zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc. * All fields are private and should not be used by client drivers. */ struct powercap_control_type { struct device dev; struct idr idr; int nr_zones; const struct powercap_control_type_ops *ops; struct mutex lock; bool allocated; struct list_head node; }; /** * struct powercap_zone_ops - Define power zone callbacks * @get_max_energy_range_uj: Get maximum range of energy counter in * micro-joules. * @get_energy_uj: Get current energy counter in micro-joules. * @reset_energy_uj: Reset micro-joules energy counter. * @get_max_power_range_uw: Get maximum range of power counter in * micro-watts. * @get_power_uw: Get current power counter in micro-watts. * @set_enable: Enable/Disable power zone controls. * Default is enabled. * @get_enable: get Enable/Disable status. * @release: Callback to inform that last reference to this * control type is closed. So it is safe to free * data structure associated with this * control type. Mandatory, if client driver owns * the power_zone memory. * * This structure defines zone callbacks to be implemented by client drivers. * Client drivers can define both energy and power related callbacks. 
But at * the least one type (either power or energy) is mandatory. Client drivers * should handle mutual exclusion, if required in callbacks. */ struct powercap_zone_ops { int (*get_max_energy_range_uj) (struct powercap_zone *, u64 *); int (*get_energy_uj) (struct powercap_zone *, u64 *); int (*reset_energy_uj) (struct powercap_zone *); int (*get_max_power_range_uw) (struct powercap_zone *, u64 *); int (*get_power_uw) (struct powercap_zone *, u64 *); int (*set_enable) (struct powercap_zone *, bool mode); int (*get_enable) (struct powercap_zone *, bool *mode); int (*release) (struct powercap_zone *); }; #define POWERCAP_ZONE_MAX_ATTRS 6 #define POWERCAP_CONSTRAINTS_ATTRS 8 #define MAX_CONSTRAINTS_PER_ZONE 10 /** * struct powercap_zone- Defines instance of a power cap zone * @id: Unique id * @name: Power zone name. * @control_type_inst: Control type instance for this zone. * @ops: Pointer to the zone operation structure. * @dev: Instance of a device. * @const_id_cnt: Number of constraint defined. * @idr: Instance to an idr entry for children zones. * @parent_idr: To remove reference from the parent idr. * @private_data: Private data pointer if any for this zone. * @zone_dev_attrs: Attributes associated with this device. * @zone_attr_count: Attribute count. * @dev_zone_attr_group: Attribute group for attributes. * @dev_attr_groups: Attribute group store to register with device. * @allocated: This is possible that client owns the memory * used by this structure. In this case * this flag is set to false by framework to * prevent deallocation during release process. * Otherwise this flag is set to true. * @constraint_ptr: List of constraints for this zone. * * This defines a power zone instance. The fields of this structure are * private, and should not be used by client drivers. */ struct powercap_zone { int id; char *name; void *control_type_inst; const struct powercap_zone_ops *ops; struct device dev; int const_id_cnt; struct idr idr; struct idr *parent_idr; void *private_data; struct attribute **zone_dev_attrs; int zone_attr_count; struct attribute_group dev_zone_attr_group; const struct attribute_group *dev_attr_groups[2]; /* 1 group + NULL */ bool allocated; struct powercap_zone_constraint *constraints; }; /** * struct powercap_zone_constraint_ops - Define constraint callbacks * @set_power_limit_uw: Set power limit in micro-watts. * @get_power_limit_uw: Get power limit in micro-watts. * @set_time_window_us: Set time window in micro-seconds. * @get_time_window_us: Get time window in micro-seconds. * @get_max_power_uw: Get max power allowed in micro-watts. * @get_min_power_uw: Get min power allowed in micro-watts. * @get_max_time_window_us: Get max time window allowed in micro-seconds. * @get_min_time_window_us: Get min time window allowed in micro-seconds. * @get_name: Get the name of constraint * * This structure is used to define the constraint callbacks for the client * drivers. The following callbacks are mandatory and can't be NULL: * set_power_limit_uw * get_power_limit_uw * set_time_window_us * get_time_window_us * get_name * Client drivers should handle mutual exclusion, if required in callbacks. 
 */
struct powercap_zone_constraint_ops {
	int (*set_power_limit_uw) (struct powercap_zone *, int, u64);
	int (*get_power_limit_uw) (struct powercap_zone *, int, u64 *);
	int (*set_time_window_us) (struct powercap_zone *, int, u64);
	int (*get_time_window_us) (struct powercap_zone *, int, u64 *);
	int (*get_max_power_uw) (struct powercap_zone *, int, u64 *);
	int (*get_min_power_uw) (struct powercap_zone *, int, u64 *);
	int (*get_max_time_window_us) (struct powercap_zone *, int, u64 *);
	int (*get_min_time_window_us) (struct powercap_zone *, int, u64 *);
	const char *(*get_name) (struct powercap_zone *, int);
};

/**
 * struct powercap_zone_constraint - Defines an instance of a constraint
 * @id:			Instance id of this constraint.
 * @power_zone:		Pointer to the power zone for this constraint.
 * @ops:		Pointer to the constraint callbacks.
 *
 * This defines a constraint instance.
 */
struct powercap_zone_constraint {
	int id;
	struct powercap_zone *power_zone;
	const struct powercap_zone_constraint_ops *ops;
};

/* For clients to get their device pointer; may be used for dev_dbg()s */
#define POWERCAP_GET_DEV(power_zone)	(&power_zone->dev)

/**
 * powercap_set_zone_data() - Set private data for a zone
 * @power_zone:	A pointer to the valid zone instance.
 * @pdata:	A pointer to the user private data.
 *
 * Allows client drivers to associate some private data with a zone instance.
 */
static inline void powercap_set_zone_data(struct powercap_zone *power_zone,
						void *pdata)
{
	if (power_zone)
		power_zone->private_data = pdata;
}

/**
 * powercap_get_zone_data() - Get private data for a zone
 * @power_zone:	A pointer to the valid zone instance.
 *
 * Allows client drivers to get the private data associated with a zone by a
 * prior call to powercap_set_zone_data().
 */
static inline void *powercap_get_zone_data(struct powercap_zone *power_zone)
{
	if (power_zone)
		return power_zone->private_data;
	return NULL;
}

/**
 * powercap_register_control_type() - Register a control_type with the framework
 * @control_type:	Pointer to client-allocated memory for the control type
 *			structure storage. If this is NULL, the powercap
 *			framework will allocate the memory and own it.
 *			The advantage of this parameter is that the client can
 *			embed this data in its own data structures and allocate
 *			it in a single call, preventing multiple allocations.
 * @name:		The name of this control_type, which will be shown
 *			in the sysfs interface.
 * @ops:		Callbacks for the control type. This parameter is
 *			optional.
 *
 * Used to create a control_type with the power capping class. Here a
 * control_type can represent a type of technology which can control a range
 * of power zones; for example, RAPL (Running Average Power Limit) on Intel®
 * 64 and IA-32 processor architectures. The name can be any string, but it
 * must be unique, otherwise this function returns NULL.
 * A pointer to the control_type instance is returned on success.
 */
struct powercap_control_type *powercap_register_control_type(
	struct powercap_control_type *control_type,
	const char *name,
	const struct powercap_control_type_ops *ops);

/**
 * powercap_unregister_control_type() - Unregister a control_type from the framework
 * @instance:	A pointer to the valid control_type instance.
 *
 * Used to unregister a control_type with the power capping class.
 * All power zones registered under this control type have to be unregistered
 * before calling this function, or it will fail with an error code.
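 *
 * Example of pairing registration and unregistration (a minimal sketch;
 * the control-type name "my_rapl" and the ops variable my_ct_ops are
 * hypothetical, not part of this API):
 *
 *	static struct powercap_control_type *my_ct;
 *
 *	static int my_driver_init(void)
 *	{
 *		my_ct = powercap_register_control_type(NULL, "my_rapl",
 *						       &my_ct_ops);
 *		if (IS_ERR_OR_NULL(my_ct))
 *			return my_ct ? PTR_ERR(my_ct) : -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void my_driver_exit(void)
 *	{
 *		powercap_unregister_control_type(my_ct);
 *	}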
 */
int powercap_unregister_control_type(struct powercap_control_type *instance);

/* Zone register/unregister API */

/**
 * powercap_register_zone() - Register a power zone
 * @power_zone:		Pointer to client-allocated memory for the power zone
 *			structure storage. If this is NULL, the powercap
 *			framework will allocate the memory and own it.
 *			The advantage of this parameter is that the client can
 *			embed this data in its own data structures and allocate
 *			it in a single call, preventing multiple allocations.
 * @control_type:	A control_type instance under which this zone operates.
 * @name:		A name for this zone.
 * @parent:		A pointer to the parent power zone instance if any,
 *			or NULL.
 * @ops:		Pointer to the zone operation callback structure.
 * @nr_constraints:	Number of constraints for this zone.
 * @const_ops:		Pointer to the constraint callback structure.
 *
 * Register a power zone under a given control type. A power zone must
 * register a pointer to a structure representing zone callbacks.
 * A power zone can be located under a parent power zone, in which case
 * @parent should point to it. Otherwise, if @parent is NULL, the new power
 * zone will be located directly under the given control type.
 * For each power zone there may be a number of constraints that appear in
 * the sysfs under that zone as attributes with unique numeric IDs.
 * Returns a pointer to the power_zone on success.
 */
struct powercap_zone *powercap_register_zone(
			struct powercap_zone *power_zone,
			struct powercap_control_type *control_type,
			const char *name,
			struct powercap_zone *parent,
			const struct powercap_zone_ops *ops,
			int nr_constraints,
			const struct powercap_zone_constraint_ops *const_ops);

/**
 * powercap_unregister_zone() - Unregister a zone device
 * @control_type:	A pointer to the valid instance of a control_type.
 * @power_zone:		A pointer to the valid zone instance for a
 *			control_type.
 *
 * Used to unregister a zone device for a control_type. The caller should
 * make sure that children of this zone are unregistered first.
 */
int powercap_unregister_zone(struct powercap_control_type *control_type,
				struct powercap_zone *power_zone);

#endif
bit_spinlock.h 0000644 00000004470 14722070374 0007410 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/atomic.h>
#include <linux/bug.h>

/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
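	 *
	 * Typical usage (a minimal sketch; MY_LOCK_BIT and the flags word
	 * are hypothetical, chosen so the lock bit shares a word with
	 * other state bits):
	 *
	 *	#define MY_LOCK_BIT	0
	 *	unsigned long flags = 0;
	 *
	 *	bit_spin_lock(MY_LOCK_BIT, &flags);
	 *	// critical section; preemption is disabled here
	 *	bit_spin_unlock(MY_LOCK_BIT, &flags);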
*/ preempt_disable(); #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) while (unlikely(test_and_set_bit_lock(bitnum, addr))) { preempt_enable(); do { cpu_relax(); } while (test_bit(bitnum, addr)); preempt_disable(); } #endif __acquire(bitlock); } /* * Return true if it was acquired */ static inline int bit_spin_trylock(int bitnum, unsigned long *addr) { preempt_disable(); #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) if (unlikely(test_and_set_bit_lock(bitnum, addr))) { preempt_enable(); return 0; } #endif __acquire(bitlock); return 1; } /* * bit-based spin_unlock() */ static inline void bit_spin_unlock(int bitnum, unsigned long *addr) { #ifdef CONFIG_DEBUG_SPINLOCK BUG_ON(!test_bit(bitnum, addr)); #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) clear_bit_unlock(bitnum, addr); #endif preempt_enable(); __release(bitlock); } /* * bit-based spin_unlock() * non-atomic version, which can be used eg. if the bit lock itself is * protecting the rest of the flags in the word. */ static inline void __bit_spin_unlock(int bitnum, unsigned long *addr) { #ifdef CONFIG_DEBUG_SPINLOCK BUG_ON(!test_bit(bitnum, addr)); #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) __clear_bit_unlock(bitnum, addr); #endif preempt_enable(); __release(bitlock); } /* * Return true if the lock is held. */ static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) { #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) return test_bit(bitnum, addr); #elif defined CONFIG_PREEMPT_COUNT return preempt_count(); #else return 1; #endif } #endif /* __LINUX_BIT_SPINLOCK_H */ rcu_segcblist.h 0000644 00000005424 14722070374 0007560 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * RCU segmented callback lists * * This seemingly RCU-private file must be available to SRCU users * because the size of the TREE SRCU srcu_struct structure depends * on these definitions. * * Copyright IBM Corporation, 2017 * * Authors: Paul E. McKenney <paulmck@linux.net.ibm.com> */ #ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H #define __INCLUDE_LINUX_RCU_SEGCBLIST_H #include <linux/types.h> #include <linux/atomic.h> /* Simple unsegmented callback lists. */ struct rcu_cblist { struct rcu_head *head; struct rcu_head **tail; long len; long len_lazy; }; #define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head } /* Complicated segmented callback lists. ;-) */ /* * Index values for segments in rcu_segcblist structure. * * The segments are as follows: * * [head, *tails[RCU_DONE_TAIL]): * Callbacks whose grace period has elapsed, and thus can be invoked. * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]): * Callbacks waiting for the current GP from the current CPU's viewpoint. * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]): * Callbacks that arrived before the next GP started, again from * the current CPU's viewpoint. These can be handled by the next GP. * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]): * Callbacks that might have arrived after the next GP started. * There is some uncertainty as to when a given GP starts and * ends, but a CPU knows the exact times if it is the one starting * or ending the GP. Other CPUs know that the previous GP ends * before the next one starts. * * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also * empty. * * The ->gp_seq[] array contains the grace-period number at which the * corresponding segment of callbacks will be ready to invoke. 
A given * element of this array is meaningful only when the corresponding segment * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have * not yet been assigned a grace-period number). */ #define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ #define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ #define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */ #define RCU_NEXT_TAIL 3 #define RCU_CBLIST_NSEGS 4 struct rcu_segcblist { struct rcu_head *head; struct rcu_head **tails[RCU_CBLIST_NSEGS]; unsigned long gp_seq[RCU_CBLIST_NSEGS]; #ifdef CONFIG_RCU_NOCB_CPU atomic_long_t len; #else long len; #endif long len_lazy; u8 enabled; u8 offloaded; }; #define RCU_SEGCBLIST_INITIALIZER(n) \ { \ .head = NULL, \ .tails[RCU_DONE_TAIL] = &n.head, \ .tails[RCU_WAIT_TAIL] = &n.head, \ .tails[RCU_NEXT_READY_TAIL] = &n.head, \ .tails[RCU_NEXT_TAIL] = &n.head, \ } #endif /* __INCLUDE_LINUX_RCU_SEGCBLIST_H */ ath9k_platform.h 0000644 00000002705 14722070374 0007653 0 ustar 00 /* * Copyright (c) 2008 Atheros Communications Inc. * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _LINUX_ATH9K_PLATFORM_H #define _LINUX_ATH9K_PLATFORM_H #define ATH9K_PLAT_EEP_MAX_WORDS 2048 struct ath9k_platform_data { const char *eeprom_name; u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS]; u8 *macaddr; int led_pin; u32 gpio_mask; u32 gpio_val; u32 bt_active_pin; u32 bt_priority_pin; u32 wlan_active_pin; bool endian_check; bool is_clk_25mhz; bool tx_gain_buffalo; bool disable_2ghz; bool disable_5ghz; bool led_active_high; int (*get_mac_revision)(void); int (*external_reset)(void); bool use_eeprom; }; #endif /* _LINUX_ATH9K_PLATFORM_H */ cpu_cooling.h 0000644 00000003340 14722070374 0007224 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/cpu_cooling.h * * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef __CPU_COOLING_H__ #define __CPU_COOLING_H__ #include <linux/of.h> #include <linux/thermal.h> #include <linux/cpumask.h> struct cpufreq_policy; #ifdef CONFIG_CPU_THERMAL /** * cpufreq_cooling_register - function to create cpufreq cooling device. * @policy: cpufreq policy. */ struct thermal_cooling_device * cpufreq_cooling_register(struct cpufreq_policy *policy); /** * cpufreq_cooling_unregister - function to remove cpufreq cooling device. * @cdev: thermal cooling device pointer. 
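 *
 * Example of typical register/unregister pairing in a cpufreq driver
 * (a minimal sketch; the cdev variable and surrounding error handling are
 * hypothetical; the ERR_PTR convention matches the !CONFIG_CPU_THERMAL
 * stub below):
 *
 *	struct thermal_cooling_device *cdev;
 *
 *	cdev = cpufreq_cooling_register(policy);
 *	if (IS_ERR(cdev))
 *		return PTR_ERR(cdev);
 *	...
 *	cpufreq_cooling_unregister(cdev);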
*/ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev); #else /* !CONFIG_CPU_THERMAL */ static inline struct thermal_cooling_device * cpufreq_cooling_register(struct cpufreq_policy *policy) { return ERR_PTR(-ENOSYS); } static inline void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { return; } #endif /* CONFIG_CPU_THERMAL */ #if defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) /** * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. * @policy: cpufreq policy. */ struct thermal_cooling_device * of_cpufreq_cooling_register(struct cpufreq_policy *policy); #else static inline struct thermal_cooling_device * of_cpufreq_cooling_register(struct cpufreq_policy *policy) { return NULL; } #endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */ #endif /* __CPU_COOLING_H__ */ binfmts.h 0000644 00000012226 14722070374 0006370 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BINFMTS_H #define _LINUX_BINFMTS_H #include <linux/sched.h> #include <linux/unistd.h> #include <asm/exec.h> #include <uapi/linux/binfmts.h> struct filename; #define CORENAME_MAX_SIZE 128 /* * This structure is used to hold the arguments that are used when loading binaries. */ struct linux_binprm { #ifdef CONFIG_MMU struct vm_area_struct *vma; unsigned long vma_pages; #else # define MAX_ARG_PAGES 32 struct page *page[MAX_ARG_PAGES]; #endif struct mm_struct *mm; unsigned long p; /* current top of mem */ unsigned long argmin; /* rlimit marker for copy_strings() */ unsigned int /* * True after the bprm_set_creds hook has been called once * (multiple calls can be made via prepare_binprm() for * binfmt_script/misc). */ called_set_creds:1, /* * True if most recent call to the commoncaps bprm_set_creds * hook (due to multiple prepare_binprm() calls from the * binfmt_script/misc handlers) resulted in elevated * privileges. */ cap_elevated:1, /* * Set by bprm_set_creds hook to indicate a privilege-gaining * exec has happened. Used to sanitize execution environment * and to set AT_SECURE auxv for glibc. */ secureexec:1, /* * Set by flush_old_exec, when exec_mmap has been called. * This is past the point of no return, when the * exec_update_mutex has been taken. */ called_exec_mmap:1; #ifdef __alpha__ unsigned int taso:1; #endif unsigned int recursion_depth; /* only for search_binary_handler() */ struct file * file; struct cred *cred; /* new credentials */ int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ unsigned int per_clear; /* bits to clear in current->personality */ int argc, envc; const char * filename; /* Name of binary as seen by procps */ const char * interp; /* Name of the binary really executed. Most of the time same as filename, but could be different for binfmt_{misc,script} */ unsigned interp_flags; unsigned interp_data; unsigned long loader, exec; struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. 
*/ char buf[BINPRM_BUF_SIZE]; } __randomize_layout; #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT) /* fd of the binary should be passed to the interpreter */ #define BINPRM_FLAGS_EXECFD_BIT 1 #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) /* filename of the binary will be inaccessible after exec */ #define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 #define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) /* Function parameter for binfmt->coredump */ struct coredump_params { const kernel_siginfo_t *siginfo; struct pt_regs *regs; struct file *file; unsigned long limit; unsigned long mm_flags; loff_t written; loff_t pos; }; /* * This structure defines the functions that are used to load the binary formats that * linux accepts. */ struct linux_binfmt { struct list_head lh; struct module *module; int (*load_binary)(struct linux_binprm *); int (*load_shlib)(struct file *); int (*core_dump)(struct coredump_params *cprm); unsigned long min_coredump; /* minimal dump size */ } __randomize_layout; extern void __register_binfmt(struct linux_binfmt *fmt, int insert); /* Registration of default binfmt handlers */ static inline void register_binfmt(struct linux_binfmt *fmt) { __register_binfmt(fmt, 0); } /* Same as above, but adds a new binfmt at the top of the list */ static inline void insert_binfmt(struct linux_binfmt *fmt) { __register_binfmt(fmt, 1); } extern void unregister_binfmt(struct linux_binfmt *); extern int prepare_binprm(struct linux_binprm *); extern int __must_check remove_arg_zero(struct linux_binprm *); extern int search_binary_handler(struct linux_binprm *); extern int flush_old_exec(struct linux_binprm * bprm); extern void setup_new_exec(struct linux_binprm * bprm); extern void finalize_exec(struct linux_binprm *bprm); extern void would_dump(struct linux_binprm *, struct file *); extern int suid_dumpable; /* Stack area protections */ #define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */ #define EXSTACK_DISABLE_X 1 /* Disable executable stacks */ #define EXSTACK_ENABLE_X 2 /* Enable executable stacks */ extern int setup_arg_pages(struct linux_binprm * bprm, unsigned long stack_top, int executable_stack); extern int transfer_args_to_stack(struct linux_binprm *bprm, unsigned long *sp_location); extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm); extern int copy_strings_kernel(int argc, const char *const *argv, struct linux_binprm *bprm); extern void install_exec_creds(struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); extern int do_execve(struct filename *, const char __user * const __user *, const char __user * const __user *); extern int do_execveat(int, struct filename *, const char __user * const __user *, const char __user * const __user *, int); int do_execve_file(struct file *file, void *__argv, void *__envp); #endif /* _LINUX_BINFMTS_H */ nvmem-provider.h 0000644 00000007610 14722070374 0007701 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * nvmem framework provider. 
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#ifndef _LINUX_NVMEM_PROVIDER_H
#define _LINUX_NVMEM_PROVIDER_H

#include <linux/err.h>
#include <linux/errno.h>

struct nvmem_device;
struct nvmem_cell_info;
typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
				void *val, size_t bytes);
typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
				 void *val, size_t bytes);

enum nvmem_type {
	NVMEM_TYPE_UNKNOWN = 0,
	NVMEM_TYPE_EEPROM,
	NVMEM_TYPE_OTP,
	NVMEM_TYPE_BATTERY_BACKED,
};

/**
 * struct nvmem_config - NVMEM device configuration
 *
 * @dev:	Parent device.
 * @name:	Optional name.
 * @id:		Optional device ID used in full name. Ignored if name is NULL.
 * @owner:	Pointer to exporter module. Used for refcounting.
 * @cells:	Optional array of pre-defined NVMEM cells.
 * @ncells:	Number of elements in cells.
 * @type:	Type of the nvmem storage.
 * @read_only:	Device is read-only.
 * @root_only:	Device is accessible to root only.
 * @no_of_node:	Device should not use the parent's of_node even if it's !NULL.
 * @reg_read:	Callback to read data.
 * @reg_write:	Callback to write data.
 * @size:	Device size.
 * @word_size:	Minimum read/write access granularity.
 * @stride:	Minimum read/write access stride.
 * @priv:	User context passed to read/write callbacks.
 *
 * Note: A default "nvmem<id>" name will be assigned to the device if
 * no name is specified in its configuration. In such case "<id>" is
 * generated with ida_simple_get() and the provided id field is ignored.
 *
 * Note: Specifying a name and setting id to -1 implies a unique device
 * whose name is provided as-is (kept unaltered).
 */
struct nvmem_config {
	struct device		*dev;
	const char		*name;
	int			id;
	struct module		*owner;
	const struct nvmem_cell_info	*cells;
	int			ncells;
	enum nvmem_type		type;
	bool			read_only;
	bool			root_only;
	bool			no_of_node;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	int			size;
	int			word_size;
	int			stride;
	void			*priv;
	/* To be only used by old driver/misc/eeprom drivers */
	bool			compat;
	struct device		*base_dev;
};

/**
 * struct nvmem_cell_table - NVMEM cell definitions for a given provider
 *
 * @nvmem_name:		Provider name.
 * @cells:		Array of cell definitions.
 * @ncells:		Number of cell definitions in the array.
 * @node:		List node.
 *
 * This structure, together with related helper functions, is provided for
 * users that can't access the nvmem provider structure but wish to register
 * cell definitions for it, e.g. board files registering an EEPROM device.
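 *
 * Example board-file usage (a minimal sketch; the cell layout and the
 * provider name "myeeprom0" are hypothetical):
 *
 *	static const struct nvmem_cell_info board_cells[] = {
 *		{ .name = "mac-address", .offset = 0x0, .bytes = 6 },
 *	};
 *
 *	static struct nvmem_cell_table board_cell_table = {
 *		.nvmem_name	= "myeeprom0",
 *		.cells		= board_cells,
 *		.ncells		= ARRAY_SIZE(board_cells),
 *	};
 *
 *	nvmem_add_cell_table(&board_cell_table);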
*/ struct nvmem_cell_table { const char *nvmem_name; const struct nvmem_cell_info *cells; size_t ncells; struct list_head node; }; #if IS_ENABLED(CONFIG_NVMEM) struct nvmem_device *nvmem_register(const struct nvmem_config *cfg); void nvmem_unregister(struct nvmem_device *nvmem); struct nvmem_device *devm_nvmem_register(struct device *dev, const struct nvmem_config *cfg); int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem); void nvmem_add_cell_table(struct nvmem_cell_table *table); void nvmem_del_cell_table(struct nvmem_cell_table *table); #else static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c) { return ERR_PTR(-EOPNOTSUPP); } static inline void nvmem_unregister(struct nvmem_device *nvmem) {} static inline struct nvmem_device * devm_nvmem_register(struct device *dev, const struct nvmem_config *c) { return nvmem_register(c); } static inline int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) { return -EOPNOTSUPP; } static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {} static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {} #endif /* CONFIG_NVMEM */ #endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ if_pppox.h 0000644 00000005541 14722070374 0006554 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /*************************************************************************** * Linux PPP over X - Generic PPP transport layer sockets * Linux PPP over Ethernet (PPPoE) Socket Implementation (RFC 2516) * * This file supplies definitions required by the PPP over Ethernet driver * (pppox.c). All version information wrt this file is located in pppox.c * * License: */ #ifndef __LINUX_IF_PPPOX_H #define __LINUX_IF_PPPOX_H #include <linux/if.h> #include <linux/netdevice.h> #include <linux/ppp_channel.h> #include <linux/skbuff.h> #include <linux/workqueue.h> #include <uapi/linux/if_pppox.h> static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb) { return (struct pppoe_hdr *)skb_network_header(skb); } struct pppoe_opt { struct net_device *dev; /* device associated with socket*/ int ifindex; /* ifindex of device associated with socket */ struct pppoe_addr pa; /* what this socket is bound to*/ struct sockaddr_pppox relay; /* what socket data will be relayed to (PPPoE relaying) */ struct work_struct padt_work;/* Work item for handling PADT */ }; struct pptp_opt { struct pptp_addr src_addr; struct pptp_addr dst_addr; u32 ack_sent, ack_recv; u32 seq_sent, seq_recv; int ppp_flags; }; #include <net/sock.h> struct pppox_sock { /* struct sock must be the first member of pppox_sock */ struct sock sk; struct ppp_channel chan; struct pppox_sock *next; /* for hash table */ union { struct pppoe_opt pppoe; struct pptp_opt pptp; } proto; __be16 num; }; #define pppoe_dev proto.pppoe.dev #define pppoe_ifindex proto.pppoe.ifindex #define pppoe_pa proto.pppoe.pa #define pppoe_relay proto.pppoe.relay static inline struct pppox_sock *pppox_sk(struct sock *sk) { return (struct pppox_sock *)sk; } static inline struct sock *sk_pppox(struct pppox_sock *po) { return (struct sock *)po; } struct module; struct pppox_proto { int (*create)(struct net *net, struct socket *sock, int kern); int (*ioctl)(struct socket *sock, unsigned int cmd, unsigned long arg); struct module *owner; }; extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp); extern void unregister_pppox_proto(int proto_num); extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ extern int 
pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); #define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t) /* PPPoX socket states */ enum { PPPOX_NONE = 0, /* initial state */ PPPOX_CONNECTED = 1, /* connection established ==TCP_ESTABLISHED */ PPPOX_BOUND = 2, /* bound to ppp device */ PPPOX_RELAY = 4, /* forwarding is enabled */ PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/ }; #endif /* !(__LINUX_IF_PPPOX_H) */ pci-ecam.h 0000644 00000004307 14722070374 0006405 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2016 Broadcom */ #ifndef DRIVERS_PCI_ECAM_H #define DRIVERS_PCI_ECAM_H #include <linux/pci.h> #include <linux/kernel.h> #include <linux/platform_device.h> /* * struct to hold pci ops and bus shift of the config window * for a PCI controller. */ struct pci_config_window; struct pci_ecam_ops { unsigned int bus_shift; struct pci_ops pci_ops; int (*init)(struct pci_config_window *); }; /* * struct to hold the mappings of a config space window. This * is expected to be used as sysdata for PCI controllers that * use ECAM. */ struct pci_config_window { struct resource res; struct resource busr; void *priv; struct pci_ecam_ops *ops; union { void __iomem *win; /* 64-bit single mapping */ void __iomem **winp; /* 32-bit per-bus mapping */ }; struct device *parent;/* ECAM res was from this dev */ }; /* create and free pci_config_window */ struct pci_config_window *pci_ecam_create(struct device *dev, struct resource *cfgres, struct resource *busr, struct pci_ecam_ops *ops); void pci_ecam_free(struct pci_config_window *cfg); /* map_bus when ->sysdata is an instance of pci_config_window */ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, int where); /* default ECAM ops */ extern struct pci_ecam_ops pci_generic_ecam_ops; #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */ extern struct pci_ecam_ops pci_32b_read_ops; /* 32-bit read only */ extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */ extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ #endif #ifdef CONFIG_PCI_HOST_COMMON /* for DT-based PCI controllers that support ECAM */ int pci_host_common_probe(struct platform_device *pdev, struct pci_ecam_ops *ops); int pci_host_common_remove(struct platform_device *pdev); #endif #endif preempt.h 0000644 00000024003 14722070374 0006376 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PREEMPT_H #define __LINUX_PREEMPT_H /* * include/linux/preempt.h - macros for accessing and manipulating * preempt_count (used for kernel preemption, interrupt count, etc.) */ #include <linux/linkage.h> #include <linux/list.h> /* * We put the hardirq and softirq counter into the preemption * counter. The bitmask has the following meaning: * * - bits 0-7 are the preemption count (max preemption depth: 256) * - bits 8-15 are the softirq count (max # of softirqs: 256) * * The hardirq count could in theory be the same as the number of * interrupts in the system, but we run all interrupt handlers with * interrupts disabled, so we cannot have nesting interrupts. 
Though * there are a few palaeontologic drivers which reenable interrupts in * the handler, so we need more than one bit here. * * PREEMPT_MASK: 0x000000ff * SOFTIRQ_MASK: 0x0000ff00 * HARDIRQ_MASK: 0x000f0000 * NMI_MASK: 0x00100000 * PREEMPT_NEED_RESCHED: 0x80000000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 #define HARDIRQ_BITS 4 #define NMI_BITS 1 #define PREEMPT_SHIFT 0 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) #define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) #define __IRQ_MASK(x) ((1UL << (x))-1) #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) #define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #define NMI_OFFSET (1UL << NMI_SHIFT) #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) #define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) /* * Disable preemption until the scheduler is running -- use an unconditional * value so that it also works on !PREEMPT_COUNT kernels. * * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). */ #define INIT_PREEMPT_COUNT PREEMPT_OFFSET /* * Initial preempt_count value; reflects the preempt_count schedule invariant * which states that during context switches: * * preempt_count() == 2*PREEMPT_DISABLE_OFFSET * * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. * Note: See finish_task_switch(). */ #define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ #include <asm/preempt.h> #define hardirq_count() (preempt_count() & HARDIRQ_MASK) #define softirq_count() (preempt_count() & SOFTIRQ_MASK) #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ | NMI_MASK)) /* * Are we doing bottom half or hardware interrupt processing? * * in_irq() - We're in (hard) IRQ context * in_softirq() - We have BH disabled, or are processing softirqs * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled * in_serving_softirq() - We're in softirq context * in_nmi() - We're in NMI context * in_task() - We're in task context * * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really * should not be used in new code. */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) #define in_nmi() (preempt_count() & NMI_MASK) #define in_task() (!(preempt_count() & \ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) /* * The preempt_count offset after preempt_disable(); */ #if defined(CONFIG_PREEMPT_COUNT) # define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET #else # define PREEMPT_DISABLE_OFFSET 0 #endif /* * The preempt_count offset after spin_lock() */ #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET /* * The preempt_count offset needed for things like: * * spin_lock_bh() * * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and * softirqs, such that unlock sequences of: * * spin_unlock(); * local_bh_enable(); * * Work as expected. */ #define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET) /* * Are we running in atomic context? 
WARNING: this macro cannot * always detect atomic context; in particular, it cannot know about * held spinlocks in non-preemptible kernels. Thus it should not be * used in the general case to determine whether sleeping is possible. * Do not use in_atomic() in driver code. */ #define in_atomic() (preempt_count() != 0) /* * Check whether we were atomic before we did preempt_disable(): * (used by the scheduler) */ #define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET) #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE) extern void preempt_count_add(int val); extern void preempt_count_sub(int val); #define preempt_count_dec_and_test() \ ({ preempt_count_sub(1); should_resched(0); }) #else #define preempt_count_add(val) __preempt_count_add(val) #define preempt_count_sub(val) __preempt_count_sub(val) #define preempt_count_dec_and_test() __preempt_count_dec_and_test() #endif #define __preempt_count_inc() __preempt_count_add(1) #define __preempt_count_dec() __preempt_count_sub(1) #define preempt_count_inc() preempt_count_add(1) #define preempt_count_dec() preempt_count_sub(1) #ifdef CONFIG_PREEMPT_COUNT #define preempt_disable() \ do { \ preempt_count_inc(); \ barrier(); \ } while (0) #define sched_preempt_enable_no_resched() \ do { \ barrier(); \ preempt_count_dec(); \ } while (0) #define preempt_enable_no_resched() sched_preempt_enable_no_resched() #define preemptible() (preempt_count() == 0 && !irqs_disabled()) #ifdef CONFIG_PREEMPTION #define preempt_enable() \ do { \ barrier(); \ if (unlikely(preempt_count_dec_and_test())) \ __preempt_schedule(); \ } while (0) #define preempt_enable_notrace() \ do { \ barrier(); \ if (unlikely(__preempt_count_dec_and_test())) \ __preempt_schedule_notrace(); \ } while (0) #define preempt_check_resched() \ do { \ if (should_resched(0)) \ __preempt_schedule(); \ } while (0) #else /* !CONFIG_PREEMPTION */ #define preempt_enable() \ do { \ barrier(); \ preempt_count_dec(); \ } while (0) #define preempt_enable_notrace() \ do { \ barrier(); \ __preempt_count_dec(); \ } while (0) #define preempt_check_resched() do { } while (0) #endif /* CONFIG_PREEMPTION */ #define preempt_disable_notrace() \ do { \ __preempt_count_inc(); \ barrier(); \ } while (0) #define preempt_enable_no_resched_notrace() \ do { \ barrier(); \ __preempt_count_dec(); \ } while (0) #else /* !CONFIG_PREEMPT_COUNT */ /* * Even if we don't have any preemption, we need preempt disable/enable * to be barriers, so that we don't have things like get_user/put_user * that can cause faults and scheduling migrate into our preempt-protected * region. */ #define preempt_disable() barrier() #define sched_preempt_enable_no_resched() barrier() #define preempt_enable_no_resched() barrier() #define preempt_enable() barrier() #define preempt_check_resched() do { } while (0) #define preempt_disable_notrace() barrier() #define preempt_enable_no_resched_notrace() barrier() #define preempt_enable_notrace() barrier() #define preemptible() 0 #endif /* CONFIG_PREEMPT_COUNT */ #ifdef MODULE /* * Modules have no business playing preemption tricks. 
*/ #undef sched_preempt_enable_no_resched #undef preempt_enable_no_resched #undef preempt_enable_no_resched_notrace #undef preempt_check_resched #endif #define preempt_set_need_resched() \ do { \ set_preempt_need_resched(); \ } while (0) #define preempt_fold_need_resched() \ do { \ if (tif_need_resched()) \ set_preempt_need_resched(); \ } while (0) #ifdef CONFIG_PREEMPT_NOTIFIERS struct preempt_notifier; /** * preempt_ops - notifiers called when a task is preempted and rescheduled * @sched_in: we're about to be rescheduled: * notifier: struct preempt_notifier for the task being scheduled * cpu: cpu we're scheduled on * @sched_out: we've just been preempted * notifier: struct preempt_notifier for the task being preempted * next: the task that's kicking us out * * Please note that sched_in and out are called under different * contexts. sched_out is called with rq lock held and irq disabled * while sched_in is called without rq lock and irq enabled. This * difference is intentional and depended upon by its users. */ struct preempt_ops { void (*sched_in)(struct preempt_notifier *notifier, int cpu); void (*sched_out)(struct preempt_notifier *notifier, struct task_struct *next); }; /** * preempt_notifier - key for installing preemption notifiers * @link: internal use * @ops: defines the notifier functions to be called * * Usually used in conjunction with container_of(). */ struct preempt_notifier { struct hlist_node link; struct preempt_ops *ops; }; void preempt_notifier_inc(void); void preempt_notifier_dec(void); void preempt_notifier_register(struct preempt_notifier *notifier); void preempt_notifier_unregister(struct preempt_notifier *notifier); static inline void preempt_notifier_init(struct preempt_notifier *notifier, struct preempt_ops *ops) { INIT_HLIST_NODE(¬ifier->link); notifier->ops = ops; } #endif /** * migrate_disable - Prevent migration of the current task * * Maps to preempt_disable() which also disables preemption. Use * migrate_disable() to annotate that the intent is to prevent migration, * but not necessarily preemption. * * Can be invoked nested like preempt_disable() and needs the corresponding * number of migrate_enable() invocations. */ static __always_inline void migrate_disable(void) { preempt_disable(); } /** * migrate_enable - Allow migration of the current task * * Counterpart to migrate_disable(). * * As migrate_disable() can be invoked nested, only the outermost invocation * reenables migration. * * Currently mapped to preempt_enable(). 
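 *
 * Example (a minimal sketch; my_stats is a hypothetical DEFINE_PER_CPU
 * variable):
 *
 *	struct my_stats *s;
 *
 *	migrate_disable();
 *	s = this_cpu_ptr(&my_stats);
 *	s->events++;	// still on the same CPU here
 *	migrate_enable();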
 */
static __always_inline void migrate_enable(void)
{
	preempt_enable();
}

#endif /* __LINUX_PREEMPT_H */
rmi.h 0000644 00000030030 14722070374 0005510 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 */

#ifndef _RMI_H
#define _RMI_H
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/types.h>

#define NAME_BUFFER_SIZE 256

/**
 * struct rmi_2d_axis_alignment - target axis alignment
 * @swap_axes: set to TRUE to swap the x- and y-axis
 * @flip_x: set to TRUE to flip the direction of the x-axis
 * @flip_y: set to TRUE to flip the direction of the y-axis
 * @clip_x_low - reported X coordinates below this setting will be clipped to
 * the specified value
 * @clip_x_high - reported X coordinates above this setting will be clipped to
 * the specified value
 * @clip_y_low - reported Y coordinates below this setting will be clipped to
 * the specified value
 * @clip_y_high - reported Y coordinates above this setting will be clipped to
 * the specified value
 * @offset_x - this value will be added to all reported X coordinates
 * @offset_y - this value will be added to all reported Y coordinates
 */
struct rmi_2d_axis_alignment {
	bool swap_axes;
	bool flip_x;
	bool flip_y;
	u16 clip_x_low;
	u16 clip_y_low;
	u16 clip_x_high;
	u16 clip_y_high;
	u16 offset_x;
	u16 offset_y;
	u8 delta_x_threshold;
	u8 delta_y_threshold;
};

/**
 * This is used to override any hints an F11 2D sensor might have provided
 * as to what type of sensor it is.
 *
 * @rmi_sensor_default - do not override; determine from F11_2D_QUERY14 if
 * available.
 * @rmi_sensor_touchscreen - treat the sensor as a touchscreen (direct
 * pointing).
 * @rmi_sensor_touchpad - treat the sensor as a touchpad (indirect
 * pointing).
 */
enum rmi_sensor_type {
	rmi_sensor_default = 0,
	rmi_sensor_touchscreen,
	rmi_sensor_touchpad
};

#define RMI_F11_DISABLE_ABS_REPORT	BIT(0)

/**
 * struct rmi_2d_sensor_platform_data - overrides defaults for a 2D sensor.
 * @axis_align - provides axis alignment overrides (see above).
 * @sensor_type - Forces the driver to treat the sensor as an indirect
 * pointing device (touchpad) rather than a direct pointing device
 * (touchscreen). This is useful when the F11_2D_QUERY14 register is not
 * available.
 * @disable_report_mask - Force data to not be reported even if it is
 * supported by the firmware.
 * @topbuttonpad - Used with the "5 button touchpads" found on the Lenovo 40
 * series.
 * @kernel_tracking - most modern RMI F11 firmwares implement the Multifinger
 * Type B protocol. However, there are some corner cases where the user
 * triggers some jumps by tapping with two fingers on the touchpad.
 * Use this setting and dmax to filter out these jumps.
 * Also, when using an old sensor using MF Type A behavior, set to true to
 * report an actual MT protocol B.
 * @dmax - the maximum distance (in sensor units) the kernel tracking allows
 * two distinct fingers to be considered the same.
 */
struct rmi_2d_sensor_platform_data {
	struct rmi_2d_axis_alignment axis_align;
	enum rmi_sensor_type sensor_type;
	int x_mm;
	int y_mm;
	int disable_report_mask;
	u16 rezero_wait;
	bool topbuttonpad;
	bool kernel_tracking;
	int dmax;
	int dribble;
	int palm_detect;
};

/**
 * struct rmi_f30_data - overrides defaults for a single F30 GPIOs/LED chip.
 * @buttonpad - the touchpad is a buttonpad, so enable only the first actual
 * button that is found.
 * @trackstick_buttons - Set when function 30 is handling the physical
 * buttons of the trackstick (as a PS/2 passthrough device).
 * @disable - the touchpad incorrectly reports F30 and it should be ignored.
 * This is a special case which is due to misconfigured firmware.
 */
struct rmi_f30_data {
	bool buttonpad;
	bool trackstick_buttons;
	bool disable;
};

/*
 * Set the state of a register.
 *	DEFAULT - use the default value set by the firmware config
 *	OFF - explicitly disable the register
 *	ON - explicitly enable the register
 */
enum rmi_reg_state {
	RMI_REG_STATE_DEFAULT = 0,
	RMI_REG_STATE_OFF = 1,
	RMI_REG_STATE_ON = 2
};

/**
 * struct rmi_f01_power_management - When non-zero, these values will be
 * written to the touch sensor to override the default firmware settings. For
 * a detailed explanation of what each field does, see the corresponding
 * documentation in the RMI4 specification.
 *
 * @nosleep - specifies whether the device is permitted to sleep or doze (that
 * is, enter a temporary low power state) when no fingers are touching the
 * sensor.
 * @wakeup_threshold - controls the capacitance threshold at which the touch
 * sensor will decide to wake up from that low power state.
 * @doze_holdoff - controls how long the touch sensor waits after the last
 * finger lifts before entering the doze state, in units of 100ms.
 * @doze_interval - controls the interval between checks for finger presence
 * when the touch sensor is in doze mode, in units of 10ms.
 */
struct rmi_f01_power_management {
	enum rmi_reg_state nosleep;
	u8 wakeup_threshold;
	u8 doze_holdoff;
	u8 doze_interval;
};

/**
 * struct rmi_device_platform_data_spi - provides parameters used in SPI
 * communications. All Synaptics SPI products support a standard SPI
 * interface; some also support what is called SPI V2 mode, depending on
 * firmware and/or ASIC limitations. In V2 mode, the touch sensor can
 * support shorter delays during certain operations, and these are specified
 * separately from the standard mode delays.
 *
 * @block_delay_us - for standard SPI transactions consisting of both a read
 * and write operation, the delay (in microseconds) between the read and
 * write operations.
 * @split_read_block_delay_us - for V2 SPI transactions consisting of both a
 * read and write operation, the delay (in microseconds) between the read and
 * write operations.
 * @read_delay_us - the delay between each byte of a read operation in normal
 * SPI mode.
 * @write_delay_us - the delay between each byte of a write operation in
 * normal SPI mode.
 * @split_read_byte_delay_us - the delay between each byte of a read operation
 * in V2 mode.
 * @pre_delay_us - the delay before the start of a SPI transaction. This is
 * typically useful in conjunction with custom chip select assertions (see
 * below).
 * @post_delay_us - the delay after the completion of an SPI transaction. This
 * is typically useful in conjunction with custom chip select assertions (see
 * below).
 * @cs_assert - For systems where the SPI subsystem does not control the CS/SSB
 * line, or where such control is broken, you can provide a custom routine to
 * handle a GPIO as CS/SSB. This routine will be called at the beginning and
 * end of each SPI transaction. The RMI SPI implementation will wait
 * pre_delay_us after this routine returns before starting the SPI transfer;
 * and post_delay_us after completion of the SPI transfer(s) before calling it
 * with assert==FALSE.
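 *
 * Example @cs_assert implementation (a minimal sketch; passing a GPIO
 * number via @cs_assert_data is hypothetical, and CS is assumed to be
 * active-low):
 *
 *	static int my_cs_assert(const void *data, const bool assert)
 *	{
 *		gpio_set_value((unsigned long)data, assert ? 0 : 1);
 *		return 0;
 *	}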
 */
struct rmi_device_platform_data_spi {
	u32 block_delay_us;
	u32 split_read_block_delay_us;
	u32 read_delay_us;
	u32 write_delay_us;
	u32 split_read_byte_delay_us;
	u32 pre_delay_us;
	u32 post_delay_us;
	u8 bits_per_word;
	u16 mode;

	void *cs_assert_data;
	int (*cs_assert)(const void *cs_assert_data, const bool assert);
};

/**
 * struct rmi_device_platform_data - system specific configuration info.
 *
 * @reset_delay_ms - after issuing a reset command to the touch sensor, the
 * driver waits a few milliseconds to give the firmware a chance to
 * re-initialize. You can override the default wait period here.
 * @irq: irq associated with the attn gpio line, or negative
 */
struct rmi_device_platform_data {
	int reset_delay_ms;
	int irq;

	struct rmi_device_platform_data_spi spi_data;

	/* function handler pdata */
	struct rmi_2d_sensor_platform_data sensor_pdata;
	struct rmi_f01_power_management power_management;
	struct rmi_f30_data f30_data;
};

/**
 * struct rmi_function_descriptor - RMI function base addresses
 *
 * @query_base_addr: The RMI Query base address
 * @command_base_addr: The RMI Command base address
 * @control_base_addr: The RMI Control base address
 * @data_base_addr: The RMI Data base address
 * @interrupt_source_count: The number of irqs this RMI function needs
 * @function_number: The RMI function number
 *
 * This struct is used when iterating the Page Description Table. The
 * addresses are 16-bit values to include the current page address.
 */
struct rmi_function_descriptor {
	u16 query_base_addr;
	u16 command_base_addr;
	u16 control_base_addr;
	u16 data_base_addr;
	u8 interrupt_source_count;
	u8 function_number;
	u8 function_version;
};

struct rmi_device;

/**
 * struct rmi_transport_dev - represents an RMI transport device
 *
 * @dev: Pointer to the communication device, e.g. i2c or spi
 * @rmi_dev: Pointer to the RMI device
 * @proto_name: name of the transport protocol (SPI, i2c, etc)
 * @ops: pointer to transport operations implementation
 *
 * The RMI transport device implements the glue between different
 * communication buses such as I2C and SPI.
 */
struct rmi_transport_dev {
	struct device *dev;
	struct rmi_device *rmi_dev;

	const char *proto_name;
	const struct rmi_transport_ops *ops;

	struct rmi_device_platform_data pdata;

	struct input_dev *input;
};

/**
 * struct rmi_transport_ops - defines transport protocol operations.
 *
 * @write_block: Write a block of data to the specified address.
 * @read_block: Read a block of data from the specified address.
 */
struct rmi_transport_ops {
	int (*write_block)(struct rmi_transport_dev *xport, u16 addr,
			   const void *buf, size_t len);
	int (*read_block)(struct rmi_transport_dev *xport, u16 addr,
			  void *buf, size_t len);
	int (*reset)(struct rmi_transport_dev *xport, u16 reset_addr);
};

/**
 * struct rmi_driver - driver for an RMI4 sensor on the RMI bus.
 *
 * @driver: Device driver model driver
 * @reset_handler: Called when a reset is detected.
 * @clear_irq_bits: Clear the specified bits in the current interrupt mask.
 * @set_irq_bits: Set the specified bits in the current interrupt mask.
* @store_productid: Callback for cache product id from function 01 * @data: Private data pointer * */ struct rmi_driver { struct device_driver driver; int (*reset_handler)(struct rmi_device *rmi_dev); int (*clear_irq_bits)(struct rmi_device *rmi_dev, unsigned long *mask); int (*set_irq_bits)(struct rmi_device *rmi_dev, unsigned long *mask); int (*store_productid)(struct rmi_device *rmi_dev); int (*set_input_params)(struct rmi_device *rmi_dev, struct input_dev *input); void *data; }; /** * struct rmi_device - represents an RMI4 sensor device on the RMI bus. * * @dev: The device created for the RMI bus * @number: Unique number for the device on the bus. * @driver: Pointer to associated driver * @xport: Pointer to the transport interface * */ struct rmi_device { struct device dev; int number; struct rmi_driver *driver; struct rmi_transport_dev *xport; }; struct rmi4_attn_data { unsigned long irq_status; size_t size; void *data; }; struct rmi_driver_data { struct list_head function_list; struct rmi_device *rmi_dev; struct rmi_function *f01_container; struct rmi_function *f34_container; bool bootloader_mode; int num_of_irq_regs; int irq_count; void *irq_memory; unsigned long *irq_status; unsigned long *fn_irq_bits; unsigned long *current_irq_mask; unsigned long *new_irq_mask; struct mutex irq_mutex; struct input_dev *input; struct irq_domain *irqdomain; u8 pdt_props; u8 num_rx_electrodes; u8 num_tx_electrodes; bool enabled; struct mutex enabled_mutex; struct rmi4_attn_data attn_data; DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16); }; int rmi_register_transport_device(struct rmi_transport_dev *xport); void rmi_unregister_transport_device(struct rmi_transport_dev *xport); void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status, void *data, size_t size); int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake); int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake); #endif tracehook.h 0000644 00000016403 14722070374 0006706 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Tracing hooks * * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. * * This file defines hook entry points called by core code where * user tracing/debugging support might need to do something. These * entry points are called tracehook_*(). Each hook declared below * has a detailed kerneldoc comment giving the context (locking et * al) from which it is called, and the meaning of its return value. * * Each function here typically has only one call site, so it is ok * to have some nontrivial tracehook_*() inlines. In all cases, the * fast path when no tracing is enabled should be very short. * * The purpose of this file and the tracehook_* layer is to consolidate * the interface that the kernel core and arch code uses to enable any * user debugging or tracing facility (such as ptrace). The interfaces * here are carefully documented so that maintainers of core and arch * code do not need to think about the implementation details of the * tracing facilities. Likewise, maintainers of the tracing code do not * need to understand all the calling core or arch code in detail, just * documented circumstances of each call, such as locking conditions. * * If the calling core code changes so that locking is different, then * it is ok to change the interface documented here. The maintainer of * core code changing should notify the maintainers of the tracing code * that they need to work out the change. 
* * Some tracehook_*() inlines take arguments that the current tracing * implementations might not necessarily use. These function signatures * are chosen to pass in all the information that is on hand in the * caller and might conceivably be relevant to a tracer, so that the * core code won't have to be updated when tracing adds more features. * If a call site changes so that some of those parameters are no longer * already on hand without extra work, then the tracehook_* interface * can change so there is no make-work burden on the core code. The * maintainer of core code changing should notify the maintainers of the * tracing code that they need to work out the change. */ #ifndef _LINUX_TRACEHOOK_H #define _LINUX_TRACEHOOK_H 1 #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/security.h> #include <linux/task_work.h> #include <linux/memcontrol.h> #include <linux/blk-cgroup.h> struct linux_binprm; /* * ptrace report for syscall entry and exit looks identical. */ static inline int ptrace_report_syscall(struct pt_regs *regs, unsigned long message) { int ptrace = current->ptrace; if (!(ptrace & PT_PTRACED)) return 0; current->ptrace_message = message; ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. -brl */ if (current->exit_code) { send_sig(current->exit_code, current, 1); current->exit_code = 0; } current->ptrace_message = 0; return fatal_signal_pending(current); } /** * tracehook_report_syscall_entry - task is about to attempt a system call * @regs: user register state of current task * * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set, * when the current task has just entered the kernel for a system call. * Full user register state is available here. Changing the values * in @regs can affect the system call number and arguments to be tried. * It is safe to block here, preventing the system call from beginning. * * Returns zero normally, or nonzero if the calling arch code should abort * the system call. That must prevent normal entry so no system call is * made. If @task ever returns to user mode after this, its register state * is unspecified, but should be something harmless like an %ENOSYS error * return. It should preserve enough information so that syscall_rollback() * can work (see asm-generic/syscall.h). * * Called without locks, just after entering kernel mode. */ static inline __must_check int tracehook_report_syscall_entry( struct pt_regs *regs) { return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY); } /** * tracehook_report_syscall_exit - task has just finished a system call * @regs: user register state of current task * @step: nonzero if simulating single-step or block-step * * This will be called if %TIF_SYSCALL_TRACE has been set, when the * current task has just finished an attempted system call. Full * user register state is available here. It is safe to block here, * preventing signals from being processed. * * If @step is nonzero, this report is also in lieu of the normal * trap that would follow the system call instruction because * user_enable_block_step() or user_enable_single_step() was used. * In this case, %TIF_SYSCALL_TRACE might not be set. * * Called without locks, just before checking for pending signals. 
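 *
 * Example arch call site (a minimal sketch; my_arch_syscall_exit and the
 * use of TIF_SINGLESTEP are hypothetical, and real architectures differ):
 *
 *	static void my_arch_syscall_exit(struct pt_regs *regs)
 *	{
 *		int step = test_thread_flag(TIF_SINGLESTEP);
 *
 *		if (step || test_thread_flag(TIF_SYSCALL_TRACE))
 *			tracehook_report_syscall_exit(regs, step);
 *	}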
*/ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) { if (step) user_single_step_report(regs); else ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT); } /** * tracehook_signal_handler - signal handler setup is complete * @stepping: nonzero if debugger single-step or block-step in use * * Called by the arch code after a signal handler has been set up. * Register and stack state reflects the user handler about to run. * Signal mask changes have already been made. * * Called without locks, shortly before returning to user mode * (or handling more signals). */ static inline void tracehook_signal_handler(int stepping) { if (stepping) ptrace_notify(SIGTRAP); } /** * set_notify_resume - cause tracehook_notify_resume() to be called * @task: task that will call tracehook_notify_resume() * * Calling this arranges that @task will call tracehook_notify_resume() * before returning to user mode. If it's already running in user mode, * it will enter the kernel and call tracehook_notify_resume() soon. * If it's blocked, it will not be woken. */ static inline void set_notify_resume(struct task_struct *task) { #ifdef TIF_NOTIFY_RESUME if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) kick_process(task); #endif } /** * tracehook_notify_resume - report when about to return to user mode * @regs: user-mode registers of @current task * * This is called when %TIF_NOTIFY_RESUME has been set. Now we are * about to return to user mode, and the user state in @regs can be * inspected or adjusted. The caller in arch code has cleared * %TIF_NOTIFY_RESUME before the call. If the flag gets set again * asynchronously, this will be called again before we return to * user mode. * * Called without locks. */ static inline void tracehook_notify_resume(struct pt_regs *regs) { /* * The caller just cleared TIF_NOTIFY_RESUME. This barrier * pairs with task_work_add()->set_notify_resume() after * hlist_add_head(task->task_works); */ smp_mb__after_atomic(); if (unlikely(current->task_works)) task_work_run(); #ifdef CONFIG_KEYS_REQUEST_CACHE if (unlikely(current->cached_requested_key)) { key_put(current->cached_requested_key); current->cached_requested_key = NULL; } #endif mem_cgroup_handle_over_high(); blkcg_maybe_throttle_current(); } #endif /* <linux/tracehook.h> */ xattr.h 0000644 00000006762 14722070374 0006100 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* File: linux/xattr.h Extended attributes handling. Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org> Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> */ #ifndef _LINUX_XATTR_H #define _LINUX_XATTR_H #include <linux/slab.h> #include <linux/types.h> #include <linux/spinlock.h> #include <uapi/linux/xattr.h> struct inode; struct dentry; /* * struct xattr_handler: When @name is set, match attributes with exactly that * name. When @prefix is set instead, match attributes with that prefix and * with a non-empty suffix. 
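 *
 * Example (a minimal sketch of a prefix-based handler; my_fs_xattr_get and
 * my_fs_xattr_set are hypothetical filesystem callbacks):
 *
 *	static const struct xattr_handler my_fs_user_xattr_handler = {
 *		.prefix	= XATTR_USER_PREFIX,
 *		.get	= my_fs_xattr_get,
 *		.set	= my_fs_xattr_set,
 *	};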
*/ struct xattr_handler { const char *name; const char *prefix; int flags; /* fs private flags */ bool (*list)(struct dentry *dentry); int (*get)(const struct xattr_handler *, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size); int (*set)(const struct xattr_handler *, struct dentry *dentry, struct inode *inode, const char *name, const void *buffer, size_t size, int flags); }; const char *xattr_full_name(const struct xattr_handler *, const char *); struct xattr { const char *name; void *value; size_t value_len; }; ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t); ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int); int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int __vfs_removexattr(struct dentry *, const char *); int __vfs_removexattr_noperm(struct dentry *dentry, const char *name); int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **); int vfs_removexattr(struct dentry *, const char *); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); static inline const char *xattr_prefix(const struct xattr_handler *handler) { return handler->prefix ?: handler->name; } struct simple_xattrs { struct list_head head; spinlock_t lock; }; struct simple_xattr { struct list_head list; char *name; size_t size; char value[0]; }; /* * initialize the simple_xattrs structure */ static inline void simple_xattrs_init(struct simple_xattrs *xattrs) { INIT_LIST_HEAD(&xattrs->head); spin_lock_init(&xattrs->lock); } /* * free all the xattrs */ static inline void simple_xattrs_free(struct simple_xattrs *xattrs) { struct simple_xattr *xattr, *node; list_for_each_entry_safe(xattr, node, &xattrs->head, list) { kfree(xattr->name); kfree(xattr); } } struct simple_xattr *simple_xattr_alloc(const void *value, size_t size); int simple_xattr_get(struct simple_xattrs *xattrs, const char *name, void *buffer, size_t size); int simple_xattr_set(struct simple_xattrs *xattrs, const char *name, const void *value, size_t size, int flags); ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer, size_t size); void simple_xattr_list_add(struct simple_xattrs *xattrs, struct simple_xattr *new_xattr); #endif /* _LINUX_XATTR_H */ mmiotrace.h 0000644 00000006062 14722070374 0006707 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMIOTRACE_H #define _LINUX_MMIOTRACE_H #include <linux/types.h> #include <linux/list.h> struct kmmio_probe; struct pt_regs; typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *, struct pt_regs *, unsigned long addr); typedef void (*kmmio_post_handler_t)(struct kmmio_probe *, unsigned long condition, struct pt_regs *); struct kmmio_probe { /* kmmio internal list: */ struct list_head list; /* start location of the probe point: */ unsigned long addr; /* length of the probe region: */ unsigned long len; /* Called before addr is executed: */ kmmio_pre_handler_t pre_handler; /* Called after addr is executed: */ kmmio_post_handler_t post_handler; 
void *private; }; extern unsigned int kmmio_count; extern int register_kmmio_probe(struct kmmio_probe *p); extern void unregister_kmmio_probe(struct kmmio_probe *p); extern int kmmio_init(void); extern void kmmio_cleanup(void); #ifdef CONFIG_MMIOTRACE /* kmmio is active by some kmmio_probes? */ static inline int is_kmmio_active(void) { return kmmio_count; } /* Called from page fault handler. */ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); /* Called from ioremap.c */ extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, void __iomem *addr); extern void mmiotrace_iounmap(volatile void __iomem *addr); /* For anyone to insert markers. Remember trailing newline. */ extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...); #else /* !CONFIG_MMIOTRACE: */ static inline int is_kmmio_active(void) { return 0; } static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr) { return 0; } static inline void mmiotrace_ioremap(resource_size_t offset, unsigned long size, void __iomem *addr) { } static inline void mmiotrace_iounmap(volatile void __iomem *addr) { } static inline __printf(1, 2) int mmiotrace_printk(const char *fmt, ...) { return 0; } #endif /* CONFIG_MMIOTRACE */ enum mm_io_opcode { MMIO_READ = 0x1, /* struct mmiotrace_rw */ MMIO_WRITE = 0x2, /* struct mmiotrace_rw */ MMIO_PROBE = 0x3, /* struct mmiotrace_map */ MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */ MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */ }; struct mmiotrace_rw { resource_size_t phys; /* PCI address of register */ unsigned long value; unsigned long pc; /* optional program counter */ int map_id; unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */ unsigned char width; /* size of register access in bytes */ }; struct mmiotrace_map { resource_size_t phys; /* base address in PCI space */ unsigned long virt; /* base virtual address */ unsigned long len; /* mapping size */ int map_id; unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */ }; /* in kernel/trace/trace_mmiotrace.c */ extern void enable_mmiotrace(void); extern void disable_mmiotrace(void); extern void mmio_trace_rw(struct mmiotrace_rw *rw); extern void mmio_trace_mapping(struct mmiotrace_map *map); extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args); #endif /* _LINUX_MMIOTRACE_H */ dma-debug.h 0000644 00000010274 14722070374 0006554 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2008 Advanced Micro Devices, Inc. 
* * Author: Joerg Roedel <joerg.roedel@amd.com> */ #ifndef __DMA_DEBUG_H #define __DMA_DEBUG_H #include <linux/types.h> struct device; struct scatterlist; struct bus_type; #ifdef CONFIG_DMA_API_DEBUG extern void dma_debug_add_bus(struct bus_type *bus); extern void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len); extern void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, int direction, dma_addr_t dma_addr); extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, int direction); extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int mapped_ents, int direction); extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, int dir); extern void debug_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t dma_addr, void *virt); extern void debug_dma_free_coherent(struct device *dev, size_t size, void *virt, dma_addr_t addr); extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, int direction, dma_addr_t dma_addr); extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, size_t size, int direction); extern void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, int direction); extern void debug_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, int direction); extern void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, int direction); extern void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, int direction); extern void debug_dma_dump_mappings(struct device *dev); extern void debug_dma_assert_idle(struct page *page); #else /* CONFIG_DMA_API_DEBUG */ static inline void dma_debug_add_bus(struct bus_type *bus) { } static inline void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len) { } static inline void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, int direction, dma_addr_t dma_addr) { } static inline void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { } static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, int direction) { } static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int mapped_ents, int direction) { } static inline void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, int dir) { } static inline void debug_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t dma_addr, void *virt) { } static inline void debug_dma_free_coherent(struct device *dev, size_t size, void *virt, dma_addr_t addr) { } static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, int direction, dma_addr_t dma_addr) { } static inline void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, size_t size, int direction) { } static inline void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, int direction) { } static inline void debug_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, int direction) { } static inline void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, int direction) { } static inline void debug_dma_sync_sg_for_device(struct device 
*dev, struct scatterlist *sg, int nelems, int direction) { } static inline void debug_dma_dump_mappings(struct device *dev) { } static inline void debug_dma_assert_idle(struct page *page) { } #endif /* CONFIG_DMA_API_DEBUG */ #endif /* __DMA_DEBUG_H */ kvm_irqfd.h 0000644 00000003762 14722070374 0006715 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * * irqfd: Allows an fd to be used to inject an interrupt to the guest * Credit goes to Avi Kivity for the original idea. */ #ifndef __LINUX_KVM_IRQFD_H #define __LINUX_KVM_IRQFD_H #include <linux/kvm_host.h> #include <linux/poll.h> /* * Resampling irqfds are a special variety of irqfds used to emulate * level triggered interrupts. The interrupt is asserted on eventfd * trigger. On acknowledgment through the irq ack notifier, the * interrupt is de-asserted and userspace is notified through the * resamplefd. All resamplers on the same gsi are de-asserted * together, so we don't need to track the state of each individual * user. We can also therefore share the same irq source ID. */ struct kvm_kernel_irqfd_resampler { struct kvm *kvm; /* * List of resampling struct _irqfd objects sharing this gsi. * RCU list modified under kvm->irqfds.resampler_lock */ struct list_head list; struct kvm_irq_ack_notifier notifier; /* * Entry in list of kvm->irqfd.resampler_list. Use for sharing * resamplers among irqfds on the same gsi. * Accessed and modified under kvm->irqfds.resampler_lock */ struct list_head link; }; struct kvm_kernel_irqfd { /* Used for MSI fast-path */ struct kvm *kvm; wait_queue_entry_t wait; /* Update side is protected by irqfds.lock */ struct kvm_kernel_irq_routing_entry irq_entry; seqcount_t irq_entry_sc; /* Used for level IRQ fast-path */ int gsi; struct work_struct inject; /* The resampler used by this irqfd (resampler-only) */ struct kvm_kernel_irqfd_resampler *resampler; /* Eventfd notified on resample (resampler-only) */ struct eventfd_ctx *resamplefd; /* Entry in list of irqfds for a resampler (resampler-only) */ struct list_head resampler_link; /* Used for setup/shutdown */ struct eventfd_ctx *eventfd; struct list_head list; poll_table pt; struct work_struct shutdown; struct irq_bypass_consumer consumer; struct irq_bypass_producer *producer; }; #endif /* __LINUX_KVM_IRQFD_H */ shmem_fs.h 0000644 00000012231 14722070374 0006523 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SHMEM_FS_H #define __SHMEM_FS_H #include <linux/file.h> #include <linux/swap.h> #include <linux/mempolicy.h> #include <linux/pagemap.h> #include <linux/percpu_counter.h> #include <linux/xattr.h> /* inode in-kernel data */ struct shmem_inode_info { spinlock_t lock; unsigned int seals; /* shmem seals */ unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ struct list_head shrinklist; /* shrinkable hpage inodes */ struct list_head swaplist; /* chain of maybes on swap */ struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ atomic_t stop_eviction; /* hold when working on inode */ struct inode vfs_inode; }; struct shmem_sb_info { struct mutex idr_lock; bool idr_nouse; struct idr idr; /* manages inode-number */ unsigned long max_blocks; /* How many blocks are allowed */ struct percpu_counter used_blocks; /* How many are allocated */ int max_inodes; /* How many inodes are allowed */ int free_inodes; /* How many are left for allocation */ spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ umode_t mode; /* 
Mount mode for root directory */ unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ kgid_t gid; /* Mount gid for root directory */ struct mempolicy *mpol; /* default memory policy for mappings */ spinlock_t shrinklist_lock; /* Protects shrinklist */ struct list_head shrinklist; /* List of shinkable inodes */ unsigned long shrinklist_len; /* Length of shrinklist */ }; static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) { return container_of(inode, struct shmem_inode_info, vfs_inode); } /* * Functions in mm/shmem.c called directly from elsewhere: */ extern const struct fs_parameter_description shmem_fs_parameters; extern int shmem_init(void); extern int shmem_init_fs_context(struct fs_context *fc); extern struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags); extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name, loff_t size, unsigned long flags); extern int shmem_zero_setup(struct vm_area_struct *); extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); #ifdef CONFIG_SHMEM extern bool shmem_mapping(struct address_space *mapping); #else static inline bool shmem_mapping(struct address_space *mapping) { return false; } #endif /* CONFIG_SHMEM */ extern void shmem_unlock_mapping(struct address_space *mapping); extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); extern int shmem_unuse(unsigned int type, bool frontswap, unsigned long *fs_pages_to_unuse); extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); /* Flag allocation requirements to shmem_getpage */ enum sgp_type { SGP_READ, /* don't exceed i_size, don't allocate page */ SGP_CACHE, /* don't exceed i_size, may allocate page */ SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ }; extern int shmem_getpage(struct inode *inode, pgoff_t index, struct page **pagep, enum sgp_type sgp); static inline struct page *shmem_read_mapping_page( struct address_space *mapping, pgoff_t index) { return shmem_read_mapping_page_gfp(mapping, index, mapping_gfp_mask(mapping)); } static inline bool shmem_file(struct file *file) { if (!IS_ENABLED(CONFIG_SHMEM)) return false; if (!file || !file->f_mapping) return false; return shmem_mapping(file->f_mapping); } extern bool shmem_charge(struct inode *inode, long pages); extern void shmem_uncharge(struct inode *inode, long pages); #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE extern bool shmem_huge_enabled(struct vm_area_struct *vma); #else static inline bool shmem_huge_enabled(struct vm_area_struct *vma) { return false; } #endif #ifdef CONFIG_SHMEM extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, struct page **pagep); extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct 
vm_area_struct *dst_vma, unsigned long dst_addr); #else #define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ src_addr, pagep) ({ BUG(); 0; }) #define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \ dst_addr) ({ BUG(); 0; }) #endif #endif hid-roccat.h 0000644 00000000744 14722070374 0006745 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __HID_ROCCAT_H #define __HID_ROCCAT_H /* * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net> */ /* */ #include <linux/hid.h> #include <linux/types.h> #define ROCCATIOCGREPSIZE _IOR('H', 0xf1, int) #ifdef __KERNEL__ int roccat_connect(struct class *klass, struct hid_device *hid, int report_size); void roccat_disconnect(int minor); int roccat_report_event(int minor, u8 const *data); #endif #endif leds-lp3952.h 0000644 00000004552 14722070374 0006614 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * LED driver for TI lp3952 controller * * Copyright (C) 2016, DAQRI, LLC. * Author: Tony Makkiel <tony.makkiel@daqri.com> */ #ifndef LEDS_LP3952_H_ #define LEDS_LP3952_H_ #define LP3952_NAME "lp3952" #define LP3952_CMD_REG_COUNT 8 #define LP3952_BRIGHT_MAX 4 #define LP3952_LABEL_MAX_LEN 15 #define LP3952_REG_LED_CTRL 0x00 #define LP3952_REG_R1_BLNK_TIME_CTRL 0x01 #define LP3952_REG_R1_BLNK_CYCLE_CTRL 0x02 #define LP3952_REG_G1_BLNK_TIME_CTRL 0x03 #define LP3952_REG_G1_BLNK_CYCLE_CTRL 0x04 #define LP3952_REG_B1_BLNK_TIME_CTRL 0x05 #define LP3952_REG_B1_BLNK_CYCLE_CTRL 0x06 #define LP3952_REG_ENABLES 0x0B #define LP3952_REG_PAT_GEN_CTRL 0x11 #define LP3952_REG_RGB1_MAX_I_CTRL 0x12 #define LP3952_REG_RGB2_MAX_I_CTRL 0x13 #define LP3952_REG_CMD_0 0x50 #define LP3952_REG_RESET 0x60 #define REG_MAX LP3952_REG_RESET #define LP3952_PATRN_LOOP BIT(1) #define LP3952_PATRN_GEN_EN BIT(2) #define LP3952_INT_B00ST_LDR BIT(2) #define LP3952_ACTIVE_MODE BIT(6) #define LP3952_LED_MASK_ALL 0x3f /* Transition Time in ms */ enum lp3952_tt { TT0, TT55, TT110, TT221, TT422, TT885, TT1770, TT3539 }; /* Command Execution Time in ms */ enum lp3952_cet { CET197, CET393, CET590, CET786, CET1180, CET1376, CET1573, CET1769, CET1966, CET2163, CET2359, CET2556, CET2763, CET2949, CET3146 }; /* Max Current in % */ enum lp3952_colour_I_log_0 { I0, I7, I14, I21, I32, I46, I71, I100 }; enum lp3952_leds { LP3952_BLUE_2, LP3952_GREEN_2, LP3952_RED_2, LP3952_BLUE_1, LP3952_GREEN_1, LP3952_RED_1, LP3952_LED_ALL }; struct lp3952_ctrl_hdl { struct led_classdev cdev; char name[LP3952_LABEL_MAX_LEN]; enum lp3952_leds channel; void *priv; }; struct ptrn_gen_cmd { union { struct { u16 tt:3; u16 b:3; u16 cet:4; u16 g:3; u16 r:3; }; struct { u8 lsb; u8 msb; } bytes; }; } __packed; struct lp3952_led_array { struct regmap *regmap; struct i2c_client *client; struct gpio_desc *enable_gpio; struct lp3952_ctrl_hdl leds[LP3952_LED_ALL]; }; #endif /* LEDS_LP3952_H_ */ bitops.h 0000644 00000015772 14722070374 0006237 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BITOPS_H #define _LINUX_BITOPS_H #include <asm/types.h> #include <linux/bits.h> /* Set bits in the first 'n' bytes when loaded from memory */ #ifdef __LITTLE_ENDIAN # define aligned_byte_mask(n) ((1UL << 8*(n))-1) #else # define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n))) #endif #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) #define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); 
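/*
 * The __sw_hweight*() helpers are the generic software implementations
 * of the Hamming weight (population count): the number of set bits in
 * the argument, e.g. hweight8(0xf0) == 4.  Architectures normally wrap
 * them behind optimised hweight*() versions.
 */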
extern unsigned int __sw_hweight32(unsigned int w); extern unsigned long __sw_hweight64(__u64 w); /* * Include this here because some architectures need generic_ffs/fls in * scope */ #include <asm/bitops.h> #define for_each_set_bit(bit, addr, size) \ for ((bit) = find_first_bit((addr), (size)); \ (bit) < (size); \ (bit) = find_next_bit((addr), (size), (bit) + 1)) /* same as for_each_set_bit() but use bit as value to start with */ #define for_each_set_bit_from(bit, addr, size) \ for ((bit) = find_next_bit((addr), (size), (bit)); \ (bit) < (size); \ (bit) = find_next_bit((addr), (size), (bit) + 1)) #define for_each_clear_bit(bit, addr, size) \ for ((bit) = find_first_zero_bit((addr), (size)); \ (bit) < (size); \ (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) /* same as for_each_clear_bit() but use bit as value to start with */ #define for_each_clear_bit_from(bit, addr, size) \ for ((bit) = find_next_zero_bit((addr), (size), (bit)); \ (bit) < (size); \ (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) static inline int get_bitmask_order(unsigned int count) { int order; order = fls(count); return order; /* We could be slightly more clever with -1 here... */ } static __always_inline unsigned long hweight_long(unsigned long w) { return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w); } /** * rol64 - rotate a 64-bit value left * @word: value to rotate * @shift: bits to roll */ static inline __u64 rol64(__u64 word, unsigned int shift) { return (word << (shift & 63)) | (word >> ((-shift) & 63)); } /** * ror64 - rotate a 64-bit value right * @word: value to rotate * @shift: bits to roll */ static inline __u64 ror64(__u64 word, unsigned int shift) { return (word >> (shift & 63)) | (word << ((-shift) & 63)); } /** * rol32 - rotate a 32-bit value left * @word: value to rotate * @shift: bits to roll */ static inline __u32 rol32(__u32 word, unsigned int shift) { return (word << (shift & 31)) | (word >> ((-shift) & 31)); } /** * ror32 - rotate a 32-bit value right * @word: value to rotate * @shift: bits to roll */ static inline __u32 ror32(__u32 word, unsigned int shift) { return (word >> (shift & 31)) | (word << ((-shift) & 31)); } /** * rol16 - rotate a 16-bit value left * @word: value to rotate * @shift: bits to roll */ static inline __u16 rol16(__u16 word, unsigned int shift) { return (word << (shift & 15)) | (word >> ((-shift) & 15)); } /** * ror16 - rotate a 16-bit value right * @word: value to rotate * @shift: bits to roll */ static inline __u16 ror16(__u16 word, unsigned int shift) { return (word >> (shift & 15)) | (word << ((-shift) & 15)); } /** * rol8 - rotate an 8-bit value left * @word: value to rotate * @shift: bits to roll */ static inline __u8 rol8(__u8 word, unsigned int shift) { return (word << (shift & 7)) | (word >> ((-shift) & 7)); } /** * ror8 - rotate an 8-bit value right * @word: value to rotate * @shift: bits to roll */ static inline __u8 ror8(__u8 word, unsigned int shift) { return (word >> (shift & 7)) | (word << ((-shift) & 7)); } /** * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit * @value: value to sign extend * @index: 0 based bit index (0<=index<32) to sign bit * * This is safe to use for 16- and 8-bit types as well. 
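 *
 * Example: sign_extend32(0x80, 7) returns -128 (0xffffff80), while
 * sign_extend32(0x7f, 7) returns 127.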
*/ static inline __s32 sign_extend32(__u32 value, int index) { __u8 shift = 31 - index; return (__s32)(value << shift) >> shift; } /** * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit * @value: value to sign extend * @index: 0 based bit index (0<=index<64) to sign bit */ static inline __s64 sign_extend64(__u64 value, int index) { __u8 shift = 63 - index; return (__s64)(value << shift) >> shift; } static inline unsigned fls_long(unsigned long l) { if (sizeof(l) == 4) return fls(l); return fls64(l); } static inline int get_count_order(unsigned int count) { int order; order = fls(count) - 1; if (count & (count - 1)) order++; return order; } /** * get_count_order_long - get order after rounding @l up to power of 2 * @l: parameter * * it is the same as get_count_order(), but with a long type parameter */ static inline int get_count_order_long(unsigned long l) { if (l == 0UL) return -1; else if (l & (l - 1UL)) return (int)fls_long(l); else return (int)fls_long(l) - 1; } /** * __ffs64 - find first set bit in a 64 bit word * @word: The 64 bit word * * On 64 bit arches this is a synonym for __ffs * The result is not defined if no bits are set, so check that @word * is non-zero before calling this. */ static inline unsigned long __ffs64(u64 word) { #if BITS_PER_LONG == 32 if (((u32)word) == 0UL) return __ffs((u32)(word >> 32)) + 32; #elif BITS_PER_LONG != 64 #error BITS_PER_LONG not 32 or 64 #endif return __ffs((unsigned long)word); } /** * assign_bit - Assign value to a bit in memory * @nr: the bit to set * @addr: the address to start counting from * @value: the value to assign */ static __always_inline void assign_bit(long nr, volatile unsigned long *addr, bool value) { if (value) set_bit(nr, addr); else clear_bit(nr, addr); } static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, bool value) { if (value) __set_bit(nr, addr); else __clear_bit(nr, addr); } #ifdef __KERNEL__ #ifndef set_mask_bits #define set_mask_bits(ptr, mask, bits) \ ({ \ const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \ typeof(*(ptr)) old__, new__; \ \ do { \ old__ = READ_ONCE(*(ptr)); \ new__ = (old__ & ~mask__) | bits__; \ } while (cmpxchg(ptr, old__, new__) != old__); \ \ old__; \ }) #endif #ifndef bit_clear_unless #define bit_clear_unless(ptr, clear, test) \ ({ \ const typeof(*(ptr)) clear__ = (clear), test__ = (test);\ typeof(*(ptr)) old__, new__; \ \ do { \ old__ = READ_ONCE(*(ptr)); \ new__ = old__ & ~clear__; \ } while (!(old__ & test__) && \ cmpxchg(ptr, old__, new__) != old__); \ \ !(old__ & test__); \ }) #endif #ifndef find_last_bit /** * find_last_bit - find the last set bit in a memory region * @addr: The address to start the search at * @size: The number of bits to search * * Returns the bit number of the last set bit, or size.
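 *
 * Example: for a region whose first word is 0x9 (bits 0 and 3 set) and
 * no higher bits set, find_last_bit(addr, 8) returns 3; for an
 * all-zero region it returns @size.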
*/ extern unsigned long find_last_bit(const unsigned long *addr, unsigned long size); #endif #endif /* __KERNEL__ */ #endif kobj_map.h 0000644 00000001041 14722070374 0006501 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * kobj_map.h */ #ifndef _KOBJ_MAP_H_ #define _KOBJ_MAP_H_ #include <linux/mutex.h> typedef struct kobject *kobj_probe_t(dev_t, int *, void *); struct kobj_map; int kobj_map(struct kobj_map *, dev_t, unsigned long, struct module *, kobj_probe_t *, int (*)(dev_t, void *), void *); void kobj_unmap(struct kobj_map *, dev_t, unsigned long); struct kobject *kobj_lookup(struct kobj_map *, dev_t, int *); struct kobj_map *kobj_map_init(kobj_probe_t *, struct mutex *); #endif /* _KOBJ_MAP_H_ */ u64_stats_sync.h 0000644 00000012262 14722070374 0007616 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_U64_STATS_SYNC_H #define _LINUX_U64_STATS_SYNC_H /* * To properly implement 64bits network statistics on 32bit and 64bit hosts, * we provide a synchronization point, that is a noop on 64bit or UP kernels. * * Key points : * 1) Use a seqcount on SMP 32bits, with low overhead. * 2) Whole thing is a noop on 64bit arches or UP kernels. * 3) Write side must ensure mutual exclusion or one seqcount update could * be lost, thus blocking readers forever. * If this synchronization point is not a mutex, but a spinlock or * spinlock_bh() or disable_bh() : * 3.1) Write side should not sleep. * 3.2) Write side should not allow preemption. * 3.3) If applicable, interrupts should be disabled. * * 4) If reader fetches several counters, there is no guarantee the whole values * are consistent (remember point 1) : this is a noop on 64bit arches anyway) * * 5) readers are allowed to sleep or be preempted/interrupted : They perform * pure reads. But if they have to fetch many values, it's better to not allow * preemptions/interruptions to avoid many retries. * * 6) If counter might be written by an interrupt, readers should block interrupts. * (On UP, there is no seqcount_t protection, a reader allowing interrupts could * read partial values) * * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and * u64_stats_fetch_retry_irq() helpers * * Usage : * * Stats producer (writer) should use following template granted it already got * an exclusive access to counters (a lock is already taken, or per cpu * data is used [in a non preemptable context]) * * spin_lock_bh(...) or other synchronization to get exclusive access * ... * u64_stats_update_begin(&stats->syncp); * stats->bytes64 += len; // non atomic operation * stats->packets64++; // non atomic operation * u64_stats_update_end(&stats->syncp); * * While a consumer (reader) should use following template to get consistent * snapshot for each variable (but no guarantee on several ones) * * u64 tbytes, tpackets; * unsigned int start; * * do { * start = u64_stats_fetch_begin(&stats->syncp); * tbytes = stats->bytes64; // non atomic operation * tpackets = stats->packets64; // non atomic operation * } while (u64_stats_fetch_retry(&stats->syncp, start)); * * * Example of use in drivers/net/loopback.c, using per_cpu containers, * in BH disabled context. 
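 *
 * An irq-aware reader (illustrative) uses the same pattern with the
 * _irq helpers declared below:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		tbytes = stats->bytes64;
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));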
*/ #include <linux/seqlock.h> struct u64_stats_sync { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) seqcount_t seq; #endif }; #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) #define u64_stats_init(syncp) \ do { \ struct u64_stats_sync *__s = (syncp); \ seqcount_init(&__s->seq); \ } while (0) #else static inline void u64_stats_init(struct u64_stats_sync *syncp) { } #endif static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_begin(&syncp->seq); #endif } static inline void u64_stats_update_end(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_end(&syncp->seq); #endif } static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) { unsigned long flags = 0; #if BITS_PER_LONG==32 && defined(CONFIG_SMP) local_irq_save(flags); write_seqcount_begin(&syncp->seq); #endif return flags; } static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, unsigned long flags) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_end(&syncp->seq); local_irq_restore(flags); #endif } static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_begin(&syncp->seq); #else return 0; #endif } static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && !defined(CONFIG_SMP) preempt_disable(); #endif return __u64_stats_fetch_begin(syncp); } static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_retry(&syncp->seq, start); #else return false; #endif } static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && !defined(CONFIG_SMP) preempt_enable(); #endif return __u64_stats_fetch_retry(syncp, start); } /* * In case irq handlers can update u64 counters, readers can use following helpers * - SMP 32bit arches use seqcount protection, irq safe. * - UP 32bit must disable irqs. * - 64bit have no problem atomically reading u64 values, irq safe. */ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && !defined(CONFIG_SMP) local_irq_disable(); #endif return __u64_stats_fetch_begin(syncp); } static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && !defined(CONFIG_SMP) local_irq_enable(); #endif return __u64_stats_fetch_retry(syncp, start); } #endif /* _LINUX_U64_STATS_SYNC_H */ fscache-cache.h 0000644 00000045147 14722070374 0007373 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* General filesystem caching backing cache interface * * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * NOTE!!! See: * * Documentation/filesystems/caching/backend-api.txt * * for a description of the cache backend interface declared here. 
*/ #ifndef _LINUX_FSCACHE_CACHE_H #define _LINUX_FSCACHE_CACHE_H #include <linux/fscache.h> #include <linux/sched.h> #include <linux/workqueue.h> #define NR_MAXCACHES BITS_PER_LONG struct fscache_cache; struct fscache_cache_ops; struct fscache_object; struct fscache_operation; enum fscache_obj_ref_trace { fscache_obj_get_add_to_deps, fscache_obj_get_queue, fscache_obj_put_alloc_fail, fscache_obj_put_attach_fail, fscache_obj_put_drop_obj, fscache_obj_put_enq_dep, fscache_obj_put_queue, fscache_obj_put_work, fscache_obj_ref__nr_traces }; /* * cache tag definition */ struct fscache_cache_tag { struct list_head link; struct fscache_cache *cache; /* cache referred to by this tag */ unsigned long flags; #define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ atomic_t usage; char name[0]; /* tag name */ }; /* * cache definition */ struct fscache_cache { const struct fscache_cache_ops *ops; struct fscache_cache_tag *tag; /* tag representing this cache */ struct kobject *kobj; /* system representation of this cache */ struct list_head link; /* link in list of caches */ size_t max_index_size; /* maximum size of index data */ char identifier[36]; /* cache label */ /* node management */ struct work_struct op_gc; /* operation garbage collector */ struct list_head object_list; /* list of data/index objects */ struct list_head op_gc_list; /* list of ops to be deleted */ spinlock_t object_list_lock; spinlock_t op_gc_list_lock; atomic_t object_count; /* no. of live objects in this cache */ struct fscache_object *fsdef; /* object for the fsdef index */ unsigned long flags; #define FSCACHE_IOERROR 0 /* cache stopped on I/O error */ #define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */ }; extern wait_queue_head_t fscache_cache_cleared_wq; /* * operation to be applied to a cache object * - retrieval initiation operations are done in the context of the process * that issued them, and not in an async thread pool */ typedef void (*fscache_operation_release_t)(struct fscache_operation *op); typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op); enum fscache_operation_state { FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */ FSCACHE_OP_ST_INITIALISED, /* Op is initialised */ FSCACHE_OP_ST_PENDING, /* Op is blocked from running */ FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */ FSCACHE_OP_ST_COMPLETE, /* Op is complete */ FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */ FSCACHE_OP_ST_DEAD /* Op is now dead */ }; struct fscache_operation { struct work_struct work; /* record for async ops */ struct list_head pend_link; /* link in object->pending_ops */ struct fscache_object *object; /* object to be operated upon */ unsigned long flags; #define FSCACHE_OP_TYPE 0x000f /* operation type */ #define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */ #define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */ #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ #define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */ #define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */ #define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */ enum fscache_operation_state state; atomic_t usage; unsigned debug_id; /* debugging ID */ /* operation processor callback * - can be NULL if FSCACHE_OP_WAITING is going to be 
used to perform * the op in a non-pool thread */ fscache_operation_processor_t processor; /* Operation cancellation cleanup (optional) */ fscache_operation_cancel_t cancel; /* operation releaser */ fscache_operation_release_t release; }; extern atomic_t fscache_op_debug_id; extern void fscache_op_work_func(struct work_struct *work); extern void fscache_enqueue_operation(struct fscache_operation *); extern void fscache_op_complete(struct fscache_operation *, bool); extern void fscache_put_operation(struct fscache_operation *); extern void fscache_operation_init(struct fscache_cookie *, struct fscache_operation *, fscache_operation_processor_t, fscache_operation_cancel_t, fscache_operation_release_t); /* * data read operation */ struct fscache_retrieval { struct fscache_operation op; struct fscache_cookie *cookie; /* The netfs cookie */ struct address_space *mapping; /* netfs pages */ fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ void *context; /* netfs read context (pinned) */ struct list_head to_do; /* list of things to be done by the backend */ unsigned long start_time; /* time at which retrieval started */ atomic_t n_pages; /* number of pages to be retrieved */ }; typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op, struct page *page, gfp_t gfp); typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op, struct list_head *pages, unsigned *nr_pages, gfp_t gfp); /** * fscache_get_retrieval - Get an extra reference on a retrieval operation * @op: The retrieval operation to get a reference on * * Get an extra reference on a retrieval operation. */ static inline struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op) { atomic_inc(&op->op.usage); return op; } /** * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing * @op: The retrieval operation affected * * Enqueue a retrieval operation for processing by the FS-Cache thread pool. */ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) { fscache_enqueue_operation(&op->op); } /** * fscache_retrieval_complete - Record (partial) completion of a retrieval * @op: The retrieval operation affected * @n_pages: The number of pages to account for */ static inline void fscache_retrieval_complete(struct fscache_retrieval *op, int n_pages) { if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0) fscache_op_complete(&op->op, false); } /** * fscache_put_retrieval - Drop a reference to a retrieval operation * @op: The retrieval operation affected * * Drop a reference to a retrieval operation. 
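 *
 * Pairs with fscache_get_retrieval(); once the last reference is gone,
 * the underlying operation is released via fscache_put_operation().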
*/ static inline void fscache_put_retrieval(struct fscache_retrieval *op) { fscache_put_operation(&op->op); } /* * cached page storage work item * - used to do three things: * - batch writes to the cache * - do cache writes asynchronously * - defer writes until cache object lookup completion */ struct fscache_storage { struct fscache_operation op; pgoff_t store_limit; /* don't write more than this */ }; /* * cache operations */ struct fscache_cache_ops { /* name of cache provider */ const char *name; /* allocate an object record for a cookie */ struct fscache_object *(*alloc_object)(struct fscache_cache *cache, struct fscache_cookie *cookie); /* look up the object for a cookie * - return -ETIMEDOUT to be requeued */ int (*lookup_object)(struct fscache_object *object); /* finished looking up */ void (*lookup_complete)(struct fscache_object *object); /* increment the usage count on this object (may fail if unmounting) */ struct fscache_object *(*grab_object)(struct fscache_object *object, enum fscache_obj_ref_trace why); /* pin an object in the cache */ int (*pin_object)(struct fscache_object *object); /* unpin an object in the cache */ void (*unpin_object)(struct fscache_object *object); /* check the consistency between the backing cache and the FS-Cache * cookie */ int (*check_consistency)(struct fscache_operation *op); /* store the updated auxiliary data on an object */ void (*update_object)(struct fscache_object *object); /* Invalidate an object */ void (*invalidate_object)(struct fscache_operation *op); /* discard the resources pinned by an object and effect retirement if * necessary */ void (*drop_object)(struct fscache_object *object); /* dispose of a reference to an object */ void (*put_object)(struct fscache_object *object, enum fscache_obj_ref_trace why); /* sync a cache */ void (*sync_cache)(struct fscache_cache *cache); /* notification that the attributes of a non-index object (such as * i_size) have changed */ int (*attr_changed)(struct fscache_object *object); /* reserve space for an object's data and associated metadata */ int (*reserve_space)(struct fscache_object *object, loff_t i_size); /* request a backing block for a page be read or allocated in the * cache */ fscache_page_retrieval_func_t read_or_alloc_page; /* request backing blocks for a list of pages be read or allocated in * the cache */ fscache_pages_retrieval_func_t read_or_alloc_pages; /* request a backing block for a page be allocated in the cache so that * it can be written directly */ fscache_page_retrieval_func_t allocate_page; /* request backing blocks for pages be allocated in the cache so that * they can be written directly */ fscache_pages_retrieval_func_t allocate_pages; /* write a page to its backing block in the cache */ int (*write_page)(struct fscache_storage *op, struct page *page); /* detach backing block from a page (optional) * - must release the cookie lock before returning * - may sleep */ void (*uncache_page)(struct fscache_object *object, struct page *page); /* dissociate a cache from all the pages it was backing */ void (*dissociate_pages)(struct fscache_cache *cache); }; extern struct fscache_cookie fscache_fsdef_index; /* * Event list for fscache_object::{event_mask,events} */ enum { FSCACHE_OBJECT_EV_NEW_CHILD, /* T if object has a new child */ FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */ FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */ FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */ FSCACHE_OBJECT_EV_CLEARED, /* T if 
accessors all gone */ FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */ FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */ NR_FSCACHE_OBJECT_EVENTS }; #define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1) /* * States for object state machine. */ struct fscache_transition { unsigned long events; const struct fscache_state *transit_to; }; struct fscache_state { char name[24]; char short_name[8]; const struct fscache_state *(*work)(struct fscache_object *object, int event); const struct fscache_transition transitions[]; }; /* * on-disk cache file or index handle */ struct fscache_object { const struct fscache_state *state; /* Object state machine state */ const struct fscache_transition *oob_table; /* OOB state transition table */ int debug_id; /* debugging ID */ int n_children; /* number of child objects */ int n_ops; /* number of extant ops on object */ int n_obj_ops; /* number of object ops outstanding on object */ int n_in_progress; /* number of ops in progress */ int n_exclusive; /* number of exclusive ops queued or in progress */ atomic_t n_reads; /* number of read ops in progress */ spinlock_t lock; /* state and operations lock */ unsigned long lookup_jif; /* time at which lookup started */ unsigned long oob_event_mask; /* OOB events this object is interested in */ unsigned long event_mask; /* events this object is interested in */ unsigned long events; /* events to be processed by this object * (order is important - using fls) */ unsigned long flags; #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ #define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */ #define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */ #define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */ #define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */ #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ #define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */ struct list_head cache_link; /* link in cache->object_list */ struct hlist_node cookie_link; /* link in cookie->backing_objects */ struct fscache_cache *cache; /* cache that supplied this object */ struct fscache_cookie *cookie; /* netfs's file/index object */ struct fscache_object *parent; /* parent object */ struct work_struct work; /* attention scheduling record */ struct list_head dependents; /* FIFO of dependent objects */ struct list_head dep_link; /* link in parent's dependents list */ struct list_head pending_ops; /* unstarted operations on this object */ #ifdef CONFIG_FSCACHE_OBJECT_LIST struct rb_node objlist_link; /* link in global object list */ #endif pgoff_t store_limit; /* current storage limit */ loff_t store_limit_l; /* current storage limit */ }; extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *, struct fscache_cache *); extern void fscache_object_destroy(struct fscache_object *); extern void fscache_object_lookup_negative(struct fscache_object *object); extern void fscache_obtained_object(struct fscache_object *object); static inline bool fscache_object_is_live(struct fscache_object *object) { return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); } static inline bool fscache_object_is_dying(struct fscache_object 
*object) { return !fscache_object_is_live(object); } static inline bool fscache_object_is_available(struct fscache_object *object) { return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); } static inline bool fscache_cache_is_broken(struct fscache_object *object) { return test_bit(FSCACHE_IOERROR, &object->cache->flags); } static inline bool fscache_object_is_active(struct fscache_object *object) { return fscache_object_is_available(object) && fscache_object_is_live(object) && !fscache_cache_is_broken(object); } /** * fscache_object_destroyed - Note destruction of an object in a cache * @cache: The cache from which the object came * * Note the destruction and deallocation of an object record in a cache. */ static inline void fscache_object_destroyed(struct fscache_cache *cache) { if (atomic_dec_and_test(&cache->object_count)) wake_up_all(&fscache_cache_cleared_wq); } /** * fscache_object_lookup_error - Note an object encountered an error * @object: The object on which the error was encountered * * Note that an object encountered a fatal error (usually an I/O error) and * that it should be withdrawn as soon as possible. */ static inline void fscache_object_lookup_error(struct fscache_object *object) { set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events); } /** * fscache_set_store_limit - Set the maximum size to be stored in an object * @object: The object to set the maximum on * @i_size: The limit to set in bytes * * Set the maximum size an object is permitted to reach, implying the highest * byte that may be written. Intended to be called by the attr_changed() op. * * See Documentation/filesystems/caching/backend-api.txt for a complete * description. */ static inline void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) { object->store_limit_l = i_size; object->store_limit = i_size >> PAGE_SHIFT; if (i_size & ~PAGE_MASK) object->store_limit++; } /** * fscache_end_io - End a retrieval operation on a page * @op: The FS-Cache operation covering the retrieval * @page: The page that was to be fetched * @error: The error code (0 if successful) * * Note the end of an operation to retrieve a page, as covered by a particular * operation record. */ static inline void fscache_end_io(struct fscache_retrieval *op, struct page *page, int error) { op->end_io_func(page, op->context, error); } static inline void __fscache_use_cookie(struct fscache_cookie *cookie) { atomic_inc(&cookie->n_active); } /** * fscache_use_cookie - Request usage of cookie attached to an object * @object: Object description * * Request usage of the cookie attached to an object. NULL is returned if the * relinquishment had reduced the cookie usage count to 0. */ static inline bool fscache_use_cookie(struct fscache_object *object) { struct fscache_cookie *cookie = object->cookie; return atomic_inc_not_zero(&cookie->n_active) != 0; } static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie) { return atomic_dec_and_test(&cookie->n_active); } static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie) { wake_up_var(&cookie->n_active); } /** * fscache_unuse_cookie - Cease usage of cookie attached to an object * @object: Object description * * Cease usage of the cookie attached to an object. When the users count * reaches zero then the cookie relinquishment will be permitted to proceed. 
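 *
 * Pairs with fscache_use_cookie(); dropping the final use wakes any
 * waiter via __fscache_wake_unused_cookie().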
*/ static inline void fscache_unuse_cookie(struct fscache_object *object) { struct fscache_cookie *cookie = object->cookie; if (__fscache_unuse_cookie(cookie)) __fscache_wake_unused_cookie(cookie); } /* * out-of-line cache backend functions */ extern __printf(3, 4) void fscache_init_cache(struct fscache_cache *cache, const struct fscache_cache_ops *ops, const char *idfmt, ...); extern int fscache_add_cache(struct fscache_cache *cache, struct fscache_object *fsdef, const char *tagname); extern void fscache_withdraw_cache(struct fscache_cache *cache); extern void fscache_io_error(struct fscache_cache *cache); extern void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page); extern void fscache_mark_pages_cached(struct fscache_retrieval *op, struct pagevec *pagevec); extern bool fscache_object_sleep_till_congested(signed long *timeoutp); extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, const void *data, uint16_t datalen, loff_t object_size); extern void fscache_object_retrying_stale(struct fscache_object *object); enum fscache_why_object_killed { FSCACHE_OBJECT_IS_STALE, FSCACHE_OBJECT_NO_SPACE, FSCACHE_OBJECT_WAS_RETIRED, FSCACHE_OBJECT_WAS_CULLED, }; extern void fscache_object_mark_killed(struct fscache_object *object, enum fscache_why_object_killed why); #endif /* _LINUX_FSCACHE_CACHE_H */ user-return-notifier.h 0000644 00000002265 14722070374 0011040 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_USER_RETURN_NOTIFIER_H #define _LINUX_USER_RETURN_NOTIFIER_H #ifdef CONFIG_USER_RETURN_NOTIFIER #include <linux/list.h> #include <linux/sched.h> struct user_return_notifier { void (*on_user_return)(struct user_return_notifier *urn); struct hlist_node link; }; void user_return_notifier_register(struct user_return_notifier *urn); void user_return_notifier_unregister(struct user_return_notifier *urn); static inline void propagate_user_return_notify(struct task_struct *prev, struct task_struct *next) { if (test_tsk_thread_flag(prev, TIF_USER_RETURN_NOTIFY)) { clear_tsk_thread_flag(prev, TIF_USER_RETURN_NOTIFY); set_tsk_thread_flag(next, TIF_USER_RETURN_NOTIFY); } } void fire_user_return_notifiers(void); static inline void clear_user_return_notifier(struct task_struct *p) { clear_tsk_thread_flag(p, TIF_USER_RETURN_NOTIFY); } #else struct user_return_notifier {}; static inline void propagate_user_return_notify(struct task_struct *prev, struct task_struct *next) { } static inline void fire_user_return_notifiers(void) {} static inline void clear_user_return_notifier(struct task_struct *p) {} #endif #endif lockd/share.h 0000644 00000001556 14722070374 0007130 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/share.h * * DOS share management for lockd. 
* * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #ifndef LINUX_LOCKD_SHARE_H #define LINUX_LOCKD_SHARE_H /* * DOS share for a specific file */ struct nlm_share { struct nlm_share * s_next; /* linked list */ struct nlm_host * s_host; /* client host */ struct nlm_file * s_file; /* shared file */ struct xdr_netobj s_owner; /* owner handle */ u32 s_access; /* access mode */ u32 s_mode; /* deny mode */ }; __be32 nlmsvc_share_file(struct nlm_host *, struct nlm_file *, struct nlm_args *); __be32 nlmsvc_unshare_file(struct nlm_host *, struct nlm_file *, struct nlm_args *); void nlmsvc_traverse_shares(struct nlm_host *, struct nlm_file *, nlm_host_match_fn_t); #endif /* LINUX_LOCKD_SHARE_H */ lockd/xdr4.h 0000644 00000003232 14722070374 0006700 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/xdr4.h * * XDR types for the NLM protocol * * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> */ #ifndef LOCKD_XDR4_H #define LOCKD_XDR4_H #include <linux/fs.h> #include <linux/nfs.h> #include <linux/sunrpc/xdr.h> #include <linux/lockd/xdr.h> /* error codes new to NLMv4 */ #define nlm4_deadlock cpu_to_be32(NLM_DEADLCK) #define nlm4_rofs cpu_to_be32(NLM_ROFS) #define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH) #define nlm4_fbig cpu_to_be32(NLM_FBIG) #define nlm4_failed cpu_to_be32(NLM_FAILED) int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *); int nlm4svc_encode_testres(struct svc_rqst *, __be32 *); int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *); int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *); int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *); int nlm4svc_encode_res(struct svc_rqst *, __be32 *); int nlm4svc_decode_res(struct svc_rqst *, __be32 *); int nlm4svc_encode_void(struct svc_rqst *, __be32 *); int nlm4svc_decode_void(struct svc_rqst *, __be32 *); int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *); int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *); int nlm4svc_decode_notify(struct svc_rqst *, __be32 *); int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *); /* int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *); int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); */ extern const struct rpc_version nlm_version4; #endif /* LOCKD_XDR4_H */ lockd/bind.h 0000644 00000004241 14722070374 0006734 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/bind.h * * This is the part of lockd visible to nfsd and the nfs client. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #ifndef LINUX_LOCKD_BIND_H #define LINUX_LOCKD_BIND_H #include <linux/lockd/nlm.h> /* need xdr-encoded error codes too, so... */ #include <linux/lockd/xdr.h> #ifdef CONFIG_LOCKD_V4 #include <linux/lockd/xdr4.h> #endif /* Dummy declarations */ struct svc_rqst; struct rpc_task; /* * This is the set of functions for lockd->nfsd communication */ struct nlmsvc_binding { __be32 (*fopen)(struct svc_rqst *, struct nfs_fh *, struct file **); void (*fclose)(struct file *); }; extern const struct nlmsvc_binding *nlmsvc_ops; /* * Similar to nfs_client_initdata, but without the NFS-specific * rpc_ops field. 
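 *
 * An NFS-side caller (illustrative sketch; the variable names are
 * hypothetical) fills this in and hands it to nlmclnt_init():
 *
 *	struct nlmclnt_initdata init = {
 *		.hostname	= server_name,
 *		.address	= (struct sockaddr *)&server_addr,
 *		.addrlen	= sizeof(server_addr),
 *		.protocol	= IPPROTO_TCP,
 *		.nfs_version	= 3,
 *		.net		= net,
 *		.cred		= cred,
 *	};
 *	struct nlm_host *host = nlmclnt_init(&init);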
*/ struct nlmclnt_initdata { const char *hostname; const struct sockaddr *address; size_t addrlen; unsigned short protocol; u32 nfs_version; int noresvport; struct net *net; const struct nlmclnt_operations *nlmclnt_ops; const struct cred *cred; }; /* * Functions exported by the lockd module */ extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init); extern void nlmclnt_done(struct nlm_host *host); /* * NLM client operations provide a means to modify RPC processing of NLM * requests. Callbacks receive a pointer to data passed into the call to * nlmclnt_proc(). */ struct nlmclnt_operations { /* Called on successful allocation of nlm_rqst, use for allocation or * reference counting. */ void (*nlmclnt_alloc_call)(void *); /* Called in rpc_task_prepare for unlock. A return value of true * indicates the callback has put the task to sleep on a waitqueue * and NLM should not call rpc_call_start(). */ bool (*nlmclnt_unlock_prepare)(struct rpc_task*, void *); /* Called when the nlm_rqst is freed, callbacks should clean up here */ void (*nlmclnt_release_call)(void *); }; extern int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data); extern int lockd_up(struct net *net, const struct cred *cred); extern void lockd_down(struct net *net); #endif /* LINUX_LOCKD_BIND_H */ lockd/nlm.h 0000644 00000002512 14722070374 0006605 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/nlm.h * * Declarations for the Network Lock Manager protocol. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #ifndef LINUX_LOCKD_NLM_H #define LINUX_LOCKD_NLM_H /* Maximum file offset in file_lock.fl_end */ # define NLM_OFFSET_MAX ((s32) 0x7fffffff) # define NLM4_OFFSET_MAX ((s64) ((~(u64)0) >> 1)) /* Return states for NLM */ enum { NLM_LCK_GRANTED = 0, NLM_LCK_DENIED = 1, NLM_LCK_DENIED_NOLOCKS = 2, NLM_LCK_BLOCKED = 3, NLM_LCK_DENIED_GRACE_PERIOD = 4, #ifdef CONFIG_LOCKD_V4 NLM_DEADLCK = 5, NLM_ROFS = 6, NLM_STALE_FH = 7, NLM_FBIG = 8, NLM_FAILED = 9, #endif }; #define NLM_PROGRAM 100021 #define NLMPROC_NULL 0 #define NLMPROC_TEST 1 #define NLMPROC_LOCK 2 #define NLMPROC_CANCEL 3 #define NLMPROC_UNLOCK 4 #define NLMPROC_GRANTED 5 #define NLMPROC_TEST_MSG 6 #define NLMPROC_LOCK_MSG 7 #define NLMPROC_CANCEL_MSG 8 #define NLMPROC_UNLOCK_MSG 9 #define NLMPROC_GRANTED_MSG 10 #define NLMPROC_TEST_RES 11 #define NLMPROC_LOCK_RES 12 #define NLMPROC_CANCEL_RES 13 #define NLMPROC_UNLOCK_RES 14 #define NLMPROC_GRANTED_RES 15 #define NLMPROC_NSM_NOTIFY 16 /* statd callback */ #define NLMPROC_SHARE 20 #define NLMPROC_UNSHARE 21 #define NLMPROC_NM_LOCK 22 #define NLMPROC_FREE_ALL 23 #endif /* LINUX_LOCKD_NLM_H */ lockd/lockd.h 0000644 00000026741 14722070374 0007125 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/lockd.h * * General-purpose lockd include file. * * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> */ #ifndef LINUX_LOCKD_LOCKD_H #define LINUX_LOCKD_LOCKD_H #ifdef __KERNEL__ #include <linux/in.h> #include <linux/in6.h> #include <net/ipv6.h> #include <linux/fs.h> #include <linux/kref.h> #include <linux/refcount.h> #include <linux/utsname.h> #include <linux/lockd/bind.h> #include <linux/lockd/xdr.h> #ifdef CONFIG_LOCKD_V4 #include <linux/lockd/xdr4.h> #endif #include <linux/lockd/debug.h> #include <linux/sunrpc/svc.h> /* * Version string */ #define LOCKD_VERSION "0.5" /* * Default timeout for RPC calls (seconds) */ #define LOCKD_DFLT_TIMEO 10 /* * Lockd host handle (used both by the client and server personality). 
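 *
 * A host is reference counted (h_count): nlmclnt_lookup_host() and
 * nlmsvc_lookup_host() return referenced hosts, which are dropped
 * again with nlmclnt_release_host() and nlmsvc_release_host()
 * respectively (all declared below).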
*/ struct nlm_host { struct hlist_node h_hash; /* doubly linked list */ struct sockaddr_storage h_addr; /* peer address */ size_t h_addrlen; struct sockaddr_storage h_srcaddr; /* our address (optional) */ size_t h_srcaddrlen; struct rpc_clnt *h_rpcclnt; /* RPC client to talk to peer */ char *h_name; /* remote hostname */ u32 h_version; /* interface version */ unsigned short h_proto; /* transport proto */ unsigned short h_reclaiming : 1, h_server : 1, /* server side, not client side */ h_noresvport : 1, h_inuse : 1; wait_queue_head_t h_gracewait; /* wait while reclaiming */ struct rw_semaphore h_rwsem; /* Reboot recovery lock */ u32 h_state; /* pseudo-state counter */ u32 h_nsmstate; /* true remote NSM state */ u32 h_pidcount; /* Pseudopids */ refcount_t h_count; /* reference count */ struct mutex h_mutex; /* mutex for pmap binding */ unsigned long h_nextrebind; /* next portmap call */ unsigned long h_expires; /* eligible for GC */ struct list_head h_lockowners; /* Lockowners for the client */ spinlock_t h_lock; struct list_head h_granted; /* Locks in GRANTED state */ struct list_head h_reclaim; /* Locks in RECLAIM state */ struct nsm_handle *h_nsmhandle; /* NSM status handle */ char *h_addrbuf; /* address eyecatcher */ struct net *net; /* host net */ const struct cred *h_cred; char nodename[UNX_MAXNODENAME + 1]; const struct nlmclnt_operations *h_nlmclnt_ops; /* Callback ops for NLM users */ }; /* * The largest string sm_addrbuf should hold is a full-size IPv6 address * (no "::" anywhere) with a scope ID. The buffer size is computed to * hold eight groups of colon-separated four-hex-digit numbers, a * percent sign, a scope id (at most 32 bits, in decimal), and NUL. */ #define NSM_ADDRBUF ((8 * 4 + 7) + (1 + 10) + 1) struct nsm_handle { struct list_head sm_link; refcount_t sm_count; char *sm_mon_name; char *sm_name; struct sockaddr_storage sm_addr; size_t sm_addrlen; unsigned int sm_monitored : 1, sm_sticky : 1; /* don't unmonitor */ struct nsm_private sm_priv; char sm_addrbuf[NSM_ADDRBUF]; }; /* * Rigorous type checking on sockaddr type conversions */ static inline struct sockaddr_in *nlm_addr_in(const struct nlm_host *host) { return (struct sockaddr_in *)&host->h_addr; } static inline struct sockaddr *nlm_addr(const struct nlm_host *host) { return (struct sockaddr *)&host->h_addr; } static inline struct sockaddr_in *nlm_srcaddr_in(const struct nlm_host *host) { return (struct sockaddr_in *)&host->h_srcaddr; } static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host) { return (struct sockaddr *)&host->h_srcaddr; } /* * Map an fl_owner_t into a unique 32-bit "pid" */ struct nlm_lockowner { struct list_head list; refcount_t count; struct nlm_host *host; fl_owner_t owner; uint32_t pid; }; struct nlm_wait; /* * Memory chunk for NLM client RPC request. */ #define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u) struct nlm_rqst { refcount_t a_count; unsigned int a_flags; /* initial RPC task flags */ struct nlm_host * a_host; /* host handle */ struct nlm_args a_args; /* arguments */ struct nlm_res a_res; /* result */ struct nlm_block * a_block; unsigned int a_retries; /* Retry count */ u8 a_owner[NLMCLNT_OHSIZE]; void * a_callback_data; /* sent to nlmclnt_operations callbacks */ }; /* * This struct describes a file held open by lockd on behalf of * an NFS client. 
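 *
 * Server-side code obtains and drops these through the helpers declared
 * further down; roughly (a sketch, error handling elided):
 *
 *    struct nlm_file *file;
 *    __be32 error;
 *
 *    error = nlm_lookup_file(rqstp, &file, &lock->fh);
 *    if (error != 0)
 *        return error;
 *    ...test or set locks against file->f_file...
 *    nlm_release_file(file);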
*/ struct nlm_file { struct hlist_node f_list; /* linked list */ struct nfs_fh f_handle; /* NFS file handle */ struct file * f_file; /* VFS file pointer */ struct nlm_share * f_shares; /* DOS shares */ struct list_head f_blocks; /* blocked locks */ unsigned int f_locks; /* guesstimate # of locks */ unsigned int f_count; /* reference count */ struct mutex f_mutex; /* avoid concurrent access */ }; /* * This is a server block (i.e. a lock requested by some client which * couldn't be granted because of a conflicting lock). */ #define NLM_NEVER (~(unsigned long) 0) /* timeout on non-blocking call: */ #define NLM_TIMEOUT (7 * HZ) struct nlm_block { struct kref b_count; /* Reference count */ struct list_head b_list; /* linked list of all blocks */ struct list_head b_flist; /* linked list (per file) */ struct nlm_rqst * b_call; /* RPC args & callback info */ struct svc_serv * b_daemon; /* NLM service */ struct nlm_host * b_host; /* host handle for RPC clnt */ unsigned long b_when; /* next re-xmit */ unsigned int b_id; /* block id */ unsigned char b_granted; /* VFS granted lock */ struct nlm_file * b_file; /* file in question */ struct cache_req * b_cache_req; /* deferred request handling */ struct cache_deferred_req * b_deferred_req; unsigned int b_flags; /* block flags */ #define B_QUEUED 1 /* lock queued */ #define B_GOT_CALLBACK 2 /* got lock or conflicting lock */ #define B_TIMED_OUT 4 /* filesystem too slow to respond */ }; /* * Global variables */ extern const struct rpc_program nlm_program; extern const struct svc_procedure nlmsvc_procedures[]; #ifdef CONFIG_LOCKD_V4 extern const struct svc_procedure nlmsvc_procedures4[]; #endif extern int nlmsvc_grace_period; extern unsigned long nlmsvc_timeout; extern bool nsm_use_hostnames; extern u32 nsm_local_state; /* * Lockd client functions */ struct nlm_rqst * nlm_alloc_call(struct nlm_host *host); int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *); int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *); void nlmclnt_release_call(struct nlm_rqst *); struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl); void nlmclnt_finish_block(struct nlm_wait *block); int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout); __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock); void nlmclnt_recovery(struct nlm_host *); int nlmclnt_reclaim(struct nlm_host *, struct file_lock *, struct nlm_rqst *); void nlmclnt_next_cookie(struct nlm_cookie *); /* * Host cache */ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, const size_t salen, const unsigned short protocol, const u32 version, const char *hostname, int noresvport, struct net *net, const struct cred *cred); void nlmclnt_release_host(struct nlm_host *); struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, const char *hostname, const size_t hostname_len); void nlmsvc_release_host(struct nlm_host *); struct rpc_clnt * nlm_bind_host(struct nlm_host *); void nlm_rebind_host(struct nlm_host *); struct nlm_host * nlm_get_host(struct nlm_host *); void nlm_shutdown_hosts(void); void nlm_shutdown_hosts_net(struct net *net); void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *); /* * Host monitoring */ int nsm_monitor(const struct nlm_host *host); void nsm_unmonitor(const struct nlm_host *host); struct nsm_handle *nsm_get_handle(const struct net *net, const struct sockaddr *sap, const size_t salen, const char *hostname, const size_t hostname_len); struct 
nsm_handle *nsm_reboot_lookup(const struct net *net, const struct nlm_reboot *info); void nsm_release(struct nsm_handle *nsm); /* * This is used in garbage collection and resource reclaim * A return value != 0 means destroy the lock/block/share */ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref); /* * Server-side lock handling */ __be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, int, struct nlm_cookie *, int); __be32 nlmsvc_unlock(struct net *net, struct nlm_file *, struct nlm_lock *); __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, struct nlm_lock *, struct nlm_cookie *); __be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *); unsigned long nlmsvc_retry_blocked(void); void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, nlm_host_match_fn_t match); void nlmsvc_grant_reply(struct nlm_cookie *, __be32); void nlmsvc_release_call(struct nlm_rqst *); void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t); /* * File handling for the server personality */ __be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, struct nfs_fh *); void nlm_release_file(struct nlm_file *); void nlmsvc_release_lockowner(struct nlm_lock *); void nlmsvc_mark_resources(struct net *); void nlmsvc_free_host_resources(struct nlm_host *); void nlmsvc_invalidate_all(void); /* * Cluster failover support */ int nlmsvc_unlock_all_by_sb(struct super_block *sb); int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); static inline struct inode *nlmsvc_file_inode(struct nlm_file *file) { return locks_inode(file->f_file); } static inline int __nlm_privileged_request4(const struct sockaddr *sap) { const struct sockaddr_in *sin = (struct sockaddr_in *)sap; if (ntohs(sin->sin_port) > 1023) return 0; return ipv4_is_loopback(sin->sin_addr.s_addr); } #if IS_ENABLED(CONFIG_IPV6) static inline int __nlm_privileged_request6(const struct sockaddr *sap) { const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; if (ntohs(sin6->sin6_port) > 1023) return 0; if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) return ipv4_is_loopback(sin6->sin6_addr.s6_addr32[3]); return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK; } #else /* IS_ENABLED(CONFIG_IPV6) */ static inline int __nlm_privileged_request6(const struct sockaddr *sap) { return 0; } #endif /* IS_ENABLED(CONFIG_IPV6) */ /* * Ensure incoming requests are from local privileged callers. * * Return TRUE if sender is local and is connecting via a privileged port; * otherwise return FALSE. */ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) { const struct sockaddr *sap = svc_addr(rqstp); switch (sap->sa_family) { case AF_INET: return __nlm_privileged_request4(sap); case AF_INET6: return __nlm_privileged_request6(sap); default: return 0; } } /* * Compare two NLM locks. * When the second lock is of type F_UNLCK, this acts like a wildcard. 
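 *
 * Worked example of the wildcard (values hypothetical): if @fl2 was
 * built from an F_UNLCK (unlock) request, then
 *
 *    nlm_compare_locks(fl1, fl2) == 1
 *
 * whenever the inode, fl_pid, fl_owner, fl_start and fl_end all match,
 * no matter whether fl1 holds a read or a write lock on that range.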
*/ static inline int nlm_compare_locks(const struct file_lock *fl1, const struct file_lock *fl2) { return locks_inode(fl1->fl_file) == locks_inode(fl2->fl_file) && fl1->fl_pid == fl2->fl_pid && fl1->fl_owner == fl2->fl_owner && fl1->fl_start == fl2->fl_start && fl1->fl_end == fl2->fl_end &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK); } extern const struct lock_manager_operations nlmsvc_lock_operations; #endif /* __KERNEL__ */ #endif /* LINUX_LOCKD_LOCKD_H */ lockd/xdr.h 0000644 00000005626 14722070374 0006625 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/xdr.h * * XDR types for the NLM protocol * * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> */ #ifndef LOCKD_XDR_H #define LOCKD_XDR_H #include <linux/fs.h> #include <linux/nfs.h> #include <linux/sunrpc/xdr.h> #define SM_MAXSTRLEN 1024 #define SM_PRIV_SIZE 16 struct nsm_private { unsigned char data[SM_PRIV_SIZE]; }; struct svc_rqst; #define NLM_MAXCOOKIELEN 32 #define NLM_MAXSTRLEN 1024 #define nlm_granted cpu_to_be32(NLM_LCK_GRANTED) #define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED) #define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS) #define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED) #define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD) #define nlm_drop_reply cpu_to_be32(30000) /* Lock info passed via NLM */ struct nlm_lock { char * caller; unsigned int len; /* length of "caller" */ struct nfs_fh fh; struct xdr_netobj oh; u32 svid; struct file_lock fl; }; /* * NLM cookies. Technically they can be 1K, but Linux only uses 8 bytes. * FreeBSD uses 16, Apple Mac OS X 10.3 uses 20. Therefore we set it to * 32 bytes. */ struct nlm_cookie { unsigned char data[NLM_MAXCOOKIELEN]; unsigned int len; }; /* * Generic lockd arguments for all but sm_notify */ struct nlm_args { struct nlm_cookie cookie; struct nlm_lock lock; u32 block; u32 reclaim; u32 state; u32 monitor; u32 fsm_access; u32 fsm_mode; }; typedef struct nlm_args nlm_args; /* * Generic lockd result */ struct nlm_res { struct nlm_cookie cookie; __be32 status; struct nlm_lock lock; }; /* * statd callback when client has rebooted */ struct nlm_reboot { char *mon; unsigned int len; u32 state; struct nsm_private priv; }; /* * Contents of statd callback when monitored host rebooted */ #define NLMSVC_XDRSIZE sizeof(struct nlm_args) int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *); int nlmsvc_encode_testres(struct svc_rqst *, __be32 *); int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *); int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *); int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *); int nlmsvc_encode_res(struct svc_rqst *, __be32 *); int nlmsvc_decode_res(struct svc_rqst *, __be32 *); int nlmsvc_encode_void(struct svc_rqst *, __be32 *); int nlmsvc_decode_void(struct svc_rqst *, __be32 *); int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *); int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *); int nlmsvc_decode_notify(struct svc_rqst *, __be32 *); int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *); /* int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *); int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); */ #endif /* LOCKD_XDR_H */ lockd/debug.h 0000644 00000001547 14722070374 0007114 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/lockd/debug.h * * 
Debugging stuff. * * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> */ #ifndef LINUX_LOCKD_DEBUG_H #define LINUX_LOCKD_DEBUG_H #ifdef __KERNEL__ #include <linux/sunrpc/debug.h> /* * Enable lockd debugging. * Requires RPC_DEBUG. */ #undef ifdebug #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) #else # define ifdebug(flag) if (0) #endif #endif /* __KERNEL__ */ /* * Debug flags */ #define NLMDBG_SVC 0x0001 #define NLMDBG_CLIENT 0x0002 #define NLMDBG_CLNTLOCK 0x0004 #define NLMDBG_SVCLOCK 0x0008 #define NLMDBG_MONITOR 0x0010 #define NLMDBG_CLNTSUBS 0x0020 #define NLMDBG_SVCSUBS 0x0040 #define NLMDBG_HOSTCACHE 0x0080 #define NLMDBG_XDR 0x0100 #define NLMDBG_ALL 0x7fff #endif /* LINUX_LOCKD_DEBUG_H */ netfilter_bridge.h 0000644 00000004120 14722070374 0010230 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BRIDGE_NETFILTER_H #define __LINUX_BRIDGE_NETFILTER_H #include <uapi/linux/netfilter_bridge.h> #include <linux/skbuff.h> struct nf_bridge_frag_data { char mac[ETH_HLEN]; bool vlan_present; u16 vlan_tci; __be16 vlan_proto; }; #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); static inline void br_drop_fake_rtable(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); if (dst && (dst->flags & DST_FAKE_RTABLE)) skb_dst_drop(skb); } static inline struct nf_bridge_info * nf_bridge_info_get(const struct sk_buff *skb) { return skb_ext_find(skb, SKB_EXT_BRIDGE_NF); } static inline bool nf_bridge_info_exists(const struct sk_buff *skb) { return skb_ext_exist(skb, SKB_EXT_BRIDGE_NF); } static inline int nf_bridge_get_physinif(const struct sk_buff *skb) { const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); if (!nf_bridge) return 0; return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0; } static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) { const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); if (!nf_bridge) return 0; return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0; } static inline struct net_device * nf_bridge_get_physindev(const struct sk_buff *skb) { const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); return nf_bridge ? nf_bridge->physindev : NULL; } static inline struct net_device * nf_bridge_get_physoutdev(const struct sk_buff *skb) { const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); return nf_bridge ? nf_bridge->physoutdev : NULL; } static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb) { const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); return nf_bridge && nf_bridge->in_prerouting; } #else #define br_drop_fake_rtable(skb) do { } while (0) static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb) { return false; } #endif /* CONFIG_BRIDGE_NETFILTER */ #endif jz4780-nemc.h 0000644 00000001720 14722070374 0006611 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * JZ4780 NAND/external memory controller (NEMC) * * Copyright (c) 2015 Imagination Technologies * Author: Alex Smith <alex@alex-smith.me.uk> */ #ifndef __LINUX_JZ4780_NEMC_H__ #define __LINUX_JZ4780_NEMC_H__ #include <linux/types.h> struct device; /* * Number of NEMC banks. Note that there are actually 6, but they are numbered * from 1. 
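 *
 * Hence the define below is 7, so that tables indexed directly by bank
 * number can reserve entry 0, which never corresponds to a real bank.
 * A board claiming bank 1 for NAND might do (device pointer hypothetical):
 *
 *    jz4780_nemc_set_type(nemc_dev, 1, JZ4780_NEMC_BANK_NAND);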
*/ #define JZ4780_NEMC_NUM_BANKS 7 /** * enum jz4780_nemc_bank_type - device types which can be connected to a bank * @JZ4780_NEMC_BANK_SRAM: SRAM * @JZ4780_NEMC_BANK_NAND: NAND */ enum jz4780_nemc_bank_type { JZ4780_NEMC_BANK_SRAM, JZ4780_NEMC_BANK_NAND, }; extern unsigned int jz4780_nemc_num_banks(struct device *dev); extern void jz4780_nemc_set_type(struct device *dev, unsigned int bank, enum jz4780_nemc_bank_type type); extern void jz4780_nemc_assert(struct device *dev, unsigned int bank, bool assert); #endif /* __LINUX_JZ4780_NEMC_H__ */ fs_stack.h 0000644 00000001453 14722070374 0006523 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FS_STACK_H #define _LINUX_FS_STACK_H /* This file defines generic functions used primarily by stackable * filesystems; none of these functions require i_mutex to be held. */ #include <linux/fs.h> /* externs for fs/stack.c */ extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src); extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src); /* inlines */ static inline void fsstack_copy_attr_atime(struct inode *dest, const struct inode *src) { dest->i_atime = src->i_atime; } static inline void fsstack_copy_attr_times(struct inode *dest, const struct inode *src) { dest->i_atime = src->i_atime; dest->i_mtime = src->i_mtime; dest->i_ctime = src->i_ctime; } #endif /* _LINUX_FS_STACK_H */ virtio.h 0000644 00000015445 14722070374 0006250 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_VIRTIO_H #define _LINUX_VIRTIO_H /* Everything a virtio driver needs to work with any particular virtio * implementation. */ #include <linux/types.h> #include <linux/scatterlist.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/gfp.h> #include <linux/vringh.h> /** * virtqueue - a queue to register buffers for sending or receiving. * @list: the chain of virtqueues for this device * @callback: the function to call when buffers are consumed (can be NULL). * @name: the name of this virtqueue (mainly for debugging) * @vdev: the virtio device this queue was created for. * @priv: a pointer for the virtqueue implementation to use. * @index: the zero-based ordinal number for this queue. * @num_free: number of elements we expect to be able to fit. * * A note on @num_free: with indirect buffers, each buffer needs one * element in the queue, otherwise a buffer will need one element per * sg element. 
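 *
 * A minimal transmit sketch using the helpers declared below (buffer
 * and length names hypothetical; error handling elided):
 *
 *    struct scatterlist sg;
 *
 *    sg_init_one(&sg, buf, len);
 *    if (virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC) == 0)
 *        virtqueue_kick(vq);
 *    ...and once the device consumes it, on the callback path:...
 *    buf = virtqueue_get_buf(vq, &len);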
*/ struct virtqueue { struct list_head list; void (*callback)(struct virtqueue *vq); const char *name; struct virtio_device *vdev; unsigned int index; unsigned int num_free; void *priv; }; int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp); int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp); int virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, void *ctx, gfp_t gfp); int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[], unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp); bool virtqueue_kick(struct virtqueue *vq); bool virtqueue_kick_prepare(struct virtqueue *vq); bool virtqueue_notify(struct virtqueue *vq); void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len, void **ctx); void virtqueue_disable_cb(struct virtqueue *vq); bool virtqueue_enable_cb(struct virtqueue *vq); unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq); bool virtqueue_poll(struct virtqueue *vq, unsigned); bool virtqueue_enable_cb_delayed(struct virtqueue *vq); void *virtqueue_detach_unused_buf(struct virtqueue *vq); unsigned int virtqueue_get_vring_size(struct virtqueue *vq); bool virtqueue_is_broken(struct virtqueue *vq); const struct vring *virtqueue_get_vring(struct virtqueue *vq); dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq); dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq); dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq); /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore) * @config_enabled: configuration change reporting enabled * @config_change_pending: configuration change reported while disabled * @config_lock: protects configuration change reporting * @dev: underlying device. * @id: the device type identification (used to match it with a driver). * @config: the configuration ops for this device. * @vringh_config: configuration ops for host vrings. * @vqs: the list of virtqueues for this device. * @features: the features supported by both driver and device. * @priv: private pointer for the driver's use. 
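 *
 * A short sketch of walking a device's virtqueues with the iterator
 * macro defined further down:
 *
 *    struct virtqueue *vq;
 *
 *    virtio_device_for_each_vq(vdev, vq)
 *        pr_info("%s: vq %u, %u free slots\n",
 *                vq->name, vq->index, vq->num_free);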
*/ struct virtio_device { int index; bool failed; bool config_enabled; bool config_change_pending; spinlock_t config_lock; spinlock_t vqs_list_lock; /* Protects VQs list access */ struct device dev; struct virtio_device_id id; const struct virtio_config_ops *config; const struct vringh_config_ops *vringh_config; struct list_head vqs; u64 features; void *priv; }; static inline struct virtio_device *dev_to_virtio(struct device *_dev) { return container_of(_dev, struct virtio_device, dev); } void virtio_add_status(struct virtio_device *dev, unsigned int status); int register_virtio_device(struct virtio_device *dev); void unregister_virtio_device(struct virtio_device *dev); void virtio_break_device(struct virtio_device *dev); void virtio_config_changed(struct virtio_device *dev); void virtio_config_disable(struct virtio_device *dev); void virtio_config_enable(struct virtio_device *dev); #ifdef CONFIG_PM_SLEEP int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); #endif size_t virtio_max_dma_size(struct virtio_device *vdev); #define virtio_device_for_each_vq(vdev, vq) \ list_for_each_entry(vq, &vdev->vqs, list) /** * virtio_driver - operations for a virtio I/O driver * @driver: underlying device driver (populate name and owner). * @id_table: the ids serviced by this driver. * @feature_table: an array of feature numbers supported by this driver. * @feature_table_size: number of entries in the feature table array. * @feature_table_legacy: same as feature_table but when working in legacy mode. * @feature_table_size_legacy: number of entries in feature table legacy array. * @validate: the function to call to validate features and config space. * Returns 0 or -errno. * @probe: the function to call when a device is found. Returns 0 or -errno. * @scan: optional function to call after successful probe; intended * for virtio-scsi to invoke a scan. * @remove: the function to call when a device is removed. * @config_changed: optional function to call when the device configuration * changes; may be called in interrupt context. * @freeze: optional function to call during suspend/hibernation. * @restore: optional function to call on resume. */ struct virtio_driver { struct device_driver driver; const struct virtio_device_id *id_table; const unsigned int *feature_table; unsigned int feature_table_size; const unsigned int *feature_table_legacy; unsigned int feature_table_size_legacy; int (*validate)(struct virtio_device *dev); int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); void (*remove)(struct virtio_device *dev); void (*config_changed)(struct virtio_device *dev); #ifdef CONFIG_PM int (*freeze)(struct virtio_device *dev); int (*restore)(struct virtio_device *dev); #endif }; static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv) { return container_of(drv, struct virtio_driver, driver); } int register_virtio_driver(struct virtio_driver *drv); void unregister_virtio_driver(struct virtio_driver *drv); /* module_virtio_driver() - Helper macro for drivers that don't do * anything special in module init/exit. This eliminates a lot of * boilerplate.
Each module may only use this macro once, and * calling it replaces module_init() and module_exit() */ #define module_virtio_driver(__virtio_driver) \ module_driver(__virtio_driver, register_virtio_driver, \ unregister_virtio_driver) #endif /* _LINUX_VIRTIO_H */ apple-gmux.h 0000644 00000001626 14722070374 0007007 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro * Copyright (C) 2015 Lukas Wunner <lukas@wunner.de> */ #ifndef LINUX_APPLE_GMUX_H #define LINUX_APPLE_GMUX_H #include <linux/acpi.h> #define GMUX_ACPI_HID "APP000B" #if IS_ENABLED(CONFIG_APPLE_GMUX) /** * apple_gmux_present() - detect if gmux is built into the machine * * Drivers may use this to activate quirks specific to dual GPU MacBook Pros * and Mac Pros, e.g. for deferred probing, runtime pm and backlight. * * Return: %true if gmux is present and the kernel was configured * with CONFIG_APPLE_GMUX, %false otherwise. */ static inline bool apple_gmux_present(void) { return acpi_dev_found(GMUX_ACPI_HID); } #else /* !CONFIG_APPLE_GMUX */ static inline bool apple_gmux_present(void) { return false; } #endif /* !CONFIG_APPLE_GMUX */ #endif /* LINUX_APPLE_GMUX_H */ vmacache.h 0000644 00000001322 14722070374 0006470 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_VMACACHE_H #define __LINUX_VMACACHE_H #include <linux/sched.h> #include <linux/mm.h> static inline void vmacache_flush(struct task_struct *tsk) { memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); } extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma); extern struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr); #ifndef CONFIG_MMU extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, unsigned long start, unsigned long end); #endif static inline void vmacache_invalidate(struct mm_struct *mm) { mm->vmacache_seqnum++; } #endif /* __LINUX_VMACACHE_H */ errqueue.h 0000644 00000001014 14722070374 0006554 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ERRQUEUE_H #define _LINUX_ERRQUEUE_H 1 #include <net/ip.h> #if IS_ENABLED(CONFIG_IPV6) #include <linux/ipv6.h> #endif #include <uapi/linux/errqueue.h> #define SKB_EXT_ERR(skb) ((struct sock_exterr_skb *) ((skb)->cb)) struct sock_exterr_skb { union { struct inet_skb_parm h4; #if IS_ENABLED(CONFIG_IPV6) struct inet6_skb_parm h6; #endif } header; struct sock_extended_err ee; u16 addr_offset; __be16 port; u8 opt_stats:1, unused:7; }; #endif irqdesc.h 0000644 00000020332 14722070374 0006355 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQDESC_H #define _LINUX_IRQDESC_H #include <linux/rcupdate.h> #include <linux/kobject.h> #include <linux/mutex.h> /* * Core internal functions to deal with irq descriptors */ struct irq_affinity_notify; struct proc_dir_entry; struct module; struct irq_desc; struct irq_domain; struct pt_regs; /** * struct irq_desc - interrupt descriptor * @irq_common_data: per irq and chip data passed down to chip functions * @kstat_irqs: irq stats per cpu * @handle_irq: highlevel irq-events handler * @preflow_handler: handler called before the flow handler (currently used by sparc) * @action: the irq action chain * @status: status information * @core_internal_state__do_not_mess_with_it: core internal status information * @depth: disable-depth, for nested irq_disable() calls * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers * @tot_count: stats field for non-percpu irqs * @irq_count: stats field 
to detect stalled irqs * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts * @threads_handled: stats field for deferred spurious detection of threaded handlers * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers * @lock: locking for SMP * @affinity_hint: hint to user space for preferred irq affinity * @affinity_notify: context for notification of affinity changes * @pending_mask: pending rebalanced interrupts * @threads_oneshot: bitfield to handle shared oneshot threads * @threads_active: number of irqaction threads currently running * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers * @nr_actions: number of installed actions on this descriptor * @no_suspend_depth: number of irqactions on an irq descriptor with * IRQF_NO_SUSPEND set * @force_resume_depth: number of irqactions on an irq descriptor with * IRQF_FORCE_RESUME set * @rcu: rcu head for delayed free * @kobj: kobject used to represent this struct in sysfs * @request_mutex: mutex to protect request/free before locking desc->lock * @dir: /proc/irq/ procfs entry * @debugfs_file: dentry for the debugfs file * @name: flow handler name for /proc/interrupts output */ struct irq_desc { struct irq_common_data irq_common_data; struct irq_data irq_data; unsigned int __percpu *kstat_irqs; irq_flow_handler_t handle_irq; #ifdef CONFIG_IRQ_PREFLOW_FASTEOI irq_preflow_handler_t preflow_handler; #endif struct irqaction *action; /* IRQ action list */ unsigned int status_use_accessors; unsigned int core_internal_state__do_not_mess_with_it; unsigned int depth; /* nested irq disables */ unsigned int wake_depth; /* nested wake enables */ unsigned int tot_count; unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; atomic_t threads_handled; int threads_handled_last; raw_spinlock_t lock; struct cpumask *percpu_enabled; const struct cpumask *percpu_affinity; #ifdef CONFIG_SMP const struct cpumask *affinity_hint; struct irq_affinity_notify *affinity_notify; #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_var_t pending_mask; #endif #endif unsigned long threads_oneshot; atomic_t threads_active; wait_queue_head_t wait_for_threads; #ifdef CONFIG_PM_SLEEP unsigned int nr_actions; unsigned int no_suspend_depth; unsigned int cond_suspend_depth; unsigned int force_resume_depth; #endif #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif #ifdef CONFIG_GENERIC_IRQ_DEBUGFS struct dentry *debugfs_file; const char *dev_name; #endif #ifdef CONFIG_SPARSE_IRQ struct rcu_head rcu; struct kobject kobj; #endif struct mutex request_mutex; int parent_irq; struct module *owner; const char *name; } ____cacheline_internodealigned_in_smp; #ifdef CONFIG_SPARSE_IRQ extern void irq_lock_sparse(void); extern void irq_unlock_sparse(void); #else static inline void irq_lock_sparse(void) { } static inline void irq_unlock_sparse(void) { } extern struct irq_desc irq_desc[NR_IRQS]; #endif static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) { return container_of(data->common, struct irq_desc, irq_common_data); } static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) { return desc->irq_data.irq; } static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) { return &desc->irq_data; } static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) { return desc->irq_data.chip; } static inline void
*irq_desc_get_chip_data(struct irq_desc *desc) { return desc->irq_data.chip_data; } static inline void *irq_desc_get_handler_data(struct irq_desc *desc) { return desc->irq_common_data.handler_data; } /* * Architectures call this to let the generic IRQ layer * handle an interrupt. */ static inline void generic_handle_irq_desc(struct irq_desc *desc) { desc->handle_irq(desc); } int generic_handle_irq(unsigned int irq); #ifdef CONFIG_HANDLE_DOMAIN_IRQ /* * Convert a HW interrupt number to a logical one using an IRQ domain, * and handle the resulting interrupt number. Return -EINVAL if * conversion failed. Providing a NULL domain indicates that the * conversion has already been done. */ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, bool lookup, struct pt_regs *regs); static inline int handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, struct pt_regs *regs) { return __handle_domain_irq(domain, hwirq, true, regs); } #ifdef CONFIG_IRQ_DOMAIN int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, struct pt_regs *regs); #endif #endif /* Test to see if a driver has successfully requested an irq */ static inline int irq_desc_has_action(struct irq_desc *desc) { return desc->action != NULL; } static inline int irq_has_action(unsigned int irq) { return irq_desc_has_action(irq_to_desc(irq)); } /** * irq_set_handler_locked - Set irq handler from a locked region * @data: Pointer to the irq_data structure which identifies the irq * @handler: Flow control handler function for this interrupt * * Sets the handler in the irq descriptor associated to @data. * * Must be called with irq_desc locked and valid parameters. Typical * call site is the irq_set_type() callback. */ static inline void irq_set_handler_locked(struct irq_data *data, irq_flow_handler_t handler) { struct irq_desc *desc = irq_data_to_desc(data); desc->handle_irq = handler; } /** * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region * @data: Pointer to the irq_data structure for which the chip is set * @chip: Pointer to the new irq chip * @handler: Flow control handler function for this interrupt * @name: Name of the interrupt * * Replaces the irq chip at the proper hierarchy level in @data and * sets the handler and name in the associated irq descriptor. * * Must be called with irq_desc locked and valid parameters.
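 *
 * A sketch of a typical call site inside an irq_chip's irq_set_type()
 * callback (the chip variable here is hypothetical):
 *
 *    if (type & IRQ_TYPE_LEVEL_MASK)
 *        irq_set_chip_handler_name_locked(data, &my_level_chip,
 *                                         handle_level_irq, "level");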
*/ static inline void irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip, irq_flow_handler_t handler, const char *name) { struct irq_desc *desc = irq_data_to_desc(data); desc->handle_irq = handler; desc->name = name; data->chip = chip; } static inline bool irq_balancing_disabled(unsigned int irq) { struct irq_desc *desc; desc = irq_to_desc(irq); return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; } static inline bool irq_is_percpu(unsigned int irq) { struct irq_desc *desc; desc = irq_to_desc(irq); return desc->status_use_accessors & IRQ_PER_CPU; } static inline bool irq_is_percpu_devid(unsigned int irq) { struct irq_desc *desc; desc = irq_to_desc(irq); return desc->status_use_accessors & IRQ_PER_CPU_DEVID; } static inline void irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, struct lock_class_key *request_class) { struct irq_desc *desc = irq_to_desc(irq); if (desc) { lockdep_set_class(&desc->lock, lock_class); lockdep_set_class(&desc->request_mutex, request_class); } } #ifdef CONFIG_IRQ_PREFLOW_FASTEOI static inline void __irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) { struct irq_desc *desc; desc = irq_to_desc(irq); desc->preflow_handler = handler; } #endif #endif cpu_pm.h 0000644 00000004601 14722070374 0006207 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Google, Inc. * * Author: * Colin Cross <ccross@android.com> */ #ifndef _LINUX_CPU_PM_H #define _LINUX_CPU_PM_H #include <linux/kernel.h> #include <linux/notifier.h> /* * When a CPU goes to a low power state that turns off power to the CPU's * power domain, the contents of some blocks (floating point coprocessors, * interrupt controllers, caches, timers) in the same power domain can * be lost. The cpu_pm notifiers provide a method for platform idle, suspend, * and hotplug implementations to notify the drivers for these blocks that * they may be reset. * * All cpu_pm notifications must be called with interrupts disabled. * * The notifications are split into two classes: CPU notifications and CPU * cluster notifications. * * CPU notifications apply to a single CPU and must be called on the affected * CPU. They are used to save per-cpu context for affected blocks. * * CPU cluster notifications apply to all CPUs in a single power domain. They * are used to save any global context for affected blocks, and must be called * after all the CPUs in the power domain have been notified of the low power * state.
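 *
 * A rough sketch of a driver hooking these notifications (the callback
 * body and all names here are hypothetical):
 *
 *    static int my_cpu_pm_notify(struct notifier_block *nb,
 *                                unsigned long action, void *data)
 *    {
 *        if (action == CPU_PM_ENTER)
 *            ...save this CPU's block state...
 *        else if (action == CPU_PM_EXIT)
 *            ...restore it...
 *        return NOTIFY_OK;
 *    }
 *
 *    static struct notifier_block my_cpu_pm_nb = {
 *        .notifier_call = my_cpu_pm_notify,
 *    };
 *
 *    cpu_pm_register_notifier(&my_cpu_pm_nb);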
*/ /* * Event codes passed as unsigned long val to notifier calls */ enum cpu_pm_event { /* A single cpu is entering a low power state */ CPU_PM_ENTER, /* A single cpu failed to enter a low power state */ CPU_PM_ENTER_FAILED, /* A single cpu is exiting a low power state */ CPU_PM_EXIT, /* A cpu power domain is entering a low power state */ CPU_CLUSTER_PM_ENTER, /* A cpu power domain failed to enter a low power state */ CPU_CLUSTER_PM_ENTER_FAILED, /* A cpu power domain is exiting a low power state */ CPU_CLUSTER_PM_EXIT, }; #ifdef CONFIG_CPU_PM int cpu_pm_register_notifier(struct notifier_block *nb); int cpu_pm_unregister_notifier(struct notifier_block *nb); int cpu_pm_enter(void); int cpu_pm_exit(void); int cpu_cluster_pm_enter(void); int cpu_cluster_pm_exit(void); #else static inline int cpu_pm_register_notifier(struct notifier_block *nb) { return 0; } static inline int cpu_pm_unregister_notifier(struct notifier_block *nb) { return 0; } static inline int cpu_pm_enter(void) { return 0; } static inline int cpu_pm_exit(void) { return 0; } static inline int cpu_cluster_pm_enter(void) { return 0; } static inline int cpu_cluster_pm_exit(void) { return 0; } #endif #endif hugetlb_inline.h 0000644 00000000566 14722070374 0007722 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HUGETLB_INLINE_H #define _LINUX_HUGETLB_INLINE_H #ifdef CONFIG_HUGETLB_PAGE #include <linux/mm.h> static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) { return !!(vma->vm_flags & VM_HUGETLB); } #else static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) { return false; } #endif #endif toshiba.h 0000644 00000000714 14722070374 0006356 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* toshiba.h -- Linux driver for accessing the SMM on Toshiba laptops * * Copyright (c) 1996-2000 Jonathan A. Buzzard (jonathan@buzzard.org.uk) * * Thanks to Juergen Heinzl <juergen@monocerus.demon.co.uk> for the pointers * on making sure the structure is aligned and packed. */ #ifndef _LINUX_TOSHIBA_H #define _LINUX_TOSHIBA_H #include <uapi/linux/toshiba.h> int tosh_smm(SMMRegisters *regs); #endif nfs_page.h 0000644 00000014601 14722070374 0006507 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/nfs_page.h * * Copyright (C) 2000 Trond Myklebust * * NFS page cache wrapper. */ #ifndef _LINUX_NFS_PAGE_H #define _LINUX_NFS_PAGE_H #include <linux/list.h> #include <linux/pagemap.h> #include <linux/wait.h> #include <linux/sunrpc/auth.h> #include <linux/nfs_xdr.h> #include <linux/kref.h> /* * Valid flags for a dirty buffer */ enum { PG_BUSY = 0, /* nfs_{un}lock_request */ PG_MAPPED, /* page private set for buffered io */ PG_CLEAN, /* write succeeded */ PG_COMMIT_TO_DS, /* used by pnfs layouts */ PG_INODE_REF, /* extra ref held by inode when in writeback */ PG_HEADLOCK, /* page group lock of wb_head */ PG_TEARDOWN, /* page group sync for destroy */ PG_UNLOCKPAGE, /* page group sync bit in read path */ PG_UPTODATE, /* page group sync bit in read path */ PG_WB_END, /* page group sync bit in write path */ PG_REMOVE, /* page group sync bit in write path */ PG_CONTENDED1, /* Is someone waiting for a lock? */ PG_CONTENDED2, /* Is someone waiting for a lock? 
*/ }; struct nfs_inode; struct nfs_page { struct list_head wb_list; /* Defines state of page: */ struct page *wb_page; /* page to read in/write out */ struct nfs_lock_context *wb_lock_context; /* lock context info */ pgoff_t wb_index; /* Offset >> PAGE_SHIFT */ unsigned int wb_offset, /* Offset & ~PAGE_MASK */ wb_pgbase, /* Start of page data */ wb_bytes; /* Length of request */ struct kref wb_kref; /* reference count */ unsigned long wb_flags; struct nfs_write_verifier wb_verf; /* Commit cookie */ struct nfs_page *wb_this_page; /* list of reqs for this page */ struct nfs_page *wb_head; /* head pointer for req list */ unsigned short wb_nio; /* Number of I/O attempts */ }; struct nfs_pageio_descriptor; struct nfs_pageio_ops { void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *); size_t (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *); int (*pg_doio)(struct nfs_pageio_descriptor *); unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *, struct nfs_page *); void (*pg_cleanup)(struct nfs_pageio_descriptor *); }; struct nfs_rw_ops { struct nfs_pgio_header *(*rw_alloc_header)(void); void (*rw_free_header)(struct nfs_pgio_header *); int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *, struct inode *); void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *); void (*rw_initiate)(struct nfs_pgio_header *, struct rpc_message *, const struct nfs_rpc_ops *, struct rpc_task_setup *, int); }; struct nfs_pgio_mirror { struct list_head pg_list; unsigned long pg_bytes_written; size_t pg_count; size_t pg_bsize; unsigned int pg_base; unsigned char pg_recoalesce : 1; }; struct nfs_pageio_descriptor { struct inode *pg_inode; const struct nfs_pageio_ops *pg_ops; const struct nfs_rw_ops *pg_rw_ops; int pg_ioflags; int pg_error; const struct rpc_call_ops *pg_rpc_callops; const struct nfs_pgio_completion_ops *pg_completion_ops; struct pnfs_layout_segment *pg_lseg; struct nfs_io_completion *pg_io_completion; struct nfs_direct_req *pg_dreq; unsigned int pg_bsize; /* default bsize for mirrors */ u32 pg_mirror_count; struct nfs_pgio_mirror *pg_mirrors; struct nfs_pgio_mirror pg_mirrors_static[1]; struct nfs_pgio_mirror *pg_mirrors_dynamic; u32 pg_mirror_idx; /* current mirror */ unsigned short pg_maxretrans; unsigned char pg_moreio : 1; }; /* arbitrarily selected limit to number of mirrors */ #define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX 16 #define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, struct page *page, unsigned int offset, unsigned int count); extern void nfs_release_request(struct nfs_page *); extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, struct inode *inode, const struct nfs_pageio_ops *pg_ops, const struct nfs_pgio_completion_ops *compl_ops, const struct nfs_rw_ops *rw_ops, size_t bsize, int how); extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, struct nfs_page *); extern int nfs_pageio_resend(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req); extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); extern void nfs_unlock_and_release_request(struct nfs_page *); extern int nfs_page_group_lock(struct nfs_page 
*); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); extern int nfs_page_set_headlock(struct nfs_page *req); extern void nfs_page_clear_headlock(struct nfs_page *req); extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); /* * Lock the page of an asynchronous request */ static inline int nfs_lock_request(struct nfs_page *req) { return !test_and_set_bit(PG_BUSY, &req->wb_flags); } /** * nfs_list_add_request - Insert a request into a list * @req: request * @head: head of list into which to insert the request. */ static inline void nfs_list_add_request(struct nfs_page *req, struct list_head *head) { list_add_tail(&req->wb_list, head); } /** * nfs_list_move_request - Move a request to a new list * @req: request * @head: head of list into which to insert the request. */ static inline void nfs_list_move_request(struct nfs_page *req, struct list_head *head) { list_move_tail(&req->wb_list, head); } /** * nfs_list_remove_request - Remove a request from its wb_list * @req: request */ static inline void nfs_list_remove_request(struct nfs_page *req) { if (list_empty(&req->wb_list)) return; list_del_init(&req->wb_list); } static inline struct nfs_page * nfs_list_entry(struct list_head *head) { return list_entry(head, struct nfs_page, wb_list); } static inline loff_t req_offset(struct nfs_page *req) { return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset; } static inline struct nfs_open_context * nfs_req_openctx(struct nfs_page *req) { return req->wb_lock_context->open_context; } #endif /* _LINUX_NFS_PAGE_H */ ctype.h 0000644 00000003377 14722070374 0006061 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CTYPE_H #define _LINUX_CTYPE_H /* * NOTE! This ctype does not handle EOF like the standard C * library is required to. */ #define _U 0x01 /* upper */ #define _L 0x02 /* lower */ #define _D 0x04 /* digit */ #define _C 0x08 /* cntrl */ #define _P 0x10 /* punct */ #define _S 0x20 /* white space (space/lf/tab) */ #define _X 0x40 /* hex digit */ #define _SP 0x80 /* hard space (0x20) */ extern const unsigned char _ctype[]; #define __ismask(x) (_ctype[(int)(unsigned char)(x)]) #define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) #define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) #define iscntrl(c) ((__ismask(c)&(_C)) != 0) static inline int isdigit(int c) { return '0' <= c && c <= '9'; } #define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) #define islower(c) ((__ismask(c)&(_L)) != 0) #define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) #define ispunct(c) ((__ismask(c)&(_P)) != 0) /* Note: isspace() must return false for %NUL-terminator */ #define isspace(c) ((__ismask(c)&(_S)) != 0) #define isupper(c) ((__ismask(c)&(_U)) != 0) #define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) #define isascii(c) (((unsigned char)(c))<=0x7f) #define toascii(c) (((unsigned char)(c))&0x7f) static inline unsigned char __tolower(unsigned char c) { if (isupper(c)) c -= 'A'-'a'; return c; } static inline unsigned char __toupper(unsigned char c) { if (islower(c)) c -= 'a'-'A'; return c; } #define tolower(c) __tolower(c) #define toupper(c) __toupper(c) /* * Fast implementation of tolower() for internal usage. Do not use in your * code. 
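 *
 * It relies on the ASCII layout: 'A'..'Z' differ from 'a'..'z' only in
 * bit 0x20, so _tolower('X') == 'x'. Unlike tolower() above, it mangles
 * non-letters (for instance _tolower('@') == '`'), hence internal only.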
*/ static inline char _tolower(const char c) { return c | 0x20; } /* Fast check for octal digit */ static inline int isodigit(const char c) { return c >= '0' && c <= '7'; } #endif start_kernel.h 0000644 00000000637 14722070374 0007426 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_START_KERNEL_H #define _LINUX_START_KERNEL_H #include <linux/linkage.h> #include <linux/init.h> /* Define the prototype for start_kernel here, rather than cluttering up something else. */ extern asmlinkage void __init start_kernel(void); extern void __init arch_call_rest_init(void); extern void __ref rest_init(void); #endif /* _LINUX_START_KERNEL_H */ elfcore-compat.h 0000644 00000002357 14722070374 0007632 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ELFCORE_COMPAT_H #define _LINUX_ELFCORE_COMPAT_H #include <linux/elf.h> #include <linux/elfcore.h> #include <linux/compat.h> /* * Make sure these layouts match the linux/elfcore.h native definitions. */ struct compat_elf_siginfo { compat_int_t si_signo; compat_int_t si_code; compat_int_t si_errno; }; struct compat_elf_prstatus { struct compat_elf_siginfo pr_info; short pr_cursig; compat_ulong_t pr_sigpend; compat_ulong_t pr_sighold; compat_pid_t pr_pid; compat_pid_t pr_ppid; compat_pid_t pr_pgrp; compat_pid_t pr_sid; struct old_timeval32 pr_utime; struct old_timeval32 pr_stime; struct old_timeval32 pr_cutime; struct old_timeval32 pr_cstime; compat_elf_gregset_t pr_reg; #ifdef CONFIG_BINFMT_ELF_FDPIC compat_ulong_t pr_exec_fdpic_loadmap; compat_ulong_t pr_interp_fdpic_loadmap; #endif compat_int_t pr_fpvalid; }; struct compat_elf_prpsinfo { char pr_state; char pr_sname; char pr_zomb; char pr_nice; compat_ulong_t pr_flag; __compat_uid_t pr_uid; __compat_gid_t pr_gid; compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; char pr_fname[16]; char pr_psargs[ELF_PRARGSZ]; }; #endif /* _LINUX_ELFCORE_COMPAT_H */ bpf_lirc.h 0000644 00000001272 14722070374 0006505 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BPF_LIRC_H #define _BPF_LIRC_H #include <uapi/linux/bpf.h> #ifdef CONFIG_BPF_LIRC_MODE2 int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); int lirc_prog_detach(const union bpf_attr *attr); int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); #else static inline int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) { return -EINVAL; } static inline int lirc_prog_detach(const union bpf_attr *attr) { return -EINVAL; } static inline int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { return -EINVAL; } #endif #endif /* _BPF_LIRC_H */ screen_info.h 0000644 00000000277 14722070374 0007223 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _SCREEN_INFO_H #define _SCREEN_INFO_H #include <uapi/linux/screen_info.h> extern struct screen_info screen_info; #endif /* _SCREEN_INFO_H */ timex.h 0000644 00000015315 14722070374 0006056 0 ustar 00 /***************************************************************************** * * * Copyright (c) David L. 
Mills 1993 * * * * Permission to use, copy, modify, and distribute this software and its * * documentation for any purpose and without fee is hereby granted, provided * * that the above copyright notice appears in all copies and that both the * * copyright notice and this permission notice appear in supporting * * documentation, and that the name University of Delaware not be used in * * advertising or publicity pertaining to distribution of the software * * without specific, written prior permission. The University of Delaware * * makes no representations about the suitability of this software for any * * purpose. It is provided "as is" without express or implied warranty. * * * *****************************************************************************/ /* * Modification history timex.h * * 29 Dec 97 Russell King * Moved CLOCK_TICK_RATE, CLOCK_TICK_FACTOR and FINETUNE to asm/timex.h * for ARM machines * * 9 Jan 97 Adrian Sun * Shifted LATCH define to allow access to alpha machines. * * 26 Sep 94 David L. Mills * Added defines for hybrid phase/frequency-lock loop. * * 19 Mar 94 David L. Mills * Moved defines from kernel routines to header file and added new * defines for PPS phase-lock loop. * * 20 Feb 94 David L. Mills * Revised status codes and structures for external clock and PPS * signal discipline. * * 28 Nov 93 David L. Mills * Adjusted parameters to improve stability and increase poll * interval. * * 17 Sep 93 David L. Mills * Created file $NTP/include/sys/timex.h * 07 Oct 93 Torsten Duwe * Derived linux/timex.h * 1995-08-13 Torsten Duwe * kernel PLL updated to 1994-12-13 specs (rfc-1589) * 1997-08-30 Ulrich Windl * Added new constant NTP_PHASE_LIMIT * 2004-08-12 Christoph Lameter * Reworked time interpolation logic */ #ifndef _LINUX_TIMEX_H #define _LINUX_TIMEX_H #include <uapi/linux/timex.h> #define ADJ_ADJTIME 0x8000 /* switch between adjtime/adjtimex modes */ #define ADJ_OFFSET_SINGLESHOT 0x0001 /* old-fashioned adjtime */ #define ADJ_OFFSET_READONLY 0x2000 /* read-only adjtime */ #include <linux/compiler.h> #include <linux/types.h> #include <linux/param.h> unsigned long random_get_entropy_fallback(void); #include <asm/timex.h> #ifndef random_get_entropy /* * The random_get_entropy() function is used by the /dev/random driver * in order to extract entropy via the relative unpredictability of * when an interrupt takes place versus a high-speed, fine-grained * timing source or cycle counter. Since it will occur on every * single interrupt, it must have a very low cost/overhead. * * By default we use get_cycles() for this purpose, but individual * architectures may override this in their asm/timex.h header file. * If a given arch does not have get_cycles(), then we fall back to * using random_get_entropy_fallback(). */ #ifdef get_cycles #define random_get_entropy() ((unsigned long)get_cycles()) #else #define random_get_entropy() random_get_entropy_fallback() #endif #endif /* * SHIFT_PLL is used as a dampening factor to define how much we * adjust the frequency correction for a given offset in PLL mode. * It is also used in dampening the offset correction, to define how * much of the current value in time_offset we correct for each * second. Changing this value changes the stiffness of the ntp * adjustment code. A lower value makes it more flexible, reducing * NTP convergence time. A higher value makes it stiffer, increasing * convergence time, but making the clock more stable. * * In David Mills' nanokernel reference implementation SHIFT_PLL is 4.
* However this seems to make convergence time much too long. * * https://lists.ntp.org/pipermail/hackers/2008-January/003487.html * * In the above mailing list discussion, it seems the value of 4 * was appropriate for other Unix systems with HZ=100, and that * SHIFT_PLL should be decreased as HZ increases. However, Linux's * clock steering implementation is HZ independent. * * Through experimentation, a SHIFT_PLL value of 2 was found to allow * for fast convergence (very similar to the NTPv3 code used prior to * v2.6.19), with good clock stability. * * * SHIFT_FLL is used as a dampening factor to define how much we * adjust the frequency correction for a given offset in FLL mode. * In David Mills' nanokernel reference implementation SHIFT_FLL is 2. * * MAXTC establishes the maximum time constant of the PLL. */ #define SHIFT_PLL 2 /* PLL frequency factor (shift) */ #define SHIFT_FLL 2 /* FLL frequency factor (shift) */ #define MAXTC 10 /* maximum time constant (shift) */ /* * SHIFT_USEC defines the scaling (shift) of the time_freq and * time_tolerance variables, which represent the current frequency * offset and maximum frequency tolerance. */ #define SHIFT_USEC 16 /* frequency offset scale (shift) */ #define PPM_SCALE ((s64)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) #define PPM_SCALE_INV_SHIFT 19 #define PPM_SCALE_INV ((1LL << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ PPM_SCALE + 1) #define MAXPHASE 500000000L /* max phase error (ns) */ #define MAXFREQ 500000 /* max frequency error (ns/s) */ #define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT) #define MINSEC 256 /* min interval between updates (s) */ #define MAXSEC 2048 /* max interval between updates (s) */ #define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */ /* * kernel variables * Note: maximum error = NTP synch distance = dispersion + delay / 2; * estimated error = NTP dispersion. */ extern unsigned long tick_usec; /* USER_HZ period (usec) */ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ /* Required to safely shift negative values */ #define shift_right(x, s) ({ \ __typeof__(x) __x = (x); \ __typeof__(s) __s = (s); \ __x < 0 ?
-(-__x >> __s) : __x >> __s; \ }) #define NTP_SCALE_SHIFT 32 #define NTP_INTERVAL_FREQ (HZ) #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) extern int do_adjtimex(struct __kernel_timex *); extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx); extern void hardpps(const struct timespec64 *, const struct timespec64 *); int read_current_timer(unsigned long *timer_val); void ntp_notify_cmos_timer(void); /* The clock frequency of the i8253/i8254 PIT */ #define PIT_TICK_RATE 1193182ul #endif /* LINUX_TIMEX_H */ netfilter_ingress.h 0000644 00000002702 14722070374 0010452 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NETFILTER_INGRESS_H_ #define _NETFILTER_INGRESS_H_ #include <linux/netfilter.h> #include <linux/netdevice.h> #ifdef CONFIG_NETFILTER_INGRESS static inline bool nf_hook_ingress_active(const struct sk_buff *skb) { #ifdef CONFIG_JUMP_LABEL if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) return false; #endif return rcu_access_pointer(skb->dev->nf_hooks_ingress); } /* caller must hold rcu_read_lock */ static inline int nf_hook_ingress(struct sk_buff *skb) { struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress); struct nf_hook_state state; int ret; /* Must recheck the ingress hook head, in the event it became NULL * after the check in nf_hook_ingress_active evaluated to true. */ if (unlikely(!e)) return 0; nf_hook_state_init(&state, NF_NETDEV_INGRESS, NFPROTO_NETDEV, skb->dev, NULL, NULL, dev_net(skb->dev), NULL); ret = nf_hook_slow(skb, &state, e, 0); if (ret == 0) return -1; return ret; } static inline void nf_hook_ingress_init(struct net_device *dev) { RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL); } #else /* CONFIG_NETFILTER_INGRESS */ static inline int nf_hook_ingress_active(struct sk_buff *skb) { return 0; } static inline int nf_hook_ingress(struct sk_buff *skb) { return 0; } static inline void nf_hook_ingress_init(struct net_device *dev) {} #endif /* CONFIG_NETFILTER_INGRESS */ #endif /* _NETFILTER_INGRESS_H_ */ f75375s.h 0000644 00000001035 14722070374 0005745 0 ustar 00 /* * f75375s.h - platform data structure for f75375s sensor * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2007, Riku Voipio <riku.voipio@iki.fi> */ #ifndef __LINUX_F75375S_H #define __LINUX_F75375S_H /* We want to set fans spinning on systems where there is no * BIOS to do that for us */ struct f75375s_platform_data { u8 pwm[2]; u8 pwm_enable[2]; }; #endif /* __LINUX_F75375S_H */ wireless.h 0000644 00000002632 14722070374 0006563 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * This file defines a set of standard wireless extensions * * Version : 22 16.3.07 * * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved.
*/ #ifndef _LINUX_WIRELESS_H #define _LINUX_WIRELESS_H #include <uapi/linux/wireless.h> #ifdef CONFIG_COMPAT #include <linux/compat.h> struct compat_iw_point { compat_caddr_t pointer; __u16 length; __u16 flags; }; #endif #ifdef CONFIG_COMPAT struct __compat_iw_event { __u16 len; /* Real length of this stuff */ __u16 cmd; /* Wireless IOCTL */ compat_caddr_t pointer; }; #define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer) #define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length) /* Size of the various events for compat */ #define IW_EV_COMPAT_CHAR_LEN (IW_EV_COMPAT_LCP_LEN + IFNAMSIZ) #define IW_EV_COMPAT_UINT_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(__u32)) #define IW_EV_COMPAT_FREQ_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_freq)) #define IW_EV_COMPAT_PARAM_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_param)) #define IW_EV_COMPAT_ADDR_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct sockaddr)) #define IW_EV_COMPAT_QUAL_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_quality)) #define IW_EV_COMPAT_POINT_LEN \ (IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \ IW_EV_COMPAT_POINT_OFF) #endif #endif /* _LINUX_WIRELESS_H */ stmp3xxx_rtc_wdt.h 0000644 00000000513 14722070374 0010266 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * stmp3xxx_rtc_wdt.h * * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. */ #ifndef __LINUX_STMP3XXX_RTC_WDT_H #define __LINUX_STMP3XXX_RTC_WDT_H struct stmp3xxx_wdt_pdata { void (*wdt_set_timeout)(struct device *dev, u32 timeout); }; #endif /* __LINUX_STMP3XXX_RTC_WDT_H */ if_pppol2tp.h 0000644 00000001004 14722070374 0007154 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /*************************************************************************** * Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661) * * This file supplies definitions required by the PPP over L2TP driver * (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c * * License: */ #ifndef __LINUX_IF_PPPOL2TP_H #define __LINUX_IF_PPPOL2TP_H #include <linux/in.h> #include <linux/in6.h> #include <uapi/linux/if_pppol2tp.h> #endif cleancache.h 0000644 00000007616 14722070374 0007003 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CLEANCACHE_H #define _LINUX_CLEANCACHE_H #include <linux/fs.h> #include <linux/exportfs.h> #include <linux/mm.h> #define CLEANCACHE_NO_POOL -1 #define CLEANCACHE_NO_BACKEND -2 #define CLEANCACHE_NO_BACKEND_SHARED -3 #define CLEANCACHE_KEY_MAX 6 /* * cleancache requires every file with a page in cleancache to have a * unique key unless/until the file is removed/truncated. 
For some * filesystems, the inode number is unique, but for "modern" filesystems * an exportable filehandle is required (see exportfs.h) */ struct cleancache_filekey { union { ino_t ino; __u32 fh[CLEANCACHE_KEY_MAX]; u32 key[CLEANCACHE_KEY_MAX]; } u; }; struct cleancache_ops { int (*init_fs)(size_t); int (*init_shared_fs)(uuid_t *uuid, size_t); int (*get_page)(int, struct cleancache_filekey, pgoff_t, struct page *); void (*put_page)(int, struct cleancache_filekey, pgoff_t, struct page *); void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t); void (*invalidate_inode)(int, struct cleancache_filekey); void (*invalidate_fs)(int); }; extern int cleancache_register_ops(const struct cleancache_ops *ops); extern void __cleancache_init_fs(struct super_block *); extern void __cleancache_init_shared_fs(struct super_block *); extern int __cleancache_get_page(struct page *); extern void __cleancache_put_page(struct page *); extern void __cleancache_invalidate_page(struct address_space *, struct page *); extern void __cleancache_invalidate_inode(struct address_space *); extern void __cleancache_invalidate_fs(struct super_block *); #ifdef CONFIG_CLEANCACHE #define cleancache_enabled (1) static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) { return mapping->host->i_sb->cleancache_poolid >= 0; } static inline bool cleancache_fs_enabled(struct page *page) { return cleancache_fs_enabled_mapping(page->mapping); } #else #define cleancache_enabled (0) #define cleancache_fs_enabled(_page) (0) #define cleancache_fs_enabled_mapping(_page) (0) #endif /* * The shim layer provided by these inline functions allows the compiler * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE * is disabled, to a single global variable check if CONFIG_CLEANCACHE * is enabled but no cleancache "backend" has dynamically enabled it, * and, for the most frequent cleancache ops, to a single global variable * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled * and a cleancache backend has dynamically enabled cleancache, but the * filesystem referenced by that cleancache op has not enabled cleancache. * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially * no measurable performance impact. */ static inline void cleancache_init_fs(struct super_block *sb) { if (cleancache_enabled) __cleancache_init_fs(sb); } static inline void cleancache_init_shared_fs(struct super_block *sb) { if (cleancache_enabled) __cleancache_init_shared_fs(sb); } static inline int cleancache_get_page(struct page *page) { if (cleancache_enabled && cleancache_fs_enabled(page)) return __cleancache_get_page(page); return -1; } static inline void cleancache_put_page(struct page *page) { if (cleancache_enabled && cleancache_fs_enabled(page)) __cleancache_put_page(page); } static inline void cleancache_invalidate_page(struct address_space *mapping, struct page *page) { /* careful... 
page->mapping is NULL sometimes when this is called */ if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) __cleancache_invalidate_page(mapping, page); } static inline void cleancache_invalidate_inode(struct address_space *mapping) { if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) __cleancache_invalidate_inode(mapping); } static inline void cleancache_invalidate_fs(struct super_block *sb) { if (cleancache_enabled) __cleancache_invalidate_fs(sb); } #endif /* _LINUX_CLEANCACHE_H */ genhd.h 0000644 00000060050 14722070374 0006011 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_GENHD_H #define _LINUX_GENHD_H /* * genhd.h Copyright (C) 1992 Drew Eckhardt * Generic hard disk header file by * Drew Eckhardt * * <drew@colorado.edu> */ #include <linux/types.h> #include <linux/kdev_t.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/percpu-refcount.h> #include <linux/uuid.h> #include <linux/blk_types.h> #include <asm/local.h> #ifdef CONFIG_BLOCK #define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev) #define dev_to_part(device) container_of((device), struct hd_struct, __dev) #define disk_to_dev(disk) (&(disk)->part0.__dev) #define part_to_dev(part) (&((part)->__dev)) extern struct device_type part_type; extern struct kobject *block_depr; extern struct class block_class; enum { /* These three have identical behaviour; use the second one if DOS FDISK gets confused about extended/logical partitions starting past cylinder 1023. */ DOS_EXTENDED_PARTITION = 5, LINUX_EXTENDED_PARTITION = 0x85, WIN98_EXTENDED_PARTITION = 0x0f, SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION, LINUX_SWAP_PARTITION = 0x82, LINUX_DATA_PARTITION = 0x83, LINUX_LVM_PARTITION = 0x8e, LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */ SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION, NEW_SOLARIS_X86_PARTITION = 0xbf, DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */ DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */ DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */ EZD_PARTITION = 0x55, /* EZ-DRIVE */ FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */ OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */ NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */ BSDI_PARTITION = 0xb7, /* BSDI Partition ID */ MINIX_PARTITION = 0x81, /* Minix Partition ID */ UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ }; #define DISK_MAX_PARTS 256 #define DISK_NAME_LEN 32 #include <linux/major.h> #include <linux/device.h> #include <linux/smp.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/workqueue.h> struct partition { unsigned char boot_ind; /* 0x80 - active */ unsigned char head; /* starting head */ unsigned char sector; /* starting sector */ unsigned char cyl; /* starting cylinder */ unsigned char sys_ind; /* What partition type */ unsigned char end_head; /* end head */ unsigned char end_sector; /* end sector */ unsigned char end_cyl; /* end cylinder */ __le32 start_sect; /* starting sector counting from 0 */ __le32 nr_sects; /* nr of sectors in partition */ } __attribute__((packed)); struct disk_stats { u64 nsecs[NR_STAT_GROUPS]; unsigned long sectors[NR_STAT_GROUPS]; unsigned long ios[NR_STAT_GROUPS]; unsigned long merges[NR_STAT_GROUPS]; unsigned long io_ticks; unsigned long time_in_queue; local_t in_flight[2]; }; #define PARTITION_META_INFO_VOLNAMELTH 64 /* * Enough for the string representation of any kind of UUID plus NULL. * EFI UUID is 36 characters. MSDOS UUID is 11 characters. 
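 *
 * (A hedged aside on cleancache.h above: a sketch of the intended
 * read-path pattern, in which a filesystem tries cleancache before
 * issuing real block I/O. example_readpage() and
 * example_submit_read_bio() are hypothetical.)
 */

static int example_submit_read_bio(struct page *page);  /* hypothetical */

static int example_readpage(struct page *page)
{
        /* cleancache_get_page() returns 0 and fills the page on a hit */
        if (cleancache_get_page(page) == 0)
                return 0;

        /* miss, or cleancache disabled: fall back to real I/O */
        return example_submit_read_bio(page);
}

/* (comment continued)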
*/ #define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1) struct partition_meta_info { char uuid[PARTITION_META_INFO_UUIDLTH]; u8 volname[PARTITION_META_INFO_VOLNAMELTH]; }; struct hd_struct { sector_t start_sect; /* * nr_sects is protected by sequence counter. One might extend a * partition while IO is happening to it and update of nr_sects * can be non-atomic on 32bit machines with 64bit sector_t. */ sector_t nr_sects; seqcount_t nr_sects_seq; sector_t alignment_offset; unsigned int discard_alignment; struct device __dev; struct kobject *holder_dir; int policy, partno; struct partition_meta_info *info; #ifdef CONFIG_FAIL_MAKE_REQUEST int make_it_fail; #endif unsigned long stamp; #ifdef CONFIG_SMP struct disk_stats __percpu *dkstats; #else struct disk_stats dkstats; #endif struct percpu_ref ref; struct rcu_work rcu_work; }; #define GENHD_FL_REMOVABLE 1 /* 2 is unused */ #define GENHD_FL_MEDIA_CHANGE_NOTIFY 4 #define GENHD_FL_CD 8 #define GENHD_FL_UP 16 #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ #define GENHD_FL_NATIVE_CAPACITY 128 #define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256 #define GENHD_FL_NO_PART_SCAN 512 #define GENHD_FL_HIDDEN 1024 enum { DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */ }; enum { /* Poll even if events_poll_msecs is unset */ DISK_EVENT_FLAG_POLL = 1 << 0, /* Forward events to udev */ DISK_EVENT_FLAG_UEVENT = 1 << 1, }; struct disk_part_tbl { struct rcu_head rcu_head; int len; struct hd_struct __rcu *last_lookup; struct hd_struct __rcu *part[]; }; struct disk_events; struct badblocks; #if defined(CONFIG_BLK_DEV_INTEGRITY) struct blk_integrity { const struct blk_integrity_profile *profile; unsigned char flags; unsigned char tuple_size; unsigned char interval_exp; unsigned char tag_size; }; #endif /* CONFIG_BLK_DEV_INTEGRITY */ struct gendisk { /* major, first_minor and minors are input parameters only, * don't use directly. Use disk_devt() and disk_max_parts(). */ int major; /* major number of driver */ int first_minor; int minors; /* maximum number of minors, =1 for * disks that can't be partitioned. */ char disk_name[DISK_NAME_LEN]; /* name of major driver */ char *(*devnode)(struct gendisk *gd, umode_t *mode); unsigned short events; /* supported events */ unsigned short event_flags; /* flags related to event processing */ /* Array of pointers to partitions indexed by partno. * Protected with matching bdev lock but stat and other * non-critical accesses use RCU. Always access through * helpers. 
*/ struct disk_part_tbl __rcu *part_tbl; struct hd_struct part0; const struct block_device_operations *fops; struct request_queue *queue; void *private_data; int flags; struct rw_semaphore lookup_sem; struct kobject *slave_dir; struct timer_rand_state *random; atomic_t sync_io; /* RAID */ struct disk_events *ev; #ifdef CONFIG_BLK_DEV_INTEGRITY struct kobject integrity_kobj; #endif /* CONFIG_BLK_DEV_INTEGRITY */ int node_id; struct badblocks *bb; struct lockdep_map lockdep_map; }; static inline struct gendisk *part_to_disk(struct hd_struct *part) { if (likely(part)) { if (part->partno) return dev_to_disk(part_to_dev(part)->parent); else return dev_to_disk(part_to_dev(part)); } return NULL; } static inline int disk_max_parts(struct gendisk *disk) { if (disk->flags & GENHD_FL_EXT_DEVT) return DISK_MAX_PARTS; return disk->minors; } static inline bool disk_part_scan_enabled(struct gendisk *disk) { return disk_max_parts(disk) > 1 && !(disk->flags & GENHD_FL_NO_PART_SCAN); } static inline dev_t disk_devt(struct gendisk *disk) { return MKDEV(disk->major, disk->first_minor); } static inline dev_t part_devt(struct hd_struct *part) { return part_to_dev(part)->devt; } extern struct hd_struct *__disk_get_part(struct gendisk *disk, int partno); extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno); static inline void disk_put_part(struct hd_struct *part) { if (likely(part)) put_device(part_to_dev(part)); } /* * Smarter partition iterator without context limits. */ #define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ #define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ #define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ #define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */ struct disk_part_iter { struct gendisk *disk; struct hd_struct *part; int idx; unsigned int flags; }; extern void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk, unsigned int flags); extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter); extern void disk_part_iter_exit(struct disk_part_iter *piter); extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector); /* * Macros to operate on percpu disk statistics: * * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters * and should be called between disk_stat_lock() and * disk_stat_unlock(). * * part_stat_read() can be called at any time. * * part_stat_{add|set_all}() and {init|free}_part_stats are for * internal use only. 
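 *
 * (Two hedged usage sketches for the helpers above and below:
 * example_account_read() brackets counter updates with part_stat_lock()/
 * part_stat_unlock() as this comment requires, and example_print_parts()
 * walks the partition table with the disk_part_iter API. Both functions
 * are hypothetical; STAT_READ comes from <linux/blk_types.h>.)
 */

static void example_account_read(struct hd_struct *part, unsigned int nr_sec)
{
        part_stat_lock();
        part_stat_inc(part, ios[STAT_READ]);
        part_stat_add(part, sectors[STAT_READ], nr_sec);
        part_stat_unlock();
}

static void example_print_parts(struct gendisk *disk)
{
        struct disk_part_iter piter;
        struct hd_struct *part;

        disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
        while ((part = disk_part_iter_next(&piter)))
                pr_info("%s: partno %d\n", disk->disk_name, part->partno);
        disk_part_iter_exit(&piter);
}

/* (comment continued)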
*/ #ifdef CONFIG_SMP #define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) #define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) #define part_stat_get_cpu(part, field, cpu) \ (per_cpu_ptr((part)->dkstats, (cpu))->field) #define part_stat_get(part, field) \ part_stat_get_cpu(part, field, smp_processor_id()) #define part_stat_read(part, field) \ ({ \ typeof((part)->dkstats->field) res = 0; \ unsigned int _cpu; \ for_each_possible_cpu(_cpu) \ res += per_cpu_ptr((part)->dkstats, _cpu)->field; \ res; \ }) static inline void part_stat_set_all(struct hd_struct *part, int value) { int i; for_each_possible_cpu(i) memset(per_cpu_ptr(part->dkstats, i), value, sizeof(struct disk_stats)); } static inline int init_part_stats(struct hd_struct *part) { part->dkstats = alloc_percpu(struct disk_stats); if (!part->dkstats) return 0; return 1; } static inline void free_part_stats(struct hd_struct *part) { free_percpu(part->dkstats); } #else /* !CONFIG_SMP */ #define part_stat_lock() ({ rcu_read_lock(); 0; }) #define part_stat_unlock() rcu_read_unlock() #define part_stat_get(part, field) ((part)->dkstats.field) #define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field) #define part_stat_read(part, field) part_stat_get(part, field) static inline void part_stat_set_all(struct hd_struct *part, int value) { memset(&part->dkstats, value, sizeof(struct disk_stats)); } static inline int init_part_stats(struct hd_struct *part) { return 1; } static inline void free_part_stats(struct hd_struct *part) { } #endif /* CONFIG_SMP */ #define part_stat_read_msecs(part, which) \ div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC) #define part_stat_read_accum(part, field) \ (part_stat_read(part, field[STAT_READ]) + \ part_stat_read(part, field[STAT_WRITE]) + \ part_stat_read(part, field[STAT_DISCARD])) #define __part_stat_add(part, field, addnd) \ (part_stat_get(part, field) += (addnd)) #define part_stat_add(part, field, addnd) do { \ __part_stat_add((part), field, addnd); \ if ((part)->partno) \ __part_stat_add(&part_to_disk((part))->part0, \ field, addnd); \ } while (0) #define part_stat_dec(gendiskp, field) \ part_stat_add(gendiskp, field, -1) #define part_stat_inc(gendiskp, field) \ part_stat_add(gendiskp, field, 1) #define part_stat_sub(gendiskp, field, subnd) \ part_stat_add(gendiskp, field, -subnd) #define part_stat_local_dec(gendiskp, field) \ local_dec(&(part_stat_get(gendiskp, field))) #define part_stat_local_inc(gendiskp, field) \ local_inc(&(part_stat_get(gendiskp, field))) #define part_stat_local_read(gendiskp, field) \ local_read(&(part_stat_get(gendiskp, field))) #define part_stat_local_read_cpu(gendiskp, field, cpu) \ local_read(&(part_stat_get_cpu(gendiskp, field, cpu))) unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part); void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, unsigned int inflight[2]); void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw); void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw); static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk) { if (disk) return kzalloc_node(sizeof(struct partition_meta_info), GFP_KERNEL, disk->node_id); return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL); } static inline void free_part_info(struct hd_struct *part) { kfree(part->info); } void update_io_ticks(struct hd_struct *part, unsigned long now, bool end); /* block/genhd.c */ extern void device_add_disk(struct device *parent, 
struct gendisk *disk, const struct attribute_group **groups); static inline void add_disk(struct gendisk *disk) { device_add_disk(NULL, disk, NULL); } extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk); static inline void add_disk_no_queue_reg(struct gendisk *disk) { device_add_disk_no_queue_reg(NULL, disk); } extern void del_gendisk(struct gendisk *gp); extern struct gendisk *get_gendisk(dev_t dev, int *partno); extern struct block_device *bdget_disk(struct gendisk *disk, int partno); extern void set_device_ro(struct block_device *bdev, int flag); extern void set_disk_ro(struct gendisk *disk, int flag); static inline int get_disk_ro(struct gendisk *disk) { return disk->part0.policy; } extern void disk_block_events(struct gendisk *disk); extern void disk_unblock_events(struct gendisk *disk); extern void disk_flush_events(struct gendisk *disk, unsigned int mask); extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); /* drivers/char/random.c */ extern void add_disk_randomness(struct gendisk *disk) __latent_entropy; extern void rand_initialize_disk(struct gendisk *disk); static inline sector_t get_start_sect(struct block_device *bdev) { return bdev->bd_part->start_sect; } static inline sector_t get_capacity(struct gendisk *disk) { return disk->part0.nr_sects; } static inline void set_capacity(struct gendisk *disk, sector_t size) { disk->part0.nr_sects = size; } #ifdef CONFIG_SOLARIS_X86_PARTITION #define SOLARIS_X86_NUMSLICE 16 #define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL) struct solaris_x86_slice { __le16 s_tag; /* ID tag of partition */ __le16 s_flag; /* permission flags */ __le32 s_start; /* start sector no of partition */ __le32 s_size; /* # of blocks in partition */ }; struct solaris_x86_vtoc { unsigned int v_bootinfo[3]; /* info needed by mboot (unsupported) */ __le32 v_sanity; /* to verify vtoc sanity */ __le32 v_version; /* layout version */ char v_volume[8]; /* volume name */ __le16 v_sectorsz; /* sector size in bytes */ __le16 v_nparts; /* number of partitions */ unsigned int v_reserved[10]; /* free space */ struct solaris_x86_slice v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */ unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */ char v_asciilabel[128]; /* for compatibility */ }; #endif /* CONFIG_SOLARIS_X86_PARTITION */ #ifdef CONFIG_BSD_DISKLABEL /* * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il> * updated by Marc Espie <Marc.Espie@openbsd.org> */ /* check against BSD src/sys/sys/disklabel.h for consistency */ #define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */ #define BSD_MAXPARTITIONS 16 #define OPENBSD_MAXPARTITIONS 16 #define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */ struct bsd_disklabel { __le32 d_magic; /* the magic number */ __s16 d_type; /* drive type */ __s16 d_subtype; /* controller/d_type specific */ char d_typename[16]; /* type name, e.g. "eagle" */ char d_packname[16]; /* pack identifier */ __u32 d_secsize; /* # of bytes per sector */ __u32 d_nsectors; /* # of data sectors per track */ __u32 d_ntracks; /* # of tracks per cylinder */ __u32 d_ncylinders; /* # of data cylinders per unit */ __u32 d_secpercyl; /* # of data sectors per cylinder */ __u32 d_secperunit; /* # of data sectors per unit */ __u16 d_sparespertrack; /* # of spare sectors per track */ __u16 d_sparespercyl; /* # of spare sectors per cylinder */ __u32 d_acylinders; /* # of alt. 
cylinders per unit */ __u16 d_rpm; /* rotational speed */ __u16 d_interleave; /* hardware sector interleave */ __u16 d_trackskew; /* sector 0 skew, per track */ __u16 d_cylskew; /* sector 0 skew, per cylinder */ __u32 d_headswitch; /* head switch time, usec */ __u32 d_trkseek; /* track-to-track seek, usec */ __u32 d_flags; /* generic flags */ #define NDDATA 5 __u32 d_drivedata[NDDATA]; /* drive-type specific information */ #define NSPARE 5 __u32 d_spare[NSPARE]; /* reserved for future use */ __le32 d_magic2; /* the magic number (again) */ __le16 d_checksum; /* xor of data incl. partitions */ /* filesystem and partition information: */ __le16 d_npartitions; /* number of partitions in following */ __le32 d_bbsize; /* size of boot area at sn0, bytes */ __le32 d_sbsize; /* max size of fs superblock, bytes */ struct bsd_partition { /* the partition table */ __le32 p_size; /* number of sectors in partition */ __le32 p_offset; /* starting sector */ __le32 p_fsize; /* filesystem basic fragment size */ __u8 p_fstype; /* filesystem type, see below */ __u8 p_frag; /* filesystem fragments per block */ __le16 p_cpg; /* filesystem cylinders per group */ } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */ }; #endif /* CONFIG_BSD_DISKLABEL */ #ifdef CONFIG_UNIXWARE_DISKLABEL /* * Unixware slices support by Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl> * and Krzysztof G. Baranowski <kgb@knm.org.pl> */ #define UNIXWARE_DISKMAGIC (0xCA5E600DUL) /* The disk magic number */ #define UNIXWARE_DISKMAGIC2 (0x600DDEEEUL) /* The slice table magic nr */ #define UNIXWARE_NUMSLICE 16 #define UNIXWARE_FS_UNUSED 0 /* Unused slice entry ID */ struct unixware_slice { __le16 s_label; /* label */ __le16 s_flags; /* permission flags */ __le32 start_sect; /* starting sector */ __le32 nr_sects; /* number of sectors in slice */ }; struct unixware_disklabel { __le32 d_type; /* drive type */ __le32 d_magic; /* the magic number */ __le32 d_version; /* version number */ char d_serial[12]; /* serial number of the device */ __le32 d_ncylinders; /* # of data cylinders per device */ __le32 d_ntracks; /* # of tracks per cylinder */ __le32 d_nsectors; /* # of data sectors per track */ __le32 d_secsize; /* # of bytes per sector */ __le32 d_part_start; /* # of first sector of this partition */ __le32 d_unknown1[12]; /* ? */ __le32 d_alt_tbl; /* byte offset of alternate table */ __le32 d_alt_len; /* byte length of alternate table */ __le32 d_phys_cyl; /* # of physical cylinders per device */ __le32 d_phys_trk; /* # of physical tracks per cylinder */ __le32 d_phys_sec; /* # of physical sectors per track */ __le32 d_phys_bytes; /* # of physical bytes per sector */ __le32 d_unknown2; /* ? */ __le32 d_unknown3; /* ? */ __le32 d_pad[8]; /* pad */ struct unixware_vtoc { __le32 v_magic; /* the magic number */ __le32 v_version; /* version number */ char v_name[8]; /* volume name */ __le16 v_nslices; /* # of slices */ __le16 v_unknown1; /* ? 
*/ __le32 v_reserved[10]; /* reserved */ struct unixware_slice v_slice[UNIXWARE_NUMSLICE]; /* slice headers */ } vtoc; }; /* 408 */ #endif /* CONFIG_UNIXWARE_DISKLABEL */ #ifdef CONFIG_MINIX_SUBPARTITION # define MINIX_NR_SUBPARTITIONS 4 #endif /* CONFIG_MINIX_SUBPARTITION */ #define ADDPART_FLAG_NONE 0 #define ADDPART_FLAG_RAID 1 #define ADDPART_FLAG_WHOLEDISK 2 extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt); extern void blk_free_devt(dev_t devt); extern void blk_invalidate_devt(dev_t devt); extern dev_t blk_lookup_devt(const char *name, int partno); extern char *disk_name (struct gendisk *hd, int partno, char *buf); extern int disk_expand_part_tbl(struct gendisk *disk, int target); extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev); extern struct hd_struct * __must_check add_partition(struct gendisk *disk, int partno, sector_t start, sector_t len, int flags, struct partition_meta_info *info); extern void __delete_partition(struct percpu_ref *); extern void delete_partition(struct gendisk *, int); extern void printk_all_partitions(void); extern struct gendisk *__alloc_disk_node(int minors, int node_id); extern struct kobject *get_disk_and_module(struct gendisk *disk); extern void put_disk(struct gendisk *disk); extern void put_disk_and_module(struct gendisk *disk); extern void blk_register_region(dev_t devt, unsigned long range, struct module *module, struct kobject *(*probe)(dev_t, int *, void *), int (*lock)(dev_t, void *), void *data); extern void blk_unregister_region(dev_t devt, unsigned long range); extern ssize_t part_size_show(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, char *buf); #ifdef CONFIG_FAIL_MAKE_REQUEST extern ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t part_fail_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); #endif /* CONFIG_FAIL_MAKE_REQUEST */ #define alloc_disk_node(minors, node_id) \ ({ \ static struct lock_class_key __key; \ const char *__name; \ struct gendisk *__disk; \ \ __name = "(gendisk_completion)"#minors"("#node_id")"; \ \ __disk = __alloc_disk_node(minors, node_id); \ \ if (__disk) \ lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \ \ __disk; \ }) #define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) static inline int hd_ref_init(struct hd_struct *part) { if (percpu_ref_init(&part->ref, __delete_partition, 0, GFP_KERNEL)) return -ENOMEM; return 0; } static inline void hd_struct_get(struct hd_struct *part) { percpu_ref_get(&part->ref); } static inline int hd_struct_try_get(struct hd_struct *part) { return percpu_ref_tryget_live(&part->ref); } static inline void hd_struct_put(struct hd_struct *part) { percpu_ref_put(&part->ref); } static inline void hd_struct_kill(struct hd_struct *part) { percpu_ref_kill(&part->ref); } static inline void hd_free_part(struct hd_struct *part) { free_part_stats(part); free_part_info(part); percpu_ref_exit(&part->ref); } /* * Any access of part->nr_sects which is not protected by partition * bd_mutex or gendisk bdev bd_mutex, should be done using this * accessor function. * * Code written along the lines of i_size_read() and i_size_write(). 
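 *
 * (A hedged sketch of the intended pairing: example_grow_partition() is a
 * hypothetical resize helper, and the bd_mutex it takes provides the
 * writer serialization that part_nr_sects_write() below requires.)
 */

static void example_grow_partition(struct block_device *bdev,
                                   struct hd_struct *part, sector_t extra)
{
        mutex_lock(&bdev->bd_mutex);    /* serialize nr_sects writers */
        part_nr_sects_write(part, part_nr_sects_read(part) + extra);
        mutex_unlock(&bdev->bd_mutex);
}

/* (comment continued)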
* CONFIG_PREEMPT case optimizes the case of UP kernel with preemption * on. */ static inline sector_t part_nr_sects_read(struct hd_struct *part) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) sector_t nr_sects; unsigned seq; do { seq = read_seqcount_begin(&part->nr_sects_seq); nr_sects = part->nr_sects; } while (read_seqcount_retry(&part->nr_sects_seq, seq)); return nr_sects; #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) sector_t nr_sects; preempt_disable(); nr_sects = part->nr_sects; preempt_enable(); return nr_sects; #else return part->nr_sects; #endif } /* * Should be called with the partition mutex (typically bd_mutex) held, to * provide mutual exclusion among writers; otherwise the seqcount might be * left in a wrong state, leaving the readers spinning infinitely. */ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) preempt_disable(); write_seqcount_begin(&part->nr_sects_seq); part->nr_sects = size; write_seqcount_end(&part->nr_sects_seq); preempt_enable(); #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) preempt_disable(); part->nr_sects = size; preempt_enable(); #else part->nr_sects = size; #endif } #if defined(CONFIG_BLK_DEV_INTEGRITY) extern void blk_integrity_add(struct gendisk *); extern void blk_integrity_del(struct gendisk *); #else /* CONFIG_BLK_DEV_INTEGRITY */ static inline void blk_integrity_add(struct gendisk *disk) { } static inline void blk_integrity_del(struct gendisk *disk) { } #endif /* CONFIG_BLK_DEV_INTEGRITY */ #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } static inline dev_t blk_lookup_devt(const char *name, int partno) { dev_t devt = MKDEV(0, 0); return devt; } #endif /* CONFIG_BLOCK */ #endif /* _LINUX_GENHD_H */ initrd.h 0000644 00000001504 14722070374 0006214 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */ /* 1 = load ramdisk, 0 = don't load */ extern int rd_doload; /* 1 = prompt for ramdisk, 0 = don't prompt */ extern int rd_prompt; /* starting block # of image */ extern int rd_image_start; /* size of a single RAM disk */ extern unsigned long rd_size; /* 1 if it is not an error if initrd_start < memory_start */ extern int initrd_below_start_ok; /* free_initrd_mem always gets called with the next two as arguments.
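 *
 * (A hedged aside on genhd.h above: the classic bring-up sequence is
 * alloc_disk(), set_capacity(), then device_add_disk().
 * example_blk_probe() and its prepared request_queue are hypothetical,
 * a real driver must also set disk->fops, and teardown would use
 * del_gendisk()/put_disk().)
 */

static int example_blk_probe(struct device *parent, struct request_queue *q,
                             int major, sector_t capacity)
{
        struct gendisk *disk = alloc_disk(1);   /* one minor, no partitions */

        if (!disk)
                return -ENOMEM;
        disk->major = major;
        disk->first_minor = 0;
        snprintf(disk->disk_name, DISK_NAME_LEN, "exampledisk");
        disk->queue = q;
        set_capacity(disk, capacity);
        device_add_disk(parent, disk, NULL);    /* the disk goes live here */
        return 0;
}

/* (comment continued)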
*/ extern unsigned long initrd_start, initrd_end; extern void free_initrd_mem(unsigned long, unsigned long); extern phys_addr_t phys_initrd_start; extern unsigned long phys_initrd_size; extern unsigned int real_root_dev; extern char __initramfs_start[]; extern unsigned long __initramfs_size; w1-gpio.h 0000644 00000000755 14722070374 0006215 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * w1-gpio interface to platform code * * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi> */ #ifndef _LINUX_W1_GPIO_H #define _LINUX_W1_GPIO_H struct gpio_desc; /** * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio */ struct w1_gpio_platform_data { struct gpio_desc *gpiod; struct gpio_desc *pullup_gpiod; void (*enable_external_pullup)(int enable); unsigned int pullup_duration; }; #endif /* _LINUX_W1_GPIO_H */ posix_acl.h 0000644 00000006137 14722070374 0006713 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* File: linux/posix_acl.h (C) 2002 Andreas Gruenbacher, <a.gruenbacher@computer.org> */ #ifndef __LINUX_POSIX_ACL_H #define __LINUX_POSIX_ACL_H #include <linux/bug.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/refcount.h> #include <uapi/linux/posix_acl.h> struct posix_acl_entry { short e_tag; unsigned short e_perm; union { kuid_t e_uid; kgid_t e_gid; }; }; struct posix_acl { refcount_t a_refcount; struct rcu_head a_rcu; unsigned int a_count; struct posix_acl_entry a_entries[0]; }; #define FOREACH_ACL_ENTRY(pa, acl, pe) \ for(pa=(acl)->a_entries, pe=pa+(acl)->a_count; pa<pe; pa++) /* * Duplicate an ACL handle. */ static inline struct posix_acl * posix_acl_dup(struct posix_acl *acl) { if (acl) refcount_inc(&acl->a_refcount); return acl; } /* * Free an ACL handle. */ static inline void posix_acl_release(struct posix_acl *acl) { if (acl && refcount_dec_and_test(&acl->a_refcount)) kfree_rcu(acl, a_rcu); } /* posix_acl.c */ extern void posix_acl_init(struct posix_acl *, int); extern struct posix_acl *posix_acl_alloc(int, gfp_t); extern int posix_acl_valid(struct user_namespace *, const struct posix_acl *); extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *); extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t); extern struct posix_acl *get_posix_acl(struct inode *, int); extern int set_posix_acl(struct inode *, int, struct posix_acl *); #ifdef CONFIG_FS_POSIX_ACL extern int posix_acl_chmod(struct inode *, umode_t); extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, struct posix_acl **); extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **); extern int simple_set_acl(struct inode *, struct posix_acl *, int); extern int simple_acl_create(struct inode *, struct inode *); struct posix_acl *get_cached_acl(struct inode *inode, int type); struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl); void forget_cached_acl(struct inode *inode, int type); void forget_all_cached_acls(struct inode *inode); static inline void cache_no_acl(struct inode *inode) { inode->i_acl = NULL; inode->i_default_acl = NULL; } #else static inline int posix_acl_chmod(struct inode *inode, umode_t mode) { return 0; } #define simple_set_acl NULL static inline int simple_acl_create(struct inode *dir, struct inode 
*inode) { return 0; } static inline void cache_no_acl(struct inode *inode) { } static inline int posix_acl_create(struct inode *inode, umode_t *mode, struct posix_acl **default_acl, struct posix_acl **acl) { *default_acl = *acl = NULL; return 0; } static inline void forget_all_cached_acls(struct inode *inode) { } #endif /* CONFIG_FS_POSIX_ACL */ struct posix_acl *get_acl(struct inode *inode, int type); #endif /* __LINUX_POSIX_ACL_H */ cpuidle.h 0000644 00000023474 14722070374 0006362 0 ustar 00 /* * cpuidle.h - a generic framework for CPU idle power management * * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> * Shaohua Li <shaohua.li@intel.com> * Adam Belay <abelay@novell.com> * * This code is licenced under the GPL. */ #ifndef _LINUX_CPUIDLE_H #define _LINUX_CPUIDLE_H #include <linux/percpu.h> #include <linux/list.h> #include <linux/hrtimer.h> #define CPUIDLE_STATE_MAX 10 #define CPUIDLE_NAME_LEN 16 #define CPUIDLE_DESC_LEN 32 struct module; struct cpuidle_device; struct cpuidle_driver; /**************************** * CPUIDLE DEVICE INTERFACE * ****************************/ struct cpuidle_state_usage { unsigned long long disable; unsigned long long usage; unsigned long long time; /* in US */ unsigned long long above; /* Number of times it's been too deep */ unsigned long long below; /* Number of times it's been too shallow */ #ifdef CONFIG_SUSPEND unsigned long long s2idle_usage; unsigned long long s2idle_time; /* in US */ #endif }; struct cpuidle_state { char name[CPUIDLE_NAME_LEN]; char desc[CPUIDLE_DESC_LEN]; unsigned int flags; unsigned int exit_latency; /* in US */ int power_usage; /* in mW */ unsigned int target_residency; /* in US */ bool disabled; /* disabled on all CPUs */ int (*enter) (struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); int (*enter_dead) (struct cpuidle_device *dev, int index); /* * CPUs execute ->enter_s2idle with the local tick or entire timekeeping * suspended, so it must not re-enable interrupts at any point (even * temporarily) or attempt to change states of clock event devices. 
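 *
 * (A hedged aside, reaching back to posix_acl.h above: the sketch below
 * pairs FOREACH_ACL_ENTRY() with the refcounting helpers.
 * example_count_user_entries() is hypothetical; ACL_USER comes from
 * <uapi/linux/posix_acl.h>.)
 */

static int example_count_user_entries(struct posix_acl *acl)
{
        const struct posix_acl_entry *pa, *pe;
        int n = 0;

        if (!acl)
                return 0;
        acl = posix_acl_dup(acl);       /* take a reference */
        FOREACH_ACL_ENTRY(pa, acl, pe)
                if (pa->e_tag == ACL_USER)
                        n++;
        posix_acl_release(acl);         /* drop it again */
        return n;
}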
*/ void (*enter_s2idle) (struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); }; /* Idle State Flags */ #define CPUIDLE_FLAG_NONE (0x00) #define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ #define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ #define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ struct cpuidle_device_kobj; struct cpuidle_state_kobj; struct cpuidle_driver_kobj; struct cpuidle_device { unsigned int registered:1; unsigned int enabled:1; unsigned int use_deepest_state:1; unsigned int poll_time_limit:1; unsigned int cpu; ktime_t next_hrtimer; int last_state_idx; int last_residency; u64 poll_limit_ns; struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; struct cpuidle_driver_kobj *kobj_driver; struct cpuidle_device_kobj *kobj_dev; struct list_head device_list; #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED cpumask_t coupled_cpus; struct cpuidle_coupled *coupled; #endif }; DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); /**************************** * CPUIDLE DRIVER INTERFACE * ****************************/ struct cpuidle_driver { const char *name; struct module *owner; int refcnt; /* used by the cpuidle framework to setup the broadcast timer */ unsigned int bctimer:1; /* states array must be ordered in decreasing power consumption */ struct cpuidle_state states[CPUIDLE_STATE_MAX]; int state_count; int safe_state_index; /* the driver handles the cpus in cpumask */ struct cpumask *cpumask; /* preferred governor to switch at register time */ const char *governor; }; #ifdef CONFIG_CPU_IDLE extern void disable_cpuidle(void); extern bool cpuidle_not_available(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, bool *stop_tick); extern int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index); extern void cpuidle_reflect(struct cpuidle_device *dev, int index); extern u64 cpuidle_poll_time(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_register_driver(struct cpuidle_driver *drv); extern struct cpuidle_driver *cpuidle_get_driver(void); extern struct cpuidle_driver *cpuidle_driver_ref(void); extern void cpuidle_driver_unref(void); extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); extern int cpuidle_register_device(struct cpuidle_device *dev); extern void cpuidle_unregister_device(struct cpuidle_device *dev); extern int cpuidle_register(struct cpuidle_driver *drv, const struct cpumask *const coupled_cpus); extern void cpuidle_unregister(struct cpuidle_driver *drv); extern void cpuidle_pause_and_lock(void); extern void cpuidle_resume_and_unlock(void); extern void cpuidle_pause(void); extern void cpuidle_resume(void); extern int cpuidle_enable_device(struct cpuidle_device *dev); extern void cpuidle_disable_device(struct cpuidle_device *dev); extern int cpuidle_play_dead(void); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); static inline struct cpuidle_device *cpuidle_get_device(void) {return __this_cpu_read(cpuidle_devices); } #else static inline void disable_cpuidle(void) { } static inline bool cpuidle_not_available(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return true; } static inline int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, bool *stop_tick) {return -ENODEV; } static inline 
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) {return -ENODEV; } static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { } static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return 0; } static inline int cpuidle_register_driver(struct cpuidle_driver *drv) {return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; } static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; } static inline void cpuidle_driver_unref(void) {} static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { } static inline int cpuidle_register_device(struct cpuidle_device *dev) {return -ENODEV; } static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { } static inline int cpuidle_register(struct cpuidle_driver *drv, const struct cpumask *const coupled_cpus) {return -ENODEV; } static inline void cpuidle_unregister(struct cpuidle_driver *drv) { } static inline void cpuidle_pause_and_lock(void) { } static inline void cpuidle_resume_and_unlock(void) { } static inline void cpuidle_pause(void) { } static inline void cpuidle_resume(void) { } static inline int cpuidle_enable_device(struct cpuidle_device *dev) {return -ENODEV; } static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } static inline int cpuidle_play_dead(void) {return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_cpu_driver( struct cpuidle_device *dev) {return NULL; } static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } #endif #ifdef CONFIG_CPU_IDLE extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern void cpuidle_use_deepest_state(bool enable); #else static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return -ENODEV; } static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return -ENODEV; } static inline void cpuidle_use_deepest_state(bool enable) { } #endif /* kernel/sched/idle.c */ extern void sched_idle_set_state(struct cpuidle_state *idle_state); extern void default_idle_call(void); #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a); #else static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a) { } #endif #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX) void cpuidle_poll_state_init(struct cpuidle_driver *drv); #else static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {} #endif /****************************** * CPUIDLE GOVERNOR INTERFACE * ******************************/ struct cpuidle_governor { char name[CPUIDLE_NAME_LEN]; struct list_head governor_list; unsigned int rating; int (*enable) (struct cpuidle_driver *drv, struct cpuidle_device *dev); void (*disable) (struct cpuidle_driver *drv, struct cpuidle_device *dev); int (*select) (struct cpuidle_driver *drv, struct cpuidle_device *dev, bool *stop_tick); void (*reflect) (struct cpuidle_device *dev, int index); }; #ifdef CONFIG_CPU_IDLE extern int cpuidle_register_governor(struct cpuidle_governor *gov); extern int cpuidle_governor_latency_req(unsigned int cpu); #else static inline int cpuidle_register_governor(struct cpuidle_governor *gov) {return 0;} #endif #define 
__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \ idx, \ state, \ is_retention) \ ({ \ int __ret = 0; \ \ if (!idx) { \ cpu_do_idle(); \ return idx; \ } \ \ if (!is_retention) \ __ret = cpu_pm_enter(); \ if (!__ret) { \ __ret = low_level_idle_enter(state); \ if (!is_retention) \ cpu_pm_exit(); \ } \ \ __ret ? -1 : idx; \ }) #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0) #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1) #define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0) #define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1) #endif /* _LINUX_CPUIDLE_H */ fpga/altera-pr-ip-core.h 0000644 00000000736 14722070374 0011071 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Driver for Altera Partial Reconfiguration IP Core * * Copyright (C) 2016 Intel Corporation * * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation * by Alan Tull <atull@opensource.altera.com> */ #ifndef _ALT_PR_IP_CORE_H #define _ALT_PR_IP_CORE_H #include <linux/io.h> int alt_pr_register(struct device *dev, void __iomem *reg_base); void alt_pr_unregister(struct device *dev); #endif /* _ALT_PR_IP_CORE_H */ fpga/adi-axi-common.h 0000644 00000000732 14722070374 0010444 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Analog Devices AXI common registers & definitions * * Copyright 2019 Analog Devices Inc. * * https://wiki.analog.com/resources/fpga/docs/axi_ip * https://wiki.analog.com/resources/fpga/docs/hdl/regmap */ #ifndef ADI_AXI_COMMON_H_ #define ADI_AXI_COMMON_H_ #define ADI_AXI_REG_VERSION 0x0000 #define ADI_AXI_PCORE_VER(major, minor, patch) \ (((major) << 16) | ((minor) << 8) | (patch)) #endif /* ADI_AXI_COMMON_H_ */ fpga/fpga-bridge.h 0000644 00000005057 14722070374 0010016 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FPGA_BRIDGE_H #define _LINUX_FPGA_BRIDGE_H #include <linux/device.h> #include <linux/fpga/fpga-mgr.h> struct fpga_bridge; /** * struct fpga_bridge_ops - ops for low level FPGA bridge drivers * @enable_show: returns the FPGA bridge's status * @enable_set: set a FPGA bridge as enabled or disabled * @fpga_bridge_remove: set FPGA into a specific state during driver remove * @groups: optional attribute groups. 
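 *
 * (A hedged aside, reaching back to the CPU_PM_CPU_IDLE_ENTER() helper in
 * cpuidle.h above: a platform ->enter callback typically wraps its
 * low-level suspend routine as below. example_suspend() and
 * example_idle_enter() are hypothetical.)
 */

static int example_suspend(unsigned long state);        /* hypothetical */

static int example_idle_enter(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv, int idx)
{
        /* index 0 is handled as a plain cpu_do_idle() state by the helper */
        return CPU_PM_CPU_IDLE_ENTER(example_suspend, idx);
}

/* (comment continued)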
*/ struct fpga_bridge_ops { int (*enable_show)(struct fpga_bridge *bridge); int (*enable_set)(struct fpga_bridge *bridge, bool enable); void (*fpga_bridge_remove)(struct fpga_bridge *bridge); const struct attribute_group **groups; }; /** * struct fpga_bridge - FPGA bridge structure * @name: name of low level FPGA bridge * @dev: FPGA bridge device * @mutex: enforces exclusive reference to bridge * @br_ops: pointer to struct of FPGA bridge ops * @info: fpga image specific information * @node: FPGA bridge list node * @priv: low level driver private data */ struct fpga_bridge { const char *name; struct device dev; struct mutex mutex; /* for exclusive reference to bridge */ const struct fpga_bridge_ops *br_ops; struct fpga_image_info *info; struct list_head node; void *priv; }; #define to_fpga_bridge(d) container_of(d, struct fpga_bridge, dev) struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, struct fpga_image_info *info); struct fpga_bridge *fpga_bridge_get(struct device *dev, struct fpga_image_info *info); void fpga_bridge_put(struct fpga_bridge *bridge); int fpga_bridge_enable(struct fpga_bridge *bridge); int fpga_bridge_disable(struct fpga_bridge *bridge); int fpga_bridges_enable(struct list_head *bridge_list); int fpga_bridges_disable(struct list_head *bridge_list); void fpga_bridges_put(struct list_head *bridge_list); int fpga_bridge_get_to_list(struct device *dev, struct fpga_image_info *info, struct list_head *bridge_list); int of_fpga_bridge_get_to_list(struct device_node *np, struct fpga_image_info *info, struct list_head *bridge_list); struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name, const struct fpga_bridge_ops *br_ops, void *priv); void fpga_bridge_free(struct fpga_bridge *br); int fpga_bridge_register(struct fpga_bridge *br); void fpga_bridge_unregister(struct fpga_bridge *br); struct fpga_bridge *devm_fpga_bridge_create(struct device *dev, const char *name, const struct fpga_bridge_ops *br_ops, void *priv); #endif /* _LINUX_FPGA_BRIDGE_H */ fpga/fpga-region.h 0000644 00000003045 14722070374 0010040 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FPGA_REGION_H #define _FPGA_REGION_H #include <linux/device.h> #include <linux/fpga/fpga-mgr.h> #include <linux/fpga/fpga-bridge.h> /** * struct fpga_region - FPGA Region structure * @dev: FPGA Region device * @mutex: enforces exclusive reference to region * @bridge_list: list of FPGA bridges specified in region * @mgr: FPGA manager * @info: FPGA image info * @compat_id: FPGA region id for compatibility check.
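 *
 * (A hedged aside on fpga-bridge.h above: a minimal low-level bridge
 * driver sketch. example_br_enable_set(), example_bridge_probe() and the
 * soft gate they pretend to drive are hypothetical.)
 */

static int example_br_enable_set(struct fpga_bridge *bridge, bool enable)
{
        /* a real driver would open or close the bus gate via bridge->priv */
        return 0;
}

static const struct fpga_bridge_ops example_br_ops = {
        .enable_set = example_br_enable_set,
};

static int example_bridge_probe(struct device *dev)
{
        struct fpga_bridge *br;

        br = devm_fpga_bridge_create(dev, "example bridge",
                                     &example_br_ops, NULL);
        if (!br)
                return -ENOMEM;
        return fpga_bridge_register(br);
}

/* (comment continued)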
* @priv: private data * @get_bridges: optional function to get bridges to a list */ struct fpga_region { struct device dev; struct mutex mutex; /* for exclusive reference to region */ struct list_head bridge_list; struct fpga_manager *mgr; struct fpga_image_info *info; struct fpga_compat_id *compat_id; void *priv; int (*get_bridges)(struct fpga_region *region); }; #define to_fpga_region(d) container_of(d, struct fpga_region, dev) struct fpga_region *fpga_region_class_find( struct device *start, const void *data, int (*match)(struct device *, const void *)); int fpga_region_program_fpga(struct fpga_region *region); struct fpga_region *fpga_region_create(struct device *dev, struct fpga_manager *mgr, int (*get_bridges)(struct fpga_region *)); void fpga_region_free(struct fpga_region *region); int fpga_region_register(struct fpga_region *region); void fpga_region_unregister(struct fpga_region *region); struct fpga_region *devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr, int (*get_bridges)(struct fpga_region *)); #endif /* _FPGA_REGION_H */ fpga/fpga-mgr.h 0000644 00000015213 14722070374 0007342 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * FPGA Framework * * Copyright (C) 2013-2016 Altera Corporation * Copyright (C) 2017 Intel Corporation */ #ifndef _LINUX_FPGA_MGR_H #define _LINUX_FPGA_MGR_H #include <linux/mutex.h> #include <linux/platform_device.h> struct fpga_manager; struct sg_table; /** * enum fpga_mgr_states - fpga framework states * @FPGA_MGR_STATE_UNKNOWN: can't determine state * @FPGA_MGR_STATE_POWER_OFF: FPGA power is off * @FPGA_MGR_STATE_POWER_UP: FPGA reports power is up * @FPGA_MGR_STATE_RESET: FPGA in reset state * @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress * @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed * @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming * @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage * @FPGA_MGR_STATE_WRITE: writing image to FPGA * @FPGA_MGR_STATE_WRITE_ERR: Error while writing FPGA * @FPGA_MGR_STATE_WRITE_COMPLETE: Doing post programming steps * @FPGA_MGR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE * @FPGA_MGR_STATE_OPERATING: FPGA is programmed and operating */ enum fpga_mgr_states { /* default FPGA states */ FPGA_MGR_STATE_UNKNOWN, FPGA_MGR_STATE_POWER_OFF, FPGA_MGR_STATE_POWER_UP, FPGA_MGR_STATE_RESET, /* getting an image for loading */ FPGA_MGR_STATE_FIRMWARE_REQ, FPGA_MGR_STATE_FIRMWARE_REQ_ERR, /* write sequence: init, write, complete */ FPGA_MGR_STATE_WRITE_INIT, FPGA_MGR_STATE_WRITE_INIT_ERR, FPGA_MGR_STATE_WRITE, FPGA_MGR_STATE_WRITE_ERR, FPGA_MGR_STATE_WRITE_COMPLETE, FPGA_MGR_STATE_WRITE_COMPLETE_ERR, /* fpga is programmed and operating */ FPGA_MGR_STATE_OPERATING, }; /** * DOC: FPGA Manager flags * * Flags used in the &fpga_image_info->flags field * * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported * * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting * * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted * * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first * * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed */ #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) #define FPGA_MGR_EXTERNAL_CONFIG BIT(1) #define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2) #define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3) #define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4) /** * struct fpga_image_info - information specific to a FPGA image * @flags: boolean flags as defined above * @enable_timeout_us: maximum time to 
enable traffic through bridge (uSec) * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) * @config_complete_timeout_us: maximum time for FPGA to switch to operating * status in the write_complete op. * @firmware_name: name of FPGA image firmware file * @sgt: scatter/gather table containing FPGA image * @buf: contiguous buffer containing FPGA image * @count: size of buf * @region_id: id of target region * @dev: device that owns this * @overlay: Device Tree overlay */ struct fpga_image_info { u32 flags; u32 enable_timeout_us; u32 disable_timeout_us; u32 config_complete_timeout_us; char *firmware_name; struct sg_table *sgt; const char *buf; size_t count; int region_id; struct device *dev; #ifdef CONFIG_OF struct device_node *overlay; #endif }; /** * struct fpga_manager_ops - ops for low level fpga manager drivers * @initial_header_size: Maximum number of bytes that should be passed into write_init * @state: returns an enum value of the FPGA's state * @status: returns status of the FPGA, including reconfiguration error code * @write_init: prepare the FPGA to receive configuration data * @write: write count bytes of configuration data to the FPGA * @write_sg: write the scatter list of configuration data to the FPGA * @write_complete: set FPGA to operating state after writing is done * @fpga_remove: optional: Set FPGA into a specific state during driver remove * @groups: optional attribute groups. * * fpga_manager_ops are the low level functions implemented by a specific * fpga manager driver. The optional ones are tested for NULL before being * called, so leaving them out is fine. */ struct fpga_manager_ops { size_t initial_header_size; enum fpga_mgr_states (*state)(struct fpga_manager *mgr); u64 (*status)(struct fpga_manager *mgr); int (*write_init)(struct fpga_manager *mgr, struct fpga_image_info *info, const char *buf, size_t count); int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt); int (*write_complete)(struct fpga_manager *mgr, struct fpga_image_info *info); void (*fpga_remove)(struct fpga_manager *mgr); const struct attribute_group **groups; }; /* FPGA manager status: Partial/Full Reconfiguration errors */ #define FPGA_MGR_STATUS_OPERATION_ERR BIT(0) #define FPGA_MGR_STATUS_CRC_ERR BIT(1) #define FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR BIT(2) #define FPGA_MGR_STATUS_IP_PROTOCOL_ERR BIT(3) #define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4) /** * struct fpga_compat_id - id for compatibility check * * @id_h: high 64bit of the compat_id * @id_l: low 64bit of the compat_id */ struct fpga_compat_id { u64 id_h; u64 id_l; }; /** * struct fpga_manager - fpga manager structure * @name: name of low level fpga manager * @dev: fpga manager device * @ref_mutex: only allows one reference to fpga manager * @state: state of fpga manager * @compat_id: FPGA manager id for compatibility check.
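 *
 * (A hedged sketch of a minimal fpga_manager_ops implementation for the
 * write sequence documented above: write_init, write, write_complete,
 * plus a state op. All example_mgr_* functions are hypothetical stubs
 * standing in for real hardware access.)
 */

static enum fpga_mgr_states example_mgr_state(struct fpga_manager *mgr)
{
        return FPGA_MGR_STATE_UNKNOWN;
}

static int example_mgr_write_init(struct fpga_manager *mgr,
                                  struct fpga_image_info *info,
                                  const char *buf, size_t count)
{
        if (info->flags & FPGA_MGR_PARTIAL_RECONFIG)
                return -EINVAL;         /* full reconfiguration only */
        return 0;                       /* put the device into program mode */
}

static int example_mgr_write(struct fpga_manager *mgr,
                             const char *buf, size_t count)
{
        return 0;                       /* stream bitstream bytes to hardware */
}

static int example_mgr_write_complete(struct fpga_manager *mgr,
                                      struct fpga_image_info *info)
{
        /* poll for DONE within info->config_complete_timeout_us */
        return 0;
}

static const struct fpga_manager_ops example_mgr_ops = {
        .state = example_mgr_state,
        .write_init = example_mgr_write_init,
        .write = example_mgr_write,
        .write_complete = example_mgr_write_complete,
};

/* (comment continued)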
* @mops: pointer to struct of fpga manager ops * @priv: low level driver private data */ struct fpga_manager { const char *name; struct device dev; struct mutex ref_mutex; enum fpga_mgr_states state; struct fpga_compat_id *compat_id; const struct fpga_manager_ops *mops; void *priv; }; #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) struct fpga_image_info *fpga_image_info_alloc(struct device *dev); void fpga_image_info_free(struct fpga_image_info *info); int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info); int fpga_mgr_lock(struct fpga_manager *mgr); void fpga_mgr_unlock(struct fpga_manager *mgr); struct fpga_manager *of_fpga_mgr_get(struct device_node *node); struct fpga_manager *fpga_mgr_get(struct device *dev); void fpga_mgr_put(struct fpga_manager *mgr); struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name, const struct fpga_manager_ops *mops, void *priv); void fpga_mgr_free(struct fpga_manager *mgr); int fpga_mgr_register(struct fpga_manager *mgr); void fpga_mgr_unregister(struct fpga_manager *mgr); struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name, const struct fpga_manager_ops *mops, void *priv); #endif /*_LINUX_FPGA_MGR_H */ reset/sunxi.h 0000644 00000000262 14722070374 0007213 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_RESET_SUNXI_H__ #define __LINUX_RESET_SUNXI_H__ void __init sun6i_reset_init(void); #endif /* __LINUX_RESET_SUNXI_H__ */ reset/bcm63xx_pmb.h 0000644 00000004220 14722070374 0010173 0 ustar 00 /* * Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset) * * Copyright (C) 2015, Broadcom Corporation * Author: Florian Fainelli <f.fainelli@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef __BCM63XX_PMB_H #define __BCM63XX_PMB_H #include <linux/io.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/err.h> /* PMB Master controller register */ #define PMB_CTRL 0x00 #define PMC_PMBM_START (1 << 31) #define PMC_PMBM_TIMEOUT (1 << 30) #define PMC_PMBM_SLAVE_ERR (1 << 29) #define PMC_PMBM_BUSY (1 << 28) #define PMC_PMBM_READ (0 << 20) #define PMC_PMBM_WRITE (1 << 20) #define PMB_WR_DATA 0x04 #define PMB_TIMEOUT 0x08 #define PMB_RD_DATA 0x0C #define PMB_BUS_ID_SHIFT 8 /* Perform the low-level PMB master operation, shared between reads and * writes.
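 *
 * (A hedged aside, reaching back to fpga-mgr.h above: a sketch of the
 * consumer-side programming sequence built from the declarations there.
 * example_program_fpga() and the firmware name "example-fw.rbf" are
 * hypothetical.)
 */

static int example_program_fpga(struct device *dev, struct fpga_manager *mgr)
{
        struct fpga_image_info *info;
        int ret;

        info = fpga_image_info_alloc(dev);
        if (!info)
                return -ENOMEM;
        info->firmware_name = devm_kstrdup(dev, "example-fw.rbf", GFP_KERNEL);
        if (!info->firmware_name) {
                fpga_image_info_free(info);
                return -ENOMEM;
        }

        ret = fpga_mgr_lock(mgr);       /* one programming user at a time */
        if (!ret) {
                ret = fpga_mgr_load(mgr, info);
                fpga_mgr_unlock(mgr);
        }
        fpga_image_info_free(info);
        return ret;
}

/* (comment continued)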
reset/socfpga.h 0000644 00000000272 14722070374 0007470 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_RESET_SOCFPGA_H__ #define __LINUX_RESET_SOCFPGA_H__ void __init socfpga_reset_init(void); #endif /* __LINUX_RESET_SOCFPGA_H__ */ hid-sensor-ids.h 0000644 00000015251 14722070374 0007557 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * HID Sensors Driver * Copyright (c) 2012, Intel Corporation. */ #ifndef _HID_SENSORS_IDS_H #define _HID_SENSORS_IDS_H #define HID_MAX_PHY_DEVICES 0xFF #define HID_USAGE_SENSOR_COLLECTION 0x200001 /* Accel 3D (200073) */ #define HID_USAGE_SENSOR_ACCEL_3D 0x200073 #define HID_USAGE_SENSOR_DATA_ACCELERATION 0x200452 #define HID_USAGE_SENSOR_ACCEL_X_AXIS 0x200453 #define HID_USAGE_SENSOR_ACCEL_Y_AXIS 0x200454 #define HID_USAGE_SENSOR_ACCEL_Z_AXIS 0x200455 /* ALS (200041) */ #define HID_USAGE_SENSOR_ALS 0x200041 #define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0 #define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1 /* PROX (200011) */ #define HID_USAGE_SENSOR_PROX 0x200011 #define HID_USAGE_SENSOR_DATA_PRESENCE 0x2004b0 #define HID_USAGE_SENSOR_HUMAN_PRESENCE 0x2004b1 /* Pressure (200031) */ #define HID_USAGE_SENSOR_PRESSURE 0x200031 #define HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE 0x200430 #define HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE 0x200431 /* Temperature (200033) */ #define HID_USAGE_SENSOR_TEMPERATURE 0x200033 #define HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE 0x200434 /* humidity */ #define HID_USAGE_SENSOR_HUMIDITY 0x200032 #define HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY 0x200433 /* Gyro 3D: (200076) */ #define HID_USAGE_SENSOR_GYRO_3D 0x200076 #define HID_USAGE_SENSOR_DATA_ANGL_VELOCITY 0x200456 #define HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS 0x200457 #define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458 #define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459 /* Gravity vector */ #define HID_USAGE_SENSOR_GRAVITY_VECTOR 0x20007B /* ORIENTATION: Compass 3D: (200083) */ #define HID_USAGE_SENSOR_COMPASS_3D 0x200083 #define HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470 #define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING 0x200471 #define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_X 0x200472 #define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_Y 0x200473 #define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_Z 0x200474 #define HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH 0x200475 #define HID_USAGE_SENSOR_ORIENT_COMP_TRUE_NORTH 0x200476 #define HID_USAGE_SENSOR_ORIENT_MAGN_NORTH 0x200477 #define HID_USAGE_SENSOR_ORIENT_TRUE_NORTH 0x200478 #define HID_USAGE_SENSOR_ORIENT_DISTANCE 0x200479 #define HID_USAGE_SENSOR_ORIENT_DISTANCE_X 0x20047A #define HID_USAGE_SENSOR_ORIENT_DISTANCE_Y 0x20047B #define
HID_USAGE_SENSOR_ORIENT_DISTANCE_Z 0x20047C #define HID_USAGE_SENSOR_ORIENT_DISTANCE_OUT_OF_RANGE 0x20047D /* ORIENTATION: Inclinometer 3D: (200086) */ #define HID_USAGE_SENSOR_INCLINOMETER_3D 0x200086 #define HID_USAGE_SENSOR_ORIENT_TILT 0x20047E #define HID_USAGE_SENSOR_ORIENT_TILT_X 0x20047F #define HID_USAGE_SENSOR_ORIENT_TILT_Y 0x200480 #define HID_USAGE_SENSOR_ORIENT_TILT_Z 0x200481 #define HID_USAGE_SENSOR_DEVICE_ORIENTATION 0x20008A #define HID_USAGE_SENSOR_RELATIVE_ORIENTATION 0x20008E #define HID_USAGE_SENSOR_GEOMAGNETIC_ORIENTATION 0x2000C1 #define HID_USAGE_SENSOR_ORIENT_ROTATION_MATRIX 0x200482 #define HID_USAGE_SENSOR_ORIENT_QUATERNION 0x200483 #define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX 0x200484 #define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_X_AXIS 0x200485 #define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Y_AXIS 0x200486 #define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Z_AXIS 0x200487 /* Time (2000a0) */ #define HID_USAGE_SENSOR_TIME 0x2000a0 #define HID_USAGE_SENSOR_TIME_YEAR 0x200521 #define HID_USAGE_SENSOR_TIME_MONTH 0x200522 #define HID_USAGE_SENSOR_TIME_DAY 0x200523 #define HID_USAGE_SENSOR_TIME_HOUR 0x200525 #define HID_USAGE_SENSOR_TIME_MINUTE 0x200526 #define HID_USAGE_SENSOR_TIME_SECOND 0x200527 #define HID_USAGE_SENSOR_TIME_TIMESTAMP 0x200529 /* Units */ #define HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED 0x00 #define HID_USAGE_SENSOR_UNITS_LUX 0x01 #define HID_USAGE_SENSOR_UNITS_KELVIN 0x01000100 #define HID_USAGE_SENSOR_UNITS_FAHRENHEIT 0x03000100 #define HID_USAGE_SENSOR_UNITS_PASCAL 0xF1E1 #define HID_USAGE_SENSOR_UNITS_NEWTON 0x11E1 #define HID_USAGE_SENSOR_UNITS_METERS_PER_SECOND 0x11F0 #define HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD 0x11E0 #define HID_USAGE_SENSOR_UNITS_FARAD 0xE14F2000 #define HID_USAGE_SENSOR_UNITS_AMPERE 0x01001000 #define HID_USAGE_SENSOR_UNITS_WATT 0x21d1 #define HID_USAGE_SENSOR_UNITS_HENRY 0x21E1E000 #define HID_USAGE_SENSOR_UNITS_OHM 0x21D1E000 #define HID_USAGE_SENSOR_UNITS_VOLT 0x21D1F000 #define HID_USAGE_SENSOR_UNITS_HERTZ 0x01F0 #define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SEC_SQRD 0x14E0 #define HID_USAGE_SENSOR_UNITS_RADIANS 0x12 #define HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND 0x12F0 #define HID_USAGE_SENSOR_UNITS_RADIANS_PER_SEC_SQRD 0x12E0 #define HID_USAGE_SENSOR_UNITS_SECOND 0x0110 #define HID_USAGE_SENSOR_UNITS_GAUSS 0x01E1F000 #define HID_USAGE_SENSOR_UNITS_GRAM 0x0101 #define HID_USAGE_SENSOR_UNITS_CENTIMETER 0x11 #define HID_USAGE_SENSOR_UNITS_G 0x1A #define HID_USAGE_SENSOR_UNITS_MILLISECOND 0x19 #define HID_USAGE_SENSOR_UNITS_PERCENT 0x17 #define HID_USAGE_SENSOR_UNITS_DEGREES 0x14 #define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15 /* Common selectors */ #define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E #define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F #define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310 #define HID_USAGE_SENSOR_PROP_SENSITIVITY_REL_PCT 0x200311 #define HID_USAGE_SENSOR_PROP_ACCURACY 0x200312 #define HID_USAGE_SENSOR_PROP_RESOLUTION 0x200313 #define HID_USAGE_SENSOR_PROP_RANGE_MAXIMUM 0x200314 #define HID_USAGE_SENSOR_PROP_RANGE_MINIMUM 0x200315 #define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316 #define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319 /* Batch mode selectors */ #define HID_USAGE_SENSOR_PROP_REPORT_LATENCY 0x20031B /* Per data field properties */ #define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00 #define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000 /* Power state enumerations */ #define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850 #define 
HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x200851 #define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x200852 #define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x200853 #define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x200854 #define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x200855 /* Report State enumerations */ #define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840 #define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841 #endif packing.h 0000644 00000003437 14722070374 0006346 0 ustar 00 /* SPDX-License-Identifier: BSD-3-Clause * Copyright (c) 2016-2018, NXP Semiconductors * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> */ #ifndef _LINUX_PACKING_H #define _LINUX_PACKING_H #include <linux/types.h> #include <linux/bitops.h> #define QUIRK_MSB_ON_THE_RIGHT BIT(0) #define QUIRK_LITTLE_ENDIAN BIT(1) #define QUIRK_LSW32_IS_FIRST BIT(2) enum packing_op { PACK, UNPACK, }; /** * packing - Convert numbers (currently u64) between a packed and an unpacked * format. Unpacked means laid out in memory in the CPU's native * understanding of integers, while packed means anything else that * requires translation. * * @pbuf: Pointer to a buffer holding the packed value. * @uval: Pointer to an u64 holding the unpacked value. * @startbit: The index (in logical notation, compensated for quirks) where * the packed value starts within pbuf. Must be larger than, or * equal to, endbit. * @endbit: The index (in logical notation, compensated for quirks) where * the packed value ends within pbuf. Must be smaller than, or equal * to, startbit. * @op: If PACK, then uval will be treated as const pointer and copied (packed) * into pbuf, between startbit and endbit. * If UNPACK, then pbuf will be treated as const pointer and the logical * value between startbit and endbit will be copied (unpacked) to uval. * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and * QUIRK_MSB_ON_THE_RIGHT. * * Return: 0 on success, -EINVAL or -ERANGE if called incorrectly. Assuming * correct usage, return code may be discarded. * If op is PACK, pbuf is modified. * If op is UNPACK, uval is modified. */ int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen, enum packing_op op, u8 quirks); #endif
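A usage sketch for the declaration above (editorial addition): pack a 12-bit value into bits 27..16 of an 8-byte buffer, then unpack it again; error handling is collapsed for brevity.

#include <linux/packing.h>

static int packing_demo(void)
{
	u8 buf[8] = {};
	u64 val = 0xabc;
	int err;

	/* PACK: copy the low 12 bits of val into bits 27..16 of buf */
	err = packing(buf, &val, 27, 16, sizeof(buf), PACK, 0);
	if (err)
		return err;

	/* UNPACK: read the same field back into val */
	val = 0;
	return packing(buf, &val, 27, 16, sizeof(buf), UNPACK, 0);
}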
bcm963xx_tag.h 0000644 00000007145 14722070374 0007150 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BCM963XX_TAG_H__ #define __LINUX_BCM963XX_TAG_H__ #include <linux/types.h> #define TAGVER_LEN 4 /* Length of Tag Version */ #define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */ #define SIG1_LEN 20 /* Company Signature 1 Length */ #define SIG2_LEN 14 /* Company Signature 2 Length */ #define BOARDID_LEN 16 /* Length of BoardId */ #define ENDIANFLAG_LEN 2 /* Endian Flag Length */ #define CHIPID_LEN 6 /* Chip Id Length */ #define IMAGE_LEN 10 /* Length of Length Field */ #define ADDRESS_LEN 12 /* Length of Address field */ #define IMAGE_SEQUENCE_LEN 4 /* Image sequence Length */ #define RSASIG_LEN 20 /* Length of RSA Signature in tag */ #define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */ #define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */ #define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */ #define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */ #define NUM_PIRELLI 2 #define IMAGETAG_CRC_START 0xFFFFFFFF #define PIRELLI_BOARDS { \ "AGPF-S0", \ "DWV-S0", \ } /* Extended flash address, needs to be subtracted * from bcm_tag flash image offsets. */ #define BCM963XX_EXTENDED_SIZE 0xBFC00000 /* * The broadcom firmware assumes the rootfs starts the image, * therefore uses the rootfs start (flash_image_address) * to determine where to flash the image. Since we have the kernel first * we have to give it the kernel address, but the crc uses the length * associated with this address (root_length), which is added to the kernel * length (kernel_length) to determine the length of image to flash and thus * needs to be rootfs + deadcode (jffs2 EOF marker) */ struct bcm_tag { /* 0-3: Version of the image tag */ char tag_version[TAGVER_LEN]; /* 4-23: Company Line 1 */ char sig_1[SIG1_LEN]; /* 24-37: Company Line 2 */ char sig_2[SIG2_LEN]; /* 38-43: Chip this image is for */ char chip_id[CHIPID_LEN]; /* 44-59: Board name */ char board_id[BOARDID_LEN]; /* 60-61: Map endianness -- 1 BE 0 LE */ char big_endian[ENDIANFLAG_LEN]; /* 62-71: Total length of image */ char total_length[IMAGE_LEN]; /* 72-83: Address in memory of CFE */ char cfe__address[ADDRESS_LEN]; /* 84-93: Size of CFE */ char cfe_length[IMAGE_LEN]; /* 94-105: Address in memory of image start * (kernel for OpenWRT, rootfs for stock firmware) */ char flash_image_start[ADDRESS_LEN]; /* 106-115: Size of rootfs */ char root_length[IMAGE_LEN]; /* 116-127: Address in memory of kernel */ char kernel_address[ADDRESS_LEN]; /* 128-137: Size of kernel */ char kernel_length[IMAGE_LEN]; /* 138-141: Image sequence number * (to be incremented when flashed with a new image) */ char image_sequence[IMAGE_SEQUENCE_LEN]; /* 142-161: RSA Signature (not used; some vendors may use this) */ char rsa_signature[RSASIG_LEN]; /* 162-191: Compilation and related information (not used in OpenWrt) */ char information1[TAGINFO1_LEN]; /* 192-195: Version flash layout */ char flash_layout_ver[FLASHLAYOUTVER_LEN]; /* 196-199: kernel+rootfs CRC32 */ __u32 fskernel_crc; /* 200-215: Unused except on Alice Gate where it is information */ char information2[TAGINFO2_LEN]; /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */ __u32 image_crc; /* 220-223: CRC32 of rootfs partition */ __u32 rootfs_crc; /* 224-227: CRC32 of kernel partition */ __u32 kernel_crc; /* 228-235: Unused at present */ char reserved1[8]; /* 236-239: CRC32 of header excluding last 20 bytes */ __u32 header_crc; /* 240-255: Unused at present */ char reserved2[16]; }; #endif /* __LINUX_BCM963XX_TAG_H__ */
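A hedged sketch (editorial addition) of validating the header CRC described above: CRC-32 seeded with IMAGETAG_CRC_START over everything before header_crc, i.e. the first 236 bytes. The byte order of the stored CRC can vary between image generators, so treat this as illustrative only.

#include <linux/crc32.h>
#include <linux/stddef.h>

static bool bcm_tag_header_crc_ok(const struct bcm_tag *tag)
{
	u32 crc = crc32_le(IMAGETAG_CRC_START, (const u8 *)tag,
			   offsetof(struct bcm_tag, header_crc));

	/* assumes the CRC was stored in CPU byte order */
	return crc == tag->header_crc;
}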
virtio_byteorder.h 0000644 00000002723 14722070374 0010322 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_VIRTIO_BYTEORDER_H #define _LINUX_VIRTIO_BYTEORDER_H #include <linux/types.h> #include <uapi/linux/virtio_types.h> static inline bool virtio_legacy_is_little_endian(void) { #ifdef __LITTLE_ENDIAN return true; #else return false; #endif } static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val) { if (little_endian) return le16_to_cpu((__force __le16)val); else return be16_to_cpu((__force __be16)val); } static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val) { if (little_endian) return (__force __virtio16)cpu_to_le16(val); else return (__force __virtio16)cpu_to_be16(val); } static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val) { if (little_endian) return le32_to_cpu((__force __le32)val); else return be32_to_cpu((__force __be32)val); } static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val) { if (little_endian) return (__force __virtio32)cpu_to_le32(val); else return (__force __virtio32)cpu_to_be32(val); } static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val) { if (little_endian) return le64_to_cpu((__force __le64)val); else return be64_to_cpu((__force __be64)val); } static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val) { if (little_endian) return (__force __virtio64)cpu_to_le64(val); else return (__force __virtio64)cpu_to_be64(val); } #endif /* _LINUX_VIRTIO_BYTEORDER_H */
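A brief usage sketch (editorial addition): reading a __virtio16 field with the correct endianness. Whether the device is little-endian normally comes from virtio_is_little_endian(vdev) in <linux/virtio_config.h>; here it is passed in as a plain flag.

static inline u16 demo_virtio16_read(__virtio16 raw, bool device_is_le)
{
	/* legacy devices use guest-native order, modern ones are LE */
	return __virtio16_to_cpu(device_is_le, raw);
}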
verification.h 0000644 00000003375 14722070374 0007415 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Signature verification * * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_VERIFICATION_H #define _LINUX_VERIFICATION_H /* * Indicate that both builtin trusted keys and secondary trusted keys * should be used. */ #define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL) #define VERIFY_USE_PLATFORM_KEYRING ((struct key *)2UL) /* * The use to which an asymmetric key is being put. */ enum key_being_used_for { VERIFYING_MODULE_SIGNATURE, VERIFYING_FIRMWARE_SIGNATURE, VERIFYING_KEXEC_PE_SIGNATURE, VERIFYING_KEY_SIGNATURE, VERIFYING_KEY_SELF_SIGNATURE, VERIFYING_UNSPECIFIED_SIGNATURE, NR__KEY_BEING_USED_FOR }; extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR]; #ifdef CONFIG_SYSTEM_DATA_VERIFICATION struct key; struct pkcs7_message; extern int verify_pkcs7_signature(const void *data, size_t len, const void *raw_pkcs7, size_t pkcs7_len, struct key *trusted_keys, enum key_being_used_for usage, int (*view_content)(void *ctx, const void *data, size_t len, size_t asn1hdrlen), void *ctx); extern int verify_pkcs7_message_sig(const void *data, size_t len, struct pkcs7_message *pkcs7, struct key *trusted_keys, enum key_being_used_for usage, int (*view_content)(void *ctx, const void *data, size_t len, size_t asn1hdrlen), void *ctx); #ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION extern int verify_pefile_signature(const void *pebuf, unsigned pelen, struct key *trusted_keys, enum key_being_used_for usage); #endif #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ #endif /* _LINUX_VERIFICATION_H */
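A usage sketch (editorial addition) for verify_pkcs7_signature(): checking a detached PKCS#7 signature over a blob against the builtin and secondary trusted keyrings. Only meaningful with CONFIG_SYSTEM_DATA_VERIFICATION enabled; error handling is collapsed into the return value.

#include <linux/verification.h>

static int demo_verify_blob(const void *data, size_t data_len,
			    const void *pkcs7, size_t pkcs7_len)
{
	return verify_pkcs7_signature(data, data_len, pkcs7, pkcs7_len,
				      VERIFY_USE_SECONDARY_KEYRING,
				      VERIFYING_UNSPECIFIED_SIGNATURE,
				      NULL, NULL);
}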
lsm_hooks.h 0000644 00000273450 14722070374 0006724 0 ustar 00 /* * Linux Security Module interfaces * * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> * Copyright (C) 2001 James Morris <jmorris@intercode.com.au> * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) * Copyright (C) 2015 Intel Corporation. * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com> * Copyright (C) 2016 Mellanox Technologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Due to this file being licensed under the GPL there is controversy over * whether this permits you to write a module that #includes this file * without placing your module under the GPL. Please consult a lawyer for * advice before doing this. * */ #ifndef __LINUX_LSM_HOOKS_H #define __LINUX_LSM_HOOKS_H #include <linux/security.h> #include <linux/init.h> #include <linux/rculist.h> /** * union security_list_options - Linux Security Module hook function list * * Security hooks for program execution operations. * * @bprm_set_creds: * Save security information in the bprm->security field, typically based * on information about the bprm->file, for later use by the apply_creds * hook. This hook may also optionally check permissions (e.g. for * transitions between security domains). * This hook may be called multiple times during a single execve, e.g. for * interpreters. The hook can tell whether it has already been called by * checking to see if @bprm->security is non-NULL. If so, then the hook * may decide either to retain the security information saved earlier or * to replace it. The hook must set @bprm->secureexec to 1 if a "secure * exec" has happened as a result of this hook call. The flag is used to * indicate the need for a sanitized execution environment, and is also * passed in the ELF auxiliary table on the initial stack to indicate * whether libc should enable secure mode. * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_check_security: * This hook mediates the point when a search for a binary handler will * begin. It allows a check of the @bprm->security value which is set in the * preceding set_creds call. The primary difference from set_creds is * that the argv list and envp list are reliably available in @bprm. This * hook may be called multiple times during a single execve; and in each * pass set_creds is called first. * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_committing_creds: * Prepare to install the new security attributes of a process being * transformed by an execve operation, based on the old credentials * pointed to by @current->cred and the information set in @bprm->cred by * the bprm_set_creds hook. @bprm points to the linux_binprm structure. * This hook is a good place to perform state changes on the process such * as closing open file descriptors to which access will no longer be * granted when the attributes are changed. This is called immediately * before commit_creds(). * @bprm_committed_creds: * Tidy up after the installation of the new security attributes of a * process being transformed by an execve operation. The new credentials * have, by this point, been set to @current->cred. @bprm points to the * linux_binprm structure. This hook is a good place to perform state * changes on the process such as clearing out non-inheritable signal * state. This is called immediately after commit_creds(). * * Security hooks for mount using fs_context. * [See also Documentation/filesystems/mount_api.txt] * * @fs_context_dup: * Allocate and attach a security structure to fc->security. This pointer * is initialised to NULL by the caller. * @fc indicates the new filesystem context. * @src_fc indicates the original filesystem context. * @fs_context_parse_param: * Userspace provided a parameter to configure a superblock. The LSM may * reject it with an error and may use it for itself, in which case it * should return 0; otherwise it should return -ENOPARAM to pass it on to * the filesystem. * @fc indicates the filesystem context. * @param The parameter * * Security hooks for filesystem operations. * * @sb_alloc_security: * Allocate and attach a security structure to the sb->s_security field. * The s_security field is initialized to NULL when the structure is * allocated. * @sb contains the super_block structure to be modified. * Return 0 if operation was successful. * @sb_free_security: * Deallocate and clear the sb->s_security field. * @sb contains the super_block structure to be modified. * @sb_statfs: * Check permission before obtaining filesystem statistics for the @mnt * mountpoint. * @dentry is a handle on the superblock for the filesystem. * Return 0 if permission is granted. * @sb_mount: * Check permission before an object specified by @dev_name is mounted on * the mount point named by @nd. For an ordinary mount, @dev_name * identifies a device if the file system type requires a device. For a * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the * pathname of the object being mounted. * @dev_name contains the name for object being mounted. * @path contains the path for mount point object. * @type contains the filesystem type. * @flags contains the mount flags. * @data contains the filesystem-specific data. * Return 0 if permission is granted.
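 *
 * Editorial example (a minimal sketch, not part of the original text):
 * a hypothetical "example" LSM wiring up the sb_mount hook documented
 * above. LSM_HOOK_INIT() and security_add_hooks() are provided further
 * down in this header; MS_BIND and capable() come from <linux/fs.h>
 * and <linux/capability.h>.
 *
 *	static int example_sb_mount(const char *dev_name,
 *				    const struct path *path,
 *				    const char *type, unsigned long flags,
 *				    void *data)
 *	{
 *		/* illustrative policy: bind mounts need CAP_SYS_ADMIN */
 *		if ((flags & MS_BIND) && !capable(CAP_SYS_ADMIN))
 *			return -EPERM;
 *		return 0;
 *	}
 *
 *	static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
 *		LSM_HOOK_INIT(sb_mount, example_sb_mount),
 *	};
 *
 *	/* called from the LSM's init function */
 *	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
 *			   "example");
 *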
* @sb_copy_data: * Allow mount option data to be copied prior to parsing by the filesystem, * so that the security module can extract security-specific mount * options cleanly (a filesystem may modify the data e.g. with strsep()). * This also allows the original mount data to be stripped of security- * specific options to avoid having to make filesystems aware of them. * @orig the original mount data copied from userspace. * @copy copied data which will be passed to the security module. * Returns 0 if the copy was successful. * @sb_remount: * Extracts security system specific mount options and verifies no changes * are being made to those options. * @sb superblock being remounted * @data contains the filesystem-specific data. * Return 0 if permission is granted. * @sb_umount: * Check permission before the @mnt file system is unmounted. * @mnt contains the mounted file system. * @flags contains the unmount flags, e.g. MNT_FORCE. * Return 0 if permission is granted. * @sb_pivotroot: * Check permission before pivoting the root filesystem. * @old_path contains the path for the new location of the * current root (put_old). * @new_path contains the path for the new root (new_root). * Return 0 if permission is granted. * @sb_set_mnt_opts: * Set the security relevant mount options used for a superblock * @sb the superblock to set security mount options for * @opts binary data structure containing all lsm mount data * @sb_clone_mnt_opts: * Copy all security options from a given superblock to another * @oldsb old superblock which contains information to clone * @newsb new superblock which needs to be filled in * @sb_parse_opts_str: * Parse a string of security data filling in the opts structure * @options string containing all mount options known by the LSM * @opts binary data structure usable by the LSM * @move_mount: * Check permission before a mount is moved. * @from_path indicates the mount that is going to be moved. * @to_path indicates the mountpoint that will be mounted upon. * @dentry_init_security: * Compute a context for a dentry as the inode is not yet available * since NFSv4 has no label backed by an EA anyway. * @dentry dentry to use in calculating the context. * @mode mode used to determine resource type. * @name name of the last path component used to create file * @ctx pointer to place the pointer to the resulting context in. * @ctxlen points to the place for the length of the resulting context. * @dentry_create_files_as: * Compute a context for a dentry as the inode is not yet available * and set that context in passed in creds so that new files are * created using that context. Context is calculated using the * passed in creds and not the creds of the caller. * @dentry dentry to use in calculating the context. * @mode mode used to determine resource type. * @name name of the last path component used to create file * @old creds which should be used for context calculation * @new creds to modify * * * Security hooks for inode operations. * * @inode_alloc_security: * Allocate and attach a security structure to @inode->i_security. The * i_security field is initialized to NULL when the inode structure is * allocated. * @inode contains the inode structure. * Return 0 if operation was successful. * @inode_free_security: * @inode contains the inode structure. * Deallocate the inode security structure and set @inode->i_security to * NULL. * @inode_init_security: * Obtain the security attribute name suffix and value to set on a newly * created inode and set up the incore security field for the new inode.
* This hook is called by the fs code as part of the inode creation * transaction and provides for atomic labeling of the inode, unlike * the post_create/mkdir/... hooks called by the VFS. The hook function * is expected to allocate the name and value via kmalloc, with the caller * being responsible for calling kfree after using them. * If the security module does not use security attributes or does * not wish to put a security attribute on this particular inode, * then it should return -EOPNOTSUPP to skip this processing. * @inode contains the inode structure of the newly created inode. * @dir contains the inode structure of the parent directory. * @qstr contains the last path component of the new object * @name will be set to the allocated name suffix (e.g. selinux). * @value will be set to the allocated attribute value. * @len will be set to the length of the value. * Returns 0 if @name and @value have been successfully set, * -EOPNOTSUPP if no security attribute is needed, or * -ENOMEM on memory allocation failure. * @inode_create: * Check permission to create a regular file. * @dir contains inode structure of the parent of the new file. * @dentry contains the dentry structure for the file to be created. * @mode contains the file mode of the file to be created. * Return 0 if permission is granted. * @inode_link: * Check permission before creating a new hard link to a file. * @old_dentry contains the dentry structure for an existing * link to the file. * @dir contains the inode structure of the parent directory * of the new link. * @new_dentry contains the dentry structure for the new link. * Return 0 if permission is granted. * @path_link: * Check permission before creating a new hard link to a file. * @old_dentry contains the dentry structure for an existing link * to the file. * @new_dir contains the path structure of the parent directory of * the new link. * @new_dentry contains the dentry structure for the new link. * Return 0 if permission is granted. * @inode_unlink: * Check the permission to remove a hard link to a file. * @dir contains the inode structure of parent directory of the file. * @dentry contains the dentry structure for file to be unlinked. * Return 0 if permission is granted. * @path_unlink: * Check the permission to remove a hard link to a file. * @dir contains the path structure of parent directory of the file. * @dentry contains the dentry structure for file to be unlinked. * Return 0 if permission is granted. * @inode_symlink: * Check the permission to create a symbolic link to a file. * @dir contains the inode structure of parent directory of * the symbolic link. * @dentry contains the dentry structure of the symbolic link. * @old_name contains the pathname of file. * Return 0 if permission is granted. * @path_symlink: * Check the permission to create a symbolic link to a file. * @dir contains the path structure of parent directory of * the symbolic link. * @dentry contains the dentry structure of the symbolic link. * @old_name contains the pathname of file. * Return 0 if permission is granted. * @inode_mkdir: * Check permissions to create a new directory in the existing directory * associated with inode structure @dir. * @dir contains the inode structure of parent of the directory * to be created. * @dentry contains the dentry structure of new directory. * @mode contains the mode of new directory. * Return 0 if permission is granted. * @path_mkdir: * Check permissions to create a new directory in the existing directory * associated with path structure @path. 
* @dir contains the path structure of parent of the directory * to be created. * @dentry contains the dentry structure of new directory. * @mode contains the mode of new directory. * Return 0 if permission is granted. * @inode_rmdir: * Check the permission to remove a directory. * @dir contains the inode structure of parent of the directory * to be removed. * @dentry contains the dentry structure of directory to be removed. * Return 0 if permission is granted. * @path_rmdir: * Check the permission to remove a directory. * @dir contains the path structure of parent of the directory to be * removed. * @dentry contains the dentry structure of directory to be removed. * Return 0 if permission is granted. * @inode_mknod: * Check permissions when creating a special file (or a socket or a fifo * file created via the mknod system call). Note that if mknod operation * is being done for a regular file, then the create hook will be called * and not this hook. * @dir contains the inode structure of parent of the new file. * @dentry contains the dentry structure of the new file. * @mode contains the mode of the new file. * @dev contains the device number. * Return 0 if permission is granted. * @path_mknod: * Check permissions when creating a file. Note that this hook is called * even if mknod operation is being done for a regular file. * @dir contains the path structure of parent of the new file. * @dentry contains the dentry structure of the new file. * @mode contains the mode of the new file. * @dev contains the undecoded device number. Use new_decode_dev() to get * the decoded device number. * Return 0 if permission is granted. * @inode_rename: * Check for permission to rename a file or directory. * @old_dir contains the inode structure for parent of the old link. * @old_dentry contains the dentry structure of the old link. * @new_dir contains the inode structure for parent of the new link. * @new_dentry contains the dentry structure of the new link. * Return 0 if permission is granted. * @path_rename: * Check for permission to rename a file or directory. * @old_dir contains the path structure for parent of the old link. * @old_dentry contains the dentry structure of the old link. * @new_dir contains the path structure for parent of the new link. * @new_dentry contains the dentry structure of the new link. * Return 0 if permission is granted. * @path_chmod: * Check for permission to change a mode of the file @path. The new * mode is specified in @mode. * @path contains the path structure of the file to change the mode. * @mode contains the new DAC's permission, which is a bitmask of * constants from <include/uapi/linux/stat.h> * Return 0 if permission is granted. * @path_chown: * Check for permission to change owner/group of a file or directory. * @path contains the path structure. * @uid contains new owner's ID. * @gid contains new group's ID. * Return 0 if permission is granted. * @path_chroot: * Check for permission to change root directory. * @path contains the path structure. * Return 0 if permission is granted. * @path_notify: * Check permissions before setting a watch on events as defined by @mask, * on an object at @path, whose type is defined by @obj_type. * @inode_readlink: * Check the permission to read the symbolic link. * @dentry contains the dentry structure for the file link. * Return 0 if permission is granted. * @inode_follow_link: * Check permission to follow a symbolic link when looking up a pathname. * @dentry contains the dentry structure for the link. 
* @inode contains the inode, which itself is not stable in RCU-walk * @rcu indicates whether we are in RCU-walk mode. * Return 0 if permission is granted. * @inode_permission: * Check permission before accessing an inode. This hook is called by the * existing Linux permission function, so a security module can use it to * provide additional checking for existing Linux permission checks. * Notice that this hook is called when a file is opened (as well as many * other operations), whereas the file_security_ops permission hook is * called when the actual read/write operations are performed. * @inode contains the inode structure to check. * @mask contains the permission mask. * Return 0 if permission is granted. * @inode_setattr: * Check permission before setting file attributes. Note that the kernel * call to notify_change is performed from several locations, whenever * file attributes change (such as when a file is truncated, chown/chmod * operations, transferring disk quotas, etc). * @dentry contains the dentry structure for the file. * @attr is the iattr structure containing the new file attributes. * Return 0 if permission is granted. * @path_truncate: * Check permission before truncating a file. * @path contains the path structure for the file. * Return 0 if permission is granted. * @inode_getattr: * Check permission before obtaining file attributes. * @path contains the path structure for the file. * Return 0 if permission is granted. * @inode_setxattr: * Check permission before setting the extended attributes * @value identified by @name for @dentry. * Return 0 if permission is granted. * @inode_post_setxattr: * Update inode security field after successful setxattr operation. * @value identified by @name for @dentry. * @inode_getxattr: * Check permission before obtaining the extended attributes * identified by @name for @dentry. * Return 0 if permission is granted. * @inode_listxattr: * Check permission before obtaining the list of extended attribute * names for @dentry. * Return 0 if permission is granted. * @inode_removexattr: * Check permission before removing the extended attribute * identified by @name for @dentry. * Return 0 if permission is granted. * @inode_getsecurity: * Retrieve a copy of the extended attribute representation of the * security label associated with @name for @inode via @buffer. Note that * @name is the remainder of the attribute name after the security prefix * has been removed. @alloc is used to specify if the call should return a * value via the buffer or just the value length. Return size of buffer on * success. * @inode_setsecurity: * Set the security label associated with @name for @inode from the * extended attribute value @value. @size indicates the size of the * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. * Note that @name is the remainder of the attribute name after the * security. prefix has been removed. * Return 0 on success. * @inode_listsecurity: * Copy the extended attribute names for the security labels * associated with @inode into @buffer. The maximum size of @buffer * is specified by @buffer_size. @buffer may be NULL to request * the size of the buffer required. * Returns number of bytes used/required on success. * @inode_need_killpriv: * Called when an inode has been changed. * @dentry is the dentry being changed. * Return <0 on error to abort the inode change operation. * Return 0 if inode_killpriv does not need to be called. * Return >0 if inode_killpriv does need to be called.
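 *
 * Editorial example (a hedged sketch): an inode_need_killpriv
 * implementation in the style of the capability LSM, reporting whether
 * a hypothetical "security.example" xattr exists and would therefore
 * need to be dropped on attribute change:
 *
 *	static int example_inode_need_killpriv(struct dentry *dentry)
 *	{
 *		int error;
 *
 *		error = __vfs_getxattr(dentry, d_inode(dentry),
 *				       "security.example", NULL, 0);
 *		return error > 0;
 *	}
 *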
* @inode_killpriv: * The setuid bit is being removed. Remove similar security labels. * Called with the dentry->d_inode->i_mutex held. * @dentry is the dentry being changed. * Return 0 on success. If an error is returned, then the operation * causing the setuid bit removal fails. * @inode_getsecid: * Get the secid associated with the node. * @inode contains a pointer to the inode. * @secid contains a pointer to the location where result will be saved. * In case of failure, @secid will be set to zero. * @inode_copy_up: * A file is about to be copied up from lower layer to upper layer of * overlay filesystem. Security module can prepare a set of new creds * and modify as need be and return new creds. Caller will switch to * new creds temporarily to create new file and release newly allocated * creds. * @src indicates the union dentry of file that is being copied up. * @new pointer to pointer to return newly allocated creds. * Returns 0 on success or a negative error code on error. * @inode_copy_up_xattr: * Filter the xattrs being copied up when a unioned file is copied * up from a lower layer to the union/overlay layer. * @name indicates the name of the xattr. * Returns 0 to accept the xattr, 1 to discard the xattr, -EOPNOTSUPP if * security module does not know about attribute or a negative error code * to abort the copy up. Note that the caller is responsible for reading * and writing the xattrs as this hook is merely a filter. * * Security hooks for kernfs node operations * * @kernfs_init_security: * Initialize the security context of a newly created kernfs node based * on its own and its parent's attributes. * * @kn_dir the parent kernfs node * @kn the new child kernfs node * * Security hooks for file operations * * @file_permission: * Check file permissions before accessing an open file. This hook is * called by various operations that read or write files. A security * module can use this hook to perform additional checking on these * operations, e.g. to revalidate permissions on use to support privilege * bracketing or policy changes. Notice that this hook is used when the * actual read/write operations are performed, whereas the * inode_security_ops hook is called when a file is opened (as well as * many other operations). * Caveat: Although this hook can be used to revalidate permissions for * various system call operations that read or write files, it does not * address the revalidation of permissions for memory-mapped files. * Security modules must handle this separately if they need such * revalidation. * @file contains the file structure being accessed. * @mask contains the requested permissions. * Return 0 if permission is granted. * @file_alloc_security: * Allocate and attach a security structure to the file->f_security field. * The security field is initialized to NULL when the structure is first * created. * @file contains the file structure to secure. * Return 0 if the hook is successful and permission is granted. * @file_free_security: * Deallocate and free any security structures stored in file->f_security. * @file contains the file structure being modified. * @file_ioctl: * @file contains the file structure. * @cmd contains the operation to perform. * @arg contains the operational arguments. * Check permission for an ioctl operation on @file. Note that @arg * sometimes represents a user space pointer; in other cases, it may be a * simple integer value. When @arg represents a user space pointer, it * should never be used by the security module.
* Return 0 if permission is granted. * @file_ioctl_compat: * @file contains the file structure. * @cmd contains the operation to perform. * @arg contains the operational arguments. * Check permission for a compat ioctl operation on @file. * Return 0 if permission is granted. * @mmap_addr : * Check permissions for a mmap operation at @addr. * @addr contains virtual address that will be used for the operation. * Return 0 if permission is granted. * @mmap_file : * Check permissions for a mmap operation. The @file may be NULL, e.g. * if mapping anonymous memory. * @file contains the file structure for file to map (may be NULL). * @reqprot contains the protection requested by the application. * @prot contains the protection that will be applied by the kernel. * @flags contains the operational flags. * Return 0 if permission is granted. * @file_mprotect: * Check permissions before changing memory access permissions. * @vma contains the memory region to modify. * @reqprot contains the protection requested by the application. * @prot contains the protection that will be applied by the kernel. * Return 0 if permission is granted. * @file_lock: * Check permission before performing file locking operations. * Note the hook mediates both flock and fcntl style locks. * @file contains the file structure. * @cmd contains the posix-translated lock operation to perform * (e.g. F_RDLCK, F_WRLCK). * Return 0 if permission is granted. * @file_fcntl: * Check permission before allowing the file operation specified by @cmd * from being performed on the file @file. Note that @arg sometimes * represents a user space pointer; in other cases, it may be a simple * integer value. When @arg represents a user space pointer, it should * never be used by the security module. * @file contains the file structure. * @cmd contains the operation to be performed. * @arg contains the operational arguments. * Return 0 if permission is granted. * @file_set_fowner: * Save owner security information (typically from current->security) in * file->f_security for later use by the send_sigiotask hook. * @file contains the file structure to update. * Return 0 on success. * @file_send_sigiotask: * Check permission for the file owner @fown to send SIGIO or SIGURG to the * process @tsk. Note that this hook is sometimes called from interrupt. * Note that the fown_struct, @fown, is never outside the context of a * struct file, so the file structure (and associated security information) * can always be obtained: container_of(fown, struct file, f_owner) * @tsk contains the structure of task receiving signal. * @fown contains the file owner information. * @sig is the signal that will be sent. When 0, kernel sends SIGIO. * Return 0 if permission is granted. * @file_receive: * This hook allows security modules to control the ability of a process * to receive an open file descriptor via socket IPC. * @file contains the file structure being received. * Return 0 if permission is granted. * @file_open: * Save open-time permission checking state for later use upon * file_permission, and recheck access if anything has changed * since inode_permission. * * Security hooks for task operations. * * @task_alloc: * @task task being allocated. * @clone_flags contains the flags indicating what should be shared. * Handle allocation of task-related resources. * Returns a zero on success, negative values on failure. * @task_free: * @task task about to be freed. * Handle release of task-related resources. (Note that this can be called * from interrupt context.) 
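 *
 * Editorial example (a minimal sketch): a task_alloc hook with a
 * purely illustrative policy that vetoes creation of new user
 * namespaces:
 *
 *	static int example_task_alloc(struct task_struct *task,
 *				      unsigned long clone_flags)
 *	{
 *		if (clone_flags & CLONE_NEWUSER)
 *			return -EPERM;
 *		return 0;
 *	}
 *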
* @cred_alloc_blank: * @cred points to the credentials. * @gfp indicates the atomicity of any memory allocations. * Only allocate sufficient memory and attach to @cred such that * cred_transfer() will not get ENOMEM. * @cred_free: * @cred points to the credentials. * Deallocate and clear the cred->security field in a set of credentials. * @cred_prepare: * @new points to the new credentials. * @old points to the original credentials. * @gfp indicates the atomicity of any memory allocations. * Prepare a new set of credentials by copying the data from the old set. * @cred_transfer: * @new points to the new credentials. * @old points to the original credentials. * Transfer data from original creds to new creds * @cred_getsecid: * Retrieve the security identifier of the cred structure @c * @c contains the credentials, secid will be placed into @secid. * In case of failure, @secid will be set to zero. * @kernel_act_as: * Set the credentials for a kernel service to act as (subjective context). * @new points to the credentials to be modified. * @secid specifies the security ID to be set * The current task must be the one that nominated @secid. * Return 0 if successful. * @kernel_create_files_as: * Set the file creation context in a set of credentials to be the same as * the objective context of the specified inode. * @new points to the credentials to be modified. * @inode points to the inode to use as a reference. * The current task must be the one that nominated @inode. * Return 0 if successful. * @kernel_module_request: * Ability to trigger the kernel to automatically upcall to userspace for * userspace to load a kernel module with the given name. * @kmod_name name of the module requested by the kernel * Return 0 if successful. * @kernel_load_data: * Load data provided by userspace. * @id kernel load data identifier * Return 0 if permission is granted. * @kernel_read_file: * Read a file specified by userspace. * @file contains the file structure pointing to the file being read * by the kernel. * @id kernel read file identifier * Return 0 if permission is granted. * @kernel_post_read_file: * Read a file specified by userspace. * @file contains the file structure pointing to the file being read * by the kernel. * @buf pointer to buffer containing the file contents. * @size length of the file contents. * @id kernel read file identifier * Return 0 if permission is granted. * @task_fix_setuid: * Update the module's state after setting one or more of the user * identity attributes of the current process. The @flags parameter * indicates which of the set*uid system calls invoked this hook. * @new is the set of credentials that will be installed. Modifications * should be made to this rather than to @current->cred. * @old is the set of credentials that are being replaced * @flags contains one of the LSM_SETID_* values. * Return 0 on success. * @task_setpgid: * Check permission before setting the process group identifier of the * process @p to @pgid. * @p contains the task_struct for process being modified. * @pgid contains the new pgid. * Return 0 if permission is granted. * @task_getpgid: * Check permission before getting the process group identifier of the * process @p. * @p contains the task_struct for the process. * Return 0 if permission is granted. * @task_getsid: * Check permission before getting the session identifier of the process * @p. * @p contains the task_struct for the process. * Return 0 if permission is granted. * @task_getsecid: * Retrieve the security identifier of the process @p. * @p contains the task_struct for the process; the secid is placed into @secid. * In case of failure, @secid will be set to zero. * * @task_setnice: * Check permission before setting the nice value of @p to @nice. * @p contains the task_struct of process. * @nice contains the new nice value. * Return 0 if permission is granted.
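 *
 * Editorial example (a minimal sketch) for the task_setnice hook
 * documented above: only privileged callers may raise another task's
 * priority; task_nice() and CAP_SYS_NICE come from <linux/sched.h>
 * and <linux/capability.h>.
 *
 *	static int example_task_setnice(struct task_struct *p, int nice)
 *	{
 *		if (nice < task_nice(p) && !capable(CAP_SYS_NICE))
 *			return -EPERM;
 *		return 0;
 *	}
 *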
* @task_setioprio: * Check permission before setting the ioprio value of @p to @ioprio. * @p contains the task_struct of process. * @ioprio contains the new ioprio value * Return 0 if permission is granted. * @task_getioprio: * Check permission before getting the ioprio value of @p. * @p contains the task_struct of process. * Return 0 if permission is granted. * @task_prlimit: * Check permission before getting and/or setting the resource limits of * another task. * @cred points to the cred structure for the current task. * @tcred points to the cred structure for the target task. * @flags contains the LSM_PRLIMIT_* flag bits indicating whether the * resource limits are being read, modified, or both. * Return 0 if permission is granted. * @task_setrlimit: * Check permission before setting the resource limits of process @p * for @resource to @new_rlim. The old resource limit values can * be examined by dereferencing (p->signal->rlim + resource). * @p points to the task_struct for the target task's group leader. * @resource contains the resource whose limit is being set. * @new_rlim contains the new limits for @resource. * Return 0 if permission is granted. * @task_setscheduler: * Check permission before setting scheduling policy and/or parameters of * process @p. * @p contains the task_struct for process. * Return 0 if permission is granted. * @task_getscheduler: * Check permission before obtaining scheduling information for process * @p. * @p contains the task_struct for process. * Return 0 if permission is granted. * @task_movememory: * Check permission before moving memory owned by process @p. * @p contains the task_struct for process. * Return 0 if permission is granted. * @task_kill: * Check permission before sending signal @sig to @p. @info can be NULL, * the constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming * from the kernel and should typically be permitted. * SIGIO signals are handled separately by the send_sigiotask hook in * file_security_ops. * @p contains the task_struct for process. * @info contains the signal information. * @sig contains the signal value. * @cred contains the cred of the process where the signal originated, or * NULL if the current task is the originator. * Return 0 if permission is granted. * @task_prctl: * Check permission before performing a process control operation on the * current process. * @option contains the operation. * @arg2 contains an argument. * @arg3 contains an argument. * @arg4 contains an argument. * @arg5 contains an argument. * Return -ENOSYS if no-one wanted to handle this op, any other value to * cause prctl() to return immediately with that value. * @task_to_inode: * Set the security attributes for an inode based on an associated task's * security attributes, e.g. for /proc/pid inodes. * @p contains the task_struct for the task. * @inode contains the inode structure for the inode.
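 *
 * Editorial example (a minimal sketch) for the task_prctl hook
 * documented above, showing the pass-through convention;
 * PR_EXAMPLE_OPT is hypothetical:
 *
 *	static int example_task_prctl(int option, unsigned long arg2,
 *				      unsigned long arg3,
 *				      unsigned long arg4,
 *				      unsigned long arg5)
 *	{
 *		if (option != PR_EXAMPLE_OPT)
 *			return -ENOSYS;	/* not ours; defer to others */
 *		return arg2 ? 0 : -EINVAL;
 *	}
 *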
* * Security hooks for Netlink messaging. * * @netlink_send: * Save security information for a netlink message so that permission * checking can be performed when the message is processed. The security * information can be saved using the eff_cap field of the * netlink_skb_parms structure. Also may be used to provide fine * grained control over message transmission. * @sk associated sock of task sending the message. * @skb contains the sk_buff structure for the netlink message. * Return 0 if the information was successfully saved and message * is allowed to be transmitted. * * Security hooks for Unix domain networking. * * @unix_stream_connect: * Check permissions before establishing a Unix domain stream connection * between @sock and @other. * @sock contains the sock structure. * @other contains the peer sock structure. * @newsk contains the new sock structure. * Return 0 if permission is granted. * @unix_may_send: * Check permissions before connecting or sending datagrams from @sock to * @other. * @sock contains the socket structure. * @other contains the peer socket structure. * Return 0 if permission is granted. * * The @unix_stream_connect and @unix_may_send hooks were necessary because * Linux provides an alternative to the conventional file name space for Unix * domain sockets. Whereas binding and connecting to sockets in the file name * space is mediated by the typical file permissions (and caught by the mknod * and permission hooks in inode_security_ops), binding and connecting to * sockets in the abstract name space is completely unmediated. Sufficient * control of Unix domain sockets in the abstract name space isn't possible * using only the socket layer hooks, since we need to know the actual target * socket, which is not looked up until we are inside the af_unix code. * * Security hooks for socket operations. * * @socket_create: * Check permissions prior to creating a new socket. * @family contains the requested protocol family. * @type contains the requested communications type. * @protocol contains the requested protocol. * @kern set to 1 if a kernel socket. * Return 0 if permission is granted. * @socket_post_create: * This hook allows a module to update or allocate a per-socket security * structure. Note that the security field was not added directly to the * socket structure, but rather, the socket security information is stored * in the associated inode. Typically, the inode alloc_security hook will * allocate and attach security information to * SOCK_INODE(sock)->i_security. This hook may be used to update the * SOCK_INODE(sock)->i_security field with additional information that * wasn't available when the inode was allocated. * @sock contains the newly created socket structure. * @family contains the requested protocol family. * @type contains the requested communications type. * @protocol contains the requested protocol. * @kern set to 1 if a kernel socket. * @socket_socketpair: * Check permissions before creating a fresh pair of sockets. * @socka contains the first socket structure. * @sockb contains the second socket structure. * Return 0 if permission is granted and the connection was established. * @socket_bind: * Check permission before socket protocol layer bind operation is * performed and the socket @sock is bound to the address specified in the * @address parameter. * @sock contains the socket structure. * @address contains the address to bind to. * @addrlen contains the length of address. * Return 0 if permission is granted. * @socket_connect: * Check permission before socket protocol layer connect operation * attempts to connect socket @sock to a remote address, @address. * @sock contains the socket structure.
* @address contains the address of remote endpoint. * @addrlen contains the length of address. * Return 0 if permission is granted. * @socket_listen: * Check permission before socket protocol layer listen operation. * @sock contains the socket structure. * @backlog contains the maximum length for the pending connection queue. * Return 0 if permission is granted. * @socket_accept: * Check permission before accepting a new connection. Note that the new * socket, @newsock, has been created and some information copied to it, * but the accept operation has not actually been performed. * @sock contains the listening socket structure. * @newsock contains the newly created server socket for connection. * Return 0 if permission is granted. * @socket_sendmsg: * Check permission before transmitting a message to another socket. * @sock contains the socket structure. * @msg contains the message to be transmitted. * @size contains the size of message. * Return 0 if permission is granted. * @socket_recvmsg: * Check permission before receiving a message from a socket. * @sock contains the socket structure. * @msg contains the message structure. * @size contains the size of message structure. * @flags contains the operational flags. * Return 0 if permission is granted. * @socket_getsockname: * Check permission before the local address (name) of the socket object * @sock is retrieved. * @sock contains the socket structure. * Return 0 if permission is granted. * @socket_getpeername: * Check permission before the remote address (name) of a socket object * @sock is retrieved. * @sock contains the socket structure. * Return 0 if permission is granted. * @socket_getsockopt: * Check permissions before retrieving the options associated with socket * @sock. * @sock contains the socket structure. * @level contains the protocol level to retrieve option from. * @optname contains the name of option to retrieve. * Return 0 if permission is granted. * @socket_setsockopt: * Check permissions before setting the options associated with socket * @sock. * @sock contains the socket structure. * @level contains the protocol level to set options for. * @optname contains the name of the option to set. * Return 0 if permission is granted. * @socket_shutdown: * Checks permission before all or part of a connection on the socket * @sock is shut down. * @sock contains the socket structure. * @how contains the flag indicating how future sends and receives * are handled. * Return 0 if permission is granted. * @socket_sock_rcv_skb: * Check permissions on incoming network packets. This hook is distinct * from Netfilter's IP input hooks since it is the first time that the * incoming sk_buff @skb has been associated with a particular socket, @sk. * Must not sleep inside this hook because some callers hold spinlocks. * @sk contains the sock (not socket) associated with the incoming sk_buff. * @skb contains the incoming network data. * @socket_getpeersec_stream: * This hook allows the security module to provide peer socket security * state for unix or connected tcp sockets to userspace via getsockopt * SO_GETPEERSEC. For tcp sockets this can be meaningful if the * socket is associated with an ipsec SA. * @sock is the local socket. * @optval userspace memory where the security state is to be copied. * @optlen userspace int where the module should copy the actual length * of the security state. * @len as input is the maximum length to copy to userspace provided * by the caller. 
* Return 0 if all is well, otherwise, typical getsockopt return * values. * @socket_getpeersec_dgram: * This hook allows the security module to provide peer socket security * state for udp sockets on a per-packet basis to userspace via * getsockopt SO_PEERSEC. The application must first have indicated * the IP_PASSSEC option via setsockopt. It can then retrieve the * security state returned by this hook for a packet via the SCM_SECURITY * ancillary message type. * @sock contains the peer socket. May be NULL. * @skb is the sk_buff for the packet being queried. May be NULL. * @secid pointer to store the secid of the packet. * Return 0 on success, error on failure. * @sk_alloc_security: * Allocate and attach a security structure to the sk->sk_security field, * which is used to copy security attributes between local stream sockets. * @sk_free_security: * Deallocate security structure. * @sk_clone_security: * Clone/copy security structure. * @sk_getsecid: * Retrieve the LSM-specific secid for the sock to enable caching * of network authorizations. * @sock_graft: * Sets the socket's isec sid to the sock's sid. * @inet_conn_request: * Sets the openreq's sid to socket's sid with MLS portion taken * from peer sid. * @inet_csk_clone: * Sets the new child socket's sid to the openreq sid. * @inet_conn_established: * Sets the connection's peersid to the secmark on skb. * @secmark_relabel_packet: * Check if the process should be allowed to relabel packets to * the given secid. * @secmark_refcount_inc: * Tells the LSM to increment the number of secmark labeling rules loaded. * @secmark_refcount_dec: * Tells the LSM to decrement the number of secmark labeling rules loaded. * @req_classify_flow: * Sets the flow's sid to the openreq sid. * @tun_dev_alloc_security: * This hook allows a module to allocate a security structure for a TUN * device. * @security pointer to a security structure pointer. * Returns zero on success, negative values on failure. * @tun_dev_free_security: * This hook allows a module to free the security structure for a TUN * device. * @security pointer to the TUN device's security structure. * @tun_dev_create: * Check permissions prior to creating a new TUN device. * @tun_dev_attach_queue: * Check permissions prior to attaching to a TUN device queue. * @security pointer to the TUN device's security structure. * @tun_dev_attach: * This hook can be used by the module to update any security state * associated with the TUN device's sock structure. * @sk contains the existing sock structure. * @security pointer to the TUN device's security structure. * @tun_dev_open: * This hook can be used by the module to update any security state * associated with the TUN device's security structure. * @security pointer to the TUN device's security structure. * * Security hooks for SCTP * * @sctp_assoc_request: * Passes the @ep and @chunk->skb of the association INIT packet to * the security module. * @ep pointer to sctp endpoint structure. * @skb pointer to skbuff of association packet. * Return 0 on success, error on failure. * @sctp_bind_connect: * Validate permissions required for each address associated with sock * @sk. Depending on @optname, the addresses will be treated as either * for a connect or bind service. The @addrlen is calculated on each * ipv4 and ipv6 address using sizeof(struct sockaddr_in) or * sizeof(struct sockaddr_in6). * @sk pointer to sock structure. * @optname name of the option to validate. * @address list containing one or more ipv4/ipv6 addresses.
* @addrlen total length of the address(es). * Return 0 on success, error on failure. * @sctp_sk_clone: * Called whenever a new socket is created by accept(2) (i.e. a TCP * style socket) or when a socket is 'peeled off' e.g. userspace * calls sctp_peeloff(3). * @ep pointer to current sctp endpoint structure. * @sk pointer to current sock structure. * @newsk pointer to new sock structure. * * Security hooks for Infiniband * * @ib_pkey_access: * Check permission to access a pkey when modifying a QP. * @subnet_prefix the subnet prefix of the port being used. * @pkey the pkey to be accessed. * @sec pointer to a security structure. * @ib_endport_manage_subnet: * Check permissions to send and receive SMPs on an end port. * @dev_name the IB device name (e.g. mlx4_0). * @port_num the port number. * @sec pointer to a security structure. * @ib_alloc_security: * Allocate a security structure for Infiniband objects. * @sec pointer to a security structure pointer. * Returns 0 on success, non-zero on failure. * @ib_free_security: * Deallocate an Infiniband security structure. * @sec contains the security structure to be freed. * * Security hooks for XFRM operations. * * @xfrm_policy_alloc_security: * @ctxp is a pointer to the xfrm_sec_ctx being added to the Security Policy * Database used by the XFRM system. * @sec_ctx contains the security context information being provided by * the user-level policy update program (e.g., setkey). * @gfp is to specify the context for the allocation. * Allocate a security structure to the xp->security field; the security * field is initialized to NULL when the xfrm_policy is allocated. * Return 0 if the operation was successful (memory could be allocated, * legal context). * @xfrm_policy_clone_security: * @old_ctx contains an existing xfrm_sec_ctx. * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. * Allocate a security structure in new_ctxp that contains the * information from the old_ctx structure. * Return 0 if the operation was successful (memory could be allocated). * @xfrm_policy_free_security: * @ctx contains the xfrm_sec_ctx. * Deallocate xp->security. * @xfrm_policy_delete_security: * @ctx contains the xfrm_sec_ctx. * Authorize deletion of xp->security. * @xfrm_state_alloc: * @x contains the xfrm_state being added to the Security Association * Database by the XFRM system. * @sec_ctx contains the security context information being provided by * the user-level SA generation program (e.g., setkey or racoon). * Allocate a security structure to the x->security field; the security * field is initialized to NULL when the xfrm_state is allocated. Set the * context to correspond to sec_ctx. Return 0 if the operation was successful * (memory could be allocated, legal context). * @xfrm_state_alloc_acquire: * @x contains the xfrm_state being added to the Security Association * Database by the XFRM system. * @polsec contains the policy's security context. * @secid contains the secid from which to take the mls portion of the * context. * Allocate a security structure to the x->security field; the security * field is initialized to NULL when the xfrm_state is allocated. Set the * context to correspond to secid. Return 0 if the operation was successful * (memory could be allocated, legal context). * @xfrm_state_free_security: * @x contains the xfrm_state. * Deallocate x->security. * @xfrm_state_delete_security: * @x contains the xfrm_state. * Authorize deletion of x->security. * @xfrm_policy_lookup: * @ctx contains the xfrm_sec_ctx for which the access control is being * checked.
* @fl_secid contains the flow security label that is used to authorize * access to the policy xp. * @dir contains the direction of the flow (input or output). * Check permission when a flow selects an xfrm_policy for processing * XFRMs on a packet. The hook is called when selecting either a * per-socket policy or a generic xfrm policy. * Return 0 if permission is granted, -ESRCH otherwise, or -errno * on other errors. * @xfrm_state_pol_flow_match: * @x contains the state to match. * @xp contains the policy to check for a match. * @fl contains the flow to check for a match. * Return 1 if there is a match. * @xfrm_decode_session: * @skb points to skb to decode. * @secid points to the flow key secid to set. * @ckall says if all xfrms used should be checked for same secid. * Return 0 if ckall is zero or all xfrms used have the same secid. * * Security hooks affecting all Key Management operations * * @key_alloc: * Permit allocation of a key and assign security data. Note that the key * does not have a serial number assigned at this point. * @key points to the key. * @flags contains the allocation flags. * Return 0 if permission is granted, -ve error otherwise. * @key_free: * Notification of destruction; free security data. * @key points to the key. * No return value. * @key_permission: * See whether a specific operational right is granted to a process on a * key. * @key_ref refers to the key (key pointer + possession attribute bit). * @cred points to the credentials to provide the context against which to * evaluate the security data on the key. * @perm describes the combination of permissions required of this key. * Return 0 if permission is granted, -ve error otherwise. * @key_getsecurity: * Get a textual representation of the security context attached to a key * for the purposes of honouring KEYCTL_GETSECURITY. This function * allocates the storage for the NUL-terminated string and the caller * should free it. * @key points to the key to be queried. * @_buffer points to a pointer that should be set to point to the * resulting string (left unset if there is no label or an error occurs). * Return the length of the string (including terminating NUL) or -ve * on error. * May also return 0 (and a NULL buffer pointer) if there is no label. * * Security hooks affecting all System V IPC operations. * * @ipc_permission: * Check permissions for access to IPC. * @ipcp contains the kernel IPC permission structure. * @flag contains the desired (requested) permission set. * Return 0 if permission is granted. * @ipc_getsecid: * Get the secid associated with the ipc object. * @ipcp contains the kernel IPC permission structure. * @secid contains a pointer to the location where result will be saved. * In case of failure, @secid will be set to zero. * * Security hooks for individual messages held in System V IPC message queues * @msg_msg_alloc_security: * Allocate and attach a security structure to the msg->security field. * The security field is initialized to NULL when the structure is first * created. * @msg contains the message structure to be modified. * Return 0 if operation was successful and permission is granted. * @msg_msg_free_security: * Deallocate the security structure for this message. * @msg contains the message structure to be modified. * * Security hooks for System V IPC Message Queues * * @msg_queue_alloc_security: * Allocate and attach a security structure to the * @perm->security field. The security field is initialized to * NULL when the structure is first created.
* @perm contains the IPC permissions of the message queue. * Return 0 if operation was successful and permission is granted. * @msg_queue_free_security: * Deallocate security field @perm->security for the message queue. * @perm contains the IPC permissions of the message queue. * @msg_queue_associate: * Check permission when a message queue is requested through the * msgget system call. This hook is only called when returning the * message queue identifier for an existing message queue, not when a * new message queue is created. * @perm contains the IPC permissions of the message queue. * @msqflg contains the operation control flags. * Return 0 if permission is granted. * @msg_queue_msgctl: * Check permission when a message control operation specified by @cmd * is to be performed on the message queue with permissions @perm. * The @perm may be NULL, e.g. for IPC_INFO or MSG_INFO. * @perm contains the IPC permissions of the msg queue. May be NULL. * @cmd contains the operation to be performed. * Return 0 if permission is granted. * @msg_queue_msgsnd: * Check permission before a message, @msg, is enqueued on the message * queue with permissions @perm. * @perm contains the IPC permissions of the message queue. * @msg contains the message to be enqueued. * @msqflg contains operational flags. * Return 0 if permission is granted. * @msg_queue_msgrcv: * Check permission before a message, @msg, is removed from the message * queue. The @target task structure contains a pointer to the * process that will be receiving the message (not equal to the current * process when inline receives are being performed). * @perm contains the IPC permissions of the message queue. * @msg contains the message destination. * @target contains the task structure for recipient process. * @type contains the type of message requested. * @mode contains the operational flags. * Return 0 if permission is granted. * * Security hooks for System V Shared Memory Segments * * @shm_alloc_security: * Allocate and attach a security structure to the @perm->security * field. The security field is initialized to NULL when the structure is * first created. * @perm contains the IPC permissions of the shared memory structure. * Return 0 if operation was successful and permission is granted. * @shm_free_security: * Deallocate the security structure @perm->security for the memory segment. * @perm contains the IPC permissions of the shared memory structure. * @shm_associate: * Check permission when a shared memory region is requested through the * shmget system call. This hook is only called when returning the shared * memory region identifier for an existing region, not when a new shared * memory region is created. * @perm contains the IPC permissions of the shared memory structure. * @shmflg contains the operation control flags. * Return 0 if permission is granted. * @shm_shmctl: * Check permission when a shared memory control operation specified by * @cmd is to be performed on the shared memory region with permissions @perm. * The @perm may be NULL, e.g. for IPC_INFO or SHM_INFO. * @perm contains the IPC permissions of the shared memory structure. * @cmd contains the operation to be performed. * Return 0 if permission is granted. * @shm_shmat: * Check permissions prior to allowing the shmat system call to attach the * shared memory segment with permissions @perm to the data segment of the * calling process. The attaching address is specified by @shmaddr. * @perm contains the IPC permissions of the shared memory structure. 
* @shmaddr contains the address to attach memory region to. * @shmflg contains the operational flags. * Return 0 if permission is granted. * * Security hooks for System V Semaphores * * @sem_alloc_security: * Allocate and attach a security structure to the @perm->security * field. The security field is initialized to NULL when the structure is * first created. * @perm contains the IPC permissions of the semaphore. * Return 0 if operation was successful and permission is granted. * @sem_free_security: * Deallocate security structure @perm->security for the semaphore. * @perm contains the IPC permissions of the semaphore. * @sem_associate: * Check permission when a semaphore is requested through the semget * system call. This hook is only called when returning the semaphore * identifier for an existing semaphore, not when a new one must be * created. * @perm contains the IPC permissions of the semaphore. * @semflg contains the operation control flags. * Return 0 if permission is granted. * @sem_semctl: * Check permission when a semaphore operation specified by @cmd is to be * performed on the semaphore. The @perm may be NULL, e.g. for * IPC_INFO or SEM_INFO. * @perm contains the IPC permissions of the semaphore. May be NULL. * @cmd contains the operation to be performed. * Return 0 if permission is granted. * @sem_semop: * Check permissions before performing operations on members of the * semaphore set. If the @alter flag is nonzero, the semaphore set * may be modified. * @perm contains the IPC permissions of the semaphore. * @sops contains the operations to perform. * @nsops contains the number of operations to perform. * @alter contains the flag indicating whether changes are to be made. * Return 0 if permission is granted. * * @binder_set_context_mgr: * Check whether @mgr is allowed to be the binder context manager. * @mgr contains the struct cred for the current binder process. * Return 0 if permission is granted. * @binder_transaction: * Check whether @from is allowed to invoke a binder transaction call * to @to. * @from contains the struct cred for the sending process. * @to contains the struct cred for the receiving process. * @binder_transfer_binder: * Check whether @from is allowed to transfer a binder reference to @to. * @from contains the struct cred for the sending process. * @to contains the struct cred for the receiving process. * @binder_transfer_file: * Check whether @from is allowed to transfer @file to @to. * @from contains the struct cred for the sending process. * @file contains the struct file being transferred. * @to contains the struct cred for the receiving process. * * @ptrace_access_check: * Check permission before allowing the current process to trace the * @child process. * Security modules may also want to perform a process tracing check * during an execve in the bprm_set_creds hook of binprm_security_ops * if the process is being traced and its security attributes would be * changed by the execve. * @child contains the task_struct structure for the target process. * @mode contains the PTRACE_MODE flags indicating the form of access. * Return 0 if permission is granted. * @ptrace_traceme: * Check that the @parent process has sufficient permission to trace the * current process before allowing the current process to present itself * to the @parent process for tracing. * @parent contains the task_struct structure for the debugger process. * Return 0 if permission is granted.
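 *
 *	As an illustration only (the function names below are hypothetical,
 *	not part of this header), a module might implement the two ptrace
 *	hooks documented above roughly as follows:
 *
 *		static int example_ptrace_access_check(struct task_struct *child,
 *						       unsigned int mode)
 *		{
 *			// For attach-style access, require CAP_SYS_PTRACE.
 *			if ((mode & PTRACE_MODE_ATTACH) && !capable(CAP_SYS_PTRACE))
 *				return -EPERM;
 *			return 0;
 *		}
 *
 *		static int example_ptrace_traceme(struct task_struct *parent)
 *		{
 *			// Allow; @parent is the would-be tracer.
 *			return 0;
 *		}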
* @capget: * Get the @effective, @inheritable, and @permitted capability sets for * the @target process. The hook may also perform permission checking to * determine if the current process is allowed to see the capability sets * of the @target process. * @target contains the task_struct structure for target process. * @effective contains the effective capability set. * @inheritable contains the inheritable capability set. * @permitted contains the permitted capability set. * Return 0 if the capability sets were successfully obtained. * @capset: * Set the @effective, @inheritable, and @permitted capability sets for * the current process. * @new contains the new credentials structure for target process. * @old contains the current credentials structure for target process. * @effective contains the effective capability set. * @inheritable contains the inheritable capability set. * @permitted contains the permitted capability set. * Return 0 and update @new if permission is granted. * @capable: * Check whether the @cap capability is granted in the indicated * credentials. * @cred contains the credentials to use. * @ns contains the user namespace we want the capability in. * @cap contains the capability <include/linux/capability.h>. * @opts contains options for the capable check <include/linux/security.h>. * Return 0 if the capability is granted. * @syslog: * Check permission before accessing the kernel message ring or changing * logging to the console. * See the syslog(2) manual page for an explanation of the @type values. * @type contains the SYSLOG_ACTION_* constant from <include/linux/syslog.h>. * Return 0 if permission is granted. * @settime: * Check permission to change the system time. * struct timespec64 is defined in <include/linux/time64.h> and timezone * is defined in <include/linux/time.h>. * @ts contains the new time. * @tz contains the new timezone. * Return 0 if permission is granted. * @vm_enough_memory: * Check permissions for allocating a new virtual mapping. * @mm contains the mm struct it is being added to. * @pages contains the number of pages. * Return 0 if permission is granted. * * @ismaclabel: * Check if the extended attribute specified by @name * represents a MAC label. Returns 1 if @name is a MAC * attribute, otherwise returns 0. * @name full extended attribute name to check against * LSM as a MAC label. * * @secid_to_secctx: * Convert secid to security context. If secdata is NULL the length of * the result will be returned in seclen, but no secdata will be returned. * This does mean that the length could change between calls to check the * length and the next call which actually allocates and returns the * secdata. * @secid contains the security ID. * @secdata contains the pointer that stores the converted security * context. * @seclen pointer which contains the length of the data. * @secctx_to_secid: * Convert security context to secid. * @secid contains the pointer to the generated security ID. * @secdata contains the security context. * * @release_secctx: * Release the security context. * @secdata contains the security context. * @seclen contains the length of the security context. * * Security hooks for Audit * * @audit_rule_init: * Allocate and initialize an LSM audit rule structure. * @field contains the required Audit action. * Field flags are defined in <include/linux/audit.h>. * @op contains the operator the rule uses. * @rulestr contains the context the rule will be applied to. * @lsmrule contains a pointer to receive the result.
* Return 0 if @lsmrule has been successfully set, * -EINVAL in case of an invalid rule. * * @audit_rule_known: * Specifies whether the given @krule contains any fields related to * the current LSM. * @krule contains the audit rule of interest. * Return 1 if a relevant field is found, 0 otherwise. * * @audit_rule_match: * Determine if the given @secid matches a rule previously approved * by @audit_rule_known. * @secid contains the security id in question. * @field contains the field which relates to the current LSM. * @op contains the operator that will be used for matching. * @lsmrule points to the audit rule that will be checked against. * Return 1 if @secid matches the rule, 0 if it does not, -ERRNO on failure. * * @audit_rule_free: * Deallocate the LSM audit rule structure previously allocated by * audit_rule_init. * @lsmrule contains the allocated rule. * * @inode_invalidate_secctx: * Notify the security module that it must revalidate the security context * of an inode. * * @inode_notifysecctx: * Notify the security module of what the security context of an inode * should be. Initializes the incore security context managed by the * security module for this inode. Example usage: NFS client invokes * this hook to initialize the security context in its incore inode to the * value provided by the server for the file when the server returned the * file's attributes to the client. * Must be called with inode->i_mutex locked. * @inode we wish to set the security context of. * @ctx contains the string which we wish to set in the inode. * @ctxlen contains the length of @ctx. * * @inode_setsecctx: * Change the security context of an inode. Updates the * incore security context managed by the security module and invokes the * fs code as needed (via __vfs_setxattr_noperm) to update any backing * xattrs that represent the context. Example usage: NFS server invokes * this hook to change the security context in its incore inode and on the * backing filesystem to a value provided by the client on a SETATTR * operation. * Must be called with inode->i_mutex locked. * @dentry contains the inode we wish to set the security context of. * @ctx contains the string which we wish to set in the inode. * @ctxlen contains the length of @ctx. * * @inode_getsecctx: * On success, returns 0 and fills out @ctx and @ctxlen with the security * context for the given @inode. * @inode we wish to get the security context of. * @ctx is a pointer in which to place the allocated security context. * @ctxlen points to the place to put the length of @ctx. * * Security hooks for using the eBPF maps and programs functionalities through * eBPF syscalls. * * @bpf: * Do an initial check for all bpf syscalls after the attribute is copied * into the kernel. The actual security module can implement its own * rules to check the specific cmd it needs. * * @bpf_map: * Do a check when the kernel generates and returns a file descriptor for * eBPF maps. * * @map: bpf map that we want to access * @mask: the access flags * * @bpf_prog: * Do a check when the kernel generates and returns a file descriptor for * eBPF programs. * * @prog: bpf prog that userspace wants to use. * * @bpf_map_alloc_security: * Initialize the security field inside bpf map. * * @bpf_map_free_security: * Clean up the security information stored inside bpf map. * * @bpf_prog_alloc_security: * Initialize the security field inside bpf program. * * @bpf_prog_free_security: * Clean up the security information stored inside bpf prog.
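 *
 *	A minimal sketch (hypothetical, not defined by this header) of a
 *	module using the @bpf hook above to restrict one eBPF command:
 *
 *		static int example_bpf(int cmd, union bpf_attr *attr,
 *				       unsigned int size)
 *		{
 *			// Illustrative policy: only privileged tasks may load programs.
 *			if (cmd == BPF_PROG_LOAD && !capable(CAP_SYS_ADMIN))
 *				return -EPERM;
 *			return 0;
 *		}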
* * @locked_down: * Determine whether a kernel feature that potentially enables arbitrary * code execution in kernel space should be permitted. * * @what: kernel feature being accessed * * @lock_kernel_down: * Put the kernel into lock-down mode. * * @where: Where the lock-down is originating from (e.g. command line option) * @level: The lock-down level (can only increase) */ union security_list_options { int (*binder_set_context_mgr)(const struct cred *mgr); int (*binder_transaction)(const struct cred *from, const struct cred *to); int (*binder_transfer_binder)(const struct cred *from, const struct cred *to); int (*binder_transfer_file)(const struct cred *from, const struct cred *to, struct file *file); int (*ptrace_access_check)(struct task_struct *child, unsigned int mode); int (*ptrace_traceme)(struct task_struct *parent); int (*capget)(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); int (*capset)(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); int (*capable)(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts); int (*quotactl)(int cmds, int type, int id, struct super_block *sb); int (*quota_on)(struct dentry *dentry); int (*syslog)(int type); int (*settime)(const struct timespec64 *ts, const struct timezone *tz); int (*vm_enough_memory)(struct mm_struct *mm, long pages); int (*bprm_set_creds)(struct linux_binprm *bprm); int (*bprm_check_security)(struct linux_binprm *bprm); void (*bprm_committing_creds)(struct linux_binprm *bprm); void (*bprm_committed_creds)(struct linux_binprm *bprm); int (*fs_context_dup)(struct fs_context *fc, struct fs_context *src_sc); int (*fs_context_parse_param)(struct fs_context *fc, struct fs_parameter *param); int (*sb_alloc_security)(struct super_block *sb); void (*sb_free_security)(struct super_block *sb); void (*sb_free_mnt_opts)(void *mnt_opts); int (*sb_eat_lsm_opts)(char *orig, void **mnt_opts); int (*sb_remount)(struct super_block *sb, void *mnt_opts); int (*sb_kern_mount)(struct super_block *sb); int (*sb_show_options)(struct seq_file *m, struct super_block *sb); int (*sb_statfs)(struct dentry *dentry); int (*sb_mount)(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data); int (*sb_umount)(struct vfsmount *mnt, int flags); int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path); int (*sb_set_mnt_opts)(struct super_block *sb, void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags); int (*sb_clone_mnt_opts)(const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags); int (*sb_add_mnt_opt)(const char *option, const char *val, int len, void **mnt_opts); int (*move_mount)(const struct path *from_path, const struct path *to_path); int (*dentry_init_security)(struct dentry *dentry, int mode, const struct qstr *name, void **ctx, u32 *ctxlen); int (*dentry_create_files_as)(struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, struct cred *new); #ifdef CONFIG_SECURITY_PATH int (*path_unlink)(const struct path *dir, struct dentry *dentry); int (*path_mkdir)(const struct path *dir, struct dentry *dentry, umode_t mode); int (*path_rmdir)(const struct path *dir, struct dentry *dentry); int (*path_mknod)(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev); int (*path_truncate)(const struct path *path); int
(*path_symlink)(const struct path *dir, struct dentry *dentry, const char *old_name); int (*path_link)(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry); int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry); int (*path_chmod)(const struct path *path, umode_t mode); int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid); int (*path_chroot)(const struct path *path); #endif /* Needed for inode based security check */ int (*path_notify)(const struct path *path, u64 mask, unsigned int obj_type); int (*inode_alloc_security)(struct inode *inode); void (*inode_free_security)(struct inode *inode); int (*inode_init_security)(struct inode *inode, struct inode *dir, const struct qstr *qstr, const char **name, void **value, size_t *len); int (*inode_create)(struct inode *dir, struct dentry *dentry, umode_t mode); int (*inode_link)(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int (*inode_unlink)(struct inode *dir, struct dentry *dentry); int (*inode_symlink)(struct inode *dir, struct dentry *dentry, const char *old_name); int (*inode_mkdir)(struct inode *dir, struct dentry *dentry, umode_t mode); int (*inode_rmdir)(struct inode *dir, struct dentry *dentry); int (*inode_mknod)(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev); int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); int (*inode_readlink)(struct dentry *dentry); int (*inode_follow_link)(struct dentry *dentry, struct inode *inode, bool rcu); int (*inode_permission)(struct inode *inode, int mask); int (*inode_setattr)(struct dentry *dentry, struct iattr *attr); int (*inode_getattr)(const struct path *path); int (*inode_setxattr)(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); void (*inode_post_setxattr)(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); int (*inode_getxattr)(struct dentry *dentry, const char *name); int (*inode_listxattr)(struct dentry *dentry); int (*inode_removexattr)(struct dentry *dentry, const char *name); int (*inode_need_killpriv)(struct dentry *dentry); int (*inode_killpriv)(struct dentry *dentry); int (*inode_getsecurity)(struct inode *inode, const char *name, void **buffer, bool alloc); int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags); int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size); void (*inode_getsecid)(struct inode *inode, u32 *secid); int (*inode_copy_up)(struct dentry *src, struct cred **new); int (*inode_copy_up_xattr)(const char *name); int (*kernfs_init_security)(struct kernfs_node *kn_dir, struct kernfs_node *kn); int (*file_permission)(struct file *file, int mask); int (*file_alloc_security)(struct file *file); void (*file_free_security)(struct file *file); int (*file_ioctl)(struct file *file, unsigned int cmd, unsigned long arg); int (*file_ioctl_compat)(struct file *file, unsigned int cmd, unsigned long arg); int (*mmap_addr)(unsigned long addr); int (*mmap_file)(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags); int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot); int (*file_lock)(struct file *file, unsigned int cmd); int (*file_fcntl)(struct file *file, unsigned int cmd, unsigned long arg); void (*file_set_fowner)(struct file *file); int 
(*file_send_sigiotask)(struct task_struct *tsk, struct fown_struct *fown, int sig); int (*file_receive)(struct file *file); int (*file_open)(struct file *file); int (*task_alloc)(struct task_struct *task, unsigned long clone_flags); void (*task_free)(struct task_struct *task); int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp); void (*cred_free)(struct cred *cred); int (*cred_prepare)(struct cred *new, const struct cred *old, gfp_t gfp); void (*cred_transfer)(struct cred *new, const struct cred *old); void (*cred_getsecid)(const struct cred *c, u32 *secid); int (*kernel_act_as)(struct cred *new, u32 secid); int (*kernel_create_files_as)(struct cred *new, struct inode *inode); int (*kernel_module_request)(char *kmod_name); int (*kernel_load_data)(enum kernel_load_data_id id); int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id); int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size, enum kernel_read_file_id id); int (*task_fix_setuid)(struct cred *new, const struct cred *old, int flags); int (*task_setpgid)(struct task_struct *p, pid_t pgid); int (*task_getpgid)(struct task_struct *p); int (*task_getsid)(struct task_struct *p); void (*task_getsecid)(struct task_struct *p, u32 *secid); int (*task_setnice)(struct task_struct *p, int nice); int (*task_setioprio)(struct task_struct *p, int ioprio); int (*task_getioprio)(struct task_struct *p); int (*task_prlimit)(const struct cred *cred, const struct cred *tcred, unsigned int flags); int (*task_setrlimit)(struct task_struct *p, unsigned int resource, struct rlimit *new_rlim); int (*task_setscheduler)(struct task_struct *p); int (*task_getscheduler)(struct task_struct *p); int (*task_movememory)(struct task_struct *p); int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred); int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); void (*task_to_inode)(struct task_struct *p, struct inode *inode); int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag); void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid); int (*msg_msg_alloc_security)(struct msg_msg *msg); void (*msg_msg_free_security)(struct msg_msg *msg); int (*msg_queue_alloc_security)(struct kern_ipc_perm *perm); void (*msg_queue_free_security)(struct kern_ipc_perm *perm); int (*msg_queue_associate)(struct kern_ipc_perm *perm, int msqflg); int (*msg_queue_msgctl)(struct kern_ipc_perm *perm, int cmd); int (*msg_queue_msgsnd)(struct kern_ipc_perm *perm, struct msg_msg *msg, int msqflg); int (*msg_queue_msgrcv)(struct kern_ipc_perm *perm, struct msg_msg *msg, struct task_struct *target, long type, int mode); int (*shm_alloc_security)(struct kern_ipc_perm *perm); void (*shm_free_security)(struct kern_ipc_perm *perm); int (*shm_associate)(struct kern_ipc_perm *perm, int shmflg); int (*shm_shmctl)(struct kern_ipc_perm *perm, int cmd); int (*shm_shmat)(struct kern_ipc_perm *perm, char __user *shmaddr, int shmflg); int (*sem_alloc_security)(struct kern_ipc_perm *perm); void (*sem_free_security)(struct kern_ipc_perm *perm); int (*sem_associate)(struct kern_ipc_perm *perm, int semflg); int (*sem_semctl)(struct kern_ipc_perm *perm, int cmd); int (*sem_semop)(struct kern_ipc_perm *perm, struct sembuf *sops, unsigned nsops, int alter); int (*netlink_send)(struct sock *sk, struct sk_buff *skb); void (*d_instantiate)(struct dentry *dentry, struct inode *inode); int (*getprocattr)(struct task_struct *p, char *name, char **value); int (*setprocattr)(const 
char *name, void *value, size_t size); int (*ismaclabel)(const char *name); int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid); void (*release_secctx)(char *secdata, u32 seclen); void (*inode_invalidate_secctx)(struct inode *inode); int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); #ifdef CONFIG_SECURITY_NETWORK int (*unix_stream_connect)(struct sock *sock, struct sock *other, struct sock *newsk); int (*unix_may_send)(struct socket *sock, struct socket *other); int (*socket_create)(int family, int type, int protocol, int kern); int (*socket_post_create)(struct socket *sock, int family, int type, int protocol, int kern); int (*socket_socketpair)(struct socket *socka, struct socket *sockb); int (*socket_bind)(struct socket *sock, struct sockaddr *address, int addrlen); int (*socket_connect)(struct socket *sock, struct sockaddr *address, int addrlen); int (*socket_listen)(struct socket *sock, int backlog); int (*socket_accept)(struct socket *sock, struct socket *newsock); int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg, int size); int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg, int size, int flags); int (*socket_getsockname)(struct socket *sock); int (*socket_getpeername)(struct socket *sock); int (*socket_getsockopt)(struct socket *sock, int level, int optname); int (*socket_setsockopt)(struct socket *sock, int level, int optname); int (*socket_shutdown)(struct socket *sock, int how); int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb); int (*socket_getpeersec_stream)(struct socket *sock, char __user *optval, int __user *optlen, unsigned len); int (*socket_getpeersec_dgram)(struct socket *sock, struct sk_buff *skb, u32 *secid); int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority); void (*sk_free_security)(struct sock *sk); void (*sk_clone_security)(const struct sock *sk, struct sock *newsk); void (*sk_getsecid)(struct sock *sk, u32 *secid); void (*sock_graft)(struct sock *sk, struct socket *parent); int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb, struct request_sock *req); void (*inet_csk_clone)(struct sock *newsk, const struct request_sock *req); void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb); int (*secmark_relabel_packet)(u32 secid); void (*secmark_refcount_inc)(void); void (*secmark_refcount_dec)(void); void (*req_classify_flow)(const struct request_sock *req, struct flowi *fl); int (*tun_dev_alloc_security)(void **security); void (*tun_dev_free_security)(void *security); int (*tun_dev_create)(void); int (*tun_dev_attach_queue)(void *security); int (*tun_dev_attach)(struct sock *sk, void *security); int (*tun_dev_open)(void *security); int (*sctp_assoc_request)(struct sctp_endpoint *ep, struct sk_buff *skb); int (*sctp_bind_connect)(struct sock *sk, int optname, struct sockaddr *address, int addrlen); void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk, struct sock *newsk); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey); int (*ib_endport_manage_subnet)(void *sec, const char *dev_name, u8 port_num); int (*ib_alloc_security)(void **sec); void (*ib_free_security)(void *sec); #endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM int 
(*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx); int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx); int (*xfrm_state_alloc)(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx); int (*xfrm_state_alloc_acquire)(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid); void (*xfrm_state_free_security)(struct xfrm_state *x); int (*xfrm_state_delete_security)(struct xfrm_state *x); int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi *fl); int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); #endif /* CONFIG_SECURITY_NETWORK_XFRM */ /* key management security hooks */ #ifdef CONFIG_KEYS int (*key_alloc)(struct key *key, const struct cred *cred, unsigned long flags); void (*key_free)(struct key *key); int (*key_permission)(key_ref_t key_ref, const struct cred *cred, unsigned perm); int (*key_getsecurity)(struct key *key, char **_buffer); #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT int (*audit_rule_init)(u32 field, u32 op, char *rulestr, void **lsmrule); int (*audit_rule_known)(struct audit_krule *krule); int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule); void (*audit_rule_free)(void *lsmrule); #endif /* CONFIG_AUDIT */ #ifdef CONFIG_BPF_SYSCALL int (*bpf)(int cmd, union bpf_attr *attr, unsigned int size); int (*bpf_map)(struct bpf_map *map, fmode_t fmode); int (*bpf_prog)(struct bpf_prog *prog); int (*bpf_map_alloc_security)(struct bpf_map *map); void (*bpf_map_free_security)(struct bpf_map *map); int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux); void (*bpf_prog_free_security)(struct bpf_prog_aux *aux); #endif /* CONFIG_BPF_SYSCALL */ int (*locked_down)(enum lockdown_reason what); int (*lock_kernel_down)(const char *where, enum lockdown_reason level); }; struct security_hook_heads { struct hlist_head binder_set_context_mgr; struct hlist_head binder_transaction; struct hlist_head binder_transfer_binder; struct hlist_head binder_transfer_file; struct hlist_head ptrace_access_check; struct hlist_head ptrace_traceme; struct hlist_head capget; struct hlist_head capset; struct hlist_head capable; struct hlist_head quotactl; struct hlist_head quota_on; struct hlist_head syslog; struct hlist_head settime; struct hlist_head vm_enough_memory; struct hlist_head bprm_set_creds; struct hlist_head bprm_check_security; struct hlist_head bprm_committing_creds; struct hlist_head bprm_committed_creds; struct hlist_head fs_context_dup; struct hlist_head fs_context_parse_param; struct hlist_head sb_alloc_security; struct hlist_head sb_free_security; struct hlist_head sb_free_mnt_opts; struct hlist_head sb_eat_lsm_opts; struct hlist_head sb_remount; struct hlist_head sb_kern_mount; struct hlist_head sb_show_options; struct hlist_head sb_statfs; struct hlist_head sb_mount; struct hlist_head sb_umount; struct hlist_head sb_pivotroot; struct hlist_head sb_set_mnt_opts; struct hlist_head sb_clone_mnt_opts; struct hlist_head sb_add_mnt_opt; struct hlist_head move_mount; struct hlist_head dentry_init_security; struct hlist_head dentry_create_files_as; #ifdef CONFIG_SECURITY_PATH struct hlist_head path_unlink; struct hlist_head path_mkdir; struct hlist_head path_rmdir; struct hlist_head path_mknod; struct hlist_head 
path_truncate; struct hlist_head path_symlink; struct hlist_head path_link; struct hlist_head path_rename; struct hlist_head path_chmod; struct hlist_head path_chown; struct hlist_head path_chroot; #endif /* Needed for inode based modules as well */ struct hlist_head path_notify; struct hlist_head inode_alloc_security; struct hlist_head inode_free_security; struct hlist_head inode_init_security; struct hlist_head inode_create; struct hlist_head inode_link; struct hlist_head inode_unlink; struct hlist_head inode_symlink; struct hlist_head inode_mkdir; struct hlist_head inode_rmdir; struct hlist_head inode_mknod; struct hlist_head inode_rename; struct hlist_head inode_readlink; struct hlist_head inode_follow_link; struct hlist_head inode_permission; struct hlist_head inode_setattr; struct hlist_head inode_getattr; struct hlist_head inode_setxattr; struct hlist_head inode_post_setxattr; struct hlist_head inode_getxattr; struct hlist_head inode_listxattr; struct hlist_head inode_removexattr; struct hlist_head inode_need_killpriv; struct hlist_head inode_killpriv; struct hlist_head inode_getsecurity; struct hlist_head inode_setsecurity; struct hlist_head inode_listsecurity; struct hlist_head inode_getsecid; struct hlist_head inode_copy_up; struct hlist_head inode_copy_up_xattr; struct hlist_head kernfs_init_security; struct hlist_head file_permission; struct hlist_head file_alloc_security; struct hlist_head file_free_security; struct hlist_head file_ioctl; struct hlist_head file_ioctl_compat; struct hlist_head mmap_addr; struct hlist_head mmap_file; struct hlist_head file_mprotect; struct hlist_head file_lock; struct hlist_head file_fcntl; struct hlist_head file_set_fowner; struct hlist_head file_send_sigiotask; struct hlist_head file_receive; struct hlist_head file_open; struct hlist_head task_alloc; struct hlist_head task_free; struct hlist_head cred_alloc_blank; struct hlist_head cred_free; struct hlist_head cred_prepare; struct hlist_head cred_transfer; struct hlist_head cred_getsecid; struct hlist_head kernel_act_as; struct hlist_head kernel_create_files_as; struct hlist_head kernel_load_data; struct hlist_head kernel_read_file; struct hlist_head kernel_post_read_file; struct hlist_head kernel_module_request; struct hlist_head task_fix_setuid; struct hlist_head task_setpgid; struct hlist_head task_getpgid; struct hlist_head task_getsid; struct hlist_head task_getsecid; struct hlist_head task_setnice; struct hlist_head task_setioprio; struct hlist_head task_getioprio; struct hlist_head task_prlimit; struct hlist_head task_setrlimit; struct hlist_head task_setscheduler; struct hlist_head task_getscheduler; struct hlist_head task_movememory; struct hlist_head task_kill; struct hlist_head task_prctl; struct hlist_head task_to_inode; struct hlist_head ipc_permission; struct hlist_head ipc_getsecid; struct hlist_head msg_msg_alloc_security; struct hlist_head msg_msg_free_security; struct hlist_head msg_queue_alloc_security; struct hlist_head msg_queue_free_security; struct hlist_head msg_queue_associate; struct hlist_head msg_queue_msgctl; struct hlist_head msg_queue_msgsnd; struct hlist_head msg_queue_msgrcv; struct hlist_head shm_alloc_security; struct hlist_head shm_free_security; struct hlist_head shm_associate; struct hlist_head shm_shmctl; struct hlist_head shm_shmat; struct hlist_head sem_alloc_security; struct hlist_head sem_free_security; struct hlist_head sem_associate; struct hlist_head sem_semctl; struct hlist_head sem_semop; struct hlist_head netlink_send; struct hlist_head 
d_instantiate; struct hlist_head getprocattr; struct hlist_head setprocattr; struct hlist_head ismaclabel; struct hlist_head secid_to_secctx; struct hlist_head secctx_to_secid; struct hlist_head release_secctx; struct hlist_head inode_invalidate_secctx; struct hlist_head inode_notifysecctx; struct hlist_head inode_setsecctx; struct hlist_head inode_getsecctx; #ifdef CONFIG_SECURITY_NETWORK struct hlist_head unix_stream_connect; struct hlist_head unix_may_send; struct hlist_head socket_create; struct hlist_head socket_post_create; struct hlist_head socket_socketpair; struct hlist_head socket_bind; struct hlist_head socket_connect; struct hlist_head socket_listen; struct hlist_head socket_accept; struct hlist_head socket_sendmsg; struct hlist_head socket_recvmsg; struct hlist_head socket_getsockname; struct hlist_head socket_getpeername; struct hlist_head socket_getsockopt; struct hlist_head socket_setsockopt; struct hlist_head socket_shutdown; struct hlist_head socket_sock_rcv_skb; struct hlist_head socket_getpeersec_stream; struct hlist_head socket_getpeersec_dgram; struct hlist_head sk_alloc_security; struct hlist_head sk_free_security; struct hlist_head sk_clone_security; struct hlist_head sk_getsecid; struct hlist_head sock_graft; struct hlist_head inet_conn_request; struct hlist_head inet_csk_clone; struct hlist_head inet_conn_established; struct hlist_head secmark_relabel_packet; struct hlist_head secmark_refcount_inc; struct hlist_head secmark_refcount_dec; struct hlist_head req_classify_flow; struct hlist_head tun_dev_alloc_security; struct hlist_head tun_dev_free_security; struct hlist_head tun_dev_create; struct hlist_head tun_dev_attach_queue; struct hlist_head tun_dev_attach; struct hlist_head tun_dev_open; struct hlist_head sctp_assoc_request; struct hlist_head sctp_bind_connect; struct hlist_head sctp_sk_clone; #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND struct hlist_head ib_pkey_access; struct hlist_head ib_endport_manage_subnet; struct hlist_head ib_alloc_security; struct hlist_head ib_free_security; #endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM struct hlist_head xfrm_policy_alloc_security; struct hlist_head xfrm_policy_clone_security; struct hlist_head xfrm_policy_free_security; struct hlist_head xfrm_policy_delete_security; struct hlist_head xfrm_state_alloc; struct hlist_head xfrm_state_alloc_acquire; struct hlist_head xfrm_state_free_security; struct hlist_head xfrm_state_delete_security; struct hlist_head xfrm_policy_lookup; struct hlist_head xfrm_state_pol_flow_match; struct hlist_head xfrm_decode_session; #endif /* CONFIG_SECURITY_NETWORK_XFRM */ #ifdef CONFIG_KEYS struct hlist_head key_alloc; struct hlist_head key_free; struct hlist_head key_permission; struct hlist_head key_getsecurity; #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT struct hlist_head audit_rule_init; struct hlist_head audit_rule_known; struct hlist_head audit_rule_match; struct hlist_head audit_rule_free; #endif /* CONFIG_AUDIT */ #ifdef CONFIG_BPF_SYSCALL struct hlist_head bpf; struct hlist_head bpf_map; struct hlist_head bpf_prog; struct hlist_head bpf_map_alloc_security; struct hlist_head bpf_map_free_security; struct hlist_head bpf_prog_alloc_security; struct hlist_head bpf_prog_free_security; #endif /* CONFIG_BPF_SYSCALL */ struct hlist_head locked_down; struct hlist_head lock_kernel_down; } __randomize_layout; /* * Security module hook list structure. * For use with generic list macros for common operations. 
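 *
 * As a sketch only (the hook array and function names below are
 * hypothetical), a module typically declares an array of these with
 * LSM_HOOK_INIT() (defined below) and registers it from its init
 * function via security_add_hooks():
 *
 *	static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
 *		LSM_HOOK_INIT(ptrace_traceme, example_ptrace_traceme),
 *		LSM_HOOK_INIT(bpf, example_bpf),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
 *				   "example");
 *		return 0;
 *	}
 *
 *	DEFINE_LSM(example) = {
 *		.name = "example",
 *		.init = example_init,
 *	};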
*/ struct security_hook_list { struct hlist_node list; struct hlist_head *head; union security_list_options hook; char *lsm; } __randomize_layout; /* * The set of hooks that may be selected for a specific module. */ struct lsm_one_hooks { char *lsm; union security_list_options secid_to_secctx; union security_list_options secctx_to_secid; union security_list_options socket_getpeersec_stream; }; /* * Security blob size or offset data. */ struct lsm_blob_sizes { int lbs_cred; int lbs_file; int lbs_inode; int lbs_sock; int lbs_ipc; int lbs_msg_msg; int lbs_task; }; /* * Initializing a security_hook_list structure takes * up a lot of space in a source file. This macro takes * care of the common case and reduces the amount of * text involved. */ #define LSM_HOOK_INIT(HEAD, HOOK) \ { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } } extern struct security_hook_heads security_hook_heads; extern char *lsm_names; extern void security_add_hooks(struct security_hook_list *hooks, int count, char *lsm); #define LSM_FLAG_LEGACY_MAJOR BIT(0) #define LSM_FLAG_EXCLUSIVE BIT(1) enum lsm_order { LSM_ORDER_FIRST = -1, /* This is only for capabilities. */ LSM_ORDER_MUTABLE = 0, }; struct lsm_info { const char *name; /* Required. */ enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */ unsigned long flags; /* Optional: flags describing LSM */ int *enabled; /* Optional: controlled by CONFIG_LSM */ int (*init)(void); /* Required. */ struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */ }; extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[]; #define DEFINE_LSM(lsm) \ static struct lsm_info __lsm_##lsm \ __used __section(.lsm_info.init) \ __aligned(sizeof(unsigned long)) #define DEFINE_EARLY_LSM(lsm) \ static struct lsm_info __early_lsm_##lsm \ __used __section(.early_lsm_info.init) \ __aligned(sizeof(unsigned long)) #ifdef CONFIG_SECURITY_SELINUX_DISABLE /* * Assuring the safety of deleting a security module is up to * the security module involved. This may entail ordering the * module's hook list in a particular way, refusing to disable * the module once a policy is loaded or any number of other * actions better imagined than described. * * The name of the configuration option reflects the only module * that currently uses the mechanism. Any developer who thinks * disabling their module is a good idea needs to be at least as * careful as the SELinux team. */ static inline void security_delete_hooks(struct security_hook_list *hooks, int count) { int i; for (i = 0; i < count; i++) hlist_del_rcu(&hooks[i].list); } #endif /* CONFIG_SECURITY_SELINUX_DISABLE */ /* Currently required to handle SELinux runtime hook disable. */ #ifdef CONFIG_SECURITY_WRITABLE_HOOKS #define __lsm_ro_after_init #else #define __lsm_ro_after_init __ro_after_init #endif /* CONFIG_SECURITY_WRITABLE_HOOKS */ extern int lsm_inode_alloc(struct inode *inode); #endif /* ! __LINUX_LSM_HOOKS_H */ pda_power.h 0000644 00000001755 14722070374 0006713 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Common power driver for PDAs and phones with one or two external * power supplies (AC/USB) connected to main and backup batteries, * and optional builtin charger. 
* * Copyright © 2007 Anton Vorontsov <cbou@mail.ru> */ #ifndef __PDA_POWER_H__ #define __PDA_POWER_H__ #define PDA_POWER_CHARGE_AC (1 << 0) #define PDA_POWER_CHARGE_USB (1 << 1) struct device; struct pda_power_pdata { int (*init)(struct device *dev); int (*is_ac_online)(void); int (*is_usb_online)(void); void (*set_charge)(int flags); void (*exit)(struct device *dev); int (*suspend)(pm_message_t state); int (*resume)(void); char **supplied_to; size_t num_supplicants; unsigned int wait_for_status; /* msecs, default is 500 */ unsigned int wait_for_charger; /* msecs, default is 500 */ unsigned int polling_interval; /* msecs, default is 2000 */ unsigned long ac_max_uA; /* current to draw when on AC */ bool use_otg_notifier; }; #endif /* __PDA_POWER_H__ */ regset.h 0000644 00000035651 14722070374 0006226 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * User-mode machine state access * * Copyright (C) 2007 Red Hat, Inc. All rights reserved. * * Red Hat Author: Roland McGrath. */ #ifndef _LINUX_REGSET_H #define _LINUX_REGSET_H 1 #include <linux/compiler.h> #include <linux/types.h> #include <linux/bug.h> #include <linux/uaccess.h> struct task_struct; struct user_regset; /** * user_regset_active_fn - type of @active function in &struct user_regset * @target: thread being examined * @regset: regset being examined * * Return -%ENODEV if not available on the hardware found. * Return %0 if no interesting state in this thread. * Return >%0 number of @size units of interesting state. * Any get call fetching state beyond that number will * see the default initialization state for this data, * so a caller that knows what the default state is need * not copy it all out. * This call is optional; the pointer is %NULL if there * is no inexpensive check to yield a value < @n. */ typedef int user_regset_active_fn(struct task_struct *target, const struct user_regset *regset); /** * user_regset_get_fn - type of @get function in &struct user_regset * @target: thread being examined * @regset: regset being examined * @pos: offset into the regset data to access, in bytes * @count: amount of data to copy, in bytes * @kbuf: if not %NULL, a kernel-space pointer to copy into * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into * * Fetch register values. Return %0 on success; -%EIO or -%ENODEV * are usual failure returns. The @pos and @count values are in * bytes, but must be properly aligned. If @kbuf is non-null, that * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then * ubuf gives a userland pointer to access directly, and an -%EFAULT * return value is possible. */ typedef int user_regset_get_fn(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf); /** * user_regset_set_fn - type of @set function in &struct user_regset * @target: thread being examined * @regset: regset being examined * @pos: offset into the regset data to access, in bytes * @count: amount of data to copy, in bytes * @kbuf: if not %NULL, a kernel-space pointer to copy from * @ubuf: if @kbuf is %NULL, a user-space pointer to copy from * * Store register values. Return %0 on success; -%EIO or -%ENODEV * are usual failure returns. The @pos and @count values are in * bytes, but must be properly aligned. If @kbuf is non-null, that * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then * ubuf gives a userland pointer to access directly, and an -%EFAULT * return value is possible. 
*/ typedef int user_regset_set_fn(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf); /** * user_regset_writeback_fn - type of @writeback function in &struct user_regset * @target: thread being examined * @regset: regset being examined * @immediate: zero if writeback at completion of next context switch is OK * * This call is optional; usually the pointer is %NULL. When * provided, there is some user memory associated with this regset's * hardware, such as memory backing cached register data on register * window machines; the regset's data controls what user memory is * used (e.g. via the stack pointer value). * * Write register data back to user memory. If the @immediate flag * is nonzero, it must be written to the user memory so uaccess or * access_process_vm() can see it when this call returns; if zero, * then it must be written back by the time the task completes a * context switch (as synchronized with wait_task_inactive()). * Return %0 on success or if there was nothing to do, -%EFAULT for * a memory problem (bad stack pointer or whatever), or -%EIO for a * hardware problem. */ typedef int user_regset_writeback_fn(struct task_struct *target, const struct user_regset *regset, int immediate); /** * user_regset_get_size_fn - type of @get_size function in &struct user_regset * @target: thread being examined * @regset: regset being examined * * This call is optional; usually the pointer is %NULL. * * When provided, this function must return the current size of regset * data, as observed by the @get function in &struct user_regset. The * value returned must be a multiple of @size. The returned size is * required to be valid only until the next time (if any) @regset is * modified for @target. * * This function is intended for dynamically sized regsets. A regset * that is statically sized does not need to implement it. * * This function should not be called directly: instead, callers should * call regset_size() to determine the current size of a regset. */ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, const struct user_regset *regset); /** * struct user_regset - accessible thread CPU state * @n: Number of slots (registers). * @size: Size in bytes of a slot (register). * @align: Required alignment, in bytes. * @bias: Bias from natural indexing. * @core_note_type: ELF note @n_type value used in core dumps. * @get: Function to fetch values. * @set: Function to store values. * @active: Function to report if regset is active, or %NULL. * @writeback: Function to write data back to user memory, or %NULL. * @get_size: Function to return the regset's size, or %NULL. * * This data structure describes a machine resource we call a register set. * This is part of the state of an individual thread, not necessarily * actual CPU registers per se. A register set consists of a number of * similar slots, given by @n. Each slot is @size bytes, and aligned to * @align bytes (which is at least @size). For dynamically-sized * regsets, @n must contain the maximum possible number of slots for the * regset, and @get_size must point to a function that returns the * current regset size. * * Callers that need to know only the current size of the regset and do * not care about its internal structure should call regset_size() * instead of inspecting @n or calling @get_size. 
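 *
 * As a sketch only (the names are hypothetical; the values follow the
 * common convention for a native general-register set), a regset entry
 * might be declared as:
 *
 *	static const struct user_regset example_regsets[] = {
 *		[0] = {
 *			.core_note_type = NT_PRSTATUS,
 *			.n = ELF_NGREG,
 *			.size = sizeof(long),
 *			.align = sizeof(long),
 *			.get = example_gregs_get,	// a user_regset_get_fn
 *			.set = example_gregs_set,	// a user_regset_set_fn
 *		},
 *	};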
* * For backward compatibility, the @get and @set methods must pad to, or * accept, @n * @size bytes, even if the current regset size is smaller. * The precise semantics of these operations depend on the regset being * accessed. * * The functions to which &struct user_regset members point must be * called only on the current thread or on a thread that is in * %TASK_STOPPED or %TASK_TRACED state, that we are guaranteed will not * be woken up and return to user mode, and that we have called * wait_task_inactive() on. (The target thread always might wake up for * SIGKILL while these functions are working, in which case that * thread's user_regset state might be scrambled.) * * The @pos argument must be aligned according to @align; the @count * argument must be a multiple of @size. These functions are not * responsible for checking for invalid arguments. * * When there is a natural value to use as an index, @bias gives the * difference between the natural index and the slot index for the * register set. For example, x86 GDT segment descriptors form a regset; * the segment selector produces a natural index, but only a subset of * that index space is available as a regset (the TLS slots); subtracting * @bias from a segment selector index value computes the regset slot. * * If nonzero, @core_note_type gives the n_type field (NT_* value) * of the core file note in which this regset's data appears. * NT_PRSTATUS is a special case in that the regset data starts at * offsetof(struct elf_prstatus, pr_reg) into the note data; that is * part of the per-machine ELF formats userland knows about. In * other cases, the core file note contains exactly the whole regset * (@n * @size) and nothing else. The core file note is normally * omitted when there is an @active function and it returns zero. */ struct user_regset { user_regset_get_fn *get; user_regset_set_fn *set; user_regset_active_fn *active; user_regset_writeback_fn *writeback; user_regset_get_size_fn *get_size; unsigned int n; unsigned int size; unsigned int align; unsigned int bias; unsigned int core_note_type; }; /** * struct user_regset_view - available regsets * @name: Identifier, e.g. UTS_MACHINE string. * @regsets: Array of @n regsets available in this view. * @n: Number of elements in @regsets. * @e_machine: ELF header @e_machine %EM_* value written in core dumps. * @e_flags: ELF header @e_flags value written in core dumps. * @ei_osabi: ELF header @e_ident[%EI_OSABI] value written in core dumps. * * A regset view is a collection of regsets (&struct user_regset, * above). This describes all the state of a thread that can be seen * from a given architecture/ABI environment. More than one view might * refer to the same &struct user_regset, or more than one regset * might refer to the same machine-specific state in the thread. For * example, a 32-bit thread's state could be examined from the 32-bit * view or from the 64-bit view. Either method reaches the same thread * register state, doing appropriate widening or truncation. */ struct user_regset_view { const char *name; const struct user_regset *regsets; unsigned int n; u32 e_flags; u16 e_machine; u8 ei_osabi; }; /* * This is documented here rather than at the definition sites because its * implementation is machine-dependent but its interface is universal. */ /** * task_user_regset_view - Return the process's native regset view. * @tsk: a thread of the process in question * * Return the &struct user_regset_view that is native for the given process. 
* For example, what it would access when it called ptrace(). * Throughout the life of the process, this only changes at exec. */ const struct user_regset_view *task_user_regset_view(struct task_struct *tsk); /* * These are helpers for writing regset get/set functions in arch code. * Because @start_pos and @end_pos are always compile-time constants, * these are inlined into very little code though they look large. * * Use one or more calls sequentially for each chunk of regset data stored * contiguously in memory. Call with constants for @start_pos and @end_pos, * giving the range of byte positions in the regset that data corresponds * to; @end_pos can be -1 if this chunk is at the end of the regset layout. * Each call updates the arguments to point past its chunk. */ static inline int user_regset_copyout(unsigned int *pos, unsigned int *count, void **kbuf, void __user **ubuf, const void *data, const int start_pos, const int end_pos) { if (*count == 0) return 0; BUG_ON(*pos < start_pos); if (end_pos < 0 || *pos < end_pos) { unsigned int copy = (end_pos < 0 ? *count : min(*count, end_pos - *pos)); data += *pos - start_pos; if (*kbuf) { memcpy(*kbuf, data, copy); *kbuf += copy; } else if (__copy_to_user(*ubuf, data, copy)) return -EFAULT; else *ubuf += copy; *pos += copy; *count -= copy; } return 0; } static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, const void **kbuf, const void __user **ubuf, void *data, const int start_pos, const int end_pos) { if (*count == 0) return 0; BUG_ON(*pos < start_pos); if (end_pos < 0 || *pos < end_pos) { unsigned int copy = (end_pos < 0 ? *count : min(*count, end_pos - *pos)); data += *pos - start_pos; if (*kbuf) { memcpy(data, *kbuf, copy); *kbuf += copy; } else if (__copy_from_user(data, *ubuf, copy)) return -EFAULT; else *ubuf += copy; *pos += copy; *count -= copy; } return 0; } /* * These two parallel the two above, but for portions of a regset layout * that always read as all-zero or for which writes are ignored. */ static inline int user_regset_copyout_zero(unsigned int *pos, unsigned int *count, void **kbuf, void __user **ubuf, const int start_pos, const int end_pos) { if (*count == 0) return 0; BUG_ON(*pos < start_pos); if (end_pos < 0 || *pos < end_pos) { unsigned int copy = (end_pos < 0 ? *count : min(*count, end_pos - *pos)); if (*kbuf) { memset(*kbuf, 0, copy); *kbuf += copy; } else if (__clear_user(*ubuf, copy)) return -EFAULT; else *ubuf += copy; *pos += copy; *count -= copy; } return 0; } static inline int user_regset_copyin_ignore(unsigned int *pos, unsigned int *count, const void **kbuf, const void __user **ubuf, const int start_pos, const int end_pos) { if (*count == 0) return 0; BUG_ON(*pos < start_pos); if (end_pos < 0 || *pos < end_pos) { unsigned int copy = (end_pos < 0 ? 
*count : min(*count, end_pos - *pos)); if (*kbuf) *kbuf += copy; else *ubuf += copy; *pos += copy; *count -= copy; } return 0; } /** * copy_regset_to_user - fetch a thread's user_regset data into user memory * @target: thread to be examined * @view: &struct user_regset_view describing user thread machine state * @setno: index in @view->regsets * @offset: offset into the regset data, in bytes * @size: amount of data to copy, in bytes * @data: user-mode pointer to copy into */ static inline int copy_regset_to_user(struct task_struct *target, const struct user_regset_view *view, unsigned int setno, unsigned int offset, unsigned int size, void __user *data) { const struct user_regset *regset = &view->regsets[setno]; if (!regset->get) return -EOPNOTSUPP; if (!access_ok(data, size)) return -EFAULT; return regset->get(target, regset, offset, size, NULL, data); } /** * copy_regset_from_user - store into thread's user_regset data from user memory * @target: thread to be examined * @view: &struct user_regset_view describing user thread machine state * @setno: index in @view->regsets * @offset: offset into the regset data, in bytes * @size: amount of data to copy, in bytes * @data: user-mode pointer to copy from */ static inline int copy_regset_from_user(struct task_struct *target, const struct user_regset_view *view, unsigned int setno, unsigned int offset, unsigned int size, const void __user *data) { const struct user_regset *regset = &view->regsets[setno]; if (!regset->set) return -EOPNOTSUPP; if (!access_ok(data, size)) return -EFAULT; return regset->set(target, regset, offset, size, NULL, data); } /** * regset_size - determine the current size of a regset * @target: thread to be examined * @regset: regset to be examined * * Note that the returned size is valid only until the next time * (if any) @regset is modified for @target. */ static inline unsigned int regset_size(struct task_struct *target, const struct user_regset *regset) { if (!regset->get_size) return regset->n * regset->size; else return regset->get_size(target, regset); } #endif /* <linux/regset.h> */ nsproxy.h 0000644 00000004747 14722070374 0006461 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NSPROXY_H #define _LINUX_NSPROXY_H #include <linux/spinlock.h> #include <linux/sched.h> struct mnt_namespace; struct uts_namespace; struct ipc_namespace; struct pid_namespace; struct cgroup_namespace; struct fs_struct; /* * A structure to contain pointers to all per-process * namespaces - fs (mount), uts, network, sysvipc, etc. * * The pid namespace is an exception -- it's accessed using * task_active_pid_ns. The pid namespace here is the * namespace that children will use. * * 'count' is the number of tasks holding a reference. * The count for each namespace, then, will be the number * of nsproxies pointing to it, not the number of tasks. * * The nsproxy is shared by tasks which share all namespaces. * As soon as a single namespace is cloned or unshared, the * nsproxy is copied. */ struct nsproxy { atomic_t count; struct uts_namespace *uts_ns; struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns_for_children; struct net *net_ns; struct cgroup_namespace *cgroup_ns; }; extern struct nsproxy init_nsproxy; /* * the namespaces access rules are: * * 1. only current task is allowed to change tsk->nsproxy pointer or * any pointer on the nsproxy itself. Current must hold the task_lock * when changing tsk->nsproxy. * * 2. when accessing (i.e. 
reading) current task's namespaces - no * precautions should be taken - just dereference the pointers * * 3. the access to other task namespaces is performed like this * task_lock(task); * nsproxy = task->nsproxy; * if (nsproxy != NULL) { * / * * * work with the namespaces here * * e.g. get the reference on one of them * * / * } / * * * NULL task->nsproxy means that this task is * * almost dead (zombie) * * / * task_unlock(task); * */ int copy_namespaces(unsigned long flags, struct task_struct *tsk); void exit_task_namespaces(struct task_struct *tsk); void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new); void free_nsproxy(struct nsproxy *ns); int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **, struct cred *, struct fs_struct *); int __init nsproxy_cache_init(void); static inline void put_nsproxy(struct nsproxy *ns) { if (atomic_dec_and_test(&ns->count)) { free_nsproxy(ns); } } static inline void get_nsproxy(struct nsproxy *ns) { atomic_inc(&ns->count); } #endif cred.h 0000644 00000030541 14722070374 0005643 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Credentials management - see Documentation/security/credentials.rst * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_CRED_H #define _LINUX_CRED_H #include <linux/capability.h> #include <linux/init.h> #include <linux/key.h> #include <linux/atomic.h> #include <linux/uidgid.h> #include <linux/sched.h> #include <linux/sched/user.h> struct cred; struct inode; /* * COW Supplementary groups list */ struct group_info { atomic_t usage; int ngroups; kgid_t gid[0]; } __randomize_layout; /** * get_group_info - Get a reference to a group info structure * @group_info: The group info to reference * * This gets a reference to a set of supplementary groups. * * If the caller is accessing a task's credentials, they must hold the RCU read * lock when reading. */ static inline struct group_info *get_group_info(struct group_info *gi) { atomic_inc(&gi->usage); return gi; } /** * put_group_info - Release a reference to a group info structure * @group_info: The group info to release */ #define put_group_info(group_info) \ do { \ if (atomic_dec_and_test(&(group_info)->usage)) \ groups_free(group_info); \ } while (0) extern struct group_info init_groups; #ifdef CONFIG_MULTIUSER extern struct group_info *groups_alloc(int); extern void groups_free(struct group_info *); extern int in_group_p(kgid_t); extern int in_egroup_p(kgid_t); extern int groups_search(const struct group_info *, kgid_t); extern int set_current_groups(struct group_info *); extern void set_groups(struct cred *, struct group_info *); extern bool may_setgroups(void); extern void groups_sort(struct group_info *); #else static inline void groups_free(struct group_info *group_info) { } static inline int in_group_p(kgid_t grp) { return 1; } static inline int in_egroup_p(kgid_t grp) { return 1; } static inline int groups_search(const struct group_info *group_info, kgid_t grp) { return 1; } #endif /* * The security context of a task * * The parts of the context break down into two categories: * * (1) The objective context of a task. These parts are used when some other * task is attempting to affect this one. * * (2) The subjective context. These details are used when the task is acting * upon another object, be that a file, a task, a key or whatever. * * Note that some members of this structure belong to both categories - the * LSM security pointer for instance. 
* * A task has two security pointers. task->real_cred points to the objective * context that defines that task's actual details. The objective part of this * context is used whenever that task is acted upon. * * task->cred points to the subjective context that defines the details of how * that task is going to act upon another object. This may be overridden * temporarily to point to another security context, but normally points to the * same context as task->real_cred. */ struct cred { atomic_long_t usage; #ifdef CONFIG_DEBUG_CREDENTIALS atomic_t subscribers; /* number of processes subscribed */ void *put_addr; unsigned magic; #define CRED_MAGIC 0x43736564 #define CRED_MAGIC_DEAD 0x44656144 #endif kuid_t uid; /* real UID of the task */ kgid_t gid; /* real GID of the task */ kuid_t suid; /* saved UID of the task */ kgid_t sgid; /* saved GID of the task */ kuid_t euid; /* effective UID of the task */ kgid_t egid; /* effective GID of the task */ kuid_t fsuid; /* UID for VFS ops */ kgid_t fsgid; /* GID for VFS ops */ unsigned securebits; /* SUID-less security management */ kernel_cap_t cap_inheritable; /* caps our children can inherit */ kernel_cap_t cap_permitted; /* caps we're permitted */ kernel_cap_t cap_effective; /* caps we can actually use */ kernel_cap_t cap_bset; /* capability bounding set */ kernel_cap_t cap_ambient; /* Ambient capability set */ #ifdef CONFIG_KEYS unsigned char jit_keyring; /* default keyring to attach requested * keys to */ struct key *session_keyring; /* keyring inherited over fork */ struct key *process_keyring; /* keyring private to this process */ struct key *thread_keyring; /* keyring private to this thread */ struct key *request_key_auth; /* assumed request_key authority */ #endif #ifdef CONFIG_SECURITY void *security; /* subjective LSM security */ #endif struct user_struct *user; /* real user ID subscription */ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ struct group_info *group_info; /* supplementary groups for euid/fsgid */ /* RCU deletion */ union { int non_rcu; /* Can we skip RCU deletion? 
*/ struct rcu_head rcu; /* RCU deletion hook */ }; } __randomize_layout; extern void __put_cred(struct cred *); extern void exit_creds(struct task_struct *); extern int copy_creds(struct task_struct *, unsigned long); extern const struct cred *get_task_cred(struct task_struct *); extern struct cred *cred_alloc_blank(void); extern struct cred *prepare_creds(void); extern struct cred *prepare_exec_creds(void); extern int commit_creds(struct cred *); extern void abort_creds(struct cred *); extern const struct cred *override_creds(const struct cred *); extern void revert_creds(const struct cred *); extern struct cred *prepare_kernel_cred(struct task_struct *); extern int change_create_files_as(struct cred *, struct inode *); extern int set_security_override(struct cred *, u32); extern int set_security_override_from_ctx(struct cred *, const char *); extern int set_create_files_as(struct cred *, struct inode *); extern int cred_fscmp(const struct cred *, const struct cred *); extern void __init cred_init(void); /* * check for validity of credentials */ #ifdef CONFIG_DEBUG_CREDENTIALS extern void __invalid_creds(const struct cred *, const char *, unsigned); extern void __validate_process_creds(struct task_struct *, const char *, unsigned); extern bool creds_are_invalid(const struct cred *cred); static inline void __validate_creds(const struct cred *cred, const char *file, unsigned line) { if (unlikely(creds_are_invalid(cred))) __invalid_creds(cred, file, line); } #define validate_creds(cred) \ do { \ __validate_creds((cred), __FILE__, __LINE__); \ } while(0) #define validate_process_creds() \ do { \ __validate_process_creds(current, __FILE__, __LINE__); \ } while(0) extern void validate_creds_for_do_exit(struct task_struct *); #else static inline void validate_creds(const struct cred *cred) { } static inline void validate_creds_for_do_exit(struct task_struct *tsk) { } static inline void validate_process_creds(void) { } #endif static inline bool cap_ambient_invariant_ok(const struct cred *cred) { return cap_issubset(cred->cap_ambient, cap_intersect(cred->cap_permitted, cred->cap_inheritable)); } /** * get_new_cred - Get a reference on a new set of credentials * @cred: The new credentials to reference * * Get a reference on the specified set of new credentials. The caller must * release the reference. */ static inline struct cred *get_new_cred(struct cred *cred) { atomic_long_inc(&cred->usage); return cred; } /** * get_cred - Get a reference on a set of credentials * @cred: The credentials to reference * * Get a reference on the specified set of credentials. The caller must * release the reference. If %NULL is passed, it is returned with no action. * * This is used to deal with a committed set of credentials. Although the * pointer is const, this will temporarily discard the const and increment the * usage count. The purpose of this is to attempt to catch at compile time the * accidental alteration of a set of credentials that should be considered * immutable. 
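 *
 * A minimal usage sketch: pin the current credentials, use them, then
 * drop the reference with put_cred() (defined below); foo_use_creds() is
 * a hypothetical consumer:
 *
 *	const struct cred *cred = get_cred(current_cred());
 *
 *	foo_use_creds(cred);
 *	put_cred(cred);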
*/ static inline const struct cred *get_cred(const struct cred *cred) { struct cred *nonconst_cred = (struct cred *) cred; if (!cred) return cred; validate_creds(cred); nonconst_cred->non_rcu = 0; return get_new_cred(nonconst_cred); } static inline const struct cred *get_cred_rcu(const struct cred *cred) { struct cred *nonconst_cred = (struct cred *) cred; if (!cred) return NULL; if (!atomic_long_inc_not_zero(&nonconst_cred->usage)) return NULL; validate_creds(cred); nonconst_cred->non_rcu = 0; return cred; } /** * put_cred - Release a reference to a set of credentials * @cred: The credentials to release * * Release a reference to a set of credentials, deleting them when the last ref * is released. If %NULL is passed, nothing is done. * * This takes a const pointer to a set of credentials because the credentials * on task_struct are attached by const pointers to prevent accidental * alteration of otherwise immutable credential sets. */ static inline void put_cred(const struct cred *_cred) { struct cred *cred = (struct cred *) _cred; if (cred) { validate_creds(cred); if (atomic_long_dec_and_test(&(cred)->usage)) __put_cred(cred); } } /** * current_cred - Access the current task's subjective credentials * * Access the subjective credentials of the current task. RCU-safe, * since nobody else can modify it. */ #define current_cred() \ rcu_dereference_protected(current->cred, 1) /** * current_real_cred - Access the current task's objective credentials * * Access the objective credentials of the current task. RCU-safe, * since nobody else can modify it. */ #define current_real_cred() \ rcu_dereference_protected(current->real_cred, 1) /** * __task_cred - Access a task's objective credentials * @task: The task to query * * Access the objective credentials of a task. The caller must hold the RCU * readlock. * * The result of this function should not be passed directly to get_cred(); * rather get_task_cred() should be used instead. */ #define __task_cred(task) \ rcu_dereference((task)->real_cred) /** * get_current_cred - Get the current task's subjective credentials * * Get the subjective credentials of the current task, pinning them so that * they can't go away. Accessing the current task's credentials directly is * not permitted. */ #define get_current_cred() \ (get_cred(current_cred())) /** * get_current_user - Get the current task's user_struct * * Get the user record of the current task, pinning it so that it can't go * away. */ #define get_current_user() \ ({ \ struct user_struct *__u; \ const struct cred *__cred; \ __cred = current_cred(); \ __u = get_uid(__cred->user); \ __u; \ }) /** * get_current_groups - Get the current task's supplementary group list * * Get the supplementary group list of the current task, pinning it so that it * can't go away. 
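 *
 * A short usage sketch; the reference must be dropped with
 * put_group_info() (defined above) when the caller is done, and
 * foo_examine_gid() is a hypothetical consumer:
 *
 *	struct group_info *gi = get_current_groups();
 *	int i;
 *
 *	for (i = 0; i < gi->ngroups; i++)
 *		foo_examine_gid(gi->gid[i]);
 *	put_group_info(gi);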
*/ #define get_current_groups() \ ({ \ struct group_info *__groups; \ const struct cred *__cred; \ __cred = current_cred(); \ __groups = get_group_info(__cred->group_info); \ __groups; \ }) #define task_cred_xxx(task, xxx) \ ({ \ __typeof__(((struct cred *)NULL)->xxx) ___val; \ rcu_read_lock(); \ ___val = __task_cred((task))->xxx; \ rcu_read_unlock(); \ ___val; \ }) #define task_uid(task) (task_cred_xxx((task), uid)) #define task_euid(task) (task_cred_xxx((task), euid)) #define current_cred_xxx(xxx) \ ({ \ current_cred()->xxx; \ }) #define current_uid() (current_cred_xxx(uid)) #define current_gid() (current_cred_xxx(gid)) #define current_euid() (current_cred_xxx(euid)) #define current_egid() (current_cred_xxx(egid)) #define current_suid() (current_cred_xxx(suid)) #define current_sgid() (current_cred_xxx(sgid)) #define current_fsuid() (current_cred_xxx(fsuid)) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) extern struct user_namespace init_user_ns; #ifdef CONFIG_USER_NS #define current_user_ns() (current_cred_xxx(user_ns)) #else static inline struct user_namespace *current_user_ns(void) { return &init_user_ns; } #endif #define current_uid_gid(_uid, _gid) \ do { \ const struct cred *__cred; \ __cred = current_cred(); \ *(_uid) = __cred->uid; \ *(_gid) = __cred->gid; \ } while(0) #define current_euid_egid(_euid, _egid) \ do { \ const struct cred *__cred; \ __cred = current_cred(); \ *(_euid) = __cred->euid; \ *(_egid) = __cred->egid; \ } while(0) #define current_fsuid_fsgid(_fsuid, _fsgid) \ do { \ const struct cred *__cred; \ __cred = current_cred(); \ *(_fsuid) = __cred->fsuid; \ *(_fsgid) = __cred->fsgid; \ } while(0) #endif /* _LINUX_CRED_H */ socket.h 0000644 00000033161 14722070374 0006217 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SOCKET_H #define _LINUX_SOCKET_H #include <asm/socket.h> /* arch-dependent defines */ #include <linux/sockios.h> /* the SIOCxxx I/O controls */ #include <linux/uio.h> /* iovec support */ #include <linux/types.h> /* pid_t */ #include <linux/compiler.h> /* __user */ #include <uapi/linux/socket.h> struct pid; struct cred; struct socket; #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) #ifdef CONFIG_PROC_FS struct seq_file; extern void socket_seq_show(struct seq_file *seq); #endif typedef __kernel_sa_family_t sa_family_t; /* * 1003.1g requires sa_family_t and that sa_data is char. */ struct sockaddr { sa_family_t sa_family; /* address family, AF_xxx */ union { char sa_data_min[14]; /* Minimum 14 bytes of protocol address */ DECLARE_FLEX_ARRAY(char, sa_data); }; }; struct linger { int l_onoff; /* Linger active */ int l_linger; /* How long to linger for */ }; #define sockaddr_storage __kernel_sockaddr_storage /* * As we do 4.4BSD message passing we use a 4.4BSD message passing * system, not 4.3. Thus msg_accrights(len) are now missing. They * belong in an obscure libc emulation or the bin. 
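 *
 * As a usage sketch for the control-message side of the msghdr defined
 * next: the receive path typically walks ancillary data with the macros
 * defined further down; msg is an already-filled struct msghdr * and
 * foo_handle_fds() is hypothetical:
 *
 *	struct cmsghdr *cmsg;
 *
 *	for_each_cmsghdr(cmsg, msg) {
 *		if (!CMSG_OK(msg, cmsg))
 *			break;
 *		if (cmsg->cmsg_level == SOL_SOCKET &&
 *		    cmsg->cmsg_type == SCM_RIGHTS)
 *			foo_handle_fds(CMSG_DATA(cmsg), cmsg->cmsg_len);
 *	}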
 */
struct msghdr {
	void		*msg_name;	/* ptr to socket address structure */
	int		msg_namelen;	/* size of socket address structure */
	struct iov_iter	msg_iter;	/* data */
	void		*msg_control;	/* ancillary data */
	__kernel_size_t	msg_controllen;	/* ancillary data buffer length */
	unsigned int	msg_flags;	/* flags on received message */
	struct kiocb	*msg_iocb;	/* ptr to iocb for async requests */
};

struct user_msghdr {
	void		__user *msg_name;	/* ptr to socket address structure */
	int		msg_namelen;		/* size of socket address structure */
	struct iovec	__user *msg_iov;	/* scatter/gather array */
	__kernel_size_t	msg_iovlen;		/* # elements in msg_iov */
	void		__user *msg_control;	/* ancillary data */
	__kernel_size_t	msg_controllen;		/* ancillary data buffer length */
	unsigned int	msg_flags;		/* flags on received message */
};

/* For recvmmsg/sendmmsg */
struct mmsghdr {
	struct user_msghdr msg_hdr;
	unsigned int msg_len;
};

/*
 * POSIX 1003.1g - ancillary data object information
 * Ancillary data consists of a sequence of pairs of
 * (cmsghdr, cmsg_data[])
 */
struct cmsghdr {
	__kernel_size_t	cmsg_len;	/* data byte count, including hdr */
	int		cmsg_level;	/* originating protocol */
	int		cmsg_type;	/* protocol-specific type */
};

/*
 * Ancillary data object information MACROS
 * Table 5-14 of POSIX 1003.1g
 */
#define __CMSG_NXTHDR(ctl, len, cmsg) __cmsg_nxthdr((ctl),(len),(cmsg))
#define CMSG_NXTHDR(mhdr, cmsg) cmsg_nxthdr((mhdr), (cmsg))

#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) )

#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + sizeof(struct cmsghdr)))
#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len))
#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len))

#define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \
				  (struct cmsghdr *)(ctl) : \
				  (struct cmsghdr *)NULL)
#define CMSG_FIRSTHDR(msg)	__CMSG_FIRSTHDR((msg)->msg_control, (msg)->msg_controllen)
#define CMSG_OK(mhdr, cmsg) ((cmsg)->cmsg_len >= sizeof(struct cmsghdr) && \
			     (cmsg)->cmsg_len <= (unsigned long) \
			     ((mhdr)->msg_controllen - \
			      ((char *)(cmsg) - (char *)(mhdr)->msg_control)))
#define for_each_cmsghdr(cmsg, msg) \
	for (cmsg = CMSG_FIRSTHDR(msg); \
	     cmsg; \
	     cmsg = CMSG_NXTHDR(msg, cmsg))

/*
 * Get the next cmsg header
 *
 * PLEASE, do not touch this function. If you think that it is
 * incorrect, grep kernel sources and think about consequences
 * before trying to improve it.
 *
 * Now it always returns a valid, not truncated ancillary object
 * HEADER. But the caller still MUST check that cmsg->cmsg_len is
 * inside the range given by msg->msg_controllen before using the
 * ancillary object DATA. --ANK (980731)
 */
static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
					     struct cmsghdr *__cmsg)
{
	struct cmsghdr * __ptr;

	__ptr = (struct cmsghdr*)(((unsigned char *) __cmsg) + CMSG_ALIGN(__cmsg->cmsg_len));
	if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size)
		return (struct cmsghdr *)0;

	return __ptr;
}

static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
{
	return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}

static inline size_t msg_data_left(struct msghdr *msg)
{
	return iov_iter_count(&msg->msg_iter);
}

/* "Socket"-level control message types: */
#define SCM_RIGHTS	0x01	/* rw: access rights (array of int) */
#define SCM_CREDENTIALS	0x02	/* rw: struct ucred */
#define SCM_SECURITY	0x03	/* rw: security label */

struct ucred {
	__u32	pid;
	__u32	uid;
	__u32	gid;
};

/* Supported address families.
*/ #define AF_UNSPEC 0 #define AF_UNIX 1 /* Unix domain sockets */ #define AF_LOCAL 1 /* POSIX name for AF_UNIX */ #define AF_INET 2 /* Internet IP Protocol */ #define AF_AX25 3 /* Amateur Radio AX.25 */ #define AF_IPX 4 /* Novell IPX */ #define AF_APPLETALK 5 /* AppleTalk DDP */ #define AF_NETROM 6 /* Amateur Radio NET/ROM */ #define AF_BRIDGE 7 /* Multiprotocol bridge */ #define AF_ATMPVC 8 /* ATM PVCs */ #define AF_X25 9 /* Reserved for X.25 project */ #define AF_INET6 10 /* IP version 6 */ #define AF_ROSE 11 /* Amateur Radio X.25 PLP */ #define AF_DECnet 12 /* Reserved for DECnet project */ #define AF_NETBEUI 13 /* Reserved for 802.2LLC project*/ #define AF_SECURITY 14 /* Security callback pseudo AF */ #define AF_KEY 15 /* PF_KEY key management API */ #define AF_NETLINK 16 #define AF_ROUTE AF_NETLINK /* Alias to emulate 4.4BSD */ #define AF_PACKET 17 /* Packet family */ #define AF_ASH 18 /* Ash */ #define AF_ECONET 19 /* Acorn Econet */ #define AF_ATMSVC 20 /* ATM SVCs */ #define AF_RDS 21 /* RDS sockets */ #define AF_SNA 22 /* Linux SNA Project (nutters!) */ #define AF_IRDA 23 /* IRDA sockets */ #define AF_PPPOX 24 /* PPPoX sockets */ #define AF_WANPIPE 25 /* Wanpipe API Sockets */ #define AF_LLC 26 /* Linux LLC */ #define AF_IB 27 /* Native InfiniBand address */ #define AF_MPLS 28 /* MPLS */ #define AF_CAN 29 /* Controller Area Network */ #define AF_TIPC 30 /* TIPC sockets */ #define AF_BLUETOOTH 31 /* Bluetooth sockets */ #define AF_IUCV 32 /* IUCV sockets */ #define AF_RXRPC 33 /* RxRPC sockets */ #define AF_ISDN 34 /* mISDN sockets */ #define AF_PHONET 35 /* Phonet sockets */ #define AF_IEEE802154 36 /* IEEE802154 sockets */ #define AF_CAIF 37 /* CAIF sockets */ #define AF_ALG 38 /* Algorithm sockets */ #define AF_NFC 39 /* NFC sockets */ #define AF_VSOCK 40 /* vSockets */ #define AF_KCM 41 /* Kernel Connection Multiplexor*/ #define AF_QIPCRTR 42 /* Qualcomm IPC Router */ #define AF_SMC 43 /* smc sockets: reserve number for * PF_SMC protocol family that * reuses AF_INET address family */ #define AF_XDP 44 /* XDP sockets */ #define AF_MAX 45 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC #define PF_UNIX AF_UNIX #define PF_LOCAL AF_LOCAL #define PF_INET AF_INET #define PF_AX25 AF_AX25 #define PF_IPX AF_IPX #define PF_APPLETALK AF_APPLETALK #define PF_NETROM AF_NETROM #define PF_BRIDGE AF_BRIDGE #define PF_ATMPVC AF_ATMPVC #define PF_X25 AF_X25 #define PF_INET6 AF_INET6 #define PF_ROSE AF_ROSE #define PF_DECnet AF_DECnet #define PF_NETBEUI AF_NETBEUI #define PF_SECURITY AF_SECURITY #define PF_KEY AF_KEY #define PF_NETLINK AF_NETLINK #define PF_ROUTE AF_ROUTE #define PF_PACKET AF_PACKET #define PF_ASH AF_ASH #define PF_ECONET AF_ECONET #define PF_ATMSVC AF_ATMSVC #define PF_RDS AF_RDS #define PF_SNA AF_SNA #define PF_IRDA AF_IRDA #define PF_PPPOX AF_PPPOX #define PF_WANPIPE AF_WANPIPE #define PF_LLC AF_LLC #define PF_IB AF_IB #define PF_MPLS AF_MPLS #define PF_CAN AF_CAN #define PF_TIPC AF_TIPC #define PF_BLUETOOTH AF_BLUETOOTH #define PF_IUCV AF_IUCV #define PF_RXRPC AF_RXRPC #define PF_ISDN AF_ISDN #define PF_PHONET AF_PHONET #define PF_IEEE802154 AF_IEEE802154 #define PF_CAIF AF_CAIF #define PF_ALG AF_ALG #define PF_NFC AF_NFC #define PF_VSOCK AF_VSOCK #define PF_KCM AF_KCM #define PF_QIPCRTR AF_QIPCRTR #define PF_SMC AF_SMC #define PF_XDP AF_XDP #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. */ #define SOMAXCONN 4096 /* Flags we can use with send/ and recv. 
Added those for 1003.1g; not all are supported yet */
#define MSG_OOB		1
#define MSG_PEEK	2
#define MSG_DONTROUTE	4
#define MSG_TRYHARD	4	/* Synonym for MSG_DONTROUTE for DECnet */
#define MSG_CTRUNC	8
#define MSG_PROBE	0x10	/* Do not send. Only probe path f.e. for MTU */
#define MSG_TRUNC	0x20
#define MSG_DONTWAIT	0x40	/* Nonblocking I/O */
#define MSG_EOR		0x80	/* End of record */
#define MSG_WAITALL	0x100	/* Wait for a full request */
#define MSG_FIN		0x200
#define MSG_SYN		0x400
#define MSG_CONFIRM	0x800	/* Confirm path validity */
#define MSG_RST		0x1000
#define MSG_ERRQUEUE	0x2000	/* Fetch message from error queue */
#define MSG_NOSIGNAL	0x4000	/* Do not generate SIGPIPE */
#define MSG_MORE	0x8000	/* Sender will send more */
#define MSG_WAITFORONE	0x10000	/* recvmmsg(): block until 1+ packets avail */
#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do not apply policy */
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH	0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF		MSG_FIN
#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
#define MSG_SENDPAGE_DECRYPTED	0x100000 /* sendpage() internal : page may carry
					  * plain text and require encryption
					  */
#define MSG_ZEROCOPY	0x4000000	/* Use user data in kernel path */
#define MSG_FASTOPEN	0x20000000	/* Send data in TCP SYN */
#define MSG_CMSG_CLOEXEC 0x40000000	/* Set close_on_exec for file
					   descriptor received through
					   SCM_RIGHTS */
#if defined(CONFIG_COMPAT)
#define MSG_CMSG_COMPAT	0x80000000	/* This message needs 32 bit fixups */
#else
#define MSG_CMSG_COMPAT	0		/* We never have 32 bit fixups */
#endif

/* setsockopt(2) level. Thanks to BSD these must match IPPROTO_xxx */
#define SOL_IP		0
/* #define SOL_ICMP	1	No-no-no!
Due to Linux :-) we cannot use SOL_ICMP=1 */ #define SOL_TCP 6 #define SOL_UDP 17 #define SOL_IPV6 41 #define SOL_ICMPV6 58 #define SOL_SCTP 132 #define SOL_UDPLITE 136 /* UDP-Lite (RFC 3828) */ #define SOL_RAW 255 #define SOL_IPX 256 #define SOL_AX25 257 #define SOL_ATALK 258 #define SOL_NETROM 259 #define SOL_ROSE 260 #define SOL_DECNET 261 #define SOL_X25 262 #define SOL_PACKET 263 #define SOL_ATM 264 /* ATM layer (cell level) */ #define SOL_AAL 265 /* ATM Adaption Layer (packet level) */ #define SOL_IRDA 266 #define SOL_NETBEUI 267 #define SOL_LLC 268 #define SOL_DCCP 269 #define SOL_NETLINK 270 #define SOL_TIPC 271 #define SOL_RXRPC 272 #define SOL_PPPOL2TP 273 #define SOL_BLUETOOTH 274 #define SOL_PNPIPE 275 #define SOL_RDS 276 #define SOL_IUCV 277 #define SOL_CAIF 278 #define SOL_ALG 279 #define SOL_NFC 280 #define SOL_KCM 281 #define SOL_TLS 282 #define SOL_XDP 283 /* IPX options */ #define IPX_TYPE 1 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); struct timespec64; struct __kernel_timespec; struct old_timespec32; struct scm_timestamping_internal { struct timespec64 ts[3]; }; extern void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss); extern void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss); /* The __sys_...msg variants allow MSG_CMSG_COMPAT iff * forbid_cmsg_compat==false */ extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned int flags, bool forbid_cmsg_compat); extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned int flags, bool forbid_cmsg_compat); extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct __kernel_timespec __user *timeout, struct old_timespec32 __user *timeout32); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, bool forbid_cmsg_compat); extern long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *msg, unsigned int flags); extern long __sys_recvmsg_sock(struct socket *sock, struct user_msghdr __user *msg, unsigned int flags); /* helpers which do the actual work for syscalls */ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, unsigned int flags, struct sockaddr __user *addr, int __user *addr_len); extern int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags, struct sockaddr __user *addr, int addr_len); extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_socket(int family, int type, int protocol); extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen); extern int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen); extern int __sys_listen(int fd, int backlog); extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len); extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len); extern int __sys_socketpair(int family, int type, int protocol, int __user *usockvec); extern int __sys_shutdown(int fd, int how); #endif /* _LINUX_SOCKET_H */ poison.h 0000644 00000005071 14722070374 0006235 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_POISON_H #define _LINUX_POISON_H /********** include/linux/list.h **********/ /* * Architectures might want to move the 
poison pointer offset * into some well-recognized area such as 0xdead000000000000, * that is also not mappable by user-space exploits: */ #ifdef CONFIG_ILLEGAL_POINTER_VALUE # define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL) #else # define POISON_POINTER_DELTA 0 #endif /* * These are non-NULL pointers that will result in page faults * under normal circumstances, used to verify that nobody uses * non-initialized list entries. */ #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) #define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA) /********** include/linux/timer.h **********/ /* * Magic number "tsta" to indicate a static timer initializer * for the object debugging code. */ #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) /********** mm/page_poison.c **********/ #ifdef CONFIG_PAGE_POISONING_ZERO #define PAGE_POISON 0x00 #else #define PAGE_POISON 0xaa #endif /********** mm/page_alloc.c ************/ #define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA) /********** mm/slab.c **********/ /* * Magic nums for obj red zoning. * Placed in the first word before and the first word after an obj. */ #define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ #define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ #define SLUB_RED_INACTIVE 0xbb #define SLUB_RED_ACTIVE 0xcc /* ...and for poisoning */ #define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ #define POISON_FREE 0x6b /* for use-after-free poisoning */ #define POISON_END 0xa5 /* end-byte of poisoning */ /********** arch/$ARCH/mm/init.c **********/ #define POISON_FREE_INITMEM 0xcc /********** arch/ia64/hp/common/sba_iommu.c **********/ /* * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a * value of "SBAIOMMU POISON\0" for spill-over poisoning. */ /********** fs/jbd/journal.c **********/ #define JBD_POISON_FREE 0x5b #define JBD2_POISON_FREE 0x5c /********** drivers/base/dmapool.c **********/ #define POOL_POISON_FREED 0xa7 /* !inuse */ #define POOL_POISON_ALLOCATED 0xa9 /* !initted */ /********** drivers/atm/ **********/ #define ATM_POISON_FREE 0x12 #define ATM_POISON 0xdeadbeef /********** kernel/mutexes **********/ #define MUTEX_DEBUG_INIT 0x11 #define MUTEX_DEBUG_FREE 0x22 #define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA) /********** security/ **********/ #define KEY_DESTROY 0xbd #endif gpio/machine.h 0000644 00000007323 14722070374 0007272 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GPIO_MACHINE_H #define __LINUX_GPIO_MACHINE_H #include <linux/types.h> #include <linux/list.h> enum gpio_lookup_flags { GPIO_ACTIVE_HIGH = (0 << 0), GPIO_ACTIVE_LOW = (1 << 0), GPIO_OPEN_DRAIN = (1 << 1), GPIO_OPEN_SOURCE = (1 << 2), GPIO_PERSISTENT = (0 << 3), GPIO_TRANSITORY = (1 << 3), GPIO_PULL_UP = (1 << 4), GPIO_PULL_DOWN = (1 << 5), GPIO_LOOKUP_FLAGS_DEFAULT = GPIO_ACTIVE_HIGH | GPIO_PERSISTENT, }; /** * struct gpiod_lookup - lookup table * @chip_label: name of the chip the GPIO belongs to * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO * @con_id: name of the GPIO from the device's point of view * @idx: index of the GPIO in case several GPIOs share the same name * @flags: bitmask of gpio_lookup_flags GPIO_* values * * gpiod_lookup is a lookup table for associating GPIOs to specific devices and * functions using platform data. 
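 *
 * A board file typically fills one such table per device and registers it
 * with gpiod_add_lookup_table(), both defined below; the chip label,
 * dev_id and line number here are hypothetical:
 *
 *	static struct gpiod_lookup_table foo_gpios = {
 *		.dev_id = "foo-device.0",
 *		.table = {
 *			GPIO_LOOKUP("gpiochip0", 12, "reset",
 *				    GPIO_ACTIVE_LOW),
 *			{ },
 *		},
 *	};
 *
 *	gpiod_add_lookup_table(&foo_gpios);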
*/ struct gpiod_lookup { const char *chip_label; u16 chip_hwnum; const char *con_id; unsigned int idx; unsigned long flags; }; struct gpiod_lookup_table { struct list_head list; const char *dev_id; struct gpiod_lookup table[]; }; /** * struct gpiod_hog - GPIO line hog table * @chip_label: name of the chip the GPIO belongs to * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO * @line_name: consumer name for the hogged line * @lflags: bitmask of gpio_lookup_flags GPIO_* values * @dflags: GPIO flags used to specify the direction and value */ struct gpiod_hog { struct list_head list; const char *chip_label; u16 chip_hwnum; const char *line_name; unsigned long lflags; int dflags; }; /* * Simple definition of a single GPIO under a con_id */ #define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \ GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags) /* * Use this macro if you need to have several GPIOs under the same con_id. * Each GPIO needs to use a different index and can be accessed using * gpiod_get_index() */ #define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \ { \ .chip_label = _chip_label, \ .chip_hwnum = _chip_hwnum, \ .con_id = _con_id, \ .idx = _idx, \ .flags = _flags, \ } /* * Simple definition of a single GPIO hog in an array. */ #define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \ { \ .chip_label = _chip_label, \ .chip_hwnum = _chip_hwnum, \ .line_name = _line_name, \ .lflags = _lflags, \ .dflags = _dflags, \ } #ifdef CONFIG_GPIOLIB void gpiod_add_lookup_table(struct gpiod_lookup_table *table); void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n); void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); void gpiod_add_hogs(struct gpiod_hog *hogs); #else /* ! CONFIG_GPIOLIB */ static inline void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} static inline void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {} static inline void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {} #endif /* CONFIG_GPIOLIB */ #endif /* __LINUX_GPIO_MACHINE_H */ gpio/driver.h 0000644 00000056437 14722070374 0007173 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GPIO_DRIVER_H #define __LINUX_GPIO_DRIVER_H #include <linux/device.h> #include <linux/types.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/lockdep.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinconf-generic.h> struct gpio_desc; struct of_phandle_args; struct device_node; struct seq_file; struct gpio_device; struct module; enum gpiod_flags; enum gpio_lookup_flags; struct gpio_chip; /** * struct gpio_irq_chip - GPIO interrupt controller */ struct gpio_irq_chip { /** * @chip: * * GPIO IRQ chip implementation, provided by GPIO driver. */ struct irq_chip *chip; /** * @domain: * * Interrupt translation domain; responsible for mapping between GPIO * hwirq number and Linux IRQ number. */ struct irq_domain *domain; /** * @domain_ops: * * Table of interrupt domain operations for this IRQ chip. */ const struct irq_domain_ops *domain_ops; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /** * @fwnode: * * Firmware node corresponding to this gpiochip/irqchip, necessary * for hierarchical irqdomain support. 
*/ struct fwnode_handle *fwnode; /** * @parent_domain: * * If non-NULL, will be set as the parent of this GPIO interrupt * controller's IRQ domain to establish a hierarchical interrupt * domain. The presence of this will activate the hierarchical * interrupt support. */ struct irq_domain *parent_domain; /** * @child_to_parent_hwirq: * * This callback translates a child hardware IRQ offset to a parent * hardware IRQ offset on a hierarchical interrupt chip. The child * hardware IRQs correspond to the GPIO index 0..ngpio-1 (see the * ngpio field of struct gpio_chip) and the corresponding parent * hardware IRQ and type (such as IRQ_TYPE_*) shall be returned by * the driver. The driver can calculate this from an offset or using * a lookup table or whatever method is best for this chip. Return * 0 on successful translation in the driver. * * If some ranges of hardware IRQs do not have a corresponding parent * HWIRQ, return -EINVAL, but also make sure to fill in @valid_mask and * @need_valid_mask to make these GPIO lines unavailable for * translation. */ int (*child_to_parent_hwirq)(struct gpio_chip *chip, unsigned int child_hwirq, unsigned int child_type, unsigned int *parent_hwirq, unsigned int *parent_type); /** * @populate_parent_fwspec: * * This optional callback populates the &struct irq_fwspec for the * parent's IRQ domain. If this is not specified, then * &gpiochip_populate_parent_fwspec_twocell will be used. A four-cell * variant named &gpiochip_populate_parent_fwspec_fourcell is also * available. */ void (*populate_parent_fwspec)(struct gpio_chip *chip, struct irq_fwspec *fwspec, unsigned int parent_hwirq, unsigned int parent_type); /** * @child_offset_to_irq: * * This optional callback is used to translate the child's GPIO line * offset on the GPIO chip to an IRQ number for the GPIO to_irq() * callback. If this is not specified, then a default callback will be * provided that returns the line offset. */ unsigned int (*child_offset_to_irq)(struct gpio_chip *chip, unsigned int pin); /** * @child_irq_domain_ops: * * The IRQ domain operations that will be used for this GPIO IRQ * chip. If no operations are provided, then default callbacks will * be populated to setup the IRQ hierarchy. Some drivers need to * supply their own translate function. */ struct irq_domain_ops child_irq_domain_ops; #endif /** * @handler: * * The IRQ handler to use (often a predefined IRQ core function) for * GPIO IRQs, provided by GPIO driver. */ irq_flow_handler_t handler; /** * @default_type: * * Default IRQ triggering type applied during GPIO driver * initialization, provided by GPIO driver. */ unsigned int default_type; /** * @lock_key: * * Per GPIO IRQ chip lockdep class for IRQ lock. */ struct lock_class_key *lock_key; /** * @request_key: * * Per GPIO IRQ chip lockdep class for IRQ request. */ struct lock_class_key *request_key; /** * @parent_handler: * * The interrupt handler for the GPIO chip's parent interrupts, may be * NULL if the parent interrupts are nested rather than cascaded. */ irq_flow_handler_t parent_handler; /** * @parent_handler_data: * * Data associated, and passed to, the handler for the parent * interrupt. */ void *parent_handler_data; /** * @num_parents: * * The number of interrupt parents of a GPIO chip. */ unsigned int num_parents; /** * @parents: * * A list of interrupt parents of a GPIO chip. This is owned by the * driver, so the core will only reference this list, not modify it. */ unsigned int *parents; /** * @map: * * A list of interrupt parents for each line of a GPIO chip. 
*/ unsigned int *map; /** * @threaded: * * True if set the interrupt handling uses nested threads. */ bool threaded; /** * @init_hw: optional routine to initialize hardware before * an IRQ chip will be added. This is quite useful when * a particular driver wants to clear IRQ related registers * in order to avoid undesired events. */ int (*init_hw)(struct gpio_chip *chip); /** * @init_valid_mask: optional routine to initialize @valid_mask, to be * used if not all GPIO lines are valid interrupts. Sometimes some * lines just cannot fire interrupts, and this routine, when defined, * is passed a bitmap in "valid_mask" and it will have ngpios * bits from 0..(ngpios-1) set to "1" as in valid. The callback can * then directly set some bits to "0" if they cannot be used for * interrupts. */ void (*init_valid_mask)(struct gpio_chip *chip, unsigned long *valid_mask, unsigned int ngpios); /** * @valid_mask: * * If not %NULL holds bitmask of GPIOs which are valid to be included * in IRQ domain of the chip. */ unsigned long *valid_mask; /** * @first: * * Required for static IRQ allocation. If set, irq_domain_add_simple() * will allocate and map all IRQs during initialization. */ unsigned int first; /** * @irq_enable: * * Store old irq_chip irq_enable callback */ void (*irq_enable)(struct irq_data *data); /** * @irq_disable: * * Store old irq_chip irq_disable callback */ void (*irq_disable)(struct irq_data *data); }; /** * struct gpio_chip - abstract a GPIO controller * @label: a functional name for the GPIO device, such as a part * number or the name of the SoC IP-block implementing it. * @gpiodev: the internal state holder, opaque struct * @parent: optional parent device providing the GPIOs * @owner: helps prevent removal of modules exporting active GPIOs * @request: optional hook for chip-specific activation, such as * enabling module power and clock; may sleep * @free: optional hook for chip-specific deactivation, such as * disabling module power and clock; may sleep * @get_direction: returns direction for signal "offset", 0=out, 1=in, * (same as GPIOF_DIR_XXX), or negative error. * It is recommended to always implement this function, even on * input-only or output-only gpio chips. * @direction_input: configures signal "offset" as input, or returns error * This can be omitted on input-only or output-only gpio chips. * @direction_output: configures signal "offset" as output, or returns error * This can be omitted on input-only or output-only gpio chips. * @get: returns value for signal "offset", 0=low, 1=high, or negative error * @get_multiple: reads values for multiple signals defined by "mask" and * stores them in "bits", returns 0 on success or negative error * @set: assigns output value for signal "offset" * @set_multiple: assigns output values for multiple signals defined by "mask" * @set_config: optional hook for all kinds of settings. Uses the same * packed config format as generic pinconf. * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; * implementation may not sleep * @dbg_show: optional routine to show contents in debugfs; default code * will be used when this is omitted, but custom code can show extra * state (such as pullup/pulldown configuration). * @init_valid_mask: optional routine to initialize @valid_mask, to be used if * not all GPIOs are valid. * @base: identifies the first GPIO number handled by this chip; * or, if negative during registration, requests dynamic ID allocation. 
 * DEPRECATION: providing anything non-negative and thereby nailing the base
 * offset of GPIO chips is deprecated. Please pass -1 as base to
 * let gpiolib select the chip base in all possible cases. We want to
 * get rid of the static GPIO number space in the long run.
 * @ngpio: the number of GPIOs handled by this controller; the last GPIO
 * handled is (base + ngpio - 1).
 * @names: if set, must be an array of strings to use as alternative
 * names for the GPIOs in this chip. Any entry in the array
 * may be NULL if there is no alias for the GPIO, however the
 * array must be @ngpio entries long. A name can include a single printk
 * format specifier for an unsigned int. It is substituted by the actual
 * number of the gpio.
 * @can_sleep: flag must be set iff get()/set() methods sleep, as they
 * must while accessing GPIO expander chips over I2C or SPI. This
 * implies that if the chip supports IRQs, these IRQs need to be threaded
 * as the chip access may sleep when e.g. reading out the IRQ status
 * registers.
 * @read_reg: reader function for generic GPIO
 * @write_reg: writer function for generic GPIO
 * @be_bits: if the generic GPIO has big endian bit order (bit 31
 * represents line 0, bit 30 is line 1 ... bit 0 is line 31), this is
 * set to true by the generic GPIO core. It is for internal housekeeping only.
 * @reg_dat: data (in) register for generic GPIO
 * @reg_set: output set register (out=high) for generic GPIO
 * @reg_clr: output clear register (out=low) for generic GPIO
 * @reg_dir_out: direction out setting register for generic GPIO
 * @reg_dir_in: direction in setting register for generic GPIO
 * @bgpio_dir_unreadable: indicates that the direction register(s) cannot
 * be read and we need to rely on our internal state tracking.
 * @bgpio_bits: number of register bits used for a generic GPIO, i.e.
 * <register width> * 8
 * @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
 * shadowed and real data register writes together.
 * @bgpio_data: shadowed data register for generic GPIO to clear/set bits
 * safely.
 * @bgpio_dir: shadowed direction register for generic GPIO to clear/set
 * direction safely. A "1" in this word means the line is set as
 * output.
 *
 * A gpio_chip can help platforms abstract various sources of GPIOs so
 * they can all be accessed through a common programming interface.
 * Example sources would be SoC controllers, FPGAs, multifunction
 * chips, dedicated GPIO expanders, and so on.
 *
 * Each chip controls a number of signals, identified in method calls
 * by "offset" values in the range 0..(@ngpio - 1). When those signals
 * are referenced through calls like gpio_get_value(gpio), the offset
 * is calculated by subtracting @base from the gpio number.
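 *
 * As a sketch, a simple driver fills in a handful of these fields and
 * registers the chip with gpiochip_add_data(); the foo_* callbacks are
 * hypothetical:
 *
 *	static struct gpio_chip foo_chip = {
 *		.label			= "foo-gpio",
 *		.owner			= THIS_MODULE,
 *		.base			= -1,
 *		.ngpio			= 16,
 *		.get_direction		= foo_get_direction,
 *		.direction_input	= foo_direction_input,
 *		.direction_output	= foo_direction_output,
 *		.get			= foo_get,
 *		.set			= foo_set,
 *	};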
*/ struct gpio_chip { const char *label; struct gpio_device *gpiodev; struct device *parent; struct module *owner; int (*request)(struct gpio_chip *chip, unsigned offset); void (*free)(struct gpio_chip *chip, unsigned offset); int (*get_direction)(struct gpio_chip *chip, unsigned offset); int (*direction_input)(struct gpio_chip *chip, unsigned offset); int (*direction_output)(struct gpio_chip *chip, unsigned offset, int value); int (*get)(struct gpio_chip *chip, unsigned offset); int (*get_multiple)(struct gpio_chip *chip, unsigned long *mask, unsigned long *bits); void (*set)(struct gpio_chip *chip, unsigned offset, int value); void (*set_multiple)(struct gpio_chip *chip, unsigned long *mask, unsigned long *bits); int (*set_config)(struct gpio_chip *chip, unsigned offset, unsigned long config); int (*to_irq)(struct gpio_chip *chip, unsigned offset); void (*dbg_show)(struct seq_file *s, struct gpio_chip *chip); int (*init_valid_mask)(struct gpio_chip *chip, unsigned long *valid_mask, unsigned int ngpios); int base; u16 ngpio; const char *const *names; bool can_sleep; #if IS_ENABLED(CONFIG_GPIO_GENERIC) unsigned long (*read_reg)(void __iomem *reg); void (*write_reg)(void __iomem *reg, unsigned long data); bool be_bits; void __iomem *reg_dat; void __iomem *reg_set; void __iomem *reg_clr; void __iomem *reg_dir_out; void __iomem *reg_dir_in; bool bgpio_dir_unreadable; int bgpio_bits; spinlock_t bgpio_lock; unsigned long bgpio_data; unsigned long bgpio_dir; #endif /* CONFIG_GPIO_GENERIC */ #ifdef CONFIG_GPIOLIB_IRQCHIP /* * With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib * to handle IRQs for most practical cases. */ /** * @irq: * * Integrates interrupt chip functionality with the GPIO chip. Can be * used to handle IRQs for most practical cases. */ struct gpio_irq_chip irq; #endif /* CONFIG_GPIOLIB_IRQCHIP */ /** * @valid_mask: * * If not %NULL holds bitmask of GPIOs which are valid to be used * from the chip. */ unsigned long *valid_mask; #if defined(CONFIG_OF_GPIO) /* * If CONFIG_OF is enabled, then all GPIO controllers described in the * device tree automatically may have an OF translation */ /** * @of_node: * * Pointer to a device tree node representing this GPIO controller. */ struct device_node *of_node; /** * @of_gpio_n_cells: * * Number of cells used to form the GPIO specifier. */ unsigned int of_gpio_n_cells; /** * @of_xlate: * * Callback to translate a device tree GPIO specifier into a chip- * relative GPIO number and flags. */ int (*of_xlate)(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, u32 *flags); #endif /* CONFIG_OF_GPIO */ }; extern const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset); /* add/remove chips */ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, struct lock_class_key *lock_key, struct lock_class_key *request_key); /** * gpiochip_add_data() - register a gpio_chip * @chip: the chip to register, with chip->base initialized * @data: driver-private data associated with this chip * * Context: potentially before irqs will work * * When gpiochip_add_data() is called very early during boot, so that GPIOs * can be freely used, the chip->parent device must be registered before * the gpio framework's arch_initcall(). Otherwise sysfs initialization * for GPIOs will fail rudely. * * gpiochip_add_data() must only be called after gpiolib initialization, * ie after core_initcall(). * * If chip->base is negative, this requests dynamic assignment of * a range of valid GPIOs. 
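 *
 * A short probe-path sketch (foo_chip and foo_priv are hypothetical):
 *
 *	int ret = gpiochip_add_data(&foo_chip, foo_priv);
 *
 *	if (ret)
 *		return ret;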
* * Returns: * A negative errno if the chip can't be registered, such as because the * chip->base is invalid or already associated with a different chip. * Otherwise it returns zero as a success code. */ #ifdef CONFIG_LOCKDEP #define gpiochip_add_data(chip, data) ({ \ static struct lock_class_key lock_key; \ static struct lock_class_key request_key; \ gpiochip_add_data_with_key(chip, data, &lock_key, \ &request_key); \ }) #else #define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL) #endif /* CONFIG_LOCKDEP */ static inline int gpiochip_add(struct gpio_chip *chip) { return gpiochip_add_data(chip, NULL); } extern void gpiochip_remove(struct gpio_chip *chip); extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip, void *data); extern struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *chip, void *data)); bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset); int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset); void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset); void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset); void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset); /* Line status inquiry for drivers */ bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset); bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset); /* Sleep persistence inquiry for drivers */ bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset); bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset); /* get driver data */ void *gpiochip_get_data(struct gpio_chip *chip); struct bgpio_pdata { const char *label; int base; int ngpio; }; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, struct irq_fwspec *fwspec, unsigned int parent_hwirq, unsigned int parent_type); void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, struct irq_fwspec *fwspec, unsigned int parent_hwirq, unsigned int parent_type); #else static inline void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, struct irq_fwspec *fwspec, unsigned int parent_hwirq, unsigned int parent_type) { } static inline void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, struct irq_fwspec *fwspec, unsigned int parent_hwirq, unsigned int parent_type) { } #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ int bgpio_init(struct gpio_chip *gc, struct device *dev, unsigned long sz, void __iomem *dat, void __iomem *set, void __iomem *clr, void __iomem *dirout, void __iomem *dirin, unsigned long flags); #define BGPIOF_BIG_ENDIAN BIT(0) #define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */ #define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */ #define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3) #define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ #define BGPIOF_NO_OUTPUT BIT(5) /* only input */ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq); void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq); int gpiochip_irq_domain_activate(struct irq_domain *domain, struct irq_data *data, bool reserve); void gpiochip_irq_domain_deactivate(struct irq_domain *domain, struct irq_data *data); void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int parent_irq, irq_flow_handler_t parent_handler); void 
gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int parent_irq); int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type, bool threaded, struct lock_class_key *lock_key, struct lock_class_key *request_key); bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, unsigned int offset); #ifdef CONFIG_LOCKDEP /* * Lockdep requires that each irqchip instance be created with a * unique key so as to avoid unnecessary warnings. These upfront * boilerplate static inlines provide such a key for each * unique instance. */ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type) { static struct lock_class_key lock_key; static struct lock_class_key request_key; return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, handler, type, false, &lock_key, &request_key); } static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type) { static struct lock_class_key lock_key; static struct lock_class_key request_key; return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, handler, type, true, &lock_key, &request_key); } #else /* ! CONFIG_LOCKDEP */ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type) { return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, handler, type, false, NULL, NULL); } static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type) { return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, handler, type, true, NULL, NULL); } #endif /* CONFIG_LOCKDEP */ int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset, unsigned long config); /** * struct gpio_pin_range - pin range controlled by a gpio chip * @node: list for maintaining set of pin ranges, used internally * @pctldev: pinctrl device which handles corresponding pins * @range: actual range of pins controlled by a gpio controller */ struct gpio_pin_range { struct list_head node; struct pinctrl_dev *pctldev; struct pinctrl_gpio_range range; }; #ifdef CONFIG_PINCTRL int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins); int gpiochip_add_pingroup_range(struct gpio_chip *chip, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group); void gpiochip_remove_pin_ranges(struct gpio_chip *chip); #else /* !
CONFIG_PINCTRL */ static inline int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins) { return 0; } static inline int gpiochip_add_pingroup_range(struct gpio_chip *chip, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group) { return 0; } static inline void gpiochip_remove_pin_ranges(struct gpio_chip *chip) { } #endif /* CONFIG_PINCTRL */ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum, const char *label, enum gpio_lookup_flags lflags, enum gpiod_flags dflags); void gpiochip_free_own_desc(struct gpio_desc *desc); void devprop_gpiochip_set_names(struct gpio_chip *chip, const struct fwnode_handle *fwnode); #ifdef CONFIG_GPIOLIB /* lock/unlock as IRQ */ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); #else /* CONFIG_GPIOLIB */ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(1); return ERR_PTR(-ENODEV); } static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) { WARN_ON(1); return -EINVAL; } static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset) { WARN_ON(1); } #endif /* CONFIG_GPIOLIB */ #endif /* __LINUX_GPIO_DRIVER_H */ gpio/consumer.h 0000644 00000045732 14722070374 0007527 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GPIO_CONSUMER_H #define __LINUX_GPIO_CONSUMER_H #include <linux/bug.h> #include <linux/err.h> #include <linux/kernel.h> struct device; /** * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are * preferable to the old integer-based handles. * * Unlike integers, a pointer to a gpio_desc is guaranteed to be valid * until the GPIO is released. */ struct gpio_desc; /** * Opaque descriptor for a structure of GPIO array attributes. This structure * is attached to struct gpio_descs obtained from gpiod_get_array() and can be * passed back to get/set array functions in order to activate the fast * processing path if applicable. */ struct gpio_array; /** * Struct containing an array of descriptors that can be obtained using * gpiod_get_array(). */ struct gpio_descs { struct gpio_array *info; unsigned int ndescs; struct gpio_desc *desc[]; }; #define GPIOD_FLAGS_BIT_DIR_SET BIT(0) #define GPIOD_FLAGS_BIT_DIR_OUT BIT(1) #define GPIOD_FLAGS_BIT_DIR_VAL BIT(2) #define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3) #define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4) /** * Optional flags that can be passed to one of gpiod_* to configure direction * and output value. These values cannot be OR'd.
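 *
 * Example (editor's sketch; "reset" is a hypothetical con_id and the
 * devm_gpiod_get()/gpiod_set_value() consumer calls are declared further
 * down in this file):
 *
 *	struct gpio_desc *reset;
 *
 *	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
 *	if (IS_ERR(reset))
 *		return PTR_ERR(reset);
 *	gpiod_set_value(reset, 1);	// assert; polarity handled by gpiolib
 *	gpiod_set_value(reset, 0);	// deassert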
*/ enum gpiod_flags { GPIOD_ASIS = 0, GPIOD_IN = GPIOD_FLAGS_BIT_DIR_SET, GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT, GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_DIR_VAL, GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_OPEN_DRAIN, GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_OPEN_DRAIN, }; #ifdef CONFIG_GPIOLIB /* Return the number of GPIOs associated with a device / function */ int gpiod_count(struct device *dev, const char *con_id); /* Acquire and dispose GPIOs */ struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id, enum gpiod_flags flags); struct gpio_desc *__must_check gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx, enum gpiod_flags flags); struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, const char *con_id, enum gpiod_flags flags); struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, const char *con_id, unsigned int index, enum gpiod_flags flags); struct gpio_descs *__must_check gpiod_get_array(struct device *dev, const char *con_id, enum gpiod_flags flags); struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev, const char *con_id, enum gpiod_flags flags); void gpiod_put(struct gpio_desc *desc); void gpiod_put_array(struct gpio_descs *descs); struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, const char *con_id, enum gpiod_flags flags); struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx, enum gpiod_flags flags); struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, const char *con_id, enum gpiod_flags flags); struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev, const char *con_id, unsigned int index, enum gpiod_flags flags); struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev, const char *con_id, enum gpiod_flags flags); struct gpio_descs *__must_check devm_gpiod_get_array_optional(struct device *dev, const char *con_id, enum gpiod_flags flags); void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc); void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs); int gpiod_get_direction(struct gpio_desc *desc); int gpiod_direction_input(struct gpio_desc *desc); int gpiod_direction_output(struct gpio_desc *desc, int value); int gpiod_direction_output_raw(struct gpio_desc *desc, int value); /* Value get/set from non-sleeping context */ int gpiod_get_value(const struct gpio_desc *desc); int gpiod_get_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap); void gpiod_set_value(struct gpio_desc *desc, int value); int gpiod_set_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap); int gpiod_get_raw_value(const struct gpio_desc *desc); int gpiod_get_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap); void gpiod_set_raw_value(struct gpio_desc *desc, int value); int gpiod_set_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap); /* Value get/set from sleeping context */ int gpiod_get_value_cansleep(const struct gpio_desc *desc); int 
gpiod_get_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap); void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); int gpiod_set_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap); int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); int gpiod_get_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap); void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap); int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); void gpiod_toggle_active_low(struct gpio_desc *desc); int gpiod_is_active_low(const struct gpio_desc *desc); int gpiod_cansleep(const struct gpio_desc *desc); int gpiod_to_irq(const struct gpio_desc *desc); int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name); /* Convert between the old gpio_ and new gpiod_ interfaces */ struct gpio_desc *gpio_to_desc(unsigned gpio); int desc_to_gpio(const struct gpio_desc *desc); /* Child properties interface */ struct fwnode_handle; struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, const char *propname, int index, enum gpiod_flags dflags, const char *label); struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, const char *con_id, int index, struct fwnode_handle *child, enum gpiod_flags flags, const char *label); #else /* CONFIG_GPIOLIB */ static inline int gpiod_count(struct device *dev, const char *con_id) { return 0; } static inline struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id, enum gpiod_flags flags) { return ERR_PTR(-ENOSYS); } static inline struct gpio_desc *__must_check gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx, enum gpiod_flags flags) { return ERR_PTR(-ENOSYS); } static inline struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { return NULL; } static inline struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, const char *con_id, unsigned int index, enum gpiod_flags flags) { return NULL; } static inline struct gpio_descs *__must_check gpiod_get_array(struct device *dev, const char *con_id, enum gpiod_flags flags) { return ERR_PTR(-ENOSYS); } static inline struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { return NULL; } static inline void gpiod_put(struct gpio_desc *desc) { might_sleep(); /* GPIO can never have been requested */ WARN_ON(desc); } static inline void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc) { might_sleep(); /* GPIO can never have been requested */ WARN_ON(desc); } static inline void gpiod_put_array(struct gpio_descs *descs) { might_sleep(); /* GPIO can never have been requested */ WARN_ON(descs); } static inline struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, const char *con_id, enum gpiod_flags flags) { return ERR_PTR(-ENOSYS); } static inline struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx, enum gpiod_flags flags) { return 
ERR_PTR(-ENOSYS); } static inline struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { return NULL; } static inline struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev, const char *con_id, unsigned int index, enum gpiod_flags flags) { return NULL; } static inline struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev, const char *con_id, enum gpiod_flags flags) { return ERR_PTR(-ENOSYS); } static inline struct gpio_descs *__must_check devm_gpiod_get_array_optional(struct device *dev, const char *con_id, enum gpiod_flags flags) { return NULL; } static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) { might_sleep(); /* GPIO can never have been requested */ WARN_ON(desc); } static inline void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs) { might_sleep(); /* GPIO can never have been requested */ WARN_ON(descs); } static inline int gpiod_get_direction(const struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_input(struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_output(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ WARN_ON(desc); return -ENOSYS; } static inline int gpiod_get_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); return 0; } static inline int gpiod_get_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(desc_array); return 0; } static inline void gpiod_set_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ WARN_ON(desc); } static inline int gpiod_set_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(desc_array); return 0; } static inline int gpiod_get_raw_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); return 0; } static inline int gpiod_get_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(desc_array); return 0; } static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ WARN_ON(desc); } static inline int gpiod_set_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(desc_array); return 0; } static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); return 0; } static inline int gpiod_get_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(desc_array); return 0; } static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ 
WARN_ON(desc); } static inline int gpiod_set_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(desc_array); return 0; } static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); return 0; } static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(desc_array); return 0; } static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ WARN_ON(desc); } static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, struct gpio_array *array_info, unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(desc_array); return 0; } static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { /* GPIO can never have been requested */ WARN_ON(desc); return -ENOSYS; } static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) { /* GPIO can never have been requested */ WARN_ON(desc); return -ENOSYS; } static inline void gpiod_toggle_active_low(struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); } static inline int gpiod_is_active_low(const struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); return 0; } static inline int gpiod_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); return 0; } static inline int gpiod_to_irq(const struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); return -EINVAL; } static inline int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name) { /* GPIO can never have been requested */ WARN_ON(desc); return -EINVAL; } static inline struct gpio_desc *gpio_to_desc(unsigned gpio) { return NULL; } static inline int desc_to_gpio(const struct gpio_desc *desc) { /* GPIO can never have been requested */ WARN_ON(desc); return -EINVAL; } /* Child properties interface */ struct fwnode_handle; static inline struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, const char *propname, int index, enum gpiod_flags dflags, const char *label) { return ERR_PTR(-ENOSYS); } static inline struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, const char *con_id, int index, struct fwnode_handle *child, enum gpiod_flags flags, const char *label) { return ERR_PTR(-ENOSYS); } #endif /* CONFIG_GPIOLIB */ static inline struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev, const char *con_id, struct fwnode_handle *child, enum gpiod_flags flags, const char *label) { return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 0, child, flags, label); } #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO) struct device_node; struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label); #else /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */ struct device_node; static inline struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label) { return ERR_PTR(-ENOSYS); } #endif /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */ #ifdef 
CONFIG_GPIOLIB struct device_node; struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label); #else /* CONFIG_GPIOLIB */ struct device_node; static inline struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label) { return ERR_PTR(-ENOSYS); } #endif /* CONFIG_GPIOLIB */ struct acpi_gpio_params { unsigned int crs_entry_index; unsigned int line_index; bool active_low; }; struct acpi_gpio_mapping { const char *name; const struct acpi_gpio_params *data; unsigned int size; /* Ignore IoRestriction field */ #define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) /* * When ACPI GPIO mapping table is in use the index parameter inside it * refers to the GPIO resource in _CRS method. That index has no * distinction of actual type of the resource. When consumer wants to * get GpioIo type explicitly, this quirk may be used. */ #define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1) unsigned int quirks; }; #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI) struct acpi_device; int acpi_dev_add_driver_gpios(struct acpi_device *adev, const struct acpi_gpio_mapping *gpios); void acpi_dev_remove_driver_gpios(struct acpi_device *adev); int devm_acpi_dev_add_driver_gpios(struct device *dev, const struct acpi_gpio_mapping *gpios); void devm_acpi_dev_remove_driver_gpios(struct device *dev); #else /* CONFIG_GPIOLIB && CONFIG_ACPI */ struct acpi_device; static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, const struct acpi_gpio_mapping *gpios) { return -ENXIO; } static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, const struct acpi_gpio_mapping *gpios) { return -ENXIO; } static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {} #endif /* CONFIG_GPIOLIB && CONFIG_ACPI */ #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) int gpiod_export(struct gpio_desc *desc, bool direction_may_change); int gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc); void gpiod_unexport(struct gpio_desc *desc); #else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */ static inline int gpiod_export(struct gpio_desc *desc, bool direction_may_change) { return -ENOSYS; } static inline int gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc) { return -ENOSYS; } static inline void gpiod_unexport(struct gpio_desc *desc) { } #endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */ #endif gpio/aspeed.h 0000644 00000000665 14722070374 0007131 0 ustar 00 #ifndef __GPIO_ASPEED_H #define __GPIO_ASPEED_H struct aspeed_gpio_copro_ops { int (*request_access)(void *data); int (*release_access)(void *data); }; int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc, u16 *vreg_offset, u16 *dreg_offset, u8 *bit); int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc); int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data); #endif /* __GPIO_ASPEED_H */ gpio/gpio-reg.h 0000644 00000000610 14722070374 0007367 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef GPIO_REG_H #define GPIO_REG_H struct device; struct irq_domain; struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg, int base, int num, const char *label, u32 direction, u32 def_out, const char *const *names, struct irq_domain *irqdom, const int *irqs); int 
gpio_reg_resume(struct gpio_chip *gc); #endif /* GPIO_REG_H */ pe.h 0000644 00000036070 14722070374 0005335 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2011 Red Hat, Inc. * All rights reserved. * * Author(s): Peter Jones <pjones@redhat.com> */ #ifndef __LINUX_PE_H #define __LINUX_PE_H #include <linux/types.h> #define MZ_MAGIC 0x5a4d /* "MZ" */ #define PE_MAGIC 0x00004550 /* "PE\0\0" */ #define PE_OPT_MAGIC_PE32 0x010b #define PE_OPT_MAGIC_PE32_ROM 0x0107 #define PE_OPT_MAGIC_PE32PLUS 0x020b /* machine type */ #define IMAGE_FILE_MACHINE_UNKNOWN 0x0000 #define IMAGE_FILE_MACHINE_AM33 0x01d3 #define IMAGE_FILE_MACHINE_AMD64 0x8664 #define IMAGE_FILE_MACHINE_ARM 0x01c0 #define IMAGE_FILE_MACHINE_ARMV7 0x01c4 #define IMAGE_FILE_MACHINE_ARM64 0xaa64 #define IMAGE_FILE_MACHINE_EBC 0x0ebc #define IMAGE_FILE_MACHINE_I386 0x014c #define IMAGE_FILE_MACHINE_IA64 0x0200 #define IMAGE_FILE_MACHINE_M32R 0x9041 #define IMAGE_FILE_MACHINE_MIPS16 0x0266 #define IMAGE_FILE_MACHINE_MIPSFPU 0x0366 #define IMAGE_FILE_MACHINE_MIPSFPU16 0x0466 #define IMAGE_FILE_MACHINE_POWERPC 0x01f0 #define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1 #define IMAGE_FILE_MACHINE_R4000 0x0166 #define IMAGE_FILE_MACHINE_SH3 0x01a2 #define IMAGE_FILE_MACHINE_SH3DSP 0x01a3 #define IMAGE_FILE_MACHINE_SH3E 0x01a4 #define IMAGE_FILE_MACHINE_SH4 0x01a6 #define IMAGE_FILE_MACHINE_SH5 0x01a8 #define IMAGE_FILE_MACHINE_THUMB 0x01c2 #define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169 /* flags */ #define IMAGE_FILE_RELOCS_STRIPPED 0x0001 #define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 #define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004 #define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008 #define IMAGE_FILE_AGGRESSIVE_WS_TRIM 0x0010 #define IMAGE_FILE_LARGE_ADDRESS_AWARE 0x0020 #define IMAGE_FILE_16BIT_MACHINE 0x0040 #define IMAGE_FILE_BYTES_REVERSED_LO 0x0080 #define IMAGE_FILE_32BIT_MACHINE 0x0100 #define IMAGE_FILE_DEBUG_STRIPPED 0x0200 #define IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP 0x0400 #define IMAGE_FILE_NET_RUN_FROM_SWAP 0x0800 #define IMAGE_FILE_SYSTEM 0x1000 #define IMAGE_FILE_DLL 0x2000 #define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000 #define IMAGE_FILE_BYTES_REVERSED_HI 0x8000 #define IMAGE_FILE_OPT_ROM_MAGIC 0x107 #define IMAGE_FILE_OPT_PE32_MAGIC 0x10b #define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b #define IMAGE_SUBSYSTEM_UNKNOWN 0 #define IMAGE_SUBSYSTEM_NATIVE 1 #define IMAGE_SUBSYSTEM_WINDOWS_GUI 2 #define IMAGE_SUBSYSTEM_WINDOWS_CUI 3 #define IMAGE_SUBSYSTEM_POSIX_CUI 7 #define IMAGE_SUBSYSTEM_WINDOWS_CE_GUI 9 #define IMAGE_SUBSYSTEM_EFI_APPLICATION 10 #define IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER 11 #define IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER 12 #define IMAGE_SUBSYSTEM_EFI_ROM_IMAGE 13 #define IMAGE_SUBSYSTEM_XBOX 14 #define IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE 0x0040 #define IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY 0x0080 #define IMAGE_DLL_CHARACTERISTICS_NX_COMPAT 0x0100 #define IMAGE_DLLCHARACTERISTICS_NO_ISOLATION 0x0200 #define IMAGE_DLLCHARACTERISTICS_NO_SEH 0x0400 #define IMAGE_DLLCHARACTERISTICS_NO_BIND 0x0800 #define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000 #define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000 /* they actually defined 0x00000000 as well, but I think we'll skip that one. 
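 */

/*
 * Editor's sketch (illustrative only): locating the PE header from the MZ
 * stub, using the struct mz_hdr and struct pe_hdr layouts defined further
 * down in this file:
 *
 *	const struct mz_hdr *mz = image;
 *	const struct pe_hdr *pe;
 *
 *	if (mz->magic != MZ_MAGIC)
 *		return NULL;
 *	pe = image + mz->peaddr;	// peaddr: file offset of "PE\0\0"
 *	if (pe->magic != PE_MAGIC)
 *		return NULL;
 *
 * Section header characteristics (the IMAGE_SCN_* flags):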
*/ #define IMAGE_SCN_RESERVED_0 0x00000001 #define IMAGE_SCN_RESERVED_1 0x00000002 #define IMAGE_SCN_RESERVED_2 0x00000004 #define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */ #define IMAGE_SCN_RESERVED_3 0x00000010 #define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */ #define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */ #define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */ #define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */ #define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */ #define IMAGE_SCN_RESERVED_4 0x00000400 #define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/ #define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */ #define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */ #define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */ #define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */ /* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */ #define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */ #define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */ #define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */ #define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */ /* and here they just stuck a 1-byte integer in the middle of a bitfield */ #define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */ #define IMAGE_SCN_ALIGN_2BYTES 0x00200000 #define IMAGE_SCN_ALIGN_4BYTES 0x00300000 #define IMAGE_SCN_ALIGN_8BYTES 0x00400000 #define IMAGE_SCN_ALIGN_16BYTES 0x00500000 #define IMAGE_SCN_ALIGN_32BYTES 0x00600000 #define IMAGE_SCN_ALIGN_64BYTES 0x00700000 #define IMAGE_SCN_ALIGN_128BYTES 0x00800000 #define IMAGE_SCN_ALIGN_256BYTES 0x00900000 #define IMAGE_SCN_ALIGN_512BYTES 0x00a00000 #define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000 #define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000 #define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000 #define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000 #define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */ #define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */ #define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */ #define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */ #define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */ #define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */ #define IMAGE_SCN_MEM_READ 0x40000000 /* readable */ #define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */ #define IMAGE_DEBUG_TYPE_CODEVIEW 2 #ifndef __ASSEMBLY__ struct mz_hdr { uint16_t magic; /* MZ_MAGIC */ uint16_t lbsize; /* size of last used block */ uint16_t blocks; /* pages in file, 0x3 */ uint16_t relocs; /* relocations */ uint16_t hdrsize; /* header size in "paragraphs" */ uint16_t min_extra_pps; /* .bss */ uint16_t max_extra_pps; /* runtime limit for the arena size */ uint16_t ss; /* relative stack segment */ uint16_t sp; /* initial %sp register */ uint16_t checksum; /* word checksum */ uint16_t ip; /* initial %ip register */ uint16_t cs; /* initial %cs relative to load segment */ uint16_t reloc_table_offset; /* offset of the first relocation */ uint16_t overlay_num; /* overlay number. set to 0. 
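(0 selects the main program; DOS used nonzero values for its code overlays)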
*/ uint16_t reserved0[4]; /* reserved */ uint16_t oem_id; /* oem identifier */ uint16_t oem_info; /* oem specific */ uint16_t reserved1[10]; /* reserved */ uint32_t peaddr; /* address of pe header */ char message[]; /* message to print */ }; struct mz_reloc { uint16_t offset; uint16_t segment; }; struct pe_hdr { uint32_t magic; /* PE magic */ uint16_t machine; /* machine type */ uint16_t sections; /* number of sections */ uint32_t timestamp; /* time_t */ uint32_t symbol_table; /* symbol table offset */ uint32_t symbols; /* number of symbols */ uint16_t opt_hdr_size; /* size of optional header */ uint16_t flags; /* flags */ }; /* the fact that pe32 isn't padded where pe32+ is 64-bit means union won't * work right. vomit. */ struct pe32_opt_hdr { /* "standard" header */ uint16_t magic; /* file type */ uint8_t ld_major; /* linker major version */ uint8_t ld_minor; /* linker minor version */ uint32_t text_size; /* size of text section(s) */ uint32_t data_size; /* size of data section(s) */ uint32_t bss_size; /* size of bss section(s) */ uint32_t entry_point; /* file offset of entry point */ uint32_t code_base; /* relative code addr in ram */ uint32_t data_base; /* relative data addr in ram */ /* "windows" header */ uint32_t image_base; /* preferred load address */ uint32_t section_align; /* alignment in bytes */ uint32_t file_align; /* file alignment in bytes */ uint16_t os_major; /* major OS version */ uint16_t os_minor; /* minor OS version */ uint16_t image_major; /* major image version */ uint16_t image_minor; /* minor image version */ uint16_t subsys_major; /* major subsystem version */ uint16_t subsys_minor; /* minor subsystem version */ uint32_t win32_version; /* reserved, must be 0 */ uint32_t image_size; /* image size */ uint32_t header_size; /* header size rounded up to file_align */ uint32_t csum; /* checksum */ uint16_t subsys; /* subsystem */ uint16_t dll_flags; /* more flags! */ uint32_t stack_size_req;/* amt of stack requested */ uint32_t stack_size; /* amt of stack required */ uint32_t heap_size_req; /* amt of heap requested */ uint32_t heap_size; /* amt of heap required */ uint32_t loader_flags; /* reserved, must be 0 */ uint32_t data_dirs; /* number of data dir entries */ }; struct pe32plus_opt_hdr { uint16_t magic; /* file type */ uint8_t ld_major; /* linker major version */ uint8_t ld_minor; /* linker minor version */ uint32_t text_size; /* size of text section(s) */ uint32_t data_size; /* size of data section(s) */ uint32_t bss_size; /* size of bss section(s) */ uint32_t entry_point; /* file offset of entry point */ uint32_t code_base; /* relative code addr in ram */ /* "windows" header */ uint64_t image_base; /* preferred load address */ uint32_t section_align; /* alignment in bytes */ uint32_t file_align; /* file alignment in bytes */ uint16_t os_major; /* major OS version */ uint16_t os_minor; /* minor OS version */ uint16_t image_major; /* major image version */ uint16_t image_minor; /* minor image version */ uint16_t subsys_major; /* major subsystem version */ uint16_t subsys_minor; /* minor subsystem version */ uint32_t win32_version; /* reserved, must be 0 */ uint32_t image_size; /* image size */ uint32_t header_size; /* header size rounded up to file_align */ uint32_t csum; /* checksum */ uint16_t subsys; /* subsystem */ uint16_t dll_flags; /* more flags! 
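(the IMAGE_DLLCHARACTERISTICS_* bits defined earlier in this file)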
*/ uint64_t stack_size_req;/* amt of stack requested */ uint64_t stack_size; /* amt of stack required */ uint64_t heap_size_req; /* amt of heap requested */ uint64_t heap_size; /* amt of heap required */ uint32_t loader_flags; /* reserved, must be 0 */ uint32_t data_dirs; /* number of data dir entries */ }; struct data_dirent { uint32_t virtual_address; /* relative to load address */ uint32_t size; }; struct data_directory { struct data_dirent exports; /* .edata */ struct data_dirent imports; /* .idata */ struct data_dirent resources; /* .rsrc */ struct data_dirent exceptions; /* .pdata */ struct data_dirent certs; /* certs */ struct data_dirent base_relocations; /* .reloc */ struct data_dirent debug; /* .debug */ struct data_dirent arch; /* reserved */ struct data_dirent global_ptr; /* global pointer reg. Size=0 */ struct data_dirent tls; /* .tls */ struct data_dirent load_config; /* load configuration structure */ struct data_dirent bound_imports; /* no idea */ struct data_dirent import_addrs; /* import address table */ struct data_dirent delay_imports; /* delay-load import table */ struct data_dirent clr_runtime_hdr; /* .cor (object only) */ struct data_dirent reserved; }; struct section_header { char name[8]; /* name or "/12\0" string tbl offset */ uint32_t virtual_size; /* size of loaded section in ram */ uint32_t virtual_address; /* relative virtual address */ uint32_t raw_data_size; /* size of the section */ uint32_t data_addr; /* file pointer to first page of sec */ uint32_t relocs; /* file pointer to relocation entries */ uint32_t line_numbers; /* line numbers! */ uint16_t num_relocs; /* number of relocations */ uint16_t num_lin_numbers; /* srsly. */ uint32_t flags; }; enum x64_coff_reloc_type { IMAGE_REL_AMD64_ABSOLUTE = 0, IMAGE_REL_AMD64_ADDR64, IMAGE_REL_AMD64_ADDR32, IMAGE_REL_AMD64_ADDR32N, IMAGE_REL_AMD64_REL32, IMAGE_REL_AMD64_REL32_1, IMAGE_REL_AMD64_REL32_2, IMAGE_REL_AMD64_REL32_3, IMAGE_REL_AMD64_REL32_4, IMAGE_REL_AMD64_REL32_5, IMAGE_REL_AMD64_SECTION, IMAGE_REL_AMD64_SECREL, IMAGE_REL_AMD64_SECREL7, IMAGE_REL_AMD64_TOKEN, IMAGE_REL_AMD64_SREL32, IMAGE_REL_AMD64_PAIR, IMAGE_REL_AMD64_SSPAN32, }; enum arm_coff_reloc_type { IMAGE_REL_ARM_ABSOLUTE, IMAGE_REL_ARM_ADDR32, IMAGE_REL_ARM_ADDR32N, IMAGE_REL_ARM_BRANCH2, IMAGE_REL_ARM_BRANCH1, IMAGE_REL_ARM_SECTION, IMAGE_REL_ARM_SECREL, }; enum sh_coff_reloc_type { IMAGE_REL_SH3_ABSOLUTE, IMAGE_REL_SH3_DIRECT16, IMAGE_REL_SH3_DIRECT32, IMAGE_REL_SH3_DIRECT8, IMAGE_REL_SH3_DIRECT8_WORD, IMAGE_REL_SH3_DIRECT8_LONG, IMAGE_REL_SH3_DIRECT4, IMAGE_REL_SH3_DIRECT4_WORD, IMAGE_REL_SH3_DIRECT4_LONG, IMAGE_REL_SH3_PCREL8_WORD, IMAGE_REL_SH3_PCREL8_LONG, IMAGE_REL_SH3_PCREL12_WORD, IMAGE_REL_SH3_STARTOF_SECTION, IMAGE_REL_SH3_SIZEOF_SECTION, IMAGE_REL_SH3_SECTION, IMAGE_REL_SH3_SECREL, IMAGE_REL_SH3_DIRECT32_NB, IMAGE_REL_SH3_GPREL4_LONG, IMAGE_REL_SH3_TOKEN, IMAGE_REL_SHM_PCRELPT, IMAGE_REL_SHM_REFLO, IMAGE_REL_SHM_REFHALF, IMAGE_REL_SHM_RELLO, IMAGE_REL_SHM_RELHALF, IMAGE_REL_SHM_PAIR, IMAGE_REL_SHM_NOMODE, }; enum ppc_coff_reloc_type { IMAGE_REL_PPC_ABSOLUTE, IMAGE_REL_PPC_ADDR64, IMAGE_REL_PPC_ADDR32, IMAGE_REL_PPC_ADDR24, IMAGE_REL_PPC_ADDR16, IMAGE_REL_PPC_ADDR14, IMAGE_REL_PPC_REL24, IMAGE_REL_PPC_REL14, IMAGE_REL_PPC_ADDR32N, IMAGE_REL_PPC_SECREL, IMAGE_REL_PPC_SECTION, IMAGE_REL_PPC_SECREL16, IMAGE_REL_PPC_REFHI, IMAGE_REL_PPC_REFLO, IMAGE_REL_PPC_PAIR, IMAGE_REL_PPC_SECRELLO, IMAGE_REL_PPC_GPREL, IMAGE_REL_PPC_TOKEN, }; enum x86_coff_reloc_type { IMAGE_REL_I386_ABSOLUTE, IMAGE_REL_I386_DIR16, IMAGE_REL_I386_REL16,
IMAGE_REL_I386_DIR32, IMAGE_REL_I386_DIR32NB, IMAGE_REL_I386_SEG12, IMAGE_REL_I386_SECTION, IMAGE_REL_I386_SECREL, IMAGE_REL_I386_TOKEN, IMAGE_REL_I386_SECREL7, IMAGE_REL_I386_REL32, }; enum ia64_coff_reloc_type { IMAGE_REL_IA64_ABSOLUTE, IMAGE_REL_IA64_IMM14, IMAGE_REL_IA64_IMM22, IMAGE_REL_IA64_IMM64, IMAGE_REL_IA64_DIR32, IMAGE_REL_IA64_DIR64, IMAGE_REL_IA64_PCREL21B, IMAGE_REL_IA64_PCREL21M, IMAGE_REL_IA64_PCREL21F, IMAGE_REL_IA64_GPREL22, IMAGE_REL_IA64_LTOFF22, IMAGE_REL_IA64_SECTION, IMAGE_REL_IA64_SECREL22, IMAGE_REL_IA64_SECREL64I, IMAGE_REL_IA64_SECREL32, IMAGE_REL_IA64_DIR32NB, IMAGE_REL_IA64_SREL14, IMAGE_REL_IA64_SREL22, IMAGE_REL_IA64_SREL32, IMAGE_REL_IA64_UREL32, IMAGE_REL_IA64_PCREL60X, IMAGE_REL_IA64_PCREL60B, IMAGE_REL_IA64_PCREL60F, IMAGE_REL_IA64_PCREL60I, IMAGE_REL_IA64_PCREL60M, IMAGE_REL_IA64_IMMGPREL6, IMAGE_REL_IA64_TOKEN, IMAGE_REL_IA64_GPREL32, IMAGE_REL_IA64_ADDEND, }; struct coff_reloc { uint32_t virtual_address; uint32_t symbol_table_index; union { enum x64_coff_reloc_type x64_type; enum arm_coff_reloc_type arm_type; enum sh_coff_reloc_type sh_type; enum ppc_coff_reloc_type ppc_type; enum x86_coff_reloc_type x86_type; enum ia64_coff_reloc_type ia64_type; uint16_t data; }; }; /* * Definitions for the contents of the certs data block */ #define WIN_CERT_TYPE_PKCS_SIGNED_DATA 0x0002 #define WIN_CERT_TYPE_EFI_OKCS115 0x0EF0 #define WIN_CERT_TYPE_EFI_GUID 0x0EF1 #define WIN_CERT_REVISION_1_0 0x0100 #define WIN_CERT_REVISION_2_0 0x0200 struct win_certificate { uint32_t length; uint16_t revision; uint16_t cert_type; }; #endif /* !__ASSEMBLY__ */ #endif /* __LINUX_PE_H */ crash_core.h 0000644 00000006100 14722070374 0007030 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_CRASH_CORE_H #define LINUX_CRASH_CORE_H #include <linux/linkage.h> #include <linux/elfcore.h> #include <linux/elf.h> #define CRASH_CORE_NOTE_NAME "CORE" #define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4) #define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4) #define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4) /* * The per-cpu notes area is a list of notes terminated by a "NULL" * note header. For kdump, the code in vmcore.c runs in the context * of the second kernel to combine them into one note. 
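 */

/*
 * Editor's sketch (illustrative): an architecture fills in the vmcoreinfo
 * note through the VMCOREINFO_* helpers defined below, for example:
 *
 *	void arch_crash_save_vmcoreinfo(void)
 *	{
 *		VMCOREINFO_SYMBOL(swapper_pg_dir);
 *		VMCOREINFO_NUMBER(VA_BITS);
 *	}
 *
 * (swapper_pg_dir and VA_BITS are arm64-flavoured examples.)
 *
 * Sizes implied by the note layout described above: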
*/ #define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \ CRASH_CORE_NOTE_NAME_BYTES + \ CRASH_CORE_NOTE_DESC_BYTES) #define VMCOREINFO_BYTES PAGE_SIZE #define VMCOREINFO_NOTE_NAME "VMCOREINFO" #define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4) #define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \ VMCOREINFO_NOTE_NAME_BYTES + \ VMCOREINFO_BYTES) typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4]; void crash_update_vmcoreinfo_safecopy(void *ptr); void crash_save_vmcoreinfo(void); void arch_crash_save_vmcoreinfo(void); __printf(1, 2) void vmcoreinfo_append_str(const char *fmt, ...); phys_addr_t paddr_vmcoreinfo_note(void); #define VMCOREINFO_OSRELEASE(value) \ vmcoreinfo_append_str("OSRELEASE=%s\n", value) #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name) #define VMCOREINFO_SYMBOL_ARRAY(name) \ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name) #define VMCOREINFO_SIZE(name) \ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ (unsigned long)sizeof(name)) #define VMCOREINFO_STRUCT_SIZE(name) \ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ (unsigned long)sizeof(struct name)) #define VMCOREINFO_OFFSET(name, field) \ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \ (unsigned long)offsetof(struct name, field)) #define VMCOREINFO_LENGTH(name, value) \ vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value) #define VMCOREINFO_NUMBER(name) \ vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) #define VMCOREINFO_CONFIG(name) \ vmcoreinfo_append_str("CONFIG_%s=y\n", #name) extern unsigned char *vmcoreinfo_data; extern size_t vmcoreinfo_size; extern u32 *vmcoreinfo_note; Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len); void final_note(Elf_Word *buf); int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base); int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base); int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base); #endif /* LINUX_CRASH_CORE_H */ spinlock_types.h 0000644 00000003762 14722070374 0010001 0 ustar 00 #ifndef __LINUX_SPINLOCK_TYPES_H #define __LINUX_SPINLOCK_TYPES_H /* * include/linux/spinlock_types.h - generic spinlock type definitions * and initializers * * portions Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the General Public License (GPL). 
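 */

/*
 * Editor's example (a sketch; bar_lock and bar_count are hypothetical, and
 * the spin_lock_irqsave() helpers live in <linux/spinlock.h>, not here):
 * the initializers defined below are used like this:
 *
 *	static DEFINE_SPINLOCK(bar_lock);
 *	static unsigned long bar_count;
 *
 *	static void bar_inc(void)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&bar_lock, flags);
 *		bar_count++;
 *		spin_unlock_irqrestore(&bar_lock, flags);
 *	}
 *
 * First pull in the arch-specific lock type: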
*/ #if defined(CONFIG_SMP) # include <asm/spinlock_types.h> #else # include <linux/spinlock_types_up.h> #endif #include <linux/lockdep.h> typedef struct raw_spinlock { arch_spinlock_t raw_lock; #ifdef CONFIG_DEBUG_SPINLOCK unsigned int magic, owner_cpu; void *owner; #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif } raw_spinlock_t; #define SPINLOCK_MAGIC 0xdead4ead #define SPINLOCK_OWNER_INIT ((void *)-1L) #ifdef CONFIG_DEBUG_LOCK_ALLOC # define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } #else # define SPIN_DEP_MAP_INIT(lockname) #endif #ifdef CONFIG_DEBUG_SPINLOCK # define SPIN_DEBUG_INIT(lockname) \ .magic = SPINLOCK_MAGIC, \ .owner_cpu = -1, \ .owner = SPINLOCK_OWNER_INIT, #else # define SPIN_DEBUG_INIT(lockname) #endif #define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ { \ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ SPIN_DEBUG_INIT(lockname) \ SPIN_DEP_MAP_INIT(lockname) } #define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) #define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) typedef struct spinlock { union { struct raw_spinlock rlock; #ifdef CONFIG_DEBUG_LOCK_ALLOC # define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) struct { u8 __padding[LOCK_PADSIZE]; struct lockdep_map dep_map; }; #endif }; } spinlock_t; #define __SPIN_LOCK_INITIALIZER(lockname) \ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } #define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) #include <linux/rwlock_types.h> #endif /* __LINUX_SPINLOCK_TYPES_H */ hyperv.h 0000644 00000124677 14722070374 0006261 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright (c) 2011, Microsoft Corporation. * * Authors: * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> * K. Y. Srinivasan <kys@microsoft.com> */ #ifndef _HYPERV_H #define _HYPERV_H #include <uapi/linux/hyperv.h> #include <linux/types.h> #include <linux/scatterlist.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/interrupt.h> #include <linux/reciprocal_div.h> #define MAX_PAGE_BUFFER_COUNT 32 #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ #pragma pack(push, 1) /* Single-page buffer */ struct hv_page_buffer { u32 len; u32 offset; u64 pfn; }; /* Multiple-page buffer */ struct hv_multipage_buffer { /* Length and Offset determines the # of pfns in the array */ u32 len; u32 offset; u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT]; }; /* * Multiple-page buffer array; the pfn array is variable size: * The number of entries in the PFN array is determined by * "len" and "offset". */ struct hv_mpb_array { /* Length and Offset determines the # of pfns in the array */ u32 len; u32 offset; u64 pfn_array[]; }; /* 0x18 includes the proprietary packet header */ #define MAX_PAGE_BUFFER_PACKET (0x18 + \ (sizeof(struct hv_page_buffer) * \ MAX_PAGE_BUFFER_COUNT)) #define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \ sizeof(struct hv_multipage_buffer)) #pragma pack(pop) struct hv_ring_buffer { /* Offset in bytes from the start of ring data below */ u32 write_index; /* Offset in bytes from the start of ring data below */ u32 read_index; u32 interrupt_mask; /* * WS2012/Win8 and later versions of Hyper-V implement interrupt * driven flow management. 
The feature bit feat_pending_send_sz * is set by the host on the host->guest ring buffer, and by the * guest on the guest->host ring buffer. * * The meaning of the feature bit is a bit complex in that it has * semantics that apply to both ring buffers. If the guest sets * the feature bit in the guest->host ring buffer, the guest is * telling the host that: * 1) It will set the pending_send_sz field in the guest->host ring * buffer when it is waiting for space to become available, and * 2) It will read the pending_send_sz field in the host->guest * ring buffer and interrupt the host when it frees enough space * * Similarly, if the host sets the feature bit in the host->guest * ring buffer, the host is telling the guest that: * 1) It will set the pending_send_sz field in the host->guest ring * buffer when it is waiting for space to become available, and * 2) It will read the pending_send_sz field in the guest->host * ring buffer and interrupt the guest when it frees enough space * * If either the guest or host does not set the feature bit that it * owns, that guest or host must do polling if it encounters a full * ring buffer, and not signal the other end with an interrupt. */ u32 pending_send_sz; u32 reserved1[12]; union { struct { u32 feat_pending_send_sz:1; }; u32 value; } feature_bits; /* Pad it to PAGE_SIZE so that data starts on page boundary */ u8 reserved2[4028]; /* * Ring data starts here + RingDataStartOffset * !!! DO NOT place any fields below this !!! */ u8 buffer[0]; } __packed; struct hv_ring_buffer_info { struct hv_ring_buffer *ring_buffer; u32 ring_size; /* Include the shared header */ struct reciprocal_value ring_size_div10_reciprocal; spinlock_t ring_lock; u32 ring_datasize; /* < ring_size */ u32 priv_read_index; /* * The ring buffer mutex lock. This lock prevents the ring buffer from * being freed while the ring buffer is being accessed. */ struct mutex ring_buffer_mutex; }; static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) { u32 read_loc, write_loc, dsize, read; dsize = rbi->ring_datasize; read_loc = rbi->ring_buffer->read_index; write_loc = READ_ONCE(rbi->ring_buffer->write_index); read = write_loc >= read_loc ? (write_loc - read_loc) : (dsize - read_loc) + write_loc; return read; } static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi) { u32 read_loc, write_loc, dsize, write; dsize = rbi->ring_datasize; read_loc = READ_ONCE(rbi->ring_buffer->read_index); write_loc = rbi->ring_buffer->write_index; write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : read_loc - write_loc; return write; } static inline u32 hv_get_avail_to_write_percent( const struct hv_ring_buffer_info *rbi) { u32 avail_write = hv_get_bytes_to_write(rbi); return reciprocal_divide( (avail_write << 3) + (avail_write << 1), rbi->ring_size_div10_reciprocal); } /* * VMBUS version is 32 bit entity broken up into * two 16 bit quantities: major_number. minor_number. * * 0 . 13 (Windows Server 2008) * 1 . 1 (Windows 7) * 2 . 4 (Windows 8) * 3 . 0 (Windows 8 R2) * 4 . 0 (Windows 10) * 5 . 0 (Newer Windows 10) */ #define VERSION_WS2008 ((0 << 16) | (13)) #define VERSION_WIN7 ((1 << 16) | (1)) #define VERSION_WIN8 ((2 << 16) | (4)) #define VERSION_WIN8_1 ((3 << 16) | (0)) #define VERSION_WIN10 ((4 << 16) | (0)) #define VERSION_WIN10_V5 ((5 << 16) | (0)) #define VERSION_INVAL -1 #define VERSION_CURRENT VERSION_WIN10_V5 /* Make maximum size of pipe payload of 16K */ #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) /* Define PipeMode values. 
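 */

/*
 * Editor's sketch (hypothetical helper, not part of this header): a writer
 * must see strictly more free space than the packet size, because the ring
 * can never be filled completely -- a completely full ring would be
 * indistinguishable from an empty one (read_index == write_index).
 */
static inline bool hv_ring_has_room(const struct hv_ring_buffer_info *rbi,
				    u32 bytes)
{
	return hv_get_bytes_to_write(rbi) > bytes;
}

/* The values: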
*/ #define VMBUS_PIPE_TYPE_BYTE 0x00000000 #define VMBUS_PIPE_TYPE_MESSAGE 0x00000004 /* The size of the user defined data buffer for non-pipe offers. */ #define MAX_USER_DEFINED_BYTES 120 /* The size of the user defined data buffer for pipe offers. */ #define MAX_PIPE_USER_DEFINED_BYTES 116 /* * At the center of the Channel Management library is the Channel Offer. This * struct contains the fundamental information about an offer. */ struct vmbus_channel_offer { guid_t if_type; guid_t if_instance; /* * These two fields are not currently used. */ u64 reserved1; u64 reserved2; u16 chn_flags; u16 mmio_megabytes; /* in bytes * 1024 * 1024 */ union { /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */ struct { unsigned char user_def[MAX_USER_DEFINED_BYTES]; } std; /* * Pipes: * The following structure is an integrated pipe protocol, which * is implemented on top of standard user-defined data. Pipe * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own * use. */ struct { u32 pipe_mode; unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES]; } pipe; } u; /* * The sub_channel_index is defined in Win8: a value of zero means a * primary channel and a value of non-zero means a sub-channel. * * Before Win8, the field is reserved, meaning it's always zero. */ u16 sub_channel_index; u16 reserved3; } __packed; /* Server Flags */ #define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1 #define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2 #define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4 #define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10 #define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100 #define VMBUS_CHANNEL_PARENT_OFFER 0x200 #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400 #define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000 struct vmpacket_descriptor { u16 type; u16 offset8; u16 len8; u16 flags; u64 trans_id; } __packed; struct vmpacket_header { u32 prev_pkt_start_offset; struct vmpacket_descriptor descriptor; } __packed; struct vmtransfer_page_range { u32 byte_count; u32 byte_offset; } __packed; struct vmtransfer_page_packet_header { struct vmpacket_descriptor d; u16 xfer_pageset_id; u8 sender_owns_set; u8 reserved; u32 range_cnt; struct vmtransfer_page_range ranges[]; } __packed; struct vmgpadl_packet_header { struct vmpacket_descriptor d; u32 gpadl; u32 reserved; } __packed; struct vmadd_remove_transfer_page_set { struct vmpacket_descriptor d; u32 gpadl; u16 xfer_pageset_id; u16 reserved; } __packed; /* * This structure defines a range in guest physical space that can be made to * look virtually contiguous. */ struct gpa_range { u32 byte_count; u32 byte_offset; u64 pfn_array[0]; }; /* * This is the format for an Establish Gpadl packet, which contains a handle by * which this GPADL will be known and a set of GPA ranges associated with it. * This can be converted to an MDL by the guest OS. If there are multiple GPA * ranges, then the resulting MDL will be "chained," representing multiple VA * ranges. */ struct vmestablish_gpadl { struct vmpacket_descriptor d; u32 gpadl; u32 range_cnt; struct gpa_range range[1]; } __packed; /* * This is the format for a Teardown Gpadl packet, which indicates that the * GPADL handle in the Establish Gpadl packet will never be referenced again. */ struct vmteardown_gpadl { struct vmpacket_descriptor d; u32 gpadl; u32 reserved; /* for alignment to an 8-byte boundary */ } __packed; /* * This is the format for a GPA-Direct packet, which contains a set of GPA * ranges, in addition to commands and/or data.
*/ struct vmdata_gpa_direct { struct vmpacket_descriptor d; u32 reserved; u32 range_cnt; struct gpa_range range[1]; } __packed; /* This is the format for an Additional Data Packet. */ struct vmadditional_data { struct vmpacket_descriptor d; u64 total_bytes; u32 offset; u32 byte_cnt; unsigned char data[1]; } __packed; union vmpacket_largest_possible_header { struct vmpacket_descriptor simple_hdr; struct vmtransfer_page_packet_header xfer_page_hdr; struct vmgpadl_packet_header gpadl_hdr; struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr; struct vmestablish_gpadl establish_gpadl_hdr; struct vmteardown_gpadl teardown_gpadl_hdr; struct vmdata_gpa_direct data_gpa_direct_hdr; }; #define VMPACKET_DATA_START_ADDRESS(__packet) \ (void *)(((unsigned char *)__packet) + \ ((struct vmpacket_descriptor *)__packet)->offset8 * 8) #define VMPACKET_DATA_LENGTH(__packet) \ ((((struct vmpacket_descriptor *)__packet)->len8 - \ ((struct vmpacket_descriptor *)__packet)->offset8) * 8) #define VMPACKET_TRANSFER_MODE(__packet) \ (((struct vmpacket_descriptor *)__packet)->type) enum vmbus_packet_type { VM_PKT_INVALID = 0x0, VM_PKT_SYNCH = 0x1, VM_PKT_ADD_XFER_PAGESET = 0x2, VM_PKT_RM_XFER_PAGESET = 0x3, VM_PKT_ESTABLISH_GPADL = 0x4, VM_PKT_TEARDOWN_GPADL = 0x5, VM_PKT_DATA_INBAND = 0x6, VM_PKT_DATA_USING_XFER_PAGES = 0x7, VM_PKT_DATA_USING_GPADL = 0x8, VM_PKT_DATA_USING_GPA_DIRECT = 0x9, VM_PKT_CANCEL_REQUEST = 0xa, VM_PKT_COMP = 0xb, VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc, VM_PKT_ADDITIONAL_DATA = 0xd }; #define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1 /* Version 1 messages */ enum vmbus_channel_message_type { CHANNELMSG_INVALID = 0, CHANNELMSG_OFFERCHANNEL = 1, CHANNELMSG_RESCIND_CHANNELOFFER = 2, CHANNELMSG_REQUESTOFFERS = 3, CHANNELMSG_ALLOFFERS_DELIVERED = 4, CHANNELMSG_OPENCHANNEL = 5, CHANNELMSG_OPENCHANNEL_RESULT = 6, CHANNELMSG_CLOSECHANNEL = 7, CHANNELMSG_GPADL_HEADER = 8, CHANNELMSG_GPADL_BODY = 9, CHANNELMSG_GPADL_CREATED = 10, CHANNELMSG_GPADL_TEARDOWN = 11, CHANNELMSG_GPADL_TORNDOWN = 12, CHANNELMSG_RELID_RELEASED = 13, CHANNELMSG_INITIATE_CONTACT = 14, CHANNELMSG_VERSION_RESPONSE = 15, CHANNELMSG_UNLOAD = 16, CHANNELMSG_UNLOAD_RESPONSE = 17, CHANNELMSG_18 = 18, CHANNELMSG_19 = 19, CHANNELMSG_20 = 20, CHANNELMSG_TL_CONNECT_REQUEST = 21, CHANNELMSG_22 = 22, CHANNELMSG_TL_CONNECT_RESULT = 23, CHANNELMSG_COUNT }; /* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */ #define INVALID_RELID U32_MAX struct vmbus_channel_message_header { enum vmbus_channel_message_type msgtype; u32 padding; } __packed; /* Query VMBus Version parameters */ struct vmbus_channel_query_vmbus_version { struct vmbus_channel_message_header header; u32 version; } __packed; /* VMBus Version Supported parameters */ struct vmbus_channel_version_supported { struct vmbus_channel_message_header header; u8 version_supported; } __packed; /* Offer Channel parameters */ struct vmbus_channel_offer_channel { struct vmbus_channel_message_header header; struct vmbus_channel_offer offer; u32 child_relid; u8 monitorid; /* * win7 and beyond split this field into a bit field. */ u8 monitor_allocated:1; u8 reserved:7; /* * These are new fields added in win7 and later. * Do not access these fields without checking the * negotiated protocol. * * If "is_dedicated_interrupt" is set, we must not set the * associated bit in the channel bitmap while sending the * interrupt to the host. * * connection_id is to be used in signaling the host.
*/ u16 is_dedicated_interrupt:1; u16 reserved1:15; u32 connection_id; } __packed; /* Rescind Offer parameters */ struct vmbus_channel_rescind_offer { struct vmbus_channel_message_header header; u32 child_relid; } __packed; static inline u32 hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi) { return rbi->ring_buffer->pending_send_sz; } /* * Request Offer -- no parameters, SynIC message contains the partition ID * Set Snoop -- no parameters, SynIC message contains the partition ID * Clear Snoop -- no parameters, SynIC message contains the partition ID * All Offers Delivered -- no parameters, SynIC message contains the partition * ID * Flush Client -- no parameters, SynIC message contains the partition ID */ /* Open Channel parameters */ struct vmbus_channel_open_channel { struct vmbus_channel_message_header header; /* Identifies the specific VMBus channel that is being opened. */ u32 child_relid; /* ID making a particular open request at a channel offer unique. */ u32 openid; /* GPADL for the channel's ring buffer. */ u32 ringbuffer_gpadlhandle; /* * Starting with win8, this field will be used to specify * the target virtual processor on which to deliver the interrupt for * the host to guest communication. * Prior to win8, incoming channel interrupts would only * be delivered on cpu 0. Setting this value to 0 would * preserve the earlier behavior. */ u32 target_vp; /* * The upstream ring buffer begins at offset zero in the memory * described by RingBufferGpadlHandle. The downstream ring buffer * follows it at this offset (in pages). */ u32 downstream_ringbuffer_pageoffset; /* User-specific data to be passed along to the server endpoint. */ unsigned char userdata[MAX_USER_DEFINED_BYTES]; } __packed; /* Open Channel Result parameters */ struct vmbus_channel_open_result { struct vmbus_channel_message_header header; u32 child_relid; u32 openid; u32 status; } __packed; /* Close channel parameters; */ struct vmbus_channel_close_channel { struct vmbus_channel_message_header header; u32 child_relid; } __packed; /* Channel Message GPADL */ #define GPADL_TYPE_RING_BUFFER 1 #define GPADL_TYPE_SERVER_SAVE_AREA 2 #define GPADL_TYPE_TRANSACTION 8 /* * The number of PFNs in a GPADL message is defined by the number of * pages that would be spanned by ByteCount and ByteOffset. If the * implied number of PFNs won't fit in this packet, there will be a * follow-up packet that contains more. */ struct vmbus_channel_gpadl_header { struct vmbus_channel_message_header header; u32 child_relid; u32 gpadl; u16 range_buflen; u16 rangecount; struct gpa_range range[0]; } __packed; /* This is the followup packet that contains more PFNs. 
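 */

/*
 * Editor's sketch (hypothetical helper): the PFN count implied by a
 * struct gpa_range, per the comment above -- the number of pages spanned
 * by byte_offset .. byte_offset + byte_count:
 */
static inline u32 foo_gpa_range_pfn_count(const struct gpa_range *range)
{
	return (range->byte_offset + range->byte_count + PAGE_SIZE - 1) /
		PAGE_SIZE;
}

/* The follow-up packet carrying the PFNs that did not fit in the header: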
*/ struct vmbus_channel_gpadl_body { struct vmbus_channel_message_header header; u32 msgnumber; u32 gpadl; u64 pfn[0]; } __packed; struct vmbus_channel_gpadl_created { struct vmbus_channel_message_header header; u32 child_relid; u32 gpadl; u32 creation_status; } __packed; struct vmbus_channel_gpadl_teardown { struct vmbus_channel_message_header header; u32 child_relid; u32 gpadl; } __packed; struct vmbus_channel_gpadl_torndown { struct vmbus_channel_message_header header; u32 gpadl; } __packed; struct vmbus_channel_relid_released { struct vmbus_channel_message_header header; u32 child_relid; } __packed; struct vmbus_channel_initiate_contact { struct vmbus_channel_message_header header; u32 vmbus_version_requested; u32 target_vcpu; /* The VCPU the host should respond to */ union { u64 interrupt_page; struct { u8 msg_sint; u8 padding1[3]; u32 padding2; }; }; u64 monitor_page1; u64 monitor_page2; } __packed; /* Hyper-V socket: guest's connect()-ing to host */ struct vmbus_channel_tl_connect_request { struct vmbus_channel_message_header header; guid_t guest_endpoint_id; guid_t host_service_id; } __packed; struct vmbus_channel_version_response { struct vmbus_channel_message_header header; u8 version_supported; u8 connection_state; u16 padding; /* * On new hosts that support VMBus protocol 5.0, we must use * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message, * and for subsequent messages, we must use the Message Connection ID * field in the host-returned Version Response Message. * * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1). */ u32 msg_conn_id; } __packed; enum vmbus_channel_state { CHANNEL_OFFER_STATE, CHANNEL_OPENING_STATE, CHANNEL_OPEN_STATE, CHANNEL_OPENED_STATE, }; /* * Represents each channel msg on the vmbus connection. This is a * variable-size data structure depending on the msg type itself. */ struct vmbus_channel_msginfo { /* Bookkeeping stuff */ struct list_head msglistentry; /* So far, this is only used to handle gpadl body messages */ struct list_head submsglist; /* Synchronize the request/response if needed */ struct completion waitevent; struct vmbus_channel *waiting_channel; union { struct vmbus_channel_version_supported version_supported; struct vmbus_channel_open_result open_result; struct vmbus_channel_gpadl_torndown gpadl_torndown; struct vmbus_channel_gpadl_created gpadl_created; struct vmbus_channel_version_response version_response; } response; u32 msgsize; /* * The channel message that goes out on the "wire". * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header */ unsigned char msg[0]; }; struct vmbus_close_msg { struct vmbus_channel_msginfo info; struct vmbus_channel_close_channel msg; }; /* Define connection identifier type. */ union hv_connection_id { u32 asu32; struct { u32 id:24; u32 reserved:8; } u; }; enum hv_numa_policy { HV_BALANCED = 0, HV_LOCALIZED, }; enum vmbus_device_type { HV_IDE = 0, HV_SCSI, HV_FC, HV_NIC, HV_ND, HV_PCIE, HV_FB, HV_KBD, HV_MOUSE, HV_KVP, HV_TS, HV_HB, HV_SHUTDOWN, HV_FCOPY, HV_BACKUP, HV_DM, HV_UNKNOWN, }; struct vmbus_device { u16 dev_type; guid_t guid; bool perf_device; }; struct vmbus_channel { struct list_head listentry; struct hv_device *device_obj; enum vmbus_channel_state state; struct vmbus_channel_offer_channel offermsg; /* * These are based on the OfferMsg.MonitorId. * Save them here for easy access.
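 * (In the vmbus driver they are typically derived as
 *	monitor_grp = monitorid / 32;
 *	monitor_bit = monitorid % 32;
 * i.e. the monitor id selects one bit within an array of 32-bit
 * trigger groups on the shared monitor page.)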
*/ u8 monitor_grp; u8 monitor_bit; bool rescind; /* got rescind msg */ struct completion rescind_event; u32 ringbuffer_gpadlhandle; /* Allocated memory for ring buffer */ struct page *ringbuffer_page; u32 ringbuffer_pagecount; u32 ringbuffer_send_offset; struct hv_ring_buffer_info outbound; /* send to parent */ struct hv_ring_buffer_info inbound; /* receive from parent */ struct vmbus_close_msg close_msg; /* Statistics */ u64 interrupts; /* Host to Guest interrupts */ u64 sig_events; /* Guest to Host events */ /* * Guest to host interrupts caused by the outbound ring buffer changing * from empty to not empty. */ u64 intr_out_empty; /* * Indicates that a full outbound ring buffer was encountered. The flag * is set to true when a full outbound ring buffer is encountered and * set to false when a write to the outbound ring buffer is completed. */ bool out_full_flag; /* Channel callback is invoked in softirq context */ struct tasklet_struct callback_event; void (*onchannel_callback)(void *context); void *channel_callback_context; /* * A channel can be marked for one of three modes of reading: * BATCHED - callback called from tasklet and should read * channel until empty. Interrupts from the host * are masked while read is in process (default). * DIRECT - callback called from tasklet (softirq). * ISR - callback called in interrupt context and must * invoke its own deferred processing. * Host interrupts are disabled and must be re-enabled * when ring is empty. */ enum hv_callback_mode { HV_CALL_BATCHED, HV_CALL_DIRECT, HV_CALL_ISR } callback_mode; bool is_dedicated_interrupt; u64 sig_event; /* * Starting with win8, this field will be used to specify * the target virtual processor on which to deliver the interrupt for * the host to guest communication. * Prior to win8, incoming channel interrupts would only * be delivered on cpu 0. Setting this value to 0 would * preserve the earlier behavior. */ u32 target_vp; /* The corresponding CPUID in the guest */ u32 target_cpu; /* * State to manage the CPU affiliation of channels. */ struct cpumask alloced_cpus_in_node; int numa_node; /* * Support for sub-channels. For high performance devices, * it will be useful to have multiple sub-channels to support * a scalable communication infrastructure with the host. * The support for sub-channels is implemented as an extension * to the current infrastructure. * The initial offer is considered the primary channel and this * offer message will indicate if the host supports sub-channels. * The guest is free to ask for sub-channels to be offered and can * open these sub-channels as a normal "primary" channel. However, * all sub-channels will have the same type and instance guids as the * primary channel. Requests sent on a given channel will result in a * response on the same channel. */ /* * Sub-channel creation callback. This callback will be called in * process context when a sub-channel offer is received from the host. * The guest can open the sub-channel in the context of this callback. */ void (*sc_creation_callback)(struct vmbus_channel *new_sc); /* * Channel rescind callback. Some channels (the hvsock ones) need to * register a callback which is invoked in vmbus_onoffer_rescind(). */ void (*chn_rescind_callback)(struct vmbus_channel *channel); /* * The spinlock to protect the structure. It is being used to protect * test-and-set access to various attributes of the structure as well * as all sc_list operations. */ spinlock_t lock; /* * All Sub-channels of a primary channel are linked here.
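 *
 * Minimal sketch (not from the original header) of walking the list
 * under this lock; do_something() is hypothetical per-channel work:
 *
 *	struct vmbus_channel *sc;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&primary->lock, flags);
 *	list_for_each_entry(sc, &primary->sc_list, sc_list)
 *		do_something(sc);
 *	spin_unlock_irqrestore(&primary->lock, flags);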
*/ struct list_head sc_list; /* * The primary channel this sub-channel belongs to. * This will be NULL for the primary channel. */ struct vmbus_channel *primary_channel; /* * Support per-channel state for use by vmbus drivers. */ void *per_channel_state; /* * To support per-cpu lookup mapping of relid to channel, * link up channels based on their CPU affinity. */ struct list_head percpu_list; /* * Defer freeing channel until after all CPUs have * gone through a grace period. */ struct rcu_head rcu; /* * For sysfs per-channel properties. */ struct kobject kobj; /* * For performance critical channels (storage, networking * etc.), Hyper-V has a mechanism to enhance the throughput * at the expense of latency: * When the host is to be signaled, we just set a bit in a shared page * and this bit will be inspected by the hypervisor within a certain * window and if the bit is set, the host will be signaled. The window * of time is the monitor latency - currently around 100 usecs. This * mechanism improves throughput by: * * A) Making the host more efficient - each time it wakes up, * it can potentially process more packets. The * monitor latency allows a batch to build up. * B) By deferring the hypercall to signal, we will also minimize * the interrupts. * * Clearly, these optimizations improve throughput at the expense of * latency. Furthermore, since the channel is shared for both * control and data messages, control messages currently suffer * unnecessary latency adversely impacting performance and boot * time. To fix this issue, permit tagging the channel as being * in "low latency" mode. In this mode, we will bypass the monitor * mechanism. */ bool low_latency; /* * NUMA distribution policy: * We support two policies: * 1) Balanced: Here all performance critical channels are * distributed evenly amongst all the NUMA nodes. * This policy will be the default policy. * 2) Localized: All channels of a given instance of a * performance critical service will be assigned CPUs * within a selected NUMA node. */ enum hv_numa_policy affinity_policy; bool probe_done; /* * We must offload the handling of the primary/sub channels * from the single-threaded vmbus_connection.work_queue to * two different workqueues, otherwise we can block * vmbus_connection.work_queue and hang: see vmbus_process_offer(). */ struct work_struct add_channel_work; /* * Guest to host interrupts caused by the inbound ring buffer changing * from full to not full while a packet is waiting. */ u64 intr_in_full; /* * The total number of write operations that encountered a full * outbound ring buffer. */ u64 out_full_total; /* * The number of write operations that were the first to encounter a * full outbound ring buffer.
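 * (So out_full_first <= out_full_total: if ten consecutive writes hit
 * a full ring before the host drained it, out_full_total grows by ten
 * but out_full_first only by one. Both counters are updated in
 * set_channel_pending_send_size() below.)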
*/ u64 out_full_first; }; static inline bool is_hvsock_channel(const struct vmbus_channel *c) { return !!(c->offermsg.offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); } static inline bool is_sub_channel(const struct vmbus_channel *c) { return c->offermsg.offer.sub_channel_index != 0; } static inline void set_channel_affinity_state(struct vmbus_channel *c, enum hv_numa_policy policy) { c->affinity_policy = policy; } static inline void set_channel_read_mode(struct vmbus_channel *c, enum hv_callback_mode mode) { c->callback_mode = mode; } static inline void set_per_channel_state(struct vmbus_channel *c, void *s) { c->per_channel_state = s; } static inline void *get_per_channel_state(struct vmbus_channel *c) { return c->per_channel_state; } static inline void set_channel_pending_send_size(struct vmbus_channel *c, u32 size) { unsigned long flags; if (size) { spin_lock_irqsave(&c->outbound.ring_lock, flags); ++c->out_full_total; if (!c->out_full_flag) { ++c->out_full_first; c->out_full_flag = true; } spin_unlock_irqrestore(&c->outbound.ring_lock, flags); } else { c->out_full_flag = false; } c->outbound.ring_buffer->pending_send_sz = size; } static inline void set_low_latency_mode(struct vmbus_channel *c) { c->low_latency = true; } static inline void clear_low_latency_mode(struct vmbus_channel *c) { c->low_latency = false; } void vmbus_onmessage(void *context); int vmbus_request_offers(void); /* * APIs for managing sub-channels. */ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel, void (*sc_cr_cb)(struct vmbus_channel *new_sc)); void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, void (*chn_rescind_cb)(struct vmbus_channel *)); /* * Check if sub-channels have already been offered. This API will be useful * when the driver is unloaded after establishing sub-channels. In this case, * when the driver is re-loaded, the driver would have to check if the * subchannels have already been established before attempting to request * the creation of sub-channels. * This function returns TRUE to indicate that subchannels have already been * created. * This function should be invoked after setting the callback function for * sub-channel creation.
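 *
 * Usage sketch (illustrative only; my_sc_create() and
 * request_subchannels() are hypothetical, device-specific steps):
 *
 *	vmbus_set_sc_create_callback(primary, my_sc_create);
 *	if (!vmbus_are_subchannels_present(primary))
 *		request_subchannels(primary);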
*/ bool vmbus_are_subchannels_present(struct vmbus_channel *primary); /* The format must be the same as struct vmdata_gpa_direct */ struct vmbus_channel_packet_page_buffer { u16 type; u16 dataoffset8; u16 length8; u16 flags; u64 transactionid; u32 reserved; u32 rangecount; struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT]; } __packed; /* The format must be the same as struct vmdata_gpa_direct */ struct vmbus_channel_packet_multipage_buffer { u16 type; u16 dataoffset8; u16 length8; u16 flags; u64 transactionid; u32 reserved; u32 rangecount; /* Always 1 in this case */ struct hv_multipage_buffer range; } __packed; /* The format must be the same as struct vmdata_gpa_direct */ struct vmbus_packet_mpb_array { u16 type; u16 dataoffset8; u16 length8; u16 flags; u64 transactionid; u32 reserved; u32 rangecount; /* Always 1 in this case */ struct hv_mpb_array range; } __packed; int vmbus_alloc_ring(struct vmbus_channel *channel, u32 send_size, u32 recv_size); void vmbus_free_ring(struct vmbus_channel *channel); int vmbus_connect_ring(struct vmbus_channel *channel, void (*onchannel_callback)(void *context), void *context); int vmbus_disconnect_ring(struct vmbus_channel *channel); extern int vmbus_open(struct vmbus_channel *channel, u32 send_ringbuffersize, u32 recv_ringbuffersize, void *userdata, u32 userdatalen, void (*onchannel_callback)(void *context), void *context); extern void vmbus_close(struct vmbus_channel *channel); extern int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, u32 bufferLen, u64 requestid, enum vmbus_packet_type type, u32 flags); extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, struct hv_page_buffer pagebuffers[], u32 pagecount, void *buffer, u32 bufferlen, u64 requestid); extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, struct vmbus_packet_mpb_array *mpb, u32 desc_size, void *buffer, u32 bufferlen, u64 requestid); extern int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, u32 size, u32 *gpadl_handle); extern int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle); void vmbus_reset_channel_cb(struct vmbus_channel *channel); extern int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer, u32 bufferlen, u32 *buffer_actual_len, u64 *requestid); extern int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer, u32 bufferlen, u32 *buffer_actual_len, u64 *requestid); extern void vmbus_ontimer(unsigned long data); /* Base driver object */ struct hv_driver { const char *name; /* * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER * channel flag, actually doesn't mean a synthetic device because the * offer's if_type/if_instance can change for every new hvsock * connection. * * However, to facilitate the notification of new-offer/rescind-offer * from vmbus driver to hvsock driver, we can handle hvsock offer as * a special vmbus device, and hence we need the below flag to * indicate if the driver is the hvsock driver or not: we need to * specially treat the hvsock offer & driver in vmbus_match().
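 *
 * Sketch of the resulting match rule (simplified from the vmbus
 * driver's vmbus_match()):
 *
 *	if (is_hvsock_channel(hv_dev->channel))
 *		return drv->hvsock;
 *
 * i.e. hvsock offers are matched by this flag rather than by GUID.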
*/ bool hvsock; /* the device type supported by this driver */ guid_t dev_type; const struct hv_vmbus_device_id *id_table; struct device_driver driver; /* dynamic device GUID's */ struct { spinlock_t lock; struct list_head list; } dynids; int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); int (*remove)(struct hv_device *); void (*shutdown)(struct hv_device *); int (*suspend)(struct hv_device *); int (*resume)(struct hv_device *); }; /* Base device object */ struct hv_device { /* the device type id of this device */ guid_t dev_type; /* the device instance id of this device */ guid_t dev_instance; u16 vendor_id; u16 device_id; struct device device; char *driver_override; /* Driver name to force a match */ struct vmbus_channel *channel; struct kset *channels_kset; }; static inline struct hv_device *device_to_hv_device(struct device *d) { return container_of(d, struct hv_device, device); } static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d) { return container_of(d, struct hv_driver, driver); } static inline void hv_set_drvdata(struct hv_device *dev, void *data) { dev_set_drvdata(&dev->device, data); } static inline void *hv_get_drvdata(struct hv_device *dev) { return dev_get_drvdata(&dev->device); } struct hv_ring_buffer_debug_info { u32 current_interrupt_mask; u32 current_read_index; u32 current_write_index; u32 bytes_avail_toread; u32 bytes_avail_towrite; }; int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, struct hv_ring_buffer_debug_info *debug_info); /* Vmbus interface */ #define vmbus_driver_register(driver) \ __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name); void vmbus_driver_unregister(struct hv_driver *hv_driver); void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, resource_size_t min, resource_size_t max, resource_size_t size, resource_size_t align, bool fb_overlap_ok); void vmbus_free_mmio(resource_size_t start, resource_size_t size); /* * GUID definitions of various offer types - services offered to the guest. 
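 *
 * A driver matches one of these by placing it in its id_table; a
 * minimal sketch (mirroring what the synthetic-NIC driver does):
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};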
*/ /* * Network GUID * {f8615163-df3e-46c5-913f-f2d2f965ed0e} */ #define HV_NIC_GUID \ .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) /* * IDE GUID * {32412632-86cb-44a2-9b5c-50d1417354f5} */ #define HV_IDE_GUID \ .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) /* * SCSI GUID * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ #define HV_SCSI_GUID \ .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) /* * Shutdown GUID * {0e0b6031-5213-4934-818b-38d90ced39db} */ #define HV_SHUTDOWN_GUID \ .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) /* * Time Synch GUID * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */ #define HV_TS_GUID \ .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) /* * Heartbeat GUID * {57164f39-9115-4e78-ab55-382f3bd5422d} */ #define HV_HEART_BEAT_GUID \ .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) /* * KVP GUID * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} */ #define HV_KVP_GUID \ .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) /* * Dynamic memory GUID * {525074dc-8985-46e2-8057-a307dc18a502} */ #define HV_DM_GUID \ .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) /* * Mouse GUID * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} */ #define HV_MOUSE_GUID \ .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) /* * Keyboard GUID * {f912ad6d-2b17-48ea-bd65-f927a61c7684} */ #define HV_KBD_GUID \ .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) /* * VSS (Backup/Restore) GUID */ #define HV_VSS_GUID \ .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) /* * Synthetic Video GUID * {DA0A7802-E377-4aac-8E77-0558EB1073F8} */ #define HV_SYNTHVID_GUID \ .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) /* * Synthetic FC GUID * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} */ #define HV_SYNTHFC_GUID \ .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) /* * Guest File Copy Service * {34D14BE3-DEE4-41c8-9AE7-6B174977C192} */ #define HV_FCOPY_GUID \ .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) /* * NetworkDirect. This is the guest RDMA service. * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} */ #define HV_ND_GUID \ .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) /* * PCI Express Pass Through * {44C4F61D-4444-4400-9D52-802E27EDE19F} */ #define HV_PCIE_GUID \ .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) /* * Linux doesn't support the 3 devices: the first two are for * Automatic Virtual Machine Activation, and the third is for * Remote Desktop Virtualization. 
* {f8e65716-3cb3-4a06-9a60-1889c5cccab5} * {3375baf4-9e15-4b30-b765-67acb10d607b} * {276aacf4-ac15-426c-98dd-7521ad3f01fe} */ #define HV_AVMA1_GUID \ .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \ 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5) #define HV_AVMA2_GUID \ .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \ 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b) #define HV_RDV_GUID \ .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \ 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe) /* * Common header for Hyper-V ICs */ #define ICMSGTYPE_NEGOTIATE 0 #define ICMSGTYPE_HEARTBEAT 1 #define ICMSGTYPE_KVPEXCHANGE 2 #define ICMSGTYPE_SHUTDOWN 3 #define ICMSGTYPE_TIMESYNC 4 #define ICMSGTYPE_VSS 5 #define ICMSGHDRFLAG_TRANSACTION 1 #define ICMSGHDRFLAG_REQUEST 2 #define ICMSGHDRFLAG_RESPONSE 4 /* * While we want to handle util services as regular devices, * there is only one instance of each of these services; so * we statically allocate the service specific state. */ struct hv_util_service { u8 *recv_buffer; void *channel; void (*util_cb)(void *); int (*util_init)(struct hv_util_service *); void (*util_deinit)(void); }; struct vmbuspipe_hdr { u32 flags; u32 msgsize; } __packed; struct ic_version { u16 major; u16 minor; } __packed; struct icmsg_hdr { struct ic_version icverframe; u16 icmsgtype; struct ic_version icvermsg; u16 icmsgsize; u32 status; u8 ictransaction_id; u8 icflags; u8 reserved[2]; } __packed; struct icmsg_negotiate { u16 icframe_vercnt; u16 icmsg_vercnt; u32 reserved; struct ic_version icversion_data[1]; /* any size array */ } __packed; struct shutdown_msg_data { u32 reason_code; u32 timeout_seconds; u32 flags; u8 display_message[2048]; } __packed; struct heartbeat_msg_data { u64 seq_num; u32 reserved[8]; } __packed; /* Time Sync IC defs */ #define ICTIMESYNCFLAG_PROBE 0 #define ICTIMESYNCFLAG_SYNC 1 #define ICTIMESYNCFLAG_SAMPLE 2 #ifdef __x86_64__ #define WLTIMEDELTA 116444736000000000L /* in 100ns unit */ #else #define WLTIMEDELTA 116444736000000000LL #endif struct ictimesync_data { u64 parenttime; u64 childtime; u64 roundtriptime; u8 flags; } __packed; struct ictimesync_ref_data { u64 parenttime; u64 vmreferencetime; u8 flags; char leapflags; char stratum; u8 reserved[3]; } __packed; struct hyperv_service_callback { u8 msg_type; char *log_msg; guid_t data; struct vmbus_channel *channel; void (*callback)(void *context); }; #define MAX_SRV_VER 0x7ffffff extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, const int *fw_version, int fw_vercnt, const int *srv_version, int srv_vercnt, int *nego_fw_version, int *nego_srv_version); void hv_process_channel_removal(struct vmbus_channel *channel); void vmbus_setevent(struct vmbus_channel *channel); /* * Negotiated version with the Host. */ extern __u32 vmbus_proto_version; int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id, const guid_t *shv_host_servie_id); void vmbus_set_event(struct vmbus_channel *channel); /* Get the start of the ring buffer. 
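 * (The ring's GPADL maps a struct hv_ring_buffer: a control page
 * holding write_index, read_index, interrupt_mask, pending_send_sz
 * and friends, followed by the data pages, so "buffer" below is the
 * first byte of the data area.)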
*/ static inline void * hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info) { return ring_info->ring_buffer->buffer; } /* * Mask off host interrupt callback notifications */ static inline void hv_begin_read(struct hv_ring_buffer_info *rbi) { rbi->ring_buffer->interrupt_mask = 1; /* make sure mask update is not reordered */ virt_mb(); } /* * Re-enable host callback and return number of outstanding bytes */ static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi) { rbi->ring_buffer->interrupt_mask = 0; /* make sure mask update is not reordered */ virt_mb(); /* * Now check to see if the ring buffer is still empty. * If it is not, we raced and we need to process new * incoming messages. */ return hv_get_bytes_to_read(rbi); } /* * An API to support in-place processing of incoming VMBUS packets. */ /* Get data payload associated with descriptor */ static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc) { return (void *)((unsigned long)desc + (desc->offset8 << 3)); } /* Get data size associated with descriptor */ static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc) { return (desc->len8 << 3) - (desc->offset8 << 3); } struct vmpacket_descriptor * hv_pkt_iter_first(struct vmbus_channel *channel); struct vmpacket_descriptor * __hv_pkt_iter_next(struct vmbus_channel *channel, const struct vmpacket_descriptor *pkt); void hv_pkt_iter_close(struct vmbus_channel *channel); /* * Get next packet descriptor from iterator * If at end of list, return NULL and update host. */ static inline struct vmpacket_descriptor * hv_pkt_iter_next(struct vmbus_channel *channel, const struct vmpacket_descriptor *pkt) { struct vmpacket_descriptor *nxt; nxt = __hv_pkt_iter_next(channel, pkt); if (!nxt) hv_pkt_iter_close(channel); return nxt; } #define foreach_vmbus_pkt(pkt, channel) \ for (pkt = hv_pkt_iter_first(channel); pkt; \ pkt = hv_pkt_iter_next(channel, pkt)) /* * Interface for passing data between SR-IOV PF and VF drivers. The VF driver * sends requests to read and write blocks. Each block must be 128 bytes or * smaller. Optionally, the VF driver can register a callback function which * will be invoked when the host says that one or more of the first 64 block * IDs is "invalid" which means that the VF driver should reread them. */ #define HV_CONFIG_BLOCK_SIZE_MAX 128 int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len, unsigned int block_id, unsigned int *bytes_returned); int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len, unsigned int block_id); int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context, void (*block_invalidate)(void *context, u64 block_mask)); struct hyperv_pci_block_ops { int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len, unsigned int block_id, unsigned int *bytes_returned); int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len, unsigned int block_id); int (*reg_blk_invalidate)(struct pci_dev *dev, void *context, void (*block_invalidate)(void *context, u64 block_mask)); }; extern struct hyperv_pci_block_ops hvpci_block_ops; #endif /* _HYPERV_H */ qcom_scm.h 0000644 00000010136 14722070374 0006525 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. 
*/ #ifndef __QCOM_SCM_H #define __QCOM_SCM_H #include <linux/err.h> #include <linux/types.h> #include <linux/cpumask.h> #define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) #define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 #define QCOM_SCM_HDCP_MAX_REQ_CNT 5 struct qcom_scm_hdcp_req { u32 addr; u32 val; }; struct qcom_scm_vmperm { int vmid; int perm; }; #define QCOM_SCM_VMID_HLOS 0x3 #define QCOM_SCM_VMID_MSS_MSA 0xF #define QCOM_SCM_VMID_WLAN 0x18 #define QCOM_SCM_VMID_WLAN_CE 0x19 #define QCOM_SCM_PERM_READ 0x4 #define QCOM_SCM_PERM_WRITE 0x2 #define QCOM_SCM_PERM_EXEC 0x1 #define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE) #define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC) #if IS_ENABLED(CONFIG_QCOM_SCM) extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); extern bool qcom_scm_is_available(void); extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); extern bool qcom_scm_pas_supported(u32 peripheral); extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size); extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size); extern int qcom_scm_pas_auth_and_reset(u32 peripheral); extern int qcom_scm_pas_shutdown(u32 peripheral); extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, unsigned int *src, const struct qcom_scm_vmperm *newvm, unsigned int dest_cnt); extern void qcom_scm_cpu_power_down(u32 flags); extern u32 qcom_scm_get_version(void); extern int qcom_scm_set_remote_state(u32 state, u32 id); extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); #else #include <linux/errno.h> static inline int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) { return -ENODEV; } static inline int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) { return -ENODEV; } static inline bool qcom_scm_is_available(void) { return false; } static inline bool qcom_scm_hdcp_available(void) { return false; } static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) { return -ENODEV; } static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; } static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) { return -ENODEV; } static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) { return -ENODEV; } static inline int qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; } static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; } static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, unsigned int *src, const struct qcom_scm_vmperm *newvm, unsigned int dest_cnt) { return -ENODEV; } static inline void qcom_scm_cpu_power_down(u32 flags) {} static inline u32 qcom_scm_get_version(void) { return 0; } static inline u32 qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; } static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; } static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t 
*size) { return -ENODEV; } static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; } static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; } static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; } #endif #endif vmw_vmci_api.h 0000644 00000005442 14722070374 0007410 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * VMware VMCI Driver * * Copyright (C) 2012 VMware, Inc. All rights reserved. */ #ifndef __VMW_VMCI_API_H__ #define __VMW_VMCI_API_H__ #include <linux/uidgid.h> #include <linux/vmw_vmci_defs.h> #undef VMCI_KERNEL_API_VERSION #define VMCI_KERNEL_API_VERSION_1 1 #define VMCI_KERNEL_API_VERSION_2 2 #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 struct msghdr; typedef void (vmci_device_shutdown_fn) (void *device_registration, void *user_data); int vmci_datagram_create_handle(u32 resource_id, u32 flags, vmci_datagram_recv_cb recv_cb, void *client_data, struct vmci_handle *out_handle); int vmci_datagram_create_handle_priv(u32 resource_id, u32 flags, u32 priv_flags, vmci_datagram_recv_cb recv_cb, void *client_data, struct vmci_handle *out_handle); int vmci_datagram_destroy_handle(struct vmci_handle handle); int vmci_datagram_send(struct vmci_datagram *msg); int vmci_doorbell_create(struct vmci_handle *handle, u32 flags, u32 priv_flags, vmci_callback notify_cb, void *client_data); int vmci_doorbell_destroy(struct vmci_handle handle); int vmci_doorbell_notify(struct vmci_handle handle, u32 priv_flags); u32 vmci_get_context_id(void); bool vmci_is_context_owner(u32 context_id, kuid_t uid); int vmci_event_subscribe(u32 event, vmci_event_cb callback, void *callback_data, u32 *subid); int vmci_event_unsubscribe(u32 subid); u32 vmci_context_get_priv_flags(u32 context_id); int vmci_qpair_alloc(struct vmci_qp **qpair, struct vmci_handle *handle, u64 produce_qsize, u64 consume_qsize, u32 peer, u32 flags, u32 priv_flags); int vmci_qpair_detach(struct vmci_qp **qpair); int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair, u64 *producer_tail, u64 *consumer_head); int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair, u64 *consumer_tail, u64 *producer_head); s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair); s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair); s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair); s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair); ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, const void *buf, size_t buf_size, int mode); ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, void *buf, size_t buf_size, int mode); ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size, int mode); ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, int mode); ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, int mode); ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, int mode); #endif /* !__VMW_VMCI_API_H__ */ serial_8250.h 0000644 00000014002 14722070374 0006655 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/include/linux/serial_8250.h * * Copyright (C) 2004 Russell King */ #ifndef _LINUX_SERIAL_8250_H #define _LINUX_SERIAL_8250_H #include <linux/serial_core.h> #include <linux/serial_reg.h> #include <linux/platform_device.h> /* * This is the platform device platform_data structure */ struct plat_serial8250_port { unsigned long iobase; /* io base 
address */ void __iomem *membase; /* ioremap cookie or NULL */ resource_size_t mapbase; /* resource base */ unsigned int irq; /* interrupt number */ unsigned long irqflags; /* request_irq flags */ unsigned int uartclk; /* UART clock rate */ void *private_data; unsigned char regshift; /* register shift */ unsigned char iotype; /* UPIO_* */ unsigned char hub6; upf_t flags; /* UPF_* flags */ unsigned int type; /* If UPF_FIXED_TYPE */ unsigned int (*serial_in)(struct uart_port *, int); void (*serial_out)(struct uart_port *, int, int); void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); void (*set_ldisc)(struct uart_port *, struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int state, unsigned old); void (*handle_break)(struct uart_port *); }; /* * Allocate 8250 platform device IDs. Nothing is implied by * the numbering here, except for the legacy entry being -1. */ enum { PLAT8250_DEV_LEGACY = -1, PLAT8250_DEV_PLATFORM, PLAT8250_DEV_PLATFORM1, PLAT8250_DEV_PLATFORM2, PLAT8250_DEV_FOURPORT, PLAT8250_DEV_ACCENT, PLAT8250_DEV_BOCA, PLAT8250_DEV_EXAR_ST16C554, PLAT8250_DEV_HUB6, PLAT8250_DEV_AU1X00, PLAT8250_DEV_SM501, }; struct uart_8250_dma; struct uart_8250_port; /** * 8250 core driver operations * * @setup_irq() Setup irq handling. The universal 8250 driver links this * port to the irq chain. Other drivers may @request_irq(). * @release_irq() Undo irq handling. The universal 8250 driver unlinks * the port from the irq chain. */ struct uart_8250_ops { int (*setup_irq)(struct uart_8250_port *); void (*release_irq)(struct uart_8250_port *); }; struct uart_8250_em485 { struct hrtimer start_tx_timer; /* "rs485 start tx" timer */ struct hrtimer stop_tx_timer; /* "rs485 stop tx" timer */ struct hrtimer *active_timer; /* pointer to active timer */ struct uart_8250_port *port; /* for hrtimer callbacks */ }; /* * This should be used by drivers which want to register * their own 8250 ports without registering their own * platform device. Using these will make your driver * dependent on the 8250 driver. */ struct uart_8250_port { struct uart_port port; struct timer_list timer; /* "no irq" timer */ struct list_head list; /* ports on this IRQ */ u32 capabilities; /* port capabilities */ unsigned short bugs; /* port bugs */ unsigned int tx_loadsz; /* transmit fifo load size */ unsigned char acr; unsigned char fcr; unsigned char ier; unsigned char lcr; unsigned char mcr; unsigned char mcr_mask; /* mask of user bits */ unsigned char mcr_force; /* mask of forced bits */ unsigned char cur_iotype; /* Running I/O type */ unsigned int rpm_tx_active; unsigned char canary; /* non-zero during system sleep * if no_console_suspend */ unsigned char probe; struct mctrl_gpios *gpios; #define UART_PROBE_RSA (1 << 0) /* * Some bits in registers are cleared on a read, so they must * be saved whenever the register is read but the bits will not * be immediately processed. 
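 *
 * The usual pattern in the 8250 core looks roughly like this (sketch):
 *
 *	lsr = serial_in(up, UART_LSR);
 *	up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
 *	... later, when the event is actually handled ...
 *	lsr |= up->lsr_saved_flags;
 *	up->lsr_saved_flags = 0;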
*/ #define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS unsigned char lsr_saved_flags; #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA unsigned char msr_saved_flags; struct uart_8250_dma *dma; const struct uart_8250_ops *ops; /* 8250 specific callbacks */ int (*dl_read)(struct uart_8250_port *); void (*dl_write)(struct uart_8250_port *, int); struct uart_8250_em485 *em485; /* Serial port overrun backoff */ struct delayed_work overrun_backoff; u32 overrun_backoff_time_ms; }; static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) { return container_of(up, struct uart_8250_port, port); } int serial8250_register_8250_port(struct uart_8250_port *); void serial8250_unregister_port(int line); void serial8250_suspend_port(int line); void serial8250_resume_port(int line); extern int early_serial_setup(struct uart_port *port); extern int early_serial8250_setup(struct earlycon_device *device, const char *options); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); extern void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios); extern unsigned int serial8250_do_get_mctrl(struct uart_port *port); extern int serial8250_do_startup(struct uart_port *port); extern void serial8250_do_shutdown(struct uart_port *port); extern void serial8250_do_pm(struct uart_port *port, unsigned int state, unsigned int oldstate); extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl); extern void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud, unsigned int quot, unsigned int quot_frac); extern int fsl8250_handle_irq(struct uart_port *port); int serial8250_handle_irq(struct uart_port *port, unsigned int iir); unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr); void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr); void serial8250_tx_chars(struct uart_8250_port *up); unsigned int serial8250_modem_status(struct uart_8250_port *up); void serial8250_init_port(struct uart_8250_port *up); void serial8250_set_defaults(struct uart_8250_port *up); void serial8250_console_write(struct uart_8250_port *up, const char *s, unsigned int count); int serial8250_console_setup(struct uart_port *port, char *options, bool probe); extern void serial8250_set_isa_configurator(void (*v) (int port, struct uart_port *up, u32 *capabilities)); #endif idle_inject.h 0000644 00000001500 14722070374 0007170 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018 Linaro Ltd * * Author: Daniel Lezcano <daniel.lezcano@linaro.org> * */ #ifndef __IDLE_INJECT_H__ #define __IDLE_INJECT_H__ /* private idle injection device structure */ struct idle_inject_device; struct idle_inject_device *idle_inject_register(struct cpumask *cpumask); void idle_inject_unregister(struct idle_inject_device *ii_dev); int idle_inject_start(struct idle_inject_device *ii_dev); void idle_inject_stop(struct idle_inject_device *ii_dev); void idle_inject_set_duration(struct idle_inject_device *ii_dev, unsigned int run_duration_us, unsigned int idle_duration_us); void idle_inject_get_duration(struct idle_inject_device *ii_dev, unsigned int *run_duration_us, unsigned int *idle_duration_us); #endif /* __IDLE_INJECT_H__ */ aio.h 0000644 00000001213 14722070374 0005470 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX__AIO_H #define __LINUX__AIO_H #include <linux/aio_abi.h> struct kioctx; struct kiocb; struct mm_struct; typedef int (kiocb_cancel_fn)(struct kiocb *); /* prototypes */ 
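/*
 * Usage sketch (illustrative only; my_cancel() is hypothetical): a
 * driver that can abort an in-flight kiocb registers a cancel callback
 * right after queueing the request:
 *
 *	static int my_cancel(struct kiocb *iocb)
 *	{
 *		(device-specific abort of the request goes here)
 *		return 0;
 *	}
 *
 *	kiocb_set_cancel_fn(req, my_cancel);
 */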
#ifdef CONFIG_AIO extern void exit_aio(struct mm_struct *mm); void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel); #else static inline void exit_aio(struct mm_struct *mm) { } static inline void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) { } #endif /* CONFIG_AIO */ /* for sysctl: */ extern unsigned long aio_nr; extern unsigned long aio_max_nr; #endif /* __LINUX__AIO_H */ mdio-mux.h 0000644 00000001775 14722070374 0006470 0 ustar 00 /* * MDIO bus multiplexer framework. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2011, 2012 Cavium, Inc. */ #ifndef __LINUX_MDIO_MUX_H #define __LINUX_MDIO_MUX_H #include <linux/device.h> #include <linux/phy.h> /* mdio_mux_init() - Initialize an MDIO mux * @dev The device owning the MDIO mux * @mux_node The device node of the MDIO mux * @switch_fn The function called for switching target MDIO child * @mux_handle A pointer to a (void *) used internally by mdio-mux * @data Private data used by switch_fn() * @mux_bus An optional parent bus (the other case is to use the parent_bus property) */ int mdio_mux_init(struct device *dev, struct device_node *mux_node, int (*switch_fn) (int cur, int desired, void *data), void **mux_handle, void *data, struct mii_bus *mux_bus); void mdio_mux_uninit(void *mux_handle); #endif /* __LINUX_MDIO_MUX_H */ ks8851_mll.h 0000644 00000000730 14722070374 0006532 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * ks8851_mll platform data struct definition * Copyright (c) 2012 BTicino S.p.A. */ #ifndef _LINUX_KS8851_MLL_H #define _LINUX_KS8851_MLL_H #include <linux/if_ether.h> /** * struct ks8851_mll_platform_data - Platform data of the KS8851_MLL network driver * @mac_addr: The MAC address of the device, set to all zeros to use the one in * the chip.
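 *
 * Board code sketch (illustrative; the MAC below is a placeholder):
 *
 *	static struct ks8851_mll_platform_data ks_pdata = {
 *		.mac_addr = { 0x00, 0x10, 0xa1, 0x00, 0x00, 0x01 },
 *	};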
*/ struct ks8851_mll_platform_data { u8 mac_addr[ETH_ALEN]; }; #endif microchipphy.h 0000644 00000005226 14722070374 0007426 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2015 Microchip Technology */ #ifndef _MICROCHIPPHY_H #define _MICROCHIPPHY_H #define LAN88XX_INT_MASK (0x19) #define LAN88XX_INT_MASK_MDINTPIN_EN_ (0x8000) #define LAN88XX_INT_MASK_SPEED_CHANGE_ (0x4000) #define LAN88XX_INT_MASK_LINK_CHANGE_ (0x2000) #define LAN88XX_INT_MASK_FDX_CHANGE_ (0x1000) #define LAN88XX_INT_MASK_AUTONEG_ERR_ (0x0800) #define LAN88XX_INT_MASK_AUTONEG_DONE_ (0x0400) #define LAN88XX_INT_MASK_POE_DETECT_ (0x0200) #define LAN88XX_INT_MASK_SYMBOL_ERR_ (0x0100) #define LAN88XX_INT_MASK_FAST_LINK_FAIL_ (0x0080) #define LAN88XX_INT_MASK_WOL_EVENT_ (0x0040) #define LAN88XX_INT_MASK_EXTENDED_INT_ (0x0020) #define LAN88XX_INT_MASK_RESERVED_ (0x0010) #define LAN88XX_INT_MASK_FALSE_CARRIER_ (0x0008) #define LAN88XX_INT_MASK_LINK_SPEED_DS_ (0x0004) #define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_ (0x0002) #define LAN88XX_INT_MASK_RX__ER_ (0x0001) #define LAN88XX_INT_STS (0x1A) #define LAN88XX_INT_STS_INT_ACTIVE_ (0x8000) #define LAN88XX_INT_STS_SPEED_CHANGE_ (0x4000) #define LAN88XX_INT_STS_LINK_CHANGE_ (0x2000) #define LAN88XX_INT_STS_FDX_CHANGE_ (0x1000) #define LAN88XX_INT_STS_AUTONEG_ERR_ (0x0800) #define LAN88XX_INT_STS_AUTONEG_DONE_ (0x0400) #define LAN88XX_INT_STS_POE_DETECT_ (0x0200) #define LAN88XX_INT_STS_SYMBOL_ERR_ (0x0100) #define LAN88XX_INT_STS_FAST_LINK_FAIL_ (0x0080) #define LAN88XX_INT_STS_WOL_EVENT_ (0x0040) #define LAN88XX_INT_STS_EXTENDED_INT_ (0x0020) #define LAN88XX_INT_STS_RESERVED_ (0x0010) #define LAN88XX_INT_STS_FALSE_CARRIER_ (0x0008) #define LAN88XX_INT_STS_LINK_SPEED_DS_ (0x0004) #define LAN88XX_INT_STS_MASTER_SLAVE_DONE_ (0x0002) #define LAN88XX_INT_STS_RX_ER_ (0x0001) #define LAN88XX_EXT_PAGE_ACCESS (0x1F) #define LAN88XX_EXT_PAGE_SPACE_0 (0x0000) #define LAN88XX_EXT_PAGE_SPACE_1 (0x0001) #define LAN88XX_EXT_PAGE_SPACE_2 (0x0002) /* Extended Register Page 1 space */ #define LAN88XX_EXT_MODE_CTRL (0x13) #define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_ (0x000C) #define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000) #define LAN88XX_EXT_MODE_CTRL_MDI_ (0x0008) #define LAN88XX_EXT_MODE_CTRL_MDI_X_ (0x000C) /* MMD 3 Registers */ #define LAN88XX_MMD3_CHIP_ID (32877) #define LAN88XX_MMD3_CHIP_REV (32878) /* Registers specific to the LAN7800/LAN7850 embedded phy */ #define LAN78XX_PHY_LED_MODE_SELECT (0x1D) /* DSP registers */ #define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A) #define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000) #define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5) #define LAN88XX_EXT_PAGE_TR_CR 16 #define LAN88XX_EXT_PAGE_TR_LOW_DATA 17 #define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18 #endif /* _MICROCHIPPHY_H */ pci-acpi.h 0000644 00000006760 14722070374 0006421 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * File pci-acpi.h * * Copyright (C) 2004 Intel * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) */ #ifndef _PCI_ACPI_H_ #define _PCI_ACPI_H_ #include <linux/acpi.h> #ifdef CONFIG_ACPI extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev); static inline acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) { return acpi_remove_pm_notifier(dev); } extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, struct pci_dev *pci_dev); static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) { return acpi_remove_pm_notifier(dev); } extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle 
handle); struct pci_ecam_ops; extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres, struct pci_ecam_ops **ecam_ops); static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { struct pci_bus *pbus = pdev->bus; /* Find a PCI root bus */ while (!pci_is_root_bus(pbus)) pbus = pbus->parent; return ACPI_HANDLE(pbus->bridge); } static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) { struct device *dev; if (pci_is_root_bus(pbus)) dev = pbus->bridge; else { /* If pbus is a virtual bus, there is no bridge to it */ if (!pbus->self) return NULL; dev = &pbus->self->dev; } return ACPI_HANDLE(dev); } struct acpi_pci_root; struct acpi_pci_root_ops; struct acpi_pci_root_info { struct acpi_pci_root *root; struct acpi_device *bridge; struct acpi_pci_root_ops *ops; struct list_head resources; char name[16]; }; struct acpi_pci_root_ops { struct pci_ops *pci_ops; int (*init_info)(struct acpi_pci_root_info *info); void (*release_info)(struct acpi_pci_root_info *info); int (*prepare_resources)(struct acpi_pci_root_info *info); }; extern int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info); extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, struct acpi_pci_root_ops *ops, struct acpi_pci_root_info *info, void *sd); void acpi_pci_add_bus(struct pci_bus *bus); void acpi_pci_remove_bus(struct pci_bus *bus); #ifdef CONFIG_ACPI_PCI_SLOT void acpi_pci_slot_init(void); void acpi_pci_slot_enumerate(struct pci_bus *bus); void acpi_pci_slot_remove(struct pci_bus *bus); #else static inline void acpi_pci_slot_init(void) { } static inline void acpi_pci_slot_enumerate(struct pci_bus *bus) { } static inline void acpi_pci_slot_remove(struct pci_bus *bus) { } #endif #ifdef CONFIG_HOTPLUG_PCI_ACPI void acpiphp_init(void); void acpiphp_enumerate_slots(struct pci_bus *bus); void acpiphp_remove_slots(struct pci_bus *bus); void acpiphp_check_host_bridge(struct acpi_device *adev); #else static inline void acpiphp_init(void) { } static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { } static inline void acpiphp_remove_slots(struct pci_bus *bus) { } static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { } #endif extern const guid_t pci_acpi_dsm_guid; #define IGNORE_PCI_BOOT_CONFIG_DSM 0x05 #define DEVICE_LABEL_DSM 0x07 #define RESET_DELAY_DSM 0x08 #define FUNCTION_DELAY_DSM 0x09 #else /* CONFIG_ACPI */ static inline void acpi_pci_add_bus(struct pci_bus *bus) { } static inline void acpi_pci_remove_bus(struct pci_bus *bus) { } #endif /* CONFIG_ACPI */ #ifdef CONFIG_ACPI_APEI extern bool aer_acpi_firmware_first(void); #else static inline bool aer_acpi_firmware_first(void) { return false; } #endif #endif /* _PCI_ACPI_H_ */ sched/cputime.h 0000644 00000012326 14722070374 0007463 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_CPUTIME_H #define _LINUX_SCHED_CPUTIME_H #include <linux/sched/signal.h> /* * cputime accounting APIs: */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #include <asm/cputime.h> #ifndef cputime_to_nsecs # define cputime_to_nsecs(__ct) \ (cputime_to_usecs(__ct) * NSEC_PER_USEC) #endif #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void task_cputime(struct task_struct *t, u64 *utime, u64 *stime); extern u64 task_gtime(struct task_struct *t); #else static inline void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) { *utime = t->utime; *stime = t->stime; } static inline u64 task_gtime(struct task_struct *t) { return 
t->gtime; } #endif #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME static inline void task_cputime_scaled(struct task_struct *t, u64 *utimescaled, u64 *stimescaled) { *utimescaled = t->utimescaled; *stimescaled = t->stimescaled; } #else static inline void task_cputime_scaled(struct task_struct *t, u64 *utimescaled, u64 *stimescaled) { task_cputime(t, utimescaled, stimescaled); } #endif extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, u64 *ut, u64 *st); /* * Thread group CPU time accounting. */ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples); /* * The following are functions that support scheduler-internal time accounting. * These functions are generally called at the timer tick. None of this depends * on CONFIG_SCHEDSTATS. */ /** * get_running_cputimer - return &tsk->signal->cputimer if cputimers are active * * @tsk: Pointer to target task. */ #ifdef CONFIG_POSIX_TIMERS static inline struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) { struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; /* * Check whether posix CPU timers are active. If not the thread * group accounting is not active either. Lockless check. */ if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active)) return NULL; /* * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime * in __exit_signal(), we won't account to the signal struct further * cputime consumed by that task, even though the task can still be * ticking after __exit_signal(). * * In order to keep a consistent behaviour between thread group cputime * and thread group cputimer accounting, lets also ignore the cputime * elapsing after __exit_signal() in any thread group timer running. * * This makes sure that POSIX CPU clocks and timers are synchronized, so * that a POSIX CPU timer won't expire while the corresponding POSIX CPU * clock delta is behind the expiring timer value. */ if (unlikely(!tsk->sighand)) return NULL; return cputimer; } #else static inline struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) { return NULL; } #endif /** * account_group_user_time - Maintain utime for a thread group. * * @tsk: Pointer to task structure. * @cputime: Time value by which to increment the utime field of the * thread_group_cputime structure. * * If thread group time is being maintained, get the structure for the * running CPU and update the utime field there. */ static inline void account_group_user_time(struct task_struct *tsk, u64 cputime) { struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); if (!cputimer) return; atomic64_add(cputime, &cputimer->cputime_atomic.utime); } /** * account_group_system_time - Maintain stime for a thread group. * * @tsk: Pointer to task structure. * @cputime: Time value by which to increment the stime field of the * thread_group_cputime structure. * * If thread group time is being maintained, get the structure for the * running CPU and update the stime field there. 
*/ static inline void account_group_system_time(struct task_struct *tsk, u64 cputime) { struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); if (!cputimer) return; atomic64_add(cputime, &cputimer->cputime_atomic.stime); } /** * account_group_exec_runtime - Maintain exec runtime for a thread group. * * @tsk: Pointer to task structure. * @ns: Time value by which to increment the sum_exec_runtime field * of the thread_group_cputime structure. * * If thread group time is being maintained, get the structure for the * running CPU and update the sum_exec_runtime field there. */ static inline void account_group_exec_runtime(struct task_struct *tsk, unsigned long long ns) { struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); if (!cputimer) return; atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime); } static inline void prev_cputime_init(struct prev_cputime *prev) { #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE prev->utime = prev->stime = 0; raw_spin_lock_init(&prev->lock); #endif } extern unsigned long long task_sched_runtime(struct task_struct *task); #endif /* _LINUX_SCHED_CPUTIME_H */ sched/autogroup.h 0000644 00000002315 14722070374 0010037 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_AUTOGROUP_H #define _LINUX_SCHED_AUTOGROUP_H struct signal_struct; struct task_struct; struct task_group; struct seq_file; #ifdef CONFIG_SCHED_AUTOGROUP extern void sched_autogroup_create_attach(struct task_struct *p); extern void sched_autogroup_detach(struct task_struct *p); extern void sched_autogroup_fork(struct signal_struct *sig); extern void sched_autogroup_exit(struct signal_struct *sig); extern void sched_autogroup_exit_task(struct task_struct *p); #ifdef CONFIG_PROC_FS extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); #endif #else static inline void sched_autogroup_create_attach(struct task_struct *p) { } static inline void sched_autogroup_detach(struct task_struct *p) { } static inline void sched_autogroup_fork(struct signal_struct *sig) { } static inline void sched_autogroup_exit(struct signal_struct *sig) { } static inline void sched_autogroup_exit_task(struct task_struct *p) { } #endif #ifdef CONFIG_CGROUP_SCHED extern struct task_group root_task_group; #endif /* CONFIG_CGROUP_SCHED */ #endif /* _LINUX_SCHED_AUTOGROUP_H */ sched/nohz.h 0000644 00000001613 14722070374 0006770 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_NOHZ_H #define _LINUX_SCHED_NOHZ_H /* * This is the interface between the scheduler and nohz/dynticks: */ #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) extern void nohz_balance_enter_idle(int cpu); extern int get_nohz_timer_target(void); #else static inline void nohz_balance_enter_idle(int cpu) { } #endif #ifdef CONFIG_NO_HZ_COMMON void calc_load_nohz_start(void); void calc_load_nohz_remote(struct rq *rq); void calc_load_nohz_stop(void); #else static inline void calc_load_nohz_start(void) { } static inline void calc_load_nohz_remote(struct rq *rq) { } static inline void calc_load_nohz_stop(void) { } #endif /* CONFIG_NO_HZ_COMMON */ #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) extern void wake_up_nohz_cpu(int cpu); #else static inline void wake_up_nohz_cpu(int cpu) { } #endif #endif /* _LINUX_SCHED_NOHZ_H */ sched/topology.h 0000644 00000014455 14722070374 0007676 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_TOPOLOGY_H #define 
_LINUX_SCHED_TOPOLOGY_H #include <linux/topology.h> #include <linux/sched/idle.h> /* * sched-domains (multiprocessor balancing) declarations: */ #ifdef CONFIG_SMP #define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */ #define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ #define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ #define SD_ASYM_CPUCAPACITY 0x0040 /* Domain members have different CPU capacities */ #define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share CPU capacity */ #define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share CPU pkg resources */ #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ #define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ #define SD_NUMA 0x4000 /* cross-node balancing */ #ifdef CONFIG_SCHED_SMT static inline int cpu_smt_flags(void) { return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; } #endif #ifdef CONFIG_SCHED_MC static inline int cpu_core_flags(void) { return SD_SHARE_PKG_RESOURCES; } #endif #ifdef CONFIG_NUMA static inline int cpu_numa_flags(void) { return SD_NUMA; } #endif extern int arch_asym_cpu_priority(int cpu); struct sched_domain_attr { int relax_domain_level; }; #define SD_ATTR_INIT (struct sched_domain_attr) { \ .relax_domain_level = -1, \ } extern int sched_domain_level_max; struct sched_group; struct sched_domain_shared { atomic_t ref; atomic_t nr_busy_cpus; int has_idle_cores; }; struct sched_domain { /* These fields must be setup */ struct sched_domain __rcu *parent; /* top domain must be null terminated */ struct sched_domain __rcu *child; /* bottom domain must be null terminated */ struct sched_group *groups; /* the balancing groups of the domain */ unsigned long min_interval; /* Minimum balance interval ms */ unsigned long max_interval; /* Maximum balance interval ms */ unsigned int busy_factor; /* less balancing by factor if busy */ unsigned int imbalance_pct; /* No balance until over watermark */ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ int nohz_idle; /* NOHZ IDLE status */ int flags; /* See SD_* */ int level; /* Runtime fields. */ unsigned long last_balance; /* init to jiffies. units in jiffies */ unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ unsigned int nr_balance_failed; /* initialise to 0 */ /* idle_balance() stats */ u64 max_newidle_lb_cost; unsigned long next_decay_max_lb_cost; u64 avg_scan_cost; /* select_idle_sibling */ #ifdef CONFIG_SCHEDSTATS /* load_balance() stats */ unsigned int lb_count[CPU_MAX_IDLE_TYPES]; unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; /* Active load balancing */ unsigned int alb_count; unsigned int alb_failed; unsigned int alb_pushed; /* SD_BALANCE_EXEC stats */ unsigned int sbe_count; unsigned int sbe_balanced; unsigned int sbe_pushed; /* SD_BALANCE_FORK stats */ unsigned int sbf_count; unsigned int sbf_balanced; unsigned int sbf_pushed; /* try_to_wake_up() stats */ unsigned int ttwu_wake_remote; unsigned int ttwu_move_affine; unsigned int ttwu_move_balance; #endif #ifdef CONFIG_SCHED_DEBUG char *name; #endif union { void *private; /* used during construction */ struct rcu_head rcu; /* used during destruction */ }; struct sched_domain_shared *shared; unsigned int span_weight; /* * Span of all CPUs in this domain. * * NOTE: this field is variable length. (Allocated dynamically * by attaching extra space to the end of the structure, * depending on how many CPUs the kernel has booted up with) */ unsigned long span[0]; }; static inline struct cpumask *sched_domain_span(struct sched_domain *sd) { return to_cpumask(sd->span); } extern void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new); extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new); /* Allocate an array of sched domains, for partition_sched_domains(). 
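* * A minimal usage sketch (illustrative only; "ndoms" and the error handling * are assumed by this example, not taken from the header): * * cpumask_var_t *doms = alloc_sched_domains(ndoms); * if (!doms) * return -ENOMEM; * partition_sched_domains(ndoms, doms, NULL);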
*/ cpumask_var_t *alloc_sched_domains(unsigned int ndoms); void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); bool cpus_share_cache(int this_cpu, int that_cpu); typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); typedef int (*sched_domain_flags_f)(void); #define SDTL_OVERLAP 0x01 struct sd_data { struct sched_domain *__percpu *sd; struct sched_domain_shared *__percpu *sds; struct sched_group *__percpu *sg; struct sched_group_capacity *__percpu *sgc; }; struct sched_domain_topology_level { sched_domain_mask_f mask; sched_domain_flags_f sd_flags; int flags; int numa_level; struct sd_data data; #ifdef CONFIG_SCHED_DEBUG char *name; #endif }; extern void set_sched_topology(struct sched_domain_topology_level *tl); #ifdef CONFIG_SCHED_DEBUG # define SD_INIT_NAME(type) .name = #type #else # define SD_INIT_NAME(type) #endif #else /* CONFIG_SMP */ struct sched_domain_attr; static inline void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new) { } static inline void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new) { } static inline bool cpus_share_cache(int this_cpu, int that_cpu) { return true; } #endif /* !CONFIG_SMP */ #ifndef arch_scale_cpu_capacity static __always_inline unsigned long arch_scale_cpu_capacity(int cpu) { return SCHED_CAPACITY_SCALE; } #endif static inline int task_node(const struct task_struct *p) { return cpu_to_node(task_cpu(p)); } #endif /* _LINUX_SCHED_TOPOLOGY_H */ sched/types.h 0000644 00000001253 14722070374 0007156 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_TYPES_H #define _LINUX_SCHED_TYPES_H #include <linux/types.h> /** * struct task_cputime - collected CPU time counts * @stime: time spent in kernel mode, in nanoseconds * @utime: time spent in user mode, in nanoseconds * @sum_exec_runtime: total time spent on the CPU, in nanoseconds * * This structure groups together three kinds of CPU time that are tracked for * threads and thread groups. Most things considering CPU time want to group * these counts together and treat all three of them in parallel. */ struct task_cputime { u64 stime; u64 utime; unsigned long long sum_exec_runtime; }; #endif sched/user.h 0000644 00000003326 14722070374 0006773 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_USER_H #define _LINUX_SCHED_USER_H #include <linux/uidgid.h> #include <linux/atomic.h> #include <linux/refcount.h> #include <linux/ratelimit.h> /* * Some day this will be a full-fledged user tracking system.. */ struct user_struct { refcount_t __count; /* reference count */ atomic_t processes; /* How many processes does this user have? */ atomic_t sigpending; /* How many pending signals does this user have? */ #ifdef CONFIG_FANOTIFY atomic_t fanotify_listeners; #endif #ifdef CONFIG_EPOLL atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ #endif #ifdef CONFIG_POSIX_MQUEUE /* protected by mq_lock */ unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ #endif unsigned long locked_shm; /* How many pages of mlocked shm ? 
*/ unsigned long unix_inflight; /* How many files in flight in unix sockets */ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ /* Hash table maintenance information */ struct hlist_node uidhash_node; kuid_t uid; #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \ defined(CONFIG_NET) || defined(CONFIG_IO_URING) atomic_long_t locked_vm; #endif /* Miscellaneous per-user rate limit */ struct ratelimit_state ratelimit; }; extern int uids_sysfs_init(void); extern struct user_struct *find_user(kuid_t); extern struct user_struct root_user; #define INIT_USER (&root_user) /* per-UID process charging. */ extern struct user_struct * alloc_uid(kuid_t); static inline struct user_struct *get_uid(struct user_struct *u) { refcount_inc(&u->__count); return u; } extern void free_uid(struct user_struct *); #endif /* _LINUX_SCHED_USER_H */ sched/xacct.h 0000644 00000001526 14722070374 0007117 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_XACCT_H #define _LINUX_SCHED_XACCT_H /* * Extended task accounting methods: */ #include <linux/sched.h> #ifdef CONFIG_TASK_XACCT static inline void add_rchar(struct task_struct *tsk, ssize_t amt) { tsk->ioac.rchar += amt; } static inline void add_wchar(struct task_struct *tsk, ssize_t amt) { tsk->ioac.wchar += amt; } static inline void inc_syscr(struct task_struct *tsk) { tsk->ioac.syscr++; } static inline void inc_syscw(struct task_struct *tsk) { tsk->ioac.syscw++; } #else static inline void add_rchar(struct task_struct *tsk, ssize_t amt) { } static inline void add_wchar(struct task_struct *tsk, ssize_t amt) { } static inline void inc_syscr(struct task_struct *tsk) { } static inline void inc_syscw(struct task_struct *tsk) { } #endif #endif /* _LINUX_SCHED_XACCT_H */ sched/init.h 0000644 00000000360 14722070374 0006753 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_INIT_H #define _LINUX_SCHED_INIT_H /* * Scheduler init related prototypes: */ extern void sched_init(void); extern void sched_init_smp(void); #endif /* _LINUX_SCHED_INIT_H */ sched/isolation.h 0000644 00000002761 14722070374 0010020 0 ustar 00 #ifndef _LINUX_SCHED_ISOLATION_H #define _LINUX_SCHED_ISOLATION_H #include <linux/cpumask.h> #include <linux/init.h> #include <linux/tick.h> enum hk_flags { HK_FLAG_TIMER = 1, HK_FLAG_RCU = (1 << 1), HK_FLAG_MISC = (1 << 2), HK_FLAG_SCHED = (1 << 3), HK_FLAG_TICK = (1 << 4), HK_FLAG_DOMAIN = (1 << 5), HK_FLAG_WQ = (1 << 6), }; #ifdef CONFIG_CPU_ISOLATION DECLARE_STATIC_KEY_FALSE(housekeeping_overridden); extern int housekeeping_any_cpu(enum hk_flags flags); extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags); extern bool housekeeping_enabled(enum hk_flags flags); extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags); extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags); extern void __init housekeeping_init(void); #else static inline int housekeeping_any_cpu(enum hk_flags flags) { return smp_processor_id(); } static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags) { return cpu_possible_mask; } static inline bool housekeeping_enabled(enum hk_flags flags) { return false; } static inline void housekeeping_affine(struct task_struct *t, enum hk_flags flags) { } static inline void housekeeping_init(void) { } #endif /* CONFIG_CPU_ISOLATION */ static inline bool housekeeping_cpu(int cpu, enum hk_flags flags) { #ifdef CONFIG_CPU_ISOLATION if (static_branch_unlikely(&housekeeping_overridden)) return 
housekeeping_test_cpu(cpu, flags); #endif return true; } #endif /* _LINUX_SCHED_ISOLATION_H */ sched/numa_balancing.h 0000644 00000002414 14722070374 0010750 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_NUMA_BALANCING_H #define _LINUX_SCHED_NUMA_BALANCING_H /* * This is the interface between the scheduler and the MM that * implements memory access pattern based NUMA-balancing: */ #include <linux/sched.h> #define TNF_MIGRATED 0x01 #define TNF_NO_GROUP 0x02 #define TNF_SHARED 0x04 #define TNF_FAULT_LOCAL 0x08 #define TNF_MIGRATE_FAIL 0x10 #ifdef CONFIG_NUMA_BALANCING extern void task_numa_fault(int last_node, int node, int pages, int flags); extern pid_t task_numa_group_id(struct task_struct *p); extern void set_numabalancing_state(bool enabled); extern void task_numa_free(struct task_struct *p, bool final); extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, int src_nid, int dst_cpu); #else static inline void task_numa_fault(int last_node, int node, int pages, int flags) { } static inline pid_t task_numa_group_id(struct task_struct *p) { return 0; } static inline void set_numabalancing_state(bool enabled) { } static inline void task_numa_free(struct task_struct *p, bool final) { } static inline bool should_numa_migrate_memory(struct task_struct *p, struct page *page, int src_nid, int dst_cpu) { return true; } #endif #endif /* _LINUX_SCHED_NUMA_BALANCING_H */ sched/wake_q.h 0000644 00000004224 14722070374 0007262 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_WAKE_Q_H #define _LINUX_SCHED_WAKE_Q_H /* * Wake-queues are lists of tasks with a pending wakeup, whose * callers have already marked the task as woken internally, * and can thus carry on. A common use case is being able to * do the wakeups once the corresponding user lock has been * released. * * We hold a reference to each task in the list across the wakeup, * thus guaranteeing that the memory is still valid by the time * the actual wakeups are performed in wake_up_q(). * * One per task suffices, because there's never a need for a task to be * in two wake queues simultaneously; it is forbidden to abandon a task * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is * already in a wake queue, the wakeup will happen soon and the second * waker can just skip it. * * The DEFINE_WAKE_Q macro declares and initializes the list head. * wake_up_q() does NOT reinitialize the list; it's expected to be * called near the end of a function. Otherwise, the list can be * re-initialized for later re-use by wake_q_init(). * * NOTE that this can cause spurious wakeups. schedule() callers * must ensure the call is done inside a loop, confirming that the * wakeup condition has in fact occurred. * * NOTE that there is no guarantee the wakeup will happen any later than the * wake_q_add() location. Therefore the task must be ready to be woken at the * location of the wake_q_add().
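* * A minimal usage sketch (illustrative; "my_lock" and "task" are hypothetical, * and "task" is assumed to remain valid at the wake_q_add() call): * * DEFINE_WAKE_Q(wake_q); * * spin_lock(&my_lock); * wake_q_add(&wake_q, task); * spin_unlock(&my_lock); * wake_up_q(&wake_q);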
*/ #include <linux/sched.h> struct wake_q_head { struct wake_q_node *first; struct wake_q_node **lastp; }; #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) #define DEFINE_WAKE_Q(name) \ struct wake_q_head name = { WAKE_Q_TAIL, &name.first } static inline void wake_q_init(struct wake_q_head *head) { head->first = WAKE_Q_TAIL; head->lastp = &head->first; } static inline bool wake_q_empty(struct wake_q_head *head) { return head->first == WAKE_Q_TAIL; } extern void wake_q_add(struct wake_q_head *head, struct task_struct *task); extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task); extern void wake_up_q(struct wake_q_head *head); #endif /* _LINUX_SCHED_WAKE_Q_H */ sched/coredump.h 0000644 00000005377 14722070374 0007643 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_COREDUMP_H #define _LINUX_SCHED_COREDUMP_H #include <linux/mm_types.h> #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ #define SUID_DUMP_USER 1 /* Dump as user of process */ #define SUID_DUMP_ROOT 2 /* Dump as root */ /* mm flags */ /* for SUID_DUMP_* above */ #define MMF_DUMPABLE_BITS 2 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) extern void set_dumpable(struct mm_struct *mm, int value); /* * This returns the actual value of the suid_dumpable flag. For things * that are using this for checking for privilege transitions, it must * test against SUID_DUMP_USER rather than treating it as a boolean * value. */ static inline int __get_dumpable(unsigned long mm_flags) { return mm_flags & MMF_DUMPABLE_MASK; } static inline int get_dumpable(struct mm_struct *mm) { return __get_dumpable(mm->flags); } /* coredump filter bits */ #define MMF_DUMP_ANON_PRIVATE 2 #define MMF_DUMP_ANON_SHARED 3 #define MMF_DUMP_MAPPED_PRIVATE 4 #define MMF_DUMP_MAPPED_SHARED 5 #define MMF_DUMP_ELF_HEADERS 6 #define MMF_DUMP_HUGETLB_PRIVATE 7 #define MMF_DUMP_HUGETLB_SHARED 8 #define MMF_DUMP_DAX_PRIVATE 9 #define MMF_DUMP_DAX_SHARED 10 #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS #define MMF_DUMP_FILTER_BITS 9 #define MMF_DUMP_FILTER_MASK \ (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) #define MMF_DUMP_FILTER_DEFAULT \ ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS # define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) #else # define MMF_DUMP_MASK_DEFAULT_ELF 0 #endif /* leave room for more dump flags */ #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ /* * This one-shot flag is dropped due to necessity of changing exe once again * on NFS restore */ //#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ #define MMF_HAS_UPROBES 19 /* has uprobes */ #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ #define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ #define MMF_MULTIPROCESS 27 /* mm is shared between processes */ #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ MMF_DISABLE_THP_MASK) #endif /* _LINUX_SCHED_COREDUMP_H */ sched/prio.h 0000644 
00000003332 14722070374 0006763 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_PRIO_H #define _LINUX_SCHED_PRIO_H #define MAX_NICE 19 #define MIN_NICE -20 #define NICE_WIDTH (MAX_NICE - MIN_NICE + 1) /* * Priority of a process goes from 0..MAX_PRIO-1, valid RT * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority * values are inverted: lower p->prio value means higher priority. * * The MAX_USER_RT_PRIO value allows the actual maximum * RT priority to be separate from the value exported to * user-space. This allows kernel threads to set their * priority to a value higher than any user task. Note: * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. */ #define MAX_USER_RT_PRIO 100 #define MAX_RT_PRIO MAX_USER_RT_PRIO #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) /* * Convert user-nice values [ -20 ... 0 ... 19 ] * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], * and back. */ #define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO) #define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO) /* * 'User priority' is the nice value converted to something we * can work with better when scaling various scheduler parameters, * it's a [ 0 ... 39 ] range. */ #define USER_PRIO(p) ((p)-MAX_RT_PRIO) #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) /* * Convert nice value [19,-20] to rlimit style value [1,40]. */ static inline long nice_to_rlimit(long nice) { return (MAX_NICE - nice + 1); } /* * Convert rlimit style value [1,40] to nice value [-20, 19]. */ static inline long rlimit_to_nice(long prio) { return (MAX_NICE - prio + 1); } #endif /* _LINUX_SCHED_PRIO_H */ sched/jobctl.h 0000644 00000003126 14722070374 0007270 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_JOBCTL_H #define _LINUX_SCHED_JOBCTL_H #include <linux/types.h> struct task_struct; /* * task->jobctl flags */ #define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ #define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ #define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ #define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ #define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ #define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ #define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */ #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) #define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) #define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) #define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) #define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT) #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); extern void task_clear_jobctl_trapping(struct task_struct *task); extern void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask); #endif /* _LINUX_SCHED_JOBCTL_H */ sched/hotplug.h 0000644 00000001102 14722070374 0007465 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef 
_LINUX_SCHED_HOTPLUG_H #define _LINUX_SCHED_HOTPLUG_H /* * Scheduler interfaces for hotplug CPU support: */ extern int sched_cpu_starting(unsigned int cpu); extern int sched_cpu_activate(unsigned int cpu); extern int sched_cpu_deactivate(unsigned int cpu); #ifdef CONFIG_HOTPLUG_CPU extern int sched_cpu_dying(unsigned int cpu); #else # define sched_cpu_dying NULL #endif #ifdef CONFIG_HOTPLUG_CPU extern void idle_task_exit(void); #else static inline void idle_task_exit(void) {} #endif #endif /* _LINUX_SCHED_HOTPLUG_H */ sched/task.h 0000644 00000013207 14722070374 0006756 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_TASK_H #define _LINUX_SCHED_TASK_H /* * Interface between the scheduler and various task lifetime (fork()/exit()) * functionality: */ #include <linux/sched.h> #include <linux/uaccess.h> struct task_struct; struct rusage; union thread_union; /* All the bits taken by the old clone syscall. */ #define CLONE_LEGACY_FLAGS 0xffffffffULL struct kernel_clone_args { u64 flags; int __user *pidfd; int __user *child_tid; int __user *parent_tid; int exit_signal; unsigned long stack; unsigned long stack_size; unsigned long tls; }; /* * This serializes "schedule()" and also protects * the run-queue from deletions/modifications (but * _adding_ to the beginning of the run-queue has * a separate lock). */ extern rwlock_t tasklist_lock; extern spinlock_t mmlist_lock; extern union thread_union init_thread_union; extern struct task_struct init_task; #ifdef CONFIG_PROVE_RCU extern int lockdep_tasklist_lock_is_held(void); #endif /* #ifdef CONFIG_PROVE_RCU */ extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); extern int sched_fork(unsigned long clone_flags, struct task_struct *p); extern void sched_dead(struct task_struct *p); void __noreturn do_task_dead(void); void __noreturn make_task_dead(int signr); extern void mm_cache_init(void); extern void proc_caches_init(void); extern void fork_init(void); extern void release_task(struct task_struct * p); #ifdef CONFIG_HAVE_COPY_THREAD_TLS extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, struct task_struct *, unsigned long); #else extern int copy_thread(unsigned long, unsigned long, unsigned long, struct task_struct *); /* Architectures that haven't opted into copy_thread_tls get the tls argument * via pt_regs, so ignore the tls argument passed via C. 
*/ static inline int copy_thread_tls( unsigned long clone_flags, unsigned long sp, unsigned long arg, struct task_struct *p, unsigned long tls) { return copy_thread(clone_flags, sp, arg, p); } #endif extern void flush_thread(void); #ifdef CONFIG_HAVE_EXIT_THREAD extern void exit_thread(struct task_struct *tsk); #else static inline void exit_thread(struct task_struct *tsk) { } #endif extern void do_group_exit(int); extern void exit_files(struct task_struct *); extern void exit_itimers(struct signal_struct *); extern long _do_fork(struct kernel_clone_args *kargs); extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); extern void free_task(struct task_struct *tsk); /* sched_exec is called by processes performing an exec */ #ifdef CONFIG_SMP extern void sched_exec(void); #else #define sched_exec() {} #endif static inline struct task_struct *get_task_struct(struct task_struct *t) { refcount_inc(&t->usage); return t; } extern void __put_task_struct(struct task_struct *t); extern void __put_task_struct_rcu_cb(struct rcu_head *rhp); static inline void put_task_struct(struct task_struct *t) { if (!refcount_dec_and_test(&t->usage)) return; /* * under PREEMPT_RT, we can't call put_task_struct * in atomic context because it will indirectly * acquire sleeping locks. * * call_rcu() will schedule delayed_put_task_struct_rcu() * to be called in process context. * * __put_task_struct() is called when * refcount_dec_and_test(&t->usage) succeeds. * * This means that it can't "conflict" with * put_task_struct_rcu_user() which abuses ->rcu the same * way; rcu_users has a reference so task->usage can't be * zero after rcu_users 1 -> 0 transition. * * delayed_free_task() also uses ->rcu, but it is only called * when it fails to fork a process. Therefore, there is no * way it can conflict with put_task_struct(). */ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible()) call_rcu(&t->rcu, __put_task_struct_rcu_cb); else __put_task_struct(t); } void put_task_struct_rcu_user(struct task_struct *task); #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT extern int arch_task_struct_size __read_mostly; #else # define arch_task_struct_size (sizeof(struct task_struct)) #endif #ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST /* * If an architecture has not declared a thread_struct whitelist we * must assume something there may need to be copied to userspace. */ static inline void arch_thread_struct_whitelist(unsigned long *offset, unsigned long *size) { *offset = 0; /* Handle dynamically sized thread_struct. */ *size = arch_task_struct_size - offsetof(struct task_struct, thread); } #endif #ifdef CONFIG_VMAP_STACK static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) { return t->stack_vm_area; } #else static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) { return NULL; } #endif /* * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring * subscriptions and synchronises with wait4(). Also used in procfs. Also * pins the final release of task.io_context. Also protects ->cpuset and * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist. * * Nests both inside and outside of read_lock(&tasklist_lock). 
* It must not be nested with write_lock_irq(&tasklist_lock), * neither inside nor outside. */ static inline void task_lock(struct task_struct *p) { spin_lock(&p->alloc_lock); } static inline void task_unlock(struct task_struct *p) { spin_unlock(&p->alloc_lock); } #endif /* _LINUX_SCHED_TASK_H */ sched/signal.h 0000644 00000050166 14722070374 0007276 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_SIGNAL_H #define _LINUX_SCHED_SIGNAL_H #include <linux/rculist.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/sched/jobctl.h> #include <linux/sched/task.h> #include <linux/cred.h> #include <linux/refcount.h> #include <linux/posix-timers.h> /* * Types defining task->signal and task->sighand and APIs using them: */ struct sighand_struct { spinlock_t siglock; refcount_t count; wait_queue_head_t signalfd_wqh; struct k_sigaction action[_NSIG]; }; /* * Per-process accounting stats: */ struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; u64 ac_utime, ac_stime; unsigned long ac_minflt, ac_majflt; }; struct cpu_itimer { u64 expires; u64 incr; }; /* * This is the atomic variant of task_cputime, which can be used for * storing and updating task_cputime statistics without locking. */ struct task_cputime_atomic { atomic64_t utime; atomic64_t stime; atomic64_t sum_exec_runtime; }; #define INIT_CPUTIME_ATOMIC \ (struct task_cputime_atomic) { \ .utime = ATOMIC64_INIT(0), \ .stime = ATOMIC64_INIT(0), \ .sum_exec_runtime = ATOMIC64_INIT(0), \ } /** * struct thread_group_cputimer - thread group interval timer counts * @cputime_atomic: atomic thread group interval timers. * * This structure contains the version of task_cputime, above, that is * used for thread group CPU timer calculations. */ struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; }; struct multiprocess_signals { sigset_t signal; struct hlist_node node; }; /* * NOTE! "signal_struct" does not have its own * locking, because a shared signal_struct always * implies a shared sighand_struct, so locking * sighand_struct is always a proper superset of * the locking of signal_struct. */ struct signal_struct { refcount_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; wait_queue_head_t wait_chldexit; /* for wait4() */ /* current thread group signal load-balancing target: */ struct task_struct *curr_target; /* shared signal handling: */ struct sigpending shared_pending; /* For collecting multiprocess signals during fork */ struct hlist_head multiprocess; /* thread group exit support */ int group_exit_code; /* overloaded: * - notify group_exit_task when ->count is equal to notify_count * - everyone except group_exit_task is stopped during signal delivery * of fatal signals, group_exit_task processes the signal. */ int notify_count; struct task_struct *group_exit_task; /* thread group stop support, overloads group_exit_code too */ int group_stop_count; unsigned int flags; /* see SIGNAL_* flags below */ /* * PR_SET_CHILD_SUBREAPER marks a process, like a service * manager, to re-parent orphan (double-forking) child processes * to this process instead of 'init'. The service manager is * able to receive SIGCHLD signals and is able to investigate * the process until it calls wait(). All children of this * process will inherit a flag if they should look for a * child_subreaper process at exit. 
*/ unsigned int is_child_subreaper:1; unsigned int has_child_subreaper:1; #ifdef CONFIG_POSIX_TIMERS /* POSIX.1b Interval Timers */ unsigned int next_posix_timer_id; struct list_head posix_timers; /* ITIMER_REAL timer for the process */ struct hrtimer real_timer; ktime_t it_real_incr; /* * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these * values are defined to 0 and 1 respectively */ struct cpu_itimer it[2]; /* * Thread group totals for process CPU timers. * See thread_group_cputimer(), et al, for details. */ struct thread_group_cputimer cputimer; #endif /* Empty if CONFIG_POSIX_TIMERS=n */ struct posix_cputimers posix_cputimers; /* PID/PID hash table linkage. */ struct pid *pids[PIDTYPE_MAX]; #ifdef CONFIG_NO_HZ_FULL atomic_t tick_dep_mask; #endif struct pid *tty_old_pgrp; /* boolean value for session group leader */ int leader; struct tty_struct *tty; /* NULL if no tty */ #ifdef CONFIG_SCHED_AUTOGROUP struct autogroup *autogroup; #endif /* * Cumulative resource counters for dead threads in the group, * and for reaped dead child processes forked by this group. * Live threads maintain their own counters and add to these * in __exit_signal, except for the group leader. */ seqlock_t stats_lock; u64 utime, stime, cutime, cstime; u64 gtime; u64 cgtime; struct prev_cputime prev_cputime; unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; unsigned long inblock, oublock, cinblock, coublock; unsigned long maxrss, cmaxrss; struct task_io_accounting ioac; /* * Cumulative ns of scheduled CPU time for dead threads in the * group, not including a zombie group leader. (This only differs * from jiffies_to_ns(utime + stime) if sched_clock uses something * other than jiffies.) */ unsigned long long sum_sched_runtime; /* * We don't bother to synchronize most readers of this at all, * because there is no reader checking a limit that actually needs * to get both rlim_cur and rlim_max atomically, and either one * alone is a single word that can safely be read normally. * getrlimit/setrlimit use task_lock(current->group_leader) to * protect this instead of the siglock, because they really * have no need to disable irqs. */ struct rlimit rlim[RLIM_NLIMITS]; #ifdef CONFIG_BSD_PROCESS_ACCT struct pacct_struct pacct; /* per-process accounting information */ #endif #ifdef CONFIG_TASKSTATS struct taskstats *stats; #endif #ifdef CONFIG_AUDIT unsigned audit_tty; struct tty_audit_buf *tty_audit_buf; #endif /* * Thread is the potential origin of an oom condition; kill first on * oom */ bool oom_flag_origin; short oom_score_adj; /* OOM kill score adjustment */ short oom_score_adj_min; /* OOM kill score adjustment min value. * Only settable by CAP_SYS_RESOURCE. */ struct mm_struct *oom_mm; /* recorded mm when the thread group got * killed by the oom killer */ struct mutex cred_guard_mutex; /* guard against foreign influences on * credential calculations * (notably ptrace) * Deprecated, do not use in new code. * Use exec_update_lock instead. */ struct rw_semaphore exec_update_lock; /* Held while task_struct is * being updated during exec, * and may have inconsistent * permissions. */ } __randomize_layout; /* * Bits in flags field of signal_struct.
*/ #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ #define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ #define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ #define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ /* * Pending notifications to parent. */ #define SIGNAL_CLD_STOPPED 0x00000010 #define SIGNAL_CLD_CONTINUED 0x00000020 #define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ #define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ SIGNAL_STOP_CONTINUED) static inline void signal_set_stop_flags(struct signal_struct *sig, unsigned int flags) { WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; } /* If true, all threads except ->group_exit_task have pending SIGKILL */ static inline int signal_group_exit(const struct signal_struct *sig) { return (sig->flags & SIGNAL_GROUP_EXIT) || (sig->group_exit_task != NULL); } extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); extern int dequeue_signal(struct task_struct *task, sigset_t *mask, kernel_siginfo_t *info); static inline int kernel_dequeue_signal(void) { struct task_struct *task = current; kernel_siginfo_t __info; int ret; spin_lock_irq(&task->sighand->siglock); ret = dequeue_signal(task, &task->blocked, &__info); spin_unlock_irq(&task->sighand->siglock); return ret; } static inline void kernel_signal_stop(void) { spin_lock_irq(&current->sighand->siglock); if (current->jobctl & JOBCTL_STOP_DEQUEUED) set_special_state(TASK_STOPPED); spin_unlock_irq(&current->sighand->siglock); schedule(); } #ifdef __ARCH_SI_TRAPNO # define ___ARCH_SI_TRAPNO(_a1) , _a1 #else # define ___ARCH_SI_TRAPNO(_a1) #endif #ifdef __ia64__ # define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3 #else # define ___ARCH_SI_IA64(_a1, _a2, _a3) #endif int force_sig_fault_to_task(int sig, int code, void __user *addr ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t); int force_sig_fault(int sig, int code, void __user *addr ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)); int send_sig_fault(int sig, int code, void __user *addr ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t); int force_sig_mceerr(int code, void __user *, short); int send_sig_mceerr(int code, void __user *, short, struct task_struct *); int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); int force_sig_pkuerr(void __user *addr, u32 pkey); int force_sig_ptrace_errno_trap(int errno, void __user *addr); extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern void force_sigsegv(int sig); extern int force_sig_info(struct kernel_siginfo *); extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp); extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid); extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *, const struct cred *); extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern __must_check bool do_notify_parent(struct task_struct *, int); extern void __wake_up_parent(struct task_struct
*p, struct task_struct *parent); extern void force_sig(int); extern int send_sig(int, struct task_struct *, int); extern int zap_other_threads(struct task_struct *p); extern struct sigqueue *sigqueue_alloc(void); extern void sigqueue_free(struct sigqueue *); extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type); extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); static inline int restart_syscall(void) { set_tsk_thread_flag(current, TIF_SIGPENDING); return -ERESTARTNOINTR; } static inline int signal_pending(struct task_struct *p) { return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); } static inline int __fatal_signal_pending(struct task_struct *p) { return unlikely(sigismember(&p->pending.signal, SIGKILL)); } static inline int fatal_signal_pending(struct task_struct *p) { return signal_pending(p) && __fatal_signal_pending(p); } static inline int signal_pending_state(long state, struct task_struct *p) { if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) return 0; if (!signal_pending(p)) return 0; return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); } /* * Reevaluate whether the task has signals pending delivery. * Wake the task if so. * This is required every time the blocked sigset_t changes. * callers must hold sighand->siglock. */ extern void recalc_sigpending_and_wake(struct task_struct *t); extern void recalc_sigpending(void); extern void calculate_sigpending(void); extern void signal_wake_up_state(struct task_struct *t, unsigned int state); static inline void signal_wake_up(struct task_struct *t, bool resume) { signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); } static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) { signal_wake_up_state(t, resume ? __TASK_TRACED : 0); } void task_join_group_stop(struct task_struct *task); #ifdef TIF_RESTORE_SIGMASK /* * Legacy restore_sigmask accessors. These are inefficient on * SMP architectures because they require atomic operations. */ /** * set_restore_sigmask() - make sure saved_sigmask processing gets done * * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code * will run before returning to user mode, to process the flag. For * all callers, TIF_SIGPENDING is already set or it's no harm to set * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the * arch code will notice on return to user mode, in case those bits * are scarce. We set TIF_SIGPENDING here to ensure that the arch * signal code always gets run when TIF_RESTORE_SIGMASK is set. */ static inline void set_restore_sigmask(void) { set_thread_flag(TIF_RESTORE_SIGMASK); } static inline void clear_tsk_restore_sigmask(struct task_struct *task) { clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK); } static inline void clear_restore_sigmask(void) { clear_thread_flag(TIF_RESTORE_SIGMASK); } static inline bool test_tsk_restore_sigmask(struct task_struct *task) { return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK); } static inline bool test_restore_sigmask(void) { return test_thread_flag(TIF_RESTORE_SIGMASK); } static inline bool test_and_clear_restore_sigmask(void) { return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); } #else /* TIF_RESTORE_SIGMASK */ /* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. 
*/ static inline void set_restore_sigmask(void) { current->restore_sigmask = true; } static inline void clear_tsk_restore_sigmask(struct task_struct *task) { task->restore_sigmask = false; } static inline void clear_restore_sigmask(void) { current->restore_sigmask = false; } static inline bool test_restore_sigmask(void) { return current->restore_sigmask; } static inline bool test_tsk_restore_sigmask(struct task_struct *task) { return task->restore_sigmask; } static inline bool test_and_clear_restore_sigmask(void) { if (!current->restore_sigmask) return false; current->restore_sigmask = false; return true; } #endif static inline void restore_saved_sigmask(void) { if (test_and_clear_restore_sigmask()) __set_current_blocked(&current->saved_sigmask); } extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize); static inline void restore_saved_sigmask_unless(bool interrupted) { if (interrupted) WARN_ON(!test_thread_flag(TIF_SIGPENDING)); else restore_saved_sigmask(); } static inline sigset_t *sigmask_to_save(void) { sigset_t *res = &current->blocked; if (unlikely(test_restore_sigmask())) res = &current->saved_sigmask; return res; } static inline int kill_cad_pid(int sig, int priv) { return kill_pid(cad_pid, sig, priv); } /* These can be the second arg to send_sig_info/send_group_sig_info. */ #define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0) #define SEND_SIG_PRIV ((struct kernel_siginfo *) 1) static inline int __on_sig_stack(unsigned long sp) { #ifdef CONFIG_STACK_GROWSUP return sp >= current->sas_ss_sp && sp - current->sas_ss_sp < current->sas_ss_size; #else return sp > current->sas_ss_sp && sp - current->sas_ss_sp <= current->sas_ss_size; #endif } /* * True if we are on the alternate signal stack. */ static inline int on_sig_stack(unsigned long sp) { /* * If the signal stack is SS_AUTODISARM then, by construction, we * can't be on the signal stack unless user code deliberately set * SS_AUTODISARM when we were already on it. * * This improves reliability: if user state gets corrupted such that * the stack pointer points very close to the end of the signal stack, * then this check will enable the signal to be handled anyway. */ if (current->sas_ss_flags & SS_AUTODISARM) return 0; return __on_sig_stack(sp); } static inline int sas_ss_flags(unsigned long sp) { if (!current->sas_ss_size) return SS_DISABLE; return on_sig_stack(sp) ? SS_ONSTACK : 0; } static inline void sas_ss_reset(struct task_struct *p) { p->sas_ss_sp = 0; p->sas_ss_size = 0; p->sas_ss_flags = SS_DISABLE; } static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) { if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) #ifdef CONFIG_STACK_GROWSUP return current->sas_ss_sp; #else return current->sas_ss_sp + current->sas_ss_size; #endif return sp; } extern void __cleanup_sighand(struct sighand_struct *); extern void flush_itimer_signals(void); #define tasklist_empty() \ list_empty(&init_task.tasks) #define next_task(p) \ list_entry_rcu((p)->tasks.next, struct task_struct, tasks) #define for_each_process(p) \ for (p = &init_task ; (p = next_task(p)) != &init_task ; ) extern bool current_is_single_threaded(void); /* * Careful: do_each_thread/while_each_thread is a double loop so * 'break' will not work as expected - use goto instead.
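* * For example (an illustrative sketch; "target" and the "found" label are hypothetical): * * do_each_thread(g, t) { * if (t == target) * goto found; * } while_each_thread(g, t); * return; * found: ...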
*/ #define do_each_thread(g, t) \ for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do #define while_each_thread(g, t) \ while ((t = next_thread(t)) != g) #define __for_each_thread(signal, t) \ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) #define for_each_thread(p, t) \ __for_each_thread((p)->signal, t) /* Careful: this is a double loop, 'break' won't work as expected. */ #define for_each_process_thread(p, t) \ for_each_process(p) for_each_thread(p, t) typedef int (*proc_visitor)(struct task_struct *p, void *data); void walk_process_tree(struct task_struct *top, proc_visitor, void *); static inline struct pid *task_pid_type(struct task_struct *task, enum pid_type type) { struct pid *pid; if (type == PIDTYPE_PID) pid = task_pid(task); else pid = task->signal->pids[type]; return pid; } static inline struct pid *task_tgid(struct task_struct *task) { return task->signal->pids[PIDTYPE_TGID]; } /* * Without tasklist or RCU lock it is not safe to dereference * the result of task_pgrp/task_session even if task == current, * we can race with another thread doing sys_setsid/sys_setpgid. */ static inline struct pid *task_pgrp(struct task_struct *task) { return task->signal->pids[PIDTYPE_PGID]; } static inline struct pid *task_session(struct task_struct *task) { return task->signal->pids[PIDTYPE_SID]; } static inline int get_nr_threads(struct task_struct *task) { return task->signal->nr_threads; } static inline bool thread_group_leader(struct task_struct *p) { return p->exit_signal >= 0; } /* Due to the insanities of de_thread it is possible for a process * to have the pid of the thread group leader without actually being * the thread group leader. For iteration through the pids in proc * all we care about is that we have a task with the appropriate * pid, we don't actually care if we have the right task.
*/ static inline bool has_group_leader_pid(struct task_struct *p) { return task_pid(p) == task_tgid(p); } static inline bool same_thread_group(struct task_struct *p1, struct task_struct *p2) { return p1->signal == p2->signal; } static inline struct task_struct *next_thread(const struct task_struct *p) { return list_entry_rcu(p->thread_group.next, struct task_struct, thread_group); } static inline int thread_group_empty(struct task_struct *p) { return list_empty(&p->thread_group); } #define delay_group_leader(p) \ (thread_group_leader(p) && !thread_group_empty(p)) extern struct sighand_struct *__lock_task_sighand(struct task_struct *task, unsigned long *flags); static inline struct sighand_struct *lock_task_sighand(struct task_struct *task, unsigned long *flags) { struct sighand_struct *ret; ret = __lock_task_sighand(task, flags); (void)__cond_lock(&task->sighand->siglock, ret); return ret; } static inline void unlock_task_sighand(struct task_struct *task, unsigned long *flags) { spin_unlock_irqrestore(&task->sighand->siglock, *flags); } static inline unsigned long task_rlimit(const struct task_struct *task, unsigned int limit) { return READ_ONCE(task->signal->rlim[limit].rlim_cur); } static inline unsigned long task_rlimit_max(const struct task_struct *task, unsigned int limit) { return READ_ONCE(task->signal->rlim[limit].rlim_max); } static inline unsigned long rlimit(unsigned int limit) { return task_rlimit(current, limit); } static inline unsigned long rlimit_max(unsigned int limit) { return task_rlimit_max(current, limit); } #endif /* _LINUX_SCHED_SIGNAL_H */ sched/sysctl.h 0000644 00000006101 14722070374 0007330 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_SYSCTL_H #define _LINUX_SCHED_SYSCTL_H #include <linux/types.h> struct ctl_table; #ifdef CONFIG_DETECT_HUNG_TASK extern int sysctl_hung_task_check_count; extern unsigned int sysctl_hung_task_panic; extern unsigned long sysctl_hung_task_timeout_secs; extern unsigned long sysctl_hung_task_check_interval_secs; extern int sysctl_hung_task_warnings; extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #else /* Avoid need for ifdefs elsewhere in the code */ enum { sysctl_hung_task_timeout_secs = 0 }; #endif extern unsigned int sysctl_sched_latency; extern unsigned int sysctl_sched_min_granularity; extern unsigned int sysctl_sched_wakeup_granularity; extern unsigned int sysctl_sched_child_runs_first; enum sched_tunable_scaling { SCHED_TUNABLESCALING_NONE, SCHED_TUNABLESCALING_LOG, SCHED_TUNABLESCALING_LINEAR, SCHED_TUNABLESCALING_END, }; extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; extern unsigned int sysctl_numa_balancing_scan_delay; extern unsigned int sysctl_numa_balancing_scan_period_min; extern unsigned int sysctl_numa_balancing_scan_period_max; extern unsigned int sysctl_numa_balancing_scan_size; #ifdef CONFIG_SCHED_DEBUG extern __read_mostly unsigned int sysctl_sched_migration_cost; extern __read_mostly unsigned int sysctl_sched_nr_migrate; int sched_proc_update_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); #endif /* * control realtime throttling: * * /proc/sys/kernel/sched_rt_period_us * /proc/sys/kernel/sched_rt_runtime_us */ extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; #ifdef CONFIG_UCLAMP_TASK extern unsigned int sysctl_sched_uclamp_util_min; extern unsigned int sysctl_sched_uclamp_util_max; #endif #ifdef CONFIG_CFS_BANDWIDTH 
extern unsigned int sysctl_sched_cfs_bandwidth_slice; #endif #ifdef CONFIG_SCHED_AUTOGROUP extern unsigned int sysctl_sched_autogroup_enabled; #endif extern int sysctl_sched_rr_timeslice; extern int sched_rr_timeslice; extern int sched_rr_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #ifdef CONFIG_UCLAMP_TASK extern int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif extern int sysctl_numa_balancing(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern int sysctl_schedstats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) extern unsigned int sysctl_sched_energy_aware; extern int sched_energy_aware_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif #endif /* _LINUX_SCHED_SYSCTL_H */ sched/loadavg.h 0000644 00000003103 14722070374 0007423 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_LOADAVG_H #define _LINUX_SCHED_LOADAVG_H /* * These are the constants used to fake the fixed-point load-average * counting. Some notes: * - 11 bit fractions expand to 22 bits by the multiplies: this gives * a load-average precision of 10 bits integer + 11 bits fractional * - if you want to count load-averages more often, you need more * precision, or rounding will get you. With 2-second counting freq, * the EXP_n values would be 1981, 2034 and 2043 if still using only * 11 bit fractions. */ extern unsigned long avenrun[]; /* Load averages */ extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ #define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ #define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ #define EXP_5 2014 /* 1/exp(5sec/5min) */ #define EXP_15 2037 /* 1/exp(5sec/15min) */ /* * a1 = a0 * e + a * (1 - e) */ static inline unsigned long calc_load(unsigned long load, unsigned long exp, unsigned long active) { unsigned long newload; newload = load * exp + active * (FIXED_1 - exp); if (active >= load) newload += FIXED_1-1; return newload / FIXED_1; } extern unsigned long calc_load_n(unsigned long load, unsigned long exp, unsigned long active, unsigned int n); #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) extern void calc_global_load(unsigned long ticks); #endif /* _LINUX_SCHED_LOADAVG_H */ sched/deadline.h 0000644 00000001214 14722070374 0007554 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * SCHED_DEADLINE tasks have negative priorities, reflecting * the fact that any of them has higher prio than RT and * NORMAL/BATCH tasks.
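* * For example, with MAX_DL_PRIO defined to 0 below, a task whose p->prio * is -1 satisfies dl_prio() and is treated as a deadline task, while any * prio >= 0 (RT or normal) is not.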
*/ #define MAX_DL_PRIO 0 static inline int dl_prio(int prio) { if (unlikely(prio < MAX_DL_PRIO)) return 1; return 0; } static inline int dl_task(struct task_struct *p) { return dl_prio(p->prio); } static inline bool dl_time_before(u64 a, u64 b) { return (s64)(a - b) < 0; } #ifdef CONFIG_SMP struct root_domain; extern void dl_add_task_root_domain(struct task_struct *p); extern void dl_clear_root_domain(struct root_domain *rd); #endif /* CONFIG_SMP */ sched/mm.h 0000644 00000027265 14722070374 0006432 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_MM_H #define _LINUX_SCHED_MM_H #include <linux/kernel.h> #include <linux/atomic.h> #include <linux/sched.h> #include <linux/mm_types.h> #include <linux/gfp.h> #include <linux/sync_core.h> /* * Routines for handling mm_structs */ extern struct mm_struct *mm_alloc(void); /** * mmgrab() - Pin a &struct mm_struct. * @mm: The &struct mm_struct to pin. * * Make sure that @mm will not get freed even after the owning task * exits. This doesn't guarantee that the associated address space * will still exist later on and mmget_not_zero() has to be used before * accessing it. * * This is a preferred way to pin @mm for a longer/unbounded amount * of time. * * Use mmdrop() to release the reference acquired by mmgrab(). * * See also <Documentation/vm/active_mm.rst> for an in-depth explanation * of &mm_struct.mm_count vs &mm_struct.mm_users. */ static inline void mmgrab(struct mm_struct *mm) { atomic_inc(&mm->mm_count); } extern void __mmdrop(struct mm_struct *mm); static inline void mmdrop(struct mm_struct *mm) { /* * The implicit full barrier implied by atomic_dec_and_test() is * required by the membarrier system call before returning to * user-space, after storing to rq->curr. */ if (unlikely(atomic_dec_and_test(&mm->mm_count))) __mmdrop(mm); } /* * This has to be called after a get_task_mm()/mmget_not_zero() * followed by taking the mmap_sem for writing before modifying the * vmas or anything the coredump pretends not to change from under it. * * It also has to be called when mmgrab() is used in the context of * the process, but then the mm_count refcount is transferred outside * the context of the process to run down_write() on that pinned mm. * * NOTE: find_extend_vma() called from GUP context is the only place * that can modify the "mm" (notably the vm_start/end) under mmap_sem * for reading and outside the context of the process, so it is also * the only case that holds the mmap_sem for reading that must call * this function. Generally if the mmap_sem is held for reading * there's no need for this check after get_task_mm()/mmget_not_zero(). * * This function can be obsoleted and the check can be removed, once * the coredump code holds the mmap_sem for writing before * invoking the ->core_dump methods. */ static inline bool mmget_still_valid(struct mm_struct *mm) { return likely(!mm->core_state); } /** * mmget() - Pin the address space associated with a &struct mm_struct. * @mm: The address space to pin. * * Make sure that the address space of the given &struct mm_struct doesn't * go away. This does not protect against parts of the address space being * modified or freed, however. * * Never use this function to pin this address space for an * unbounded/indefinite amount of time. * * Use mmput() to release the reference acquired by mmget(). * * See also <Documentation/vm/active_mm.rst> for an in-depth explanation * of &mm_struct.mm_count vs &mm_struct.mm_users.
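* * A typical pattern (an illustrative sketch; obtaining a valid "mm" pointer * beforehand is assumed): * * if (mmget_not_zero(mm)) { * (the address space is pinned here) * mmput(mm); * }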
*/ static inline void mmget(struct mm_struct *mm) { atomic_inc(&mm->mm_users); } static inline bool mmget_not_zero(struct mm_struct *mm) { return atomic_inc_not_zero(&mm->mm_users); } /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); #ifdef CONFIG_MMU /* same as above but performs the slow path from the async context. Can * be called from the atomic context as well */ void mmput_async(struct mm_struct *); #endif /* Grab a reference to a task's mm, if it is not already going away */ extern struct mm_struct *get_task_mm(struct task_struct *task); /* * Grab a reference to a task's mm, if it is not already going away * and ptrace_may_access with the mode parameter passed to it * succeeds. */ extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); /* Remove the current task's stale references to the old mm_struct on exit() */ extern void exit_mm_release(struct task_struct *, struct mm_struct *); /* Remove the current task's stale references to the old mm_struct on exec() */ extern void exec_mm_release(struct task_struct *, struct mm_struct *); #ifdef CONFIG_MEMCG extern void mm_update_next_owner(struct mm_struct *mm); #else static inline void mm_update_next_owner(struct mm_struct *mm) { } #endif /* CONFIG_MEMCG */ #ifdef CONFIG_MMU #ifndef arch_get_mmap_end #define arch_get_mmap_end(addr) (TASK_SIZE) #endif #ifndef arch_get_mmap_base #define arch_get_mmap_base(addr, base) (base) #endif extern void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack); extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); #else static inline void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) {} #endif static inline bool in_vfork(struct task_struct *tsk) { bool ret; /* * need RCU to access ->real_parent if CLONE_VM was used along with * CLONE_PARENT. * * We check real_parent->mm == tsk->mm because CLONE_VFORK does not * imply CLONE_VM * * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus * ->real_parent is not necessarily the task doing vfork(), so in * theory we can't rely on task_lock() if we want to dereference it. * * And in this case we can't trust the real_parent->mm == tsk->mm * check, it can be false negative. But we do not care, if init or * another oom-unkillable task does this it should blame itself. */ rcu_read_lock(); ret = tsk->vfork_done && rcu_dereference(tsk->real_parent)->mm == tsk->mm; rcu_read_unlock(); return ret; } /* * Applies per-task gfp context to the given allocation flags. * PF_MEMALLOC_NOIO implies GFP_NOIO * PF_MEMALLOC_NOFS implies GFP_NOFS * PF_MEMALLOC_NOCMA implies no allocation from CMA region.
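* * For example (illustrative): within a memalloc_noio_save() section, * current_gfp_context(GFP_KERNEL) masks out __GFP_IO and __GFP_FS, so the * effective mask is equivalent to GFP_NOIO.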
*/ static inline gfp_t current_gfp_context(gfp_t flags) { if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) { /* * NOIO implies both NOIO and NOFS and it is a weaker context * so always make sure it takes precedence */ if (current->flags & PF_MEMALLOC_NOIO) flags &= ~(__GFP_IO | __GFP_FS); else if (current->flags & PF_MEMALLOC_NOFS) flags &= ~__GFP_FS; #ifdef CONFIG_CMA if (current->flags & PF_MEMALLOC_NOCMA) flags &= ~__GFP_MOVABLE; #endif } return flags; } #ifdef CONFIG_LOCKDEP extern void __fs_reclaim_acquire(void); extern void __fs_reclaim_release(void); extern void fs_reclaim_acquire(gfp_t gfp_mask); extern void fs_reclaim_release(gfp_t gfp_mask); #else static inline void __fs_reclaim_acquire(void) { } static inline void __fs_reclaim_release(void) { } static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } static inline void fs_reclaim_release(gfp_t gfp_mask) { } #endif /** * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope. * * This function marks the beginning of the GFP_NOIO allocation scope. * All further allocations will implicitly drop __GFP_IO flag and so * they are safe for the IO critical section from the allocation recursion * point of view. Use memalloc_noio_restore to end the scope with flags * returned by this function. * * This function is safe to be used from any context. */ static inline unsigned int memalloc_noio_save(void) { unsigned int flags = current->flags & PF_MEMALLOC_NOIO; current->flags |= PF_MEMALLOC_NOIO; return flags; } /** * memalloc_noio_restore - Ends the implicit GFP_NOIO scope. * @flags: Flags to restore. * * Ends the implicit GFP_NOIO scope started by the memalloc_noio_save function. * Always make sure that the given flags value is the return value from the * pairing memalloc_noio_save call. */ static inline void memalloc_noio_restore(unsigned int flags) { current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; } /** * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope. * * This function marks the beginning of the GFP_NOFS allocation scope. * All further allocations will implicitly drop __GFP_FS flag and so * they are safe for the FS critical section from the allocation recursion * point of view. Use memalloc_nofs_restore to end the scope with flags * returned by this function. * * This function is safe to be used from any context. */ static inline unsigned int memalloc_nofs_save(void) { unsigned int flags = current->flags & PF_MEMALLOC_NOFS; current->flags |= PF_MEMALLOC_NOFS; return flags; } /** * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope. * @flags: Flags to restore. * * Ends the implicit GFP_NOFS scope started by the memalloc_nofs_save function. * Always make sure that the given flags value is the return value from the * pairing memalloc_nofs_save call.
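* * A minimal usage sketch (illustrative only): * * unsigned int nofs_flags = memalloc_nofs_save(); * (allocations here implicitly drop __GFP_FS) * memalloc_nofs_restore(nofs_flags);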
*/ static inline void memalloc_nofs_restore(unsigned int flags) { current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags; } static inline unsigned int memalloc_noreclaim_save(void) { unsigned int flags = current->flags & PF_MEMALLOC; current->flags |= PF_MEMALLOC; return flags; } static inline void memalloc_noreclaim_restore(unsigned int flags) { current->flags = (current->flags & ~PF_MEMALLOC) | flags; } #ifdef CONFIG_CMA static inline unsigned int memalloc_nocma_save(void) { unsigned int flags = current->flags & PF_MEMALLOC_NOCMA; current->flags |= PF_MEMALLOC_NOCMA; return flags; } static inline void memalloc_nocma_restore(unsigned int flags) { current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags; } #else static inline unsigned int memalloc_nocma_save(void) { return 0; } static inline void memalloc_nocma_restore(unsigned int flags) { } #endif #ifdef CONFIG_MEMCG /** * memalloc_use_memcg - Starts the remote memcg charging scope. * @memcg: memcg to charge. * * This function marks the beginning of the remote memcg charging scope. All the * __GFP_ACCOUNT allocations till the end of the scope will be charged to the * given memcg. * * NOTE: This function is not nesting safe. */ static inline void memalloc_use_memcg(struct mem_cgroup *memcg) { WARN_ON_ONCE(current->active_memcg); current->active_memcg = memcg; } /** * memalloc_unuse_memcg - Ends the remote memcg charging scope. * * This function marks the end of the remote memcg charging scope started by * memalloc_use_memcg(). */ static inline void memalloc_unuse_memcg(void) { current->active_memcg = NULL; } #else static inline void memalloc_use_memcg(struct mem_cgroup *memcg) { } static inline void memalloc_unuse_memcg(void) { } #endif #ifdef CONFIG_MEMBARRIER enum { MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1), MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2), MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3), MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4), MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5), }; enum { MEMBARRIER_FLAG_SYNC_CORE = (1U << 0), }; #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS #include <asm/membarrier.h> #endif static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) { if (current->mm != mm) return; if (likely(!(atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE))) return; sync_core_before_usermode(); } extern void membarrier_exec_mmap(struct mm_struct *mm); #else #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS static inline void membarrier_arch_switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { } #endif static inline void membarrier_exec_mmap(struct mm_struct *mm) { } static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) { } #endif #endif /* _LINUX_SCHED_MM_H */ sched/debug.h 0000644 00000002640 14722070374 0007101 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_DEBUG_H #define _LINUX_SCHED_DEBUG_H /* * Various scheduler/task debugging interfaces: */ struct task_struct; struct pid_namespace; extern void dump_cpu_task(int cpu); /* * Only dump TASK_* tasks. 
(0 for all tasks) */ extern void show_state_filter(unsigned long state_filter); static inline void show_state(void) { show_state_filter(0); } struct pt_regs; extern void show_regs(struct pt_regs *); /* * TASK is a pointer to the task whose backtrace we want to see (or NULL for current * task), SP is the stack pointer of the first frame that should be shown in the back * trace (or NULL if the entire call-chain of the task should be shown). */ extern void show_stack(struct task_struct *task, unsigned long *sp); extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_SCHED_DEBUG struct seq_file; extern void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, struct seq_file *m); extern void proc_sched_set_task(struct task_struct *p); #endif /* Attach to any functions which should be ignored in wchan output. */ #define __sched __attribute__((__section__(".sched.text"))) /* Linker adds these: start and end of __sched functions */ extern char __sched_text_start[], __sched_text_end[]; /* Is this address in the __sched functions? */ extern int in_sched_functions(unsigned long addr); #endif /* _LINUX_SCHED_DEBUG_H */ sched/smt.h 0000644 00000000637 14722070374 0006622 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_SMT_H #define _LINUX_SCHED_SMT_H #include <linux/static_key.h> #ifdef CONFIG_SCHED_SMT extern struct static_key_false sched_smt_present; static __always_inline bool sched_smt_active(void) { return static_branch_likely(&sched_smt_present); } #else static inline bool sched_smt_active(void) { return false; } #endif void arch_smt_update(void); #endif sched/clock.h 0000644 00000004712 14722070374 0007110 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_CLOCK_H #define _LINUX_SCHED_CLOCK_H #include <linux/smp.h> /* * Do not use outside of architecture code which knows its limitations. * * sched_clock() has no promise of monotonicity or bounded drift between * CPUs, use (which you should not) requires disabling IRQs. * * Please use one of the three interfaces below. */ extern unsigned long long notrace sched_clock(void); /* * See the comment in kernel/sched/clock.c */ extern u64 running_clock(void); extern u64 sched_clock_cpu(int cpu); extern void sched_clock_init(void); #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK static inline void sched_clock_tick(void) { } static inline void clear_sched_clock_stable(void) { } static inline void sched_clock_idle_sleep_event(void) { } static inline void sched_clock_idle_wakeup_event(void) { } static inline u64 cpu_clock(int cpu) { return sched_clock(); } static inline u64 local_clock(void) { return sched_clock(); } #else extern int sched_clock_stable(void); extern void clear_sched_clock_stable(void); /* * When sched_clock_stable(), __sched_clock_offset provides the offset * between local_clock() and sched_clock(). */ extern u64 __sched_clock_offset; extern void sched_clock_tick(void); extern void sched_clock_tick_stable(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(void); /* * As outlined in clock.c, provides a fast, high resolution, nanosecond * time source that is monotonic per cpu argument and has bounded drift * between cpus. * * ######################### BIG FAT WARNING ########################## * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # * # go backwards !! 
# * #################################################################### */ static inline u64 cpu_clock(int cpu) { return sched_clock_cpu(cpu); } static inline u64 local_clock(void) { return sched_clock_cpu(raw_smp_processor_id()); } #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING /* * An i/f to runtime opt-in for irq time accounting based off of sched_clock. * The reason for this explicit opt-in is not to have perf penalty with * slow sched_clocks. */ extern void enable_sched_clock_irqtime(void); extern void disable_sched_clock_irqtime(void); #else static inline void enable_sched_clock_irqtime(void) {} static inline void disable_sched_clock_irqtime(void) {} #endif #endif /* _LINUX_SCHED_CLOCK_H */ sched/rt.h 0000644 00000002642 14722070374 0006442 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_RT_H #define _LINUX_SCHED_RT_H #include <linux/sched.h> struct task_struct; static inline int rt_prio(int prio) { if (unlikely(prio < MAX_RT_PRIO)) return 1; return 0; } static inline int rt_task(struct task_struct *p) { return rt_prio(p->prio); } static inline bool task_is_realtime(struct task_struct *tsk) { int policy = tsk->policy; if (policy == SCHED_FIFO || policy == SCHED_RR) return true; if (policy == SCHED_DEADLINE) return true; return false; } #ifdef CONFIG_RT_MUTEXES /* * Must hold either p->pi_lock or task_rq(p)->lock. */ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p) { return p->pi_top_task; } extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task); extern void rt_mutex_adjust_pi(struct task_struct *p); static inline bool tsk_is_pi_blocked(struct task_struct *tsk) { return tsk->pi_blocked_on != NULL; } #else static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) { return NULL; } # define rt_mutex_adjust_pi(p) do { } while (0) static inline bool tsk_is_pi_blocked(struct task_struct *tsk) { return false; } #endif extern void normalize_rt_tasks(void); /* * default timeslice is 100 msecs (used only for SCHED_RR tasks). * Timeslices get refilled after they expire. */ #define RR_TIMESLICE (100 * HZ / 1000) #endif /* _LINUX_SCHED_RT_H */ sched/cpufreq.h 0000644 00000001662 14722070374 0007463 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_CPUFREQ_H #define _LINUX_SCHED_CPUFREQ_H #include <linux/types.h> /* * Interface between cpufreq drivers and the scheduler: */ #define SCHED_CPUFREQ_IOWAIT (1U << 0) #define SCHED_CPUFREQ_MIGRATION (1U << 1) #ifdef CONFIG_CPU_FREQ struct cpufreq_policy; struct update_util_data { void (*func)(struct update_util_data *data, u64 time, unsigned int flags); }; void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, void (*func)(struct update_util_data *data, u64 time, unsigned int flags)); void cpufreq_remove_update_util_hook(int cpu); bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy); static inline unsigned long map_util_freq(unsigned long util, unsigned long freq, unsigned long cap) { return (freq + (freq >> 2)) * util / cap; } #endif /* CONFIG_CPU_FREQ */ #endif /* _LINUX_SCHED_CPUFREQ_H */ sched/stat.h 0000644 00000001712 14722070374 0006765 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_STAT_H #define _LINUX_SCHED_STAT_H #include <linux/percpu.h> /* * Various counters maintained by the scheduler and fork(), * exposed via /proc, sys.c or used by drivers via these APIs. 
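 *
 * An illustrative sketch (not part of the original header): a driver
 * heuristic might sample one of these counters, e.g.
 *
 *	if (single_task_running())
 *		keep_polling();
 *
 * where keep_polling() is a hypothetical caller-side helper.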
* * ( Note that all these values are acquired without locking, * so they can only be relied on in narrow circumstances. ) */ extern unsigned long total_forks; extern int nr_threads; DECLARE_PER_CPU(unsigned long, process_counts); extern int nr_processes(void); extern unsigned long nr_running(void); extern bool single_task_running(void); extern unsigned long nr_iowait(void); extern unsigned long nr_iowait_cpu(int cpu); static inline int sched_info_on(void) { #ifdef CONFIG_SCHEDSTATS return 1; #elif defined(CONFIG_TASK_DELAY_ACCT) extern int delayacct_on; return delayacct_on; #else return 0; #endif } #ifdef CONFIG_SCHEDSTATS void force_schedstat_enabled(void); #endif #endif /* _LINUX_SCHED_STAT_H */ sched/idle.h 0000644 00000003423 14722070374 0006730 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_IDLE_H #define _LINUX_SCHED_IDLE_H #include <linux/sched.h> enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE, CPU_MAX_IDLE_TYPES }; extern void wake_up_if_idle(int cpu); /* * Idle thread specific functions to determine the need_resched * polling state. */ #ifdef TIF_POLLING_NRFLAG static inline void __current_set_polling(void) { set_thread_flag(TIF_POLLING_NRFLAG); } static inline bool __must_check current_set_polling_and_test(void) { __current_set_polling(); /* * Polling state must be visible before we test NEED_RESCHED, * paired by resched_curr() */ smp_mb__after_atomic(); return unlikely(tif_need_resched()); } static inline void __current_clr_polling(void) { clear_thread_flag(TIF_POLLING_NRFLAG); } static inline bool __must_check current_clr_polling_and_test(void) { __current_clr_polling(); /* * Polling state must be visible before we test NEED_RESCHED, * paired by resched_curr() */ smp_mb__after_atomic(); return unlikely(tif_need_resched()); } #else static inline void __current_set_polling(void) { } static inline void __current_clr_polling(void) { } static inline bool __must_check current_set_polling_and_test(void) { return unlikely(tif_need_resched()); } static inline bool __must_check current_clr_polling_and_test(void) { return unlikely(tif_need_resched()); } #endif static inline void current_clr_polling(void) { __current_clr_polling(); /* * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. * Once the bit is cleared, we'll get IPIs with every new * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also * fold. */ smp_mb(); /* paired with resched_curr() */ preempt_fold_need_resched(); } #endif /* _LINUX_SCHED_IDLE_H */ sched/task_stack.h 0000644 00000006071 14722070374 0010144 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_TASK_STACK_H #define _LINUX_SCHED_TASK_STACK_H /* * task->stack (kernel stack) handling interfaces: */ #include <linux/sched.h> #include <linux/magic.h> #ifdef CONFIG_THREAD_INFO_IN_TASK /* * When accessing the stack of a non-current task that might exit, use * try_get_task_stack() instead. task_stack_page will return a pointer * that could get freed out from under you. 
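 *
 * Typical pattern (an illustrative sketch, not part of the original
 * header):
 *
 *	void *stack = try_get_task_stack(tsk);
 *	if (stack) {
 *		...inspect the stack safely...
 *		put_task_stack(tsk);
 *	}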
*/ static __always_inline void *task_stack_page(const struct task_struct *task) { return task->stack; } #define setup_thread_stack(new,old) do { } while(0) static __always_inline unsigned long *end_of_stack(const struct task_struct *task) { #ifdef CONFIG_STACK_GROWSUP return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1; #else return task->stack; #endif } #elif !defined(__HAVE_THREAD_FUNCTIONS) #define task_stack_page(task) ((void *)(task)->stack) static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) { *task_thread_info(p) = *task_thread_info(org); task_thread_info(p)->task = p; } /* * Return the address of the last usable long on the stack. * * When the stack grows down, this is just above the thread * info struct. Going any lower will corrupt the threadinfo. * * When the stack grows up, this is the highest address. * Beyond that position, we corrupt data on the next page. */ static inline unsigned long *end_of_stack(struct task_struct *p) { #ifdef CONFIG_STACK_GROWSUP return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; #else return (unsigned long *)(task_thread_info(p) + 1); #endif } #endif #ifdef CONFIG_THREAD_INFO_IN_TASK static inline void *try_get_task_stack(struct task_struct *tsk) { return refcount_inc_not_zero(&tsk->stack_refcount) ? task_stack_page(tsk) : NULL; } extern void put_task_stack(struct task_struct *tsk); #else static inline void *try_get_task_stack(struct task_struct *tsk) { return task_stack_page(tsk); } static inline void put_task_stack(struct task_struct *tsk) {} #endif #define task_stack_end_corrupted(task) \ (*(end_of_stack(task)) != STACK_END_MAGIC) static inline int object_is_on_stack(const void *obj) { void *stack = task_stack_page(current); return (obj >= stack) && (obj < (stack + THREAD_SIZE)); } extern void thread_stack_cache_init(void); #ifdef CONFIG_DEBUG_STACK_USAGE static inline unsigned long stack_not_used(struct task_struct *p) { unsigned long *n = end_of_stack(p); do { /* Skip over canary */ # ifdef CONFIG_STACK_GROWSUP n--; # else n++; # endif } while (!*n); # ifdef CONFIG_STACK_GROWSUP return (unsigned long)end_of_stack(p) - (unsigned long)n; # else return (unsigned long)n - (unsigned long)end_of_stack(p); # endif } #endif extern void set_task_stack_end_magic(struct task_struct *tsk); #ifndef __HAVE_ARCH_KSTACK_END static inline int kstack_end(void *addr) { /* Reliable end of stack detection: * Some APM bios versions misalign the stack */ return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); } #endif #endif /* _LINUX_SCHED_TASK_STACK_H */ mic_bus.h 0000644 00000005320 14722070374 0006344 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Intel MIC Platform Software Stack (MPSS) * * Copyright(c) 2014 Intel Corporation. * * Intel MIC Bus driver. * * This implementation is very similar to the the virtio bus driver * implementation @ include/linux/virtio.h. */ #ifndef _MIC_BUS_H_ #define _MIC_BUS_H_ /* * Everything a mbus driver needs to work with any particular mbus * implementation. */ #include <linux/interrupt.h> #include <linux/dma-mapping.h> struct mbus_device_id { __u32 device; __u32 vendor; }; #define MBUS_DEV_DMA_HOST 2 #define MBUS_DEV_DMA_MIC 3 #define MBUS_DEV_ANY_ID 0xffffffff /** * mbus_device - representation of a device using mbus * @mmio_va: virtual address of mmio space * @hw_ops: the hardware ops supported by this device. * @id: the device type identification (used to match it with a driver). * @dev: underlying device. 
* be used to communicate with. * @index: unique position on the mbus bus */ struct mbus_device { void __iomem *mmio_va; struct mbus_hw_ops *hw_ops; struct mbus_device_id id; struct device dev; int index; }; /** * mbus_driver - operations for a mbus I/O driver * @driver: underlying device driver (populate name and owner). * @id_table: the ids serviced by this driver. * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function to call when a device is removed. */ struct mbus_driver { struct device_driver driver; const struct mbus_device_id *id_table; int (*probe)(struct mbus_device *dev); void (*scan)(struct mbus_device *dev); void (*remove)(struct mbus_device *dev); }; /** * struct mic_irq - opaque pointer used as cookie */ struct mic_irq; /** * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus. */ struct mbus_hw_ops { struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev, irq_handler_t handler, irq_handler_t thread_fn, const char *name, void *data, int intr_src); void (*free_irq)(struct mbus_device *mbdev, struct mic_irq *cookie, void *data); void (*ack_interrupt)(struct mbus_device *mbdev, int num); }; struct mbus_device * mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct mbus_hw_ops *hw_ops, int index, void __iomem *mmio_va); void mbus_unregister_device(struct mbus_device *mbdev); int mbus_register_driver(struct mbus_driver *drv); void mbus_unregister_driver(struct mbus_driver *drv); static inline struct mbus_device *dev_to_mbus(struct device *_dev) { return container_of(_dev, struct mbus_device, dev); } static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv) { return container_of(drv, struct mbus_driver, driver); } #endif /* _MIC_BUS_H */ yam.h 0000644 00000004266 14722070374 0005521 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /*****************************************************************************/ /* * yam.h -- YAM radio modem driver. * * Copyright (C) 1998 Frederic Rible F1OAT (frible@teaser.fr) * Adapted from baycom.c driver written by Thomas Sailer (sailer@ife.ee.ethz.ch) * * Please note that the GPL allows you to use the driver, NOT the radio. * In order to use the radio, you need a license from the communications * authority of your country. 
*/ /*****************************************************************************/ #define SIOCYAMRESERVED (0) #define SIOCYAMSCFG (1) /* Set configuration */ #define SIOCYAMGCFG (2) /* Get configuration */ #define SIOCYAMSMCS (3) /* Set mcs data */ #define YAM_IOBASE (1 << 0) #define YAM_IRQ (1 << 1) #define YAM_BITRATE (1 << 2) /* Bit rate of radio port ->57600 */ #define YAM_MODE (1 << 3) /* 0=simplex 1=duplex 2=duplex+tempo */ #define YAM_HOLDDLY (1 << 4) /* duplex tempo (sec) */ #define YAM_TXDELAY (1 << 5) /* Tx Delay (ms) */ #define YAM_TXTAIL (1 << 6) /* Tx Tail (ms) */ #define YAM_PERSIST (1 << 7) /* Persist (ms) */ #define YAM_SLOTTIME (1 << 8) /* Slottime (ms) */ #define YAM_BAUDRATE (1 << 9) /* Baud rate of rs232 port ->115200 */ #define YAM_MAXBITRATE 57600 #define YAM_MAXBAUDRATE 115200 #define YAM_MAXMODE 2 #define YAM_MAXHOLDDLY 99 #define YAM_MAXTXDELAY 999 #define YAM_MAXTXTAIL 999 #define YAM_MAXPERSIST 255 #define YAM_MAXSLOTTIME 999 #define YAM_FPGA_SIZE 5302 struct yamcfg { unsigned int mask; /* Mask of commands */ unsigned int iobase; /* IO Base of COM port */ unsigned int irq; /* IRQ of COM port */ unsigned int bitrate; /* Bit rate of radio port */ unsigned int baudrate; /* Baud rate of the RS232 port */ unsigned int txdelay; /* TxDelay */ unsigned int txtail; /* TxTail */ unsigned int persist; /* Persistence */ unsigned int slottime; /* Slottime */ unsigned int mode; /* mode 0 (simp), 1(Dupl), 2(Dupl+delay) */ unsigned int holddly; /* PTT delay in FullDuplex 2 mode */ }; struct yamdrv_ioctl_cfg { int cmd; struct yamcfg cfg; }; struct yamdrv_ioctl_mcs { int cmd; unsigned int bitrate; unsigned char bits[YAM_FPGA_SIZE]; }; net.h 0000644 00000027401 14722070374 0005515 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * NET An implementation of the SOCKET network access protocol. * This is the master header file for the Linux NET layer, * or, in plain English: the networking handling part of the * kernel. * * Version: @(#)net.h 1.0.3 05/25/93 * * Authors: Orest Zborowski, <obz@Kodak.COM> * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> */ #ifndef _LINUX_NET_H #define _LINUX_NET_H #include <linux/stringify.h> #include <linux/random.h> #include <linux/wait.h> #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ #include <linux/rcupdate.h> #include <linux/once.h> #include <linux/fs.h> #include <linux/mm.h> #include <uapi/linux/net.h> struct poll_table_struct; struct pipe_inode_info; struct inode; struct file; struct net; /* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected. * Eventually all flags will be in sk->sk_wq->flags. */ #define SOCKWQ_ASYNC_NOSPACE 0 #define SOCKWQ_ASYNC_WAITDATA 1 #define SOCK_NOSPACE 2 #define SOCK_PASSCRED 3 #define SOCK_PASSSEC 4 #ifndef ARCH_HAS_SOCKET_TYPES /** * enum sock_type - Socket types * @SOCK_STREAM: stream (connection) socket * @SOCK_DGRAM: datagram (conn.less) socket * @SOCK_RAW: raw socket * @SOCK_RDM: reliably-delivered message * @SOCK_SEQPACKET: sequential packet socket * @SOCK_DCCP: Datagram Congestion Control Protocol socket * @SOCK_PACKET: linux specific way of getting packets at the dev level. * For writing rarp and other similar things on the user level. * * When adding some new socket type please * grep ARCH_HAS_SOCKET_TYPE include/asm-* /socket.h, at least MIPS * overrides this enum for binary compat reasons. 
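 *
 * An illustrative sketch (not part of the original header): in-kernel
 * users select one of these types when creating a socket, e.g.
 *
 *	struct socket *sock;
 *	int err = sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
 *				   IPPROTO_TCP, &sock);
 *	if (!err)
 *		sock_release(sock);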
*/ enum sock_type { SOCK_STREAM = 1, SOCK_DGRAM = 2, SOCK_RAW = 3, SOCK_RDM = 4, SOCK_SEQPACKET = 5, SOCK_DCCP = 6, SOCK_PACKET = 10, }; #define SOCK_MAX (SOCK_PACKET + 1) /* Mask which covers at least up to SOCK_MASK-1. The * remaining bits are used as flags. */ #define SOCK_TYPE_MASK 0xf /* Flags for socket, socketpair, accept4 */ #define SOCK_CLOEXEC O_CLOEXEC #ifndef SOCK_NONBLOCK #define SOCK_NONBLOCK O_NONBLOCK #endif #endif /* ARCH_HAS_SOCKET_TYPES */ /** * enum sock_shutdown_cmd - Shutdown types * @SHUT_RD: shutdown receptions * @SHUT_WR: shutdown transmissions * @SHUT_RDWR: shutdown receptions/transmissions */ enum sock_shutdown_cmd { SHUT_RD, SHUT_WR, SHUT_RDWR, }; struct socket_wq { /* Note: wait MUST be first field of socket_wq */ wait_queue_head_t wait; struct fasync_struct *fasync_list; unsigned long flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */ struct rcu_head rcu; } ____cacheline_aligned_in_smp; /** * struct socket - general BSD socket * @state: socket state (%SS_CONNECTED, etc) * @type: socket type (%SOCK_STREAM, etc) * @flags: socket flags (%SOCK_NOSPACE, etc) * @ops: protocol specific socket operations * @file: File back pointer for gc * @sk: internal networking protocol agnostic socket representation * @wq: wait queue for several uses */ struct socket { socket_state state; short type; unsigned long flags; struct file *file; struct sock *sk; const struct proto_ops *ops; struct socket_wq wq; }; struct vm_area_struct; struct page; struct sockaddr; struct msghdr; struct module; struct sk_buff; typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); struct proto_ops { int family; struct module *owner; int (*release) (struct socket *sock); int (*bind) (struct socket *sock, struct sockaddr *myaddr, int sockaddr_len); int (*connect) (struct socket *sock, struct sockaddr *vaddr, int sockaddr_len, int flags); int (*socketpair)(struct socket *sock1, struct socket *sock2); int (*accept) (struct socket *sock, struct socket *newsock, int flags, bool kern); int (*getname) (struct socket *sock, struct sockaddr *addr, int peer); __poll_t (*poll) (struct file *file, struct socket *sock, struct poll_table_struct *wait); int (*ioctl) (struct socket *sock, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT int (*compat_ioctl) (struct socket *sock, unsigned int cmd, unsigned long arg); #endif int (*gettstamp) (struct socket *sock, void __user *userstamp, bool timeval, bool time32); int (*listen) (struct socket *sock, int len); int (*shutdown) (struct socket *sock, int flags); int (*setsockopt)(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen); int (*getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); #ifdef CONFIG_COMPAT int (*compat_setsockopt)(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen); int (*compat_getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); #endif int (*sendmsg) (struct socket *sock, struct msghdr *m, size_t total_len); /* Notes for implementing recvmsg: * =============================== * msg->msg_namelen should get updated by the recvmsg handlers * iff msg_name != NULL. It is by default 0 to prevent * returning uninitialized memory to user space. The recvfrom * handlers can assume that msg.msg_name is either NULL or has * a minimum size of sizeof(struct sockaddr_storage). 
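 *
 * An illustrative sketch (not part of the original header) of a
 * handler honouring these rules:
 *
 *	if (msg->msg_name) {
 *		memcpy(msg->msg_name, &src_addr, sizeof(src_addr));
 *		msg->msg_namelen = sizeof(src_addr);
 *	}
 *
 * where src_addr is the protocol's own sockaddr variant.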
*/ int (*recvmsg) (struct socket *sock, struct msghdr *m, size_t total_len, int flags); int (*mmap) (struct file *file, struct socket *sock, struct vm_area_struct * vma); ssize_t (*sendpage) (struct socket *sock, struct page *page, int offset, size_t size, int flags); ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); int (*set_peek_off)(struct sock *sk, int val); int (*peek_len)(struct socket *sock); /* The following functions are called internally by kernel with * sock lock already held. */ int (*read_sock)(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor); int (*sendpage_locked)(struct sock *sk, struct page *page, int offset, size_t size, int flags); int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, size_t size); int (*set_rcvlowat)(struct sock *sk, int val); }; #define DECLARE_SOCKADDR(type, dst, src) \ type dst = ({ __sockaddr_check_size(sizeof(*dst)); (type) src; }) struct net_proto_family { int family; int (*create)(struct net *net, struct socket *sock, int protocol, int kern); struct module *owner; }; struct iovec; struct kvec; enum { SOCK_WAKE_IO, SOCK_WAKE_WAITD, SOCK_WAKE_SPACE, SOCK_WAKE_URG, }; int sock_wake_async(struct socket_wq *sk_wq, int how, int band); int sock_register(const struct net_proto_family *fam); void sock_unregister(int family); bool sock_is_registered(int family); int __sock_create(struct net *net, int family, int type, int proto, struct socket **res, int kern); int sock_create(int family, int type, int proto, struct socket **res); int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res); int sock_create_lite(int family, int type, int proto, struct socket **res); struct socket *sock_alloc(void); void sock_release(struct socket *sock); int sock_sendmsg(struct socket *sock, struct msghdr *msg); int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags); struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); struct socket *sockfd_lookup(int fd, int *err); struct socket *sock_from_file(struct file *file, int *err); #define sockfd_put(sock) fput(sock->file) int net_ratelimit(void); #define net_ratelimited_function(function, ...) \ do { \ if (net_ratelimit()) \ function(__VA_ARGS__); \ } while (0) #define net_emerg_ratelimited(fmt, ...) \ net_ratelimited_function(pr_emerg, fmt, ##__VA_ARGS__) #define net_alert_ratelimited(fmt, ...) \ net_ratelimited_function(pr_alert, fmt, ##__VA_ARGS__) #define net_crit_ratelimited(fmt, ...) \ net_ratelimited_function(pr_crit, fmt, ##__VA_ARGS__) #define net_err_ratelimited(fmt, ...) \ net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__) #define net_notice_ratelimited(fmt, ...) \ net_ratelimited_function(pr_notice, fmt, ##__VA_ARGS__) #define net_warn_ratelimited(fmt, ...) \ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) #define net_info_ratelimited(fmt, ...) \ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) #if defined(CONFIG_DYNAMIC_DEBUG) #define net_dbg_ratelimited(fmt, ...) \ do { \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ if (DYNAMIC_DEBUG_BRANCH(descriptor) && \ net_ratelimit()) \ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ ##__VA_ARGS__); \ } while (0) #elif defined(DEBUG) #define net_dbg_ratelimited(fmt, ...) \ net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) #else #define net_dbg_ratelimited(fmt, ...) 
\ do { \ if (0) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \ } while (0) #endif #define net_get_random_once(buf, nbytes) \ get_random_once((buf), (nbytes)) #define net_get_random_once_wait(buf, nbytes) \ get_random_once_wait((buf), (nbytes)) /* * E.g. XFS meta- & log-data is in slab pages, or bcache meta * data pages, or other high order pages allocated by * __get_free_pages() without __GFP_COMP, which have a page_count * of 0 and/or have PageSlab() set. We cannot use send_page for * those, as that does get_page(); put_page(); and would cause * either a VM_BUG directly, or __page_cache_release a page that * would actually still be referenced by someone, leading to some * obscure delayed Oops somewhere else. */ static inline bool sendpage_ok(struct page *page) { return !PageSlab(page) && page_count(page) >= 1; } int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len); int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, struct kvec *vec, size_t num, size_t len); int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len, int flags); int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen); int kernel_listen(struct socket *sock, int backlog); int kernel_accept(struct socket *sock, struct socket **newsock, int flags); int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags); int kernel_getsockname(struct socket *sock, struct sockaddr *addr); int kernel_getpeername(struct socket *sock, struct sockaddr *addr); int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen); int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, unsigned int optlen); int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, size_t size, int flags); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); /* Routine returns the IP overhead imposed by a (caller-protected) socket. */ u32 kernel_sock_ip_overhead(struct sock *sk); #define MODULE_ALIAS_NETPROTO(proto) \ MODULE_ALIAS("net-pf-" __stringify(proto)) #define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \ MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto)) #define MODULE_ALIAS_NET_PF_PROTO_TYPE(pf, proto, type) \ MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \ "-type-" __stringify(type)) #define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \ MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \ name) #endif /* _LINUX_NET_H */ memblock.h 0000644 00000044723 14722070374 0006526 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _LINUX_MEMBLOCK_H #define _LINUX_MEMBLOCK_H #ifdef __KERNEL__ /* * Logical memory blocks. * * Copyright (C) 2001 Peter Bergner, IBM Corp. 
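 *
 * An illustrative usage sketch (not part of the original header): early
 * boot code typically feeds and consumes memblock like
 *
 *	memblock_add(base, size);
 *	memblock_reserve(kernel_start, kernel_size);
 *	ptr = memblock_alloc(sizeof(*table), SMP_CACHE_BYTES);
 *
 * where base/size and the kernel range are taken from the platform's
 * memory map (all names here are hypothetical).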
*/ #include <linux/init.h> #include <linux/mm.h> #include <asm/dma.h> extern unsigned long max_low_pfn; extern unsigned long min_low_pfn; /* * highest page */ extern unsigned long max_pfn; /* * highest possible page */ extern unsigned long long max_possible_pfn; /** * enum memblock_flags - definition of memory region attributes * @MEMBLOCK_NONE: no special request * @MEMBLOCK_HOTPLUG: hotpluggable region * @MEMBLOCK_MIRROR: mirrored region * @MEMBLOCK_NOMAP: don't add to kernel direct mapping */ enum memblock_flags { MEMBLOCK_NONE = 0x0, /* No special request */ MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */ MEMBLOCK_MIRROR = 0x2, /* mirrored region */ MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ }; /** * struct memblock_region - represents a memory region * @base: physical address of the region * @size: size of the region * @flags: memory region attributes * @nid: NUMA node id */ struct memblock_region { phys_addr_t base; phys_addr_t size; enum memblock_flags flags; #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int nid; #endif }; /** * struct memblock_type - collection of memory regions of certain type * @cnt: number of regions * @max: size of the allocated array * @total_size: size of all regions * @regions: array of regions * @name: the memory type symbolic name */ struct memblock_type { unsigned long cnt; unsigned long max; phys_addr_t total_size; struct memblock_region *regions; char *name; }; /** * struct memblock - memblock allocator metadata * @bottom_up: is the allocation direction bottom-up? * @current_limit: physical address of the current allocation limit * @memory: usable memory regions * @reserved: reserved memory regions * @physmem: all physical memory */ struct memblock { bool bottom_up; /* is the allocation direction bottom-up? */ phys_addr_t current_limit; struct memblock_type memory; struct memblock_type reserved; #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP struct memblock_type physmem; #endif }; extern struct memblock memblock; extern int memblock_debug; #ifndef CONFIG_ARCH_KEEP_MEMBLOCK #define __init_memblock __meminit #define __initdata_memblock __meminitdata void memblock_discard(void); #else #define __init_memblock #define __initdata_memblock static inline void memblock_discard(void) {} #endif #define memblock_dbg(fmt, ...)
\ if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align); void memblock_allow_resize(void); int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); int memblock_add(phys_addr_t base, phys_addr_t size); int memblock_remove(phys_addr_t base, phys_addr_t size); int memblock_free(phys_addr_t base, phys_addr_t size); int memblock_reserve(phys_addr_t base, phys_addr_t size); void memblock_trim_memory(phys_addr_t align); bool memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size); int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); unsigned long memblock_free_all(void); void reset_node_managed_pages(pg_data_t *pgdat); void reset_all_zones_managed_pages(void); /* Low level functions */ int memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int nid, enum memblock_flags flags); void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid); void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid); void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, phys_addr_t *out_end); void __memblock_free_late(phys_addr_t base, phys_addr_t size); /** * for_each_mem_range - iterate through memblock areas from type_a and not * included in type_b. Or just type_a if type_b is NULL. * @i: u64 used as loop variable * @type_a: ptr to memblock_type to iterate * @type_b: ptr to memblock_type which excludes from the iteration * @nid: node selector, %NUMA_NO_NODE for all nodes * @flags: pick from blocks based on memory attributes * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL */ #define for_each_mem_range(i, type_a, type_b, nid, flags, \ p_start, p_end, p_nid) \ for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \ p_start, p_end, p_nid); \ i != (u64)ULLONG_MAX; \ __next_mem_range(&i, nid, flags, type_a, type_b, \ p_start, p_end, p_nid)) /** * for_each_mem_range_rev - reverse iterate through memblock areas from * type_a and not included in type_b. Or just type_a if type_b is NULL. 
* @i: u64 used as loop variable * @type_a: ptr to memblock_type to iterate * @type_b: ptr to memblock_type which excludes from the iteration * @nid: node selector, %NUMA_NO_NODE for all nodes * @flags: pick from blocks based on memory attributes * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL */ #define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \ p_start, p_end, p_nid) \ for (i = (u64)ULLONG_MAX, \ __next_mem_range_rev(&i, nid, flags, type_a, type_b,\ p_start, p_end, p_nid); \ i != (u64)ULLONG_MAX; \ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ p_start, p_end, p_nid)) /** * for_each_reserved_mem_region - iterate over all reserved memblock areas * @i: u64 used as loop variable * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * * Walks over reserved areas of memblock. Available as soon as memblock * is initialized. */ #define for_each_reserved_mem_region(i, p_start, p_end) \ for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \ i != (u64)ULLONG_MAX; \ __next_reserved_mem_region(&i, p_start, p_end)) static inline bool memblock_is_hotpluggable(struct memblock_region *m) { return m->flags & MEMBLOCK_HOTPLUG; } static inline bool memblock_is_mirror(struct memblock_region *m) { return m->flags & MEMBLOCK_MIRROR; } static inline bool memblock_is_nomap(struct memblock_region *m) { return m->flags & MEMBLOCK_NOMAP; } #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn); void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, unsigned long *out_end_pfn, int *out_nid); /** * for_each_mem_pfn_range - early memory pfn range iterator * @i: an integer used as loop variable * @nid: node selector, %MAX_NUMNODES for all nodes * @p_start: ptr to ulong for start pfn of the range, can be %NULL * @p_end: ptr to ulong for end pfn of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL * * Walks over configured memory ranges. */ #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, unsigned long *out_spfn, unsigned long *out_epfn); /** * for_each_free_mem_range_in_zone - iterate through zone specific free * memblock areas * @i: u64 used as loop variable * @zone: zone in which all of the memory blocks reside * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * * Walks over free (memory && !reserved) areas of memblock in a specific * zone. Available once memblock and an empty zone is initialized. The main * assumption is that the zone start, end, and pgdat have been associated. * This way we can use the zone to determine NUMA node, and if a given part * of the memblock is valid for the zone. 
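 *
 * An illustrative sketch (not part of the original header):
 *
 *	unsigned long spfn, epfn;
 *	u64 i;
 *
 *	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
 *		...deferred-initialize struct pages in [spfn, epfn)...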
*/ #define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \ for (i = 0, \ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); \ i != U64_MAX; \ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) /** * for_each_free_mem_range_in_zone_from - iterate through zone specific * free memblock areas from a given point * @i: u64 used as loop variable * @zone: zone in which all of the memory blocks reside * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * * Walks over free (memory && !reserved) areas of memblock in a specific * zone, continuing from current position. Available as soon as memblock is * initialized. */ #define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \ for (; i != U64_MAX; \ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ /** * for_each_free_mem_range - iterate through free memblock areas * @i: u64 used as loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes * @flags: pick from blocks based on memory attributes * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL * * Walks over free (memory && !reserved) areas of memblock. Available as * soon as memblock is initialized. */ #define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \ for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ nid, flags, p_start, p_end, p_nid) /** * for_each_free_mem_range_reverse - rev-iterate through free memblock areas * @i: u64 used as loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes * @flags: pick from blocks based on memory attributes * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL * * Walks over free (memory && !reserved) areas of memblock in reverse * order. Available as soon as memblock is initialized. 
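 *
 * Example (an illustrative sketch, not part of the original header):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *					&start, &end, NULL)
 *		pr_info("free range: %pa..%pa\n", &start, &end);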
*/ #define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \ p_nid) \ for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ nid, flags, p_start, p_end, p_nid) #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid); static inline void memblock_set_region_node(struct memblock_region *r, int nid) { r->nid = nid; } static inline int memblock_get_region_node(const struct memblock_region *r) { return r->nid; } #else static inline void memblock_set_region_node(struct memblock_region *r, int nid) { } static inline int memblock_get_region_node(const struct memblock_region *r) { return 0; } #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ /* Flags for memblock allocation APIs */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) #define MEMBLOCK_ALLOC_ACCESSIBLE 0 #define MEMBLOCK_ALLOC_KASAN 1 /* We are using top down, so it is safe to use 0 here */ #define MEMBLOCK_LOW_LIMIT 0 #ifndef ARCH_LOW_ADDRESS_LIMIT #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL #endif phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end); phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align) { return memblock_phys_alloc_range(size, align, 0, MEMBLOCK_ALLOC_ACCESSIBLE); } void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid); void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid); static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align) { return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); } static inline void * __init memblock_alloc_raw(phys_addr_t size, phys_addr_t align) { return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT, MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); } static inline void * __init memblock_alloc_from(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) { return memblock_alloc_try_nid(size, align, min_addr, MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); } static inline void * __init memblock_alloc_low(phys_addr_t size, phys_addr_t align) { return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); } static inline void * __init memblock_alloc_node(phys_addr_t size, phys_addr_t align, int nid) { return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, MEMBLOCK_ALLOC_ACCESSIBLE, nid); } static inline void __init memblock_free_early(phys_addr_t base, phys_addr_t size) { memblock_free(base, size); } static inline void __init memblock_free_early_nid(phys_addr_t base, phys_addr_t size, int nid) { memblock_free(base, size); } static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size) { __memblock_free_late(base, size); } /* * Set the allocation direction to bottom-up or top-down. */ static inline void __init memblock_set_bottom_up(bool enable) { memblock.bottom_up = enable; } /* * Check if the allocation direction is bottom-up or not. * if this is true, that said, memblock will allocate memory * in bottom-up direction. 
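 *
 * An illustrative sketch (not part of the original header):
 *
 *	memblock_set_bottom_up(true);
 *	if (memblock_bottom_up())
 *		...subsequent allocations start from low addresses...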
*/ static inline bool memblock_bottom_up(void) { return memblock.bottom_up; } phys_addr_t memblock_phys_mem_size(void); phys_addr_t memblock_reserved_size(void); phys_addr_t memblock_mem_size(unsigned long limit_pfn); phys_addr_t memblock_start_of_DRAM(void); phys_addr_t memblock_end_of_DRAM(void); void memblock_enforce_memory_limit(phys_addr_t memory_limit); void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); void memblock_mem_limit_remove_map(phys_addr_t limit); bool memblock_is_memory(phys_addr_t addr); bool memblock_is_map_memory(phys_addr_t addr); bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size); bool memblock_is_reserved(phys_addr_t addr); bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); extern void __memblock_dump_all(void); static inline void memblock_dump_all(void) { if (memblock_debug) __memblock_dump_all(); } /** * memblock_set_current_limit - Set the current allocation limit to allow * limiting allocations to what is currently * accessible during boot * @limit: New limit value (physical address) */ void memblock_set_current_limit(phys_addr_t limit); phys_addr_t memblock_get_current_limit(void); /* * pfn conversion functions * * While the memory MEMBLOCKs should always be page aligned, the reserved * MEMBLOCKs may not be. This accessor attempt to provide a very clear * idea of what they return for such non aligned MEMBLOCKs. */ /** * memblock_region_memory_base_pfn - get the lowest pfn of the memory region * @reg: memblock_region structure * * Return: the lowest pfn intersecting with the memory region */ static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg) { return PFN_UP(reg->base); } /** * memblock_region_memory_end_pfn - get the end pfn of the memory region * @reg: memblock_region structure * * Return: the end_pfn of the reserved region */ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg) { return PFN_DOWN(reg->base + reg->size); } /** * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region * @reg: memblock_region structure * * Return: the lowest pfn intersecting with the reserved region */ static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg) { return PFN_DOWN(reg->base); } /** * memblock_region_reserved_end_pfn - get the end pfn of the reserved region * @reg: memblock_region structure * * Return: the end_pfn of the reserved region */ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg) { return PFN_UP(reg->base + reg->size); } #define for_each_memblock(memblock_type, region) \ for (region = memblock.memblock_type.regions; \ region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ region++) #define for_each_memblock_type(i, memblock_type, rgn) \ for (i = 0, rgn = &memblock_type->regions[0]; \ i < memblock_type->cnt; \ i++, rgn = &memblock_type->regions[i]) extern void *alloc_large_system_hash(const char *tablename, unsigned long bucketsize, unsigned long numentries, int scale, int flags, unsigned int *_hash_shift, unsigned int *_hash_mask, unsigned long low_limit, unsigned long high_limit); #define HASH_EARLY 0x00000001 /* Allocating during early boot? */ #define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min * shift passed via *_hash_shift */ #define HASH_ZERO 0x00000004 /* Zero allocated hash table */ /* Only NUMA needs hash distribution. 64bit NUMA architectures have * sufficient vmalloc space. 
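 *
 * An illustrative sketch (not part of the original header), modelled on
 * typical boot-time users of alloc_large_system_hash():
 *
 *	example_hash = alloc_large_system_hash("Example-cache",
 *					       sizeof(struct hlist_head),
 *					       0, 14, HASH_ZERO,
 *					       &example_shift, NULL, 0, 0);
 *
 * "Example-cache", example_hash and example_shift are hypothetical
 * names; passing 0 entries lets the function size the table from the
 * amount of system memory.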
*/ #ifdef CONFIG_NUMA #define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) extern int hashdist; /* Distribute hashes across NUMA nodes? */ #else #define hashdist (0) #endif #ifdef CONFIG_MEMTEST extern void early_memtest(phys_addr_t start, phys_addr_t end); #else static inline void early_memtest(phys_addr_t start, phys_addr_t end) { } #endif #endif /* __KERNEL__ */ #endif /* _LINUX_MEMBLOCK_H */ amd-iommu.h 0000644 00000014713 14722070374 0006616 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. * Author: Joerg Roedel <joerg.roedel@amd.com> * Leo Duran <leo.duran@amd.com> */ #ifndef _ASM_X86_AMD_IOMMU_H #define _ASM_X86_AMD_IOMMU_H #include <linux/types.h> /* * This is mainly used to communicate information back-and-forth * between SVM and IOMMU for setting up and tearing down posted * interrupt */ struct amd_iommu_pi_data { u32 ga_tag; u32 prev_ga_tag; u64 base; bool is_guest_mode; struct vcpu_data *vcpu_data; void *ir_data; }; #ifdef CONFIG_AMD_IOMMU struct task_struct; struct pci_dev; extern int amd_iommu_detect(void); extern int amd_iommu_init_hardware(void); /** * amd_iommu_enable_device_erratum() - Enable erratum workaround for device * in the IOMMUv2 driver * @pdev: The PCI device the workaround is necessary for * @erratum: The erratum workaround to enable * * The function needs to be called before amd_iommu_init_device(). * Possible values for the erratum number are for now: * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI * is enabled * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI * requests to one */ #define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0 #define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1 extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum); /** * amd_iommu_init_device() - Init device for use with IOMMUv2 driver * @pdev: The PCI device to initialize * @pasids: Number of PASIDs to support for this device * * This function does all setup for the device pdev so that it can be * used with IOMMUv2. * Returns 0 on success or negative value on error. */ extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids); /** * amd_iommu_free_device() - Free all IOMMUv2 related device resources * and disable IOMMUv2 usage for this device * @pdev: The PCI device to disable IOMMUv2 usage for' */ extern void amd_iommu_free_device(struct pci_dev *pdev); /** * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device * @pdev: The PCI device to bind the task to * @pasid: The PASID on the device the task should be bound to * @task: the task to bind * * The function returns 0 on success or a negative value on error. */ extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, struct task_struct *task); /** * amd_iommu_unbind_pasid() - Unbind a PASID from its task on * a device * @pdev: The device of the PASID * @pasid: The PASID to unbind * * When this function returns the device is no longer using the PASID * and the PASID is no longer bound to its task. */ extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid); /** * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed * PRI requests * @pdev: The PCI device the call-back should be registered for * @cb: The call-back function * * The IOMMUv2 driver invokes this call-back when it is unable to * successfully handle a PRI request. The device driver can then decide * which PRI response the device should see. 
Possible return values for * the call-back are: * * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device, * the device is required to disable * PRI when it receives this response * * The function returns 0 on success or negative value on error. */ #define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0 #define AMD_IOMMU_INV_PRI_RSP_INVALID 1 #define AMD_IOMMU_INV_PRI_RSP_FAIL 2 typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev, int pasid, unsigned long address, u16); extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, amd_iommu_invalid_ppr_cb cb); #define PPR_FAULT_EXEC (1 << 1) #define PPR_FAULT_READ (1 << 2) #define PPR_FAULT_WRITE (1 << 5) #define PPR_FAULT_USER (1 << 6) #define PPR_FAULT_RSVD (1 << 7) #define PPR_FAULT_GN (1 << 8) /** * amd_iommu_device_info() - Get information about IOMMUv2 support of a * PCI device * @pdev: PCI device to query information from * @info: A pointer to an amd_iommu_device_info structure which will contain * the information about the PCI device * * Returns 0 on success, negative value on error */ #define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */ #define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */ #define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */ #define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution on memory pages */ #define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request super-user privileges */ struct amd_iommu_device_info { int max_pasids; u32 flags; }; extern int amd_iommu_device_info(struct pci_dev *pdev, struct amd_iommu_device_info *info); /** * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating * a pasid context. This call-back is * invoked when the IOMMUv2 driver needs to * invalidate a PASID context, for example * because the task that is bound to that * context is about to exit. * * @pdev: The PCI device the call-back should be registered for * @cb: The call-back function */ typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid); extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev, amd_iommu_invalidate_ctx cb); #else /* CONFIG_AMD_IOMMU */ static inline int amd_iommu_detect(void) { return -ENODEV; } #endif /* CONFIG_AMD_IOMMU */ #if defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) /* IOMMU AVIC Function */ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)); extern int amd_iommu_update_ga(int cpu, bool is_run, void *data); extern int amd_iommu_activate_guest_mode(void *data); extern int amd_iommu_deactivate_guest_mode(void *data); #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ static inline int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)) { return 0; } static inline int amd_iommu_update_ga(int cpu, bool is_run, void *data) { return 0; } static inline int amd_iommu_activate_guest_mode(void *data) { return 0; } static inline int amd_iommu_deactivate_guest_mode(void *data) { return 0; } #endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ #endif /* _ASM_X86_AMD_IOMMU_H */ edac.h 0000644 00000050522 14722070374 0005623 0 ustar 00 /* * Generic EDAC defs * * Author: Dave Jiang <djiang@mvista.com> * * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. 
This program * is licensed "as is" without any warranty of any kind, whether express * or implied. * */ #ifndef _LINUX_EDAC_H_ #define _LINUX_EDAC_H_ #include <linux/atomic.h> #include <linux/device.h> #include <linux/completion.h> #include <linux/workqueue.h> #include <linux/debugfs.h> #include <linux/numa.h> #define EDAC_DEVICE_NAME_LEN 31 struct device; #define EDAC_OPSTATE_INVAL -1 #define EDAC_OPSTATE_POLL 0 #define EDAC_OPSTATE_NMI 1 #define EDAC_OPSTATE_INT 2 extern int edac_op_state; struct bus_type *edac_get_sysfs_subsys(void); int edac_get_report_status(void); void edac_set_report_status(int new); enum { EDAC_REPORTING_ENABLED, EDAC_REPORTING_DISABLED, EDAC_REPORTING_FORCE }; static inline void opstate_init(void) { switch (edac_op_state) { case EDAC_OPSTATE_POLL: case EDAC_OPSTATE_NMI: break; default: edac_op_state = EDAC_OPSTATE_POLL; } return; } /* Max length of a DIMM label*/ #define EDAC_MC_LABEL_LEN 31 /* Maximum size of the location string */ #define LOCATION_SIZE 256 /* Defines the maximum number of labels that can be reported */ #define EDAC_MAX_LABELS 8 /* String used to join two or more labels */ #define OTHER_LABEL " or " /** * enum dev_type - describe the type of memory DRAM chips used at the stick * @DEV_UNKNOWN: Can't be determined, or MC doesn't support detect it * @DEV_X1: 1 bit for data * @DEV_X2: 2 bits for data * @DEV_X4: 4 bits for data * @DEV_X8: 8 bits for data * @DEV_X16: 16 bits for data * @DEV_X32: 32 bits for data * @DEV_X64: 64 bits for data * * Typical values are x4 and x8. */ enum dev_type { DEV_UNKNOWN = 0, DEV_X1, DEV_X2, DEV_X4, DEV_X8, DEV_X16, DEV_X32, /* Do these parts exist? */ DEV_X64 /* Do these parts exist? */ }; #define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN) #define DEV_FLAG_X1 BIT(DEV_X1) #define DEV_FLAG_X2 BIT(DEV_X2) #define DEV_FLAG_X4 BIT(DEV_X4) #define DEV_FLAG_X8 BIT(DEV_X8) #define DEV_FLAG_X16 BIT(DEV_X16) #define DEV_FLAG_X32 BIT(DEV_X32) #define DEV_FLAG_X64 BIT(DEV_X64) /** * enum hw_event_mc_err_type - type of the detected error * * @HW_EVENT_ERR_CORRECTED: Corrected Error - Indicates that an ECC * corrected error was detected * @HW_EVENT_ERR_UNCORRECTED: Uncorrected Error - Indicates an error that * can't be corrected by ECC, but it is not * fatal (maybe it is on an unused memory area, * or the memory controller could recover from * it for example, by re-trying the operation). * @HW_EVENT_ERR_DEFERRED: Deferred Error - Indicates an uncorrectable * error whose handling is not urgent. This could * be due to hardware data poisoning where the * system can continue operation until the poisoned * data is consumed. Preemptive measures may also * be taken, e.g. offlining pages, etc. * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not * be recovered. * @HW_EVENT_ERR_INFO: Informational - The CPER spec defines a forth * type of error: informational logs. */ enum hw_event_mc_err_type { HW_EVENT_ERR_CORRECTED, HW_EVENT_ERR_UNCORRECTED, HW_EVENT_ERR_DEFERRED, HW_EVENT_ERR_FATAL, HW_EVENT_ERR_INFO, }; static inline char *mc_event_error_type(const unsigned int err_type) { switch (err_type) { case HW_EVENT_ERR_CORRECTED: return "Corrected"; case HW_EVENT_ERR_UNCORRECTED: return "Uncorrected"; case HW_EVENT_ERR_DEFERRED: return "Deferred"; case HW_EVENT_ERR_FATAL: return "Fatal"; default: case HW_EVENT_ERR_INFO: return "Info"; } } /** * enum mem_type - memory types. 
For a more detailed reference, please see * http://en.wikipedia.org/wiki/DRAM * * @MEM_EMPTY: Empty csrow * @MEM_RESERVED: Reserved csrow type * @MEM_UNKNOWN: Unknown csrow type * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995. * @MEM_EDO: EDO - Extended data out, used on systems up to 1998. * @MEM_BEDO: BEDO - Burst Extended data out, an EDO variant. * @MEM_SDR: SDR - Single data rate SDRAM * http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory * They use 3 pins for chip select: Pins 0 and 2 are * for rank 0; pins 1 and 3 are for rank 1, if the memory * is dual-rank. * @MEM_RDR: Registered SDR SDRAM * @MEM_DDR: Double data rate SDRAM * http://en.wikipedia.org/wiki/DDR_SDRAM * @MEM_RDDR: Registered Double data rate SDRAM * This is a variant of the DDR memories. * A registered memory has a buffer inside it, hiding * part of the memory details from the memory controller. * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers. * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F. * Those memories are labeled as "PC2-" instead of "PC" to * differentiate from DDR. * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205 * and JESD206. * Those memories are accessed per DIMM slot, and not by * a chip select signal. * @MEM_RDDR2: Registered DDR2 RAM * This is a variant of the DDR2 memories. * @MEM_XDR: Rambus XDR * It is an evolution of the original RAMBUS memories, * created to compete with DDR2. It wasn't used on any * x86 arch, but the cell_edac PPC memory controller uses it. * @MEM_DDR3: DDR3 RAM * @MEM_RDDR3: Registered DDR3 RAM * This is a variant of the DDR3 memories. * @MEM_LRDDR3: Load-Reduced DDR3 memory. * @MEM_DDR4: Unbuffered DDR4 RAM * @MEM_RDDR4: Registered DDR4 RAM * This is a variant of the DDR4 memories. * @MEM_LRDDR4: Load-Reduced DDR4 memory. * @MEM_NVDIMM: Non-volatile RAM */ enum mem_type { MEM_EMPTY = 0, MEM_RESERVED, MEM_UNKNOWN, MEM_FPM, MEM_EDO, MEM_BEDO, MEM_SDR, MEM_RDR, MEM_DDR, MEM_RDDR, MEM_RMBS, MEM_DDR2, MEM_FB_DDR2, MEM_RDDR2, MEM_XDR, MEM_DDR3, MEM_RDDR3, MEM_LRDDR3, MEM_DDR4, MEM_RDDR4, MEM_LRDDR4, MEM_NVDIMM, }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) #define MEM_FLAG_RESERVED BIT(MEM_RESERVED) #define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN) #define MEM_FLAG_FPM BIT(MEM_FPM) #define MEM_FLAG_EDO BIT(MEM_EDO) #define MEM_FLAG_BEDO BIT(MEM_BEDO) #define MEM_FLAG_SDR BIT(MEM_SDR) #define MEM_FLAG_RDR BIT(MEM_RDR) #define MEM_FLAG_DDR BIT(MEM_DDR) #define MEM_FLAG_RDDR BIT(MEM_RDDR) #define MEM_FLAG_RMBS BIT(MEM_RMBS) #define MEM_FLAG_DDR2 BIT(MEM_DDR2) #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) #define MEM_FLAG_XDR BIT(MEM_XDR) #define MEM_FLAG_DDR3 BIT(MEM_DDR3) #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) #define MEM_FLAG_DDR4 BIT(MEM_DDR4) #define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) #define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4) #define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) /** * enum edac_type - Error Detection and Correction capabilities and mode * @EDAC_UNKNOWN: Unknown if ECC is available * @EDAC_NONE: Doesn't support ECC * @EDAC_RESERVED: Reserved ECC type * @EDAC_PARITY: Detects parity errors * @EDAC_EC: Error Checking - no correction * @EDAC_SECDED: Single bit error correction, Double detection * @EDAC_S2ECD2ED: Chipkill x2 devices - do these exist?
* @EDAC_S4ECD4ED: Chipkill x4 devices * @EDAC_S8ECD8ED: Chipkill x8 devices * @EDAC_S16ECD16ED: Chipkill x16 devices */ enum edac_type { EDAC_UNKNOWN = 0, EDAC_NONE, EDAC_RESERVED, EDAC_PARITY, EDAC_EC, EDAC_SECDED, EDAC_S2ECD2ED, EDAC_S4ECD4ED, EDAC_S8ECD8ED, EDAC_S16ECD16ED, }; #define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN) #define EDAC_FLAG_NONE BIT(EDAC_NONE) #define EDAC_FLAG_PARITY BIT(EDAC_PARITY) #define EDAC_FLAG_EC BIT(EDAC_EC) #define EDAC_FLAG_SECDED BIT(EDAC_SECDED) #define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED) #define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED) #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) /** * enum scrub_type - scrubbing capabilities * @SCRUB_UNKNOWN: Unknown if scrubber is available * @SCRUB_NONE: No scrubber * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing * @SCRUB_SW_SRC: Software scrub only errors * @SCRUB_SW_PROG_SRC: Progressive software scrub from an error * @SCRUB_SW_TUNABLE: Software scrub frequency is tunable * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing * @SCRUB_HW_SRC: Hardware scrub only errors * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable */ enum scrub_type { SCRUB_UNKNOWN = 0, SCRUB_NONE, SCRUB_SW_PROG, SCRUB_SW_SRC, SCRUB_SW_PROG_SRC, SCRUB_SW_TUNABLE, SCRUB_HW_PROG, SCRUB_HW_SRC, SCRUB_HW_PROG_SRC, SCRUB_HW_TUNABLE }; #define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG) #define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC) #define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC) #define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_TUNABLE) #define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG) #define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC) #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC) #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ /* EDAC internal operation states */ #define OP_ALLOC 0x100 #define OP_RUNNING_POLL 0x201 #define OP_RUNNING_INTERRUPT 0x202 #define OP_RUNNING_POLL_INTR 0x203 #define OP_OFFLINE 0x300 /** * enum edac_mc_layer_type - memory controller hierarchy layer * * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch" * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel" * @EDAC_MC_LAYER_SLOT: memory layer is named "slot" * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select" * @EDAC_MC_LAYER_ALL_MEM: memory layout is unknown. All memory is mapped * as a single memory area. This is used when * retrieving errors from a firmware driven driver. * * This enum is used by the drivers to tell edac_mc_sysfs what name should * be used when describing a memory stick location. */ enum edac_mc_layer_type { EDAC_MC_LAYER_BRANCH, EDAC_MC_LAYER_CHANNEL, EDAC_MC_LAYER_SLOT, EDAC_MC_LAYER_CHIP_SELECT, EDAC_MC_LAYER_ALL_MEM, }; /** * struct edac_mc_layer - describes the memory controller hierarchy * @type: layer type * @size: number of components per layer. For example, * if the channel layer has two channels, size = 2 * @is_virt_csrow: This layer is part of the "csrow" when old API * compatibility mode is enabled. Otherwise, it is * a channel */ struct edac_mc_layer { enum edac_mc_layer_type type; unsigned size; bool is_virt_csrow; }; /* * Maximum number of layers used by the memory controller to uniquely * identify a single memory stick. * NOTE: Changing this constant requires not only changing the constant * below, but also changing the existing core code, as some code there * is optimized for 3 layers.
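 *
 * As a hedged illustration of the layer indexing done by the
 * EDAC_DIMM_OFF() and EDAC_DIMM_PTR() macros defined below (hypothetical
 * driver code, not part of this header), a two-layer controller with
 * 2 channels and 4 slots per channel could be addressed like this:
 *
 *	struct edac_mc_layer layers[2] = {
 *		{ .type = EDAC_MC_LAYER_CHANNEL, .size = 2, .is_virt_csrow = false },
 *		{ .type = EDAC_MC_LAYER_SLOT,    .size = 4, .is_virt_csrow = true  },
 *	};
 *	int off = EDAC_DIMM_OFF(layers, 2, 1, 2, 0);	- channel 1, slot 2
 *	struct dimm_info *dimm = EDAC_DIMM_PTR(layers, mci->dimms, 2, 1, 2, 0);
 *
 * With two layers the offset is layer1 + layers[1].size * layer0, so
 * off = 2 + 4 * 1 = 6 and the pointer variant yields mci->dimms[6]
 * (or NULL for an invalid position).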
*/ #define EDAC_MAX_LAYERS 3 /** * EDAC_DIMM_OFF - Macro responsible for getting a pointer offset inside a pointer * array for the element given by [layer0,layer1,layer2] * position * * @layers: a struct edac_mc_layer array, describing how many elements * were allocated for each layer * @nlayers: Number of layers in the @layers array * @layer0: layer0 position * @layer1: layer1 position. Unused if n_layers < 2 * @layer2: layer2 position. Unused if n_layers < 3 * * For 1 layer, this macro returns "var[layer0] - var"; * * For 2 layers, this macro is similar to allocating a bi-dimensional array * and returning "var[layer0][layer1] - var"; * * For 3 layers, this macro is similar to allocating a tri-dimensional array * and returning "var[layer0][layer1][layer2] - var". * * A loop could be used here to make it more generic, but, as we only have * 3 layers, this is a little faster. * * By design, nlayers can never be 0 or more than 3. If that ever happens, * EDAC_DIMM_OFF() evaluates to -EINVAL and EDAC_DIMM_PTR() returns NULL, * causing an OOPS during the memory allocation routine, which would point * out to the developer that he's doing something wrong. */ #define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \ int __i; \ if ((nlayers) == 1) \ __i = layer0; \ else if ((nlayers) == 2) \ __i = (layer1) + ((layers[1]).size * (layer0)); \ else if ((nlayers) == 3) \ __i = (layer2) + ((layers[2]).size * ((layer1) + \ ((layers[1]).size * (layer0)))); \ else \ __i = -EINVAL; \ __i; \ }) /** * EDAC_DIMM_PTR - Macro responsible for getting a pointer inside a pointer array * for the element given by [layer0,layer1,layer2] position * * @layers: a struct edac_mc_layer array, describing how many elements * were allocated for each layer * @var: name of the var where we want to get the pointer * (like mci->dimms) * @nlayers: Number of layers in the @layers array * @layer0: layer0 position * @layer1: layer1 position. Unused if n_layers < 2 * @layer2: layer2 position. Unused if n_layers < 3 * * For 1 layer, this macro returns "var[layer0]"; * * For 2 layers, this macro is similar to allocating a bi-dimensional array * and returning "var[layer0][layer1]"; * * For 3 layers, this macro is similar to allocating a tri-dimensional array * and returning "var[layer0][layer1][layer2]"; */ #define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ typeof(*var) __p; \ int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \ if (___i < 0) \ __p = NULL; \ else \ __p = (var)[___i]; \ __p; \ }) struct dimm_info { struct device dev; char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ /* Memory location data */ unsigned int location[EDAC_MAX_LAYERS]; struct mem_ctl_info *mci; /* the parent */ u32 grain; /* granularity of reported error in bytes */ enum dev_type dtype; /* memory device type */ enum mem_type mtype; /* memory dimm type */ enum edac_type edac_mode; /* EDAC mode for this dimm */ u32 nr_pages; /* number of pages on this dimm */ unsigned int csrow, cschannel; /* Points to the old API data */ u16 smbios_handle; /* Handle for SMBIOS type 17 */ }; /** * struct rank_info - contains the information for one DIMM rank * * @chan_idx: channel number where the rank is (typically, 0 or 1) * @ce_count: number of correctable errors for this rank * @csrow: A pointer to the chip select row structure (the parent * structure). The location of the rank is given by * the (csrow->csrow_idx, chan_idx) vector. * @dimm: A pointer to the DIMM structure, where the DIMM label * information is stored.
* * FIXME: Currently, the EDAC core model will assume one DIMM per rank. * This is a bad assumption, but it makes this patch easier. Later * patches in this series will fix this issue. */ struct rank_info { int chan_idx; struct csrow_info *csrow; struct dimm_info *dimm; u32 ce_count; /* Correctable Errors for this csrow */ }; struct csrow_info { struct device dev; /* Used only by edac_mc_find_csrow_by_page() */ unsigned long first_page; /* first page number in csrow */ unsigned long last_page; /* last page number in csrow */ unsigned long page_mask; /* used for interleaving - * 0UL for non intlv */ int csrow_idx; /* the chip-select row */ u32 ue_count; /* Uncorrectable Errors for this csrow */ u32 ce_count; /* Correctable Errors for this csrow */ struct mem_ctl_info *mci; /* the parent */ /* channel information for this csrow */ u32 nr_channels; struct rank_info **channels; }; /* * struct errcount_attribute - used to store the several error counts */ struct errcount_attribute_data { int n_layers; int pos[EDAC_MAX_LAYERS]; int layer0, layer1, layer2; }; /** * struct edac_raw_error_desc - Raw error report structure * @grain: minimum granularity for an error report, in bytes * @error_count: number of errors of the same type * @top_layer: top layer of the error (layer[0]) * @mid_layer: middle layer of the error (layer[1]) * @low_layer: low layer of the error (layer[2]) * @page_frame_number: page where the error happened * @offset_in_page: page offset * @syndrome: syndrome of the error (or 0 if unknown or if * the syndrome is not applicable) * @msg: error message * @location: location of the error * @label: label of the affected DIMM(s) * @other_detail: other driver-specific detail about the error * @enable_per_layer_report: if false, the error affects all layers * (typically, a memory controller error) */ struct edac_raw_error_desc { /* * NOTE: everything before grain won't be cleaned by * edac_raw_error_desc_clean() */ char location[LOCATION_SIZE]; char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS]; long grain; /* the vars below and grain will be cleaned on every new error report */ u16 error_count; int top_layer; int mid_layer; int low_layer; unsigned long page_frame_number; unsigned long offset_in_page; unsigned long syndrome; const char *msg; const char *other_detail; bool enable_per_layer_report; }; /* MEMORY controller information structure */ struct mem_ctl_info { struct device dev; struct bus_type *bus; struct list_head link; /* for global list of mem_ctl_info structs */ struct module *owner; /* Module owner of this control struct */ unsigned long mtype_cap; /* memory types supported by mc */ unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ unsigned long edac_cap; /* configuration capabilities - this is * closely related to edac_ctl_cap. The * difference is that the controller may be * capable of s4ecd4ed which would be listed * in edac_ctl_cap, but if channels aren't * capable of s4ecd4ed then the edac_cap would * not have that capability. */ unsigned long scrub_cap; /* chipset scrub capabilities */ enum scrub_type scrub_mode; /* current scrub mode */ /* Translates sdram memory scrub rate given in bytes/sec to the internal representation and configures whatever else needs to be configured. */ int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw); /* Get the current sdram memory scrub rate from the internal representation and converts it to the closest matching bandwidth in bytes/sec. 
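 *
 * A minimal hypothetical implementation (illustrative sketch only; the
 * foo_* names and FOO_SCRUB_CTL register are not part of this header)
 * might read back the programmed rate like:
 *
 *	static int foo_get_sdram_scrub_rate(struct mem_ctl_info *mci)
 *	{
 *		struct foo_pvt *pvt = mci->pvt_info;
 *
 *		return foo_reg_to_bandwidth(readl(pvt->base + FOO_SCRUB_CTL));
 *	}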
*/ int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci); /* pointer to edac checking routine */ void (*edac_check) (struct mem_ctl_info * mci); /* * Remaps memory pages: controller pages to physical pages. * For most MC's, this will be NULL. */ /* FIXME - why not send the phys page to begin with? */ unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, unsigned long page); int mc_idx; struct csrow_info **csrows; unsigned int nr_csrows, num_cschannel; /* * Memory Controller hierarchy * * There are basically two types of memory controller: the ones that * see memory sticks ("dimms"), and the ones that see memory ranks. * All old memory controllers enumerate memories per rank, but most * of the recent drivers enumerate memories per DIMM, instead. * When the memory controller is per rank, csbased is true. */ unsigned int n_layers; struct edac_mc_layer *layers; bool csbased; /* * DIMM info. Will eventually remove the entire csrows_info some day */ unsigned int tot_dimms; struct dimm_info **dimms; /* * FIXME - what about controllers on other busses? - IDs must be * unique. dev pointer should be sufficiently unique, but * BUS:SLOT.FUNC numbers may not be unique. */ struct device *pdev; const char *mod_name; const char *ctl_name; const char *dev_name; void *pvt_info; unsigned long start_time; /* mci load start time (in jiffies) */ /* * drivers shouldn't access those fields directly, as the core * already handles that. */ u32 ce_noinfo_count, ue_noinfo_count; u32 ue_mc, ce_mc; u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; struct completion complete; /* Additional top controller level attributes, but specified * by the low level driver. * * Set by the low level driver to provide attributes at the * controller level. * An array of structures, NULL terminated * * If attributes are desired, then set to array of attributes * If no attributes are desired, leave NULL */ const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; /* work struct for this MC */ struct delayed_work work; /* * Used to report an error - being part of the global struct * means the memory is allocated by the EDAC core */ struct edac_raw_error_desc error_desc; /* the internal state of this controller instance */ int op_state; struct dentry *debugfs; u8 fake_inject_layer[EDAC_MAX_LAYERS]; bool fake_inject_ue; u16 fake_inject_count; }; #endif devpts_fs.h 0000644 00000002204 14722070374 0006716 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* -*- linux-c -*- --------------------------------------------------------- * * * linux/include/linux/devpts_fs.h * * Copyright 1998-2004 H.
Peter Anvin -- All Rights Reserved * * ------------------------------------------------------------------------- */ #ifndef _LINUX_DEVPTS_FS_H #define _LINUX_DEVPTS_FS_H #include <linux/errno.h> #ifdef CONFIG_UNIX98_PTYS struct pts_fs_info; struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *); struct pts_fs_info *devpts_acquire(struct file *); void devpts_release(struct pts_fs_info *); int devpts_new_index(struct pts_fs_info *); void devpts_kill_index(struct pts_fs_info *, int); /* mknod in devpts */ struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *); /* get private structure */ void *devpts_get_priv(struct dentry *); /* unlink */ void devpts_pty_kill(struct dentry *); /* in pty.c */ int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags); #else static inline int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags) { return -EIO; } #endif #endif /* _LINUX_DEVPTS_FS_H */ ethtool.h 0000644 00000043615 14722070374 0006412 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * ethtool.h: Defines for Linux ethtool. * * Copyright (C) 1998 David S. Miller (davem@redhat.com) * Copyright 2001 Jeff Garzik <jgarzik@pobox.com> * Portions Copyright 2001 Sun Microsystems (thockin@sun.com) * Portions Copyright 2002 Intel (eli.kupermann@intel.com, * christopher.leech@intel.com, * scott.feldman@intel.com) * Portions Copyright (C) Sun Microsystems 2008 */ #ifndef _LINUX_ETHTOOL_H #define _LINUX_ETHTOOL_H #include <linux/bitmap.h> #include <linux/compat.h> #include <uapi/linux/ethtool.h> #ifdef CONFIG_COMPAT struct compat_ethtool_rx_flow_spec { u32 flow_type; union ethtool_flow_union h_u; struct ethtool_flow_ext h_ext; union ethtool_flow_union m_u; struct ethtool_flow_ext m_ext; compat_u64 ring_cookie; u32 location; }; struct compat_ethtool_rxnfc { u32 cmd; u32 flow_type; compat_u64 data; struct compat_ethtool_rx_flow_spec fs; u32 rule_cnt; u32 rule_locs[0]; }; #endif /* CONFIG_COMPAT */ #include <linux/rculist.h> /** * enum ethtool_phys_id_state - indicator state for physical identification * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated * @ETHTOOL_ID_ACTIVE: Physical ID indicator should be activated * @ETHTOOL_ID_ON: LED should be turned on (used iff %ETHTOOL_ID_ACTIVE * is not supported) * @ETHTOOL_ID_OFF: LED should be turned off (used iff %ETHTOOL_ID_ACTIVE * is not supported) */ enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE, ETHTOOL_ID_ACTIVE, ETHTOOL_ID_ON, ETHTOOL_ID_OFF }; enum { ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ ETH_RSS_HASH_CRC32_BIT, /* Configurable RSS hash function - Crc32 */ /* * Add your fresh new hash function bits above and remember to update * rss_hash_func_strings[] in ethtool.c */ ETH_RSS_HASH_FUNCS_COUNT }; #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) #define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT) #define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) #define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) #define ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32) #define ETH_RSS_HASH_UNKNOWN 0 #define ETH_RSS_HASH_NO_CHANGE 0 struct net_device; /* Some generic methods drivers may use in their ethtool_ops */ u32 ethtool_op_get_link(struct net_device *dev); int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); /** * ethtool_rxfh_indir_default - get default value for RX flow hash indirection * @index: Index in RX flow hash indirection table * @n_rx_rings: Number of RX rings 
to use * * This function provides the default policy for RX flow hash indirection. */ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) { return index % n_rx_rings; } /* declare a link mode bitmap */ #define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) /* drivers must ignore base.cmd and base.link_mode_masks_nwords * fields, but they are allowed to overwrite them (will be ignored). */ struct ethtool_link_ksettings { struct ethtool_link_settings base; struct { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); } link_modes; }; /** * ethtool_link_ksettings_zero_link_mode - clear link_ksettings link mode mask * @ptr : pointer to struct ethtool_link_ksettings * @name : one of supported/advertising/lp_advertising */ #define ethtool_link_ksettings_zero_link_mode(ptr, name) \ bitmap_zero((ptr)->link_modes.name, __ETHTOOL_LINK_MODE_MASK_NBITS) /** * ethtool_link_ksettings_add_link_mode - set bit in link_ksettings * link mode mask * @ptr : pointer to struct ethtool_link_ksettings * @name : one of supported/advertising/lp_advertising * @mode : one of the ETHTOOL_LINK_MODE_*_BIT * (not atomic, no bound checking) */ #define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) /** * ethtool_link_ksettings_del_link_mode - clear bit in link_ksettings * link mode mask * @ptr : pointer to struct ethtool_link_ksettings * @name : one of supported/advertising/lp_advertising * @mode : one of the ETHTOOL_LINK_MODE_*_BIT * (not atomic, no bound checking) */ #define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) /** * ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask * @ptr : pointer to struct ethtool_link_ksettings * @name : one of supported/advertising/lp_advertising * @mode : one of the ETHTOOL_LINK_MODE_*_BIT * (not atomic, no bound checking) * * Returns true/false. */ #define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \ test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) extern int __ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings); /** * ethtool_intersect_link_masks - Given two link masks, AND them together * @dst: first mask and where result is stored * @src: second mask to intersect with * * Given two link mode masks, AND them together and save the result in dst. */ void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, struct ethtool_link_ksettings *src); void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, u32 legacy_u32); /* return false if src had higher bits set. lower bits always updated. */ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, const unsigned long *src); /** * struct ethtool_ops - optional netdev operations * @get_drvinfo: Report driver/device information. Should only set the * @driver, @version, @fw_version and @bus_info fields. If not * implemented, the @driver and @bus_info fields will be filled in * according to the netdev's parent device. * @get_regs_len: Get buffer length required for @get_regs * @get_regs: Get device registers * @get_wol: Report whether Wake-on-Lan is enabled * @set_wol: Turn Wake-on-Lan on or off. Returns a negative error code * or zero. * @get_msglevel: Report driver message level. 
This should be the value * of the @msg_enable field used by netif logging functions. * @set_msglevel: Set driver message level * @nway_reset: Restart autonegotiation. Returns a negative error code * or zero. * @get_link: Report whether physical link is up. Will only be called if * the netdev is up. Should usually be set to ethtool_op_get_link(), * which uses netif_carrier_ok(). * @get_eeprom: Read data from the device EEPROM. * Should fill in the magic field. Don't need to check len for zero * or wraparound. Fill in the data argument with the eeprom values * from offset to offset + len. Update len to the amount read. * Returns an error or zero. * @set_eeprom: Write data to the device EEPROM. * Should validate the magic field. Don't need to check len for zero * or wraparound. Update len to the amount written. Returns an error * or zero. * @get_coalesce: Get interrupt coalescing parameters. Returns a negative * error code or zero. * @set_coalesce: Set interrupt coalescing parameters. Returns a negative * error code or zero. * @get_ringparam: Report ring sizes * @set_ringparam: Set ring sizes. Returns a negative error code or zero. * @get_pauseparam: Report pause parameters * @set_pauseparam: Set pause parameters. Returns a negative error code * or zero. * @self_test: Run specified self-tests * @get_strings: Return a set of strings that describe the requested objects * @set_phys_id: Identify the physical devices, e.g. by flashing an LED * attached to it. The implementation may update the indicator * asynchronously or synchronously, but in either case it must return * quickly. It is initially called with the argument %ETHTOOL_ID_ACTIVE, * and must either activate asynchronous updates and return zero, return * a negative error or return a positive frequency for synchronous * indication (e.g. 1 for one on/off cycle per second). If it returns * a frequency then it will be called again at intervals with the * argument %ETHTOOL_ID_ON or %ETHTOOL_ID_OFF and should set the state of * the indicator accordingly. Finally, it is called with the argument * %ETHTOOL_ID_INACTIVE and must deactivate the indicator. Returns a * negative error code or zero. * @get_ethtool_stats: Return extended statistics about the device. * This is only useful if the device maintains statistics not * included in &struct rtnl_link_stats64. * @begin: Function to be called before any other operation. Returns a * negative error code or zero. * @complete: Function to be called after any other operation except * @begin. Will be called even if the other operation failed. * @get_priv_flags: Report driver-specific feature flags. * @set_priv_flags: Set driver-specific feature flags. Returns a negative * error code or zero. * @get_sset_count: Get number of strings that @get_strings will write. * @get_rxnfc: Get RX flow classification rules. Returns a negative * error code or zero. * @set_rxnfc: Set RX flow classification rules. Returns a negative * error code or zero. * @flash_device: Write a firmware image to device's flash memory. * Returns a negative error code or zero. * @reset: Reset (part of) the device, as specified by a bitmask of * flags from &enum ethtool_reset_flags. Returns a negative * error code or zero. * @get_rxfh_key_size: Get the size of the RX flow hash key. * Returns zero if not supported for this specific device. * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. * Returns zero if not supported for this specific device. 
* @get_rxfh: Get the contents of the RX flow hash indirection table, hash key * and/or hash function. * Returns a negative error code or zero. * @set_rxfh: Set the contents of the RX flow hash indirection table, hash * key, and/or hash function. Arguments which are set to %NULL or zero * will remain unchanged. * Returns a negative error code or zero. An error code must be returned * if at least one unsupported change was requested. * @get_rxfh_context: Get the contents of the RX flow hash indirection table, * hash key, and/or hash function associated with the given rss context. * Returns a negative error code or zero. * @set_rxfh_context: Create, remove and configure RSS contexts. Allows setting * the contents of the RX flow hash indirection table, hash key, and/or * hash function associated with the given context. Arguments which are set * to %NULL or zero will remain unchanged. * Returns a negative error code or zero. An error code must be returned * if at least one unsupported change was requested. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or * zero. * @get_dump_flag: Get dump flag indicating current dump length, version, * and flag of the device. * @get_dump_data: Get dump data. * @set_dump: Set dump specific flags to the device. * @get_ts_info: Get the time stamping and PTP hardware clock capabilities. * Drivers supporting transmit time stamps in software should set this to * ethtool_op_get_ts_info(). * @get_module_info: Get the size and type of the eeprom contained within * a plug-in module. * @get_module_eeprom: Get the eeprom information from the plug-in module * @get_eee: Get Energy-Efficient (EEE) supported and status. * @set_eee: Set EEE status (enable/disable) as well as LPI timers. * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue. * It must check that the given queue number is valid. If neither a RX nor * a TX queue has this number, return -EINVAL. If only a RX queue or a TX * queue has this number, set the inapplicable fields to ~0 and return 0. * Returns a negative error code or zero. * @set_per_queue_coalesce: Set interrupt coalescing parameters per queue. * It must check that the given queue number is valid. If neither a RX nor * a TX queue has this number, return -EINVAL. If only a RX queue or a TX * queue has this number, ignore the inapplicable fields. * Returns a negative error code or zero. * @get_link_ksettings: Get various device settings including Ethernet link * settings. The %cmd and %link_mode_masks_nwords fields should be * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), * any change to them will be overwritten by kernel. Returns a negative * error code or zero. * @set_link_ksettings: Set various device settings including Ethernet link * settings. The %cmd and %link_mode_masks_nwords fields should be * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), * any change to them will be overwritten by kernel. Returns a negative * error code or zero. * @get_fecparam: Get the network device Forward Error Correction parameters. * @set_fecparam: Set the network device Forward Error Correction parameters. * @get_ethtool_phy_stats: Return extended statistics about the PHY device. * This is only useful if the device maintains PHY statistics and * cannot use the standard PHY library helpers. * * All operations are optional (i.e. the function pointer may be set * to %NULL) and callers must take this into account. Callers must * hold the RTNL lock.
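 *
 * As a hedged sketch (hypothetical driver, not part of this header), a
 * minimal ops table is typically defined once and attached to the netdev:
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link		= ethtool_op_get_link,
 *		.get_link_ksettings	= foo_get_link_ksettings,
 *		.set_link_ksettings	= foo_set_link_ksettings,
 *	};
 *	...
 *	netdev->ethtool_ops = &foo_ethtool_ops;
 *
 * and a @get_link_ksettings implementation would typically fill the
 * link_modes bitmaps with the helpers declared earlier, e.g.
 *
 *	ethtool_link_ksettings_zero_link_mode(ks, supported);
 *	ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full);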
* * See the structures used by these operations for further documentation. * Note that for all operations using a structure ending with a zero- * length array, the array is allocated separately in the kernel and * is passed to the driver as an additional parameter. * * See &struct net_device and &struct net_device_ops for documentation * of the generic netdev features interface. */ struct ethtool_ops { void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); u32 (*get_msglevel)(struct net_device *); void (*set_msglevel)(struct net_device *, u32); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam*); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam*); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 stringset, u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32); int (*get_sset_count)(struct net_device *, int); int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *rule_locs); int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key, u8 *hfunc); int (*set_rxfh)(struct net_device *, const u32 *indir, const u8 *key, const u8 hfunc); int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key, u8 *hfunc, u32 rss_context); int (*set_rxfh_context)(struct net_device *, const u32 *indir, const u8 *key, const u8 hfunc, u32 *rss_context, bool delete); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void 
*); int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); }; struct ethtool_rx_flow_rule { struct flow_rule *rule; unsigned long priv[0]; }; struct ethtool_rx_flow_spec_input { const struct ethtool_rx_flow_spec *fs; u32 rss_ctx; }; struct ethtool_rx_flow_rule * ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input); void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule); #endif /* _LINUX_ETHTOOL_H */ hiddev.h 0000644 00000002662 14722070374 0006174 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 1999-2000 Vojtech Pavlik * * Sponsored by SuSE */ /* * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail: * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic */ #ifndef _HIDDEV_H #define _HIDDEV_H #include <uapi/linux/hiddev.h> /* * In-kernel definitions. */ struct hiddev { int minor; int exist; int open; struct mutex existancelock; wait_queue_head_t wait; struct hid_device *hid; struct list_head list; spinlock_t list_lock; bool initialized; }; struct hid_device; struct hid_usage; struct hid_field; struct hid_report; #ifdef CONFIG_USB_HIDDEV int hiddev_connect(struct hid_device *hid, unsigned int force); void hiddev_disconnect(struct hid_device *); void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value); void hiddev_report_event(struct hid_device *hid, struct hid_report *report); #else static inline int hiddev_connect(struct hid_device *hid, unsigned int force) { return -1; } static inline void hiddev_disconnect(struct hid_device *hid) { } static inline void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) { } static inline void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { } #endif #endif netfilter.h 0000644 00000032562 14722070374 0006727 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_NETFILTER_H #define __LINUX_NETFILTER_H #include <linux/init.h> #include <linux/skbuff.h> #include <linux/net.h> #include <linux/if.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/wait.h> #include <linux/list.h> #include <linux/static_key.h> #include <linux/netfilter_defs.h> #include <linux/netdevice.h> #include <net/net_namespace.h> static inline int NF_DROP_GETERR(int verdict) { return -(verdict >> NF_VERDICT_QBITS); } static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1, const union nf_inet_addr *a2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul1 = (const unsigned long *)a1; const unsigned long *ul2 = (const unsigned long *)a2; return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL; #else return a1->all[0] == a2->all[0] && a1->all[1] == a2->all[1] && a1->all[2] == a2->all[2] && a1->all[3] == a2->all[3]; #endif } static inline void nf_inet_addr_mask(const union nf_inet_addr *a1, union nf_inet_addr *result, const union 
nf_inet_addr *mask) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ua = (const unsigned long *)a1; unsigned long *ur = (unsigned long *)result; const unsigned long *um = (const unsigned long *)mask; ur[0] = ua[0] & um[0]; ur[1] = ua[1] & um[1]; #else result->all[0] = a1->all[0] & mask->all[0]; result->all[1] = a1->all[1] & mask->all[1]; result->all[2] = a1->all[2] & mask->all[2]; result->all[3] = a1->all[3] & mask->all[3]; #endif } int netfilter_init(void); struct sk_buff; struct nf_hook_ops; struct sock; struct nf_hook_state { unsigned int hook; u_int8_t pf; struct net_device *in; struct net_device *out; struct sock *sk; struct net *net; int (*okfn)(struct net *, struct sock *, struct sk_buff *); }; typedef unsigned int nf_hookfn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); struct nf_hook_ops { /* User fills in from here down. */ nf_hookfn *hook; struct net_device *dev; void *priv; u_int8_t pf; unsigned int hooknum; /* Hooks are ordered in ascending priority. */ int priority; }; struct nf_hook_entry { nf_hookfn *hook; void *priv; }; struct nf_hook_entries_rcu_head { struct rcu_head head; void *allocation; }; struct nf_hook_entries { u16 num_hook_entries; /* padding */ struct nf_hook_entry hooks[]; /* trailer: pointers to original orig_ops of each hook, * followed by rcu_head and scratch space used for freeing * the structure via call_rcu. * * This is not part of struct nf_hook_entry since it's only * needed in the slow path (hook register/unregister): * const struct nf_hook_ops *orig_ops[] * * For the same reason, we store this at the end -- it's * only needed when a hook is deleted, not during * packet path processing: * struct nf_hook_entries_rcu_head head */ }; #ifdef CONFIG_NETFILTER static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e) { unsigned int n = e->num_hook_entries; const void *hook_end; hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */ return (struct nf_hook_ops **)hook_end; } static inline int nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb, struct nf_hook_state *state) { return entry->hook(entry->priv, skb, state); } static inline void nf_hook_state_init(struct nf_hook_state *p, unsigned int hook, u_int8_t pf, struct net_device *indev, struct net_device *outdev, struct sock *sk, struct net *net, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { p->hook = hook; p->pf = pf; p->in = indev; p->out = outdev; p->sk = sk; p->net = net; p->okfn = okfn; } struct nf_sockopt_ops { struct list_head list; u_int8_t pf; /* Non-inclusive ranges: use 0/0/NULL to never get called. */ int set_optmin; int set_optmax; int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); #ifdef CONFIG_COMPAT int (*compat_set)(struct sock *sk, int optval, void __user *user, unsigned int len); #endif int get_optmin; int get_optmax; int (*get)(struct sock *sk, int optval, void __user *user, int *len); #ifdef CONFIG_COMPAT int (*compat_get)(struct sock *sk, int optval, void __user *user, int *len); #endif /* Use the module struct to lock set/get code in place */ struct module *owner; }; /* Functions to register/unregister hook points.
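 *
 * A hedged example of registering a hook (hypothetical module code,
 * not part of this header):
 *
 *	static unsigned int foo_hook(void *priv, struct sk_buff *skb,
 *				     const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;	- let the packet continue
 *	}
 *
 *	static const struct nf_hook_ops foo_ops = {
 *		.hook		= foo_hook,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_LOCAL_IN,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *	...
 *	err = nf_register_net_hook(net, &foo_ops);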
*/ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops); void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops); int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg, unsigned int n); void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, unsigned int n); /* Functions to register get/setsockopt ranges (non-inclusive). You need to check permissions yourself! */ int nf_register_sockopt(struct nf_sockopt_ops *reg); void nf_unregister_sockopt(struct nf_sockopt_ops *reg); #ifdef CONFIG_JUMP_LABEL extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; #endif int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, const struct nf_hook_entries *e, unsigned int i); /** * nf_hook - call a netfilter hook * * Returns 1 if the hook has allowed the packet to pass. The function * okfn must be invoked by the caller in this case. Any other return * value indicates the packet has been consumed by the hook. */ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { struct nf_hook_entries *hook_head = NULL; int ret = 1; #ifdef CONFIG_JUMP_LABEL if (__builtin_constant_p(pf) && __builtin_constant_p(hook) && !static_key_false(&nf_hooks_needed[pf][hook])) return 1; #endif rcu_read_lock(); switch (pf) { case NFPROTO_IPV4: hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); break; case NFPROTO_IPV6: hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); break; case NFPROTO_ARP: #ifdef CONFIG_NETFILTER_FAMILY_ARP if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp))) break; hook_head = rcu_dereference(net->nf.hooks_arp[hook]); #endif break; case NFPROTO_BRIDGE: #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); #endif break; default: WARN_ON_ONCE(1); break; } if (hook_head) { struct nf_hook_state state; nf_hook_state_init(&state, hook, pf, indev, outdev, sk, net, okfn); ret = nf_hook_slow(skb, &state, hook_head, 0); } rcu_read_unlock(); return ret; } /* Activate hook; either okfn or kfree_skb called, unless a hook returns NF_STOLEN (in which case, it's up to the hook to deal with the consequences). Returns -ERRNO if packet dropped. Zero means queued, stolen or accepted. */ /* RR: > I don't want nf_hook to return anything because people might forget > about async and trust the return value to mean "packet was ok". 
AK: Just document it clearly, then you can expect some sense from kernel coders :) */ static inline int NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *), bool cond) { int ret; if (!cond || ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1)) ret = okfn(net, sk, skb); return ret; } static inline int NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn); if (ret == 1) ret = okfn(net, sk, skb); return ret; } static inline void NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct list_head *head, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { struct sk_buff *skb, *next; struct list_head sublist; INIT_LIST_HEAD(&sublist); list_for_each_entry_safe(skb, next, head, list) { skb_list_del_init(skb); if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1) list_add_tail(&skb->list, &sublist); } /* Put passed packets back on main list */ list_splice(&sublist, head); } /* Call setsockopt() */ int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, unsigned int len); int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); #ifdef CONFIG_COMPAT int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, unsigned int len); int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); #endif struct flowi; struct nf_queue_entry; __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol, unsigned short family); __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u_int8_t protocol, unsigned short family); int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict, unsigned short family); int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry); #include <net/flow.h> struct nf_conn; enum nf_nat_manip_type; struct nlattr; enum ip_conntrack_dir; struct nf_nat_hook { int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip, const struct nlattr *attr); void (*decode_session)(struct sk_buff *skb, struct flowi *fl); unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct, enum nf_nat_manip_type mtype, enum ip_conntrack_dir dir); }; extern struct nf_nat_hook __rcu *nf_nat_hook; static inline void nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) { #if IS_ENABLED(CONFIG_NF_NAT) struct nf_nat_hook *nat_hook; rcu_read_lock(); nat_hook = rcu_dereference(nf_nat_hook); if (nat_hook && nat_hook->decode_session) nat_hook->decode_session(skb, fl); rcu_read_unlock(); #endif } #else /* !CONFIG_NETFILTER */ static inline int NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *), bool cond) { return okfn(net, sk, skb); } static inline int NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff 
*)) { return okfn(net, sk, skb); } static inline void NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct list_head *head, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { /* nothing to do */ } static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { return 1; } struct flowi; static inline void nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) { } #endif /*CONFIG_NETFILTER*/ #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/netfilter/nf_conntrack_zones_common.h> extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; void nf_ct_attach(struct sk_buff *, const struct sk_buff *); struct nf_conntrack_tuple; bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, const struct sk_buff *skb); #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} struct nf_conntrack_tuple; static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, const struct sk_buff *skb) { return false; } #endif struct nf_conn; enum ip_conntrack_info; struct nf_ct_hook { int (*update)(struct net *net, struct sk_buff *skb); void (*destroy)(struct nf_conntrack *); bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); }; extern struct nf_ct_hook __rcu *nf_ct_hook; struct nlattr; struct nfnl_ct_hook { struct nf_conn *(*get_ct)(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo); size_t (*build_size)(const struct nf_conn *ct); int (*build)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, u_int16_t ct_attr, u_int16_t ct_info_attr); int (*parse)(const struct nlattr *attr, struct nf_conn *ct); int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, u32 portid, u32 report); void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, s32 off); }; extern struct nfnl_ct_hook __rcu *nfnl_ct_hook; /** * nf_skb_duplicated - TEE target has sent a packet * * When a xtables target sends a packet, the OUTPUT and POSTROUTING * hooks are traversed again, i.e. nft and xtables are invoked recursively. * * This is used by xtables TEE target to prevent the duplicated skb from * being duplicated again. 
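 *
 * A hedged sketch of the intended usage pattern (simplified, not a
 * verbatim copy of the TEE target code):
 *
 *	if (this_cpu_read(nf_skb_duplicated))
 *		return XT_CONTINUE;
 *	this_cpu_write(nf_skb_duplicated, true);
 *	... duplicate and send the skb ...
 *	this_cpu_write(nf_skb_duplicated, false);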
*/ DECLARE_PER_CPU(bool, nf_skb_duplicated); #endif /*__LINUX_NETFILTER_H*/ sock_diag.h 0000644 00000004240 14722070374 0006646 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SOCK_DIAG_H__ #define __SOCK_DIAG_H__ #include <linux/netlink.h> #include <linux/user_namespace.h> #include <net/net_namespace.h> #include <net/sock.h> #include <uapi/linux/sock_diag.h> struct sk_buff; struct nlmsghdr; struct sock; struct sock_diag_handler { __u8 family; int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); int (*get_info)(struct sk_buff *skb, struct sock *sk); int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh); }; int sock_diag_register(const struct sock_diag_handler *h); void sock_diag_unregister(const struct sock_diag_handler *h); void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); u64 sock_gen_cookie(struct sock *sk); int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie); void sock_diag_save_cookie(struct sock *sk, __u32 *cookie); int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, struct sk_buff *skb, int attrtype); static inline enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk) { switch (sk->sk_family) { case AF_INET: if (sk->sk_type == SOCK_RAW) return SKNLGRP_NONE; switch (sk->sk_protocol) { case IPPROTO_TCP: return SKNLGRP_INET_TCP_DESTROY; case IPPROTO_UDP: return SKNLGRP_INET_UDP_DESTROY; default: return SKNLGRP_NONE; } case AF_INET6: if (sk->sk_type == SOCK_RAW) return SKNLGRP_NONE; switch (sk->sk_protocol) { case IPPROTO_TCP: return SKNLGRP_INET6_TCP_DESTROY; case IPPROTO_UDP: return SKNLGRP_INET6_UDP_DESTROY; default: return SKNLGRP_NONE; } default: return SKNLGRP_NONE; } } static inline bool sock_diag_has_destroy_listeners(const struct sock *sk) { const struct net *n = sock_net(sk); const enum sknetlink_groups group = sock_diag_destroy_group(sk); return group != SKNLGRP_NONE && n->diag_nlsk && netlink_has_listeners(n->diag_nlsk, group); } void sock_diag_broadcast_destroy(struct sock *sk); int sock_diag_destroy(struct sock *sk, int err); #endif leds-regulator.h 0000644 00000002220 14722070374 0007650 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * leds-regulator.h - platform data structure for regulator driven LEDs. 
* * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> */ #ifndef __LINUX_LEDS_REGULATOR_H #define __LINUX_LEDS_REGULATOR_H /* * Use "vled" as supply id when declaring the regulator consumer: * * static struct regulator_consumer_supply pcap_regulator_VVIB_consumers [] = { * { .dev_name = "leds-regulator.0", .supply = "vled" }, * }; * * If you have several regulator driven LEDs, you can append a numerical id to * .dev_name as done above, and use the same id when declaring the platform * device: * * static struct led_regulator_platform_data a780_vibrator_data = { * .name = "a780::vibrator", * }; * * static struct platform_device a780_vibrator = { * .name = "leds-regulator", * .id = 0, * .dev = { * .platform_data = &a780_vibrator_data, * }, * }; */ #include <linux/leds.h> struct led_regulator_platform_data { char *name; /* LED name as expected by LED class */ enum led_brightness brightness; /* initial brightness value */ }; #endif /* __LINUX_LEDS_REGULATOR_H */ io-mapping.h 0000644 00000007420 14722070374 0006766 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2008 Keith Packard <keithp@keithp.com> */ #ifndef _LINUX_IO_MAPPING_H #define _LINUX_IO_MAPPING_H #include <linux/types.h> #include <linux/slab.h> #include <linux/bug.h> #include <linux/io.h> #include <asm/page.h> /* * The io_mapping mechanism provides an abstraction for mapping * individual pages from an io device to the CPU in an efficient fashion. * * See Documentation/io-mapping.txt */ struct io_mapping { resource_size_t base; unsigned long size; pgprot_t prot; void __iomem *iomem; }; #ifdef CONFIG_HAVE_ATOMIC_IOMAP #include <asm/iomap.h> /* * For small address space machines, mapping large objects * into the kernel virtual space isn't practical. Where * available, use fixmap support to dynamically map pages * of the object at run time. */ static inline struct io_mapping * io_mapping_init_wc(struct io_mapping *iomap, resource_size_t base, unsigned long size) { pgprot_t prot; if (iomap_create_wc(base, size, &prot)) return NULL; iomap->base = base; iomap->size = size; iomap->prot = prot; return iomap; } static inline void io_mapping_fini(struct io_mapping *mapping) { iomap_free(mapping->base, mapping->size); } /* Atomic map/unmap */ static inline void __iomem * io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) { resource_size_t phys_addr; unsigned long pfn; BUG_ON(offset >= mapping->size); phys_addr = mapping->base + offset; pfn = (unsigned long) (phys_addr >> PAGE_SHIFT); return iomap_atomic_prot_pfn(pfn, mapping->prot); } static inline void io_mapping_unmap_atomic(void __iomem *vaddr) { iounmap_atomic(vaddr); } static inline void __iomem * io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset, unsigned long size) { resource_size_t phys_addr; BUG_ON(offset >= mapping->size); phys_addr = mapping->base + offset; return ioremap_wc(phys_addr, size); } static inline void io_mapping_unmap(void __iomem *vaddr) { iounmap(vaddr); } #else #include <linux/uaccess.h> #include <asm/pgtable.h> /* Create the io_mapping object*/ static inline struct io_mapping * io_mapping_init_wc(struct io_mapping *iomap, resource_size_t base, unsigned long size) { iomap->iomem = ioremap_wc(base, size); if (!iomap->iomem) return NULL; iomap->base = base; iomap->size = size; #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... 
*/ iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); #elif defined(pgprot_writecombine) iomap->prot = pgprot_writecombine(PAGE_KERNEL); #else iomap->prot = pgprot_noncached(PAGE_KERNEL); #endif return iomap; } static inline void io_mapping_fini(struct io_mapping *mapping) { iounmap(mapping->iomem); } /* Non-atomic map/unmap */ static inline void __iomem * io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset, unsigned long size) { return mapping->iomem + offset; } static inline void io_mapping_unmap(void __iomem *vaddr) { } /* Atomic map/unmap */ static inline void __iomem * io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) { preempt_disable(); pagefault_disable(); return io_mapping_map_wc(mapping, offset, PAGE_SIZE); } static inline void io_mapping_unmap_atomic(void __iomem *vaddr) { io_mapping_unmap(vaddr); pagefault_enable(); preempt_enable(); } #endif /* HAVE_ATOMIC_IOMAP */ static inline struct io_mapping * io_mapping_create_wc(resource_size_t base, unsigned long size) { struct io_mapping *iomap; iomap = kmalloc(sizeof(*iomap), GFP_KERNEL); if (!iomap) return NULL; if (!io_mapping_init_wc(iomap, base, size)) { kfree(iomap); return NULL; } return iomap; } static inline void io_mapping_free(struct io_mapping *iomap) { io_mapping_fini(iomap); kfree(iomap); } #endif /* _LINUX_IO_MAPPING_H */ pm_clock.h 0000644 00000004714 14722070374 0006520 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * pm_clock.h - Definitions and headers related to device clocks. * * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. */ #ifndef _LINUX_PM_CLOCK_H #define _LINUX_PM_CLOCK_H #include <linux/device.h> #include <linux/notifier.h> struct pm_clk_notifier_block { struct notifier_block nb; struct dev_pm_domain *pm_domain; char *con_ids[]; }; struct clk; #ifdef CONFIG_PM extern int pm_clk_runtime_suspend(struct device *dev); extern int pm_clk_runtime_resume(struct device *dev); #define USE_PM_CLK_RUNTIME_OPS \ .runtime_suspend = pm_clk_runtime_suspend, \ .runtime_resume = pm_clk_runtime_resume, #else #define USE_PM_CLK_RUNTIME_OPS #endif #ifdef CONFIG_PM_CLK static inline bool pm_clk_no_clocks(struct device *dev) { return dev && dev->power.subsys_data && list_empty(&dev->power.subsys_data->clock_list); } extern void pm_clk_init(struct device *dev); extern int pm_clk_create(struct device *dev); extern void pm_clk_destroy(struct device *dev); extern int pm_clk_add(struct device *dev, const char *con_id); extern int pm_clk_add_clk(struct device *dev, struct clk *clk); extern int of_pm_clk_add_clk(struct device *dev, const char *name); extern int of_pm_clk_add_clks(struct device *dev); extern void pm_clk_remove(struct device *dev, const char *con_id); extern void pm_clk_remove_clk(struct device *dev, struct clk *clk); extern int pm_clk_suspend(struct device *dev); extern int pm_clk_resume(struct device *dev); #else static inline bool pm_clk_no_clocks(struct device *dev) { return true; } static inline void pm_clk_init(struct device *dev) { } static inline int pm_clk_create(struct device *dev) { return -EINVAL; } static inline void pm_clk_destroy(struct device *dev) { } static inline int pm_clk_add(struct device *dev, const char *con_id) { return -EINVAL; } static inline int pm_clk_add_clk(struct device *dev, struct clk *clk) { return -EINVAL; } static inline int of_pm_clk_add_clks(struct device *dev) { return -EINVAL; } static inline void pm_clk_remove(struct device *dev, const char *con_id) { } #define pm_clk_suspend NULL #define 
pm_clk_resume NULL static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk) { } #endif #ifdef CONFIG_HAVE_CLK extern void pm_clk_add_notifier(struct bus_type *bus, struct pm_clk_notifier_block *clknb); #else static inline void pm_clk_add_notifier(struct bus_type *bus, struct pm_clk_notifier_block *clknb) { } #endif #endif backlight.h 0000644 00000014570 14722070374 0006662 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Backlight Lowlevel Control Abstraction * * Copyright (C) 2003,2004 Hewlett-Packard Company * */ #ifndef _LINUX_BACKLIGHT_H #define _LINUX_BACKLIGHT_H #include <linux/device.h> #include <linux/fb.h> #include <linux/mutex.h> #include <linux/notifier.h> /* Notes on locking: * * backlight_device->ops_lock is an internal backlight lock protecting the * ops pointer and no code outside the core should need to touch it. * * Access to update_status() is serialised by the update_lock mutex since * most drivers seem to need this and historically get it wrong. * * Most drivers don't need locking on their get_brightness() method. * If yours does, you need to implement it in the driver. You can use the * update_lock mutex if appropriate. * * Any other use of the locks below is probably wrong. */ enum backlight_update_reason { BACKLIGHT_UPDATE_HOTKEY, BACKLIGHT_UPDATE_SYSFS, }; enum backlight_type { BACKLIGHT_RAW = 1, BACKLIGHT_PLATFORM, BACKLIGHT_FIRMWARE, BACKLIGHT_TYPE_MAX, }; enum backlight_notification { BACKLIGHT_REGISTERED, BACKLIGHT_UNREGISTERED, }; enum backlight_scale { BACKLIGHT_SCALE_UNKNOWN = 0, BACKLIGHT_SCALE_LINEAR, BACKLIGHT_SCALE_NON_LINEAR, }; struct backlight_device; struct fb_info; struct backlight_ops { unsigned int options; #define BL_CORE_SUSPENDRESUME (1 << 0) /* Notify the backlight driver some property has changed */ int (*update_status)(struct backlight_device *); /* Return the current backlight brightness (accounting for power, fb_blank etc.) */ int (*get_brightness)(struct backlight_device *); /* Check if given framebuffer device is the one bound to this backlight; return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ int (*check_fb)(struct backlight_device *, struct fb_info *); }; /* This structure defines all the properties of a backlight */ struct backlight_properties { /* Current User requested brightness (0 - max_brightness) */ int brightness; /* Maximal value for brightness (read-only) */ int max_brightness; /* Current FB Power mode (0: full on, 1..3: power saving modes; 4: full off), see FB_BLANK_XXX */ int power; /* FB Blanking active? (values as for power) */ /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ int fb_blank; /* Backlight type */ enum backlight_type type; /* Flags used to signal drivers of state changes */ unsigned int state; /* Type of the brightness scale (linear, non-linear, ...) */ enum backlight_scale scale; #define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ #define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ }; struct backlight_device { /* Backlight properties */ struct backlight_properties props; /* Serialise access to update_status method */ struct mutex update_lock; /* This protects the 'ops' field. If 'ops' is NULL, the driver that registered this device has been unloaded, and if class_get_devdata() points to something in the body of that driver, it is also invalid. 
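 *
 * As an informal sketch (bd and brightness are illustrative locals,
 * not part of the API), a holder of ops_lock checks the pointer before
 * every use:
 *
 *	mutex_lock(&bd->ops_lock);
 *	if (bd->ops && bd->ops->get_brightness)
 *		brightness = bd->ops->get_brightness(bd);
 *	mutex_unlock(&bd->ops_lock);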
*/ struct mutex ops_lock; const struct backlight_ops *ops; /* The framebuffer notifier block */ struct notifier_block fb_notif; /* list entry of all registered backlight devices */ struct list_head entry; struct device dev; /* Multiple framebuffers may share one backlight device */ bool fb_bl_on[FB_MAX]; int use_count; }; static inline int backlight_update_status(struct backlight_device *bd) { int ret = -ENOENT; mutex_lock(&bd->update_lock); if (bd->ops && bd->ops->update_status) ret = bd->ops->update_status(bd); mutex_unlock(&bd->update_lock); return ret; } /** * backlight_enable - Enable backlight * @bd: the backlight device to enable */ static inline int backlight_enable(struct backlight_device *bd) { if (!bd) return 0; bd->props.power = FB_BLANK_UNBLANK; bd->props.fb_blank = FB_BLANK_UNBLANK; bd->props.state &= ~BL_CORE_FBBLANK; return backlight_update_status(bd); } /** * backlight_disable - Disable backlight * @bd: the backlight device to disable */ static inline int backlight_disable(struct backlight_device *bd) { if (!bd) return 0; bd->props.power = FB_BLANK_POWERDOWN; bd->props.fb_blank = FB_BLANK_POWERDOWN; bd->props.state |= BL_CORE_FBBLANK; return backlight_update_status(bd); } /** * backlight_put - Drop backlight reference * @bd: the backlight device to put */ static inline void backlight_put(struct backlight_device *bd) { if (bd) put_device(&bd->dev); } extern struct backlight_device *backlight_device_register(const char *name, struct device *dev, void *devdata, const struct backlight_ops *ops, const struct backlight_properties *props); extern struct backlight_device *devm_backlight_device_register( struct device *dev, const char *name, struct device *parent, void *devdata, const struct backlight_ops *ops, const struct backlight_properties *props); extern void backlight_device_unregister(struct backlight_device *bd); extern void devm_backlight_device_unregister(struct device *dev, struct backlight_device *bd); extern void backlight_force_update(struct backlight_device *bd, enum backlight_update_reason reason); extern int backlight_register_notifier(struct notifier_block *nb); extern int backlight_unregister_notifier(struct notifier_block *nb); extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type); extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness); #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) static inline void * bl_get_data(struct backlight_device *bl_dev) { return dev_get_drvdata(&bl_dev->dev); } struct generic_bl_info { const char *name; int max_intensity; int default_intensity; int limit_mask; void (*set_bl_intensity)(int intensity); void (*kick_battery)(void); }; #ifdef CONFIG_OF struct backlight_device *of_find_backlight_by_node(struct device_node *node); #else static inline struct backlight_device * of_find_backlight_by_node(struct device_node *node) { return NULL; } #endif #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) struct backlight_device *of_find_backlight(struct device *dev); struct backlight_device *devm_of_find_backlight(struct device *dev); #else static inline struct backlight_device *of_find_backlight(struct device *dev) { return NULL; } static inline struct backlight_device * devm_of_find_backlight(struct device *dev) { return NULL; } #endif #endif reset.h 0000644 00000037372 14722070374 0006061 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ #include <linux/err.h> #include <linux/errno.h> 
#include <linux/types.h> struct device; struct device_node; struct reset_control; #ifdef CONFIG_RESET_CONTROLLER int reset_control_reset(struct reset_control *rstc); int reset_control_assert(struct reset_control *rstc); int reset_control_deassert(struct reset_control *rstc); int reset_control_status(struct reset_control *rstc); int reset_control_acquire(struct reset_control *rstc); void reset_control_release(struct reset_control *rstc); struct reset_control *__of_reset_control_get(struct device_node *node, const char *id, int index, bool shared, bool optional, bool acquired); struct reset_control *__reset_control_get(struct device *dev, const char *id, int index, bool shared, bool optional, bool acquired); void reset_control_put(struct reset_control *rstc); int __device_reset(struct device *dev, bool optional); struct reset_control *__devm_reset_control_get(struct device *dev, const char *id, int index, bool shared, bool optional, bool acquired); struct reset_control *devm_reset_control_array_get(struct device *dev, bool shared, bool optional); struct reset_control *of_reset_control_array_get(struct device_node *np, bool shared, bool optional, bool acquired); int reset_control_get_count(struct device *dev); #else static inline int reset_control_reset(struct reset_control *rstc) { return 0; } static inline int reset_control_assert(struct reset_control *rstc) { return 0; } static inline int reset_control_deassert(struct reset_control *rstc) { return 0; } static inline int reset_control_status(struct reset_control *rstc) { return 0; } static inline int reset_control_acquire(struct reset_control *rstc) { return 0; } static inline void reset_control_release(struct reset_control *rstc) { } static inline void reset_control_put(struct reset_control *rstc) { } static inline int __device_reset(struct device *dev, bool optional) { return optional ? 0 : -ENOTSUPP; } static inline struct reset_control *__of_reset_control_get( struct device_node *node, const char *id, int index, bool shared, bool optional, bool acquired) { return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline struct reset_control *__reset_control_get( struct device *dev, const char *id, int index, bool shared, bool optional, bool acquired) { return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline struct reset_control *__devm_reset_control_get( struct device *dev, const char *id, int index, bool shared, bool optional, bool acquired) { return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline struct reset_control * devm_reset_control_array_get(struct device *dev, bool shared, bool optional) { return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline struct reset_control * of_reset_control_array_get(struct device_node *np, bool shared, bool optional, bool acquired) { return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline int reset_control_get_count(struct device *dev) { return -ENOENT; } #endif /* CONFIG_RESET_CONTROLLER */ static inline int __must_check device_reset(struct device *dev) { return __device_reset(dev, false); } static inline int device_reset_optional(struct device *dev) { return __device_reset(dev, true); } /** * reset_control_get_exclusive - Lookup and obtain an exclusive reference * to a reset controller. * @dev: device to be reset by the controller * @id: reset line name * * Returns a struct reset_control or IS_ERR() condition containing errno. * If this function is called more than once for the same reset_control it will * return -EBUSY. 
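 *
 * A minimal probe-path sketch (the "ahb" line name and rstc variable
 * are illustrative):
 *
 *	rstc = reset_control_get_exclusive(dev, "ahb");
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *	reset_control_assert(rstc);
 *	...
 *	reset_control_deassert(rstc);
 *	reset_control_put(rstc);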
* * See reset_control_get_shared() for details on shared references to * reset-controls. * * Use of id names is optional. */ static inline struct reset_control * __must_check reset_control_get_exclusive(struct device *dev, const char *id) { return __reset_control_get(dev, id, 0, false, false, true); } /** * reset_control_get_exclusive_released - Lookup and obtain a temporarily * exclusive reference to a reset * controller. * @dev: device to be reset by the controller * @id: reset line name * * Returns a struct reset_control or IS_ERR() condition containing errno. * reset-controls returned by this function must be acquired via * reset_control_acquire() before they can be used and should be released * via reset_control_release() afterwards. * * Use of id names is optional. */ static inline struct reset_control * __must_check reset_control_get_exclusive_released(struct device *dev, const char *id) { return __reset_control_get(dev, id, 0, false, false, false); } /** * reset_control_get_shared - Lookup and obtain a shared reference to a * reset controller. * @dev: device to be reset by the controller * @id: reset line name * * Returns a struct reset_control or IS_ERR() condition containing errno. * This function is intended for use with reset-controls which are shared * between hardware blocks. * * When a reset-control is shared, the behavior of reset_control_assert / * deassert is changed, the reset-core will keep track of a deassert_count * and only (re-)assert the reset after reset_control_assert has been called * as many times as reset_control_deassert was called. Also see the remark * about shared reset-controls in the reset_control_assert docs. * * Calling reset_control_assert without first calling reset_control_deassert * is not allowed on a shared reset control. Calling reset_control_reset is * also not allowed on a shared reset control. * * Use of id names is optional. */ static inline struct reset_control *reset_control_get_shared( struct device *dev, const char *id) { return __reset_control_get(dev, id, 0, true, false, false); } static inline struct reset_control *reset_control_get_optional_exclusive( struct device *dev, const char *id) { return __reset_control_get(dev, id, 0, false, true, true); } static inline struct reset_control *reset_control_get_optional_shared( struct device *dev, const char *id) { return __reset_control_get(dev, id, 0, true, true, false); } /** * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference * to a reset controller. * @node: device to be reset by the controller * @id: reset line name * * Returns a struct reset_control or IS_ERR() condition containing errno. * * Use of id names is optional. */ static inline struct reset_control *of_reset_control_get_exclusive( struct device_node *node, const char *id) { return __of_reset_control_get(node, id, 0, false, false, true); } /** * of_reset_control_get_shared - Lookup and obtain a shared reference * to a reset controller. * @node: device to be reset by the controller * @id: reset line name * * When a reset-control is shared, the behavior of reset_control_assert / * deassert is changed, the reset-core will keep track of a deassert_count * and only (re-)assert the reset after reset_control_assert has been called * as many times as reset_control_deassert was called. Also see the remark * about shared reset-controls in the reset_control_assert docs. * * Calling reset_control_assert without first calling reset_control_deassert * is not allowed on a shared reset control.
Calling reset_control_reset is * also not allowed on a shared reset control. * Returns a struct reset_control or IS_ERR() condition containing errno. * * Use of id names is optional. */ static inline struct reset_control *of_reset_control_get_shared( struct device_node *node, const char *id) { return __of_reset_control_get(node, id, 0, true, false, false); } /** * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive * reference to a reset controller * by index. * @node: device to be reset by the controller * @index: index of the reset controller * * This is to be used to perform a list of resets for a device or power domain * in whatever order. Returns a struct reset_control or IS_ERR() condition * containing errno. */ static inline struct reset_control *of_reset_control_get_exclusive_by_index( struct device_node *node, int index) { return __of_reset_control_get(node, NULL, index, false, false, true); } /** * of_reset_control_get_shared_by_index - Lookup and obtain a shared * reference to a reset controller * by index. * @node: device to be reset by the controller * @index: index of the reset controller * * When a reset-control is shared, the behavior of reset_control_assert / * deassert is changed, the reset-core will keep track of a deassert_count * and only (re-)assert the reset after reset_control_assert has been called * as many times as reset_control_deassert was called. Also see the remark * about shared reset-controls in the reset_control_assert docs. * * Calling reset_control_assert without first calling reset_control_deassert * is not allowed on a shared reset control. Calling reset_control_reset is * also not allowed on a shared reset control. * Returns a struct reset_control or IS_ERR() condition containing errno. * * This is to be used to perform a list of resets for a device or power domain * in whatever order. Returns a struct reset_control or IS_ERR() condition * containing errno. */ static inline struct reset_control *of_reset_control_get_shared_by_index( struct device_node *node, int index) { return __of_reset_control_get(node, NULL, index, true, false, false); } /** * devm_reset_control_get_exclusive - resource managed * reset_control_get_exclusive() * @dev: device to be reset by the controller * @id: reset line name * * Managed reset_control_get_exclusive(). For reset controllers returned * from this function, reset_control_put() is called automatically on driver * detach. * * See reset_control_get_exclusive() for more information. */ static inline struct reset_control * __must_check devm_reset_control_get_exclusive(struct device *dev, const char *id) { return __devm_reset_control_get(dev, id, 0, false, false, true); } /** * devm_reset_control_get_exclusive_released - resource managed * reset_control_get_exclusive_released() * @dev: device to be reset by the controller * @id: reset line name * * Managed reset_control_get_exclusive_released(). For reset controllers * returned from this function, reset_control_put() is called automatically on * driver detach. * * See reset_control_get_exclusive_released() for more information. */ static inline struct reset_control * __must_check devm_reset_control_get_exclusive_released(struct device *dev, const char *id) { return __devm_reset_control_get(dev, id, 0, false, false, false); } /** * devm_reset_control_get_shared - resource managed reset_control_get_shared() * @dev: device to be reset by the controller * @id: reset line name * * Managed reset_control_get_shared(). 
For reset controllers returned from * this function, reset_control_put() is called automatically on driver detach. * See reset_control_get_shared() for more information. */ static inline struct reset_control *devm_reset_control_get_shared( struct device *dev, const char *id) { return __devm_reset_control_get(dev, id, 0, true, false, false); } static inline struct reset_control *devm_reset_control_get_optional_exclusive( struct device *dev, const char *id) { return __devm_reset_control_get(dev, id, 0, false, true, true); } static inline struct reset_control *devm_reset_control_get_optional_shared( struct device *dev, const char *id) { return __devm_reset_control_get(dev, id, 0, true, true, false); } /** * devm_reset_control_get_exclusive_by_index - resource managed * reset_control_get_exclusive() * @dev: device to be reset by the controller * @index: index of the reset controller * * Managed reset_control_get_exclusive(). For reset controllers returned from * this function, reset_control_put() is called automatically on driver * detach. * * See reset_control_get_exclusive() for more information. */ static inline struct reset_control * devm_reset_control_get_exclusive_by_index(struct device *dev, int index) { return __devm_reset_control_get(dev, NULL, index, false, false, true); } /** * devm_reset_control_get_shared_by_index - resource managed * reset_control_get_shared * @dev: device to be reset by the controller * @index: index of the reset controller * * Managed reset_control_get_shared(). For reset controllers returned from * this function, reset_control_put() is called automatically on driver detach. * See reset_control_get_shared() for more information. */ static inline struct reset_control * devm_reset_control_get_shared_by_index(struct device *dev, int index) { return __devm_reset_control_get(dev, NULL, index, true, false, false); } /* * TEMPORARY calls to use during transition: * * of_reset_control_get() => of_reset_control_get_exclusive() * * These inline function calls will be removed once all consumers * have been moved over to the new explicit API. 
*/ static inline struct reset_control *of_reset_control_get( struct device_node *node, const char *id) { return of_reset_control_get_exclusive(node, id); } static inline struct reset_control *of_reset_control_get_by_index( struct device_node *node, int index) { return of_reset_control_get_exclusive_by_index(node, index); } static inline struct reset_control *devm_reset_control_get( struct device *dev, const char *id) { return devm_reset_control_get_exclusive(dev, id); } static inline struct reset_control *devm_reset_control_get_optional( struct device *dev, const char *id) { return devm_reset_control_get_optional_exclusive(dev, id); } static inline struct reset_control *devm_reset_control_get_by_index( struct device *dev, int index) { return devm_reset_control_get_exclusive_by_index(dev, index); } /* * APIs to manage a list of reset controllers */ static inline struct reset_control * devm_reset_control_array_get_exclusive(struct device *dev) { return devm_reset_control_array_get(dev, false, false); } static inline struct reset_control * devm_reset_control_array_get_shared(struct device *dev) { return devm_reset_control_array_get(dev, true, false); } static inline struct reset_control * devm_reset_control_array_get_optional_exclusive(struct device *dev) { return devm_reset_control_array_get(dev, false, true); } static inline struct reset_control * devm_reset_control_array_get_optional_shared(struct device *dev) { return devm_reset_control_array_get(dev, true, true); } static inline struct reset_control * of_reset_control_array_get_exclusive(struct device_node *node) { return of_reset_control_array_get(node, false, false, true); } static inline struct reset_control * of_reset_control_array_get_exclusive_released(struct device_node *node) { return of_reset_control_array_get(node, false, false, false); } static inline struct reset_control * of_reset_control_array_get_shared(struct device_node *node) { return of_reset_control_array_get(node, true, false, true); } static inline struct reset_control * of_reset_control_array_get_optional_exclusive(struct device_node *node) { return of_reset_control_array_get(node, false, true, true); } static inline struct reset_control * of_reset_control_array_get_optional_shared(struct device_node *node) { return of_reset_control_array_get(node, true, true, true); } #endif hw_breakpoint.h 0000644 00000010146 14722070374 0007561 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HW_BREAKPOINT_H #define _LINUX_HW_BREAKPOINT_H #include <linux/perf_event.h> #include <uapi/linux/hw_breakpoint.h> #ifdef CONFIG_HAVE_HW_BREAKPOINT extern int __init init_hw_breakpoint(void); static inline void hw_breakpoint_init(struct perf_event_attr *attr) { memset(attr, 0, sizeof(*attr)); attr->type = PERF_TYPE_BREAKPOINT; attr->size = sizeof(*attr); /* * As it's for in-kernel or ptrace use, we want it to be pinned * and to call its callback on every hit.
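 *
 * A caller then fills in the breakpoint details, roughly like this
 * (sketch; addr is an illustrative variable):
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;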
*/ attr->pinned = 1; attr->sample_period = 1; } static inline void ptrace_breakpoint_init(struct perf_event_attr *attr) { hw_breakpoint_init(attr); attr->exclude_kernel = 1; } static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) { return bp->attr.bp_addr; } static inline int hw_breakpoint_type(struct perf_event *bp) { return bp->attr.bp_type; } static inline unsigned long hw_breakpoint_len(struct perf_event *bp) { return bp->attr.bp_len; } extern struct perf_event * register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context, struct task_struct *tsk); /* FIXME: only change from the attr, and don't unregister */ extern int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr); extern int modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr, bool check); /* * Kernel breakpoints are not associated with any particular thread. */ extern struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context, int cpu); extern struct perf_event * __percpu * register_wide_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context); extern int register_perf_hw_breakpoint(struct perf_event *bp); extern int __register_perf_hw_breakpoint(struct perf_event *bp); extern void unregister_hw_breakpoint(struct perf_event *bp); extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events); extern int dbg_reserve_bp_slot(struct perf_event *bp); extern int dbg_release_bp_slot(struct perf_event *bp); extern int reserve_bp_slot(struct perf_event *bp); extern void release_bp_slot(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) { return &bp->hw.info; } #else /* !CONFIG_HAVE_HW_BREAKPOINT */ static inline int __init init_hw_breakpoint(void) { return 0; } static inline struct perf_event * register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context, struct task_struct *tsk) { return NULL; } static inline int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { return -ENOSYS; } static inline int modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr, bool check) { return -ENOSYS; } static inline struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context, int cpu) { return NULL; } static inline struct perf_event * __percpu * register_wide_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context) { return NULL; } static inline int register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } static inline int __register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } static inline void unregister_hw_breakpoint(struct perf_event *bp) { } static inline void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { } static inline int reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; } static inline void release_bp_slot(struct perf_event *bp) { } static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { } static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) { return NULL; } #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif /* _LINUX_HW_BREAKPOINT_H */ mISDNif.h 0000644 00000035646 
14722070374 0006172 0 ustar 00 /* * * Author Karsten Keil <kkeil@novell.com> * * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This code is free software; you can redistribute it and/or modify * it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE * version 2.1 as published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU LESSER GENERAL PUBLIC LICENSE for more details. * */ #ifndef mISDNIF_H #define mISDNIF_H #include <stdarg.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/socket.h> /* * ABI Version 32 bit * * <8 bit> Major version * - changed if any interface becomes backwards incompatible * * <8 bit> Minor version * - changed if any interface is extended but backwards compatible * * <16 bit> Release number * - should be incremented on every checkin */ #define MISDN_MAJOR_VERSION 1 #define MISDN_MINOR_VERSION 1 #define MISDN_RELEASE 29 /* primitives for information exchange * general format * <16 bit 0 > * <8 bit command> * BIT 8 = 1 LAYER private * BIT 7 = 1 answer * BIT 6 = 1 DATA * <8 bit target layer mask> * * Layer = 00 is reserved for general commands Layer = 01 L2 -> HW Layer = 02 HW -> L2 Layer = 04 L3 -> L2 Layer = 08 L2 -> L3 * Layer = FF is reserved for broadcast commands */ #define MISDN_CMDMASK 0xff00 #define MISDN_LAYERMASK 0x00ff /* general commands */ #define OPEN_CHANNEL 0x0100 #define CLOSE_CHANNEL 0x0200 #define CONTROL_CHANNEL 0x0300 #define CHECK_DATA 0x0400 /* layer 2 -> layer 1 */ #define PH_ACTIVATE_REQ 0x0101 #define PH_DEACTIVATE_REQ 0x0201 #define PH_DATA_REQ 0x2001 #define MPH_ACTIVATE_REQ 0x0501 #define MPH_DEACTIVATE_REQ 0x0601 #define MPH_INFORMATION_REQ 0x0701 #define PH_CONTROL_REQ 0x0801 /* layer 1 -> layer 2 */ #define PH_ACTIVATE_IND 0x0102 #define PH_ACTIVATE_CNF 0x4102 #define PH_DEACTIVATE_IND 0x0202 #define PH_DEACTIVATE_CNF 0x4202 #define PH_DATA_IND 0x2002 #define PH_DATA_E_IND 0x3002 #define MPH_ACTIVATE_IND 0x0502 #define MPH_DEACTIVATE_IND 0x0602 #define MPH_INFORMATION_IND 0x0702 #define PH_DATA_CNF 0x6002 #define PH_CONTROL_IND 0x0802 #define PH_CONTROL_CNF 0x4802 /* layer 3 -> layer 2 */ #define DL_ESTABLISH_REQ 0x1004 #define DL_RELEASE_REQ 0x1104 #define DL_DATA_REQ 0x3004 #define DL_UNITDATA_REQ 0x3104 #define DL_INFORMATION_REQ 0x0004 /* layer 2 -> layer 3 */ #define DL_ESTABLISH_IND 0x1008 #define DL_ESTABLISH_CNF 0x5008 #define DL_RELEASE_IND 0x1108 #define DL_RELEASE_CNF 0x5108 #define DL_DATA_IND 0x3008 #define DL_UNITDATA_IND 0x3108 #define DL_INFORMATION_IND 0x0008 /* internal layer 2 management */ #define MDL_ASSIGN_REQ 0x1804 #define MDL_ASSIGN_IND 0x1904 #define MDL_REMOVE_REQ 0x1A04 #define MDL_REMOVE_IND 0x1B04 #define MDL_STATUS_UP_IND 0x1C04 #define MDL_STATUS_DOWN_IND 0x1D04 #define MDL_STATUS_UI_IND 0x1E04 #define MDL_ERROR_IND 0x1F04 #define MDL_ERROR_RSP 0x5F04 /* internal layer 2 */ #define DL_TIMER200_IND 0x7004 #define DL_TIMER203_IND 0x7304 #define DL_INTERN_MSG 0x7804 /* DL_INFORMATION_IND types */ #define DL_INFO_L2_CONNECT 0x0001 #define DL_INFO_L2_REMOVED 0x0002 /* PH_CONTROL types */ /* TOUCH TONE IS 0x20XX XX "0"..."9", "A","B","C","D","*","#" */ #define DTMF_TONE_VAL 0x2000 #define DTMF_TONE_MASK 0x007F #define DTMF_TONE_START 0x2100 #define DTMF_TONE_STOP 0x2200 #define DTMF_HFC_COEF 0x4000 #define DSP_CONF_JOIN 0x2403 #define DSP_CONF_SPLIT 0x2404 #define DSP_RECEIVE_OFF 0x2405 #define
DSP_RECEIVE_ON 0x2406 #define DSP_ECHO_ON 0x2407 #define DSP_ECHO_OFF 0x2408 #define DSP_MIX_ON 0x2409 #define DSP_MIX_OFF 0x240a #define DSP_DELAY 0x240b #define DSP_JITTER 0x240c #define DSP_TXDATA_ON 0x240d #define DSP_TXDATA_OFF 0x240e #define DSP_TX_DEJITTER 0x240f #define DSP_TX_DEJ_OFF 0x2410 #define DSP_TONE_PATT_ON 0x2411 #define DSP_TONE_PATT_OFF 0x2412 #define DSP_VOL_CHANGE_TX 0x2413 #define DSP_VOL_CHANGE_RX 0x2414 #define DSP_BF_ENABLE_KEY 0x2415 #define DSP_BF_DISABLE 0x2416 #define DSP_BF_ACCEPT 0x2416 #define DSP_BF_REJECT 0x2417 #define DSP_PIPELINE_CFG 0x2418 #define HFC_VOL_CHANGE_TX 0x2601 #define HFC_VOL_CHANGE_RX 0x2602 #define HFC_SPL_LOOP_ON 0x2603 #define HFC_SPL_LOOP_OFF 0x2604 /* for T30 FAX and analog modem */ #define HW_MOD_FRM 0x4000 #define HW_MOD_FRH 0x4001 #define HW_MOD_FTM 0x4002 #define HW_MOD_FTH 0x4003 #define HW_MOD_FTS 0x4004 #define HW_MOD_CONNECT 0x4010 #define HW_MOD_OK 0x4011 #define HW_MOD_NOCARR 0x4012 #define HW_MOD_FCERROR 0x4013 #define HW_MOD_READY 0x4014 #define HW_MOD_LASTDATA 0x4015 /* DSP_TONE_PATT_ON parameter */ #define TONE_OFF 0x0000 #define TONE_GERMAN_DIALTONE 0x0001 #define TONE_GERMAN_OLDDIALTONE 0x0002 #define TONE_AMERICAN_DIALTONE 0x0003 #define TONE_GERMAN_DIALPBX 0x0004 #define TONE_GERMAN_OLDDIALPBX 0x0005 #define TONE_AMERICAN_DIALPBX 0x0006 #define TONE_GERMAN_RINGING 0x0007 #define TONE_GERMAN_OLDRINGING 0x0008 #define TONE_AMERICAN_RINGPBX 0x000b #define TONE_GERMAN_RINGPBX 0x000c #define TONE_GERMAN_OLDRINGPBX 0x000d #define TONE_AMERICAN_RINGING 0x000e #define TONE_GERMAN_BUSY 0x000f #define TONE_GERMAN_OLDBUSY 0x0010 #define TONE_AMERICAN_BUSY 0x0011 #define TONE_GERMAN_HANGUP 0x0012 #define TONE_GERMAN_OLDHANGUP 0x0013 #define TONE_AMERICAN_HANGUP 0x0014 #define TONE_SPECIAL_INFO 0x0015 #define TONE_GERMAN_GASSENBESETZT 0x0016 #define TONE_GERMAN_AUFSCHALTTON 0x0016 /* MPH_INFORMATION_IND */ #define L1_SIGNAL_LOS_OFF 0x0010 #define L1_SIGNAL_LOS_ON 0x0011 #define L1_SIGNAL_AIS_OFF 0x0012 #define L1_SIGNAL_AIS_ON 0x0013 #define L1_SIGNAL_RDI_OFF 0x0014 #define L1_SIGNAL_RDI_ON 0x0015 #define L1_SIGNAL_SLIP_RX 0x0020 #define L1_SIGNAL_SLIP_TX 0x0021 /* * protocol ids * D channel 1-31 * B channel 33 - 63 */ #define ISDN_P_NONE 0 #define ISDN_P_BASE 0 #define ISDN_P_TE_S0 0x01 #define ISDN_P_NT_S0 0x02 #define ISDN_P_TE_E1 0x03 #define ISDN_P_NT_E1 0x04 #define ISDN_P_TE_UP0 0x05 #define ISDN_P_NT_UP0 0x06 #define IS_ISDN_P_TE(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_TE_E1) || \ (p == ISDN_P_TE_UP0) || (p == ISDN_P_LAPD_TE)) #define IS_ISDN_P_NT(p) ((p == ISDN_P_NT_S0) || (p == ISDN_P_NT_E1) || \ (p == ISDN_P_NT_UP0) || (p == ISDN_P_LAPD_NT)) #define IS_ISDN_P_S0(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_NT_S0)) #define IS_ISDN_P_E1(p) ((p == ISDN_P_TE_E1) || (p == ISDN_P_NT_E1)) #define IS_ISDN_P_UP0(p) ((p == ISDN_P_TE_UP0) || (p == ISDN_P_NT_UP0)) #define ISDN_P_LAPD_TE 0x10 #define ISDN_P_LAPD_NT 0x11 #define ISDN_P_B_MASK 0x1f #define ISDN_P_B_START 0x20 #define ISDN_P_B_RAW 0x21 #define ISDN_P_B_HDLC 0x22 #define ISDN_P_B_X75SLP 0x23 #define ISDN_P_B_L2DTMF 0x24 #define ISDN_P_B_L2DSP 0x25 #define ISDN_P_B_L2DSPHDLC 0x26 #define ISDN_P_B_T30_FAX 0x27 #define ISDN_P_B_MODEM_ASYNC 0x28 #define OPTION_L2_PMX 1 #define OPTION_L2_PTP 2 #define OPTION_L2_FIXEDTEI 3 #define OPTION_L2_CLEANUP 4 #define OPTION_L1_HOLD 5 /* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */ #define MISDN_MAX_IDLEN 20 struct mISDNhead { unsigned int prim; unsigned int id; } __packed; #define MISDN_HEADER_LEN sizeof(struct mISDNhead) 
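/*
 * Informal example of the primitive encoding described above:
 * PH_DATA_REQ (0x2001) carries command 0x20 (the DATA bit) in
 * MISDN_CMDMASK and target layer 0x01 (L2 -> HW) in MISDN_LAYERMASK.
 */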
#define MAX_DATA_SIZE 2048 #define MAX_DATA_MEM (MAX_DATA_SIZE + MISDN_HEADER_LEN) #define MAX_DFRAME_LEN 260 #define MISDN_ID_ADDR_MASK 0xFFFF #define MISDN_ID_TEI_MASK 0xFF00 #define MISDN_ID_SAPI_MASK 0x00FF #define MISDN_ID_TEI_ANY 0x7F00 #define MISDN_ID_ANY 0xFFFF #define MISDN_ID_NONE 0xFFFE #define GROUP_TEI 127 #define TEI_SAPI 63 #define CTRL_SAPI 0 #define MISDN_MAX_CHANNEL 127 #define MISDN_CHMAP_SIZE ((MISDN_MAX_CHANNEL + 1) >> 3) #define SOL_MISDN 0 struct sockaddr_mISDN { sa_family_t family; unsigned char dev; unsigned char channel; unsigned char sapi; unsigned char tei; }; struct mISDNversion { unsigned char major; unsigned char minor; unsigned short release; }; struct mISDN_devinfo { u_int id; u_int Dprotocols; u_int Bprotocols; u_int protocol; u_char channelmap[MISDN_CHMAP_SIZE]; u_int nrbchan; char name[MISDN_MAX_IDLEN]; }; struct mISDN_devrename { u_int id; char name[MISDN_MAX_IDLEN]; /* new name */ }; /* MPH_INFORMATION_REQ payload */ struct ph_info_ch { __u32 protocol; __u64 Flags; }; struct ph_info_dch { struct ph_info_ch ch; __u16 state; __u16 num_bch; }; struct ph_info { struct ph_info_dch dch; struct ph_info_ch bch[]; }; /* timer device ioctl */ #define IMADDTIMER _IOR('I', 64, int) #define IMDELTIMER _IOR('I', 65, int) /* socket ioctls */ #define IMGETVERSION _IOR('I', 66, int) #define IMGETCOUNT _IOR('I', 67, int) #define IMGETDEVINFO _IOR('I', 68, int) #define IMCTRLREQ _IOR('I', 69, int) #define IMCLEAR_L2 _IOR('I', 70, int) #define IMSETDEVNAME _IOR('I', 71, struct mISDN_devrename) #define IMHOLD_L1 _IOR('I', 72, int) static inline int test_channelmap(u_int nr, u_char *map) { if (nr <= MISDN_MAX_CHANNEL) return map[nr >> 3] & (1 << (nr & 7)); else return 0; } static inline void set_channelmap(u_int nr, u_char *map) { map[nr >> 3] |= (1 << (nr & 7)); } static inline void clear_channelmap(u_int nr, u_char *map) { map[nr >> 3] &= ~(1 << (nr & 7)); } /* CONTROL_CHANNEL parameters */ #define MISDN_CTRL_GETOP 0x0000 #define MISDN_CTRL_LOOP 0x0001 #define MISDN_CTRL_CONNECT 0x0002 #define MISDN_CTRL_DISCONNECT 0x0004 #define MISDN_CTRL_RX_BUFFER 0x0008 #define MISDN_CTRL_PCMCONNECT 0x0010 #define MISDN_CTRL_PCMDISCONNECT 0x0020 #define MISDN_CTRL_SETPEER 0x0040 #define MISDN_CTRL_UNSETPEER 0x0080 #define MISDN_CTRL_RX_OFF 0x0100 #define MISDN_CTRL_FILL_EMPTY 0x0200 #define MISDN_CTRL_GETPEER 0x0400 #define MISDN_CTRL_L1_TIMER3 0x0800 #define MISDN_CTRL_HW_FEATURES_OP 0x2000 #define MISDN_CTRL_HW_FEATURES 0x2001 #define MISDN_CTRL_HFC_OP 0x4000 #define MISDN_CTRL_HFC_PCM_CONN 0x4001 #define MISDN_CTRL_HFC_PCM_DISC 0x4002 #define MISDN_CTRL_HFC_CONF_JOIN 0x4003 #define MISDN_CTRL_HFC_CONF_SPLIT 0x4004 #define MISDN_CTRL_HFC_RECEIVE_OFF 0x4005 #define MISDN_CTRL_HFC_RECEIVE_ON 0x4006 #define MISDN_CTRL_HFC_ECHOCAN_ON 0x4007 #define MISDN_CTRL_HFC_ECHOCAN_OFF 0x4008 #define MISDN_CTRL_HFC_WD_INIT 0x4009 #define MISDN_CTRL_HFC_WD_RESET 0x400A /* special RX buffer value for a MISDN_CTRL_RX_BUFFER request: request.p1 is the minimum * buffer size, request.p2 the maximum. Using MISDN_CTRL_RX_SIZE_IGNORE will * not change the value, but still read back the actual setting.
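 *
 * For example (sketch), to query the current sizes without changing
 * them:
 *
 *	struct mISDN_ctrl_req cq = {
 *		.op = MISDN_CTRL_RX_BUFFER,
 *		.p1 = MISDN_CTRL_RX_SIZE_IGNORE,
 *		.p2 = MISDN_CTRL_RX_SIZE_IGNORE,
 *	};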
*/ #define MISDN_CTRL_RX_SIZE_IGNORE -1 /* socket options */ #define MISDN_TIME_STAMP 0x0001 struct mISDN_ctrl_req { int op; int channel; int p1; int p2; }; /* muxer options */ #define MISDN_OPT_ALL 1 #define MISDN_OPT_TEIMGR 2 #ifdef __KERNEL__ #include <linux/list.h> #include <linux/skbuff.h> #include <linux/net.h> #include <net/sock.h> #include <linux/completion.h> #define DEBUG_CORE 0x000000ff #define DEBUG_CORE_FUNC 0x00000002 #define DEBUG_SOCKET 0x00000004 #define DEBUG_MANAGER 0x00000008 #define DEBUG_SEND_ERR 0x00000010 #define DEBUG_MSG_THREAD 0x00000020 #define DEBUG_QUEUE_FUNC 0x00000040 #define DEBUG_L1 0x0000ff00 #define DEBUG_L1_FSM 0x00000200 #define DEBUG_L2 0x00ff0000 #define DEBUG_L2_FSM 0x00020000 #define DEBUG_L2_CTRL 0x00040000 #define DEBUG_L2_RECV 0x00080000 #define DEBUG_L2_TEI 0x00100000 #define DEBUG_L2_TEIFSM 0x00200000 #define DEBUG_TIMER 0x01000000 #define DEBUG_CLOCK 0x02000000 #define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0]) #define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim) #define mISDN_HEAD_ID(s) (((struct mISDNhead *)&s->cb[0])->id) /* socket states */ #define MISDN_OPEN 1 #define MISDN_BOUND 2 #define MISDN_CLOSED 3 struct mISDNchannel; struct mISDNdevice; struct mISDNstack; struct mISDNclock; struct channel_req { u_int protocol; struct sockaddr_mISDN adr; struct mISDNchannel *ch; }; typedef int (ctrl_func_t)(struct mISDNchannel *, u_int, void *); typedef int (send_func_t)(struct mISDNchannel *, struct sk_buff *); typedef int (create_func_t)(struct channel_req *); struct Bprotocol { struct list_head list; char *name; u_int Bprotocols; create_func_t *create; }; struct mISDNchannel { struct list_head list; u_int protocol; u_int nr; u_long opt; u_int addr; struct mISDNstack *st; struct mISDNchannel *peer; send_func_t *send; send_func_t *recv; ctrl_func_t *ctrl; }; struct mISDN_sock_list { struct hlist_head head; rwlock_t lock; }; struct mISDN_sock { struct sock sk; struct mISDNchannel ch; u_int cmask; struct mISDNdevice *dev; }; struct mISDNdevice { struct mISDNchannel D; u_int id; u_int Dprotocols; u_int Bprotocols; u_int nrbchan; u_char channelmap[MISDN_CHMAP_SIZE]; struct list_head bchannels; struct mISDNchannel *teimgr; struct device dev; }; struct mISDNstack { u_long status; struct mISDNdevice *dev; struct task_struct *thread; struct completion *notify; wait_queue_head_t workq; struct sk_buff_head msgq; struct list_head layer2; struct mISDNchannel *layer1; struct mISDNchannel own; struct mutex lmutex; /* protect lists */ struct mISDN_sock_list l1sock; #ifdef MISDN_MSG_STATS u_int msg_cnt; u_int sleep_cnt; u_int stopped_cnt; #endif }; typedef int (clockctl_func_t)(void *, int); struct mISDNclock { struct list_head list; char name[64]; int pri; clockctl_func_t *ctl; void *priv; }; /* global alloc/queue functions */ static inline struct sk_buff * mI_alloc_skb(unsigned int len, gfp_t gfp_mask) { struct sk_buff *skb; skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask); if (likely(skb)) skb_reserve(skb, MISDN_HEADER_LEN); return skb; } static inline struct sk_buff * _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) { struct sk_buff *skb = mI_alloc_skb(len, gfp_mask); struct mISDNhead *hh; if (!skb) return NULL; if (len) skb_put_data(skb, dp, len); hh = mISDN_HEAD_P(skb); hh->prim = prim; hh->id = id; return skb; } static inline void _queue_data(struct mISDNchannel *ch, u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) { struct sk_buff *skb; if (!ch->peer) return; skb = _alloc_mISDN_skb(prim, id, len, 
dp, gfp_mask); if (!skb) return; if (ch->recv(ch->peer, skb)) dev_kfree_skb(skb); } /* global register/unregister functions */ extern int mISDN_register_device(struct mISDNdevice *, struct device *parent, char *name); extern void mISDN_unregister_device(struct mISDNdevice *); extern int mISDN_register_Bprotocol(struct Bprotocol *); extern void mISDN_unregister_Bprotocol(struct Bprotocol *); extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *, void *); extern void mISDN_unregister_clock(struct mISDNclock *); static inline struct mISDNdevice *dev_to_mISDN(struct device *dev) { if (dev) return dev_get_drvdata(dev); else return NULL; } extern void set_channel_address(struct mISDNchannel *, u_int, u_int); extern void mISDN_clock_update(struct mISDNclock *, int, ktime_t *); extern unsigned short mISDN_clock_get(void); extern const char *mISDNDevName4ch(struct mISDNchannel *); #endif /* __KERNEL__ */ #endif /* mISDNIF_H */ marvell_phy.h 0000644 00000002637 14722070374 0007255 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _MARVELL_PHY_H #define _MARVELL_PHY_H /* Mask used for ID comparisons */ #define MARVELL_PHY_ID_MASK 0xfffffff0 /* Known PHY IDs */ #define MARVELL_PHY_ID_88E1101 0x01410c60 #define MARVELL_PHY_ID_88E1112 0x01410c90 #define MARVELL_PHY_ID_88E1111 0x01410cc0 #define MARVELL_PHY_ID_88E1118 0x01410e10 #define MARVELL_PHY_ID_88E1121R 0x01410cb0 #define MARVELL_PHY_ID_88E1145 0x01410cd0 #define MARVELL_PHY_ID_88E1149R 0x01410e50 #define MARVELL_PHY_ID_88E1240 0x01410e30 #define MARVELL_PHY_ID_88E1318S 0x01410e90 #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 #define MARVELL_PHY_ID_88E1545 0x01410ea0 #define MARVELL_PHY_ID_88E3016 0x01410e60 #define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 /* These Ethernet switch families contain embedded PHYs, but they do * not have a model ID. So the switch driver traps reads to the ID2 * register and returns the switch family ID */ #define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41 #define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90 #define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4) /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 #define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 #define MARVELL_PHY_LED0_LINK_LED1_ACTIVE 0x00000004 #endif /* _MARVELL_PHY_H */ patchkey.h 0000644 00000001365 14722070374 0006540 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * <linux/patchkey.h> -- definition of _PATCHKEY macro * * Copyright (C) 2005 Stuart Brady * * This exists because awe_voice.h defined its own _PATCHKEY and it wasn't * clear whether removing this would break anything in userspace. * * Do not include this file directly. Please use <sys/soundcard.h> instead. 
* For kernel code, use <linux/soundcard.h> */ #ifndef _LINUX_PATCHKEY_H #define _LINUX_PATCHKEY_H # include <asm/byteorder.h> #include <uapi/linux/patchkey.h> # if defined(__BIG_ENDIAN) # define _PATCHKEY(id) (0xfd00|id) # elif defined(__LITTLE_ENDIAN) # define _PATCHKEY(id) ((id<<8)|0x00fd) # else # error "could not determine byte order" # endif #endif /* _LINUX_PATCHKEY_H */ alarmtimer.h 0000644 00000003523 14722070374 0007063 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ALARMTIMER_H #define _LINUX_ALARMTIMER_H #include <linux/time.h> #include <linux/hrtimer.h> #include <linux/timerqueue.h> struct rtc_device; enum alarmtimer_type { ALARM_REALTIME, ALARM_BOOTTIME, /* Supported types end here */ ALARM_NUMTYPE, /* Used for tracing information. No usable types. */ ALARM_REALTIME_FREEZER, ALARM_BOOTTIME_FREEZER, }; enum alarmtimer_restart { ALARMTIMER_NORESTART, ALARMTIMER_RESTART, }; #define ALARMTIMER_STATE_INACTIVE 0x00 #define ALARMTIMER_STATE_ENQUEUED 0x01 /** * struct alarm - Alarm timer structure * @node: timerqueue node for adding to the event list this value * also includes the expiration time. * @timer: hrtimer used to schedule events while running * @function: Function pointer to be executed when the timer fires. * @type: Alarm type (BOOTTIME/REALTIME). * @state: Flag that represents if the alarm is set to fire or not. * @data: Internal data value. */ struct alarm { struct timerqueue_node node; struct hrtimer timer; enum alarmtimer_restart (*function)(struct alarm *, ktime_t now); enum alarmtimer_type type; int state; void *data; }; void alarm_init(struct alarm *alarm, enum alarmtimer_type type, enum alarmtimer_restart (*function)(struct alarm *, ktime_t)); void alarm_start(struct alarm *alarm, ktime_t start); void alarm_start_relative(struct alarm *alarm, ktime_t start); void alarm_restart(struct alarm *alarm); int alarm_try_to_cancel(struct alarm *alarm); int alarm_cancel(struct alarm *alarm); u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval); u64 alarm_forward_now(struct alarm *alarm, ktime_t interval); ktime_t alarm_expires_remaining(const struct alarm *alarm); /* Provide way to access the rtc device being used by alarmtimers */ struct rtc_device *alarmtimer_get_rtcdev(void); #endif nfsacl.h 0000644 00000002230 14722070374 0006166 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * File: linux/nfsacl.h * * (C) 2003 Andreas Gruenbacher <agruen@suse.de> */ #ifndef __LINUX_NFSACL_H #define __LINUX_NFSACL_H #include <linux/posix_acl.h> #include <linux/sunrpc/xdr.h> #include <uapi/linux/nfsacl.h> /* Maximum number of ACL entries over NFS */ #define NFS_ACL_MAX_ENTRIES 1024 #define NFSACL_MAXWORDS (2*(2+3*NFS_ACL_MAX_ENTRIES)) #define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \ >> PAGE_SHIFT) #define NFS_ACL_MAX_ENTRIES_INLINE (5) #define NFS_ACL_INLINE_BUFSIZE ((2*(2+3*NFS_ACL_MAX_ENTRIES_INLINE)) << 2) static inline unsigned int nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default) { unsigned int w = 16; w += max(acl_access ? 
(int)acl_access->a_count : 3, 4) * 12; if (acl_default) w += max((int)acl_default->a_count, 4) * 12; return w; } extern int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, struct posix_acl *acl, int encode_entries, int typeflag); extern int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, struct posix_acl **pacl); #endif /* __LINUX_NFSACL_H */ sys_soc.h 0000644 00000002334 14722070374 0006407 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2011 * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson. */ #ifndef __SOC_BUS_H #define __SOC_BUS_H #include <linux/device.h> struct soc_device_attribute { const char *machine; const char *family; const char *revision; const char *serial_number; const char *soc_id; const void *data; }; /** * soc_device_register - register SoC as a device * @soc_plat_dev_attr: Attributes passed from platform to be attributed to a SoC */ struct soc_device *soc_device_register( struct soc_device_attribute *soc_plat_dev_attr); /** * soc_device_unregister - unregister SoC device * @dev: SoC device to be unregistered */ void soc_device_unregister(struct soc_device *soc_dev); /** * soc_device_to_device - helper function to fetch struct device * @soc: Previously registered SoC device container */ struct device *soc_device_to_device(struct soc_device *soc); #ifdef CONFIG_SOC_BUS const struct soc_device_attribute *soc_device_match( const struct soc_device_attribute *matches); #else static inline const struct soc_device_attribute *soc_device_match( const struct soc_device_attribute *matches) { return NULL; } #endif #endif /* __SOC_BUS_H */ compiler_types.h 0000644 00000016700 14722070374 0007765 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_TYPES_H #define __LINUX_COMPILER_TYPES_H #ifndef __ASSEMBLY__ #ifdef __CHECKER__ # define __user __attribute__((noderef, address_space(1))) # define __kernel __attribute__((address_space(0))) # define __safe __attribute__((safe)) # define __force __attribute__((force)) # define __nocast __attribute__((nocast)) # define __iomem __attribute__((noderef, address_space(2))) # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) # define __percpu __attribute__((noderef, address_space(3))) # define __rcu __attribute__((noderef, address_space(4))) # define __private __attribute__((noderef)) extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) #else /* __CHECKER__ */ # ifdef STRUCTLEAK_PLUGIN # define __user __attribute__((user)) # else # define __user # endif # define __kernel # define __safe # define __force # define __nocast # define __iomem # define __chk_user_ptr(x) (void)0 # define __chk_io_ptr(x) (void)0 # define __builtin_warning(x, y...) (1) # define __must_hold(x) # define __acquires(x) # define __releases(x) # define __acquire(x) (void)0 # define __release(x) (void)0 # define __cond_lock(x,c) (c) # define __percpu # define __rcu # define __private # define ACCESS_PRIVATE(p, member) ((p)->member) #endif /* __CHECKER__ */ /* Indirect macros required for expanded argument pasting, eg. __LINE__. 
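 * For example, __PASTE(x, __LINE__) expands to an identifier such as
 * x123, whereas ___PASTE(x, __LINE__) would paste the literal token
 * __LINE__, because arguments adjacent to ## are not macro-expanded.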
*/ #define ___PASTE(a,b) a##b #define __PASTE(a,b) ___PASTE(a,b) #ifdef __KERNEL__ /* Attributes */ #include <linux/compiler_attributes.h> /* Compiler specific macros. */ #ifdef __clang__ #include <linux/compiler-clang.h> #elif defined(__INTEL_COMPILER) #include <linux/compiler-intel.h> #elif defined(__GNUC__) /* The above compilers also define __GNUC__, so order is important here. */ #include <linux/compiler-gcc.h> #else #error "Unknown compiler" #endif /* * Some architectures need to provide custom definitions of macros provided * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that * conditionally rather than using an asm-generic wrapper in order to avoid * build failures if any C compilation, which will include this file via an * -include argument in c_flags, occurs prior to the asm-generic wrappers being * generated. */ #ifdef CONFIG_HAVE_ARCH_COMPILER_H #include <asm/compiler.h> #endif struct ftrace_branch_data { const char *func; const char *file; unsigned line; union { struct { unsigned long correct; unsigned long incorrect; }; struct { unsigned long miss; unsigned long hit; }; unsigned long miss_hit[2]; }; }; struct ftrace_likely_data { struct ftrace_branch_data data; unsigned long constant; }; #ifdef CONFIG_ENABLE_MUST_CHECK #define __must_check __attribute__((__warn_unused_result__)) #else #define __must_check #endif #if defined(CC_USING_HOTPATCH) #define notrace __attribute__((hotpatch(0, 0))) #elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY) #define notrace __attribute__((patchable_function_entry(0, 0))) #else #define notrace __attribute__((__no_instrument_function__)) #endif /* Section for code which can't be instrumented at all */ #define noinstr \ noinline notrace __attribute((__section__(".noinstr.text"))) /* * it doesn't make sense on ARM (currently the only user of __naked) * to trace naked functions because then mcount is called without * stack and frame pointer being set up and there is no chance to * restore the lr register to the value before mcount was called. */ #define __naked __attribute__((__naked__)) notrace #define __compiler_offsetof(a, b) __builtin_offsetof(a, b) /* * Force always-inline if the user requests it so via the .config. * Prefer gnu_inline, so that extern inline functions do not emit an * externally visible function. This makes extern inline behave as per gnu89 * semantics rather than c99. This prevents multiple symbol definition errors * of extern inline functions at link time. * A lot of inline functions can cause havoc with function tracing. * Do not use __always_inline here, since currently it expands to inline again * (which would break users of __always_inline). */ #if !defined(CONFIG_OPTIMIZE_INLINING) #define inline inline __attribute__((__always_inline__)) __gnu_inline \ __inline_maybe_unused notrace #else #define inline inline __gnu_inline \ __inline_maybe_unused notrace #endif /* * gcc provides both __inline__ and __inline as alternate spellings of * the inline keyword, though the latter is undocumented. New kernel * code should only use the inline spelling, but some existing code * uses __inline__. Since we #define inline above, to ensure * __inline__ has the same semantics, we need this #define. * * However, the spelling __inline is strictly reserved for referring * to the bare keyword. */ #define __inline__ inline /* * GCC does not warn about unused static inline functions for -Wunused-function. * Suppress the warning in clang as well by using __maybe_unused, but enable it * for W=1 build. 
This will allow clang to find unused functions. Remove the * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings. */ #ifdef KBUILD_EXTRA_WARN1 #define __inline_maybe_unused #else #define __inline_maybe_unused __maybe_unused #endif /* * Rather than using noinline to prevent stack consumption, use * noinline_for_stack instead, for documentation reasons. */ #define noinline_for_stack noinline #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ /* * The below symbols may be defined for one or more, but not ALL, of the above * compilers. We don't consider that to be an error, so set them to nothing. * For example, some of them are for compiler specific plugins. */ #ifndef __latent_entropy # define __latent_entropy #endif #ifndef __randomize_layout # define __randomize_layout __designated_init #endif #ifndef __no_randomize_layout # define __no_randomize_layout #endif #ifndef randomized_struct_fields_start # define randomized_struct_fields_start # define randomized_struct_fields_end #endif #ifndef asm_volatile_goto #define asm_volatile_goto(x...) asm goto(x) #endif #ifdef CONFIG_CC_HAS_ASM_INLINE #define asm_inline asm __inline #else #define asm_inline asm #endif /* Are two types/vars the same type (ignoring qualifiers)? */ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) /* Is this type a native word size -- useful for atomic operations */ #define __native_word(t) \ (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) /* Helpers for emitting diagnostics in pragmas. */ #ifndef __diag #define __diag(string) #endif #ifndef __diag_GCC #define __diag_GCC(version, severity, string) #endif #define __diag_push() __diag(push) #define __diag_pop() __diag(pop) #define __diag_ignore(compiler, version, option, comment) \ __diag_ ## compiler(version, ignore, option) #define __diag_warn(compiler, version, option, comment) \ __diag_ ## compiler(version, warn, option) #define __diag_error(compiler, version, option, comment) \ __diag_ ## compiler(version, error, option) #endif /* __LINUX_COMPILER_TYPES_H */ rculist_nulls.h 0000644 00000014262 14722070374 0007632 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RCULIST_NULLS_H #define _LINUX_RCULIST_NULLS_H #ifdef __KERNEL__ /* * RCU-protected list version */ #include <linux/list_nulls.h> #include <linux/rcupdate.h> /** * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization * @n: the element to delete from the hash list. * * Note: hlist_nulls_unhashed() on the node returns true after this. It is * useful for RCU based read lockfree traversal if the writer side * must know if the list entry is still hashed or already unhashed. * * In particular, it means that we can not poison the forward pointers * that may still be used for walking the hash list and we can only * zero the pprev pointer so list_unhashed() will return true after * this. * * The caller must take whatever precautions are necessary (such as * holding appropriate locks) to avoid racing with another * list-mutation primitive, such as hlist_nulls_add_head_rcu() or * hlist_nulls_del_rcu(), running on this same list. However, it is * perfectly legal to run concurrently with the _rcu list-traversal * primitives, such as hlist_nulls_for_each_entry_rcu().
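 *
 * A read-side lookup pairing with this writer looks roughly like this
 * (sketch; obj, node, head, member and match() are illustrative):
 *
 *	rcu_read_lock();
 *	hlist_nulls_for_each_entry_rcu(obj, node, head, member)
 *		if (match(obj))
 *			break;
 *	rcu_read_unlock();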
*/ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) { if (!hlist_nulls_unhashed(n)) { __hlist_nulls_del(n); WRITE_ONCE(n->pprev, NULL); } } #define hlist_nulls_first_rcu(head) \ (*((struct hlist_nulls_node __rcu __force **)&(head)->first)) #define hlist_nulls_next_rcu(node) \ (*((struct hlist_nulls_node __rcu __force **)&(node)->next)) /** * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization * @n: the element to delete from the hash list. * * Note: hlist_nulls_unhashed() on entry does not return true after this, * the entry is in an undefined state. It is useful for RCU based * lockfree traversal. * * In particular, it means that we can not poison the forward * pointers that may still be used for walking the hash list. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() * or hlist_nulls_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_nulls_for_each_entry(). */ static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n) { __hlist_nulls_del(n); WRITE_ONCE(n->pprev, LIST_POISON2); } /** * hlist_nulls_add_head_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist_nulls, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() * or hlist_nulls_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, struct hlist_nulls_head *h) { struct hlist_nulls_node *first = h->first; n->next = first; WRITE_ONCE(n->pprev, &h->first); rcu_assign_pointer(hlist_nulls_first_rcu(h), n); if (!is_a_nulls(first)) WRITE_ONCE(first->pprev, &n->next); } /** * hlist_nulls_add_tail_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist_nulls, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() * or hlist_nulls_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, struct hlist_nulls_head *h) { struct hlist_nulls_node *i, *last = NULL; /* Note: write side code, so rcu accessors are not needed. 
*/ for (i = h->first; !is_a_nulls(i); i = i->next) last = i; if (last) { n->next = last->next; n->pprev = &last->next; rcu_assign_pointer(hlist_next_rcu(last), n); } else { hlist_nulls_add_head_rcu(n, h); } } /** * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_nulls_node to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_nulls_node within the struct. * * The barrier() is needed to make sure compiler doesn't cache first element [1], * as this loop can be restarted [2] * [1] Documentation/core-api/atomic_ops.rst around line 114 * [2] Documentation/RCU/rculist_nulls.txt around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ for (({barrier();}), \ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) /** * hlist_nulls_for_each_entry_safe - * iterate over list of given type safe against removal of list entry * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_nulls_node to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_nulls_node within the struct. */ #define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \ for (({barrier();}), \ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });) #endif #endif cper.h 0000644 00000037221 14722070374 0005661 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * UEFI Common Platform Error Record * * Copyright (C) 2010, Intel Corp. * Author: Huang Ying <ying.huang@intel.com> */ #ifndef LINUX_CPER_H #define LINUX_CPER_H #include <linux/uuid.h> #include <linux/trace_seq.h> /* CPER record signature and the size */ #define CPER_SIG_RECORD "CPER" #define CPER_SIG_SIZE 4 /* Used in signature_end field in struct cper_record_header */ #define CPER_SIG_END 0xffffffff /* * CPER record header revision, used in revision field in struct * cper_record_header */ #define CPER_RECORD_REV 0x0100 /* * CPER record length contains the CPER fields which are relevant for further * handling of a memory error in userspace (we don't carry all the fields * defined in the UEFI spec because some of them don't make any sense.) * Currently, a length of 256 should be more than enough. */ #define CPER_REC_LEN 256 /* * Severity definition for error_severity in struct cper_record_header * and section_severity in struct cper_section_descriptor */ enum { CPER_SEV_RECOVERABLE, CPER_SEV_FATAL, CPER_SEV_CORRECTED, CPER_SEV_INFORMATIONAL, }; /* * Validation bits definition for validation_bits in struct * cper_record_header. If set, corresponding fields in struct * cper_record_header contain valid information. */ #define CPER_VALID_PLATFORM_ID 0x0001 #define CPER_VALID_TIMESTAMP 0x0002 #define CPER_VALID_PARTITION_ID 0x0004 /* * Notification type used to generate error record, used in * notification_type in struct cper_record_header. These UUIDs are defined * in the UEFI spec v2.7, sec N.2.1. 
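 *
 * A consumer can classify a record by comparing notification_type against
 * these constants with guid_equal() (sketch; hdr is assumed to point to a
 * valid struct cper_record_header, and the handler names are hypothetical):
 *
 *	if (guid_equal(&hdr->notification_type, &CPER_NOTIFY_MCE))
 *		handle_mce_record(hdr);
 *	else if (guid_equal(&hdr->notification_type, &CPER_NOTIFY_PCIE))
 *		handle_pcie_record(hdr);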
*/ /* Corrected Machine Check */ #define CPER_NOTIFY_CMC \ GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ 0xEB, 0xD4, 0xF8, 0x90) /* Corrected Platform Error */ #define CPER_NOTIFY_CPE \ GUID_INIT(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \ 0xF2, 0x7E, 0xBE, 0xEE) /* Machine Check Exception */ #define CPER_NOTIFY_MCE \ GUID_INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \ 0xE1, 0x49, 0x13, 0xBB) /* PCI Express Error */ #define CPER_NOTIFY_PCIE \ GUID_INIT(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \ 0xAF, 0x67, 0xC1, 0x04) /* INIT Record (for IPF) */ #define CPER_NOTIFY_INIT \ GUID_INIT(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \ 0xD3, 0x9B, 0xC9, 0x8E) /* Non-Maskable Interrupt */ #define CPER_NOTIFY_NMI \ GUID_INIT(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \ 0x85, 0xD6, 0xE9, 0x8A) /* BOOT Error Record */ #define CPER_NOTIFY_BOOT \ GUID_INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \ 0xD4, 0x64, 0xB3, 0x8F) /* DMA Remapping Error */ #define CPER_NOTIFY_DMAR \ GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \ 0x72, 0x2D, 0xEB, 0x41) /* * Flags bits definitions for flags in struct cper_record_header * If set, the error has been recovered */ #define CPER_HW_ERROR_FLAGS_RECOVERED 0x1 /* If set, the error is for previous boot */ #define CPER_HW_ERROR_FLAGS_PREVERR 0x2 /* If set, the error is injected for testing */ #define CPER_HW_ERROR_FLAGS_SIMULATED 0x4 /* * CPER section header revision, used in revision field in struct * cper_section_descriptor */ #define CPER_SEC_REV 0x0100 /* * Validation bits definition for validation_bits in struct * cper_section_descriptor. If set, corresponding fields in struct * cper_section_descriptor contain valid information. */ #define CPER_SEC_VALID_FRU_ID 0x1 #define CPER_SEC_VALID_FRU_TEXT 0x2 /* * Flags bits definitions for flags in struct cper_section_descriptor * * If set, the section is associated with the error condition * directly, and should be focused on */ #define CPER_SEC_PRIMARY 0x0001 /* * If set, the error was not contained within the processor or memory * hierarchy and the error may have propagated to persistent storage * or network */ #define CPER_SEC_CONTAINMENT_WARNING 0x0002 /* If set, the component must be re-initialized or re-enabled prior to use */ #define CPER_SEC_RESET 0x0004 /* If set, Linux may choose to discontinue use of the resource */ #define CPER_SEC_ERROR_THRESHOLD_EXCEEDED 0x0008 /* * If set, resource could not be queried for error information due to * conflicts with other system software or resources. Some fields of * the section will be invalid */ #define CPER_SEC_RESOURCE_NOT_ACCESSIBLE 0x0010 /* * If set, action has been taken to ensure error containment (such as * poisoning data), but the error has not been fully corrected and the * data has not been consumed. Linux may choose to take further * corrective action before the data is consumed */ #define CPER_SEC_LATENT_ERROR 0x0020 /* * Section type definitions, used in section_type field in struct * cper_section_descriptor. These UUIDs are defined in the UEFI spec * v2.7, sec N.2.2. 
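 *
 * Decoders typically walk every struct cper_section_descriptor in a record
 * and dispatch on section_type (sketch; hdr and sec are assumed to point to
 * a valid record header and section descriptor of that record):
 *
 *	if (guid_equal(&sec->section_type, &CPER_SEC_PLATFORM_MEM)) {
 *		struct cper_sec_mem_err *mem =
 *			(void *)hdr + sec->section_offset;
 *		... decode the memory error section ...
 *	}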
*/ /* Processor Generic */ #define CPER_SEC_PROC_GENERIC \ GUID_INIT(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \ 0x93, 0xC4, 0xF3, 0xDB) /* Processor Specific: X86/X86_64 */ #define CPER_SEC_PROC_IA \ GUID_INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \ 0x24, 0x2B, 0x6E, 0x1D) /* Processor Specific: IA64 */ #define CPER_SEC_PROC_IPF \ GUID_INIT(0xE429FAF1, 0x3CB7, 0x11D4, 0xBC, 0xA7, 0x00, \ 0x80, 0xC7, 0x3C, 0x88, 0x81) /* Processor Specific: ARM */ #define CPER_SEC_PROC_ARM \ GUID_INIT(0xE19E3D16, 0xBC11, 0x11E4, 0x9C, 0xAA, 0xC2, 0x05, \ 0x1D, 0x5D, 0x46, 0xB0) /* Platform Memory */ #define CPER_SEC_PLATFORM_MEM \ GUID_INIT(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ 0xED, 0x7C, 0x83, 0xB1) #define CPER_SEC_PCIE \ GUID_INIT(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \ 0xCB, 0x3C, 0x6F, 0x35) /* Firmware Error Record Reference */ #define CPER_SEC_FW_ERR_REC_REF \ GUID_INIT(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \ 0x9C, 0x8E, 0x69, 0xED) /* PCI/PCI-X Bus */ #define CPER_SEC_PCI_X_BUS \ GUID_INIT(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \ 0xD3, 0xF9, 0xC9, 0xDD) /* PCI Component/Device */ #define CPER_SEC_PCI_DEV \ GUID_INIT(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \ 0x8B, 0x00, 0x13, 0x26) #define CPER_SEC_DMAR_GENERIC \ GUID_INIT(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \ 0xDE, 0x3E, 0x2C, 0x64) /* Intel VT for Directed I/O specific DMAr */ #define CPER_SEC_DMAR_VT \ GUID_INIT(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \ 0xDD, 0x93, 0xE8, 0xCF) /* IOMMU specific DMAr */ #define CPER_SEC_DMAR_IOMMU \ GUID_INIT(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \ 0xDF, 0xAA, 0x84, 0xEC) #define CPER_PROC_VALID_TYPE 0x0001 #define CPER_PROC_VALID_ISA 0x0002 #define CPER_PROC_VALID_ERROR_TYPE 0x0004 #define CPER_PROC_VALID_OPERATION 0x0008 #define CPER_PROC_VALID_FLAGS 0x0010 #define CPER_PROC_VALID_LEVEL 0x0020 #define CPER_PROC_VALID_VERSION 0x0040 #define CPER_PROC_VALID_BRAND_INFO 0x0080 #define CPER_PROC_VALID_ID 0x0100 #define CPER_PROC_VALID_TARGET_ADDRESS 0x0200 #define CPER_PROC_VALID_REQUESTOR_ID 0x0400 #define CPER_PROC_VALID_RESPONDER_ID 0x0800 #define CPER_PROC_VALID_IP 0x1000 #define CPER_MEM_VALID_ERROR_STATUS 0x0001 #define CPER_MEM_VALID_PA 0x0002 #define CPER_MEM_VALID_PA_MASK 0x0004 #define CPER_MEM_VALID_NODE 0x0008 #define CPER_MEM_VALID_CARD 0x0010 #define CPER_MEM_VALID_MODULE 0x0020 #define CPER_MEM_VALID_BANK 0x0040 #define CPER_MEM_VALID_DEVICE 0x0080 #define CPER_MEM_VALID_ROW 0x0100 #define CPER_MEM_VALID_COLUMN 0x0200 #define CPER_MEM_VALID_BIT_POSITION 0x0400 #define CPER_MEM_VALID_REQUESTOR_ID 0x0800 #define CPER_MEM_VALID_RESPONDER_ID 0x1000 #define CPER_MEM_VALID_TARGET_ID 0x2000 #define CPER_MEM_VALID_ERROR_TYPE 0x4000 #define CPER_MEM_VALID_RANK_NUMBER 0x8000 #define CPER_MEM_VALID_CARD_HANDLE 0x10000 #define CPER_MEM_VALID_MODULE_HANDLE 0x20000 #define CPER_PCIE_VALID_PORT_TYPE 0x0001 #define CPER_PCIE_VALID_VERSION 0x0002 #define CPER_PCIE_VALID_COMMAND_STATUS 0x0004 #define CPER_PCIE_VALID_DEVICE_ID 0x0008 #define CPER_PCIE_VALID_SERIAL_NUMBER 0x0010 #define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS 0x0020 #define CPER_PCIE_VALID_CAPABILITY 0x0040 #define CPER_PCIE_VALID_AER_INFO 0x0080 #define CPER_PCIE_SLOT_SHIFT 3 #define CPER_ARM_VALID_MPIDR BIT(0) #define CPER_ARM_VALID_AFFINITY_LEVEL BIT(1) #define CPER_ARM_VALID_RUNNING_STATE BIT(2) #define CPER_ARM_VALID_VENDOR_INFO BIT(3) #define CPER_ARM_INFO_VALID_MULTI_ERR BIT(0) #define CPER_ARM_INFO_VALID_FLAGS
BIT(1) #define CPER_ARM_INFO_VALID_ERR_INFO BIT(2) #define CPER_ARM_INFO_VALID_VIRT_ADDR BIT(3) #define CPER_ARM_INFO_VALID_PHYSICAL_ADDR BIT(4) #define CPER_ARM_INFO_FLAGS_FIRST BIT(0) #define CPER_ARM_INFO_FLAGS_LAST BIT(1) #define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2) #define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3) #define CPER_ARM_CACHE_ERROR 0 #define CPER_ARM_TLB_ERROR 1 #define CPER_ARM_BUS_ERROR 2 #define CPER_ARM_VENDOR_ERROR 3 #define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR #define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0) #define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1) #define CPER_ARM_ERR_VALID_LEVEL BIT(2) #define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3) #define CPER_ARM_ERR_VALID_CORRECTED BIT(4) #define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5) #define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6) #define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7) #define CPER_ARM_ERR_VALID_TIME_OUT BIT(8) #define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9) #define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10) #define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11) #define CPER_ARM_ERR_TRANSACTION_SHIFT 16 #define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1,0) #define CPER_ARM_ERR_OPERATION_SHIFT 18 #define CPER_ARM_ERR_OPERATION_MASK GENMASK(3,0) #define CPER_ARM_ERR_LEVEL_SHIFT 22 #define CPER_ARM_ERR_LEVEL_MASK GENMASK(2,0) #define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25 #define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0,0) #define CPER_ARM_ERR_CORRECTED_SHIFT 26 #define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0,0) #define CPER_ARM_ERR_PRECISE_PC_SHIFT 27 #define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0,0) #define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28 #define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0,0) #define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29 #define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1,0) #define CPER_ARM_ERR_TIME_OUT_SHIFT 31 #define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0,0) #define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32 #define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1,0) #define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34 #define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8,0) #define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43 #define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0,0) /* * All tables and structs must be byte-packed to match CPER * specification, since the tables are provided by the system BIOS */ #pragma pack(1) /* Record Header, UEFI v2.7 sec N.2.1 */ struct cper_record_header { char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */ u16 revision; /* must be CPER_RECORD_REV */ u32 signature_end; /* must be CPER_SIG_END */ u16 section_count; u32 error_severity; u32 validation_bits; u32 record_length; u64 timestamp; guid_t platform_id; guid_t partition_id; guid_t creator_id; guid_t notification_type; u64 record_id; u32 flags; u64 persistence_information; u8 reserved[12]; /* must be zero */ }; /* Section Descriptor, UEFI v2.7 sec N.2.2 */ struct cper_section_descriptor { u32 section_offset; /* Offset in bytes of the * section body from the base * of the record header */ u32 section_length; u16 revision; /* must be CPER_RECORD_REV */ u8 validation_bits; u8 reserved; /* must be zero */ u32 flags; guid_t section_type; guid_t fru_id; u32 section_severity; u8 fru_text[20]; }; /* Generic Processor Error Section, UEFI v2.7 sec N.2.4.1 */ struct cper_sec_proc_generic { u64 validation_bits; u8 proc_type; u8 proc_isa; u8 proc_error_type; u8 operation; u8 flags; u8 level; u16 reserved; u64 cpu_version; char cpu_brand[128]; u64 proc_id; u64 target_addr; u64 requestor_id; u64 responder_id; u64 ip; }; /* IA32/X64 Processor Error 
Section, UEFI v2.7 sec N.2.4.2 */ struct cper_sec_proc_ia { u64 validation_bits; u64 lapic_id; u8 cpuid[48]; }; /* IA32/X64 Processor Error Information Structure, UEFI v2.7 sec N.2.4.2.1 */ struct cper_ia_err_info { guid_t err_type; u64 validation_bits; u64 check_info; u64 target_id; u64 requestor_id; u64 responder_id; u64 ip; }; /* IA32/X64 Processor Context Information Structure, UEFI v2.7 sec N.2.4.2.2 */ struct cper_ia_proc_ctx { u16 reg_ctx_type; u16 reg_arr_size; u32 msr_addr; u64 mm_reg_addr; }; /* ARM Processor Error Section, UEFI v2.7 sec N.2.4.4 */ struct cper_sec_proc_arm { u32 validation_bits; u16 err_info_num; /* Number of Processor Error Info */ u16 context_info_num; /* Number of Processor Context Info Records*/ u32 section_length; u8 affinity_level; u8 reserved[3]; /* must be zero */ u64 mpidr; u64 midr; u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */ u32 psci_state; }; /* ARM Processor Error Information Structure, UEFI v2.7 sec N.2.4.4.1 */ struct cper_arm_err_info { u8 version; u8 length; u16 validation_bits; u8 type; u16 multiple_error; u8 flags; u64 error_info; u64 virt_fault_addr; u64 physical_fault_addr; }; /* ARM Processor Context Information Structure, UEFI v2.7 sec N.2.4.4.2 */ struct cper_arm_ctx_info { u16 version; u16 type; u32 size; }; /* Old Memory Error Section, UEFI v2.1, v2.2 */ struct cper_sec_mem_err_old { u64 validation_bits; u64 error_status; u64 physical_addr; u64 physical_addr_mask; u16 node; u16 card; u16 module; u16 bank; u16 device; u16 row; u16 column; u16 bit_pos; u64 requestor_id; u64 responder_id; u64 target_id; u8 error_type; }; /* Memory Error Section (UEFI >= v2.3), UEFI v2.7 sec N.2.5 */ struct cper_sec_mem_err { u64 validation_bits; u64 error_status; u64 physical_addr; u64 physical_addr_mask; u16 node; u16 card; u16 module; u16 bank; u16 device; u16 row; u16 column; u16 bit_pos; u64 requestor_id; u64 responder_id; u64 target_id; u8 error_type; u8 reserved; u16 rank; u16 mem_array_handle; /* "card handle" in UEFI 2.4 */ u16 mem_dev_handle; /* "module handle" in UEFI 2.4 */ }; struct cper_mem_err_compact { u64 validation_bits; u16 node; u16 card; u16 module; u16 bank; u16 device; u16 row; u16 column; u16 bit_pos; u64 requestor_id; u64 responder_id; u64 target_id; u16 rank; u16 mem_array_handle; u16 mem_dev_handle; }; /* PCI Express Error Section, UEFI v2.7 sec N.2.7 */ struct cper_sec_pcie { u64 validation_bits; u32 port_type; struct { u8 minor; u8 major; u8 reserved[2]; } version; u16 command; u16 status; u32 reserved; struct { u16 vendor_id; u16 device_id; u8 class_code[3]; u8 function; u8 device; u16 segment; u8 bus; u8 secondary_bus; u16 slot; u8 reserved; } device_id; struct { u32 lower; u32 upper; } serial_number; struct { u16 secondary_status; u16 control; } bridge; u8 capability[60]; u8 aer_info[96]; }; /* Reset to default packing */ #pragma pack() extern const char *const cper_proc_error_type_strs[4]; u64 cper_next_record_id(void); const char *cper_severity_str(unsigned int); const char *cper_mem_err_type_str(unsigned int); void cper_print_bits(const char *prefix, unsigned int bits, const char * const strs[], unsigned int strs_size); void cper_mem_err_pack(const struct cper_sec_mem_err *, struct cper_mem_err_compact *); const char *cper_mem_err_unpack(struct trace_seq *, struct cper_mem_err_compact *); void cper_print_proc_arm(const char *pfx, const struct cper_sec_proc_arm *proc); void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc); #endif crc7.h 0000644 00000000474 14722070374 0005566 0 
ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CRC7_H #define _LINUX_CRC7_H #include <linux/types.h> extern const u8 crc7_be_syndrome_table[256]; static inline u8 crc7_be_byte(u8 crc, u8 data) { return crc7_be_syndrome_table[crc ^ data]; } extern u8 crc7_be(u8 crc, const u8 *buffer, size_t len); #endif hidraw.h 0000644 00000002172 14722070374 0006203 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2007 Jiri Kosina */ #ifndef _HIDRAW_H #define _HIDRAW_H #include <uapi/linux/hidraw.h> struct hidraw { unsigned int minor; int exist; int open; wait_queue_head_t wait; struct hid_device *hid; struct device *dev; spinlock_t list_lock; struct list_head list; }; struct hidraw_report { __u8 *value; int len; }; struct hidraw_list { struct hidraw_report buffer[HIDRAW_BUFFER_SIZE]; int head; int tail; struct fasync_struct *fasync; struct hidraw *hidraw; struct list_head node; struct mutex read_mutex; }; #ifdef CONFIG_HIDRAW int hidraw_init(void); void hidraw_exit(void); int hidraw_report_event(struct hid_device *, u8 *, int); int hidraw_connect(struct hid_device *); void hidraw_disconnect(struct hid_device *); #else static inline int hidraw_init(void) { return 0; } static inline void hidraw_exit(void) { } static inline int hidraw_report_event(struct hid_device *hid, u8 *data, int len) { return 0; } static inline int hidraw_connect(struct hid_device *hid) { return -1; } static inline void hidraw_disconnect(struct hid_device *hid) { } #endif #endif async_tx.h 0000644 00000014322 14722070374 0006555 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2006, Intel Corporation. */ #ifndef _ASYNC_TX_H_ #define _ASYNC_TX_H_ #include <linux/dmaengine.h> #include <linux/spinlock.h> #include <linux/interrupt.h> /* on architectures without dma-mapping capabilities we need to ensure * that the asynchronous path compiles away */ #ifdef CONFIG_HAS_DMA #define __async_inline #else #define __async_inline __always_inline #endif /** * dma_chan_ref - object used to manage dma channels received from the * dmaengine core. * @chan - the channel being tracked * @node - node for the channel to be placed on async_tx_master_list * @rcu - for list_del_rcu * @count - number of times this channel is listed in the pool * (for channels with multiple capabilities) */ struct dma_chan_ref { struct dma_chan *chan; struct list_head node; struct rcu_head rcu; atomic_t count; }; /** * async_tx_flags - modifiers for the async_* calls * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where * the destination address is not a source. The asynchronous case handles this * implicitly; the synchronous case needs to zero the destination block. * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is * also one of the source addresses. In the synchronous case the destination * address is an implied source, whereas in the asynchronous case it must be listed * as a source. The destination address must be the first address in the source * array. * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a * dependency chain * @ASYNC_TX_FENCE: specify that the next operation in the dependency * chain uses this operation's result as an input * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the * input data. Required for rmw case.
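 *
 * A typical xor submission combining these helpers might look like the
 * sketch below (NDISKS, the pages, xor_done and the lengths are
 * hypothetical and only illustrate the calling convention):
 *
 *	struct async_submit_ctl submit;
 *	addr_conv_t addr_conv[NDISKS];
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
 *			  xor_done, NULL, addr_conv);
 *	tx = async_xor(dest, srcs, 0, src_cnt, len, &submit);
 *	async_tx_issue_pending(tx);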
*/ enum async_tx_flags { ASYNC_TX_XOR_ZERO_DST = (1 << 0), ASYNC_TX_XOR_DROP_DST = (1 << 1), ASYNC_TX_ACK = (1 << 2), ASYNC_TX_FENCE = (1 << 3), ASYNC_TX_PQ_XOR_DST = (1 << 4), }; /** * struct async_submit_ctl - async_tx submission/completion modifiers * @flags: submission modifiers * @depend_tx: parent dependency of the current operation being submitted * @cb_fn: callback routine to run at operation completion * @cb_param: parameter for the callback routine * @scribble: caller provided space for dma/page address conversions */ struct async_submit_ctl { enum async_tx_flags flags; struct dma_async_tx_descriptor *depend_tx; dma_async_tx_callback cb_fn; void *cb_param; void *scribble; }; #if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH) #define async_tx_issue_pending_all dma_issue_pending_all /** * async_tx_issue_pending - send pending descriptor to the hardware channel * @tx: descriptor handle to retrieve hardware context * * Note: any dependent operations will have already been issued by * async_tx_channel_switch, or (in the case of no channel switch) will * be already pending on this channel. */ static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) { if (likely(tx)) { struct dma_chan *chan = tx->chan; struct dma_device *dma = chan->device; dma->device_issue_pending(chan); } } #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL #include <asm/async_tx.h> #else #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \ __async_tx_find_channel(dep, type) struct dma_chan * __async_tx_find_channel(struct async_submit_ctl *submit, enum dma_transaction_type tx_type); #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */ #else static inline void async_tx_issue_pending_all(void) { do { } while (0); } static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) { do { } while (0); } static inline struct dma_chan * async_tx_find_channel(struct async_submit_ctl *submit, enum dma_transaction_type tx_type, struct page **dst, int dst_count, struct page **src, int src_count, size_t len) { return NULL; } #endif /** * async_tx_sync_epilog - actions to take if an operation is run synchronously * @cb_fn: function to call when the transaction completes * @cb_fn_param: parameter to pass to the callback routine */ static inline void async_tx_sync_epilog(struct async_submit_ctl *submit) { if (submit->cb_fn) submit->cb_fn(submit->cb_param); } typedef union { unsigned long addr; struct page *page; dma_addr_t dma; } addr_conv_t; static inline void init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags, struct dma_async_tx_descriptor *tx, dma_async_tx_callback cb_fn, void *cb_param, addr_conv_t *scribble) { args->flags = flags; args->depend_tx = tx; args->cb_fn = cb_fn; args->cb_param = cb_param; args->scribble = scribble; } void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_xor(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, unsigned int src_offset, size_t len, struct async_submit_ctl *submit); struct dma_async_tx_descriptor 
*async_trigger_callback(struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt, size_t len, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt, size_t len, enum sum_check_flags *pqres, struct page *spare, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb, struct page **ptrs, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_raid6_datap_recov(int src_num, size_t bytes, int faila, struct page **ptrs, struct async_submit_ctl *submit); void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ i3c/device.h 0000644 00000023442 14722070374 0006645 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018 Cadence Design Systems Inc. * * Author: Boris Brezillon <boris.brezillon@bootlin.com> */ #ifndef I3C_DEV_H #define I3C_DEV_H #include <linux/bitops.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/kconfig.h> #include <linux/mod_devicetable.h> #include <linux/module.h> /** * enum i3c_error_code - I3C error codes * * These are the standard error codes as defined by the I3C specification. * When -EIO is returned by the i3c_device_do_priv_xfers() or * i3c_device_send_hdr_cmds() one can check the error code in * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of * what went wrong. * * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C * related * @I3C_ERROR_M0: M0 error * @I3C_ERROR_M1: M1 error * @I3C_ERROR_M2: M2 error */ enum i3c_error_code { I3C_ERROR_UNKNOWN = 0, I3C_ERROR_M0 = 1, I3C_ERROR_M1, I3C_ERROR_M2, }; /** * enum i3c_hdr_mode - HDR mode ids * @I3C_HDR_DDR: DDR mode * @I3C_HDR_TSP: TSP mode * @I3C_HDR_TSL: TSL mode */ enum i3c_hdr_mode { I3C_HDR_DDR, I3C_HDR_TSP, I3C_HDR_TSL, }; /** * struct i3c_priv_xfer - I3C SDR private transfer * @rnw: encodes the transfer direction. true for a read, false for a write * @len: transfer length in bytes of the transfer * @data: input/output buffer * @data.in: input buffer. Must point to a DMA-able buffer * @data.out: output buffer. 
Must point to a DMA-able buffer * @err: I3C error code */ struct i3c_priv_xfer { u8 rnw; u16 len; union { void *in; const void *out; } data; enum i3c_error_code err; }; /** * enum i3c_dcr - I3C DCR values * @I3C_DCR_GENERIC_DEVICE: generic I3C device */ enum i3c_dcr { I3C_DCR_GENERIC_DEVICE = 0, }; #define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33) #define I3C_PID_RND_LOWER_32BITS(pid) (!!((pid) & BIT_ULL(32))) #define I3C_PID_RND_VAL(pid) ((pid) & GENMASK_ULL(31, 0)) #define I3C_PID_PART_ID(pid) (((pid) & GENMASK_ULL(31, 16)) >> 16) #define I3C_PID_INSTANCE_ID(pid) (((pid) & GENMASK_ULL(15, 12)) >> 12) #define I3C_PID_EXTRA_INFO(pid) ((pid) & GENMASK_ULL(11, 0)) #define I3C_BCR_DEVICE_ROLE(bcr) ((bcr) & GENMASK(7, 6)) #define I3C_BCR_I3C_SLAVE (0 << 6) #define I3C_BCR_I3C_MASTER (1 << 6) #define I3C_BCR_HDR_CAP BIT(5) #define I3C_BCR_BRIDGE BIT(4) #define I3C_BCR_OFFLINE_CAP BIT(3) #define I3C_BCR_IBI_PAYLOAD BIT(2) #define I3C_BCR_IBI_REQ_CAP BIT(1) #define I3C_BCR_MAX_DATA_SPEED_LIM BIT(0) /** * struct i3c_device_info - I3C device information * @pid: Provisional ID * @bcr: Bus Characteristic Register * @dcr: Device Characteristic Register * @static_addr: static/I2C address * @dyn_addr: dynamic address * @hdr_cap: supported HDR modes * @max_read_ds: max read speed information * @max_write_ds: max write speed information * @max_ibi_len: max IBI payload length * @max_read_turnaround: max read turn-around time in micro-seconds * @max_read_len: max private SDR read length in bytes * @max_write_len: max private SDR write length in bytes * * This is all basic information that should be advertised by an I3C device. * Some of them are optional depending on the device type and device * capabilities. * For each I3C slave attached to a master with * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC command * to retrieve these data. */ struct i3c_device_info { u64 pid; u8 bcr; u8 dcr; u8 static_addr; u8 dyn_addr; u8 hdr_cap; u8 max_read_ds; u8 max_write_ds; u8 max_ibi_len; u32 max_read_turnaround; u16 max_read_len; u16 max_write_len; }; /* * I3C device internals are kept hidden from I3C device users. It's just * simpler to refactor things when everything goes through getter/setters, and * I3C device drivers should not have to worry about internal representation * anyway. */ struct i3c_device; /* These macros should be used to fill i3c_device_id entries. */ #define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART) #define I3C_DEVICE(_manufid, _partid, _drvdata) \ { \ .match_flags = I3C_MATCH_MANUF_AND_PART, \ .manuf_id = _manufid, \ .part_id = _partid, \ .data = _drvdata, \ } #define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata) \ { \ .match_flags = I3C_MATCH_MANUF_AND_PART | \ I3C_MATCH_EXTRA_INFO, \ .manuf_id = _manufid, \ .part_id = _partid, \ .extra_info = _info, \ .data = _drvdata, \ } #define I3C_CLASS(_dcr, _drvdata) \ { \ .match_flags = I3C_MATCH_DCR, \ .dcr = _dcr, \ } /** * struct i3c_driver - I3C device driver * @driver: inherit from device_driver * @probe: I3C device probe method * @remove: I3C device remove method * @id_table: I3C device match table.
Will be used by the framework to decide * which device to bind to this driver */ struct i3c_driver { struct device_driver driver; int (*probe)(struct i3c_device *dev); int (*remove)(struct i3c_device *dev); const struct i3c_device_id *id_table; }; static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv) { return container_of(drv, struct i3c_driver, driver); } struct device *i3cdev_to_dev(struct i3c_device *i3cdev); struct i3c_device *dev_to_i3cdev(struct device *dev); const struct i3c_device_id * i3c_device_match_id(struct i3c_device *i3cdev, const struct i3c_device_id *id_table); static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev, void *data) { struct device *dev = i3cdev_to_dev(i3cdev); dev_set_drvdata(dev, data); } static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev) { struct device *dev = i3cdev_to_dev(i3cdev); return dev_get_drvdata(dev); } int i3c_driver_register_with_owner(struct i3c_driver *drv, struct module *owner); void i3c_driver_unregister(struct i3c_driver *drv); #define i3c_driver_register(__drv) \ i3c_driver_register_with_owner(__drv, THIS_MODULE) /** * module_i3c_driver() - Register a module providing an I3C driver * @__drv: the I3C driver to register * * Provide generic init/exit functions that simply register/unregister an I3C * driver. * Should be used by any driver that does not require extra init/cleanup steps. */ #define module_i3c_driver(__drv) \ module_driver(__drv, i3c_driver_register, i3c_driver_unregister) /** * i3c_i2c_driver_register() - Register an i2c and an i3c driver * @i3cdrv: the I3C driver to register * @i2cdrv: the I2C driver to register * * This function registers both @i2cdrv and @i3cdrv, and fails if one of these * registrations fails. This is mainly useful for devices that support both I2C * and I3C modes. * Note that when CONFIG_I3C is not enabled, this function only registers the * I2C driver. * * Return: 0 if both registrations succeed, a negative error code otherwise. */ static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv, struct i2c_driver *i2cdrv) { int ret; ret = i2c_add_driver(i2cdrv); if (ret || !IS_ENABLED(CONFIG_I3C)) return ret; ret = i3c_driver_register(i3cdrv); if (ret) i2c_del_driver(i2cdrv); return ret; } /** * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver * @i3cdrv: the I3C driver to unregister * @i2cdrv: the I2C driver to unregister * * This function unregisters both @i3cdrv and @i2cdrv. * Note that when CONFIG_I3C is not enabled, this function only unregisters the * @i2cdrv. */ static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv, struct i2c_driver *i2cdrv) { if (IS_ENABLED(CONFIG_I3C)) i3c_driver_unregister(i3cdrv); i2c_del_driver(i2cdrv); } /** * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C * driver * @__i3cdrv: the I3C driver to register * @__i2cdrv: the I2C driver to register * * Provide generic init/exit functions that simply register/unregister an I3C * and an I2C driver. * This macro can be used even if CONFIG_I3C is disabled; in that case, only * the I2C driver will be registered. * Should be used by any driver that does not require extra init/cleanup steps.
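 *
 * Usage sketch (the driver structures are hypothetical):
 *
 *	static struct i3c_driver foo_i3c_driver = { ... };
 *	static struct i2c_driver foo_i2c_driver = { ... };
 *
 *	module_i3c_i2c_driver(foo_i3c_driver, foo_i2c_driver);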
*/ #define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \ module_driver(__i3cdrv, \ i3c_i2c_driver_register, \ i3c_i2c_driver_unregister) int i3c_device_do_priv_xfers(struct i3c_device *dev, struct i3c_priv_xfer *xfers, int nxfers); void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info); struct i3c_ibi_payload { unsigned int len; const void *data; }; /** * struct i3c_ibi_setup - IBI setup object * @max_payload_len: maximum length of the payload associated to an IBI. If one * IBI appears to have a payload that is bigger than this * number, the IBI will be rejected. * @num_slots: number of pre-allocated IBI slots. This should be chosen so that * the system never runs out of IBI slots, otherwise you'll lose * IBIs. * @handler: IBI handler, every time an IBI is received. This handler is called * in a workqueue context. It is allowed to sleep and send new * messages on the bus, though it's recommended to keep the * processing done there as fast as possible to avoid delaying * processing of other queued on the same workqueue. * * Temporary structure used to pass information to i3c_device_request_ibi(). * This object can be allocated on the stack since i3c_device_request_ibi() * copies every bit of information and do not use it after * i3c_device_request_ibi() has returned. */ struct i3c_ibi_setup { unsigned int max_payload_len; unsigned int num_slots; void (*handler)(struct i3c_device *dev, const struct i3c_ibi_payload *payload); }; int i3c_device_request_ibi(struct i3c_device *dev, const struct i3c_ibi_setup *setup); void i3c_device_free_ibi(struct i3c_device *dev); int i3c_device_enable_ibi(struct i3c_device *dev); int i3c_device_disable_ibi(struct i3c_device *dev); #endif /* I3C_DEV_H */ i3c/master.h 0000644 00000061734 14722070374 0006707 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018 Cadence Design Systems Inc. * * Author: Boris Brezillon <boris.brezillon@bootlin.com> */ #ifndef I3C_MASTER_H #define I3C_MASTER_H #include <asm/bitsperlong.h> #include <linux/bitops.h> #include <linux/i2c.h> #include <linux/i3c/ccc.h> #include <linux/i3c/device.h> #include <linux/rwsem.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #define I3C_HOT_JOIN_ADDR 0x2 #define I3C_BROADCAST_ADDR 0x7e #define I3C_MAX_ADDR GENMASK(6, 0) struct i3c_master_controller; struct i3c_bus; struct i2c_device; struct i3c_device; /** * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor * @node: node element used to insert the slot into the I2C or I3C device * list * @master: I3C master that instantiated this device. Will be used to do * I2C/I3C transfers * @master_priv: master private data assigned to the device. Can be used to * add master specific information * * This structure is describing common I3C/I2C dev information. */ struct i3c_i2c_dev_desc { struct list_head node; struct i3c_master_controller *master; void *master_priv; }; #define I3C_LVR_I2C_INDEX_MASK GENMASK(7, 5) #define I3C_LVR_I2C_INDEX(x) ((x) << 5) #define I3C_LVR_I2C_FM_MODE BIT(4) #define I2C_MAX_ADDR GENMASK(6, 0) /** * struct i2c_dev_boardinfo - I2C device board information * @node: used to insert the boardinfo object in the I2C boardinfo list * @base: regular I2C board information * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about * the I2C device limitations * * This structure is used to attach board-level information to an I2C device. * Each I2C device connected on the I3C bus should have one. 
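 *
 * A rough sketch of a statically declared entry (the device name, address
 * and LVR value are made up for illustration):
 *
 *	struct i2c_dev_boardinfo binfo = {
 *		.base = { I2C_BOARD_INFO("foo", 0x42) },
 *		.lvr = I3C_LVR_I2C_INDEX(0) | I3C_LVR_I2C_FM_MODE,
 *	};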
*/ struct i2c_dev_boardinfo { struct list_head node; struct i2c_board_info base; u8 lvr; }; /** * struct i2c_dev_desc - I2C device descriptor * @common: common part of the I2C device descriptor * @boardinfo: pointer to the boardinfo attached to this I2C device * @dev: I2C device object registered to the I2C framework * @addr: I2C device address * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about * the I2C device limitations * * Each I2C device connected on the bus will have an i2c_dev_desc. * This object is created by the core and later attached to the controller * using &struct_i3c_master_controller->ops->attach_i2c_dev(). * * &struct_i2c_dev_desc is the internal representation of an I2C device * connected on an I3C bus. This object is also passed to all * &struct_i3c_master_controller_ops hooks. */ struct i2c_dev_desc { struct i3c_i2c_dev_desc common; const struct i2c_dev_boardinfo *boardinfo; struct i2c_client *dev; u16 addr; u8 lvr; }; /** * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot * @work: work associated to this slot. The IBI handler will be called from * there * @dev: the I3C device that has generated this IBI * @len: length of the payload associated to this IBI * @data: payload buffer * * An IBI slot is an object pre-allocated by the controller and used when an * IBI comes in. * Every time an IBI comes in, the I3C master driver should find a free IBI * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using * i3c_master_queue_ibi(). * * How IBI slots are allocated is left to the I3C master driver, though, for * simple kmalloc-based allocation, the generic IBI slot pool can be used. */ struct i3c_ibi_slot { struct work_struct work; struct i3c_dev_desc *dev; unsigned int len; void *data; }; /** * struct i3c_device_ibi_info - IBI information attached to a specific device * @all_ibis_handled: used to be informed when no more IBIs are waiting to be * processed. Used by i3c_device_disable_ibi() to wait for * all IBIs to be dequeued * @pending_ibis: count the number of pending IBIs. Each pending IBI has its * work element queued to the controller workqueue * @max_payload_len: maximum payload length for an IBI coming from this device. * This value is specified when calling * i3c_device_request_ibi() and should not change at run * time. All IBIs exceeding this limit should be * rejected by the master * @num_slots: number of IBI slots reserved for this device * @enabled: reflect the IBI status * @handler: IBI handler specified at i3c_device_request_ibi() call time. This * handler will be called from the controller workqueue, and as such * is allowed to sleep (though it is recommended to process the IBI * as fast as possible to not stall processing of other IBIs queued * on the same workqueue). * New I3C messages can be sent from the IBI handler * * The &struct_i3c_device_ibi_info object is allocated when * i3c_device_request_ibi() is called and attached to a specific device. This * object is here to manage IBIs coming from a specific I3C device. * * Note that this structure is the generic view of the IBI management * infrastructure. I3C master drivers may have their own internal * representation which they can associate to the device using * controller-private data.
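 *
 * From the device-driver side, the life cycle this structure supports is
 * roughly (sketch; foo_ibi_handler and the sizes are illustrative):
 *
 *	struct i3c_ibi_setup setup = {
 *		.max_payload_len = 2,
 *		.num_slots = 10,
 *		.handler = foo_ibi_handler,
 *	};
 *
 *	ret = i3c_device_request_ibi(i3cdev, &setup);
 *	if (!ret)
 *		ret = i3c_device_enable_ibi(i3cdev);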
*/ struct i3c_device_ibi_info { struct completion all_ibis_handled; atomic_t pending_ibis; unsigned int max_payload_len; unsigned int num_slots; unsigned int enabled; void (*handler)(struct i3c_device *dev, const struct i3c_ibi_payload *payload); }; /** * struct i3c_dev_boardinfo - I3C device board information * @node: used to insert the boardinfo object in the I3C boardinfo list * @init_dyn_addr: initial dynamic address requested by the FW. We provide no * guarantee that the device will end up using this address, * but try our best to assign this specific address to the * device * @static_addr: static address the I3C device listens on before it's been * assigned a dynamic address by the master. Will be used during * bus initialization to assign it a specific dynamic address * before starting DAA (Dynamic Address Assignment) * @pid: I3C Provisional ID exposed by the device. This is a unique identifier * that may be used to attach boardinfo to i3c_dev_desc when the device * does not have a static address * @of_node: optional DT node in case the device has been described in the DT * * This structure is used to attach board-level information to an I3C device. * Not all I3C devices connected on the bus will have a boardinfo. It's only * needed if you want to attach extra resources to a device or assign it a * specific dynamic address. */ struct i3c_dev_boardinfo { struct list_head node; u8 init_dyn_addr; u8 static_addr; u64 pid; struct device_node *of_node; }; /** * struct i3c_dev_desc - I3C device descriptor * @common: common part of the I3C device descriptor * @info: I3C device information. Will be automatically filled when you create * your device with i3c_master_add_i3c_dev_locked() * @ibi_lock: lock used to protect the &struct_i3c_device->ibi * @ibi: IBI info attached to a device. Should be NULL until * i3c_device_request_ibi() is called * @dev: pointer to the I3C device object exposed to I3C device drivers. This * should never be accessed from I3C master controller drivers. Only core * code should manipulate it when updating the dev <-> desc link or * when propagating IBI events to the driver * @boardinfo: pointer to the boardinfo attached to this I3C device * * Internal representation of an I3C device. This object is only used by the * core and passed to I3C master controller drivers when they're requested to * do some operations on the device. * The core maintains the link between the internal I3C dev descriptor and the * object exposed to the I3C device drivers (&struct_i3c_device). */ struct i3c_dev_desc { struct i3c_i2c_dev_desc common; struct i3c_device_info info; struct mutex ibi_lock; struct i3c_device_ibi_info *ibi; struct i3c_device *dev; const struct i3c_dev_boardinfo *boardinfo; }; /** * struct i3c_device - I3C device object * @dev: device object to register the I3C dev to the device model * @desc: pointer to an i3c device descriptor object. This link is updated * every time the I3C device is rediscovered with a different dynamic * address assigned * @bus: I3C bus this device is attached to * * I3C device object exposed to I3C device drivers. The core takes care of * linking this object to the relevant &struct_i3c_dev_desc one. * All I3C devs on the I3C bus are represented, including I3C masters. For each * of them, we have an instance of &struct i3c_device.
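 *
 * A driver probe hook typically starts by querying the device information
 * (sketch; foo_probe is hypothetical):
 *
 *	static int foo_probe(struct i3c_device *i3cdev)
 *	{
 *		struct i3c_device_info info;
 *
 *		i3c_device_get_info(i3cdev, &info);
 *		if (!(info.bcr & I3C_BCR_IBI_REQ_CAP))
 *			return -ENODEV;
 *		...
 *	}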
*/ struct i3c_device { struct device dev; struct i3c_dev_desc *desc; struct i3c_bus *bus; }; /* * The I3C specification says the maximum number of devices connected on the * bus is 11, but this number depends on external parameters like trace length, * capacitive load per Device, and the types of Devices present on the Bus. * The I3C master can also have limitations, so this number is just here as a * reference and should be adjusted on a per-controller/per-board basis. */ #define I3C_BUS_MAX_DEVS 11 #define I3C_BUS_MAX_I3C_SCL_RATE 12900000 #define I3C_BUS_TYP_I3C_SCL_RATE 12500000 #define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000 #define I3C_BUS_I2C_FM_SCL_RATE 400000 #define I3C_BUS_TLOW_OD_MIN_NS 200 /** * enum i3c_bus_mode - I3C bus mode * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation * expected * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on * the bus. The only impact in this mode is that the * high SCL pulse has to stay below 50ns to trick I2C * devices when transmitting I3C frames * @I3C_BUS_MODE_MIXED_LIMITED: I2C devices without 50ns spike filter are * present on the bus. However they allow * compliance up to the maximum SDR SCL clock * frequency. * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present * on the bus */ enum i3c_bus_mode { I3C_BUS_MODE_PURE, I3C_BUS_MODE_MIXED_FAST, I3C_BUS_MODE_MIXED_LIMITED, I3C_BUS_MODE_MIXED_SLOW, }; /** * enum i3c_addr_slot_status - I3C address slot status * @I3C_ADDR_SLOT_FREE: address is free * @I3C_ADDR_SLOT_RSVD: address is reserved * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask * * On an I3C bus, addresses are assigned dynamically, and we need to know which * addresses are free to use and which ones are already assigned. * * Addresses marked as reserved are those reserved by the I3C protocol * (broadcast address, ...). */ enum i3c_addr_slot_status { I3C_ADDR_SLOT_FREE, I3C_ADDR_SLOT_RSVD, I3C_ADDR_SLOT_I2C_DEV, I3C_ADDR_SLOT_I3C_DEV, I3C_ADDR_SLOT_STATUS_MASK = 3, }; /** * struct i3c_bus - I3C bus object * @cur_master: I3C master currently driving the bus. Since I3C is multi-master * this can change over time. Will be used to let a master * know whether it needs to request bus ownership before sending * a frame or not * @id: bus ID. Assigned by the framework when registering the bus * @addrslots: a bitmap with 2-bits per-slot to encode the address status and * ease the DAA (Dynamic Address Assignment) procedure (see * &enum i3c_addr_slot_status) * @mode: bus mode (see &enum i3c_bus_mode) * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv * transfers * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers * @scl_rate: SCL signal rate for I3C and I2C mode * @devs.i3c: contains a list of I3C device descriptors representing I3C * devices connected on the bus and successfully attached to the * I3C master * @devs.i2c: contains a list of I2C device descriptors representing I2C * devices connected on the bus and successfully attached to the * I3C master * @devs: 2 lists containing all I3C/I2C devices connected to the bus * @lock: read/write lock on the bus. This is needed to protect against * operations that have an impact on the whole bus and the devices * connected to it.
For example, when asking slaves to drop their * dynamic address (RSTDAA CCC), we need to make sure no one is trying * to send I3C frames to these devices. * Note that this lock does not protect against concurrency between * devices: several drivers can send different I3C/I2C frames through * the same master in parallel. It is the responsibility of the * master to guarantee that frames are actually sent sequentially and * not interleaved * * The I3C bus is represented with its own object and not implicitly described * by the I3C master to cope with the multi-master functionality, where one bus * can be shared amongst several masters, each of them requesting bus ownership * when they need to. */ struct i3c_bus { struct i3c_dev_desc *cur_master; int id; unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG]; enum i3c_bus_mode mode; struct { unsigned long i3c; unsigned long i2c; } scl_rate; struct { struct list_head i3c; struct list_head i2c; } devs; struct rw_semaphore lock; }; /** * struct i3c_master_controller_ops - I3C master methods * @bus_init: hook responsible for the I3C bus initialization. You should at * least call master_set_info() from there and set the bus mode. * You can also put controller specific initialization in there. * This method is mandatory. * @bus_cleanup: cleanup everything done in * &i3c_master_controller_ops->bus_init(). * This method is optional. * @attach_i3c_dev: called every time an I3C device is attached to the bus. It * can be after a DAA or when a device is statically declared * by the FW, in which case it will only have a static address * and the dynamic address will be 0. * When this function is called, device information has not * been retrieved yet. * This is a good place to attach master controller specific * data to I3C devices. * This method is optional. * @reattach_i3c_dev: called every time an I3C device has its address * changed. It can be because the device has been powered * down and has lost its address, or it can happen when a * device had a static address and has been assigned a * dynamic address with SETDASA. * This method is optional. * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually * happens when the master device is unregistered. * This method is optional. * @do_daa: do a DAA (Dynamic Address Assignment) procedure. This procedure * should send an ENTDAA CCC command and then add all devices * discovered during the DAA using i3c_master_add_i3c_dev_locked(). * All devices added with i3c_master_add_i3c_dev_locked() will then be * attached or re-attached to the controller. * This method is mandatory. * @supports_ccc_cmd: should return true if the CCC command is supported, false * otherwise. * This method is optional; if not provided, the core assumes * all CCC commands are supported. * @send_ccc_cmd: send a CCC command * This method is mandatory. * @priv_xfers: do one or several private I3C SDR transfers * This method is mandatory. * @attach_i2c_dev: called every time an I2C device is attached to the bus. * This is a good place to attach master controller specific * data to I2C devices. * This method is optional. * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually * happens when the master device is unregistered. * This method is optional. * @i2c_xfers: do one or several I2C transfers. Note that, unlike i3c * transfers, the core does not guarantee that buffers attached to * the transfers are DMA-safe.
If drivers want to have DMA-safe * buffers, they should use the i2c_get_dma_safe_msg_buf() * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C * framework. * This method is mandatory. * @request_ibi: attach an IBI handler to an I3C device. This implies defining * an IBI handler and the constraints of the IBI (maximum payload * length and number of pre-allocated slots). * Some controllers support fewer IBI-capable devices than regular * devices, so this method might return -%EBUSY if there's no * more space for an extra IBI registration * This method is optional. * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI * should have been disabled with ->disable_ibi() prior to that * This method is mandatory only if ->request_ibi is not NULL. * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called * prior to ->enable_ibi(). The controller should first enable * the IBI on the controller end (for example, unmask the hardware * IRQ) and then send the ENEC CCC command (with the IBI flag set) * to the I3C device. * This method is mandatory only if ->request_ibi is not NULL. * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI * flag set and then deactivate the hardware IRQ on the * controller end. * This method is mandatory only if ->request_ibi is not NULL. * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been * processed by its handler. The IBI slot should be put back * in the IBI slot pool so that the controller can re-use it * for a future IBI * This method is mandatory only if ->request_ibi is not * NULL. */ struct i3c_master_controller_ops { int (*bus_init)(struct i3c_master_controller *master); void (*bus_cleanup)(struct i3c_master_controller *master); int (*attach_i3c_dev)(struct i3c_dev_desc *dev); int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr); void (*detach_i3c_dev)(struct i3c_dev_desc *dev); int (*do_daa)(struct i3c_master_controller *master); bool (*supports_ccc_cmd)(struct i3c_master_controller *master, const struct i3c_ccc_cmd *cmd); int (*send_ccc_cmd)(struct i3c_master_controller *master, struct i3c_ccc_cmd *cmd); int (*priv_xfers)(struct i3c_dev_desc *dev, struct i3c_priv_xfer *xfers, int nxfers); int (*attach_i2c_dev)(struct i2c_dev_desc *dev); void (*detach_i2c_dev)(struct i2c_dev_desc *dev); int (*i2c_xfers)(struct i2c_dev_desc *dev, const struct i2c_msg *xfers, int nxfers); int (*request_ibi)(struct i3c_dev_desc *dev, const struct i3c_ibi_setup *req); void (*free_ibi)(struct i3c_dev_desc *dev); int (*enable_ibi)(struct i3c_dev_desc *dev); int (*disable_ibi)(struct i3c_dev_desc *dev); void (*recycle_ibi_slot)(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot); }; /** * struct i3c_master_controller - I3C master controller object * @dev: device to be registered to the device-model * @this: an I3C device object representing this master. This device will be * added to the list of I3C devs available on the bus * @i2c: I2C adapter used for backward compatibility. This adapter is * registered to the I2C subsystem to be as transparent as possible to * existing I2C drivers * @ops: master operations.
See &struct i3c_master_controller_ops * @secondary: true if the master is a secondary master * @init_done: true when the bus initialization is done * @boardinfo.i3c: list of I3C boardinfo objects * @boardinfo.i2c: list of I2C boardinfo objects * @boardinfo: board-level information attached to devices connected on the bus * @bus: I3C bus exposed by this master * @wq: workqueue used to execute IBI handlers. Can also be used by master * drivers if they need to postpone operations that need to take place * in a thread context. Typical examples are Hot Join processing which * requires taking the bus lock in maintenance, which in turn, can only * be done from a sleep-able context * * A &struct i3c_master_controller has to be registered to the I3C subsystem * through i3c_master_register(). None of &struct i3c_master_controller fields * should be set manually, just pass appropriate values to * i3c_master_register(). */ struct i3c_master_controller { struct device dev; struct i3c_dev_desc *this; struct i2c_adapter i2c; const struct i3c_master_controller_ops *ops; unsigned int secondary : 1; unsigned int init_done : 1; struct { struct list_head i3c; struct list_head i2c; } boardinfo; struct i3c_bus bus; struct workqueue_struct *wq; }; /** * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus * @bus: the I3C bus * @dev: an I2C device descriptor pointer updated to point to the current slot * at each iteration of the loop * * Iterate over all I2C devs present on the bus. */ #define i3c_bus_for_each_i2cdev(bus, dev) \ list_for_each_entry(dev, &(bus)->devs.i2c, common.node) /** * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus * @bus: the I3C bus * @dev: and I3C device descriptor pointer updated to point to the current slot * at each iteration of the loop * * Iterate over all I3C devs present on the bus. */ #define i3c_bus_for_each_i3cdev(bus, dev) \ list_for_each_entry(dev, &(bus)->devs.i3c, common.node) int i3c_master_do_i2c_xfers(struct i3c_master_controller *master, const struct i2c_msg *xfers, int nxfers); int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr, u8 evts); int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr, u8 evts); int i3c_master_entdaa_locked(struct i3c_master_controller *master); int i3c_master_defslvs_locked(struct i3c_master_controller *master); int i3c_master_get_free_addr(struct i3c_master_controller *master, u8 start_addr); int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, u8 addr); int i3c_master_do_daa(struct i3c_master_controller *master); int i3c_master_set_info(struct i3c_master_controller *master, const struct i3c_device_info *info); int i3c_master_register(struct i3c_master_controller *master, struct device *parent, const struct i3c_master_controller_ops *ops, bool secondary); int i3c_master_unregister(struct i3c_master_controller *master); /** * i3c_dev_get_master_data() - get master private data attached to an I3C * device descriptor * @dev: the I3C device descriptor to get private data from * * Return: the private data previously attached with i3c_dev_set_master_data() * or NULL if no data has been attached to the device. 
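 *
 * Typical usage from a controller driver's ->attach_i3c_dev() hook
 * (sketch; struct foo_i3c_dev is a hypothetical per-device state):
 *
 *	static int foo_attach_i3c_dev(struct i3c_dev_desc *dev)
 *	{
 *		struct foo_i3c_dev *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *
 *		if (!priv)
 *			return -ENOMEM;
 *		i3c_dev_set_master_data(dev, priv);
 *		return 0;
 *	}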
*/ static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev) { return dev->common.master_priv; } /** * i3c_dev_set_master_data() - attach master private data to an I3C device * descriptor * @dev: the I3C device descriptor to attach private data to * @data: private data * * This functions allows a master controller to attach per-device private data * which can then be retrieved with i3c_dev_get_master_data(). */ static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev, void *data) { dev->common.master_priv = data; } /** * i2c_dev_get_master_data() - get master private data attached to an I2C * device descriptor * @dev: the I2C device descriptor to get private data from * * Return: the private data previously attached with i2c_dev_set_master_data() * or NULL if no data has been attached to the device. */ static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev) { return dev->common.master_priv; } /** * i2c_dev_set_master_data() - attach master private data to an I2C device * descriptor * @dev: the I2C device descriptor to attach private data to * @data: private data * * This functions allows a master controller to attach per-device private data * which can then be retrieved with i2c_device_get_master_data(). */ static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev, void *data) { dev->common.master_priv = data; } /** * i3c_dev_get_master() - get master used to communicate with a device * @dev: I3C dev * * Return: the master controller driving @dev */ static inline struct i3c_master_controller * i3c_dev_get_master(struct i3c_dev_desc *dev) { return dev->common.master; } /** * i2c_dev_get_master() - get master used to communicate with a device * @dev: I2C dev * * Return: the master controller driving @dev */ static inline struct i3c_master_controller * i2c_dev_get_master(struct i2c_dev_desc *dev) { return dev->common.master; } /** * i3c_master_get_bus() - get the bus attached to a master * @master: master object * * Return: the I3C bus @master is connected to */ static inline struct i3c_bus * i3c_master_get_bus(struct i3c_master_controller *master) { return &master->bus; } struct i3c_generic_ibi_pool; struct i3c_generic_ibi_pool * i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev, const struct i3c_ibi_setup *req); void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool); struct i3c_ibi_slot * i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool); void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool, struct i3c_ibi_slot *slot); void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot); struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev); #endif /* I3C_MASTER_H */ i3c/ccc.h 0000644 00000023473 14722070374 0006142 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018 Cadence Design Systems Inc. * * Author: Boris Brezillon <boris.brezillon@bootlin.com> */ #ifndef I3C_CCC_H #define I3C_CCC_H #include <linux/bitops.h> #include <linux/i3c/device.h> /* I3C CCC (Common Command Codes) related definitions */ #define I3C_CCC_DIRECT BIT(7) #define I3C_CCC_ID(id, broadcast) \ ((id) | ((broadcast) ? 
0 : I3C_CCC_DIRECT)) /* Commands valid in both broadcast and unicast modes */ #define I3C_CCC_ENEC(broadcast) I3C_CCC_ID(0x0, broadcast) #define I3C_CCC_DISEC(broadcast) I3C_CCC_ID(0x1, broadcast) #define I3C_CCC_ENTAS(as, broadcast) I3C_CCC_ID(0x2 + (as), broadcast) #define I3C_CCC_RSTDAA(broadcast) I3C_CCC_ID(0x6, broadcast) #define I3C_CCC_SETMWL(broadcast) I3C_CCC_ID(0x9, broadcast) #define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast) #define I3C_CCC_SETXTIME(broadcast) ((broadcast) ? 0x28 : 0x98) #define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 0x61 : 0xe0)) /* Broadcast-only commands */ #define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true) #define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true) #define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true) #define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true) /* Unicast-only commands */ #define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false) #define I3C_CCC_SETNEWDA I3C_CCC_ID(0x8, false) #define I3C_CCC_GETMWL I3C_CCC_ID(0xb, false) #define I3C_CCC_GETMRL I3C_CCC_ID(0xc, false) #define I3C_CCC_GETPID I3C_CCC_ID(0xd, false) #define I3C_CCC_GETBCR I3C_CCC_ID(0xe, false) #define I3C_CCC_GETDCR I3C_CCC_ID(0xf, false) #define I3C_CCC_GETSTATUS I3C_CCC_ID(0x10, false) #define I3C_CCC_GETACCMST I3C_CCC_ID(0x11, false) #define I3C_CCC_SETBRGTGT I3C_CCC_ID(0x13, false) #define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false) #define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false) #define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false) #define I3C_CCC_EVENT_SIR BIT(0) #define I3C_CCC_EVENT_MR BIT(1) #define I3C_CCC_EVENT_HJ BIT(3) /** * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC * * @events: bitmask of I3C_CCC_EVENT_xxx events. * * Depending on the CCC command, the specific events coming from all devices * (broadcast version) or a specific device (unicast version) will be * enabled (ENEC) or disabled (DISEC). */ struct i3c_ccc_events { u8 events; }; /** * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC * * @len: maximum write length in bytes * * The maximum write length is only applicable to SDR private messages or * extended Write CCCs (like SETXTIME). */ struct i3c_ccc_mwl { __be16 len; }; /** * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC * * @read_len: maximum read length in bytes * @ibi_len: maximum IBI payload length * * The maximum read length is only applicable to SDR private messages or * extended Read CCCs (like GETXTIME). * The IBI length is only valid if the I3C slave is IBI capable * (%I3C_BCR_IBI_REQ_CAP is set). */ struct i3c_ccc_mrl { __be16 read_len; u8 ibi_len; } __packed; /** * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS * * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is * describing an I2C slave. * @dcr: DCR value (not applicable to entries describing I2C devices) * @lvr: LVR value (not applicable to entries describing I3C devices) * @bcr: BCR value or 0 if this entry is describing an I2C slave * @static_addr: static address or 0 if the device does not have a static * address * * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc * descriptors (one entry per I3C/I2C dev controlled by the master).
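 *
 * As a hedged illustration (not lifted from the core), an entry describing
 * an I2C device would leave the I3C-only fields at zero; lvr and addr below
 * are assumed locals, and the DEFSLVS format carries the 7-bit static
 * address in the upper bits of the byte, hence the shift:
 *
 *	struct i3c_ccc_dev_desc desc = {
 *		.dyn_addr = 0,		/* 0 marks an I2C entry */
 *		.lvr = lvr,
 *		.bcr = 0,
 *		.static_addr = addr << 1,
 *	};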
*/ struct i3c_ccc_dev_desc { u8 dyn_addr; union { u8 dcr; u8 lvr; }; u8 bcr; u8 static_addr; }; /** * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC * * @count: number of dev descriptors * @master: descriptor describing the current master * @slaves: array of descriptors describing slaves controlled by the * current master * * Information passed to the broadcast DEFSLVS to propagate device * information to all masters currently acting as slaves on the bus. * This is only meaningful if you have more than one master. */ struct i3c_ccc_defslvs { u8 count; struct i3c_ccc_dev_desc master; struct i3c_ccc_dev_desc slaves[0]; } __packed; /** * enum i3c_ccc_test_mode - enum listing all available test modes * * @I3C_CCC_EXIT_TEST_MODE: exit test mode * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode */ enum i3c_ccc_test_mode { I3C_CCC_EXIT_TEST_MODE, I3C_CCC_VENDOR_TEST_MODE, }; /** * struct i3c_ccc_enttm - payload passed to ENTTM CCC * * @mode: one of the &enum i3c_ccc_test_mode modes * * Information passed to the ENTTM CCC to instruct an I3C device to enter a * specific test mode. */ struct i3c_ccc_enttm { u8 mode; }; /** * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs * * @addr: dynamic address to assign to an I3C device * * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the * dynamic address of an I3C device. */ struct i3c_ccc_setda { u8 addr; }; /** * struct i3c_ccc_getpid - payload passed to GETPID CCC * * @pid: 48 bits PID in big endian */ struct i3c_ccc_getpid { u8 pid[6]; }; /** * struct i3c_ccc_getbcr - payload passed to GETBCR CCC * * @bcr: BCR (Bus Characteristic Register) value */ struct i3c_ccc_getbcr { u8 bcr; }; /** * struct i3c_ccc_getdcr - payload passed to GETDCR CCC * * @dcr: DCR (Device Characteristic Register) value */ struct i3c_ccc_getdcr { u8 dcr; }; #define I3C_CCC_STATUS_PENDING_INT(status) ((status) & GENMASK(3, 0)) #define I3C_CCC_STATUS_PROTOCOL_ERROR BIT(5) #define I3C_CCC_STATUS_ACTIVITY_MODE(status) \ (((status) & GENMASK(7, 6)) >> 6) /** * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC * * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more * information). 
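 *
 * A short, hypothetical decoding sketch using the helper macros above
 * (getstatus is an assumed local of type struct i3c_ccc_getstatus):
 *
 *	u16 status = be16_to_cpu(getstatus.status);
 *
 *	if (status & I3C_CCC_STATUS_PROTOCOL_ERROR)
 *		handle_error();
 *	pending = I3C_CCC_STATUS_PENDING_INT(status);
 *	mode = I3C_CCC_STATUS_ACTIVITY_MODE(status);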
*/ struct i3c_ccc_getstatus { __be16 status; }; /** * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC * * @newmaster: address of the master taking bus ownership */ struct i3c_ccc_getaccmst { u8 newmaster; }; /** * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor * * @addr: dynamic address of the bridged device * @id: ID of the slave device behind the bridge */ struct i3c_ccc_bridged_slave_desc { u8 addr; __be16 id; } __packed; /** * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC * * @count: number of bridged slaves * @bslaves: bridged slave descriptors */ struct i3c_ccc_setbrgtgt { u8 count; struct i3c_ccc_bridged_slave_desc bslaves[0]; } __packed; /** * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers */ enum i3c_sdr_max_data_rate { I3C_SDR0_FSCL_MAX, I3C_SDR1_FSCL_8MHZ, I3C_SDR2_FSCL_6MHZ, I3C_SDR3_FSCL_4MHZ, I3C_SDR4_FSCL_2MHZ, }; /** * enum i3c_tsco - clock to data turn-around */ enum i3c_tsco { I3C_TSCO_8NS, I3C_TSCO_9NS, I3C_TSCO_10NS, I3C_TSCO_11NS, I3C_TSCO_12NS, }; #define I3C_CCC_MAX_SDR_FSCL_MASK GENMASK(2, 0) #define I3C_CCC_MAX_SDR_FSCL(x) ((x) & I3C_CCC_MAX_SDR_FSCL_MASK) /** * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC * * @maxwr: write limitations * @maxrd: read limitations * @maxrdturn: maximum read turn-around expressed in micro-seconds and * little-endian formatted */ struct i3c_ccc_getmxds { u8 maxwr; u8 maxrd; u8 maxrdturn[3]; } __packed; #define I3C_CCC_HDR_MODE(mode) BIT(mode) /** * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC * * @modes: bitmap of supported HDR modes */ struct i3c_ccc_gethdrcap { u8 modes; } __packed; /** * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands */ enum i3c_ccc_setxtime_subcmd { I3C_CCC_SETXTIME_ST = 0x7f, I3C_CCC_SETXTIME_DT = 0xbf, I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf, I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef, I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7, I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb, I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd, I3C_CCC_SETXTIME_TPH = 0x3f, I3C_CCC_SETXTIME_TU = 0x9f, I3C_CCC_SETXTIME_ODR = 0x8f, }; /** * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC * * @subcmd: one of the sub-commands defined in &enum i3c_ccc_setxtime_subcmd * @data: sub-command payload. Amount of data is determined by * &i3c_ccc_setxtime->subcmd */ struct i3c_ccc_setxtime { u8 subcmd; u8 data[0]; } __packed; #define I3C_CCC_GETXTIME_SYNC_MODE BIT(0) #define I3C_CCC_GETXTIME_ASYNC_MODE(x) BIT((x) + 1) #define I3C_CCC_GETXTIME_OVERFLOW BIT(7) /** * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC * * @supported_modes: bitmap describing supported XTIME modes * @state: current status (enabled mode and overflow status) * @frequency: slave's internal oscillator frequency in 500KHz steps * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps */ struct i3c_ccc_getxtime { u8 supported_modes; u8 state; u8 frequency; u8 inaccuracy; } __packed; /** * struct i3c_ccc_cmd_payload - CCC payload * * @len: payload length * @data: payload data. This buffer must be DMA-able */ struct i3c_ccc_cmd_payload { u16 len; void *data; }; /** * struct i3c_ccc_cmd_dest - CCC command destination * * @addr: can be an I3C device address or the broadcast address if this is a * broadcast CCC * @payload: payload to be sent to this device or broadcasted */ struct i3c_ccc_cmd_dest { u8 addr; struct i3c_ccc_cmd_payload payload; }; /** * struct i3c_ccc_cmd - CCC command * * @rnw: true if the CCC should retrieve data from the device.
Only valid for * unicast commands * @id: CCC command id * @ndests: number of destinations. Should always be one for broadcast commands * @dests: array of destinations and associated payload for this CCC. Most of * the time, only one destination is provided * @err: I3C error code */ struct i3c_ccc_cmd { u8 rnw; u8 id; unsigned int ndests; struct i3c_ccc_cmd_dest *dests; enum i3c_error_code err; }; #endif /* I3C_CCC_H */ nfs_xdr.h 0000644 00000120261 14722070374 0006370 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NFS_XDR_H #define _LINUX_NFS_XDR_H #include <linux/nfsacl.h> #include <linux/sunrpc/gss_api.h> /* * To change the maximum rsize and wsize supported by the NFS client, adjust * NFS_MAX_FILE_IO_SIZE. 64KB is a typical maximum, but some servers can * support a megabyte or more. The default is left at 4096 bytes, which is * reasonable for NFS over UDP. */ #define NFS_MAX_FILE_IO_SIZE (1048576U) #define NFS_DEF_FILE_IO_SIZE (4096U) #define NFS_MIN_FILE_IO_SIZE (1024U) struct nfs4_string { unsigned int len; char *data; }; struct nfs_fsid { uint64_t major; uint64_t minor; }; /* * Helper for checking equality between 2 fsids. */ static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid *b) { return a->major == b->major && a->minor == b->minor; } struct nfs4_threshold { __u32 bm; __u32 l_type; __u64 rd_sz; __u64 wr_sz; __u64 rd_io_sz; __u64 wr_io_sz; }; struct nfs_fattr { unsigned int valid; /* which fields are valid */ umode_t mode; __u32 nlink; kuid_t uid; kgid_t gid; dev_t rdev; __u64 size; union { struct { __u32 blocksize; __u32 blocks; } nfs2; struct { __u64 used; } nfs3; } du; struct nfs_fsid fsid; __u64 fileid; __u64 mounted_on_fileid; struct timespec atime; struct timespec mtime; struct timespec ctime; __u64 change_attr; /* NFSv4 change attribute */ __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ __u64 pre_size; /* pre_op_attr.size */ struct timespec pre_mtime; /* pre_op_attr.mtime */ struct timespec pre_ctime; /* pre_op_attr.ctime */ unsigned long time_start; unsigned long gencount; struct nfs4_string *owner_name; struct nfs4_string *group_name; struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */ }; #define NFS_ATTR_FATTR_TYPE (1U << 0) #define NFS_ATTR_FATTR_MODE (1U << 1) #define NFS_ATTR_FATTR_NLINK (1U << 2) #define NFS_ATTR_FATTR_OWNER (1U << 3) #define NFS_ATTR_FATTR_GROUP (1U << 4) #define NFS_ATTR_FATTR_RDEV (1U << 5) #define NFS_ATTR_FATTR_SIZE (1U << 6) #define NFS_ATTR_FATTR_PRESIZE (1U << 7) #define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8) #define NFS_ATTR_FATTR_SPACE_USED (1U << 9) #define NFS_ATTR_FATTR_FSID (1U << 10) #define NFS_ATTR_FATTR_FILEID (1U << 11) #define NFS_ATTR_FATTR_ATIME (1U << 12) #define NFS_ATTR_FATTR_MTIME (1U << 13) #define NFS_ATTR_FATTR_CTIME (1U << 14) #define NFS_ATTR_FATTR_PREMTIME (1U << 15) #define NFS_ATTR_FATTR_PRECTIME (1U << 16) #define NFS_ATTR_FATTR_CHANGE (1U << 17) #define NFS_ATTR_FATTR_PRECHANGE (1U << 18) #define NFS_ATTR_FATTR_V4_LOCATIONS (1U << 19) #define NFS_ATTR_FATTR_V4_REFERRAL (1U << 20) #define NFS_ATTR_FATTR_MOUNTPOINT (1U << 21) #define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 22) #define NFS_ATTR_FATTR_OWNER_NAME (1U << 23) #define NFS_ATTR_FATTR_GROUP_NAME (1U << 24) #define NFS_ATTR_FATTR_V4_SECURITY_LABEL (1U << 25) #define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \ | NFS_ATTR_FATTR_MODE \ | NFS_ATTR_FATTR_NLINK \ | NFS_ATTR_FATTR_OWNER \ | NFS_ATTR_FATTR_GROUP \ | NFS_ATTR_FATTR_RDEV \ | NFS_ATTR_FATTR_SIZE \ | NFS_ATTR_FATTR_FSID \ | NFS_ATTR_FATTR_FILEID \ 
| NFS_ATTR_FATTR_ATIME \ | NFS_ATTR_FATTR_MTIME \ | NFS_ATTR_FATTR_CTIME \ | NFS_ATTR_FATTR_CHANGE) #define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \ | NFS_ATTR_FATTR_BLOCKS_USED) #define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \ | NFS_ATTR_FATTR_SPACE_USED) #define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \ | NFS_ATTR_FATTR_SPACE_USED \ | NFS_ATTR_FATTR_V4_SECURITY_LABEL) /* * Maximal number of supported layout drivers. */ #define NFS_MAX_LAYOUT_TYPES 8 /* * Info on the file system */ struct nfs_fsinfo { struct nfs_fattr *fattr; /* Post-op attributes */ __u32 rtmax; /* max. read transfer size */ __u32 rtpref; /* pref. read transfer size */ __u32 rtmult; /* reads should be multiple of this */ __u32 wtmax; /* max. write transfer size */ __u32 wtpref; /* pref. write transfer size */ __u32 wtmult; /* writes should be multiple of this */ __u32 dtpref; /* pref. readdir transfer size */ __u64 maxfilesize; struct timespec time_delta; /* server time granularity */ __u32 lease_time; /* in seconds */ __u32 nlayouttypes; /* number of layouttypes */ __u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */ __u32 blksize; /* preferred pnfs io block size */ __u32 clone_blksize; /* granularity of a CLONE operation */ }; struct nfs_fsstat { struct nfs_fattr *fattr; /* Post-op attributes */ __u64 tbytes; /* total size in bytes */ __u64 fbytes; /* # of free bytes */ __u64 abytes; /* # of bytes available to user */ __u64 tfiles; /* # of files */ __u64 ffiles; /* # of free files */ __u64 afiles; /* # of files available to user */ }; struct nfs2_fsstat { __u32 tsize; /* Server transfer size */ __u32 bsize; /* Filesystem block size */ __u32 blocks; /* No. of "bsize" blocks on filesystem */ __u32 bfree; /* No. of free "bsize" blocks */ __u32 bavail; /* No. of available "bsize" blocks */ }; struct nfs_pathconf { struct nfs_fattr *fattr; /* Post-op attributes */ __u32 max_link; /* max # of hard links */ __u32 max_namelen; /* max name length */ }; struct nfs4_change_info { u32 atomic; u64 before; u64 after; }; struct nfs_seqid; /* nfs41 sessions channel attributes */ struct nfs4_channel_attrs { u32 max_rqst_sz; u32 max_resp_sz; u32 max_resp_sz_cached; u32 max_ops; u32 max_reqs; }; struct nfs4_slot; struct nfs4_sequence_args { struct nfs4_slot *sa_slot; u8 sa_cache_this : 1, sa_privileged : 1; }; struct nfs4_sequence_res { struct nfs4_slot *sr_slot; /* slot used to send request */ unsigned long sr_timestamp; int sr_status; /* sequence operation status */ u32 sr_status_flags; u32 sr_highest_slotid; u32 sr_target_highest_slotid; }; struct nfs4_get_lease_time_args { struct nfs4_sequence_args la_seq_args; }; struct nfs4_get_lease_time_res { struct nfs4_sequence_res lr_seq_res; struct nfs_fsinfo *lr_fsinfo; }; struct xdr_stream; struct nfs4_xdr_opaque_data; struct nfs4_xdr_opaque_ops { void (*encode)(struct xdr_stream *, const void *args, const struct nfs4_xdr_opaque_data *); void (*free)(struct nfs4_xdr_opaque_data *); }; struct nfs4_xdr_opaque_data { const struct nfs4_xdr_opaque_ops *ops; void *data; }; #define PNFS_LAYOUT_MAXSIZE 4096 struct nfs4_layoutdriver_data { struct page **pages; __u32 pglen; __u32 len; }; struct pnfs_layout_range { u32 iomode; u64 offset; u64 length; }; struct nfs4_layoutget_args { struct nfs4_sequence_args seq_args; __u32 type; struct pnfs_layout_range range; __u64 minlength; __u32 maxcount; struct inode *inode; struct nfs_open_context *ctx; nfs4_stateid stateid; struct nfs4_layoutdriver_data layout; }; struct nfs4_layoutget_res { struct nfs4_sequence_res seq_res; int status; __u32 
return_on_close; struct pnfs_layout_range range; __u32 type; nfs4_stateid stateid; struct nfs4_layoutdriver_data *layoutp; }; struct nfs4_layoutget { struct nfs4_layoutget_args args; struct nfs4_layoutget_res res; const struct cred *cred; gfp_t gfp_flags; }; struct nfs4_getdeviceinfo_args { struct nfs4_sequence_args seq_args; struct pnfs_device *pdev; __u32 notify_types; }; struct nfs4_getdeviceinfo_res { struct nfs4_sequence_res seq_res; struct pnfs_device *pdev; __u32 notification; }; struct nfs4_layoutcommit_args { struct nfs4_sequence_args seq_args; nfs4_stateid stateid; __u64 lastbytewritten; struct inode *inode; const u32 *bitmask; size_t layoutupdate_len; struct page *layoutupdate_page; struct page **layoutupdate_pages; __be32 *start_p; }; struct nfs4_layoutcommit_res { struct nfs4_sequence_res seq_res; struct nfs_fattr *fattr; const struct nfs_server *server; int status; }; struct nfs4_layoutcommit_data { struct rpc_task task; struct nfs_fattr fattr; struct list_head lseg_list; const struct cred *cred; struct inode *inode; struct nfs4_layoutcommit_args args; struct nfs4_layoutcommit_res res; }; struct nfs4_layoutreturn_args { struct nfs4_sequence_args seq_args; struct pnfs_layout_hdr *layout; struct inode *inode; struct pnfs_layout_range range; nfs4_stateid stateid; __u32 layout_type; struct nfs4_xdr_opaque_data *ld_private; }; struct nfs4_layoutreturn_res { struct nfs4_sequence_res seq_res; u32 lrs_present; nfs4_stateid stateid; }; struct nfs4_layoutreturn { struct nfs4_layoutreturn_args args; struct nfs4_layoutreturn_res res; const struct cred *cred; struct nfs_client *clp; struct inode *inode; int rpc_status; struct nfs4_xdr_opaque_data ld_private; }; #define PNFS_LAYOUTSTATS_MAXSIZE 256 struct nfs42_layoutstat_args; struct nfs42_layoutstat_devinfo; typedef void (*layoutstats_encode_t)(struct xdr_stream *, struct nfs42_layoutstat_args *, struct nfs42_layoutstat_devinfo *); /* Per file per deviceid layoutstats */ struct nfs42_layoutstat_devinfo { struct nfs4_deviceid dev_id; __u64 offset; __u64 length; __u64 read_count; __u64 read_bytes; __u64 write_count; __u64 write_bytes; __u32 layout_type; struct nfs4_xdr_opaque_data ld_private; }; struct nfs42_layoutstat_args { struct nfs4_sequence_args seq_args; struct nfs_fh *fh; struct inode *inode; nfs4_stateid stateid; int num_dev; struct nfs42_layoutstat_devinfo *devinfo; }; struct nfs42_layoutstat_res { struct nfs4_sequence_res seq_res; int num_dev; int rpc_status; }; struct nfs42_layoutstat_data { struct inode *inode; struct nfs42_layoutstat_args args; struct nfs42_layoutstat_res res; }; struct nfs42_device_error { struct nfs4_deviceid dev_id; int status; enum nfs_opnum4 opnum; }; struct nfs42_layout_error { __u64 offset; __u64 length; nfs4_stateid stateid; struct nfs42_device_error errors[1]; }; #define NFS42_LAYOUTERROR_MAX 5 struct nfs42_layouterror_args { struct nfs4_sequence_args seq_args; struct inode *inode; unsigned int num_errors; struct nfs42_layout_error errors[NFS42_LAYOUTERROR_MAX]; }; struct nfs42_layouterror_res { struct nfs4_sequence_res seq_res; unsigned int num_errors; int rpc_status; }; struct nfs42_layouterror_data { struct nfs42_layouterror_args args; struct nfs42_layouterror_res res; struct inode *inode; struct pnfs_layout_segment *lseg; }; struct nfs42_clone_args { struct nfs4_sequence_args seq_args; struct nfs_fh *src_fh; struct nfs_fh *dst_fh; nfs4_stateid src_stateid; nfs4_stateid dst_stateid; __u64 src_offset; __u64 dst_offset; __u64 count; const u32 *dst_bitmask; }; struct nfs42_clone_res { struct 
nfs4_sequence_res seq_res; unsigned int rpc_status; struct nfs_fattr *dst_fattr; const struct nfs_server *server; }; struct stateowner_id { __u64 create_time; __u32 uniquifier; }; /* * Arguments to the open call. */ struct nfs_openargs { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; struct nfs_seqid * seqid; int open_flags; fmode_t fmode; u32 share_access; u32 access; __u64 clientid; struct stateowner_id id; union { struct { struct iattr * attrs; /* UNCHECKED, GUARDED, EXCLUSIVE4_1 */ nfs4_verifier verifier; /* EXCLUSIVE */ }; nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ fmode_t delegation_type; /* CLAIM_PREVIOUS */ } u; const struct qstr * name; const struct nfs_server *server; /* Needed for ID mapping */ const u32 * bitmask; const u32 * open_bitmap; enum open_claim_type4 claim; enum createmode4 createmode; const struct nfs4_label *label; umode_t umask; struct nfs4_layoutget_args *lg_args; }; struct nfs_openres { struct nfs4_sequence_res seq_res; nfs4_stateid stateid; struct nfs_fh fh; struct nfs4_change_info cinfo; __u32 rflags; struct nfs_fattr * f_attr; struct nfs4_label *f_label; struct nfs_seqid * seqid; const struct nfs_server *server; fmode_t delegation_type; nfs4_stateid delegation; unsigned long pagemod_limit; __u32 do_recall; __u32 attrset[NFS4_BITMAP_SIZE]; struct nfs4_string *owner; struct nfs4_string *group_owner; __u32 access_request; __u32 access_supported; __u32 access_result; struct nfs4_layoutget_res *lg_res; }; /* * Arguments to the open_confirm call. */ struct nfs_open_confirmargs { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; nfs4_stateid * stateid; struct nfs_seqid * seqid; }; struct nfs_open_confirmres { struct nfs4_sequence_res seq_res; nfs4_stateid stateid; struct nfs_seqid * seqid; }; /* * Arguments to the close call. */ struct nfs_closeargs { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; nfs4_stateid stateid; struct nfs_seqid * seqid; fmode_t fmode; u32 share_access; const u32 * bitmask; struct nfs4_layoutreturn_args *lr_args; }; struct nfs_closeres { struct nfs4_sequence_res seq_res; nfs4_stateid stateid; struct nfs_fattr * fattr; struct nfs_seqid * seqid; const struct nfs_server *server; struct nfs4_layoutreturn_res *lr_res; int lr_ret; }; /* * * Arguments to the lock,lockt, and locku call. 
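 *
 * The &struct nfs_lowner that follows identifies a lock owner to the
 * server: clientid names the client instance, while id and s_dev
 * distinguish individual lock owners within that client.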
* */ struct nfs_lowner { __u64 clientid; __u64 id; dev_t s_dev; }; struct nfs_lock_args { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; struct file_lock * fl; struct nfs_seqid * lock_seqid; nfs4_stateid lock_stateid; struct nfs_seqid * open_seqid; nfs4_stateid open_stateid; struct nfs_lowner lock_owner; unsigned char block : 1; unsigned char reclaim : 1; unsigned char new_lock : 1; unsigned char new_lock_owner : 1; }; struct nfs_lock_res { struct nfs4_sequence_res seq_res; nfs4_stateid stateid; struct nfs_seqid * lock_seqid; struct nfs_seqid * open_seqid; }; struct nfs_locku_args { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; struct file_lock * fl; struct nfs_seqid * seqid; nfs4_stateid stateid; }; struct nfs_locku_res { struct nfs4_sequence_res seq_res; nfs4_stateid stateid; struct nfs_seqid * seqid; }; struct nfs_lockt_args { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; struct file_lock * fl; struct nfs_lowner lock_owner; }; struct nfs_lockt_res { struct nfs4_sequence_res seq_res; struct file_lock * denied; /* LOCK, LOCKT failed */ }; struct nfs_release_lockowner_args { struct nfs4_sequence_args seq_args; struct nfs_lowner lock_owner; }; struct nfs_release_lockowner_res { struct nfs4_sequence_res seq_res; }; struct nfs4_delegreturnargs { struct nfs4_sequence_args seq_args; const struct nfs_fh *fhandle; const nfs4_stateid *stateid; const u32 * bitmask; struct nfs4_layoutreturn_args *lr_args; }; struct nfs4_delegreturnres { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; struct nfs_server *server; struct nfs4_layoutreturn_res *lr_res; int lr_ret; }; /* * Arguments to the write call. */ struct nfs_write_verifier { char data[8]; }; struct nfs_writeverf { struct nfs_write_verifier verifier; enum nfs3_stable_how committed; }; /* * Arguments shared by the read and write call. */ struct nfs_pgio_args { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; struct nfs_open_context *context; struct nfs_lock_context *lock_context; nfs4_stateid stateid; __u64 offset; __u32 count; unsigned int pgbase; struct page ** pages; union { unsigned int replen; /* used by read */ struct { const u32 * bitmask; /* used by write */ enum nfs3_stable_how stable; /* used by write */ }; }; }; struct nfs_pgio_res { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; __u32 count; __u32 op_status; union { struct { unsigned int replen; /* used by read */ int eof; /* used by read */ }; struct { struct nfs_writeverf * verf; /* used by write */ const struct nfs_server *server; /* used by write */ }; }; }; /* * Arguments to the commit call. 
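 *
 * (A COMMIT flushes previously written unstable data in the given range to
 * stable storage; by convention a count of 0 means "from offset through
 * the end of the file".)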
*/ struct nfs_commitargs { struct nfs4_sequence_args seq_args; struct nfs_fh *fh; __u64 offset; __u32 count; const u32 *bitmask; }; struct nfs_commitres { struct nfs4_sequence_res seq_res; __u32 op_status; struct nfs_fattr *fattr; struct nfs_writeverf *verf; const struct nfs_server *server; }; /* * Common arguments to the unlink call */ struct nfs_removeargs { struct nfs4_sequence_args seq_args; const struct nfs_fh *fh; struct qstr name; }; struct nfs_removeres { struct nfs4_sequence_res seq_res; struct nfs_server *server; struct nfs_fattr *dir_attr; struct nfs4_change_info cinfo; }; /* * Common arguments to the rename call */ struct nfs_renameargs { struct nfs4_sequence_args seq_args; const struct nfs_fh *old_dir; const struct nfs_fh *new_dir; const struct qstr *old_name; const struct qstr *new_name; }; struct nfs_renameres { struct nfs4_sequence_res seq_res; struct nfs_server *server; struct nfs4_change_info old_cinfo; struct nfs_fattr *old_fattr; struct nfs4_change_info new_cinfo; struct nfs_fattr *new_fattr; }; /* parsed sec= options */ #define NFS_AUTH_INFO_MAX_FLAVORS 12 /* see fs/nfs/super.c */ struct nfs_auth_info { unsigned int flavor_len; rpc_authflavor_t flavors[NFS_AUTH_INFO_MAX_FLAVORS]; }; /* * Argument struct for decode_entry function */ struct nfs_entry { __u64 ino; __u64 cookie, prev_cookie; const char * name; unsigned int len; int eof; struct nfs_fh * fh; struct nfs_fattr * fattr; struct nfs4_label *label; unsigned char d_type; struct nfs_server * server; }; /* * The following types are for NFSv2 only. */ struct nfs_sattrargs { struct nfs_fh * fh; struct iattr * sattr; }; struct nfs_diropargs { struct nfs_fh * fh; const char * name; unsigned int len; }; struct nfs_createargs { struct nfs_fh * fh; const char * name; unsigned int len; struct iattr * sattr; }; struct nfs_setattrargs { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; nfs4_stateid stateid; struct iattr * iap; const struct nfs_server * server; /* Needed for name mapping */ const u32 * bitmask; const struct nfs4_label *label; }; struct nfs_setaclargs { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; size_t acl_len; struct page ** acl_pages; }; struct nfs_setaclres { struct nfs4_sequence_res seq_res; }; struct nfs_getaclargs { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; size_t acl_len; struct page ** acl_pages; }; /* getxattr ACL interface flags */ #define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */ struct nfs_getaclres { struct nfs4_sequence_res seq_res; size_t acl_len; size_t acl_data_offset; int acl_flags; struct page * acl_scratch; }; struct nfs_setattrres { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; struct nfs4_label *label; const struct nfs_server * server; }; struct nfs_linkargs { struct nfs_fh * fromfh; struct nfs_fh * tofh; const char * toname; unsigned int tolen; }; struct nfs_symlinkargs { struct nfs_fh * fromfh; const char * fromname; unsigned int fromlen; struct page ** pages; unsigned int pathlen; struct iattr * sattr; }; struct nfs_readdirargs { struct nfs_fh * fh; __u32 cookie; unsigned int count; struct page ** pages; }; struct nfs3_getaclargs { struct nfs_fh * fh; int mask; struct page ** pages; }; struct nfs3_setaclargs { struct inode * inode; int mask; struct posix_acl * acl_access; struct posix_acl * acl_default; size_t len; unsigned int npages; struct page ** pages; }; struct nfs_diropok { struct nfs_fh * fh; struct nfs_fattr * fattr; }; struct nfs_readlinkargs { struct nfs_fh * fh; unsigned int pgbase; unsigned int pglen; struct page ** pages; 
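	/* as in nfs4_readlink below, the link target is decoded directly
	 * into these pages (zero-copy); pgbase/pglen describe the window */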
}; struct nfs3_sattrargs { struct nfs_fh * fh; struct iattr * sattr; unsigned int guard; struct timespec guardtime; }; struct nfs3_diropargs { struct nfs_fh * fh; const char * name; unsigned int len; }; struct nfs3_accessargs { struct nfs_fh * fh; __u32 access; }; struct nfs3_createargs { struct nfs_fh * fh; const char * name; unsigned int len; struct iattr * sattr; enum nfs3_createmode createmode; __be32 verifier[2]; }; struct nfs3_mkdirargs { struct nfs_fh * fh; const char * name; unsigned int len; struct iattr * sattr; }; struct nfs3_symlinkargs { struct nfs_fh * fromfh; const char * fromname; unsigned int fromlen; struct page ** pages; unsigned int pathlen; struct iattr * sattr; }; struct nfs3_mknodargs { struct nfs_fh * fh; const char * name; unsigned int len; enum nfs3_ftype type; struct iattr * sattr; dev_t rdev; }; struct nfs3_linkargs { struct nfs_fh * fromfh; struct nfs_fh * tofh; const char * toname; unsigned int tolen; }; struct nfs3_readdirargs { struct nfs_fh * fh; __u64 cookie; __be32 verf[2]; bool plus; unsigned int count; struct page ** pages; }; struct nfs3_diropres { struct nfs_fattr * dir_attr; struct nfs_fh * fh; struct nfs_fattr * fattr; }; struct nfs3_accessres { struct nfs_fattr * fattr; __u32 access; }; struct nfs3_readlinkargs { struct nfs_fh * fh; unsigned int pgbase; unsigned int pglen; struct page ** pages; }; struct nfs3_linkres { struct nfs_fattr * dir_attr; struct nfs_fattr * fattr; }; struct nfs3_readdirres { struct nfs_fattr * dir_attr; __be32 * verf; bool plus; }; struct nfs3_getaclres { struct nfs_fattr * fattr; int mask; unsigned int acl_access_count; unsigned int acl_default_count; struct posix_acl * acl_access; struct posix_acl * acl_default; }; #if IS_ENABLED(CONFIG_NFS_V4) typedef u64 clientid4; struct nfs4_accessargs { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; u32 access; }; struct nfs4_accessres { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; u32 supported; u32 access; }; struct nfs4_create_arg { struct nfs4_sequence_args seq_args; u32 ftype; union { struct { struct page ** pages; unsigned int len; } symlink; /* NF4LNK */ struct { u32 specdata1; u32 specdata2; } device; /* NF4BLK, NF4CHR */ } u; const struct qstr * name; const struct nfs_server * server; const struct iattr * attrs; const struct nfs_fh * dir_fh; const u32 * bitmask; const struct nfs4_label *label; umode_t umask; }; struct nfs4_create_res { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fh * fh; struct nfs_fattr * fattr; struct nfs4_label *label; struct nfs4_change_info dir_cinfo; }; struct nfs4_fsinfo_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; }; struct nfs4_fsinfo_res { struct nfs4_sequence_res seq_res; struct nfs_fsinfo *fsinfo; }; struct nfs4_getattr_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; }; struct nfs4_getattr_res { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; struct nfs4_label *label; }; struct nfs4_link_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const struct nfs_fh * dir_fh; const struct qstr * name; const u32 * bitmask; }; struct nfs4_link_res { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; struct nfs4_label *label; struct nfs4_change_info cinfo; struct nfs_fattr * dir_attr; }; struct nfs4_lookup_arg { struct nfs4_sequence_args seq_args; 
const struct nfs_fh * dir_fh; const struct qstr * name; const u32 * bitmask; }; struct nfs4_lookup_res { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; struct nfs_fh * fh; struct nfs4_label *label; }; struct nfs4_lookupp_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh *fh; const u32 *bitmask; }; struct nfs4_lookupp_res { struct nfs4_sequence_res seq_res; const struct nfs_server *server; struct nfs_fattr *fattr; struct nfs_fh *fh; struct nfs4_label *label; }; struct nfs4_lookup_root_arg { struct nfs4_sequence_args seq_args; const u32 * bitmask; }; struct nfs4_pathconf_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; }; struct nfs4_pathconf_res { struct nfs4_sequence_res seq_res; struct nfs_pathconf *pathconf; }; struct nfs4_readdir_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; u64 cookie; nfs4_verifier verifier; u32 count; struct page ** pages; /* zero-copy data */ unsigned int pgbase; /* zero-copy data */ const u32 * bitmask; bool plus; }; struct nfs4_readdir_res { struct nfs4_sequence_res seq_res; nfs4_verifier verifier; unsigned int pgbase; }; struct nfs4_readlink { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; unsigned int pgbase; unsigned int pglen; /* zero-copy data */ struct page ** pages; /* zero-copy data */ }; struct nfs4_readlink_res { struct nfs4_sequence_res seq_res; }; struct nfs4_setclientid { const nfs4_verifier * sc_verifier; u32 sc_prog; unsigned int sc_netid_len; char sc_netid[RPCBIND_MAXNETIDLEN + 1]; unsigned int sc_uaddr_len; char sc_uaddr[RPCBIND_MAXUADDRLEN + 1]; struct nfs_client *sc_clnt; struct rpc_cred *sc_cred; }; struct nfs4_setclientid_res { u64 clientid; nfs4_verifier confirm; }; struct nfs4_statfs_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; }; struct nfs4_statfs_res { struct nfs4_sequence_res seq_res; struct nfs_fsstat *fsstat; }; struct nfs4_server_caps_arg { struct nfs4_sequence_args seq_args; struct nfs_fh *fhandle; const u32 * bitmask; }; struct nfs4_server_caps_res { struct nfs4_sequence_res seq_res; u32 attr_bitmask[3]; u32 exclcreat_bitmask[3]; u32 acl_bitmask; u32 has_links; u32 has_symlinks; u32 fh_expire_type; }; #define NFS4_PATHNAME_MAXCOMPONENTS 512 struct nfs4_pathname { unsigned int ncomponents; struct nfs4_string components[NFS4_PATHNAME_MAXCOMPONENTS]; }; #define NFS4_FS_LOCATION_MAXSERVERS 10 struct nfs4_fs_location { unsigned int nservers; struct nfs4_string servers[NFS4_FS_LOCATION_MAXSERVERS]; struct nfs4_pathname rootpath; }; #define NFS4_FS_LOCATIONS_MAXENTRIES 10 struct nfs4_fs_locations { struct nfs_fattr fattr; const struct nfs_server *server; struct nfs4_pathname fs_path; int nlocations; struct nfs4_fs_location locations[NFS4_FS_LOCATIONS_MAXENTRIES]; }; struct nfs4_fs_locations_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh *dir_fh; const struct nfs_fh *fh; const struct qstr *name; struct page *page; const u32 *bitmask; clientid4 clientid; unsigned char migration:1, renew:1; }; struct nfs4_fs_locations_res { struct nfs4_sequence_res seq_res; struct nfs4_fs_locations *fs_locations; unsigned char migration:1, renew:1; }; struct nfs4_secinfo4 { u32 flavor; struct rpcsec_gss_info flavor_info; }; struct nfs4_secinfo_flavors { unsigned int num_flavors; struct nfs4_secinfo4 flavors[0]; }; struct nfs4_secinfo_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh *dir_fh; const struct qstr *name; }; struct nfs4_secinfo_res { struct 
nfs4_sequence_res seq_res; struct nfs4_secinfo_flavors *flavors; }; struct nfs4_fsid_present_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh *fh; clientid4 clientid; unsigned char renew:1; }; struct nfs4_fsid_present_res { struct nfs4_sequence_res seq_res; struct nfs_fh *fh; unsigned char renew:1; }; #endif /* CONFIG_NFS_V4 */ struct nfstime4 { u64 seconds; u32 nseconds; }; #ifdef CONFIG_NFS_V4_1 struct pnfs_commit_bucket { struct list_head written; struct list_head committing; struct pnfs_layout_segment *wlseg; struct pnfs_layout_segment *clseg; struct nfs_writeverf direct_verf; }; struct pnfs_ds_commit_info { int nwritten; int ncommitting; int nbuckets; struct pnfs_commit_bucket *buckets; }; struct nfs41_state_protection { u32 how; struct nfs4_op_map enforce; struct nfs4_op_map allow; }; struct nfs41_exchange_id_args { struct nfs_client *client; nfs4_verifier verifier; u32 flags; struct nfs41_state_protection state_protect; }; struct nfs41_server_owner { uint64_t minor_id; uint32_t major_id_sz; char major_id[NFS4_OPAQUE_LIMIT]; }; struct nfs41_server_scope { uint32_t server_scope_sz; char server_scope[NFS4_OPAQUE_LIMIT]; }; struct nfs41_impl_id { char domain[NFS4_OPAQUE_LIMIT + 1]; char name[NFS4_OPAQUE_LIMIT + 1]; struct nfstime4 date; }; #define MAX_BIND_CONN_TO_SESSION_RETRIES 3 struct nfs41_bind_conn_to_session_args { struct nfs_client *client; struct nfs4_sessionid sessionid; u32 dir; bool use_conn_in_rdma_mode; int retries; }; struct nfs41_bind_conn_to_session_res { struct nfs4_sessionid sessionid; u32 dir; bool use_conn_in_rdma_mode; }; struct nfs41_exchange_id_res { u64 clientid; u32 seqid; u32 flags; struct nfs41_server_owner *server_owner; struct nfs41_server_scope *server_scope; struct nfs41_impl_id *impl_id; struct nfs41_state_protection state_protect; }; struct nfs41_create_session_args { struct nfs_client *client; u64 clientid; uint32_t seqid; uint32_t flags; uint32_t cb_program; struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ struct nfs4_channel_attrs bc_attrs; /* Back Channel */ }; struct nfs41_create_session_res { struct nfs4_sessionid sessionid; uint32_t seqid; uint32_t flags; struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ struct nfs4_channel_attrs bc_attrs; /* Back Channel */ }; struct nfs41_reclaim_complete_args { struct nfs4_sequence_args seq_args; /* In the future extend to include curr_fh for use with migration */ unsigned char one_fs:1; }; struct nfs41_reclaim_complete_res { struct nfs4_sequence_res seq_res; }; #define SECINFO_STYLE_CURRENT_FH 0 #define SECINFO_STYLE_PARENT 1 struct nfs41_secinfo_no_name_args { struct nfs4_sequence_args seq_args; int style; }; struct nfs41_test_stateid_args { struct nfs4_sequence_args seq_args; nfs4_stateid *stateid; }; struct nfs41_test_stateid_res { struct nfs4_sequence_res seq_res; unsigned int status; }; struct nfs41_free_stateid_args { struct nfs4_sequence_args seq_args; nfs4_stateid stateid; }; struct nfs41_free_stateid_res { struct nfs4_sequence_res seq_res; unsigned int status; }; static inline void nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) { kfree(cinfo->buckets); } #else struct pnfs_ds_commit_info { }; static inline void nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) { } #endif /* CONFIG_NFS_V4_1 */ #ifdef CONFIG_NFS_V4_2 struct nfs42_falloc_args { struct nfs4_sequence_args seq_args; struct nfs_fh *falloc_fh; nfs4_stateid falloc_stateid; u64 falloc_offset; u64 falloc_length; const u32 *falloc_bitmask; }; struct nfs42_falloc_res { struct nfs4_sequence_res seq_res; 
unsigned int status; struct nfs_fattr *falloc_fattr; const struct nfs_server *falloc_server; }; struct nfs42_copy_args { struct nfs4_sequence_args seq_args; struct nfs_fh *src_fh; nfs4_stateid src_stateid; u64 src_pos; struct nfs_fh *dst_fh; nfs4_stateid dst_stateid; u64 dst_pos; u64 count; bool sync; }; struct nfs42_write_res { nfs4_stateid stateid; u64 count; struct nfs_writeverf verifier; }; struct nfs42_copy_res { struct nfs4_sequence_res seq_res; struct nfs42_write_res write_res; bool consecutive; bool synchronous; struct nfs_commitres commit_res; }; struct nfs42_offload_status_args { struct nfs4_sequence_args osa_seq_args; struct nfs_fh *osa_src_fh; nfs4_stateid osa_stateid; }; struct nfs42_offload_status_res { struct nfs4_sequence_res osr_seq_res; uint64_t osr_count; int osr_status; }; struct nfs42_seek_args { struct nfs4_sequence_args seq_args; struct nfs_fh *sa_fh; nfs4_stateid sa_stateid; u64 sa_offset; u32 sa_what; }; struct nfs42_seek_res { struct nfs4_sequence_res seq_res; unsigned int status; u32 sr_eof; u64 sr_offset; }; #endif struct nfs_page; #define NFS_PAGEVEC_SIZE (8U) struct nfs_page_array { struct page **pagevec; unsigned int npages; /* Max length of pagevec */ struct page *page_array[NFS_PAGEVEC_SIZE]; }; /* used as flag bits in nfs_pgio_header */ enum { NFS_IOHDR_ERROR = 0, NFS_IOHDR_EOF, NFS_IOHDR_REDO, NFS_IOHDR_STAT, NFS_IOHDR_RESEND_PNFS, NFS_IOHDR_RESEND_MDS, }; struct nfs_io_completion; struct nfs_pgio_header { struct inode *inode; const struct cred *cred; struct list_head pages; struct nfs_page *req; struct nfs_writeverf verf; /* Used for writes */ fmode_t rw_mode; struct pnfs_layout_segment *lseg; loff_t io_start; const struct rpc_call_ops *mds_ops; void (*release) (struct nfs_pgio_header *hdr); const struct nfs_pgio_completion_ops *completion_ops; const struct nfs_rw_ops *rw_ops; struct nfs_io_completion *io_completion; struct nfs_direct_req *dreq; int pnfs_error; int error; /* merge with pnfs_error */ unsigned int good_bytes; /* boundary of good data */ unsigned long flags; /* * rpc data */ struct rpc_task task; struct nfs_fattr fattr; struct nfs_pgio_args args; /* argument struct */ struct nfs_pgio_res res; /* result struct */ unsigned long timestamp; /* For lease renewal */ int (*pgio_done_cb)(struct rpc_task *, struct nfs_pgio_header *); __u64 mds_offset; /* Filelayout dense stripe */ struct nfs_page_array page_array; struct nfs_client *ds_clp; /* pNFS data server */ int ds_commit_idx; /* ds index if ds_clp is set */ int pgio_mirror_idx;/* mirror index in pgio layer */ }; struct nfs_mds_commit_info { atomic_t rpcs_out; atomic_long_t ncommit; struct list_head list; }; struct nfs_commit_info; struct nfs_commit_data; struct nfs_inode; struct nfs_commit_completion_ops { void (*completion) (struct nfs_commit_data *data); void (*resched_write) (struct nfs_commit_info *, struct nfs_page *); }; struct nfs_commit_info { struct inode *inode; /* Needed for inode->i_lock */ struct nfs_mds_commit_info *mds; struct pnfs_ds_commit_info *ds; struct nfs_direct_req *dreq; /* O_DIRECT request */ const struct nfs_commit_completion_ops *completion_ops; }; struct nfs_commit_data { struct rpc_task task; struct inode *inode; const struct cred *cred; struct nfs_fattr fattr; struct nfs_writeverf verf; struct list_head pages; /* Coalesced requests we wish to flush */ struct list_head list; /* lists of struct nfs_write_data */ struct nfs_direct_req *dreq; /* O_DIRECT request */ struct nfs_commitargs args; /* argument struct */ struct nfs_commitres res; /* result struct */ struct 
nfs_open_context *context; struct pnfs_layout_segment *lseg; struct nfs_client *ds_clp; /* pNFS data server */ int ds_commit_index; loff_t lwb; const struct rpc_call_ops *mds_ops; const struct nfs_commit_completion_ops *completion_ops; int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data); unsigned long flags; }; struct nfs_pgio_completion_ops { void (*error_cleanup)(struct list_head *head, int); void (*init_hdr)(struct nfs_pgio_header *hdr); void (*completion)(struct nfs_pgio_header *hdr); void (*reschedule_io)(struct nfs_pgio_header *hdr); }; struct nfs_unlinkdata { struct nfs_removeargs args; struct nfs_removeres res; struct dentry *dentry; wait_queue_head_t wq; const struct cred *cred; struct nfs_fattr dir_attr; long timeout; }; struct nfs_renamedata { struct nfs_renameargs args; struct nfs_renameres res; const struct cred *cred; struct inode *old_dir; struct dentry *old_dentry; struct nfs_fattr old_fattr; struct inode *new_dir; struct dentry *new_dentry; struct nfs_fattr new_fattr; void (*complete)(struct rpc_task *, struct nfs_renamedata *); long timeout; bool cancelled; }; struct nfs_access_entry; struct nfs_client; struct rpc_timeout; struct nfs_subversion; struct nfs_mount_info; struct nfs_client_initdata; struct nfs_pageio_descriptor; /* * RPC procedure vector for NFSv2/NFSv3 demuxing */ struct nfs_rpc_ops { u32 version; /* Protocol version */ const struct dentry_operations *dentry_ops; const struct inode_operations *dir_inode_ops; const struct inode_operations *file_inode_ops; const struct file_operations *file_ops; const struct nlmclnt_operations *nlmclnt_ops; int (*getroot) (struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); struct vfsmount *(*submount) (struct nfs_server *, struct dentry *, struct nfs_fh *, struct nfs_fattr *); struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *, struct nfs_subversion *); int (*getattr) (struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *, struct inode *); int (*setattr) (struct dentry *, struct nfs_fattr *, struct iattr *); int (*lookup) (struct inode *, const struct qstr *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); int (*lookupp) (struct inode *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); int (*access) (struct inode *, struct nfs_access_entry *); int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int); int (*create) (struct inode *, struct dentry *, struct iattr *, int); int (*remove) (struct inode *, struct dentry *); void (*unlink_setup) (struct rpc_message *, struct dentry *, struct inode *); void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *); int (*unlink_done) (struct rpc_task *, struct inode *); void (*rename_setup) (struct rpc_message *msg, struct dentry *old_dentry, struct dentry *new_dentry); void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); int (*link) (struct inode *, struct inode *, const struct qstr *); int (*symlink) (struct inode *, struct dentry *, struct page *, unsigned int, struct iattr *); int (*mkdir) (struct inode *, struct dentry *, struct iattr *); int (*rmdir) (struct inode *, const struct qstr *); int (*readdir) (struct dentry *, const struct cred *, u64, struct page **, unsigned int, bool); int (*mknod) (struct inode *, struct dentry *, struct iattr *, dev_t); int (*statfs) (struct nfs_server *, struct nfs_fh *, struct nfs_fsstat *); int (*fsinfo) 
(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); int (*pathconf) (struct nfs_server *, struct nfs_fh *, struct nfs_pathconf *); int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, bool); int (*pgio_rpc_prepare)(struct rpc_task *, struct nfs_pgio_header *); void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *); int (*read_done)(struct rpc_task *, struct nfs_pgio_header *); void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *, struct rpc_clnt **); int (*write_done)(struct rpc_task *, struct nfs_pgio_header *); void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *, struct rpc_clnt **); void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *); int (*commit_done) (struct rpc_task *, struct nfs_commit_data *); int (*lock)(struct file *, int, struct file_lock *); int (*lock_check_bounds)(const struct file_lock *); void (*clear_acl_cache)(struct inode *); void (*close_context)(struct nfs_open_context *ctx, int); struct inode * (*open_context) (struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *iattr, int *); int (*have_delegation)(struct inode *, fmode_t); struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *); struct nfs_client *(*init_client) (struct nfs_client *, const struct nfs_client_initdata *); void (*free_client) (struct nfs_client *); struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *); struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t); }; /* * NFS_CALL(getattr, inode, (fattr)); * expands into * NFS_PROTO(inode)->getattr(fattr); */ #define NFS_CALL(op, inode, args) NFS_PROTO(inode)->op args /* * Function vectors etc. for the NFS client */ extern const struct nfs_rpc_ops nfs_v2_clientops; extern const struct nfs_rpc_ops nfs_v3_clientops; extern const struct nfs_rpc_ops nfs_v4_clientops; extern const struct rpc_version nfs_version2; extern const struct rpc_version nfs_version3; extern const struct rpc_version nfs_version4; extern const struct rpc_version nfsacl_version3; extern const struct rpc_program nfsacl_program; #endif shrinker.h 0000644 00000006333 14722070374 0006555 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SHRINKER_H #define _LINUX_SHRINKER_H /* * This struct is used to pass information from page reclaim to the shrinkers. * We consolidate the values for easier extension later. * * The 'gfp_mask' refers to the allocation we are currently trying to * fulfil. */ struct shrink_control { gfp_t gfp_mask; /* current node being shrunk (for NUMA aware shrinkers) */ int nid; /* * How many objects scan_objects should scan and try to reclaim. * This is reset before every call, so it is safe for callees * to modify. */ unsigned long nr_to_scan; /* * How many objects did scan_objects process? * This defaults to nr_to_scan before every call, but the callee * should track its actual progress. */ unsigned long nr_scanned; /* current memcg being shrunk (for memcg aware shrinkers) */ struct mem_cgroup *memcg; }; #define SHRINK_STOP (~0UL) #define SHRINK_EMPTY (~0UL - 1) /* * A callback you can register to apply pressure to ageable caches. * * @count_objects should return the number of freeable items in the cache.
If * there are no objects to free, it should return SHRINK_EMPTY, while 0 is * returned in cases where the number of freeable items cannot be determined * or the shrinker should skip this cache for now (e.g., their number * is below the shrinkable limit). No deadlock checks should be done during the * count callback - the shrinker relies on aggregating scan counts that couldn't * be executed due to potential deadlocks to be run at a later call when the * deadlock condition is no longer pending. * * @scan_objects will only be called if @count_objects returned a non-zero * value for the number of freeable objects. The callback should scan the cache * and attempt to free items from the cache. It should then return the number * of objects freed during the scan, or SHRINK_STOP if progress cannot be made * due to potential deadlocks. If SHRINK_STOP is returned, then no further * attempts to call @scan_objects will be made from the current reclaim * context. * * @flags determine the shrinker abilities, like NUMA awareness */ struct shrinker { unsigned long (*count_objects)(struct shrinker *, struct shrink_control *sc); unsigned long (*scan_objects)(struct shrinker *, struct shrink_control *sc); long batch; /* reclaim batch size, 0 = default */ int seeks; /* seeks to recreate an obj */ unsigned flags; /* These are for internal use */ struct list_head list; #ifdef CONFIG_MEMCG /* ID in shrinker_idr */ int id; #endif /* objs pending delete, per node */ atomic_long_t *nr_deferred; }; #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ /* Flags */ #define SHRINKER_NUMA_AWARE (1 << 0) #define SHRINKER_MEMCG_AWARE (1 << 1) /* * It only makes sense when the shrinker is also MEMCG_AWARE for now; * a non-MEMCG_AWARE shrinker should not have this flag set. */ #define SHRINKER_NONSLAB (1 << 2) extern int prealloc_shrinker(struct shrinker *shrinker); extern void register_shrinker_prepared(struct shrinker *shrinker); extern int register_shrinker(struct shrinker *shrinker); extern void unregister_shrinker(struct shrinker *shrinker); extern void free_prealloced_shrinker(struct shrinker *shrinker); #endif fs_uart_pd.h 0000644 00000002763 14722070374 0007061 0 ustar 00 /* * Platform information definitions for the CPM Uart driver. * * 2006 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied.
*/ #ifndef FS_UART_PD_H #define FS_UART_PD_H #include <asm/types.h> enum fs_uart_id { fsid_smc1_uart, fsid_smc2_uart, fsid_scc1_uart, fsid_scc2_uart, fsid_scc3_uart, fsid_scc4_uart, fs_uart_nr, }; static inline int fs_uart_id_scc2fsid(int id) { return fsid_scc1_uart + id - 1; } static inline int fs_uart_id_fsid2scc(int id) { return id - fsid_scc1_uart + 1; } static inline int fs_uart_id_smc2fsid(int id) { return fsid_smc1_uart + id - 1; } static inline int fs_uart_id_fsid2smc(int id) { return id - fsid_smc1_uart + 1; } struct fs_uart_platform_info { void(*init_ioports)(struct fs_uart_platform_info *); /* device specific information */ int fs_no; /* controller index */ char fs_type[4]; /* controller type */ u32 uart_clk; u8 tx_num_fifo; u8 tx_buf_size; u8 rx_num_fifo; u8 rx_buf_size; u8 brg; u8 clk_rx; u8 clk_tx; }; static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi) { if(strstr(fpi->fs_type, "SMC")) return fs_uart_id_smc2fsid(fpi->fs_no); if(strstr(fpi->fs_type, "SCC")) return fs_uart_id_scc2fsid(fpi->fs_no); return fpi->fs_no; } #endif rtc/ds1286.h 0000644 00000002307 14722070374 0006444 0 ustar 00 /* * Copyright (C) 1998, 1999, 2003 Ralf Baechle * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #ifndef __LINUX_DS1286_H #define __LINUX_DS1286_H /********************************************************************** * register summary **********************************************************************/ #define RTC_HUNDREDTH_SECOND 0 #define RTC_SECONDS 1 #define RTC_MINUTES 2 #define RTC_MINUTES_ALARM 3 #define RTC_HOURS 4 #define RTC_HOURS_ALARM 5 #define RTC_DAY 6 #define RTC_DAY_ALARM 7 #define RTC_DATE 8 #define RTC_MONTH 9 #define RTC_YEAR 10 #define RTC_CMD 11 #define RTC_WHSEC 12 #define RTC_WSEC 13 #define RTC_UNUSED 14 /* RTC_*_alarm is always true if 2 MSBs are set */ # define RTC_ALARM_DONT_CARE 0xC0 /* * Bits in the month register */ #define RTC_EOSC 0x80 #define RTC_ESQW 0x40 /* * Bits in the Command register */ #define RTC_TDF 0x01 #define RTC_WAF 0x02 #define RTC_TDM 0x04 #define RTC_WAM 0x08 #define RTC_PU_LVL 0x10 #define RTC_IBH_LO 0x20 #define RTC_IPSW 0x40 #define RTC_TE 0x80 #endif /* __LINUX_DS1286_H */ rtc/ds1685.h 0000644 00000033413 14722070374 0006451 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Definitions for the registers, addresses, and platform data of the * DS1685/DS1687-series RTC chips. * * This Driver also works for the DS17X85/DS17X87 RTC chips. Functionally * similar to the DS1685/DS1687, they support a few extra features which * include larger, battery-backed NV-SRAM, burst-mode access, and an RTC * write counter. * * Copyright (C) 2011-2014 Joshua Kinard <kumba@gentoo.org>. * Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd-electronics.com>. * * References: * DS1685/DS1687 3V/5V Real-Time Clocks, 19-5215, Rev 4/10. * DS17x85/DS17x87 3V/5V Real-Time Clocks, 19-5222, Rev 4/10. * DS1689/DS1693 3V/5V Serialized Real-Time Clocks, Rev 112105. * Application Note 90, Using the Multiplex Bus RTC Extended Features. */ #ifndef _LINUX_RTC_DS1685_H_ #define _LINUX_RTC_DS1685_H_ #include <linux/rtc.h> #include <linux/platform_device.h> #include <linux/workqueue.h> /** * struct ds1685_priv - DS1685 private data structure. * @dev: pointer to the rtc_device structure. * @regs: iomapped base address pointer of the RTC registers. * @regstep: padding/step size between registers (optional). 
* @baseaddr: base address of the RTC device. * @size: resource size. * @irq_num: IRQ number assigned to the RTC device. * @bcd_mode: true if the RTC keeps time in BCD rather than binary. * @no_irq: true if the RTC is not wired up to an IRQ line. * @uie_unsupported: true if update-ended interrupts cannot be used. * @alloc_io_resources: true if the driver should allocate the I/O resources. * @read: pointer to the platform-provided register read function. * @write: pointer to the platform-provided register write function. * @prepare_poweroff: pointer to platform pre-poweroff function. * @wake_alarm: pointer to platform wake alarm function. * @post_ram_clear: pointer to platform post ram-clear function. */ struct ds1685_priv { struct rtc_device *dev; void __iomem *regs; u32 regstep; resource_size_t baseaddr; size_t size; int irq_num; bool bcd_mode; bool no_irq; bool uie_unsupported; bool alloc_io_resources; u8 (*read)(struct ds1685_priv *, int); void (*write)(struct ds1685_priv *, int, u8); void (*prepare_poweroff)(void); void (*wake_alarm)(void); void (*post_ram_clear)(void); }; /** * struct ds1685_rtc_platform_data - platform data structure. * @plat_prepare_poweroff: platform-specific pre-poweroff function. * @plat_wake_alarm: platform-specific wake alarm function. * @plat_post_ram_clear: platform-specific post ram-clear function. * * If your platform needs to use a custom padding/step size between * registers, or uses one or more of the extended interrupts and needs special * handling, then include this header file in your platform definition and * set regstep and the plat_* pointers as appropriate. */ struct ds1685_rtc_platform_data { const u32 regstep; const bool bcd_mode; const bool no_irq; const bool uie_unsupported; const bool alloc_io_resources; u8 (*plat_read)(struct ds1685_priv *, int); void (*plat_write)(struct ds1685_priv *, int, u8); void (*plat_prepare_poweroff)(void); void (*plat_wake_alarm)(void); void (*plat_post_ram_clear)(void); }; /* * Time Registers. */ #define RTC_SECS 0x00 /* Seconds 00-59 */ #define RTC_SECS_ALARM 0x01 /* Alarm Seconds 00-59 */ #define RTC_MINS 0x02 /* Minutes 00-59 */ #define RTC_MINS_ALARM 0x03 /* Alarm Minutes 00-59 */ #define RTC_HRS 0x04 /* Hours 01-12 AM/PM || 00-23 */ #define RTC_HRS_ALARM 0x05 /* Alarm Hours 01-12 AM/PM || 00-23 */ #define RTC_WDAY 0x06 /* Day of Week 01-07 */ #define RTC_MDAY 0x07 /* Day of Month 01-31 */ #define RTC_MONTH 0x08 /* Month 01-12 */ #define RTC_YEAR 0x09 /* Year 00-99 */ #define RTC_CENTURY 0x48 /* Century 00-99 */ #define RTC_MDAY_ALARM 0x49 /* Alarm Day of Month 01-31 */ /* * Bit masks for the Time registers in BCD Mode (DM = 0). */ #define RTC_SECS_BCD_MASK 0x7f /* - x x x x x x x */ #define RTC_MINS_BCD_MASK 0x7f /* - x x x x x x x */ #define RTC_HRS_12_BCD_MASK 0x1f /* - - - x x x x x */ #define RTC_HRS_24_BCD_MASK 0x3f /* - - x x x x x x */ #define RTC_MDAY_BCD_MASK 0x3f /* - - x x x x x x */ #define RTC_MONTH_BCD_MASK 0x1f /* - - - x x x x x */ #define RTC_YEAR_BCD_MASK 0xff /* x x x x x x x x */ /* * Bit masks for the Time registers in BIN Mode (DM = 1). */ #define RTC_SECS_BIN_MASK 0x3f /* - - x x x x x x */ #define RTC_MINS_BIN_MASK 0x3f /* - - x x x x x x */ #define RTC_HRS_12_BIN_MASK 0x0f /* - - - - x x x x */ #define RTC_HRS_24_BIN_MASK 0x1f /* - - - x x x x x */ #define RTC_MDAY_BIN_MASK 0x1f /* - - - x x x x x */ #define RTC_MONTH_BIN_MASK 0x0f /* - - - - x x x x */ #define RTC_YEAR_BIN_MASK 0x7f /* - x x x x x x x */ /* * Bit masks common for the Time registers in BCD or BIN Mode. */ #define RTC_WDAY_MASK 0x07 /* - - - - - x x x */ #define RTC_CENTURY_MASK 0xff /* x x x x x x x x */ #define RTC_MDAY_ALARM_MASK 0xff /* x x x x x x x x */ #define RTC_HRS_AMPM_MASK BIT(7) /* Mask for the AM/PM bit */ /* * Control Registers.
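 *
 * Control Registers A-D follow the familiar MC146818-style layout; the
 * Extended Control Registers 4A/4B only become visible once Bank 1 is
 * selected via the DV0 bit in Control Register A (see the Bank 1 notes
 * further down).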
 */
#define RTC_CTRL_A		0x0a	/* Control Register A */
#define RTC_CTRL_B		0x0b	/* Control Register B */
#define RTC_CTRL_C		0x0c	/* Control Register C */
#define RTC_CTRL_D		0x0d	/* Control Register D */
#define RTC_EXT_CTRL_4A		0x4a	/* Extended Control Register 4A */
#define RTC_EXT_CTRL_4B		0x4b	/* Extended Control Register 4B */

/*
 * Bit names in Control Register A.
 */
#define RTC_CTRL_A_UIP		BIT(7)	/* Update In Progress */
#define RTC_CTRL_A_DV2		BIT(6)	/* Countdown Chain */
#define RTC_CTRL_A_DV1		BIT(5)	/* Oscillator Enable */
#define RTC_CTRL_A_DV0		BIT(4)	/* Bank Select */
#define RTC_CTRL_A_RS3		BIT(3)	/* Rate-Selection Bit 3 */
#define RTC_CTRL_A_RS2		BIT(2)	/* Rate-Selection Bit 2 */
#define RTC_CTRL_A_RS1		BIT(1)	/* Rate-Selection Bit 1 */
#define RTC_CTRL_A_RS0		BIT(0)	/* Rate-Selection Bit 0 */
#define RTC_CTRL_A_RS_MASK	0x0f	/* RS3 + RS2 + RS1 + RS0 */

/*
 * Bit names in Control Register B.
 */
#define RTC_CTRL_B_SET		BIT(7)	/* SET Bit */
#define RTC_CTRL_B_PIE		BIT(6)	/* Periodic-Interrupt Enable */
#define RTC_CTRL_B_AIE		BIT(5)	/* Alarm-Interrupt Enable */
#define RTC_CTRL_B_UIE		BIT(4)	/* Update-Ended Interrupt-Enable */
#define RTC_CTRL_B_SQWE		BIT(3)	/* Square-Wave Enable */
#define RTC_CTRL_B_DM		BIT(2)	/* Data Mode */
#define RTC_CTRL_B_2412		BIT(1)	/* 12-Hr/24-Hr Mode */
#define RTC_CTRL_B_DSE		BIT(0)	/* Daylight Savings Enable */
#define RTC_CTRL_B_PAU_MASK	0x70	/* PIE + AIE + UIE */

/*
 * Bit names in Control Register C.
 *
 * BIT(0), BIT(1), BIT(2), & BIT(3) are unused, always return 0, and cannot
 * be written to.
 */
#define RTC_CTRL_C_IRQF		BIT(7)	/* Interrupt-Request Flag */
#define RTC_CTRL_C_PF		BIT(6)	/* Periodic-Interrupt Flag */
#define RTC_CTRL_C_AF		BIT(5)	/* Alarm-Interrupt Flag */
#define RTC_CTRL_C_UF		BIT(4)	/* Update-Ended Interrupt Flag */
#define RTC_CTRL_C_PAU_MASK	0x70	/* PF + AF + UF */

/*
 * Bit names in Control Register D.
 *
 * BIT(0) through BIT(6) are unused, always return 0, and cannot
 * be written to.
 */
#define RTC_CTRL_D_VRT		BIT(7)	/* Valid RAM and Time */

/*
 * Bit names in Extended Control Register 4A.
 *
 * On the DS1685/DS1687/DS1689/DS1693, BIT(4) and BIT(5) are reserved for
 * future use. They can be read from and written to, but have no effect
 * on the RTC's operation.
 *
 * On the DS17x85/DS17x87, BIT(5) is Burst-Mode Enable (BME), and allows
 * access to the extended NV-SRAM by automatically incrementing the address
 * register when they are read from or written to.
 */
#define RTC_CTRL_4A_VRT2	BIT(7)	/* Auxiliary Battery Status */
#define RTC_CTRL_4A_INCR	BIT(6)	/* Increment-in-Progress Status */
#define RTC_CTRL_4A_PAB		BIT(3)	/* Power-Active Bar Control */
#define RTC_CTRL_4A_RF		BIT(2)	/* RAM-Clear Flag */
#define RTC_CTRL_4A_WF		BIT(1)	/* Wake-Up Alarm Flag */
#define RTC_CTRL_4A_KF		BIT(0)	/* Kickstart Flag */
#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
#define RTC_CTRL_4A_BME		BIT(5)	/* Burst-Mode Enable */
#endif
#define RTC_CTRL_4A_RWK_MASK	0x07	/* RF + WF + KF */

/*
 * Bit names in Extended Control Register 4B.
 */
#define RTC_CTRL_4B_ABE		BIT(7)	/* Auxiliary Battery Enable */
#define RTC_CTRL_4B_E32K	BIT(6)	/* Enable 32.768kHz on SQW Pin */
#define RTC_CTRL_4B_CS		BIT(5)	/* Crystal Select */
#define RTC_CTRL_4B_RCE		BIT(4)	/* RAM Clear-Enable */
#define RTC_CTRL_4B_PRS		BIT(3)	/* PAB Reset-Select */
#define RTC_CTRL_4B_RIE		BIT(2)	/* RAM Clear-Interrupt Enable */
#define RTC_CTRL_4B_WIE		BIT(1)	/* Wake-Up Alarm-Interrupt Enable */
#define RTC_CTRL_4B_KSE		BIT(0)	/* Kickstart Interrupt-Enable */
#define RTC_CTRL_4B_RWK_MASK	0x07	/* RIE + WIE + KSE */

/*
 * Misc register names in Bank 1.
 *
 * The DV0 bit in Control Register A must be set to 1 for these registers
 * to become available, including Extended Control Registers 4A & 4B.
 */
#define RTC_BANK1_SSN_MODEL	0x40	/* Model Number */
#define RTC_BANK1_SSN_BYTE_1	0x41	/* 1st Byte of Serial Number */
#define RTC_BANK1_SSN_BYTE_2	0x42	/* 2nd Byte of Serial Number */
#define RTC_BANK1_SSN_BYTE_3	0x43	/* 3rd Byte of Serial Number */
#define RTC_BANK1_SSN_BYTE_4	0x44	/* 4th Byte of Serial Number */
#define RTC_BANK1_SSN_BYTE_5	0x45	/* 5th Byte of Serial Number */
#define RTC_BANK1_SSN_BYTE_6	0x46	/* 6th Byte of Serial Number */
#define RTC_BANK1_SSN_CRC	0x47	/* Serial CRC Byte */
#define RTC_BANK1_RAM_DATA_PORT	0x53	/* Extended RAM Data Port */

/*
 * Model-specific registers in Bank 1.
 *
 * The addresses below differ depending on the model of the RTC chip
 * selected in the kernel configuration. Not all of these features are
 * supported in the main driver at present.
 *
 * DS1685/DS1687   - Extended NV-SRAM address (LSB only).
 * DS1689/DS1693   - Vcc, Vbat, Pwr Cycle Counters & Customer-specific S/N.
 * DS17x85/DS17x87 - Extended NV-SRAM addresses (MSB & LSB) & Write counter.
 */
#if defined(CONFIG_RTC_DRV_DS1685)
#define RTC_BANK1_RAM_ADDR	0x50	/* NV-SRAM Addr */
#elif defined(CONFIG_RTC_DRV_DS1689)
#define RTC_BANK1_VCC_CTR_LSB	0x54	/* Vcc Counter Addr (LSB) */
#define RTC_BANK1_VCC_CTR_MSB	0x57	/* Vcc Counter Addr (MSB) */
#define RTC_BANK1_VBAT_CTR_LSB	0x58	/* Vbat Counter Addr (LSB) */
#define RTC_BANK1_VBAT_CTR_MSB	0x5b	/* Vbat Counter Addr (MSB) */
#define RTC_BANK1_PWR_CTR_LSB	0x5c	/* Pwr Cycle Counter Addr (LSB) */
#define RTC_BANK1_PWR_CTR_MSB	0x5d	/* Pwr Cycle Counter Addr (MSB) */
#define RTC_BANK1_UNIQ_SN	0x60	/* Customer-specific S/N */
#else /* DS17x85/DS17x87 */
#define RTC_BANK1_RAM_ADDR_LSB	0x50	/* NV-SRAM Addr (LSB) */
#define RTC_BANK1_RAM_ADDR_MSB	0x51	/* NV-SRAM Addr (MSB) */
#define RTC_BANK1_WRITE_CTR	0x5e	/* RTC Write Counter */
#endif

/*
 * Model numbers.
 *
 * The DS1688/DS1691 and DS1689/DS1693 chips share the same model number
 * and the manual doesn't indicate any major differences. As such, they
 * are regarded as the same chip in this driver.
 */
#define RTC_MODEL_DS1685	0x71	/* DS1685/DS1687 */
#define RTC_MODEL_DS17285	0x72	/* DS17285/DS17287 */
#define RTC_MODEL_DS1689	0x73	/* DS1688/DS1691/DS1689/DS1693 */
#define RTC_MODEL_DS17485	0x74	/* DS17485/DS17487 */
#define RTC_MODEL_DS17885	0x78	/* DS17885/DS17887 */

/*
 * Periodic Interrupt Rates / Square-Wave Output Frequency
 *
 * Periodic rates are selected by setting the RS3-RS0 bits in Control
 * Register A and enabled via either the E32K bit in Extended Control
 * Register 4B or the SQWE bit in Control Register B.
 *
 * E32K overrides the settings of RS3-RS0 and outputs a frequency of 32768Hz
 * on the SQW pin of the RTC chip. While there are 16 possible selections,
 * the 1-of-16 decoder is only able to divide the base 32768Hz signal into 13
 * smaller frequencies.
The values 0x01 and 0x02 are not used and are * synonymous with 0x08 and 0x09, respectively. * * When E32K is set to a logic 1, periodic interrupts are disabled and reading * /dev/rtc will return -EINVAL. This also applies if the periodic interrupt * frequency is set to 0Hz. * * Not currently used by the rtc-ds1685 driver because the RTC core removed * support for hardware-generated periodic-interrupts in favour of * hrtimer-generated interrupts. But these defines are kept around for use * in userland, as documentation to the hardware, and possible future use if * hardware-generated periodic interrupts are ever added back. */ /* E32K RS3 RS2 RS1 RS0 */ #define RTC_SQW_8192HZ 0x03 /* 0 0 0 1 1 */ #define RTC_SQW_4096HZ 0x04 /* 0 0 1 0 0 */ #define RTC_SQW_2048HZ 0x05 /* 0 0 1 0 1 */ #define RTC_SQW_1024HZ 0x06 /* 0 0 1 1 0 */ #define RTC_SQW_512HZ 0x07 /* 0 0 1 1 1 */ #define RTC_SQW_256HZ 0x08 /* 0 1 0 0 0 */ #define RTC_SQW_128HZ 0x09 /* 0 1 0 0 1 */ #define RTC_SQW_64HZ 0x0a /* 0 1 0 1 0 */ #define RTC_SQW_32HZ 0x0b /* 0 1 0 1 1 */ #define RTC_SQW_16HZ 0x0c /* 0 1 1 0 0 */ #define RTC_SQW_8HZ 0x0d /* 0 1 1 0 1 */ #define RTC_SQW_4HZ 0x0e /* 0 1 1 1 0 */ #define RTC_SQW_2HZ 0x0f /* 0 1 1 1 1 */ #define RTC_SQW_0HZ 0x00 /* 0 0 0 0 0 */ #define RTC_SQW_32768HZ 32768 /* 1 - - - - */ #define RTC_MAX_USER_FREQ 8192 /* * NVRAM data & addresses: * - 50 bytes of NVRAM are available just past the clock registers. * - 64 additional bytes are available in Bank0. * * Extended, battery-backed NV-SRAM: * - DS1685/DS1687 - 128 bytes. * - DS1689/DS1693 - 0 bytes. * - DS17285/DS17287 - 2048 bytes. * - DS17485/DS17487 - 4096 bytes. * - DS17885/DS17887 - 8192 bytes. */ #define NVRAM_TIME_BASE 0x0e /* NVRAM Addr in Time regs */ #define NVRAM_BANK0_BASE 0x40 /* NVRAM Addr in Bank0 regs */ #define NVRAM_SZ_TIME 50 #define NVRAM_SZ_BANK0 64 #if defined(CONFIG_RTC_DRV_DS1685) # define NVRAM_SZ_EXTND 128 #elif defined(CONFIG_RTC_DRV_DS1689) # define NVRAM_SZ_EXTND 0 #elif defined(CONFIG_RTC_DRV_DS17285) # define NVRAM_SZ_EXTND 2048 #elif defined(CONFIG_RTC_DRV_DS17485) # define NVRAM_SZ_EXTND 4096 #elif defined(CONFIG_RTC_DRV_DS17885) # define NVRAM_SZ_EXTND 8192 #endif #define NVRAM_TOTAL_SZ_BANK0 (NVRAM_SZ_TIME + NVRAM_SZ_BANK0) #define NVRAM_TOTAL_SZ (NVRAM_TOTAL_SZ_BANK0 + NVRAM_SZ_EXTND) /* * Function Prototypes. */ extern void __noreturn ds1685_rtc_poweroff(struct platform_device *pdev); #endif /* _LINUX_RTC_DS1685_H_ */ rtc/m48t59.h 0000644 00000003310 14722070374 0006462 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/rtc/m48t59.h * * Definitions for the platform data of m48t59 RTC chip driver. * * Copyright (c) 2007 Wind River Systems, Inc. 
* * Mark Zhan <rongkai.zhan@windriver.com> */ #ifndef _LINUX_RTC_M48T59_H_ #define _LINUX_RTC_M48T59_H_ /* * M48T59 Register Offset */ #define M48T59_YEAR 0xf #define M48T59_MONTH 0xe #define M48T59_MDAY 0xd /* Day of Month */ #define M48T59_WDAY 0xc /* Day of Week */ #define M48T59_WDAY_CB 0x20 /* Century Bit */ #define M48T59_WDAY_CEB 0x10 /* Century Enable Bit */ #define M48T59_HOUR 0xb #define M48T59_MIN 0xa #define M48T59_SEC 0x9 #define M48T59_CNTL 0x8 #define M48T59_CNTL_READ 0x40 #define M48T59_CNTL_WRITE 0x80 #define M48T59_WATCHDOG 0x7 #define M48T59_INTR 0x6 #define M48T59_INTR_AFE 0x80 /* Alarm Interrupt Enable */ #define M48T59_INTR_ABE 0x20 #define M48T59_ALARM_DATE 0x5 #define M48T59_ALARM_HOUR 0x4 #define M48T59_ALARM_MIN 0x3 #define M48T59_ALARM_SEC 0x2 #define M48T59_UNUSED 0x1 #define M48T59_FLAGS 0x0 #define M48T59_FLAGS_WDT 0x80 /* watchdog timer expired */ #define M48T59_FLAGS_AF 0x40 /* alarm */ #define M48T59_FLAGS_BF 0x10 /* low battery */ #define M48T59RTC_TYPE_M48T59 0 /* to keep compatibility */ #define M48T59RTC_TYPE_M48T02 1 #define M48T59RTC_TYPE_M48T08 2 struct m48t59_plat_data { /* The method to access M48T59 registers */ void (*write_byte)(struct device *dev, u32 ofs, u8 val); unsigned char (*read_byte)(struct device *dev, u32 ofs); int type; /* RTC model */ /* ioaddr mapped externally */ void __iomem *ioaddr; /* offset to RTC registers, automatically set according to the type */ unsigned int offset; }; #endif /* _LINUX_RTC_M48T59_H_ */ rtc/ds1307.h 0000644 00000001042 14722070374 0006431 0 ustar 00 /* * ds1307.h - platform_data for the ds1307 (and variants) rtc driver * (C) Copyright 2012 by Wolfram Sang, Pengutronix e.K. * same license as the driver */ #ifndef _LINUX_DS1307_H #define _LINUX_DS1307_H #include <linux/types.h> #define DS1307_TRICKLE_CHARGER_250_OHM 0x01 #define DS1307_TRICKLE_CHARGER_2K_OHM 0x02 #define DS1307_TRICKLE_CHARGER_4K_OHM 0x03 #define DS1307_TRICKLE_CHARGER_NO_DIODE 0x04 #define DS1307_TRICKLE_CHARGER_DIODE 0x08 struct ds1307_platform_data { u8 trickle_charger_setup; }; #endif /* _LINUX_DS1307_H */ rtc/sirfsoc_rtciobrg.h 0000644 00000001140 14722070374 0011052 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RTC I/O Bridge interfaces for CSR SiRFprimaII * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 
*/ #ifndef _SIRFSOC_RTC_IOBRG_H_ #define _SIRFSOC_RTC_IOBRG_H_ struct regmap_config; extern void sirfsoc_rtc_iobrg_besyncing(void); extern u32 sirfsoc_rtc_iobrg_readl(u32 addr); extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr); struct regmap *devm_regmap_init_iobg(struct device *dev, const struct regmap_config *config); #endif rtc/rtc-omap.h 0000644 00000000260 14722070374 0007233 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RTCOMAP_H_ #define _LINUX_RTCOMAP_H_ int omap_rtc_power_off_program(struct device *dev); #endif /* _LINUX_RTCOMAP_H_ */ unicode.h 0000644 00000001662 14722070374 0006356 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNICODE_H #define _LINUX_UNICODE_H #include <linux/init.h> #include <linux/dcache.h> struct unicode_map { const char *charset; int version; }; int utf8_validate(const struct unicode_map *um, const struct qstr *str); int utf8_strncmp(const struct unicode_map *um, const struct qstr *s1, const struct qstr *s2); int utf8_strncasecmp(const struct unicode_map *um, const struct qstr *s1, const struct qstr *s2); int utf8_strncasecmp_folded(const struct unicode_map *um, const struct qstr *cf, const struct qstr *s1); int utf8_normalize(const struct unicode_map *um, const struct qstr *str, unsigned char *dest, size_t dlen); int utf8_casefold(const struct unicode_map *um, const struct qstr *str, unsigned char *dest, size_t dlen); struct unicode_map *utf8_load(const char *version); void utf8_unload(struct unicode_map *um); #endif /* _LINUX_UNICODE_H */ uaccess.h 0000644 00000032123 14722070374 0006352 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__ #include <linux/sched.h> #include <linux/thread_info.h> #include <linux/kasan-checks.h> #define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) #include <asm/uaccess.h> /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and * __copy_{to,from}_user{,_inatomic}(). * * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and * return the amount left to copy. They should assume that access_ok() has * already been checked (and succeeded); they should *not* zero-pad anything. * No KASAN or object size checks either - those belong here. * * Both of these functions should attempt to copy size bytes starting at from * into the area starting at to. They must not fetch or store anything * outside of those areas. Return value must be between 0 (everything * copied successfully) and size (nothing copied). * * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting * at to must become equal to the bytes fetched from the corresponding area * starting at from. All data past to + size - N must be left unmodified. * * If copying succeeds, the return value must be 0. If some data cannot be * fetched, it is permitted to copy less than had been fetched; the only * hard requirement is that not storing anything at all (i.e. returning size) * should happen only when nothing could be copied. In other words, you don't * have to squeeze as much as possible - it is allowed, but not necessary. * * For raw_copy_from_user() to always points to kernel memory and no faults * on store should happen. Interpretation of from is affected by set_fs(). * For raw_copy_to_user() it's the other way round. * * Both can be inlined - it's up to architectures whether it wants to bother * with that. 
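 *
 * As an illustration only (not any real architecture), a byte-at-a-time
 * fallback that honours this contract could look like:
 *
 *	unsigned long raw_copy_from_user(void *to, const void __user *from,
 *					 unsigned long n)
 *	{
 *		char *d = to;
 *		const char __user *s = from;
 *		char c;
 *
 *		while (n && !__get_user(c, s)) {
 *			*d++ = c;
 *			s++;
 *			n--;
 *		}
 *		return n;
 *	}
 *
 * i.e. it copies until a fault occurs and returns how many bytes were
 * left, never zero-padding the destination.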
 * They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead. Out of those, __... ones are inlined. Plain
 * copy_{to,from}_user() might or might not be inlined. If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we do not take a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(from, n))) {
		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user
*from, unsigned long n) { might_fault(); if (access_ok(to, n) && access_ok(from, n)) n = raw_copy_in_user(to, from, n); return n; } #endif static __always_inline void pagefault_disabled_inc(void) { current->pagefault_disabled++; } static __always_inline void pagefault_disabled_dec(void) { current->pagefault_disabled--; } /* * These routines enable/disable the pagefault handler. If disabled, it will * not take any locks and go straight to the fixup table. * * User access methods will not sleep when called from a pagefault_disabled() * environment. */ static inline void pagefault_disable(void) { pagefault_disabled_inc(); /* * make sure to have issued the store before a pagefault * can hit. */ barrier(); } static inline void pagefault_enable(void) { /* * make sure to issue those last loads/stores before enabling * the pagefault handler again. */ barrier(); pagefault_disabled_dec(); } /* * Is the pagefault handler disabled? If so, user access methods will not sleep. */ static inline bool pagefault_disabled(void) { return current->pagefault_disabled != 0; } /* * The pagefault handler is in general disabled by pagefault_disable() or * when in irq context (via in_atomic()). * * This function should only be used by the fault handlers. Other users should * stick to pagefault_disabled(). * Please NEVER use preempt_disable() to disable the fault handler. With * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled. * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT. */ #define faulthandler_disabled() (pagefault_disabled() || in_atomic()) #ifndef ARCH_HAS_NOCACHE_UACCESS static inline __must_check unsigned long __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) { return __copy_from_user_inatomic(to, from, n); } #endif /* ARCH_HAS_NOCACHE_UACCESS */ extern __must_check int check_zeroed_user(const void __user *from, size_t size); /** * copy_struct_from_user: copy a struct from userspace * @dst: Destination address, in kernel space. This buffer must be @ksize * bytes long. * @ksize: Size of @dst struct. * @src: Source address, in userspace. * @usize: (Alleged) size of @src struct. * * Copies a struct from userspace to kernel space, in a way that guarantees * backwards-compatibility for struct syscall arguments (as long as future * struct extensions are made such that all new fields are *appended* to the * old struct, and zeroed-out new fields have the same meaning as the old * struct). * * @ksize is just sizeof(*dst), and @usize should've been passed by userspace. * The recommended usage is something like the following: * * SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize) * { * int err; * struct foo karg = {}; * * if (usize > PAGE_SIZE) * return -E2BIG; * if (usize < FOO_SIZE_VER0) * return -EINVAL; * * err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize); * if (err) * return err; * * // ... * } * * There are three cases to consider: * * If @usize == @ksize, then it's copied verbatim. * * If @usize < @ksize, then the userspace has passed an old struct to a * newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize) * are to be zero-filled. * * If @usize > @ksize, then the userspace has passed a new struct to an * older kernel. The trailing bytes unknown to the kernel (@usize - @ksize) * are checked to ensure they are zeroed, otherwise -E2BIG is returned. 
* * Returns (in all cases, some data may have been copied): * * -E2BIG: (@usize > @ksize) and there are non-zero trailing bytes in @src. * * -EFAULT: access to userspace failed. */ static __always_inline __must_check int copy_struct_from_user(void *dst, size_t ksize, const void __user *src, size_t usize) { size_t size = min(ksize, usize); size_t rest = max(ksize, usize) - size; /* Double check if ksize is larger than a known object size. */ if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1))) return -E2BIG; /* Deal with trailing bytes. */ if (usize < ksize) { memset(dst + size, 0, rest); } else if (usize > ksize) { int ret = check_zeroed_user(src + size, rest); if (ret <= 0) return ret ?: -E2BIG; } /* Copy the interoperable parts of the struct. */ if (copy_from_user(dst, src, size)) return -EFAULT; return 0; } /* * probe_kernel_read(): safely attempt to read from a location * @dst: pointer to the buffer that shall take the data * @src: address to read from * @size: size of the data chunk * * Safely read from address @src to the buffer at @dst. If a kernel fault * happens, handle that and return -EFAULT. */ extern long probe_kernel_read(void *dst, const void *src, size_t size); extern long __probe_kernel_read(void *dst, const void *src, size_t size); /* * probe_user_read(): safely attempt to read from a location in user space * @dst: pointer to the buffer that shall take the data * @src: address to read from * @size: size of the data chunk * * Safely read from address @src to the buffer at @dst. If a kernel fault * happens, handle that and return -EFAULT. */ extern long probe_user_read(void *dst, const void __user *src, size_t size); extern long __probe_user_read(void *dst, const void __user *src, size_t size); /* * probe_kernel_write(): safely attempt to write to a location * @dst: address to write to * @src: pointer to the data that shall be written * @size: size of the data chunk * * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. */ extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); /* * probe_user_write(): safely attempt to write to a location in user space * @dst: address to write to * @src: pointer to the data that shall be written * @size: size of the data chunk * * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. */ extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, long count); extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); /** * probe_kernel_address(): safely attempt to read from a location * @addr: address to read from * @retval: read into this variable * * Returns 0 on success, or -EFAULT. 
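 *
 * Example usage (a sketch; addr is a hypothetical kernel pointer):
 *
 *	unsigned int val;
 *
 *	if (probe_kernel_address(addr, val))
 *		pr_warn("%p is not readable\n", addr);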
*/ #define probe_kernel_address(addr, retval) \ probe_kernel_read(&retval, addr, sizeof(retval)) #ifndef user_access_begin #define user_access_begin(ptr,len) access_ok(ptr, len) #define user_access_end() do { } while (0) #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) #define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e) #define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e) #define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e) static inline unsigned long user_access_save(void) { return 0UL; } static inline void user_access_restore(unsigned long flags) { } #endif #ifdef CONFIG_HARDENED_USERCOPY void usercopy_warn(const char *name, const char *detail, bool to_user, unsigned long offset, unsigned long len); void __noreturn usercopy_abort(const char *name, const char *detail, bool to_user, unsigned long offset, unsigned long len); #endif #endif /* __LINUX_UACCESS_H__ */ bug.h 0000644 00000003657 14722070374 0005513 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BUG_H #define _LINUX_BUG_H #include <asm/bug.h> #include <linux/compiler.h> #include <linux/build_bug.h> enum bug_trap_type { BUG_TRAP_TYPE_NONE = 0, BUG_TRAP_TYPE_WARN = 1, BUG_TRAP_TYPE_BUG = 2, }; struct pt_regs; #ifdef __CHECKER__ #define MAYBE_BUILD_BUG_ON(cond) (0) #else /* __CHECKER__ */ #define MAYBE_BUILD_BUG_ON(cond) \ do { \ if (__builtin_constant_p((cond))) \ BUILD_BUG_ON(cond); \ else \ BUG_ON(cond); \ } while (0) #endif /* __CHECKER__ */ #ifdef CONFIG_GENERIC_BUG #include <asm-generic/bug.h> static inline int is_warning_bug(const struct bug_entry *bug) { return bug->flags & BUGFLAG_WARNING; } struct bug_entry *find_bug(unsigned long bugaddr); enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); /* These are defined by the architecture */ int is_valid_bugaddr(unsigned long addr); void generic_bug_clear_once(void); #else /* !CONFIG_GENERIC_BUG */ static inline void *find_bug(unsigned long bugaddr) { return NULL; } static inline enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs) { return BUG_TRAP_TYPE_BUG; } static inline void generic_bug_clear_once(void) {} #endif /* CONFIG_GENERIC_BUG */ /* * Since detected data corruption should stop operation on the affected * structures. Return value must be checked and sanely acted on by caller. */ static inline __must_check bool check_data_corruption(bool v) { return v; } #define CHECK_DATA_CORRUPTION(condition, fmt, ...) \ check_data_corruption(({ \ bool corruption = unlikely(condition); \ if (corruption) { \ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ pr_err(fmt, ##__VA_ARGS__); \ BUG(); \ } else \ WARN(1, fmt, ##__VA_ARGS__); \ } \ corruption; \ })) #endif /* _LINUX_BUG_H */ rwlock.h 0000644 00000010545 14722070374 0006231 0 ustar 00 #ifndef __LINUX_RWLOCK_H #define __LINUX_RWLOCK_H #ifndef __LINUX_SPINLOCK_H # error "please don't include this file directly" #endif /* * rwlock related methods * * split out from spinlock.h * * portions Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the General Public License (GPL). 
*/ #ifdef CONFIG_DEBUG_SPINLOCK extern void __rwlock_init(rwlock_t *lock, const char *name, struct lock_class_key *key); # define rwlock_init(lock) \ do { \ static struct lock_class_key __key; \ \ __rwlock_init((lock), #lock, &__key); \ } while (0) #else # define rwlock_init(lock) \ do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) #endif #ifdef CONFIG_DEBUG_SPINLOCK extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock); #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock) extern int do_raw_read_trylock(rwlock_t *lock); extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock); extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock); #define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock) extern int do_raw_write_trylock(rwlock_t *lock); extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock); #else #ifndef arch_read_lock_flags # define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #endif #ifndef arch_write_lock_flags # define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #endif # define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0) # define do_raw_read_lock_flags(lock, flags) \ do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0) # define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) # define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) # define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0) # define do_raw_write_lock_flags(lock, flags) \ do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0) # define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) # define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) #endif /* * Define the various rw_lock methods. Note we define these * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various * methods are defined as nops in the case they are not required. 
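 *
 * A minimal usage sketch (my_lock and flags are hypothetical):
 *
 *	DEFINE_RWLOCK(my_lock);
 *	unsigned long flags;
 *
 *	read_lock(&my_lock);
 *	... any number of readers may run here concurrently ...
 *	read_unlock(&my_lock);
 *
 *	write_lock_irqsave(&my_lock, flags);
 *	... one writer, local interrupts disabled ...
 *	write_unlock_irqrestore(&my_lock, flags);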
*/ #define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock)) #define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock)) #define write_lock(lock) _raw_write_lock(lock) #define read_lock(lock) _raw_read_lock(lock) #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) #define read_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_read_lock_irqsave(lock); \ } while (0) #define write_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ flags = _raw_write_lock_irqsave(lock); \ } while (0) #else #define read_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _raw_read_lock_irqsave(lock, flags); \ } while (0) #define write_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _raw_write_lock_irqsave(lock, flags); \ } while (0) #endif #define read_lock_irq(lock) _raw_read_lock_irq(lock) #define read_lock_bh(lock) _raw_read_lock_bh(lock) #define write_lock_irq(lock) _raw_write_lock_irq(lock) #define write_lock_bh(lock) _raw_write_lock_bh(lock) #define read_unlock(lock) _raw_read_unlock(lock) #define write_unlock(lock) _raw_write_unlock(lock) #define read_unlock_irq(lock) _raw_read_unlock_irq(lock) #define write_unlock_irq(lock) _raw_write_unlock_irq(lock) #define read_unlock_irqrestore(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _raw_read_unlock_irqrestore(lock, flags); \ } while (0) #define read_unlock_bh(lock) _raw_read_unlock_bh(lock) #define write_unlock_irqrestore(lock, flags) \ do { \ typecheck(unsigned long, flags); \ _raw_write_unlock_irqrestore(lock, flags); \ } while (0) #define write_unlock_bh(lock) _raw_write_unlock_bh(lock) #define write_trylock_irqsave(lock, flags) \ ({ \ local_irq_save(flags); \ write_trylock(lock) ? \ 1 : ({ local_irq_restore(flags); 0; }); \ }) #endif /* __LINUX_RWLOCK_H */ root_dev.h 0000644 00000001153 14722070374 0006544 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ROOT_DEV_H_ #define _ROOT_DEV_H_ #include <linux/major.h> #include <linux/types.h> #include <linux/kdev_t.h> enum { Root_NFS = MKDEV(UNNAMED_MAJOR, 255), Root_CIFS = MKDEV(UNNAMED_MAJOR, 254), Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0), Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1), Root_FD0 = MKDEV(FLOPPY_MAJOR, 0), Root_HDA1 = MKDEV(IDE0_MAJOR, 1), Root_HDA2 = MKDEV(IDE0_MAJOR, 2), Root_SDA1 = MKDEV(SCSI_DISK0_MAJOR, 1), Root_SDA2 = MKDEV(SCSI_DISK0_MAJOR, 2), Root_HDC1 = MKDEV(IDE1_MAJOR, 1), Root_SR0 = MKDEV(SCSI_CDROM_MAJOR, 0), }; extern dev_t ROOT_DEV; #endif dca.h 0000644 00000003602 14722070374 0005453 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. 
*/ #ifndef DCA_H #define DCA_H #include <linux/pci.h> /* DCA Provider API */ /* DCA Notifier Interface */ void dca_register_notify(struct notifier_block *nb); void dca_unregister_notify(struct notifier_block *nb); #define DCA_PROVIDER_ADD 0x0001 #define DCA_PROVIDER_REMOVE 0x0002 struct dca_provider { struct list_head node; const struct dca_ops *ops; struct device *cd; int id; }; struct dca_domain { struct list_head node; struct list_head dca_providers; struct pci_bus *pci_rc; }; struct dca_ops { int (*add_requester) (struct dca_provider *, struct device *); int (*remove_requester) (struct dca_provider *, struct device *); u8 (*get_tag) (struct dca_provider *, struct device *, int cpu); int (*dev_managed) (struct dca_provider *, struct device *); }; struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, int priv_size); void free_dca_provider(struct dca_provider *dca); int register_dca_provider(struct dca_provider *dca, struct device *dev); void unregister_dca_provider(struct dca_provider *dca, struct device *dev); static inline void *dca_priv(struct dca_provider *dca) { return (void *)dca + sizeof(struct dca_provider); } /* Requester API */ #define DCA_GET_TAG_TWO_ARGS int dca_add_requester(struct device *dev); int dca_remove_requester(struct device *dev); u8 dca_get_tag(int cpu); u8 dca3_get_tag(struct device *dev, int cpu); /* internal stuff */ int __init dca_sysfs_init(void); void __exit dca_sysfs_exit(void); int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev); void dca_sysfs_remove_provider(struct dca_provider *dca); int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot); void dca_sysfs_remove_req(struct dca_provider *dca, int slot); #endif /* DCA_H */ genetlink.h 0000644 00000002543 14722070374 0006707 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GENERIC_NETLINK_H #define __LINUX_GENERIC_NETLINK_H #include <uapi/linux/genetlink.h> /* All generic netlink requests are serialized by a global lock. */ extern void genl_lock(void); extern void genl_unlock(void); #ifdef CONFIG_LOCKDEP extern bool lockdep_genl_is_held(void); #endif /* for synchronisation between af_netlink and genetlink */ extern atomic_t genl_sk_destructing_cnt; extern wait_queue_head_t genl_sk_destructing_waitq; /** * rcu_dereference_genl - rcu_dereference with debug checking * @p: The pointer to read, prior to dereferencing * * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference() */ #define rcu_dereference_genl(p) \ rcu_dereference_check(p, lockdep_genl_is_held()) /** * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex * @p: The pointer to read, prior to dereferencing * * Return the value of the specified RCU-protected pointer, but omit * the READ_ONCE(), because caller holds genl mutex. 
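 *
 * Example (a sketch; my_family_ptr is a hypothetical RCU pointer that
 * is only updated with the genl mutex held):
 *
 *	genl_lock();
 *	p = genl_dereference(my_family_ptr);
 *	genl_unlock();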
*/ #define genl_dereference(p) \ rcu_dereference_protected(p, lockdep_genl_is_held()) #define MODULE_ALIAS_GENL_FAMILY(family)\ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) #endif /* __LINUX_GENERIC_NETLINK_H */ btree-128.h 0000644 00000005261 14722070374 0006340 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ extern struct btree_geo btree_geo128; struct btree_head128 { struct btree_head h; }; static inline void btree_init_mempool128(struct btree_head128 *head, mempool_t *mempool) { btree_init_mempool(&head->h, mempool); } static inline int btree_init128(struct btree_head128 *head) { return btree_init(&head->h); } static inline void btree_destroy128(struct btree_head128 *head) { btree_destroy(&head->h); } static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2) { u64 key[2] = {k1, k2}; return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key); } static inline void *btree_get_prev128(struct btree_head128 *head, u64 *k1, u64 *k2) { u64 key[2] = {*k1, *k2}; void *val; val = btree_get_prev(&head->h, &btree_geo128, (unsigned long *)&key); *k1 = key[0]; *k2 = key[1]; return val; } static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2, void *val, gfp_t gfp) { u64 key[2] = {k1, k2}; return btree_insert(&head->h, &btree_geo128, (unsigned long *)&key, val, gfp); } static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2, void *val) { u64 key[2] = {k1, k2}; return btree_update(&head->h, &btree_geo128, (unsigned long *)&key, val); } static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2) { u64 key[2] = {k1, k2}; return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key); } static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2) { u64 key[2]; void *val; val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]); if (val) { *k1 = key[0]; *k2 = key[1]; } return val; } static inline int btree_merge128(struct btree_head128 *target, struct btree_head128 *victim, gfp_t gfp) { return btree_merge(&target->h, &victim->h, &btree_geo128, gfp); } void visitor128(void *elem, unsigned long opaque, unsigned long *__key, size_t index, void *__func); typedef void (*visitor128_t)(void *elem, unsigned long opaque, u64 key1, u64 key2, size_t index); static inline size_t btree_visitor128(struct btree_head128 *head, unsigned long opaque, visitor128_t func2) { return btree_visitor(&head->h, &btree_geo128, opaque, visitor128, func2); } static inline size_t btree_grim_visitor128(struct btree_head128 *head, unsigned long opaque, visitor128_t func2) { return btree_grim_visitor(&head->h, &btree_geo128, opaque, visitor128, func2); } #define btree_for_each_safe128(head, k1, k2, val) \ for (val = btree_last128(head, &k1, &k2); \ val; \ val = btree_get_prev128(head, &k1, &k2)) b1pcmcia.h 0000644 00000001232 14722070374 0006400 0 ustar 00 /* $Id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai Exp $ * * Exported functions of module b1pcmcia to be called by * avm_cs card services module. * * Copyright 1999 by Carsten Paeth (calle@calle.in-berlin.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. 
 *
 */
#ifndef _B1PCMCIA_H_
#define _B1PCMCIA_H_

int b1pcmcia_addcard_b1(unsigned int port, unsigned irq);
int b1pcmcia_addcard_m1(unsigned int port, unsigned irq);
int b1pcmcia_addcard_m2(unsigned int port, unsigned irq);
int b1pcmcia_delcard(unsigned int port, unsigned irq);

#endif /* _B1PCMCIA_H_ */
proc_fs.h 0000644 00000015126 14722070374 0006363 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * The proc filesystem constants/structures
 */
#ifndef _LINUX_PROC_FS_H
#define _LINUX_PROC_FS_H

#include <linux/types.h>
#include <linux/fs.h>

struct proc_dir_entry;
struct seq_file;
struct seq_operations;

#ifdef CONFIG_PROC_FS

typedef int (*proc_write_t)(struct file *, char *, size_t);

extern void proc_root_init(void);
extern void proc_flush_task(struct task_struct *);

extern struct proc_dir_entry *proc_symlink(const char *,
		struct proc_dir_entry *, const char *);
struct proc_dir_entry *_proc_mkdir(const char *, umode_t,
		struct proc_dir_entry *, void *, bool);
extern struct proc_dir_entry *proc_mkdir(const char *,
		struct proc_dir_entry *);
extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
		struct proc_dir_entry *, void *);
extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
		struct proc_dir_entry *);
struct proc_dir_entry *proc_create_mount_point(const char *name);

struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
		struct proc_dir_entry *parent, const struct seq_operations *ops,
		unsigned int state_size, void *data);
#define proc_create_seq_data(name, mode, parent, ops, data) \
	proc_create_seq_private(name, mode, parent, ops, 0, data)
#define proc_create_seq(name, mode, parent, ops) \
	proc_create_seq_private(name, mode, parent, ops, 0, NULL)
struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
		struct proc_dir_entry *parent,
		int (*show)(struct seq_file *, void *), void *data);
#define proc_create_single(name, mode, parent, show) \
	proc_create_single_data(name, mode, parent, show, NULL)

extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
		struct proc_dir_entry *, const struct file_operations *,
		void *);
struct proc_dir_entry *proc_create(const char *name, umode_t mode,
		struct proc_dir_entry *parent,
		const struct file_operations *proc_fops);
extern void proc_set_size(struct proc_dir_entry *, loff_t);
extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
extern void *PDE_DATA(const struct inode *);
extern void *proc_get_parent_data(const struct inode *);
extern void proc_remove(struct proc_dir_entry *);
extern void remove_proc_entry(const char *, struct proc_dir_entry *);
extern int remove_proc_subtree(const char *, struct proc_dir_entry *);

struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
		struct proc_dir_entry *parent, const struct seq_operations *ops,
		unsigned int state_size, void *data);
#define proc_create_net(name, mode, parent, state_size, ops) \
	proc_create_net_data(name, mode, parent, ops, state_size, NULL)
struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
		struct proc_dir_entry *parent,
		int (*show)(struct seq_file *, void *), void *data);
struct proc_dir_entry *proc_create_net_data_write(const char *name,
		umode_t mode, struct proc_dir_entry *parent,
		const struct seq_operations *ops, proc_write_t write,
		unsigned int state_size, void *data);
struct proc_dir_entry *proc_create_net_single_write(const char *name,
		umode_t mode, struct proc_dir_entry *parent,
		int (*show)(struct seq_file *, void *),
		proc_write_t write,
		void
*data); extern struct pid *tgid_pidfd_to_pid(const struct file *file); #ifdef CONFIG_PROC_PID_ARCH_STATUS /* * The architecture which selects CONFIG_PROC_PID_ARCH_STATUS must * provide proc_pid_arch_status() definition. */ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task); #endif /* CONFIG_PROC_PID_ARCH_STATUS */ #else /* CONFIG_PROC_FS */ static inline void proc_root_init(void) { } static inline void proc_flush_task(struct task_struct *task) { } static inline struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent,const char *dest) { return NULL;} static inline struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) {return NULL;} static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; } static inline struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode, struct proc_dir_entry *parent, void *data, bool force_lookup) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode, struct proc_dir_entry *parent) { return NULL; } #define proc_create_seq_private(name, mode, parent, ops, size, data) ({NULL;}) #define proc_create_seq_data(name, mode, parent, ops, data) ({NULL;}) #define proc_create_seq(name, mode, parent, ops) ({NULL;}) #define proc_create_single(name, mode, parent, show) ({NULL;}) #define proc_create_single_data(name, mode, parent, show, data) ({NULL;}) #define proc_create(name, mode, parent, proc_fops) ({NULL;}) #define proc_create_data(name, mode, parent, proc_fops, data) ({NULL;}) static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {} static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {} static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;} static inline void *proc_get_parent_data(const struct inode *inode) { BUG(); return NULL; } static inline void proc_remove(struct proc_dir_entry *de) {} #define remove_proc_entry(name, parent) do {} while (0) static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; } #define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;}) #define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;}) #define proc_create_net(name, mode, parent, state_size, ops) ({NULL;}) #define proc_create_net_single(name, mode, parent, show, data) ({NULL;}) #define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;}) static inline struct pid *tgid_pidfd_to_pid(const struct file *file) { return ERR_PTR(-EBADF); } #endif /* CONFIG_PROC_FS */ struct net; static inline struct proc_dir_entry *proc_net_mkdir( struct net *net, const char *name, struct proc_dir_entry *parent) { return _proc_mkdir(name, 0, parent, net, true); } struct ns_common; int open_related_ns(struct ns_common *ns, struct ns_common *(*get_ns)(struct ns_common *ns)); /* get the associated pid namespace for a file in procfs */ static inline struct pid_namespace *proc_pid_ns(const struct inode *inode) { return inode->i_sb->s_fs_info; } #endif /* _LINUX_PROC_FS_H */ rwsem.h 0000644 00000014633 14722070374 0006067 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* rwsem.h: R/W semaphores, public interface * * Written by David Howells (dhowells@redhat.com). 
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline causing
 * cacheline bouncing problem.
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the cpu.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};

/*
 * Setting all bits of the owner field except bit 0 will indicate
 * that the rwsem is writer-owned with an unknown owner.
 */
#define RWSEM_OWNER_UNKNOWN	(-2L)

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != 0;
}

#define RWSEM_UNLOCKED_VALUE	0L
#define __RWSEM_INIT_COUNT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifdef CONFIG_DEBUG_RWSEMS
# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
#else
# define __DEBUG_RWSEM_INITIALIZER(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_INIT_COUNT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
	  __RWSEM_OPT_INIT(name)				\
	  __DEBUG_RWSEM_INITIALIZER(name)			\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic meant to be called by somebody already holding the
 * rwsem to see if somebody from an incompatible type is wanting access to the
 * lock.
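 *
 * A typical pattern (a sketch only; have_more_work() and
 * do_unit_of_work() are hypothetical placeholders) is to back off
 * voluntarily during a long read-side loop:
 *
 *	down_read(&sem);
 *	while (have_more_work()) {
 *		if (rwsem_is_contended(&sem)) {
 *			up_read(&sem);
 *			cond_resched();
 *			down_read(&sem);
 *		}
 *		do_unit_of_work();
 *	}
 *	up_read(&sem);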
*/ static inline int rwsem_is_contended(struct rw_semaphore *sem) { return !list_empty(&sem->wait_list); } /* * lock for reading */ extern void down_read(struct rw_semaphore *sem); extern int __must_check down_read_interruptible(struct rw_semaphore *sem); extern int __must_check down_read_killable(struct rw_semaphore *sem); /* * trylock for reading -- returns 1 if successful, 0 if contention */ extern int down_read_trylock(struct rw_semaphore *sem); /* * lock for writing */ extern void down_write(struct rw_semaphore *sem); extern int __must_check down_write_killable(struct rw_semaphore *sem); /* * trylock for writing -- returns 1 if successful, 0 if contention */ extern int down_write_trylock(struct rw_semaphore *sem); /* * release a read lock */ extern void up_read(struct rw_semaphore *sem); /* * release a write lock */ extern void up_write(struct rw_semaphore *sem); /* * downgrade write lock to read lock */ extern void downgrade_write(struct rw_semaphore *sem); #ifdef CONFIG_DEBUG_LOCK_ALLOC /* * nested locking. NOTE: rwsems are not allowed to recurse * (which occurs if the same task tries to acquire the same * lock instance multiple times), but multiple locks of the * same lock class might be taken, if the order of the locks * is always the same. This ordering rule can be expressed * to lockdep via the _nested() APIs, but enumerating the * subclasses that are used. (If the nesting relationship is * static then another method for expressing nested locking is * the explicit definition of lock class keys and the use of * lockdep_set_class() at lock initialization time. * See Documentation/locking/lockdep-design.rst for more details.) */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass); extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); # define down_write_nest_lock(sem, nest_lock) \ do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ _down_write_nest_lock(sem, &(nest_lock)->dep_map); \ } while (0); /* * Take/release a lock when not the owner will release it. * * [ This API should be avoided as much as possible - the * proper abstraction for this case is completions. ] */ extern void down_read_non_owner(struct rw_semaphore *sem); extern void up_read_non_owner(struct rw_semaphore *sem); #else # define down_read_nested(sem, subclass) down_read(sem) # define down_read_killable_nested(sem, subclass) down_read_killable(sem) # define down_write_nest_lock(sem, nest_lock) down_write(sem) # define down_write_nested(sem, subclass) down_write(sem) # define down_write_killable_nested(sem, subclass) down_write_killable(sem) # define down_read_non_owner(sem) down_read(sem) # define up_read_non_owner(sem) up_read(sem) #endif #endif /* _LINUX_RWSEM_H */ hw_random.h 0000644 00000004170 14722070374 0006703 0 ustar 00 /* Hardware Random Number Generator Please read Documentation/admin-guide/hw_random.rst for details on use. ---------------------------------------------------------- This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. 
*/ #ifndef LINUX_HWRANDOM_H_ #define LINUX_HWRANDOM_H_ #include <linux/completion.h> #include <linux/types.h> #include <linux/list.h> #include <linux/kref.h> /** * struct hwrng - Hardware Random Number Generator driver * @name: Unique RNG name. * @init: Initialization callback (can be NULL). * @cleanup: Cleanup callback (can be NULL). * @data_present: Callback to determine if data is available * on the RNG. If NULL, it is assumed that * there is always data available. *OBSOLETE* * @data_read: Read data from the RNG device. * Returns the number of lower random bytes in "data". * Must not be NULL. *OBSOLETE* * @read: New API. drivers can fill up to max bytes of data * into the buffer. The buffer is aligned for any type * and max is a multiple of 4 and >= 32 bytes. * @priv: Private data, for use by the RNG driver. * @quality: Estimation of true entropy in RNG's bitstream * (in bits of entropy per 1024 bits of input; * valid values: 1 to 1024, or 0 for unknown). */ struct hwrng { const char *name; int (*init)(struct hwrng *rng); void (*cleanup)(struct hwrng *rng); int (*data_present)(struct hwrng *rng, int wait); int (*data_read)(struct hwrng *rng, u32 *data); int (*read)(struct hwrng *rng, void *data, size_t max, bool wait); unsigned long priv; unsigned short quality; /* internal. */ struct list_head list; struct kref ref; struct completion cleanup_done; }; struct device; /** Register a new Hardware Random Number Generator driver. */ extern int hwrng_register(struct hwrng *rng); extern int devm_hwrng_register(struct device *dev, struct hwrng *rng); /** Unregister a Hardware Random Number Generator driver. */ extern void hwrng_unregister(struct hwrng *rng); extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); #endif /* LINUX_HWRANDOM_H_ */ fsl-diu-fb.h 0000644 00000007573 14722070374 0006667 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. * * Freescale DIU Frame Buffer device driver * * Authors: Hongjun Chen <hong-jun.chen@freescale.com> * Paul Widmer <paul.widmer@freescale.com> * Srikanth Srinivasan <srikanth.srinivasan@freescale.com> * York Sun <yorksun@freescale.com> * * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix */ #ifndef __FSL_DIU_FB_H__ #define __FSL_DIU_FB_H__ #include <linux/types.h> struct mfb_chroma_key { int enable; __u8 red_max; __u8 green_max; __u8 blue_max; __u8 red_min; __u8 green_min; __u8 blue_min; }; struct aoi_display_offset { __s32 x_aoi_d; __s32 y_aoi_d; }; #define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key) #define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8) #define MFB_SET_ALPHA _IOW('M', 0, __u8) #define MFB_GET_ALPHA _IOR('M', 0, __u8) #define MFB_SET_AOID _IOW('M', 4, struct aoi_display_offset) #define MFB_GET_AOID _IOR('M', 4, struct aoi_display_offset) #define MFB_SET_PIXFMT _IOW('M', 8, __u32) #define MFB_GET_PIXFMT _IOR('M', 8, __u32) /* * The MPC5121 BSP comes with a gamma_set utility that initializes the * gamma table. Unfortunately, it uses bad values for the IOCTL commands, * but there's nothing we can do about it now. These ioctls are only * supported on the MPC5121. */ #define MFB_SET_GAMMA _IOW('M', 1, __u8) #define MFB_GET_GAMMA _IOR('M', 1, __u8) /* * The original definitions of MFB_SET_PIXFMT and MFB_GET_PIXFMT used the * wrong value for 'size' field of the ioctl. The current macros above use the * right size, but we still need to provide backwards compatibility, at least * for a while. 
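 *
 * A handler can stay compatible with a mapping sketch like this
 * (illustrative only, not necessarily what the in-tree driver does):
 *
 *	switch (cmd) {
 *	case MFB_SET_PIXFMT_OLD:
 *		cmd = MFB_SET_PIXFMT;
 *		break;
 *	case MFB_GET_PIXFMT_OLD:
 *		cmd = MFB_GET_PIXFMT;
 *		break;
 *	}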
*/ #define MFB_SET_PIXFMT_OLD 0x80014d08 #define MFB_GET_PIXFMT_OLD 0x40014d08 #ifdef __KERNEL__ /* * These are the fields of area descriptor(in DDR memory) for every plane */ struct diu_ad { /* Word 0(32-bit) in DDR memory */ /* __u16 comp; */ /* __u16 pixel_s:2; */ /* __u16 palette:1; */ /* __u16 red_c:2; */ /* __u16 green_c:2; */ /* __u16 blue_c:2; */ /* __u16 alpha_c:3; */ /* __u16 byte_f:1; */ /* __u16 res0:3; */ __be32 pix_fmt; /* hard coding pixel format */ /* Word 1(32-bit) in DDR memory */ __le32 addr; /* Word 2(32-bit) in DDR memory */ /* __u32 delta_xs:11; */ /* __u32 res1:1; */ /* __u32 delta_ys:11; */ /* __u32 res2:1; */ /* __u32 g_alpha:8; */ __le32 src_size_g_alpha; /* Word 3(32-bit) in DDR memory */ /* __u32 delta_xi:11; */ /* __u32 res3:5; */ /* __u32 delta_yi:11; */ /* __u32 res4:3; */ /* __u32 flip:2; */ __le32 aoi_size; /* Word 4(32-bit) in DDR memory */ /*__u32 offset_xi:11; __u32 res5:5; __u32 offset_yi:11; __u32 res6:5; */ __le32 offset_xyi; /* Word 5(32-bit) in DDR memory */ /*__u32 offset_xd:11; __u32 res7:5; __u32 offset_yd:11; __u32 res8:5; */ __le32 offset_xyd; /* Word 6(32-bit) in DDR memory */ __u8 ckmax_r; __u8 ckmax_g; __u8 ckmax_b; __u8 res9; /* Word 7(32-bit) in DDR memory */ __u8 ckmin_r; __u8 ckmin_g; __u8 ckmin_b; __u8 res10; /* __u32 res10:8; */ /* Word 8(32-bit) in DDR memory */ __le32 next_ad; /* Word 9(32-bit) in DDR memory, just for 64-bit aligned */ __u32 paddr; } __attribute__ ((packed)); /* DIU register map */ struct diu { __be32 desc[3]; __be32 gamma; __be32 palette; __be32 cursor; __be32 curs_pos; __be32 diu_mode; __be32 bgnd; __be32 bgnd_wb; __be32 disp_size; __be32 wb_size; __be32 wb_mem_addr; __be32 hsyn_para; __be32 vsyn_para; __be32 syn_pol; __be32 thresholds; __be32 int_status; __be32 int_mask; __be32 colorbar[8]; __be32 filling; __be32 plut; } __attribute__ ((packed)); /* * Modes of operation of DIU. The DIU supports five different modes, but * the driver only supports modes 0 and 1. */ #define MFB_MODE0 0 /* DIU off */ #define MFB_MODE1 1 /* All three planes output to display */ #endif /* __KERNEL__ */ #endif /* __FSL_DIU_FB_H__ */ fanotify.h 0000644 00000005246 14722070374 0006551 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FANOTIFY_H #define _LINUX_FANOTIFY_H #include <uapi/linux/fanotify.h> #define FAN_GROUP_FLAG(group, flag) \ ((group)->fanotify_data.flags & (flag)) /* * Flags allowed to be passed from/to userspace. * * We intentionally do not add new bits to the old FAN_ALL_* constants, because * they are uapi exposed constants. If there are programs out there using * these constant, the programs may break if re-compiled with new uapi headers * and then run on an old kernel. */ #define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \ FAN_CLASS_PRE_CONTENT) #define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \ FAN_REPORT_TID | FAN_REPORT_FID | \ FAN_CLOEXEC | FAN_NONBLOCK | \ FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS) #define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \ FAN_MARK_FILESYSTEM) #define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \ FAN_MARK_ADD | \ FAN_MARK_REMOVE | \ FAN_MARK_DONT_FOLLOW | \ FAN_MARK_ONLYDIR | \ FAN_MARK_IGNORED_MASK | \ FAN_MARK_IGNORED_SURV_MODIFY | \ FAN_MARK_FLUSH) /* * Events that can be reported with data type FSNOTIFY_EVENT_PATH. * Note that FAN_MODIFY can also be reported with data type * FSNOTIFY_EVENT_INODE. 
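 *
 * For example (sketch), event-handling code can classify an event mask
 * with a simple test:
 *
 *	if (mask & FANOTIFY_PATH_EVENTS)
 *		... the event may carry path data ...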
*/ #define FANOTIFY_PATH_EVENTS (FAN_ACCESS | FAN_MODIFY | \ FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC) /* * Directory entry modification events - reported only to directory * where entry is modified and not to a watching parent. */ #define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE) /* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */ #define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \ FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF) /* Events that user can request to be notified on */ #define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \ FANOTIFY_INODE_EVENTS) /* Events that require a permission response from user */ #define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \ FAN_OPEN_EXEC_PERM) /* Extra flags that may be reported with event or control handling of events */ #define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR) /* Events that may be reported to user */ #define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \ FANOTIFY_PERM_EVENTS | \ FAN_Q_OVERFLOW | FAN_ONDIR) #define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \ FANOTIFY_EVENT_FLAGS) /* Do not use these old uapi constants internally */ #undef FAN_ALL_CLASS_BITS #undef FAN_ALL_INIT_FLAGS #undef FAN_ALL_MARK_FLAGS #undef FAN_ALL_EVENTS #undef FAN_ALL_PERM_EVENTS #undef FAN_ALL_OUTGOING_EVENTS #endif /* _LINUX_FANOTIFY_H */ securebits.h 0000644 00000000357 14722070374 0007100 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SECUREBITS_H #define _LINUX_SECUREBITS_H 1 #include <uapi/linux/securebits.h> #define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits)) #endif /* !_LINUX_SECUREBITS_H */ rtsx_common.h 0000644 00000001572 14722070374 0007300 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Driver for Realtek driver-based card reader * * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. * * Author: * Wei WANG <wei_wang@realsil.com.cn> */ #ifndef __RTSX_COMMON_H #define __RTSX_COMMON_H #define DRV_NAME_RTSX_PCI "rtsx_pci" #define DRV_NAME_RTSX_PCI_SDMMC "rtsx_pci_sdmmc" #define DRV_NAME_RTSX_PCI_MS "rtsx_pci_ms" #define RTSX_REG_PAIR(addr, val) (((u32)(addr) << 16) | (u8)(val)) #define RTSX_SSC_DEPTH_4M 0x01 #define RTSX_SSC_DEPTH_2M 0x02 #define RTSX_SSC_DEPTH_1M 0x03 #define RTSX_SSC_DEPTH_500K 0x04 #define RTSX_SSC_DEPTH_250K 0x05 #define RTSX_SD_CARD 0 #define RTSX_MS_CARD 1 #define CLK_TO_DIV_N 0 #define DIV_N_TO_CLK 1 struct platform_device; struct rtsx_slot { struct platform_device *p_dev; void (*card_event)(struct platform_device *p_dev); }; #endif eeprom_93xx46.h 0000644 00000001557 14722070374 0007267 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Module: eeprom_93xx46 * platform description for 93xx46 EEPROMs. */ #include <linux/gpio/consumer.h> struct eeprom_93xx46_platform_data { unsigned char flags; #define EE_ADDR8 0x01 /* 8 bit addr. cfg */ #define EE_ADDR16 0x02 /* 16 bit addr. cfg */ #define EE_READONLY 0x08 /* forbid writing */ unsigned int quirks; /* Single word read transfers only; no sequential read. */ #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) /* Instructions such as EWEN are (addrlen + 2) in length. */ #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) /* Add extra cycle after address during a read */ #define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2) /* * optional hooks to control additional logic * before and after spi transfer. 
*/ void (*prepare)(void *); void (*finish)(void *); struct gpio_desc *select; }; gpio.h 0000644 00000012546 14722070374 0005671 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * <linux/gpio.h> * * This is the LEGACY GPIO bulk include file, including legacy APIs. It is * used for GPIO drivers still referencing the global GPIO numberspace, * and should not be included in new code. * * If you're implementing a GPIO driver, only include <linux/gpio/driver.h> * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h> */ #ifndef __LINUX_GPIO_H #define __LINUX_GPIO_H #include <linux/errno.h> /* see Documentation/driver-api/gpio/legacy.rst */ /* make these flag values available regardless of GPIO kconfig options */ #define GPIOF_DIR_OUT (0 << 0) #define GPIOF_DIR_IN (1 << 0) #define GPIOF_INIT_LOW (0 << 1) #define GPIOF_INIT_HIGH (1 << 1) #define GPIOF_IN (GPIOF_DIR_IN) #define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW) #define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH) /* Gpio pin is active-low */ #define GPIOF_ACTIVE_LOW (1 << 2) /* Gpio pin is open drain */ #define GPIOF_OPEN_DRAIN (1 << 3) /* Gpio pin is open source */ #define GPIOF_OPEN_SOURCE (1 << 4) #define GPIOF_EXPORT (1 << 5) #define GPIOF_EXPORT_CHANGEABLE (1 << 6) #define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT) #define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE) /** * struct gpio - a structure describing a GPIO with configuration * @gpio: the GPIO number * @flags: GPIO configuration as specified by GPIOF_* * @label: a literal description string of this GPIO */ struct gpio { unsigned gpio; unsigned long flags; const char *label; }; #ifdef CONFIG_GPIOLIB #ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H #include <asm/gpio.h> #else #include <asm-generic/gpio.h> static inline int gpio_get_value(unsigned int gpio) { return __gpio_get_value(gpio); } static inline void gpio_set_value(unsigned int gpio, int value) { __gpio_set_value(gpio, value); } static inline int gpio_cansleep(unsigned int gpio) { return __gpio_cansleep(gpio); } static inline int gpio_to_irq(unsigned int gpio) { return __gpio_to_irq(gpio); } static inline int irq_to_gpio(unsigned int irq) { return -EINVAL; } #endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */ /* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */ struct device; int devm_gpio_request(struct device *dev, unsigned gpio, const char *label); int devm_gpio_request_one(struct device *dev, unsigned gpio, unsigned long flags, const char *label); void devm_gpio_free(struct device *dev, unsigned int gpio); #else /* ! 
CONFIG_GPIOLIB */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/bug.h> #include <linux/pinctrl/pinctrl.h> struct device; struct gpio_chip; struct pinctrl_dev; static inline bool gpio_is_valid(int number) { return false; } static inline int gpio_request(unsigned gpio, const char *label) { return -ENOSYS; } static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) { return -ENOSYS; } static inline int gpio_request_array(const struct gpio *array, size_t num) { return -ENOSYS; } static inline void gpio_free(unsigned gpio) { might_sleep(); /* GPIO can never have been requested */ WARN_ON(1); } static inline void gpio_free_array(const struct gpio *array, size_t num) { might_sleep(); /* GPIO can never have been requested */ WARN_ON(1); } static inline int gpio_direction_input(unsigned gpio) { return -ENOSYS; } static inline int gpio_direction_output(unsigned gpio, int value) { return -ENOSYS; } static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) { return -ENOSYS; } static inline int gpio_get_value(unsigned gpio) { /* GPIO can never have been requested or set as {in,out}put */ WARN_ON(1); return 0; } static inline void gpio_set_value(unsigned gpio, int value) { /* GPIO can never have been requested or set as output */ WARN_ON(1); } static inline int gpio_cansleep(unsigned gpio) { /* GPIO can never have been requested or set as {in,out}put */ WARN_ON(1); return 0; } static inline int gpio_get_value_cansleep(unsigned gpio) { /* GPIO can never have been requested or set as {in,out}put */ WARN_ON(1); return 0; } static inline void gpio_set_value_cansleep(unsigned gpio, int value) { /* GPIO can never have been requested or set as output */ WARN_ON(1); } static inline int gpio_export(unsigned gpio, bool direction_may_change) { /* GPIO can never have been requested or set as {in,out}put */ WARN_ON(1); return -EINVAL; } static inline int gpio_export_link(struct device *dev, const char *name, unsigned gpio) { /* GPIO can never have been exported */ WARN_ON(1); return -EINVAL; } static inline void gpio_unexport(unsigned gpio) { /* GPIO can never have been exported */ WARN_ON(1); } static inline int gpio_to_irq(unsigned gpio) { /* GPIO can never have been requested or set as input */ WARN_ON(1); return -EINVAL; } static inline int irq_to_gpio(unsigned irq) { /* irq can never have been returned from gpio_to_irq() */ WARN_ON(1); return -EINVAL; } static inline int devm_gpio_request(struct device *dev, unsigned gpio, const char *label) { WARN_ON(1); return -EINVAL; } static inline int devm_gpio_request_one(struct device *dev, unsigned gpio, unsigned long flags, const char *label) { WARN_ON(1); return -EINVAL; } static inline void devm_gpio_free(struct device *dev, unsigned int gpio) { WARN_ON(1); } #endif /* ! 
CONFIG_GPIOLIB */ #endif /* __LINUX_GPIO_H */ omapfb.h 0000644 00000001100 14722070374 0006161 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * File: include/linux/omapfb.h * * Framebuffer driver for TI OMAP boards * * Copyright (C) 2004 Nokia Corporation * Author: Imre Deak <imre.deak@nokia.com> */ #ifndef __LINUX_OMAPFB_H__ #define __LINUX_OMAPFB_H__ #include <uapi/linux/omapfb.h> struct omap_lcd_config { char panel_name[16]; char ctrl_name[16]; s16 nreset_gpio; u8 data_lines; }; struct omapfb_platform_data { struct omap_lcd_config lcd; }; void __init omapfb_set_lcd_config(const struct omap_lcd_config *config); #endif /* __LINUX_OMAPFB_H__ */ tee_drv.h 0000644 00000044312 14722070374 0006357 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015-2016, Linaro Limited */ #ifndef __TEE_DRV_H #define __TEE_DRV_H #include <linux/device.h> #include <linux/idr.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/tee.h> #include <linux/types.h> #include <linux/uuid.h> /* * The file describes the API provided by the generic TEE driver to the * specific TEE driver. */ #define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */ #define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */ #define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */ #define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */ #define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ #define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ #define TEE_SHM_PRIV BIT(7) /* Memory private to TEE driver */ struct device; struct tee_device; struct tee_shm; struct tee_shm_pool; /** * struct tee_context - driver specific context on file pointer data * @teedev: pointer to this drivers struct tee_device * @list_shm: List of shared memory object owned by this context * @data: driver specific context data, managed by the driver * @refcount: reference counter for this structure * @releasing: flag that indicates if context is being released right now. * It is needed to break circular dependency on context during * shared memory release. * @supp_nowait: flag that indicates that requests in this context should not * wait for tee-supplicant daemon to be started if not present * and just return with an error code. It is needed for requests * that arise from TEE-based kernel drivers that should be * non-blocking in nature.
*/ struct tee_context { struct tee_device *teedev; struct list_head list_shm; void *data; struct kref refcount; bool releasing; bool supp_nowait; }; struct tee_param_memref { size_t shm_offs; size_t size; struct tee_shm *shm; }; struct tee_param_value { u64 a; u64 b; u64 c; }; struct tee_param { u64 attr; union { struct tee_param_memref memref; struct tee_param_value value; } u; }; /** * struct tee_driver_ops - driver operations vtable * @get_version: returns version of driver * @open: called when the device file is opened * @release: release this open file * @open_session: open a new session * @close_session: close a session * @invoke_func: invoke a trusted function * @cancel_req: request cancel of an ongoing invoke or open * @supp_recv: called for supplicant to get a command * @supp_send: called for supplicant to send a response * @shm_register: register shared memory buffer in TEE * @shm_unregister: unregister shared memory buffer in TEE */ struct tee_driver_ops { void (*get_version)(struct tee_device *teedev, struct tee_ioctl_version_data *vers); int (*open)(struct tee_context *ctx); void (*release)(struct tee_context *ctx); int (*open_session)(struct tee_context *ctx, struct tee_ioctl_open_session_arg *arg, struct tee_param *param); int (*close_session)(struct tee_context *ctx, u32 session); int (*invoke_func)(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, struct tee_param *param); int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session); int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params, struct tee_param *param); int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, struct tee_param *param); int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, struct page **pages, size_t num_pages, unsigned long start); int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm); }; /** * struct tee_desc - Describes the TEE driver to the subsystem * @name: name of driver * @ops: driver operations vtable * @owner: module providing the driver * @flags: Extra properties of driver, defined by TEE_DESC_* below */ #define TEE_DESC_PRIVILEGED 0x1 struct tee_desc { const char *name; const struct tee_driver_ops *ops; struct module *owner; u32 flags; }; /** * tee_device_alloc() - Allocate a new struct tee_device instance * @teedesc: Descriptor for this driver * @dev: Parent device for this device * @pool: Shared memory pool, NULL if not used * @driver_data: Private driver data for this device * * Allocates a new struct tee_device instance. The device is * removed by tee_device_unregister(). * * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure */ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, struct device *dev, struct tee_shm_pool *pool, void *driver_data); /** * tee_device_register() - Registers a TEE device * @teedev: Device to register * * tee_device_unregister() needs to be called to remove the @teedev if * this function fails. * * @returns < 0 on failure */ int tee_device_register(struct tee_device *teedev); /** * tee_device_unregister() - Removes a TEE device * @teedev: Device to unregister * * This function should be called to remove the @teedev even if * tee_device_register() hasn't been called yet. Does nothing if * @teedev is NULL.
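 *
 * A hedged sketch (an editorial illustration; "my_desc", "my_pool" and
 * "driver_data" are hypothetical driver objects) of the usual probe-time
 * pairing:
 *
 *	teedev = tee_device_alloc(&my_desc, dev, my_pool, driver_data);
 *	if (IS_ERR(teedev))
 *		return PTR_ERR(teedev);
 *	rc = tee_device_register(teedev);
 *	if (rc) {
 *		tee_device_unregister(teedev);
 *		return rc;
 *	}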
*/ void tee_device_unregister(struct tee_device *teedev); /** * struct tee_shm - shared memory object * @teedev: device used to allocate the object * @ctx: context using the object, if NULL the context is gone * @link: link element * @paddr: physical address of the shared memory * @kaddr: virtual address of the shared memory * @size: size of shared memory * @offset: offset of buffer in user space * @pages: locked pages from userspace * @num_pages: number of locked pages * @refcount: reference counter * @flags: defined by TEE_SHM_* in tee_drv.h * @id: unique id of a shared memory object on this device * * This pool is only supposed to be accessed directly from the TEE * subsystem and from drivers that implement their own shm pool manager. */ struct tee_shm { struct tee_device *teedev; struct tee_context *ctx; struct list_head link; phys_addr_t paddr; void *kaddr; size_t size; unsigned int offset; struct page **pages; size_t num_pages; refcount_t refcount; u32 flags; int id; }; /** * struct tee_shm_pool_mgr - shared memory manager * @ops: operations * @private_data: private data for the shared memory manager */ struct tee_shm_pool_mgr { const struct tee_shm_pool_mgr_ops *ops; void *private_data; }; /** * struct tee_shm_pool_mgr_ops - shared memory pool manager operations * @alloc: called when allocating shared memory * @free: called when freeing shared memory * @destroy_poolmgr: called when destroying the pool manager */ struct tee_shm_pool_mgr_ops { int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, size_t size); void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr); }; /** * tee_shm_pool_alloc() - Create a shared memory pool from shm managers * @priv_mgr: manager for driver private shared memory allocations * @dmabuf_mgr: manager for dma-buf shared memory allocations * * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied * in @dmabuf, others will use the range provided by @priv. * * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. */ struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, struct tee_shm_pool_mgr *dmabuf_mgr); /* * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved * memory * @vaddr: Virtual address of start of pool * @paddr: Physical address of start of pool * @size: Size in bytes of the pool * * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure. */ struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, phys_addr_t paddr, size_t size, int min_alloc_order); /** * tee_shm_pool_mgr_destroy() - Free a shared memory manager */ static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) { poolm->ops->destroy_poolmgr(poolm); } /** * struct tee_shm_pool_mem_info - holds information needed to create a shared * memory pool * @vaddr: Virtual address of start of pool * @paddr: Physical address of start of pool * @size: Size in bytes of the pool */ struct tee_shm_pool_mem_info { unsigned long vaddr; phys_addr_t paddr; size_t size; }; /** * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved * memory range * @priv_info: Information for driver private shared memory pool * @dmabuf_info: Information for dma-buf shared memory pool * * Start and end of pools must be page aligned. * * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied * in @dmabuf, others will use the range provided by @priv.
* * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. */ struct tee_shm_pool * tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, struct tee_shm_pool_mem_info *dmabuf_info); /** * tee_shm_pool_free() - Free a shared memory pool * @pool: The shared memory pool to free * * There must be no remaining shared memory allocated from this pool when * this function is called. */ void tee_shm_pool_free(struct tee_shm_pool *pool); /** * tee_get_drvdata() - Return driver_data pointer * @returns the driver_data pointer supplied to tee_device_alloc(). */ void *tee_get_drvdata(struct tee_device *teedev); /** * tee_shm_alloc() - Allocate shared memory * @ctx: Context that allocates the shared memory * @size: Requested size of shared memory * @flags: Flags setting properties for the requested shared memory. * * Memory allocated as global shared memory is automatically freed when the * TEE file pointer is closed. The @flags field uses the bits defined by * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If * TEE_SHM_DMA_BUF is set, global shared memory will be allocated and associated * with a dma-buf handle, else driver private memory. * * @returns a pointer to 'struct tee_shm' */ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); /** * tee_shm_priv_alloc() - Allocate shared memory privately * @teedev: Device that allocates the shared memory * @size: Requested size of shared memory * * Allocates shared memory buffer that is not associated with any client * context. Such buffers are owned by TEE driver and used for internal calls. * * @returns a pointer to 'struct tee_shm' */ struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size); /** * tee_shm_register() - Register shared memory buffer * @ctx: Context that registers the shared memory * @addr: Address in userspace of the shared buffer * @length: Length of the shared buffer * @flags: Flags setting properties for the requested shared memory.
* * @returns a pointer to 'struct tee_shm' */ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, size_t length, u32 flags); /** * tee_shm_is_registered() - Check if shared memory object is registered in TEE * @shm: Shared memory handle * @returns true if object is registered in TEE */ static inline bool tee_shm_is_registered(struct tee_shm *shm) { return shm && (shm->flags & TEE_SHM_REGISTER); } /** * tee_shm_free() - Free shared memory * @shm: Handle to shared memory to free */ void tee_shm_free(struct tee_shm *shm); /** * tee_shm_put() - Decrease reference count on a shared memory handle * @shm: Shared memory handle */ void tee_shm_put(struct tee_shm *shm); /** * tee_shm_va2pa() - Get physical address of a virtual address * @shm: Shared memory handle * @va: Virtual address to translate * @pa: Returned physical address * @returns 0 on success and < 0 on failure */ int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa); /** * tee_shm_pa2va() - Get virtual address of a physical address * @shm: Shared memory handle * @pa: Physical address to translate * @va: Returned virtual address * @returns 0 on success and < 0 on failure */ int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va); /** * tee_shm_get_va() - Get virtual address of a shared memory plus an offset * @shm: Shared memory handle * @offs: Offset from start of this shared memory * @returns virtual address of the shared memory + offs if offs is within * the bounds of this shared memory, else an ERR_PTR */ void *tee_shm_get_va(struct tee_shm *shm, size_t offs); /** * tee_shm_get_pa() - Get physical address of a shared memory plus an offset * @shm: Shared memory handle * @offs: Offset from start of this shared memory * @pa: Physical address to return * @returns 0 if offs is within the bounds of this shared memory, else an * error code. */ int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa); /** * tee_shm_get_size() - Get size of shared memory buffer * @shm: Shared memory handle * @returns size of shared memory */ static inline size_t tee_shm_get_size(struct tee_shm *shm) { return shm->size; } /** * tee_shm_get_pages() - Get list of pages that hold shared buffer * @shm: Shared memory handle * @num_pages: Number of pages will be stored there * @returns pointer to pages array */ static inline struct page **tee_shm_get_pages(struct tee_shm *shm, size_t *num_pages) { *num_pages = shm->num_pages; return shm->pages; } /** * tee_shm_get_page_offset() - Get shared buffer offset from page start * @shm: Shared memory handle * @returns page offset of shared buffer */ static inline size_t tee_shm_get_page_offset(struct tee_shm *shm) { return shm->offset; } /** * tee_shm_get_id() - Get id of a shared memory object * @shm: Shared memory handle * @returns id */ static inline int tee_shm_get_id(struct tee_shm *shm) { return shm->id; } /** * tee_shm_get_from_id() - Find shared memory object and increase reference * count * @ctx: Context owning the shared memory * @id: Id of shared memory object * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure */ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); /** * tee_client_open_context() - Open a TEE context * @start: if not NULL, continue search after this context * @match: function to check TEE device * @data: data for match function * @vers: if not NULL, version data of TEE device of the context returned * * This function does an operation similar to open("/dev/teeX") in user space.
* A returned context must be released with tee_client_close_context(). * * Returns a TEE context of the first TEE device matched by the match() * callback or an ERR_PTR. */ struct tee_context * tee_client_open_context(struct tee_context *start, int (*match)(struct tee_ioctl_version_data *, const void *), const void *data, struct tee_ioctl_version_data *vers); /** * tee_client_close_context() - Close a TEE context * @ctx: TEE context to close * * Note that all sessions previously opened with this context will be * closed when this function is called. */ void tee_client_close_context(struct tee_context *ctx); /** * tee_client_get_version() - Query version of TEE * @ctx: TEE context to TEE to query * @vers: Pointer to version data */ void tee_client_get_version(struct tee_context *ctx, struct tee_ioctl_version_data *vers); /** * tee_client_open_session() - Open a session to a Trusted Application * @ctx: TEE context * @arg: Open session arguments, see description of * struct tee_ioctl_open_session_arg * @param: Parameters passed to the Trusted Application * * Returns < 0 on error else see @arg->ret for result. If @arg->ret * is TEEC_SUCCESS the session identifier is available in @arg->session. */ int tee_client_open_session(struct tee_context *ctx, struct tee_ioctl_open_session_arg *arg, struct tee_param *param); /** * tee_client_close_session() - Close a session to a Trusted Application * @ctx: TEE Context * @session: Session id * * Returns < 0 on error, else 0. In either case the session is no longer * valid after this function has returned. */ int tee_client_close_session(struct tee_context *ctx, u32 session); /** * tee_client_invoke_func() - Invoke a function in a Trusted Application * @ctx: TEE Context * @arg: Invoke arguments, see description of * struct tee_ioctl_invoke_arg * @param: Parameters passed to the Trusted Application * * Returns < 0 on error else see @arg->ret for result. */ int tee_client_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, struct tee_param *param); /** * tee_client_cancel_req() - Request cancellation of the previous open-session * or invoke-command operations in a Trusted Application * @ctx: TEE Context * @arg: Cancellation arguments, see description of * struct tee_ioctl_cancel_arg * * Returns < 0 on error else 0 if the cancellation was successfully requested. */ int tee_client_cancel_req(struct tee_context *ctx, struct tee_ioctl_cancel_arg *arg); static inline bool tee_param_is_memref(struct tee_param *param) { switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: return true; default: return false; } } extern struct bus_type tee_bus_type; /** * struct tee_client_device - tee based device * @id: device identifier * @dev: device structure */ struct tee_client_device { struct tee_client_device_id id; struct device dev; }; #define to_tee_client_device(d) container_of(d, struct tee_client_device, dev) /** * struct tee_client_driver - tee client driver * @id_table: device id table supported by this driver * @driver: driver structure */ struct tee_client_driver { const struct tee_client_device_id *id_table; struct device_driver driver; }; #define to_tee_client_driver(d) \ container_of(d, struct tee_client_driver, driver) /** * teedev_open() - Open a struct tee_device * @teedev: Device to open * * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
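 *
 * A hedged sketch (an editorial illustration) of pairing this with
 * teedev_close_context(), declared just below:
 *
 *	ctx = teedev_open(teedev);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	teedev_close_context(ctx);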
*/ struct tee_context *teedev_open(struct tee_device *teedev); /** * teedev_close_context() - closes a struct tee_context * @ctx: The struct tee_context to close */ void teedev_close_context(struct tee_context *ctx); #endif /*__TEE_DRV_H*/ elf-randomize.h 0000644 00000001107 14722070374 0007456 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ELF_RANDOMIZE_H #define _ELF_RANDOMIZE_H struct mm_struct; #ifndef CONFIG_ARCH_HAS_ELF_RANDOMIZE static inline unsigned long arch_mmap_rnd(void) { return 0; } # if defined(arch_randomize_brk) && defined(CONFIG_COMPAT_BRK) # define compat_brk_randomized # endif # ifndef arch_randomize_brk # define arch_randomize_brk(mm) (mm->brk) # endif #else extern unsigned long arch_mmap_rnd(void); extern unsigned long arch_randomize_brk(struct mm_struct *mm); # ifdef CONFIG_COMPAT_BRK # define compat_brk_randomized # endif #endif #endif cache.h 0000644 00000004206 14722070374 0005770 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_CACHE_H #define __LINUX_CACHE_H #include <uapi/linux/kernel.h> #include <asm/cache.h> #ifndef L1_CACHE_ALIGN #define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES) #endif #ifndef SMP_CACHE_BYTES #define SMP_CACHE_BYTES L1_CACHE_BYTES #endif /* * __read_mostly is used to keep rarely changing variables out of frequently * updated cachelines. If an architecture doesn't support it, ignore the * hint. */ #ifndef __read_mostly #define __read_mostly #endif /* * __ro_after_init is used to mark things that are read-only after init (i.e. * after mark_rodata_ro() has been called). These are effectively read-only, * but may get written to during init, so can't live in .rodata (via "const"). */ #ifndef __ro_after_init #define __ro_after_init __attribute__((__section__(".data..ro_after_init"))) #endif #ifndef ____cacheline_aligned #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) #endif #ifndef ____cacheline_aligned_in_smp #ifdef CONFIG_SMP #define ____cacheline_aligned_in_smp ____cacheline_aligned #else #define ____cacheline_aligned_in_smp #endif /* CONFIG_SMP */ #endif #ifndef __cacheline_aligned #define __cacheline_aligned \ __attribute__((__aligned__(SMP_CACHE_BYTES), \ __section__(".data..cacheline_aligned"))) #endif /* __cacheline_aligned */ #ifndef __cacheline_aligned_in_smp #ifdef CONFIG_SMP #define __cacheline_aligned_in_smp __cacheline_aligned #else #define __cacheline_aligned_in_smp #endif /* CONFIG_SMP */ #endif /* * The maximum alignment needed for some critical structures * These could be inter-node cacheline sizes/L3 cacheline * size etc. Define this in asm/cache.h for your arch */ #ifndef INTERNODE_CACHE_SHIFT #define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT #endif #if !defined(____cacheline_internodealigned_in_smp) #if defined(CONFIG_SMP) #define ____cacheline_internodealigned_in_smp \ __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) #else #define ____cacheline_internodealigned_in_smp #endif #endif #ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE #define cache_line_size() L1_CACHE_BYTES #endif #endif /* __LINUX_CACHE_H */ kobject.h 0000644 00000017306 14722070374 0006353 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * kobject.h - generic kernel object infrastructure. * * Copyright (c) 2002-2003 Patrick Mochel * Copyright (c) 2002-2003 Open Source Development Labs * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> * Copyright (c) 2006-2008 Novell Inc. 
* * Please read Documentation/kobject.txt before using the kobject * interface, ESPECIALLY the parts about reference counts and object * destructors. */ #ifndef _KOBJECT_H_ #define _KOBJECT_H_ #include <linux/types.h> #include <linux/list.h> #include <linux/sysfs.h> #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/kref.h> #include <linux/kobject_ns.h> #include <linux/kernel.h> #include <linux/wait.h> #include <linux/atomic.h> #include <linux/workqueue.h> #include <linux/uidgid.h> #define UEVENT_HELPER_PATH_LEN 256 #define UEVENT_NUM_ENVP 32 /* number of env pointers */ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ #ifdef CONFIG_UEVENT_HELPER /* path to the userspace helper executed on an event */ extern char uevent_helper[]; #endif /* counter to tag the uevent, read only except for the kobject core */ extern u64 uevent_seqnum; /* * The actions here must match the index to the string array * in lib/kobject_uevent.c * * Do not add new actions here without checking with the driver-core * maintainers. Action strings are not meant to express subsystem * or device specific properties. In most cases you want to send a * kobject_uevent_env(kobj, KOBJ_CHANGE, env) with additional event * specific variables added to the event environment. */ enum kobject_action { KOBJ_ADD, KOBJ_REMOVE, KOBJ_CHANGE, KOBJ_MOVE, KOBJ_ONLINE, KOBJ_OFFLINE, KOBJ_BIND, KOBJ_UNBIND, KOBJ_MAX }; struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct kernfs_node *sd; /* sysfs directory entry */ struct kref kref; #ifdef CONFIG_DEBUG_KOBJECT_RELEASE struct delayed_work release; #endif unsigned int state_initialized:1; unsigned int state_in_sysfs:1; unsigned int state_add_uevent_sent:1; unsigned int state_remove_uevent_sent:1; unsigned int uevent_suppress:1; }; extern __printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...); extern __printf(2, 0) int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs); static inline const char *kobject_name(const struct kobject *kobj) { return kobj->name; } extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype); extern __printf(3, 4) __must_check int kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...); extern __printf(4, 5) __must_check int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...); extern void kobject_del(struct kobject *kobj); extern struct kobject * __must_check kobject_create(void); extern struct kobject * __must_check kobject_create_and_add(const char *name, struct kobject *parent); extern int __must_check kobject_rename(struct kobject *, const char *new_name); extern int __must_check kobject_move(struct kobject *, struct kobject *); extern struct kobject *kobject_get(struct kobject *kobj); extern struct kobject * __must_check kobject_get_unless_zero( struct kobject *kobj); extern void kobject_put(struct kobject *kobj); extern const void *kobject_namespace(struct kobject *kobj); extern void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid); extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); /** * kobject_has_children - Returns whether a kobject has children. * @kobj: the object to test * * This will return whether a kobject has other kobjects as children. * * It does NOT account for the presence of attribute files, only sub * directories. 
It also assumes there is no concurrent addition or * removal of such children, and thus relies on external locking. */ static inline bool kobject_has_children(struct kobject *kobj) { WARN_ON_ONCE(kref_read(&kobj->kref) == 0); return kobj->sd && kobj->sd->dir.subdirs; } struct kobj_type { void (*release)(struct kobject *kobj); const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; /* use default_groups instead */ const struct attribute_group **default_groups; const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); const void *(*namespace)(struct kobject *kobj); void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid); }; struct kobj_uevent_env { char *argv[3]; char *envp[UEVENT_NUM_ENVP]; int envp_idx; char buf[UEVENT_BUFFER_SIZE]; int buflen; }; struct kset_uevent_ops { int (* const filter)(struct kset *kset, struct kobject *kobj); const char *(* const name)(struct kset *kset, struct kobject *kobj); int (* const uevent)(struct kset *kset, struct kobject *kobj, struct kobj_uevent_env *env); }; struct kobj_attribute { struct attribute attr; ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, char *buf); ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count); }; extern const struct sysfs_ops kobj_sysfs_ops; struct sock; /** * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem. * * A kset defines a group of kobjects. They can be individually * different "types" but overall these kobjects all want to be grouped * together and operated on in the same manner. ksets are used to * define the attribute callbacks and other common events that happen to * a kobject. * * @list: the list of all kobjects for this kset * @list_lock: a lock for iterating over the kobjects * @kobj: the embedded kobject for this kset (recursion, isn't it fun...) * @uevent_ops: the set of uevent operations for this kset. These are * called whenever a kobject has something happen to it so that the kset * can add new environment variables, or filter out the uevents if so * desired. */ struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; } __randomize_layout; extern void kset_init(struct kset *kset); extern int __must_check kset_register(struct kset *kset); extern void kset_unregister(struct kset *kset); extern struct kset * __must_check kset_create_and_add(const char *name, const struct kset_uevent_ops *u, struct kobject *parent_kobj); static inline struct kset *to_kset(struct kobject *kobj) { return kobj ? container_of(kobj, struct kset, kobj) : NULL; } static inline struct kset *kset_get(struct kset *k) { return k ? 
to_kset(kobject_get(&k->kobj)) : NULL; } static inline void kset_put(struct kset *k) { kobject_put(&k->kobj); } static inline struct kobj_type *get_ktype(struct kobject *kobj) { return kobj->ktype; } extern struct kobject *kset_find_obj(struct kset *, const char *); /* The global /sys/kernel/ kobject for people to chain off of */ extern struct kobject *kernel_kobj; /* The global /sys/kernel/mm/ kobject for people to chain off of */ extern struct kobject *mm_kobj; /* The global /sys/hypervisor/ kobject for people to chain off of */ extern struct kobject *hypervisor_kobj; /* The global /sys/power/ kobject for people to chain off of */ extern struct kobject *power_kobj; /* The global /sys/firmware/ kobject for people to chain off of */ extern struct kobject *firmware_kobj; int kobject_uevent(struct kobject *kobj, enum kobject_action action); int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, char *envp[]); int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count); __printf(2, 3) int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...); #endif /* _KOBJECT_H_ */ rcupdate_wait.h 0000644 00000001601 14722070374 0007554 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_RCUPDATE_WAIT_H #define _LINUX_SCHED_RCUPDATE_WAIT_H /* * RCU synchronization types and methods: */ #include <linux/rcupdate.h> #include <linux/completion.h> /* * Structure allowing asynchronous waiting on RCU. */ struct rcu_synchronize { struct rcu_head head; struct completion completion; }; void wakeme_after_rcu(struct rcu_head *head); void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array); #define _wait_rcu_gp(checktiny, ...) \ do { \ call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ __crcu_array, __rs_array); \ } while (0) #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ stmp_device.h 0000644 00000000634 14722070374 0007230 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * basic functions for devices following the "stmp" style register layout * * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. 
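 *
 * A hedged usage sketch (an editorial illustration; "base" and "mask" are
 * hypothetical): the SET/CLR/TOG shadow registers defined below let a
 * driver flip bits without a read-modify-write cycle:
 *
 *	writel(mask, base + STMP_OFFSET_REG_SET);   // set the bits in mask
 *	writel(mask, base + STMP_OFFSET_REG_CLR);   // clear the bits in mask
 *	writel(mask, base + STMP_OFFSET_REG_TOG);   // toggle the bits in mask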
*/ #ifndef __STMP_DEVICE_H__ #define __STMP_DEVICE_H__ #define STMP_OFFSET_REG_SET 0x4 #define STMP_OFFSET_REG_CLR 0x8 #define STMP_OFFSET_REG_TOG 0xc extern int stmp_reset_block(void __iomem *); #endif /* __STMP_DEVICE_H__ */ of_dma.h 0000644 00000004461 14722070374 0006155 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * OF helpers for DMA request / controller * * Based on of_gpio.h * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ */ #ifndef __LINUX_OF_DMA_H #define __LINUX_OF_DMA_H #include <linux/of.h> #include <linux/dmaengine.h> struct device_node; struct of_dma { struct list_head of_dma_controllers; struct device_node *of_node; struct dma_chan *(*of_dma_xlate) (struct of_phandle_args *, struct of_dma *); void *(*of_dma_route_allocate) (struct of_phandle_args *, struct of_dma *); struct dma_router *dma_router; void *of_dma_data; }; struct of_dma_filter_info { dma_cap_mask_t dma_cap; dma_filter_fn filter_fn; }; #ifdef CONFIG_DMA_OF extern int of_dma_controller_register(struct device_node *np, struct dma_chan *(*of_dma_xlate) (struct of_phandle_args *, struct of_dma *), void *data); extern void of_dma_controller_free(struct device_node *np); extern int of_dma_router_register(struct device_node *np, void *(*of_dma_route_allocate) (struct of_phandle_args *, struct of_dma *), struct dma_router *dma_router); #define of_dma_router_free of_dma_controller_free extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, const char *name); extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma); extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, struct of_dma *ofdma); #else static inline int of_dma_controller_register(struct device_node *np, struct dma_chan *(*of_dma_xlate) (struct of_phandle_args *, struct of_dma *), void *data) { return -ENODEV; } static inline void of_dma_controller_free(struct device_node *np) { } static inline int of_dma_router_register(struct device_node *np, void *(*of_dma_route_allocate) (struct of_phandle_args *, struct of_dma *), struct dma_router *dma_router) { return -ENODEV; } #define of_dma_router_free of_dma_controller_free static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, const char *name) { return ERR_PTR(-ENODEV); } static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { return NULL; } #define of_dma_xlate_by_chan_id NULL #endif #endif /* __LINUX_OF_DMA_H */ mei_cl_bus.h 0000644 00000006664 14722070374 0007040 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013-2016, Intel Corporation. All rights reserved. */ #ifndef _LINUX_MEI_CL_BUS_H #define _LINUX_MEI_CL_BUS_H #include <linux/device.h> #include <linux/uuid.h> #include <linux/mod_devicetable.h> struct mei_cl_device; struct mei_device; typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); /** * struct mei_cl_device - MEI device handle * An mei_cl_device pointer is returned from mei_add_device() * and links MEI bus clients to their actual ME host client pointer. * Drivers for MEI devices will get an mei_cl_device pointer * when being probed and shall use it for doing ME bus I/O. 
* * @bus_list: device on the bus list * @bus: parent mei device * @dev: linux driver model device pointer * @me_cl: me client * @cl: mei client * @name: device name * @rx_work: async work to execute Rx event callback * @rx_cb: Drivers register this callback to get asynchronous ME * Rx buffer pending notifications. * @notif_work: async work to execute FW notif event callback * @notif_cb: Drivers register this callback to get asynchronous ME * FW notification pending notifications. * * @do_match: whether the device can be matched with a driver * @is_added: device is already scanned * @priv_data: client private data */ struct mei_cl_device { struct list_head bus_list; struct mei_device *bus; struct device dev; struct mei_me_client *me_cl; struct mei_cl *cl; char name[MEI_CL_NAME_SIZE]; struct work_struct rx_work; mei_cldev_cb_t rx_cb; struct work_struct notif_work; mei_cldev_cb_t notif_cb; unsigned int do_match:1; unsigned int is_added:1; void *priv_data; }; #define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev) struct mei_cl_driver { struct device_driver driver; const char *name; const struct mei_cl_device_id *id_table; int (*probe)(struct mei_cl_device *cldev, const struct mei_cl_device_id *id); int (*remove)(struct mei_cl_device *cldev); }; int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, struct module *owner); #define mei_cldev_driver_register(cldrv) \ __mei_cldev_driver_register(cldrv, THIS_MODULE) void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); /** * module_mei_cl_driver - Helper macro for registering mei cl driver * * @__mei_cldrv: mei_cl_driver structure * * Helper macro for mei cl drivers which do not do anything special in module * init/exit, eliminating boilerplate code. */ #define module_mei_cl_driver(__mei_cldrv) \ module_driver(__mei_cldrv, \ mei_cldev_driver_register,\ mei_cldev_driver_unregister) ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, size_t length); int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, mei_cldev_cb_t notif_cb); const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); u8 mei_cldev_ver(const struct mei_cl_device *cldev); void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev); void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); int mei_cldev_enable(struct mei_cl_device *cldev); int mei_cldev_disable(struct mei_cl_device *cldev); bool mei_cldev_enabled(struct mei_cl_device *cldev); #endif /* _LINUX_MEI_CL_BUS_H */ highmem.h 0000644 00000015427 14722070374 0006352 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HIGHMEM_H #define _LINUX_HIGHMEM_H #include <linux/fs.h> #include <linux/kernel.h> #include <linux/bug.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/hardirq.h> #include <asm/cacheflush.h> #ifndef ARCH_HAS_FLUSH_ANON_PAGE static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { } #endif #ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE static inline void flush_kernel_dcache_page(struct page *page) { } static inline void flush_kernel_vmap_range(void *vaddr, int size) { } static inline void invalidate_kernel_vmap_range(void *vaddr, int size) { } #endif #include <asm/kmap_types.h> #ifdef CONFIG_HIGHMEM #include <asm/highmem.h> /*
declarations for linux/mm/highmem.c */ unsigned int nr_free_highpages(void); extern atomic_long_t _totalhigh_pages; static inline unsigned long totalhigh_pages(void) { return (unsigned long)atomic_long_read(&_totalhigh_pages); } static inline void totalhigh_pages_inc(void) { atomic_long_inc(&_totalhigh_pages); } static inline void totalhigh_pages_dec(void) { atomic_long_dec(&_totalhigh_pages); } static inline void totalhigh_pages_add(long count) { atomic_long_add(count, &_totalhigh_pages); } static inline void totalhigh_pages_set(long val) { atomic_long_set(&_totalhigh_pages, val); } void kmap_flush_unused(void); struct page *kmap_to_page(void *addr); #else /* CONFIG_HIGHMEM */ static inline unsigned int nr_free_highpages(void) { return 0; } static inline struct page *kmap_to_page(void *addr) { return virt_to_page(addr); } static inline unsigned long totalhigh_pages(void) { return 0UL; } #ifndef ARCH_HAS_KMAP static inline void *kmap(struct page *page) { might_sleep(); return page_address(page); } static inline void kunmap(struct page *page) { } static inline void *kmap_atomic(struct page *page) { preempt_disable(); pagefault_disable(); return page_address(page); } #define kmap_atomic_prot(page, prot) kmap_atomic(page) static inline void __kunmap_atomic(void *addr) { pagefault_enable(); preempt_enable(); } #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) #define kmap_flush_unused() do {} while(0) #endif #endif /* CONFIG_HIGHMEM */ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) DECLARE_PER_CPU(int, __kmap_atomic_idx); static inline int kmap_atomic_idx_push(void) { int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; #ifdef CONFIG_DEBUG_HIGHMEM WARN_ON_ONCE(in_irq() && !irqs_disabled()); BUG_ON(idx >= KM_TYPE_NR); #endif return idx; } static inline int kmap_atomic_idx(void) { return __this_cpu_read(__kmap_atomic_idx) - 1; } static inline void kmap_atomic_idx_pop(void) { #ifdef CONFIG_DEBUG_HIGHMEM int idx = __this_cpu_dec_return(__kmap_atomic_idx); BUG_ON(idx < 0); #else __this_cpu_dec(__kmap_atomic_idx); #endif } #endif /* * Prevent people trying to call kunmap_atomic() as if it were kunmap() * kunmap_atomic() should get the return value of kmap_atomic, not the page. */ #define kunmap_atomic(addr) \ do { \ BUILD_BUG_ON(__same_type((addr), struct page *)); \ __kunmap_atomic(addr); \ } while (0) /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ #ifndef clear_user_highpage static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { void *addr = kmap_atomic(page); clear_user_page(addr, vaddr, page); kunmap_atomic(addr); } #endif #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE /** * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE * @vma: The VMA the page is to be allocated for * @vaddr: The virtual address the page will be inserted into * * This function will allocate a page for a VMA but the caller is expected * to specify via movableflags whether the page will be movable in the * future or not * * An architecture may override this function by defining * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own * implementation.
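 *
 * A hedged sketch (an editorial illustration) of the typical caller, via
 * the movable-page wrapper defined below, in an anonymous-fault path:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;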
*/ static inline struct page * __alloc_zeroed_user_highpage(gfp_t movableflags, struct vm_area_struct *vma, unsigned long vaddr) { struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, vma, vaddr); if (page) clear_user_highpage(page, vaddr); return page; } #endif /** * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move * @vma: The VMA the page is to be allocated for * @vaddr: The virtual address the page will be inserted into * * This function will allocate a page for a VMA that the caller knows will * be able to migrate in the future using move_pages() or be reclaimed */ static inline struct page * alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, unsigned long vaddr) { return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); } static inline void clear_highpage(struct page *page) { void *kaddr = kmap_atomic(page); clear_page(kaddr); kunmap_atomic(kaddr); } static inline void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) { void *kaddr = kmap_atomic(page); BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); if (end1 > start1) memset(kaddr + start1, 0, end1 - start1); if (end2 > start2) memset(kaddr + start2, 0, end2 - start2); kunmap_atomic(kaddr); flush_dcache_page(page); } static inline void zero_user_segment(struct page *page, unsigned start, unsigned end) { zero_user_segments(page, start, end, 0, 0); } static inline void zero_user(struct page *page, unsigned start, unsigned size) { zero_user_segments(page, start, start + size, 0, 0); } #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { char *vfrom, *vto; vfrom = kmap_atomic(from); vto = kmap_atomic(to); copy_user_page(vto, vfrom, vaddr, to); kunmap_atomic(vto); kunmap_atomic(vfrom); } #endif #ifndef __HAVE_ARCH_COPY_HIGHPAGE static inline void copy_highpage(struct page *to, struct page *from) { char *vfrom, *vto; vfrom = kmap_atomic(from); vto = kmap_atomic(to); copy_page(vto, vfrom); kunmap_atomic(vto); kunmap_atomic(vfrom); } #endif static inline void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len) { char *from = kmap_atomic(page); memcpy(to, from + offset, len); kunmap_atomic(from); } static inline void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len) { char *to = kmap_atomic(page); memcpy(to + offset, from, len); kunmap_atomic(to); } #endif /* _LINUX_HIGHMEM_H */ freezer.h 0000644 00000021252 14722070374 0006367 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* Freezer declarations */ #ifndef FREEZER_H_INCLUDED #define FREEZER_H_INCLUDED #include <linux/debug_locks.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/atomic.h> #ifdef CONFIG_FREEZER extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */ extern bool pm_freezing; /* PM freezing in effect */ extern bool pm_nosig_freezing; /* PM nosig freezing in effect */ /* * Timeout for stopping processes */ extern unsigned int freeze_timeout_msecs; /* * Check if a process has been frozen */ static inline bool frozen(struct task_struct *p) { return p->flags & PF_FROZEN; } extern bool freezing_slow_path(struct task_struct *p); /* * Check if there is a request to freeze a process */ static inline bool freezing(struct task_struct *p) { if (likely(!atomic_read(&system_freezing_cnt))) return false; return freezing_slow_path(p); } /* Takes and releases task
alloc lock using task_lock() */ extern void __thaw_task(struct task_struct *t); extern bool __refrigerator(bool check_kthr_stop); extern int freeze_processes(void); extern int freeze_kernel_threads(void); extern void thaw_processes(void); extern void thaw_kernel_threads(void); /* * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION * If try_to_freeze causes a lockdep warning it means the caller may deadlock */ static inline bool try_to_freeze_unsafe(void) { might_sleep(); if (likely(!freezing(current))) return false; return __refrigerator(false); } static inline bool try_to_freeze(void) { if (!(current->flags & PF_NOFREEZE)) debug_check_no_locks_held(); return try_to_freeze_unsafe(); } extern bool freeze_task(struct task_struct *p); extern bool set_freezable(void); #ifdef CONFIG_CGROUP_FREEZER extern bool cgroup_freezing(struct task_struct *task); #else /* !CONFIG_CGROUP_FREEZER */ static inline bool cgroup_freezing(struct task_struct *task) { return false; } #endif /* !CONFIG_CGROUP_FREEZER */ /* * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it * calls wait_for_completion(&vfork) and reset right after it returns from this * function. Next, the parent should call try_to_freeze() to freeze itself * appropriately in case the child has exited before the freezing of tasks is * complete. However, we don't want kernel threads to be frozen in unexpected * places, so we allow them to block freeze_processes() instead or to set * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the * parent won't really block freeze_processes(), since ____call_usermodehelper() * (the child) does a little before exec/exit and it can't be frozen before * waking up the parent. */ /** * freezer_do_not_count - tell freezer to ignore %current * * Tell freezers to ignore the current task when determining whether the * target frozen state is reached. IOW, the current task will be * considered frozen enough by freezers. * * The caller shouldn't do anything which isn't allowed for a frozen task * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair * wrap a scheduling operation and nothing much else. */ static inline void freezer_do_not_count(void) { current->flags |= PF_FREEZER_SKIP; } /** * freezer_count - tell freezer to stop ignoring %current * * Undo freezer_do_not_count(). It tells freezers that %current should be * considered again and tries to freeze if freezing condition is already in * effect. */ static inline void freezer_count(void) { current->flags &= ~PF_FREEZER_SKIP; /* * If freezing is in progress, the following paired with smp_mb() * in freezer_should_skip() ensures that either we see %true * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP. */ smp_mb(); try_to_freeze(); } /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ static inline void freezer_count_unsafe(void) { current->flags &= ~PF_FREEZER_SKIP; smp_mb(); try_to_freeze_unsafe(); } /** * freezer_should_skip - whether to skip a task when determining frozen * state is reached * @p: task in question * * This function is used by freezers after establishing %true freezing() to * test whether a task should be skipped when determining the target frozen * state is reached. IOW, if this function returns %true, @p is considered * frozen enough. */ static inline bool freezer_should_skip(struct task_struct *p) { /* * The following smp_mb() paired with the one in freezer_count() * ensures that either freezer_count() sees %true freezing() or we * see cleared %PF_FREEZER_SKIP and return %false.
This makes it * impossible for a task to slip frozen state testing after * clearing %PF_FREEZER_SKIP. */ smp_mb(); return p->flags & PF_FREEZER_SKIP; } /* * These functions are intended to be used whenever you want to allow a sleeping * task to be frozen. Note that none of them returns any clear indication of * whether a freeze event happened while in this function. */ /* Like schedule(), but should not block the freezer. */ static inline void freezable_schedule(void) { freezer_do_not_count(); schedule(); freezer_count(); } /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ static inline void freezable_schedule_unsafe(void) { freezer_do_not_count(); schedule(); freezer_count_unsafe(); } /* * Like schedule_timeout(), but should not block the freezer. Do not * call this with locks held. */ static inline long freezable_schedule_timeout(long timeout) { long __retval; freezer_do_not_count(); __retval = schedule_timeout(timeout); freezer_count(); return __retval; } /* * Like schedule_timeout_interruptible(), but should not block the freezer. Do not * call this with locks held. */ static inline long freezable_schedule_timeout_interruptible(long timeout) { long __retval; freezer_do_not_count(); __retval = schedule_timeout_interruptible(timeout); freezer_count(); return __retval; } /* Like schedule_timeout_killable(), but should not block the freezer. */ static inline long freezable_schedule_timeout_killable(long timeout) { long __retval; freezer_do_not_count(); __retval = schedule_timeout_killable(timeout); freezer_count(); return __retval; } /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ static inline long freezable_schedule_timeout_killable_unsafe(long timeout) { long __retval; freezer_do_not_count(); __retval = schedule_timeout_killable(timeout); freezer_count_unsafe(); return __retval; } /* * Like schedule_hrtimeout_range(), but should not block the freezer. Do not * call this with locks held.
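 *
 * A hedged sketch (an editorial illustration; the timeout and slack
 * values are arbitrary):
 *
 *	ktime_t timeout = ms_to_ktime(100);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	freezable_schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
 *					   HRTIMER_MODE_REL);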
*/ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, u64 delta, const enum hrtimer_mode mode) { int __retval; freezer_do_not_count(); __retval = schedule_hrtimeout_range(expires, delta, mode); freezer_count(); return __retval; } /* * Freezer-friendly wrappers around wait_event_interruptible(), * wait_event_killable() and wait_event_interruptible_timeout(), originally * defined in <linux/wait.h> */ /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ #define wait_event_freezekillable_unsafe(wq, condition) \ ({ \ int __retval; \ freezer_do_not_count(); \ __retval = wait_event_killable(wq, (condition)); \ freezer_count_unsafe(); \ __retval; \ }) #else /* !CONFIG_FREEZER */ static inline bool frozen(struct task_struct *p) { return false; } static inline bool freezing(struct task_struct *p) { return false; } static inline void __thaw_task(struct task_struct *t) {} static inline bool __refrigerator(bool check_kthr_stop) { return false; } static inline int freeze_processes(void) { return -ENOSYS; } static inline int freeze_kernel_threads(void) { return -ENOSYS; } static inline void thaw_processes(void) {} static inline void thaw_kernel_threads(void) {} static inline bool try_to_freeze_nowarn(void) { return false; } static inline bool try_to_freeze(void) { return false; } static inline void freezer_do_not_count(void) {} static inline void freezer_count(void) {} static inline int freezer_should_skip(struct task_struct *p) { return 0; } static inline void set_freezable(void) {} #define freezable_schedule() schedule() #define freezable_schedule_unsafe() schedule() #define freezable_schedule_timeout(timeout) schedule_timeout(timeout) #define freezable_schedule_timeout_interruptible(timeout) \ schedule_timeout_interruptible(timeout) #define freezable_schedule_timeout_killable(timeout) \ schedule_timeout_killable(timeout) #define freezable_schedule_timeout_killable_unsafe(timeout) \ schedule_timeout_killable(timeout) #define freezable_schedule_hrtimeout_range(expires, delta, mode) \ schedule_hrtimeout_range(expires, delta, mode) #define wait_event_freezekillable_unsafe(wq, condition) \ wait_event_killable(wq, condition) #endif /* !CONFIG_FREEZER */ #endif /* FREEZER_H_INCLUDED */ pfn.h 0000644 00000001232 14722070374 0005504 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PFN_H_ #define _LINUX_PFN_H_ #ifndef __ASSEMBLY__ #include <linux/types.h> /* * pfn_t: encapsulates a page-frame number that is optionally backed * by memmap (struct page). Whether a pfn_t has a 'struct page' * backing is indicated by flags in the high bits of the value. */ typedef struct { u64 val; } pfn_t; #endif #define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT) #define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT)) #endif fixp-arith.h 0000644 00000007415 14722070374 0007005 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _FIXP_ARITH_H #define _FIXP_ARITH_H #include <linux/math64.h> /* * Simplistic fixed-point arithmetics. 
* Hmm, I'm probably duplicating some code :( * * Copyright (c) 2002 Johann Deneux */ /* * * Should you need to contact me, the author, you can do so by * e-mail - mail your message to <johann.deneux@gmail.com> */ #include <linux/types.h> static const s32 sin_table[] = { 0x00000000, 0x023be165, 0x04779632, 0x06b2f1d2, 0x08edc7b6, 0x0b27eb5c, 0x0d61304d, 0x0f996a26, 0x11d06c96, 0x14060b67, 0x163a1a7d, 0x186c6ddd, 0x1a9cd9ac, 0x1ccb3236, 0x1ef74bf2, 0x2120fb82, 0x234815ba, 0x256c6f9e, 0x278dde6e, 0x29ac379f, 0x2bc750e8, 0x2ddf003f, 0x2ff31bdd, 0x32037a44, 0x340ff241, 0x36185aee, 0x381c8bb5, 0x3a1c5c56, 0x3c17a4e7, 0x3e0e3ddb, 0x3fffffff, 0x41ecc483, 0x43d464fa, 0x45b6bb5d, 0x4793a20f, 0x496af3e1, 0x4b3c8c11, 0x4d084650, 0x4ecdfec6, 0x508d9210, 0x5246dd48, 0x53f9be04, 0x55a6125a, 0x574bb8e5, 0x58ea90c2, 0x5a827999, 0x5c135399, 0x5d9cff82, 0x5f1f5ea0, 0x609a52d1, 0x620dbe8a, 0x637984d3, 0x64dd894f, 0x6639b039, 0x678dde6d, 0x68d9f963, 0x6a1de735, 0x6b598ea1, 0x6c8cd70a, 0x6db7a879, 0x6ed9eba0, 0x6ff389de, 0x71046d3c, 0x720c8074, 0x730baeec, 0x7401e4bf, 0x74ef0ebb, 0x75d31a5f, 0x76adf5e5, 0x777f903b, 0x7847d908, 0x7906c0af, 0x79bc384c, 0x7a6831b8, 0x7b0a9f8c, 0x7ba3751c, 0x7c32a67c, 0x7cb82884, 0x7d33f0c8, 0x7da5f5a3, 0x7e0e2e31, 0x7e6c924f, 0x7ec11aa3, 0x7f0bc095, 0x7f4c7e52, 0x7f834ecf, 0x7fb02dc4, 0x7fd317b3, 0x7fec09e1, 0x7ffb025e, 0x7fffffff }; /** * __fixp_sin32() returns the sin of an angle in degrees * * @degrees: angle, in degrees, from 0 to 360. * * The returned value ranges from -0x7fffffff to +0x7fffffff. */ static inline s32 __fixp_sin32(int degrees) { s32 ret; bool negative = false; if (degrees > 180) { negative = true; degrees -= 180; } if (degrees > 90) degrees = 180 - degrees; ret = sin_table[degrees]; return negative ? -ret : ret; } /** * fixp_sin32() returns the sin of an angle in degrees * * @degrees: angle, in degrees. The angle can be positive or negative * * The returned value ranges from -0x7fffffff to +0x7fffffff. */ static inline s32 fixp_sin32(int degrees) { degrees = (degrees % 360 + 360) % 360; return __fixp_sin32(degrees); } /* cos(x) = sin(x + 90 degrees) */ #define fixp_cos32(v) fixp_sin32((v) + 90) /* * 16 bits variants * * The returned value ranges from -0x7fff to 0x7fff */ #define fixp_sin16(v) (fixp_sin32(v) >> 16) #define fixp_cos16(v) (fixp_cos32(v) >> 16) /** * fixp_sin32_rad() - calculates the sin of an angle in radians * * @radians: angle, in radians * @twopi: value to be used for 2*pi * * Provides a variant for the cases where just 360 * values are not enough. This function uses linear * interpolation to a wider range of values given by * the twopi var. * * Experimental tests gave a maximum difference of * 0.000038 between the value calculated by sin() and * the one produced by this function, when twopi is * equal to 360000. That seems to be enough precision * for practical purposes. * * Please notice that too high values for twopi could cause * overflows, so the routine will not allow values of twopi * bigger than 2^18 (enforced by the BUG_ON() below). */ static inline s32 fixp_sin32_rad(u32 radians, u32 twopi) { int degrees; s32 v1, v2, dx, dy; s64 tmp; /* * Avoid too large values for twopi, as we don't want overflows.
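* * A worked example (assuming twopi == 360000): fixp_sin32_rad(90000, 360000) * maps to exactly 90 degrees, so it returns sin_table[90] == 0x7fffffff, * while fixp_sin32_rad(45500, 360000) falls halfway between the 45- and * 46-degree table entries and is linearly interpolated between them.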
*/ BUG_ON(twopi > 1 << 18); degrees = (radians * 360) / twopi; tmp = radians - (degrees * twopi) / 360; degrees = (degrees % 360 + 360) % 360; v1 = __fixp_sin32(degrees); v2 = fixp_sin32(degrees + 1); dx = twopi / 360; dy = v2 - v1; tmp *= dy; return v1 + div_s64(tmp, dx); } /* cos(x) = sin(x + pi/2 radians) */ #define fixp_cos32_rad(rad, twopi) \ fixp_sin32_rad(rad + twopi / 4, twopi) #endif glob.h 0000644 00000000400 14722070374 0005640 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_GLOB_H #define _LINUX_GLOB_H #include <linux/types.h> /* For bool */ #include <linux/compiler.h> /* For __pure */ bool __pure glob_match(char const *pat, char const *str); #endif /* _LINUX_GLOB_H */ completion.h 0000644 00000010057 14722070374 0007077 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPLETION_H #define __LINUX_COMPLETION_H /* * (C) Copyright 2001 Linus Torvalds * * Atomic wait-for-completion handler data structures. * See kernel/sched/completion.c for details. */ #include <linux/wait.h> /* * struct completion - structure used to maintain state for a "completion" * * This is the opaque structure used to maintain the state for a "completion". * Completions currently use a FIFO to queue threads that have to wait for * the "completion" event. * * See also: complete(), wait_for_completion() (and friends _timeout, * _interruptible, _interruptible_timeout, and _killable), init_completion(), * reinit_completion(), and macros DECLARE_COMPLETION(), * DECLARE_COMPLETION_ONSTACK(). */ struct completion { unsigned int done; wait_queue_head_t wait; }; #define init_completion_map(x, m) __init_completion(x) #define init_completion(x) __init_completion(x) static inline void complete_acquire(struct completion *x) {} static inline void complete_release(struct completion *x) {} #define COMPLETION_INITIALIZER(work) \ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ (*({ init_completion_map(&(work), &(map)); &(work); })) #define COMPLETION_INITIALIZER_ONSTACK(work) \ (*({ init_completion(&work); &work; })) /** * DECLARE_COMPLETION - declare and initialize a completion structure * @work: identifier for the completion structure * * This macro declares and initializes a completion structure. Generally used * for static declarations. You should use the _ONSTACK variant for automatic * variables. */ #define DECLARE_COMPLETION(work) \ struct completion work = COMPLETION_INITIALIZER(work) /* * Lockdep needs to run a non-constant initializer for on-stack * completions - so we use the _ONSTACK() variant for those that * are on the kernel stack: */ /** * DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure * @work: identifier for the completion structure * * This macro declares and initializes a completion structure on the kernel * stack. */ #ifdef CONFIG_LOCKDEP # define DECLARE_COMPLETION_ONSTACK(work) \ struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) # define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \ struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) #else # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) # define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work) #endif /** * init_completion - Initialize a dynamically allocated completion * @x: pointer to completion structure that is to be initialized * * This inline function will initialize a dynamically created completion * structure. 
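* * A typical usage sketch (setup_done and start_async_work() are hypothetical * names, not part of this header): * * struct completion setup_done; * * init_completion(&setup_done); * start_async_work(&setup_done); (the worker ends with complete(&setup_done)) * wait_for_completion(&setup_done); (blocks until the worker has finished)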
*/ static inline void __init_completion(struct completion *x) { x->done = 0; init_waitqueue_head(&x->wait); } /** * reinit_completion - reinitialize a completion structure * @x: pointer to completion structure that is to be reinitialized * * This inline function should be used to reinitialize a completion structure so it can * be reused. This is especially important after complete_all() is used. */ static inline void reinit_completion(struct completion *x) { x->done = 0; } extern void wait_for_completion(struct completion *); extern void wait_for_completion_io(struct completion *); extern int wait_for_completion_interruptible(struct completion *x); extern int wait_for_completion_killable(struct completion *x); extern unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); extern unsigned long wait_for_completion_io_timeout(struct completion *x, unsigned long timeout); extern long wait_for_completion_interruptible_timeout( struct completion *x, unsigned long timeout); extern long wait_for_completion_killable_timeout( struct completion *x, unsigned long timeout); extern bool try_wait_for_completion(struct completion *x); extern bool completion_done(struct completion *x); extern void complete(struct completion *); extern void complete_all(struct completion *); #endif fsldma.h 0000644 00000000312 14722070374 0006165 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* */ #ifndef FSL_DMA_H #define FSL_DMA_H /* fsl dma API for external start */ struct dma_chan; int fsl_dma_external_start(struct dma_chan *dchan, int enable); #endif intel_rapl.h 0000644 00000010166 14722070374 0007060 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Data types and headers for RAPL support * * Copyright (C) 2019 Intel Corporation. * * Author: Zhang Rui <rui.zhang@intel.com> */ #ifndef __INTEL_RAPL_H__ #define __INTEL_RAPL_H__ #include <linux/types.h> #include <linux/powercap.h> #include <linux/cpuhotplug.h> enum rapl_domain_type { RAPL_DOMAIN_PACKAGE, /* entire package/socket */ RAPL_DOMAIN_PP0, /* core power plane */ RAPL_DOMAIN_PP1, /* graphics uncore */ RAPL_DOMAIN_DRAM, /* DRAM control_type */ RAPL_DOMAIN_PLATFORM, /* PSys control_type */ RAPL_DOMAIN_MAX, }; enum rapl_domain_reg_id { RAPL_DOMAIN_REG_LIMIT, RAPL_DOMAIN_REG_STATUS, RAPL_DOMAIN_REG_PERF, RAPL_DOMAIN_REG_POLICY, RAPL_DOMAIN_REG_INFO, RAPL_DOMAIN_REG_MAX, }; struct rapl_package; enum rapl_primitives { ENERGY_COUNTER, POWER_LIMIT1, POWER_LIMIT2, FW_LOCK, PL1_ENABLE, /* power limit 1, aka long term */ PL1_CLAMP, /* allow frequency to go below OS request */ PL2_ENABLE, /* power limit 2, aka short term, instantaneous */ PL2_CLAMP, TIME_WINDOW1, /* long term */ TIME_WINDOW2, /* short term */ THERMAL_SPEC_POWER, MAX_POWER, MIN_POWER, MAX_TIME_WINDOW, THROTTLED_TIME, PRIORITY_LEVEL, /* below are not raw primitive data */ AVERAGE_POWER, NR_RAPL_PRIMITIVES, }; struct rapl_domain_data { u64 primitives[NR_RAPL_PRIMITIVES]; unsigned long timestamp; }; #define NR_POWER_LIMITS (2) struct rapl_power_limit { struct powercap_zone_constraint *constraint; int prim_id; /* primitive ID used to enable */ struct rapl_domain *domain; const char *name; u64 last_power_limit; }; struct rapl_package; struct rapl_domain { const char *name; enum rapl_domain_type id; u64 regs[RAPL_DOMAIN_REG_MAX]; struct powercap_zone power_zone; struct rapl_domain_data rdd; struct rapl_power_limit rpl[NR_POWER_LIMITS]; u64 attr_map; /* track capabilities */ unsigned int state; unsigned int domain_energy_unit; struct rapl_package *rp; }; struct reg_action { u64 reg;
u64 mask; u64 value; int err; }; /** * struct rapl_if_priv: private data for different RAPL interfaces * @control_type: Each RAPL interface must have its own powercap * control type. * @platform_rapl_domain: Optional. Some RAPL interface may have platform * level RAPL control. * @pcap_rapl_online: CPU hotplug state for each RAPL interface. * @reg_unit: Register for getting energy/power/time unit. * @regs: Register sets for different RAPL Domains. * @limits: Number of power limits supported by each domain. * @read_raw: Callback for reading RAPL interface specific * registers. * @write_raw: Callback for writing RAPL interface specific * registers. */ struct rapl_if_priv { struct powercap_control_type *control_type; struct rapl_domain *platform_rapl_domain; enum cpuhp_state pcap_rapl_online; u64 reg_unit; u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX]; int limits[RAPL_DOMAIN_MAX]; int (*read_raw)(int cpu, struct reg_action *ra); int (*write_raw)(int cpu, struct reg_action *ra); }; /* maximum rapl package domain name: package-%d-die-%d */ #define PACKAGE_DOMAIN_NAME_LENGTH 30 struct rapl_package { unsigned int id; /* logical die id; equals the physical die id on 1-die systems */ unsigned int nr_domains; unsigned long domain_map; /* bit map of active domains */ unsigned int power_unit; unsigned int energy_unit; unsigned int time_unit; struct rapl_domain *domains; /* array of domains, sized at runtime */ struct powercap_zone *power_zone; /* keep track of parent zone */ unsigned long power_limit_irq; /* keep track of package power limit * notify interrupt enable status. */ struct list_head plist; int lead_cpu; /* one active cpu per package for access */ /* Track active cpus */ struct cpumask cpumask; char name[PACKAGE_DOMAIN_NAME_LENGTH]; struct rapl_if_priv *priv; }; struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv); struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv); void rapl_remove_package(struct rapl_package *rp); int rapl_add_platform_domain(struct rapl_if_priv *priv); void rapl_remove_platform_domain(struct rapl_if_priv *priv); #endif /* __INTEL_RAPL_H__ */ stackleak.h 0000644 00000001615 14722070374 0006670 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_STACKLEAK_H #define _LINUX_STACKLEAK_H #include <linux/sched.h> #include <linux/sched/task_stack.h> /* * Check that the poison value points to the unused hole in the * virtual memory map for your platform. */ #define STACKLEAK_POISON -0xBEEF #define STACKLEAK_SEARCH_DEPTH 128 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK #include <asm/stacktrace.h> static inline void stackleak_task_init(struct task_struct *t) { t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long); # ifdef CONFIG_STACKLEAK_METRICS t->prev_lowest_stack = t->lowest_stack; # endif } #ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE int stack_erasing_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif #else /* !CONFIG_GCC_PLUGIN_STACKLEAK */ static inline void stackleak_task_init(struct task_struct *t) { } #endif #endif usb/pd_vdo.h 0000644 00000016211 14722070374 0006770 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2015-2017 Google, Inc */ #ifndef __LINUX_USB_PD_VDO_H #define __LINUX_USB_PD_VDO_H #include "pd.h" /* * VDO : Vendor Defined Message Object * A VDM message is a VDM header plus at most 6 additional data objects.
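* * As an example of composing one (hypothetical values, using the macros * defined later in this file), a structured Enter Mode request addressed to * object position 1 of a DisplayPort partner could be built as * VDO(USB_SID_DISPLAYPORT, 1, VDO_OPOS(1) | VDO_CMDT(CMDT_INIT) | CMD_ENTER_MODE).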
*/ #define VDO_MAX_OBJECTS 6 #define VDO_MAX_SIZE (VDO_MAX_OBJECTS + 1) /* * VDM header * ---------- * <31:16> :: SVID * <15> :: VDM type ( 1b == structured, 0b == unstructured ) * <14:13> :: Structured VDM version (can only be 00 == 1.0 currently) * <12:11> :: reserved * <10:8> :: object position (1-7 valid ... used for enter/exit mode only) * <7:6> :: command type (SVDM only?) * <5> :: reserved (SVDM), command type (UVDM) * <4:0> :: command */ #define VDO(vid, type, custom) \ (((vid) << 16) | \ ((type) << 15) | \ ((custom) & 0x7FFF)) #define VDO_SVDM_TYPE (1 << 15) #define VDO_SVDM_VERS(x) ((x) << 13) #define VDO_OPOS(x) ((x) << 8) #define VDO_CMDT(x) ((x) << 6) #define VDO_OPOS_MASK VDO_OPOS(0x7) #define VDO_CMDT_MASK VDO_CMDT(0x3) #define CMDT_INIT 0 #define CMDT_RSP_ACK 1 #define CMDT_RSP_NAK 2 #define CMDT_RSP_BUSY 3 /* reserved for SVDM ... for Google UVDM */ #define VDO_SRC_INITIATOR (0 << 5) #define VDO_SRC_RESPONDER (1 << 5) #define CMD_DISCOVER_IDENT 1 #define CMD_DISCOVER_SVID 2 #define CMD_DISCOVER_MODES 3 #define CMD_ENTER_MODE 4 #define CMD_EXIT_MODE 5 #define CMD_ATTENTION 6 #define VDO_CMD_VENDOR(x) (((0x10 + (x)) & 0x1f)) /* ChromeOS specific commands */ #define VDO_CMD_VERSION VDO_CMD_VENDOR(0) #define VDO_CMD_SEND_INFO VDO_CMD_VENDOR(1) #define VDO_CMD_READ_INFO VDO_CMD_VENDOR(2) #define VDO_CMD_REBOOT VDO_CMD_VENDOR(5) #define VDO_CMD_FLASH_ERASE VDO_CMD_VENDOR(6) #define VDO_CMD_FLASH_WRITE VDO_CMD_VENDOR(7) #define VDO_CMD_ERASE_SIG VDO_CMD_VENDOR(8) #define VDO_CMD_PING_ENABLE VDO_CMD_VENDOR(10) #define VDO_CMD_CURRENT VDO_CMD_VENDOR(11) #define VDO_CMD_FLIP VDO_CMD_VENDOR(12) #define VDO_CMD_GET_LOG VDO_CMD_VENDOR(13) #define VDO_CMD_CCD_EN VDO_CMD_VENDOR(14) #define PD_VDO_VID(vdo) ((vdo) >> 16) #define PD_VDO_SVDM(vdo) (((vdo) >> 15) & 1) #define PD_VDO_OPOS(vdo) (((vdo) >> 8) & 0x7) #define PD_VDO_CMD(vdo) ((vdo) & 0x1f) #define PD_VDO_CMDT(vdo) (((vdo) >> 6) & 0x3) /* * SVDM Identity request -> response * * Request is simply a properly formatted SVDM header * * Response is up to 5 data objects: * [0] :: SVDM header * [1] :: Identity header * [2] :: Cert Stat VDO * [3] :: (Product | Cable) VDO * [4] :: AMA VDO * */ #define VDO_INDEX_HDR 0 #define VDO_INDEX_IDH 1 #define VDO_INDEX_CSTAT 2 #define VDO_INDEX_CABLE 3 #define VDO_INDEX_PRODUCT 3 #define VDO_INDEX_AMA 4 /* * SVDM Identity Header * -------------------- * <31> :: data capable as a USB host * <30> :: data capable as a USB device * <29:27> :: product type * <26> :: modal operation supported (1b == yes) * <25:16> :: Reserved, Shall be set to zero * <15:0> :: USB-IF assigned VID for this cable vendor */ #define IDH_PTYPE_UNDEF 0 #define IDH_PTYPE_HUB 1 #define IDH_PTYPE_PERIPH 2 #define IDH_PTYPE_PCABLE 3 #define IDH_PTYPE_ACABLE 4 #define IDH_PTYPE_AMA 5 #define VDO_IDH(usbh, usbd, ptype, is_modal, vid) \ ((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27 \ | (is_modal) << 26 | ((vid) & 0xffff)) #define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7) #define PD_IDH_VID(vdo) ((vdo) & 0xffff) #define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26)) /* * Cert Stat VDO * ------------- * <31:0> : USB-IF assigned XID for this cable */ #define PD_CSTAT_XID(vdo) (vdo) /* * Product VDO * ----------- * <31:16> : USB Product ID * <15:0> : USB bcdDevice */ #define VDO_PRODUCT(pid, bcd) (((pid) & 0xffff) << 16 | ((bcd) & 0xffff)) #define PD_PRODUCT_PID(vdo) (((vdo) >> 16) & 0xffff) /* * Cable VDO * --------- * <31:28> :: Cable HW version * <27:24> :: Cable FW version * <23:20> :: Reserved, Shall be set to zero * <19:18> :: type-C to
Type-A/B/C (00b == A, 01 == B, 10 == C) * <17> :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle) * <16:13> :: cable latency (0001 == <10ns(~1m length)) * <12:11> :: cable termination type (11b == both ends active VCONN req) * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable) * <9> :: SSTX2 Directionality support * <8> :: SSRX1 Directionality support * <7> :: SSRX2 Directionality support * <6:5> :: Vbus current handling capability * <4> :: Vbus through cable (0b == no, 1b == yes) * <3> :: SOP" controller present? (0b == no, 1b == yes) * <2:0> :: USB SS Signaling support */ #define CABLE_ATYPE 0 #define CABLE_BTYPE 1 #define CABLE_CTYPE 2 #define CABLE_PLUG 0 #define CABLE_RECEPTACLE 1 #define CABLE_CURR_1A5 0 #define CABLE_CURR_3A 1 #define CABLE_CURR_5A 2 #define CABLE_USBSS_U2_ONLY 0 #define CABLE_USBSS_U31_GEN1 1 #define CABLE_USBSS_U31_GEN2 2 #define VDO_CABLE(hw, fw, cbl, gdr, lat, term, tx1d, tx2d, rx1d, rx2d, cur,\ vps, sopp, usbss) \ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \ | (gdr) << 17 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 \ | (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 \ | ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3 \ | ((usbss) & 0x7)) /* * AMA VDO * --------- * <31:28> :: Cable HW version * <27:24> :: Cable FW version * <23:12> :: Reserved, Shall be set to zero * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable) * <10> :: SSTX2 Directionality support * <9> :: SSRX1 Directionality support * <8> :: SSRX2 Directionality support * <7:5> :: Vconn power * <4> :: Vconn power required * <3> :: Vbus power required * <2:0> :: USB SS Signaling support */ #define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \ | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \ | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3 \ | ((usbss) & 0x7)) #define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1) #define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1) #define AMA_VCONN_PWR_1W 0 #define AMA_VCONN_PWR_1W5 1 #define AMA_VCONN_PWR_2W 2 #define AMA_VCONN_PWR_3W 3 #define AMA_VCONN_PWR_4W 4 #define AMA_VCONN_PWR_5W 5 #define AMA_VCONN_PWR_6W 6 #define AMA_USBSS_U2_ONLY 0 #define AMA_USBSS_U31_GEN1 1 #define AMA_USBSS_U31_GEN2 2 #define AMA_USBSS_BBONLY 3 /* * SVDM Discover SVIDs request -> response * * Request is a properly formatted VDM Header with the Discover SVIDs command. * Response is a set of VDOs listing all supported SVIDs, with an all-zero * entry marking the end of the list. If more than 12 SVIDs are supported, * the command SHOULD be * repeated.
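* * Each response VDO packs two 16-bit SVIDs: PD_VDO_SVID_SVID0(vdo) extracts * the upper half and PD_VDO_SVID_SVID1(vdo) the lower half (vdo being a * hypothetical received data object, not something defined in this header); * a zero SVID marks the end of the list.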
*/ #define VDO_SVID(svid0, svid1) (((svid0) & 0xffff) << 16 | ((svid1) & 0xffff)) #define PD_VDO_SVID_SVID0(vdo) ((vdo) >> 16) #define PD_VDO_SVID_SVID1(vdo) ((vdo) & 0xffff) /* USB-IF SIDs */ #define USB_SID_PD 0xff00 /* power delivery */ #define USB_SID_DISPLAYPORT 0xff01 #define USB_SID_MHL 0xff02 /* Mobile High-Definition Link */ /* VDM command timeouts (in ms) */ #define PD_T_VDM_UNSTRUCTURED 500 #define PD_T_VDM_BUSY 100 #define PD_T_VDM_WAIT_MODE_E 100 #define PD_T_VDM_SNDR_RSP 30 #define PD_T_VDM_E_MODE 25 #define PD_T_VDM_RCVR_RSP 15 #endif /* __LINUX_USB_PD_VDO_H */ usb/pd_ext_sdb.h 0000644 00000001476 14722070374 0007637 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2017 Dialog Semiconductor * * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com> */ #ifndef __LINUX_USB_PD_EXT_SDB_H #define __LINUX_USB_PD_EXT_SDB_H /* SDB : Status Data Block */ enum usb_pd_ext_sdb_fields { USB_PD_EXT_SDB_INTERNAL_TEMP = 0, USB_PD_EXT_SDB_PRESENT_INPUT, USB_PD_EXT_SDB_PRESENT_BATT_INPUT, USB_PD_EXT_SDB_EVENT_FLAGS, USB_PD_EXT_SDB_TEMP_STATUS, USB_PD_EXT_SDB_DATA_SIZE, }; /* Event Flags */ #define USB_PD_EXT_SDB_EVENT_OCP BIT(1) #define USB_PD_EXT_SDB_EVENT_OTP BIT(2) #define USB_PD_EXT_SDB_EVENT_OVP BIT(3) #define USB_PD_EXT_SDB_EVENT_CF_CV_MODE BIT(4) #define USB_PD_EXT_SDB_PPS_EVENTS (USB_PD_EXT_SDB_EVENT_OCP | \ USB_PD_EXT_SDB_EVENT_OTP | \ USB_PD_EXT_SDB_EVENT_OVP) #endif /* __LINUX_USB_PD_EXT_SDB_H */ usb/ccid.h 0000644 00000001423 14722070374 0006416 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2018 Vincent Pelletier */ /* */ #ifndef __CCID_H #define __CCID_H #include <linux/types.h> #define USB_INTERFACE_CLASS_CCID 0x0b struct ccid_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdCCID; __u8 bMaxSlotIndex; __u8 bVoltageSupport; __le32 dwProtocols; __le32 dwDefaultClock; __le32 dwMaximumClock; __u8 bNumClockSupported; __le32 dwDataRate; __le32 dwMaxDataRate; __u8 bNumDataRatesSupported; __le32 dwMaxIFSD; __le32 dwSynchProtocols; __le32 dwMechanical; __le32 dwFeatures; __le32 dwMaxCCIDMessageLength; __u8 bClassGetResponse; __u8 bClassEnvelope; __le16 wLcdLayout; __u8 bPINSupport; __u8 bMaxCCIDBusySlots; } __attribute__ ((packed)); #endif /* __CCID_H */ usb/tcpm.h 0000644 00000012176 14722070374 0006466 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2015-2017 Google, Inc */ #ifndef __LINUX_USB_TCPM_H #define __LINUX_USB_TCPM_H #include <linux/bitops.h> #include <linux/usb/typec.h> #include "pd.h" enum typec_cc_status { TYPEC_CC_OPEN, TYPEC_CC_RA, TYPEC_CC_RD, TYPEC_CC_RP_DEF, TYPEC_CC_RP_1_5, TYPEC_CC_RP_3_0, }; enum typec_cc_polarity { TYPEC_POLARITY_CC1, TYPEC_POLARITY_CC2, }; /* Time to wait for TCPC to complete transmit */ #define PD_T_TCPC_TX_TIMEOUT 100 /* in ms */ #define PD_ROLE_SWAP_TIMEOUT (MSEC_PER_SEC * 10) #define PD_PPS_CTRL_TIMEOUT (MSEC_PER_SEC * 10) enum tcpm_transmit_status { TCPC_TX_SUCCESS = 0, TCPC_TX_DISCARDED = 1, TCPC_TX_FAILED = 2, }; enum tcpm_transmit_type { TCPC_TX_SOP = 0, TCPC_TX_SOP_PRIME = 1, TCPC_TX_SOP_PRIME_PRIME = 2, TCPC_TX_SOP_DEBUG_PRIME = 3, TCPC_TX_SOP_DEBUG_PRIME_PRIME = 4, TCPC_TX_HARD_RESET = 5, TCPC_TX_CABLE_RESET = 6, TCPC_TX_BIST_MODE_2 = 7 }; /** * struct tcpc_config - Port configuration * @src_pdo: PDO parameters sent to port partner as response to * PD_CTRL_GET_SOURCE_CAP message * @nr_src_pdo: Number of entries in @src_pdo * @snk_pdo: PDO parameters sent to partner as response to * PD_CTRL_GET_SINK_CAP message * @nr_snk_pdo: Number of entries in 
@snk_pdo * @operating_snk_mw: * Required operating sink power in mW * @type: Port type (TYPEC_PORT_DFP, TYPEC_PORT_UFP, or * TYPEC_PORT_DRP) * @default_role: * Default port role (TYPEC_SINK or TYPEC_SOURCE). * Set to TYPEC_NO_PREFERRED_ROLE if no default role. * @try_role_hw:True if try.{Src,Snk} is implemented in hardware * @alt_modes: List of supported alternate modes */ struct tcpc_config { const u32 *src_pdo; unsigned int nr_src_pdo; const u32 *snk_pdo; unsigned int nr_snk_pdo; const u32 *snk_vdo; unsigned int nr_snk_vdo; unsigned int operating_snk_mw; enum typec_port_type type; enum typec_port_data data; enum typec_role default_role; bool try_role_hw; /* try.{src,snk} implemented in hardware */ bool self_powered; /* port belongs to a self powered device */ const struct typec_altmode_desc *alt_modes; }; /* Mux state attributes */ #define TCPC_MUX_USB_ENABLED BIT(0) /* USB enabled */ #define TCPC_MUX_DP_ENABLED BIT(1) /* DP enabled */ #define TCPC_MUX_POLARITY_INVERTED BIT(2) /* Polarity inverted */ /** * struct tcpc_dev - Port configuration and callback functions * @config: Pointer to port configuration * @fwnode: Pointer to port fwnode * @get_vbus: Called to read current VBUS state * @get_current_limit: * Optional; called by the tcpm core when configured as a snk * and cc=Rp-def. This allows the tcpm to provide a fallback * current-limit detection method for the cc=Rp-def case. * For example, some tcpcs may include BC1.2 charger detection * and use that in this case. * @set_cc: Called to set value of CC pins * @get_cc: Called to read current CC pin values * @set_polarity: * Called to set polarity * @set_vconn: Called to enable or disable VCONN * @set_vbus: Called to enable or disable VBUS * @set_current_limit: * Optional; called to set current limit as negotiated * with partner. * @set_pd_rx: Called to enable or disable reception of PD messages * @set_roles: Called to set power and data roles * @start_toggling: * Optional; if supported by hardware, called to start dual-role * toggling or single-role connection detection. Toggling stops * automatically if a connection is established. 
* @try_role: Optional; called to set a preferred role * @pd_transmit:Called to transmit PD message * @mux: Pointer to multiplexer data */ struct tcpc_dev { const struct tcpc_config *config; struct fwnode_handle *fwnode; int (*init)(struct tcpc_dev *dev); int (*get_vbus)(struct tcpc_dev *dev); int (*get_current_limit)(struct tcpc_dev *dev); int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc); int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1, enum typec_cc_status *cc2); int (*set_polarity)(struct tcpc_dev *dev, enum typec_cc_polarity polarity); int (*set_vconn)(struct tcpc_dev *dev, bool on); int (*set_vbus)(struct tcpc_dev *dev, bool on, bool charge); int (*set_current_limit)(struct tcpc_dev *dev, u32 max_ma, u32 mv); int (*set_pd_rx)(struct tcpc_dev *dev, bool on); int (*set_roles)(struct tcpc_dev *dev, bool attached, enum typec_role role, enum typec_data_role data); int (*start_toggling)(struct tcpc_dev *dev, enum typec_port_type port_type, enum typec_cc_status cc); int (*try_role)(struct tcpc_dev *dev, int role); int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type, const struct pd_message *msg); }; struct tcpm_port; struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc); void tcpm_unregister_port(struct tcpm_port *port); void tcpm_vbus_change(struct tcpm_port *port); void tcpm_cc_change(struct tcpm_port *port); void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg); void tcpm_pd_transmit_complete(struct tcpm_port *port, enum tcpm_transmit_status status); void tcpm_pd_hard_reset(struct tcpm_port *port); void tcpm_tcpc_reset(struct tcpm_port *port); #endif /* __LINUX_USB_TCPM_H */ usb/gadget_configfs.h 0000644 00000005706 14722070374 0010635 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __GADGET_CONFIGFS__ #define __GADGET_CONFIGFS__ #include <linux/configfs.h> int check_user_usb_string(const char *name, struct usb_gadget_strings *stringtab_dev); #define GS_STRINGS_W(__struct, __name) \ static ssize_t __struct##_##__name##_store(struct config_item *item, \ const char *page, size_t len) \ { \ struct __struct *gs = to_##__struct(item); \ int ret; \ \ ret = usb_string_copy(page, &gs->__name); \ if (ret) \ return ret; \ return len; \ } #define GS_STRINGS_R(__struct, __name) \ static ssize_t __struct##_##__name##_show(struct config_item *item, char *page) \ { \ struct __struct *gs = to_##__struct(item); \ return sprintf(page, "%s\n", gs->__name ?: ""); \ } #define GS_STRINGS_RW(struct_name, _name) \ GS_STRINGS_R(struct_name, _name) \ GS_STRINGS_W(struct_name, _name) \ CONFIGFS_ATTR(struct_name##_, _name) #define USB_CONFIG_STRING_RW_OPS(struct_in) \ static struct configfs_item_operations struct_in##_langid_item_ops = { \ .release = struct_in##_attr_release, \ }; \ \ static struct config_item_type struct_in##_langid_type = { \ .ct_item_ops = &struct_in##_langid_item_ops, \ .ct_attrs = struct_in##_langid_attrs, \ .ct_owner = THIS_MODULE, \ } #define USB_CONFIG_STRINGS_LANG(struct_in, struct_member) \ static struct config_group *struct_in##_strings_make( \ struct config_group *group, \ const char *name) \ { \ struct struct_member *gi; \ struct struct_in *gs; \ struct struct_in *new; \ int langs = 0; \ int ret; \ \ new = kzalloc(sizeof(*new), GFP_KERNEL); \ if (!new) \ return ERR_PTR(-ENOMEM); \ \ ret = check_user_usb_string(name, &new->stringtab_dev); \ if (ret) \ goto err; \ config_group_init_type_name(&new->group, name, \ &struct_in##_langid_type); \ \ gi = container_of(group, struct struct_member, 
strings_group); \ ret = -EEXIST; \ list_for_each_entry(gs, &gi->string_list, list) { \ if (gs->stringtab_dev.language == new->stringtab_dev.language) \ goto err; \ langs++; \ } \ ret = -EOVERFLOW; \ if (langs >= MAX_USB_STRING_LANGS) \ goto err; \ \ list_add_tail(&new->list, &gi->string_list); \ return &new->group; \ err: \ kfree(new); \ return ERR_PTR(ret); \ } \ \ static void struct_in##_strings_drop( \ struct config_group *group, \ struct config_item *item) \ { \ config_item_put(item); \ } \ \ static struct configfs_group_operations struct_in##_strings_ops = { \ .make_group = &struct_in##_strings_make, \ .drop_item = &struct_in##_strings_drop, \ }; \ \ static struct config_item_type struct_in##_strings_type = { \ .ct_group_ops = &struct_in##_strings_ops, \ .ct_owner = THIS_MODULE, \ } #endif usb/iowarrior.h 0000644 00000002536 14722070374 0007537 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_USB_IOWARRIOR_H #define __LINUX_USB_IOWARRIOR_H #define CODEMERCS_MAGIC_NUMBER 0xC0 /* like COde Mercenaries */ /* Define the ioctl commands for reading and writing data */ #define IOW_WRITE _IOW(CODEMERCS_MAGIC_NUMBER, 1, __u8 *) #define IOW_READ _IOW(CODEMERCS_MAGIC_NUMBER, 2, __u8 *) /* A struct for available device info which is read with the ioctl IOW_GETINFO. To be compatible with 2.4 userspace which didn't have an easy way to get this information. */ struct iowarrior_info { /* vendor id : supposed to be USB_VENDOR_ID_CODEMERCS in all cases */ __u32 vendor; /* product id : depends on type of chip (USB_DEVICE_ID_CODEMERCS_X) */ __u32 product; /* the serial number of our chip (if a serial-number is not available * this is empty string) */ __u8 serial[9]; /* revision number of the chip */ __u32 revision; /* USB-speed of the device (0=UNKNOWN, 1=LOW, 2=FULL 3=HIGH) */ __u32 speed; /* power consumption of the device in mA */ __u32 power; /* the number of the endpoint */ __u32 if_num; /* size of the data-packets on this interface */ __u32 report_size; }; /* Get some device-information (product-id , serial-number etc.) in order to identify a chip. */ #define IOW_GETINFO _IOR(CODEMERCS_MAGIC_NUMBER, 3, struct iowarrior_info) #endif /* __LINUX_USB_IOWARRIOR_H */ usb/audio-v2.h 0000644 00000032540 14722070374 0007146 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2010 Daniel Mack <daniel@caiaq.de> * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. * * This file holds USB constants and structures defined * by the USB Device Class Definition for Audio Devices in version 2.0. * Comments below reference relevant sections of the documents contained * in http://www.usb.org/developers/devclass_docs/Audio2.0_final.zip */ #ifndef __LINUX_USB_AUDIO_V2_H #define __LINUX_USB_AUDIO_V2_H #include <linux/types.h> /* v1.0 and v2.0 of this standard have many things in common. For the rest * of the definitions, please refer to audio.h */ /* * bmControl field decoders * * From the USB Audio spec v2.0: * * bmaControls() is a (ch+1)-element array of 4-byte bitmaps, * each containing a set of bit pairs. If a Control is present, * it must be Host readable. If a certain Control is not * present then the bit pair must be set to 0b00. * If a Control is present but read-only, the bit pair must be * set to 0b01. If a Control is also Host programmable, the bit * pair must be set to 0b11. The value 0b10 is not allowed. 
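* * As a worked example, a bmControls value of 0x00000007 encodes the bit pair * 0b11 for Control 1 (present, readable and Host programmable) and 0b01 for * Control 2 (present but read-only), so with the helpers below both controls * report uac_v2v3_control_is_readable(0x7, c) == true, while * uac_v2v3_control_is_writeable(0x7, c) is true only for Control 1.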
* */ static inline bool uac_v2v3_control_is_readable(u32 bmControls, u8 control) { return (bmControls >> ((control - 1) * 2)) & 0x1; } static inline bool uac_v2v3_control_is_writeable(u32 bmControls, u8 control) { return (bmControls >> ((control - 1) * 2)) & 0x2; } /* 4.7.2 Class-Specific AC Interface Descriptor */ struct uac2_ac_header_descriptor { __u8 bLength; /* 9 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ __le16 bcdADC; /* 0x0200 */ __u8 bCategory; __le16 wTotalLength; /* includes Unit and Terminal desc. */ __u8 bmControls; } __packed; /* 2.3.1.6 Type I Format Type Descriptor (Frmts20 final.pdf) */ struct uac2_format_type_i_descriptor { __u8 bLength; /* in bytes: 6 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ __u8 bFormatType; /* FORMAT_TYPE_1 */ __u8 bSubslotSize; /* {1,2,3,4} */ __u8 bBitResolution; } __packed; /* 4.7.2.1 Clock Source Descriptor */ struct uac_clock_source_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bmAttributes; __u8 bmControls; __u8 bAssocTerminal; __u8 iClockSource; } __attribute__((packed)); /* bmAttribute fields */ #define UAC_CLOCK_SOURCE_TYPE_EXT 0x0 #define UAC_CLOCK_SOURCE_TYPE_INT_FIXED 0x1 #define UAC_CLOCK_SOURCE_TYPE_INT_VAR 0x2 #define UAC_CLOCK_SOURCE_TYPE_INT_PROG 0x3 #define UAC_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 2) /* 4.7.2.2 Clock Selector Descriptor */ struct uac_clock_selector_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bNrInPins; __u8 baCSourceID[]; /* bmControls and iClockSource omitted */ } __attribute__((packed)); /* 4.7.2.3 Clock Multiplier Descriptor */ struct uac_clock_multiplier_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bCSourceID; __u8 bmControls; __u8 iClockMultiplier; } __attribute__((packed)); /* 4.7.2.4 Input terminal descriptor */ struct uac2_input_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bCSourceID; __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; __le16 bmControls; __u8 iTerminal; } __attribute__((packed)); /* 4.7.2.5 Output terminal descriptor */ struct uac2_output_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bSourceID; __u8 bCSourceID; __le16 bmControls; __u8 iTerminal; } __attribute__((packed)); /* 4.7.2.8 Feature Unit Descriptor */ struct uac2_feature_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bSourceID; /* bmaControls is actually u32, * but u8 is needed for the hybrid parser */ __u8 bmaControls[0]; /* variable length */ } __attribute__((packed)); /* 4.9.2 Class-Specific AS Interface Descriptor */ struct uac2_as_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalLink; __u8 bmControls; __u8 bFormatType; __le32 bmFormats; __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; } __attribute__((packed)); #define UAC2_FORMAT_TYPE_I_RAW_DATA (1 << 31) /* 4.10.1.2 Class-Specific AS Isochronous Audio Data Endpoint Descriptor */ struct uac2_iso_endpoint_descriptor { __u8 bLength; /* in bytes: 8 */ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ __u8 bDescriptorSubtype; /* EP_GENERAL */ __u8 bmAttributes; __u8 bmControls; __u8 bLockDelayUnits; __le16 wLockDelay; }
__attribute__((packed)); #define UAC2_CONTROL_PITCH (3 << 0) #define UAC2_CONTROL_DATA_OVERRUN (3 << 2) #define UAC2_CONTROL_DATA_UNDERRUN (3 << 4) /* 5.2.5.4.2 Connector Control Parameter Block */ struct uac2_connectors_ctl_blk { __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; } __attribute__((packed)); /* 6.1 Interrupt Data Message */ #define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0) #define UAC2_INTERRUPT_DATA_MSG_EP (1 << 1) struct uac2_interrupt_data_msg { __u8 bInfo; __u8 bAttribute; __le16 wValue; __le16 wIndex; } __attribute__((packed)); /* A.7 Audio Function Category Codes */ #define UAC2_FUNCTION_SUBCLASS_UNDEFINED 0x00 #define UAC2_FUNCTION_DESKTOP_SPEAKER 0x01 #define UAC2_FUNCTION_HOME_THEATER 0x02 #define UAC2_FUNCTION_MICROPHONE 0x03 #define UAC2_FUNCTION_HEADSET 0x04 #define UAC2_FUNCTION_TELEPHONE 0x05 #define UAC2_FUNCTION_CONVERTER 0x06 #define UAC2_FUNCTION_SOUND_RECORDER 0x07 #define UAC2_FUNCTION_IO_BOX 0x08 #define UAC2_FUNCTION_MUSICAL_INSTRUMENT 0x09 #define UAC2_FUNCTION_PRO_AUDIO 0x0a #define UAC2_FUNCTION_AUDIO_VIDEO 0x0b #define UAC2_FUNCTION_CONTROL_PANEL 0x0c #define UAC2_FUNCTION_OTHER 0xff /* A.9 Audio Class-Specific AC Interface Descriptor Subtypes */ /* see audio.h for the rest, which is identical to v1 */ #define UAC2_EFFECT_UNIT 0x07 #define UAC2_PROCESSING_UNIT_V2 0x08 #define UAC2_EXTENSION_UNIT_V2 0x09 #define UAC2_CLOCK_SOURCE 0x0a #define UAC2_CLOCK_SELECTOR 0x0b #define UAC2_CLOCK_MULTIPLIER 0x0c #define UAC2_SAMPLE_RATE_CONVERTER 0x0d /* A.10 Audio Class-Specific AS Interface Descriptor Subtypes */ /* see audio.h for the rest, which is identical to v1 */ #define UAC2_ENCODER 0x03 #define UAC2_DECODER 0x04 /* A.11 Effect Unit Effect Types */ #define UAC2_EFFECT_UNDEFINED 0x00 #define UAC2_EFFECT_PARAM_EQ 0x01 #define UAC2_EFFECT_REVERB 0x02 #define UAC2_EFFECT_MOD_DELAY 0x03 #define UAC2_EFFECT_DYN_RANGE_COMP 0x04 /* A.12 Processing Unit Process Types */ #define UAC2_PROCESS_UNDEFINED 0x00 #define UAC2_PROCESS_UP_DOWNMIX 0x01 #define UAC2_PROCESS_DOLBY_PROLOCIC 0x02 #define UAC2_PROCESS_STEREO_EXTENDER 0x03 /* A.14 Audio Class-Specific Request Codes */ #define UAC2_CS_CUR 0x01 #define UAC2_CS_RANGE 0x02 #define UAC2_CS_MEM 0x03 /* A.15 Encoder Type Codes */ #define UAC2_ENCODER_UNDEFINED 0x00 #define UAC2_ENCODER_OTHER 0x01 #define UAC2_ENCODER_MPEG 0x02 #define UAC2_ENCODER_AC3 0x03 #define UAC2_ENCODER_WMA 0x04 #define UAC2_ENCODER_DTS 0x05 /* A.16 Decoder Type Codes */ #define UAC2_DECODER_UNDEFINED 0x00 #define UAC2_DECODER_OTHER 0x01 #define UAC2_DECODER_MPEG 0x02 #define UAC2_DECODER_AC3 0x03 #define UAC2_DECODER_WMA 0x04 #define UAC2_DECODER_DTS 0x05 /* A.17.1 Clock Source Control Selectors */ #define UAC2_CS_UNDEFINED 0x00 #define UAC2_CS_CONTROL_SAM_FREQ 0x01 #define UAC2_CS_CONTROL_CLOCK_VALID 0x02 /* A.17.2 Clock Selector Control Selectors */ #define UAC2_CX_UNDEFINED 0x00 #define UAC2_CX_CLOCK_SELECTOR 0x01 /* A.17.3 Clock Multiplier Control Selectors */ #define UAC2_CM_UNDEFINED 0x00 #define UAC2_CM_NUMERATOR 0x01 #define UAC2_CM_DENOMINTATOR 0x02 /* A.17.4 Terminal Control Selectors */ #define UAC2_TE_UNDEFINED 0x00 #define UAC2_TE_COPY_PROTECT 0x01 #define UAC2_TE_CONNECTOR 0x02 #define UAC2_TE_OVERLOAD 0x03 #define UAC2_TE_CLUSTER 0x04 #define UAC2_TE_UNDERFLOW 0x05 #define UAC2_TE_OVERFLOW 0x06 #define UAC2_TE_LATENCY 0x07 /* A.17.5 Mixer Control Selectors */ #define UAC2_MU_UNDEFINED 0x00 #define UAC2_MU_MIXER 0x01 #define UAC2_MU_CLUSTER 0x02 #define UAC2_MU_UNDERFLOW 0x03 #define UAC2_MU_OVERFLOW 0x04 #define 
UAC2_MU_LATENCY 0x05 /* A.17.6 Selector Control Selectors */ #define UAC2_SU_UNDEFINED 0x00 #define UAC2_SU_SELECTOR 0x01 #define UAC2_SU_LATENCY 0x02 /* A.17.7 Feature Unit Control Selectors */ /* see audio.h for the rest, which is identical to v1 */ #define UAC2_FU_INPUT_GAIN 0x0b #define UAC2_FU_INPUT_GAIN_PAD 0x0c #define UAC2_FU_PHASE_INVERTER 0x0d #define UAC2_FU_UNDERFLOW 0x0e #define UAC2_FU_OVERFLOW 0x0f #define UAC2_FU_LATENCY 0x10 /* A.17.8.1 Parametric Equalizer Section Effect Unit Control Selectors */ #define UAC2_PE_UNDEFINED 0x00 #define UAC2_PE_ENABLE 0x01 #define UAC2_PE_CENTERFREQ 0x02 #define UAC2_PE_QFACTOR 0x03 #define UAC2_PE_GAIN 0x04 #define UAC2_PE_UNDERFLOW 0x05 #define UAC2_PE_OVERFLOW 0x06 #define UAC2_PE_LATENCY 0x07 /* A.17.8.2 Reverberation Effect Unit Control Selectors */ #define UAC2_RV_UNDEFINED 0x00 #define UAC2_RV_ENABLE 0x01 #define UAC2_RV_TYPE 0x02 #define UAC2_RV_LEVEL 0x03 #define UAC2_RV_TIME 0x04 #define UAC2_RV_FEEDBACK 0x05 #define UAC2_RV_PREDELAY 0x06 #define UAC2_RV_DENSITY 0x07 #define UAC2_RV_HIFREQ_ROLLOFF 0x08 #define UAC2_RV_UNDERFLOW 0x09 #define UAC2_RV_OVERFLOW 0x0a #define UAC2_RV_LATENCY 0x0b /* A.17.8.3 Modulation Delay Effect Control Selectors */ #define UAC2_MD_UNDEFINED 0x00 #define UAC2_MD_ENABLE 0x01 #define UAC2_MD_BALANCE 0x02 #define UAC2_MD_RATE 0x03 #define UAC2_MD_DEPTH 0x04 #define UAC2_MD_TIME 0x05 #define UAC2_MD_FEEDBACK 0x06 #define UAC2_MD_UNDERFLOW 0x07 #define UAC2_MD_OVERFLOW 0x08 #define UAC2_MD_LATENCY 0x09 /* A.17.8.4 Dynamic Range Compressor Effect Unit Control Selectors */ #define UAC2_DR_UNDEFINED 0x00 #define UAC2_DR_ENABLE 0x01 #define UAC2_DR_COMPRESSION_RATE 0x02 #define UAC2_DR_MAXAMPL 0x03 #define UAC2_DR_THRESHOLD 0x04 #define UAC2_DR_ATTACK_TIME 0x05 #define UAC2_DR_RELEASE_TIME 0x06 #define UAC2_DR_UNDEFLOW 0x07 #define UAC2_DR_OVERFLOW 0x08 #define UAC2_DR_LATENCY 0x09 /* A.17.9.1 Up/Down-mix Processing Unit Control Selectors */ #define UAC2_UD_UNDEFINED 0x00 #define UAC2_UD_ENABLE 0x01 #define UAC2_UD_MODE_SELECT 0x02 #define UAC2_UD_CLUSTER 0x03 #define UAC2_UD_UNDERFLOW 0x04 #define UAC2_UD_OVERFLOW 0x05 #define UAC2_UD_LATENCY 0x06 /* A.17.9.2 Dolby Prologic[tm] Processing Unit Control Selectors */ #define UAC2_DP_UNDEFINED 0x00 #define UAC2_DP_ENABLE 0x01 #define UAC2_DP_MODE_SELECT 0x02 #define UAC2_DP_CLUSTER 0x03 #define UAC2_DP_UNDERFFLOW 0x04 #define UAC2_DP_OVERFLOW 0x05 #define UAC2_DP_LATENCY 0x06 /* A.17.9.3 Stereo Expander Processing Unit Control Selectors */ #define UAC2_ST_EXT_UNDEFINED 0x00 #define UAC2_ST_EXT_ENABLE 0x01 #define UAC2_ST_EXT_WIDTH 0x02 #define UAC2_ST_EXT_UNDEFLOW 0x03 #define UAC2_ST_EXT_OVERFLOW 0x04 #define UAC2_ST_EXT_LATENCY 0x05 /* A.17.10 Extension Unit Control Selectors */ #define UAC2_XU_UNDEFINED 0x00 #define UAC2_XU_ENABLE 0x01 #define UAC2_XU_CLUSTER 0x02 #define UAC2_XU_UNDERFLOW 0x03 #define UAC2_XU_OVERFLOW 0x04 #define UAC2_XU_LATENCY 0x05 /* A.17.11 AudioStreaming Interface Control Selectors */ #define UAC2_AS_UNDEFINED 0x00 #define UAC2_AS_ACT_ALT_SETTING 0x01 #define UAC2_AS_VAL_ALT_SETTINGS 0x02 #define UAC2_AS_AUDIO_DATA_FORMAT 0x03 /* A.17.12 Encoder Control Selectors */ #define UAC2_EN_UNDEFINED 0x00 #define UAC2_EN_BIT_RATE 0x01 #define UAC2_EN_QUALITY 0x02 #define UAC2_EN_VBR 0x03 #define UAC2_EN_TYPE 0x04 #define UAC2_EN_UNDERFLOW 0x05 #define UAC2_EN_OVERFLOW 0x06 #define UAC2_EN_ENCODER_ERROR 0x07 #define UAC2_EN_PARAM1 0x08 #define UAC2_EN_PARAM2 0x09 #define UAC2_EN_PARAM3 0x0a #define UAC2_EN_PARAM4 0x0b #define UAC2_EN_PARAM5 0x0c 
#define UAC2_EN_PARAM6 0x0d #define UAC2_EN_PARAM7 0x0e #define UAC2_EN_PARAM8 0x0f /* A.17.13.1 MPEG Decoder Control Selectors */ #define UAC2_MPEG_UNDEFINED 0x00 #define UAC2_MPEG_DUAL_CHANNEL 0x01 #define UAC2_MPEG_SECOND_STEREO 0x02 #define UAC2_MPEG_MULTILINGUAL 0x03 #define UAC2_MPEG_DYN_RANGE 0x04 #define UAC2_MPEG_SCALING 0x05 #define UAC2_MPEG_HILO_SCALING 0x06 #define UAC2_MPEG_UNDERFLOW 0x07 #define UAC2_MPEG_OVERFLOW 0x08 #define UAC2_MPEG_DECODER_ERROR 0x09 /* A17.13.2 AC3 Decoder Control Selectors */ #define UAC2_AC3_UNDEFINED 0x00 #define UAC2_AC3_MODE 0x01 #define UAC2_AC3_DYN_RANGE 0x02 #define UAC2_AC3_SCALING 0x03 #define UAC2_AC3_HILO_SCALING 0x04 #define UAC2_AC3_UNDERFLOW 0x05 #define UAC2_AC3_OVERFLOW 0x06 #define UAC2_AC3_DECODER_ERROR 0x07 /* A17.13.3 WMA Decoder Control Selectors */ #define UAC2_WMA_UNDEFINED 0x00 #define UAC2_WMA_UNDERFLOW 0x01 #define UAC2_WMA_OVERFLOW 0x02 #define UAC2_WMA_DECODER_ERROR 0x03 /* A17.13.4 DTS Decoder Control Selectors */ #define UAC2_DTS_UNDEFINED 0x00 #define UAC2_DTS_UNDERFLOW 0x01 #define UAC2_DTS_OVERFLOW 0x02 #define UAC2_DTS_DECODER_ERROR 0x03 /* A17.14 Endpoint Control Selectors */ #define UAC2_EP_CS_UNDEFINED 0x00 #define UAC2_EP_CS_PITCH 0x01 #define UAC2_EP_CS_DATA_OVERRUN 0x02 #define UAC2_EP_CS_DATA_UNDERRUN 0x03 #endif /* __LINUX_USB_AUDIO_V2_H */ usb/serial.h 0000644 00000042030 14722070374 0006772 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * USB Serial Converter stuff * * Copyright (C) 1999 - 2012 * Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * */ #ifndef __LINUX_USB_SERIAL_H #define __LINUX_USB_SERIAL_H #include <linux/kref.h> #include <linux/mutex.h> #include <linux/serial.h> #include <linux/sysrq.h> #include <linux/kfifo.h> /* The maximum number of ports one device can grab at once */ #define MAX_NUM_PORTS 16 /* USB serial flags */ #define USB_SERIAL_WRITE_BUSY 0 #define USB_SERIAL_THROTTLED 1 /** * usb_serial_port: structure for the specific ports of a device. * @serial: pointer back to the struct usb_serial owner of this port. * @port: pointer to the corresponding tty_port for this port. * @lock: spinlock to grab when updating portions of this structure. * @minor: the minor number of the port * @port_number: the struct usb_serial port number of this port (starts at 0) * @interrupt_in_buffer: pointer to the interrupt in buffer for this port. * @interrupt_in_urb: pointer to the interrupt in struct urb for this port. * @interrupt_in_endpointAddress: endpoint address for the interrupt in pipe * for this port. * @interrupt_out_buffer: pointer to the interrupt out buffer for this port. * @interrupt_out_size: the size of the interrupt_out_buffer, in bytes. * @interrupt_out_urb: pointer to the interrupt out struct urb for this port. * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe * for this port. * @bulk_in_buffer: pointer to the bulk in buffer for this port. * @bulk_in_size: the size of the bulk_in_buffer, in bytes. * @read_urb: pointer to the bulk in struct urb for this port. * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this * port. 
* @bulk_in_buffers: pointers to the bulk in buffers for this port * @read_urbs: pointers to the bulk in urbs for this port * @read_urbs_free: status bitmap for the bulk in urbs * @bulk_out_buffer: pointer to the bulk out buffer for this port. * @bulk_out_size: the size of the bulk_out_buffer, in bytes. * @write_urb: pointer to the bulk out struct urb for this port. * @write_fifo: kfifo used to buffer outgoing data * @bulk_out_buffers: pointers to the bulk out buffers for this port * @write_urbs: pointers to the bulk out urbs for this port * @write_urbs_free: status bitmap for the bulk out urbs * @icount: interrupt counters * @tx_bytes: number of bytes currently in host stack queues * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this * port. * @flags: usb serial port flags * @write_wait: a wait_queue_head_t used by the port. * @work: work queue entry for the line discipline waking up. * @dev: pointer to the serial device * * This structure is used by the usb-serial core and drivers for the specific * ports of a device. */ struct usb_serial_port { struct usb_serial *serial; struct tty_port port; spinlock_t lock; u32 minor; u8 port_number; unsigned char *interrupt_in_buffer; struct urb *interrupt_in_urb; __u8 interrupt_in_endpointAddress; unsigned char *interrupt_out_buffer; int interrupt_out_size; struct urb *interrupt_out_urb; __u8 interrupt_out_endpointAddress; unsigned char *bulk_in_buffer; int bulk_in_size; struct urb *read_urb; __u8 bulk_in_endpointAddress; unsigned char *bulk_in_buffers[2]; struct urb *read_urbs[2]; unsigned long read_urbs_free; unsigned char *bulk_out_buffer; int bulk_out_size; struct urb *write_urb; struct kfifo write_fifo; unsigned char *bulk_out_buffers[2]; struct urb *write_urbs[2]; unsigned long write_urbs_free; __u8 bulk_out_endpointAddress; struct async_icount icount; int tx_bytes; unsigned long flags; wait_queue_head_t write_wait; struct work_struct work; unsigned long sysrq; /* sysrq timeout */ struct device dev; }; #define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev) /* get and set the port private data pointer helper functions */ static inline void *usb_get_serial_port_data(struct usb_serial_port *port) { return dev_get_drvdata(&port->dev); } static inline void usb_set_serial_port_data(struct usb_serial_port *port, void *data) { dev_set_drvdata(&port->dev, data); } /** * usb_serial - structure used by the usb-serial core for a device * @dev: pointer to the struct usb_device for this device * @type: pointer to the struct usb_serial_driver for this device * @interface: pointer to the struct usb_interface for this device * @num_ports: the number of ports this device has * @num_interrupt_in: number of interrupt in endpoints we have * @num_interrupt_out: number of interrupt out endpoints we have * @num_bulk_in: number of bulk in endpoints we have * @num_bulk_out: number of bulk out endpoints we have * @port: array of struct usb_serial_port structures for the different ports. * @private: place to put any driver specific information that is needed. The * usb-serial driver is required to manage this data, the usb-serial core * will not touch this. Use usb_get_serial_data() and * usb_set_serial_data() to access this.
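* * A sketch of the usual pattern (struct example_private and the surrounding * attach() context are hypothetical, not part of this header): allocate the * driver state with kzalloc(sizeof(struct example_private), GFP_KERNEL), check * for NULL, store it with usb_set_serial_data(serial, priv), and free it again * in release() via kfree(usb_get_serial_data(serial)).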
*/ struct usb_serial { struct usb_device *dev; struct usb_serial_driver *type; struct usb_interface *interface; unsigned char disconnected:1; unsigned char suspending:1; unsigned char attached:1; unsigned char minors_reserved:1; unsigned char num_ports; unsigned char num_port_pointers; unsigned char num_interrupt_in; unsigned char num_interrupt_out; unsigned char num_bulk_in; unsigned char num_bulk_out; struct usb_serial_port *port[MAX_NUM_PORTS]; struct kref kref; struct mutex disc_mutex; void *private; }; #define to_usb_serial(d) container_of(d, struct usb_serial, kref) /* get and set the serial private data pointer helper functions */ static inline void *usb_get_serial_data(struct usb_serial *serial) { return serial->private; } static inline void usb_set_serial_data(struct usb_serial *serial, void *data) { serial->private = data; } struct usb_serial_endpoints { unsigned char num_bulk_in; unsigned char num_bulk_out; unsigned char num_interrupt_in; unsigned char num_interrupt_out; struct usb_endpoint_descriptor *bulk_in[MAX_NUM_PORTS]; struct usb_endpoint_descriptor *bulk_out[MAX_NUM_PORTS]; struct usb_endpoint_descriptor *interrupt_in[MAX_NUM_PORTS]; struct usb_endpoint_descriptor *interrupt_out[MAX_NUM_PORTS]; }; /** * usb_serial_driver - describes a usb serial driver * @description: pointer to a string that describes this driver. This string is * used in the syslog messages when a device is inserted or removed. * @id_table: pointer to a list of usb_device_id structures that define all * of the devices this structure can support. * @num_ports: the number of different ports this device will have. * @num_bulk_in: minimum number of bulk-in endpoints * @num_bulk_out: minimum number of bulk-out endpoints * @num_interrupt_in: minimum number of interrupt-in endpoints * @num_interrupt_out: minimum number of interrupt-out endpoints * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer * (0 = end-point size) * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size) * @calc_num_ports: pointer to a function to determine how many ports this * device has dynamically. It can also be used to verify the number of * endpoints or to modify the port-endpoint mapping. It will be called * after the probe() callback is called, but before attach(). * @probe: pointer to the driver's probe function. * This will be called when the device is inserted into the system, * but before the device has been fully initialized by the usb_serial * subsystem. Use this function to download any firmware to the device, * or any other early initialization that might be needed. * Return 0 to continue on with the initialization sequence. Anything * else will abort it. * @attach: pointer to the driver's attach function. * This will be called when the struct usb_serial structure is fully set * up. Do any local initialization of the device, or any private * memory structure allocation at this point in time. * @disconnect: pointer to the driver's disconnect function. This will be * called when the device is unplugged or unbound from the driver. * @release: pointer to the driver's release function. This will be called * when the usb_serial data structure is about to be destroyed. * @usb_driver: pointer to the struct usb_driver that controls this * device. This is necessary to allow dynamic ids to be added to * the driver from sysfs. * * This structure defines a USB Serial driver. It provides all of * the information that the USB serial core code needs.
If the function * pointers are defined, then the USB serial core code will call them when * the corresponding tty port functions are called. If they are not * defined, the generic serial function will be used instead. * * The driver.owner field should be set to the module owner of this driver. * The driver.name field should be set to the name of this driver (remember * it will show up in sysfs, so it needs to be short and to the point. * Using the module name is a good idea.) */ struct usb_serial_driver { const char *description; const struct usb_device_id *id_table; struct list_head driver_list; struct device_driver driver; struct usb_driver *usb_driver; struct usb_dynids dynids; unsigned char num_ports; unsigned char num_bulk_in; unsigned char num_bulk_out; unsigned char num_interrupt_in; unsigned char num_interrupt_out; size_t bulk_in_size; size_t bulk_out_size; int (*probe)(struct usb_serial *serial, const struct usb_device_id *id); int (*attach)(struct usb_serial *serial); int (*calc_num_ports)(struct usb_serial *serial, struct usb_serial_endpoints *epds); void (*disconnect)(struct usb_serial *serial); void (*release)(struct usb_serial *serial); int (*port_probe)(struct usb_serial_port *port); int (*port_remove)(struct usb_serial_port *port); int (*suspend)(struct usb_serial *serial, pm_message_t message); int (*resume)(struct usb_serial *serial); int (*reset_resume)(struct usb_serial *serial); /* serial function calls */ /* Called by console and by the tty layer */ int (*open)(struct tty_struct *tty, struct usb_serial_port *port); void (*close)(struct usb_serial_port *port); int (*write)(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); /* Called only by the tty layer */ int (*write_room)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); int (*get_serial)(struct tty_struct *tty, struct serial_struct *ss); int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss); void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old); void (*break_ctl)(struct tty_struct *tty, int break_state); int (*chars_in_buffer)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, long timeout); bool (*tx_empty)(struct usb_serial_port *port); void (*throttle)(struct tty_struct *tty); void (*unthrottle)(struct tty_struct *tty); int (*tiocmget)(struct tty_struct *tty); int (*tiocmset)(struct tty_struct *tty, unsigned int set, unsigned int clear); int (*tiocmiwait)(struct tty_struct *tty, unsigned long arg); int (*get_icount)(struct tty_struct *tty, struct serial_icounter_struct *icount); /* Called by the tty layer for port level work.
There may or may not be an attached tty at this point */ void (*dtr_rts)(struct usb_serial_port *port, int on); int (*carrier_raised)(struct usb_serial_port *port); /* Called by the usb serial hooks to allow the user to rework the termios state */ void (*init_termios)(struct tty_struct *tty); /* USB events */ void (*read_int_callback)(struct urb *urb); void (*write_int_callback)(struct urb *urb); void (*read_bulk_callback)(struct urb *urb); void (*write_bulk_callback)(struct urb *urb); /* Called by the generic read bulk callback */ void (*process_read_urb)(struct urb *urb); /* Called by the generic write implementation */ int (*prepare_write_buffer)(struct usb_serial_port *port, void *dest, size_t size); }; #define to_usb_serial_driver(d) \ container_of(d, struct usb_serial_driver, driver) extern int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], const char *name, const struct usb_device_id *id_table); extern void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]); extern void usb_serial_port_softint(struct usb_serial_port *port); extern int usb_serial_suspend(struct usb_interface *intf, pm_message_t message); extern int usb_serial_resume(struct usb_interface *intf); /* USB Serial console functions */ #ifdef CONFIG_USB_SERIAL_CONSOLE extern void usb_serial_console_init(int minor); extern void usb_serial_console_exit(void); extern void usb_serial_console_disconnect(struct usb_serial *serial); #else static inline void usb_serial_console_init(int minor) { } static inline void usb_serial_console_exit(void) { } static inline void usb_serial_console_disconnect(struct usb_serial *serial) {} #endif /* Functions needed by other parts of the usbserial core */ extern struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor); extern void usb_serial_put(struct usb_serial *serial); extern int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port); extern int usb_serial_generic_write_start(struct usb_serial_port *port, gfp_t mem_flags); extern int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); extern void usb_serial_generic_close(struct usb_serial_port *port); extern int usb_serial_generic_resume(struct usb_serial *serial); extern int usb_serial_generic_write_room(struct tty_struct *tty); extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout); extern void usb_serial_generic_read_bulk_callback(struct urb *urb); extern void usb_serial_generic_write_bulk_callback(struct urb *urb); extern void usb_serial_generic_throttle(struct tty_struct *tty); extern void usb_serial_generic_unthrottle(struct tty_struct *tty); extern int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg); extern int usb_serial_generic_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); extern int usb_serial_generic_register(void); extern void usb_serial_generic_deregister(void); extern int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, gfp_t mem_flags); extern void usb_serial_generic_process_read_urb(struct urb *urb); extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size); extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch); extern int usb_serial_handle_break(struct usb_serial_port *port); extern void 
usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, struct tty_struct *tty, unsigned int status); extern int usb_serial_bus_register(struct usb_serial_driver *device); extern void usb_serial_bus_deregister(struct usb_serial_driver *device); extern struct bus_type usb_serial_bus_type; extern struct tty_driver *usb_serial_tty_driver; static inline void usb_serial_debug_data(struct device *dev, const char *function, int size, const unsigned char *data) { dev_dbg(dev, "%s - length = %d, data = %*ph\n", function, size, size, data); } /* * Macro for reporting errors in write path to avoid infinite loop * when port is used as a console. */ #define dev_err_console(usport, fmt, ...) \ do { \ static bool __print_once; \ struct usb_serial_port *__port = (usport); \ \ if (!__port->port.console || !__print_once) { \ __print_once = true; \ dev_err(&__port->dev, fmt, ##__VA_ARGS__); \ } \ } while (0) /* * module_usb_serial_driver() - Helper macro for registering a USB Serial driver * @__serial_drivers: list of usb_serial drivers to register * @__ids: all device ids that @__serial_drivers bind to * * Helper macro for USB serial drivers which do not do anything special * in module init/exit. This eliminates a lot of boilerplate. Each * module may only use this macro once, and calling it replaces * module_init() and module_exit() * */ #define usb_serial_module_driver(__name, __serial_drivers, __ids) \ static int __init usb_serial_module_init(void) \ { \ return usb_serial_register_drivers(__serial_drivers, \ __name, __ids); \ } \ module_init(usb_serial_module_init); \ static void __exit usb_serial_module_exit(void) \ { \ usb_serial_deregister_drivers(__serial_drivers); \ } \ module_exit(usb_serial_module_exit); #define module_usb_serial_driver(__serial_drivers, __ids) \ usb_serial_module_driver(KBUILD_MODNAME, __serial_drivers, __ids) #endif /* __LINUX_USB_SERIAL_H */ usb/composite.h 0000644 00000062253 14722070374 0007526 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * composite.h -- framework for usb gadgets which are composite devices * * Copyright (C) 2006-2008 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __LINUX_USB_COMPOSITE_H #define __LINUX_USB_COMPOSITE_H /* * This framework is an optional layer on top of the USB Gadget interface, * making it easier to build (a) Composite devices, supporting multiple * functions within any single configuration, and (b) Multi-configuration * devices, also supporting multiple functions but without necessarily * having more than one function per configuration. * * Example: a device with a single configuration supporting both network * link and mass storage functions is a composite device. Those functions * might alternatively be packaged in individual configurations, but in * the composite model the host can use both functions at the same time.
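 *
 * A minimal sketch of that example in code, using the APIs declared later
 * in this header; the my_* names and the pre-acquired function pointers
 * are hypothetical, not part of this framework:
 *
 *	static struct usb_function *f_network;	// e.g. from usb_get_function()
 *	static struct usb_function *f_storage;
 *	static struct usb_configuration my_config;
 *
 *	static int my_config_bind(struct usb_configuration *c)
 *	{
 *		int status = usb_add_function(c, f_network);
 *
 *		if (status < 0)
 *			return status;
 *		return usb_add_function(c, f_storage);
 *	}
 *
 *	static int my_bind(struct usb_composite_dev *cdev)
 *	{
 *		return usb_add_config(cdev, &my_config, my_config_bind);
 *	}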
*/ #include <linux/bcd.h> #include <linux/version.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/log2.h> #include <linux/configfs.h> /* * USB function drivers should return USB_GADGET_DELAYED_STATUS if they * wish to delay the data/status stages of the control transfer till they * are ready. The control transfer will then be kept from completing till * all the function drivers that requested USB_GADGET_DELAYED_STATUS * invoke usb_composite_setup_continue(). */ #define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */ /* big enough to hold our biggest descriptor */ #define USB_COMP_EP0_BUFSIZ 4096 /* OS feature descriptor length <= 4kB */ #define USB_COMP_EP0_OS_DESC_BUFSIZ 4096 #define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1) struct usb_configuration; /** * struct usb_os_desc_ext_prop - describes one "Extended Property" * @entry: used to keep a list of extended properties * @type: Extended Property type * @name_len: Extended Property unicode name length, including terminating '\0' * @name: Extended Property name * @data_len: Length of Extended Property blob (for unicode store double len) * @data: Extended Property blob * @item: Represents this Extended Property in configfs */ struct usb_os_desc_ext_prop { struct list_head entry; u8 type; int name_len; char *name; int data_len; char *data; struct config_item item; }; /** * struct usb_os_desc - describes OS descriptors associated with one interface * @ext_compat_id: 16 bytes of "Compatible ID" and "Subcompatible ID" * @ext_prop: Extended Properties list * @ext_prop_len: Total length of Extended Properties blobs * @ext_prop_count: Number of Extended Properties * @opts_mutex: Optional mutex protecting config data of a usb_function_instance * @group: Represents OS descriptors associated with an interface in configfs * @owner: Module associated with this OS descriptor */ struct usb_os_desc { char *ext_compat_id; struct list_head ext_prop; int ext_prop_len; int ext_prop_count; struct mutex *opts_mutex; struct config_group group; struct module *owner; }; /** * struct usb_os_desc_table - describes OS descriptors associated with one * interface of a usb_function * @if_id: Interface id * @os_desc: "Extended Compatibility ID" and "Extended Properties" of the * interface * * Each interface can have at most one "Extended Compatibility ID" and a * number of "Extended Properties". */ struct usb_os_desc_table { int if_id; struct usb_os_desc *os_desc; }; /** * struct usb_function - describes one function of a configuration * @name: For diagnostics, identifies the function. * @strings: tables of strings, keyed by identifiers assigned during bind() * and by language IDs provided in control requests * @fs_descriptors: Table of full (or low) speed descriptors, using interface and * string identifiers assigned during @bind(). If this pointer is null, * the function will not be available at full speed (or at low speed). * @hs_descriptors: Table of high speed descriptors, using interface and * string identifiers assigned during @bind(). If this pointer is null, * the function will not be available at high speed. * @ss_descriptors: Table of super speed descriptors, using interface and * string identifiers assigned during @bind(). If this * pointer is null after initiation, the function will not * be available at super speed. * @ssp_descriptors: Table of super speed plus descriptors, using * interface and string identifiers assigned during @bind().
If * this pointer is null after initiation, the function will not * be available at super speed plus. * @config: assigned when @usb_add_function() is called; this is the * configuration with which this function is associated. * @os_desc_table: Table of (interface id, os descriptors) pairs. The function * can expose more than one interface. If an interface is a member of * an IAD, only the first interface of IAD has its entry in the table. * @os_desc_n: Number of entries in os_desc_table * @bind: Before the gadget can register, all of its functions bind() to the * available resources including string and interface identifiers used * in interface or class descriptors; endpoints; I/O buffers; and so on. * @unbind: Reverses @bind; called as a side effect of unregistering the * driver which added this function. * @free_func: free the struct usb_function. * @mod: (internal) points to the module that created this structure. * @set_alt: (REQUIRED) Reconfigures altsettings; function drivers may * initialize usb_ep.driver data at this time (when it is used). * Note that setting an interface to its current altsetting resets * interface state, and that all interfaces have a disabled state. * @get_alt: Returns the active altsetting. If this is not provided, * then only altsetting zero is supported. * @disable: (REQUIRED) Indicates the function should be disabled. Reasons * include host resetting or reconfiguring the gadget, and disconnection. * @setup: Used for interface-specific control requests. * @req_match: Tests if a given class request can be handled by this function. * @suspend: Notifies functions when the host stops sending USB traffic. * @resume: Notifies functions when the host restarts USB traffic. * @get_status: Returns function status as a reply to * GetStatus() request when the recipient is Interface. * @func_suspend: callback to be called when * SetFeature(FUNCTION_SUSPEND) is received * * A single USB function uses one or more interfaces, and should in most * cases support operation at both full and high speeds. Each function is * associated by @usb_add_function() with one configuration; that function * causes @bind() to be called so resources can be allocated as part of * setting up a gadget driver. Those resources include endpoints, which * should be allocated using @usb_ep_autoconfig(). * * To support dual speed operation, a function driver provides descriptors * for both high and full speed operation. Except in rare cases that don't * involve bulk endpoints, each speed needs different endpoint descriptors. * * Function drivers choose their own strategies for managing instance data. * The simplest strategy just declares it "static", which means the function * can only be activated once. If the function needs to be exposed in more * than one configuration at a given speed, it needs to support multiple * usb_function structures (one for each configuration). * * A more complex strategy might encapsulate a @usb_function structure inside * a driver-specific instance structure to allow multiple activations. An * example of multiple activations might be a CDC ACM function that supports * two or more distinct instances within the same configuration, providing * several independent logical data links to a USB host.
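 *
 * A brief sketch of that encapsulation strategy (the f_example names are
 * hypothetical, not part of this API):
 *
 *	struct f_example {
 *		struct usb_function	function;
 *		u8			data_id;	// driver-private state
 *	};
 *
 *	static inline struct f_example *func_to_example(struct usb_function *f)
 *	{
 *		return container_of(f, struct f_example, function);
 *	}
 *
 * Each activation then allocates its own struct f_example, so several
 * instances of the function can coexist.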
*/ struct usb_function { const char *name; struct usb_gadget_strings **strings; struct usb_descriptor_header **fs_descriptors; struct usb_descriptor_header **hs_descriptors; struct usb_descriptor_header **ss_descriptors; struct usb_descriptor_header **ssp_descriptors; struct usb_configuration *config; struct usb_os_desc_table *os_desc_table; unsigned os_desc_n; /* REVISIT: bind() functions can be marked __init, which * makes trouble for section mismatch analysis. See if * we can't restructure things to avoid mismatching. * Related: unbind() may kfree() but bind() won't... */ /* configuration management: bind/unbind */ int (*bind)(struct usb_configuration *, struct usb_function *); void (*unbind)(struct usb_configuration *, struct usb_function *); void (*free_func)(struct usb_function *f); struct module *mod; /* runtime state management */ int (*set_alt)(struct usb_function *, unsigned interface, unsigned alt); int (*get_alt)(struct usb_function *, unsigned interface); void (*disable)(struct usb_function *); int (*setup)(struct usb_function *, const struct usb_ctrlrequest *); bool (*req_match)(struct usb_function *, const struct usb_ctrlrequest *, bool config0); void (*suspend)(struct usb_function *); void (*resume)(struct usb_function *); /* USB 3.0 additions */ int (*get_status)(struct usb_function *); int (*func_suspend)(struct usb_function *, u8 suspend_opt); /* private: */ /* internals */ struct list_head list; DECLARE_BITMAP(endpoints, 32); const struct usb_function_instance *fi; unsigned int bind_deactivated:1; }; int usb_add_function(struct usb_configuration *, struct usb_function *); int usb_function_deactivate(struct usb_function *); int usb_function_activate(struct usb_function *); int usb_interface_id(struct usb_configuration *, struct usb_function *); int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep, u8 alt); int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep); #define MAX_CONFIG_INTERFACES 16 /* arbitrary; max 255 */ /** * struct usb_configuration - represents one gadget configuration * @label: For diagnostics, describes the configuration. * @strings: Tables of strings, keyed by identifiers assigned during @bind() * and by language IDs provided in control requests. * @descriptors: Table of descriptors preceding all function descriptors. * Examples include OTG and vendor-specific descriptors. * @unbind: Reverses @bind; called as a side effect of unregistering the * driver which added this configuration. * @setup: Used to delegate control requests that aren't handled by standard * device infrastructure or directed at a specific interface. * @bConfigurationValue: Copied into configuration descriptor. * @iConfiguration: Copied into configuration descriptor. * @bmAttributes: Copied into configuration descriptor. * @MaxPower: Power consumption in mA. Used to compute bMaxPower in the * configuration descriptor after considering the bus speed. * @cdev: assigned by @usb_add_config() before calling @bind(); this is * the device associated with this configuration. * * Configurations are building blocks for gadget drivers structured around * function drivers. Simple USB gadgets require only one function and one * configuration, and handle dual-speed hardware by always providing the same * functionality. Slightly more complex gadgets may have more than one * single-function configuration at a given speed; or have configurations * that only work at one speed.
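 *
 * A sketch of a simple configuration built on these fields (the values
 * and the example_* name are hypothetical):
 *
 *	static struct usb_configuration example_config = {
 *		.label			= "example",
 *		.bConfigurationValue	= 1,
 *		.bmAttributes		= USB_CONFIG_ATT_ONE,	// from ch9.h
 *		.MaxPower		= 100,			// mA
 *	};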
* * Composite devices are, by definition, ones with configurations which * include more than one function. * * The lifecycle of a usb_configuration includes allocation, initialization * of the fields described above, and calling @usb_add_config() to set up * internal data and bind it to a specific device. The configuration's * @bind() method is then used to initialize all the functions and then * call @usb_add_function() for them. * * Those functions would normally be independent of each other, but that's * not mandatory. CDC WMC devices are an example where functions often * depend on other functions, with some functions subsidiary to others. * Such interdependency may be managed in any way, so long as all of the * descriptors are complete by the time the composite driver returns from * its bind() routine. */ struct usb_configuration { const char *label; struct usb_gadget_strings **strings; const struct usb_descriptor_header **descriptors; /* REVISIT: bind() functions can be marked __init, which * makes trouble for section mismatch analysis. See if * we can't restructure things to avoid mismatching... */ /* configuration management: unbind/setup */ void (*unbind)(struct usb_configuration *); int (*setup)(struct usb_configuration *, const struct usb_ctrlrequest *); /* fields in the config descriptor */ u8 bConfigurationValue; u8 iConfiguration; u8 bmAttributes; u16 MaxPower; struct usb_composite_dev *cdev; /* private: */ /* internals */ struct list_head list; struct list_head functions; u8 next_interface_id; unsigned superspeed:1; unsigned highspeed:1; unsigned fullspeed:1; unsigned superspeed_plus:1; struct usb_function *interface[MAX_CONFIG_INTERFACES]; }; int usb_add_config(struct usb_composite_dev *, struct usb_configuration *, int (*)(struct usb_configuration *)); void usb_remove_config(struct usb_composite_dev *, struct usb_configuration *); /* predefined index for usb_composite_driver */ enum { USB_GADGET_MANUFACTURER_IDX = 0, USB_GADGET_PRODUCT_IDX, USB_GADGET_SERIAL_IDX, USB_GADGET_FIRST_AVAIL_IDX, }; /** * struct usb_composite_driver - groups configurations into a gadget * @name: For diagnostics, identifies the driver. * @dev: Template descriptor for the device, including default device * identifiers. * @strings: tables of strings, keyed by identifiers assigned during @bind * and language IDs provided in control requests. Note: The first entries * are predefined. The first entry that may be used is * USB_GADGET_FIRST_AVAIL_IDX * @max_speed: Highest speed the driver supports. * @needs_serial: set to 1 if the gadget needs userspace to provide * a serial number. If one is not provided, a warning will be printed. * @bind: (REQUIRED) Used to allocate resources that are shared across the * whole device, such as string IDs, and add its configurations using * @usb_add_config(). This may fail by returning a negative errno * value; it should return zero on successful initialization. * @unbind: Reverses @bind; called as a side effect of unregistering * this driver. * @disconnect: optional driver disconnect method * @suspend: Notifies when the host stops sending USB traffic, * after function notifications * @resume: Notifies configuration when the host restarts USB traffic, * before function notifications * @gadget_driver: Gadget driver controlling this driver * * Devices default to reporting self powered operation. Devices which rely * on bus powered operation should report this in their @bind method. * * Before returning from @bind, various fields in the template descriptor * may be overridden.
These include the idVendor/idProduct/bcdDevice values * normally to bind the appropriate host side driver, and the three strings * (iManufacturer, iProduct, iSerialNumber) normally used to provide user * meaningful device identifiers. (The strings will not be defined unless * they are defined in @dev and @strings.) The correct ep0 maxpacket size * is also reported, as defined by the underlying controller driver. */ struct usb_composite_driver { const char *name; const struct usb_device_descriptor *dev; struct usb_gadget_strings **strings; enum usb_device_speed max_speed; unsigned needs_serial:1; int (*bind)(struct usb_composite_dev *cdev); int (*unbind)(struct usb_composite_dev *); void (*disconnect)(struct usb_composite_dev *); /* global suspend hooks */ void (*suspend)(struct usb_composite_dev *); void (*resume)(struct usb_composite_dev *); struct usb_gadget_driver gadget_driver; }; extern int usb_composite_probe(struct usb_composite_driver *driver); extern void usb_composite_unregister(struct usb_composite_driver *driver); /** * module_usb_composite_driver() - Helper macro for registering a USB gadget * composite driver * @__usb_composite_driver: usb_composite_driver struct * * Helper macro for USB gadget composite drivers which do not do anything * special in module init/exit. This eliminates a lot of boilerplate. Each * module may only use this macro once, and calling it replaces module_init() * and module_exit() */ #define module_usb_composite_driver(__usb_composite_driver) \ module_driver(__usb_composite_driver, usb_composite_probe, \ usb_composite_unregister) extern void usb_composite_setup_continue(struct usb_composite_dev *cdev); extern int composite_dev_prepare(struct usb_composite_driver *composite, struct usb_composite_dev *cdev); extern int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, struct usb_ep *ep0); void composite_dev_cleanup(struct usb_composite_dev *cdev); static inline struct usb_composite_driver *to_cdriver( struct usb_gadget_driver *gdrv) { return container_of(gdrv, struct usb_composite_driver, gadget_driver); } #define OS_STRING_QW_SIGN_LEN 14 #define OS_STRING_IDX 0xEE /** * struct usb_composite_dev - represents one composite usb gadget * @gadget: read-only, abstracts the gadget's usb peripheral controller * @req: used for control responses; buffer is pre-allocated * @os_desc_req: used for OS descriptors responses; buffer is pre-allocated * @config: the currently active configuration * @qw_sign: qwSignature part of the OS string * @b_vendor_code: bMS_VendorCode part of the OS string * @use_os_string: false by default, interested gadgets set it * @os_desc_config: the configuration to be used with OS descriptors * @setup_pending: true when setup request is queued but not completed * @os_desc_pending: true when os_desc request is queued but not completed * * One of these devices is allocated and initialized before the * associated device driver's bind() is called. * * OPEN ISSUE: it appears that some WUSB devices will need to be * built by combining a normal (wired) gadget with a wireless one. * This revision of the gadget framework should probably try to make * sure doing that won't hurt too much.
* * One notion for how to handle Wireless USB devices involves: * * (a) a second gadget here, discovery mechanism TBD, but likely * needing separate "register/unregister WUSB gadget" calls; * (b) updates to usb_gadget to include flags "is it wireless", * "is it wired", plus (presumably in a wrapper structure) * bandgroup and PHY info; * (c) presumably a wireless_ep wrapping a usb_ep, and reporting * wireless-specific parameters like maxburst and maxsequence; * (d) configurations that are specific to wireless links; * (e) function drivers that understand wireless configs and will * support wireless for (additional) function instances; * (f) a function to support association setup (like CBAF), not * necessarily requiring a wireless adapter; * (g) composite device setup that can create one or more wireless * configs, including appropriate association setup support; * (h) more, TBD. */ struct usb_composite_dev { struct usb_gadget *gadget; struct usb_request *req; struct usb_request *os_desc_req; struct usb_configuration *config; /* OS String is a custom (yet popular) extension to the USB standard. */ u8 qw_sign[OS_STRING_QW_SIGN_LEN]; u8 b_vendor_code; struct usb_configuration *os_desc_config; unsigned int use_os_string:1; /* private: */ /* internals */ unsigned int suspended:1; struct usb_device_descriptor desc; struct list_head configs; struct list_head gstrings; struct usb_composite_driver *driver; u8 next_string_id; char *def_manufacturer; /* the gadget driver won't enable the data pullup * while the deactivation count is nonzero. */ unsigned deactivations; /* the composite driver won't complete the control transfer's * data/status stages till delayed_status is zero. */ int delayed_status; /* protects deactivations and delayed_status counts*/ spinlock_t lock; /* public: */ unsigned int setup_pending:1; unsigned int os_desc_pending:1; }; extern int usb_string_id(struct usb_composite_dev *c); extern int usb_string_ids_tab(struct usb_composite_dev *c, struct usb_string *str); extern struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev, struct usb_gadget_strings **sp, unsigned n_strings); extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); extern void composite_disconnect(struct usb_gadget *gadget); extern int composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl); extern void composite_suspend(struct usb_gadget *gadget); extern void composite_resume(struct usb_gadget *gadget); /* * Some systems will need runtime overrides for the product identifiers * published in the device descriptor, either numbers or strings or both. * String parameters are in UTF-8 (superset of ASCII's 7 bit characters). 
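 *
 * A sketch of how a gadget driver typically wires these overrides up,
 * using the macro and helper declared below (the example_bind name is
 * hypothetical):
 *
 *	USB_GADGET_COMPOSITE_OPTIONS();		// declares "coverwrite"
 *
 *	static int example_bind(struct usb_composite_dev *cdev)
 *	{
 *		// ... allocate string IDs, add configurations ...
 *		usb_composite_overwrite_options(cdev, &coverwrite);
 *		return 0;
 *	}
 *
 * Loading the module with e.g. idProduct=0x1234 then overrides the value
 * compiled into the template device descriptor.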
*/ struct usb_composite_overwrite { u16 idVendor; u16 idProduct; u16 bcdDevice; char *serial_number; char *manufacturer; char *product; }; #define USB_GADGET_COMPOSITE_OPTIONS() \ static struct usb_composite_overwrite coverwrite; \ \ module_param_named(idVendor, coverwrite.idVendor, ushort, S_IRUGO); \ MODULE_PARM_DESC(idVendor, "USB Vendor ID"); \ \ module_param_named(idProduct, coverwrite.idProduct, ushort, S_IRUGO); \ MODULE_PARM_DESC(idProduct, "USB Product ID"); \ \ module_param_named(bcdDevice, coverwrite.bcdDevice, ushort, S_IRUGO); \ MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)"); \ \ module_param_named(iSerialNumber, coverwrite.serial_number, charp, \ S_IRUGO); \ MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); \ \ module_param_named(iManufacturer, coverwrite.manufacturer, charp, \ S_IRUGO); \ MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string"); \ \ module_param_named(iProduct, coverwrite.product, charp, S_IRUGO); \ MODULE_PARM_DESC(iProduct, "USB Product string") void usb_composite_overwrite_options(struct usb_composite_dev *cdev, struct usb_composite_overwrite *covr); static inline u16 get_default_bcdDevice(void) { u16 bcdDevice; bcdDevice = bin2bcd((LINUX_VERSION_CODE >> 16 & 0xff)) << 8; bcdDevice |= bin2bcd((LINUX_VERSION_CODE >> 8 & 0xff)); return bcdDevice; } struct usb_function_driver { const char *name; struct module *mod; struct list_head list; struct usb_function_instance *(*alloc_inst)(void); struct usb_function *(*alloc_func)(struct usb_function_instance *inst); }; struct usb_function_instance { struct config_group group; struct list_head cfs_list; struct usb_function_driver *fd; int (*set_inst_name)(struct usb_function_instance *inst, const char *name); void (*free_func_inst)(struct usb_function_instance *inst); }; void usb_function_unregister(struct usb_function_driver *f); int usb_function_register(struct usb_function_driver *newf); void usb_put_function_instance(struct usb_function_instance *fi); void usb_put_function(struct usb_function *f); struct usb_function_instance *usb_get_function_instance(const char *name); struct usb_function *usb_get_function(struct usb_function_instance *fi); struct usb_configuration *usb_get_config(struct usb_composite_dev *cdev, int val); int usb_add_config_only(struct usb_composite_dev *cdev, struct usb_configuration *config); void usb_remove_function(struct usb_configuration *c, struct usb_function *f); #define DECLARE_USB_FUNCTION(_name, _inst_alloc, _func_alloc) \ static struct usb_function_driver _name ## usb_func = { \ .name = __stringify(_name), \ .mod = THIS_MODULE, \ .alloc_inst = _inst_alloc, \ .alloc_func = _func_alloc, \ }; \ MODULE_ALIAS("usbfunc:"__stringify(_name)); #define DECLARE_USB_FUNCTION_INIT(_name, _inst_alloc, _func_alloc) \ DECLARE_USB_FUNCTION(_name, _inst_alloc, _func_alloc) \ static int __init _name ## mod_init(void) \ { \ return usb_function_register(&_name ## usb_func); \ } \ static void __exit _name ## mod_exit(void) \ { \ usb_function_unregister(&_name ## usb_func); \ } \ module_init(_name ## mod_init); \ module_exit(_name ## mod_exit) /* messaging utils */ #define DBG(d, fmt, args...) \ dev_dbg(&(d)->gadget->dev , fmt , ## args) #define VDBG(d, fmt, args...) \ dev_vdbg(&(d)->gadget->dev , fmt , ## args) #define ERROR(d, fmt, args...) \ dev_err(&(d)->gadget->dev , fmt , ## args) #define WARNING(d, fmt, args...) \ dev_warn(&(d)->gadget->dev , fmt , ## args) #define INFO(d, fmt, args...) 
\ dev_info(&(d)->gadget->dev , fmt , ## args) #endif /* __LINUX_USB_COMPOSITE_H */ usb/typec.h 0000644 00000015447 14722070374 0006653 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_USB_TYPEC_H #define __LINUX_USB_TYPEC_H #include <linux/types.h> /* USB Type-C Specification releases */ #define USB_TYPEC_REV_1_0 0x100 /* 1.0 */ #define USB_TYPEC_REV_1_1 0x110 /* 1.1 */ #define USB_TYPEC_REV_1_2 0x120 /* 1.2 */ struct typec_partner; struct typec_cable; struct typec_plug; struct typec_port; struct fwnode_handle; struct device; enum typec_port_type { TYPEC_PORT_SRC, TYPEC_PORT_SNK, TYPEC_PORT_DRP, }; enum typec_port_data { TYPEC_PORT_DFP, TYPEC_PORT_UFP, TYPEC_PORT_DRD, }; enum typec_plug_type { USB_PLUG_NONE, USB_PLUG_TYPE_A, USB_PLUG_TYPE_B, USB_PLUG_TYPE_C, USB_PLUG_CAPTIVE, }; enum typec_data_role { TYPEC_DEVICE, TYPEC_HOST, }; enum typec_role { TYPEC_SINK, TYPEC_SOURCE, }; enum typec_pwr_opmode { TYPEC_PWR_MODE_USB, TYPEC_PWR_MODE_1_5A, TYPEC_PWR_MODE_3_0A, TYPEC_PWR_MODE_PD, }; enum typec_accessory { TYPEC_ACCESSORY_NONE, TYPEC_ACCESSORY_AUDIO, TYPEC_ACCESSORY_DEBUG, }; #define TYPEC_MAX_ACCESSORY 3 enum typec_orientation { TYPEC_ORIENTATION_NONE, TYPEC_ORIENTATION_NORMAL, TYPEC_ORIENTATION_REVERSE, }; /* * struct usb_pd_identity - USB Power Delivery identity data * @id_header: ID Header VDO * @cert_stat: Cert Stat VDO * @product: Product VDO * * USB power delivery Discover Identity command response data. * * REVISIT: This is USB Power Delivery specific information, so this structure * probably belongs to a USB Power Delivery header file once we have them. */ struct usb_pd_identity { u32 id_header; u32 cert_stat; u32 product; }; int typec_partner_set_identity(struct typec_partner *partner); int typec_cable_set_identity(struct typec_cable *cable); /* * struct typec_altmode_desc - USB Type-C Alternate Mode Descriptor * @svid: Standard or Vendor ID * @mode: Index of the Mode * @vdo: VDO returned by Discover Modes USB PD command * @roles: Only for ports. DRP if the mode is available in both roles * * Description of an Alternate Mode which a connector, cable plug or partner * supports. */ struct typec_altmode_desc { u16 svid; u8 mode; u32 vdo; /* Only used with ports */ enum typec_port_data roles; }; struct typec_altmode *typec_partner_register_altmode(struct typec_partner *partner, const struct typec_altmode_desc *desc); struct typec_altmode *typec_plug_register_altmode(struct typec_plug *plug, const struct typec_altmode_desc *desc); struct typec_altmode *typec_port_register_altmode(struct typec_port *port, const struct typec_altmode_desc *desc); void typec_unregister_altmode(struct typec_altmode *altmode); struct typec_port *typec_altmode2port(struct typec_altmode *alt); void typec_altmode_update_active(struct typec_altmode *alt, bool active); enum typec_plug_index { TYPEC_PLUG_SOP_P, TYPEC_PLUG_SOP_PP, }; /* * struct typec_plug_desc - USB Type-C Cable Plug Descriptor * @index: SOP Prime for the plug connected to DFP and SOP Double Prime for the * plug connected to UFP * * Represents USB Type-C Cable Plug. */ struct typec_plug_desc { enum typec_plug_index index; }; /* * struct typec_cable_desc - USB Type-C Cable Descriptor * @type: The plug type from USB PD Cable VDO * @active: Is the cable active or passive * @identity: Result of Discover Identity command * * Represents a USB Type-C cable attached to a USB Type-C port.
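 *
 * A short sketch of registering such a cable (a passive Type-C cable with
 * no Discover Identity data; the variable names are illustrative):
 *
 *	struct typec_cable_desc desc = {
 *		.type = USB_PLUG_TYPE_C,
 *		.active = 0,
 *		.identity = NULL,
 *	};
 *	struct typec_cable *cable = typec_register_cable(port, &desc);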
*/ struct typec_cable_desc { enum typec_plug_type type; unsigned int active:1; struct usb_pd_identity *identity; }; /* * struct typec_partner_desc - USB Type-C Partner Descriptor * @usb_pd: USB Power Delivery support * @accessory: Audio, Debug or none. * @identity: Discover Identity command data * * Details about a partner that is attached to a USB Type-C port. If the * @identity member exists when the partner is registered, a directory named * "identity" is created in sysfs for the partner device. */ struct typec_partner_desc { unsigned int usb_pd:1; enum typec_accessory accessory; struct usb_pd_identity *identity; }; /* * struct typec_capability - USB Type-C Port Capabilities * @type: Supported power role of the port * @data: Supported data role of the port * @revision: USB Type-C Specification release. Binary coded decimal * @pd_revision: USB Power Delivery Specification revision if supported * @prefer_role: Initial role preference (DRP ports). * @accessory: Supported Accessory Modes * @sw: Cable plug orientation switch * @mux: Multiplexer switch for Alternate/Accessory Modes * @fwnode: Optional fwnode of the port * @try_role: Set data role preference for DRP port * @dr_set: Set Data Role * @pr_set: Set Power Role * @vconn_set: Set VCONN Role * @port_type_set: Set port type * * Static capabilities of a single USB Type-C port. */ struct typec_capability { enum typec_port_type type; enum typec_port_data data; u16 revision; /* 0120H = "1.2" */ u16 pd_revision; /* 0300H = "3.0" */ int prefer_role; enum typec_accessory accessory[TYPEC_MAX_ACCESSORY]; struct typec_switch *sw; struct typec_mux *mux; struct fwnode_handle *fwnode; int (*try_role)(const struct typec_capability *, int role); int (*dr_set)(const struct typec_capability *, enum typec_data_role); int (*pr_set)(const struct typec_capability *, enum typec_role); int (*vconn_set)(const struct typec_capability *, enum typec_role); int (*port_type_set)(const struct typec_capability *, enum typec_port_type); }; /* Specific to try_role(). Indicates the user wants to clear the preference.
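 *
 * A sketch of a driver honoring this in its try_role() callback; the
 * example_port type holding the capability is hypothetical:
 *
 *	struct example_port {
 *		struct typec_capability	cap;
 *		int			preferred_role;
 *	};
 *
 *	static int example_try_role(const struct typec_capability *cap, int role)
 *	{
 *		struct example_port *p = container_of(cap, struct example_port, cap);
 *
 *		p->preferred_role = role;	// TYPEC_NO_PREFERRED_ROLE clears it
 *		return 0;			// takes effect on the next attach
 *	}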
*/ #define TYPEC_NO_PREFERRED_ROLE (-1) struct typec_port *typec_register_port(struct device *parent, const struct typec_capability *cap); void typec_unregister_port(struct typec_port *port); struct typec_partner *typec_register_partner(struct typec_port *port, struct typec_partner_desc *desc); void typec_unregister_partner(struct typec_partner *partner); struct typec_cable *typec_register_cable(struct typec_port *port, struct typec_cable_desc *desc); void typec_unregister_cable(struct typec_cable *cable); struct typec_plug *typec_register_plug(struct typec_cable *cable, struct typec_plug_desc *desc); void typec_unregister_plug(struct typec_plug *plug); void typec_set_data_role(struct typec_port *port, enum typec_data_role role); void typec_set_pwr_role(struct typec_port *port, enum typec_role role); void typec_set_vconn_role(struct typec_port *port, enum typec_role role); void typec_set_pwr_opmode(struct typec_port *port, enum typec_pwr_opmode mode); int typec_set_orientation(struct typec_port *port, enum typec_orientation orientation); enum typec_orientation typec_get_orientation(struct typec_port *port); int typec_set_mode(struct typec_port *port, int mode); int typec_find_port_power_role(const char *name); int typec_find_power_role(const char *name); int typec_find_port_data_role(const char *name); #endif /* __LINUX_USB_TYPEC_H */ usb/ulpi.h 0000644 00000003766 14722070374 0006475 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * ulpi.h -- ULPI defines and function prototypes * * Copyright (C) 2010 Nokia Corporation * * This software is distributed under the terms of the GNU General * Public License ("GPL") as published by the Free Software Foundation, * version 2 of that License. */ #ifndef __LINUX_USB_ULPI_H #define __LINUX_USB_ULPI_H #include <linux/usb/otg.h> #include <linux/ulpi/regs.h> /*-------------------------------------------------------------------------*/ /* * ULPI Flags */ #define ULPI_OTG_ID_PULLUP (1 << 0) #define ULPI_OTG_DP_PULLDOWN_DIS (1 << 1) #define ULPI_OTG_DM_PULLDOWN_DIS (1 << 2) #define ULPI_OTG_DISCHRGVBUS (1 << 3) #define ULPI_OTG_CHRGVBUS (1 << 4) #define ULPI_OTG_DRVVBUS (1 << 5) #define ULPI_OTG_DRVVBUS_EXT (1 << 6) #define ULPI_OTG_EXTVBUSIND (1 << 7) #define ULPI_IC_6PIN_SERIAL (1 << 8) #define ULPI_IC_3PIN_SERIAL (1 << 9) #define ULPI_IC_CARKIT (1 << 10) #define ULPI_IC_CLKSUSPM (1 << 11) #define ULPI_IC_AUTORESUME (1 << 12) #define ULPI_IC_EXTVBUS_INDINV (1 << 13) #define ULPI_IC_IND_PASSTHRU (1 << 14) #define ULPI_IC_PROTECT_DIS (1 << 15) #define ULPI_FC_HS (1 << 16) #define ULPI_FC_FS (1 << 17) #define ULPI_FC_LS (1 << 18) #define ULPI_FC_FS4LS (1 << 19) #define ULPI_FC_TERMSEL (1 << 20) #define ULPI_FC_OP_NORM (1 << 21) #define ULPI_FC_OP_NODRV (1 << 22) #define ULPI_FC_OP_DIS_NRZI (1 << 23) #define ULPI_FC_OP_NSYNC_NEOP (1 << 24) #define ULPI_FC_RST (1 << 25) #define ULPI_FC_SUSPM (1 << 26) /*-------------------------------------------------------------------------*/ #if IS_ENABLED(CONFIG_USB_ULPI) struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, unsigned int flags); #else static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, unsigned int flags) { return NULL; } #endif #ifdef CONFIG_USB_ULPI_VIEWPORT /* access ops for controllers with a viewport register */ extern struct usb_phy_io_ops ulpi_viewport_access_ops; #endif #endif /* __LINUX_USB_ULPI_H */ usb/isp1301.h 0000644 00000004601 14722070374 0006615 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * NXP ISP1301 USB transceiver driver * * Copyright (C) 2012
Roland Stigge <stigge@antcom.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #ifndef __LINUX_USB_ISP1301_H #define __LINUX_USB_ISP1301_H #include <linux/of.h> /* I2C Register definitions: */ #define ISP1301_I2C_MODE_CONTROL_1 0x04 /* u8 read, set, +1 clear */ #define MC1_SPEED_REG (1 << 0) #define MC1_SUSPEND_REG (1 << 1) #define MC1_DAT_SE0 (1 << 2) #define MC1_TRANSPARENT (1 << 3) #define MC1_BDIS_ACON_EN (1 << 4) #define MC1_OE_INT_EN (1 << 5) #define MC1_UART_EN (1 << 6) #define MC1_MASK 0x7f #define ISP1301_I2C_MODE_CONTROL_2 0x12 /* u8 read, set, +1 clear */ #define MC2_GLOBAL_PWR_DN (1 << 0) #define MC2_SPD_SUSP_CTRL (1 << 1) #define MC2_BI_DI (1 << 2) #define MC2_TRANSP_BDIR0 (1 << 3) #define MC2_TRANSP_BDIR1 (1 << 4) #define MC2_AUDIO_EN (1 << 5) #define MC2_PSW_EN (1 << 6) #define MC2_EN2V7 (1 << 7) #define ISP1301_I2C_OTG_CONTROL_1 0x06 /* u8 read, set, +1 clear */ #define OTG1_DP_PULLUP (1 << 0) #define OTG1_DM_PULLUP (1 << 1) #define OTG1_DP_PULLDOWN (1 << 2) #define OTG1_DM_PULLDOWN (1 << 3) #define OTG1_ID_PULLDOWN (1 << 4) #define OTG1_VBUS_DRV (1 << 5) #define OTG1_VBUS_DISCHRG (1 << 6) #define OTG1_VBUS_CHRG (1 << 7) #define ISP1301_I2C_OTG_CONTROL_2 0x10 /* u8 readonly */ #define OTG_B_SESS_END (1 << 6) #define OTG_B_SESS_VLD (1 << 7) #define ISP1301_I2C_INTERRUPT_SOURCE 0x8 #define ISP1301_I2C_INTERRUPT_LATCH 0xA #define ISP1301_I2C_INTERRUPT_FALLING 0xC #define ISP1301_I2C_INTERRUPT_RISING 0xE #define INT_VBUS_VLD (1 << 0) #define INT_SESS_VLD (1 << 1) #define INT_DP_HI (1 << 2) #define INT_ID_GND (1 << 3) #define INT_DM_HI (1 << 4) #define INT_ID_FLOAT (1 << 5) #define INT_BDIS_ACON (1 << 6) #define INT_CR_INT (1 << 7) #define ISP1301_I2C_REG_CLEAR_ADDR 1 /* Register Address Modifier */ struct i2c_client *isp1301_get_client(struct device_node *node); #endif /* __LINUX_USB_ISP1301_H */ usb/pd_ado.h 0000644 00000002247 14722070374 0006747 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2017 Dialog Semiconductor * * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com> */ #ifndef __LINUX_USB_PD_ADO_H #define __LINUX_USB_PD_ADO_H /* ADO : Alert Data Object */ #define USB_PD_ADO_TYPE_SHIFT 24 #define USB_PD_ADO_TYPE_MASK 0xff #define USB_PD_ADO_FIXED_BATT_SHIFT 20 #define USB_PD_ADO_FIXED_BATT_MASK 0xf #define USB_PD_ADO_HOT_SWAP_BATT_SHIFT 16 #define USB_PD_ADO_HOT_SWAP_BATT_MASK 0xf #define USB_PD_ADO_TYPE_BATT_STATUS_CHANGE BIT(1) #define USB_PD_ADO_TYPE_OCP BIT(2) #define USB_PD_ADO_TYPE_OTP BIT(3) #define USB_PD_ADO_TYPE_OP_COND_CHANGE BIT(4) #define USB_PD_ADO_TYPE_SRC_INPUT_CHANGE BIT(5) #define USB_PD_ADO_TYPE_OVP BIT(6) static inline unsigned int usb_pd_ado_type(u32 ado) { return (ado >> USB_PD_ADO_TYPE_SHIFT) & USB_PD_ADO_TYPE_MASK; } static inline unsigned int usb_pd_ado_fixed_batt(u32 ado) { return (ado >> USB_PD_ADO_FIXED_BATT_SHIFT) & USB_PD_ADO_FIXED_BATT_MASK; } static inline unsigned int usb_pd_ado_hot_swap_batt(u32 ado) { return (ado >> USB_PD_ADO_HOT_SWAP_BATT_SHIFT) & USB_PD_ADO_HOT_SWAP_BATT_MASK; } #endif /* __LINUX_USB_PD_ADO_H */ usb/isp1760.h 0000644 00000001171 14722070374 0006625 0 ustar 00 /* SPDX-License-Identifier: 
GPL-2.0 */ /* * Board initialization should put one of these into dev->platform_data * and place the isp1760 onto platform_bus named "isp1760-hcd". */ #ifndef __LINUX_USB_ISP1760_H #define __LINUX_USB_ISP1760_H struct isp1760_platform_data { unsigned is_isp1761:1; /* Chip is ISP1761 */ unsigned bus_width_16:1; /* 16/32-bit data bus width */ unsigned port1_otg:1; /* Port 1 supports OTG */ unsigned analog_oc:1; /* Analog overcurrent */ unsigned dack_polarity_high:1; /* DACK active high */ unsigned dreq_polarity_high:1; /* DREQ active high */ }; #endif /* __LINUX_USB_ISP1760_H */ usb/ezusb.h 0000644 00000000436 14722070374 0006647 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __EZUSB_H #define __EZUSB_H extern int ezusb_fx1_set_reset(struct usb_device *dev, unsigned char reset_bit); extern int ezusb_fx1_ihex_firmware_download(struct usb_device *dev, const char *firmware_path); #endif /* __EZUSB_H */ usb/isp116x.h 0000644 00000002210 14722070374 0006722 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Board initialization code should put one of these into dev->platform_data * and place the isp116x onto platform_bus. */ #ifndef __LINUX_USB_ISP116X_H #define __LINUX_USB_ISP116X_H struct isp116x_platform_data { /* Enable internal resistors on downstream ports */ unsigned sel15Kres:1; /* On-chip overcurrent detection */ unsigned oc_enable:1; /* INT output polarity */ unsigned int_act_high:1; /* INT edge or level triggered */ unsigned int_edge_triggered:1; /* Enable wakeup by devices on usb bus (e.g. wakeup by attachment/detachment or by device activity such as moving a mouse). When chosen, this option prevents stopping the internal clock, thereby increasing power consumption in the suspended state. */ unsigned remote_wakeup_enable:1; /* Inter-io delay (ns). The chip is picky about access timings; it expects at least: 150ns delay between consecutive accesses to DATA_REG, 300ns delay between access to ADDR_REG and DATA_REG; OE and WE MUST NOT be changed during these intervals. */ void (*delay) (struct device *dev, int delay); }; #endif /* __LINUX_USB_ISP116X_H */ usb/audio.h 0000644 00000002355 14722070374 0006622 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * <linux/usb/audio.h> -- USB Audio definitions. * * Copyright (C) 2006 Thumtronics Pty Ltd. * Developed for Thumtronics by Grey Innovation * Ben Williamson <ben.williamson@greyinnovation.com> * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. * * This file holds USB constants and structures defined * by the USB Device Class Definition for Audio Devices. * Comments below reference relevant sections of that document: * * http://www.usb.org/developers/devclass_docs/audio10.pdf * * Types and defines in this file are either specific to version 1.0 of * this standard or common for newer versions.
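 *
 * A sketch of how a gadget might describe one such control with the
 * structures below (the names and the UAC_FU_MUTE usage are illustrative;
 * the constant comes from the uapi header included here):
 *
 *	static int example_set_mute(struct usb_audio_control *con, u8 cmd,
 *				    int value)
 *	{
 *		con->data[cmd] = value;	// cache the requested state
 *		return 0;
 *	}
 *
 *	static struct usb_audio_control example_mute_control = {
 *		.name = "Mute Control",
 *		.type = UAC_FU_MUTE,
 *		.set = example_set_mute,
 *	};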
*/ #ifndef __LINUX_USB_AUDIO_H #define __LINUX_USB_AUDIO_H #include <uapi/linux/usb/audio.h> struct usb_audio_control { struct list_head list; const char *name; u8 type; int data[5]; int (*set)(struct usb_audio_control *con, u8 cmd, int value); int (*get)(struct usb_audio_control *con, u8 cmd); }; struct usb_audio_control_selector { struct list_head list; struct list_head control; u8 id; const char *name; u8 type; struct usb_descriptor_header *desc; }; #endif /* __LINUX_USB_AUDIO_H */ usb/otg.h 0000644 00000006007 14722070374 0006310 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* USB OTG (On The Go) defines */ /* * * These APIs may be used between USB controllers. USB device drivers * (for either host or peripheral roles) don't use these calls; they * continue to use just usb_device and usb_gadget. */ #ifndef __LINUX_USB_OTG_H #define __LINUX_USB_OTG_H #include <linux/phy/phy.h> #include <linux/usb/phy.h> struct usb_otg { u8 default_a; struct phy *phy; /* old usb_phy interface */ struct usb_phy *usb_phy; struct usb_bus *host; struct usb_gadget *gadget; enum usb_otg_state state; /* bind/unbind the host controller */ int (*set_host)(struct usb_otg *otg, struct usb_bus *host); /* bind/unbind the peripheral controller */ int (*set_peripheral)(struct usb_otg *otg, struct usb_gadget *gadget); /* effective for A-peripheral, ignored for B devices */ int (*set_vbus)(struct usb_otg *otg, bool enabled); /* for B devices only: start session with A-Host */ int (*start_srp)(struct usb_otg *otg); /* start or continue HNP role switch */ int (*start_hnp)(struct usb_otg *otg); }; /** * struct usb_otg_caps - describes the otg capabilities of the device * @otg_rev: The OTG revision number the device is compliant with, it's * in binary-coded decimal (i.e. 2.0 is 0200H). * @hnp_support: Indicates if the device supports HNP. * @srp_support: Indicates if the device supports SRP. * @adp_support: Indicates if the device supports ADP. 
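 *
 * For example, a controller compliant with OTG 2.0 that implements HNP
 * and SRP but not ADP might describe itself like this (a sketch; the
 * variable name is arbitrary):
 *
 *	static struct usb_otg_caps example_otg_caps = {
 *		.otg_rev	= 0x0200,	// BCD for revision 2.0
 *		.hnp_support	= true,
 *		.srp_support	= true,
 *		.adp_support	= false,
 *	};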
*/ struct usb_otg_caps { u16 otg_rev; bool hnp_support; bool srp_support; bool adp_support; }; extern const char *usb_otg_state_string(enum usb_otg_state state); /* Context: can sleep */ static inline int otg_start_hnp(struct usb_otg *otg) { if (otg && otg->start_hnp) return otg->start_hnp(otg); return -ENOTSUPP; } /* Context: can sleep */ static inline int otg_set_vbus(struct usb_otg *otg, bool enabled) { if (otg && otg->set_vbus) return otg->set_vbus(otg, enabled); return -ENOTSUPP; } /* for HCDs */ static inline int otg_set_host(struct usb_otg *otg, struct usb_bus *host) { if (otg && otg->set_host) return otg->set_host(otg, host); return -ENOTSUPP; } /* for usb peripheral controller drivers */ /* Context: can sleep */ static inline int otg_set_peripheral(struct usb_otg *otg, struct usb_gadget *periph) { if (otg && otg->set_peripheral) return otg->set_peripheral(otg, periph); return -ENOTSUPP; } static inline int otg_start_srp(struct usb_otg *otg) { if (otg && otg->start_srp) return otg->start_srp(otg); return -ENOTSUPP; } /* for OTG controller drivers (and maybe other stuff) */ extern int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num); enum usb_dr_mode { USB_DR_MODE_UNKNOWN, USB_DR_MODE_HOST, USB_DR_MODE_PERIPHERAL, USB_DR_MODE_OTG, }; /** * usb_get_dr_mode - Get dual role mode for given device * @dev: Pointer to the given device * * The function gets the phy interface string from the property 'dr_mode', * and returns the corresponding enum usb_dr_mode */ extern enum usb_dr_mode usb_get_dr_mode(struct device *dev); #endif /* __LINUX_USB_OTG_H */ usb/functionfs.h 0000644 00000000227 14722070374 0007673 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_FUNCTIONFS_H__ #define __LINUX_FUNCTIONFS_H__ 1 #include <uapi/linux/usb/functionfs.h> #endif usb/pd.h 0000644 00000032514 14722070374 0006124 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2015-2017 Google, Inc */ #ifndef __LINUX_USB_PD_H #define __LINUX_USB_PD_H #include <linux/kernel.h> #include <linux/types.h> #include <linux/usb/typec.h> /* USB PD Messages */ enum pd_ctrl_msg_type { /* 0 Reserved */ PD_CTRL_GOOD_CRC = 1, PD_CTRL_GOTO_MIN = 2, PD_CTRL_ACCEPT = 3, PD_CTRL_REJECT = 4, PD_CTRL_PING = 5, PD_CTRL_PS_RDY = 6, PD_CTRL_GET_SOURCE_CAP = 7, PD_CTRL_GET_SINK_CAP = 8, PD_CTRL_DR_SWAP = 9, PD_CTRL_PR_SWAP = 10, PD_CTRL_VCONN_SWAP = 11, PD_CTRL_WAIT = 12, PD_CTRL_SOFT_RESET = 13, /* 14-15 Reserved */ PD_CTRL_NOT_SUPP = 16, PD_CTRL_GET_SOURCE_CAP_EXT = 17, PD_CTRL_GET_STATUS = 18, PD_CTRL_FR_SWAP = 19, PD_CTRL_GET_PPS_STATUS = 20, PD_CTRL_GET_COUNTRY_CODES = 21, /* 22-31 Reserved */ }; enum pd_data_msg_type { /* 0 Reserved */ PD_DATA_SOURCE_CAP = 1, PD_DATA_REQUEST = 2, PD_DATA_BIST = 3, PD_DATA_SINK_CAP = 4, PD_DATA_BATT_STATUS = 5, PD_DATA_ALERT = 6, PD_DATA_GET_COUNTRY_INFO = 7, /* 8-14 Reserved */ PD_DATA_VENDOR_DEF = 15, /* 16-31 Reserved */ }; enum pd_ext_msg_type { /* 0 Reserved */ PD_EXT_SOURCE_CAP_EXT = 1, PD_EXT_STATUS = 2, PD_EXT_GET_BATT_CAP = 3, PD_EXT_GET_BATT_STATUS = 4, PD_EXT_BATT_CAP = 5, PD_EXT_GET_MANUFACTURER_INFO = 6, PD_EXT_MANUFACTURER_INFO = 7, PD_EXT_SECURITY_REQUEST = 8, PD_EXT_SECURITY_RESPONSE = 9, PD_EXT_FW_UPDATE_REQUEST = 10, PD_EXT_FW_UPDATE_RESPONSE = 11, PD_EXT_PPS_STATUS = 12, PD_EXT_COUNTRY_INFO = 13, PD_EXT_COUNTRY_CODES = 14, /* 15-31 Reserved */ }; #define PD_REV10 0x0 #define PD_REV20 0x1 #define PD_REV30 0x2 #define PD_MAX_REV PD_REV30 #define PD_HEADER_EXT_HDR BIT(15) #define PD_HEADER_CNT_SHIFT 12 #define PD_HEADER_CNT_MASK 0x7 #define
PD_HEADER_ID_SHIFT 9 #define PD_HEADER_ID_MASK 0x7 #define PD_HEADER_PWR_ROLE BIT(8) #define PD_HEADER_REV_SHIFT 6 #define PD_HEADER_REV_MASK 0x3 #define PD_HEADER_DATA_ROLE BIT(5) #define PD_HEADER_TYPE_SHIFT 0 #define PD_HEADER_TYPE_MASK 0x1f #define PD_HEADER(type, pwr, data, rev, id, cnt, ext_hdr) \ ((((type) & PD_HEADER_TYPE_MASK) << PD_HEADER_TYPE_SHIFT) | \ ((pwr) == TYPEC_SOURCE ? PD_HEADER_PWR_ROLE : 0) | \ ((data) == TYPEC_HOST ? PD_HEADER_DATA_ROLE : 0) | \ (rev << PD_HEADER_REV_SHIFT) | \ (((id) & PD_HEADER_ID_MASK) << PD_HEADER_ID_SHIFT) | \ (((cnt) & PD_HEADER_CNT_MASK) << PD_HEADER_CNT_SHIFT) | \ ((ext_hdr) ? PD_HEADER_EXT_HDR : 0)) #define PD_HEADER_LE(type, pwr, data, rev, id, cnt) \ cpu_to_le16(PD_HEADER((type), (pwr), (data), (rev), (id), (cnt), (0))) static inline unsigned int pd_header_cnt(u16 header) { return (header >> PD_HEADER_CNT_SHIFT) & PD_HEADER_CNT_MASK; } static inline unsigned int pd_header_cnt_le(__le16 header) { return pd_header_cnt(le16_to_cpu(header)); } static inline unsigned int pd_header_type(u16 header) { return (header >> PD_HEADER_TYPE_SHIFT) & PD_HEADER_TYPE_MASK; } static inline unsigned int pd_header_type_le(__le16 header) { return pd_header_type(le16_to_cpu(header)); } static inline unsigned int pd_header_msgid(u16 header) { return (header >> PD_HEADER_ID_SHIFT) & PD_HEADER_ID_MASK; } static inline unsigned int pd_header_msgid_le(__le16 header) { return pd_header_msgid(le16_to_cpu(header)); } static inline unsigned int pd_header_rev(u16 header) { return (header >> PD_HEADER_REV_SHIFT) & PD_HEADER_REV_MASK; } static inline unsigned int pd_header_rev_le(__le16 header) { return pd_header_rev(le16_to_cpu(header)); } #define PD_EXT_HDR_CHUNKED BIT(15) #define PD_EXT_HDR_CHUNK_NUM_SHIFT 11 #define PD_EXT_HDR_CHUNK_NUM_MASK 0xf #define PD_EXT_HDR_REQ_CHUNK BIT(10) #define PD_EXT_HDR_DATA_SIZE_SHIFT 0 #define PD_EXT_HDR_DATA_SIZE_MASK 0x1ff #define PD_EXT_HDR(data_size, req_chunk, chunk_num, chunked) \ ((((data_size) & PD_EXT_HDR_DATA_SIZE_MASK) << PD_EXT_HDR_DATA_SIZE_SHIFT) | \ ((req_chunk) ? PD_EXT_HDR_REQ_CHUNK : 0) | \ (((chunk_num) & PD_EXT_HDR_CHUNK_NUM_MASK) << PD_EXT_HDR_CHUNK_NUM_SHIFT) | \ ((chunked) ? 
PD_EXT_HDR_CHUNKED : 0)) #define PD_EXT_HDR_LE(data_size, req_chunk, chunk_num, chunked) \ cpu_to_le16(PD_EXT_HDR((data_size), (req_chunk), (chunk_num), (chunked))) static inline unsigned int pd_ext_header_chunk_num(u16 ext_header) { return (ext_header >> PD_EXT_HDR_CHUNK_NUM_SHIFT) & PD_EXT_HDR_CHUNK_NUM_MASK; } static inline unsigned int pd_ext_header_data_size(u16 ext_header) { return (ext_header >> PD_EXT_HDR_DATA_SIZE_SHIFT) & PD_EXT_HDR_DATA_SIZE_MASK; } static inline unsigned int pd_ext_header_data_size_le(__le16 ext_header) { return pd_ext_header_data_size(le16_to_cpu(ext_header)); } #define PD_MAX_PAYLOAD 7 #define PD_EXT_MAX_CHUNK_DATA 26 /** * struct pd_chunked_ext_message_data - PD chunked extended message data as * seen on wire * @header: PD extended message header * @data: PD extended message data */ struct pd_chunked_ext_message_data { __le16 header; u8 data[PD_EXT_MAX_CHUNK_DATA]; } __packed; /** * struct pd_message - PD message as seen on wire * @header: PD message header * @payload: PD message payload * @ext_msg: PD message chunked extended message data */ struct pd_message { __le16 header; union { __le32 payload[PD_MAX_PAYLOAD]; struct pd_chunked_ext_message_data ext_msg; }; } __packed; /* PDO: Power Data Object */ #define PDO_MAX_OBJECTS 7 enum pd_pdo_type { PDO_TYPE_FIXED = 0, PDO_TYPE_BATT = 1, PDO_TYPE_VAR = 2, PDO_TYPE_APDO = 3, }; #define PDO_TYPE_SHIFT 30 #define PDO_TYPE_MASK 0x3 #define PDO_TYPE(t) ((t) << PDO_TYPE_SHIFT) #define PDO_VOLT_MASK 0x3ff #define PDO_CURR_MASK 0x3ff #define PDO_PWR_MASK 0x3ff #define PDO_FIXED_DUAL_ROLE BIT(29) /* Power role swap supported */ #define PDO_FIXED_SUSPEND BIT(28) /* USB Suspend supported (Source) */ #define PDO_FIXED_HIGHER_CAP BIT(28) /* Requires more than vSafe5V (Sink) */ #define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */ #define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */ #define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */ #define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */ #define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */ #define PDO_FIXED_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_FIXED_VOLT_SHIFT) #define PDO_FIXED_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_FIXED_CURR_SHIFT) #define PDO_FIXED(mv, ma, flags) \ (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \ PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma)) #define VSAFE5V 5000 /* mv units */ #define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */ #define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */ #define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */ #define PDO_BATT_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MIN_VOLT_SHIFT) #define PDO_BATT_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MAX_VOLT_SHIFT) #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT) #define PDO_BATT(min_mv, max_mv, max_mw) \ (PDO_TYPE(PDO_TYPE_BATT) | PDO_BATT_MIN_VOLT(min_mv) | \ PDO_BATT_MAX_VOLT(max_mv) | PDO_BATT_MAX_POWER(max_mw)) #define PDO_VAR_MAX_VOLT_SHIFT 20 /* 50mV units */ #define PDO_VAR_MIN_VOLT_SHIFT 10 /* 50mV units */ #define PDO_VAR_MAX_CURR_SHIFT 0 /* 10mA units */ #define PDO_VAR_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MIN_VOLT_SHIFT) #define PDO_VAR_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MAX_VOLT_SHIFT) #define PDO_VAR_MAX_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_VAR_MAX_CURR_SHIFT) #define PDO_VAR(min_mv, max_mv, max_ma) \ (PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \ PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma)) enum pd_apdo_type { 
APDO_TYPE_PPS = 0, }; #define PDO_APDO_TYPE_SHIFT 28 /* Only valid value currently is 0x0 - PPS */ #define PDO_APDO_TYPE_MASK 0x3 #define PDO_APDO_TYPE(t) ((t) << PDO_APDO_TYPE_SHIFT) #define PDO_PPS_APDO_MAX_VOLT_SHIFT 17 /* 100mV units */ #define PDO_PPS_APDO_MIN_VOLT_SHIFT 8 /* 100mV units */ #define PDO_PPS_APDO_MAX_CURR_SHIFT 0 /* 50mA units */ #define PDO_PPS_APDO_VOLT_MASK 0xff #define PDO_PPS_APDO_CURR_MASK 0x7f #define PDO_PPS_APDO_MIN_VOLT(mv) \ ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MIN_VOLT_SHIFT) #define PDO_PPS_APDO_MAX_VOLT(mv) \ ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MAX_VOLT_SHIFT) #define PDO_PPS_APDO_MAX_CURR(ma) \ ((((ma) / 50) & PDO_PPS_APDO_CURR_MASK) << PDO_PPS_APDO_MAX_CURR_SHIFT) #define PDO_PPS_APDO(min_mv, max_mv, max_ma) \ (PDO_TYPE(PDO_TYPE_APDO) | PDO_APDO_TYPE(APDO_TYPE_PPS) | \ PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \ PDO_PPS_APDO_MAX_CURR(max_ma)) static inline enum pd_pdo_type pdo_type(u32 pdo) { return (pdo >> PDO_TYPE_SHIFT) & PDO_TYPE_MASK; } static inline unsigned int pdo_fixed_voltage(u32 pdo) { return ((pdo >> PDO_FIXED_VOLT_SHIFT) & PDO_VOLT_MASK) * 50; } static inline unsigned int pdo_min_voltage(u32 pdo) { return ((pdo >> PDO_VAR_MIN_VOLT_SHIFT) & PDO_VOLT_MASK) * 50; } static inline unsigned int pdo_max_voltage(u32 pdo) { return ((pdo >> PDO_VAR_MAX_VOLT_SHIFT) & PDO_VOLT_MASK) * 50; } static inline unsigned int pdo_max_current(u32 pdo) { return ((pdo >> PDO_VAR_MAX_CURR_SHIFT) & PDO_CURR_MASK) * 10; } static inline unsigned int pdo_max_power(u32 pdo) { return ((pdo >> PDO_BATT_MAX_PWR_SHIFT) & PDO_PWR_MASK) * 250; } static inline enum pd_apdo_type pdo_apdo_type(u32 pdo) { return (pdo >> PDO_APDO_TYPE_SHIFT) & PDO_APDO_TYPE_MASK; } static inline unsigned int pdo_pps_apdo_min_voltage(u32 pdo) { return ((pdo >> PDO_PPS_APDO_MIN_VOLT_SHIFT) & PDO_PPS_APDO_VOLT_MASK) * 100; } static inline unsigned int pdo_pps_apdo_max_voltage(u32 pdo) { return ((pdo >> PDO_PPS_APDO_MAX_VOLT_SHIFT) & PDO_PPS_APDO_VOLT_MASK) * 100; } static inline unsigned int pdo_pps_apdo_max_current(u32 pdo) { return ((pdo >> PDO_PPS_APDO_MAX_CURR_SHIFT) & PDO_PPS_APDO_CURR_MASK) * 50; } /* RDO: Request Data Object */ #define RDO_OBJ_POS_SHIFT 28 #define RDO_OBJ_POS_MASK 0x7 #define RDO_GIVE_BACK BIT(27) /* Supports reduced operating current */ #define RDO_CAP_MISMATCH BIT(26) /* Not satisfied by source caps */ #define RDO_USB_COMM BIT(25) /* USB communications capable */ #define RDO_NO_SUSPEND BIT(24) /* USB Suspend not supported */ #define RDO_PWR_MASK 0x3ff #define RDO_CURR_MASK 0x3ff #define RDO_FIXED_OP_CURR_SHIFT 10 #define RDO_FIXED_MAX_CURR_SHIFT 0 #define RDO_OBJ(idx) (((idx) & RDO_OBJ_POS_MASK) << RDO_OBJ_POS_SHIFT) #define PDO_FIXED_OP_CURR(ma) ((((ma) / 10) & RDO_CURR_MASK) << RDO_FIXED_OP_CURR_SHIFT) #define PDO_FIXED_MAX_CURR(ma) ((((ma) / 10) & RDO_CURR_MASK) << RDO_FIXED_MAX_CURR_SHIFT) #define RDO_FIXED(idx, op_ma, max_ma, flags) \ (RDO_OBJ(idx) | (flags) | \ PDO_FIXED_OP_CURR(op_ma) | PDO_FIXED_MAX_CURR(max_ma)) #define RDO_BATT_OP_PWR_SHIFT 10 /* 250mW units */ #define RDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */ #define RDO_BATT_OP_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_OP_PWR_SHIFT) #define RDO_BATT_MAX_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_MAX_PWR_SHIFT) #define RDO_BATT(idx, op_mw, max_mw, flags) \ (RDO_OBJ(idx) | (flags) | \ RDO_BATT_OP_PWR(op_mw) | RDO_BATT_MAX_PWR(max_mw)) #define RDO_PROG_VOLT_MASK 0x7ff #define RDO_PROG_CURR_MASK 0x7f #define RDO_PROG_VOLT_SHIFT 9 #define 
RDO_PROG_CURR_SHIFT 0 #define RDO_PROG_VOLT_MV_STEP 20 #define RDO_PROG_CURR_MA_STEP 50 #define PDO_PROG_OUT_VOLT(mv) \ ((((mv) / RDO_PROG_VOLT_MV_STEP) & RDO_PROG_VOLT_MASK) << RDO_PROG_VOLT_SHIFT) #define PDO_PROG_OP_CURR(ma) \ ((((ma) / RDO_PROG_CURR_MA_STEP) & RDO_PROG_CURR_MASK) << RDO_PROG_CURR_SHIFT) #define RDO_PROG(idx, out_mv, op_ma, flags) \ (RDO_OBJ(idx) | (flags) | \ PDO_PROG_OUT_VOLT(out_mv) | PDO_PROG_OP_CURR(op_ma)) static inline unsigned int rdo_index(u32 rdo) { return (rdo >> RDO_OBJ_POS_SHIFT) & RDO_OBJ_POS_MASK; } static inline unsigned int rdo_op_current(u32 rdo) { return ((rdo >> RDO_FIXED_OP_CURR_SHIFT) & RDO_CURR_MASK) * 10; } static inline unsigned int rdo_max_current(u32 rdo) { return ((rdo >> RDO_FIXED_MAX_CURR_SHIFT) & RDO_CURR_MASK) * 10; } static inline unsigned int rdo_op_power(u32 rdo) { return ((rdo >> RDO_BATT_OP_PWR_SHIFT) & RDO_PWR_MASK) * 250; } static inline unsigned int rdo_max_power(u32 rdo) { return ((rdo >> RDO_BATT_MAX_PWR_SHIFT) & RDO_PWR_MASK) * 250; } /* USB PD timers and counters */ #define PD_T_NO_RESPONSE 5000 /* 4.5 - 5.5 seconds */ #define PD_T_DB_DETECT 10000 /* 10 - 15 seconds */ #define PD_T_SEND_SOURCE_CAP 150 /* 100 - 200 ms */ #define PD_T_SENDER_RESPONSE 60 /* 24 - 30 ms, relaxed */ #define PD_T_SOURCE_ACTIVITY 45 #define PD_T_SINK_ACTIVITY 135 #define PD_T_SINK_WAIT_CAP 310 /* 310 - 620 ms */ #define PD_T_PS_TRANSITION 500 #define PD_T_SRC_TRANSITION 35 #define PD_T_DRP_SNK 40 #define PD_T_DRP_SRC 30 #define PD_T_PS_SOURCE_OFF 920 #define PD_T_PS_SOURCE_ON 480 #define PD_T_PS_HARD_RESET 30 #define PD_T_SRC_RECOVER 760 #define PD_T_SRC_RECOVER_MAX 1000 #define PD_T_SRC_TURN_ON 275 #define PD_T_SAFE_0V 650 #define PD_T_VCONN_SOURCE_ON 100 #define PD_T_SINK_REQUEST 100 /* 100 ms minimum */ #define PD_T_ERROR_RECOVERY 100 /* minimum 25 is insufficient */ #define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */ #define PD_T_NEWSRC 250 /* Maximum of 275ms */ #define PD_T_SWAP_SRC_START 20 /* Minimum of 20ms */ #define PD_T_DRP_TRY 100 /* 75 - 150 ms */ #define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */ #define PD_T_CC_DEBOUNCE 200 /* 100 - 200 ms */ #define PD_T_PD_DEBOUNCE 20 /* 10 - 20 ms */ #define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP) #define PD_N_HARD_RESET_COUNT 2 #endif /* __LINUX_USB_PD_H */ usb/uas.h 0000644 00000004111 14722070374 0006301 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __USB_UAS_H__ #define __USB_UAS_H__ #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> /* Common header for all IUs */ struct iu { __u8 iu_id; __u8 rsvd1; __be16 tag; } __attribute__((__packed__)); enum { IU_ID_COMMAND = 0x01, IU_ID_STATUS = 0x03, IU_ID_RESPONSE = 0x04, IU_ID_TASK_MGMT = 0x05, IU_ID_READ_READY = 0x06, IU_ID_WRITE_READY = 0x07, }; enum { TMF_ABORT_TASK = 0x01, TMF_ABORT_TASK_SET = 0x02, TMF_CLEAR_TASK_SET = 0x04, TMF_LOGICAL_UNIT_RESET = 0x08, TMF_I_T_NEXUS_RESET = 0x10, TMF_CLEAR_ACA = 0x40, TMF_QUERY_TASK = 0x80, TMF_QUERY_TASK_SET = 0x81, TMF_QUERY_ASYNC_EVENT = 0x82, }; enum { RC_TMF_COMPLETE = 0x00, RC_INVALID_INFO_UNIT = 0x02, RC_TMF_NOT_SUPPORTED = 0x04, RC_TMF_FAILED = 0x05, RC_TMF_SUCCEEDED = 0x08, RC_INCORRECT_LUN = 0x09, RC_OVERLAPPED_TAG = 0x0a, }; struct command_iu { __u8 iu_id; __u8 rsvd1; __be16 tag; __u8 prio_attr; __u8 rsvd5; __u8 len; __u8 rsvd7; struct scsi_lun lun; __u8 cdb[16]; /* XXX: Overflow-checking tools may misunderstand */ } __attribute__((__packed__)); struct task_mgmt_iu { __u8 iu_id; __u8 rsvd1; __be16 tag; __u8 function; __u8 rsvd2; __be16 task_tag; struct scsi_lun lun; } 
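/*
 * Example, sketch only: filling the task management IU declared here to
 * request a logical unit reset.  The helper name and its calling
 * convention are purely illustrative; tag allocation follows no
 * particular driver.
 *
 *	static void demo_fill_lun_reset(struct task_mgmt_iu *iu, u16 tag,
 *					const struct scsi_lun *lun)
 *	{
 *		memset(iu, 0, sizeof(*iu));
 *		iu->iu_id = IU_ID_TASK_MGMT;
 *		iu->tag = cpu_to_be16(tag);
 *		iu->function = TMF_LOGICAL_UNIT_RESET;
 *		iu->lun = *lun;
 *	}
 */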
__attribute__((__packed__)); /* * Also used for the Read Ready and Write Ready IUs since they have the * same first four bytes */ struct sense_iu { __u8 iu_id; __u8 rsvd1; __be16 tag; __be16 status_qual; __u8 status; __u8 rsvd7[7]; __be16 len; __u8 sense[SCSI_SENSE_BUFFERSIZE]; } __attribute__((__packed__)); struct response_iu { __u8 iu_id; __u8 rsvd1; __be16 tag; __u8 add_response_info[3]; __u8 response_code; } __attribute__((__packed__)); struct usb_pipe_usage_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bPipeID; __u8 Reserved; } __attribute__((__packed__)); enum { CMD_PIPE_ID = 1, STATUS_PIPE_ID = 2, DATA_IN_PIPE_ID = 3, DATA_OUT_PIPE_ID = 4, UAS_SIMPLE_TAG = 0, UAS_HEAD_TAG = 1, UAS_ORDERED_TAG = 2, UAS_ACA = 4, }; #endif usb/g_hid.h 0000644 00000002206 14722070374 0006566 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * g_hid.h -- Header file for USB HID gadget driver * * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_USB_G_HID_H #define __LINUX_USB_G_HID_H struct hidg_func_descriptor { unsigned char subclass; unsigned char protocol; unsigned short report_length; unsigned short report_desc_length; unsigned char report_desc[]; }; #endif /* __LINUX_USB_G_HID_H */ usb/typec_altmode.h 0000644 00000012437 14722070374 0010354 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __USB_TYPEC_ALTMODE_H #define __USB_TYPEC_ALTMODE_H #include <linux/mod_devicetable.h> #include <linux/usb/typec.h> #include <linux/device.h> #define MODE_DISCOVERY_MAX 6 struct typec_altmode_ops; /** * struct typec_altmode - USB Type-C alternate mode device * @dev: Driver model's view of this device * @svid: Standard or Vendor ID (SVID) of the alternate mode * @mode: Index of the Mode * @vdo: VDO returned by Discover Modes USB PD command * @active: Tells has the mode been entered or not * @desc: Optional human readable description of the mode * @ops: Operations vector from the driver */ struct typec_altmode { struct device dev; u16 svid; int mode; u32 vdo; unsigned int active:1; char *desc; const struct typec_altmode_ops *ops; }; #define to_typec_altmode(d) container_of(d, struct typec_altmode, dev) static inline void typec_altmode_set_drvdata(struct typec_altmode *altmode, void *data) { dev_set_drvdata(&altmode->dev, data); } static inline void *typec_altmode_get_drvdata(struct typec_altmode *altmode) { return dev_get_drvdata(&altmode->dev); } /** * struct typec_altmode_ops - Alternate mode specific operations vector * @enter: Operations to be executed with Enter Mode Command * @exit: Operations to be executed with Exit Mode Command * @attention: Callback for Attention Command * @vdm: Callback for SVID specific commands * @notify: Communication channel for platform and the alternate mode * @activate: User callback for Enter/Exit Mode */ struct typec_altmode_ops { int 
(*enter)(struct typec_altmode *altmode); int (*exit)(struct typec_altmode *altmode); void (*attention)(struct typec_altmode *altmode, u32 vdo); int (*vdm)(struct typec_altmode *altmode, const u32 hdr, const u32 *vdo, int cnt); int (*notify)(struct typec_altmode *altmode, unsigned long conf, void *data); int (*activate)(struct typec_altmode *altmode, int activate); }; int typec_altmode_enter(struct typec_altmode *altmode); int typec_altmode_exit(struct typec_altmode *altmode); int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo); int typec_altmode_vdm(struct typec_altmode *altmode, const u32 header, const u32 *vdo, int count); int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf, void *data); const struct typec_altmode * typec_altmode_get_partner(struct typec_altmode *altmode); /* * These are the connector states (USB, Safe and Alt Mode) defined in USB Type-C * Specification. SVID specific connector states are expected to follow and * start from the value TYPEC_STATE_MODAL. */ enum { TYPEC_STATE_SAFE, /* USB Safe State */ TYPEC_STATE_USB, /* USB Operation */ TYPEC_STATE_MODAL, /* Alternate Modes */ }; /* * For the muxes there is no difference between Accessory Modes and Alternate * Modes, so the Accessory Modes are supplied with specific modal state values * here. Unlike with Alternate Modes, where the mux will be linked with the * alternate mode device, the mux for Accessory Modes will be linked with the * port device instead. * * Port drivers can use TYPEC_MODE_AUDIO and TYPEC_MODE_DEBUG as the mode * value for typec_set_mode() when accessory modes are supported. */ enum { TYPEC_MODE_AUDIO = TYPEC_STATE_MODAL, /* Audio Accessory */ TYPEC_MODE_DEBUG, /* Debug Accessory */ }; #define TYPEC_MODAL_STATE(_state_) ((_state_) + TYPEC_STATE_MODAL) struct typec_altmode *typec_altmode_get_plug(struct typec_altmode *altmode, enum typec_plug_index index); void typec_altmode_put_plug(struct typec_altmode *plug); struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes, size_t n, u16 svid, u8 mode); struct typec_altmode * typec_altmode_register_notifier(struct device *dev, u16 svid, u8 mode, struct notifier_block *nb); void typec_altmode_unregister_notifier(struct typec_altmode *adev, struct notifier_block *nb); /** * typec_altmode_get_orientation - Get cable plug orientation * altmode: Handle to the alternate mode */ static inline enum typec_orientation typec_altmode_get_orientation(struct typec_altmode *altmode) { return typec_get_orientation(typec_altmode2port(altmode)); } /** * struct typec_altmode_driver - USB Type-C alternate mode device driver * @id_table: Null terminated array of SVIDs * @probe: Callback for device binding * @remove: Callback for device unbinding * @driver: Device driver model driver * * These drivers will be bind to the partner alternate mode devices. They will * handle all SVID specific communication. 
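 *
 * A skeleton of such a driver (the SVID, mode and all names are
 * hypothetical, shown only to illustrate the structure below and the
 * registration helpers that follow it):
 *
 *	static int demo_altmode_probe(struct typec_altmode *alt)
 *	{
 *		return 0;
 *	}
 *
 *	static void demo_altmode_remove(struct typec_altmode *alt)
 *	{
 *	}
 *
 *	static const struct typec_device_id demo_id_table[] = {
 *		{ .svid = 0xff01, .mode = 1 },
 *		{ },
 *	};
 *
 *	static struct typec_altmode_driver demo_altmode_driver = {
 *		.id_table	= demo_id_table,
 *		.probe		= demo_altmode_probe,
 *		.remove		= demo_altmode_remove,
 *		.driver		= { .name = "demo_altmode" },
 *	};
 *	module_typec_altmode_driver(demo_altmode_driver);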
*/ struct typec_altmode_driver { const struct typec_device_id *id_table; int (*probe)(struct typec_altmode *altmode); void (*remove)(struct typec_altmode *altmode); struct device_driver driver; }; #define to_altmode_driver(d) container_of(d, struct typec_altmode_driver, \ driver) #define typec_altmode_register_driver(drv) \ __typec_altmode_register_driver(drv, THIS_MODULE) int __typec_altmode_register_driver(struct typec_altmode_driver *drv, struct module *module); void typec_altmode_unregister_driver(struct typec_altmode_driver *drv); #define module_typec_altmode_driver(__typec_altmode_driver) \ module_driver(__typec_altmode_driver, typec_altmode_register_driver, \ typec_altmode_unregister_driver) #endif /* __USB_TYPEC_ALTMODE_H */ usb/gadget.h 0000644 00000103427 14722070374 0006756 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * <linux/usb/gadget.h> * * We call the USB code inside a Linux-based peripheral device a "gadget" * driver, except for the hardware-specific bus glue. One USB host can * master many USB gadgets, but the gadgets are only slaved to one host. * * * (C) Copyright 2002-2004 by David Brownell * All Rights Reserved. * * This software is licensed under the GNU GPL version 2. */ #ifndef __LINUX_USB_GADGET_H #define __LINUX_USB_GADGET_H #include <linux/device.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/scatterlist.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/usb/ch9.h> #define UDC_TRACE_STR_MAX 512 struct usb_ep; /** * struct usb_request - describes one i/o request * @buf: Buffer used for data. Always provide this; some controllers * only use PIO, or don't use DMA for some endpoints. * @dma: DMA address corresponding to 'buf'. If you don't set this * field, and the usb controller needs one, it is responsible * for mapping and unmapping the buffer. * @sg: a scatterlist for SG-capable controllers. * @num_sgs: number of SG entries * @num_mapped_sgs: number of SG entries mapped to DMA (internal) * @length: Length of that data * @stream_id: The stream id, when USB3.0 bulk streams are being used * @no_interrupt: If true, hints that no completion irq is needed. * Helpful sometimes with deep request queues that are handled * directly by DMA controllers. * @zero: If true, when writing data, makes the last packet be "short" * by adding a zero length packet as needed; * @short_not_ok: When reading data, makes short packets be * treated as errors (queue stops advancing till cleanup). * @dma_mapped: Indicates if request has been mapped to DMA (internal) * @complete: Function called when request completes, so this request and * its buffer may be re-used. The function will always be called with * interrupts disabled, and it must not sleep. * Reads terminate with a short packet, or when the buffer fills, * whichever comes first. When writes terminate, some data bytes * will usually still be in flight (often in a hardware fifo). * Errors (for reads or writes) stop the queue from advancing * until the completion function returns, so that any transfers * invalidated by the error may first be dequeued. * @context: For use by the completion callback * @list: For use by the gadget driver. * @frame_number: Reports the interval number in (micro)frame in which the * isochronous transfer was transmitted or received. * @status: Reports completion code, zero or a negative errno. * Normally, faults block the transfer queue from advancing until * the completion callback returns. 
* Code "-ESHUTDOWN" indicates completion caused by device disconnect, * or when the driver disabled the endpoint. * @actual: Reports bytes transferred to/from the buffer. For reads (OUT * transfers) this may be less than the requested length. If the * short_not_ok flag is set, short reads are treated as errors * even when status otherwise indicates successful completion. * Note that for writes (IN transfers) some data bytes may still * reside in a device-side FIFO when the request is reported as * complete. * * These are allocated/freed through the endpoint they're used with. The * hardware's driver can add extra per-request data to the memory it returns, * which often avoids separate memory allocations (potential failures), * later when the request is queued. * * Request flags affect request handling, such as whether a zero length * packet is written (the "zero" flag), whether a short read should be * treated as an error (blocking request queue advance, the "short_not_ok" * flag), or hinting that an interrupt is not required (the "no_interrupt" * flag, for use with deep request queues). * * Bulk endpoints can use any size buffers, and can also be used for interrupt * transfers. interrupt-only endpoints can be much less functional. * * NOTE: this is analogous to 'struct urb' on the host side, except that * it's thinner and promotes more pre-allocation. */ struct usb_request { void *buf; unsigned length; dma_addr_t dma; struct scatterlist *sg; unsigned num_sgs; unsigned num_mapped_sgs; unsigned stream_id:16; unsigned no_interrupt:1; unsigned zero:1; unsigned short_not_ok:1; unsigned dma_mapped:1; void (*complete)(struct usb_ep *ep, struct usb_request *req); void *context; struct list_head list; unsigned frame_number; /* ISO ONLY */ int status; unsigned actual; }; /*-------------------------------------------------------------------------*/ /* endpoint-specific parts of the api to the usb controller hardware. * unlike the urb model, (de)multiplexing layers are not required. * (so this api could slash overhead if used on the host side...) * * note that device side usb controllers commonly differ in how many * endpoints they support, as well as their capabilities. */ struct usb_ep_ops { int (*enable) (struct usb_ep *ep, const struct usb_endpoint_descriptor *desc); int (*disable) (struct usb_ep *ep); void (*dispose) (struct usb_ep *ep); struct usb_request *(*alloc_request) (struct usb_ep *ep, gfp_t gfp_flags); void (*free_request) (struct usb_ep *ep, struct usb_request *req); int (*queue) (struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags); int (*dequeue) (struct usb_ep *ep, struct usb_request *req); int (*set_halt) (struct usb_ep *ep, int value); int (*set_wedge) (struct usb_ep *ep); int (*fifo_status) (struct usb_ep *ep); void (*fifo_flush) (struct usb_ep *ep); }; /** * struct usb_ep_caps - endpoint capabilities description * @type_control:Endpoint supports control type (reserved for ep0). * @type_iso:Endpoint supports isochronous transfers. * @type_bulk:Endpoint supports bulk transfers. * @type_int:Endpoint supports interrupt transfers. * @dir_in:Endpoint supports IN direction. * @dir_out:Endpoint supports OUT direction. 
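 *
 * UDC drivers normally fill one of these with the USB_EP_CAPS()
 * initializer defined below; e.g. a bulk- and interrupt-capable IN
 * endpoint could be described as (sketch only):
 *
 *	static struct usb_ep_caps demo_caps =
 *		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT,
 *			    USB_EP_CAPS_DIR_IN);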
*/
struct usb_ep_caps {
	unsigned type_control:1;
	unsigned type_iso:1;
	unsigned type_bulk:1;
	unsigned type_int:1;
	unsigned dir_in:1;
	unsigned dir_out:1;
};

#define USB_EP_CAPS_TYPE_CONTROL 0x01
#define USB_EP_CAPS_TYPE_ISO 0x02
#define USB_EP_CAPS_TYPE_BULK 0x04
#define USB_EP_CAPS_TYPE_INT 0x08
#define USB_EP_CAPS_TYPE_ALL \
	(USB_EP_CAPS_TYPE_ISO | USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)
#define USB_EP_CAPS_DIR_IN 0x01
#define USB_EP_CAPS_DIR_OUT 0x02
#define USB_EP_CAPS_DIR_ALL (USB_EP_CAPS_DIR_IN | USB_EP_CAPS_DIR_OUT)

#define USB_EP_CAPS(_type, _dir) \
	{ \
		.type_control = !!(_type & USB_EP_CAPS_TYPE_CONTROL), \
		.type_iso = !!(_type & USB_EP_CAPS_TYPE_ISO), \
		.type_bulk = !!(_type & USB_EP_CAPS_TYPE_BULK), \
		.type_int = !!(_type & USB_EP_CAPS_TYPE_INT), \
		.dir_in = !!(_dir & USB_EP_CAPS_DIR_IN), \
		.dir_out = !!(_dir & USB_EP_CAPS_DIR_OUT), \
	}

/**
 * struct usb_ep - device side representation of USB endpoint
 * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
 * @ops: Function pointers used to access hardware-specific operations.
 * @ep_list:the gadget's ep_list holds all of its endpoints
 * @caps:The structure describing types and directions supported by endpoint.
 * @enabled: The current endpoint enabled/disabled state.
 * @claimed: True if this endpoint is claimed by a function.
 * @maxpacket:The maximum packet size used on this endpoint. The initial
 *	value can sometimes be reduced (hardware allowing), according to
 *	the endpoint descriptor used to configure the endpoint.
 * @maxpacket_limit:The maximum packet size value which can be handled by this
 *	endpoint. It's set once by UDC driver when endpoint is initialized, and
 *	should not be changed. Should not be confused with maxpacket.
 * @max_streams: The maximum number of streams supported
 *	by this EP (0 - 16, actual number is 2^n)
 * @mult: multiplier, 'mult' value for SS Isoc EPs
 * @maxburst: the maximum number of bursts supported by this EP (for usb3)
 * @driver_data:for use by the gadget driver.
 * @address: used to identify the endpoint when finding descriptor that
 *	matches connection speed
 * @desc: endpoint descriptor. This pointer is set before the endpoint is
 *	enabled and remains valid until the endpoint is disabled.
 * @comp_desc: In case of SuperSpeed support, this is the endpoint companion
 *	descriptor that is used to configure the endpoint
 *
 * the bus controller driver lists all the general purpose endpoints in
 * gadget->ep_list. the control endpoint (gadget->ep0) is not in that list,
 * and is accessed only in response to a driver setup() callback. 
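 *
 * A typical I/O sequence against one of these endpoints, using the
 * helpers declared below (sketch only: ep is assumed to be already
 * enabled, buf/len come from the caller, and error handling plus the
 * completion callback body are omitted):
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = demo_complete;
 *	usb_ep_queue(ep, req, GFP_KERNEL);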
*/ struct usb_ep { void *driver_data; const char *name; const struct usb_ep_ops *ops; struct list_head ep_list; struct usb_ep_caps caps; bool claimed; bool enabled; unsigned maxpacket:16; unsigned maxpacket_limit:16; unsigned max_streams:16; unsigned mult:2; unsigned maxburst:5; u8 address; const struct usb_endpoint_descriptor *desc; const struct usb_ss_ep_comp_descriptor *comp_desc; }; /*-------------------------------------------------------------------------*/ #if IS_ENABLED(CONFIG_USB_GADGET) void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit); int usb_ep_enable(struct usb_ep *ep); int usb_ep_disable(struct usb_ep *ep); struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req); int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags); int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req); int usb_ep_set_halt(struct usb_ep *ep); int usb_ep_clear_halt(struct usb_ep *ep); int usb_ep_set_wedge(struct usb_ep *ep); int usb_ep_fifo_status(struct usb_ep *ep); void usb_ep_fifo_flush(struct usb_ep *ep); #else static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit) { } static inline int usb_ep_enable(struct usb_ep *ep) { return 0; } static inline int usb_ep_disable(struct usb_ep *ep) { return 0; } static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { return NULL; } static inline void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req) { } static inline int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { return 0; } static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req) { return 0; } static inline int usb_ep_set_halt(struct usb_ep *ep) { return 0; } static inline int usb_ep_clear_halt(struct usb_ep *ep) { return 0; } static inline int usb_ep_set_wedge(struct usb_ep *ep) { return 0; } static inline int usb_ep_fifo_status(struct usb_ep *ep) { return 0; } static inline void usb_ep_fifo_flush(struct usb_ep *ep) { } #endif /* USB_GADGET */ /*-------------------------------------------------------------------------*/ struct usb_dcd_config_params { __u8 bU1devExitLat; /* U1 Device exit Latency */ #define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */ __le16 bU2DevExitLat; /* U2 Device exit Latency */ #define USB_DEFAULT_U2_DEV_EXIT_LAT 0x1F4 /* Less then 500 microsec */ __u8 besl_baseline; /* Recommended baseline BESL (0-15) */ __u8 besl_deep; /* Recommended deep BESL (0-15) */ #define USB_DEFAULT_BESL_UNSPECIFIED 0xFF /* No recommended value */ }; struct usb_gadget; struct usb_gadget_driver; struct usb_udc; /* the rest of the api to the controller hardware: device operations, * which don't involve endpoints (or i/o). 
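 *
 * a udc driver implements these and hands its gadget to the core with
 * usb_add_gadget_udc(); a hypothetical (illustrative only) sketch:
 *
 *	static const struct usb_gadget_ops demo_udc_ops = {
 *		.get_frame	= demo_get_frame,
 *		.pullup		= demo_pullup,
 *		.udc_start	= demo_udc_start,
 *		.udc_stop	= demo_udc_stop,
 *	};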
*/ struct usb_gadget_ops { int (*get_frame)(struct usb_gadget *); int (*wakeup)(struct usb_gadget *); int (*set_selfpowered) (struct usb_gadget *, int is_selfpowered); int (*vbus_session) (struct usb_gadget *, int is_active); int (*vbus_draw) (struct usb_gadget *, unsigned mA); int (*pullup) (struct usb_gadget *, int is_on); int (*ioctl)(struct usb_gadget *, unsigned code, unsigned long param); void (*get_config_params)(struct usb_gadget *, struct usb_dcd_config_params *); int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *); int (*udc_stop)(struct usb_gadget *); void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed); struct usb_ep *(*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); }; /** * struct usb_gadget - represents a usb slave device * @work: (internal use) Workqueue to be used for sysfs_notify() * @udc: struct usb_udc pointer for this gadget * @ops: Function pointers used to access hardware-specific operations. * @ep0: Endpoint zero, used when reading or writing responses to * driver setup() requests * @ep_list: List of other endpoints supported by the device. * @speed: Speed of current connection to USB host. * @max_speed: Maximal speed the UDC can handle. UDC must support this * and all slower speeds. * @state: the state we are now (attached, suspended, configured, etc) * @name: Identifies the controller hardware type. Used in diagnostics * and sometimes configuration. * @dev: Driver model state for this abstract device. * @isoch_delay: value from Set Isoch Delay request. Only valid on SS/SSP * @out_epnum: last used out ep number * @in_epnum: last used in ep number * @mA: last set mA value * @otg_caps: OTG capabilities of this gadget. * @sg_supported: true if we can handle scatter-gather * @is_otg: True if the USB device port uses a Mini-AB jack, so that the * gadget driver must provide a USB OTG descriptor. * @is_a_peripheral: False unless is_otg, the "A" end of a USB cable * is in the Mini-AB jack, and HNP has been used to switch roles * so that the "A" device currently acts as A-Peripheral, not A-Host. * @a_hnp_support: OTG device feature flag, indicating that the A-Host * supports HNP at this port. * @a_alt_hnp_support: OTG device feature flag, indicating that the A-Host * only supports HNP on a different root port. * @b_hnp_enable: OTG device feature flag, indicating that the A-Host * enabled HNP support. * @hnp_polling_support: OTG device feature flag, indicating if the OTG device * in peripheral mode can support HNP polling. * @host_request_flag: OTG device feature flag, indicating if A-Peripheral * or B-Peripheral wants to take host role. * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to * MaxPacketSize. * @quirk_altset_not_supp: UDC controller doesn't support alt settings. * @quirk_stall_not_supp: UDC controller doesn't support stalling. * @quirk_zlp_not_supp: UDC controller doesn't support ZLP. * @quirk_avoids_skb_reserve: udc/platform wants to avoid skb_reserve() in * u_ether.c to improve performance. * @is_selfpowered: if the gadget is self-powered. * @deactivated: True if gadget is deactivated - in deactivated state it cannot * be connected. * @connected: True if gadget is connected. * @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag * indicates that it supports LPM as per the LPM ECN & errata. * @irq: the interrupt number for device controller. 
* * Gadgets have a mostly-portable "gadget driver" implementing device * functions, handling all usb configurations and interfaces. Gadget * drivers talk to hardware-specific code indirectly, through ops vectors. * That insulates the gadget driver from hardware details, and packages * the hardware endpoints through generic i/o queues. The "usb_gadget" * and "usb_ep" interfaces provide that insulation from the hardware. * * Except for the driver data, all fields in this structure are * read-only to the gadget driver. That driver data is part of the * "driver model" infrastructure in 2.6 (and later) kernels, and for * earlier systems is grouped in a similar structure that's not known * to the rest of the kernel. * * Values of the three OTG device feature flags are updated before the * setup() call corresponding to USB_REQ_SET_CONFIGURATION, and before * driver suspend() calls. They are valid only when is_otg, and when the * device is acting as a B-Peripheral (so is_a_peripheral is false). */ struct usb_gadget { struct work_struct work; struct usb_udc *udc; /* readonly to gadget driver */ const struct usb_gadget_ops *ops; struct usb_ep *ep0; struct list_head ep_list; /* of usb_ep */ enum usb_device_speed speed; enum usb_device_speed max_speed; enum usb_device_state state; const char *name; struct device dev; unsigned isoch_delay; unsigned out_epnum; unsigned in_epnum; unsigned mA; struct usb_otg_caps *otg_caps; unsigned sg_supported:1; unsigned is_otg:1; unsigned is_a_peripheral:1; unsigned b_hnp_enable:1; unsigned a_hnp_support:1; unsigned a_alt_hnp_support:1; unsigned hnp_polling_support:1; unsigned host_request_flag:1; unsigned quirk_ep_out_aligned_size:1; unsigned quirk_altset_not_supp:1; unsigned quirk_stall_not_supp:1; unsigned quirk_zlp_not_supp:1; unsigned quirk_avoids_skb_reserve:1; unsigned is_selfpowered:1; unsigned deactivated:1; unsigned connected:1; unsigned lpm_capable:1; int irq; }; #define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) static inline void set_gadget_data(struct usb_gadget *gadget, void *data) { dev_set_drvdata(&gadget->dev, data); } static inline void *get_gadget_data(struct usb_gadget *gadget) { return dev_get_drvdata(&gadget->dev); } static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev) { return container_of(dev, struct usb_gadget, dev); } /* iterates the non-control endpoints; 'tmp' is a struct usb_ep pointer */ #define gadget_for_each_ep(tmp, gadget) \ list_for_each_entry(tmp, &(gadget)->ep_list, ep_list) /** * usb_ep_align - returns @len aligned to ep's maxpacketsize. * @ep: the endpoint whose maxpacketsize is used to align @len * @len: buffer size's length to align to @ep's maxpacketsize * * This helper is used to align buffer's size to an ep's maxpacketsize. */ static inline size_t usb_ep_align(struct usb_ep *ep, size_t len) { int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff; return round_up(len, max_packet_size); } /** * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget * requires quirk_ep_out_aligned_size, otherwise returns len. * @g: controller to check for quirk * @ep: the endpoint whose maxpacketsize is used to align @len * @len: buffer size's length to align to @ep's maxpacketsize * * This helper is used in case it's required for any reason to check and maybe * align buffer's size to an ep's maxpacketsize. */ static inline size_t usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len) { return g->quirk_ep_out_aligned_size ? 
usb_ep_align(ep, len) : len; } /** * gadget_is_altset_supported - return true iff the hardware supports * altsettings * @g: controller to check for quirk */ static inline int gadget_is_altset_supported(struct usb_gadget *g) { return !g->quirk_altset_not_supp; } /** * gadget_is_stall_supported - return true iff the hardware supports stalling * @g: controller to check for quirk */ static inline int gadget_is_stall_supported(struct usb_gadget *g) { return !g->quirk_stall_not_supp; } /** * gadget_is_zlp_supported - return true iff the hardware supports zlp * @g: controller to check for quirk */ static inline int gadget_is_zlp_supported(struct usb_gadget *g) { return !g->quirk_zlp_not_supp; } /** * gadget_avoids_skb_reserve - return true iff the hardware would like to avoid * skb_reserve to improve performance. * @g: controller to check for quirk */ static inline int gadget_avoids_skb_reserve(struct usb_gadget *g) { return g->quirk_avoids_skb_reserve; } /** * gadget_is_dualspeed - return true iff the hardware handles high speed * @g: controller that might support both high and full speeds */ static inline int gadget_is_dualspeed(struct usb_gadget *g) { return g->max_speed >= USB_SPEED_HIGH; } /** * gadget_is_superspeed() - return true if the hardware handles superspeed * @g: controller that might support superspeed */ static inline int gadget_is_superspeed(struct usb_gadget *g) { return g->max_speed >= USB_SPEED_SUPER; } /** * gadget_is_superspeed_plus() - return true if the hardware handles * superspeed plus * @g: controller that might support superspeed plus */ static inline int gadget_is_superspeed_plus(struct usb_gadget *g) { return g->max_speed >= USB_SPEED_SUPER_PLUS; } /** * gadget_is_otg - return true iff the hardware is OTG-ready * @g: controller that might have a Mini-AB connector * * This is a runtime test, since kernels with a USB-OTG stack sometimes * run on boards which only have a Mini-B (or Mini-A) connector. 
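 *
 * A gadget driver's bind() might gate its OTG descriptor on this check,
 * using the descriptor helpers declared later in this header (sketch;
 * allocation failure handling omitted):
 *
 *	struct usb_descriptor_header *otg_desc;
 *
 *	if (gadget_is_otg(gadget)) {
 *		otg_desc = usb_otg_descriptor_alloc(gadget);
 *		usb_otg_descriptor_init(gadget, otg_desc);
 *	}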
*/ static inline int gadget_is_otg(struct usb_gadget *g) { #ifdef CONFIG_USB_OTG return g->is_otg; #else return 0; #endif } /*-------------------------------------------------------------------------*/ #if IS_ENABLED(CONFIG_USB_GADGET) int usb_gadget_frame_number(struct usb_gadget *gadget); int usb_gadget_wakeup(struct usb_gadget *gadget); int usb_gadget_set_selfpowered(struct usb_gadget *gadget); int usb_gadget_clear_selfpowered(struct usb_gadget *gadget); int usb_gadget_vbus_connect(struct usb_gadget *gadget); int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA); int usb_gadget_vbus_disconnect(struct usb_gadget *gadget); int usb_gadget_connect(struct usb_gadget *gadget); int usb_gadget_disconnect(struct usb_gadget *gadget); int usb_gadget_deactivate(struct usb_gadget *gadget); int usb_gadget_activate(struct usb_gadget *gadget); #else static inline int usb_gadget_frame_number(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_wakeup(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) { return 0; } static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_connect(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_disconnect(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_deactivate(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_activate(struct usb_gadget *gadget) { return 0; } #endif /* CONFIG_USB_GADGET */ /*-------------------------------------------------------------------------*/ /** * struct usb_gadget_driver - driver for usb 'slave' devices * @function: String describing the gadget's function * @max_speed: Highest speed the driver handles. * @setup: Invoked for ep0 control requests that aren't handled by * the hardware level driver. Most calls must be handled by * the gadget driver, including descriptor and configuration * management. The 16 bit members of the setup data are in * USB byte order. Called in_interrupt; this may not sleep. Driver * queues a response to ep0, or returns negative to stall. * @disconnect: Invoked after all transfers have been stopped, * when the host is disconnected. May be called in_interrupt; this * may not sleep. Some devices can't detect disconnect, so this might * not be called except as part of controller shutdown. * @bind: the driver's bind callback * @unbind: Invoked when the driver is unbound from a gadget, * usually from rmmod (after a disconnect is reported). * Called in a context that permits sleeping. * @suspend: Invoked on USB suspend. May be called in_interrupt. * @resume: Invoked on USB resume. May be called in_interrupt. * @reset: Invoked on USB bus reset. It is mandatory for all gadget drivers * and should be called in_interrupt. * @driver: Driver model state for this driver. * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL, * this driver will be bound to any available UDC. * @pending: UDC core private data used for deferred probe of this driver. 
* @match_existing_only: If udc is not found, return an error and don't add this * gadget driver to list of pending driver * * Devices are disabled till a gadget driver successfully bind()s, which * means the driver will handle setup() requests needed to enumerate (and * meet "chapter 9" requirements) then do some useful work. * * If gadget->is_otg is true, the gadget driver must provide an OTG * descriptor during enumeration, or else fail the bind() call. In such * cases, no USB traffic may flow until both bind() returns without * having called usb_gadget_disconnect(), and the USB host stack has * initialized. * * Drivers use hardware-specific knowledge to configure the usb hardware. * endpoint addressing is only one of several hardware characteristics that * are in descriptors the ep0 implementation returns from setup() calls. * * Except for ep0 implementation, most driver code shouldn't need change to * run on top of different usb controllers. It'll use endpoints set up by * that ep0 implementation. * * The usb controller driver handles a few standard usb requests. Those * include set_address, and feature flags for devices, interfaces, and * endpoints (the get_status, set_feature, and clear_feature requests). * * Accordingly, the driver's setup() callback must always implement all * get_descriptor requests, returning at least a device descriptor and * a configuration descriptor. Drivers must make sure the endpoint * descriptors match any hardware constraints. Some hardware also constrains * other descriptors. (The pxa250 allows only configurations 1, 2, or 3). * * The driver's setup() callback must also implement set_configuration, * and should also implement set_interface, get_configuration, and * get_interface. Setting a configuration (or interface) is where * endpoints should be activated or (config 0) shut down. * * (Note that only the default control endpoint is supported. Neither * hosts nor devices generally support control traffic except to ep0.) * * Most devices will ignore USB suspend/resume operations, and so will * not provide those callbacks. However, some may need to change modes * when the host is not longer directing those activities. For example, * local controls (buttons, dials, etc) may need to be re-enabled since * the (remote) host can't do that any longer; or an error state might * be cleared, to make the device behave identically whether or not * power is maintained. */ struct usb_gadget_driver { char *function; enum usb_device_speed max_speed; int (*bind)(struct usb_gadget *gadget, struct usb_gadget_driver *driver); void (*unbind)(struct usb_gadget *); int (*setup)(struct usb_gadget *, const struct usb_ctrlrequest *); void (*disconnect)(struct usb_gadget *); void (*suspend)(struct usb_gadget *); void (*resume)(struct usb_gadget *); void (*reset)(struct usb_gadget *); /* FIXME support safe rmmod */ struct device_driver driver; char *udc_name; struct list_head pending; unsigned match_existing_only:1; }; /*-------------------------------------------------------------------------*/ /* driver modules register and unregister, as usual. * these calls must be made in a context that can sleep. * * these will usually be implemented directly by the hardware-dependent * usb bus interface driver, which will only support a single driver. 
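 *
 * a gadget driver module therefore typically declares one instance of
 * the structure documented above and registers it from module init;
 * all names here are hypothetical:
 *
 *	static struct usb_gadget_driver demo_driver = {
 *		.function	= "demo",
 *		.max_speed	= USB_SPEED_HIGH,
 *		.bind		= demo_bind,
 *		.unbind		= demo_unbind,
 *		.setup		= demo_setup,
 *		.disconnect	= demo_disconnect,
 *		.reset		= demo_reset,
 *		.driver		= { .name = "demo", .owner = THIS_MODULE },
 *	};
 *
 *	usb_gadget_probe_driver(&demo_driver);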
*/

/**
 * usb_gadget_probe_driver - probe a gadget driver
 * @driver: the driver being registered
 * Context: can sleep
 *
 * Call this in your gadget driver's module initialization function,
 * to tell the underlying usb controller driver about your driver.
 * The @bind() function will be called to bind it to a gadget before this
 * registration call returns. It's expected that the @bind() function will
 * be in init sections.
 */
int usb_gadget_probe_driver(struct usb_gadget_driver *driver);

/**
 * usb_gadget_unregister_driver - unregister a gadget driver
 * @driver:the driver being unregistered
 * Context: can sleep
 *
 * Call this in your gadget driver's module cleanup function,
 * to tell the underlying usb controller that your driver is
 * going away. If the controller is connected to a USB host,
 * it will first disconnect(). The driver is also requested
 * to unbind() and clean up any device state, before this procedure
 * finally returns. It's expected that the unbind() functions
 * will be in exit sections, so may not be linked in some kernels.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver);

extern int usb_add_gadget_udc_release(struct device *parent,
		struct usb_gadget *gadget, void (*release)(struct device *dev));
extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
extern void usb_del_gadget_udc(struct usb_gadget *gadget);
extern char *usb_get_gadget_udc_name(void);

/*-------------------------------------------------------------------------*/

/* utility to simplify dealing with string descriptors */

/**
 * struct usb_string - wraps a C string and its USB id
 * @id:the (nonzero) ID for this string
 * @s:the string, in UTF-8 encoding
 *
 * If you're using usb_gadget_get_string(), use this to wrap a string
 * together with its ID.
 */
struct usb_string {
	u8			id;
	const char		*s;
};

/**
 * struct usb_gadget_strings - a set of USB strings in a given language
 * @language:identifies the strings' language (0x0409 for en-us)
 * @strings:array of strings with their ids
 *
 * If you're using usb_gadget_get_string(), use this to wrap all the
 * strings for a given language. 
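 *
 * For example, with illustrative IDs and text:
 *
 *	static struct usb_string demo_strings[] = {
 *		{ 1, "ACME Corp" },
 *		{ 2, "Demo Gadget" },
 *		{ }
 *	};
 *
 *	static struct usb_gadget_strings demo_stringtab = {
 *		.language	= 0x0409,
 *		.strings	= demo_strings,
 *	};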
*/ struct usb_gadget_strings { u16 language; /* 0x0409 for en-us */ struct usb_string *strings; }; struct usb_gadget_string_container { struct list_head list; u8 *stash[0]; }; /* put descriptor for string with that id into buf (buflen >= 256) */ int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf); /*-------------------------------------------------------------------------*/ /* utility to simplify managing config descriptors */ /* write vector of descriptors into buffer */ int usb_descriptor_fillbuf(void *, unsigned, const struct usb_descriptor_header **); /* build config descriptor from single descriptor vector */ int usb_gadget_config_buf(const struct usb_config_descriptor *config, void *buf, unsigned buflen, const struct usb_descriptor_header **desc); /* copy a NULL-terminated vector of descriptors */ struct usb_descriptor_header **usb_copy_descriptors( struct usb_descriptor_header **); /** * usb_free_descriptors - free descriptors returned by usb_copy_descriptors() * @v: vector of descriptors */ static inline void usb_free_descriptors(struct usb_descriptor_header **v) { kfree(v); } struct usb_function; int usb_assign_descriptors(struct usb_function *f, struct usb_descriptor_header **fs, struct usb_descriptor_header **hs, struct usb_descriptor_header **ss, struct usb_descriptor_header **ssp); void usb_free_all_descriptors(struct usb_function *f); struct usb_descriptor_header *usb_otg_descriptor_alloc( struct usb_gadget *gadget); int usb_otg_descriptor_init(struct usb_gadget *gadget, struct usb_descriptor_header *otg_desc); /*-------------------------------------------------------------------------*/ /* utility to simplify map/unmap of usb_requests to/from DMA */ #ifdef CONFIG_HAS_DMA extern int usb_gadget_map_request_by_dev(struct device *dev, struct usb_request *req, int is_in); extern int usb_gadget_map_request(struct usb_gadget *gadget, struct usb_request *req, int is_in); extern void usb_gadget_unmap_request_by_dev(struct device *dev, struct usb_request *req, int is_in); extern void usb_gadget_unmap_request(struct usb_gadget *gadget, struct usb_request *req, int is_in); #else /* !CONFIG_HAS_DMA */ static inline int usb_gadget_map_request_by_dev(struct device *dev, struct usb_request *req, int is_in) { return -ENOSYS; } static inline int usb_gadget_map_request(struct usb_gadget *gadget, struct usb_request *req, int is_in) { return -ENOSYS; } static inline void usb_gadget_unmap_request_by_dev(struct device *dev, struct usb_request *req, int is_in) { } static inline void usb_gadget_unmap_request(struct usb_gadget *gadget, struct usb_request *req, int is_in) { } #endif /* !CONFIG_HAS_DMA */ /*-------------------------------------------------------------------------*/ /* utility to set gadget state properly */ extern void usb_gadget_set_state(struct usb_gadget *gadget, enum usb_device_state state); /*-------------------------------------------------------------------------*/ /* utility to tell udc core that the bus reset occurs */ extern void usb_gadget_udc_reset(struct usb_gadget *gadget, struct usb_gadget_driver *driver); /*-------------------------------------------------------------------------*/ /* utility to give requests back to the gadget layer */ extern void usb_gadget_giveback_request(struct usb_ep *ep, struct usb_request *req); /*-------------------------------------------------------------------------*/ /* utility to find endpoint by name */ extern struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name); 
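/*
 * Illustrative helper, not part of this API: feeding a string descriptor
 * built by usb_gadget_get_string() above into a usb_request while
 * answering a GET_DESCRIPTOR(string) request on ep0.  Assumes req->buf
 * holds at least 256 bytes, as usb_gadget_get_string() requires.
 */
static inline int demo_fill_string_desc(const struct usb_gadget_strings *table,
					int id, struct usb_request *req)
{
	int len = usb_gadget_get_string(table, id, req->buf);

	if (len < 0)
		return len;		/* no such id in the table */
	req->length = len;		/* descriptor bytes to send */
	return 0;
}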
/*-------------------------------------------------------------------------*/ /* utility to check if endpoint caps match descriptor needs */ extern int usb_gadget_ep_match_desc(struct usb_gadget *gadget, struct usb_ep *ep, struct usb_endpoint_descriptor *desc, struct usb_ss_ep_comp_descriptor *ep_comp); /*-------------------------------------------------------------------------*/ /* utility to update vbus status for udc core, it may be scheduled */ extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status); /*-------------------------------------------------------------------------*/ /* utility wrapping a simple endpoint selection policy */ extern struct usb_ep *usb_ep_autoconfig(struct usb_gadget *, struct usb_endpoint_descriptor *); extern struct usb_ep *usb_ep_autoconfig_ss(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); extern void usb_ep_autoconfig_release(struct usb_ep *); extern void usb_ep_autoconfig_reset(struct usb_gadget *); #endif /* __LINUX_USB_GADGET_H */ usb/musb-ux500.h 0000644 00000001603 14722070374 0007341 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2013 ST-Ericsson AB * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef __MUSB_UX500_H__ #define __MUSB_UX500_H__ enum ux500_musb_vbus_id_status { UX500_MUSB_NONE = 0, UX500_MUSB_VBUS, UX500_MUSB_ID, UX500_MUSB_CHARGER, UX500_MUSB_ENUMERATED, UX500_MUSB_RIDA, UX500_MUSB_RIDB, UX500_MUSB_RIDC, UX500_MUSB_PREPARE, UX500_MUSB_CLEAN, }; #endif /* __MUSB_UX500_H__ */ usb/xhci-dbgp.h 0000644 00000001471 14722070374 0007364 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * Standalone xHCI debug capability driver * * Copyright (C) 2016 Intel Corporation * * Author: Lu Baolu <baolu.lu@linux.intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef __LINUX_XHCI_DBGP_H #define __LINUX_XHCI_DBGP_H #ifdef CONFIG_EARLY_PRINTK_USB_XDBC int __init early_xdbc_parse_parameter(char *s); int __init early_xdbc_setup_hardware(void); void __init early_xdbc_register_console(void); #else static inline int __init early_xdbc_setup_hardware(void) { return -ENODEV; } static inline void __init early_xdbc_register_console(void) { } #endif /* CONFIG_EARLY_PRINTK_USB_XDBC */ #endif /* __LINUX_XHCI_DBGP_H */ usb/rndis_host.h 0000644 00000013700 14722070374 0007671 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * Host Side support for RNDIS Networking Links * Copyright (C) 2005 by David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_USB_RNDIS_HOST_H #define __LINUX_USB_RNDIS_HOST_H #include <linux/rndis.h> /* * CONTROL uses CDC "encapsulated commands" with funky notifications. * - control-out: SEND_ENCAPSULATED * - interrupt-in: RESPONSE_AVAILABLE * - control-in: GET_ENCAPSULATED * * We'll try to ignore the RESPONSE_AVAILABLE notifications. * * REVISIT some RNDIS implementations seem to have curious issues still * to be resolved. */ struct rndis_msg_hdr { __le32 msg_type; /* RNDIS_MSG_* */ __le32 msg_len; /* followed by data that varies between messages */ __le32 request_id; __le32 status; /* ... and more */ } __attribute__ ((packed)); /* MS-Windows uses this strange size, but RNDIS spec says 1024 minimum */ #define CONTROL_BUFFER_SIZE 1025 /* RNDIS defines an (absurdly huge) 10 second control timeout, * but ActiveSync seems to use a more usual 5 second timeout * (which matches the USB 2.0 spec). */ #define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000) struct rndis_data_hdr { __le32 msg_type; /* RNDIS_MSG_PACKET */ __le32 msg_len; /* rndis_data_hdr + data_len + pad */ __le32 data_offset; /* 36 -- right after header */ __le32 data_len; /* ... real packet size */ __le32 oob_data_offset; /* zero */ __le32 oob_data_len; /* zero */ __le32 num_oob; /* zero */ __le32 packet_data_offset; /* zero */ __le32 packet_data_len; /* zero */ __le32 vc_handle; /* zero */ __le32 reserved; /* zero */ } __attribute__ ((packed)); struct rndis_init { /* OUT */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_INIT */ __le32 msg_len; /* 24 */ __le32 request_id; __le32 major_version; /* of rndis (1.0) */ __le32 minor_version; __le32 max_transfer_size; } __attribute__ ((packed)); struct rndis_init_c { /* IN */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_INIT_C */ __le32 msg_len; __le32 request_id; __le32 status; __le32 major_version; /* of rndis (1.0) */ __le32 minor_version; __le32 device_flags; __le32 medium; /* zero == 802.3 */ __le32 max_packets_per_message; __le32 max_transfer_size; __le32 packet_alignment; /* max 7; (1<<n) bytes */ __le32 af_list_offset; /* zero */ __le32 af_list_size; /* zero */ } __attribute__ ((packed)); struct rndis_halt { /* OUT (no reply) */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_HALT */ __le32 msg_len; __le32 request_id; } __attribute__ ((packed)); struct rndis_query { /* OUT */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_QUERY */ __le32 msg_len; __le32 request_id; __le32 oid; __le32 len; __le32 offset; /*?*/ __le32 handle; /* zero */ } __attribute__ ((packed)); struct rndis_query_c { /* IN */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_QUERY_C */ __le32 msg_len; __le32 request_id; __le32 status; __le32 len; __le32 offset; } __attribute__ ((packed)); struct rndis_set { /* OUT */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_SET */ __le32 msg_len; __le32 request_id; __le32 oid; __le32 len; __le32 offset; /*?*/ __le32 handle; /* zero */ } __attribute__ ((packed)); struct rndis_set_c { /* IN */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_SET_C */ __le32 msg_len; __le32 request_id; __le32 status; } __attribute__ ((packed)); struct rndis_reset { /* IN */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_RESET */ __le32 msg_len; __le32 reserved; } __attribute__ ((packed)); struct rndis_reset_c { /* OUT */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_RESET_C */ __le32 msg_len; __le32 
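/*
 * Example, sketch only: issuing one of the OUT messages above through
 * rndis_command(), declared near the end of this header.  dev and buf
 * (a CONTROL_BUFFER_SIZE-sized buffer) are assumed to exist; request-id
 * bookkeeping and error handling are omitted.
 *
 *	struct rndis_query *q = (void *)buf;
 *
 *	memset(q, 0, sizeof(*q));
 *	q->msg_type = cpu_to_le32(RNDIS_MSG_QUERY);
 *	q->msg_len = cpu_to_le32(sizeof(*q));
 *	q->oid = cpu_to_le32(RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE);
 *	retval = rndis_command(dev, (void *)q, CONTROL_BUFFER_SIZE);
 */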
status; __le32 addressing_lost; } __attribute__ ((packed)); struct rndis_indicate { /* IN (unrequested) */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_INDICATE */ __le32 msg_len; __le32 status; __le32 length; __le32 offset; /**/ __le32 diag_status; __le32 error_offset; /**/ __le32 message; } __attribute__ ((packed)); struct rndis_keepalive { /* OUT (optionally IN) */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_KEEPALIVE */ __le32 msg_len; __le32 request_id; } __attribute__ ((packed)); struct rndis_keepalive_c { /* IN (optionally OUT) */ /* header and: */ __le32 msg_type; /* RNDIS_MSG_KEEPALIVE_C */ __le32 msg_len; __le32 request_id; __le32 status; } __attribute__ ((packed)); /* default filter used with RNDIS devices */ #define RNDIS_DEFAULT_FILTER ( \ RNDIS_PACKET_TYPE_DIRECTED | \ RNDIS_PACKET_TYPE_BROADCAST | \ RNDIS_PACKET_TYPE_ALL_MULTICAST | \ RNDIS_PACKET_TYPE_PROMISCUOUS) /* Flags to require specific physical medium type for generic_rndis_bind() */ #define FLAG_RNDIS_PHYM_NOT_WIRELESS 0x0001 #define FLAG_RNDIS_PHYM_WIRELESS 0x0002 /* Flags for driver_info::data */ #define RNDIS_DRIVER_DATA_POLL_STATUS 1 /* poll status before control */ extern void rndis_status(struct usbnet *dev, struct urb *urb); extern int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen); extern int generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags); extern void rndis_unbind(struct usbnet *dev, struct usb_interface *intf); extern int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb); extern struct sk_buff * rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags); #endif /* __LINUX_USB_RNDIS_HOST_H */ usb/usb_phy_generic.h 0000644 00000001574 14722070374 0010670 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_USB_NOP_XCEIV_H #define __LINUX_USB_NOP_XCEIV_H #include <linux/usb/otg.h> #include <linux/gpio/consumer.h> struct usb_phy_generic_platform_data { enum usb_phy_type type; unsigned long clk_rate; /* if set fails with -EPROBE_DEFER if can't get regulator */ unsigned int needs_vcc:1; unsigned int needs_reset:1; /* deprecated */ int gpio_reset; struct gpio_desc *gpiod_vbus; }; #if IS_ENABLED(CONFIG_NOP_USB_XCEIV) /* sometimes transceivers are accessed only through e.g. ULPI */ extern struct platform_device *usb_phy_generic_register(void); extern void usb_phy_generic_unregister(struct platform_device *); #else static inline struct platform_device *usb_phy_generic_register(void) { return NULL; } static inline void usb_phy_generic_unregister(struct platform_device *pdev) { } #endif #endif /* __LINUX_USB_NOP_XCEIV_H */ usb/ehci_pdriver.h 0000644 00000004502 14722070374 0010160 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef __USB_CORE_EHCI_PDRIVER_H #define __USB_CORE_EHCI_PDRIVER_H struct platform_device; struct usb_hcd; /** * struct usb_ehci_pdata - platform_data for generic ehci driver * * @caps_offset: offset of the EHCI Capability Registers relative to the * start of the io memory region provided to the driver. * @has_tt: set to 1 if TT is integrated in root hub. * @has_synopsys_hc_bug: set to 1 to enable the workaround for an erratum * in some Synopsys host controller cores. * @big_endian_desc: set to 1 if the controller uses big-endian descriptors. * @big_endian_mmio: set to 1 if the controller uses big-endian register * accesses. * @no_io_watchdog: set to 1 if the controller does not need the I/O * watchdog to run. * @reset_on_resume: set to 1 if the controller needs to be reset after * a suspend / resume cycle (but can't detect that itself). * @dma_mask_64: set to 1 if the controller supports 64-bit DMA addressing. * * These are general configuration options for the EHCI controller. Most of * these options enable workarounds for quirks in specific hardware. */ struct usb_ehci_pdata { int caps_offset; unsigned has_tt:1; unsigned has_synopsys_hc_bug:1; unsigned big_endian_desc:1; unsigned big_endian_mmio:1; unsigned no_io_watchdog:1; unsigned reset_on_resume:1; unsigned dma_mask_64:1; /* Turn on all power and clocks */ int (*power_on)(struct platform_device *pdev); /* Turn off all power and clocks */ void (*power_off)(struct platform_device *pdev); /* Turn on only VBUS suspend power and hotplug detection, * turn off everything else */ void (*power_suspend)(struct platform_device *pdev); int (*pre_setup)(struct usb_hcd *hcd); }; #endif /* __USB_CORE_EHCI_PDRIVER_H */ usb/musb.h 0000644 00000007020 14722070374 0006461 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * This is used for host and peripheral modes of the driver for * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. * * Board initialization should put one of these into dev->platform_data, * probably on some platform_device named "musb-hdrc". It encapsulates * key configuration differences between boards. */ #ifndef __LINUX_USB_MUSB_H #define __LINUX_USB_MUSB_H /* The USB role is defined by the connector used on the board, so long as * standards are being followed. (Developer boards sometimes won't.) */ enum musb_mode { MUSB_UNDEFINED = 0, MUSB_HOST, /* A or Mini-A connector */ MUSB_PERIPHERAL, /* B or Mini-B connector */ MUSB_OTG /* Mini-AB connector */ }; struct clk; enum musb_fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed)); enum musb_buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed)); struct musb_fifo_cfg { u8 hw_ep_num; enum musb_fifo_style style; enum musb_buf_mode mode; u16 maxpacket; }; #define MUSB_EP_FIFO(ep, st, m, pkt) \ { \ .hw_ep_num = ep, \ .style = st, \ .mode = m, \ .maxpacket = pkt, \ } #define MUSB_EP_FIFO_SINGLE(ep, st, pkt) \ MUSB_EP_FIFO(ep, st, BUF_SINGLE, pkt) #define MUSB_EP_FIFO_DOUBLE(ep, st, pkt) \ MUSB_EP_FIFO(ep, st, BUF_DOUBLE, pkt) struct musb_hdrc_eps_bits { const char name[16]; u8 bits; }; struct musb_hdrc_config { struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */ unsigned fifo_cfg_size; /* size of the fifo configuration */ /* MUSB configuration-specific details */ unsigned multipoint:1; /* multipoint device */ unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */ /* need to explicitly de-assert the port reset after resume?
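 *
 * (An aside on the fifo table above: a board configuration built with the
 * MUSB_EP_FIFO helpers might look like the following sketch, where the
 * endpoint numbers and packet sizes are hypothetical:
 *
 *	static struct musb_fifo_cfg board_fifo_cfg[] = {
 *		MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
 *		MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
 *		MUSB_EP_FIFO_DOUBLE(2, FIFO_RXTX, 64),
 *	};
 *
 * with .fifo_cfg = board_fifo_cfg and .fifo_cfg_size =
 * ARRAY_SIZE(board_fifo_cfg) filled into struct musb_hdrc_config.)
 *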
*/ unsigned host_port_deassert_reset_at_resume:1; u8 num_eps; /* number of endpoints _with_ ep0 */ u8 ram_bits; /* ram address size */ u32 maximum_speed; }; struct musb_hdrc_platform_data { /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ u8 mode; /* for clk_get() */ const char *clock; /* (HOST or OTG) switch VBUS on/off */ int (*set_vbus)(struct device *dev, int is_on); /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ u8 power; /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ u8 min_power; /* (HOST or OTG) msec/2 after VBUS on till power good */ u8 potpgt; /* (HOST or OTG) program PHY for external Vbus */ unsigned extvbus:1; /* Power the device on or off */ int (*set_power)(int state); /* MUSB configuration-specific details */ const struct musb_hdrc_config *config; /* Architecture specific board data */ void *board_data; /* Platform specific struct musb_ops pointer */ const void *platform_ops; }; enum musb_vbus_id_status { MUSB_UNKNOWN = 0, MUSB_ID_GROUND, MUSB_ID_FLOAT, MUSB_VBUS_VALID, MUSB_VBUS_OFF, }; #if IS_ENABLED(CONFIG_USB_MUSB_HDRC) int musb_mailbox(enum musb_vbus_id_status status); #else static inline int musb_mailbox(enum musb_vbus_id_status status) { return 0; } #endif /* TUSB 6010 support */ #define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ #define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ #define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ #ifdef CONFIG_ARCH_OMAP2 extern int __init tusb6010_setup_interface( struct musb_hdrc_platform_data *data, unsigned ps_refclk, unsigned waitpin, unsigned async_cs, unsigned sync_cs, unsigned irq, unsigned dmachan); extern int tusb6010_platform_retime(unsigned is_refclk); #endif /* OMAP2 */ #endif /* __LINUX_USB_MUSB_H */ usb/net2280.h 0000644 00000057070 14722070374 0006627 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * NetChip 2280 high/full speed USB device controller. * Unlike many such controllers, this one talks PCI. * * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) * Copyright (C) 2003 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_USB_NET2280_H #define __LINUX_USB_NET2280_H /*-------------------------------------------------------------------------*/ /* NET2280 MEMORY MAPPED REGISTERS * * The register layout came from the chip documentation, and the bit * number definitions were extracted from chip specification. * * Use the shift operator ('<<') to build bit masks, with readl/writel * to access the registers through PCI. 
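 *
 * For example, setting USB_DETECT_ENABLE in the usbctl register of
 * struct net2280_usb_regs (defined below) is a read-modify-write; a
 * sketch, assuming "regs" is an ioremapped pointer to that register
 * block:
 *
 *	u32 tmp = readl(&regs->usbctl);
 *	tmp |= (1 << USB_DETECT_ENABLE);
 *	writel(tmp, &regs->usbctl);
 *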
*/ /* main registers, BAR0 + 0x0000 */ struct net2280_regs { /* offset 0x0000 */ u32 devinit; #define LOCAL_CLOCK_FREQUENCY 8 #define FORCE_PCI_RESET 7 #define PCI_ID 6 #define PCI_ENABLE 5 #define FIFO_SOFT_RESET 4 #define CFG_SOFT_RESET 3 #define PCI_SOFT_RESET 2 #define USB_SOFT_RESET 1 #define M8051_RESET 0 u32 eectl; #define EEPROM_ADDRESS_WIDTH 23 #define EEPROM_CHIP_SELECT_ACTIVE 22 #define EEPROM_PRESENT 21 #define EEPROM_VALID 20 #define EEPROM_BUSY 19 #define EEPROM_CHIP_SELECT_ENABLE 18 #define EEPROM_BYTE_READ_START 17 #define EEPROM_BYTE_WRITE_START 16 #define EEPROM_READ_DATA 8 #define EEPROM_WRITE_DATA 0 u32 eeclkfreq; u32 _unused0; /* offset 0x0010 */ u32 pciirqenb0; /* interrupt PCI master ... */ #define SETUP_PACKET_INTERRUPT_ENABLE 7 #define ENDPOINT_F_INTERRUPT_ENABLE 6 #define ENDPOINT_E_INTERRUPT_ENABLE 5 #define ENDPOINT_D_INTERRUPT_ENABLE 4 #define ENDPOINT_C_INTERRUPT_ENABLE 3 #define ENDPOINT_B_INTERRUPT_ENABLE 2 #define ENDPOINT_A_INTERRUPT_ENABLE 1 #define ENDPOINT_0_INTERRUPT_ENABLE 0 u32 pciirqenb1; #define PCI_INTERRUPT_ENABLE 31 #define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 #define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 #define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 #define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 #define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 #define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18 #define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 #define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 #define GPIO_INTERRUPT_ENABLE 13 #define DMA_D_INTERRUPT_ENABLE 12 #define DMA_C_INTERRUPT_ENABLE 11 #define DMA_B_INTERRUPT_ENABLE 10 #define DMA_A_INTERRUPT_ENABLE 9 #define EEPROM_DONE_INTERRUPT_ENABLE 8 #define VBUS_INTERRUPT_ENABLE 7 #define CONTROL_STATUS_INTERRUPT_ENABLE 6 #define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 #define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 #define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 #define RESUME_INTERRUPT_ENABLE 1 #define SOF_INTERRUPT_ENABLE 0 u32 cpu_irqenb0; /* ... 
or onboard 8051 */ #define SETUP_PACKET_INTERRUPT_ENABLE 7 #define ENDPOINT_F_INTERRUPT_ENABLE 6 #define ENDPOINT_E_INTERRUPT_ENABLE 5 #define ENDPOINT_D_INTERRUPT_ENABLE 4 #define ENDPOINT_C_INTERRUPT_ENABLE 3 #define ENDPOINT_B_INTERRUPT_ENABLE 2 #define ENDPOINT_A_INTERRUPT_ENABLE 1 #define ENDPOINT_0_INTERRUPT_ENABLE 0 u32 cpu_irqenb1; #define CPU_INTERRUPT_ENABLE 31 #define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 #define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 #define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 #define PCI_INTA_INTERRUPT_ENABLE 24 #define PCI_PME_INTERRUPT_ENABLE 23 #define PCI_SERR_INTERRUPT_ENABLE 22 #define PCI_PERR_INTERRUPT_ENABLE 21 #define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 #define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 #define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 #define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 #define GPIO_INTERRUPT_ENABLE 13 #define DMA_D_INTERRUPT_ENABLE 12 #define DMA_C_INTERRUPT_ENABLE 11 #define DMA_B_INTERRUPT_ENABLE 10 #define DMA_A_INTERRUPT_ENABLE 9 #define EEPROM_DONE_INTERRUPT_ENABLE 8 #define VBUS_INTERRUPT_ENABLE 7 #define CONTROL_STATUS_INTERRUPT_ENABLE 6 #define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 #define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 #define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 #define RESUME_INTERRUPT_ENABLE 1 #define SOF_INTERRUPT_ENABLE 0 /* offset 0x0020 */ u32 _unused1; u32 usbirqenb1; #define USB_INTERRUPT_ENABLE 31 #define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 #define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 #define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 #define PCI_INTA_INTERRUPT_ENABLE 24 #define PCI_PME_INTERRUPT_ENABLE 23 #define PCI_SERR_INTERRUPT_ENABLE 22 #define PCI_PERR_INTERRUPT_ENABLE 21 #define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 #define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 #define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 #define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 #define GPIO_INTERRUPT_ENABLE 13 #define DMA_D_INTERRUPT_ENABLE 12 #define DMA_C_INTERRUPT_ENABLE 11 #define DMA_B_INTERRUPT_ENABLE 10 #define DMA_A_INTERRUPT_ENABLE 9 #define EEPROM_DONE_INTERRUPT_ENABLE 8 #define VBUS_INTERRUPT_ENABLE 7 #define CONTROL_STATUS_INTERRUPT_ENABLE 6 #define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 #define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 #define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 #define RESUME_INTERRUPT_ENABLE 1 #define SOF_INTERRUPT_ENABLE 0 u32 irqstat0; #define INTA_ASSERTED 12 #define SETUP_PACKET_INTERRUPT 7 #define ENDPOINT_F_INTERRUPT 6 #define ENDPOINT_E_INTERRUPT 5 #define ENDPOINT_D_INTERRUPT 4 #define ENDPOINT_C_INTERRUPT 3 #define ENDPOINT_B_INTERRUPT 2 #define ENDPOINT_A_INTERRUPT 1 #define ENDPOINT_0_INTERRUPT 0 #define USB3380_IRQSTAT0_EP_INTR_MASK_IN (0xF << 17) #define USB3380_IRQSTAT0_EP_INTR_MASK_OUT (0xF << 1) u32 irqstat1; #define POWER_STATE_CHANGE_INTERRUPT 27 #define PCI_ARBITER_TIMEOUT_INTERRUPT 26 #define PCI_PARITY_ERROR_INTERRUPT 25 #define PCI_INTA_INTERRUPT 24 #define PCI_PME_INTERRUPT 23 #define PCI_SERR_INTERRUPT 22 #define PCI_PERR_INTERRUPT 21 #define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20 #define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19 #define PCI_RETRY_ABORT_INTERRUPT 17 #define PCI_MASTER_CYCLE_DONE_INTERRUPT 16 #define SOF_DOWN_INTERRUPT 14 #define GPIO_INTERRUPT 13 #define DMA_D_INTERRUPT 12 #define DMA_C_INTERRUPT 11 #define DMA_B_INTERRUPT 10 #define DMA_A_INTERRUPT 9 #define EEPROM_DONE_INTERRUPT 8 #define VBUS_INTERRUPT 7 #define CONTROL_STATUS_INTERRUPT 6 #define ROOT_PORT_RESET_INTERRUPT 4 #define SUSPEND_REQUEST_INTERRUPT 3 #define 
SUSPEND_REQUEST_CHANGE_INTERRUPT 2 #define RESUME_INTERRUPT 1 #define SOF_INTERRUPT 0 /* offset 0x0030 */ u32 idxaddr; u32 idxdata; u32 fifoctl; #define PCI_BASE2_RANGE 16 #define IGNORE_FIFO_AVAILABILITY 3 #define PCI_BASE2_SELECT 2 #define FIFO_CONFIGURATION_SELECT 0 u32 _unused2; /* offset 0x0040 */ u32 memaddr; #define START 28 #define DIRECTION 27 #define FIFO_DIAGNOSTIC_SELECT 24 #define MEMORY_ADDRESS 0 u32 memdata0; u32 memdata1; u32 _unused3; /* offset 0x0050 */ u32 gpioctl; #define GPIO3_LED_SELECT 12 #define GPIO3_INTERRUPT_ENABLE 11 #define GPIO2_INTERRUPT_ENABLE 10 #define GPIO1_INTERRUPT_ENABLE 9 #define GPIO0_INTERRUPT_ENABLE 8 #define GPIO3_OUTPUT_ENABLE 7 #define GPIO2_OUTPUT_ENABLE 6 #define GPIO1_OUTPUT_ENABLE 5 #define GPIO0_OUTPUT_ENABLE 4 #define GPIO3_DATA 3 #define GPIO2_DATA 2 #define GPIO1_DATA 1 #define GPIO0_DATA 0 u32 gpiostat; #define GPIO3_INTERRUPT 3 #define GPIO2_INTERRUPT 2 #define GPIO1_INTERRUPT 1 #define GPIO0_INTERRUPT 0 } __attribute__ ((packed)); /* usb control, BAR0 + 0x0080 */ struct net2280_usb_regs { /* offset 0x0080 */ u32 stdrsp; #define STALL_UNSUPPORTED_REQUESTS 31 #define SET_TEST_MODE 16 #define GET_OTHER_SPEED_CONFIGURATION 15 #define GET_DEVICE_QUALIFIER 14 #define SET_ADDRESS 13 #define ENDPOINT_SET_CLEAR_HALT 12 #define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11 #define GET_STRING_DESCRIPTOR_2 10 #define GET_STRING_DESCRIPTOR_1 9 #define GET_STRING_DESCRIPTOR_0 8 #define GET_SET_INTERFACE 6 #define GET_SET_CONFIGURATION 5 #define GET_CONFIGURATION_DESCRIPTOR 4 #define GET_DEVICE_DESCRIPTOR 3 #define GET_ENDPOINT_STATUS 2 #define GET_INTERFACE_STATUS 1 #define GET_DEVICE_STATUS 0 u32 prodvendid; #define PRODUCT_ID 16 #define VENDOR_ID 0 u32 relnum; u32 usbctl; #define SERIAL_NUMBER_INDEX 16 #define PRODUCT_ID_STRING_ENABLE 13 #define VENDOR_ID_STRING_ENABLE 12 #define USB_ROOT_PORT_WAKEUP_ENABLE 11 #define VBUS_PIN 10 #define TIMED_DISCONNECT 9 #define SUSPEND_IMMEDIATELY 7 #define SELF_POWERED_USB_DEVICE 6 #define REMOTE_WAKEUP_SUPPORT 5 #define PME_POLARITY 4 #define USB_DETECT_ENABLE 3 #define PME_WAKEUP_ENABLE 2 #define DEVICE_REMOTE_WAKEUP_ENABLE 1 #define SELF_POWERED_STATUS 0 /* offset 0x0090 */ u32 usbstat; #define HIGH_SPEED 7 #define FULL_SPEED 6 #define GENERATE_RESUME 5 #define GENERATE_DEVICE_REMOTE_WAKEUP 4 u32 xcvrdiag; #define FORCE_HIGH_SPEED_MODE 31 #define FORCE_FULL_SPEED_MODE 30 #define USB_TEST_MODE 24 #define LINE_STATE 16 #define TRANSCEIVER_OPERATION_MODE 2 #define TRANSCEIVER_SELECT 1 #define TERMINATION_SELECT 0 u32 setup0123; u32 setup4567; /* offset 0x00a0 */ u32 _unused0; u32 ouraddr; #define FORCE_IMMEDIATE 7 #define OUR_USB_ADDRESS 0 u32 ourconfig; } __attribute__ ((packed)); /* pci control, BAR0 + 0x0100 */ struct net2280_pci_regs { /* offset 0x0100 */ u32 pcimstctl; #define PCI_ARBITER_PARK_SELECT 13 #define PCI_MULTI_LEVEL_ARBITER 12 #define PCI_RETRY_ABORT_ENABLE 11 #define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10 #define DMA_READ_MULTIPLE_ENABLE 9 #define DMA_READ_LINE_ENABLE 8 #define PCI_MASTER_COMMAND_SELECT 6 #define MEM_READ_OR_WRITE 0 #define IO_READ_OR_WRITE 1 #define CFG_READ_OR_WRITE 2 #define PCI_MASTER_START 5 #define PCI_MASTER_READ_WRITE 4 #define PCI_MASTER_WRITE 0 #define PCI_MASTER_READ 1 #define PCI_MASTER_BYTE_WRITE_ENABLES 0 u32 pcimstaddr; u32 pcimstdata; u32 pcimststat; #define PCI_ARBITER_CLEAR 2 #define PCI_EXTERNAL_ARBITER 1 #define PCI_HOST_MODE 0 } __attribute__ ((packed)); /* dma control, BAR0 + 0x0180 ... array of four structs like this, * for channels 0..3.
see also struct net2280_dma: descriptor * that can be loaded into some of these registers. */ struct net2280_dma_regs { /* [11.7] */ /* offset 0x0180, 0x01a0, 0x01c0, 0x01e0, */ u32 dmactl; #define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25 #define DMA_CLEAR_COUNT_ENABLE 21 #define DESCRIPTOR_POLLING_RATE 19 #define POLL_CONTINUOUS 0 #define POLL_1_USEC 1 #define POLL_100_USEC 2 #define POLL_1_MSEC 3 #define DMA_VALID_BIT_POLLING_ENABLE 18 #define DMA_VALID_BIT_ENABLE 17 #define DMA_SCATTER_GATHER_ENABLE 16 #define DMA_OUT_AUTO_START_ENABLE 4 #define DMA_PREEMPT_ENABLE 3 #define DMA_FIFO_VALIDATE 2 #define DMA_ENABLE 1 #define DMA_ADDRESS_HOLD 0 u32 dmastat; #define DMA_ABORT_DONE_INTERRUPT 27 #define DMA_SCATTER_GATHER_DONE_INTERRUPT 25 #define DMA_TRANSACTION_DONE_INTERRUPT 24 #define DMA_ABORT 1 #define DMA_START 0 u32 _unused0[2]; /* offset 0x0190, 0x01b0, 0x01d0, 0x01f0, */ u32 dmacount; #define VALID_BIT 31 #define DMA_DIRECTION 30 #define DMA_DONE_INTERRUPT_ENABLE 29 #define END_OF_CHAIN 28 #define DMA_BYTE_COUNT_MASK ((1<<24)-1) #define DMA_BYTE_COUNT 0 u32 dmaaddr; u32 dmadesc; u32 _unused1; } __attribute__ ((packed)); /* dedicated endpoint registers, BAR0 + 0x0200 */ struct net2280_dep_regs { /* [11.8] */ /* offset 0x0200, 0x0210, 0x220, 0x230, 0x240 */ u32 dep_cfg; /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */ u32 dep_rsp; u32 _unused[2]; } __attribute__ ((packed)); /* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs * like this, for ep0 then the configurable endpoints A..F * ep0 reserved for control; E and F have only 64 bytes of fifo */ struct net2280_ep_regs { /* [11.9] */ /* offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0 */ u32 ep_cfg; #define ENDPOINT_BYTE_COUNT 16 #define ENDPOINT_ENABLE 10 #define ENDPOINT_TYPE 8 #define ENDPOINT_DIRECTION 7 #define ENDPOINT_NUMBER 0 u32 ep_rsp; #define SET_NAK_OUT_PACKETS 15 #define SET_EP_HIDE_STATUS_PHASE 14 #define SET_EP_FORCE_CRC_ERROR 13 #define SET_INTERRUPT_MODE 12 #define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11 #define SET_NAK_OUT_PACKETS_MODE 10 #define SET_ENDPOINT_TOGGLE 9 #define SET_ENDPOINT_HALT 8 #define CLEAR_NAK_OUT_PACKETS 7 #define CLEAR_EP_HIDE_STATUS_PHASE 6 #define CLEAR_EP_FORCE_CRC_ERROR 5 #define CLEAR_INTERRUPT_MODE 4 #define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3 #define CLEAR_NAK_OUT_PACKETS_MODE 2 #define CLEAR_ENDPOINT_TOGGLE 1 #define CLEAR_ENDPOINT_HALT 0 u32 ep_irqenb; #define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6 #define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5 #define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3 #define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2 #define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1 #define DATA_IN_TOKEN_INTERRUPT_ENABLE 0 u32 ep_stat; #define FIFO_VALID_COUNT 24 #define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22 #define TIMEOUT 21 #define USB_STALL_SENT 20 #define USB_IN_NAK_SENT 19 #define USB_IN_ACK_RCVD 18 #define USB_OUT_PING_NAK_SENT 17 #define USB_OUT_ACK_SENT 16 #define FIFO_OVERFLOW 13 #define FIFO_UNDERFLOW 12 #define FIFO_FULL 11 #define FIFO_EMPTY 10 #define FIFO_FLUSH 9 #define SHORT_PACKET_OUT_DONE_INTERRUPT 6 #define SHORT_PACKET_TRANSFERRED_INTERRUPT 5 #define NAK_OUT_PACKETS 4 #define DATA_PACKET_RECEIVED_INTERRUPT 3 #define DATA_PACKET_TRANSMITTED_INTERRUPT 2 #define DATA_OUT_PING_TOKEN_INTERRUPT 1 #define DATA_IN_TOKEN_INTERRUPT 0 /* offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0 */ u32 ep_avail; u32 ep_data; u32 _unused0[2]; } __attribute__ ((packed)); #endif /* __LINUX_USB_NET2280_H */ usb/role.h 0000644 
00000005430 14722070374 0006457 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 #ifndef __LINUX_USB_ROLE_H #define __LINUX_USB_ROLE_H #include <linux/device.h> struct usb_role_switch; enum usb_role { USB_ROLE_NONE, USB_ROLE_HOST, USB_ROLE_DEVICE, }; typedef int (*usb_role_switch_set_t)(struct device *dev, enum usb_role role); typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev); /** * struct usb_role_switch_desc - USB Role Switch Descriptor * @fwnode: The device node to be associated with the role switch * @usb2_port: Optional reference to the host controller port device (USB2) * @usb3_port: Optional reference to the host controller port device (USB3) * @udc: Optional reference to the peripheral controller device * @set: Callback for setting the role * @get: Callback for getting the role (optional) * @allow_userspace_control: If true userspace may change the role through sysfs * * @usb2_port and @usb3_port will point to the USB host port and @udc to the USB * device controller behind the USB connector with the role switch. If * @usb2_port, @usb3_port and @udc are included in the description, the * reference count for them should be incremented by the caller of * usb_role_switch_register() before registering the switch. */ struct usb_role_switch_desc { struct fwnode_handle *fwnode; struct device *usb2_port; struct device *usb3_port; struct device *udc; usb_role_switch_set_t set; usb_role_switch_get_t get; bool allow_userspace_control; }; #if IS_ENABLED(CONFIG_USB_ROLE_SWITCH) int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role); enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw); struct usb_role_switch *usb_role_switch_get(struct device *dev); struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *node); void usb_role_switch_put(struct usb_role_switch *sw); struct usb_role_switch * usb_role_switch_register(struct device *parent, const struct usb_role_switch_desc *desc); void usb_role_switch_unregister(struct usb_role_switch *sw); #else static inline int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role) { return 0; } static inline enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw) { return USB_ROLE_NONE; } static inline struct usb_role_switch *usb_role_switch_get(struct device *dev) { return ERR_PTR(-ENODEV); } static inline struct usb_role_switch * fwnode_usb_role_switch_get(struct fwnode_handle *node) { return ERR_PTR(-ENODEV); } static inline void usb_role_switch_put(struct usb_role_switch *sw) { } static inline struct usb_role_switch * usb_role_switch_register(struct device *parent, const struct usb_role_switch_desc *desc) { return ERR_PTR(-ENODEV); } static inline void usb_role_switch_unregister(struct usb_role_switch *sw) { } #endif #endif /* __LINUX_USB_ROLE_H */ usb/ehci-dbgp.h 0000644 00000004063 14722070374 0007341 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Standalone EHCI usb debug driver * * Originally written by: * Eric W. Biederman" <ebiederm@xmission.com> and * Yinghai Lu <yhlu.kernel@gmail.com> * * Changes for early/late printk and HW errata: * Jason Wessel <jason.wessel@windriver.com> * Copyright (C) 2009 Wind River Systems, Inc. * */ #ifndef __LINUX_USB_EHCI_DBGP_H #define __LINUX_USB_EHCI_DBGP_H #include <linux/console.h> #include <linux/types.h> /* Appendix C, Debug port ... intended for use with special "debug devices" * that can help if there's no serial console. (nonstandard enumeration.) 
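 *
 * A sketch of how the control register bits below are used; "dbgp" is
 * assumed to point at the mapped struct ehci_dbg_port, and the DONE bit
 * is acknowledged by writing it back:
 *
 *	u32 ctrl = readl(&dbgp->control);
 *	if (ctrl & DBGP_ERROR)
 *		pr_warn("dbgp error %u\n", DBGP_ERRCODE(ctrl));
 *	else if (ctrl & DBGP_DONE)
 *		writel(ctrl | DBGP_DONE, &dbgp->control);
 *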
*/ struct ehci_dbg_port { u32 control; #define DBGP_OWNER (1<<30) #define DBGP_ENABLED (1<<28) #define DBGP_DONE (1<<16) #define DBGP_INUSE (1<<10) #define DBGP_ERRCODE(x) (((x)>>7)&0x07) # define DBGP_ERR_BAD 1 # define DBGP_ERR_SIGNAL 2 #define DBGP_ERROR (1<<6) #define DBGP_GO (1<<5) #define DBGP_OUT (1<<4) #define DBGP_LEN(x) (((x)>>0)&0x0f) u32 pids; #define DBGP_PID_GET(x) (((x)>>16)&0xff) #define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) u32 data03; u32 data47; u32 address; #define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) }; #ifdef CONFIG_EARLY_PRINTK_DBGP extern int early_dbgp_init(char *s); extern struct console early_dbgp_console; #endif /* CONFIG_EARLY_PRINTK_DBGP */ struct usb_hcd; #ifdef CONFIG_XEN_DOM0 extern int xen_dbgp_reset_prep(struct usb_hcd *); extern int xen_dbgp_external_startup(struct usb_hcd *); #else static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd) { return 1; /* Shouldn't this be 0? */ } static inline int xen_dbgp_external_startup(struct usb_hcd *hcd) { return -1; } #endif #ifdef CONFIG_EARLY_PRINTK_DBGP /* Call backs from ehci host driver to ehci debug driver */ extern int dbgp_external_startup(struct usb_hcd *); extern int dbgp_reset_prep(struct usb_hcd *); #else static inline int dbgp_reset_prep(struct usb_hcd *hcd) { return xen_dbgp_reset_prep(hcd); } static inline int dbgp_external_startup(struct usb_hcd *hcd) { return xen_dbgp_external_startup(hcd); } #endif #endif /* __LINUX_USB_EHCI_DBGP_H */ usb/ch9.h 0000644 00000007673 14722070374 0006210 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * This file holds USB constants and structures that are needed for * USB device APIs. These are used by the USB device model, which is * defined in chapter 9 of the USB 2.0 specification and in the * Wireless USB 1.0 (spread around). Linux has several APIs in C that * need these: * * - the master/host side Linux-USB kernel driver API; * - the "usbfs" user space API; and * - the Linux "gadget" slave/device/peripheral side driver API. * * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems * act either as a USB master/host or as a USB slave/device. That means * the master and slave side APIs benefit from working well together. * * There's also "Wireless USB", using low power short range radios for * peripheral interconnection but otherwise building on the USB framework. * * Note all descriptors are declared '__attribute__((packed))' so that: * * [a] they never get padded, either internally (USB spec writers * probably handled that) or externally; * * [b] so that accessing bigger-than-a-byte fields will never * generate bus errors on any platform, even when the location of * its descriptor inside a bundle isn't "naturally aligned", and * * [c] for consistency, removing all doubt even when it appears to * someone that the two other points are non-issues for that * particular descriptor type. */ #ifndef __LINUX_USB_CH9_H #define __LINUX_USB_CH9_H #include <linux/device.h> #include <uapi/linux/usb/ch9.h> /** * usb_ep_type_string() - Returns human-readable name of the endpoint type. * @ep_type: The endpoint type to return a human-readable name for. If it's not * any of the types: USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT}, * usually obtained from usb_endpoint_type(), the string 'unknown' will be returned. */ extern const char *usb_ep_type_string(int ep_type); /** * usb_speed_string() - Returns human-readable name of the speed. * @speed: The speed to return a human-readable name for.
If it's not * any of the speeds defined in the usb_device_speed enum, the string for * USB_SPEED_UNKNOWN will be returned. */ extern const char *usb_speed_string(enum usb_device_speed speed); /** * usb_get_maximum_speed - Get maximum requested speed for a given USB * controller. * @dev: Pointer to the given USB controller device * * The function gets the maximum speed string from property "maximum-speed", * and returns the corresponding enum usb_device_speed. */ extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); /** * usb_state_string - Returns human-readable name for the state. * @state: The state to return a human-readable name for. If it's not * any of the states defined in the usb_device_state enum, * the string UNKNOWN will be returned. */ extern const char *usb_state_string(enum usb_device_state state); #ifdef CONFIG_TRACING /** * usb_decode_ctrl - Returns a human-readable representation of a control request. * @str: buffer to return a human-readable representation of control request. * This buffer should have about 200 bytes. * @size: size of str buffer. * @bRequestType: matches the USB bmRequestType field * @bRequest: matches the USB bRequest field * @wValue: matches the USB wValue field (CPU byte order) * @wIndex: matches the USB wIndex field (CPU byte order) * @wLength: matches the USB wLength field (CPU byte order) * * The function returns a decoded, formatted, human-readable description of * the control request packet. * * The usage scenario for this is tracepoints: the function returns the same * @str buffer it is given, which allows it to be used directly inside * TP_printk(). * * Important: the wValue, wIndex and wLength parameters should be converted * with the le16_to_cpu() macro before this function is invoked. */ extern const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, __u8 bRequest, __u16 wValue, __u16 wIndex, __u16 wLength); #endif #endif /* __LINUX_USB_CH9_H */ usb/typec_mux.h 0000644 00000002602 14722070374 0007531 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 #ifndef __USB_TYPEC_MUX #define __USB_TYPEC_MUX #include <linux/usb/typec.h> struct device; struct typec_mux; struct typec_switch; struct fwnode_handle; typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw, enum typec_orientation orientation); struct typec_switch_desc { struct fwnode_handle *fwnode; typec_switch_set_fn_t set; void *drvdata; }; struct typec_switch *typec_switch_get(struct device *dev); void typec_switch_put(struct typec_switch *sw); struct typec_switch * typec_switch_register(struct device *parent, const struct typec_switch_desc *desc); void typec_switch_unregister(struct typec_switch *sw); void typec_switch_set_drvdata(struct typec_switch *sw, void *data); void *typec_switch_get_drvdata(struct typec_switch *sw); typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux, int state); struct typec_mux_desc { struct fwnode_handle *fwnode; typec_mux_set_fn_t set; void *drvdata; }; struct typec_mux * typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc); void typec_mux_put(struct typec_mux *mux); struct typec_mux * typec_mux_register(struct device *parent, const struct typec_mux_desc *desc); void typec_mux_unregister(struct typec_mux *mux); void typec_mux_set_drvdata(struct typec_mux *mux, void *data); void *typec_mux_get_drvdata(struct typec_mux *mux); #endif /* __USB_TYPEC_MUX */ usb/gpio_vbus.h 0000644 00000002201 14722070374 0007506 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * A simple GPIO VBUS sensing driver for B peripheral only devices * with
internal transceivers. * Optionally D+ pullup can be controlled by a second GPIO. * * Copyright (c) 2008 Philipp Zabel <philipp.zabel@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ /** * struct gpio_vbus_mach_info - configuration for gpio_vbus * @gpio_vbus: VBUS sensing GPIO * @gpio_pullup: optional D+ or D- pullup GPIO (else negative/invalid) * @gpio_vbus_inverted: true if gpio_vbus is active low * @gpio_pullup_inverted: true if gpio_pullup is active low * @wakeup: configure gpio_vbus as a wake-up source * * The VBUS sensing GPIO should have a pulldown, which will normally be * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a * value the GPIO detects as active. Some systems will use comparators. */ struct gpio_vbus_mach_info { int gpio_vbus; int gpio_pullup; bool gpio_vbus_inverted; bool gpio_pullup_inverted; bool wakeup; }; usb/chipidea.h 0000644 00000006070 14722070374 0007265 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Platform data for the chipidea USB dual role controller */ #ifndef __LINUX_USB_CHIPIDEA_H #define __LINUX_USB_CHIPIDEA_H #include <linux/extcon.h> #include <linux/usb/otg.h> struct ci_hdrc; /** * struct ci_hdrc_cable - structure for external connector cable state tracking * @connected: true if cable is connected, false otherwise * @changed: set to true when an extcon event happens * @enabled: set to true if we've enabled the vbus or id interrupt * @edev: device which generates the events * @ci: driver state of the chipidea device * @nb: hold event notification callback */ struct ci_hdrc_cable { bool connected; bool changed; bool enabled; struct extcon_dev *edev; struct ci_hdrc *ci; struct notifier_block nb; }; struct ci_hdrc_platform_data { const char *name; /* offset of the capability registers */ uintptr_t capoffset; unsigned power_budget; struct phy *phy; /* old usb_phy interface */ struct usb_phy *usb_phy; enum usb_phy_interface phy_mode; unsigned long flags; #define CI_HDRC_REGS_SHARED BIT(0) #define CI_HDRC_DISABLE_DEVICE_STREAMING BIT(1) #define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2) #define CI_HDRC_DISABLE_HOST_STREAMING BIT(3) #define CI_HDRC_DISABLE_STREAMING (CI_HDRC_DISABLE_DEVICE_STREAMING | \ CI_HDRC_DISABLE_HOST_STREAMING) /* * Only set this when DCCPARAMS.DC==1 and DCCPARAMS.HC==1, * but OTG is not supported (there is no otgsc register).
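 *
 * Flags from this block are OR-ed together when the glue layer fills in
 * the platform data; a hypothetical sketch (DEF_CAPOFFSET is defined at
 * the end of this header, and the values chosen here are made up):
 *
 *	static struct ci_hdrc_platform_data ci_pdata = {
 *		.name		= "ci_hdrc",
 *		.capoffset	= DEF_CAPOFFSET,
 *		.flags		= CI_HDRC_REGS_SHARED |
 *				  CI_HDRC_DISABLE_STREAMING,
 *	};
 *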
*/ #define CI_HDRC_DUAL_ROLE_NOT_OTG BIT(4) #define CI_HDRC_IMX28_WRITE_FIX BIT(5) #define CI_HDRC_FORCE_FULLSPEED BIT(6) #define CI_HDRC_TURN_VBUS_EARLY_ON BIT(7) #define CI_HDRC_SET_NON_ZERO_TTHA BIT(8) #define CI_HDRC_OVERRIDE_AHB_BURST BIT(9) #define CI_HDRC_OVERRIDE_TX_BURST BIT(10) #define CI_HDRC_OVERRIDE_RX_BURST BIT(11) #define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */ #define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13) #define CI_HDRC_IMX_IS_HSIC BIT(14) #define CI_HDRC_PMQOS BIT(15) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 #define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2 #define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3 int (*notify_event) (struct ci_hdrc *ci, unsigned event); struct regulator *reg_vbus; struct usb_otg_caps ci_otg_caps; bool tpl_support; /* interrupt threshold setting */ u32 itc_setting; u32 ahb_burst_config; u32 tx_burst_size; u32 rx_burst_size; /* VBUS and ID signal state tracking, using extcon framework */ struct ci_hdrc_cable vbus_extcon; struct ci_hdrc_cable id_extcon; u32 phy_clkgate_delay_us; /* pins */ struct pinctrl *pctl; struct pinctrl_state *pins_default; struct pinctrl_state *pins_host; struct pinctrl_state *pins_device; }; /* Default offset of capability registers */ #define DEF_CAPOFFSET 0x100 /* Add ci hdrc device */ struct platform_device *ci_hdrc_add_device(struct device *dev, struct resource *res, int nres, struct ci_hdrc_platform_data *platdata); /* Remove ci hdrc device */ void ci_hdrc_remove_device(struct platform_device *pdev); #endif usb/typec_dp.h 0000644 00000007613 14722070374 0007332 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __USB_TYPEC_DP_H #define __USB_TYPEC_DP_H #include <linux/usb/typec_altmode.h> #define USB_TYPEC_DP_SID 0xff01 /* USB IF has not assigned a Standard ID (SID) for VirtualLink, * so the manufacturers of VirtualLink adapters use their Vendor * IDs as the SVID. */ #define USB_TYPEC_NVIDIA_VLINK_SID 0x955 /* NVIDIA VirtualLink */ #define USB_TYPEC_DP_MODE 1 /* * Connector states matching the pin assignments in DisplayPort Alt Mode * Specification. * * These values are meant primarily to be used by the mux drivers, but they are * also used as the "value" part in the alternate mode notification chain, so * receivers of those notifications will always see them. * * Note. DisplayPort USB Type-C Alt Mode Specification version 1.0b deprecated * pin assignments A, B and F, but they are still defined here for legacy * purposes. */ enum { TYPEC_DP_STATE_A = TYPEC_STATE_MODAL, /* Not supported after v1.0b */ TYPEC_DP_STATE_B, /* Not supported after v1.0b */ TYPEC_DP_STATE_C, TYPEC_DP_STATE_D, TYPEC_DP_STATE_E, TYPEC_DP_STATE_F, /* Not supported after v1.0b */ }; /* * struct typec_displayport_data - DisplayPort Alt Mode specific data * @status: Status Update command VDO content * @conf: Configure command VDO content * * This structure is delivered as the data part with the notifications. It * contains the VDOs from the two DisplayPort Type-C alternate mode specific * commands: Status Update and Configure. * * @status will show for example the status of the HPD signal. 
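 *
 * For example, the HPD level can be decoded from @status with the
 * DP_STATUS_* helpers defined below; a sketch:
 *
 *	static bool dp_hpd_asserted(const struct typec_displayport_data *data)
 *	{
 *		return data->status & DP_STATUS_HPD_STATE;
 *	}
 *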
*/ struct typec_displayport_data { u32 status; u32 conf; }; enum { DP_PIN_ASSIGN_A, /* Not supported after v1.0b */ DP_PIN_ASSIGN_B, /* Not supported after v1.0b */ DP_PIN_ASSIGN_C, DP_PIN_ASSIGN_D, DP_PIN_ASSIGN_E, DP_PIN_ASSIGN_F, /* Not supported after v1.0b */ }; /* DisplayPort alt mode specific commands */ #define DP_CMD_STATUS_UPDATE VDO_CMD_VENDOR(0) #define DP_CMD_CONFIGURE VDO_CMD_VENDOR(1) /* DisplayPort Capabilities VDO bits (returned with Discover Modes) */ #define DP_CAP_CAPABILITY(_cap_) ((_cap_) & 3) #define DP_CAP_UFP_D 1 #define DP_CAP_DFP_D 2 #define DP_CAP_DFP_D_AND_UFP_D 3 #define DP_CAP_DP_SIGNALING BIT(2) /* Always set */ #define DP_CAP_GEN2 BIT(3) /* Reserved after v1.0b */ #define DP_CAP_RECEPTACLE BIT(6) #define DP_CAP_USB BIT(7) #define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8) #define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16) /* Get pin assignment taking plug & receptacle into consideration */ #define DP_CAP_PIN_ASSIGN_UFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \ DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_)) #define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_)) /* DisplayPort Status Update VDO bits */ #define DP_STATUS_CONNECTION(_status_) ((_status_) & 3) #define DP_STATUS_CON_DISABLED 0 #define DP_STATUS_CON_DFP_D 1 #define DP_STATUS_CON_UFP_D 2 #define DP_STATUS_CON_BOTH 3 #define DP_STATUS_POWER_LOW BIT(2) #define DP_STATUS_ENABLED BIT(3) #define DP_STATUS_PREFER_MULTI_FUNC BIT(4) #define DP_STATUS_SWITCH_TO_USB BIT(5) #define DP_STATUS_EXIT_DP_MODE BIT(6) #define DP_STATUS_HPD_STATE BIT(7) /* 0 = HPD_Low, 1 = HPD_High */ #define DP_STATUS_IRQ_HPD BIT(8) /* DisplayPort Configurations VDO bits */ #define DP_CONF_CURRENTLY(_conf_) ((_conf_) & 3) #define DP_CONF_UFP_U_AS_DFP_D BIT(0) #define DP_CONF_UFP_U_AS_UFP_D BIT(1) #define DP_CONF_SIGNALING_DP BIT(2) #define DP_CONF_SIGNALING_GEN_2 BIT(3) /* Reserved after v1.0b */ #define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8 #define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8) /* Helper for setting/getting the pin assignement value to the configuration */ #define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8) #define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8) #endif /* __USB_TYPEC_DP_H */ usb/m66592.h 0000644 00000002644 14722070374 0006372 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * M66592 driver platform data * * Copyright (C) 2009 Renesas Solutions Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #ifndef __LINUX_USB_M66592_H #define __LINUX_USB_M66592_H #define M66592_PLATDATA_XTAL_12MHZ 0x01 #define M66592_PLATDATA_XTAL_24MHZ 0x02 #define M66592_PLATDATA_XTAL_48MHZ 0x03 struct m66592_platdata { /* one = on chip controller, zero = external controller */ unsigned on_chip:1; /* one = big endian, zero = little endian */ unsigned endian:1; /* (external controller only) M66592_PLATDATA_XTAL_nnMHZ */ unsigned xtal:2; /* (external controller only) one = 3.3V, zero = 1.5V */ unsigned vif:1; /* (external controller only) set one = WR0_N shorted to WR1_N */ unsigned wr0_shorted_to_wr1:1; }; #endif /* __LINUX_USB_M66592_H */ usb/hcd.h 0000644 00000066460 14722070374 0006266 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2001-2002 by David Brownell * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __USB_CORE_HCD_H #define __USB_CORE_HCD_H #ifdef __KERNEL__ #include <linux/rwsem.h> #include <linux/interrupt.h> #include <linux/idr.h> #define MAX_TOPO_LEVEL 6 /* This file contains declarations of usbcore internals that are mostly * used or exposed by Host Controller Drivers. */ /* * USB Packet IDs (PIDs) */ #define USB_PID_EXT 0xf0 /* USB 2.0 LPM ECN */ #define USB_PID_OUT 0xe1 #define USB_PID_ACK 0xd2 #define USB_PID_DATA0 0xc3 #define USB_PID_PING 0xb4 /* USB 2.0 */ #define USB_PID_SOF 0xa5 #define USB_PID_NYET 0x96 /* USB 2.0 */ #define USB_PID_DATA2 0x87 /* USB 2.0 */ #define USB_PID_SPLIT 0x78 /* USB 2.0 */ #define USB_PID_IN 0x69 #define USB_PID_NAK 0x5a #define USB_PID_DATA1 0x4b #define USB_PID_PREAMBLE 0x3c /* Token mode */ #define USB_PID_ERR 0x3c /* USB 2.0: handshake mode */ #define USB_PID_SETUP 0x2d #define USB_PID_STALL 0x1e #define USB_PID_MDATA 0x0f /* USB 2.0 */ /*-------------------------------------------------------------------------*/ /* * USB Host Controller Driver (usb_hcd) framework * * Since "struct usb_bus" is so thin, you can't share much code in it. * This framework is a layer over that, and should be more sharable. */ /*-------------------------------------------------------------------------*/ struct giveback_urb_bh { bool running; bool high_prio; spinlock_t lock; struct list_head head; struct tasklet_struct bh; struct usb_host_endpoint *completing_ep; }; enum usb_dev_authorize_policy { USB_DEVICE_AUTHORIZE_NONE = 0, USB_DEVICE_AUTHORIZE_ALL = 1, USB_DEVICE_AUTHORIZE_INTERNAL = 2, }; struct usb_hcd { /* * housekeeping */ struct usb_bus self; /* hcd is-a bus */ struct kref kref; /* reference counter */ const char *product_desc; /* product/vendor string */ int speed; /* Speed for this roothub. 
* May be different from * hcd->driver->flags & HCD_MASK */ char irq_descr[24]; /* driver + bus # */ struct timer_list rh_timer; /* drives root-hub polling */ struct urb *status_urb; /* the current status urb */ #ifdef CONFIG_PM struct work_struct wakeup_work; /* for remote wakeup */ #endif struct work_struct died_work; /* for when the device dies */ /* * hardware info/state */ const struct hc_driver *driver; /* hw-specific hooks */ /* * OTG and some Host controllers need software interaction with phys; * other external phys should be software-transparent */ struct usb_phy *usb_phy; struct usb_phy_roothub *phy_roothub; /* Flags that need to be manipulated atomically because they can * change while the host controller is running. Always use * set_bit() or clear_bit() to change their values. */ unsigned long flags; #define HCD_FLAG_HW_ACCESSIBLE 0 /* at full power */ #define HCD_FLAG_POLL_RH 2 /* poll for rh status? */ #define HCD_FLAG_POLL_PENDING 3 /* status has changed? */ #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ #define HCD_FLAG_DEAD 6 /* controller has died? */ #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ #define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */ /* The flags can be tested using these macros; they are likely to * be slightly faster than test_bit(). */ #define HCD_HW_ACCESSIBLE(hcd) ((hcd)->flags & (1U << HCD_FLAG_HW_ACCESSIBLE)) #define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH)) #define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING)) #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) #define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER)) /* * Specifies if interfaces are authorized by default * or they require explicit user space authorization; this bit is * settable through /sys/class/usb_host/X/interface_authorized_default */ #define HCD_INTF_AUTHORIZED(hcd) \ ((hcd)->flags & (1U << HCD_FLAG_INTF_AUTHORIZED)) /* * Specifies if devices are authorized by default * or they require explicit user space authorization; this bit is * settable through /sys/class/usb_host/X/authorized_default */ enum usb_dev_authorize_policy dev_policy; /* Flags that get set only during HCD registration or removal. */ unsigned rh_registered:1;/* is root hub registered? */ unsigned rh_pollable:1; /* may we poll the root hub? */ unsigned msix_enabled:1; /* driver has MSI-X enabled? */ unsigned msi_enabled:1; /* driver has MSI enabled? */ /* * do not manage the PHY state in the HCD core, instead let the driver * handle this (for example if the PHY can only be turned on after a * specific event) */ unsigned skip_phy_initialization:1; /* The next flag is a stopgap, to be removed when all the HCDs * support the new root-hub polling mechanism. 
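 *
 * (Returning to the atomic flags above: they are changed with set_bit()
 * and clear_bit() and tested with the HCD_* macros; a sketch, where
 * "hcd" is a struct usb_hcd pointer:
 *
 *	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 *	if (!HCD_HW_ACCESSIBLE(hcd))
 *		clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
 * )
 *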
*/ unsigned uses_new_polling:1; unsigned wireless:1; /* Wireless USB HCD */ unsigned has_tt:1; /* Integrated TT in root hub */ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ unsigned can_do_streams:1; /* HC supports streams */ unsigned tpl_support:1; /* OTG & EH TPL support */ unsigned cant_recv_wakeups:1; /* wakeup requests from downstream aren't received */ unsigned int irq; /* irq allocated */ void __iomem *regs; /* device memory/io */ resource_size_t rsrc_start; /* memory/io resource start */ resource_size_t rsrc_len; /* memory/io resource length */ unsigned power_budget; /* in mA, 0 = no limit */ struct giveback_urb_bh high_prio_bh; struct giveback_urb_bh low_prio_bh; /* bandwidth_mutex should be taken before adding or removing * any new bus bandwidth constraints: * 1. Before adding a configuration for a new device. * 2. Before removing the configuration to put the device into * the addressed state. * 3. Before selecting a different configuration. * 4. Before selecting an alternate interface setting. * * bandwidth_mutex should be dropped after a successful control message * to the device, or resetting the bandwidth after a failed attempt. */ struct mutex *address0_mutex; struct mutex *bandwidth_mutex; struct usb_hcd *shared_hcd; struct usb_hcd *primary_hcd; #define HCD_BUFFER_POOLS 4 struct dma_pool *pool[HCD_BUFFER_POOLS]; int state; # define __ACTIVE 0x01 # define __SUSPEND 0x04 # define __TRANSIENT 0x80 # define HC_STATE_HALT 0 # define HC_STATE_RUNNING (__ACTIVE) # define HC_STATE_QUIESCING (__SUSPEND|__TRANSIENT|__ACTIVE) # define HC_STATE_RESUMING (__SUSPEND|__TRANSIENT) # define HC_STATE_SUSPENDED (__SUSPEND) #define HC_IS_RUNNING(state) ((state) & __ACTIVE) #define HC_IS_SUSPENDED(state) ((state) & __SUSPEND) /* memory pool for HCs having local memory, or %NULL */ struct gen_pool *localmem_pool; /* more shared queuing code would be good; it should support * smarter scheduling, handle transaction translators, etc; * input size of periodic table to an interrupt scheduler. * (ohci 32, uhci 1024, ehci 256/512/1024). */ /* The HC driver's private data is stored at the end of * this structure. */ unsigned long hcd_priv[0] __attribute__ ((aligned(sizeof(s64)))); }; /* 2.4 does this a bit differently ... */ static inline struct usb_bus *hcd_to_bus(struct usb_hcd *hcd) { return &hcd->self; } static inline struct usb_hcd *bus_to_hcd(struct usb_bus *bus) { return container_of(bus, struct usb_hcd, self); } /*-------------------------------------------------------------------------*/ struct hc_driver { const char *description; /* "ehci-hcd" etc */ const char *product_desc; /* product/vendor string */ size_t hcd_priv_size; /* size of private data */ /* irq handler */ irqreturn_t (*irq) (struct usb_hcd *hcd); int flags; #define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */ #define HCD_DMA 0x0002 /* HC uses DMA */ #define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */ #define HCD_USB11 0x0010 /* USB 1.1 */ #define HCD_USB2 0x0020 /* USB 2.0 */ #define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/ #define HCD_USB3 0x0040 /* USB 3.0 */ #define HCD_USB31 0x0050 /* USB 3.1 */ #define HCD_USB32 0x0060 /* USB 3.2 */ #define HCD_MASK 0x0070 #define HCD_BH 0x0100 /* URB complete in BH context */ /* called to init HCD and root hub */ int (*reset) (struct usb_hcd *hcd); int (*start) (struct usb_hcd *hcd); /* NOTE: these suspend/resume calls relate to the HC as * a whole, not just the root hub; they're for PCI bus glue. 
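 *
 * A PCI glue layer fills these next two hooks in its hc_driver; a
 * hypothetical fragment, as a sketch (all foo_* names are made up):
 *
 *	static const struct hc_driver foo_hc_driver = {
 *		.description	= "foo-hcd",
 *		.hcd_priv_size	= sizeof(struct foo_hcd),
 *		.flags		= HCD_MEMORY | HCD_DMA | HCD_USB2,
 *		.pci_suspend	= foo_pci_suspend,
 *		.pci_resume	= foo_pci_resume,
 *	};
 *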
*/ /* called after suspending the hub, before entering D3 etc */ int (*pci_suspend)(struct usb_hcd *hcd, bool do_wakeup); /* called after entering D0 (etc), before resuming the hub */ int (*pci_resume)(struct usb_hcd *hcd, bool hibernated); /* cleanly make HCD stop writing memory and doing I/O */ void (*stop) (struct usb_hcd *hcd); /* shutdown HCD */ void (*shutdown) (struct usb_hcd *hcd); /* return current frame number */ int (*get_frame_number) (struct usb_hcd *hcd); /* manage i/o requests, device state */ int (*urb_enqueue)(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); int (*urb_dequeue)(struct usb_hcd *hcd, struct urb *urb, int status); /* * (optional) these hooks allow an HCD to override the default DMA * mapping and unmapping routines. In general, they shouldn't be * necessary unless the host controller has special DMA requirements, * such as alignment constraints. If these are not specified, the * general usb_hcd_(un)?map_urb_for_dma functions will be used instead * (and it may be a good idea to call these functions from your HCD * implementation) */ int (*map_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); void (*unmap_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb); /* hw synch, freeing endpoint resources that urb_dequeue can't */ void (*endpoint_disable)(struct usb_hcd *hcd, struct usb_host_endpoint *ep); /* (optional) reset any endpoint state such as sequence number and current window */ void (*endpoint_reset)(struct usb_hcd *hcd, struct usb_host_endpoint *ep); /* root hub support */ int (*hub_status_data) (struct usb_hcd *hcd, char *buf); int (*hub_control) (struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength); int (*bus_suspend)(struct usb_hcd *); int (*bus_resume)(struct usb_hcd *); int (*start_port_reset)(struct usb_hcd *, unsigned port_num); unsigned long (*get_resuming_ports)(struct usb_hcd *); /* force handover of high-speed port to full-speed companion */ void (*relinquish_port)(struct usb_hcd *, int); /* has a port been handed over to a companion? */ int (*port_handed_over)(struct usb_hcd *, int); /* CLEAR_TT_BUFFER completion callback */ void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); /* xHCI specific functions */ /* Called by usb_alloc_dev to alloc HC device structures */ int (*alloc_dev)(struct usb_hcd *, struct usb_device *); /* Called by usb_disconnect to free HC device structures */ void (*free_dev)(struct usb_hcd *, struct usb_device *); /* Change a group of bulk endpoints to support multiple stream IDs */ int (*alloc_streams)(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, unsigned int num_streams, gfp_t mem_flags); /* Reverts a group of bulk endpoints back to not using stream IDs. * Can fail if we run out of memory. */ int (*free_streams)(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, gfp_t mem_flags); /* Bandwidth computation functions */ /* Note that add_endpoint() can only be called once per endpoint before * check_bandwidth() or reset_bandwidth() must be called. * drop_endpoint() can only be called once per endpoint also. * A call to xhci_drop_endpoint() followed by a call to * xhci_add_endpoint() will add the endpoint to the schedule with * possibly new parameters denoted by a different endpoint descriptor * in usb_host_endpoint. A call to xhci_add_endpoint() followed by a * call to xhci_drop_endpoint() is not allowed.
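 *
 * The expected calling pattern for the bandwidth hooks below, as a
 * sketch with error handling trimmed ("hcd", "udev" and "ep" are
 * assumed to exist):
 *
 *	int ret;
 *
 *	ret = hcd->driver->add_endpoint(hcd, udev, ep);
 *	if (!ret)
 *		ret = hcd->driver->check_bandwidth(hcd, udev);
 *	if (ret)
 *		hcd->driver->reset_bandwidth(hcd, udev);
 *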
*/ /* Allocate endpoint resources and add them to a new schedule */ int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); /* Drop an endpoint from a new schedule */ int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); /* Check that a new hardware configuration, set using * endpoint_enable and endpoint_disable, does not exceed bus * bandwidth. This must be called before any set configuration * or set interface requests are sent to the device. */ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); /* Reset the device schedule to the last known good schedule, * which was set from a previous successful call to * check_bandwidth(). This reverts any add_endpoint() and * drop_endpoint() calls since that last successful call. * Used for when a check_bandwidth() call fails due to resource * or bandwidth constraints. */ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); /* Returns the hardware-chosen device address */ int (*address_device)(struct usb_hcd *, struct usb_device *udev); /* prepares the hardware to send commands to the device */ int (*enable_device)(struct usb_hcd *, struct usb_device *udev); /* Notifies the HCD after a hub descriptor is fetched. * Will block. */ int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); int (*reset_device)(struct usb_hcd *, struct usb_device *); /* Notifies the HCD after a device is connected and its * address is set */ int (*update_device)(struct usb_hcd *, struct usb_device *); int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); /* USB 3.0 Link Power Management */ /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state state); /* The xHCI host controller can still fail the command to * disable the LPM timeouts, so this can return an error code. 
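 *
 * Callers therefore check the result even on the disable path; a sketch:
 *
 *	int ret = hcd->driver->disable_usb3_lpm_timeout(hcd, udev,
 *							USB3_LPM_U1);
 *	if (ret)
 *		dev_warn(&udev->dev, "cannot disable U1 timeout\n");
 *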
*/ int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state state); int (*find_raw_port_number)(struct usb_hcd *, int); /* Call for power on/off the port if necessary */ int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable); }; static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) { return hcd->driver->flags & HCD_BH; } static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { return hcd->high_prio_bh.completing_ep == ep; } static inline bool hcd_uses_dma(struct usb_hcd *hcd) { return IS_ENABLED(CONFIG_HAS_DMA) && (hcd->driver->flags & HCD_DMA); } extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, int status); extern void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb); extern int usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags); extern int usb_hcd_unlink_urb(struct urb *urb, int status); extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status); extern int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); extern void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *); extern void usb_hcd_unmap_urb_for_dma(struct usb_hcd *, struct urb *); extern void usb_hcd_flush_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); extern void usb_hcd_disable_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); extern void usb_hcd_reset_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); extern void usb_hcd_synchronize_unlinks(struct usb_device *udev); extern int usb_hcd_alloc_bandwidth(struct usb_device *udev, struct usb_host_config *new_config, struct usb_host_interface *old_alt, struct usb_host_interface *new_alt); extern int usb_hcd_get_frame_number(struct usb_device *udev); struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, struct device *sysdev, struct device *dev, const char *bus_name, struct usb_hcd *primary_hcd); extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver, struct device *dev, const char *bus_name); extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, struct device *dev, const char *bus_name, struct usb_hcd *shared_hcd); extern struct usb_hcd *usb_get_hcd(struct usb_hcd *hcd); extern void usb_put_hcd(struct usb_hcd *hcd); extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd); extern int usb_add_hcd(struct usb_hcd *hcd, unsigned int irqnum, unsigned long irqflags); extern void usb_remove_hcd(struct usb_hcd *hcd); extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1); int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, dma_addr_t dma, size_t size); struct platform_device; extern void usb_hcd_platform_shutdown(struct platform_device *dev); #ifdef CONFIG_USB_PCI struct pci_dev; struct pci_device_id; extern int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id); extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev); #ifdef CONFIG_PM extern const struct dev_pm_ops usb_hcd_pci_pm_ops; #endif #endif /* CONFIG_USB_PCI */ /* pci-ish (pdev null is ok) buffer alloc/mapping support */ void usb_init_pool_max(void); int hcd_buffer_create(struct usb_hcd *hcd); void hcd_buffer_destroy(struct usb_hcd *hcd); void *hcd_buffer_alloc(struct usb_bus *bus, size_t 
size, gfp_t mem_flags, dma_addr_t *dma); void hcd_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma); void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, size_t size, gfp_t mem_flags, dma_addr_t *dma); void hcd_buffer_free_pages(struct usb_hcd *hcd, size_t size, void *addr, dma_addr_t dma); /* generic bus glue, needed for host controllers that don't use PCI */ extern irqreturn_t usb_hcd_irq(int irq, void *__hcd); extern void usb_hc_died(struct usb_hcd *hcd); extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd); extern void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum); extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */ #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) #define usb_settoggle(dev, ep, out, bit) \ ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \ ((bit) << (ep))) /* -------------------------------------------------------------------------- */ /* Enumeration is only for the hub driver, or HCD virtual root hubs */ extern struct usb_device *usb_alloc_dev(struct usb_device *parent, struct usb_bus *, unsigned port); extern int usb_new_device(struct usb_device *dev); extern void usb_disconnect(struct usb_device **); extern int usb_get_configuration(struct usb_device *dev); extern void usb_destroy_configuration(struct usb_device *dev); /*-------------------------------------------------------------------------*/ /* * HCD Root Hub support */ #include <linux/usb/ch11.h> /* * As of USB 2.0, full/low speed devices are segregated into trees. * One type grows from USB 1.1 host controllers (OHCI, UHCI etc). * The other type grows from high speed hubs when they connect to * full/low speed devices using "Transaction Translators" (TTs). * * TTs should only be known to the hub driver, and high speed bus * drivers (only EHCI for now). They affect periodic scheduling and * sometimes control/bulk error recovery. 
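 *
 * (A TT buffers the full/low speed halves of split transactions; when a
 * control/bulk transfer dies mid-split, the stale buffer must be flushed
 * with the CLEAR_TT_BUFFER hub class request, which is what
 * usb_hub_clear_tt_buffer() below arranges.)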
*/ struct usb_device; struct usb_tt { struct usb_device *hub; /* upstream highspeed hub */ int multi; /* true means one TT per port */ unsigned think_time; /* think time in ns */ void *hcpriv; /* HCD private data */ /* for control/bulk error recovery (CLEAR_TT_BUFFER) */ spinlock_t lock; struct list_head clear_list; /* of usb_tt_clear */ struct work_struct clear_work; }; struct usb_tt_clear { struct list_head clear_list; unsigned tt; u16 devinfo; struct usb_hcd *hcd; struct usb_host_endpoint *ep; }; extern int usb_hub_clear_tt_buffer(struct urb *urb); extern void usb_ep0_reinit(struct usb_device *); /* (shifted) direction/type/recipient from the USB 2.0 spec, table 9.2 */ #define DeviceRequest \ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8) #define DeviceOutRequest \ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8) #define InterfaceRequest \ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) #define EndpointRequest \ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) #define EndpointOutRequest \ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) /* class requests from the USB 2.0 hub spec, table 11-15 */ #define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request)) /* GetBusState and SetHubDescriptor are optional, omitted */ #define ClearHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_CLEAR_FEATURE) #define ClearPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_CLEAR_FEATURE) #define GetHubDescriptor HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_DESCRIPTOR) #define GetHubStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_STATUS) #define GetPortStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS) #define SetHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE) #define SetPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE) #define ClearTTBuffer HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_CLEAR_TT_BUFFER) #define ResetTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_RESET_TT) #define GetTTState HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_TT_STATE) #define StopTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_STOP_TT) /*-------------------------------------------------------------------------*/ /* class requests from USB 3.1 hub spec, table 10-7 */ #define SetHubDepth HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, HUB_SET_DEPTH) #define GetPortErrorCount HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_PORT_ERR_COUNT) /* * Generic bandwidth allocation constants/support */ #define FRAME_TIME_USECS 1000L #define BitTime(bytecount) (7 * 8 * bytecount / 6) /* with integer truncation */ /* Trying not to use worst-case bit-stuffing * of (7/6 * 8 * bytecount) = 9.33 * bytecount */ /* bytecount = data payload byte count */ #define NS_TO_US(ns) DIV_ROUND_UP(ns, 1000L) /* convert nanoseconds to microseconds, rounding up */ /* * Full/low speed bandwidth allocation constants/support. */ #define BW_HOST_DELAY 1000L /* nanoseconds */ #define BW_HUB_LS_SETUP 333L /* nanoseconds */ /* 4 full-speed bit times (est.) */ #define FRAME_TIME_BITS 12000L /* frame = 1 millisecond */ #define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L) #define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L) /* * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed * ISO is a bit less, no ACK ... 
from USB 2.0 spec, 5.11.3 (and needed * to preallocate bandwidth) */ #define USB2_HOST_DELAY 5 /* nsec, guess */ #define HS_NSECS(bytes) (((55 * 8 * 2083) \ + (2083UL * (3 + BitTime(bytes))))/1000 \ + USB2_HOST_DELAY) #define HS_NSECS_ISO(bytes) (((38 * 8 * 2083) \ + (2083UL * (3 + BitTime(bytes))))/1000 \ + USB2_HOST_DELAY) #define HS_USECS(bytes) NS_TO_US(HS_NSECS(bytes)) #define HS_USECS_ISO(bytes) NS_TO_US(HS_NSECS_ISO(bytes)) extern long usb_calc_bus_time(int speed, int is_input, int isoc, int bytecount); /*-------------------------------------------------------------------------*/ extern void usb_set_device_state(struct usb_device *udev, enum usb_device_state new_state); /*-------------------------------------------------------------------------*/ /* exported only within usbcore */ extern struct idr usb_bus_idr; extern struct mutex usb_bus_idr_lock; extern wait_queue_head_t usb_kill_urb_queue; #define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) #ifdef CONFIG_PM extern unsigned usb_wakeup_enabled_descendants(struct usb_device *udev); extern void usb_root_hub_lost_power(struct usb_device *rhdev); extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); #else static inline unsigned usb_wakeup_enabled_descendants(struct usb_device *udev) { return 0; } static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) { return; } #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) struct usb_mon_operations { void (*urb_submit)(struct usb_bus *bus, struct urb *urb); void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err); void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status); /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */ }; extern const struct usb_mon_operations *mon_ops; static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) { if (bus->monitored) (*mon_ops->urb_submit)(bus, urb); } static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb, int error) { if (bus->monitored) (*mon_ops->urb_submit_error)(bus, urb, error); } static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb, int status) { if (bus->monitored) (*mon_ops->urb_complete)(bus, urb, status); } int usb_mon_register(const struct usb_mon_operations *ops); void usb_mon_deregister(void); #else static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) {} static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb, int error) {} static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb, int status) {} #endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */ /*-------------------------------------------------------------------------*/ /* random stuff */ #define RUN_CONTEXT (in_irq() ? "in_irq" \ : (in_interrupt() ? "in_interrupt" : "can sleep")) /* This rwsem is for use only by the hub driver and ehci-hcd. * Nobody else should touch it. 
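 *
 * (The hub driver takes it for reading around each root-hub port reset;
 * ehci-hcd takes it for writing before setting the CONFIGFLAG (CF)
 * register, so port ownership never changes in the middle of a reset.)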
*/ extern struct rw_semaphore ehci_cf_port_reset_rwsem; /* Keep track of which host controller drivers are loaded */ #define USB_UHCI_LOADED 0 #define USB_OHCI_LOADED 1 #define USB_EHCI_LOADED 2 extern unsigned long usb_hcds_loaded; #endif /* __KERNEL__ */ #endif /* __USB_CORE_HCD_H */ usb/of.h 0000644 00000003503 14722070374 0006121 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * OF helpers for usb devices. * * This file is released under the GPLv2 */ #ifndef __LINUX_USB_OF_H #define __LINUX_USB_OF_H #include <linux/usb/ch9.h> #include <linux/usb/otg.h> #include <linux/usb/phy.h> struct usb_device; #if IS_ENABLED(CONFIG_OF) enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1); bool usb_of_has_combined_node(struct usb_device *udev); struct device_node *usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum); struct device *usb_of_get_companion_dev(struct device *dev); #else static inline enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0) { return USB_DR_MODE_UNKNOWN; } static inline bool of_usb_host_tpl_support(struct device_node *np) { return false; } static inline int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps) { return 0; } static inline struct device_node * usb_of_get_device_node(struct usb_device *hub, int port1) { return NULL; } static inline bool usb_of_has_combined_node(struct usb_device *udev) { return false; } static inline struct device_node * usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum) { return NULL; } static inline struct device *usb_of_get_companion_dev(struct device *dev) { return NULL; } #endif #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT) enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np); #else static inline enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np) { return USBPHY_INTERFACE_MODE_UNKNOWN; } #endif #endif /* __LINUX_USB_OF_H */ usb/isp1362.h 0000644 00000003134 14722070374 0006624 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * board initialization code should put one of these into dev->platform_data * and place the isp1362 onto platform_bus. */ #ifndef __LINUX_USB_ISP1362_H__ #define __LINUX_USB_ISP1362_H__ struct isp1362_platform_data { /* Enable internal pulldown resistors on downstream ports */ unsigned sel15Kres:1; /* Clock cannot be stopped */ unsigned clknotstop:1; /* On-chip overcurrent protection */ unsigned oc_enable:1; /* INT output polarity */ unsigned int_act_high:1; /* INT edge or level triggered */ unsigned int_edge_triggered:1; /* DREQ output polarity */ unsigned dreq_act_high:1; /* DACK input polarity */ unsigned dack_act_high:1; /* chip can be resumed via H_WAKEUP pin */ unsigned remote_wakeup_connected:1; /* Switch or not to switch (keep always powered) */ unsigned no_power_switching:1; /* Ganged port power switching (0) or individual port power switching (1) */ unsigned power_switching_mode:1; /* Given port_power, msec/2 after power on till power good */ u8 potpg; /* Hardware reset set/clear */ void (*reset) (struct device *dev, int set); /* Clock start/stop */ void (*clock) (struct device *dev, int start); /* Inter-io delay (ns). 
The chip is picky about access timings; it * expects at least: * 110ns delay between consecutive accesses to DATA_REG, * 300ns delay between access to ADDR_REG and DATA_REG (registers) * 462ns delay between access to ADDR_REG and DATA_REG (buffer memory) * WE MUST NOT be activated during these intervals (even without CS!) */ void (*delay) (struct device *dev, unsigned int delay); }; #endif usb/ehci_def.h 0000644 00000017614 14722070374 0007253 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2001-2002 by David Brownell * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __LINUX_USB_EHCI_DEF_H #define __LINUX_USB_EHCI_DEF_H #include <linux/usb/ehci-dbgp.h> /* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ /* Section 2.2 Host Controller Capability Registers */ struct ehci_caps { /* these fields are specified as 8 and 16 bit registers, * but some hosts can't perform 8 or 16 bit PCI accesses. * some hosts treat caplength and hciversion as parts of a 32-bit * register, others treat them as two separate registers, this * affects the memory map for big endian controllers. */ u32 hc_capbase; #define HC_LENGTH(ehci, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \ (ehci_big_endian_capbase(ehci) ? 24 : 0))) #define HC_VERSION(ehci, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \ (ehci_big_endian_capbase(ehci) ? 0 : 16))) u32 hcs_params; /* HCSPARAMS - offset 0x4 */ #define HCS_DEBUG_PORT(p) (((p)>>20)&0xf) /* bits 23:20, debug port? 
*/ #define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */ #define HCS_N_CC(p) (((p)>>12)&0xf) /* bits 15:12, #companion HCs */ #define HCS_N_PCC(p) (((p)>>8)&0xf) /* bits 11:8, ports per CC */ #define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */ #define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */ #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ u32 hcc_params; /* HCCPARAMS - offset 0x8 */ /* EHCI 1.1 addendum */ #define HCC_32FRAME_PERIODIC_LIST(p) ((p)&(1 << 19)) #define HCC_PER_PORT_CHANGE_EVENT(p) ((p)&(1 << 18)) #define HCC_LPM(p) ((p)&(1 << 17)) #define HCC_HW_PREFETCH(p) ((p)&(1 << 16)) #define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */ #define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */ #define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */ #define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */ #define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/ #define HCC_64BIT_ADDR(p) ((p)&(1)) /* true: can use 64-bit addr */ u8 portroute[8]; /* nibbles for routing - offset 0xC */ }; /* Section 2.3 Host Controller Operational Registers */ struct ehci_regs { /* USBCMD: offset 0x00 */ u32 command; /* EHCI 1.1 addendum */ #define CMD_HIRD (0xf<<24) /* host initiated resume duration */ #define CMD_PPCEE (1<<15) /* per port change event enable */ #define CMD_FSP (1<<14) /* fully synchronized prefetch */ #define CMD_ASPE (1<<13) /* async schedule prefetch enable */ #define CMD_PSPE (1<<12) /* periodic schedule prefetch enable */ /* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */ #define CMD_PARK (1<<11) /* enable "park" on async qh */ #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */ #define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */ #define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */ #define CMD_ASE (1<<5) /* async schedule enable */ #define CMD_PSE (1<<4) /* periodic schedule enable */ /* 3:2 is periodic frame list size */ #define CMD_RESET (1<<1) /* reset HC not bus */ #define CMD_RUN (1<<0) /* start/stop HC */ /* USBSTS: offset 0x04 */ u32 status; #define STS_PPCE_MASK (0xff<<16) /* Per-Port change event 1-16 */ #define STS_ASS (1<<15) /* Async Schedule Status */ #define STS_PSS (1<<14) /* Periodic Schedule Status */ #define STS_RECL (1<<13) /* Reclamation */ #define STS_HALT (1<<12) /* Not running (any reason) */ /* some bits reserved */ /* these STS_* flags are also intr_enable bits (USBINTR) */ #define STS_IAA (1<<5) /* Interrupted on async advance */ #define STS_FATAL (1<<4) /* such as some PCI access errors */ #define STS_FLR (1<<3) /* frame list rolled over */ #define STS_PCD (1<<2) /* port change detect */ #define STS_ERR (1<<1) /* "error" completion (overflow, ...) */ #define STS_INT (1<<0) /* "normal" completion (short, ...) 
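 *
 * Illustrative IRQ-handler sketch (not part of this header; ehci_readl()
 * is the ehci-hcd driver's endian-aware MMIO read helper):
 *
 *	u32 status = ehci_readl(ehci, &ehci->regs->status);
 *	if (status & (STS_INT | STS_ERR))
 *		; /* scan the async/periodic schedules for done qtds */
 *	if (status & STS_PCD)
 *		; /* a root-hub port changed: poll the root hub */
 *	if (status & STS_FATAL)
 *		; /* host system error: halt and reset the HC */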
*/ /* USBINTR: offset 0x08 */ u32 intr_enable; /* FRINDEX: offset 0x0C */ u32 frame_index; /* current microframe number */ /* CTRLDSSEGMENT: offset 0x10 */ u32 segment; /* address bits 63:32 if needed */ /* PERIODICLISTBASE: offset 0x14 */ u32 frame_list; /* points to periodic list */ /* ASYNCLISTADDR: offset 0x18 */ u32 async_next; /* address of next async queue head */ u32 reserved1[2]; /* TXFILLTUNING: offset 0x24 */ u32 txfill_tuning; /* TX FIFO Tuning register */ #define TXFIFO_DEFAULT (8<<16) /* FIFO burst threshold 8 */ u32 reserved2[6]; /* CONFIGFLAG: offset 0x40 */ u32 configured_flag; #define FLAG_CF (1<<0) /* true: we'll support "high speed" */ /* PORTSC: offset 0x44 */ u32 port_status[0]; /* up to N_PORTS */ /* EHCI 1.1 addendum */ #define PORTSC_SUSPEND_STS_ACK 0 #define PORTSC_SUSPEND_STS_NYET 1 #define PORTSC_SUSPEND_STS_STALL 2 #define PORTSC_SUSPEND_STS_ERR 3 #define PORT_DEV_ADDR (0x7f<<25) /* device address */ #define PORT_SSTS (0x3<<23) /* suspend status */ /* 31:23 reserved */ #define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */ #define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */ #define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */ /* 19:16 for port testing */ #define PORT_TEST(x) (((x)&0xf)<<16) /* Port Test Control */ #define PORT_TEST_PKT PORT_TEST(0x4) /* Port Test Control - packet test */ #define PORT_TEST_FORCE PORT_TEST(0x5) /* Port Test Control - force enable */ #define PORT_LED_OFF (0<<14) #define PORT_LED_AMBER (1<<14) #define PORT_LED_GREEN (2<<14) #define PORT_LED_MASK (3<<14) #define PORT_OWNER (1<<13) /* true: companion hc owns this port */ #define PORT_POWER (1<<12) /* true: has power (see PPC) */ #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ #define PORT_LS_MASK (3<<10) /* Link status (SE0, K or J */ /* 9 reserved */ #define PORT_LPM (1<<9) /* LPM transaction */ #define PORT_RESET (1<<8) /* reset port */ #define PORT_SUSPEND (1<<7) /* suspend port */ #define PORT_RESUME (1<<6) /* resume it */ #define PORT_OCC (1<<5) /* over current change */ #define PORT_OC (1<<4) /* over current active */ #define PORT_PEC (1<<3) /* port enable change */ #define PORT_PE (1<<2) /* port enable */ #define PORT_CSC (1<<1) /* connect status change */ #define PORT_CONNECT (1<<0) /* device connected */ #define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) u32 reserved3[9]; /* USBMODE: offset 0x68 */ u32 usbmode; /* USB Device mode */ #define USBMODE_SDIS (1<<3) /* Stream disable */ #define USBMODE_BE (1<<2) /* BE/LE endianness select */ #define USBMODE_CM_HC (3<<0) /* host controller mode */ #define USBMODE_CM_IDLE (0<<0) /* idle state */ u32 reserved4[6]; /* Moorestown has some non-standard registers, partially due to the fact that * its EHCI controller has both TT and LPM support. HOSTPCx are extensions to * PORTSCx */ /* HOSTPC: offset 0x84 */ u32 hostpc[0]; /* HOSTPC extension */ #define HOSTPC_PHCD (1<<22) /* Phy clock disable */ #define HOSTPC_PSPD (3<<25) /* Port speed detection */ u32 reserved5[17]; /* USBMODE_EX: offset 0xc8 */ u32 usbmode_ex; /* USB Device mode extension */ #define USBMODE_EX_VBPS (1<<5) /* VBus Power Select On */ #define USBMODE_EX_HC (3<<0) /* host controller mode */ }; #endif /* __LINUX_USB_EHCI_DEF_H */ usb/otg-fsm.h 0000644 00000021164 14722070374 0007074 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* Copyright (C) 2007,2008 Freescale Semiconductor, Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __LINUX_USB_OTG_FSM_H #define __LINUX_USB_OTG_FSM_H #include <linux/mutex.h> #include <linux/errno.h> #define PROTO_UNDEF (0) #define PROTO_HOST (1) #define PROTO_GADGET (2) #define OTG_STS_SELECTOR 0xF000 /* OTG status selector, according to * OTG and EH 2.0 Chapter 6.2.3 * Table:6-4 */ #define HOST_REQUEST_FLAG 1 /* Host request flag, according to * OTG and EH 2.0 Chapter 6.2.3 * Table:6-5 */ #define T_HOST_REQ_POLL (1500) /* 1500ms, HNP polling interval */ enum otg_fsm_timer { /* Standard OTG timers */ A_WAIT_VRISE, A_WAIT_VFALL, A_WAIT_BCON, A_AIDL_BDIS, B_ASE0_BRST, A_BIDL_ADIS, B_AIDL_BDIS, /* Auxiliary timers */ B_SE0_SRP, B_SRP_FAIL, A_WAIT_ENUM, B_DATA_PLS, B_SSEND_SRP, NUM_OTG_FSM_TIMERS, }; /** * struct otg_fsm - OTG state machine according to the OTG spec * * OTG hardware Inputs * * Common inputs for A and B device * @id: TRUE for B-device, FALSE for A-device. * @adp_change: TRUE when current ADP measurement (n) value, compared to the * ADP measurement taken at n-2, differs by more than CADP_THR * @power_up: TRUE when the OTG device first powers up its USB system and * an ADP measurement is taken if ADP capable * * A-Device state inputs * @a_srp_det: TRUE if the A-device detects SRP * @a_vbus_vld: TRUE when VBUS voltage is in regulation * @b_conn: TRUE if the A-device detects connection from the B-device * @a_bus_resume: TRUE when the B-device detects that the A-device is signaling * a resume (K state) * * B-Device state inputs * @a_bus_suspend: TRUE when the B-device detects that the A-device has put the * bus into suspend * @a_conn: TRUE if the B-device detects a connection from the A-device * @b_se0_srp: TRUE when the line has been at SE0 for more than the minimum * time before generating SRP * @b_ssend_srp: TRUE when the VBUS has been below VOTG_SESS_VLD for more than * the minimum time before generating SRP * @b_sess_vld: TRUE when the B-device detects that the voltage on VBUS is * above VOTG_SESS_VLD * @test_device: TRUE when the B-device switches to B-Host and detects an OTG * test device. This must be set by the host/hub driver * * Application inputs (A-Device) * @a_bus_drop: TRUE when A-device application needs to power down the bus * @a_bus_req: TRUE when A-device application wants to use the bus. * FALSE to suspend the bus * * Application inputs (B-Device) * @b_bus_req: TRUE during the time that the Application running on the * B-device wants to use the bus * * Auxiliary inputs (OTG v1.3 only. Obsolete now.) * @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD * @b_bus_suspend: TRUE when the A-device detects that the B-device has put * the bus into suspend * @b_bus_resume: TRUE when the A-device detects that the B-device is signaling * resume on the bus * * OTG Output status. Read only for users.
Updated by OTG FSM helpers defined * in this file * * Outputs for both A and B device * @drv_vbus: TRUE when A-device is driving VBUS * @loc_conn: TRUE when the local device has signaled that it is connected * to the bus * @loc_sof: TRUE when the local device is generating activity on the bus * @adp_prb: TRUE when the local device is in the process of doing * ADP probing * * Outputs for B-device state * @adp_sns: TRUE when the B-device is in the process of carrying out * ADP sensing * @data_pulse: TRUE when the B-device is performing data line pulsing * * Internal Variables * * a_set_b_hnp_en: TRUE when the A-device has successfully set the * b_hnp_enable bit in the B-device. * Unused as OTG fsm uses otg->host->b_hnp_enable instead * b_srp_done: TRUE when the B-device has completed initiating SRP * b_hnp_enable: TRUE when the B-device has accepted the * SetFeature(b_hnp_enable) request from the A-device. * Unused as OTG fsm uses otg->gadget->b_hnp_enable instead * a_clr_err: Asserted (by the application?) to clear a_vbus_err due to an * overcurrent condition and causes the A-device to transition * to a_wait_vfall */ struct otg_fsm { /* Input */ int id; int adp_change; int power_up; int a_srp_det; int a_vbus_vld; int b_conn; int a_bus_resume; int a_bus_suspend; int a_conn; int b_se0_srp; int b_ssend_srp; int b_sess_vld; int test_device; int a_bus_drop; int a_bus_req; int b_bus_req; /* Auxiliary inputs */ int a_sess_vld; int b_bus_resume; int b_bus_suspend; /* Output */ int drv_vbus; int loc_conn; int loc_sof; int adp_prb; int adp_sns; int data_pulse; /* Internal variables */ int a_set_b_hnp_en; int b_srp_done; int b_hnp_enable; int a_clr_err; /* Informative variables. All unused as of now */ int a_bus_drop_inf; int a_bus_req_inf; int a_clr_err_inf; int b_bus_req_inf; /* Auxiliary informative variables */ int a_suspend_req_inf; /* Timeout indicator for timers */ int a_wait_vrise_tmout; int a_wait_vfall_tmout; int a_wait_bcon_tmout; int a_aidl_bdis_tmout; int b_ase0_brst_tmout; int a_bidl_adis_tmout; struct otg_fsm_ops *ops; struct usb_otg *otg; /* Current usb protocol used: 0:undefined; 1:host; 2:client */ int protocol; struct mutex lock; u8 *host_req_flag; struct delayed_work hnp_polling_work; bool hnp_work_inited; bool state_changed; }; struct otg_fsm_ops { void (*chrg_vbus)(struct otg_fsm *fsm, int on); void (*drv_vbus)(struct otg_fsm *fsm, int on); void (*loc_conn)(struct otg_fsm *fsm, int on); void (*loc_sof)(struct otg_fsm *fsm, int on); void (*start_pulse)(struct otg_fsm *fsm); void (*start_adp_prb)(struct otg_fsm *fsm); void (*start_adp_sns)(struct otg_fsm *fsm); void (*add_timer)(struct otg_fsm *fsm, enum otg_fsm_timer timer); void (*del_timer)(struct otg_fsm *fsm, enum otg_fsm_timer timer); int (*start_host)(struct otg_fsm *fsm, int on); int (*start_gadget)(struct otg_fsm *fsm, int on); }; static inline int otg_chrg_vbus(struct otg_fsm *fsm, int on) { if (!fsm->ops->chrg_vbus) return -EOPNOTSUPP; fsm->ops->chrg_vbus(fsm, on); return 0; } static inline int otg_drv_vbus(struct otg_fsm *fsm, int on) { if (!fsm->ops->drv_vbus) return -EOPNOTSUPP; if (fsm->drv_vbus != on) { fsm->drv_vbus = on; fsm->ops->drv_vbus(fsm, on); } return 0; } static inline int otg_loc_conn(struct otg_fsm *fsm, int on) { if (!fsm->ops->loc_conn) return -EOPNOTSUPP; if (fsm->loc_conn != on) { fsm->loc_conn = on; fsm->ops->loc_conn(fsm, on); } return 0; } static inline int otg_loc_sof(struct otg_fsm *fsm, int on) { if (!fsm->ops->loc_sof) return -EOPNOTSUPP; if (fsm->loc_sof != on) { fsm->loc_sof = on; fsm->ops->loc_sof(fsm, on); }
return 0; } static inline int otg_start_pulse(struct otg_fsm *fsm) { if (!fsm->ops->start_pulse) return -EOPNOTSUPP; if (!fsm->data_pulse) { fsm->data_pulse = 1; fsm->ops->start_pulse(fsm); } return 0; } static inline int otg_start_adp_prb(struct otg_fsm *fsm) { if (!fsm->ops->start_adp_prb) return -EOPNOTSUPP; if (!fsm->adp_prb) { fsm->adp_sns = 0; fsm->adp_prb = 1; fsm->ops->start_adp_prb(fsm); } return 0; } static inline int otg_start_adp_sns(struct otg_fsm *fsm) { if (!fsm->ops->start_adp_sns) return -EOPNOTSUPP; if (!fsm->adp_sns) { fsm->adp_sns = 1; fsm->ops->start_adp_sns(fsm); } return 0; } static inline int otg_add_timer(struct otg_fsm *fsm, enum otg_fsm_timer timer) { if (!fsm->ops->add_timer) return -EOPNOTSUPP; fsm->ops->add_timer(fsm, timer); return 0; } static inline int otg_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer timer) { if (!fsm->ops->del_timer) return -EOPNOTSUPP; fsm->ops->del_timer(fsm, timer); return 0; } static inline int otg_start_host(struct otg_fsm *fsm, int on) { if (!fsm->ops->start_host) return -EOPNOTSUPP; return fsm->ops->start_host(fsm, on); } static inline int otg_start_gadget(struct otg_fsm *fsm, int on) { if (!fsm->ops->start_gadget) return -EOPNOTSUPP; return fsm->ops->start_gadget(fsm, on); } int otg_statemachine(struct otg_fsm *fsm); #endif /* __LINUX_USB_OTG_FSM_H */ usb/irda.h 0000644 00000007415 14722070374 0006442 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * USB IrDA Bridge Device Definition */ #ifndef __LINUX_USB_IRDA_H #define __LINUX_USB_IRDA_H /* This device should use Application-specific class */ #define USB_SUBCLASS_IRDA 0x02 /*-------------------------------------------------------------------------*/ /* Class-Specific requests (bRequest field) */ #define USB_REQ_CS_IRDA_RECEIVING 1 #define USB_REQ_CS_IRDA_CHECK_MEDIA_BUSY 3 #define USB_REQ_CS_IRDA_RATE_SNIFF 4 #define USB_REQ_CS_IRDA_UNICAST_LIST 5 #define USB_REQ_CS_IRDA_GET_CLASS_DESC 6 /*-------------------------------------------------------------------------*/ /* Class-Specific descriptor */ #define USB_DT_CS_IRDA 0x21 /*-------------------------------------------------------------------------*/ /* Data sizes */ #define USB_IRDA_DS_2048 (1 << 5) #define USB_IRDA_DS_1024 (1 << 4) #define USB_IRDA_DS_512 (1 << 3) #define USB_IRDA_DS_256 (1 << 2) #define USB_IRDA_DS_128 (1 << 1) #define USB_IRDA_DS_64 (1 << 0) /* Window sizes */ #define USB_IRDA_WS_7 (1 << 6) #define USB_IRDA_WS_6 (1 << 5) #define USB_IRDA_WS_5 (1 << 4) #define USB_IRDA_WS_4 (1 << 3) #define USB_IRDA_WS_3 (1 << 2) #define USB_IRDA_WS_2 (1 << 1) #define USB_IRDA_WS_1 (1 << 0) /* Min turnaround times in usecs */ #define USB_IRDA_MTT_0 (1 << 7) #define USB_IRDA_MTT_10 (1 << 6) #define USB_IRDA_MTT_50 (1 << 5) #define USB_IRDA_MTT_100 (1 << 4) #define USB_IRDA_MTT_500 (1 << 3) #define USB_IRDA_MTT_1000 (1 << 2) #define USB_IRDA_MTT_5000 (1 << 1) #define USB_IRDA_MTT_10000 (1 << 0) /* Baud rates */ #define USB_IRDA_BR_4000000 (1 << 8) #define USB_IRDA_BR_1152000 (1 << 7) #define USB_IRDA_BR_576000 (1 << 6) #define USB_IRDA_BR_115200 (1 << 5) #define USB_IRDA_BR_57600 (1 << 4) #define USB_IRDA_BR_38400 (1 << 3) #define USB_IRDA_BR_19200 (1 << 2) #define USB_IRDA_BR_9600 (1 << 1) #define USB_IRDA_BR_2400 (1 << 0) /* Additional BOFs */ #define USB_IRDA_AB_0 (1 << 7) #define USB_IRDA_AB_1 (1 << 6) #define USB_IRDA_AB_2 (1 << 5) #define USB_IRDA_AB_3 (1 << 4) #define USB_IRDA_AB_6 (1 << 3) #define USB_IRDA_AB_12 (1 << 2) #define USB_IRDA_AB_24 (1 << 1) #define USB_IRDA_AB_48 (1 << 0) /* IRDA Rate Sniff */ 
#define USB_IRDA_RATE_SNIFF 1 /*-------------------------------------------------------------------------*/ struct usb_irda_cs_descriptor { __u8 bLength; __u8 bDescriptorType; __le16 bcdSpecRevision; __u8 bmDataSize; __u8 bmWindowSize; __u8 bmMinTurnaroundTime; __le16 wBaudRate; __u8 bmAdditionalBOFs; __u8 bIrdaRateSniff; __u8 bMaxUnicastList; } __attribute__ ((packed)); /*-------------------------------------------------------------------------*/ /* Data Format */ #define USB_IRDA_STATUS_MEDIA_BUSY (1 << 7) /* The following is a 4-bit value used for both * inbound and outbound headers: * * 0 - speed ignored * 1 - 2400 bps * 2 - 9600 bps * 3 - 19200 bps * 4 - 38400 bps * 5 - 57600 bps * 6 - 115200 bps * 7 - 576000 bps * 8 - 1.152 Mbps * 9 - 4 Mbps * 10..15 - Reserved */ #define USB_IRDA_STATUS_LINK_SPEED 0x0f #define USB_IRDA_LS_NO_CHANGE 0 #define USB_IRDA_LS_2400 1 #define USB_IRDA_LS_9600 2 #define USB_IRDA_LS_19200 3 #define USB_IRDA_LS_38400 4 #define USB_IRDA_LS_57600 5 #define USB_IRDA_LS_115200 6 #define USB_IRDA_LS_576000 7 #define USB_IRDA_LS_1152000 8 #define USB_IRDA_LS_4000000 9 /* The following is a 4-bit value used only for * outbound header: * * 0 - No change (BOF ignored) * 1 - 48 BOFs * 2 - 24 BOFs * 3 - 12 BOFs * 4 - 6 BOFs * 5 - 3 BOFs * 6 - 2 BOFs * 7 - 1 BOFs * 8 - 0 BOFs * 9..15 - Reserved */ #define USB_IRDA_EXTRA_BOFS 0xf0 struct usb_irda_inbound_header { __u8 bmStatus; }; struct usb_irda_outbound_header { __u8 bmChange; }; #endif /* __LINUX_USB_IRDA_H */ usb/phy_companion.h 0000644 00000002254 14722070374 0010362 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * phy-companion.h -- phy companion to indicate the comparator part of PHY * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Author: Kishon Vijay Abraham I <kishon@ti.com> * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #ifndef __DRIVERS_PHY_COMPANION_H #define __DRIVERS_PHY_COMPANION_H #include <linux/usb/otg.h> /* phy_companion to take care of VBUS, ID and srp capabilities */ struct phy_companion { /* effective for A-peripheral, ignored for B devices */ int (*set_vbus)(struct phy_companion *x, bool enabled); /* for B devices only: start session with A-Host */ int (*start_srp)(struct phy_companion *x); }; #endif /* __DRIVERS_PHY_COMPANION_H */ usb/quirks.h 0000644 00000004532 14722070374 0007036 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * This file holds the definitions of quirks found in USB devices. * Only quirks that affect the whole device, not an interface, * belong here. 
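 *
 * usbcore ORs these flags into udev->quirks when a device matches its
 * built-in quirk table or the usbcore.quirks= module parameter; code
 * then tests individual bits, e.g. (udev->quirks & USB_QUIRK_NO_LPM).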
*/ #ifndef __LINUX_USB_QUIRKS_H #define __LINUX_USB_QUIRKS_H /* string descriptors must not be fetched using a 255-byte read */ #define USB_QUIRK_STRING_FETCH_255 BIT(0) /* device can't resume correctly so reset it instead */ #define USB_QUIRK_RESET_RESUME BIT(1) /* device can't handle Set-Interface requests */ #define USB_QUIRK_NO_SET_INTF BIT(2) /* device can't handle its Configuration or Interface strings */ #define USB_QUIRK_CONFIG_INTF_STRINGS BIT(3) /* device can't be reset (e.g. morph devices); don't use reset */ #define USB_QUIRK_RESET BIT(4) /* device has more interface descriptions than the bNumInterfaces count, and can't handle talking to these interfaces */ #define USB_QUIRK_HONOR_BNUMINTERFACES BIT(5) /* device needs a pause during initialization, after we read the device descriptor */ #define USB_QUIRK_DELAY_INIT BIT(6) /* * For high speed and super speed interrupt endpoints, the USB 2.0 and * USB 3.0 specs require the interval in microframes * (1 microframe = 125 microseconds) to be calculated as * interval = 2 ^ (bInterval-1). * * Devices with this quirk report their bInterval as the result of this * calculation instead of the exponent variable used in the calculation. */ #define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL BIT(7) /* device can't handle device_qualifier descriptor requests */ #define USB_QUIRK_DEVICE_QUALIFIER BIT(8) /* device generates spurious wakeup, ignore remote wakeup capability */ #define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9) /* device can't handle Link Power Management */ #define USB_QUIRK_NO_LPM BIT(10) /* * Device reports its bInterval as linear frames instead of the * USB 2.0 calculation. */ #define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11) /* * Device needs to be disconnected before suspend to prevent spurious * wakeup. */ #define USB_QUIRK_DISCONNECT_SUSPEND BIT(12) /* Device needs a pause after every control message. */ #define USB_QUIRK_DELAY_CTRL_MSG BIT(13) /* Hub needs extra delay after resetting its port. */ #define USB_QUIRK_HUB_SLOW_RESET BIT(14) /* device has blacklisted endpoints */ #define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15) #endif /* __LINUX_USB_QUIRKS_H */ usb/cdc_ncm.h 0000644 00000013504 14722070374 0007105 0 ustar 00 // SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) /* * Copyright (C) ST-Ericsson 2010-2012 * Contact: Alexey Orishko <alexey.orishko@stericsson.com> * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> * * USB Host Driver for Network Control Model (NCM) * http://www.usb.org/developers/devclass_docs/NCM10.zip * * The NCM encoding, decoding and initialization logic * derives from FreeBSD 8.x if_cdce.c and if_cdcereg.h * * This software is available to you under a choice of one of two * licenses. You may choose this file to be licensed under the terms * of the GNU General Public License (GPL) Version 2 or the 2-clause * BSD license listed below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef __LINUX_USB_CDC_NCM_H #define __LINUX_USB_CDC_NCM_H #define CDC_NCM_COMM_ALTSETTING_NCM 0 #define CDC_NCM_COMM_ALTSETTING_MBIM 1 #define CDC_NCM_DATA_ALTSETTING_NCM 1 #define CDC_NCM_DATA_ALTSETTING_MBIM 2 /* CDC NCM subclass 3.3.1 */ #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 /* CDC NCM subclass 3.3.2 */ #define USB_CDC_NCM_NDP32_LENGTH_MIN 0x20 /* Maximum NTB length */ #define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */ #define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */ /* Initial NTB length */ #define CDC_NCM_NTB_DEF_SIZE_TX 16384 /* bytes */ #define CDC_NCM_NTB_DEF_SIZE_RX 16384 /* bytes */ /* Minimum value for MaxDatagramSize, ch. 6.2.9 */ #define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */ /* Minimum value for MaxDatagramSize, ch. 8.1.3 */ #define CDC_MBIM_MIN_DATAGRAM_SIZE 2048 /* bytes */ #define CDC_NCM_MIN_TX_PKT 512 /* bytes */ /* Default value for MaxDatagramSize */ #define CDC_NCM_MAX_DATAGRAM_SIZE 8192 /* bytes */ /* * Maximum amount of datagrams in NCM Datagram Pointer Table, not counting * the last NULL entry. 
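 *
 * (Each NDP16 datagram entry is a { wDatagramIndex, wDatagramLength }
 * pair of __le16, i.e. 4 bytes per datagram, and the table is
 * terminated by an all-zero entry.)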
*/ #define CDC_NCM_DPT_DATAGRAMS_MAX 40 /* Restart the timer, if amount of datagrams is less than given value */ #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 #define CDC_NCM_TIMER_PENDING_CNT 2 #define CDC_NCM_TIMER_INTERVAL_USEC 400UL #define CDC_NCM_TIMER_INTERVAL_MIN 5UL #define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC) /* Driver flags */ #define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */ #define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */ #define CDC_NCM_FLAG_PREFER_NTB32 0x08 /* prefer NDP32 over NDP16 */ #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) struct cdc_ncm_ctx { struct usb_cdc_ncm_ntb_parameters ncm_parm; struct hrtimer tx_timer; struct tasklet_struct bh; const struct usb_cdc_ncm_desc *func_desc; const struct usb_cdc_mbim_desc *mbim_desc; const struct usb_cdc_mbim_extended_desc *mbim_extended_desc; const struct usb_cdc_ether_desc *ether_desc; struct usb_interface *control; struct usb_interface *data; struct sk_buff *tx_curr_skb; struct sk_buff *tx_rem_skb; __le32 tx_rem_sign; spinlock_t mtx; atomic_t stop; int drvflags; u32 timer_interval; u32 max_ndp_size; u8 is_ndp16; union { struct usb_cdc_ncm_ndp16 *delayed_ndp16; struct usb_cdc_ncm_ndp32 *delayed_ndp32; }; u32 tx_timer_pending; u32 tx_curr_frame_num; u32 rx_max; u32 tx_max; u32 tx_curr_size; u32 tx_low_mem_max_cnt; u32 tx_low_mem_val; u32 max_datagram_size; u16 tx_max_datagrams; u16 tx_remainder; u16 tx_modulus; u16 tx_ndp_modulus; u16 tx_seq; u16 rx_seq; u16 min_tx_pkt; /* statistics */ u32 tx_curr_frame_payload; u32 tx_reason_ntb_full; u32 tx_reason_ndp_full; u32 tx_reason_timeout; u32 tx_reason_max_datagram; u64 tx_overhead; u64 tx_ntbs; u64 rx_overhead; u64 rx_ntbs; }; u8 cdc_ncm_select_altsetting(struct usb_interface *intf); int cdc_ncm_change_mtu(struct net_device *net, int new_mtu); int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags); void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset); int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset); struct sk_buff * cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags); int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in); #endif /* __LINUX_USB_CDC_NCM_H */ usb/usb338x.h 0000644 00000017451 14722070374 0006743 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * USB 338x super/high/full speed USB device controller. * Unlike many such controllers, this one talks PCI. * * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) * Copyright (C) 2003 David Brownell * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #ifndef __LINUX_USB_USB338X_H #define __LINUX_USB_USB338X_H #include <linux/usb/net2280.h> /* * Extra defined bits for net2280 registers */ #define SCRATCH 0x0b #define DEFECT7374_FSM_FIELD 28 #define SUPER_SPEED 8 #define DMA_REQUEST_OUTSTANDING 5 #define DMA_PAUSE_DONE_INTERRUPT 26 #define SET_ISOCHRONOUS_DELAY 24 #define SET_SEL 22 #define SUPER_SPEED_MODE 8 /*ep_cfg*/ #define MAX_BURST_SIZE 24 #define EP_FIFO_BYTE_COUNT 16 #define IN_ENDPOINT_ENABLE 14 #define IN_ENDPOINT_TYPE 12 #define OUT_ENDPOINT_ENABLE 10 #define OUT_ENDPOINT_TYPE 8 #define USB3380_EP_CFG_MASK_IN ((0x3 << IN_ENDPOINT_TYPE) | \ BIT(IN_ENDPOINT_ENABLE)) #define USB3380_EP_CFG_MASK_OUT ((0x3 << OUT_ENDPOINT_TYPE) | \ BIT(OUT_ENDPOINT_ENABLE)) struct usb338x_usb_ext_regs { u32 usbclass; #define DEVICE_PROTOCOL 16 #define DEVICE_SUB_CLASS 8 #define DEVICE_CLASS 0 u32 ss_sel; #define U2_SYSTEM_EXIT_LATENCY 8 #define U1_SYSTEM_EXIT_LATENCY 0 u32 ss_del; #define U2_DEVICE_EXIT_LATENCY 8 #define U1_DEVICE_EXIT_LATENCY 0 u32 usb2lpm; #define USB_L1_LPM_HIRD 2 #define USB_L1_LPM_REMOTE_WAKE 1 #define USB_L1_LPM_SUPPORT 0 u32 usb3belt; #define BELT_MULTIPLIER 10 #define BEST_EFFORT_LATENCY_TOLERANCE 0 u32 usbctl2; #define LTM_ENABLE 7 #define U2_ENABLE 6 #define U1_ENABLE 5 #define FUNCTION_SUSPEND 4 #define USB3_CORE_ENABLE 3 #define USB2_CORE_ENABLE 2 #define SERIAL_NUMBER_STRING_ENABLE 0 u32 in_timeout; #define GPEP3_TIMEOUT 19 #define GPEP2_TIMEOUT 18 #define GPEP1_TIMEOUT 17 #define GPEP0_TIMEOUT 16 #define GPEP3_TIMEOUT_VALUE 13 #define GPEP3_TIMEOUT_ENABLE 12 #define GPEP2_TIMEOUT_VALUE 9 #define GPEP2_TIMEOUT_ENABLE 8 #define GPEP1_TIMEOUT_VALUE 5 #define GPEP1_TIMEOUT_ENABLE 4 #define GPEP0_TIMEOUT_VALUE 1 #define GPEP0_TIMEOUT_ENABLE 0 u32 isodelay; #define ISOCHRONOUS_DELAY 0 } __packed; struct usb338x_fifo_regs { /* offset 0x0500, 0x0520, 0x0540, 0x0560, 0x0580 */ u32 ep_fifo_size_base; #define IN_FIFO_BASE_ADDRESS 22 #define IN_FIFO_SIZE 16 #define OUT_FIFO_BASE_ADDRESS 6 #define OUT_FIFO_SIZE 0 u32 ep_fifo_out_wrptr; u32 ep_fifo_out_rdptr; u32 ep_fifo_in_wrptr; u32 ep_fifo_in_rdptr; u32 unused[3]; } __packed; /* Link layer */ struct usb338x_ll_regs { /* offset 0x700 */ u32 ll_ltssm_ctrl1; u32 ll_ltssm_ctrl2; u32 ll_ltssm_ctrl3; u32 unused1; /* 0x710 */ u32 unused2; u32 ll_general_ctrl0; u32 ll_general_ctrl1; #define PM_U3_AUTO_EXIT 29 #define PM_U2_AUTO_EXIT 28 #define PM_U1_AUTO_EXIT 27 #define PM_FORCE_U2_ENTRY 26 #define PM_FORCE_U1_ENTRY 25 #define PM_LGO_COLLISION_SEND_LAU 24 #define PM_DIR_LINK_REJECT 23 #define PM_FORCE_LINK_ACCEPT 22 #define PM_DIR_ENTRY_U3 20 #define PM_DIR_ENTRY_U2 19 #define PM_DIR_ENTRY_U1 18 #define PM_U2_ENABLE 17 #define PM_U1_ENABLE 16 #define SKP_THRESHOLD_ADJUST_FMW 8 #define RESEND_DPP_ON_LRTY_FMW 7 #define DL_BIT_VALUE_FMW 6 #define FORCE_DL_BIT 5 u32 ll_general_ctrl2; #define SELECT_INVERT_LANE_POLARITY 7 #define FORCE_INVERT_LANE_POLARITY 6 /* 0x720 */ u32 ll_general_ctrl3; u32 ll_general_ctrl4; u32 ll_error_gen; u32 unused3; /* 0x730 */ u32 unused4[4]; /* 0x740 */ u32 unused5[2]; u32 ll_lfps_5; #define TIMER_LFPS_6US 16 u32 ll_lfps_6; #define TIMER_LFPS_80US 0 /* 0x750 */ u32 unused6[8]; /* 0x770 */ u32 unused7[3]; u32 ll_tsn_counters_2; #define HOT_TX_NORESET_TS2 24 /* 0x780 */ u32 ll_tsn_counters_3; #define HOT_RX_RESET_TS2 0 
u32 unused8[3]; /* 0x790 */ u32 unused9; u32 ll_lfps_timers_2; #define LFPS_TIMERS_2_WORKAROUND_VALUE 0x084d u32 unused10; u32 ll_tsn_chicken_bit; #define RECOVERY_IDLE_TO_RECOVER_FMW 3 } __packed; /* protocol layer */ struct usb338x_pl_regs { /* offset 0x800 */ u32 pl_reg_1; u32 pl_reg_2; u32 pl_reg_3; u32 pl_reg_4; u32 pl_ep_ctrl; /* Protocol Layer Endpoint Control*/ #define PL_EP_CTRL 0x810 #define ENDPOINT_SELECT 0 /* [4:0] */ #define EP_INITIALIZED 16 #define SEQUENCE_NUMBER_RESET 17 #define CLEAR_ACK_ERROR_CODE 20 u32 pl_reg_6; u32 pl_reg_7; u32 pl_reg_8; u32 pl_ep_status_1; /* Protocol Layer Endpoint Status 1*/ #define PL_EP_STATUS_1 0x820 #define STATE 16 #define ACK_GOOD_NORMAL 0x11 #define ACK_GOOD_MORE_ACKS_TO_COME 0x16 u32 pl_ep_status_2; u32 pl_ep_status_3; /* Protocol Layer Endpoint Status 3*/ #define PL_EP_STATUS_3 0x828 #define SEQUENCE_NUMBER 0 u32 pl_ep_status_4; /* Protocol Layer Endpoint Status 4*/ #define PL_EP_STATUS_4 0x82c u32 pl_ep_cfg_4; /* Protocol Layer Endpoint Configuration 4*/ #define PL_EP_CFG_4 0x830 #define NON_CTRL_IN_TOLERATE_BAD_DIR 6 } __packed; #endif /* __LINUX_USB_USB338X_H */ usb/cdc-wdm.h 0000644 00000001213 14722070374 0007027 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * USB CDC Device Management subdriver * * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #ifndef __LINUX_USB_CDC_WDM_H #define __LINUX_USB_CDC_WDM_H #include <uapi/linux/usb/cdc-wdm.h> extern struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf, struct usb_endpoint_descriptor *ep, int bufsize, int (*manage_power)(struct usb_interface *, int)); #endif /* __LINUX_USB_CDC_WDM_H */ usb/r8a66597.h 0000644 00000043225 14722070374 0006635 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * R8A66597 driver platform data * * Copyright (C) 2009 Renesas Solutions Corp. * * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #ifndef __LINUX_USB_R8A66597_H #define __LINUX_USB_R8A66597_H #define R8A66597_PLATDATA_XTAL_12MHZ 0x01 #define R8A66597_PLATDATA_XTAL_24MHZ 0x02 #define R8A66597_PLATDATA_XTAL_48MHZ 0x03 struct r8a66597_platdata { /* This callback can control port power instead of DVSTCTR register. 
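 *
 * A board file might provide it like this (sketch; the GPIO base
 * number is hypothetical):
 *
 *	static void board_port_power(int port, int power)
 *	{
 *		gpio_set_value(GPIO_USB_PWR0 + port, !!power);
 *	}
 *
 *	static struct r8a66597_platdata board_pdata = {
 *		.port_power	= board_port_power,
 *		.xtal		= R8A66597_PLATDATA_XTAL_12MHZ,
 *		.vif		= 1,
 *	};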
*/ void (*port_power)(int port, int power); /* This parameter is for BUSWAIT */ u16 buswait; /* set one = on chip controller, set zero = external controller */ unsigned on_chip:1; /* (external controller only) set R8A66597_PLATDATA_XTAL_nnMHZ */ unsigned xtal:2; /* set one = 3.3V, set zero = 1.5V */ unsigned vif:1; /* set one = big endian, set zero = little endian */ unsigned endian:1; /* (external controller only) set one = WR0_N shorted to WR1_N */ unsigned wr0_shorted_to_wr1:1; /* set one = using SUDMAC */ unsigned sudmac:1; }; /* Register definitions */ #define SYSCFG0 0x00 #define SYSCFG1 0x02 #define SYSSTS0 0x04 #define SYSSTS1 0x06 #define DVSTCTR0 0x08 #define DVSTCTR1 0x0A #define TESTMODE 0x0C #define PINCFG 0x0E #define DMA0CFG 0x10 #define DMA1CFG 0x12 #define CFIFO 0x14 #define D0FIFO 0x18 #define D1FIFO 0x1C #define CFIFOSEL 0x20 #define CFIFOCTR 0x22 #define CFIFOSIE 0x24 #define D0FIFOSEL 0x28 #define D0FIFOCTR 0x2A #define D1FIFOSEL 0x2C #define D1FIFOCTR 0x2E #define INTENB0 0x30 #define INTENB1 0x32 #define INTENB2 0x34 #define BRDYENB 0x36 #define NRDYENB 0x38 #define BEMPENB 0x3A #define SOFCFG 0x3C #define INTSTS0 0x40 #define INTSTS1 0x42 #define INTSTS2 0x44 #define BRDYSTS 0x46 #define NRDYSTS 0x48 #define BEMPSTS 0x4A #define FRMNUM 0x4C #define UFRMNUM 0x4E #define USBADDR 0x50 #define USBREQ 0x54 #define USBVAL 0x56 #define USBINDX 0x58 #define USBLENG 0x5A #define DCPCFG 0x5C #define DCPMAXP 0x5E #define DCPCTR 0x60 #define PIPESEL 0x64 #define PIPECFG 0x68 #define PIPEBUF 0x6A #define PIPEMAXP 0x6C #define PIPEPERI 0x6E #define PIPE1CTR 0x70 #define PIPE2CTR 0x72 #define PIPE3CTR 0x74 #define PIPE4CTR 0x76 #define PIPE5CTR 0x78 #define PIPE6CTR 0x7A #define PIPE7CTR 0x7C #define PIPE8CTR 0x7E #define PIPE9CTR 0x80 #define PIPE1TRE 0x90 #define PIPE1TRN 0x92 #define PIPE2TRE 0x94 #define PIPE2TRN 0x96 #define PIPE3TRE 0x98 #define PIPE3TRN 0x9A #define PIPE4TRE 0x9C #define PIPE4TRN 0x9E #define PIPE5TRE 0xA0 #define PIPE5TRN 0xA2 #define DEVADD0 0xD0 #define DEVADD1 0xD2 #define DEVADD2 0xD4 #define DEVADD3 0xD6 #define DEVADD4 0xD8 #define DEVADD5 0xDA #define DEVADD6 0xDC #define DEVADD7 0xDE #define DEVADD8 0xE0 #define DEVADD9 0xE2 #define DEVADDA 0xE4 /* System Configuration Control Register */ #define XTAL 0xC000 /* b15-14: Crystal selection */ #define XTAL48 0x8000 /* 48MHz */ #define XTAL24 0x4000 /* 24MHz */ #define XTAL12 0x0000 /* 12MHz */ #define XCKE 0x2000 /* b13: External clock enable */ #define PLLC 0x0800 /* b11: PLL control */ #define SCKE 0x0400 /* b10: USB clock enable */ #define PCSDIS 0x0200 /* b9: not CS wakeup */ #define LPSME 0x0100 /* b8: Low power sleep mode */ #define HSE 0x0080 /* b7: Hi-speed enable */ #define DCFM 0x0040 /* b6: Controller function select */ #define DRPD 0x0020 /* b5: D+/- pull down control */ #define DPRPU 0x0010 /* b4: D+ pull up control */ #define USBE 0x0001 /* b0: USB module operation enable */ /* System Configuration Status Register */ #define OVCBIT 0x8000 /* b15-14: Over-current bit */ #define OVCMON 0xC000 /* b15-14: Over-current monitor */ #define SOFEA 0x0020 /* b5: SOF monitor */ #define IDMON 0x0004 /* b3: ID-pin monitor */ #define LNST 0x0003 /* b1-0: D+, D- line status */ #define SE1 0x0003 /* SE1 */ #define FS_KSTS 0x0002 /* Full-Speed K State */ #define FS_JSTS 0x0001 /* Full-Speed J State */ #define LS_JSTS 0x0002 /* Low-Speed J State */ #define LS_KSTS 0x0001 /* Low-Speed K State */ #define SE0 0x0000 /* SE0 */ /* Device State Control Register */ #define EXTLP0 0x0400 /* b10: External port */ 
#define VBOUT 0x0200 /* b9: VBUS output */ #define WKUP 0x0100 /* b8: Remote wakeup */ #define RWUPE 0x0080 /* b7: Remote wakeup sense */ #define USBRST 0x0040 /* b6: USB reset enable */ #define RESUME 0x0020 /* b5: Resume enable */ #define UACT 0x0010 /* b4: USB bus enable */ #define RHST 0x0007 /* b1-0: Reset handshake status */ #define HSPROC 0x0004 /* HS handshake is processing */ #define HSMODE 0x0003 /* Hi-Speed mode */ #define FSMODE 0x0002 /* Full-Speed mode */ #define LSMODE 0x0001 /* Low-Speed mode */ #define UNDECID 0x0000 /* Undecided */ /* Test Mode Register */ #define UTST 0x000F /* b3-0: Test select */ #define H_TST_PACKET 0x000C /* HOST TEST Packet */ #define H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */ #define H_TST_K 0x000A /* HOST TEST K */ #define H_TST_J 0x0009 /* HOST TEST J */ #define H_TST_NORMAL 0x0000 /* HOST Normal Mode */ #define P_TST_PACKET 0x0004 /* PERI TEST Packet */ #define P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */ #define P_TST_K 0x0002 /* PERI TEST K */ #define P_TST_J 0x0001 /* PERI TEST J */ #define P_TST_NORMAL 0x0000 /* PERI Normal Mode */ /* Data Pin Configuration Register */ #define LDRV 0x8000 /* b15: Drive Current Adjust */ #define VIF1 0x0000 /* VIF = 1.8V */ #define VIF3 0x8000 /* VIF = 3.3V */ #define INTA 0x0001 /* b1: USB INT-pin active */ /* DMAx Pin Configuration Register */ #define DREQA 0x4000 /* b14: Dreq active select */ #define BURST 0x2000 /* b13: Burst mode */ #define DACKA 0x0400 /* b10: Dack active select */ #define DFORM 0x0380 /* b9-7: DMA mode select */ #define CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */ #define CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */ #define CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */ #define SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */ #define DENDA 0x0040 /* b6: Dend active select */ #define PKTM 0x0020 /* b5: Packet mode */ #define DENDE 0x0010 /* b4: Dend enable */ #define OBUS 0x0004 /* b2: OUTbus mode */ /* CFIFO/DxFIFO Port Select Register */ #define RCNT 0x8000 /* b15: Read count mode */ #define REW 0x4000 /* b14: Buffer rewind */ #define DCLRM 0x2000 /* b13: DMA buffer clear mode */ #define DREQE 0x1000 /* b12: DREQ output enable */ #define MBW_8 0x0000 /* 8bit */ #define MBW_16 0x0400 /* 16bit */ #define MBW_32 0x0800 /* 32bit */ #define BIGEND 0x0100 /* b8: Big endian mode */ #define BYTE_LITTLE 0x0000 /* little dendian */ #define BYTE_BIG 0x0100 /* big endifan */ #define ISEL 0x0020 /* b5: DCP FIFO port direction select */ #define CURPIPE 0x000F /* b2-0: PIPE select */ /* CFIFO/DxFIFO Port Control Register */ #define BVAL 0x8000 /* b15: Buffer valid flag */ #define BCLR 0x4000 /* b14: Buffer clear */ #define FRDY 0x2000 /* b13: FIFO ready */ #define DTLN 0x0FFF /* b11-0: FIFO received data length */ /* Interrupt Enable Register 0 */ #define VBSE 0x8000 /* b15: VBUS interrupt */ #define RSME 0x4000 /* b14: Resume interrupt */ #define SOFE 0x2000 /* b13: Frame update interrupt */ #define DVSE 0x1000 /* b12: Device state transition interrupt */ #define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ #define BEMPE 0x0400 /* b10: Buffer empty interrupt */ #define NRDYE 0x0200 /* b9: Buffer not ready interrupt */ #define BRDYE 0x0100 /* b8: Buffer ready interrupt */ /* Interrupt Enable Register 1 */ #define OVRCRE 0x8000 /* b15: Over-current interrupt */ #define BCHGE 0x4000 /* b14: USB us chenge interrupt */ #define DTCHE 0x1000 /* b12: Detach sense interrupt */ #define ATTCHE 0x0800 /* b11: Attach sense interrupt */ #define EOFERRE 
0x0040 /* b6: EOF error interrupt */ #define SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */ #define SACKE 0x0010 /* b4: SETUP ACK interrupt */ /* BRDY Interrupt Enable/Status Register */ #define BRDY9 0x0200 /* b9: PIPE9 */ #define BRDY8 0x0100 /* b8: PIPE8 */ #define BRDY7 0x0080 /* b7: PIPE7 */ #define BRDY6 0x0040 /* b6: PIPE6 */ #define BRDY5 0x0020 /* b5: PIPE5 */ #define BRDY4 0x0010 /* b4: PIPE4 */ #define BRDY3 0x0008 /* b3: PIPE3 */ #define BRDY2 0x0004 /* b2: PIPE2 */ #define BRDY1 0x0002 /* b1: PIPE1 */ #define BRDY0 0x0001 /* b1: PIPE0 */ /* NRDY Interrupt Enable/Status Register */ #define NRDY9 0x0200 /* b9: PIPE9 */ #define NRDY8 0x0100 /* b8: PIPE8 */ #define NRDY7 0x0080 /* b7: PIPE7 */ #define NRDY6 0x0040 /* b6: PIPE6 */ #define NRDY5 0x0020 /* b5: PIPE5 */ #define NRDY4 0x0010 /* b4: PIPE4 */ #define NRDY3 0x0008 /* b3: PIPE3 */ #define NRDY2 0x0004 /* b2: PIPE2 */ #define NRDY1 0x0002 /* b1: PIPE1 */ #define NRDY0 0x0001 /* b1: PIPE0 */ /* BEMP Interrupt Enable/Status Register */ #define BEMP9 0x0200 /* b9: PIPE9 */ #define BEMP8 0x0100 /* b8: PIPE8 */ #define BEMP7 0x0080 /* b7: PIPE7 */ #define BEMP6 0x0040 /* b6: PIPE6 */ #define BEMP5 0x0020 /* b5: PIPE5 */ #define BEMP4 0x0010 /* b4: PIPE4 */ #define BEMP3 0x0008 /* b3: PIPE3 */ #define BEMP2 0x0004 /* b2: PIPE2 */ #define BEMP1 0x0002 /* b1: PIPE1 */ #define BEMP0 0x0001 /* b0: PIPE0 */ /* SOF Pin Configuration Register */ #define TRNENSEL 0x0100 /* b8: Select transaction enable period */ #define BRDYM 0x0040 /* b6: BRDY clear timing */ #define INTL 0x0020 /* b5: Interrupt sense select */ #define EDGESTS 0x0010 /* b4: */ #define SOFMODE 0x000C /* b3-2: SOF pin select */ #define SOF_125US 0x0008 /* SOF OUT 125us Frame Signal */ #define SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */ #define SOF_DISABLE 0x0000 /* SOF OUT Disable */ /* Interrupt Status Register 0 */ #define VBINT 0x8000 /* b15: VBUS interrupt */ #define RESM 0x4000 /* b14: Resume interrupt */ #define SOFR 0x2000 /* b13: SOF frame update interrupt */ #define DVST 0x1000 /* b12: Device state transition interrupt */ #define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ #define BEMP 0x0400 /* b10: Buffer empty interrupt */ #define NRDY 0x0200 /* b9: Buffer not ready interrupt */ #define BRDY 0x0100 /* b8: Buffer ready interrupt */ #define VBSTS 0x0080 /* b7: VBUS input port */ #define DVSQ 0x0070 /* b6-4: Device state */ #define DS_SPD_CNFG 0x0070 /* Suspend Configured */ #define DS_SPD_ADDR 0x0060 /* Suspend Address */ #define DS_SPD_DFLT 0x0050 /* Suspend Default */ #define DS_SPD_POWR 0x0040 /* Suspend Powered */ #define DS_SUSP 0x0040 /* Suspend */ #define DS_CNFG 0x0030 /* Configured */ #define DS_ADDS 0x0020 /* Address */ #define DS_DFLT 0x0010 /* Default */ #define DS_POWR 0x0000 /* Powered */ #define DVSQS 0x0030 /* b5-4: Device state */ #define VALID 0x0008 /* b3: Setup packet detected flag */ #define CTSQ 0x0007 /* b2-0: Control transfer stage */ #define CS_SQER 0x0006 /* Sequence error */ #define CS_WRND 0x0005 /* Control write nodata status stage */ #define CS_WRSS 0x0004 /* Control write status stage */ #define CS_WRDS 0x0003 /* Control write data stage */ #define CS_RDSS 0x0002 /* Control read status stage */ #define CS_RDDS 0x0001 /* Control read data stage */ #define CS_IDST 0x0000 /* Idle or setup stage */ /* Interrupt Status Register 1 */ #define OVRCR 0x8000 /* b15: Over-current interrupt */ #define BCHG 0x4000 /* b14: USB bus chenge interrupt */ #define DTCH 0x1000 /* b12: Detach sense interrupt */ #define ATTCH 
/* Frame Number Register */ #define OVRN 0x8000 /* b15: Overrun error */ #define CRCE 0x4000 /* b14: Received data error */ #define FRNM 0x07FF /* b10-0: Frame number */ /* Micro Frame Number Register */ #define UFRNM 0x0007 /* b2-0: Micro frame number */ /* Default Control Pipe Maxpacket Size Register */ /* Pipe Maxpacket Size Register */ #define DEVSEL 0xF000 /* b15-12: Device address select */ #define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ /* Default Control Pipe Control Register */ #define BSTS 0x8000 /* b15: Buffer status */ #define SUREQ 0x4000 /* b14: Send USB request */ #define CSCLR 0x2000 /* b13: complete-split status clear */ #define CSSTS 0x1000 /* b12: complete-split status */ #define SUREQCLR 0x0800 /* b11: stop setup request */ #define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ #define SQSET 0x0080 /* b7: Sequence toggle bit set */ #define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ #define PBUSY 0x0020 /* b5: pipe busy */ #define PINGE 0x0010 /* b4: ping enable */ #define CCPL 0x0004 /* b2: Enable control transfer complete */ #define PID 0x0003 /* b1-0: Response PID */ #define PID_STALL11 0x0003 /* STALL */ #define PID_STALL 0x0002 /* STALL */ #define PID_BUF 0x0001 /* BUF */ #define PID_NAK 0x0000 /* NAK */ /* Pipe Window Select Register */ #define PIPENM 0x0007 /* b2-0: Pipe select */ /* Pipe Configuration Register */ #define R8A66597_TYP 0xC000 /* b15-14: Transfer type */ #define R8A66597_ISO 0xC000 /* Isochronous */ #define R8A66597_INT 0x8000 /* Interrupt */ #define R8A66597_BULK 0x4000 /* Bulk */ #define R8A66597_BFRE 0x0400 /* b10: Buffer ready interrupt mode select */ #define R8A66597_DBLB 0x0200 /* b9: Double buffer mode select */ #define R8A66597_CNTMD 0x0100 /* b8: Continuous transfer mode select */ #define R8A66597_SHTNAK 0x0080 /* b7: Transfer end NAK */ #define R8A66597_DIR 0x0010 /* b4: Transfer direction select */ #define R8A66597_EPNUM 0x000F /* b3-0: Endpoint number select */ /* Pipe Buffer Configuration Register */ #define BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */ #define BUFNMB 0x007F /* b6-0: Pipe buffer number */ #define PIPE0BUF 256 #define PIPExBUF 64 /* Pipe Maxpacket Size Register */ #define MXPS 0x07FF /* b10-0: Maxpacket size */ /* Pipe Cycle Configuration Register */ #define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ #define IITV 0x0007 /* b2-0: Isochronous interval */ /* Pipex Control Register */ #define BSTS 0x8000 /* b15: Buffer status */ #define INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ #define CSCLR 0x2000 /* b13: complete-split status clear */ #define CSSTS 0x1000 /* b12: complete-split status */ #define ATREPM 0x0400 /* b10: Auto repeat mode */ #define ACLRM 0x0200 /* b9: Out buffer auto clear mode */ #define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ #define SQSET 0x0080 /* b7: Sequence toggle bit set */ #define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ #define PBUSY 0x0020 /* b5: pipe busy */ #define PID 0x0003 /* b1-0: Response PID */ /* PIPExTRE */ #define TRENB 0x0200 /* b9: Transaction counter enable */ #define TRCLR 0x0100 /* b8: Transaction counter clear */ /* PIPExTRN */ #define TRNCNT 0xFFFF /* b15-0: Transaction counter */ /* DEVADDx */ #define UPPHUB 0x7800 #define HUBPORT 0x0700 #define USBSPD 0x00C0 #define RTPORT 0x0001
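/*
 * Editor's sketch (not in the original header): composing a pipe
 * configuration value from the masks above, here for a bulk pipe with
 * double buffering on a given endpoint number.  The direction bit's
 * polarity is controller-specific; see R8A66597_DIR.
 */
#if 0 /* illustration only */
static u16 example_bulk_pipe_cfg(unsigned int epnum)
{
	/* transfer type, double buffer, direction bit, endpoint number */
	return R8A66597_BULK | R8A66597_DBLB | R8A66597_DIR |
	       (epnum & R8A66597_EPNUM);
}
#endif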
/* SUDMAC registers */ #define CH0CFG 0x00 #define CH1CFG 0x04 #define CH0BA 0x10 #define CH1BA 0x14 #define CH0BBC 0x18 #define CH1BBC 0x1C #define CH0CA 0x20 #define CH1CA 0x24 #define CH0CBC 0x28 #define CH1CBC 0x2C #define CH0DEN 0x30 #define CH1DEN 0x34 #define DSTSCLR 0x38 #define DBUFCTRL 0x3C #define DINTCTRL 0x40 #define DINTSTS 0x44 #define DINTSTSCLR 0x48 #define CH0SHCTRL 0x50 #define CH1SHCTRL 0x54 /* SUDMAC Configuration Registers */ #define SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */ #define RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */ #define LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */ /* DMA Enable Registers */ #define DEN 0x0001 /* b0: DMA Transfer Enable */ /* DMA Status Clear Register */ #define CH1STCLR 0x0002 /* b1: Ch1 DMA Status Clear */ #define CH0STCLR 0x0001 /* b0: Ch0 DMA Status Clear */ /* DMA Buffer Control Register */ #define CH1BUFW 0x0200 /* b9: Ch1 DMA Buffer Data Transfer Enable */ #define CH0BUFW 0x0100 /* b8: Ch0 DMA Buffer Data Transfer Enable */ #define CH1BUFS 0x0002 /* b1: Ch1 DMA Buffer Data Status */ #define CH0BUFS 0x0001 /* b0: Ch0 DMA Buffer Data Status */ /* DMA Interrupt Control Register */ #define CH1ERRE 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Enable */ #define CH0ERRE 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Enable */ #define CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */ #define CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */ /* DMA Interrupt Status Register */ #define CH1ERRS 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Status */ #define CH0ERRS 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Status */ #define CH1ENDS 0x0002 /* b1: Ch1 DMA Transfer End Int Status */ #define CH0ENDS 0x0001 /* b0: Ch0 DMA Transfer End Int Status */ /* DMA Interrupt Status Clear Register */ #define CH1ERRC 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Stat Clear */ #define CH0ERRC 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Stat Clear */ #define CH1ENDC 0x0002 /* b1: Ch1 DMA Transfer End Int Stat Clear */ #define CH0ENDC 0x0001 /* b0: Ch0 DMA Transfer End Int Stat Clear */ #endif /* __LINUX_USB_R8A66597_H */ usb/pd_bdo.h 0000644 00000001053 14722070374 0006742 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2015-2017 Google, Inc */ #ifndef __LINUX_USB_PD_BDO_H #define __LINUX_USB_PD_BDO_H /* BDO : BIST Data Object */ #define BDO_MODE_RECV (0 << 28) #define BDO_MODE_TRANSMIT (1 << 28) #define BDO_MODE_COUNTERS (2 << 28) #define BDO_MODE_CARRIER0 (3 << 28) #define BDO_MODE_CARRIER1 (4 << 28) #define BDO_MODE_CARRIER2 (5 << 28) #define BDO_MODE_CARRIER3 (6 << 28) #define BDO_MODE_EYE (7 << 28) #define BDO_MODE_TESTDATA (8 << 28) #define BDO_MODE_MASK(mode) ((mode) & 0xf0000000) #endif usb/c67x00.h 0000644 00000003513 14722070374 0006445 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * usb_c67x00.h: platform definitions for the Cypress C67X00 USB chip * * Copyright (C) 2006-2008 Barco N.V. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details.
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301 USA. */ #ifndef _LINUX_USB_C67X00_H #define _LINUX_USB_C67X00_H /* SIE configuration */ #define C67X00_SIE_UNUSED 0 #define C67X00_SIE_HOST 1 #define C67X00_SIE_PERIPHERAL_A 2 /* peripheral on A port */ #define C67X00_SIE_PERIPHERAL_B 3 /* peripheral on B port */ #define c67x00_sie_config(config, n) (((config)>>(4*(n)))&0x3) #define C67X00_SIE1_UNUSED (C67X00_SIE_UNUSED << 0) #define C67X00_SIE1_HOST (C67X00_SIE_HOST << 0) #define C67X00_SIE1_PERIPHERAL_A (C67X00_SIE_PERIPHERAL_A << 0) #define C67X00_SIE1_PERIPHERAL_B (C67X00_SIE_PERIPHERAL_B << 0) #define C67X00_SIE2_UNUSED (C67X00_SIE_UNUSED << 4) #define C67X00_SIE2_HOST (C67X00_SIE_HOST << 4) #define C67X00_SIE2_PERIPHERAL_A (C67X00_SIE_PERIPHERAL_A << 4) #define C67X00_SIE2_PERIPHERAL_B (C67X00_SIE_PERIPHERAL_B << 4) struct c67x00_platform_data { int sie_config; /* SIEs config (C67X00_SIEx_*) */ unsigned long hpi_regstep; /* Step between HPI registers */ }; #endif /* _LINUX_USB_C67X00_H */ usb/audio-v3.h 0000644 00000033677 14722070374 0007163 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2017 Ruslan Bilovol <ruslan.bilovol@gmail.com> * * This file holds USB constants and structures defined * by the USB DEVICE CLASS DEFINITION FOR AUDIO DEVICES Release 3.0. */ #ifndef __LINUX_USB_AUDIO_V3_H #define __LINUX_USB_AUDIO_V3_H #include <linux/types.h> /* * v1.0, v2.0 and v3.0 of this standard have many things in common. For the rest * of the definitions, please refer to audio.h and audio-v2.h */ /* All High Capability descriptors have these 2 fields at the beginning */ struct uac3_hc_descriptor_header { __le16 wLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __le16 wDescriptorID; } __attribute__ ((packed)); /* 4.3.1 CLUSTER DESCRIPTOR HEADER */ struct uac3_cluster_header_descriptor { __le16 wLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __le16 wDescriptorID; __u8 bNrChannels; } __attribute__ ((packed)); /* 4.3.2.1 SEGMENTS */ struct uac3_cluster_segment_descriptor { __le16 wLength; __u8 bSegmentType; /* __u8[0]; segment-specific data */ } __attribute__ ((packed)); /* 4.3.2.1.1 END SEGMENT */ struct uac3_cluster_end_segment_descriptor { __le16 wLength; __u8 bSegmentType; /* Constant END_SEGMENT */ } __attribute__ ((packed)); /* 4.3.2.1.3.1 INFORMATION SEGMENT */ struct uac3_cluster_information_segment_descriptor { __le16 wLength; __u8 bSegmentType; __u8 bChPurpose; __u8 bChRelationship; __u8 bChGroupID; } __attribute__ ((packed)); /* 4.5.2 CLASS-SPECIFIC AC INTERFACE DESCRIPTOR */ struct uac3_ac_header_descriptor { __u8 bLength; /* 10 */ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ __u8 bDescriptorSubtype; /* HEADER descriptor subtype */ __u8 bCategory; /* includes Clock Source, Unit, Terminal, and Power Domain desc. 
*/ __le16 wTotalLength; __le32 bmControls; } __attribute__ ((packed)); /* 4.5.2.1 INPUT TERMINAL DESCRIPTOR */ struct uac3_input_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bCSourceID; __le32 bmControls; __le16 wClusterDescrID; __le16 wExTerminalDescrID; __le16 wConnectorsDescrID; __le16 wTerminalDescrStr; } __attribute__((packed)); /* 4.5.2.2 OUTPUT TERMINAL DESCRIPTOR */ struct uac3_output_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bSourceID; __u8 bCSourceID; __le32 bmControls; __le16 wExTerminalDescrID; __le16 wConnectorsDescrID; __le16 wTerminalDescrStr; } __attribute__((packed)); /* 4.5.2.7 FEATURE UNIT DESCRIPTOR */ struct uac3_feature_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bSourceID; /* bmaControls is actually u32, * but u8 is needed for the hybrid parser */ __u8 bmaControls[0]; /* variable length */ /* wFeatureDescrStr omitted */ } __attribute__((packed)); #define UAC3_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 4) /* As above, but more useful for defining your own descriptors */ #define DECLARE_UAC3_FEATURE_UNIT_DESCRIPTOR(ch) \ struct uac3_feature_unit_descriptor_##ch { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ __u8 bUnitID; \ __u8 bSourceID; \ __le32 bmaControls[ch + 1]; \ __le16 wFeatureDescrStr; \ } __attribute__ ((packed)) /* 4.5.2.12 CLOCK SOURCE DESCRIPTOR */ struct uac3_clock_source_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bmAttributes; __le32 bmControls; __u8 bReferenceTerminal; __le16 wClockSourceStr; } __attribute__((packed)); /* bmAttribute fields */ #define UAC3_CLOCK_SOURCE_TYPE_EXT 0x0 #define UAC3_CLOCK_SOURCE_TYPE_INT 0x1 #define UAC3_CLOCK_SOURCE_ASYNC (0 << 2) #define UAC3_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 2) /* 4.5.2.13 CLOCK SELECTOR DESCRIPTOR */ struct uac3_clock_selector_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bNrInPins; __u8 baCSourceID[]; /* bmControls and wCSelectorDescrStr omitted */ } __attribute__((packed)); /* 4.5.2.14 CLOCK MULTIPLIER DESCRIPTOR */ struct uac3_clock_multiplier_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bCSourceID; __le32 bmControls; __le16 wCMultiplierDescrStr; } __attribute__((packed)); /* 4.5.2.15 POWER DOMAIN DESCRIPTOR */ struct uac3_power_domain_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bPowerDomainID; __le16 waRecoveryTime1; __le16 waRecoveryTime2; __u8 bNrEntities; __u8 baEntityID[]; /* wPDomainDescrStr omitted */ } __attribute__((packed)); /* As above, but more useful for defining your own descriptors */ #define DECLARE_UAC3_POWER_DOMAIN_DESCRIPTOR(n) \ struct uac3_power_domain_descriptor_##n { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ __u8 bPowerDomainID; \ __le16 waRecoveryTime1; \ __le16 waRecoveryTime2; \ __u8 bNrEntities; \ __u8 baEntityID[n]; \ __le16 wPDomainDescrStr; \ } __attribute__ ((packed)) /* 4.7.2 CLASS-SPECIFIC AS INTERFACE DESCRIPTOR */ struct uac3_as_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalLink; __le32 bmControls; __le16 wClusterDescrID; __le64 bmFormats; __u8 bSubslotSize; __u8 bBitResolution; __le16 bmAuxProtocols; __u8 bControlSize; } __attribute__((packed));
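/*
 * Editor's sketch (not in the original header): instantiating a fixed-size
 * feature-unit descriptor for a stereo (2-channel) function with the macro
 * above, and checking bLength against the size helper: 7 + (2 + 1) * 4 = 19.
 * The descriptor type/subtype constants used here are defined further below.
 */
#if 0 /* illustration only */
DECLARE_UAC3_FEATURE_UNIT_DESCRIPTOR(2);

static struct uac3_feature_unit_descriptor_2 example_fu = {
	.bLength		= UAC3_DT_FEATURE_UNIT_SIZE(2),	/* 19 */
	.bDescriptorType	= UAC3_CS_INTERFACE,
	.bDescriptorSubtype	= UAC3_FEATURE_UNIT,
	.bUnitID		= 2,	/* arbitrary example IDs */
	.bSourceID		= 1,
};
#endif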
#define UAC3_FORMAT_TYPE_I_RAW_DATA (1 << 6) /* 4.8.1.2 CLASS-SPECIFIC AS ISOCHRONOUS AUDIO DATA ENDPOINT DESCRIPTOR */ struct uac3_iso_endpoint_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __le32 bmControls; __u8 bLockDelayUnits; __le16 wLockDelay; } __attribute__((packed)); /* 5.2.1.6.1 INSERTION CONTROL PARAMETER BLOCK */ struct uac3_insertion_ctl_blk { __u8 bSize; __u8 bmConInserted; } __attribute__ ((packed)); /* 6.1 INTERRUPT DATA MESSAGE */ struct uac3_interrupt_data_msg { __u8 bInfo; __u8 bSourceType; __le16 wValue; __le16 wIndex; } __attribute__((packed)); /* A.2 AUDIO FUNCTION SUBCLASS CODES */ #define UAC3_FUNCTION_SUBCLASS_UNDEFINED 0x00 #define UAC3_FUNCTION_SUBCLASS_FULL_ADC_3_0 0x01 /* BADD profiles */ #define UAC3_FUNCTION_SUBCLASS_GENERIC_IO 0x20 #define UAC3_FUNCTION_SUBCLASS_HEADPHONE 0x21 #define UAC3_FUNCTION_SUBCLASS_SPEAKER 0x22 #define UAC3_FUNCTION_SUBCLASS_MICROPHONE 0x23 #define UAC3_FUNCTION_SUBCLASS_HEADSET 0x24 #define UAC3_FUNCTION_SUBCLASS_HEADSET_ADAPTER 0x25 #define UAC3_FUNCTION_SUBCLASS_SPEAKERPHONE 0x26 /* A.7 AUDIO FUNCTION CATEGORY CODES */ #define UAC3_FUNCTION_SUBCLASS_UNDEFINED 0x00 #define UAC3_FUNCTION_DESKTOP_SPEAKER 0x01 #define UAC3_FUNCTION_HOME_THEATER 0x02 #define UAC3_FUNCTION_MICROPHONE 0x03 #define UAC3_FUNCTION_HEADSET 0x04 #define UAC3_FUNCTION_TELEPHONE 0x05 #define UAC3_FUNCTION_CONVERTER 0x06 #define UAC3_FUNCTION_SOUND_RECORDER 0x07 #define UAC3_FUNCTION_IO_BOX 0x08 #define UAC3_FUNCTION_MUSICAL_INSTRUMENT 0x09 #define UAC3_FUNCTION_PRO_AUDIO 0x0a #define UAC3_FUNCTION_AUDIO_VIDEO 0x0b #define UAC3_FUNCTION_CONTROL_PANEL 0x0c #define UAC3_FUNCTION_HEADPHONE 0x0d #define UAC3_FUNCTION_GENERIC_SPEAKER 0x0e #define UAC3_FUNCTION_HEADSET_ADAPTER 0x0f #define UAC3_FUNCTION_SPEAKERPHONE 0x10 #define UAC3_FUNCTION_OTHER 0xff /* A.8 AUDIO CLASS-SPECIFIC DESCRIPTOR TYPES */ #define UAC3_CS_UNDEFINED 0x20 #define UAC3_CS_DEVICE 0x21 #define UAC3_CS_CONFIGURATION 0x22 #define UAC3_CS_STRING 0x23 #define UAC3_CS_INTERFACE 0x24 #define UAC3_CS_ENDPOINT 0x25 #define UAC3_CS_CLUSTER 0x26 /* A.10 CLUSTER DESCRIPTOR SEGMENT TYPES */ #define UAC3_SEGMENT_UNDEFINED 0x00 #define UAC3_CLUSTER_DESCRIPTION 0x01 #define UAC3_CLUSTER_VENDOR_DEFINED 0x1F #define UAC3_CHANNEL_INFORMATION 0x20 #define UAC3_CHANNEL_AMBISONIC 0x21 #define UAC3_CHANNEL_DESCRIPTION 0x22 #define UAC3_CHANNEL_VENDOR_DEFINED 0xFE #define UAC3_END_SEGMENT 0xFF /* A.11 CHANNEL PURPOSE DEFINITIONS */ #define UAC3_PURPOSE_UNDEFINED 0x00 #define UAC3_PURPOSE_GENERIC_AUDIO 0x01 #define UAC3_PURPOSE_VOICE 0x02 #define UAC3_PURPOSE_SPEECH 0x03 #define UAC3_PURPOSE_AMBIENT 0x04 #define UAC3_PURPOSE_REFERENCE 0x05 #define UAC3_PURPOSE_ULTRASONIC 0x06 #define UAC3_PURPOSE_VIBROKINETIC 0x07 #define UAC3_PURPOSE_NON_AUDIO 0xFF /* A.12 CHANNEL RELATIONSHIP DEFINITIONS */ #define UAC3_CH_RELATIONSHIP_UNDEFINED 0x00 #define UAC3_CH_MONO 0x01 #define UAC3_CH_LEFT 0x02 #define UAC3_CH_RIGHT 0x03 #define UAC3_CH_ARRAY 0x04 #define UAC3_CH_PATTERN_X 0x20 #define UAC3_CH_PATTERN_Y 0x21 #define UAC3_CH_PATTERN_A 0x22 #define UAC3_CH_PATTERN_B 0x23 #define UAC3_CH_PATTERN_M 0x24 #define UAC3_CH_PATTERN_S 0x25 #define UAC3_CH_FRONT_LEFT 0x80 #define UAC3_CH_FRONT_RIGHT 0x81 #define UAC3_CH_FRONT_CENTER 0x82 #define UAC3_CH_FRONT_LEFT_OF_CENTER 0x83 #define UAC3_CH_FRONT_RIGHT_OF_CENTER 0x84 #define UAC3_CH_FRONT_WIDE_LEFT 0x85 #define UAC3_CH_FRONT_WIDE_RIGHT 0x86 #define UAC3_CH_SIDE_LEFT 0x87 #define UAC3_CH_SIDE_RIGHT 0x88
#define UAC3_CH_SURROUND_ARRAY_LEFT 0x89 #define UAC3_CH_SURROUND_ARRAY_RIGHT 0x8A #define UAC3_CH_BACK_LEFT 0x8B #define UAC3_CH_BACK_RIGHT 0x8C #define UAC3_CH_BACK_CENTER 0x8D #define UAC3_CH_BACK_LEFT_OF_CENTER 0x8E #define UAC3_CH_BACK_RIGHT_OF_CENTER 0x8F #define UAC3_CH_BACK_WIDE_LEFT 0x90 #define UAC3_CH_BACK_WIDE_RIGHT 0x91 #define UAC3_CH_TOP_CENTER 0x92 #define UAC3_CH_TOP_FRONT_LEFT 0x93 #define UAC3_CH_TOP_FRONT_RIGHT 0x94 #define UAC3_CH_TOP_FRONT_CENTER 0x95 #define UAC3_CH_TOP_FRONT_LOC 0x96 #define UAC3_CH_TOP_FRONT_ROC 0x97 #define UAC3_CH_TOP_FRONT_WIDE_LEFT 0x98 #define UAC3_CH_TOP_FRONT_WIDE_RIGHT 0x99 #define UAC3_CH_TOP_SIDE_LEFT 0x9A #define UAC3_CH_TOP_SIDE_RIGHT 0x9B #define UAC3_CH_TOP_SURR_ARRAY_LEFT 0x9C #define UAC3_CH_TOP_SURR_ARRAY_RIGHT 0x9D #define UAC3_CH_TOP_BACK_LEFT 0x9E #define UAC3_CH_TOP_BACK_RIGHT 0x9F #define UAC3_CH_TOP_BACK_CENTER 0xA0 #define UAC3_CH_TOP_BACK_LOC 0xA1 #define UAC3_CH_TOP_BACK_ROC 0xA2 #define UAC3_CH_TOP_BACK_WIDE_LEFT 0xA3 #define UAC3_CH_TOP_BACK_WIDE_RIGHT 0xA4 #define UAC3_CH_BOTTOM_CENTER 0xA5 #define UAC3_CH_BOTTOM_FRONT_LEFT 0xA6 #define UAC3_CH_BOTTOM_FRONT_RIGHT 0xA7 #define UAC3_CH_BOTTOM_FRONT_CENTER 0xA8 #define UAC3_CH_BOTTOM_FRONT_LOC 0xA9 #define UAC3_CH_BOTTOM_FRONT_ROC 0xAA #define UAC3_CH_BOTTOM_FRONT_WIDE_LEFT 0xAB #define UAC3_CH_BOTTOM_FRONT_WIDE_RIGHT 0xAC #define UAC3_CH_BOTTOM_SIDE_LEFT 0xAD #define UAC3_CH_BOTTOM_SIDE_RIGHT 0xAE #define UAC3_CH_BOTTOM_SURR_ARRAY_LEFT 0xAF #define UAC3_CH_BOTTOM_SURR_ARRAY_RIGHT 0xB0 #define UAC3_CH_BOTTOM_BACK_LEFT 0xB1 #define UAC3_CH_BOTTOM_BACK_RIGHT 0xB2 #define UAC3_CH_BOTTOM_BACK_CENTER 0xB3 #define UAC3_CH_BOTTOM_BACK_LOC 0xB4 #define UAC3_CH_BOTTOM_BACK_ROC 0xB5 #define UAC3_CH_BOTTOM_BACK_WIDE_LEFT 0xB6 #define UAC3_CH_BOTTOM_BACK_WIDE_RIGHT 0xB7 #define UAC3_CH_LOW_FREQUENCY_EFFECTS 0xB8 #define UAC3_CH_LFE_LEFT 0xB9 #define UAC3_CH_LFE_RIGHT 0xBA #define UAC3_CH_HEADPHONE_LEFT 0xBB #define UAC3_CH_HEADPHONE_RIGHT 0xBC /* A.15 AUDIO CLASS-SPECIFIC AC INTERFACE DESCRIPTOR SUBTYPES */ /* see audio.h for the rest, which is identical to v1 */ #define UAC3_EXTENDED_TERMINAL 0x04 #define UAC3_MIXER_UNIT 0x05 #define UAC3_SELECTOR_UNIT 0x06 #define UAC3_FEATURE_UNIT 0x07 #define UAC3_EFFECT_UNIT 0x08 #define UAC3_PROCESSING_UNIT 0x09 #define UAC3_EXTENSION_UNIT 0x0a #define UAC3_CLOCK_SOURCE 0x0b #define UAC3_CLOCK_SELECTOR 0x0c #define UAC3_CLOCK_MULTIPLIER 0x0d #define UAC3_SAMPLE_RATE_CONVERTER 0x0e #define UAC3_CONNECTORS 0x0f #define UAC3_POWER_DOMAIN 0x10 /* A.20 PROCESSING UNIT PROCESS TYPES */ #define UAC3_PROCESS_UNDEFINED 0x00 #define UAC3_PROCESS_UP_DOWNMIX 0x01 #define UAC3_PROCESS_STEREO_EXTENDER 0x02 #define UAC3_PROCESS_MULTI_FUNCTION 0x03 /* A.22 AUDIO CLASS-SPECIFIC REQUEST CODES */ /* see audio-v2.h for the rest, which is identical to v2 */ #define UAC3_CS_REQ_INTEN 0x04 #define UAC3_CS_REQ_STRING 0x05 #define UAC3_CS_REQ_HIGH_CAPABILITY_DESCRIPTOR 0x06 /* A.23.1 AUDIOCONTROL INTERFACE CONTROL SELECTORS */ #define UAC3_AC_CONTROL_UNDEFINED 0x00 #define UAC3_AC_ACTIVE_INTERFACE_CONTROL 0x01 #define UAC3_AC_POWER_DOMAIN_CONTROL 0x02 /* A.23.5 TERMINAL CONTROL SELECTORS */ #define UAC3_TE_UNDEFINED 0x00 #define UAC3_TE_INSERTION 0x01 #define UAC3_TE_OVERLOAD 0x02 #define UAC3_TE_UNDERFLOW 0x03 #define UAC3_TE_OVERFLOW 0x04 #define UAC3_TE_LATENCY 0x05 /* A.23.10 PROCESSING UNITS CONTROL SELECTORS */ /* Up/Down Mixer */ #define UAC3_UD_MODE_SELECT 0x01 /* Stereo Extender */ #define UAC3_EXT_WIDTH_CONTROL 0x01 /* BADD predefined Unit/Terminal values */
#define UAC3_BADD_IT_ID1 1 /* Input Terminal ID1: bTerminalID = 1 */ #define UAC3_BADD_FU_ID2 2 /* Feature Unit ID2: bUnitID = 2 */ #define UAC3_BADD_OT_ID3 3 /* Output Terminal ID3: bTerminalID = 3 */ #define UAC3_BADD_IT_ID4 4 /* Input Terminal ID4: bTerminalID = 4 */ #define UAC3_BADD_FU_ID5 5 /* Feature Unit ID5: bUnitID = 5 */ #define UAC3_BADD_OT_ID6 6 /* Output Terminal ID6: bTerminalID = 6 */ #define UAC3_BADD_FU_ID7 7 /* Feature Unit ID7: bUnitID = 7 */ #define UAC3_BADD_MU_ID8 8 /* Mixer Unit ID8: bUnitID = 8 */ #define UAC3_BADD_CS_ID9 9 /* Clock Source Entity ID9: bClockID = 9 */ #define UAC3_BADD_PD_ID10 10 /* Power Domain ID10: bPowerDomainID = 10 */ #define UAC3_BADD_PD_ID11 11 /* Power Domain ID11: bPowerDomainID = 11 */ /* BADD wMaxPacketSize of AS endpoints */ #define UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_16 0x0060 #define UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_16 0x0062 #define UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_24 0x0090 #define UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_24 0x0093 #define UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_16 0x00C0 #define UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_16 0x00C4 #define UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_24 0x0120 #define UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_24 0x0126 /* BADD sample rate is always fixed to 48kHz */ #define UAC3_BADD_SAMPLING_RATE 48000 /* BADD power domains recovery times in 50us increments */ #define UAC3_BADD_PD_RECOVER_D1D0 0x0258 /* 30ms */ #define UAC3_BADD_PD_RECOVER_D2D0 0x1770 /* 300ms */ #endif /* __LINUX_USB_AUDIO_V3_H */ usb/input.h 0000644 00000001314 14722070374 0006652 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2005 Dmitry Torokhov * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #ifndef __LINUX_USB_INPUT_H #define __LINUX_USB_INPUT_H #include <linux/usb.h> #include <linux/input.h> #include <asm/byteorder.h> static inline void usb_to_input_id(const struct usb_device *dev, struct input_id *id) { id->bustype = BUS_USB; id->vendor = le16_to_cpu(dev->descriptor.idVendor); id->product = le16_to_cpu(dev->descriptor.idProduct); id->version = le16_to_cpu(dev->descriptor.bcdDevice); } #endif /* __LINUX_USB_INPUT_H */ usb/renesas_usbhs.h 0000644 00000010342 14722070374 0010360 0 ustar 00 // SPDX-License-Identifier: GPL-1.0+ /* * Renesas USB * * Copyright (C) 2011 Renesas Solutions Corp. * Copyright (C) 2019 Renesas Electronics Corporation * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #ifndef RENESAS_USB_H #define RENESAS_USB_H #include <linux/notifier.h> #include <linux/platform_device.h> #include <linux/usb/ch9.h> /* * module type * * it will be the return value from get_id */ enum { USBHS_HOST = 0, USBHS_GADGET, USBHS_MAX, }; /* * callback functions for platform * * These functions are called from the driver for the platform */ struct renesas_usbhs_platform_callback { /* * option: * * Hardware init function for platform. * It is called when the driver is probed.
*/ int (*hardware_init)(struct platform_device *pdev); /* * option: * * Hardware exit function for platform. * It is called when the driver is removed. */ int (*hardware_exit)(struct platform_device *pdev); /* * option: * * for board specific clock control */ int (*power_ctrl)(struct platform_device *pdev, void __iomem *base, int enable); /* * option: * * Phy reset for platform */ int (*phy_reset)(struct platform_device *pdev); /* * get USB ID function * - USBHS_HOST * - USBHS_GADGET */ int (*get_id)(struct platform_device *pdev); /* * get VBUS status function. */ int (*get_vbus)(struct platform_device *pdev); /* * option: * * VBUS control is needed for Host */ int (*set_vbus)(struct platform_device *pdev, int enable); /* * option: * extcon notifier to set host/peripheral mode. */ int (*notifier)(struct notifier_block *nb, unsigned long event, void *data); }; /* * parameters for renesas usbhs * * some registers need USB chip-specific parameters. * This struct provides them to the driver. */ struct renesas_usbhs_driver_pipe_config { u8 type; /* USB_ENDPOINT_XFER_xxx */ u16 bufsize; u8 bufnum; bool double_buf; }; #define RENESAS_USBHS_PIPE(_type, _size, _num, _double_buf) { \ .type = (_type), \ .bufsize = (_size), \ .bufnum = (_num), \ .double_buf = (_double_buf), \ } struct renesas_usbhs_driver_param { /* * pipe settings */ struct renesas_usbhs_driver_pipe_config *pipe_configs; int pipe_size; /* pipe_configs array size */ /* * option: * * for BUSWAIT :: BWAIT * see * renesas_usbhs/common.c :: usbhsc_set_buswait() * */ int buswait_bwait; /* * option: * * delay time from notify_hotplug callback */ int detection_delay; /* msec */ /* * option: * * dma id for dmaengine * The data transfer direction on D0FIFO/D1FIFO should be * fixed to keep consistency. * So the platform id settings will be: * .d0_tx_id = xx_TX, * .d1_rx_id = xx_RX, * or * .d1_tx_id = xx_TX, * .d0_rx_id = xx_RX, */ int d0_tx_id; int d0_rx_id; int d1_tx_id; int d1_rx_id; int d2_tx_id; int d2_rx_id; int d3_tx_id; int d3_rx_id; /* * option: * * pio <--> dma border. */ int pio_dma_border; /* default is 64 bytes */ u32 enable_gpio; /* * option: */ u32 has_usb_dmac:1; /* for USB-DMAC */ u32 runtime_pwctrl:1; u32 has_cnen:1; u32 cfifo_byte_addr:1; /* CFIFO is byte addressable */ #define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */ u32 multi_clks:1; u32 has_new_pipe_configs:1; }; /* * option: * * platform information for renesas_usbhs driver. */ struct renesas_usbhs_platform_info { /* * option: * * the platform sets these functions before * calling platform_add_devices() if needed */ struct renesas_usbhs_platform_callback platform_callback; /* * option: * * the driver uses these params for some registers */ struct renesas_usbhs_driver_param driver_param; }; /* * macro for platform */ #define renesas_usbhs_get_info(pdev)\ ((struct renesas_usbhs_platform_info *)(pdev)->dev.platform_data) #endif /* RENESAS_USB_H */ usb/phy.h 0000644 00000020363 14722070374 0006320 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * USB PHY defines * * These APIs may be used between USB controllers. USB device drivers * (for either host or peripheral roles) don't use these calls; they * continue to use just usb_device and usb_gadget.
*/ #ifndef __LINUX_USB_PHY_H #define __LINUX_USB_PHY_H #include <linux/extcon.h> #include <linux/notifier.h> #include <linux/usb.h> #include <uapi/linux/usb/charger.h> enum usb_phy_interface { USBPHY_INTERFACE_MODE_UNKNOWN, USBPHY_INTERFACE_MODE_UTMI, USBPHY_INTERFACE_MODE_UTMIW, USBPHY_INTERFACE_MODE_ULPI, USBPHY_INTERFACE_MODE_SERIAL, USBPHY_INTERFACE_MODE_HSIC, }; enum usb_phy_events { USB_EVENT_NONE, /* no events or cable disconnected */ USB_EVENT_VBUS, /* vbus valid event */ USB_EVENT_ID, /* id was grounded */ USB_EVENT_CHARGER, /* usb dedicated charger */ USB_EVENT_ENUMERATED, /* gadget driver enumerated */ }; /* associate a type with PHY */ enum usb_phy_type { USB_PHY_TYPE_UNDEFINED, USB_PHY_TYPE_USB2, USB_PHY_TYPE_USB3, }; /* OTG defines lots of enumeration states before device reset */ enum usb_otg_state { OTG_STATE_UNDEFINED = 0, /* single-role peripheral, and dual-role default-b */ OTG_STATE_B_IDLE, OTG_STATE_B_SRP_INIT, OTG_STATE_B_PERIPHERAL, /* extra dual-role default-b states */ OTG_STATE_B_WAIT_ACON, OTG_STATE_B_HOST, /* dual-role default-a */ OTG_STATE_A_IDLE, OTG_STATE_A_WAIT_VRISE, OTG_STATE_A_WAIT_BCON, OTG_STATE_A_HOST, OTG_STATE_A_SUSPEND, OTG_STATE_A_PERIPHERAL, OTG_STATE_A_WAIT_VFALL, OTG_STATE_A_VBUS_ERR, }; struct usb_phy; struct usb_otg; /* for phys connected thru an ULPI interface, the user must * provide access ops */ struct usb_phy_io_ops { int (*read)(struct usb_phy *x, u32 reg); int (*write)(struct usb_phy *x, u32 val, u32 reg); }; struct usb_charger_current { unsigned int sdp_min; unsigned int sdp_max; unsigned int dcp_min; unsigned int dcp_max; unsigned int cdp_min; unsigned int cdp_max; unsigned int aca_min; unsigned int aca_max; }; struct usb_phy { struct device *dev; const char *label; unsigned int flags; enum usb_phy_type type; enum usb_phy_events last_event; struct usb_otg *otg; struct device *io_dev; struct usb_phy_io_ops *io_ops; void __iomem *io_priv; /* to support extcon device */ struct extcon_dev *edev; struct extcon_dev *id_edev; struct notifier_block vbus_nb; struct notifier_block id_nb; struct notifier_block type_nb; /* Support USB charger */ enum usb_charger_type chg_type; enum usb_charger_state chg_state; struct usb_charger_current chg_cur; struct work_struct chg_work; /* for notification of usb_phy_events */ struct atomic_notifier_head notifier; /* to pass extra port status to the root hub */ u16 port_status; u16 port_change; /* to support controllers that have multiple phys */ struct list_head head; /* initialize/shutdown the phy */ int (*init)(struct usb_phy *x); void (*shutdown)(struct usb_phy *x); /* enable/disable VBUS */ int (*set_vbus)(struct usb_phy *x, int on); /* effective for B devices, ignored for A-peripheral */ int (*set_power)(struct usb_phy *x, unsigned mA); /* Set phy into suspend mode */ int (*set_suspend)(struct usb_phy *x, int suspend); /* * Set wakeup enable for PHY, in that case, the PHY can be * woken up from suspend status due to external events, * like vbus change, dp/dm change and id. */ int (*set_wakeup)(struct usb_phy *x, bool enabled); /* notify phy connect status change */ int (*notify_connect)(struct usb_phy *x, enum usb_device_speed speed); int (*notify_disconnect)(struct usb_phy *x, enum usb_device_speed speed); /* * Charger detection method can be implemented if you need to * manually detect the charger type. 
*/ enum usb_charger_type (*charger_detect)(struct usb_phy *x); }; /* for board-specific init logic */ extern int usb_add_phy(struct usb_phy *, enum usb_phy_type type); extern int usb_add_phy_dev(struct usb_phy *); extern void usb_remove_phy(struct usb_phy *); /* helpers for direct access thru low-level io interface */ static inline int usb_phy_io_read(struct usb_phy *x, u32 reg) { if (x && x->io_ops && x->io_ops->read) return x->io_ops->read(x, reg); return -EINVAL; } static inline int usb_phy_io_write(struct usb_phy *x, u32 val, u32 reg) { if (x && x->io_ops && x->io_ops->write) return x->io_ops->write(x, val, reg); return -EINVAL; } static inline int usb_phy_init(struct usb_phy *x) { if (x && x->init) return x->init(x); return 0; } static inline void usb_phy_shutdown(struct usb_phy *x) { if (x && x->shutdown) x->shutdown(x); } static inline int usb_phy_vbus_on(struct usb_phy *x) { if (!x || !x->set_vbus) return 0; return x->set_vbus(x, true); } static inline int usb_phy_vbus_off(struct usb_phy *x) { if (!x || !x->set_vbus) return 0; return x->set_vbus(x, false); } /* for usb host and peripheral controller drivers */ #if IS_ENABLED(CONFIG_USB_PHY) extern struct usb_phy *usb_get_phy(enum usb_phy_type type); extern struct usb_phy *devm_usb_get_phy(struct device *dev, enum usb_phy_type type); extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev, const char *phandle, u8 index); extern struct usb_phy *devm_usb_get_phy_by_node(struct device *dev, struct device_node *node, struct notifier_block *nb); extern void usb_put_phy(struct usb_phy *); extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); extern void usb_phy_set_event(struct usb_phy *x, unsigned long event); extern void usb_phy_set_charger_current(struct usb_phy *usb_phy, unsigned int mA); extern void usb_phy_get_charger_current(struct usb_phy *usb_phy, unsigned int *min, unsigned int *max); extern void usb_phy_set_charger_state(struct usb_phy *usb_phy, enum usb_charger_state state); #else static inline struct usb_phy *usb_get_phy(enum usb_phy_type type) { return ERR_PTR(-ENXIO); } static inline struct usb_phy *devm_usb_get_phy(struct device *dev, enum usb_phy_type type) { return ERR_PTR(-ENXIO); } static inline struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev, const char *phandle, u8 index) { return ERR_PTR(-ENXIO); } static inline struct usb_phy *devm_usb_get_phy_by_node(struct device *dev, struct device_node *node, struct notifier_block *nb) { return ERR_PTR(-ENXIO); } static inline void usb_put_phy(struct usb_phy *x) { } static inline void devm_usb_put_phy(struct device *dev, struct usb_phy *x) { } static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event) { } static inline void usb_phy_set_charger_current(struct usb_phy *usb_phy, unsigned int mA) { } static inline void usb_phy_get_charger_current(struct usb_phy *usb_phy, unsigned int *min, unsigned int *max) { } static inline void usb_phy_set_charger_state(struct usb_phy *usb_phy, enum usb_charger_state state) { } #endif static inline int usb_phy_set_power(struct usb_phy *x, unsigned mA) { if (!x) return 0; usb_phy_set_charger_current(x, mA); if (x->set_power) return x->set_power(x, mA); return 0; } /* Context: can sleep */ static inline int usb_phy_set_suspend(struct usb_phy *x, int suspend) { if (x && x->set_suspend != NULL) return x->set_suspend(x, suspend); else return 0; } static inline int usb_phy_set_wakeup(struct usb_phy *x, bool enabled) { if (x && x->set_wakeup) return x->set_wakeup(x, enabled); else return 0; }
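/*
 * Editor's sketch (not in the original header): a typical PHY acquisition
 * and init sequence in a controller driver's probe path, using the helpers
 * declared above.  example_probe_phy() is hypothetical.
 */
#if 0 /* illustration only */
static int example_probe_phy(struct device *dev)
{
	struct usb_phy *phy;

	phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
	if (IS_ERR(phy))
		return PTR_ERR(phy); /* e.g. -ENXIO without CONFIG_USB_PHY */

	return usb_phy_init(phy);
}
#endif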
static inline int usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed) { if (x && x->notify_connect) return x->notify_connect(x, speed); else return 0; } static inline int usb_phy_notify_disconnect(struct usb_phy *x, enum usb_device_speed speed) { if (x && x->notify_disconnect) return x->notify_disconnect(x, speed); else return 0; } /* notifiers */ static inline int usb_register_notifier(struct usb_phy *x, struct notifier_block *nb) { return atomic_notifier_chain_register(&x->notifier, nb); } static inline void usb_unregister_notifier(struct usb_phy *x, struct notifier_block *nb) { atomic_notifier_chain_unregister(&x->notifier, nb); } static inline const char *usb_phy_type_string(enum usb_phy_type type) { switch (type) { case USB_PHY_TYPE_USB2: return "USB2 PHY"; case USB_PHY_TYPE_USB3: return "USB3 PHY"; default: return "UNKNOWN PHY TYPE"; } } #endif /* __LINUX_USB_PHY_H */ usb/tegra_usb_phy.h 0000644 00000004672 14722070374 0010360 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #ifndef __TEGRA_USB_PHY_H #define __TEGRA_USB_PHY_H #include <linux/clk.h> #include <linux/reset.h> #include <linux/usb/otg.h> /* * utmi_pll_config_in_car_module: true if the UTMI PLL configuration registers * should be set up by clk-tegra, false if by the PHY code * has_hostpc: true if the USB controller has the HOSTPC extension, which * changes the location of the PHCD and PTS fields * requires_usbmode_setup: true if the USBMODE register needs to be set to * enter host mode * requires_extra_tuning_parameters: true if xcvr_hsslew, hssquelch_level * and hsdiscon_level should be set for adequate signal quality */ struct tegra_phy_soc_config { bool utmi_pll_config_in_car_module; bool has_hostpc; bool requires_usbmode_setup; bool requires_extra_tuning_parameters; }; struct tegra_utmip_config { u8 hssync_start_delay; u8 elastic_limit; u8 idle_wait_delay; u8 term_range_adj; bool xcvr_setup_use_fuses; u8 xcvr_setup; u8 xcvr_lsfslew; u8 xcvr_lsrslew; u8 xcvr_hsslew; u8 hssquelch_level; u8 hsdiscon_level; }; enum tegra_usb_phy_port_speed { TEGRA_USB_PHY_PORT_SPEED_FULL = 0, TEGRA_USB_PHY_PORT_SPEED_LOW, TEGRA_USB_PHY_PORT_SPEED_HIGH, }; struct tegra_xtal_freq; struct tegra_usb_phy { int instance; const struct tegra_xtal_freq *freq; void __iomem *regs; void __iomem *pad_regs; struct clk *clk; struct clk *pll_u; struct clk *pad_clk; struct regulator *vbus; enum usb_dr_mode mode; void *config; const struct tegra_phy_soc_config *soc_config; struct usb_phy *ulpi; struct usb_phy u_phy; bool is_legacy_phy; bool is_ulpi_phy; int reset_gpio; struct reset_control *pad_rst; }; void tegra_usb_phy_preresume(struct usb_phy *phy); void tegra_usb_phy_postresume(struct usb_phy *phy); void tegra_ehci_phy_restore_start(struct usb_phy *phy, enum tegra_usb_phy_port_speed port_speed); void tegra_ehci_phy_restore_end(struct usb_phy *phy); #endif /* __TEGRA_USB_PHY_H */ usb/ohci_pdriver.h 0000644 00000003324 14722070374 0010173 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de> * * This
program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __USB_CORE_OHCI_PDRIVER_H #define __USB_CORE_OHCI_PDRIVER_H /** * struct usb_ohci_pdata - platform_data for generic ohci driver * * @big_endian_desc: BE descriptors * @big_endian_mmio: BE registers * @no_big_frame_no: no big endian frame_no shift * @num_ports: number of ports * * These are general configuration options for the OHCI controller. All of * these options are activating more or less workarounds for some hardware. */ struct usb_ohci_pdata { unsigned big_endian_desc:1; unsigned big_endian_mmio:1; unsigned no_big_frame_no:1; unsigned int num_ports; /* Turn on all power and clocks */ int (*power_on)(struct platform_device *pdev); /* Turn off all power and clocks */ void (*power_off)(struct platform_device *pdev); /* Turn on only VBUS suspend power and hotplug detection, * turn off everything else */ void (*power_suspend)(struct platform_device *pdev); }; #endif /* __USB_CORE_OHCI_PDRIVER_H */ usb/sl811.h 0000644 00000001506 14722070374 0006366 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * board initialization should put one of these into dev->platform_data * and place the sl811hs onto platform_bus named "sl811-hcd". */ #ifndef __LINUX_USB_SL811_H #define __LINUX_USB_SL811_H struct sl811_platform_data { unsigned can_wakeup:1; /* given port_power, msec/2 after power on till power good */ u8 potpg; /* mA/2 power supplied on this port (max = default = 250) */ u8 power; /* sl811 relies on an external source of VBUS current */ void (*port_power)(struct device *dev, int is_on); /* pulse sl811 nRST (probably with a GPIO) */ void (*reset)(struct device *dev); /* some boards need something like these: */ /* int (*check_overcurrent)(struct device *dev); */ /* void (*clock_enable)(struct device *dev, int is_on); */ };
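/*
 * Editor's sketch (not in the original header): minimal board-file wiring
 * for the platform data above.  The VBUS callback and GPIO number are
 * hypothetical; a real board supplies its own switch logic.
 */
#if 0 /* illustration only */
static void example_sl811_port_power(struct device *dev, int is_on)
{
	gpio_set_value(EXAMPLE_VBUS_EN_GPIO, is_on); /* hypothetical GPIO */
}

static struct sl811_platform_data example_sl811_data = {
	.potpg		= 10,	/* msec/2: 20 ms power-on to power-good */
	.power		= 125,	/* mA/2: 250 mA available on the port */
	.port_power	= example_sl811_port_power,
};
#endif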
#endif /* __LINUX_USB_SL811_H */ usb/usbnet.h 0000644 00000024015 14722070374 0007016 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /* * USB Networking Link Interface * * Copyright (C) 2000-2005 by David Brownell <dbrownell@users.sourceforge.net> * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_USB_USBNET_H #define __LINUX_USB_USBNET_H /* interface from usbnet core to each USB networking link we handle */ struct usbnet { /* housekeeping */ struct usb_device *udev; struct usb_interface *intf; const struct driver_info *driver_info; const char *driver_name; void *driver_priv; wait_queue_head_t wait; struct mutex phy_mutex; unsigned char suspend_count; unsigned char pkt_cnt, pkt_err; unsigned short rx_qlen, tx_qlen; unsigned can_dma_sg:1; /* i/o info: pipes etc */ unsigned in, out; struct usb_host_endpoint *status; unsigned maxpacket; struct timer_list delay; const char *padding_pkt; /* protocol/interface state */ struct net_device *net; int msg_enable; unsigned long data[5]; u32 xid; u32 hard_mtu; /* count any extra framing */ size_t rx_urb_size; /* size for rx urbs */ struct mii_if_info mii; /* various kinds of pending driver work */ struct sk_buff_head rxq; struct sk_buff_head txq; struct sk_buff_head done; struct sk_buff_head rxq_pause; struct urb *interrupt; unsigned interrupt_count; struct mutex interrupt_mutex; struct usb_anchor deferred; struct tasklet_struct bh; struct pcpu_sw_netstats __percpu *stats64; struct work_struct kevent; unsigned long flags; # define EVENT_TX_HALT 0 # define EVENT_RX_HALT 1 # define EVENT_RX_MEMORY 2 # define EVENT_STS_SPLIT 3 # define EVENT_LINK_RESET 4 # define EVENT_RX_PAUSED 5 # define EVENT_DEV_ASLEEP 6 # define EVENT_DEV_OPEN 7 # define EVENT_DEVICE_REPORT_IDLE 8 # define EVENT_NO_RUNTIME_PM 9 # define EVENT_RX_KILL 10 # define EVENT_LINK_CHANGE 11 # define EVENT_SET_RX_MODE 12 # define EVENT_NO_IP_ALIGN 13 u32 rx_speed; /* in bps - NOT Mbps */ u32 tx_speed; /* in bps - NOT Mbps */ }; static inline struct usb_driver *driver_of(struct usb_interface *intf) { return to_usb_driver(intf->dev.driver); } /* interface from the device/framing level "minidriver" to core */ struct driver_info { char *description; int flags; /* framing is CDC Ethernet, not writing ZLPs (hw issues), or optionally: */ #define FLAG_FRAMING_NC 0x0001 /* guard against device dropouts */ #define FLAG_FRAMING_GL 0x0002 /* genelink batches packets */ #define FLAG_FRAMING_Z 0x0004 /* zaurus adds a trailer */ #define FLAG_FRAMING_RN 0x0008 /* RNDIS batches, plus huge header */ #define FLAG_NO_SETINT 0x0010 /* device can't set_interface() */ #define FLAG_ETHER 0x0020 /* maybe use "eth%d" names */ #define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */ #define FLAG_WLAN 0x0080 /* use "wlan%d" names */ #define FLAG_AVOID_UNLINK_URBS 0x0100 /* don't unlink urbs at usbnet_stop() */ #define FLAG_SEND_ZLP 0x0200 /* hw requires ZLPs are sent */ #define FLAG_WWAN 0x0400 /* use "wwan%d" names */ #define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */ #define FLAG_POINTTOPOINT 0x1000 /* possibly use "usb%d" names */ /* * Indicates to usbnet that the USB driver accumulates multiple IP packets. * Affects statistics (counters) and short packet handling. */ #define FLAG_MULTI_PACKET 0x2000 #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ #define FLAG_NOARP 0x8000 /* device can't do ARP */ /* init device ... can sleep, or cause probe() failure */ int (*bind)(struct usbnet *, struct usb_interface *); /* cleanup device ... can sleep, but can't fail */ void (*unbind)(struct usbnet *, struct usb_interface *); /* reset device ...
can sleep */ int (*reset)(struct usbnet *); /* stop device ... can sleep */ int (*stop)(struct usbnet *); /* see if peer is connected ... can sleep */ int (*check_connect)(struct usbnet *); /* (dis)activate runtime power management */ int (*manage_power)(struct usbnet *, int); /* for status polling */ void (*status)(struct usbnet *, struct urb *); /* link reset handling, called from defer_kevent */ int (*link_reset)(struct usbnet *); /* fixup rx packet (strip framing) */ int (*rx_fixup)(struct usbnet *dev, struct sk_buff *skb); /* fixup tx packet (add framing) */ struct sk_buff *(*tx_fixup)(struct usbnet *dev, struct sk_buff *skb, gfp_t flags); /* recover from timeout */ void (*recover)(struct usbnet *dev); /* early initialization code, can sleep. This is for minidrivers * having 'subminidrivers' that need to do extra initialization * right after the minidriver has initialized the hardware. */ int (*early_init)(struct usbnet *dev); /* called by minidriver when receiving indication */ void (*indication)(struct usbnet *dev, void *ind, int indlen); /* rx mode change (device changes address list filtering) */ void (*set_rx_mode)(struct usbnet *dev); /* for new devices, use the descriptor-reading code instead */ int in; /* rx endpoint */ int out; /* tx endpoint */ unsigned long data; /* Misc driver specific data */ }; /* Minidrivers are just drivers using the "usbnet" core as a powerful * network-specific subroutine library ... that happens to do pretty * much everything except custom framing and chip-specific stuff. */ extern int usbnet_probe(struct usb_interface *, const struct usb_device_id *); extern int usbnet_suspend(struct usb_interface *, pm_message_t); extern int usbnet_resume(struct usb_interface *); extern void usbnet_disconnect(struct usb_interface *); extern void usbnet_device_suggests_idle(struct usbnet *dev); extern int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, void *data, u16 size); extern int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, const void *data, u16 size); extern int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, void *data, u16 size); extern int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, const void *data, u16 size); extern int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, const void *data, u16 size);
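/*
 * Editor's sketch (not in the original header): skeleton driver_info for a
 * minidriver.  The bind and rx_fixup callbacks named here are hypothetical;
 * usbnet_cdc_status() is the CDC helper declared further below.
 */
#if 0 /* illustration only */
static const struct driver_info example_driver_info = {
	.description	= "Example USB Ethernet device",
	.flags		= FLAG_ETHER | FLAG_LINK_INTR,
	.bind		= example_bind,		/* hypothetical */
	.rx_fixup	= example_rx_fixup,	/* strip vendor framing */
	.status		= usbnet_cdc_status,
};
#endif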
/* Drivers that reuse some of the standard USB CDC infrastructure * (notably, using multiple interfaces according to the CDC * union descriptor) get some helper code. */ struct cdc_state { struct usb_cdc_header_desc *header; struct usb_cdc_union_desc *u; struct usb_cdc_ether_desc *ether; struct usb_interface *control; struct usb_interface *data; }; extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf); extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *); extern void usbnet_cdc_status(struct usbnet *, struct urb *); /* CDC and RNDIS support the same host-chosen packet filters for IN transfers */ #define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \ |USB_CDC_PACKET_TYPE_ALL_MULTICAST \ |USB_CDC_PACKET_TYPE_PROMISCUOUS \ |USB_CDC_PACKET_TYPE_DIRECTED) /* we record the state for each of our queued skbs */ enum skb_state { illegal = 0, tx_start, tx_done, rx_start, rx_done, rx_cleanup, unlink_start }; struct skb_data { /* skb->cb is one of these */ struct urb *urb; struct usbnet *dev; enum skb_state state; long length; unsigned long packets; }; /* Drivers that set FLAG_MULTI_PACKET must call this in their * tx_fixup method before returning an skb. */ static inline void usbnet_set_skb_tx_stats(struct sk_buff *skb, unsigned long packets, long bytes_delta) { struct skb_data *entry = (struct skb_data *) skb->cb; entry->packets = packets; entry->length = bytes_delta; } extern int usbnet_open(struct net_device *net); extern int usbnet_stop(struct net_device *net); extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net); extern void usbnet_tx_timeout(struct net_device *net); extern int usbnet_change_mtu(struct net_device *net, int new_mtu); extern int usbnet_get_endpoints(struct usbnet *, struct usb_interface *); extern int usbnet_get_ethernet_addr(struct usbnet *, int); extern void usbnet_defer_kevent(struct usbnet *, int); extern void usbnet_skb_return(struct usbnet *, struct sk_buff *); extern void usbnet_unlink_rx_urbs(struct usbnet *); extern void usbnet_pause_rx(struct usbnet *); extern void usbnet_resume_rx(struct usbnet *); extern void usbnet_purge_paused_rxq(struct usbnet *); extern int usbnet_get_link_ksettings(struct net_device *net, struct ethtool_link_ksettings *cmd); extern int usbnet_set_link_ksettings(struct net_device *net, const struct ethtool_link_ksettings *cmd); extern u32 usbnet_get_link(struct net_device *net); extern u32 usbnet_get_msglevel(struct net_device *); extern void usbnet_set_msglevel(struct net_device *, u32); extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); extern int usbnet_nway_reset(struct net_device *net); extern int usbnet_manage_power(struct usbnet *, int); extern void usbnet_link_change(struct usbnet *, bool, bool); extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags); extern void usbnet_status_stop(struct usbnet *dev); extern void usbnet_update_max_qlen(struct usbnet *dev); extern void usbnet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); #endif /* __LINUX_USB_USBNET_H */ usb/storage.h 0000644 00000005125 14722070374 0007163 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 #ifndef __LINUX_USB_STORAGE_H #define __LINUX_USB_STORAGE_H /* * linux/usb/storage.h * * Copyright Matthew Wilcox for Intel Corp, 2010 * * This file contains definitions taken from the * USB Mass Storage Class Specification Overview * * Distributed under the terms of the GNU GPL, version two.
*/ /* Storage subclass codes */ #define USB_SC_RBC 0x01 /* Typically, flash devices */ #define USB_SC_8020 0x02 /* CD-ROM */ #define USB_SC_QIC 0x03 /* QIC-157 Tapes */ #define USB_SC_UFI 0x04 /* Floppy */ #define USB_SC_8070 0x05 /* Removable media */ #define USB_SC_SCSI 0x06 /* Transparent */ #define USB_SC_LOCKABLE 0x07 /* Password-protected */ #define USB_SC_ISD200 0xf0 /* ISD200 ATA */ #define USB_SC_CYP_ATACB 0xf1 /* Cypress ATACB */ #define USB_SC_DEVICE 0xff /* Use device's value */ /* Storage protocol codes */ #define USB_PR_CBI 0x00 /* Control/Bulk/Interrupt */ #define USB_PR_CB 0x01 /* Control/Bulk w/o interrupt */ #define USB_PR_BULK 0x50 /* bulk only */ #define USB_PR_UAS 0x62 /* USB Attached SCSI */ #define USB_PR_USBAT 0x80 /* SCM-ATAPI bridge */ #define USB_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */ #define USB_PR_SDDR55 0x82 /* SDDR-55 (made up) */ #define USB_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */ #define USB_PR_FREECOM 0xf1 /* Freecom */ #define USB_PR_DATAFAB 0xf2 /* Datafab chipsets */ #define USB_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */ #define USB_PR_ALAUDA 0xf4 /* Alauda chipsets */ #define USB_PR_KARMA 0xf5 /* Rio Karma */ #define USB_PR_DEVICE 0xff /* Use device's value */ /* * Bulk only data structures */ /* command block wrapper */ struct bulk_cb_wrap { __le32 Signature; /* contains 'USBC' */ __u32 Tag; /* unique per command id */ __le32 DataTransferLength; /* size of data */ __u8 Flags; /* direction in bit 0 */ __u8 Lun; /* LUN normally 0 */ __u8 Length; /* length of the CDB */ __u8 CDB[16]; /* max command */ }; #define US_BULK_CB_WRAP_LEN 31 #define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */ #define US_BULK_FLAG_IN (1 << 7) #define US_BULK_FLAG_OUT 0 /* command status wrapper */ struct bulk_cs_wrap { __le32 Signature; /* contains 'USBS' */ __u32 Tag; /* same as original command */ __le32 Residue; /* amount not transferred */ __u8 Status; /* see below */ }; #define US_BULK_CS_WRAP_LEN 13 #define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */ #define US_BULK_STAT_OK 0 #define US_BULK_STAT_FAIL 1 #define US_BULK_STAT_PHASE 2 /* bulk-only class specific requests */ #define US_BULK_RESET_REQUEST 0xff #define US_BULK_GET_MAX_LUN 0xfe
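/*
 * Editor's sketch (not in the original header): filling a command block
 * wrapper for a 6-byte SCSI TEST UNIT READY command (opcode 0x00), which
 * has no data stage.
 */
#if 0 /* illustration only */
static void example_fill_test_unit_ready(struct bulk_cb_wrap *cbw, u32 tag)
{
	memset(cbw, 0, sizeof(*cbw));
	cbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);	/* 'USBC' */
	cbw->Tag = tag;			/* echoed back in the CSW */
	cbw->DataTransferLength = 0;	/* no data stage */
	cbw->Flags = US_BULK_FLAG_OUT;
	cbw->Lun = 0;
	cbw->Length = 6;		/* CDB length */
	/* CDB[0..5] stay zero: TEST UNIT READY */
}
#endif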
#endif usb/cdc.h 0000644 00000002755 14722070374 0006256 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * USB CDC common helpers * * Copyright (c) 2015 Oliver Neukum <oneukum@suse.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #ifndef __LINUX_USB_CDC_H #define __LINUX_USB_CDC_H #include <uapi/linux/usb/cdc.h> /* * unofficial magic numbers */ #define CDC_PHONET_MAGIC_NUMBER 0xAB /* * parsing CDC headers */ struct usb_cdc_parsed_header { struct usb_cdc_union_desc *usb_cdc_union_desc; struct usb_cdc_header_desc *usb_cdc_header_desc; struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; struct usb_cdc_ether_desc *usb_cdc_ether_desc; struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; struct usb_cdc_obex_desc *usb_cdc_obex_desc; struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; bool phonet_magic_present; }; struct usb_interface; int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, struct usb_interface *intf, u8 *buffer, int buflen); #endif /* __LINUX_USB_CDC_H */ igmp.h 0000644 00000010677 14722070374 0005672 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux NET3: Internet Group Management Protocol [IGMP] * * Authors: * Alan Cox <alan@lxorguk.ukuu.org.uk> * * Extended to talk the BSD extended IGMP protocol of mrouted 3.6 */ #ifndef _LINUX_IGMP_H #define _LINUX_IGMP_H #include <linux/skbuff.h> #include <linux/timer.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/refcount.h> #include <uapi/linux/igmp.h> static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb) { return (struct igmphdr *)skb_transport_header(skb); } static inline struct igmpv3_report * igmpv3_report_hdr(const struct sk_buff *skb) { return (struct igmpv3_report *)skb_transport_header(skb); } static inline struct igmpv3_query * igmpv3_query_hdr(const struct sk_buff *skb) { return (struct igmpv3_query *)skb_transport_header(skb); } struct ip_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct rcu_head rcu; __be32 sl_addr[0]; }; #define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \ (count) * sizeof(__be32)) #define IP_SFBLOCK 10 /* allocate this many at once */ /* ip_mc_socklist is a real list now. Speed is not an argument; this list is never used in fast-path code */ struct ip_mc_socklist { struct ip_mc_socklist __rcu *next_rcu; struct ip_mreqn multi; unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ struct ip_sf_socklist __rcu *sflist; struct rcu_head rcu; }; struct ip_sf_list { struct ip_sf_list *sf_next; unsigned long sf_count[2]; /* include/exclude counts */ __be32 sf_inaddr; unsigned char sf_gsresp; /* include in g & s response? */ unsigned char sf_oldin; /* change state */ unsigned char sf_crcount; /* retrans. left to send */ }; struct ip_mc_list { struct in_device *interface; __be32 multiaddr; unsigned int sfmode; struct ip_sf_list *sources; struct ip_sf_list *tomb; unsigned long sfcount[2]; union { struct ip_mc_list *next; struct ip_mc_list __rcu *next_rcu; }; struct ip_mc_list __rcu *next_hash; struct timer_list timer; int users; refcount_t refcnt; spinlock_t lock; char tm_running; char reporter; char unsolicit_count; char loaded; unsigned char gsquery; /* check source marks? */ unsigned char crcount; struct rcu_head rcu; }; /* V3 exponential field decoding */ #define IGMPV3_MASK(value, nb) ((nb)>=32 ?
(value) : ((1<<(nb))-1) & (value)) #define IGMPV3_EXP(thresh, nbmant, nbexp, value) \ ((value) < (thresh) ? (value) : \ ((IGMPV3_MASK(value, nbmant) | (1<<(nbmant))) << \ (IGMPV3_MASK((value) >> (nbmant), nbexp) + (nbexp)))) #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len) { if (skb_transport_offset(skb) + ip_transport_len(skb) < len) return 0; return pskb_may_pull(skb, len); } extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); extern int igmp_rcv(struct sk_buff *); extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr, unsigned int mode); extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); extern void ip_mc_drop_socket(struct sock *sk); extern int ip_mc_source(int add, int omode, struct sock *sk, struct ip_mreq_source *mreqs, int ifindex); extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex); extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, struct ip_msfilter __user *optval, int __user *optlen); extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, struct group_filter __user *optval, int __user *optlen); extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif, int sdif); extern void ip_mc_init_dev(struct in_device *); extern void ip_mc_destroy_dev(struct in_device *); extern void ip_mc_up(struct in_device *); extern void ip_mc_down(struct in_device *); extern void ip_mc_unmap(struct in_device *); extern void ip_mc_remap(struct in_device *); extern void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp); static inline void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) { return __ip_mc_dec_group(in_dev, addr, GFP_KERNEL); } extern void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, gfp_t gfp); extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); int ip_mc_check_igmp(struct sk_buff *skb);
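/*
 * Editor's worked example (not in the original header): IGMPV3_QQIC()
 * decodes the Querier's Query Interval Code.  Codes below 0x80 are taken
 * literally; otherwise bits 6..4 are an exponent and bits 3..0 a mantissa,
 * giving (mant | 0x10) << (exp + 3).
 */
#if 0 /* illustration only */
static void example_qqic_decode(void)
{
	unsigned int a = IGMPV3_QQIC(0x7f); /* < 0x80: literal 127 */
	unsigned int b = IGMPV3_QQIC(0x85); /* mant=5, exp=0: (5|16)<<3 = 168 */

	(void)a;
	(void)b;
}
#endif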
 * http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_FS_H
#define _LINUX_F2FS_FS_H

#include <linux/pagemap.h>
#include <linux/types.h>

#define F2FS_SUPER_OFFSET		1024	/* byte-size offset */
#define F2FS_MIN_LOG_SECTOR_SIZE	9	/* 9 bits for 512 bytes */
#define F2FS_MAX_LOG_SECTOR_SIZE	12	/* 12 bits for 4096 bytes */
#define F2FS_LOG_SECTORS_PER_BLOCK	3	/* log number for sector/blk */
#define F2FS_BLKSIZE			4096	/* support only 4KB block */
#define F2FS_BLKSIZE_BITS		12	/* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION		64	/* # of extension entries */
#define F2FS_EXTENSION_LEN		8	/* max size of extension */
#define F2FS_BLK_ALIGN(x)	(((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)

#define NULL_ADDR		((block_t)0)	/* used as block_t addresses */
#define NEW_ADDR		((block_t)-1)	/* used as block_t addresses */

#define F2FS_BYTES_TO_BLK(bytes)	((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk)		((blk) << F2FS_BLKSIZE_BITS)

/* 0, 1(node nid), 2(meta nid) are reserved node ids */
#define F2FS_RESERVED_NODE_NUM		3

#define F2FS_ROOT_INO(sbi)	((sbi)->root_ino_num)
#define F2FS_NODE_INO(sbi)	((sbi)->node_ino_num)
#define F2FS_META_INO(sbi)	((sbi)->meta_ino_num)

#define F2FS_MAX_QUOTAS		3

#define F2FS_ENC_UTF8_12_1	1
#define F2FS_ENC_STRICT_MODE_FL	(1 << 0)
#define f2fs_has_strict_mode(sbi) \
	(sbi->s_encoding_flags & F2FS_ENC_STRICT_MODE_FL)

#define F2FS_IO_SIZE(sbi)	(1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
#define F2FS_IO_SIZE_KB(sbi)	(1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */
#define F2FS_IO_SIZE_BYTES(sbi)	(1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */
#define F2FS_IO_SIZE_BITS(sbi)	(F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
#define F2FS_IO_SIZE_MASK(sbi)	(F2FS_IO_SIZE(sbi) - 1)
#define F2FS_IO_ALIGNED(sbi)	(F2FS_IO_SIZE(sbi) > 1)

/* This flag is used by node and meta inodes, and by recovery */
#define GFP_F2FS_ZERO		(GFP_NOFS | __GFP_ZERO)

/*
 * For further optimization on multi-head logs, the on-disk layout supports a
 * maximum of 16 logs by default. The number, 16, is expected to cover all the
 * cases well enough. The implementation currently uses no more than 6 logs.
 * Half the logs are used for nodes, and the other half are used for data.
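 */

/*
 * A hedged usage sketch, not part of the original header: converting a byte
 * count into 4 KB blocks with the helpers defined above. The function name is
 * illustrative only.
 */
static inline unsigned int f2fs_example_bytes_to_blocks(unsigned long bytes)
{
	/* F2FS_BLK_ALIGN() rounds up to the next full 4 KB block. */
	return F2FS_BLK_ALIGN(bytes);
}

/* The multi-head log limits described above: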
*/ #define MAX_ACTIVE_LOGS 16 #define MAX_ACTIVE_NODE_LOGS 8 #define MAX_ACTIVE_DATA_LOGS 8 #define VERSION_LEN 256 #define MAX_VOLUME_NAME 512 #define MAX_PATH_LEN 64 #define MAX_DEVICES 8 /* * For superblock */ struct f2fs_device { __u8 path[MAX_PATH_LEN]; __le32 total_segments; } __packed; struct f2fs_super_block { __le32 magic; /* Magic Number */ __le16 major_ver; /* Major Version */ __le16 minor_ver; /* Minor Version */ __le32 log_sectorsize; /* log2 sector size in bytes */ __le32 log_sectors_per_block; /* log2 # of sectors per block */ __le32 log_blocksize; /* log2 block size in bytes */ __le32 log_blocks_per_seg; /* log2 # of blocks per segment */ __le32 segs_per_sec; /* # of segments per section */ __le32 secs_per_zone; /* # of sections per zone */ __le32 checksum_offset; /* checksum offset inside super block */ __le64 block_count; /* total # of user blocks */ __le32 section_count; /* total # of sections */ __le32 segment_count; /* total # of segments */ __le32 segment_count_ckpt; /* # of segments for checkpoint */ __le32 segment_count_sit; /* # of segments for SIT */ __le32 segment_count_nat; /* # of segments for NAT */ __le32 segment_count_ssa; /* # of segments for SSA */ __le32 segment_count_main; /* # of segments for main area */ __le32 segment0_blkaddr; /* start block address of segment 0 */ __le32 cp_blkaddr; /* start block address of checkpoint */ __le32 sit_blkaddr; /* start block address of SIT */ __le32 nat_blkaddr; /* start block address of NAT */ __le32 ssa_blkaddr; /* start block address of SSA */ __le32 main_blkaddr; /* start block address of main area */ __le32 root_ino; /* root inode number */ __le32 node_ino; /* node inode number */ __le32 meta_ino; /* meta inode number */ __u8 uuid[16]; /* 128-bit uuid for volume */ __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */ __le32 extension_count; /* # of extensions below */ __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */ __le32 cp_payload; __u8 version[VERSION_LEN]; /* the kernel version */ __u8 init_version[VERSION_LEN]; /* the initial kernel version */ __le32 feature; /* defined features */ __u8 encryption_level; /* versioning level for encryption */ __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ struct f2fs_device devs[MAX_DEVICES]; /* device list */ __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ __u8 hot_ext_count; /* # of hot file extension */ __le16 s_encoding; /* Filename charset encoding */ __le16 s_encoding_flags; /* Filename charset encoding flags */ __u8 reserved[306]; /* valid reserved region */ __le32 crc; /* checksum of superblock */ } __packed; /* * For checkpoint */ #define CP_RESIZEFS_FLAG 0x00004000 #define CP_DISABLED_QUICK_FLAG 0x00002000 #define CP_DISABLED_FLAG 0x00001000 #define CP_QUOTA_NEED_FSCK_FLAG 0x00000800 #define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 #define CP_NOCRC_RECOVERY_FLAG 0x00000200 #define CP_TRIMMED_FLAG 0x00000100 #define CP_NAT_BITS_FLAG 0x00000080 #define CP_CRC_RECOVERY_FLAG 0x00000040 #define CP_FASTBOOT_FLAG 0x00000020 #define CP_FSCK_FLAG 0x00000010 #define CP_ERROR_FLAG 0x00000008 #define CP_COMPACT_SUM_FLAG 0x00000004 #define CP_ORPHAN_PRESENT_FLAG 0x00000002 #define CP_UMOUNT_FLAG 0x00000001 #define F2FS_CP_PACKS 2 /* # of checkpoint packs */ struct f2fs_checkpoint { __le64 checkpoint_ver; /* checkpoint block version number */ __le64 user_block_count; /* # of user blocks */ __le64 valid_block_count; /* # of valid blocks in main area */ __le32 rsvd_segment_count; /* # of reserved segments for gc */ __le32 
overprov_segment_count; /* # of overprovision segments */ __le32 free_segment_count; /* # of free segments in main area */ /* information of current node segments */ __le32 cur_node_segno[MAX_ACTIVE_NODE_LOGS]; __le16 cur_node_blkoff[MAX_ACTIVE_NODE_LOGS]; /* information of current data segments */ __le32 cur_data_segno[MAX_ACTIVE_DATA_LOGS]; __le16 cur_data_blkoff[MAX_ACTIVE_DATA_LOGS]; __le32 ckpt_flags; /* Flags : umount and journal_present */ __le32 cp_pack_total_block_count; /* total # of one cp pack */ __le32 cp_pack_start_sum; /* start block number of data summary */ __le32 valid_node_count; /* Total number of valid nodes */ __le32 valid_inode_count; /* Total number of valid inodes */ __le32 next_free_nid; /* Next free node number */ __le32 sit_ver_bitmap_bytesize; /* Default value 64 */ __le32 nat_ver_bitmap_bytesize; /* Default value 256 */ __le32 checksum_offset; /* checksum offset inside cp block */ __le64 elapsed_time; /* mounted time */ /* allocation type of current segment */ unsigned char alloc_type[MAX_ACTIVE_LOGS]; /* SIT and NAT version bitmap */ unsigned char sit_nat_version_bitmap[1]; } __packed; #define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */ #define CP_MIN_CHKSUM_OFFSET \ (offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap)) /* * For orphan inode management */ #define F2FS_ORPHANS_PER_BLOCK 1020 #define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \ F2FS_ORPHANS_PER_BLOCK) struct f2fs_orphan_block { __le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */ __le32 reserved; /* reserved */ __le16 blk_addr; /* block index in current CP */ __le16 blk_count; /* Number of orphan inode blocks in CP */ __le32 entry_count; /* Total number of orphan nodes in current CP */ __le32 check_sum; /* CRC32 for orphan inode block */ } __packed; /* * For NODE structure */ struct f2fs_extent { __le32 fofs; /* start file offset of the extent */ __le32 blk; /* start block address of the extent */ __le32 len; /* length of the extent */ } __packed; #define F2FS_NAME_LEN 255 /* 200 bytes for inline xattrs by default */ #define DEFAULT_INLINE_XATTR_ADDRS 50 #define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ #define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \ get_extra_isize(inode)) #define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */ #define ADDRS_PER_INODE(inode) addrs_per_inode(inode) #define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ #define ADDRS_PER_BLOCK(inode) addrs_per_block(inode) #define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ #define ADDRS_PER_PAGE(page, inode) \ (IS_INODE(page) ? 
ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode)) #define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1) #define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2) #define NODE_IND1_BLOCK (DEF_ADDRS_PER_INODE + 3) #define NODE_IND2_BLOCK (DEF_ADDRS_PER_INODE + 4) #define NODE_DIND_BLOCK (DEF_ADDRS_PER_INODE + 5) #define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ #define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ #define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ #define F2FS_PIN_FILE 0x40 /* file should not be gced */ struct f2fs_inode { __le16 i_mode; /* file mode */ __u8 i_advise; /* file hints */ __u8 i_inline; /* file inline flags */ __le32 i_uid; /* user ID */ __le32 i_gid; /* group ID */ __le32 i_links; /* links count */ __le64 i_size; /* file size in bytes */ __le64 i_blocks; /* file size in blocks */ __le64 i_atime; /* access time */ __le64 i_ctime; /* change time */ __le64 i_mtime; /* modification time */ __le32 i_atime_nsec; /* access time in nano scale */ __le32 i_ctime_nsec; /* change time in nano scale */ __le32 i_mtime_nsec; /* modification time in nano scale */ __le32 i_generation; /* file version (for NFS) */ union { __le32 i_current_depth; /* only for directory depth */ __le16 i_gc_failures; /* * # of gc failures on pinned file. * only for regular files. */ }; __le32 i_xattr_nid; /* nid to save xattr */ __le32 i_flags; /* file attributes */ __le32 i_pino; /* parent inode number */ __le32 i_namelen; /* file name length */ __u8 i_name[F2FS_NAME_LEN]; /* file name for SPOR */ __u8 i_dir_level; /* dentry_level for large dir */ struct f2fs_extent i_ext; /* caching a largest extent */ union { struct { __le16 i_extra_isize; /* extra inode attribute size */ __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */ __le32 i_projid; /* project id */ __le32 i_inode_checksum;/* inode meta checksum */ __le64 i_crtime; /* creation time */ __le32 i_crtime_nsec; /* creation time in nano scale */ __le32 i_extra_end[0]; /* for attribute size calculation */ } __packed; __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ }; __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2), double_indirect(1) node id */ } __packed; struct direct_node { __le32 addr[DEF_ADDRS_PER_BLOCK]; /* array of data block address */ } __packed; struct indirect_node { __le32 nid[NIDS_PER_BLOCK]; /* array of data block address */ } __packed; enum { COLD_BIT_SHIFT = 0, FSYNC_BIT_SHIFT, DENT_BIT_SHIFT, OFFSET_BIT_SHIFT }; #define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */ struct node_footer { __le32 nid; /* node id */ __le32 ino; /* inode number */ __le32 flag; /* include cold/fsync/dentry marks and offset */ __le64 cp_ver; /* checkpoint version */ __le32 next_blkaddr; /* next node page block address */ } __packed; struct f2fs_node { /* can be one of three types: inode, direct, and indirect types */ union { struct f2fs_inode i; struct direct_node dn; struct indirect_node in; }; struct node_footer footer; } __packed; /* * For NAT entries */ #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ __le32 ino; /* inode number */ __le32 block_addr; /* block address */ } __packed; struct f2fs_nat_block { struct f2fs_nat_entry entries[NAT_ENTRY_PER_BLOCK]; } __packed; /* * For SIT 
 entries
 *
 * Each segment is 2 MB in size by default, so a bitmap for the validity of
 * the blocks therein occupies 64 bytes, i.e. 512 bits.
 * This is not allowed to change.
 */
#define SIT_VBLOCK_MAP_SIZE	64
#define SIT_ENTRY_PER_BLOCK	(PAGE_SIZE / sizeof(struct f2fs_sit_entry))

/*
 * F2FS uses 4 bytes to represent a block address. As a result, the supported
 * disk size is 16 TB, which equals 16 * 1024 * 1024 / 2 segments.
 */
#define F2FS_MAX_SEGMENT	((16 * 1024 * 1024) / 2)

/*
 * Note that f2fs_sit_entry->vblocks has the following bit-field information.
 * [15:10] : allocation type such as CURSEG_XXXX_TYPE
 * [9:0] : valid block count
 */
#define SIT_VBLOCKS_SHIFT	10
#define SIT_VBLOCKS_MASK	((1 << SIT_VBLOCKS_SHIFT) - 1)
#define GET_SIT_VBLOCKS(raw_sit)				\
	(le16_to_cpu((raw_sit)->vblocks) & SIT_VBLOCKS_MASK)
#define GET_SIT_TYPE(raw_sit)					\
	((le16_to_cpu((raw_sit)->vblocks) & ~SIT_VBLOCKS_MASK)	\
	 >> SIT_VBLOCKS_SHIFT)

struct f2fs_sit_entry {
	__le16 vblocks;				/* reference above */
	__u8 valid_map[SIT_VBLOCK_MAP_SIZE];	/* bitmap for valid blocks */
	__le64 mtime;				/* segment age for cleaning */
} __packed;

struct f2fs_sit_block {
	struct f2fs_sit_entry entries[SIT_ENTRY_PER_BLOCK];
} __packed;

/*
 * For segment summary
 *
 * One summary block contains exactly 512 summary entries, which represents
 * exactly one 2 MB segment by default. The basic units are not allowed to
 * change.
 *
 * NOTE: For initializing fields, you must use set_summary
 *
 * - If data page, nid represents dnode's nid
 * - If node page, nid represents the node page's nid.
 *
 * The ofs_in_node is used only by data pages. It represents the offset from
 * the node page's beginning at which to get a data block address.
 * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
 */
#define ENTRIES_IN_SUM		512
#define SUMMARY_SIZE		(7)	/* sizeof(struct summary) */
#define SUM_FOOTER_SIZE		(5)	/* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE		(SUMMARY_SIZE * ENTRIES_IN_SUM)

/* a summary entry for a 4KB-sized block in a segment */
struct f2fs_summary {
	__le32 nid;		/* parent node id */
	union {
		__u8 reserved[3];
		struct {
			__u8 version;		/* node version number */
			__le16 ofs_in_node;	/* block index in parent node */
		} __packed;
	};
} __packed;

/* summary block type, node or data, is stored to the summary_footer */
#define SUM_TYPE_NODE		(1)
#define SUM_TYPE_DATA		(0)

struct summary_footer {
	unsigned char entry_type;	/* SUM_TYPE_XXX */
	__le32 check_sum;		/* summary checksum */
} __packed;

#define SUM_JOURNAL_SIZE	(F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
				SUM_ENTRY_SIZE)
#define NAT_JOURNAL_ENTRIES	((SUM_JOURNAL_SIZE - 2) /\
				sizeof(struct nat_journal_entry))
#define NAT_JOURNAL_RESERVED	((SUM_JOURNAL_SIZE - 2) %\
				sizeof(struct nat_journal_entry))
#define SIT_JOURNAL_ENTRIES	((SUM_JOURNAL_SIZE - 2) /\
				sizeof(struct sit_journal_entry))
#define SIT_JOURNAL_RESERVED	((SUM_JOURNAL_SIZE - 2) %\
				sizeof(struct sit_journal_entry))

/* The reserved area should make the size of f2fs_extra_info equal to
 * that of nat_journal and sit_journal.
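 */

/*
 * A hedged usage sketch, not part of the original header: unpacking the
 * vblocks bit-field of an on-disk SIT entry with the accessors above. The
 * function name is illustrative only.
 */
static inline void f2fs_example_decode_sit(const struct f2fs_sit_entry *e,
					   unsigned int *type,
					   unsigned int *valid_blocks)
{
	*type = GET_SIT_TYPE(e);		/* bits [15:10] */
	*valid_blocks = GET_SIT_VBLOCKS(e);	/* bits [9:0] */
}

/* Spare room left after the journal entries above: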
*/ #define EXTRA_INFO_RESERVED (SUM_JOURNAL_SIZE - 2 - 8) /* * frequently updated NAT/SIT entries can be stored in the spare area in * summary blocks */ enum { NAT_JOURNAL = 0, SIT_JOURNAL }; struct nat_journal_entry { __le32 nid; struct f2fs_nat_entry ne; } __packed; struct nat_journal { struct nat_journal_entry entries[NAT_JOURNAL_ENTRIES]; __u8 reserved[NAT_JOURNAL_RESERVED]; } __packed; struct sit_journal_entry { __le32 segno; struct f2fs_sit_entry se; } __packed; struct sit_journal { struct sit_journal_entry entries[SIT_JOURNAL_ENTRIES]; __u8 reserved[SIT_JOURNAL_RESERVED]; } __packed; struct f2fs_extra_info { __le64 kbytes_written; __u8 reserved[EXTRA_INFO_RESERVED]; } __packed; struct f2fs_journal { union { __le16 n_nats; __le16 n_sits; }; /* spare area is used by NAT or SIT journals or extra info */ union { struct nat_journal nat_j; struct sit_journal sit_j; struct f2fs_extra_info info; }; } __packed; /* 4KB-sized summary block structure */ struct f2fs_summary_block { struct f2fs_summary entries[ENTRIES_IN_SUM]; struct f2fs_journal journal; struct summary_footer footer; } __packed; /* * For directory operations */ #define F2FS_DOT_HASH 0 #define F2FS_DDOT_HASH F2FS_DOT_HASH #define F2FS_MAX_HASH (~((0x3ULL) << 62)) #define F2FS_HASH_COL_BIT ((0x1ULL) << 63) typedef __le32 f2fs_hash_t; /* One directory entry slot covers 8bytes-long file name */ #define F2FS_SLOT_LEN 8 #define F2FS_SLOT_LEN_BITS 3 #define GET_DENTRY_SLOTS(x) (((x) + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS) /* MAX level for dir lookup */ #define MAX_DIR_HASH_DEPTH 63 /* MAX buckets in one level of dir */ #define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1)) /* * space utilization of regular dentry and inline dentry (w/o extra reservation) * regular dentry inline dentry (def) inline dentry (min) * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1 * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1 * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22 * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16 * total 4096 3488 40 * * Note: there are more reserved space in inline dentry than in regular * dentry, when converting inline dentry we should handle this carefully. */ #define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */ #define SIZE_OF_DIR_ENTRY 11 /* by byte */ #define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \ BITS_PER_BYTE) #define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \ F2FS_SLOT_LEN) * \ NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP)) #define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' 
entries */ /* One directory entry slot representing F2FS_SLOT_LEN-sized file name */ struct f2fs_dir_entry { __le32 hash_code; /* hash code of file name */ __le32 ino; /* inode number */ __le16 name_len; /* length of file name */ __u8 file_type; /* file type */ } __packed; /* 4KB-sized directory entry block */ struct f2fs_dentry_block { /* validity bitmap for directory entries in each block */ __u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP]; __u8 reserved[SIZE_OF_RESERVED]; struct f2fs_dir_entry dentry[NR_DENTRY_IN_BLOCK]; __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; } __packed; /* file types used in inode_info->flags */ enum { F2FS_FT_UNKNOWN, F2FS_FT_REG_FILE, F2FS_FT_DIR, F2FS_FT_CHRDEV, F2FS_FT_BLKDEV, F2FS_FT_FIFO, F2FS_FT_SOCK, F2FS_FT_SYMLINK, F2FS_FT_MAX }; #define S_SHIFT 12 #define F2FS_DEF_PROJID 0 /* default project ID */ #endif /* _LINUX_F2FS_FS_H */ crc-t10dif.h 0000644 00000000705 14722070374 0006561 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CRC_T10DIF_H #define _LINUX_CRC_T10DIF_H #include <linux/types.h> #define CRC_T10DIF_DIGEST_SIZE 2 #define CRC_T10DIF_BLOCK_SIZE 1 #define CRC_T10DIF_STRING "crct10dif" extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len); extern __u16 crc_t10dif(unsigned char const *, size_t); extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t); #endif fs_pin.h 0000644 00000001033 14722070374 0006176 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/wait.h> struct fs_pin { wait_queue_head_t wait; int done; struct hlist_node s_list; struct hlist_node m_list; void (*kill)(struct fs_pin *); }; struct vfsmount; static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *)) { init_waitqueue_head(&p->wait); INIT_HLIST_NODE(&p->s_list); INIT_HLIST_NODE(&p->m_list); p->kill = kill; } void pin_remove(struct fs_pin *); void pin_insert(struct fs_pin *, struct vfsmount *); void pin_kill(struct fs_pin *); reset-controller.h 0000644 00000006040 14722070374 0010226 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RESET_CONTROLLER_H_ #define _LINUX_RESET_CONTROLLER_H_ #include <linux/list.h> struct reset_controller_dev; /** * struct reset_control_ops - reset controller driver callbacks * * @reset: for self-deasserting resets, does all necessary * things to reset the device * @assert: manually assert the reset line, if supported * @deassert: manually deassert the reset line, if supported * @status: return the status of the reset line, if supported */ struct reset_control_ops { int (*reset)(struct reset_controller_dev *rcdev, unsigned long id); int (*assert)(struct reset_controller_dev *rcdev, unsigned long id); int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id); int (*status)(struct reset_controller_dev *rcdev, unsigned long id); }; struct module; struct device_node; struct of_phandle_args; /** * struct reset_control_lookup - represents a single lookup entry * * @list: internal list of all reset lookup entries * @provider: name of the reset controller device controlling this reset line * @index: ID of the reset controller in the reset controller device * @dev_id: name of the device associated with this reset line * @con_id: name of the reset line (can be NULL) */ struct reset_control_lookup { struct list_head list; const char *provider; unsigned int index; const char *dev_id; const char *con_id; }; #define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \ { \ .provider = _provider, \ .index = _index, \ .dev_id = _dev_id, \ 
.con_id = _con_id, \ } /** * struct reset_controller_dev - reset controller entity that might * provide multiple reset controls * @ops: a pointer to device specific struct reset_control_ops * @owner: kernel module of the reset controller driver * @list: internal list of reset controller devices * @reset_control_head: head of internal list of requested reset controls * @dev: corresponding driver model device struct * @of_node: corresponding device tree node as phandle target * @of_reset_n_cells: number of cells in reset line specifiers * @of_xlate: translation function to translate from specifier as found in the * device tree to id as given to the reset control ops * @nr_resets: number of reset controls in this reset controller device */ struct reset_controller_dev { const struct reset_control_ops *ops; struct module *owner; struct list_head list; struct list_head reset_control_head; struct device *dev; struct device_node *of_node; int of_reset_n_cells; int (*of_xlate)(struct reset_controller_dev *rcdev, const struct of_phandle_args *reset_spec); unsigned int nr_resets; }; int reset_controller_register(struct reset_controller_dev *rcdev); void reset_controller_unregister(struct reset_controller_dev *rcdev); struct device; int devm_reset_controller_register(struct device *dev, struct reset_controller_dev *rcdev); void reset_controller_add_lookup(struct reset_control_lookup *lookup, unsigned int num_entries); #endif bcm47xx_wdt.h 0000644 00000001053 14722070374 0007074 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_BCM47XX_WDT_H_ #define LINUX_BCM47XX_WDT_H_ #include <linux/timer.h> #include <linux/types.h> #include <linux/watchdog.h> struct bcm47xx_wdt { u32 (*timer_set)(struct bcm47xx_wdt *, u32); u32 (*timer_set_ms)(struct bcm47xx_wdt *, u32); u32 max_timer_ms; void *driver_data; struct watchdog_device wdd; struct timer_list soft_timer; atomic_t soft_ticks; }; static inline void *bcm47xx_wdt_get_drvdata(struct bcm47xx_wdt *wdt) { return wdt->driver_data; } #endif /* LINUX_BCM47XX_WDT_H_ */ fsnotify.h 0000644 00000022721 14722070374 0006570 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FS_NOTIFY_H #define _LINUX_FS_NOTIFY_H /* * include/linux/fsnotify.h - generic hooks for filesystem notification, to * reduce in-source duplication from both dnotify and inotify. * * We don't compile any of this away in some complicated menagerie of ifdefs. * Instead, we rely on the code inside to optimize away as needed. * * (C) Copyright 2005 Robert Love */ #include <linux/fsnotify_backend.h> #include <linux/audit.h> #include <linux/slab.h> #include <linux/bug.h> /* * Notify this @dir inode about a change in the directory entry @dentry. * * Unlike fsnotify_parent(), the event will be reported regardless of the * FS_EVENT_ON_CHILD mask on the parent inode. */ static inline int fsnotify_dirent(struct inode *dir, struct dentry *dentry, __u32 mask) { return fsnotify(dir, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE, &dentry->d_name, 0); } /* Notify this dentry's parent about a child's events. */ static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) { if (!dentry) dentry = path->dentry; return __fsnotify_parent(path, dentry, mask); } /* * Simple wrapper to consolidate calls fsnotify_parent()/fsnotify() when * an event is on a path. 
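 */

/*
 * A hedged caller-side sketch, not part of the original header: reporting a
 * modify event for a path when only the parent watch matters. The function
 * name is illustrative only; FS_MODIFY comes from fsnotify_backend.h.
 */
static inline int fsnotify_example_report_modify(const struct path *path)
{
	/* A watch on the parent directory sees this as a child event. */
	return fsnotify_parent(path, NULL, FS_MODIFY);
}

/* fsnotify_path() below pairs that parent report with the inode report: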
*/ static inline int fsnotify_path(struct inode *inode, const struct path *path, __u32 mask) { int ret = fsnotify_parent(path, NULL, mask); if (ret) return ret; return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); } /* Simple call site for access decisions */ static inline int fsnotify_perm(struct file *file, int mask) { int ret; const struct path *path = &file->f_path; struct inode *inode = file_inode(file); __u32 fsnotify_mask = 0; /* * FMODE_NONOTIFY are fds generated by fanotify itself which should not * generate new events. We also don't want to generate events for * FMODE_PATH fds (involves open & close events) as they are just * handle creation / destruction events and not "real" file events. */ if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH)) return 0; if (!(mask & (MAY_READ | MAY_OPEN))) return 0; if (mask & MAY_OPEN) { fsnotify_mask = FS_OPEN_PERM; if (file->f_flags & __FMODE_EXEC) { ret = fsnotify_path(inode, path, FS_OPEN_EXEC_PERM); if (ret) return ret; } } else if (mask & MAY_READ) { fsnotify_mask = FS_ACCESS_PERM; } if (S_ISDIR(inode->i_mode)) fsnotify_mask |= FS_ISDIR; return fsnotify_path(inode, path, fsnotify_mask); } /* * fsnotify_link_count - inode's link count changed */ static inline void fsnotify_link_count(struct inode *inode) { __u32 mask = FS_ATTRIB; if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } /* * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir */ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, const struct qstr *old_name, int isdir, struct inode *target, struct dentry *moved) { struct inode *source = moved->d_inode; u32 fs_cookie = fsnotify_get_cookie(); __u32 old_dir_mask = FS_MOVED_FROM; __u32 new_dir_mask = FS_MOVED_TO; __u32 mask = FS_MOVE_SELF; const struct qstr *new_name = &moved->d_name; if (old_dir == new_dir) old_dir_mask |= FS_DN_RENAME; if (isdir) { old_dir_mask |= FS_ISDIR; new_dir_mask |= FS_ISDIR; mask |= FS_ISDIR; } fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name, fs_cookie); fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name, fs_cookie); if (target) fsnotify_link_count(target); if (source) fsnotify(source, mask, source, FSNOTIFY_EVENT_INODE, NULL, 0); audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE); } /* * fsnotify_inode_delete - and inode is being evicted from cache, clean up is needed */ static inline void fsnotify_inode_delete(struct inode *inode) { __fsnotify_inode_delete(inode); } /* * fsnotify_vfsmount_delete - a vfsmount is being destroyed, clean up is needed */ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) { __fsnotify_vfsmount_delete(mnt); } /* * fsnotify_inoderemove - an inode is going away */ static inline void fsnotify_inoderemove(struct inode *inode) { __u32 mask = FS_DELETE_SELF; if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); __fsnotify_inode_delete(inode); } /* * fsnotify_create - 'name' was linked in */ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) { audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); fsnotify_dirent(inode, dentry, FS_CREATE); } /* * fsnotify_link - new hardlink in 'inode' directory * Note: We have to pass also the linked inode ptr as some filesystems leave * new_dentry->d_inode NULL and instantiate inode pointer later */ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry 
*new_dentry) { fsnotify_link_count(inode); audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE); fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, &new_dentry->d_name, 0); } /* * fsnotify_delete - @dentry was unlinked and unhashed * * Caller must make sure that dentry->d_name is stable. * * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode * as this may be called after d_delete() and old_dentry may be negative. */ static inline void fsnotify_delete(struct inode *dir, struct inode *inode, struct dentry *dentry) { __u32 mask = FS_DELETE; if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; fsnotify(dir, mask, inode, FSNOTIFY_EVENT_INODE, &dentry->d_name, 0); } /** * d_delete_notify - delete a dentry and call fsnotify_delete() * @dentry: The dentry to delete * * This helper is used to guaranty that the unlinked inode cannot be found * by lookup of this name after fsnotify_delete() event has been delivered. */ static inline void d_delete_notify(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); ihold(inode); d_delete(dentry); fsnotify_delete(dir, inode, dentry); iput(inode); } /* * fsnotify_unlink - 'name' was unlinked * * Caller must make sure that dentry->d_name is stable. */ static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) { if (WARN_ON_ONCE(d_is_negative(dentry))) return; fsnotify_delete(dir, d_inode(dentry), dentry); } /* * fsnotify_mkdir - directory 'name' was created */ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) { audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); fsnotify_dirent(inode, dentry, FS_CREATE | FS_ISDIR); } /* * fsnotify_rmdir - directory 'name' was removed * * Caller must make sure that dentry->d_name is stable. */ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) { if (WARN_ON_ONCE(d_is_negative(dentry))) return; fsnotify_delete(dir, d_inode(dentry), dentry); } /* * fsnotify_access - file was read */ static inline void fsnotify_access(struct file *file) { const struct path *path = &file->f_path; struct inode *inode = file_inode(file); __u32 mask = FS_ACCESS; if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; if (!(file->f_mode & FMODE_NONOTIFY)) fsnotify_path(inode, path, mask); } /* * fsnotify_modify - file was modified */ static inline void fsnotify_modify(struct file *file) { const struct path *path = &file->f_path; struct inode *inode = file_inode(file); __u32 mask = FS_MODIFY; if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; if (!(file->f_mode & FMODE_NONOTIFY)) fsnotify_path(inode, path, mask); } /* * fsnotify_open - file was opened */ static inline void fsnotify_open(struct file *file) { const struct path *path = &file->f_path; struct inode *inode = file_inode(file); __u32 mask = FS_OPEN; if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; if (file->f_flags & __FMODE_EXEC) mask |= FS_OPEN_EXEC; fsnotify_path(inode, path, mask); } /* * fsnotify_close - file was closed */ static inline void fsnotify_close(struct file *file) { const struct path *path = &file->f_path; struct inode *inode = file_inode(file); fmode_t mode = file->f_mode; __u32 mask = (mode & FMODE_WRITE) ? 
FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; if (!(file->f_mode & FMODE_NONOTIFY)) fsnotify_path(inode, path, mask); } /* * fsnotify_xattr - extended attributes were changed */ static inline void fsnotify_xattr(struct dentry *dentry) { struct inode *inode = dentry->d_inode; __u32 mask = FS_ATTRIB; if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; fsnotify_parent(NULL, dentry, mask); fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } /* * fsnotify_change - notify_change event. file was modified and/or metadata * was changed. */ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) { struct inode *inode = dentry->d_inode; __u32 mask = 0; if (ia_valid & ATTR_UID) mask |= FS_ATTRIB; if (ia_valid & ATTR_GID) mask |= FS_ATTRIB; if (ia_valid & ATTR_SIZE) mask |= FS_MODIFY; /* both times implies a utime(s) call */ if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) mask |= FS_ATTRIB; else if (ia_valid & ATTR_ATIME) mask |= FS_ACCESS; else if (ia_valid & ATTR_MTIME) mask |= FS_MODIFY; if (ia_valid & ATTR_MODE) mask |= FS_ATTRIB; if (mask) { if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; fsnotify_parent(NULL, dentry, mask); fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } } #endif /* _LINUX_FS_NOTIFY_H */ huge_mm.h 0000644 00000030076 14722070374 0006352 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H #include <linux/sched/coredump.h> #include <linux/mm_types.h> #include <linux/fs.h> /* only for vma_is_dax() */ extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma); extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, struct vm_area_struct *vma); #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); #else static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) { } #endif extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags); extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long next); extern int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr); extern int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr); extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, pmd_t *old_pmd, pmd_t *new_pmd); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, 
TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, #ifdef CONFIG_DEBUG_VM TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG, #endif }; struct kobject; struct kobj_attribute; extern ssize_t single_hugepage_flag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count, enum transparent_hugepage_flag flag); extern ssize_t single_hugepage_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag flag); extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define HPAGE_PMD_SHIFT PMD_SHIFT #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) #define HPAGE_PUD_SHIFT PUD_SHIFT #define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) extern bool is_vma_temporary_stack(struct vm_area_struct *vma); extern unsigned long transparent_hugepage_flags; /* * to be used on vmas which are known to support THP. * Use transparent_hugepage_enabled otherwise */ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) { if (vma->vm_flags & VM_NOHUGEPAGE) return false; if (is_vma_temporary_stack(vma)) return false; if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) return false; if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) return true; /* * For dax vmas, try to always use hugepage mappings. If the kernel does * not support hugepages, fsdax mappings will fallback to PAGE_SIZE * mappings, and device-dax namespaces, that try to guarantee a given * mapping size, will fail to enable */ if (vma_is_dax(vma)) return true; if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) return !!(vma->vm_flags & VM_HUGEPAGE); return false; } bool transparent_hugepage_enabled(struct vm_area_struct *vma); #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1) static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, unsigned long haddr) { /* Don't have to check pgoff for anonymous vma */ if (!vma_is_anonymous(vma)) { if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) != (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK)) return false; } if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) return false; return true; } #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) #ifdef CONFIG_DEBUG_VM #define transparent_hugepage_debug_cow() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG)) #else /* CONFIG_DEBUG_VM */ #define transparent_hugepage_debug_cow() 0 #endif /* CONFIG_DEBUG_VM */ extern unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern void prep_transhuge_page(struct page *page); extern void free_transhuge_page(struct page *page); bool can_split_huge_page(struct page *page, int *pextra_pins); int split_huge_page_to_list(struct page *page, struct list_head *list); static inline int split_huge_page(struct page *page) { return split_huge_page_to_list(page, NULL); } void deferred_split_huge_page(struct page *page); void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long address, bool freeze, struct page *page); #define split_huge_pmd(__vma, __pmd, __address) 
\ do { \ pmd_t *____pmd = (__pmd); \ if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \ || pmd_devmap(*____pmd)) \ __split_huge_pmd(__vma, __pmd, __address, \ false, NULL); \ } while (0) void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page); void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, unsigned long address); #define split_huge_pud(__vma, __pud, __address) \ do { \ pud_t *____pud = (__pud); \ if (pud_trans_huge(*____pud) \ || pud_devmap(*____pud)) \ __split_huge_pud(__vma, __pud, __address); \ } while (0) extern int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice); extern void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next); extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); extern spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma); static inline int is_swap_pmd(pmd_t pmd) { return !pmd_none(pmd) && !pmd_present(pmd); } /* mmap_sem must be held on entry */ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) return __pmd_trans_huge_lock(pmd, vma); else return NULL; } static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) { VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); if (pud_trans_huge(*pud) || pud_devmap(*pud)) return __pud_trans_huge_lock(pud, vma); else return NULL; } /** * thp_order - Order of a transparent huge page. * @page: Head page of a transparent huge page. */ static inline unsigned int thp_order(struct page *page) { VM_BUG_ON_PGFLAGS(PageTail(page), page); if (PageHead(page)) return HPAGE_PMD_ORDER; return 0; } static inline int hpage_nr_pages(struct page *page) { if (unlikely(PageTransHuge(page))) return HPAGE_PMD_NR; return 1; } struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap); struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap); extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *huge_zero_page; extern unsigned long huge_zero_pfn; static inline bool is_huge_zero_page(struct page *page) { return READ_ONCE(huge_zero_page) == page; } static inline bool is_huge_zero_pmd(pmd_t pmd) { return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd); } static inline bool is_huge_zero_pud(pud_t pud) { return false; } struct page *mm_get_huge_zero_page(struct mm_struct *mm); void mm_put_huge_zero_page(struct mm_struct *mm); #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) static inline bool thp_migration_supported(void) { return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION); } static inline struct list_head *page_deferred_list(struct page *page) { /* * Global or memcg deferred list in the second tail pages is * occupied by compound_head. 
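	 * (page[1], the first tail page, already carries the compound-page
	 * metadata, so the deferred-split list_head has to live in the
	 * second tail page, page[2], returned below.)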
*/ return &page[2].deferred_list; } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) static inline unsigned int thp_order(struct page *page) { VM_BUG_ON_PGFLAGS(PageTail(page), page); return 0; } #define hpage_nr_pages(x) 1 static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) { return false; } static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) { return false; } static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, unsigned long haddr) { return false; } static inline void prep_transhuge_page(struct page *page) {} #define transparent_hugepage_flags 0UL #define thp_get_unmapped_area NULL static inline bool can_split_huge_page(struct page *page, int *pextra_pins) { BUILD_BUG(); return false; } static inline int split_huge_page_to_list(struct page *page, struct list_head *list) { return 0; } static inline int split_huge_page(struct page *page) { return 0; } static inline void deferred_split_huge_page(struct page *page) {} #define split_huge_pmd(__vma, __pmd, __address) \ do { } while (0) static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long address, bool freeze, struct page *page) {} static inline void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page) {} #define split_huge_pud(__vma, __pmd, __address) \ do { } while (0) static inline int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { BUG(); return 0; } static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) { } static inline int is_swap_pmd(pmd_t pmd) { return 0; } static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { return NULL; } static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) { return NULL; } static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd) { return 0; } static inline bool is_huge_zero_page(struct page *page) { return false; } static inline bool is_huge_zero_pmd(pmd_t pmd) { return false; } static inline bool is_huge_zero_pud(pud_t pud) { return false; } static inline void mm_put_huge_zero_page(struct mm_struct *mm) { return; } static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) { return NULL; } static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) { return NULL; } static inline bool thp_migration_supported(void) { return false; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_HUGE_MM_H */ context_tracking_state.h 0000644 00000002616 14722070374 0011476 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CONTEXT_TRACKING_STATE_H #define _LINUX_CONTEXT_TRACKING_STATE_H #include <linux/percpu.h> #include <linux/static_key.h> struct context_tracking { /* * When active is false, probes are unset in order * to minimize overhead: TIF flags are cleared * and calls to user_enter/exit are ignored. This * may be further optimized using static keys. 
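	 * (The recursion counter below guards the state-update paths
	 * against reentrancy, e.g. from exceptions taken while the
	 * context-tracking state is being changed.)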
*/ bool active; int recursion; enum ctx_state { CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */ CONTEXT_KERNEL = 0, CONTEXT_USER, CONTEXT_GUEST, } state; }; #ifdef CONFIG_CONTEXT_TRACKING extern struct static_key_false context_tracking_enabled; DECLARE_PER_CPU(struct context_tracking, context_tracking); static inline bool context_tracking_is_enabled(void) { return static_branch_unlikely(&context_tracking_enabled); } static inline bool context_tracking_cpu_is_enabled(void) { return __this_cpu_read(context_tracking.active); } static inline bool context_tracking_in_user(void) { return __this_cpu_read(context_tracking.state) == CONTEXT_USER; } #else static inline bool context_tracking_in_user(void) { return false; } static inline bool context_tracking_active(void) { return false; } static inline bool context_tracking_is_enabled(void) { return false; } static inline bool context_tracking_cpu_is_enabled(void) { return false; } #endif /* CONFIG_CONTEXT_TRACKING */ #endif balloon_compaction.h 0000644 00000015133 14722070374 0010570 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/balloon_compaction.h * * Common interface definitions for making balloon pages movable by compaction. * * Balloon page migration makes use of the general non-lru movable page * feature. * * page->private is used to reference the responsible balloon device. * page->mapping is used in context of non-lru page migration to reference * the address space operations for page isolation/migration/compaction. * * As the page isolation scanning step a compaction thread does is a lockless * procedure (from a page standpoint), it might bring some racy situations while * performing balloon page compaction. In order to sort out these racy scenarios * and safely perform balloon's page compaction and migration we must, always, * ensure following these simple rules: * * i. when updating a balloon's page ->mapping element, strictly do it under * the following lock order, independently of the far superior * locking scheme (lru_lock, balloon_lock): * +-page_lock(page); * +--spin_lock_irq(&b_dev_info->pages_lock); * ... page->mapping updates here ... * * ii. isolation or dequeueing procedure must remove the page from balloon * device page list under b_dev_info->pages_lock. * * The functions provided by this interface are placed to help on coping with * the aforementioned balloon page corner case, as well as to ensure the simple * set of exposed rules are satisfied while we are dealing with balloon pages * compaction / migration. * * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com> */ #ifndef _LINUX_BALLOON_COMPACTION_H #define _LINUX_BALLOON_COMPACTION_H #include <linux/pagemap.h> #include <linux/page-flags.h> #include <linux/migrate.h> #include <linux/gfp.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/list.h> /* * Balloon device information descriptor. * This struct is used to allow the common balloon compaction interface * procedures to find the proper balloon device holding memory pages they'll * have to cope for page compaction / migration, as well as it serves the * balloon driver as a page book-keeper for its registered balloon devices. 
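 */

/*
 * A hedged sketch of rule (i) above, not part of the original header: the
 * required lock order when re-pointing page->mapping. Guarded out because
 * struct balloon_dev_info is only defined just below; all names besides the
 * locking calls are illustrative.
 */
#if 0
static void balloon_example_set_mapping(struct balloon_dev_info *b_dev_info,
					struct page *page,
					struct address_space *mapping)
{
	lock_page(page);			/* rule (i): page lock first */
	spin_lock_irq(&b_dev_info->pages_lock);	/* then the pages list lock */
	page->mapping = mapping;
	spin_unlock_irq(&b_dev_info->pages_lock);
	unlock_page(page);
}
#endif

/* The descriptor those rules protect: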
*/ struct balloon_dev_info { unsigned long isolated_pages; /* # of isolated pages for migration */ spinlock_t pages_lock; /* Protection to pages list */ struct list_head pages; /* Pages enqueued & handled to Host */ int (*migratepage)(struct balloon_dev_info *, struct page *newpage, struct page *page, enum migrate_mode mode); struct inode *inode; }; extern struct page *balloon_page_alloc(void); extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info, struct page *page); extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info, struct list_head *pages); extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info, struct list_head *pages, size_t n_req_pages); static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) { balloon->isolated_pages = 0; spin_lock_init(&balloon->pages_lock); INIT_LIST_HEAD(&balloon->pages); balloon->migratepage = NULL; balloon->inode = NULL; } #ifdef CONFIG_BALLOON_COMPACTION extern const struct address_space_operations balloon_aops; extern bool balloon_page_isolate(struct page *page, isolate_mode_t mode); extern void balloon_page_putback(struct page *page); extern int balloon_page_migrate(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode); /* * balloon_page_insert - insert a page into the balloon's page list and make * the page->private assignment accordingly. * @balloon : pointer to balloon device * @page : page to be assigned as a 'balloon page' * * Caller must ensure the page is locked and the spin_lock protecting balloon * pages list is held before inserting a page into the balloon device. */ static inline void balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) { __SetPageOffline(page); __SetPageMovable(page, balloon->inode->i_mapping); set_page_private(page, (unsigned long)balloon); list_add(&page->lru, &balloon->pages); } /* * balloon_page_delete - delete a page from balloon's page list and clear * the page->private assignement accordingly. * @page : page to be released from balloon's page list * * Caller must ensure the page is locked and the spin_lock protecting balloon * pages list is held before deleting a page from the balloon device. */ static inline void balloon_page_delete(struct page *page) { __ClearPageOffline(page); __ClearPageMovable(page); set_page_private(page, 0); /* * No touch page.lru field once @page has been isolated * because VM is using the field. */ if (!PageIsolated(page)) list_del(&page->lru); } /* * balloon_page_device - get the b_dev_info descriptor for the balloon device * that enqueues the given page. 
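 * (It reads back the page_private() value stored by balloon_page_insert(),
 * so it is only meaningful for pages currently enqueued on a balloon.)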
*/ static inline struct balloon_dev_info *balloon_page_device(struct page *page) { return (struct balloon_dev_info *)page_private(page); } static inline gfp_t balloon_mapping_gfp_mask(void) { return GFP_HIGHUSER_MOVABLE; } #else /* !CONFIG_BALLOON_COMPACTION */ static inline void balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) { __SetPageOffline(page); list_add(&page->lru, &balloon->pages); } static inline void balloon_page_delete(struct page *page) { __ClearPageOffline(page); list_del(&page->lru); } static inline bool balloon_page_isolate(struct page *page) { return false; } static inline void balloon_page_putback(struct page *page) { return; } static inline int balloon_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode) { return 0; } static inline gfp_t balloon_mapping_gfp_mask(void) { return GFP_HIGHUSER; } #endif /* CONFIG_BALLOON_COMPACTION */ /* * balloon_page_push - insert a page into a page list. * @head : pointer to list * @page : page to be added * * Caller must ensure the page is private and protect the list. */ static inline void balloon_page_push(struct list_head *pages, struct page *page) { list_add(&page->lru, pages); } /* * balloon_page_pop - remove a page from a page list. * @head : pointer to list * @page : page to be added * * Caller must ensure the page is private and protect the list. */ static inline struct page *balloon_page_pop(struct list_head *pages) { struct page *page = list_first_entry_or_null(pages, struct page, lru); if (!page) return NULL; list_del(&page->lru); return page; } #endif /* _LINUX_BALLOON_COMPACTION_H */ interconnect-provider.h 0000644 00000010134 14722070374 0011245 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2018, Linaro Ltd. * Author: Georgi Djakov <georgi.djakov@linaro.org> */ #ifndef __LINUX_INTERCONNECT_PROVIDER_H #define __LINUX_INTERCONNECT_PROVIDER_H #include <linux/interconnect.h> #define icc_units_to_bps(bw) ((bw) * 1000ULL) struct icc_node; struct of_phandle_args; /** * struct icc_onecell_data - driver data for onecell interconnect providers * * @num_nodes: number of nodes in this device * @nodes: array of pointers to the nodes in this device */ struct icc_onecell_data { unsigned int num_nodes; struct icc_node *nodes[]; }; struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec, void *data); /** * struct icc_provider - interconnect provider (controller) entity that might * provide multiple interconnect controls * * @provider_list: list of the registered interconnect providers * @nodes: internal list of the interconnect provider nodes * @set: pointer to device specific set operation function * @aggregate: pointer to device specific aggregate operation function * @pre_aggregate: pointer to device specific function that is called * before the aggregation begins (optional) * @xlate: provider-specific callback for mapping nodes from phandle arguments * @dev: the device this interconnect provider belongs to * @users: count of active users * @data: pointer to private data */ struct icc_provider { struct list_head provider_list; struct list_head nodes; int (*set)(struct icc_node *src, struct icc_node *dst); int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw, u32 peak_bw, u32 *agg_avg, u32 *agg_peak); void (*pre_aggregate)(struct icc_node *node); struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data); struct device *dev; int users; void *data; }; /** * struct icc_node - entity that is part of the interconnect topology * * @id: platform 
specific node id
 * @name: node name used in debugfs
 * @links: a list of targets pointing to where we can go next when traversing
 * @num_links: number of links to other interconnect nodes
 * @provider: points to the interconnect provider of this node
 * @node_list: the list entry in the parent provider's "nodes" list
 * @search_list: list used when walking the nodes graph
 * @reverse: pointer to previous node when walking the nodes graph
 * @is_traversed: flag that is used when walking the nodes graph
 * @req_list: a list of QoS constraint requests associated with this node
 * @avg_bw: aggregated value of average bandwidth requests from all consumers
 * @peak_bw: aggregated value of peak bandwidth requests from all consumers
 * @data: pointer to private data
 */
struct icc_node {
	int			id;
	const char		*name;
	struct icc_node		**links;
	size_t			num_links;

	struct icc_provider	*provider;
	struct list_head	node_list;
	struct list_head	search_list;
	struct icc_node		*reverse;
	u8			is_traversed:1;
	struct hlist_head	req_list;
	u32			avg_bw;
	u32			peak_bw;
	void			*data;
};

#if IS_ENABLED(CONFIG_INTERCONNECT)

struct icc_node *icc_node_create(int id);
void icc_node_destroy(int id);
int icc_link_create(struct icc_node *node, const int dst_id);
int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
void icc_node_add(struct icc_node *node, struct icc_provider *provider);
void icc_node_del(struct icc_node *node);
int icc_provider_add(struct icc_provider *provider);
int icc_provider_del(struct icc_provider *provider);

#else

static inline struct icc_node *icc_node_create(int id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline void icc_node_destroy(int id)
{
}

static inline int icc_link_create(struct icc_node *node, const int dst_id)
{
	return -ENOTSUPP;
}

static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
	return -ENOTSUPP;
}

static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
}

static inline void icc_node_del(struct icc_node *node)
{
}

static inline int icc_provider_add(struct icc_provider *provider)
{
	return -ENOTSUPP;
}

static inline int icc_provider_del(struct icc_provider *provider)
{
	return -ENOTSUPP;
}

#endif /* CONFIG_INTERCONNECT */

#endif /* __LINUX_INTERCONNECT_PROVIDER_H */

syscore_ops.h 0000644 00000001171 14722070374 0007273 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  syscore_ops.h - System core operations.
 *
 *  Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*/ #ifndef _LINUX_SYSCORE_OPS_H #define _LINUX_SYSCORE_OPS_H #include <linux/list.h> struct syscore_ops { struct list_head node; int (*suspend)(void); void (*resume)(void); void (*shutdown)(void); }; extern void register_syscore_ops(struct syscore_ops *ops); extern void unregister_syscore_ops(struct syscore_ops *ops); #ifdef CONFIG_PM_SLEEP extern int syscore_suspend(void); extern void syscore_resume(void); #endif extern void syscore_shutdown(void); #endif timer.h 0000644 00000017045 14722070374 0006052 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TIMER_H #define _LINUX_TIMER_H #include <linux/list.h> #include <linux/ktime.h> #include <linux/stddef.h> #include <linux/debugobjects.h> #include <linux/stringify.h> struct timer_list { /* * All fields that change during normal runtime grouped to the * same cacheline */ struct hlist_node entry; unsigned long expires; void (*function)(struct timer_list *); u32 flags; #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif }; #ifdef CONFIG_LOCKDEP /* * NB: because we have to copy the lockdep_map, setting the lockdep_map key * (second argument) here is required, otherwise it could be initialised to * the copy of the lockdep_map later! We use the pointer to and the string * "<file>:<line>" as the key resp. the name of the lockdep_map. */ #define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \ .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn), #else #define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) #endif /** * @TIMER_DEFERRABLE: A deferrable timer will work normally when the * system is busy, but will not cause a CPU to come out of idle just * to service it; instead, the timer will be serviced when the CPU * eventually wakes up with a subsequent non-deferrable timer. * * @TIMER_IRQSAFE: An irqsafe timer is executed with IRQ disabled and * it's safe to wait for the completion of the running instance from * IRQ handlers, for example, by calling del_timer_sync(). * * Note: The irq disabled callback execution is a special case for * workqueue locking issues. It's not meant for executing random crap * with interrupts disabled. Abuse is monitored! * * @TIMER_PINNED: A pinned timer will not be affected by any timer * placement heuristics (like, NOHZ) and will always expire on the CPU * on which the timer was enqueued. * * Note: Because enqueuing of timers can migrate the timer from one * CPU to another, pinned timers are not guaranteed to stay on the * initialy selected CPU. They move to the CPU on which the enqueue * function is invoked via mod_timer() or add_timer(). If the timer * should be placed on a particular CPU, then add_timer_on() has to be * used. */ #define TIMER_CPUMASK 0x0003FFFF #define TIMER_MIGRATING 0x00040000 #define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING) #define TIMER_DEFERRABLE 0x00080000 #define TIMER_PINNED 0x00100000 #define TIMER_IRQSAFE 0x00200000 #define TIMER_ARRAYSHIFT 22 #define TIMER_ARRAYMASK 0xFFC00000 #define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE) #define __TIMER_INITIALIZER(_function, _flags) { \ .entry = { .next = TIMER_ENTRY_STATIC }, \ .function = (_function), \ .flags = (_flags), \ __TIMER_LOCKDEP_MAP_INITIALIZER( \ __FILE__ ":" __stringify(__LINE__)) \ } #define DEFINE_TIMER(_name, _function) \ struct timer_list _name = \ __TIMER_INITIALIZER(_function, 0) /* * LOCKDEP and DEBUG timer interfaces. 
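 */

/*
 * A hedged usage sketch, not part of the original header: a statically
 * defined timer whose callback receives the timer_list pointer, as the
 * definitions above require. Guarded out since it is illustrative only;
 * mod_timer()/from_timer() are declared further below.
 */
#if 0
static void example_timeout(struct timer_list *t)
{
	/* struct my_ctx *ctx = from_timer(ctx, t, timer); */
}
static DEFINE_TIMER(example_timer, example_timeout);
/* armed with: mod_timer(&example_timer, jiffies + HZ); */
#endif

/* The LOCKDEP- and DEBUG-aware timer initializers referred to above: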
*/ void init_timer_key(struct timer_list *timer, void (*func)(struct timer_list *), unsigned int flags, const char *name, struct lock_class_key *key); #ifdef CONFIG_DEBUG_OBJECTS_TIMERS extern void init_timer_on_stack_key(struct timer_list *timer, void (*func)(struct timer_list *), unsigned int flags, const char *name, struct lock_class_key *key); #else static inline void init_timer_on_stack_key(struct timer_list *timer, void (*func)(struct timer_list *), unsigned int flags, const char *name, struct lock_class_key *key) { init_timer_key(timer, func, flags, name, key); } #endif #ifdef CONFIG_LOCKDEP #define __init_timer(_timer, _fn, _flags) \ do { \ static struct lock_class_key __key; \ init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\ } while (0) #define __init_timer_on_stack(_timer, _fn, _flags) \ do { \ static struct lock_class_key __key; \ init_timer_on_stack_key((_timer), (_fn), (_flags), \ #_timer, &__key); \ } while (0) #else #define __init_timer(_timer, _fn, _flags) \ init_timer_key((_timer), (_fn), (_flags), NULL, NULL) #define __init_timer_on_stack(_timer, _fn, _flags) \ init_timer_on_stack_key((_timer), (_fn), (_flags), NULL, NULL) #endif /** * timer_setup - prepare a timer for first use * @timer: the timer in question * @callback: the function to call when timer expires * @flags: any TIMER_* flags * * Regular timer initialization should use either DEFINE_TIMER() above, * or timer_setup(). For timers on the stack, timer_setup_on_stack() must * be used and must be balanced with a call to destroy_timer_on_stack(). */ #define timer_setup(timer, callback, flags) \ __init_timer((timer), (callback), (flags)) #define timer_setup_on_stack(timer, callback, flags) \ __init_timer_on_stack((timer), (callback), (flags)) #ifdef CONFIG_DEBUG_OBJECTS_TIMERS extern void destroy_timer_on_stack(struct timer_list *timer); #else static inline void destroy_timer_on_stack(struct timer_list *timer) { } #endif #define from_timer(var, callback_timer, timer_fieldname) \ container_of(callback_timer, typeof(*var), timer_fieldname) /** * timer_pending - is a timer pending? * @timer: the timer in question * * timer_pending will tell whether a given timer is currently pending, * or not. Callers must ensure serialization wrt. other operations done * to this timer, eg. interrupt contexts, or other CPUs on SMP. * * return value: 1 if the timer is pending, 0 if not. */ static inline int timer_pending(const struct timer_list * timer) { return timer->entry.pprev != NULL; } extern void add_timer_on(struct timer_list *timer, int cpu); extern int del_timer(struct timer_list * timer); extern int mod_timer(struct timer_list *timer, unsigned long expires); extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); extern int timer_reduce(struct timer_list *timer, unsigned long expires); /* * The jiffies value which is added to now, when there is no timer * in the timer wheel: */ #define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) extern void add_timer(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); extern int timer_delete_sync(struct timer_list *timer); /** * del_timer_sync - Delete a pending timer and wait for a running callback * @timer: The timer to be deleted * * See timer_delete_sync() for detailed explanation. * * Do not use in new code. Use timer_delete_sync() instead. 
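 *
 * Illustrative lifecycle sketch (not upstream text; struct foo_device and
 * all foo_* names are hypothetical):
 *
 *	struct foo_device {
 *		struct timer_list timer;
 *	};
 *
 *	static void foo_timeout(struct timer_list *t)
 *	{
 *		struct foo_device *foo = from_timer(foo, t, timer);
 *		...
 *	}
 *
 *	timer_setup(&foo->timer, foo_timeout, 0);
 *	mod_timer(&foo->timer, jiffies + msecs_to_jiffies(100));
 *	...
 *	timer_delete_sync(&foo->timer);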
*/ static inline int del_timer_sync(struct timer_list *timer) { return timer_delete_sync(timer); } #define del_singleshot_timer_sync(t) del_timer_sync(t) extern void init_timers(void); extern void run_local_timers(void); struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) struct ctl_table; extern unsigned int sysctl_timer_migration; int timer_migration_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif unsigned long __round_jiffies(unsigned long j, int cpu); unsigned long __round_jiffies_relative(unsigned long j, int cpu); unsigned long round_jiffies(unsigned long j); unsigned long round_jiffies_relative(unsigned long j); unsigned long __round_jiffies_up(unsigned long j, int cpu); unsigned long __round_jiffies_up_relative(unsigned long j, int cpu); unsigned long round_jiffies_up(unsigned long j); unsigned long round_jiffies_up_relative(unsigned long j); #ifdef CONFIG_HOTPLUG_CPU int timers_prepare_cpu(unsigned int cpu); int timers_dead_cpu(unsigned int cpu); #else #define timers_prepare_cpu NULL #define timers_dead_cpu NULL #endif #endif libgcc.h 0000644 00000000717 14722070374 0006153 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/lib/libgcc.h */ #ifndef __LIB_LIBGCC_H #define __LIB_LIBGCC_H #include <asm/byteorder.h> typedef int word_type __attribute__ ((mode (__word__))); #ifdef __BIG_ENDIAN struct DWstruct { int high, low; }; #elif defined(__LITTLE_ENDIAN) struct DWstruct { int low, high; }; #else #error I feel sick. #endif typedef union { struct DWstruct s; long long ll; } DWunion; #endif /* __LIB_LIBGCC_H */ fec.h 0000644 00000000717 14722070374 0005465 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* include/linux/fec.h * * Copyright (c) 2009 Orex Computed Radiography * Baruch Siach <baruch@tkos.co.il> * * Copyright (C) 2010 Freescale Semiconductor, Inc. * * Header file for the FEC platform data */ #ifndef __LINUX_FEC_H__ #define __LINUX_FEC_H__ #include <linux/phy.h> struct fec_platform_data { phy_interface_t phy; unsigned char mac[ETH_ALEN]; void (*sleep_mode_enable)(int enabled); }; #endif vermagic.h 0000644 00000002207 14722070374 0006521 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #include <generated/utsrelease.h> /* Simple sanity version stamp for modules.
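 * For a hypothetical configuration, VERMAGIC_STRING below might expand to
 * something like "5.4.0 SMP preempt mod_unload modversions " (illustrative
 * only; the exact value depends on UTS_RELEASE and the kernel config).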
*/ #ifdef CONFIG_SMP #define MODULE_VERMAGIC_SMP "SMP " #else #define MODULE_VERMAGIC_SMP "" #endif #ifdef CONFIG_PREEMPT #define MODULE_VERMAGIC_PREEMPT "preempt " #elif defined(CONFIG_PREEMPT_RT) #define MODULE_VERMAGIC_PREEMPT "preempt_rt " #else #define MODULE_VERMAGIC_PREEMPT "" #endif #ifdef CONFIG_MODULE_UNLOAD #define MODULE_VERMAGIC_MODULE_UNLOAD "mod_unload " #else #define MODULE_VERMAGIC_MODULE_UNLOAD "" #endif #ifdef CONFIG_MODVERSIONS #define MODULE_VERMAGIC_MODVERSIONS "modversions " #else #define MODULE_VERMAGIC_MODVERSIONS "" #endif #ifndef MODULE_ARCH_VERMAGIC #define MODULE_ARCH_VERMAGIC "" #endif #ifdef RANDSTRUCT_PLUGIN #include <generated/randomize_layout_hash.h> #define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED #else #define MODULE_RANDSTRUCT_PLUGIN #endif #define VERMAGIC_STRING \ UTS_RELEASE " " \ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ MODULE_ARCH_VERMAGIC \ MODULE_RANDSTRUCT_PLUGIN inet_diag.h 0000644 00000004663 14722070374 0006657 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _INET_DIAG_H_ #define _INET_DIAG_H_ 1 #include <net/netlink.h> #include <uapi/linux/inet_diag.h> struct inet_hashinfo; struct inet_diag_handler { void (*dump)(struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r, struct nlattr *bc); int (*dump_one)(struct sk_buff *in_skb, const struct nlmsghdr *nlh, const struct inet_diag_req_v2 *req); void (*idiag_get_info)(struct sock *sk, struct inet_diag_msg *r, void *info); int (*idiag_get_aux)(struct sock *sk, bool net_admin, struct sk_buff *skb); size_t (*idiag_get_aux_size)(struct sock *sk, bool net_admin); int (*destroy)(struct sk_buff *in_skb, const struct inet_diag_req_v2 *req); __u16 idiag_type; __u16 idiag_info_size; }; struct inet_connection_sock; int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, struct sk_buff *skb, const struct inet_diag_req_v2 *req, struct user_namespace *user_ns, u32 pid, u32 seq, u16 nlmsg_flags, const struct nlmsghdr *unlh, bool net_admin); void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r, struct nlattr *bc); int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb, const struct nlmsghdr *nlh, const struct inet_diag_req_v2 *req); struct sock *inet_diag_find_one_icsk(struct net *net, struct inet_hashinfo *hashinfo, const struct inet_diag_req_v2 *req); int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk); static inline size_t inet_diag_msg_attrs_size(void) { return nla_total_size(1) /* INET_DIAG_SHUTDOWN */ + nla_total_size(1) /* INET_DIAG_TOS */ #if IS_ENABLED(CONFIG_IPV6) + nla_total_size(1) /* INET_DIAG_TCLASS */ + nla_total_size(1) /* INET_DIAG_SKV6ONLY */ #endif + nla_total_size(4) /* INET_DIAG_MARK */ + nla_total_size(4); /* INET_DIAG_CLASS_ID */ } int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, struct inet_diag_msg *r, int ext, struct user_namespace *user_ns, bool net_admin); extern int inet_diag_register(const struct inet_diag_handler *handler); extern void inet_diag_unregister(const struct inet_diag_handler *handler); #endif /* _INET_DIAG_H_ */ c2port.h 0000644 00000002550 14722070374 0006136 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Silicon Labs C2 port Linux support * * Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it> * 
Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it> */ #define C2PORT_NAME_LEN 32 struct device; /* * C2 port basic structs */ /* Main struct */ struct c2port_ops; struct c2port_device { unsigned int access:1; unsigned int flash_access:1; int id; char name[C2PORT_NAME_LEN]; struct c2port_ops *ops; struct mutex mutex; /* prevent races during read/write */ struct device *dev; void *private_data; }; /* Basic operations */ struct c2port_ops { /* Flash layout */ unsigned short block_size; /* flash block size in bytes */ unsigned short blocks_num; /* flash blocks number */ /* Enable or disable the access to C2 port */ void (*access)(struct c2port_device *dev, int status); /* Set C2D data line as input/output */ void (*c2d_dir)(struct c2port_device *dev, int dir); /* Read/write C2D data line */ int (*c2d_get)(struct c2port_device *dev); void (*c2d_set)(struct c2port_device *dev, int status); /* Write C2CK clock line */ void (*c2ck_set)(struct c2port_device *dev, int status); }; /* * Exported functions */ extern struct c2port_device *c2port_device_register(char *name, struct c2port_ops *ops, void *devdata); extern void c2port_device_unregister(struct c2port_device *dev); hippidevice.h 0000644 00000002034 14722070374 0007213 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the HIPPI handlers. * * Version: @(#)hippidevice.h 1.0.0 05/26/97 * * Author: Jes Sorensen, <Jes.Sorensen@cern.ch> * * hippidevice.h is based on previous fddidevice.h work by * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Lawrence V. 
Stefani, <stefani@lkg.dec.com> */ #ifndef _LINUX_HIPPIDEVICE_H #define _LINUX_HIPPIDEVICE_H #include <linux/if_hippi.h> #ifdef __KERNEL__ struct hippi_cb { __u32 ifield; }; __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); int hippi_mac_addr(struct net_device *dev, void *p); int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p); struct net_device *alloc_hippi_dev(int sizeof_priv); #endif #endif /* _LINUX_HIPPIDEVICE_H */ pid_namespace.h 0000644 00000004522 14722070374 0007516 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PID_NS_H #define _LINUX_PID_NS_H #include <linux/sched.h> #include <linux/bug.h> #include <linux/mm.h> #include <linux/workqueue.h> #include <linux/threads.h> #include <linux/nsproxy.h> #include <linux/kref.h> #include <linux/ns_common.h> #include <linux/idr.h> struct fs_pin; enum { /* definitions for pid_namespace's hide_pid field */ HIDEPID_OFF = 0, HIDEPID_NO_ACCESS = 1, HIDEPID_INVISIBLE = 2, }; struct pid_namespace { struct kref kref; struct idr idr; struct rcu_head rcu; unsigned int pid_allocated; struct task_struct *child_reaper; struct kmem_cache *pid_cachep; unsigned int level; struct pid_namespace *parent; #ifdef CONFIG_PROC_FS struct vfsmount *proc_mnt; struct dentry *proc_self; struct dentry *proc_thread_self; #endif #ifdef CONFIG_BSD_PROCESS_ACCT struct fs_pin *bacct; #endif struct user_namespace *user_ns; struct ucounts *ucounts; struct work_struct proc_work; kgid_t pid_gid; int hide_pid; int reboot; /* group exit code if this pidns was rebooted */ struct ns_common ns; } __randomize_layout; extern struct pid_namespace init_pid_ns; #define PIDNS_ADDING (1U << 31) #ifdef CONFIG_PID_NS static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) { if (ns != &init_pid_ns) kref_get(&ns->kref); return ns; } extern struct pid_namespace *copy_pid_ns(unsigned long flags, struct user_namespace *user_ns, struct pid_namespace *ns); extern void zap_pid_ns_processes(struct pid_namespace *pid_ns); extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd); extern void put_pid_ns(struct pid_namespace *ns); #else /* !CONFIG_PID_NS */ #include <linux/err.h> static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) { return ns; } static inline struct pid_namespace *copy_pid_ns(unsigned long flags, struct user_namespace *user_ns, struct pid_namespace *ns) { if (flags & CLONE_NEWPID) ns = ERR_PTR(-EINVAL); return ns; } static inline void put_pid_ns(struct pid_namespace *ns) { } static inline void zap_pid_ns_processes(struct pid_namespace *ns) { BUG(); } static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) { return 0; } #endif /* CONFIG_PID_NS */ extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); void pidhash_init(void); void pid_idr_init(void); #endif /* _LINUX_PID_NS_H */ seg6_hmac.h 0000644 00000000210 14722070374 0006550 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SEG6_HMAC_H #define _LINUX_SEG6_HMAC_H #include <uapi/linux/seg6_hmac.h> #endif mm-arch-hooks.h 0000644 00000001025 14722070374 0007366 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic mm no-op hooks. 
* * Copyright (C) 2015, IBM Corporation * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com> */ #ifndef _LINUX_MM_ARCH_HOOKS_H #define _LINUX_MM_ARCH_HOOKS_H #include <asm/mm-arch-hooks.h> #ifndef arch_remap static inline void arch_remap(struct mm_struct *mm, unsigned long old_start, unsigned long old_end, unsigned long new_start, unsigned long new_end) { } #define arch_remap arch_remap #endif #endif /* _LINUX_MM_ARCH_HOOKS_H */ serial_core.h 0000644 00000046754 14722070374 0007232 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/drivers/char/serial_core.h * * Copyright (C) 2000 Deep Blue Solutions Ltd. */ #ifndef LINUX_SERIAL_CORE_H #define LINUX_SERIAL_CORE_H #include <linux/bitops.h> #include <linux/compiler.h> #include <linux/console.h> #include <linux/interrupt.h> #include <linux/circ_buf.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/mutex.h> #include <linux/sysrq.h> #include <uapi/linux/serial_core.h> #ifdef CONFIG_SERIAL_CORE_CONSOLE #define uart_console(port) \ ((port)->cons && (port)->cons->index == (port)->line) #else #define uart_console(port) ({ (void)port; 0; }) #endif struct uart_port; struct serial_struct; struct device; /* * This structure describes all the operations that can be done on the * physical hardware. See Documentation/driver-api/serial/driver.rst for details. */ struct uart_ops { unsigned int (*tx_empty)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int mctrl); unsigned int (*get_mctrl)(struct uart_port *); void (*stop_tx)(struct uart_port *); void (*start_tx)(struct uart_port *); void (*throttle)(struct uart_port *); void (*unthrottle)(struct uart_port *); void (*send_xchar)(struct uart_port *, char ch); void (*stop_rx)(struct uart_port *); void (*enable_ms)(struct uart_port *); void (*break_ctl)(struct uart_port *, int ctl); int (*startup)(struct uart_port *); void (*shutdown)(struct uart_port *); void (*flush_buffer)(struct uart_port *); void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); void (*set_ldisc)(struct uart_port *, struct ktermios *); void (*pm)(struct uart_port *, unsigned int state, unsigned int oldstate); /* * Return a string describing the type of the port */ const char *(*type)(struct uart_port *); /* * Release IO and memory resources used by the port. * This includes iounmap if necessary. */ void (*release_port)(struct uart_port *); /* * Request IO and memory resources used by the port. * This includes iomapping the port if necessary. 
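 *
 * A minimal implementation might look like this (illustrative sketch;
 * "foo_uart" is a hypothetical resource name):
 *
 *	if (!request_mem_region(port->mapbase, port->mapsize, "foo_uart"))
 *		return -EBUSY;
 *	port->membase = ioremap(port->mapbase, port->mapsize);
 *	return port->membase ? 0 : -ENOMEM;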
*/ int (*request_port)(struct uart_port *); void (*config_port)(struct uart_port *, int); int (*verify_port)(struct uart_port *, struct serial_struct *); int (*ioctl)(struct uart_port *, unsigned int, unsigned long); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct uart_port *); void (*poll_put_char)(struct uart_port *, unsigned char); int (*poll_get_char)(struct uart_port *); #endif }; #define NO_POLL_CHAR 0x00ff0000 #define UART_CONFIG_TYPE (1 << 0) #define UART_CONFIG_IRQ (1 << 1) struct uart_icount { __u32 cts; __u32 dsr; __u32 rng; __u32 dcd; __u32 rx; __u32 tx; __u32 frame; __u32 overrun; __u32 parity; __u32 brk; __u32 buf_overrun; }; typedef unsigned int __bitwise upf_t; typedef unsigned int __bitwise upstat_t; struct uart_port { spinlock_t lock; /* port lock */ unsigned long iobase; /* in/out[bwl] */ unsigned char __iomem *membase; /* read/write[bwl] */ unsigned int (*serial_in)(struct uart_port *, int); void (*serial_out)(struct uart_port *, int, int); void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); void (*set_ldisc)(struct uart_port *, struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); unsigned int (*get_divisor)(struct uart_port *, unsigned int baud, unsigned int *frac); void (*set_divisor)(struct uart_port *, unsigned int baud, unsigned int quot, unsigned int quot_frac); int (*startup)(struct uart_port *port); void (*shutdown)(struct uart_port *port); void (*throttle)(struct uart_port *port); void (*unthrottle)(struct uart_port *port); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int state, unsigned int old); void (*handle_break)(struct uart_port *); int (*rs485_config)(struct uart_port *, struct serial_rs485 *rs485); int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *iso7816); unsigned int irq; /* irq number */ unsigned long irqflags; /* irq flags */ unsigned int uartclk; /* base uart clock */ unsigned int fifosize; /* tx fifo size */ unsigned char x_char; /* xon/xoff char */ unsigned char regshift; /* reg offset shift */ unsigned char iotype; /* io access style */ unsigned char quirks; /* internal quirks */ #define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */ #define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */ #define UPIO_MEM (SERIAL_IO_MEM) /* driver-specific */ #define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */ #define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */ #define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */ #define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */ #define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */ /* quirks must be updated while holding port mutex */ #define UPQ_NO_TXEN_TEST BIT(0) unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ struct uart_state *state; /* pointer to parent state */ struct uart_icount icount; /* statistics */ struct console *cons; /* struct console, if any */ #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ) unsigned long sysrq; /* sysrq timeout */ unsigned int sysrq_ch; /* char for sysrq */ #endif /* flags must be updated while holding port mutex */ upf_t flags; /* * These flags must be equivalent to the flags defined in * include/uapi/linux/tty_flags.h which are the userspace definitions * assigned from the serial_struct flags in uart_set_info() * [for bit definitions in the UPF_CHANGE_MASK] * * Bits [0..UPF_LAST_USER] are userspace 
defined/visible/changeable * The remaining bits are serial-core specific and not modifiable by * userspace. */ #define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ ) #define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ ) #define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ ) #define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ ) #define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ ) #define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ ) #define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ ) #define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ ) #define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ ) #define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ ) #define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ ) #define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ ) #define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ ) #define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ ) #define UPF_NO_THRE_TEST ((__force upf_t) (1 << 19)) /* Port has hardware-assisted h/w flow control */ #define UPF_AUTO_CTS ((__force upf_t) (1 << 20)) #define UPF_AUTO_RTS ((__force upf_t) (1 << 21)) #define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS)) /* Port has hardware-assisted s/w flow control */ #define UPF_SOFT_FLOW ((__force upf_t) (1 << 22)) #define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) #define UPF_SHARE_IRQ ((__force upf_t) (1 << 24)) #define UPF_EXAR_EFR ((__force upf_t) (1 << 25)) #define UPF_BUG_THRE ((__force upf_t) (1 << 26)) /* The exact UART type is known and should not be probed. */ #define UPF_FIXED_TYPE ((__force upf_t) (1 << 27)) #define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28)) #define UPF_FIXED_PORT ((__force upf_t) (1 << 29)) #define UPF_DEAD ((__force upf_t) (1 << 30)) #define UPF_IOREMAP ((__force upf_t) (1 << 31)) #define __UPF_CHANGE_MASK 0x17fff #define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK) #define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) #if __UPF_CHANGE_MASK > ASYNC_FLAGS #error Change mask not equivalent to userspace-visible bit defines #endif /* * Must hold termios_rwsem, port mutex and port lock to change; * can hold any one lock to read. 
*/ upstat_t status; #define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0)) #define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1)) #define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2)) #define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3)) #define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4)) #define UPSTAT_SYNC_FIFO ((__force upstat_t) (1 << 5)) int hw_stopped; /* sw-assisted CTS flow state */ unsigned int mctrl; /* current modem ctrl settings */ unsigned int timeout; /* character-based timeout */ unsigned int type; /* port type */ const struct uart_ops *ops; unsigned int custom_divisor; unsigned int line; /* port index */ unsigned int minor; resource_size_t mapbase; /* for ioremap */ resource_size_t mapsize; struct device *dev; /* parent device */ unsigned char hub6; /* this should be in the 8250 driver */ unsigned char suspended; unsigned char unused[2]; const char *name; /* port name */ struct attribute_group *attr_group; /* port specific attributes */ const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ struct serial_rs485 rs485; struct serial_iso7816 iso7816; void *private_data; /* generic platform data pointer */ }; /** * uart_port_lock - Lock the UART port * @up: Pointer to UART port structure */ static inline void uart_port_lock(struct uart_port *up) { spin_lock(&up->lock); } /** * uart_port_lock_irq - Lock the UART port and disable interrupts * @up: Pointer to UART port structure */ static inline void uart_port_lock_irq(struct uart_port *up) { spin_lock_irq(&up->lock); } /** * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts * @up: Pointer to UART port structure * @flags: Pointer to interrupt flags storage */ static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags) { spin_lock_irqsave(&up->lock, *flags); } /** * uart_port_trylock - Try to lock the UART port * @up: Pointer to UART port structure * * Returns: True if lock was acquired, false otherwise */ static inline bool uart_port_trylock(struct uart_port *up) { return spin_trylock(&up->lock); } /** * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts * @up: Pointer to UART port structure * @flags: Pointer to interrupt flags storage * * Returns: True if lock was acquired, false otherwise */ static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags) { return spin_trylock_irqsave(&up->lock, *flags); } /** * uart_port_unlock - Unlock the UART port * @up: Pointer to UART port structure */ static inline void uart_port_unlock(struct uart_port *up) { spin_unlock(&up->lock); } /** * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts * @up: Pointer to UART port structure */ static inline void uart_port_unlock_irq(struct uart_port *up) { spin_unlock_irq(&up->lock); } /** * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts * @up: Pointer to UART port structure * @flags: The saved interrupt flags for restore */ static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags) { spin_unlock_irqrestore(&up->lock, flags); } static inline int serial_port_in(struct uart_port *up, int offset) { return up->serial_in(up, offset); } static inline void serial_port_out(struct uart_port *up, int offset, int value) { up->serial_out(up, offset, value); } /** * enum uart_pm_state - power states for UARTs * @UART_PM_STATE_ON: UART is powered, up and operational * @UART_PM_STATE_OFF: UART is powered off * @UART_PM_STATE_UNDEFINED: sentinel */ enum 
uart_pm_state { UART_PM_STATE_ON = 0, UART_PM_STATE_OFF = 3, /* number taken from ACPI */ UART_PM_STATE_UNDEFINED, }; /* * This is the state information which is persistent across opens. */ struct uart_state { struct tty_port port; enum uart_pm_state pm_state; struct circ_buf xmit; atomic_t refcount; wait_queue_head_t remove_wait; struct uart_port *uart_port; }; #define UART_XMIT_SIZE PAGE_SIZE /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 /** * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars * @up: uart_port structure describing the port * @chars: number of characters sent * * This function advances the tail of circular xmit buffer by the number of * @chars transmitted and handles accounting of transmitted bytes (into * @up's icount.tx). */ static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars) { struct circ_buf *xmit = &up->state->xmit; xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1); up->icount.tx += chars; } struct module; struct tty_driver; struct uart_driver { struct module *owner; const char *driver_name; const char *dev_name; int major; int minor; int nr; struct console *cons; /* * these are private; the low level driver should not * touch these; they should be initialised to NULL */ struct uart_state *state; struct tty_driver *tty_driver; }; void uart_write_wakeup(struct uart_port *port); /* * Baud rate helpers. */ void uart_update_timeout(struct uart_port *port, unsigned int cflag, unsigned int baud); unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, struct ktermios *old, unsigned int min, unsigned int max); unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud); /* Base timer interval for polling */ static inline int uart_poll_timeout(struct uart_port *port) { int timeout = port->timeout; return timeout > 6 ? (timeout / 2 - 2) : 1; } /* * Console helpers. 
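 *
 * Earlycon registration sketch (illustrative, not upstream text; the
 * foo_* names and the "acme,foo-uart" compatible are hypothetical). The
 * setup hook typically just installs a polled write routine on the
 * console described by OF_EARLYCON_DECLARE() below:
 *
 *	static void foo_early_write(struct console *con, const char *s,
 *				    unsigned int n)
 *	{
 *		... poll the FIFO and write n bytes ...
 *	}
 *
 *	static int __init foo_early_setup(struct earlycon_device *device,
 *					  const char *options)
 *	{
 *		device->con->write = foo_early_write;
 *		return 0;
 *	}
 *	OF_EARLYCON_DECLARE(foo, "acme,foo-uart", foo_early_setup);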
*/ struct earlycon_device { struct console *con; struct uart_port port; char options[16]; /* e.g., 115200n8 */ unsigned int baud; }; struct earlycon_id { char name[15]; char name_term; /* In case compiler didn't '\0' term name */ char compatible[128]; int (*setup)(struct earlycon_device *, const char *options); }; extern const struct earlycon_id *__earlycon_table[]; extern const struct earlycon_id *__earlycon_table_end[]; #if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE) #define EARLYCON_USED_OR_UNUSED __used #else #define EARLYCON_USED_OR_UNUSED __maybe_unused #endif #define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \ static const struct earlycon_id unique_id \ EARLYCON_USED_OR_UNUSED __initconst \ = { .name = __stringify(_name), \ .compatible = compat, \ .setup = fn }; \ static const struct earlycon_id EARLYCON_USED_OR_UNUSED \ __section(__earlycon_table) \ * const __PASTE(__p, unique_id) = &unique_id #define OF_EARLYCON_DECLARE(_name, compat, fn) \ _OF_EARLYCON_DECLARE(_name, compat, fn, \ __UNIQUE_ID(__earlycon_##_name)) #define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn) extern int of_setup_earlycon(const struct earlycon_id *match, unsigned long node, const char *options); #ifdef CONFIG_SERIAL_EARLYCON extern bool earlycon_acpi_spcr_enable __initdata; int setup_earlycon(char *buf); #else static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED; static inline int setup_earlycon(char *buf) { return 0; } #endif struct uart_port *uart_get_console(struct uart_port *ports, int nr, struct console *c); int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, char **options); void uart_parse_options(const char *options, int *baud, int *parity, int *bits, int *flow); int uart_set_options(struct uart_port *port, struct console *co, int baud, int parity, int bits, int flow); struct tty_driver *uart_console_device(struct console *co, int *index); void uart_console_write(struct uart_port *port, const char *s, unsigned int count, void (*putchar)(struct uart_port *, int)); /* * Port/driver registration/removal */ int uart_register_driver(struct uart_driver *uart); void uart_unregister_driver(struct uart_driver *uart); int uart_add_one_port(struct uart_driver *reg, struct uart_port *port); int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port); int uart_match_port(struct uart_port *port1, struct uart_port *port2); /* * Power Management */ int uart_suspend_port(struct uart_driver *reg, struct uart_port *port); int uart_resume_port(struct uart_driver *reg, struct uart_port *port); #define uart_circ_empty(circ) ((circ)->head == (circ)->tail) #define uart_circ_clear(circ) ((circ)->head = (circ)->tail = 0) #define uart_circ_chars_pending(circ) \ (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE)) #define uart_circ_chars_free(circ) \ (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) static inline int uart_tx_stopped(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; if ((tty && tty->stopped) || port->hw_stopped) return 1; return 0; } static inline bool uart_cts_enabled(struct uart_port *uport) { return !!(uport->status & UPSTAT_CTS_ENABLE); } static inline bool uart_softcts_mode(struct uart_port *uport) { upstat_t mask = UPSTAT_CTS_ENABLE | UPSTAT_AUTOCTS; return ((uport->status & mask) == UPSTAT_CTS_ENABLE); } /* * The following are helper functions for the low level drivers. 
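 *
 * Typical RX interrupt usage (illustrative sketch; the FOO_* register and
 * status names are hypothetical):
 *
 *	ch = serial_port_in(port, FOO_RX_REG);
 *	if (uart_handle_sysrq_char(port, ch))
 *		return;
 *	uart_insert_char(port, status, FOO_STATUS_OVERRUN, ch, TTY_NORMAL);
 *	tty_flip_buffer_push(&port->state->port);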
*/ extern void uart_handle_dcd_change(struct uart_port *uport, unsigned int status); extern void uart_handle_cts_change(struct uart_port *uport, unsigned int status); extern void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, unsigned int ch, unsigned int flag); #if defined(SUPPORT_SYSRQ) && defined(CONFIG_MAGIC_SYSRQ_SERIAL) static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { if (port->sysrq) { if (ch && time_before(jiffies, port->sysrq)) { handle_sysrq(ch); port->sysrq = 0; return 1; } port->sysrq = 0; } return 0; } static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) { if (port->sysrq) { if (ch && time_before(jiffies, port->sysrq)) { port->sysrq_ch = ch; port->sysrq = 0; return 1; } port->sysrq = 0; } return 0; } static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) { int sysrq_ch; sysrq_ch = port->sysrq_ch; port->sysrq_ch = 0; spin_unlock_irqrestore(&port->lock, irqflags); if (sysrq_ch) handle_sysrq(sysrq_ch); } #else static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; } static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; } static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) { spin_unlock_irqrestore(&port->lock, irqflags); } #endif /* * We do the SysRQ and SAK checking like this... */ static inline int uart_handle_break(struct uart_port *port) { struct uart_state *state = port->state; if (port->handle_break) port->handle_break(port); #ifdef SUPPORT_SYSRQ if (port->cons && port->cons->index == port->line) { if (!port->sysrq) { port->sysrq = jiffies + HZ*5; return 1; } port->sysrq = 0; } #endif if (port->flags & UPF_SAK) do_SAK(state->port.tty); return 0; } /* * UART_ENABLE_MS - determine if port should enable modem status irqs */ #define UART_ENABLE_MS(port,cflag) ((port)->flags & UPF_HARDPPS_CD || \ (cflag) & CRTSCTS || \ !((cflag) & CLOCAL)) void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf); #endif /* LINUX_SERIAL_CORE_H */ bch.h 0000644 00000004005 14722070374 0005456 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic binary BCH encoding/decoding library * * Copyright © 2011 Parrot S.A. * * Author: Ivan Djelic <ivan.djelic@parrot.com> * * Description: * * This library provides runtime configurable encoding/decoding of binary * Bose-Chaudhuri-Hocquenghem (BCH) codes. */ #ifndef _BCH_H #define _BCH_H #include <linux/types.h> /** * struct bch_control - BCH control structure * @m: Galois field order * @n: maximum codeword size in bits (= 2^m-1) * @t: error correction capability in bits * @ecc_bits: ecc exact size in bits, i.e. 
generator polynomial degree (<=m*t) * @ecc_bytes: ecc max size (m*t bits) in bytes * @a_pow_tab: Galois field GF(2^m) exponentiation lookup table * @a_log_tab: Galois field GF(2^m) log lookup table * @mod8_tab: remainder generator polynomial lookup tables * @ecc_buf: ecc parity words buffer * @ecc_buf2: ecc parity words buffer * @xi_tab: GF(2^m) base for solving degree 2 polynomial roots * @syn: syndrome buffer * @cache: log-based polynomial representation buffer * @elp: error locator polynomial * @poly_2t: temporary polynomials of degree 2t */ struct bch_control { unsigned int m; unsigned int n; unsigned int t; unsigned int ecc_bits; unsigned int ecc_bytes; /* private: */ uint16_t *a_pow_tab; uint16_t *a_log_tab; uint32_t *mod8_tab; uint32_t *ecc_buf; uint32_t *ecc_buf2; unsigned int *xi_tab; unsigned int *syn; int *cache; struct gf_poly *elp; struct gf_poly *poly_2t[4]; }; struct bch_control *init_bch(int m, int t, unsigned int prim_poly); void free_bch(struct bch_control *bch); void encode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc); int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc); #endif /* _BCH_H */ netfilter_defs.h 0000644 00000000362 14722070374 0007721 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_NETFILTER_CORE_H_ #define __LINUX_NETFILTER_CORE_H_ #include <uapi/linux/netfilter.h> /* in/out/forward only */ #define NF_ARP_NUMHOOKS 3 #define NF_MAX_HOOKS NF_INET_NUMHOOKS #endif asn1_ber_bytecode.h 0000644 00000005020 14722070374 0010270 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* ASN.1 BER/DER/CER parsing state machine internal definitions * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_ASN1_BER_BYTECODE_H #define _LINUX_ASN1_BER_BYTECODE_H #ifdef __KERNEL__ #include <linux/types.h> #endif #include <linux/asn1.h> typedef int (*asn1_action_t)(void *context, size_t hdrlen, /* In case of ANY type */ unsigned char tag, /* In case of ANY type */ const void *value, size_t vlen); struct asn1_decoder { const unsigned char *machine; size_t machlen; const asn1_action_t *actions; }; enum asn1_opcode { /* The tag-matching ops come first and the odd-numbered slots * are for OR_SKIP ops. 
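 * For example, ASN1_OP_MATCH_ACT_OR_SKIP (0x03) is ASN1_OP_MATCH (0x00)
 * with the ASN1_OP_MATCH__ACT (0x02) and ASN1_OP_MATCH__SKIP (0x01) bits
 * set.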
*/ #define ASN1_OP_MATCH__SKIP 0x01 #define ASN1_OP_MATCH__ACT 0x02 #define ASN1_OP_MATCH__JUMP 0x04 #define ASN1_OP_MATCH__ANY 0x08 #define ASN1_OP_MATCH__COND 0x10 ASN1_OP_MATCH = 0x00, ASN1_OP_MATCH_OR_SKIP = 0x01, ASN1_OP_MATCH_ACT = 0x02, ASN1_OP_MATCH_ACT_OR_SKIP = 0x03, ASN1_OP_MATCH_JUMP = 0x04, ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05, ASN1_OP_MATCH_ANY = 0x08, ASN1_OP_MATCH_ANY_OR_SKIP = 0x09, ASN1_OP_MATCH_ANY_ACT = 0x0a, ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b, /* Everything before here matches unconditionally */ ASN1_OP_COND_MATCH_OR_SKIP = 0x11, ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13, ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15, ASN1_OP_COND_MATCH_ANY = 0x18, ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19, ASN1_OP_COND_MATCH_ANY_ACT = 0x1a, ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b, /* Everything before here will want a tag from the data */ #define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP /* These are here to help fill up space */ ASN1_OP_COND_FAIL = 0x1c, ASN1_OP_COMPLETE = 0x1d, ASN1_OP_ACT = 0x1e, ASN1_OP_MAYBE_ACT = 0x1f, /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */ ASN1_OP_END_SEQ = 0x20, ASN1_OP_END_SET = 0x21, ASN1_OP_END_SEQ_OF = 0x22, ASN1_OP_END_SET_OF = 0x23, ASN1_OP_END_SEQ_ACT = 0x24, ASN1_OP_END_SET_ACT = 0x25, ASN1_OP_END_SEQ_OF_ACT = 0x26, ASN1_OP_END_SET_OF_ACT = 0x27, #define ASN1_OP_END__SET 0x01 #define ASN1_OP_END__OF 0x02 #define ASN1_OP_END__ACT 0x04 ASN1_OP_RETURN = 0x28, ASN1_OP__NR }; #define _tag(CLASS, CP, TAG) ((ASN1_##CLASS << 6) | (ASN1_##CP << 5) | ASN1_##TAG) #define _tagn(CLASS, CP, TAG) ((ASN1_##CLASS << 6) | (ASN1_##CP << 5) | TAG) #define _jump_target(N) (N) #define _action(N) (N) #endif /* _LINUX_ASN1_BER_BYTECODE_H */ irq_poll.h 0000644 00000001077 14722070374 0006551 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef IRQ_POLL_H #define IRQ_POLL_H struct irq_poll; typedef int (irq_poll_fn)(struct irq_poll *, int); struct irq_poll { struct list_head list; unsigned long state; int weight; irq_poll_fn *poll; }; enum { IRQ_POLL_F_SCHED = 0, IRQ_POLL_F_DISABLE = 1, }; extern void irq_poll_sched(struct irq_poll *); extern void irq_poll_init(struct irq_poll *, int, irq_poll_fn *); extern void irq_poll_complete(struct irq_poll *); extern void irq_poll_enable(struct irq_poll *); extern void irq_poll_disable(struct irq_poll *); #endif dqblk_v2.h 0000644 00000000626 14722070374 0006433 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for vfsv0 quota format */ #ifndef _LINUX_DQBLK_V2_H #define _LINUX_DQBLK_V2_H #include <linux/dqblk_qtree.h> /* Numbers of blocks needed for updates */ #define V2_INIT_ALLOC QTREE_INIT_ALLOC #define V2_INIT_REWRITE QTREE_INIT_REWRITE #define V2_DEL_ALLOC QTREE_DEL_ALLOC #define V2_DEL_REWRITE QTREE_DEL_REWRITE #endif /* _LINUX_DQBLK_V2_H */ i2c-algo-bit.h 0000644 00000002634 14722070374 0007101 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * i2c-algo-bit.h: i2c driver algorithms for bit-shift adapters * * Copyright (C) 1995-99 Simon G. Vogl * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even * Frodo Looijaard <frodol@dds.nl> */ #ifndef _LINUX_I2C_ALGO_BIT_H #define _LINUX_I2C_ALGO_BIT_H #include <linux/i2c.h> /* --- Defines for bit-adapters --------------------------------------- */ /* * This struct contains the hw-dependent functions of bit-style adapters to * manipulate the line states, and to init any hw-specific features. This is * only used if you have more than one hw-type of adapter running. 
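 *
 * Wiring sketch (illustrative, not upstream text; the foo_* helpers are
 * hypothetical GPIO accessors):
 *
 *	static void foo_setsda(void *data, int state) { ... }
 *	static void foo_setscl(void *data, int state) { ... }
 *	static int  foo_getsda(void *data) { ... }
 *
 *	static struct i2c_algo_bit_data foo_bit_data = {
 *		.setsda	 = foo_setsda,
 *		.setscl	 = foo_setscl,
 *		.getsda	 = foo_getsda,
 *		.udelay	 = 5,
 *		.timeout = HZ / 10,
 *	};
 *
 *	foo_adapter.algo_data = &foo_bit_data;
 *	i2c_bit_add_bus(&foo_adapter);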
*/ struct i2c_algo_bit_data { void *data; /* private data for lowlevel routines */ void (*setsda) (void *data, int state); void (*setscl) (void *data, int state); int (*getsda) (void *data); int (*getscl) (void *data); int (*pre_xfer) (struct i2c_adapter *); void (*post_xfer) (struct i2c_adapter *); /* local settings */ int udelay; /* half clock cycle time in us, minimum 2 us for fast-mode I2C, minimum 5 us for standard-mode I2C and SMBus, maximum 50 us for SMBus */ int timeout; /* in jiffies */ bool can_do_atomic; /* callbacks don't sleep, we can be atomic */ }; int i2c_bit_add_bus(struct i2c_adapter *); int i2c_bit_add_numbered_bus(struct i2c_adapter *); extern const struct i2c_algorithm i2c_bit_algo; #endif /* _LINUX_I2C_ALGO_BIT_H */ reboot-mode.h 0000644 00000001130 14722070374 0007132 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __REBOOT_MODE_H__ #define __REBOOT_MODE_H__ struct reboot_mode_driver { struct device *dev; struct list_head head; int (*write)(struct reboot_mode_driver *reboot, unsigned int magic); struct notifier_block reboot_notifier; }; int reboot_mode_register(struct reboot_mode_driver *reboot); int reboot_mode_unregister(struct reboot_mode_driver *reboot); int devm_reboot_mode_register(struct device *dev, struct reboot_mode_driver *reboot); void devm_reboot_mode_unregister(struct device *dev, struct reboot_mode_driver *reboot); #endif scpi_protocol.h 0000644 00000004054 14722070374 0007605 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * SCPI Message Protocol driver header * * Copyright (C) 2014 ARM Ltd. */ #include <linux/types.h> struct scpi_opp { u32 freq; u32 m_volt; } __packed; struct scpi_dvfs_info { unsigned int count; unsigned int latency; /* in nanoseconds */ struct scpi_opp *opps; }; enum scpi_sensor_class { TEMPERATURE, VOLTAGE, CURRENT, POWER, ENERGY, }; struct scpi_sensor_info { u16 sensor_id; u8 class; u8 trigger_type; char name[20]; } __packed; /** * struct scpi_ops - represents the various operations provided * by SCP through SCPI message protocol * @get_version: returns the major and minor revision on the SCPI * message protocol * @clk_get_range: gets clock range limit(min - max in Hz) * @clk_get_val: gets clock value(in Hz) * @clk_set_val: sets the clock value, setting to 0 will disable the * clock (if supported) * @dvfs_get_idx: gets the Operating Point of the given power domain. * OPP is an index to the list return by @dvfs_get_info * @dvfs_set_idx: sets the Operating Point of the given power domain. * OPP is an index to the list return by @dvfs_get_info * @dvfs_get_info: returns the DVFS capabilities of the given power * domain. 
It includes the OPP list and the latency information */ struct scpi_ops { u32 (*get_version)(void); int (*clk_get_range)(u16, unsigned long *, unsigned long *); unsigned long (*clk_get_val)(u16); int (*clk_set_val)(u16, unsigned long); int (*dvfs_get_idx)(u8); int (*dvfs_set_idx)(u8, u8); struct scpi_dvfs_info *(*dvfs_get_info)(u8); int (*device_domain_id)(struct device *); int (*get_transition_latency)(struct device *); int (*add_opps_to_device)(struct device *); int (*sensor_get_capability)(u16 *sensors); int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *); int (*sensor_get_value)(u16, u64 *); int (*device_get_power_state)(u16); int (*device_set_power_state)(u16, u8); }; #if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL) struct scpi_ops *get_scpi_ops(void); #else static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } #endif cpuset.h 0000644 00000016446 14722070374 0006241 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CPUSET_H #define _LINUX_CPUSET_H /* * cpuset interface * * Copyright (C) 2003 BULL SA * Copyright (C) 2004-2006 Silicon Graphics, Inc. * */ #include <linux/sched.h> #include <linux/sched/topology.h> #include <linux/sched/task.h> #include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/mm.h> #include <linux/jump_label.h> #ifdef CONFIG_CPUSETS /* * Static branch rewrites can happen in an arbitrary order for a given * key. In code paths where we need to loop with read_mems_allowed_begin() and * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need * to ensure that begin() always gets rewritten before retry() in the * disabled -> enabled transition. If not, then if local irqs are disabled * around the loop, we can deadlock since retry() would always be * comparing the latest value of the mems_allowed seqcount against 0 as * begin() still would see cpusets_enabled() as false. The enabled -> disabled * transition should happen in reverse order for the same reasons (want to stop * looking at real value of mems_allowed.sequence in retry() first). 
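 *
 * The retry loop itself follows this pattern (illustrative sketch of the
 * idiom used by page allocation paths):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = ...allocate under current->mems_allowed...;
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));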
*/ extern struct static_key_false cpusets_pre_enable_key; extern struct static_key_false cpusets_enabled_key; static inline bool cpusets_enabled(void) { return static_branch_unlikely(&cpusets_enabled_key); } static inline void cpuset_inc(void) { static_branch_inc_cpuslocked(&cpusets_pre_enable_key); static_branch_inc_cpuslocked(&cpusets_enabled_key); } static inline void cpuset_dec(void) { static_branch_dec_cpuslocked(&cpusets_enabled_key); static_branch_dec_cpuslocked(&cpusets_pre_enable_key); } extern int cpuset_init(void); extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); extern void cpuset_update_active_cpus(void); extern void cpuset_wait_for_hotplug(void); extern void cpuset_read_lock(void); extern void cpuset_read_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); extern void cpuset_cpus_allowed_fallback(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask); static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) { if (cpusets_enabled()) return __cpuset_node_allowed(node, gfp_mask); return true; } static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { return __cpuset_node_allowed(zone_to_nid(z), gfp_mask); } static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { if (cpusets_enabled()) return __cpuset_zone_allowed(z, gfp_mask); return true; } extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, const struct task_struct *tsk2); #define cpuset_memory_pressure_bump() \ do { \ if (cpuset_memory_pressure_enabled) \ __cpuset_memory_pressure_bump(); \ } while (0) extern int cpuset_memory_pressure_enabled; extern void __cpuset_memory_pressure_bump(void); extern void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task); extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); extern int cpuset_mem_spread_node(void); extern int cpuset_slab_spread_node(void); static inline int cpuset_do_page_mem_spread(void) { return task_spread_page(current); } static inline int cpuset_do_slab_mem_spread(void) { return task_spread_slab(current); } extern bool current_cpuset_is_being_rebound(void); extern void rebuild_sched_domains(void); extern void cpuset_print_current_mems_allowed(void); /* * read_mems_allowed_begin is required when making decisions involving * mems_allowed such as during page allocation. mems_allowed can be updated in * parallel and depending on the new value an operation can fail potentially * causing process failure. A retry loop with read_mems_allowed_begin and * read_mems_allowed_retry prevents these artificial failures. */ static inline unsigned int read_mems_allowed_begin(void) { if (!static_branch_unlikely(&cpusets_pre_enable_key)) return 0; return read_seqcount_begin(¤t->mems_allowed_seq); } /* * If this returns true, the operation that took place after * read_mems_allowed_begin may have failed artificially due to a concurrent * update of mems_allowed. It is up to the caller to retry the operation if * appropriate. 
*/ static inline bool read_mems_allowed_retry(unsigned int seq) { if (!static_branch_unlikely(&cpusets_enabled_key)) return false; return read_seqcount_retry(¤t->mems_allowed_seq, seq); } static inline void set_mems_allowed(nodemask_t nodemask) { unsigned long flags; task_lock(current); local_irq_save(flags); write_seqcount_begin(¤t->mems_allowed_seq); current->mems_allowed = nodemask; write_seqcount_end(¤t->mems_allowed_seq); local_irq_restore(flags); task_unlock(current); } #else /* !CONFIG_CPUSETS */ static inline bool cpusets_enabled(void) { return false; } static inline int cpuset_init(void) { return 0; } static inline void cpuset_init_smp(void) {} static inline void cpuset_force_rebuild(void) { } static inline void cpuset_update_active_cpus(void) { partition_sched_domains(1, NULL, NULL); } static inline void cpuset_wait_for_hotplug(void) { } static inline void cpuset_read_lock(void) { } static inline void cpuset_read_unlock(void) { } static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) { cpumask_copy(mask, cpu_possible_mask); } static inline void cpuset_cpus_allowed_fallback(struct task_struct *p) { } static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) { return node_possible_map; } #define cpuset_current_mems_allowed (node_states[N_MEMORY]) static inline void cpuset_init_current_mems_allowed(void) {} static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) { return 1; } static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) { return true; } static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { return true; } static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { return true; } static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, const struct task_struct *tsk2) { return 1; } static inline void cpuset_memory_pressure_bump(void) {} static inline void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) { } static inline int cpuset_mem_spread_node(void) { return 0; } static inline int cpuset_slab_spread_node(void) { return 0; } static inline int cpuset_do_page_mem_spread(void) { return 0; } static inline int cpuset_do_slab_mem_spread(void) { return 0; } static inline bool current_cpuset_is_being_rebound(void) { return false; } static inline void rebuild_sched_domains(void) { partition_sched_domains(1, NULL, NULL); } static inline void cpuset_print_current_mems_allowed(void) { } static inline void set_mems_allowed(nodemask_t nodemask) { } static inline unsigned int read_mems_allowed_begin(void) { return 0; } static inline bool read_mems_allowed_retry(unsigned int seq) { return false; } #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ lsm_audit.h 0000644 00000005350 14722070374 0006707 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Common LSM logging functions * Heavily borrowed from selinux/avc.h * * Author : Etienne BASSET <etienne.basset@ensta.org> * * All credits to : Stephen Smalley, <sds@tycho.nsa.gov> * All BUGS to : Etienne BASSET <etienne.basset@ensta.org> */ #ifndef _LSM_COMMON_LOGGING_ #define _LSM_COMMON_LOGGING_ #include <linux/stddef.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/kdev_t.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/audit.h> #include <linux/in6.h> #include <linux/path.h> #include <linux/key.h> #include <linux/skbuff.h> #include <rdma/ib_verbs.h> struct lsm_network_audit { int netif; struct sock *sk; u16 family; __be16 dport; __be16 
sport; union { struct { __be32 daddr; __be32 saddr; } v4; struct { struct in6_addr daddr; struct in6_addr saddr; } v6; } fam; }; struct lsm_ioctlop_audit { struct path path; u16 cmd; }; struct lsm_ibpkey_audit { u64 subnet_prefix; u16 pkey; }; struct lsm_ibendport_audit { char dev_name[IB_DEVICE_NAME_MAX]; u8 port; }; /* Auxiliary data to use in generating the audit record. */ struct common_audit_data { char type; #define LSM_AUDIT_DATA_PATH 1 #define LSM_AUDIT_DATA_NET 2 #define LSM_AUDIT_DATA_CAP 3 #define LSM_AUDIT_DATA_IPC 4 #define LSM_AUDIT_DATA_TASK 5 #define LSM_AUDIT_DATA_KEY 6 #define LSM_AUDIT_DATA_NONE 7 #define LSM_AUDIT_DATA_KMOD 8 #define LSM_AUDIT_DATA_INODE 9 #define LSM_AUDIT_DATA_DENTRY 10 #define LSM_AUDIT_DATA_IOCTL_OP 11 #define LSM_AUDIT_DATA_FILE 12 #define LSM_AUDIT_DATA_IBPKEY 13 #define LSM_AUDIT_DATA_IBENDPORT 14 union { struct path path; struct dentry *dentry; struct inode *inode; struct lsm_network_audit *net; int cap; int ipc_id; struct task_struct *tsk; #ifdef CONFIG_KEYS struct { key_serial_t key; char *key_desc; } key_struct; #endif char *kmod_name; struct lsm_ioctlop_audit *op; struct file *file; struct lsm_ibpkey_audit *ibpkey; struct lsm_ibendport_audit *ibendport; } u; /* this union contains LSM specific data */ union { #ifdef CONFIG_SECURITY_SMACK struct smack_audit_data *smack_audit_data; #endif #ifdef CONFIG_SECURITY_SELINUX struct selinux_audit_data *selinux_audit_data; #endif #ifdef CONFIG_SECURITY_APPARMOR struct apparmor_audit_data *apparmor_audit_data; #endif }; /* per LSM data pointer union */ }; #define v4info fam.v4 #define v6info fam.v6 int ipv4_skb_to_auditdata(struct sk_buff *skb, struct common_audit_data *ad, u8 *proto); int ipv6_skb_to_auditdata(struct sk_buff *skb, struct common_audit_data *ad, u8 *proto); void common_lsm_audit(struct common_audit_data *a, void (*pre_audit)(struct audit_buffer *, void *), void (*post_audit)(struct audit_buffer *, void *)); #endif kprobes.h 0000644 00000032505 14722070374 0006375 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _LINUX_KPROBES_H #define _LINUX_KPROBES_H /* * Kernel Probes (KProbes) * include/linux/kprobes.h * * Copyright (C) IBM Corporation, 2002, 2004 * * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel * Probes initial implementation ( includes suggestions from * Rusty Russell). * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes * interface to access function arguments. * 2005-May Hien Nguyen <hien@us.ibm.com> and Jim Keniston * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi * <prasanna@in.ibm.com> added function-return probes. 
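 *
 * Registration sketch (illustrative, not upstream text; foo_pre and the
 * probed symbol are hypothetical):
 *
 *	static int foo_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("%s hit\n", p->symbol_name);
 *		return 0;
 *	}
 *
 *	static struct kprobe foo_probe = {
 *		.symbol_name	= "do_sys_open",
 *		.pre_handler	= foo_pre,
 *	};
 *
 *	register_kprobe(&foo_probe);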
*/ #include <linux/compiler.h> #include <linux/linkage.h> #include <linux/list.h> #include <linux/notifier.h> #include <linux/smp.h> #include <linux/bug.h> #include <linux/percpu.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/mutex.h> #include <linux/ftrace.h> #include <asm/kprobes.h> #ifdef CONFIG_KPROBES /* kprobe_status settings */ #define KPROBE_HIT_ACTIVE 0x00000001 #define KPROBE_HIT_SS 0x00000002 #define KPROBE_REENTER 0x00000004 #define KPROBE_HIT_SSDONE 0x00000008 #else /* CONFIG_KPROBES */ #include <asm-generic/kprobes.h> typedef int kprobe_opcode_t; struct arch_specific_insn { int dummy; }; #endif /* CONFIG_KPROBES */ struct kprobe; struct pt_regs; struct kretprobe; struct kretprobe_instance; typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *); typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, unsigned long flags); typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, int trapnr); typedef int (*kretprobe_handler_t) (struct kretprobe_instance *, struct pt_regs *); struct kprobe { struct hlist_node hlist; /* list of kprobes for multi-handler support */ struct list_head list; /* count the number of times this probe was temporarily disarmed */ unsigned long nmissed; /* location of the probe point */ kprobe_opcode_t *addr; /* Allow user to indicate symbol name of the probe point */ const char *symbol_name; /* Offset into the symbol */ unsigned int offset; /* Called before addr is executed. */ kprobe_pre_handler_t pre_handler; /* Called after addr is executed, unless... */ kprobe_post_handler_t post_handler; /* * ... called if executing addr causes a fault (e.g. page fault). * Return 1 if it handled the fault, otherwise the kernel will see it. */ kprobe_fault_handler_t fault_handler; /* Saved opcode (which has been replaced with breakpoint) */ kprobe_opcode_t opcode; /* copy of the original instruction */ struct arch_specific_insn ainsn; /* * Indicates various status flags. * Protected by kprobe_mutex after this kprobe is registered. */ u32 flags; }; /* Kprobe status flags */ #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ #define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */ #define KPROBE_FLAG_OPTIMIZED 4 /* * probe is really optimized. * NOTE: * this flag is only for optimized_kprobe. */ #define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */ /* Has this kprobe gone? */ static inline int kprobe_gone(struct kprobe *p) { return p->flags & KPROBE_FLAG_GONE; } /* Is this kprobe disabled? */ static inline int kprobe_disabled(struct kprobe *p) { return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE); } /* Is this kprobe really running the optimized path? */ static inline int kprobe_optimized(struct kprobe *p) { return p->flags & KPROBE_FLAG_OPTIMIZED; } /* Does this kprobe use ftrace? */ static inline int kprobe_ftrace(struct kprobe *p) { return p->flags & KPROBE_FLAG_FTRACE; } /* * Function-return probe - * Note: * User needs to provide a handler function, and initialize maxactive. * maxactive - The maximum number of instances of the probed function that * can be active concurrently. * nmissed - tracks the number of times the probed function's return was * ignored, due to maxactive being too low.
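 *
 * Sketch (illustrative, not upstream text; foo_ret and the probed symbol
 * are hypothetical):
 *
 *	static int foo_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("return to %pS\n", ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe foo_rp = {
 *		.kp.symbol_name	= "do_sys_open",
 *		.handler	= foo_ret,
 *		.maxactive	= 20,
 *	};
 *
 *	register_kretprobe(&foo_rp);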
* */ struct kretprobe { struct kprobe kp; kretprobe_handler_t handler; kretprobe_handler_t entry_handler; int maxactive; int nmissed; size_t data_size; struct hlist_head free_instances; raw_spinlock_t lock; }; #define KRETPROBE_MAX_DATA_SIZE 4096 struct kretprobe_instance { struct hlist_node hlist; struct kretprobe *rp; kprobe_opcode_t *ret_addr; struct task_struct *task; void *fp; char data[0]; }; struct kretprobe_blackpoint { const char *name; void *addr; }; struct kprobe_blacklist_entry { struct list_head list; unsigned long start_addr; unsigned long end_addr; }; #ifdef CONFIG_KPROBES DECLARE_PER_CPU(struct kprobe *, current_kprobe); DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); /* * For #ifdef avoidance: */ static inline int kprobes_built_in(void) { return 1; } #ifdef CONFIG_KRETPROBES extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs); extern int arch_trampoline_kprobe(struct kprobe *p); #else /* CONFIG_KRETPROBES */ static inline void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { } static inline int arch_trampoline_kprobe(struct kprobe *p) { return 0; } #endif /* CONFIG_KRETPROBES */ extern struct kretprobe_blackpoint kretprobe_blacklist[]; static inline void kretprobe_assert(struct kretprobe_instance *ri, unsigned long orig_ret_address, unsigned long trampoline_address) { if (!orig_ret_address || (orig_ret_address == trampoline_address)) { printk("kretprobe BUG!: Processing kretprobe %p @ %p\n", ri->rp, ri->rp->kp.addr); BUG(); } } #ifdef CONFIG_KPROBES_SANITY_TEST extern int init_test_probes(void); #else static inline int init_test_probes(void) { return 0; } #endif /* CONFIG_KPROBES_SANITY_TEST */ extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); extern int arch_init_kprobes(void); extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); extern int arch_populate_kprobe_blacklist(void); extern bool arch_kprobe_on_func_entry(unsigned long offset); extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); extern bool within_kprobe_blacklist(unsigned long addr); extern int kprobe_add_ksym_blacklist(unsigned long entry); extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end); struct kprobe_insn_cache { struct mutex mutex; void *(*alloc)(void); /* allocate insn page */ void (*free)(void *); /* free insn page */ struct list_head pages; /* list of kprobe_insn_page */ size_t insn_size; /* size of instruction slot */ int nr_garbage; }; #ifdef __ARCH_WANT_KPROBES_INSN_SLOT extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c); extern void __free_insn_slot(struct kprobe_insn_cache *c, kprobe_opcode_t *slot, int dirty); /* sleep-less address checking routine */ extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr); #define DEFINE_INSN_CACHE_OPS(__name) \ extern struct kprobe_insn_cache kprobe_##__name##_slots; \ \ static inline kprobe_opcode_t *get_##__name##_slot(void) \ { \ return __get_insn_slot(&kprobe_##__name##_slots); \ } \ \ static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\ { \ __free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \ } \ \ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ { \ return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \ } #else /*
__ARCH_WANT_KPROBES_INSN_SLOT */ #define DEFINE_INSN_CACHE_OPS(__name) \ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ { \ return 0; \ } #endif DEFINE_INSN_CACHE_OPS(insn); #ifdef CONFIG_OPTPROBES /* * Internal structure for direct jump optimized probe */ struct optimized_kprobe { struct kprobe kp; struct list_head list; /* list for optimizing queue */ struct arch_optimized_insn optinsn; }; /* Architecture dependent functions for direct jump optimization */ extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn); extern int arch_check_optimized_kprobe(struct optimized_kprobe *op); extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig); extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op); extern void arch_optimize_kprobes(struct list_head *oplist); extern void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list); extern void arch_unoptimize_kprobe(struct optimized_kprobe *op); extern int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr); extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs); DEFINE_INSN_CACHE_OPS(optinsn); #ifdef CONFIG_SYSCTL extern int sysctl_kprobes_optimization; extern int proc_kprobes_optimization_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); #endif extern void wait_for_kprobe_optimizer(void); bool optprobe_queued_unopt(struct optimized_kprobe *op); bool kprobe_disarmed(struct kprobe *p); #else static inline void wait_for_kprobe_optimizer(void) { } #endif /* CONFIG_OPTPROBES */ #ifdef CONFIG_KPROBES_ON_FTRACE extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct pt_regs *regs); extern int arch_prepare_kprobe_ftrace(struct kprobe *p); #endif int arch_check_ftrace_location(struct kprobe *p); /* Get the kprobe at this addr (if any) - called with preemption disabled */ struct kprobe *get_kprobe(void *addr); void kretprobe_hash_lock(struct task_struct *tsk, struct hlist_head **head, unsigned long *flags); void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags); struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); /* kprobe_running() will just return the current_kprobe on this CPU */ static inline struct kprobe *kprobe_running(void) { return (__this_cpu_read(current_kprobe)); } static inline void reset_current_kprobe(void) { __this_cpu_write(current_kprobe, NULL); } static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) { return this_cpu_ptr(&kprobe_ctlblk); } extern struct kprobe kprobe_busy; void kprobe_busy_begin(void); void kprobe_busy_end(void); kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); int register_kprobes(struct kprobe **kps, int num); void unregister_kprobes(struct kprobe **kps, int num); unsigned long arch_deref_entry_point(void *); int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); int register_kretprobes(struct kretprobe **rps, int num); void unregister_kretprobes(struct kretprobe **rps, int num); void kprobe_flush_task(struct task_struct *tk); void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); void kprobe_free_init_mem(void); int disable_kprobe(struct kprobe *kp); int enable_kprobe(struct kprobe *kp); void dump_kprobe(struct kprobe *kp); void *alloc_insn_page(void); void 
free_insn_page(void *page); #else /* !CONFIG_KPROBES: */ static inline int kprobes_built_in(void) { return 0; } static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) { return 0; } static inline struct kprobe *get_kprobe(void *addr) { return NULL; } static inline struct kprobe *kprobe_running(void) { return NULL; } static inline int register_kprobe(struct kprobe *p) { return -ENOSYS; } static inline int register_kprobes(struct kprobe **kps, int num) { return -ENOSYS; } static inline void unregister_kprobe(struct kprobe *p) { } static inline void unregister_kprobes(struct kprobe **kps, int num) { } static inline int register_kretprobe(struct kretprobe *rp) { return -ENOSYS; } static inline int register_kretprobes(struct kretprobe **rps, int num) { return -ENOSYS; } static inline void unregister_kretprobe(struct kretprobe *rp) { } static inline void unregister_kretprobes(struct kretprobe **rps, int num) { } static inline void kprobe_flush_task(struct task_struct *tk) { } static inline void kprobe_free_init_mem(void) { } static inline int disable_kprobe(struct kprobe *kp) { return -ENOSYS; } static inline int enable_kprobe(struct kprobe *kp) { return -ENOSYS; } static inline bool within_kprobe_blacklist(unsigned long addr) { return true; } #endif /* CONFIG_KPROBES */ static inline int disable_kretprobe(struct kretprobe *rp) { return disable_kprobe(&rp->kp); } static inline int enable_kretprobe(struct kretprobe *rp) { return enable_kprobe(&rp->kp); } #ifndef CONFIG_KPROBES static inline bool is_kprobe_insn_slot(unsigned long addr) { return false; } #endif #ifndef CONFIG_OPTPROBES static inline bool is_kprobe_optinsn_slot(unsigned long addr) { return false; } #endif /* Returns true if kprobes handled the fault */ static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs, unsigned int trap) { if (!kprobes_built_in()) return false; if (user_mode(regs)) return false; /* * To be potentially processing a kprobe fault and to be allowed * to call kprobe_running(), we have to be non-preemptible. */ if (preemptible()) return false; if (!kprobe_running()) return false; return kprobe_fault_handler(regs, trap); } #endif /* _LINUX_KPROBES_H */ i2c-smbus.h 0000644 00000002444 14722070374 0006533 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * i2c-smbus.h - SMBus extensions to the I2C protocol * * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de> */ #ifndef _LINUX_I2C_SMBUS_H #define _LINUX_I2C_SMBUS_H #include <linux/i2c.h> #include <linux/spinlock.h> #include <linux/workqueue.h> /** * i2c_smbus_alert_setup - platform data for the smbus_alert i2c client * @irq: IRQ number, if the smbus_alert driver should take care of interrupt * handling * * If irq is not specified, the smbus_alert driver doesn't take care of * interrupt handling. In that case it is up to the I2C bus driver to either * handle the interrupts or to poll for alerts.
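 *
 * A hedged usage sketch (the adapter pointer and IRQ number are assumed
 * to come from the calling bus driver):
 *
 *	struct i2c_smbus_alert_setup setup = { .irq = my_alert_irq };
 *	struct i2c_client *ara = i2c_setup_smbus_alert(adapter, &setup);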
*/ struct i2c_smbus_alert_setup { int irq; }; struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, struct i2c_smbus_alert_setup *setup); int i2c_handle_smbus_alert(struct i2c_client *ara); #if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF) int of_i2c_setup_smbus_alert(struct i2c_adapter *adap); #else static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap) { return 0; } #endif #endif /* _LINUX_I2C_SMBUS_H */ pm_wakeup.h 0000644 00000013362 14722070374 0006720 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * pm_wakeup.h - Power management wakeup interface * * Copyright (C) 2008 Alan Stern * Copyright (C) 2010 Rafael J. Wysocki, Novell Inc. */ #ifndef _LINUX_PM_WAKEUP_H #define _LINUX_PM_WAKEUP_H #ifndef _DEVICE_H_ # error "please don't include this file directly" #endif #include <linux/types.h> struct wake_irq; /** * struct wakeup_source - Representation of wakeup sources * * @name: Name of the wakeup source * @id: Wakeup source id * @entry: Wakeup source list entry * @lock: Wakeup source lock * @wakeirq: Optional device specific wakeirq * @timer: Wakeup timer list * @timer_expires: Wakeup timer expiration * @total_time: Total time this wakeup source has been active. * @max_time: Maximum time this wakeup source has been continuously active. * @last_time: Monotonic clock when the wakeup source's was touched last time. * @prevent_sleep_time: Total time this source has been preventing autosleep. * @event_count: Number of signaled wakeup events. * @active_count: Number of times the wakeup source was activated. * @relax_count: Number of times the wakeup source was deactivated. * @expire_count: Number of times the wakeup source's timeout has expired. * @wakeup_count: Number of times the wakeup source might abort suspend. * @dev: Struct device for sysfs statistics about the wakeup source. * @active: Status of the wakeup source. * @autosleep_enabled: Autosleep is active, so update @prevent_sleep_time. */ struct wakeup_source { const char *name; int id; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; struct device *dev; bool active:1; bool autosleep_enabled:1; }; #ifdef CONFIG_PM_SLEEP /* * Changes to device_may_wakeup take effect on the next pm state change. 
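 *
 * A typical driver-side sketch (illustrative; my_wake_irq is assumed to
 * be the driver's wakeup-capable interrupt line):
 *
 *	device_init_wakeup(dev, true);
 *	if (device_may_wakeup(dev))
 *		enable_irq_wake(my_wake_irq);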
*/ static inline bool device_can_wakeup(struct device *dev) { return dev->power.can_wakeup; } static inline bool device_may_wakeup(struct device *dev) { return dev->power.can_wakeup && !!dev->power.wakeup; } static inline void device_set_wakeup_path(struct device *dev) { dev->power.wakeup_path = true; } /* drivers/base/power/wakeup.c */ extern struct wakeup_source *wakeup_source_create(const char *name); extern void wakeup_source_destroy(struct wakeup_source *ws); extern void wakeup_source_add(struct wakeup_source *ws); extern void wakeup_source_remove(struct wakeup_source *ws); extern struct wakeup_source *wakeup_source_register(struct device *dev, const char *name); extern void wakeup_source_unregister(struct wakeup_source *ws); extern int device_wakeup_enable(struct device *dev); extern int device_wakeup_disable(struct device *dev); extern void device_set_wakeup_capable(struct device *dev, bool capable); extern int device_init_wakeup(struct device *dev, bool val); extern int device_set_wakeup_enable(struct device *dev, bool enable); extern void __pm_stay_awake(struct wakeup_source *ws); extern void pm_stay_awake(struct device *dev); extern void __pm_relax(struct wakeup_source *ws); extern void pm_relax(struct device *dev); extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard); extern void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard); #else /* !CONFIG_PM_SLEEP */ static inline void device_set_wakeup_capable(struct device *dev, bool capable) { dev->power.can_wakeup = capable; } static inline bool device_can_wakeup(struct device *dev) { return dev->power.can_wakeup; } static inline struct wakeup_source *wakeup_source_create(const char *name) { return NULL; } static inline void wakeup_source_destroy(struct wakeup_source *ws) {} static inline void wakeup_source_add(struct wakeup_source *ws) {} static inline void wakeup_source_remove(struct wakeup_source *ws) {} static inline struct wakeup_source *wakeup_source_register(struct device *dev, const char *name) { return NULL; } static inline void wakeup_source_unregister(struct wakeup_source *ws) {} static inline int device_wakeup_enable(struct device *dev) { dev->power.should_wakeup = true; return 0; } static inline int device_wakeup_disable(struct device *dev) { dev->power.should_wakeup = false; return 0; } static inline int device_set_wakeup_enable(struct device *dev, bool enable) { dev->power.should_wakeup = enable; return 0; } static inline int device_init_wakeup(struct device *dev, bool val) { device_set_wakeup_capable(dev, val); device_set_wakeup_enable(dev, val); return 0; } static inline bool device_may_wakeup(struct device *dev) { return dev->power.can_wakeup && dev->power.should_wakeup; } static inline void device_set_wakeup_path(struct device *dev) {} static inline void __pm_stay_awake(struct wakeup_source *ws) {} static inline void pm_stay_awake(struct device *dev) {} static inline void __pm_relax(struct wakeup_source *ws) {} static inline void pm_relax(struct device *dev) {} static inline void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) {} static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) {} #endif /* !CONFIG_PM_SLEEP */ static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) { return pm_wakeup_ws_event(ws, msec, false); } static inline void pm_wakeup_event(struct device *dev, unsigned int msec) { return pm_wakeup_dev_event(dev, msec, false); } static inline void 
pm_wakeup_hard_event(struct device *dev) { return pm_wakeup_dev_event(dev, 0, true); } #endif /* _LINUX_PM_WAKEUP_H */ init_ohci1394_dma.h 0000644 00000000304 14722070374 0010027 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT extern int __initdata init_ohci1394_dma_early; extern void __init init_ohci1394_dma_on_all_controllers(void); #endif cdrom.h 0000644 00000021440 14722070374 0006030 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * -- <linux/cdrom.h> * General header file for linux CD-ROM drivers * Copyright (C) 1992 David Giller, rafetmad@oxy.edu * 1994, 1995 Eberhard Mönkeberg, emoenke@gwdg.de * 1996 David van Leeuwen, david@tm.tno.nl * 1997, 1998 Erik Andersen, andersee@debian.org * 1998-2002 Jens Axboe, axboe@suse.de */ #ifndef _LINUX_CDROM_H #define _LINUX_CDROM_H #include <linux/fs.h> /* not really needed, later.. */ #include <linux/list.h> #include <scsi/scsi_common.h> #include <uapi/linux/cdrom.h> struct packet_command { unsigned char cmd[CDROM_PACKET_SIZE]; unsigned char *buffer; unsigned int buflen; int stat; struct scsi_sense_hdr *sshdr; unsigned char data_direction; int quiet; int timeout; void *reserved[1]; }; /* * _OLD will use PIO transfer on atapi devices, _BPC_* will use DMA */ #define CDDA_OLD 0 /* old style */ #define CDDA_BPC_SINGLE 1 /* single frame block pc */ #define CDDA_BPC_FULL 2 /* multi frame block pc */ /* Uniform cdrom data structures for cdrom.c */ struct cdrom_device_info { const struct cdrom_device_ops *ops; /* link to device_ops */ struct list_head list; /* linked list of all device_info */ struct gendisk *disk; /* matching block layer disk */ void *handle; /* driver-dependent data */ /* specifications */ int mask; /* mask of capability: disables them */ int speed; /* maximum speed for reading data */ int capacity; /* number of discs in jukebox */ /* device-related storage */ unsigned int options : 30; /* options flags */ unsigned mc_flags : 2; /* media change buffer flags */ unsigned int vfs_events; /* cached events for vfs path */ unsigned int ioctl_events; /* cached events for ioctl path */ int use_count; /* number of times device opened */ char name[20]; /* name of the device type */ /* per-device flags */ __u8 sanyo_slot : 2; /* Sanyo 3 CD changer support */ __u8 keeplocked : 1; /* CDROM_LOCKDOOR status */ __u8 reserved : 5; /* not used yet */ int cdda_method; /* see flags */ __u8 last_sense; __u8 media_written; /* dirty flag, DVD+RW bookkeeping */ unsigned short mmc3_profile; /* current MMC3 profile */ int for_data; int (*exit)(struct cdrom_device_info *); int mrw_mode_page; }; struct cdrom_device_ops { /* routines */ int (*open) (struct cdrom_device_info *, int); void (*release) (struct cdrom_device_info *); int (*drive_status) (struct cdrom_device_info *, int); unsigned int (*check_events) (struct cdrom_device_info *cdi, unsigned int clearing, int slot); int (*media_changed) (struct cdrom_device_info *, int); int (*tray_move) (struct cdrom_device_info *, int); int (*lock_door) (struct cdrom_device_info *, int); int (*select_speed) (struct cdrom_device_info *, int); int (*select_disc) (struct cdrom_device_info *, int); int (*get_last_session) (struct cdrom_device_info *, struct cdrom_multisession *); int (*get_mcn) (struct cdrom_device_info *, struct cdrom_mcn *); /* hard reset device */ int (*reset) (struct cdrom_device_info *); /* play stuff */ int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *); /* driver specifications */ const int capability; /* capability flags */ /* handle 
uniform packets for scsi type devices (scsi,atapi) */ int (*generic_packet) (struct cdrom_device_info *, struct packet_command *); }; /* the general block_device operations structure: */ extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode); extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode); extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, unsigned int clearing); extern int cdrom_media_changed(struct cdrom_device_info *); extern int register_cdrom(struct cdrom_device_info *cdi); extern void unregister_cdrom(struct cdrom_device_info *cdi); typedef struct { int data; int audio; int cdi; int xa; long error; } tracktype; extern int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written); extern int cdrom_number_of_slots(struct cdrom_device_info *cdi); extern int cdrom_mode_select(struct cdrom_device_info *cdi, struct packet_command *cgc); extern int cdrom_mode_sense(struct cdrom_device_info *cdi, struct packet_command *cgc, int page_code, int page_control); extern void init_cdrom_command(struct packet_command *cgc, void *buffer, int len, int type); extern int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, struct packet_command *cgc); /* The SCSI spec says there could be 256 slots. */ #define CDROM_MAX_SLOTS 256 struct cdrom_mechstat_header { #if defined(__BIG_ENDIAN_BITFIELD) __u8 fault : 1; __u8 changer_state : 2; __u8 curslot : 5; __u8 mech_state : 3; __u8 door_open : 1; __u8 reserved1 : 4; #elif defined(__LITTLE_ENDIAN_BITFIELD) __u8 curslot : 5; __u8 changer_state : 2; __u8 fault : 1; __u8 reserved1 : 4; __u8 door_open : 1; __u8 mech_state : 3; #endif __u8 curlba[3]; __u8 nslots; __u16 slot_tablelen; }; struct cdrom_slot { #if defined(__BIG_ENDIAN_BITFIELD) __u8 disc_present : 1; __u8 reserved1 : 6; __u8 change : 1; #elif defined(__LITTLE_ENDIAN_BITFIELD) __u8 change : 1; __u8 reserved1 : 6; __u8 disc_present : 1; #endif __u8 reserved2[3]; }; struct cdrom_changer_info { struct cdrom_mechstat_header hdr; struct cdrom_slot slots[CDROM_MAX_SLOTS]; }; typedef enum { mechtype_caddy = 0, mechtype_tray = 1, mechtype_popup = 2, mechtype_individual_changer = 4, mechtype_cartridge_changer = 5 } mechtype_t; typedef struct { #if defined(__BIG_ENDIAN_BITFIELD) __u8 ps : 1; __u8 reserved1 : 1; __u8 page_code : 6; __u8 page_length; __u8 reserved2 : 1; __u8 bufe : 1; __u8 ls_v : 1; __u8 test_write : 1; __u8 write_type : 4; __u8 multi_session : 2; /* or border, DVD */ __u8 fp : 1; __u8 copy : 1; __u8 track_mode : 4; __u8 reserved3 : 4; __u8 data_block_type : 4; #elif defined(__LITTLE_ENDIAN_BITFIELD) __u8 page_code : 6; __u8 reserved1 : 1; __u8 ps : 1; __u8 page_length; __u8 write_type : 4; __u8 test_write : 1; __u8 ls_v : 1; __u8 bufe : 1; __u8 reserved2 : 1; __u8 track_mode : 4; __u8 copy : 1; __u8 fp : 1; __u8 multi_session : 2; /* or border, DVD */ __u8 data_block_type : 4; __u8 reserved3 : 4; #endif __u8 link_size; __u8 reserved4; #if defined(__BIG_ENDIAN_BITFIELD) __u8 reserved5 : 2; __u8 app_code : 6; #elif defined(__LITTLE_ENDIAN_BITFIELD) __u8 app_code : 6; __u8 reserved5 : 2; #endif __u8 session_format; __u8 reserved6; __be32 packet_size; __u16 audio_pause; __u8 mcn[16]; __u8 isrc[16]; __u8 subhdr0; __u8 subhdr1; __u8 subhdr2; __u8 subhdr3; } __attribute__((packed)) write_param_page; struct modesel_head { __u8 reserved1; __u8 medium; __u8 reserved2; __u8 
block_desc_length; __u8 density; __u8 number_of_blocks_hi; __u8 number_of_blocks_med; __u8 number_of_blocks_lo; __u8 reserved3; __u8 block_length_hi; __u8 block_length_med; __u8 block_length_lo; }; typedef struct { __u16 report_key_length; __u8 reserved1; __u8 reserved2; #if defined(__BIG_ENDIAN_BITFIELD) __u8 type_code : 2; __u8 vra : 3; __u8 ucca : 3; #elif defined(__LITTLE_ENDIAN_BITFIELD) __u8 ucca : 3; __u8 vra : 3; __u8 type_code : 2; #endif __u8 region_mask; __u8 rpc_scheme; __u8 reserved3; } rpc_state_t; struct event_header { __be16 data_len; #if defined(__BIG_ENDIAN_BITFIELD) __u8 nea : 1; __u8 reserved1 : 4; __u8 notification_class : 3; #elif defined(__LITTLE_ENDIAN_BITFIELD) __u8 notification_class : 3; __u8 reserved1 : 4; __u8 nea : 1; #endif __u8 supp_event_class; }; struct media_event_desc { #if defined(__BIG_ENDIAN_BITFIELD) __u8 reserved1 : 4; __u8 media_event_code : 4; __u8 reserved2 : 6; __u8 media_present : 1; __u8 door_open : 1; #elif defined(__LITTLE_ENDIAN_BITFIELD) __u8 media_event_code : 4; __u8 reserved1 : 4; __u8 door_open : 1; __u8 media_present : 1; __u8 reserved2 : 6; #endif __u8 start_slot; __u8 end_slot; }; extern int cdrom_get_media_event(struct cdrom_device_info *cdi, struct media_event_desc *med); static inline void lba_to_msf(int lba, u8 *m, u8 *s, u8 *f) { lba += CD_MSF_OFFSET; lba &= 0xffffff; /* negative lbas use only 24 bits */ *m = lba / (CD_SECS * CD_FRAMES); lba %= (CD_SECS * CD_FRAMES); *s = lba / CD_FRAMES; *f = lba % CD_FRAMES; } static inline int msf_to_lba(u8 m, u8 s, u8 f) { return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_MSF_OFFSET; } #endif /* _LINUX_CDROM_H */ wmi.h 0000644 00000003075 14722070374 0005524 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * wmi.h - ACPI WMI interface * * Copyright (c) 2015 Andrew Lutomirski */ #ifndef _LINUX_WMI_H #define _LINUX_WMI_H #include <linux/device.h> #include <linux/acpi.h> #include <linux/mod_devicetable.h> #include <uapi/linux/wmi.h> struct wmi_device { struct device dev; /* True for data blocks implementing the Set Control Method */ bool setable; }; /* evaluate the ACPI method associated with this device */ extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out); /* Caller must kfree the result. */ extern union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance); extern int set_required_buffer_size(struct wmi_device *wdev, u64 length); struct wmi_driver { struct device_driver driver; const struct wmi_device_id *id_table; int (*probe)(struct wmi_device *wdev, const void *context); int (*remove)(struct wmi_device *wdev); void (*notify)(struct wmi_device *device, union acpi_object *data); long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd, struct wmi_ioctl_buffer *arg); }; extern int __must_check __wmi_driver_register(struct wmi_driver *driver, struct module *owner); extern void wmi_driver_unregister(struct wmi_driver *driver); #define wmi_driver_register(driver) __wmi_driver_register((driver), THIS_MODULE) #define module_wmi_driver(__wmi_driver) \ module_driver(__wmi_driver, wmi_driver_register, \ wmi_driver_unregister) #endif phonet.h 0000644 00000001031 14722070374 0006213 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /** * file phonet.h * * Phonet sockets kernel interface * * Copyright (C) 2008 Nokia Corporation. All rights reserved. 
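 *
 * A hedged user-space sketch of the SIOCPNGAUTOCONF ioctl defined below
 * (the interface name and socket fd are illustrative assumptions):
 *
 *	struct if_phonet_req req = { .ifr_phonet_name = "phonet0" };
 *	ioctl(fd, SIOCPNGAUTOCONF, &req);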
*/ #ifndef LINUX_PHONET_H #define LINUX_PHONET_H #include <uapi/linux/phonet.h> #define SIOCPNGAUTOCONF (SIOCDEVPRIVATE + 0) struct if_phonet_autoconf { uint8_t device; }; struct if_phonet_req { char ifr_phonet_name[16]; union { struct if_phonet_autoconf ifru_phonet_autoconf; } ifr_ifru; }; #define ifr_phonet_autoconf ifr_ifru.ifru_phonet_autoconf #endif elfnote.h 0000644 00000007045 14722070374 0006365 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ELFNOTE_H #define _LINUX_ELFNOTE_H /* * Helper macros to generate ELF Note structures, which are put into a * PT_NOTE segment of the final vmlinux image. These are useful for * including name-value pairs of metadata into the kernel binary (or * modules?) for use by external programs. * * Each note has three parts: a name, a type and a desc. The name is * intended to distinguish the note's originator, so it would be a * company, project, subsystem, etc; it must be in a suitable form for * use in a section name. The type is an integer which is used to tag * the data, and is considered to be within the "name" namespace (so * "FooCo"'s type 42 is distinct from "BarProj"'s type 42). The * "desc" field is the actual data. There are no constraints on the * desc field's contents, though typically they're fairly small. * * All notes from a given NAME are put into a section named * .note.NAME. When the kernel image is finally linked, all the notes * are packed into a single .notes section, which is mapped into the * PT_NOTE segment. Because notes for a given name are grouped into * the same section, they'll all be adjacent the output file. * * This file defines macros for both C and assembler use. Their * syntax is slightly different, but they're semantically similar. * * See the ELF specification for more detail about ELF notes. */ #ifdef __ASSEMBLER__ /* * Generate a structure with the same shape as Elf{32,64}_Nhdr (which * turn out to be the same size and shape), followed by the name and * desc data with appropriate padding. The 'desctype' argument is the * assembler pseudo op defining the type of the data e.g. .asciz while * 'descdata' is the data itself e.g. "hello, world". * * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two") * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef) */ #define ELFNOTE_START(name, type, flags) \ .pushsection .note.name, flags,@note ; \ .balign 4 ; \ .long 2f - 1f /* namesz */ ; \ .long 4484f - 3f /* descsz */ ; \ .long type ; \ 1:.asciz #name ; \ 2:.balign 4 ; \ 3: #define ELFNOTE_END \ 4484:.balign 4 ; \ .popsection ; #define ELFNOTE(name, type, desc) \ ELFNOTE_START(name, type, "a") \ desc ; \ ELFNOTE_END #else /* !__ASSEMBLER__ */ #include <linux/elf.h> /* * Use an anonymous structure which matches the shape of * Elf{32,64}_Nhdr, but includes the name and desc data. The size and * type of name and desc depend on the macro arguments. "name" must * be a literal string, and "desc" must be passed by value. You may * only define one note per line, since __LINE__ is used to generate * unique symbols. */ #define _ELFNOTE_PASTE(a,b) a##b #define _ELFNOTE(size, name, unique, type, desc) \ static const struct { \ struct elf##size##_note _nhdr; \ unsigned char _name[sizeof(name)] \ __attribute__((aligned(sizeof(Elf##size##_Word)))); \ typeof(desc) _desc \ __attribute__((aligned(sizeof(Elf##size##_Word)))); \ } _ELFNOTE_PASTE(_note_, unique) \ __used \ __attribute__((section(".note." 
name), \ aligned(sizeof(Elf##size##_Word)), \ unused)) = { \ { \ sizeof(name), \ sizeof(desc), \ type, \ }, \ name, \ desc \ } #define ELFNOTE(size, name, type, desc) \ _ELFNOTE(size, name, __LINE__, type, desc) #define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc) #define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc) #endif /* __ASSEMBLER__ */ #endif /* _LINUX_ELFNOTE_H */ tfrc.h 0000644 00000003272 14722070374 0005665 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _LINUX_TFRC_H_ #define _LINUX_TFRC_H_ /* * TFRC - Data Structures for the TCP-Friendly Rate Control congestion * control mechanism as specified in RFC 3448. * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon */ #include <linux/types.h> /** tfrc_rx_info - TFRC Receiver Data Structure * * @tfrcrx_x_recv: receiver estimate of sending rate (3.2.2) * @tfrcrx_rtt: round-trip-time (communicated by sender) * @tfrcrx_p: current estimate of loss event rate (3.2.2) */ struct tfrc_rx_info { __u32 tfrcrx_x_recv; __u32 tfrcrx_rtt; __u32 tfrcrx_p; }; /** tfrc_tx_info - TFRC Sender Data Structure * * @tfrctx_x: computed transmit rate (4.3 (4)) * @tfrctx_x_recv: receiver estimate of send rate (4.3) * @tfrctx_x_calc: return value of throughput equation (3.1) * @tfrctx_rtt: (moving average) estimate of RTT (4.3) * @tfrctx_p: current loss event rate (5.4) * @tfrctx_rto: estimate of RTO, equals 4*RTT (4.3) * @tfrctx_ipi: inter-packet interval (4.6) * * Note: X and X_recv are both maintained in units of 64 * bytes/second. This * enables a finer resolution of sending rates and avoids problems with * integer arithmetic; u32 is not sufficient as scaling consumes 6 bits. */ struct tfrc_tx_info { __u64 tfrctx_x; __u64 tfrctx_x_recv; __u32 tfrctx_x_calc; __u32 tfrctx_rtt; __u32 tfrctx_p; __u32 tfrctx_rto; __u32 tfrctx_ipi; }; #endif /* _LINUX_TFRC_H_ */ virtio_console.h 0000644 00000003666 14722070374 0007774 0 ustar 00 /* * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so * anyone can use the definitions to implement compatible drivers/servers: * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Copyright (C) Red Hat, Inc., 2009, 2010, 2011 * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011 */ #ifndef _LINUX_VIRTIO_CONSOLE_H #define _LINUX_VIRTIO_CONSOLE_H #include <uapi/linux/virtio_console.h> int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); #endif /* _LINUX_VIRTIO_CONSOLE_H */ fsl_hypervisor.h 0000644 00000005410 14722070374 0010001 0 ustar 00 /* * Freescale hypervisor ioctl and kernel interface * * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. * Author: Timur Tabi <timur@freescale.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * This software is provided by Freescale Semiconductor "as is" and any * express or implied warranties, including, but not limited to, the implied * warranties of merchantability and fitness for a particular purpose are * disclaimed. In no event shall Freescale Semiconductor be liable for any * direct, indirect, incidental, special, exemplary, or consequential damages * (including, but not limited to, procurement of substitute goods or services; * loss of use, data, or profits; or business interruption) however caused and * on any theory of liability, whether in contract, strict liability, or tort * (including negligence or otherwise) arising in any way out of the use of this * software, even if advised of the possibility of such damage. * * This file is used by the Freescale hypervisor management driver. It can * also be included by applications that need to communicate with the driver * via the ioctl interface. */ #ifndef FSL_HYPERVISOR_H #define FSL_HYPERVISOR_H #include <uapi/linux/fsl_hypervisor.h> /** * fsl_hv_event_register() - register a callback for failover events * @nb: pointer to caller-supplied notifier_block structure * * This function is called by device drivers to register their callback * functions for fail-over events. * * The caller should allocate a notifier_block object and initialize the * 'priority' and 'notifier_call' fields. 
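 *
 * A minimal sketch (the callback and variable names are illustrative
 * assumptions):
 *
 *	static int my_failover_cb(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call	= my_failover_cb,
 *		.priority	= 0,
 *	};
 *
 * Pass &my_nb to fsl_hv_failover_register() below.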
*/ int fsl_hv_failover_register(struct notifier_block *nb); /** * fsl_hv_event_unregister() - unregister a callback for failover events * @nb: the same 'nb' used in previous fsl_hv_failover_register call */ int fsl_hv_failover_unregister(struct notifier_block *nb); #endif dmar.h 0000644 00000016776 14722070374 0005667 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2006, Intel Corporation. * * Copyright (C) Ashok Raj <ashok.raj@intel.com> * Copyright (C) Shaohua Li <shaohua.li@intel.com> */ #ifndef __DMAR_H__ #define __DMAR_H__ #include <linux/acpi.h> #include <linux/types.h> #include <linux/msi.h> #include <linux/irqreturn.h> #include <linux/rwsem.h> #include <linux/rculist.h> struct acpi_dmar_header; #ifdef CONFIG_X86 # define DMAR_UNITS_SUPPORTED MAX_IO_APICS #else # define DMAR_UNITS_SUPPORTED 64 #endif /* DMAR Flags */ #define DMAR_INTR_REMAP 0x1 #define DMAR_X2APIC_OPT_OUT 0x2 #define DMAR_PLATFORM_OPT_IN 0x4 struct intel_iommu; struct dmar_dev_scope { struct device __rcu *dev; u8 bus; u8 devfn; }; #ifdef CONFIG_DMAR_TABLE extern struct acpi_table_header *dmar_tbl; struct dmar_drhd_unit { struct list_head list; /* list of drhd units */ struct acpi_dmar_header *hdr; /* ACPI header */ u64 reg_base_addr; /* register base address*/ struct dmar_dev_scope *devices;/* target device array */ int devices_cnt; /* target device count */ u16 segment; /* PCI domain */ u8 ignored:1; /* ignore drhd */ u8 include_all:1; struct intel_iommu *iommu; }; struct dmar_pci_path { u8 bus; u8 device; u8 function; }; struct dmar_pci_notify_info { struct pci_dev *dev; unsigned long event; int bus; u16 seg; u16 level; struct dmar_pci_path path[]; } __attribute__((packed)); extern struct rw_semaphore dmar_global_lock; extern struct list_head dmar_drhd_units; #define for_each_drhd_unit(drhd) \ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ dmar_rcu_check()) #define for_each_active_drhd_unit(drhd) \ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ dmar_rcu_check()) \ if (drhd->ignored) {} else #define for_each_active_iommu(i, drhd) \ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ dmar_rcu_check()) \ if (i=drhd->iommu, drhd->ignored) {} else #define for_each_iommu(i, drhd) \ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ dmar_rcu_check()) \ if (i=drhd->iommu, 0) {} else static inline bool dmar_rcu_check(void) { return rwsem_is_locked(&dmar_global_lock) || system_state == SYSTEM_BOOTING; } #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check()) #define for_each_dev_scope(devs, cnt, i, tmp) \ for ((i) = 0; ((tmp) = (i) < (cnt) ? 
\ dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \ (i)++) #define for_each_active_dev_scope(devs, cnt, i, tmp) \ for_each_dev_scope((devs), (cnt), (i), (tmp)) \ if (!(tmp)) { continue; } else extern int dmar_table_init(void); extern int dmar_dev_scope_init(void); extern void dmar_register_bus_notifier(void); extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, struct dmar_dev_scope **devices, u16 segment); extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt); extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt); extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, void *start, void*end, u16 segment, struct dmar_dev_scope *devices, int devices_cnt); extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment, struct dmar_dev_scope *devices, int count); /* Intel IOMMU detection */ extern int detect_intel_iommu(void); extern int enable_drhd_fault_handling(void); extern int dmar_device_add(acpi_handle handle); extern int dmar_device_remove(acpi_handle handle); static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg) { return 0; } #ifdef CONFIG_INTEL_IOMMU extern int iommu_detected, no_iommu; extern int intel_iommu_init(void); extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg); extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg); extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); #else /* !CONFIG_INTEL_IOMMU: */ static inline int intel_iommu_init(void) { return -ENODEV; } #define dmar_parse_one_rmrr dmar_res_noop #define dmar_parse_one_atsr dmar_res_noop #define dmar_check_one_atsr dmar_res_noop #define dmar_release_one_atsr dmar_res_noop static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { return 0; } static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) { return 0; } #endif /* CONFIG_INTEL_IOMMU */ #ifdef CONFIG_IRQ_REMAP extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert); #else /* CONFIG_IRQ_REMAP */ static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) { return 0; } #endif /* CONFIG_IRQ_REMAP */ extern bool dmar_platform_optin(void); #else /* CONFIG_DMAR_TABLE */ static inline int dmar_device_add(void *handle) { return 0; } static inline int dmar_device_remove(void *handle) { return 0; } static inline bool dmar_platform_optin(void) { return false; } #endif /* CONFIG_DMAR_TABLE */ struct irte { union { /* Shared between remapped and posted mode*/ struct { __u64 present : 1, /* 0 */ fpd : 1, /* 1 */ __res0 : 6, /* 2 - 6 */ avail : 4, /* 8 - 11 */ __res1 : 3, /* 12 - 14 */ pst : 1, /* 15 */ vector : 8, /* 16 - 23 */ __res2 : 40; /* 24 - 63 */ }; /* Remapped mode */ struct { __u64 r_present : 1, /* 0 */ r_fpd : 1, /* 1 */ dst_mode : 1, /* 2 */ redir_hint : 1, /* 3 */ trigger_mode : 1, /* 4 */ dlvry_mode : 3, /* 5 - 7 */ r_avail : 4, /* 8 - 11 */ r_res0 : 4, /* 12 - 15 */ r_vector : 8, /* 16 - 23 */ r_res1 : 8, /* 24 - 31 */ dest_id : 32; /* 32 - 63 */ }; /* Posted mode */ struct { __u64 p_present : 1, /* 0 */ p_fpd : 1, /* 1 */ p_res0 : 6, /* 2 - 7 */ p_avail : 4, /* 8 - 11 */ p_res1 : 2, /* 12 - 13 */ p_urgent : 1, /* 14 */ p_pst : 1, /* 15 */ p_vector : 8, /* 16 - 23 */ p_res2 : 14, /* 24 - 37 */ 
pda_l : 26; /* 38 - 63 */ }; __u64 low; }; union { /* Shared between remapped and posted mode*/ struct { __u64 sid : 16, /* 64 - 79 */ sq : 2, /* 80 - 81 */ svt : 2, /* 82 - 83 */ __res3 : 44; /* 84 - 127 */ }; /* Posted mode*/ struct { __u64 p_sid : 16, /* 64 - 79 */ p_sq : 2, /* 80 - 81 */ p_svt : 2, /* 82 - 83 */ p_res3 : 12, /* 84 - 95 */ pda_h : 32; /* 96 - 127 */ }; __u64 high; }; }; static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src) { dst->present = src->present; dst->fpd = src->fpd; dst->avail = src->avail; dst->pst = src->pst; dst->vector = src->vector; dst->sid = src->sid; dst->sq = src->sq; dst->svt = src->svt; } #define PDA_LOW_BIT 26 #define PDA_HIGH_BIT 32 /* Can't use the common MSI interrupt functions * since DMAR is not a pci device */ struct irq_data; extern void dmar_msi_unmask(struct irq_data *data); extern void dmar_msi_mask(struct irq_data *data); extern void dmar_msi_read(int irq, struct msi_msg *msg); extern void dmar_msi_write(int irq, struct msi_msg *msg); extern int dmar_set_interrupt(struct intel_iommu *iommu); extern irqreturn_t dmar_fault(int irq, void *dev_id); extern int dmar_alloc_hwirq(int id, int node, void *arg); extern void dmar_free_hwirq(int irq); #endif /* __DMAR_H__ */ counter_enum.h 0000644 00000002667 14722070374 0007441 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Counter interface enum functions * Copyright (C) 2018 William Breathitt Gray */ #ifndef _COUNTER_ENUM_H_ #define _COUNTER_ENUM_H_ #include <linux/types.h> struct counter_device; struct counter_signal; struct counter_count; ssize_t counter_signal_enum_read(struct counter_device *counter, struct counter_signal *signal, void *priv, char *buf); ssize_t counter_signal_enum_write(struct counter_device *counter, struct counter_signal *signal, void *priv, const char *buf, size_t len); ssize_t counter_signal_enum_available_read(struct counter_device *counter, struct counter_signal *signal, void *priv, char *buf); ssize_t counter_count_enum_read(struct counter_device *counter, struct counter_count *count, void *priv, char *buf); ssize_t counter_count_enum_write(struct counter_device *counter, struct counter_count *count, void *priv, const char *buf, size_t len); ssize_t counter_count_enum_available_read(struct counter_device *counter, struct counter_count *count, void *priv, char *buf); ssize_t counter_device_enum_read(struct counter_device *counter, void *priv, char *buf); ssize_t counter_device_enum_write(struct counter_device *counter, void *priv, const char *buf, size_t len); ssize_t counter_device_enum_available_read(struct counter_device *counter, void *priv, char *buf); #endif /* _COUNTER_ENUM_H_ */ lapb.h 0000644 00000003330 14722070374 0005640 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * These are the public elements of the Linux LAPB module. 
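 *
 * A hedged registration sketch (the my_* callback implementations are
 * assumed to be provided by the device driver):
 *
 *	static const struct lapb_register_struct my_callbacks = {
 *		.connect_confirmation	 = my_connect_confirmation,
 *		.connect_indication	 = my_connect_indication,
 *		.disconnect_confirmation = my_disconnect_confirmation,
 *		.disconnect_indication	 = my_disconnect_indication,
 *		.data_indication	 = my_data_indication,
 *		.data_transmit		 = my_data_transmit,
 *	};
 *
 *	if (lapb_register(dev, &my_callbacks) == LAPB_OK)
 *		lapb_connect_request(dev);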
*/ #ifndef LAPB_KERNEL_H #define LAPB_KERNEL_H #define LAPB_OK 0 #define LAPB_BADTOKEN 1 #define LAPB_INVALUE 2 #define LAPB_CONNECTED 3 #define LAPB_NOTCONNECTED 4 #define LAPB_REFUSED 5 #define LAPB_TIMEDOUT 6 #define LAPB_NOMEM 7 #define LAPB_STANDARD 0x00 #define LAPB_EXTENDED 0x01 #define LAPB_SLP 0x00 #define LAPB_MLP 0x02 #define LAPB_DTE 0x00 #define LAPB_DCE 0x04 struct lapb_register_struct { void (*connect_confirmation)(struct net_device *dev, int reason); void (*connect_indication)(struct net_device *dev, int reason); void (*disconnect_confirmation)(struct net_device *dev, int reason); void (*disconnect_indication)(struct net_device *dev, int reason); int (*data_indication)(struct net_device *dev, struct sk_buff *skb); void (*data_transmit)(struct net_device *dev, struct sk_buff *skb); }; struct lapb_parms_struct { unsigned int t1; unsigned int t1timer; unsigned int t2; unsigned int t2timer; unsigned int n2; unsigned int n2count; unsigned int window; unsigned int state; unsigned int mode; }; extern int lapb_register(struct net_device *dev, const struct lapb_register_struct *callbacks); extern int lapb_unregister(struct net_device *dev); extern int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms); extern int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms); extern int lapb_connect_request(struct net_device *dev); extern int lapb_disconnect_request(struct net_device *dev); extern int lapb_data_request(struct net_device *dev, struct sk_buff *skb); extern int lapb_data_received(struct net_device *dev, struct sk_buff *skb); #endif smc911x.h 0000644 00000000446 14722070374 0006134 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SMC911X_H__ #define __SMC911X_H__ #define SMC911X_USE_16BIT (1 << 0) #define SMC911X_USE_32BIT (1 << 1) struct smc911x_platdata { unsigned long flags; unsigned long irq_flags; /* IRQF_... */ int irq_polarity; }; #endif /* __SMC911X_H__ */ hmm.h 0000644 00000034327 14722070374 0005515 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2013 Red Hat Inc. * * Authors: Jérôme Glisse <jglisse@redhat.com> */ /* * Heterogeneous Memory Management (HMM) * * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and it * is for. Here we focus on the HMM API description, with some explanation of * the underlying implementation. * * Short description: HMM provides a set of helpers to share a virtual address * space between CPU and a device, so that the device can access any valid * address of the process (while still obeying memory protection). HMM also * provides helpers to migrate process memory to device memory, and back. Each * set of functionality (address space mirroring, and migration to and from * device memory) can be used independently of the other. * * * HMM address space mirroring API: * * Use HMM address space mirroring if you want to mirror a range of the CPU * page tables of a process into a device page table. Here, "mirror" means "keep * synchronized". Prerequisites: the device must provide the ability to write- * protect its page tables (at PAGE_SIZE granularity), and must be able to * recover from the resulting potential page faults. * * HMM guarantees that at any point in time, a given virtual address points to * either the same memory in both CPU and device page tables (that is: CPU and * device page tables each point to the same pages), or that one page table (CPU * or device) points to no entry, while the other still points to the old page * for the address. 
The latter case happens when the CPU page table update * happens first, and then the update is mirrored over to the device page table. * This does not cause any issue, because the CPU page table cannot start * pointing to a new page until the device page table is invalidated. * * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any * updates to each device driver that has registered a mirror. It also provides * some API calls to help with taking a snapshot of the CPU page table, and to * synchronize with any updates that might happen concurrently. * * * HMM migration to and from device memory: * * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page * of the device memory, and allows the device driver to manage its memory * using those struct pages. Having struct pages for device memory makes * migration easier. Because that memory is not addressable by the CPU it must * never be pinned to the device; in other words, any CPU page fault can always * cause the device memory to be migrated (copied/moved) back to regular memory. * * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that * allows use of a device DMA engine to perform the copy operation between * regular system memory and device memory. */ #ifndef LINUX_HMM_H #define LINUX_HMM_H #include <linux/kconfig.h> #include <asm/pgtable.h> #ifdef CONFIG_HMM_MIRROR #include <linux/device.h> #include <linux/migrate.h> #include <linux/memremap.h> #include <linux/completion.h> #include <linux/mmu_notifier.h> /* * struct hmm - HMM per mm struct * * @mm: mm struct this HMM struct is bound to * @lock: lock protecting ranges list * @ranges: list of range being snapshotted * @mirrors: list of mirrors for this mm * @mmu_notifier: mmu notifier to track updates to CPU page table * @mirrors_sem: read/write semaphore protecting the mirrors list * @wq: wait queue for user waiting on a range invalidation * @notifiers: count of active mmu notifiers */ struct hmm { struct mmu_notifier mmu_notifier; spinlock_t ranges_lock; struct list_head ranges; struct list_head mirrors; struct rw_semaphore mirrors_sem; wait_queue_head_t wq; long notifiers; }; /* * hmm_pfn_flag_e - HMM flag enums * * Flags: * HMM_PFN_VALID: pfn is valid. It has, at least, read permission. * HMM_PFN_WRITE: CPU page table has write permission set * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE) * * The driver provides a flags array for mapping page protections to device * PTE bits. If the driver valid bit for an entry is bit 3, * i.e., (entry & (1 << 3)), then the driver must provide * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3. * Same logic apply to all flags. This is the same idea as vm_page_prot in vma * except that this is per device driver rather than per architecture. */ enum hmm_pfn_flag_e { HMM_PFN_VALID = 0, HMM_PFN_WRITE, HMM_PFN_DEVICE_PRIVATE, HMM_PFN_FLAG_MAX }; /* * hmm_pfn_value_e - HMM pfn special value * * Flags: * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory * HMM_PFN_NONE: corresponding CPU page table entry is pte_none() * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not * be mirrored by a device, because the entry will never have HMM_PFN_VALID * set and the pfn value is undefined. * * Driver provides values for none entry, error entry, and special entry. 
* Driver can alias (i.e., use same value) error and special, but * it should not alias none with error or special. * * HMM pfn values returned by hmm_range_fault() will be: * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous, * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry, * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one */ enum hmm_pfn_value_e { HMM_PFN_ERROR, HMM_PFN_NONE, HMM_PFN_SPECIAL, HMM_PFN_VALUE_MAX }; /* * struct hmm_range - track invalidation lock on virtual address range * * @hmm: the core HMM structure this range is active against * @list: all range locks are on a list * @start: range virtual start address (inclusive) * @end: range virtual end address (exclusive) * @pfns: array of pfns (big enough for the range) * @flags: pfn flags to match device driver page table * @values: pfn value for some special case (none, special, error, ...) * @default_flags: default flags for the range (write, read, ... see hmm doc) * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT) * @valid: pfns array did not change since it has been filled by an HMM function */ struct hmm_range { struct hmm *hmm; struct list_head list; unsigned long start; unsigned long end; uint64_t *pfns; const uint64_t *flags; const uint64_t *values; uint64_t default_flags; uint64_t pfn_flags_mask; uint8_t pfn_shift; bool valid; }; /* * hmm_range_wait_until_valid() - wait for range to be valid * @range: range affected by invalidation to wait on * @timeout: timeout for the wait in ms (i.e. abort the wait after that period of time) * Return: true if the range is valid, false otherwise. */ static inline bool hmm_range_wait_until_valid(struct hmm_range *range, unsigned long timeout) { return wait_event_timeout(range->hmm->wq, range->valid, msecs_to_jiffies(timeout)) != 0; } /* * hmm_range_valid() - test if a range is valid or not * @range: range * Return: true if the range is valid, false otherwise. */ static inline bool hmm_range_valid(struct hmm_range *range) { return range->valid; } /* * hmm_device_entry_to_page() - return struct page pointed to by a device entry * @range: range used to decode device entry value * @entry: device entry value to get corresponding struct page from * Return: struct page pointer if entry is valid, NULL otherwise * * If the device entry is valid (i.e. valid flag set) then return the struct page * matching the entry value. Otherwise return NULL.
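 *
 * Illustrative use, assuming a populated range and an index i into its
 * pfns array (my_map_page_into_device is a hypothetical driver helper):
 *
 *	struct page *page = hmm_device_entry_to_page(range, range->pfns[i]);
 *	if (page)
 *		my_map_page_into_device(page);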
*/ static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range, uint64_t entry) { if (entry == range->values[HMM_PFN_NONE]) return NULL; if (entry == range->values[HMM_PFN_ERROR]) return NULL; if (entry == range->values[HMM_PFN_SPECIAL]) return NULL; if (!(entry & range->flags[HMM_PFN_VALID])) return NULL; return pfn_to_page(entry >> range->pfn_shift); } /* * hmm_device_entry_to_pfn() - return pfn value store in a device entry * @range: range use to decode device entry value * @entry: device entry to extract pfn from * Return: pfn value if device entry is valid, -1UL otherwise */ static inline unsigned long hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn) { if (pfn == range->values[HMM_PFN_NONE]) return -1UL; if (pfn == range->values[HMM_PFN_ERROR]) return -1UL; if (pfn == range->values[HMM_PFN_SPECIAL]) return -1UL; if (!(pfn & range->flags[HMM_PFN_VALID])) return -1UL; return (pfn >> range->pfn_shift); } /* * hmm_device_entry_from_page() - create a valid device entry for a page * @range: range use to encode HMM pfn value * @page: page for which to create the device entry * Return: valid device entry for the page */ static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range, struct page *page) { return (page_to_pfn(page) << range->pfn_shift) | range->flags[HMM_PFN_VALID]; } /* * hmm_device_entry_from_pfn() - create a valid device entry value from pfn * @range: range use to encode HMM pfn value * @pfn: pfn value for which to create the device entry * Return: valid device entry for the pfn */ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range, unsigned long pfn) { return (pfn << range->pfn_shift) | range->flags[HMM_PFN_VALID]; } /* * Mirroring: how to synchronize device page table with CPU page table. * * A device driver that is participating in HMM mirroring must always * synchronize with CPU page table updates. For this, device drivers can either * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device * drivers can decide to register one mirror per device per process, or just * one mirror per process for a group of devices. The pattern is: * * int device_bind_address_space(..., struct mm_struct *mm, ...) * { * struct device_address_space *das; * * // Device driver specific initialization, and allocation of das * // which contains an hmm_mirror struct as one of its fields. * ... * * ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops); * if (ret) { * // Cleanup on error * return ret; * } * * // Other device driver specific initialization * ... * } * * Once an hmm_mirror is registered for an address space, the device driver * will get callbacks through sync_cpu_device_pagetables() operation (see * hmm_mirror_ops struct). * * Device driver must not free the struct containing the hmm_mirror struct * before calling hmm_mirror_unregister(). The expected usage is to do that when * the device driver is unbinding from an address space. * * * void device_unbind_address_space(struct device_address_space *das) * { * // Device driver specific cleanup * ... * * hmm_mirror_unregister(&das->mirror); * * // Other device driver specific cleanup, and now das can be freed * ... * } */ struct hmm_mirror; /* * struct hmm_mirror_ops - HMM mirror device operations callback * * @update: callback to update range on a device */ struct hmm_mirror_ops { /* release() - release hmm_mirror * * @mirror: pointer to struct hmm_mirror * * This is called when the mm_struct is being released. 
	 * The callback must ensure that all access to any pages obtained from
	 * this mirror is halted before the callback returns. All future access
	 * should fault.
	 */
	void (*release)(struct hmm_mirror *mirror);

	/* sync_cpu_device_pagetables() - synchronize page tables
	 *
	 * @mirror: pointer to struct hmm_mirror
	 * @update: update information (see struct mmu_notifier_range)
	 * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false
	 * and the callback needs to block, 0 otherwise.
	 *
	 * This callback ultimately originates from mmu_notifiers when the CPU
	 * page table is updated. The device driver must update its page table
	 * in response to this callback. The update argument tells what action
	 * to perform.
	 *
	 * The device driver must not return from this callback until the device
	 * page tables are completely updated (TLBs flushed, etc); this is a
	 * synchronous call.
	 */
	int (*sync_cpu_device_pagetables)(
		struct hmm_mirror *mirror,
		const struct mmu_notifier_range *update);
};

/*
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: for list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
 */
struct hmm_mirror {
	struct hmm			*hmm;
	const struct hmm_mirror_ops	*ops;
	struct list_head		list;
};

int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);

/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror);
void hmm_range_unregister(struct hmm_range *range);

/*
 * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case.
 */
#define HMM_FAULT_ALLOW_RETRY	(1 << 0)

/* Don't fault in missing PTEs, just snapshot the current state. */
#define HMM_FAULT_SNAPSHOT	(1 << 1)

long hmm_range_fault(struct hmm_range *range, unsigned int flags);

long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       unsigned int flags);
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty);

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms (i.e. 1s) already sounds like a long
 * time to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
#endif /* LINUX_HMM_H */

cn_proc.h 0000644 00000003542 14722070374 0006352 0 ustar 00 /*
 * cn_proc.h - process events connector
 *
 * Copyright (C) Matt Helsley, IBM Corp. 2005
 * Based on cn_fork.h by Nguyen Anh Quynh and Guillaume Thouvenin
 * Copyright (C) 2005 Nguyen Anh Quynh <aquynh@gmail.com>
 * Copyright (C) 2005 Guillaume Thouvenin <guillaume.thouvenin@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/ #ifndef CN_PROC_H #define CN_PROC_H #include <uapi/linux/cn_proc.h> #ifdef CONFIG_PROC_EVENTS void proc_fork_connector(struct task_struct *task); void proc_exec_connector(struct task_struct *task); void proc_id_connector(struct task_struct *task, int which_id); void proc_sid_connector(struct task_struct *task); void proc_ptrace_connector(struct task_struct *task, int which_id); void proc_comm_connector(struct task_struct *task); void proc_coredump_connector(struct task_struct *task); void proc_exit_connector(struct task_struct *task); #else static inline void proc_fork_connector(struct task_struct *task) {} static inline void proc_exec_connector(struct task_struct *task) {} static inline void proc_id_connector(struct task_struct *task, int which_id) {} static inline void proc_sid_connector(struct task_struct *task) {} static inline void proc_comm_connector(struct task_struct *task) {} static inline void proc_ptrace_connector(struct task_struct *task, int ptrace_id) {} static inline void proc_coredump_connector(struct task_struct *task) {} static inline void proc_exit_connector(struct task_struct *task) {} #endif /* CONFIG_PROC_EVENTS */ #endif /* CN_PROC_H */ fs_struct.h 0000644 00000002041 14722070374 0006734 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FS_STRUCT_H #define _LINUX_FS_STRUCT_H #include <linux/path.h> #include <linux/spinlock.h> #include <linux/seqlock.h> struct fs_struct { int users; spinlock_t lock; seqcount_t seq; int umask; int in_exec; struct path root, pwd; } __randomize_layout; extern struct kmem_cache *fs_cachep; extern void exit_fs(struct task_struct *); extern void set_fs_root(struct fs_struct *, const struct path *); extern void set_fs_pwd(struct fs_struct *, const struct path *); extern struct fs_struct *copy_fs_struct(struct fs_struct *); extern void free_fs_struct(struct fs_struct *); extern int unshare_fs_struct(void); static inline void get_fs_root(struct fs_struct *fs, struct path *root) { spin_lock(&fs->lock); *root = fs->root; path_get(root); spin_unlock(&fs->lock); } static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) { spin_lock(&fs->lock); *pwd = fs->pwd; path_get(pwd); spin_unlock(&fs->lock); } extern bool current_chrooted(void); #endif /* _LINUX_FS_STRUCT_H */ ntb_transport.h 0000644 00000007464 14722070374 0007635 0 ustar 00 /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 Intel Corporation. All rights reserved. * Copyright (C) 2015 EMC Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * BSD LICENSE * * Copyright(c) 2012 Intel Corporation. All rights reserved. * Copyright (C) 2015 EMC Corporation. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copy * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. 
* * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * PCIe NTB Transport Linux driver * * Contact Information: * Jon Mason <jon.mason@intel.com> */ struct ntb_transport_qp; struct ntb_transport_client { struct device_driver driver; int (*probe)(struct device *client_dev); void (*remove)(struct device *client_dev); }; int ntb_transport_register_client(struct ntb_transport_client *drvr); void ntb_transport_unregister_client(struct ntb_transport_client *drvr); int ntb_transport_register_client_dev(char *device_name); void ntb_transport_unregister_client_dev(char *device_name); struct ntb_queue_handlers { void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); void (*event_handler)(void *data, int status); }; unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp); unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); struct ntb_transport_qp * ntb_transport_create_queue(void *data, struct device *client_dev, const struct ntb_queue_handlers *handlers); void ntb_transport_free_queue(struct ntb_transport_qp *qp); int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len); int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len); void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len); void ntb_transport_link_up(struct ntb_transport_qp *qp); void ntb_transport_link_down(struct ntb_transport_qp *qp); bool ntb_transport_link_query(struct ntb_transport_qp *qp); unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp); journal-head.h 0000644 00000005566 14722070374 0007310 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/journal-head.h * * buffer_head fields for JBD * * 27 May 2001 Andrew Morton * Created - pulled out of fs.h */ #ifndef JOURNAL_HEAD_H_INCLUDED #define JOURNAL_HEAD_H_INCLUDED typedef unsigned int tid_t; /* Unique transaction ID */ typedef struct transaction_s transaction_t; /* Compound transaction type */ struct buffer_head; struct journal_head { /* * Points back to our buffer_head. 
[jbd_lock_bh_journal_head()] */ struct buffer_head *b_bh; /* * Reference count - see description in journal.c * [jbd_lock_bh_journal_head()] */ int b_jcount; /* * Journalling list for this buffer [jbd_lock_bh_state()] * NOTE: We *cannot* combine this with b_modified into a bitfield * as gcc would then (which the C standard allows but which is * very unuseful) make 64-bit accesses to the bitfield and clobber * b_jcount if its update races with bitfield modification. */ unsigned b_jlist; /* * This flag signals the buffer has been modified by * the currently running transaction * [jbd_lock_bh_state()] */ unsigned b_modified; /* * Copy of the buffer data frozen for writing to the log. * [jbd_lock_bh_state()] */ char *b_frozen_data; /* * Pointer to a saved copy of the buffer containing no uncommitted * deallocation references, so that allocations can avoid overwriting * uncommitted deletes. [jbd_lock_bh_state()] */ char *b_committed_data; /* * Pointer to the compound transaction which owns this buffer's * metadata: either the running transaction or the committing * transaction (if there is one). Only applies to buffers on a * transaction's data or metadata journaling list. * [j_list_lock] [jbd_lock_bh_state()] * Either of these locks is enough for reading, both are needed for * changes. */ transaction_t *b_transaction; /* * Pointer to the running compound transaction which is currently * modifying the buffer's metadata, if there was already a transaction * committing it when the new transaction touched it. * [t_list_lock] [jbd_lock_bh_state()] */ transaction_t *b_next_transaction; /* * Doubly-linked list of buffers on a transaction's data, metadata or * forget queue. [t_list_lock] [jbd_lock_bh_state()] */ struct journal_head *b_tnext, *b_tprev; /* * Pointer to the compound transaction against which this buffer * is checkpointed. Only dirty buffers can be checkpointed. * [j_list_lock] */ transaction_t *b_cp_transaction; /* * Doubly-linked list of buffers still remaining to be flushed * before an old transaction can be checkpointed. * [j_list_lock] */ struct journal_head *b_cpnext, *b_cpprev; /* Trigger type */ struct jbd2_buffer_trigger_type *b_triggers; /* Trigger type for the committing transaction's frozen data */ struct jbd2_buffer_trigger_type *b_frozen_triggers; }; #endif /* JOURNAL_HEAD_H_INCLUDED */ sbitmap.h 0000644 00000041022 14722070374 0006361 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Fast and scalable bitmaps. * * Copyright (C) 2016 Facebook * Copyright (C) 2013-2014 Jens Axboe */ #ifndef __LINUX_SCALE_BITMAP_H #define __LINUX_SCALE_BITMAP_H #include <linux/kernel.h> #include <linux/slab.h> struct seq_file; /** * struct sbitmap_word - Word in a &struct sbitmap. */ struct sbitmap_word { /** * @depth: Number of bits being used in @word/@cleared */ unsigned long depth; /** * @word: word holding free bits */ unsigned long word ____cacheline_aligned_in_smp; /** * @cleared: word holding cleared bits */ unsigned long cleared ____cacheline_aligned_in_smp; /** * @swap_lock: Held while swapping word <-> cleared */ spinlock_t swap_lock; } ____cacheline_aligned_in_smp; /** * struct sbitmap - Scalable bitmap. * * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This * trades off higher memory usage for better scalability. */ struct sbitmap { /** * @depth: Number of bits used in the whole bitmap. 
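 *
 *	For example, with @shift == 6 (64 bits per word), a @depth of 200
 *	is spread over @map_nr == 4 words: three fully used words plus a
 *	fourth word with a per-word depth of 8.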
 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/*
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/*
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node);

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	kfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
* @alloc_hint: Hint for where to start searching for a free bit. * @shallow_depth: The maximum number of bits to allocate from a single word. * * This rather specific operation allows for having multiple users with * different allocation limits. E.g., there can be a high-priority class that * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow() * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority * class can only allocate half of the total bits in the bitmap, preventing it * from starving out the high-priority class. * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint, unsigned long shallow_depth); /** * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap. * @sb: Bitmap to check. * * Return: true if any bit in the bitmap is set, false otherwise. */ bool sbitmap_any_bit_set(const struct sbitmap *sb); /** * sbitmap_any_bit_clear() - Check for an unset bit in a &struct * sbitmap. * @sb: Bitmap to check. * * Return: true if any bit in the bitmap is clear, false otherwise. */ bool sbitmap_any_bit_clear(const struct sbitmap *sb); #define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift) #define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U)) typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); /** * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap. * @start: Where to start the iteration. * @sb: Bitmap to iterate over. * @fn: Callback. Should return true to continue or false to break early. * @data: Pointer to pass to callback. * * This is inline even though it's non-trivial so that the function calls to the * callback will hopefully get optimized away. */ static inline void __sbitmap_for_each_set(struct sbitmap *sb, unsigned int start, sb_for_each_fn fn, void *data) { unsigned int index; unsigned int nr; unsigned int scanned = 0; if (start >= sb->depth) start = 0; index = SB_NR_TO_INDEX(sb, start); nr = SB_NR_TO_BIT(sb, start); while (scanned < sb->depth) { unsigned long word; unsigned int depth = min_t(unsigned int, sb->map[index].depth - nr, sb->depth - scanned); scanned += depth; word = sb->map[index].word & ~sb->map[index].cleared; if (!word) goto next; /* * On the first iteration of the outer loop, we need to add the * bit offset back to the size of the word for find_next_bit(). * On all other iterations, nr is zero, so this is a noop. */ depth += nr; while (1) { nr = find_next_bit(&word, depth, nr); if (nr >= depth) break; if (!fn(sb, (index << sb->shift) + nr, data)) return; nr++; } next: nr = 0; if (++index >= sb->map_nr) index = 0; } } /** * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap. * @sb: Bitmap to iterate over. * @fn: Callback. Should return true to continue or false to break early. * @data: Pointer to pass to callback. 
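 *
 * A usage sketch (count_cb() is illustrative only, not part of this API):
 * the callback returns true to keep walking, so this counts every set bit
 * in @sb:
 *
 *	static bool count_cb(struct sbitmap *sb, unsigned int bitnr, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_cb, &count);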
*/ static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn, void *data) { __sbitmap_for_each_set(sb, 0, fn, data); } static inline unsigned long *__sbitmap_word(struct sbitmap *sb, unsigned int bitnr) { return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word; } /* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */ static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr) { set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr) { clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } /* * This one is special, since it doesn't actually clear the bit, rather it * sets the corresponding bit in the ->cleared mask instead. Paired with * the caller doing sbitmap_deferred_clear() if a given index is full, which * will clear the previously freed entries in the corresponding ->word. */ static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr) { unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared; set_bit(SB_NR_TO_BIT(sb, bitnr), addr); } static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb, unsigned int bitnr) { clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) { return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } /** * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file. * @sb: Bitmap to show. * @m: struct seq_file to write to. * * This is intended for debugging. The format may change at any time. */ void sbitmap_show(struct sbitmap *sb, struct seq_file *m); /** * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct * seq_file. * @sb: Bitmap to show. * @m: struct seq_file to write to. * * This is intended for debugging. The output isn't guaranteed to be internally * consistent. */ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m); /** * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific * memory node. * @sbq: Bitmap queue to initialize. * @depth: See sbitmap_init_node(). * @shift: See sbitmap_init_node(). * @round_robin: See sbitmap_get(). * @flags: Allocation flags. * @node: Memory node to allocate on. * * Return: Zero on success or negative errno on failure. */ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, int shift, bool round_robin, gfp_t flags, int node); /** * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue. * * @sbq: Bitmap queue to free. */ static inline void sbitmap_queue_free(struct sbitmap_queue *sbq) { kfree(sbq->ws); free_percpu(sbq->alloc_hint); sbitmap_free(&sbq->sb); } /** * sbitmap_queue_resize() - Resize a &struct sbitmap_queue. * @sbq: Bitmap queue to resize. * @depth: New number of bits to resize to. * * Like sbitmap_resize(), this doesn't reallocate anything. It has to do * some extra work on the &struct sbitmap_queue, so it's not safe to just * resize the underlying &struct sbitmap. */ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth); /** * __sbitmap_queue_get() - Try to allocate a free bit from a &struct * sbitmap_queue with preemption already disabled. * @sbq: Bitmap queue to allocate from. * * Return: Non-negative allocated bit number if successful, -1 otherwise. 
*/ int __sbitmap_queue_get(struct sbitmap_queue *sbq); /** * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct * sbitmap_queue, limiting the depth used from each word, with preemption * already disabled. * @sbq: Bitmap queue to allocate from. * @shallow_depth: The maximum number of bits to allocate from a single word. * See sbitmap_get_shallow(). * * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after * initializing @sbq. * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, unsigned int shallow_depth); /** * sbitmap_queue_get() - Try to allocate a free bit from a &struct * sbitmap_queue. * @sbq: Bitmap queue to allocate from. * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to * sbitmap_queue_clear()). * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq, unsigned int *cpu) { int nr; *cpu = get_cpu(); nr = __sbitmap_queue_get(sbq); put_cpu(); return nr; } /** * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct * sbitmap_queue, limiting the depth used from each word. * @sbq: Bitmap queue to allocate from. * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to * sbitmap_queue_clear()). * @shallow_depth: The maximum number of bits to allocate from a single word. * See sbitmap_get_shallow(). * * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after * initializing @sbq. * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, unsigned int *cpu, unsigned int shallow_depth) { int nr; *cpu = get_cpu(); nr = __sbitmap_queue_get_shallow(sbq, shallow_depth); put_cpu(); return nr; } /** * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the * minimum shallow depth that will be used. * @sbq: Bitmap queue in question. * @min_shallow_depth: The minimum shallow depth that will be passed to * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow(). * * sbitmap_queue_clear() batches wakeups as an optimization. The batch size * depends on the depth of the bitmap. Since the shallow allocation functions * effectively operate with a different depth, the shallow depth must be taken * into account when calculating the batch size. This function must be called * with the minimum shallow depth that will be used. Failure to do so can result * in missed wakeups. */ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, unsigned int min_shallow_depth); /** * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a * &struct sbitmap_queue. * @sbq: Bitmap to free from. * @nr: Bit number to free. * @cpu: CPU the bit was allocated on. */ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, unsigned int cpu); static inline int sbq_index_inc(int index) { return (index + 1) & (SBQ_WAIT_QUEUES - 1); } static inline void sbq_index_atomic_inc(atomic_t *index) { int old = atomic_read(index); int new = sbq_index_inc(old); atomic_cmpxchg(index, old, new); } /** * sbq_wait_ptr() - Get the next wait queue to use for a &struct * sbitmap_queue. * @sbq: Bitmap queue to wait on. * @wait_index: A counter per "user" of @sbq. 
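 *
 * A simplified sketch of the intended allocate-or-wait pattern (the
 * driver-private my_wait_index is hypothetical, and a real user would
 * typically also re-fetch the wait queue after sleeping):
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws;
 *	unsigned int cpu;
 *	int nr;
 *
 *	ws = sbq_wait_ptr(sbq, &my_wait_index);
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = sbitmap_queue_get(sbq, &cpu);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);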
*/ static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq, atomic_t *wait_index) { struct sbq_wait_state *ws; ws = &sbq->ws[atomic_read(wait_index)]; sbq_index_atomic_inc(wait_index); return ws; } /** * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct * sbitmap_queue. * @sbq: Bitmap queue to wake up. */ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq); /** * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue * on a &struct sbitmap_queue. * @sbq: Bitmap queue to wake up. */ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq); /** * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct * seq_file. * @sbq: Bitmap queue to show. * @m: struct seq_file to write to. * * This is intended for debugging. The format may change at any time. */ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m); struct sbq_wait { struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */ struct wait_queue_entry wait; }; #define DEFINE_SBQ_WAIT(name) \ struct sbq_wait name = { \ .sbq = NULL, \ .wait = { \ .private = current, \ .func = autoremove_wake_function, \ .entry = LIST_HEAD_INIT((name).wait.entry), \ } \ } /* * Wrapper around prepare_to_wait_exclusive(), which maintains some extra * internal state. */ void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, struct sbq_wait *sbq_wait, int state); /* * Must be paired with sbitmap_prepare_to_wait(). */ void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, struct sbq_wait *sbq_wait); /* * Wrapper around add_wait_queue(), which maintains some extra internal state */ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, struct sbq_wait *sbq_wait); /* * Must be paired with sbitmap_add_wait_queue() */ void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait); #endif /* __LINUX_SCALE_BITMAP_H */ intel-iommu.h 0000644 00000055642 14722070374 0007176 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2006-2015, Intel Corporation. * * Authors: Ashok Raj <ashok.raj@intel.com> * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * David Woodhouse <David.Woodhouse@intel.com> */ #ifndef _INTEL_IOMMU_H_ #define _INTEL_IOMMU_H_ #include <linux/types.h> #include <linux/iova.h> #include <linux/io.h> #include <linux/idr.h> #include <linux/mmu_notifier.h> #include <linux/list.h> #include <linux/iommu.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/dmar.h> #include <asm/cacheflush.h> #include <asm/iommu.h> /* * VT-d hardware uses 4KiB page size regardless of host page size. */ #define VTD_PAGE_SHIFT (12) #define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) #define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) #define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) #define VTD_STRIDE_SHIFT (9) #define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT) #define DMA_PTE_READ (1) #define DMA_PTE_WRITE (2) #define DMA_PTE_LARGE_PAGE (1 << 7) #define DMA_PTE_SNP (1 << 11) #define CONTEXT_TT_MULTI_LEVEL 0 #define CONTEXT_TT_DEV_IOTLB 1 #define CONTEXT_TT_PASS_THROUGH 2 #define CONTEXT_PASIDE BIT_ULL(3) /* * Intel IOMMU register specification per version 1.0 public spec. 
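 *
 * A small sketch of how these offsets combine with the access helpers and
 * decode macros defined below (iommu->reg is the remapped register base):
 *
 *	u32 ver = dmar_readl(iommu->reg + DMAR_VER_REG);
 *
 *	pr_info("VT-d version %d.%d\n",
 *		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));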
*/ #define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ #define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ #define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ #define DMAR_GCMD_REG 0x18 /* Global command register */ #define DMAR_GSTS_REG 0x1c /* Global status register */ #define DMAR_RTADDR_REG 0x20 /* Root entry table */ #define DMAR_CCMD_REG 0x28 /* Context command reg */ #define DMAR_FSTS_REG 0x34 /* Fault Status register */ #define DMAR_FECTL_REG 0x38 /* Fault control register */ #define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */ #define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */ #define DMAR_FEUADDR_REG 0x44 /* Upper address register */ #define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */ #define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */ #define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */ #define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ #define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */ #define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ #define DMAR_IQH_REG 0x80 /* Invalidation queue head register */ #define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */ #define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ #define DMAR_PQH_REG 0xc0 /* Page request queue head register */ #define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ #define DMAR_PQA_REG 0xd0 /* Page request queue address register */ #define DMAR_PRS_REG 0xdc /* Page request status register */ #define DMAR_PECTL_REG 0xe0 /* Page request event control register */ #define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ #define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ #define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ #define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */ #define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */ #define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */ #define DMAR_MTRR_FIX16K_80000_REG 0x128 #define DMAR_MTRR_FIX16K_A0000_REG 0x130 #define DMAR_MTRR_FIX4K_C0000_REG 0x138 #define DMAR_MTRR_FIX4K_C8000_REG 0x140 #define DMAR_MTRR_FIX4K_D0000_REG 0x148 #define DMAR_MTRR_FIX4K_D8000_REG 0x150 #define DMAR_MTRR_FIX4K_E0000_REG 0x158 #define DMAR_MTRR_FIX4K_E8000_REG 0x160 #define DMAR_MTRR_FIX4K_F0000_REG 0x168 #define DMAR_MTRR_FIX4K_F8000_REG 0x170 #define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */ #define DMAR_MTRR_PHYSMASK0_REG 0x188 #define DMAR_MTRR_PHYSBASE1_REG 0x190 #define DMAR_MTRR_PHYSMASK1_REG 0x198 #define DMAR_MTRR_PHYSBASE2_REG 0x1a0 #define DMAR_MTRR_PHYSMASK2_REG 0x1a8 #define DMAR_MTRR_PHYSBASE3_REG 0x1b0 #define DMAR_MTRR_PHYSMASK3_REG 0x1b8 #define DMAR_MTRR_PHYSBASE4_REG 0x1c0 #define DMAR_MTRR_PHYSMASK4_REG 0x1c8 #define DMAR_MTRR_PHYSBASE5_REG 0x1d0 #define DMAR_MTRR_PHYSMASK5_REG 0x1d8 #define DMAR_MTRR_PHYSBASE6_REG 0x1e0 #define DMAR_MTRR_PHYSMASK6_REG 0x1e8 #define DMAR_MTRR_PHYSBASE7_REG 0x1f0 #define DMAR_MTRR_PHYSMASK7_REG 0x1f8 #define DMAR_MTRR_PHYSBASE8_REG 0x200 #define DMAR_MTRR_PHYSMASK8_REG 0x208 #define DMAR_MTRR_PHYSBASE9_REG 0x210 #define DMAR_MTRR_PHYSMASK9_REG 0x218 #define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */ #define DMAR_VCMD_REG 0xe10 /* Virtual command register 
*/ #define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */ #define OFFSET_STRIDE (9) #define dmar_readq(a) readq(a) #define dmar_writeq(a,v) writeq(v,a) #define dmar_readl(a) readl(a) #define dmar_writel(a, v) writel(v, a) #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) #define DMAR_VER_MINOR(v) ((v) & 0x0f) /* * Decoding Capability Register */ #define cap_5lp_support(c) (((c) >> 60) & 1) #define cap_pi_support(c) (((c) >> 59) & 1) #define cap_fl1gp_support(c) (((c) >> 56) & 1) #define cap_read_drain(c) (((c) >> 55) & 1) #define cap_write_drain(c) (((c) >> 54) & 1) #define cap_max_amask_val(c) (((c) >> 48) & 0x3f) #define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1) #define cap_pgsel_inv(c) (((c) >> 39) & 1) #define cap_super_page_val(c) (((c) >> 34) & 0xf) #define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \ * OFFSET_STRIDE) + 21) #define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16) #define cap_max_fault_reg_offset(c) \ (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16) #define cap_zlr(c) (((c) >> 22) & 1) #define cap_isoch(c) (((c) >> 23) & 1) #define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1) #define cap_sagaw(c) (((c) >> 8) & 0x1f) #define cap_caching_mode(c) (((c) >> 7) & 1) #define cap_phmr(c) (((c) >> 6) & 1) #define cap_plmr(c) (((c) >> 5) & 1) #define cap_rwbf(c) (((c) >> 4) & 1) #define cap_afl(c) (((c) >> 3) & 1) #define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7))) /* * Extended Capability Register */ #define ecap_smpwc(e) (((e) >> 48) & 0x1) #define ecap_flts(e) (((e) >> 47) & 0x1) #define ecap_slts(e) (((e) >> 46) & 0x1) #define ecap_smts(e) (((e) >> 43) & 0x1) #define ecap_dit(e) ((e >> 41) & 0x1) #define ecap_pasid(e) ((e >> 40) & 0x1) #define ecap_pss(e) ((e >> 35) & 0x1f) #define ecap_eafs(e) ((e >> 34) & 0x1) #define ecap_nwfs(e) ((e >> 33) & 0x1) #define ecap_srs(e) ((e >> 31) & 0x1) #define ecap_ers(e) ((e >> 30) & 0x1) #define ecap_prs(e) ((e >> 29) & 0x1) #define ecap_broken_pasid(e) ((e >> 28) & 0x1) #define ecap_dis(e) ((e >> 27) & 0x1) #define ecap_nest(e) ((e >> 26) & 0x1) #define ecap_mts(e) ((e >> 25) & 0x1) #define ecap_ecs(e) ((e >> 24) & 0x1) #define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16) #define ecap_coherent(e) ((e) & 0x1) #define ecap_qis(e) ((e) & 0x2) #define ecap_pass_through(e) ((e >> 6) & 0x1) #define ecap_eim_support(e) ((e >> 4) & 0x1) #define ecap_ir_support(e) ((e >> 3) & 0x1) #define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1) #define ecap_max_handle_mask(e) ((e >> 20) & 0xf) #define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */ /* IOTLB_REG */ #define DMA_TLB_FLUSH_GRANU_OFFSET 60 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) #define DMA_TLB_DSI_FLUSH (((u64)2) << 60) #define DMA_TLB_PSI_FLUSH (((u64)3) << 60) #define DMA_TLB_IIRG(type) ((type >> 60) & 3) #define DMA_TLB_IAIG(val) (((val) >> 57) & 3) #define DMA_TLB_READ_DRAIN (((u64)1) << 49) #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) #define DMA_TLB_IVT (((u64)1) << 63) #define DMA_TLB_IH_NONLEAF (((u64)1) << 6) #define DMA_TLB_MAX_SIZE (0x3f) /* INVALID_DESC */ #define DMA_CCMD_INVL_GRANU_OFFSET 61 #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4) #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4) #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4) #define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) #define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) #define 
DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6) #define DMA_ID_TLB_ADDR(addr) (addr) #define DMA_ID_TLB_ADDR_MASK(mask) (mask) /* PMEN_REG */ #define DMA_PMEN_EPM (((u32)1)<<31) #define DMA_PMEN_PRS (((u32)1)<<0) /* GCMD_REG */ #define DMA_GCMD_TE (((u32)1) << 31) #define DMA_GCMD_SRTP (((u32)1) << 30) #define DMA_GCMD_SFL (((u32)1) << 29) #define DMA_GCMD_EAFL (((u32)1) << 28) #define DMA_GCMD_WBF (((u32)1) << 27) #define DMA_GCMD_QIE (((u32)1) << 26) #define DMA_GCMD_SIRTP (((u32)1) << 24) #define DMA_GCMD_IRE (((u32) 1) << 25) #define DMA_GCMD_CFI (((u32) 1) << 23) /* GSTS_REG */ #define DMA_GSTS_TES (((u32)1) << 31) #define DMA_GSTS_RTPS (((u32)1) << 30) #define DMA_GSTS_FLS (((u32)1) << 29) #define DMA_GSTS_AFLS (((u32)1) << 28) #define DMA_GSTS_WBFS (((u32)1) << 27) #define DMA_GSTS_QIES (((u32)1) << 26) #define DMA_GSTS_IRTPS (((u32)1) << 24) #define DMA_GSTS_IRES (((u32)1) << 25) #define DMA_GSTS_CFIS (((u32)1) << 23) /* DMA_RTADDR_REG */ #define DMA_RTADDR_RTT (((u64)1) << 11) #define DMA_RTADDR_SMT (((u64)1) << 10) /* CCMD_REG */ #define DMA_CCMD_ICC (((u64)1) << 63) #define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61) #define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61) #define DMA_CCMD_DEVICE_INVL (((u64)3) << 61) #define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32) #define DMA_CCMD_MASK_NOBIT 0 #define DMA_CCMD_MASK_1BIT 1 #define DMA_CCMD_MASK_2BIT 2 #define DMA_CCMD_MASK_3BIT 3 #define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16) #define DMA_CCMD_DID(d) ((u64)((d) & 0xffff)) /* FECTL_REG */ #define DMA_FECTL_IM (((u32)1) << 31) /* FSTS_REG */ #define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */ #define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */ #define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */ #define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */ #define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */ #define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */ #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) /* FRCD_REG, 32 bits access */ #define DMA_FRCD_F (((u32)1) << 31) #define dma_frcd_type(d) ((d >> 30) & 1) #define dma_frcd_fault_reason(c) (c & 0xff) #define dma_frcd_source_id(c) (c & 0xffff) #define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff) #define dma_frcd_pasid_present(c) (((c) >> 31) & 1) /* low 64 bit */ #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) /* PRS_REG */ #define DMA_PRS_PPR ((u32)1) #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ do { \ cycles_t start_time = get_cycles(); \ while (1) { \ sts = op(iommu->reg + offset); \ if (cond) \ break; \ if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ panic("DMAR hardware is malfunctioning\n"); \ cpu_relax(); \ } \ } while (0) #define QI_LENGTH 256 /* queue length */ enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT }; #define QI_CC_TYPE 0x1 #define QI_IOTLB_TYPE 0x2 #define QI_DIOTLB_TYPE 0x3 #define QI_IEC_TYPE 0x4 #define QI_IWD_TYPE 0x5 #define QI_EIOTLB_TYPE 0x6 #define QI_PC_TYPE 0x7 #define QI_DEIOTLB_TYPE 0x8 #define QI_PGRP_RESP_TYPE 0x9 #define QI_PSTRM_RESP_TYPE 0xa #define QI_IEC_SELECTIVE (((u64)1) << 4) #define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) #define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27)) #define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) #define QI_IWD_STATUS_WRITE (((u64)1) << 5) #define QI_IOTLB_DID(did) (((u64)did) << 16) #define QI_IOTLB_DR(dr) (((u64)dr) << 7) #define QI_IOTLB_DW(dw) (((u64)dw) << 6) #define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) #define QI_IOTLB_ADDR(addr) (((u64)addr) & 
VTD_PAGE_MASK) #define QI_IOTLB_IH(ih) (((u64)ih) << 6) #define QI_IOTLB_AM(am) (((u8)am)) #define QI_CC_FM(fm) (((u64)fm) << 48) #define QI_CC_SID(sid) (((u64)sid) << 32) #define QI_CC_DID(did) (((u64)did) << 16) #define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4)) #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) #define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ ((u64)((pfsid >> 4) & 0xfff) << 52)) #define QI_DEV_IOTLB_SIZE 1 #define QI_DEV_IOTLB_MAX_INVS 32 #define QI_PC_PASID(pasid) (((u64)pasid) << 32) #define QI_PC_DID(did) (((u64)did) << 16) #define QI_PC_GRAN(gran) (((u64)gran) << 4) #define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0)) #define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) #define QI_EIOTLB_AM(am) (((u64)am)) #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) #define QI_EIOTLB_DID(did) (((u64)did) << 16) #define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) #define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) #define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ ((u64)((pfsid >> 4) & 0xfff) << 52)) #define QI_DEV_EIOTLB_MAX_INVS 32 /* Page group response descriptor QW0 */ #define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) #define QI_PGRP_PDP(p) (((u64)(p)) << 5) #define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12) #define QI_PGRP_DID(rid) (((u64)(rid)) << 16) #define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) /* Page group response descriptor QW1 */ #define QI_PGRP_LPIG(x) (((u64)(x)) << 2) #define QI_PGRP_IDX(idx) (((u64)(idx)) << 3) #define QI_RESP_SUCCESS 0x0 #define QI_RESP_INVALID 0x1 #define QI_RESP_FAILURE 0xf #define QI_GRAN_NONG_PASID 2 #define QI_GRAN_PSI_PASID 3 #define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap)) struct qi_desc { u64 qw0; u64 qw1; u64 qw2; u64 qw3; }; struct q_inval { raw_spinlock_t q_lock; void *desc; /* invalidation queue */ int *desc_status; /* desc status */ int free_head; /* first free entry */ int free_tail; /* last free entry */ int free_cnt; }; #ifdef CONFIG_IRQ_REMAP /* 1MB - maximum possible interrupt remapping table size */ #define INTR_REMAP_PAGE_ORDER 8 #define INTR_REMAP_TABLE_REG_SIZE 0xf #define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf #define INTR_REMAP_TABLE_ENTRIES 65536 struct irq_domain; struct ir_table { struct irte *base; unsigned long *bitmap; }; #endif struct iommu_flush { void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, u64 type); void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type); }; enum { SR_DMAR_FECTL_REG, SR_DMAR_FEDATA_REG, SR_DMAR_FEADDR_REG, SR_DMAR_FEUADDR_REG, MAX_SR_DMAR_REGS }; #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) extern int intel_iommu_sm; #define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) #define pasid_supported(iommu) (sm_supported(iommu) && \ ecap_pasid((iommu)->ecap)) struct pasid_entry; struct pasid_state_entry; struct page_req_dsc; /* * 0: Present * 1-11: Reserved * 12-63: Context Ptr (12 - (haw-1)) * 
64-127: Reserved */ struct root_entry { u64 lo; u64 hi; }; /* * low 64 bits: * 0: present * 1: fault processing disable * 2-3: translation type * 12-63: address space root * high 64 bits: * 0-2: address width * 3-6: aval * 8-23: domain id */ struct context_entry { u64 lo; u64 hi; }; struct dmar_domain { int nid; /* node id */ unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED]; /* Refcount of devices per iommu */ u16 iommu_did[DMAR_UNITS_SUPPORTED]; /* Domain ids per IOMMU. Use u16 since * domain ids are 16 bit wide according * to VT-d spec, section 9.3 */ unsigned int auxd_refcnt; /* Refcount of auxiliary attaching */ bool has_iotlb_device; struct list_head devices; /* all devices' list */ struct list_head auxd; /* link to device's auxiliary list */ struct iova_domain iovad; /* iova's that belong to this domain */ struct dma_pte *pgd; /* virtual address */ int gaw; /* max guest address width */ /* adjusted guest address width, 0 is level 2 30-bit */ int agaw; int flags; /* flags to find out type of domain */ int iommu_coherency;/* indicate coherency of iommu access */ int iommu_snooping; /* indicate snooping control feature*/ int iommu_count; /* reference count of iommu */ int iommu_superpage;/* Level of superpages supported: 0 == 4KiB (no superpages), 1 == 2MiB, 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ u64 max_addr; /* maximum mapped address */ int default_pasid; /* * The default pasid used for non-SVM * traffic on mediated devices. */ struct iommu_domain domain; /* generic domain data structure for iommu core */ }; struct intel_iommu { void __iomem *reg; /* Pointer to hardware regs, virtual addr */ u64 reg_phys; /* physical address of hw register set */ u64 reg_size; /* size of hw register set */ u64 cap; u64 ecap; u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ raw_spinlock_t register_lock; /* protect register handling */ int seq_id; /* sequence id of the iommu */ int agaw; /* agaw of this iommu */ int msagaw; /* max sagaw of this iommu */ unsigned int irq, pr_irq; u16 segment; /* PCI segment# */ unsigned char name[13]; /* Device Name */ #ifdef CONFIG_INTEL_IOMMU unsigned long *domain_ids; /* bitmap of domains */ struct dmar_domain ***domains; /* ptr to domains */ spinlock_t lock; /* protect context, domain ids */ struct root_entry *root_entry; /* virtual address */ struct iommu_flush flush; #endif #ifdef CONFIG_INTEL_IOMMU_SVM struct page_req_dsc *prq; unsigned char prq_name[16]; /* Name for PRQ interrupt */ #endif struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ #ifdef CONFIG_IRQ_REMAP struct ir_table *ir_table; /* Interrupt remapping info */ struct irq_domain *ir_domain; struct irq_domain *ir_msi_domain; #endif struct iommu_device iommu; /* IOMMU core code handle */ int node; u32 flags; /* Software defined flags */ struct dmar_drhd_unit *drhd; }; /* PCI domain-device relationship */ struct device_domain_info { struct list_head link; /* link to domain siblings */ struct list_head global; /* link to global list */ struct list_head table; /* link to pasid table */ struct list_head auxiliary_domains; /* auxiliary domains * attached to this device */ u8 bus; /* PCI bus number */ u8 devfn; /* PCI devfn number */ u16 pfsid; /* SRIOV physical function source ID */ u8 pasid_supported:3; u8 pasid_enabled:1; u8 pri_supported:1; u8 pri_enabled:1; u8 ats_supported:1; u8 ats_enabled:1; u8 auxd_enabled:1; /* Multiple domains per device */ u8 ats_qdep; struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ struct intel_iommu 
*iommu; /* IOMMU used by this device */ struct dmar_domain *domain; /* pointer to domain */ struct pasid_table *pasid_table; /* pasid table */ }; static inline void __iommu_flush_cache( struct intel_iommu *iommu, void *addr, int size) { if (!ecap_coherent(iommu->ecap)) clflush_cache_range(addr, size); } /* * 0: readable * 1: writable * 2-6: reserved * 7: super page * 8-10: available * 11: snoop behavior * 12-63: Host physcial address */ struct dma_pte { u64 val; }; static inline void dma_clear_pte(struct dma_pte *pte) { pte->val = 0; } static inline u64 dma_pte_addr(struct dma_pte *pte) { #ifdef CONFIG_64BIT return pte->val & VTD_PAGE_MASK; #else /* Must have a full atomic 64-bit read */ return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK; #endif } static inline bool dma_pte_present(struct dma_pte *pte) { return (pte->val & 3) != 0; } static inline bool dma_pte_superpage(struct dma_pte *pte) { return (pte->val & DMA_PTE_LARGE_PAGE); } static inline int first_pte_in_page(struct dma_pte *pte) { return !((unsigned long)pte & ~VTD_PAGE_MASK); } extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); extern int dmar_find_matched_atsr_unit(struct pci_dev *dev); extern int dmar_enable_qi(struct intel_iommu *iommu); extern void dmar_disable_qi(struct intel_iommu *iommu); extern int dmar_reenable_qi(struct intel_iommu *iommu); extern void qi_global_iec(struct intel_iommu *iommu); extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, u64 type); extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type); extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, u16 qdep, u64 addr, unsigned mask); extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int dmar_ir_support(void); void *alloc_pgtable_page(int node); void free_pgtable_page(void *vaddr); struct intel_iommu *domain_get_iommu(struct dmar_domain *domain); int for_each_device_domain(int (*fn)(struct device_domain_info *info, void *data), void *data); void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); #ifdef CONFIG_INTEL_IOMMU_SVM int intel_svm_init(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu); extern int intel_svm_finish_prq(struct intel_iommu *iommu); struct svm_dev_ops; struct intel_svm_dev { struct list_head list; struct rcu_head rcu; struct device *dev; struct svm_dev_ops *ops; int users; u16 did; u16 dev_iotlb:1; u16 sid, qdep; }; struct intel_svm { struct mmu_notifier notifier; struct mm_struct *mm; struct intel_iommu *iommu; int flags; int pasid; struct list_head devs; struct list_head list; }; extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); #endif #ifdef CONFIG_INTEL_IOMMU_DEBUGFS void intel_iommu_debugfs_init(void); #else static inline void intel_iommu_debugfs_init(void) {} #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */ extern const struct attribute_group *intel_iommu_groups[]; bool context_present(struct context_entry *context); struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, u8 devfn, int alloc); #ifdef CONFIG_INTEL_IOMMU extern int iommu_calculate_agaw(struct intel_iommu *iommu); extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); extern int dmar_disabled; extern int intel_iommu_enabled; extern int intel_iommu_gfx_mapped; #else static inline int iommu_calculate_agaw(struct intel_iommu *iommu) { 
	return 0;
}

static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}

#define dmar_disabled		(1)
#define intel_iommu_enabled	(0)
#endif

#endif

list_lru.h 0000644 00000016446 14722070374 0006573 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;

/* list_lru_walk_cb has to always return one of these */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup lists, indexed by memcg_cache_id */
	struct list_lru_one	*lru[0];
};

struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
#ifdef CONFIG_MEMCG_KMEM
	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
	struct list_lru_memcg	__rcu *memcg_lrus;
#endif
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#ifdef CONFIG_MEMCG_KMEM
	struct list_head	list;
	int			shrinker_id;
	bool			memcg_aware;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker);

#define list_lru_init(lru)				\
	__list_lru_init((lru), false, NULL, NULL)
#define list_lru_init_key(lru, key)			\
	__list_lru_init((lru), false, (key), NULL)
#define list_lru_init_memcg(lru, shrinker)		\
	__list_lru_init((lru), true, NULL, shrinker)

int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. Therefore the caller does not need to keep state about
 * whether or not the element already belongs in the list and is allowed to
 * lazily update it. Note however that this is valid for *a* list, not *this*
 * list. If the caller organizes itself in a way that elements can be in more
 * than one type of list, it is up to the caller to fully remove the item from
 * the previous list (with list_lru_del() for instance) before moving it
 * to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already pertaining to
 * a list are also valid for list_lru_del().
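 *
 * A minimal sketch, assuming a hypothetical object type with an embedded
 * list_head and a single lru (my_object and my_lru are not part of this API):
 *
 *	struct my_object {
 *		struct list_head lru_node;
 *	};
 *
 *	list_lru_add(&my_lru, &obj->lru_node);
 *	...
 *	list_lru_del(&my_lru, &obj->lru_node);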
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}

static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);

/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
/**
 * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * Same as @list_lru_walk_one except that the spinlock is acquired with
 * spin_lock_irq().
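 *
 * A sketch of an @isolate callback (demote_cb() is illustrative only): it
 * moves each scanned item to a caller-owned dispose list for freeing after
 * the walk, which avoids freeing objects while the lru lock is held:
 *
 *	static enum lru_status demote_cb(struct list_head *item,
 *					 struct list_lru_one *list,
 *					 spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *
 *		list_lru_isolate_move(list, item, dispose);
 *		return LRU_REMOVED;
 *	}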
*/ unsigned long list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk); unsigned long list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk); static inline unsigned long list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc, list_lru_walk_cb isolate, void *cb_arg) { return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg, &sc->nr_to_scan); } static inline unsigned long list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc, list_lru_walk_cb isolate, void *cb_arg) { return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg, &sc->nr_to_scan); } static inline unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate, void *cb_arg, unsigned long nr_to_walk) { long isolated = 0; int nid; for_each_node_state(nid, N_NORMAL_MEMORY) { isolated += list_lru_walk_node(lru, nid, isolate, cb_arg, &nr_to_walk); if (nr_to_walk <= 0) break; } return isolated; } #endif /* _LRU_LIST_H */ blk_types.h 0000644 00000032210 14722070374 0006715 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Block data types and constants. Directly include this file only to * break an include dependency loop. */ #ifndef __LINUX_BLK_TYPES_H #define __LINUX_BLK_TYPES_H #include <linux/types.h> #include <linux/bvec.h> #include <linux/ktime.h> struct bio_set; struct bio; struct bio_integrity_payload; struct page; struct block_device; struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); /* * Block error status values. See block/blk-core:blk_errors for the details. * Alpha cannot write a byte atomically, so we need to use 32-bit value. */ #if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) typedef u32 __bitwise blk_status_t; #else typedef u8 __bitwise blk_status_t; #endif #define BLK_STS_OK 0 #define BLK_STS_NOTSUPP ((__force blk_status_t)1) #define BLK_STS_TIMEOUT ((__force blk_status_t)2) #define BLK_STS_NOSPC ((__force blk_status_t)3) #define BLK_STS_TRANSPORT ((__force blk_status_t)4) #define BLK_STS_TARGET ((__force blk_status_t)5) #define BLK_STS_NEXUS ((__force blk_status_t)6) #define BLK_STS_MEDIUM ((__force blk_status_t)7) #define BLK_STS_PROTECTION ((__force blk_status_t)8) #define BLK_STS_RESOURCE ((__force blk_status_t)9) #define BLK_STS_IOERR ((__force blk_status_t)10) /* hack for device mapper, don't use elsewhere: */ #define BLK_STS_DM_REQUEUE ((__force blk_status_t)11) #define BLK_STS_AGAIN ((__force blk_status_t)12) /* * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if * device related resources are unavailable, but the driver can guarantee * that the queue will be rerun in the future once resources become * available again. This is typically the case for device specific * resources that are consumed for IO. If the driver fails allocating these * resources, we know that inflight (or pending) IO will free these * resources upon completion. * * This is different from BLK_STS_RESOURCE in that it explicitly references * a device specific resource. For resources of wider scope, allocation * failure can happen without having pending IO. This means that we can't * rely on request completions freeing these resources, as IO may not be in * flight. Examples of that are kernel memory allocations, DMA mappings, or * any other system wide resources.
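 *
 * A hypothetical ->queue_rq() fragment showing the distinction
 * (demo_alloc_device_tag() is an assumption, not a real API):
 *
 *	if (!demo_alloc_device_tag(dev))	(bounded, device-specific)
 *		return BLK_STS_DEV_RESOURCE;	(completing IO reruns the queue)
 *	buf = kmalloc(len, GFP_ATOMIC);		(system-wide resource)
 *	if (!buf)
 *		return BLK_STS_RESOURCE;	(no completion-driven rerun is
 *						 guaranteed)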
*/ #define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) /** * blk_path_error - returns true if error may be path related * @error: status the request was completed with * * Description: * This classifies block error status into non-retryable errors and ones * that may be successful if retried on a failover path. * * Return: * %false - retrying failover path will not help * %true - may succeed if retried */ static inline bool blk_path_error(blk_status_t error) { switch (error) { case BLK_STS_NOTSUPP: case BLK_STS_NOSPC: case BLK_STS_TARGET: case BLK_STS_NEXUS: case BLK_STS_MEDIUM: case BLK_STS_PROTECTION: return false; } /* Anything else could be a path failure, so should be retried */ return true; } /* * From most significant bit: * 1 bit: reserved for other usage, see below * 12 bits: original size of bio * 51 bits: issue time of bio */ #define BIO_ISSUE_RES_BITS 1 #define BIO_ISSUE_SIZE_BITS 12 #define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS) #define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS) #define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1) #define BIO_ISSUE_SIZE_MASK \ (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT) #define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1)) /* Reserved bit for blk-throtl */ #define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63) struct bio_issue { u64 value; }; static inline u64 __bio_issue_time(u64 time) { return time & BIO_ISSUE_TIME_MASK; } static inline u64 bio_issue_time(struct bio_issue *issue) { return __bio_issue_time(issue->value); } static inline sector_t bio_issue_size(struct bio_issue *issue) { return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT); } static inline void bio_issue_init(struct bio_issue *issue, sector_t size) { size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1; issue->value = ((issue->value & BIO_ISSUE_RES_MASK) | (ktime_get_ns() & BIO_ISSUE_TIME_MASK) | ((u64)size << BIO_ISSUE_SIZE_SHIFT)); } /* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) */ struct bio { struct bio *bi_next; /* request queue link */ struct gendisk *bi_disk; unsigned int bi_opf; /* bottom bits req flags, * top bits REQ_OP. Use * accessors. */ unsigned short bi_flags; /* status, etc and bvec pool number */ unsigned short bi_ioprio; unsigned short bi_write_hint; blk_status_t bi_status; u8 bi_partno; struct bvec_iter bi_iter; atomic_t __bi_remaining; bio_end_io_t *bi_end_io; void *bi_private; #ifdef CONFIG_BLK_CGROUP /* * Represents the association of the css and request_queue for the bio. * If a bio goes direct to device, it will not have a blkg as it will * not have a request_queue associated with it. The reference is put * on release of the bio. */ struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; #ifdef CONFIG_BLK_CGROUP_IOCOST u64 bi_iocost_cost; #endif #endif union { #if defined(CONFIG_BLK_DEV_INTEGRITY) struct bio_integrity_payload *bi_integrity; /* data integrity */ #endif }; unsigned short bi_vcnt; /* how many bio_vec's */ /* * Everything starting with bi_max_vecs will be preserved by bio_reset() */ unsigned short bi_max_vecs; /* max bvl_vecs we can hold */ atomic_t __bi_cnt; /* pin count */ struct bio_vec *bi_io_vec; /* the actual vec list */ struct bio_set *bi_pool; /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. This member * MUST obviously be kept at the very end of the bio. 
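 *
 * (This is the usual trailing zero-length array idiom: the allocator
 * over-allocates, e.g. kmalloc(sizeof(struct bio) + n * sizeof(struct bio_vec)),
 * so that small bios need no separate bvec allocation.)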
*/ struct bio_vec bi_inline_vecs[0]; }; #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) /* * bio flags */ enum { BIO_NO_PAGE_REF, /* don't put pages on release */ BIO_CLONED, /* doesn't own data */ BIO_BOUNCED, /* bio is a bounce bio */ BIO_USER_MAPPED, /* contains user pages */ BIO_NULL_MAPPED, /* contains invalid user pages */ BIO_WORKINGSET, /* contains userspace workingset pages */ BIO_QUIET, /* Make BIO Quiet */ BIO_CHAIN, /* chained bio, ->bi_remaining in effect */ BIO_REFFED, /* bio has elevated ->bi_cnt */ BIO_THROTTLED, /* This bio has already been subjected to * throttling rules. Don't do it again. */ BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion * of this bio. */ BIO_QUEUE_ENTERED, /* can use blk_queue_enter_live() */ BIO_TRACKED, /* set if bio goes through the rq_qos path */ BIO_FLAG_LAST }; /* See BVEC_POOL_OFFSET below before adding new flags */ /* * We support 6 different bvec pools; the last one is magic in that it * is backed by a mempool. */ #define BVEC_POOL_NR 6 #define BVEC_POOL_MAX (BVEC_POOL_NR - 1) /* * Top 3 bits of bio flags indicate the pool the bvecs came from. We add * 1 to the actual index so that 0 indicates that there are no bvecs to be * freed. */ #define BVEC_POOL_BITS (3) #define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS) #define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET) #if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR+1) # error "BVEC_POOL_BITS is too small" #endif /* * Flags starting here get preserved by bio_reset() - this includes * only BVEC_POOL_IDX() */ #define BIO_RESET_BITS BVEC_POOL_OFFSET typedef __u32 __bitwise blk_mq_req_flags_t; /* * Operations and flags common to the bio and request structures. * We use 8 bits for encoding the operation, and the remaining 24 for flags. * * The least significant bit of the operation number indicates the data * transfer direction: * * - if the least significant bit is set transfers are TO the device * - if the least significant bit is not set transfers are FROM the device * * If an operation does not transfer data the least significant bit has no * meaning.
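 *
 * For example, REQ_OP_READ (0) has the least significant bit clear and
 * REQ_OP_WRITE (1) has it set; op_is_write() further down tests exactly
 * this bit.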
*/ #define REQ_OP_BITS 8 #define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1) #define REQ_FLAG_BITS 24 enum req_opf { /* read sectors from the device */ REQ_OP_READ = 0, /* write sectors to the device */ REQ_OP_WRITE = 1, /* flush the volatile write cache */ REQ_OP_FLUSH = 2, /* discard sectors */ REQ_OP_DISCARD = 3, /* securely erase sectors */ REQ_OP_SECURE_ERASE = 5, /* reset a zone write pointer */ REQ_OP_ZONE_RESET = 6, /* write the same sector many times */ REQ_OP_WRITE_SAME = 7, /* reset all the zones present on the device */ REQ_OP_ZONE_RESET_ALL = 8, /* write the zero-filled sector many times */ REQ_OP_WRITE_ZEROES = 9, /* SCSI passthrough using struct scsi_request */ REQ_OP_SCSI_IN = 32, REQ_OP_SCSI_OUT = 33, /* Driver private requests */ REQ_OP_DRV_IN = 34, REQ_OP_DRV_OUT = 35, REQ_OP_LAST, }; enum req_flag_bits { __REQ_FAILFAST_DEV = /* no driver retries of device errors */ REQ_OP_BITS, __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ __REQ_SYNC, /* request is sync (sync write or read) */ __REQ_META, /* metadata io request */ __REQ_PRIO, /* boost priority in cfq */ __REQ_NOMERGE, /* don't touch this for merging */ __REQ_IDLE, /* anticipate more IO after this one */ __REQ_INTEGRITY, /* I/O includes block integrity payload */ __REQ_FUA, /* forced unit access */ __REQ_PREFLUSH, /* request for cache flush */ __REQ_RAHEAD, /* read ahead, can fail anytime */ __REQ_BACKGROUND, /* background IO */ __REQ_NOWAIT, /* Don't wait if request will block */ __REQ_NOWAIT_INLINE, /* Return would-block error inline */ /* * When a shared kthread needs to issue a bio for a cgroup, doing * so synchronously can lead to priority inversions as the kthread * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes * submit_bio() punt the actual issuing to a dedicated per-blkcg * work item to avoid such priority inversions. */ __REQ_CGROUP_PUNT, /* command specific flags for REQ_OP_WRITE_ZEROES: */ __REQ_NOUNMAP, /* do not free blocks when zeroing */ __REQ_HIPRI, /* for driver use */ __REQ_DRV, __REQ_SWAP, /* swapping request.
*/ __REQ_NR_BITS, /* stops here */ }; #define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV) #define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT) #define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER) #define REQ_SYNC (1ULL << __REQ_SYNC) #define REQ_META (1ULL << __REQ_META) #define REQ_PRIO (1ULL << __REQ_PRIO) #define REQ_NOMERGE (1ULL << __REQ_NOMERGE) #define REQ_IDLE (1ULL << __REQ_IDLE) #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) #define REQ_FUA (1ULL << __REQ_FUA) #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) #define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE) #define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) #define REQ_HIPRI (1ULL << __REQ_HIPRI) #define REQ_DRV (1ULL << __REQ_DRV) #define REQ_SWAP (1ULL << __REQ_SWAP) #define REQ_FAILFAST_MASK \ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) #define REQ_NOMERGE_FLAGS \ (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) enum stat_group { STAT_READ, STAT_WRITE, STAT_DISCARD, NR_STAT_GROUPS }; #define bio_op(bio) \ ((bio)->bi_opf & REQ_OP_MASK) #define req_op(req) \ ((req)->cmd_flags & REQ_OP_MASK) /* obsolete, don't use in new code */ static inline void bio_set_op_attrs(struct bio *bio, unsigned op, unsigned op_flags) { bio->bi_opf = op | op_flags; } static inline bool op_is_write(unsigned int op) { return (op & 1); } /* * Check if the bio or request is one that needs special treatment in the * flush state machine. */ static inline bool op_is_flush(unsigned int op) { return op & (REQ_FUA | REQ_PREFLUSH); } /* * Reads are always treated as synchronous, as are requests with the FUA or * PREFLUSH flag. Other operations may be marked as synchronous using the * REQ_SYNC flag. 
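 *
 * For example, (REQ_OP_WRITE | REQ_SYNC) and (REQ_OP_WRITE | REQ_FUA) count
 * as sync, a plain REQ_OP_WRITE does not, and REQ_OP_READ always does.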
*/ static inline bool op_is_sync(unsigned int op) { return (op & REQ_OP_MASK) == REQ_OP_READ || (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH)); } static inline bool op_is_discard(unsigned int op) { return (op & REQ_OP_MASK) == REQ_OP_DISCARD; } static inline int op_stat_group(unsigned int op) { if (op_is_discard(op)) return STAT_DISCARD; return op_is_write(op); } typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U #define BLK_QC_T_EAGAIN -2U #define BLK_QC_T_SHIFT 16 #define BLK_QC_T_INTERNAL (1U << 31) static inline bool blk_qc_t_valid(blk_qc_t cookie) { return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN; } static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) { return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT; } static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) { return cookie & ((1u << BLK_QC_T_SHIFT) - 1); } static inline bool blk_qc_t_is_internal(blk_qc_t cookie) { return (cookie & BLK_QC_T_INTERNAL) != 0; } struct blk_rq_stat { u64 mean; u64 min; u64 max; u32 nr_samples; u64 batch; }; #endif /* __LINUX_BLK_TYPES_H */ dirent.h 0000644 00000000330 14722070374 0006204 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DIRENT_H #define _LINUX_DIRENT_H struct linux_dirent64 { u64 d_ino; s64 d_off; unsigned short d_reclen; unsigned char d_type; char d_name[0]; }; #endif omap-iommu.h 0000644 00000001560 14722070374 0007005 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * omap iommu: simple virtual address space management * * Copyright (C) 2008-2009 Nokia Corporation * * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> */ #ifndef _OMAP_IOMMU_H_ #define _OMAP_IOMMU_H_ struct iommu_domain; #ifdef CONFIG_OMAP_IOMMU extern void omap_iommu_save_ctx(struct device *dev); extern void omap_iommu_restore_ctx(struct device *dev); int omap_iommu_domain_deactivate(struct iommu_domain *domain); int omap_iommu_domain_activate(struct iommu_domain *domain); #else static inline void omap_iommu_save_ctx(struct device *dev) {} static inline void omap_iommu_restore_ctx(struct device *dev) {} static inline int omap_iommu_domain_deactivate(struct iommu_domain *domain) { return -ENODEV; } static inline int omap_iommu_domain_activate(struct iommu_domain *domain) { return -ENODEV; } #endif #endif sched_clock.h 0000644 00000001010 14722070374 0007154 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * sched_clock.h: support for extending counters to full 64-bit ns counter */ #ifndef LINUX_SCHED_CLOCK #define LINUX_SCHED_CLOCK #ifdef CONFIG_GENERIC_SCHED_CLOCK extern void generic_sched_clock_init(void); extern void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate); #else static inline void generic_sched_clock_init(void) { } static inline void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate) { } #endif #endif usb.h 0000644 00000232306 14722070374 0005522 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_USB_H #define __LINUX_USB_H #include <linux/mod_devicetable.h> #include <linux/usb/ch9.h> #define USB_MAJOR 180 #define USB_DEVICE_MAJOR 189 #ifdef __KERNEL__ #include <linux/errno.h> /* for -ENODEV */ #include <linux/delay.h> /* for mdelay() */ #include <linux/interrupt.h> /* for in_interrupt() */ #include <linux/list.h> /* for struct list_head */ #include <linux/kref.h> /* for struct kref */ #include <linux/device.h> /* for struct device */ #include <linux/fs.h> /* for struct file_operations */ #include <linux/completion.h> /* for struct completion */ #include <linux/sched.h> /* for current 
&& schedule_timeout */ #include <linux/mutex.h> /* for struct mutex */ #include <linux/pm_runtime.h> /* for runtime PM */ struct usb_device; struct usb_driver; struct wusb_dev; /*-------------------------------------------------------------------------*/ /* * Host-side wrappers for standard USB descriptors ... these are parsed * from the data provided by devices. Parsing turns them from a flat * sequence of descriptors into a hierarchy: * * - devices have one (usually) or more configs; * - configs have one (often) or more interfaces; * - interfaces have one (usually) or more settings; * - each interface setting has zero or (usually) more endpoints. * - a SuperSpeed endpoint has a companion descriptor * * And there might be other descriptors mixed in with those. * * Devices may also have class-specific or vendor-specific descriptors. */ struct ep_device; /** * struct usb_host_endpoint - host-side endpoint descriptor and queue * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder * @ss_ep_comp: SuperSpeed companion descriptor for this endpoint * @ssp_isoc_ep_comp: SuperSpeedPlus isoc companion descriptor for this endpoint * @urb_list: urbs queued to this endpoint; maintained by usbcore * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH) * with one or more transfer descriptors (TDs) per urb * @ep_dev: ep_device for sysfs info * @extra: descriptors following this endpoint in the configuration * @extralen: how many bytes of "extra" are valid * @enabled: URBs may be submitted to this endpoint * @streams: number of USB-3 streams allocated on the endpoint * * USB requests are always queued to a given endpoint, identified by a * descriptor within an active interface in a given USB configuration. */ struct usb_host_endpoint { struct usb_endpoint_descriptor desc; struct usb_ss_ep_comp_descriptor ss_ep_comp; struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; struct list_head urb_list; void *hcpriv; struct ep_device *ep_dev; /* For sysfs info */ unsigned char *extra; /* Extra descriptors */ int extralen; int enabled; int streams; }; /* host-side wrapper for one interface setting's parsed descriptors */ struct usb_host_interface { struct usb_interface_descriptor desc; int extralen; unsigned char *extra; /* Extra descriptors */ /* array of desc.bNumEndpoints endpoints associated with this * interface setting. these will be in no particular order. 
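 *
 * A driver's probe() routine typically locates its endpoints here via the
 * usb_find_common_endpoints() helpers declared below, e.g. (sketch only):
 *
 *	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
 *
 *	if (usb_find_common_endpoints(intf->cur_altsetting,
 *				      &bulk_in, &bulk_out, NULL, NULL))
 *		return -ENODEV;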
*/ struct usb_host_endpoint *endpoint; char *string; /* iInterface string, if present */ }; enum usb_interface_condition { USB_INTERFACE_UNBOUND = 0, USB_INTERFACE_BINDING, USB_INTERFACE_BOUND, USB_INTERFACE_UNBINDING, }; int __must_check usb_find_common_endpoints(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in, struct usb_endpoint_descriptor **bulk_out, struct usb_endpoint_descriptor **int_in, struct usb_endpoint_descriptor **int_out); int __must_check usb_find_common_endpoints_reverse(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in, struct usb_endpoint_descriptor **bulk_out, struct usb_endpoint_descriptor **int_in, struct usb_endpoint_descriptor **int_out); static inline int __must_check usb_find_bulk_in_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in) { return usb_find_common_endpoints(alt, bulk_in, NULL, NULL, NULL); } static inline int __must_check usb_find_bulk_out_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_out) { return usb_find_common_endpoints(alt, NULL, bulk_out, NULL, NULL); } static inline int __must_check usb_find_int_in_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **int_in) { return usb_find_common_endpoints(alt, NULL, NULL, int_in, NULL); } static inline int __must_check usb_find_int_out_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **int_out) { return usb_find_common_endpoints(alt, NULL, NULL, NULL, int_out); } static inline int __must_check usb_find_last_bulk_in_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in) { return usb_find_common_endpoints_reverse(alt, bulk_in, NULL, NULL, NULL); } static inline int __must_check usb_find_last_bulk_out_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_out) { return usb_find_common_endpoints_reverse(alt, NULL, bulk_out, NULL, NULL); } static inline int __must_check usb_find_last_int_in_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **int_in) { return usb_find_common_endpoints_reverse(alt, NULL, NULL, int_in, NULL); } static inline int __must_check usb_find_last_int_out_endpoint(struct usb_host_interface *alt, struct usb_endpoint_descriptor **int_out) { return usb_find_common_endpoints_reverse(alt, NULL, NULL, NULL, int_out); } /** * struct usb_interface - what usb device drivers talk to * @altsetting: array of interface structures, one for each alternate * setting that may be selected. Each one includes a set of * endpoint configurations. They will be in no particular order. * @cur_altsetting: the current altsetting. * @num_altsetting: number of altsettings defined. * @intf_assoc: interface association descriptor * @minor: the minor number assigned to this interface, if this * interface is bound to a driver that uses the USB major number. * If this interface does not use the USB major, this field should * be unused. The driver should set this value in the probe() * function of the driver, after it has been assigned a minor * number from the USB core by calling usb_register_dev(). * @condition: binding state of the interface: not bound, binding * (in probe()), bound to a driver, or unbinding (in disconnect()) * @sysfs_files_created: sysfs attributes exist * @ep_devs_created: endpoint child pseudo-devices exist * @unregistering: flag set when the interface is being unregistered * @needs_remote_wakeup: flag set when the driver requires remote-wakeup * capability during autosuspend. 
* @needs_altsetting0: flag set when a set-interface request for altsetting 0 * has been deferred. * @needs_binding: flag set when the driver should be re-probed or unbound * following a reset or suspend operation it doesn't support. * @authorized: This allows (de)authorizing individual interfaces instead * of a whole device, in contrast to device-level authorization. * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. * @reset_ws: Used for scheduling resets from atomic context. * @resetting_device: USB core reset the device, so use alt setting 0 as * current; needs bandwidth alloc after reset. * * USB device drivers attach to interfaces on a physical device. Each * interface encapsulates a single high level function, such as feeding * an audio stream to a speaker or reporting a change in a volume control. * Many USB devices only have one interface. The protocol used to talk to * an interface's endpoints can be defined in a usb "class" specification, * or by a product's vendor. The (default) control endpoint is part of * every interface, but is never listed among the interface's descriptors. * * The driver that is bound to the interface can use standard driver model * calls such as dev_get_drvdata() on the dev member of this structure. * * Each interface may have alternate settings. The initial configuration * of a device sets altsetting 0, but the device driver can change * that setting using usb_set_interface(). Alternate settings are often * used to control the use of periodic endpoints, such as by having * different endpoints use different amounts of reserved USB bandwidth. * All standards-conformant USB devices that use isochronous endpoints * will use them in non-default settings. * * The USB specification says that alternate setting numbers must run from * 0 to one less than the total number of alternate settings. But some * devices manage to mess this up, and the structures aren't necessarily * stored in numerical order anyhow. Use usb_altnum_to_altsetting() to * look up an alternate setting in the altsetting array based on its number.
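 *
 * For example (sketch, error handling omitted), selecting alternate
 * setting 1 and then looking it up:
 *
 *	usb_set_interface(interface_to_usbdev(intf),
 *			intf->cur_altsetting->desc.bInterfaceNumber, 1);
 *	alt = usb_altnum_to_altsetting(intf, 1);	(NULL if absent)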
*/ struct usb_interface { /* array of alternate settings for this interface, * stored in no particular order */ struct usb_host_interface *altsetting; struct usb_host_interface *cur_altsetting; /* the currently * active alternate setting */ unsigned num_altsetting; /* number of alternate settings */ /* If there is an interface association descriptor then it will list * the associated interfaces */ struct usb_interface_assoc_descriptor *intf_assoc; int minor; /* minor number this interface is * bound to */ enum usb_interface_condition condition; /* state of binding */ unsigned sysfs_files_created:1; /* the sysfs attributes exist */ unsigned ep_devs_created:1; /* endpoint "devices" exist */ unsigned unregistering:1; /* unregistration is in progress */ unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */ unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ unsigned needs_binding:1; /* needs delayed unbind/rebind */ unsigned resetting_device:1; /* true: bandwidth alloc after reset */ unsigned authorized:1; /* used for interface authorization */ struct device dev; /* interface specific device info */ struct device *usb_dev; struct work_struct reset_ws; /* for resets in atomic context */ }; #define to_usb_interface(d) container_of(d, struct usb_interface, dev) static inline void *usb_get_intfdata(struct usb_interface *intf) { return dev_get_drvdata(&intf->dev); } static inline void usb_set_intfdata(struct usb_interface *intf, void *data) { dev_set_drvdata(&intf->dev, data); } struct usb_interface *usb_get_intf(struct usb_interface *intf); void usb_put_intf(struct usb_interface *intf); /* Hard limit */ #define USB_MAXENDPOINTS 30 /* this maximum is arbitrary */ #define USB_MAXINTERFACES 32 #define USB_MAXIADS (USB_MAXINTERFACES/2) bool usb_check_bulk_endpoints( const struct usb_interface *intf, const u8 *ep_addrs); bool usb_check_int_endpoints( const struct usb_interface *intf, const u8 *ep_addrs); /* * USB Resume Timer: Every Host controller driver should drive the resume * signalling on the bus for the amount of time defined by this macro. * * That way we will have a 'stable' behavior among all HCDs supported by Linux. * * Note that the USB Specification states we should drive resume for *at least* * 20 ms, but it doesn't give an upper bound. This creates two possible * situations which we want to avoid: * * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes * us to fail USB Electrical Tests, thus failing Certification * * (b) Some (many) devices actually need more than 20 ms of resume signalling, * and while we can argue that's against the USB Specification, we don't have * control over which devices a certification laboratory will be using for * certification. If CertLab uses a device which was tested against Windows and * that happens to have relaxed resume signalling rules, we might fall into * situations where we fail interoperability and electrical tests. * * In order to avoid both conditions, we're using a 40 ms resume timeout, which * should cope with both LPJ calibration errors and devices not following every * detail of the USB Specification. */ #define USB_RESUME_TIMEOUT 40 /* ms */ /** * struct usb_interface_cache - long-term representation of a device interface * @num_altsetting: number of altsettings defined. * @ref: reference counter. * @altsetting: variable-length array of interface structures, one for * each alternate setting that may be selected. Each one includes a * set of endpoint configurations. 
They will be in no particular order. * * These structures persist for the lifetime of a usb_device, unlike * struct usb_interface (which persists only as long as its configuration * is installed). The altsetting arrays can be accessed through these * structures at any time, permitting comparison of configurations and * providing support for the /sys/kernel/debug/usb/devices pseudo-file. */ struct usb_interface_cache { unsigned num_altsetting; /* number of alternate settings */ struct kref ref; /* reference counter */ /* variable-length array of alternate settings for this interface, * stored in no particular order */ struct usb_host_interface altsetting[0]; }; #define ref_to_usb_interface_cache(r) \ container_of(r, struct usb_interface_cache, ref) #define altsetting_to_usb_interface_cache(a) \ container_of(a, struct usb_interface_cache, altsetting[0]) /** * struct usb_host_config - representation of a device's configuration * @desc: the device's configuration descriptor. * @string: pointer to the cached version of the iConfiguration string, if * present for this configuration. * @intf_assoc: list of any interface association descriptors in this config * @interface: array of pointers to usb_interface structures, one for each * interface in the configuration. The number of interfaces is stored * in desc.bNumInterfaces. These pointers are valid only while the * configuration is active. * @intf_cache: array of pointers to usb_interface_cache structures, one * for each interface in the configuration. These structures exist * for the entire life of the device. * @extra: pointer to buffer containing all extra descriptors associated * with this configuration (those preceding the first interface * descriptor). * @extralen: length of the extra descriptors buffer. * * USB devices may have multiple configurations, but only one can be active * at any time. Each encapsulates a different operational environment; * for example, a dual-speed device would have separate configurations for * full-speed and high-speed operation. The number of configurations * available is stored in the device descriptor as bNumConfigurations. * * A configuration can contain multiple interfaces. Each corresponds to * a different function of the USB device, and all are available whenever * the configuration is active. The USB standard says that interfaces * are supposed to be numbered from 0 to desc.bNumInterfaces-1, but a lot * of devices get this wrong. In addition, the interface array is not * guaranteed to be sorted in numerical order. Use usb_ifnum_to_if() to * look up an interface entry based on its number. * * Device drivers should not attempt to activate configurations. The choice * of which configuration to install is a policy decision based on such * considerations as available power, functionality provided, and the user's * desires (expressed through userspace tools). However, drivers can call * usb_reset_configuration() to reinitialize the current configuration and * all its interfaces. */ struct usb_host_config { struct usb_config_descriptor desc; char *string; /* iConfiguration string, if present */ /* List of any Interface Association Descriptors in this * configuration.
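 * (At most USB_MAXIADS of them. An IAD groups a run of interfaces that
 * together implement one function; a USB audio device, for instance,
 * typically associates its AudioControl and AudioStreaming interfaces.)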
*/ struct usb_interface_assoc_descriptor *intf_assoc[USB_MAXIADS]; /* the interfaces associated with this configuration, * stored in no particular order */ struct usb_interface *interface[USB_MAXINTERFACES]; /* Interface information available even when this is not the * active configuration */ struct usb_interface_cache *intf_cache[USB_MAXINTERFACES]; unsigned char *extra; /* Extra descriptors */ int extralen; }; /* USB2.0 and USB3.0 device BOS descriptor set */ struct usb_host_bos { struct usb_bos_descriptor *desc; /* wireless cap descriptor is handled by wusb */ struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; struct usb_ssp_cap_descriptor *ssp_cap; struct usb_ss_container_id_descriptor *ss_id; struct usb_ptm_cap_descriptor *ptm_cap; }; int __usb_get_extra_descriptor(char *buffer, unsigned size, unsigned char type, void **ptr, size_t min); #define usb_get_extra_descriptor(ifpoint, type, ptr) \ __usb_get_extra_descriptor((ifpoint)->extra, \ (ifpoint)->extralen, \ type, (void **)ptr, sizeof(**(ptr))) /* ----------------------------------------------------------------------- */ /* USB device number allocation bitmap */ struct usb_devmap { unsigned long devicemap[128 / (8*sizeof(unsigned long))]; }; /* * Allocated per bus (tree of devices) we have: */ struct usb_bus { struct device *controller; /* host/master side hardware */ struct device *sysdev; /* as seen from firmware or bus */ int busnum; /* Bus number (in order of reg) */ const char *bus_name; /* stable id (PCI slot_name etc) */ u8 uses_pio_for_control; /* * Does the host controller use PIO * for control transfers? */ u8 otg_port; /* 0, or number of OTG/HNP port */ unsigned is_b_host:1; /* true during some HNP roleswitches */ unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */ unsigned no_stop_on_short:1; /* * Quirk: some controllers don't stop * the ep queue on a short transfer * with the URB_SHORT_NOT_OK flag set. */ unsigned no_sg_constraint:1; /* no sg constraint */ unsigned sg_tablesize; /* 0 or largest number of sg list entries */ int devnum_next; /* Next open device number in * round-robin allocation */ struct mutex devnum_next_mutex; /* devnum_next mutex */ struct usb_devmap devmap; /* device address allocation map */ struct usb_device *root_hub; /* Root hub */ struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ int bandwidth_allocated; /* on this bus: how much of the time * reserved for periodic (intr/iso) * requests is used, on average? * Units: microseconds/frame. * Limits: Full/low speed reserve 90%, * while high speed reserves 80%. */ int bandwidth_int_reqs; /* number of Interrupt requests */ int bandwidth_isoc_reqs; /* number of Isoc. requests */ unsigned resuming_ports; /* bit array: resuming root-hub ports */ #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) struct mon_bus *mon_bus; /* non-null when associated */ int monitored; /* non-zero when monitored */ #endif }; struct usb_dev_state; /* ----------------------------------------------------------------------- */ struct usb_tt; enum usb_device_removable { USB_DEVICE_REMOVABLE_UNKNOWN = 0, USB_DEVICE_REMOVABLE, USB_DEVICE_FIXED, }; enum usb_port_connect_type { USB_PORT_CONNECT_TYPE_UNKNOWN = 0, USB_PORT_CONNECT_TYPE_HOT_PLUG, USB_PORT_CONNECT_TYPE_HARD_WIRED, USB_PORT_NOT_USED, }; /* * USB port quirks. */ /* For the given port, prefer the old (faster) enumeration scheme. */ #define USB_PORT_QUIRK_OLD_SCHEME BIT(0) /* Decrease TRSTRCY to 10ms during device enumeration. 
*/ #define USB_PORT_QUIRK_FAST_ENUM BIT(1) /* * USB 2.0 Link Power Management (LPM) parameters. */ struct usb2_lpm_parameters { /* Best effort service latency indicates how long the host will drive * resume on an exit from L1. */ unsigned int besl; /* Timeout value in microseconds for the L1 inactivity (LPM) timer. * When the timer counts to zero, the parent hub will initiate an LPM * transition to L1. */ int timeout; }; /* * USB 3.0 Link Power Management (LPM) parameters. * * PEL and SEL are USB 3.0 Link PM latencies for device-initiated LPM exit. * MEL is the USB 3.0 Link PM latency for host-initiated LPM exit. * All three are stored in nanoseconds. */ struct usb3_lpm_parameters { /* * Maximum exit latency (MEL) for the host to send a packet to the * device (either a Ping for isoc endpoints, or a data packet for * interrupt endpoints), the hubs to decode the packet, and for all hubs * in the path to transition the links to U0. */ unsigned int mel; /* * Maximum exit latency for a device-initiated LPM transition to bring * all links into U0. Abbreviated as "PEL" in section 9.4.12 of the USB * 3.0 spec, with no explanation of what "P" stands for. "Path"? */ unsigned int pel; /* * The System Exit Latency (SEL) includes PEL, and three other * latencies. After a device initiates a U0 transition, it will take * some time from when the device sends the ERDY to when it will finally * receive the data packet. Basically, SEL should be the worst-case * latency from when a device starts initiating a U0 transition to when * it will get data. */ unsigned int sel; /* * The idle timeout value that is currently programmed into the parent * hub for this device. When the timer counts to zero, the parent hub * will initiate an LPM transition to either U1 or U2. */ int timeout; }; /** * struct usb_device - kernel's representation of a USB device * @devnum: device number; address on a USB bus * @devpath: device ID string for use in messages (e.g., /port/...) * @route: tree topology hex string for use with xHCI * @state: device state: configured, not attached, etc. * @speed: device speed: high/full/low (or error) * @rx_lanes: number of rx lanes in use, USB 3.2 adds dual-lane support * @tx_lanes: number of tx lanes in use, USB 3.2 adds dual-lane support * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub * @ttport: device port on that tt hub * @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints * @parent: our hub, unless we're the root * @bus: bus we're part of * @ep0: endpoint 0 data (default control pipe) * @dev: generic device interface * @descriptor: USB device descriptor * @bos: USB device BOS descriptor set * @config: all of the device's configs * @actconfig: the active configuration * @ep_in: array of IN endpoints * @ep_out: array of OUT endpoints * @rawdescriptors: raw descriptors for each config * @bus_mA: Current available from the bus * @portnum: parent port number (origin 1) * @level: number of USB hub ancestors * @devaddr: device address, XHCI: assigned by HW, others: same as devnum * @can_submit: URBs may be submitted * @persist_enabled: USB_PERSIST enabled for this device * @reset_in_progress: the device is being reset * @have_langid: whether string_langid is valid * @authorized: policy has said we can use it; * (user space) policy determines if we authorize this device to be * used or not. By default, wired USB devices are authorized. * WUSB devices are not, until we authorize them from user space.
* FIXME -- complete doc * @authenticated: Crypto authentication passed * @wusb: device is Wireless USB * @lpm_capable: device supports LPM * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled * @usb3_lpm_u1_enabled: USB3 hardware U1 LPM enabled * @usb3_lpm_u2_enabled: USB3 hardware U2 LPM enabled * @string_langid: language ID for strings * @product: iProduct string, if present (static) * @manufacturer: iManufacturer string, if present (static) * @serial: iSerialNumber string, if present (static) * @filelist: usbfs files that are open to this device * @maxchild: number of ports if hub * @quirks: quirks of the whole device * @urbnum: number of URBs submitted for the whole device * @active_duration: total time device is not suspended * @connect_time: time device was first connected * @do_remote_wakeup: remote wakeup should be enabled * @reset_resume: needs reset instead of resume * @port_is_suspended: the upstream port is suspended (L2 or U3) * @wusb_dev: if this is a Wireless USB device, link to the WUSB * specific data for the device. * @slot_id: Slot ID assigned by xHCI * @removable: Device can be physically removed from this port * @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout. * @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout. * @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout. * @lpm_disable_count: Ref count used by usb_disable_lpm() and usb_enable_lpm() * to keep track of the number of functions that require USB 3.0 Link Power * Management to be disabled for this usb_device. This count should only * be manipulated by those functions, while the bandwidth_mutex is held. * @hub_delay: cached value consisting of: * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns) * * Will be used as wValue for SetIsochDelay requests. * * Notes: * Usbcore drivers should not set usbdev->state directly. Instead use * usb_set_device_state().
*/ struct usb_device { int devnum; char devpath[16]; u32 route; enum usb_device_state state; enum usb_device_speed speed; unsigned int rx_lanes; unsigned int tx_lanes; struct usb_tt *tt; int ttport; unsigned int toggle[2]; struct usb_device *parent; struct usb_bus *bus; struct usb_host_endpoint ep0; struct device dev; struct usb_device_descriptor descriptor; struct usb_host_bos *bos; struct usb_host_config *config; struct usb_host_config *actconfig; struct usb_host_endpoint *ep_in[16]; struct usb_host_endpoint *ep_out[16]; char **rawdescriptors; unsigned short bus_mA; u8 portnum; u8 level; u8 devaddr; unsigned can_submit:1; unsigned persist_enabled:1; unsigned reset_in_progress:1; unsigned have_langid:1; unsigned authorized:1; unsigned authenticated:1; unsigned wusb:1; unsigned lpm_capable:1; unsigned usb2_hw_lpm_capable:1; unsigned usb2_hw_lpm_besl_capable:1; unsigned usb2_hw_lpm_enabled:1; unsigned usb2_hw_lpm_allowed:1; unsigned usb3_lpm_u1_enabled:1; unsigned usb3_lpm_u2_enabled:1; int string_langid; /* static strings from the device */ char *product; char *manufacturer; char *serial; struct list_head filelist; int maxchild; u32 quirks; atomic_t urbnum; unsigned long active_duration; #ifdef CONFIG_PM unsigned long connect_time; unsigned do_remote_wakeup:1; unsigned reset_resume:1; unsigned port_is_suspended:1; #endif struct wusb_dev *wusb_dev; int slot_id; enum usb_device_removable removable; struct usb2_lpm_parameters l1_params; struct usb3_lpm_parameters u1_params; struct usb3_lpm_parameters u2_params; unsigned lpm_disable_count; u16 hub_delay; }; #define to_usb_device(d) container_of(d, struct usb_device, dev) static inline struct usb_device *interface_to_usbdev(struct usb_interface *intf) { return to_usb_device(intf->dev.parent); } extern struct usb_device *usb_get_dev(struct usb_device *dev); extern void usb_put_dev(struct usb_device *dev); extern struct usb_device *usb_hub_find_child(struct usb_device *hdev, int port1); /** * usb_hub_for_each_child - iterate over all child devices on the hub * @hdev: USB device belonging to the usb hub * @port1: portnum associated with child device * @child: child device pointer */ #define usb_hub_for_each_child(hdev, port1, child) \ for (port1 = 1, child = usb_hub_find_child(hdev, port1); \ port1 <= hdev->maxchild; \ child = usb_hub_find_child(hdev, ++port1)) \ if (!child) continue; else /* USB device locking */ #define usb_lock_device(udev) device_lock(&(udev)->dev) #define usb_unlock_device(udev) device_unlock(&(udev)->dev) #define usb_lock_device_interruptible(udev) device_lock_interruptible(&(udev)->dev) #define usb_trylock_device(udev) device_trylock(&(udev)->dev) extern int usb_lock_device_for_reset(struct usb_device *udev, const struct usb_interface *iface); /* USB port reset for device reinitialization */ extern int usb_reset_device(struct usb_device *dev); extern void usb_queue_reset_device(struct usb_interface *dev); #ifdef CONFIG_ACPI extern int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable); extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index); extern int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index); #else static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable) { return 0; } static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index) { return true; } static inline int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index) { return 0; } #endif /* USB autosuspend and autoresume */ #ifdef CONFIG_PM extern 
void usb_enable_autosuspend(struct usb_device *udev); extern void usb_disable_autosuspend(struct usb_device *udev); extern int usb_autopm_get_interface(struct usb_interface *intf); extern void usb_autopm_put_interface(struct usb_interface *intf); extern int usb_autopm_get_interface_async(struct usb_interface *intf); extern void usb_autopm_put_interface_async(struct usb_interface *intf); extern void usb_autopm_get_interface_no_resume(struct usb_interface *intf); extern void usb_autopm_put_interface_no_suspend(struct usb_interface *intf); static inline void usb_mark_last_busy(struct usb_device *udev) { pm_runtime_mark_last_busy(&udev->dev); } #else static inline int usb_enable_autosuspend(struct usb_device *udev) { return 0; } static inline int usb_disable_autosuspend(struct usb_device *udev) { return 0; } static inline int usb_autopm_get_interface(struct usb_interface *intf) { return 0; } static inline int usb_autopm_get_interface_async(struct usb_interface *intf) { return 0; } static inline void usb_autopm_put_interface(struct usb_interface *intf) { } static inline void usb_autopm_put_interface_async(struct usb_interface *intf) { } static inline void usb_autopm_get_interface_no_resume( struct usb_interface *intf) { } static inline void usb_autopm_put_interface_no_suspend( struct usb_interface *intf) { } static inline void usb_mark_last_busy(struct usb_device *udev) { } #endif extern int usb_disable_lpm(struct usb_device *udev); extern void usb_enable_lpm(struct usb_device *udev); /* Same as above, but these functions lock/unlock the bandwidth_mutex. */ extern int usb_unlocked_disable_lpm(struct usb_device *udev); extern void usb_unlocked_enable_lpm(struct usb_device *udev); extern int usb_disable_ltm(struct usb_device *udev); extern void usb_enable_ltm(struct usb_device *udev); static inline bool usb_device_supports_ltm(struct usb_device *udev) { if (udev->speed < USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap) return false; return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT; } static inline bool usb_device_no_sg_constraint(struct usb_device *udev) { return udev && udev->bus && udev->bus->no_sg_constraint; } /*-------------------------------------------------------------------------*/ /* for drivers using iso endpoints */ extern int usb_get_current_frame_number(struct usb_device *usb_dev); /* Sets up a group of bulk endpoints to support multiple stream IDs. */ extern int usb_alloc_streams(struct usb_interface *interface, struct usb_host_endpoint **eps, unsigned int num_eps, unsigned int num_streams, gfp_t mem_flags); /* Reverts a group of bulk endpoints back to not using stream IDs. */ extern int usb_free_streams(struct usb_interface *interface, struct usb_host_endpoint **eps, unsigned int num_eps, gfp_t mem_flags); /* use these for multi-interface device registration */ extern int usb_driver_claim_interface(struct usb_driver *driver, struct usb_interface *iface, void *priv); /** * usb_interface_claimed - returns true iff an interface is claimed * @iface: the interface being checked * * Return: %true (nonzero) iff the interface is claimed, else %false * (zero). * * Note: * Callers must own the driver model's usb bus readlock. So driver * probe() entries don't need extra locking, but other call contexts * may need to explicitly claim that lock.
* */ static inline int usb_interface_claimed(struct usb_interface *iface) { return (iface->dev.driver != NULL); } extern void usb_driver_release_interface(struct usb_driver *driver, struct usb_interface *iface); const struct usb_device_id *usb_match_id(struct usb_interface *interface, const struct usb_device_id *id); extern int usb_match_one_id(struct usb_interface *interface, const struct usb_device_id *id); extern int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *)); extern struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor); extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, unsigned ifnum); extern struct usb_host_interface *usb_altnum_to_altsetting( const struct usb_interface *intf, unsigned int altnum); extern struct usb_host_interface *usb_find_alt_setting( struct usb_host_config *config, unsigned int iface_num, unsigned int alt_num); /* port claiming functions */ int usb_hub_claim_port(struct usb_device *hdev, unsigned port1, struct usb_dev_state *owner); int usb_hub_release_port(struct usb_device *hdev, unsigned port1, struct usb_dev_state *owner); /** * usb_make_path - returns stable device path in the usb tree * @dev: the device whose path is being constructed * @buf: where to put the string * @size: how big is "buf"? * * Return: Length of the string (> 0) or negative if size was too small. * * Note: * This identifier is intended to be "stable", reflecting physical paths in * hardware such as physical bus addresses for host controllers or ports on * USB hubs. That makes it stay the same until systems are physically * reconfigured, by re-cabling a tree of USB devices or by moving USB host * controllers. Adding and removing devices, including virtual root hubs * in host controller driver modules, does not change these path identifiers; * neither does rebooting or re-enumerating. These are more useful identifiers * than changeable ("unstable") ones like bus numbers or device addresses. * * With a partial exception for devices connected to USB 2.0 root hubs, these * identifiers are also predictable. So long as the device tree isn't changed, * plugging any USB device into a given hub port always gives it the same path. * Because of the use of "companion" controllers, devices connected to ports on * USB 2.0 root hubs (EHCI host controllers) will get one path ID if they are * high speed, and a different one if they are full or low speed. */ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size) { int actual; actual = snprintf(buf, size, "usb-%s-%s", dev->bus->bus_name, dev->devpath); return (actual >= (int)size) ? 
-1 : actual; } /*-------------------------------------------------------------------------*/ #define USB_DEVICE_ID_MATCH_DEVICE \ (USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT) #define USB_DEVICE_ID_MATCH_DEV_RANGE \ (USB_DEVICE_ID_MATCH_DEV_LO | USB_DEVICE_ID_MATCH_DEV_HI) #define USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_RANGE) #define USB_DEVICE_ID_MATCH_DEV_INFO \ (USB_DEVICE_ID_MATCH_DEV_CLASS | \ USB_DEVICE_ID_MATCH_DEV_SUBCLASS | \ USB_DEVICE_ID_MATCH_DEV_PROTOCOL) #define USB_DEVICE_ID_MATCH_INT_INFO \ (USB_DEVICE_ID_MATCH_INT_CLASS | \ USB_DEVICE_ID_MATCH_INT_SUBCLASS | \ USB_DEVICE_ID_MATCH_INT_PROTOCOL) /** * USB_DEVICE - macro used to describe a specific usb device * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * * This macro is used to create a struct usb_device_id that matches a * specific device. */ #define USB_DEVICE(vend, prod) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \ .idVendor = (vend), \ .idProduct = (prod) /** * USB_DEVICE_VER - describe a specific usb device with a version range * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @lo: the bcdDevice_lo value * @hi: the bcdDevice_hi value * * This macro is used to create a struct usb_device_id that matches a * specific device, with a version range. */ #define USB_DEVICE_VER(vend, prod, lo, hi) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, \ .idVendor = (vend), \ .idProduct = (prod), \ .bcdDevice_lo = (lo), \ .bcdDevice_hi = (hi) /** * USB_DEVICE_INTERFACE_CLASS - describe a usb device with a specific interface class * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @cl: bInterfaceClass value * * This macro is used to create a struct usb_device_id that matches a * specific interface class of devices. */ #define USB_DEVICE_INTERFACE_CLASS(vend, prod, cl) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_CLASS, \ .idVendor = (vend), \ .idProduct = (prod), \ .bInterfaceClass = (cl) /** * USB_DEVICE_INTERFACE_PROTOCOL - describe a usb device with a specific interface protocol * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @pr: bInterfaceProtocol value * * This macro is used to create a struct usb_device_id that matches a * specific interface protocol of devices. */ #define USB_DEVICE_INTERFACE_PROTOCOL(vend, prod, pr) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_PROTOCOL, \ .idVendor = (vend), \ .idProduct = (prod), \ .bInterfaceProtocol = (pr) /** * USB_DEVICE_INTERFACE_NUMBER - describe a usb device with a specific interface number * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @num: bInterfaceNumber value * * This macro is used to create a struct usb_device_id that matches a * specific interface number of devices. */ #define USB_DEVICE_INTERFACE_NUMBER(vend, prod, num) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_NUMBER, \ .idVendor = (vend), \ .idProduct = (prod), \ .bInterfaceNumber = (num) /** * USB_DEVICE_INFO - macro used to describe a class of usb devices * @cl: bDeviceClass value * @sc: bDeviceSubClass value * @pr: bDeviceProtocol value * * This macro is used to create a struct usb_device_id that matches a * specific class of devices. 
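 *
 * For example, an id_table entry of { USB_DEVICE_INFO(USB_CLASS_HID, 1, 1) }
 * would match any HID boot-protocol keyboard, regardless of vendor.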
*/ #define USB_DEVICE_INFO(cl, sc, pr) \ .match_flags = USB_DEVICE_ID_MATCH_DEV_INFO, \ .bDeviceClass = (cl), \ .bDeviceSubClass = (sc), \ .bDeviceProtocol = (pr) /** * USB_INTERFACE_INFO - macro used to describe a class of usb interfaces * @cl: bInterfaceClass value * @sc: bInterfaceSubClass value * @pr: bInterfaceProtocol value * * This macro is used to create a struct usb_device_id that matches a * specific class of interfaces. */ #define USB_INTERFACE_INFO(cl, sc, pr) \ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO, \ .bInterfaceClass = (cl), \ .bInterfaceSubClass = (sc), \ .bInterfaceProtocol = (pr) /** * USB_DEVICE_AND_INTERFACE_INFO - describe a specific usb device with a class of usb interfaces * @vend: the 16 bit USB Vendor ID * @prod: the 16 bit USB Product ID * @cl: bInterfaceClass value * @sc: bInterfaceSubClass value * @pr: bInterfaceProtocol value * * This macro is used to create a struct usb_device_id that matches a * specific device with a specific class of interfaces. * * This is especially useful when explicitly matching devices that have * vendor specific bDeviceClass values, but standards-compliant interfaces. */ #define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ | USB_DEVICE_ID_MATCH_DEVICE, \ .idVendor = (vend), \ .idProduct = (prod), \ .bInterfaceClass = (cl), \ .bInterfaceSubClass = (sc), \ .bInterfaceProtocol = (pr) /** * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces * @vend: the 16 bit USB Vendor ID * @cl: bInterfaceClass value * @sc: bInterfaceSubClass value * @pr: bInterfaceProtocol value * * This macro is used to create a struct usb_device_id that matches a * specific vendor with a specific class of interfaces. * * This is especially useful when explicitly matching devices that have * vendor specific bDeviceClass values, but standards-compliant interfaces. */ #define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ | USB_DEVICE_ID_MATCH_VENDOR, \ .idVendor = (vend), \ .bInterfaceClass = (cl), \ .bInterfaceSubClass = (sc), \ .bInterfaceProtocol = (pr) /* ----------------------------------------------------------------------- */ /* Stuff for dynamic usb ids */ struct usb_dynids { spinlock_t lock; struct list_head list; }; struct usb_dynid { struct list_head node; struct usb_device_id id; }; extern ssize_t usb_store_new_id(struct usb_dynids *dynids, const struct usb_device_id *id_table, struct device_driver *driver, const char *buf, size_t count); extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf); /** * struct usbdrv_wrap - wrapper for driver-model structure * @driver: The driver-model core driver structure. * @for_devices: Non-zero for device drivers, 0 for interface drivers. */ struct usbdrv_wrap { struct device_driver driver; int for_devices; }; /** * struct usb_driver - identifies USB interface driver to usbcore * @name: The driver name should be unique among USB drivers, * and should normally be the same as the module name. * @probe: Called to see if the driver is willing to manage a particular * interface on a device. If it is, probe returns zero and uses * usb_set_intfdata() to associate driver-specific data with the * interface. It may also use usb_set_interface() to specify the * appropriate altsetting. If unwilling to manage the interface, * return -ENODEV; if genuine IO errors occurred, return an appropriate * negative errno value.
* @disconnect: Called when the interface is no longer accessible, usually * because its device has been (or is being) disconnected or the * driver module is being unloaded. * @unlocked_ioctl: Used for drivers that want to talk to userspace through * the "usbfs" filesystem. This lets devices provide ways to * expose information to user space regardless of where they * do (or don't) show up otherwise in the filesystem. * @suspend: Called when the device is going to be suspended by the * system either from system sleep or runtime suspend context. The * return value will be ignored in system sleep context, so do NOT * try to continue using the device if suspend fails in this case. * Instead, let the resume or reset-resume routine recover from * the failure. * @resume: Called when the device is being resumed by the system. * @reset_resume: Called when the suspended device has been reset instead * of being resumed. * @pre_reset: Called by usb_reset_device() when the device is about to be * reset. This routine must not return until the driver has no active * URBs for the device, and no more URBs may be submitted until the * post_reset method is called. * @post_reset: Called by usb_reset_device() after the device * has been reset * @id_table: USB drivers use ID table to support hotplugging. * Export this with MODULE_DEVICE_TABLE(usb,...). This must be set * or your driver's probe function will never get called. * @dev_groups: Attributes attached to the device that will be created once it * is bound to the driver. * @dynids: used internally to hold the list of dynamically added device * ids for this driver. * @drvwrap: Driver-model core structure wrapper. * @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be * added to this driver by preventing the sysfs file from being created. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend * for interfaces bound to this driver. * @soft_unbind: if set to 1, the USB core will not kill URBs and disable * endpoints before calling the driver's disconnect method. * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs * to initiate lower power link state transitions when an idle timeout * occurs. Device-initiated USB 3.0 link PM will still be allowed. * * USB interface drivers must provide a name, probe() and disconnect() * methods, and an id_table. Other driver fields are optional. * * The id_table is used in hotplugging. It holds a set of descriptors, * and specialized data may be associated with each entry. That table * is used by both user and kernel mode hotplugging support. * * The probe() and disconnect() methods are called in a context where * they can sleep, but they should avoid abusing the privilege. Most * work to connect to a device should be done when the device is opened, * and undone at the last close. The disconnect code needs to address * concurrency issues with respect to open() and close() methods, as * well as forcing all pending I/O requests to complete (by unlinking * them as necessary, and blocking until the unlinks complete). 
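 *
 * A minimal sketch of such a driver (illustrative only; the example_*
 * names are hypothetical, and example_id_table is assumed to be a
 * MODULE_DEVICE_TABLE-style table built with the match macros above):
 *
 *	static int example_probe(struct usb_interface *intf,
 *				 const struct usb_device_id *id)
 *	{
 *		struct example_dev *dev = example_dev_alloc(intf);
 *
 *		if (!dev)
 *			return -ENOMEM;
 *		usb_set_intfdata(intf, dev);
 *		return 0;
 *	}
 *
 *	static void example_disconnect(struct usb_interface *intf)
 *	{
 *		struct example_dev *dev = usb_get_intfdata(intf);
 *
 *		usb_set_intfdata(intf, NULL);
 *		example_dev_free(dev);
 *	}
 *
 *	static struct usb_driver example_driver = {
 *		.name		= "example",
 *		.probe		= example_probe,
 *		.disconnect	= example_disconnect,
 *		.id_table	= example_id_table,
 *	};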
*/ struct usb_driver { const char *name; int (*probe) (struct usb_interface *intf, const struct usb_device_id *id); void (*disconnect) (struct usb_interface *intf); int (*unlocked_ioctl) (struct usb_interface *intf, unsigned int code, void *buf); int (*suspend) (struct usb_interface *intf, pm_message_t message); int (*resume) (struct usb_interface *intf); int (*reset_resume)(struct usb_interface *intf); int (*pre_reset)(struct usb_interface *intf); int (*post_reset)(struct usb_interface *intf); const struct usb_device_id *id_table; const struct attribute_group **dev_groups; struct usb_dynids dynids; struct usbdrv_wrap drvwrap; unsigned int no_dynamic_id:1; unsigned int supports_autosuspend:1; unsigned int disable_hub_initiated_lpm:1; unsigned int soft_unbind:1; }; #define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver) /** * struct usb_device_driver - identifies USB device driver to usbcore * @name: The driver name should be unique among USB drivers, * and should normally be the same as the module name. * @probe: Called to see if the driver is willing to manage a particular * device. If it is, probe returns zero and uses dev_set_drvdata() * to associate driver-specific data with the device. If unwilling * to manage the device, return a negative errno value. * @disconnect: Called when the device is no longer accessible, usually * because it has been (or is being) disconnected or the driver's * module is being unloaded. * @suspend: Called when the device is going to be suspended by the system. * @resume: Called when the device is being resumed by the system. * @dev_groups: Attributes attached to the device that will be created once it * is bound to the driver. * @drvwrap: Driver-model core structure wrapper. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend * for devices bound to this driver. * * USB drivers must provide all the fields listed above except drvwrap. */ struct usb_device_driver { const char *name; int (*probe) (struct usb_device *udev); void (*disconnect) (struct usb_device *udev); int (*suspend) (struct usb_device *udev, pm_message_t message); int (*resume) (struct usb_device *udev, pm_message_t message); const struct attribute_group **dev_groups; struct usbdrv_wrap drvwrap; unsigned int supports_autosuspend:1; }; #define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \ drvwrap.driver) extern struct bus_type usb_bus_type; /** * struct usb_class_driver - identifies a USB driver that wants to use the USB major number * @name: the usb class device name for this driver. Will show up in sysfs. * @devnode: Callback to provide a naming hint for a possible * device node to create. * @fops: pointer to the struct file_operations of this driver. * @minor_base: the start of the minor range for this driver. * * This structure is used for the usb_register_dev() and * usb_deregister_dev() functions, to consolidate a number of the * parameters used for them. */ struct usb_class_driver { char *name; char *(*devnode)(struct device *dev, umode_t *mode); const struct file_operations *fops; int minor_base; }; /* * use these in module_init()/module_exit() * and don't forget MODULE_DEVICE_TABLE(usb, ...) 
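 *
 * e.g. a hand-rolled registration sketch (most drivers can instead use
 * the module_usb_driver() helper declared below; example_driver is the
 * hypothetical driver from the struct usb_driver kernel-doc above):
 *
 *	static int __init example_init(void)
 *	{
 *		return usb_register(&example_driver);
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		usb_deregister(&example_driver);
 *	}
 *	module_exit(example_exit);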
*/ extern int usb_register_driver(struct usb_driver *, struct module *, const char *); /* use a define to avoid include chaining to get THIS_MODULE & friends */ #define usb_register(driver) \ usb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) extern void usb_deregister(struct usb_driver *); /** * module_usb_driver() - Helper macro for registering a USB driver * @__usb_driver: usb_driver struct * * Helper macro for USB drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_usb_driver(__usb_driver) \ module_driver(__usb_driver, usb_register, \ usb_deregister) extern int usb_register_device_driver(struct usb_device_driver *, struct module *); extern void usb_deregister_device_driver(struct usb_device_driver *); extern int usb_register_dev(struct usb_interface *intf, struct usb_class_driver *class_driver); extern void usb_deregister_dev(struct usb_interface *intf, struct usb_class_driver *class_driver); extern int usb_disabled(void); /* ----------------------------------------------------------------------- */ /* * URB support, for asynchronous request completions */ /* * urb->transfer_flags: * * Note: URB_DIR_IN/OUT is automatically set in usb_submit_urb(). */ #define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */ #define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired * slot in the schedule */ #define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */ #define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */ #define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt * needed */ #define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */ /* The following flags are used internally by usbcore and HCDs */ #define URB_DIR_IN 0x0200 /* Transfer from device to host */ #define URB_DIR_OUT 0 #define URB_DIR_MASK URB_DIR_IN #define URB_DMA_MAP_SINGLE 0x00010000 /* Non-scatter-gather mapping */ #define URB_DMA_MAP_PAGE 0x00020000 /* HCD-unsupported S-G */ #define URB_DMA_MAP_SG 0x00040000 /* HCD-supported S-G */ #define URB_MAP_LOCAL 0x00080000 /* HCD-local-memory mapping */ #define URB_SETUP_MAP_SINGLE 0x00100000 /* Setup packet DMA mapped */ #define URB_SETUP_MAP_LOCAL 0x00200000 /* HCD-local setup packet */ #define URB_DMA_SG_COMBINED 0x00400000 /* S-G entries were combined */ #define URB_ALIGNED_TEMP_BUFFER 0x00800000 /* Temp buffer was alloc'd */ struct usb_iso_packet_descriptor { unsigned int offset; unsigned int length; /* expected length */ unsigned int actual_length; int status; }; struct urb; struct usb_anchor { struct list_head urb_list; wait_queue_head_t wait; spinlock_t lock; atomic_t suspend_wakeups; unsigned int poisoned:1; }; static inline void init_usb_anchor(struct usb_anchor *anchor) { memset(anchor, 0, sizeof(*anchor)); INIT_LIST_HEAD(&anchor->urb_list); init_waitqueue_head(&anchor->wait); spin_lock_init(&anchor->lock); } typedef void (*usb_complete_t)(struct urb *); /** * struct urb - USB Request Block * @urb_list: For use by current owner of the URB. * @anchor_list: membership in the list of an anchor * @anchor: to anchor URBs to a common mooring * @ep: Points to the endpoint's data structure. Will eventually * replace @pipe. * @pipe: Holds endpoint number, direction, type, and more. 
* Create these values with the eight macros available; * usb_{snd,rcv}TYPEpipe(dev,endpoint), where the TYPE is "ctrl" * (control), "bulk", "int" (interrupt), or "iso" (isochronous). * For example usb_sndbulkpipe() or usb_rcvintpipe(). Endpoint * numbers range from zero to fifteen. Note that "in" endpoint two * is a different endpoint (and pipe) from "out" endpoint two. * The current configuration controls the existence, type, and * maximum packet size of any given endpoint. * @stream_id: the endpoint's stream ID for bulk streams * @dev: Identifies the USB device to perform the request. * @status: This is read in non-iso completion functions to get the * status of the particular request. ISO requests only use it * to tell whether the URB was unlinked; detailed status for * each frame is in the fields of the iso_frame_desc. * @transfer_flags: A variety of flags may be used to affect how URB * submission, unlinking, or operation are handled. Different * kinds of URB can use different flags. * @transfer_buffer: This identifies the buffer to (or from) which the I/O * request will be performed unless URB_NO_TRANSFER_DMA_MAP is set * (however, do not leave garbage in transfer_buffer even then). * This buffer must be suitable for DMA; allocate it with * kmalloc() or equivalent. For transfers to "in" endpoints, contents * of this buffer will be modified. This buffer is used for the data * stage of control transfers. * @transfer_dma: When transfer_flags includes URB_NO_TRANSFER_DMA_MAP, * the device driver is saying that it provided this DMA address, * which the host controller driver should use in preference to the * transfer_buffer. * @sg: scatter gather buffer list, the buffer size of each element in * the list (except the last) must be divisible by the endpoint's * max packet size if no_sg_constraint isn't set in 'struct usb_bus' * @num_mapped_sgs: (internal) number of mapped sg entries * @num_sgs: number of entries in the sg list * @transfer_buffer_length: How big is transfer_buffer. The transfer may * be broken up into chunks according to the current maximum packet * size for the endpoint, which is a function of the configuration * and is encoded in the pipe. When the length is zero, neither * transfer_buffer nor transfer_dma is used. * @actual_length: This is read in non-iso completion functions, and * it tells how many bytes (out of transfer_buffer_length) were * transferred. It will normally be the same as requested, unless * either an error was reported or a short read was performed. * The URB_SHORT_NOT_OK transfer flag may be used to make such * short reads be reported as errors. * @setup_packet: Only used for control transfers, this points to eight bytes * of setup data. Control transfers always start by sending this data * to the device. Then transfer_buffer is read or written, if needed. * @setup_dma: DMA pointer for the setup packet. The caller must not use * this field; setup_packet must point to a valid buffer. * @start_frame: Returns the initial frame for isochronous transfers. * @number_of_packets: Lists the number of ISO transfer buffers. * @interval: Specifies the polling interval for interrupt or isochronous * transfers. The units are frames (milliseconds) for full and low * speed devices, and microframes (1/8 millisecond) for highspeed * and SuperSpeed devices. * @error_count: Returns the number of ISO transfers that reported errors. * @context: For use in completion functions. This normally points to * request-specific driver context. * @complete: Completion handler.
This URB is passed as the parameter to the * completion function. The completion function may then do what * it likes with the URB, including resubmitting or freeing it. * @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to * collect the transfer status for each buffer. * * This structure identifies USB transfer requests. URBs must be allocated by * calling usb_alloc_urb() and freed with a call to usb_free_urb(). * Initialization may be done using various usb_fill_*_urb() functions. URBs * are submitted using usb_submit_urb(), and pending requests may be canceled * using usb_unlink_urb() or usb_kill_urb(). * * Data Transfer Buffers: * * Normally drivers provide I/O buffers allocated with kmalloc() or otherwise * taken from the general page pool. That is provided by transfer_buffer * (control requests also use setup_packet), and host controller drivers * perform a dma mapping (and unmapping) for each buffer transferred. Those * mapping operations can be expensive on some platforms (perhaps using a dma * bounce buffer or talking to an IOMMU), * although they're cheap on commodity x86 and ppc hardware. * * Alternatively, drivers may pass the URB_NO_TRANSFER_DMA_MAP transfer flag, * which tells the host controller driver that no such mapping is needed for * the transfer_buffer since * the device driver is DMA-aware. For example, a device driver might * allocate a DMA buffer with usb_alloc_coherent() or call usb_buffer_map(). * When this transfer flag is provided, host controller drivers will * attempt to use the dma address found in the transfer_dma * field rather than determining a dma address themselves. * * Note that transfer_buffer must still be set if the controller * does not support DMA (as indicated by hcd_uses_dma()) and when talking * to the root hub. If you have to transfer between the highmem zone and the device * on such a controller, create a bounce buffer or bail out with an error. * If transfer_buffer cannot be set (is in highmem) and the controller is DMA * capable, assign NULL to it, so that usbmon knows not to use the value. * The setup_packet must always be set, so it cannot be located in highmem. * * Initialization: * * All URBs submitted must initialize the dev, pipe, transfer_flags (may be * zero), and complete fields. All URBs must also initialize * transfer_buffer and transfer_buffer_length. They may provide the * URB_SHORT_NOT_OK transfer flag, indicating that short reads are * to be treated as errors; that flag is invalid for write requests. * * Bulk URBs may * use the URB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers * should always terminate with a short packet, even if it means adding an * extra zero length packet. * * Control URBs must provide a valid pointer in the setup_packet field. * Unlike the transfer_buffer, the setup_packet may not be mapped for DMA * beforehand. * * Interrupt URBs must provide an interval, saying how often (in milliseconds * or, for highspeed devices, 125 microsecond units) * to poll for transfers. After the URB has been submitted, the interval * field reflects how the transfer was actually scheduled. * The polling interval may be more frequent than requested. * For example, some controllers have a maximum interval of 32 milliseconds, * while others support intervals of up to 1024 milliseconds. * Isochronous URBs also have transfer intervals. (Note that for isochronous * endpoints, as well as high speed interrupt endpoints, the encoding of * the transfer interval in the endpoint descriptor is logarithmic.
* Device drivers must convert that value to linear units themselves.) * * If an isochronous endpoint queue isn't already running, the host * controller will schedule a new URB to start as soon as bandwidth * utilization allows. If the queue is running then a new URB will be * scheduled to start in the first transfer slot following the end of the * preceding URB, if that slot has not already expired. If the slot has * expired (which can happen when IRQ delivery is delayed for a long time), * the scheduling behavior depends on the URB_ISO_ASAP flag. If the flag * is clear then the URB will be scheduled to start in the expired slot, * implying that some of its packets will not be transferred; if the flag * is set then the URB will be scheduled in the first unexpired slot, * breaking the queue's synchronization. Upon URB completion, the * start_frame field will be set to the (micro)frame number in which the * transfer was scheduled. Ranges for frame counter values are HC-specific * and can go from as low as 256 to as high as 65536 frames. * * Isochronous URBs have a different data transfer model, in part because * the quality of service is only "best effort". Callers provide specially * allocated URBs, with number_of_packets worth of iso_frame_desc structures * at the end. Each such packet is an individual ISO transfer. Isochronous * URBs are normally queued, submitted by drivers to arrange that * transfers are at least double buffered, and then explicitly resubmitted * in completion handlers, so * that data (such as audio or video) streams at as constant a rate as the * host controller scheduler can support. * * Completion Callbacks: * * The completion callback is made in_interrupt(), and one of the first * things that a completion handler should do is check the status field. * The status field is provided for all URBs. It is used to report * unlinked URBs, and status for all non-ISO transfers. It should not * be examined before the URB is returned to the completion handler. * * The context field is normally used to link URBs back to the relevant * driver or request state. * * When the completion callback is invoked for non-isochronous URBs, the * actual_length field tells how many bytes were transferred. This field * is updated even when the URB terminated with an error or was unlinked. * * ISO transfer status is reported in the status and actual_length fields * of the iso_frame_desc array, and the number of errors is reported in * error_count. Completion callbacks for ISO transfers will normally * (re)submit URBs to ensure a constant transfer rate. * * Note that even fields marked "public" should not be touched by the driver * when the urb is owned by the hcd, that is, from the call to * usb_submit_urb() until entry into the completion routine.
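 *
 * A sketch of the usual lifecycle for a bulk IN transfer (illustrative
 * only: the endpoint number, buf/buf_len/ctx and example_* names are
 * assumptions, not part of this header):
 *
 *	static void example_complete(struct urb *urb)
 *	{
 *		if (urb->status == 0)
 *			example_consume(urb->transfer_buffer,
 *					urb->actual_length);
 *	}
 *
 *	urb = usb_alloc_urb(0, GFP_KERNEL);
 *	if (!urb)
 *		return -ENOMEM;
 *	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 1),
 *			  buf, buf_len, example_complete, ctx);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	usb_free_urb(urb);
 *
 * Dropping the local reference with usb_free_urb() immediately after a
 * successful submission is safe because usbcore takes its own reference
 * for the duration of the transfer; the URB is freed only after the
 * completion handler has run.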
*/ struct urb { /* private: usb core and host controller only fields in the urb */ struct kref kref; /* reference count of the URB */ int unlinked; /* unlink error code */ void *hcpriv; /* private data for host controller */ atomic_t use_count; /* concurrent submissions counter */ atomic_t reject; /* submissions will fail */ /* public: documented fields in the urb that can be used by drivers */ struct list_head urb_list; /* list head for use by the urb's * current owner */ struct list_head anchor_list; /* the URB may be anchored */ struct usb_anchor *anchor; struct usb_device *dev; /* (in) pointer to associated device */ struct usb_host_endpoint *ep; /* (internal) pointer to endpoint */ unsigned int pipe; /* (in) pipe information */ unsigned int stream_id; /* (in) stream ID */ int status; /* (return) non-ISO status */ unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/ void *transfer_buffer; /* (in) associated data buffer */ dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */ struct scatterlist *sg; /* (in) scatter gather buffer list */ int num_mapped_sgs; /* (internal) mapped sg entries */ int num_sgs; /* (in) number of entries in the sg list */ u32 transfer_buffer_length; /* (in) data buffer length */ u32 actual_length; /* (return) actual transfer length */ unsigned char *setup_packet; /* (in) setup packet (control only) */ dma_addr_t setup_dma; /* (in) dma addr for setup_packet */ int start_frame; /* (modify) start frame (ISO) */ int number_of_packets; /* (in) number of ISO packets */ int interval; /* (modify) transfer interval * (INT/ISO) */ int error_count; /* (return) number of ISO errors */ void *context; /* (in) context for completion */ usb_complete_t complete; /* (in) completion routine */ struct usb_iso_packet_descriptor iso_frame_desc[0]; /* (in) ISO ONLY */ }; /* ----------------------------------------------------------------------- */ /** * usb_fill_control_urb - initializes a control urb * @urb: pointer to the urb to initialize. * @dev: pointer to the struct usb_device for this urb. * @pipe: the endpoint pipe * @setup_packet: pointer to the setup_packet buffer * @transfer_buffer: pointer to the transfer buffer * @buffer_length: length of the transfer buffer * @complete_fn: pointer to the usb_complete_t function * @context: what to set the urb context to. * * Initializes a control urb with the proper information needed to submit * it to a device. */ static inline void usb_fill_control_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, unsigned char *setup_packet, void *transfer_buffer, int buffer_length, usb_complete_t complete_fn, void *context) { urb->dev = dev; urb->pipe = pipe; urb->setup_packet = setup_packet; urb->transfer_buffer = transfer_buffer; urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; } /** * usb_fill_bulk_urb - macro to help initialize a bulk urb * @urb: pointer to the urb to initialize. * @dev: pointer to the struct usb_device for this urb. * @pipe: the endpoint pipe * @transfer_buffer: pointer to the transfer buffer * @buffer_length: length of the transfer buffer * @complete_fn: pointer to the usb_complete_t function * @context: what to set the urb context to. * * Initializes a bulk urb with the proper information needed to submit it * to a device. 
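 *
 * For example (a sketch; urb, udev, the OUT endpoint 2 and the data
 * buffer are assumed to exist already):
 *
 *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
 *			  data, len, example_complete, NULL);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);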
*/ static inline void usb_fill_bulk_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, void *transfer_buffer, int buffer_length, usb_complete_t complete_fn, void *context) { urb->dev = dev; urb->pipe = pipe; urb->transfer_buffer = transfer_buffer; urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; } /** * usb_fill_int_urb - macro to help initialize an interrupt urb * @urb: pointer to the urb to initialize. * @dev: pointer to the struct usb_device for this urb. * @pipe: the endpoint pipe * @transfer_buffer: pointer to the transfer buffer * @buffer_length: length of the transfer buffer * @complete_fn: pointer to the usb_complete_t function * @context: what to set the urb context to. * @interval: what to set the urb interval to, encoded like * the endpoint descriptor's bInterval value. * * Initializes an interrupt urb with the proper information needed to submit * it to a device. * * Note that High Speed and SuperSpeed(+) interrupt endpoints use a logarithmic * encoding of the endpoint interval, and express polling intervals in * microframes (eight per millisecond) rather than in frames (one per * millisecond). * * Wireless USB also uses the logarithmic encoding, but specifies it in units of * 128us instead of 125us. For Wireless USB devices, the interval is passed * through to the host controller, rather than being translated into microframe * units. */ static inline void usb_fill_int_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, void *transfer_buffer, int buffer_length, usb_complete_t complete_fn, void *context, int interval) { urb->dev = dev; urb->pipe = pipe; urb->transfer_buffer = transfer_buffer; urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) { /* make sure interval is within allowed range */ interval = clamp(interval, 1, 16); urb->interval = 1 << (interval - 1); } else { urb->interval = interval; } urb->start_frame = -1; } extern void usb_init_urb(struct urb *urb); extern struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags); extern void usb_free_urb(struct urb *urb); #define usb_put_urb usb_free_urb extern struct urb *usb_get_urb(struct urb *urb); extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags); extern int usb_unlink_urb(struct urb *urb); extern void usb_kill_urb(struct urb *urb); extern void usb_poison_urb(struct urb *urb); extern void usb_unpoison_urb(struct urb *urb); extern void usb_block_urb(struct urb *urb); extern void usb_kill_anchored_urbs(struct usb_anchor *anchor); extern void usb_poison_anchored_urbs(struct usb_anchor *anchor); extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor); extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor); extern void usb_anchor_suspend_wakeups(struct usb_anchor *anchor); extern void usb_anchor_resume_wakeups(struct usb_anchor *anchor); extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor); extern void usb_unanchor_urb(struct urb *urb); extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor, unsigned int timeout); extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor); extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor); extern int usb_anchor_empty(struct usb_anchor *anchor); #define usb_unblock_urb usb_unpoison_urb /** * usb_urb_dir_in - check if an URB describes an IN transfer * @urb: URB to be checked * * Return: 1 if @urb describes an IN transfer
(device-to-host), * otherwise 0. */ static inline int usb_urb_dir_in(struct urb *urb) { return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN; } /** * usb_urb_dir_out - check if an URB describes an OUT transfer * @urb: URB to be checked * * Return: 1 if @urb describes an OUT transfer (host-to-device), * otherwise 0. */ static inline int usb_urb_dir_out(struct urb *urb) { return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT; } int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe); int usb_urb_ep_type_check(const struct urb *urb); void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags, dma_addr_t *dma); void usb_free_coherent(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma); #if 0 struct urb *usb_buffer_map(struct urb *urb); void usb_buffer_dmasync(struct urb *urb); void usb_buffer_unmap(struct urb *urb); #endif struct scatterlist; int usb_buffer_map_sg(const struct usb_device *dev, int is_in, struct scatterlist *sg, int nents); #if 0 void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in, struct scatterlist *sg, int n_hw_ents); #endif void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, struct scatterlist *sg, int n_hw_ents); /*-------------------------------------------------------------------* * SYNCHRONOUS CALL SUPPORT * *-------------------------------------------------------------------*/ extern int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout); extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout); extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout); /* wrappers around usb_control_msg() for the most common standard requests */ int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, const void *data, __u16 size, int timeout, gfp_t memflags); int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout, gfp_t memflags); extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype, unsigned char descindex, void *buf, int size); extern int usb_get_status(struct usb_device *dev, int recip, int type, int target, void *data); static inline int usb_get_std_status(struct usb_device *dev, int recip, int target, void *data) { return usb_get_status(dev, recip, USB_STATUS_TYPE_STANDARD, target, data); } static inline int usb_get_ptm_status(struct usb_device *dev, void *data) { return usb_get_status(dev, USB_RECIP_DEVICE, USB_STATUS_TYPE_PTM, 0, data); } extern int usb_string(struct usb_device *dev, int index, char *buf, size_t size); /* wrappers that also update important state inside usbcore */ extern int usb_clear_halt(struct usb_device *dev, int pipe); extern int usb_reset_configuration(struct usb_device *dev); extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate); extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr); /* this request isn't really synchronous, but it belongs with the others */ extern int usb_driver_set_configuration(struct usb_device *udev, int config); /* choose and set configuration for device */ extern int usb_choose_configuration(struct usb_device *udev); extern int usb_set_configuration(struct usb_device *dev, int 
configuration); /* * timeouts, in milliseconds, used for sending/receiving control messages * they typically complete within a few frames (msec) after they're issued * USB identifies 5 second timeouts, maybe more in a few cases, and a few * slow devices (like some MGE Ellipse UPSes) actually push that limit. */ #define USB_CTRL_GET_TIMEOUT 5000 #define USB_CTRL_SET_TIMEOUT 5000 /** * struct usb_sg_request - support for scatter/gather I/O * @status: zero indicates success, else negative errno * @bytes: counts bytes transferred. * * These requests are initialized using usb_sg_init(), and then are used * as request handles passed to usb_sg_wait() or usb_sg_cancel(). Most * members of the request object aren't for driver access. * * The status and bytecount values are valid only after usb_sg_wait() * returns. If the status is zero, then the bytecount matches the total * from the request. * * After an error completion, drivers may need to clear a halt condition * on the endpoint. */ struct usb_sg_request { int status; size_t bytes; /* private: * members below are private to usbcore, * and are not provided for driver access! */ spinlock_t lock; struct usb_device *dev; int pipe; int entries; struct urb **urbs; int count; struct completion complete; }; int usb_sg_init( struct usb_sg_request *io, struct usb_device *dev, unsigned pipe, unsigned period, struct scatterlist *sg, int nents, size_t length, gfp_t mem_flags ); void usb_sg_cancel(struct usb_sg_request *io); void usb_sg_wait(struct usb_sg_request *io); /* ----------------------------------------------------------------------- */ /* * For various legacy reasons, Linux has a small cookie that's paired with * a struct usb_device to identify an endpoint queue. Queue characteristics * are defined by the endpoint's descriptor. This cookie is called a "pipe", * an unsigned int encoded as: * * - direction: bit 7 (0 = Host-to-Device [Out], * 1 = Device-to-Host [In] ... * like endpoint bEndpointAddress) * - device address: bits 8-14 ... bit positions known to uhci-hcd * - endpoint: bits 15-18 ... bit positions known to uhci-hcd * - pipe type: bits 30-31 (00 = isochronous, 01 = interrupt, * 10 = control, 11 = bulk) * * Given the device address and endpoint descriptor, pipes are redundant. */ /* NOTE: these are not the standard USB_ENDPOINT_XFER_* values!! */ /* (yet ... they're the values used by usbfs) */ #define PIPE_ISOCHRONOUS 0 #define PIPE_INTERRUPT 1 #define PIPE_CONTROL 2 #define PIPE_BULK 3 #define usb_pipein(pipe) ((pipe) & USB_DIR_IN) #define usb_pipeout(pipe) (!usb_pipein(pipe)) #define usb_pipedevice(pipe) (((pipe) >> 8) & 0x7f) #define usb_pipeendpoint(pipe) (((pipe) >> 15) & 0xf) #define usb_pipetype(pipe) (((pipe) >> 30) & 3) #define usb_pipeisoc(pipe) (usb_pipetype((pipe)) == PIPE_ISOCHRONOUS) #define usb_pipeint(pipe) (usb_pipetype((pipe)) == PIPE_INTERRUPT) #define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL) #define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK) static inline unsigned int __create_pipe(struct usb_device *dev, unsigned int endpoint) { return (dev->devnum << 8) | (endpoint << 15); } /* Create various pipes... 
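 *
 * e.g. (sketch) for a device with a bulk IN endpoint 1 and an
 * interrupt OUT endpoint 2:
 *
 *	pipe_in  = usb_rcvbulkpipe(udev, 1);
 *	pipe_out = usb_sndintpipe(udev, 2);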
*/ #define usb_sndctrlpipe(dev, endpoint) \ ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint)) #define usb_rcvctrlpipe(dev, endpoint) \ ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) #define usb_sndisocpipe(dev, endpoint) \ ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint)) #define usb_rcvisocpipe(dev, endpoint) \ ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) #define usb_sndbulkpipe(dev, endpoint) \ ((PIPE_BULK << 30) | __create_pipe(dev, endpoint)) #define usb_rcvbulkpipe(dev, endpoint) \ ((PIPE_BULK << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) #define usb_sndintpipe(dev, endpoint) \ ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint)) #define usb_rcvintpipe(dev, endpoint) \ ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN) static inline struct usb_host_endpoint * usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe) { struct usb_host_endpoint **eps; eps = usb_pipein(pipe) ? dev->ep_in : dev->ep_out; return eps[usb_pipeendpoint(pipe)]; } /*-------------------------------------------------------------------------*/ static inline __u16 usb_maxpacket(struct usb_device *udev, int pipe, int is_out) { struct usb_host_endpoint *ep; unsigned epnum = usb_pipeendpoint(pipe); if (is_out) { WARN_ON(usb_pipein(pipe)); ep = udev->ep_out[epnum]; } else { WARN_ON(usb_pipeout(pipe)); ep = udev->ep_in[epnum]; } if (!ep) return 0; /* NOTE: only 0x07ff bits are for packet size... */ return usb_endpoint_maxp(&ep->desc); } /* ----------------------------------------------------------------------- */ /* translate USB error codes to codes user space understands */ static inline int usb_translate_errors(int error_code) { switch (error_code) { case 0: case -ENOMEM: case -ENODEV: case -EOPNOTSUPP: return error_code; default: return -EIO; } } /* Events from the usb core */ #define USB_DEVICE_ADD 0x0001 #define USB_DEVICE_REMOVE 0x0002 #define USB_BUS_ADD 0x0003 #define USB_BUS_REMOVE 0x0004 extern void usb_register_notify(struct notifier_block *nb); extern void usb_unregister_notify(struct notifier_block *nb); /* debugfs stuff */ extern struct dentry *usb_debug_root; /* LED triggers */ enum usb_led_event { USB_LED_EVENT_HOST = 0, USB_LED_EVENT_GADGET = 1, }; #ifdef CONFIG_USB_LED_TRIG extern void usb_led_activity(enum usb_led_event ev); #else static inline void usb_led_activity(enum usb_led_event ev) {} #endif #endif /* __KERNEL__ */ #endif mISDNhw.h 0000644 00000012623 14722070374 0006200 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * * Author Karsten Keil <kkeil@novell.com> * * Basic declarations for the mISDN HW channels * * Copyright 2008 by Karsten Keil <kkeil@novell.com> */ #ifndef MISDNHW_H #define MISDNHW_H #include <linux/mISDNif.h> #include <linux/timer.h> /* * HW DEBUG 0xHHHHGGGG * H - hardware driver specific bits * G - for all drivers */ #define DEBUG_HW 0x00000001 #define DEBUG_HW_OPEN 0x00000002 #define DEBUG_HW_DCHANNEL 0x00000100 #define DEBUG_HW_DFIFO 0x00000200 #define DEBUG_HW_BCHANNEL 0x00001000 #define DEBUG_HW_BFIFO 0x00002000 #define MAX_DFRAME_LEN_L1 300 #define MAX_MON_FRAME 32 #define MAX_LOG_SPACE 2048 #define MISDN_COPY_SIZE 32 /* channel->Flags bit field */ #define FLG_TX_BUSY 0 /* tx_buf in use */ #define FLG_TX_NEXT 1 /* next_skb in use */ #define FLG_L1_BUSY 2 /* L1 is permanent busy */ #define FLG_L2_ACTIVATED 3 /* activated from L2 */ #define FLG_OPEN 5 /* channel is in use */ #define FLG_ACTIVE 6 /* channel is activated */ #define FLG_BUSY_TIMER 7 /* channel type */ #define 
FLG_DCHANNEL 8 /* channel is D-channel */ #define FLG_BCHANNEL 9 /* channel is B-channel */ #define FLG_ECHANNEL 10 /* channel is E-channel */ #define FLG_TRANSPARENT 12 /* channel uses transparent data */ #define FLG_HDLC 13 /* channel uses hdlc data */ #define FLG_L2DATA 14 /* channel uses L2 DATA primitives */ #define FLG_ORIGIN 15 /* channel is on the originating side */ /* channel specific stuff */ #define FLG_FILLEMPTY 16 /* fill fifo on first frame (empty) */ /* arcofi specific */ #define FLG_ARCOFI_TIMER 17 #define FLG_ARCOFI_ERROR 18 /* isar specific */ #define FLG_INITIALIZED 17 #define FLG_DLEETX 18 #define FLG_LASTDLE 19 #define FLG_FIRST 20 #define FLG_LASTDATA 21 #define FLG_NMD_DATA 22 #define FLG_FTI_RUN 23 #define FLG_LL_OK 24 #define FLG_LL_CONN 25 #define FLG_DTMFSEND 26 #define FLG_TX_EMPTY 27 /* stop sending received data upstream */ #define FLG_RX_OFF 28 /* workq events */ #define FLG_RECVQUEUE 30 #define FLG_PHCHANGE 31 #define schedule_event(s, ev) do { \ test_and_set_bit(ev, &((s)->Flags)); \ schedule_work(&((s)->workq)); \ } while (0) struct dchannel { struct mISDNdevice dev; u_long Flags; struct work_struct workq; void (*phfunc) (struct dchannel *); u_int state; void *l1; void *hw; int slot; /* multiport card channel slot */ struct timer_list timer; /* receive data */ struct sk_buff *rx_skb; int maxlen; /* send data */ struct sk_buff_head squeue; struct sk_buff_head rqueue; struct sk_buff *tx_skb; int tx_idx; int debug; /* statistics */ int err_crc; int err_tx; int err_rx; }; typedef int (dchannel_l1callback)(struct dchannel *, u_int); extern int create_l1(struct dchannel *, dchannel_l1callback *); /* private L1 commands */ #define INFO0 0x8002 #define INFO1 0x8102 #define INFO2 0x8202 #define INFO3_P8 0x8302 #define INFO3_P10 0x8402 #define INFO4_P8 0x8502 #define INFO4_P10 0x8602 #define LOSTFRAMING 0x8702 #define ANYSIGNAL 0x8802 #define HW_POWERDOWN 0x8902 #define HW_RESET_REQ 0x8a02 #define HW_POWERUP_REQ 0x8b02 #define HW_DEACT_REQ 0x8c02 #define HW_ACTIVATE_REQ 0x8e02 #define HW_D_NOBLOCKED 0x8f02 #define HW_RESET_IND 0x9002 #define HW_POWERUP_IND 0x9102 #define HW_DEACT_IND 0x9202 #define HW_ACTIVATE_IND 0x9302 #define HW_DEACT_CNF 0x9402 #define HW_TESTLOOP 0x9502 #define HW_TESTRX_RAW 0x9602 #define HW_TESTRX_HDLC 0x9702 #define HW_TESTRX_OFF 0x9802 #define HW_TIMER3_IND 0x9902 #define HW_TIMER3_VALUE 0x9a00 #define HW_TIMER3_VMASK 0x00FF struct layer1; extern int l1_event(struct layer1 *, u_int); #define MISDN_BCH_FILL_SIZE 4 struct bchannel { struct mISDNchannel ch; int nr; u_long Flags; struct work_struct workq; u_int state; void *hw; int slot; /* multiport card channel slot */ struct timer_list timer; /* receive data */ u8 fill[MISDN_BCH_FILL_SIZE]; struct sk_buff *rx_skb; unsigned short maxlen; unsigned short init_maxlen; /* initial value */ unsigned short next_maxlen; /* pending value */ unsigned short minlen; /* for transparent data */ unsigned short init_minlen; /* initial value */ unsigned short next_minlen; /* pending value */ /* send data */ struct sk_buff *next_skb; struct sk_buff *tx_skb; struct sk_buff_head rqueue; int rcount; int tx_idx; int debug; /* statistics */ int err_crc; int err_tx; int err_rx; int dropcnt; }; extern int mISDN_initdchannel(struct dchannel *, int, void *); extern int mISDN_initbchannel(struct bchannel *, unsigned short, unsigned short); extern int mISDN_freedchannel(struct dchannel *); extern void mISDN_clear_bchannel(struct bchannel *); extern void mISDN_freebchannel(struct bchannel *); extern int mISDN_ctrl_bchannel(struct
bchannel *, struct mISDN_ctrl_req *); extern void queue_ch_frame(struct mISDNchannel *, u_int, int, struct sk_buff *); extern int dchannel_senddata(struct dchannel *, struct sk_buff *); extern int bchannel_senddata(struct bchannel *, struct sk_buff *); extern int bchannel_get_rxbuf(struct bchannel *, int); extern void recv_Dchannel(struct dchannel *); extern void recv_Echannel(struct dchannel *, struct dchannel *); extern void recv_Bchannel(struct bchannel *, unsigned int, bool); extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *); extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *); extern int get_next_bframe(struct bchannel *); extern int get_next_dframe(struct dchannel *); #endif intel_th.h 0000644 00000004705 14722070374 0006537 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Intel(R) Trace Hub data structures for implementing buffer sinks. * * Copyright (C) 2019 Intel Corporation. */ #ifndef _INTEL_TH_H_ #define _INTEL_TH_H_ #include <linux/scatterlist.h> /* MSC operating modes (MSC_MODE) */ enum { MSC_MODE_SINGLE = 0, MSC_MODE_MULTI, MSC_MODE_EXI, MSC_MODE_DEBUG, }; struct msu_buffer { const char *name; /* * ->assign() called when buffer 'mode' is set to this driver * (aka mode_store()) * @device: struct device * of the msc * @mode: allows the driver to set HW mode (see the enum above) * Returns: a pointer to a private structure associated with this * msc or NULL in case of error. This private structure * will then be passed into all other callbacks. */ void *(*assign)(struct device *dev, int *mode); /* ->unassign(): some other mode is selected, clean up */ void (*unassign)(void *priv); /* * ->alloc_window(): allocate memory for the window of a given * size * @sgt: pointer to sg_table, can be overridden by the buffer * driver, or kept intact * Returns: number of sg table entries <= number of pages; * 0 is treated as an allocation failure. */ int (*alloc_window)(void *priv, struct sg_table **sgt, size_t size); void (*free_window)(void *priv, struct sg_table *sgt); /* ->activate(): trace has started */ void (*activate)(void *priv); /* ->deactivate(): trace is about to stop */ void (*deactivate)(void *priv); /* * ->ready(): window @sgt is filled up to the last block OR * tracing is stopped by the user; this window contains * @bytes data. The window in question transitions into * the "LOCKED" state, indicating that it can't be used * by hardware. To clear this state and make the window * available to the hardware again, call * intel_th_msc_window_unlock(). */ int (*ready)(void *priv, struct sg_table *sgt, size_t bytes); }; int intel_th_msu_buffer_register(const struct msu_buffer *mbuf, struct module *owner); void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf); void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt); #define module_intel_th_msu_buffer(__buffer) \ static int __init __buffer##_init(void) \ { \ return intel_th_msu_buffer_register(&(__buffer), THIS_MODULE); \ } \ module_init(__buffer##_init); \ static void __exit __buffer##_exit(void) \ { \ intel_th_msu_buffer_unregister(&(__buffer)); \ } \ module_exit(__buffer##_exit); #endif /* _INTEL_TH_H_ */ ata.h 0000644 00000101712 14722070374 0005472 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 
* Copyright 2003-2004 Jeff Garzik * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/driver-api/libata.rst * * Hardware documentation available from http://www.t13.org/ */ #ifndef __LINUX_ATA_H__ #define __LINUX_ATA_H__ #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <asm/byteorder.h> /* defines only for the constants which don't work well as enums */ #define ATA_DMA_BOUNDARY 0xffffUL #define ATA_DMA_MASK 0xffffffffULL enum { /* various global constants */ ATA_MAX_DEVICES = 2, /* per bus/port */ ATA_MAX_PRD = 256, /* we could make these 256/256 */ ATA_SECT_SIZE = 512, ATA_MAX_SECTORS_128 = 128, ATA_MAX_SECTORS = 256, ATA_MAX_SECTORS_1024 = 1024, ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */ ATA_MAX_SECTORS_TAPE = 65535, ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */ ATA_ID_WORDS = 256, ATA_ID_CONFIG = 0, ATA_ID_CYLS = 1, ATA_ID_HEADS = 3, ATA_ID_SECTORS = 6, ATA_ID_SERNO = 10, ATA_ID_BUF_SIZE = 21, ATA_ID_FW_REV = 23, ATA_ID_PROD = 27, ATA_ID_MAX_MULTSECT = 47, ATA_ID_DWORD_IO = 48, /* before ATA-8 */ ATA_ID_TRUSTED = 48, /* ATA-8 and later */ ATA_ID_CAPABILITY = 49, ATA_ID_OLD_PIO_MODES = 51, ATA_ID_OLD_DMA_MODES = 52, ATA_ID_FIELD_VALID = 53, ATA_ID_CUR_CYLS = 54, ATA_ID_CUR_HEADS = 55, ATA_ID_CUR_SECTORS = 56, ATA_ID_MULTSECT = 59, ATA_ID_LBA_CAPACITY = 60, ATA_ID_SWDMA_MODES = 62, ATA_ID_MWDMA_MODES = 63, ATA_ID_PIO_MODES = 64, ATA_ID_EIDE_DMA_MIN = 65, ATA_ID_EIDE_DMA_TIME = 66, ATA_ID_EIDE_PIO = 67, ATA_ID_EIDE_PIO_IORDY = 68, ATA_ID_ADDITIONAL_SUPP = 69, ATA_ID_QUEUE_DEPTH = 75, ATA_ID_SATA_CAPABILITY = 76, ATA_ID_SATA_CAPABILITY_2 = 77, ATA_ID_FEATURE_SUPP = 78, ATA_ID_MAJOR_VER = 80, ATA_ID_COMMAND_SET_1 = 82, ATA_ID_COMMAND_SET_2 = 83, ATA_ID_CFSSE = 84, ATA_ID_CFS_ENABLE_1 = 85, ATA_ID_CFS_ENABLE_2 = 86, ATA_ID_CSF_DEFAULT = 87, ATA_ID_UDMA_MODES = 88, ATA_ID_HW_CONFIG = 93, ATA_ID_SPG = 98, ATA_ID_LBA_CAPACITY_2 = 100, ATA_ID_SECTOR_SIZE = 106, ATA_ID_WWN = 108, ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */ ATA_ID_COMMAND_SET_3 = 119, ATA_ID_COMMAND_SET_4 = 120, ATA_ID_LAST_LUN = 126, ATA_ID_DLF = 128, ATA_ID_CSFO = 129, ATA_ID_CFA_POWER = 160, ATA_ID_CFA_KEY_MGMT = 162, ATA_ID_CFA_MODES = 163, ATA_ID_DATA_SET_MGMT = 169, ATA_ID_SCT_CMD_XPORT = 206, ATA_ID_ROT_SPEED = 217, ATA_ID_PIO4 = (1 << 1), ATA_ID_SERNO_LEN = 20, ATA_ID_FW_REV_LEN = 8, ATA_ID_PROD_LEN = 40, ATA_ID_WWN_LEN = 8, ATA_PCI_CTL_OFS = 2, ATA_PIO0 = (1 << 0), ATA_PIO1 = ATA_PIO0 | (1 << 1), ATA_PIO2 = ATA_PIO1 | (1 << 2), ATA_PIO3 = ATA_PIO2 | (1 << 3), ATA_PIO4 = ATA_PIO3 | (1 << 4), ATA_PIO5 = ATA_PIO4 | (1 << 5), ATA_PIO6 = ATA_PIO5 | (1 << 6), ATA_PIO4_ONLY = (1 << 4), ATA_SWDMA0 = (1 << 0), ATA_SWDMA1 = ATA_SWDMA0 | (1 << 1), ATA_SWDMA2 = ATA_SWDMA1 | (1 << 2), ATA_SWDMA2_ONLY = (1 << 2), ATA_MWDMA0 = (1 << 0), ATA_MWDMA1 = ATA_MWDMA0 | (1 << 1), ATA_MWDMA2 = ATA_MWDMA1 | (1 << 2), ATA_MWDMA3 = ATA_MWDMA2 | (1 << 3), ATA_MWDMA4 = ATA_MWDMA3 | (1 << 4), ATA_MWDMA12_ONLY = (1 << 1) | (1 << 2), ATA_MWDMA2_ONLY = (1 << 2), ATA_UDMA0 = (1 << 0), ATA_UDMA1 = ATA_UDMA0 | (1 << 1), ATA_UDMA2 = ATA_UDMA1 | (1 << 2), ATA_UDMA3 = ATA_UDMA2 | (1 << 3), ATA_UDMA4 = ATA_UDMA3 | (1 << 4), ATA_UDMA5 = ATA_UDMA4 | (1 << 5), ATA_UDMA6 = ATA_UDMA5 | (1 << 6), ATA_UDMA7 = ATA_UDMA6 | (1 << 7), /* ATA_UDMA7 is just for completeness... doesn't exist (yet?). 
*/ ATA_UDMA24_ONLY = (1 << 2) | (1 << 4), ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */ /* DMA-related */ ATA_PRD_SZ = 8, ATA_PRD_TBL_SZ = (ATA_MAX_PRD * ATA_PRD_SZ), ATA_PRD_EOT = (1 << 31), /* end-of-table flag */ ATA_DMA_TABLE_OFS = 4, ATA_DMA_STATUS = 2, ATA_DMA_CMD = 0, ATA_DMA_WR = (1 << 3), ATA_DMA_START = (1 << 0), ATA_DMA_INTR = (1 << 2), ATA_DMA_ERR = (1 << 1), ATA_DMA_ACTIVE = (1 << 0), /* bits in ATA command block registers */ ATA_HOB = (1 << 7), /* LBA48 selector */ ATA_NIEN = (1 << 1), /* disable-irq flag */ ATA_LBA = (1 << 6), /* LBA28 selector */ ATA_DEV1 = (1 << 4), /* Select Device 1 (slave) */ ATA_DEVICE_OBS = (1 << 7) | (1 << 5), /* obs bits in dev reg */ ATA_DEVCTL_OBS = (1 << 3), /* obsolete bit in devctl reg */ ATA_BUSY = (1 << 7), /* BSY status bit */ ATA_DRDY = (1 << 6), /* device ready */ ATA_DF = (1 << 5), /* device fault */ ATA_DSC = (1 << 4), /* drive seek complete */ ATA_DRQ = (1 << 3), /* data request i/o */ ATA_CORR = (1 << 2), /* corrected data error */ ATA_SENSE = (1 << 1), /* sense code available */ ATA_ERR = (1 << 0), /* have an error */ ATA_SRST = (1 << 2), /* software reset */ ATA_ICRC = (1 << 7), /* interface CRC error */ ATA_BBK = ATA_ICRC, /* pre-EIDE: block marked bad */ ATA_UNC = (1 << 6), /* uncorrectable media error */ ATA_MC = (1 << 5), /* media changed */ ATA_IDNF = (1 << 4), /* ID not found */ ATA_MCR = (1 << 3), /* media change requested */ ATA_ABORTED = (1 << 2), /* command aborted */ ATA_TRK0NF = (1 << 1), /* track 0 not found */ ATA_AMNF = (1 << 0), /* address mark not found */ ATAPI_LFS = 0xF0, /* last failed sense */ ATAPI_EOM = ATA_TRK0NF, /* end of media */ ATAPI_ILI = ATA_AMNF, /* illegal length indication */ ATAPI_IO = (1 << 1), ATAPI_COD = (1 << 0), /* ATA command block registers */ ATA_REG_DATA = 0x00, ATA_REG_ERR = 0x01, ATA_REG_NSECT = 0x02, ATA_REG_LBAL = 0x03, ATA_REG_LBAM = 0x04, ATA_REG_LBAH = 0x05, ATA_REG_DEVICE = 0x06, ATA_REG_STATUS = 0x07, ATA_REG_FEATURE = ATA_REG_ERR, /* and their aliases */ ATA_REG_CMD = ATA_REG_STATUS, ATA_REG_BYTEL = ATA_REG_LBAM, ATA_REG_BYTEH = ATA_REG_LBAH, ATA_REG_DEVSEL = ATA_REG_DEVICE, ATA_REG_IRQ = ATA_REG_NSECT, /* ATA device commands */ ATA_CMD_DEV_RESET = 0x08, /* ATAPI device reset */ ATA_CMD_CHK_POWER = 0xE5, /* check power mode */ ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */ ATA_CMD_IDLE = 0xE3, /* place in idle power mode */ ATA_CMD_EDD = 0x90, /* execute device diagnostic */ ATA_CMD_DOWNLOAD_MICRO = 0x92, ATA_CMD_DOWNLOAD_MICRO_DMA = 0x93, ATA_CMD_NOP = 0x00, ATA_CMD_FLUSH = 0xE7, ATA_CMD_FLUSH_EXT = 0xEA, ATA_CMD_ID_ATA = 0xEC, ATA_CMD_ID_ATAPI = 0xA1, ATA_CMD_SERVICE = 0xA2, ATA_CMD_READ = 0xC8, ATA_CMD_READ_EXT = 0x25, ATA_CMD_READ_QUEUED = 0x26, ATA_CMD_READ_STREAM_EXT = 0x2B, ATA_CMD_READ_STREAM_DMA_EXT = 0x2A, ATA_CMD_WRITE = 0xCA, ATA_CMD_WRITE_EXT = 0x35, ATA_CMD_WRITE_QUEUED = 0x36, ATA_CMD_WRITE_STREAM_EXT = 0x3B, ATA_CMD_WRITE_STREAM_DMA_EXT = 0x3A, ATA_CMD_WRITE_FUA_EXT = 0x3D, ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E, ATA_CMD_FPDMA_READ = 0x60, ATA_CMD_FPDMA_WRITE = 0x61, ATA_CMD_NCQ_NON_DATA = 0x63, ATA_CMD_FPDMA_SEND = 0x64, ATA_CMD_FPDMA_RECV = 0x65, ATA_CMD_PIO_READ = 0x20, ATA_CMD_PIO_READ_EXT = 0x24, ATA_CMD_PIO_WRITE = 0x30, ATA_CMD_PIO_WRITE_EXT = 0x34, ATA_CMD_READ_MULTI = 0xC4, ATA_CMD_READ_MULTI_EXT = 0x29, ATA_CMD_WRITE_MULTI = 0xC5, ATA_CMD_WRITE_MULTI_EXT = 0x39, ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE, ATA_CMD_SET_FEATURES = 0xEF, ATA_CMD_SET_MULTI = 0xC6, ATA_CMD_PACKET = 0xA0, ATA_CMD_VERIFY = 0x40, ATA_CMD_VERIFY_EXT = 0x42, ATA_CMD_WRITE_UNCORR_EXT 
= 0x45, ATA_CMD_STANDBYNOW1 = 0xE0, ATA_CMD_IDLEIMMEDIATE = 0xE1, ATA_CMD_SLEEP = 0xE6, ATA_CMD_INIT_DEV_PARAMS = 0x91, ATA_CMD_READ_NATIVE_MAX = 0xF8, ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, ATA_CMD_SET_MAX = 0xF9, ATA_CMD_SET_MAX_EXT = 0x37, ATA_CMD_READ_LOG_EXT = 0x2F, ATA_CMD_WRITE_LOG_EXT = 0x3F, ATA_CMD_READ_LOG_DMA_EXT = 0x47, ATA_CMD_WRITE_LOG_DMA_EXT = 0x57, ATA_CMD_TRUSTED_NONDATA = 0x5B, ATA_CMD_TRUSTED_RCV = 0x5C, ATA_CMD_TRUSTED_RCV_DMA = 0x5D, ATA_CMD_TRUSTED_SND = 0x5E, ATA_CMD_TRUSTED_SND_DMA = 0x5F, ATA_CMD_PMP_READ = 0xE4, ATA_CMD_PMP_READ_DMA = 0xE9, ATA_CMD_PMP_WRITE = 0xE8, ATA_CMD_PMP_WRITE_DMA = 0xEB, ATA_CMD_CONF_OVERLAY = 0xB1, ATA_CMD_SEC_SET_PASS = 0xF1, ATA_CMD_SEC_UNLOCK = 0xF2, ATA_CMD_SEC_ERASE_PREP = 0xF3, ATA_CMD_SEC_ERASE_UNIT = 0xF4, ATA_CMD_SEC_FREEZE_LOCK = 0xF5, ATA_CMD_SEC_DISABLE_PASS = 0xF6, ATA_CMD_CONFIG_STREAM = 0x51, ATA_CMD_SMART = 0xB0, ATA_CMD_MEDIA_LOCK = 0xDE, ATA_CMD_MEDIA_UNLOCK = 0xDF, ATA_CMD_DSM = 0x06, ATA_CMD_CHK_MED_CRD_TYP = 0xD1, ATA_CMD_CFA_REQ_EXT_ERR = 0x03, ATA_CMD_CFA_WRITE_NE = 0x38, ATA_CMD_CFA_TRANS_SECT = 0x87, ATA_CMD_CFA_ERASE = 0xC0, ATA_CMD_CFA_WRITE_MULT_NE = 0xCD, ATA_CMD_REQ_SENSE_DATA = 0x0B, ATA_CMD_SANITIZE_DEVICE = 0xB4, ATA_CMD_ZAC_MGMT_IN = 0x4A, ATA_CMD_ZAC_MGMT_OUT = 0x9F, /* marked obsolete in the ATA/ATAPI-7 spec */ ATA_CMD_RESTORE = 0x10, /* Subcmds for ATA_CMD_FPDMA_RECV */ ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 0x01, ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 0x02, /* Subcmds for ATA_CMD_FPDMA_SEND */ ATA_SUBCMD_FPDMA_SEND_DSM = 0x00, ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02, /* Subcmds for ATA_CMD_NCQ_NON_DATA */ ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0x00, ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 0x05, ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 0x06, ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 0x07, /* Subcmds for ATA_CMD_ZAC_MGMT_IN */ ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0x00, /* Subcmds for ATA_CMD_ZAC_MGMT_OUT */ ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 0x01, ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 0x02, ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 0x03, ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 0x04, /* READ_LOG_EXT pages */ ATA_LOG_DIRECTORY = 0x0, ATA_LOG_SATA_NCQ = 0x10, ATA_LOG_NCQ_NON_DATA = 0x12, ATA_LOG_NCQ_SEND_RECV = 0x13, ATA_LOG_IDENTIFY_DEVICE = 0x30, /* Identify device log pages: */ ATA_LOG_SECURITY = 0x06, ATA_LOG_SATA_SETTINGS = 0x08, ATA_LOG_ZONED_INFORMATION = 0x09, /* Identify device SATA settings log:*/ ATA_LOG_DEVSLP_OFFSET = 0x30, ATA_LOG_DEVSLP_SIZE = 0x08, ATA_LOG_DEVSLP_MDAT = 0x00, ATA_LOG_DEVSLP_MDAT_MASK = 0x1F, ATA_LOG_DEVSLP_DETO = 0x01, ATA_LOG_DEVSLP_VALID = 0x07, ATA_LOG_DEVSLP_VALID_MASK = 0x80, ATA_LOG_NCQ_PRIO_OFFSET = 0x09, /* NCQ send and receive log */ ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00, ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = (1 << 0), ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 0x04, ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = (1 << 0), ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 0x08, ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = (1 << 0), ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 0x0C, ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = (1 << 0), ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 0x10, ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = (1 << 0), ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = (1 << 1), ATA_LOG_NCQ_SEND_RECV_SIZE = 0x14, /* NCQ Non-Data log */ ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0x00, ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0x00, ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = (1 << 0), ATA_LOG_NCQ_NON_DATA_ABORT_ALL = (1 << 1), ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = (1 << 2), ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = (1 << 3), 
ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = (1 << 4), ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 0x1C, ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = (1 << 0), ATA_LOG_NCQ_NON_DATA_SIZE = 0x40, /* READ/WRITE LONG (obsolete) */ ATA_CMD_READ_LONG = 0x22, ATA_CMD_READ_LONG_ONCE = 0x23, ATA_CMD_WRITE_LONG = 0x32, ATA_CMD_WRITE_LONG_ONCE = 0x33, /* SETFEATURES stuff */ SETFEATURES_XFER = 0x03, XFER_UDMA_7 = 0x47, XFER_UDMA_6 = 0x46, XFER_UDMA_5 = 0x45, XFER_UDMA_4 = 0x44, XFER_UDMA_3 = 0x43, XFER_UDMA_2 = 0x42, XFER_UDMA_1 = 0x41, XFER_UDMA_0 = 0x40, XFER_MW_DMA_4 = 0x24, /* CFA only */ XFER_MW_DMA_3 = 0x23, /* CFA only */ XFER_MW_DMA_2 = 0x22, XFER_MW_DMA_1 = 0x21, XFER_MW_DMA_0 = 0x20, XFER_SW_DMA_2 = 0x12, XFER_SW_DMA_1 = 0x11, XFER_SW_DMA_0 = 0x10, XFER_PIO_6 = 0x0E, /* CFA only */ XFER_PIO_5 = 0x0D, /* CFA only */ XFER_PIO_4 = 0x0C, XFER_PIO_3 = 0x0B, XFER_PIO_2 = 0x0A, XFER_PIO_1 = 0x09, XFER_PIO_0 = 0x08, XFER_PIO_SLOW = 0x00, SETFEATURES_WC_ON = 0x02, /* Enable write cache */ SETFEATURES_WC_OFF = 0x82, /* Disable write cache */ SETFEATURES_RA_ON = 0xaa, /* Enable read look-ahead */ SETFEATURES_RA_OFF = 0x55, /* Disable read look-ahead */ /* Enable/Disable Automatic Acoustic Management */ SETFEATURES_AAM_ON = 0x42, SETFEATURES_AAM_OFF = 0xC2, SETFEATURES_SPINUP = 0x07, /* Spin-up drive */ SETFEATURES_SPINUP_TIMEOUT = 30000, /* 30s timeout for drive spin-up from PUIS */ SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */ SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */ /* SETFEATURE Sector counts for SATA features */ SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */ SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS Auto-Activate */ SATA_DIPM = 0x03, /* Device Initiated Power Management */ SATA_FPDMA_IN_ORDER = 0x04, /* FPDMA in-order data delivery */ SATA_AN = 0x05, /* Asynchronous Notification */ SATA_SSP = 0x06, /* Software Settings Preservation */ SATA_DEVSLP = 0x09, /* Device Sleep */ SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */ /* feature values for SET_MAX */ ATA_SET_MAX_ADDR = 0x00, ATA_SET_MAX_PASSWD = 0x01, ATA_SET_MAX_LOCK = 0x02, ATA_SET_MAX_UNLOCK = 0x03, ATA_SET_MAX_FREEZE_LOCK = 0x04, ATA_SET_MAX_PASSWD_DMA = 0x05, ATA_SET_MAX_UNLOCK_DMA = 0x06, /* feature values for DEVICE CONFIGURATION OVERLAY */ ATA_DCO_RESTORE = 0xC0, ATA_DCO_FREEZE_LOCK = 0xC1, ATA_DCO_IDENTIFY = 0xC2, ATA_DCO_SET = 0xC3, /* feature values for SMART */ ATA_SMART_ENABLE = 0xD8, ATA_SMART_READ_VALUES = 0xD0, ATA_SMART_READ_THRESHOLDS = 0xD1, /* feature values for Data Set Management */ ATA_DSM_TRIM = 0x01, /* password used in LBA Mid / LBA High for executing SMART commands */ ATA_SMART_LBAM_PASS = 0x4F, ATA_SMART_LBAH_PASS = 0xC2, /* ATAPI stuff */ ATAPI_PKT_DMA = (1 << 0), ATAPI_DMADIR = (1 << 2), /* ATAPI data dir: 0=to device, 1=to host */ ATAPI_CDB_LEN = 16, /* PMP stuff */ SATA_PMP_MAX_PORTS = 15, SATA_PMP_CTRL_PORT = 15, SATA_PMP_GSCR_DWORDS = 128, SATA_PMP_GSCR_PROD_ID = 0, SATA_PMP_GSCR_REV = 1, SATA_PMP_GSCR_PORT_INFO = 2, SATA_PMP_GSCR_ERROR = 32, SATA_PMP_GSCR_ERROR_EN = 33, SATA_PMP_GSCR_FEAT = 64, SATA_PMP_GSCR_FEAT_EN = 96, SATA_PMP_PSCR_STATUS = 0, SATA_PMP_PSCR_ERROR = 1, SATA_PMP_PSCR_CONTROL = 2, SATA_PMP_FEAT_BIST = (1 << 0), SATA_PMP_FEAT_PMREQ = (1 << 1), SATA_PMP_FEAT_DYNSSC = (1 << 2), SATA_PMP_FEAT_NOTIFY = (1 << 3), /* cable types */ ATA_CBL_NONE = 0, ATA_CBL_PATA40 = 1, ATA_CBL_PATA80 = 2, ATA_CBL_PATA40_SHORT = 3, /* 40 wire cable to high UDMA spec */ ATA_CBL_PATA_UNK = 4, /* don't know, maybe 80c? 
*/ ATA_CBL_PATA_IGN = 5, /* don't know, ignore cable handling */ ATA_CBL_SATA = 6, /* SATA Status and Control Registers */ SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2, SCR_ACTIVE = 3, SCR_NOTIFICATION = 4, /* SError bits */ SERR_DATA_RECOVERED = (1 << 0), /* recovered data error */ SERR_COMM_RECOVERED = (1 << 1), /* recovered comm failure */ SERR_DATA = (1 << 8), /* unrecovered data error */ SERR_PERSISTENT = (1 << 9), /* persistent data/comm error */ SERR_PROTOCOL = (1 << 10), /* protocol violation */ SERR_INTERNAL = (1 << 11), /* host internal error */ SERR_PHYRDY_CHG = (1 << 16), /* PHY RDY changed */ SERR_PHY_INT_ERR = (1 << 17), /* PHY internal error */ SERR_COMM_WAKE = (1 << 18), /* Comm wake */ SERR_10B_8B_ERR = (1 << 19), /* 10b to 8b decode error */ SERR_DISPARITY = (1 << 20), /* Disparity */ SERR_CRC = (1 << 21), /* CRC error */ SERR_HANDSHAKE = (1 << 22), /* Handshake error */ SERR_LINK_SEQ_ERR = (1 << 23), /* Link sequence error */ SERR_TRANS_ST_ERROR = (1 << 24), /* Transport state trans. error */ SERR_UNRECOG_FIS = (1 << 25), /* Unrecognized FIS */ SERR_DEV_XCHG = (1 << 26), /* device exchanged */ }; enum ata_prot_flags { /* protocol flags */ ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */ ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */ ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */ ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */ /* taskfile protocols */ ATA_PROT_UNKNOWN = (u8)-1, ATA_PROT_NODATA = 0, ATA_PROT_PIO = ATA_PROT_FLAG_PIO, ATA_PROT_DMA = ATA_PROT_FLAG_DMA, ATA_PROT_NCQ_NODATA = ATA_PROT_FLAG_NCQ, ATA_PROT_NCQ = ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ, ATAPI_PROT_NODATA = ATA_PROT_FLAG_ATAPI, ATAPI_PROT_PIO = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO, ATAPI_PROT_DMA = ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA, }; enum ata_ioctls { ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */ }; /* core structures */ struct ata_bmdma_prd { __le32 addr; __le32 flags_len; }; /* * id tests */ #define ata_id_is_ata(id) (((id)[ATA_ID_CONFIG] & (1 << 15)) == 0) #define ata_id_has_lba(id) ((id)[ATA_ID_CAPABILITY] & (1 << 9)) #define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8)) #define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8)) #define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1) #define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7)) #define ata_id_has_atapi_AN(id) \ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 5))) #define ata_id_has_fpdma_aa(id) \ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 2))) #define ata_id_has_devslp(id) \ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))) #define ata_id_has_ncq_autosense(id) \ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))) #define ata_id_has_dipm(id) \ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 3))) #define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) #define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) #define ata_id_u32(id,n) \ (((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)])) #define ata_id_u64(id,n) \ ( ((u64) (id)[(n) + 3] << 48) | \ ((u64) (id)[(n) + 2] << 32) | \ ((u64) (id)[(n) + 1] << 16) | \ ((u64) 
(id)[(n) + 0]) ) #define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) #define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) static inline bool ata_id_has_hipm(const u16 *id) { u16 val = id[ATA_ID_SATA_CAPABILITY]; if (val == 0 || val == 0xffff) return false; return val & (1 << 9); } static inline bool ata_id_has_fua(const u16 *id) { if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000) return false; return id[ATA_ID_CFSSE] & (1 << 6); } static inline bool ata_id_has_flush(const u16 *id) { if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return false; return id[ATA_ID_COMMAND_SET_2] & (1 << 12); } static inline bool ata_id_flush_enabled(const u16 *id) { if (ata_id_has_flush(id) == 0) return false; if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return false; return id[ATA_ID_CFS_ENABLE_2] & (1 << 12); } static inline bool ata_id_has_flush_ext(const u16 *id) { if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return false; return id[ATA_ID_COMMAND_SET_2] & (1 << 13); } static inline bool ata_id_flush_ext_enabled(const u16 *id) { if (ata_id_has_flush_ext(id) == 0) return false; if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return false; /* * some Maxtor disks have bit 13 defined incorrectly * so check bit 10 too */ return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400; } static inline u32 ata_id_logical_sector_size(const u16 *id) { /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. * IDENTIFY DEVICE data, word 117-118. * 0xd000 ignores bit 13 (logical:physical > 1) */ if ((id[ATA_ID_SECTOR_SIZE] & 0xd000) == 0x5000) return (((id[ATA_ID_LOGICAL_SECTOR_SIZE+1] << 16) + id[ATA_ID_LOGICAL_SECTOR_SIZE]) * sizeof(u16)) ; return ATA_SECT_SIZE; } static inline u8 ata_id_log2_per_physical_sector(const u16 *id) { /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. * IDENTIFY DEVICE data, word 106. * 0xe000 ignores bit 12 (logical sector > 512 bytes) */ if ((id[ATA_ID_SECTOR_SIZE] & 0xe000) == 0x6000) return (id[ATA_ID_SECTOR_SIZE] & 0xf); return 0; } /* Offset of logical sectors relative to physical sectors. * * If device has more than one logical sector per physical sector * (aka 512 byte emulation), vendors might offset the "sector 0" address * so sector 63 is "naturally aligned" - e.g. FAT partition table. * This avoids Read/Mod/Write penalties when using FAT partition table * and updating "well aligned" (FS perspective) physical sectors on every * transaction. 
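 *
 * Worked example (illustrative numbers, not from any real device): with
 * 8 logical sectors per physical sector (log2_per_phys == 3) and word 209
 * reading 0x4001 (valid pattern 01b in bits 15:14, lowest aligned logical
 * sector == 1), the helper below returns (1 << 3) - 1 == 7.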
*/ static inline u16 ata_id_logical_sector_offset(const u16 *id, u8 log2_per_phys) { u16 word_209 = id[209]; if ((log2_per_phys > 1) && (word_209 & 0xc000) == 0x4000) { u16 first = word_209 & 0x3fff; if (first > 0) return (1 << log2_per_phys) - first; } return 0; } static inline bool ata_id_has_lba48(const u16 *id) { if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return false; if (!ata_id_u64(id, ATA_ID_LBA_CAPACITY_2)) return false; return id[ATA_ID_COMMAND_SET_2] & (1 << 10); } static inline bool ata_id_lba48_enabled(const u16 *id) { if (ata_id_has_lba48(id) == 0) return false; if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return false; return id[ATA_ID_CFS_ENABLE_2] & (1 << 10); } static inline bool ata_id_hpa_enabled(const u16 *id) { /* Yes children, word 83 valid bits cover word 82 data */ if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return false; /* And 87 covers 85-87 */ if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return false; /* Check command sets enabled as well as supported */ if ((id[ATA_ID_CFS_ENABLE_1] & (1 << 10)) == 0) return false; return id[ATA_ID_COMMAND_SET_1] & (1 << 10); } static inline bool ata_id_has_wcache(const u16 *id) { /* Yes children, word 83 valid bits cover word 82 data */ if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return false; return id[ATA_ID_COMMAND_SET_1] & (1 << 5); } static inline bool ata_id_has_pm(const u16 *id) { if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return false; return id[ATA_ID_COMMAND_SET_1] & (1 << 3); } static inline bool ata_id_rahead_enabled(const u16 *id) { if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return false; return id[ATA_ID_CFS_ENABLE_1] & (1 << 6); } static inline bool ata_id_wcache_enabled(const u16 *id) { if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return false; return id[ATA_ID_CFS_ENABLE_1] & (1 << 5); } static inline bool ata_id_has_read_log_dma_ext(const u16 *id) { /* Word 86 must have bit 15 set */ if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) return false; /* READ LOG DMA EXT support can be signaled either from word 119 * or from word 120. The format is the same for both words: Bit * 15 must be cleared, bit 14 set and bit 3 set. */ if ((id[ATA_ID_COMMAND_SET_3] & 0xC008) == 0x4008 || (id[ATA_ID_COMMAND_SET_4] & 0xC008) == 0x4008) return true; return false; } static inline bool ata_id_has_sense_reporting(const u16 *id) { if (!(id[ATA_ID_CFS_ENABLE_2] & BIT(15))) return false; if ((id[ATA_ID_COMMAND_SET_3] & (BIT(15) | BIT(14))) != BIT(14)) return false; return id[ATA_ID_COMMAND_SET_3] & BIT(6); } static inline bool ata_id_sense_reporting_enabled(const u16 *id) { if (!ata_id_has_sense_reporting(id)) return false; /* ata_id_has_sense_reporting() == true, word 86 must have bit 15 set */ if ((id[ATA_ID_COMMAND_SET_4] & (BIT(15) | BIT(14))) != BIT(14)) return false; return id[ATA_ID_COMMAND_SET_4] & BIT(6); } /** * * Word: 206 - SCT Command Transport * 15:12 - Vendor Specific * 11:6 - Reserved * 5 - SCT Command Transport Data Tables supported * 4 - SCT Command Transport Features Control supported * 3 - SCT Command Transport Error Recovery Control supported * 2 - SCT Command Transport Write Same supported * 1 - SCT Command Transport Long Sector Access supported * 0 - SCT Command Transport supported */ static inline bool ata_id_sct_data_tables(const u16 *id) { return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false; } static inline bool ata_id_sct_features_ctrl(const u16 *id) { return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? 
true : false; } static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id) { return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false; } static inline bool ata_id_sct_long_sector_access(const u16 *id) { return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false; } static inline bool ata_id_sct_supported(const u16 *id) { return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false; } /** * ata_id_major_version - get ATA level of drive * @id: Identify data * * Caveats: * ATA-1 considers identify optional * ATA-2 introduces mandatory identify * ATA-3 introduces word 80 and accurate reporting * * The practical impact of this is that ata_id_major_version cannot * reliably report on drives below ATA3. */ static inline unsigned int ata_id_major_version(const u16 *id) { unsigned int mver; if (id[ATA_ID_MAJOR_VER] == 0xFFFF) return 0; for (mver = 14; mver >= 1; mver--) if (id[ATA_ID_MAJOR_VER] & (1 << mver)) break; return mver; } static inline bool ata_id_is_sata(const u16 *id) { /* * See if word 93 is 0 AND drive is at least ATA-5 compatible * verifying that word 80 by casting it to a signed type -- * this trick allows us to filter out the reserved values of * 0x0000 and 0xffff along with the earlier ATA revisions... */ if (id[ATA_ID_HW_CONFIG] == 0 && (short)id[ATA_ID_MAJOR_VER] >= 0x0020) return true; return false; } static inline bool ata_id_has_tpm(const u16 *id) { /* The TPM bits are only valid on ATA8 */ if (ata_id_major_version(id) < 8) return false; if ((id[48] & 0xC000) != 0x4000) return false; return id[48] & (1 << 0); } static inline bool ata_id_has_dword_io(const u16 *id) { /* ATA 8 reuses this flag for "trusted" computing */ if (ata_id_major_version(id) > 7) return false; return id[ATA_ID_DWORD_IO] & (1 << 0); } static inline bool ata_id_has_trusted(const u16 *id) { if (ata_id_major_version(id) <= 7) return false; return id[ATA_ID_TRUSTED] & (1 << 0); } static inline bool ata_id_has_unload(const u16 *id) { if (ata_id_major_version(id) >= 7 && (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 && id[ATA_ID_CFSSE] & (1 << 13)) return true; return false; } static inline bool ata_id_has_wwn(const u16 *id) { return (id[ATA_ID_CSF_DEFAULT] & 0xC100) == 0x4100; } static inline int ata_id_form_factor(const u16 *id) { u16 val = id[168]; if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) return 0; val &= 0xf; if (val > 5) return 0; return val; } static inline int ata_id_rotation_rate(const u16 *id) { u16 val = id[217]; if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) return 0; if (val > 1 && val < 0x401) return 0; return val; } static inline bool ata_id_has_ncq_send_and_recv(const u16 *id) { return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6); } static inline bool ata_id_has_ncq_non_data(const u16 *id) { return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5); } static inline bool ata_id_has_ncq_prio(const u16 *id) { return id[ATA_ID_SATA_CAPABILITY] & BIT(12); } static inline bool ata_id_has_trim(const u16 *id) { if (ata_id_major_version(id) >= 7 && (id[ATA_ID_DATA_SET_MGMT] & 1)) return true; return false; } static inline bool ata_id_has_zero_after_trim(const u16 *id) { /* DSM supported, deterministic read, and read zero after trim set */ if (ata_id_has_trim(id) && (id[ATA_ID_ADDITIONAL_SUPP] & 0x4020) == 0x4020) return true; return false; } static inline bool ata_id_current_chs_valid(const u16 *id) { /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command has not been issued to the device then the values of id[ATA_ID_CUR_CYLS] to id[ATA_ID_CUR_SECTORS] are vendor specific. 
*/ return (id[ATA_ID_FIELD_VALID] & 1) && /* Current translation valid */ id[ATA_ID_CUR_CYLS] && /* cylinders in current translation */ id[ATA_ID_CUR_HEADS] && /* heads in current translation */ id[ATA_ID_CUR_HEADS] <= 16 && id[ATA_ID_CUR_SECTORS]; /* sectors in current translation */ } static inline bool ata_id_is_cfa(const u16 *id) { if ((id[ATA_ID_CONFIG] == 0x848A) || /* Traditional CF */ (id[ATA_ID_CONFIG] == 0x844A)) /* Delkin Devices CF */ return true; /* * CF specs don't require specific value in the word 0 anymore and yet * they forbid to report the ATA version in the word 80 and require the * CFA feature set support to be indicated in the word 83 in this case. * Unfortunately, some cards only follow either of this requirements, * and while those that don't indicate CFA feature support need some * sort of quirk list, it seems impractical for the ones that do... */ return (id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004; } static inline bool ata_id_is_ssd(const u16 *id) { return id[ATA_ID_ROT_SPEED] == 0x01; } static inline u8 ata_id_zoned_cap(const u16 *id) { return (id[ATA_ID_ADDITIONAL_SUPP] & 0x3); } static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio) { /* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */ if (pio > 4 && ata_id_is_cfa(id)) return false; /* For PIO3 and higher it is mandatory. */ if (pio > 2) return true; /* Turn it on when possible. */ return ata_id_has_iordy(id); } static inline bool ata_drive_40wire(const u16 *dev_id) { if (ata_id_is_sata(dev_id)) return false; /* SATA */ if ((dev_id[ATA_ID_HW_CONFIG] & 0xE000) == 0x6000) return false; /* 80 wire */ return true; } static inline bool ata_drive_40wire_relaxed(const u16 *dev_id) { if ((dev_id[ATA_ID_HW_CONFIG] & 0x2000) == 0x2000) return false; /* 80 wire */ return true; } static inline int atapi_cdb_len(const u16 *dev_id) { u16 tmp = dev_id[ATA_ID_CONFIG] & 0x3; switch (tmp) { case 0: return 12; case 1: return 16; default: return -1; } } static inline int atapi_command_packet_set(const u16 *dev_id) { return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; } static inline bool atapi_id_dmadir(const u16 *dev_id) { return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000); } /* * ata_id_is_lba_capacity_ok() performs a sanity check on * the claimed LBA capacity value for the device. * * Returns 1 if LBA capacity looks sensible, 0 otherwise. * * It is called only once for each device. */ static inline bool ata_id_is_lba_capacity_ok(u16 *id) { unsigned long lba_sects, chs_sects, head, tail; /* No non-LBA info .. so valid! */ if (id[ATA_ID_CYLS] == 0) return true; lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY); /* * The ATA spec tells large drives to return * C/H/S = 16383/16/63 independent of their size. * Some drives can be jumpered to use 15 heads instead of 16. * Some drives can be jumpered to use 4092 cyls instead of 16383. 
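 *
 * For scale: 16383 * 16 * 63 = 16514064 sectors, i.e. roughly 8.4 GB at
 * 512 bytes per sector, is the most C/H/S can ever express, so anything
 * larger must be taken from the LBA capacity words.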
*/ if ((id[ATA_ID_CYLS] == 16383 || (id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) && id[ATA_ID_SECTORS] == 63 && (id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) && (lba_sects >= 16383 * 63 * id[ATA_ID_HEADS])) return true; chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS]; /* perform a rough sanity check on lba_sects: within 10% is OK */ if (lba_sects - chs_sects < chs_sects/10) return true; /* some drives have the word order reversed */ head = (lba_sects >> 16) & 0xffff; tail = lba_sects & 0xffff; lba_sects = head | (tail << 16); if (lba_sects - chs_sects < chs_sects/10) { *(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects); return true; /* LBA capacity is (now) good */ } return false; /* LBA capacity value may be bad */ } static inline void ata_id_to_hd_driveid(u16 *id) { #ifdef __BIG_ENDIAN /* accessed in struct hd_driveid as 8-bit values */ id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]); id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]); id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]); id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]); id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]); /* as 32-bit values */ *(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY); *(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG); /* as 64-bit value */ *(u64 *)&id[ATA_ID_LBA_CAPACITY_2] = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); #endif } static inline bool ata_ok(u8 status) { return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR)) == ATA_DRDY); } static inline bool lba_28_ok(u64 block, u32 n_block) { /* check the ending block number: must be LESS THAN 0x0fffffff */ return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= ATA_MAX_SECTORS); } static inline bool lba_48_ok(u64 block, u32 n_block) { /* check the ending block number */ return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= ATA_MAX_SECTORS_LBA48); } #define sata_pmp_gscr_vendor(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff) #define sata_pmp_gscr_devid(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] >> 16) #define sata_pmp_gscr_rev(gscr) (((gscr)[SATA_PMP_GSCR_REV] >> 8) & 0xff) #define sata_pmp_gscr_ports(gscr) ((gscr)[SATA_PMP_GSCR_PORT_INFO] & 0xf) #endif /* __LINUX_ATA_H__ */ pci-dma-compat.h 0000644 00000007245 14722070374 0007526 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* include this file if the platform implements the dma_ DMA Mapping API * and wants to provide the pci_ DMA Mapping API in terms of it */ #ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H #define _ASM_GENERIC_PCI_DMA_COMPAT_H #include <linux/dma-mapping.h> /* This defines the direction arg to the DMA mapping routines. 
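 *
 * A minimal usage sketch (hypothetical driver code, not part of this
 * header): map a buffer for a device-bound transfer, check the mapping,
 * and unmap once the hardware is done with it.
 *
 *	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	if (pci_dma_mapping_error(pdev, bus))
 *		return -ENOMEM;
 *	... start the transfer, wait for completion ...
 *	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);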
*/ #define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL #define PCI_DMA_TODEVICE DMA_TO_DEVICE #define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE #define PCI_DMA_NONE DMA_NONE static inline void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); } static inline void * pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); } static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle); } static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) { return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction); } static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction) { dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); } static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, unsigned long offset, size_t size, int direction) { return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction); } static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, size_t size, int direction) { dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction); } static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) { return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); } static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) { dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); } static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction) { dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); } static inline void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction) { dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); } static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) { dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); } static inline void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) { dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); } static inline int pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) { return dma_mapping_error(&pdev->dev, dma_addr); } #ifdef CONFIG_PCI static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return dma_set_mask(&dev->dev, mask); } static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) { return dma_set_coherent_mask(&dev->dev, mask); } #else static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } #endif #endif pm-trace.h 0000644 00000001654 14722070374 0006441 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef PM_TRACE_H #define PM_TRACE_H #include <linux/types.h> #ifdef CONFIG_PM_TRACE #include <asm/pm-trace.h> extern int pm_trace_enabled; extern bool 
pm_trace_rtc_abused; static inline bool pm_trace_rtc_valid(void) { return !pm_trace_rtc_abused; } static inline int pm_trace_is_enabled(void) { return pm_trace_enabled; } struct device; extern void set_trace_device(struct device *); extern void generate_pm_trace(const void *tracedata, unsigned int user); extern int show_trace_dev_match(char *buf, size_t size); #define TRACE_DEVICE(dev) do { \ if (pm_trace_enabled) \ set_trace_device(dev); \ } while(0) #else static inline bool pm_trace_rtc_valid(void) { return true; } static inline int pm_trace_is_enabled(void) { return 0; } #define TRACE_DEVICE(dev) do { } while (0) #define TRACE_RESUME(dev) do { } while (0) #define TRACE_SUSPEND(dev) do { } while (0) #endif #endif genl_magic_struct.h 0000644 00000017241 14722070374 0010421 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef GENL_MAGIC_STRUCT_H #define GENL_MAGIC_STRUCT_H #ifndef GENL_MAGIC_FAMILY # error "you need to define GENL_MAGIC_FAMILY before inclusion" #endif #ifndef GENL_MAGIC_VERSION # error "you need to define GENL_MAGIC_VERSION before inclusion" #endif #ifndef GENL_MAGIC_INCLUDE_FILE # error "you need to define GENL_MAGIC_INCLUDE_FILE before inclusion" #endif #include <linux/genetlink.h> #include <linux/types.h> #define CONCAT__(a,b) a ## b #define CONCAT_(a,b) CONCAT__(a,b) extern int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void); extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void); /* * Extension of genl attribute validation policies {{{2 */ /* * @DRBD_GENLA_F_MANDATORY: By default, netlink ignores attributes it does not * know about. This flag can be set in nlattr->nla_type to indicate that this * attribute must not be ignored. * * We check and remove this flag in drbd_nla_check_mandatory() before * validating the attribute types and lengths via nla_parse_nested(). */ #define DRBD_GENLA_F_MANDATORY (1 << 14) /* * Flags specific to drbd and not visible at the netlink layer, used in * <struct>_from_attrs and <struct>_to_skb: * * @DRBD_F_REQUIRED: Attribute is required; a request without this attribute is * invalid. * * @DRBD_F_SENSITIVE: Attribute includes sensitive information and must not be * included in unprivileged get requests or broadcasts. * * @DRBD_F_INVARIANT: Attribute is set when an object is initially created, but * cannot subsequently be changed. 
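 *
 * A hedged sketch of combining these flags in a field declaration (the
 * struct and field names here are made up, not taken from drbd):
 *
 *	GENL_struct(EXAMPLE_NLA_CFG, 2, example_cfg,
 *		__u32_field(1, DRBD_F_REQUIRED | DRBD_F_INVARIANT, volume)
 *	)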
*/ #define DRBD_F_REQUIRED (1 << 0) #define DRBD_F_SENSITIVE (1 << 1) #define DRBD_F_INVARIANT (1 << 2) #define __nla_type(x) ((__u16)((x) & NLA_TYPE_MASK & ~DRBD_GENLA_F_MANDATORY)) /* }}}1 * MAGIC * multi-include macro expansion magic starts here */ /* MAGIC helpers {{{2 */ static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value) { return nla_put_64bit(skb, attrtype, sizeof(u64), &value, 0); } /* possible field types */ #define __flg_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U8, char, \ nla_get_u8, nla_put_u8, false) #define __u8_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U8, unsigned char, \ nla_get_u8, nla_put_u8, false) #define __u16_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U16, __u16, \ nla_get_u16, nla_put_u16, false) #define __u32_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U32, __u32, \ nla_get_u32, nla_put_u32, false) #define __s32_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U32, __s32, \ nla_get_u32, nla_put_u32, true) #define __u64_field(attr_nr, attr_flag, name) \ __field(attr_nr, attr_flag, name, NLA_U64, __u64, \ nla_get_u64, nla_put_u64_0pad, false) #define __str_field(attr_nr, attr_flag, name, maxlen) \ __array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \ nla_strlcpy, nla_put, false) #define __bin_field(attr_nr, attr_flag, name, maxlen) \ __array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \ nla_memcpy, nla_put, false) /* fields with default values */ #define __flg_field_def(attr_nr, attr_flag, name, default) \ __flg_field(attr_nr, attr_flag, name) #define __u32_field_def(attr_nr, attr_flag, name, default) \ __u32_field(attr_nr, attr_flag, name) #define __s32_field_def(attr_nr, attr_flag, name, default) \ __s32_field(attr_nr, attr_flag, name) #define __str_field_def(attr_nr, attr_flag, name, maxlen) \ __str_field(attr_nr, attr_flag, name, maxlen) #define GENL_op_init(args...) 
args #define GENL_doit(handler) \ .doit = handler, \ .flags = GENL_ADMIN_PERM, #define GENL_dumpit(handler) \ .dumpit = handler, \ .flags = GENL_ADMIN_PERM, /* }}}1 * Magic: define the enum symbols for genl_ops * Magic: define the enum symbols for top level attributes * Magic: define the enum symbols for nested attributes * {{{2 */ #undef GENL_struct #define GENL_struct(tag_name, tag_number, s_name, s_fields) #undef GENL_mc_group #define GENL_mc_group(group) #undef GENL_notification #define GENL_notification(op_name, op_num, mcast_group, tla_list) \ op_name = op_num, #undef GENL_op #define GENL_op(op_name, op_num, handler, tla_list) \ op_name = op_num, enum { #include GENL_MAGIC_INCLUDE_FILE }; #undef GENL_notification #define GENL_notification(op_name, op_num, mcast_group, tla_list) #undef GENL_op #define GENL_op(op_name, op_num, handler, attr_list) #undef GENL_struct #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ tag_name = tag_number, enum { #include GENL_MAGIC_INCLUDE_FILE }; #undef GENL_struct #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ enum { \ s_fields \ }; #undef __field #define __field(attr_nr, attr_flag, name, nla_type, type, \ __get, __put, __is_signed) \ T_ ## name = (__u16)(attr_nr | ((attr_flag) & DRBD_GENLA_F_MANDATORY)), #undef __array #define __array(attr_nr, attr_flag, name, nla_type, type, \ maxlen, __get, __put, __is_signed) \ T_ ## name = (__u16)(attr_nr | ((attr_flag) & DRBD_GENLA_F_MANDATORY)), #include GENL_MAGIC_INCLUDE_FILE /* }}}1 * Magic: compile time assert unique numbers for operations * Magic: -"- unique numbers for top level attributes * Magic: -"- unique numbers for nested attributes * {{{2 */ #undef GENL_struct #define GENL_struct(tag_name, tag_number, s_name, s_fields) #undef GENL_op #define GENL_op(op_name, op_num, handler, attr_list) \ case op_name: #undef GENL_notification #define GENL_notification(op_name, op_num, mcast_group, tla_list) \ case op_name: static inline void ct_assert_unique_operations(void) { switch (0) { #include GENL_MAGIC_INCLUDE_FILE case 0: ; } } #undef GENL_op #define GENL_op(op_name, op_num, handler, attr_list) #undef GENL_notification #define GENL_notification(op_name, op_num, mcast_group, tla_list) #undef GENL_struct #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ case tag_number: static inline void ct_assert_unique_top_level_attributes(void) { switch (0) { #include GENL_MAGIC_INCLUDE_FILE case 0: ; } } #undef GENL_struct #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ static inline void ct_assert_unique_ ## s_name ## _attributes(void) \ { \ switch (0) { \ s_fields \ case 0: \ ; \ } \ } #undef __field #define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \ __is_signed) \ case attr_nr: #undef __array #define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \ __get, __put, __is_signed) \ case attr_nr: #include GENL_MAGIC_INCLUDE_FILE /* }}}1 * Magic: declare structs * struct <name> { * fields * }; * {{{2 */ #undef GENL_struct #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ struct s_name { s_fields }; #undef __field #define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \ __is_signed) \ type name; #undef __array #define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \ __get, __put, __is_signed) \ type name[maxlen]; \ __u32 name ## _len; #include GENL_MAGIC_INCLUDE_FILE #undef GENL_struct #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ enum { \ s_fields \ }; #undef __field #define 
__field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \ is_signed) \ F_ ## name ## _IS_SIGNED = is_signed, #undef __array #define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \ __get, __put, is_signed) \ F_ ## name ## _IS_SIGNED = is_signed, #include GENL_MAGIC_INCLUDE_FILE /* }}}1 */ #endif /* GENL_MAGIC_STRUCT_H */ /* vim: set foldmethod=marker nofoldenable : */ ssb/ssb_driver_gige.h 0000644 00000012302 14722070374 0010645 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_DRIVER_GIGE_H_ #define LINUX_SSB_DRIVER_GIGE_H_ #include <linux/ssb/ssb.h> #include <linux/bug.h> #include <linux/pci.h> #include <linux/spinlock.h> #ifdef CONFIG_SSB_DRIVER_GIGE #define SSB_GIGE_PCIIO 0x0000 /* PCI I/O Registers (1024 bytes) */ #define SSB_GIGE_RESERVED 0x0400 /* Reserved (1024 bytes) */ #define SSB_GIGE_PCICFG 0x0800 /* PCI config space (256 bytes) */ #define SSB_GIGE_SHIM_FLUSHSTAT 0x0C00 /* PCI to OCP: Flush status control (32bit) */ #define SSB_GIGE_SHIM_FLUSHRDA 0x0C04 /* PCI to OCP: Flush read address (32bit) */ #define SSB_GIGE_SHIM_FLUSHTO 0x0C08 /* PCI to OCP: Flush timeout counter (32bit) */ #define SSB_GIGE_SHIM_BARRIER 0x0C0C /* PCI to OCP: Barrier register (32bit) */ #define SSB_GIGE_SHIM_MAOCPSI 0x0C10 /* PCI to OCP: MaocpSI Control (32bit) */ #define SSB_GIGE_SHIM_SIOCPMA 0x0C14 /* PCI to OCP: SiocpMa Control (32bit) */ /* TM Status High flags */ #define SSB_GIGE_TMSHIGH_RGMII 0x00010000 /* Have an RGMII PHY-bus */ /* TM Status Low flags */ #define SSB_GIGE_TMSLOW_TXBYPASS 0x00080000 /* TX bypass (no delay) */ #define SSB_GIGE_TMSLOW_RXBYPASS 0x00100000 /* RX bypass (no delay) */ #define SSB_GIGE_TMSLOW_DLLEN 0x01000000 /* Enable DLL controls */ /* Boardflags (low) */ #define SSB_GIGE_BFL_ROBOSWITCH 0x0010 #define SSB_GIGE_MEM_RES_NAME "SSB Broadcom 47xx GigE memory" #define SSB_GIGE_IO_RES_NAME "SSB Broadcom 47xx GigE I/O" struct ssb_gige { struct ssb_device *dev; spinlock_t lock; /* True, if the device has an RGMII bus. * False, if the device has a GMII bus. */ bool has_rgmii; /* The PCI controller device. */ struct pci_controller pci_controller; struct pci_ops pci_ops; struct resource mem_resource; struct resource io_resource; }; /* Check whether a PCI device is a SSB Gigabit Ethernet core. */ extern bool pdev_is_ssb_gige_core(struct pci_dev *pdev); /* Convert a pci_dev pointer to a ssb_gige pointer. */ static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) { if (!pdev_is_ssb_gige_core(pdev)) return NULL; return container_of(pdev->bus->ops, struct ssb_gige, pci_ops); } /* Returns whether the PHY is connected by an RGMII bus. */ static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) { struct ssb_gige *dev = pdev_to_ssb_gige(pdev); return (dev ? dev->has_rgmii : 0); } /* Returns whether we have a Roboswitch. */ static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) { struct ssb_gige *dev = pdev_to_ssb_gige(pdev); if (dev) return !!(dev->dev->bus->sprom.boardflags_lo & SSB_GIGE_BFL_ROBOSWITCH); return 0; } /* Returns whether we can only do one DMA at once. */ static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) { struct ssb_gige *dev = pdev_to_ssb_gige(pdev); if (dev) return ((dev->dev->bus->chip_id == 0x4785) && (dev->dev->bus->chip_rev < 2)); return 0; } /* Returns whether we must flush posted writes. 
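 *
 * Typical use by an Ethernet driver would look roughly like this
 * (hypothetical code, not part of this header): post the write, then
 * read it back when a flush is required.
 *
 *	writel(val, regs + off);
 *	if (ssb_gige_must_flush_posted_writes(pdev))
 *		readl(regs + off);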
*/ static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) { struct ssb_gige *dev = pdev_to_ssb_gige(pdev); if (dev) return (dev->dev->bus->chip_id == 0x4785); return 0; } /* Get the device MAC address */ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) { struct ssb_gige *dev = pdev_to_ssb_gige(pdev); if (!dev) return -ENODEV; memcpy(macaddr, dev->dev->bus->sprom.et0mac, 6); return 0; } /* Get the device phy address */ static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev) { struct ssb_gige *dev = pdev_to_ssb_gige(pdev); if (!dev) return -ENODEV; return dev->dev->bus->sprom.et0phyaddr; } extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, struct pci_dev *pdev); extern int ssb_gige_map_irq(struct ssb_device *sdev, const struct pci_dev *pdev); /* The GigE driver is not a standalone module, because we don't have support * for unregistering the driver. So we could not unload the module anyway. */ extern int ssb_gige_init(void); static inline void ssb_gige_exit(void) { /* Currently we can not unregister the GigE driver, * because we can not unregister the PCI bridge. */ BUG(); } #else /* CONFIG_SSB_DRIVER_GIGE */ /* Gigabit Ethernet driver disabled */ static inline int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, struct pci_dev *pdev) { return -ENOSYS; } static inline int ssb_gige_map_irq(struct ssb_device *sdev, const struct pci_dev *pdev) { return -ENOSYS; } static inline int ssb_gige_init(void) { return 0; } static inline void ssb_gige_exit(void) { } static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev) { return 0; } static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) { return NULL; } static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) { return 0; } static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) { return 0; } static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) { return 0; } static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) { return 0; } static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) { return -ENODEV; } static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev) { return -ENODEV; } #endif /* CONFIG_SSB_DRIVER_GIGE */ #endif /* LINUX_SSB_DRIVER_GIGE_H_ */ ssb/ssb_driver_pci.h 0000644 00000012561 14722070374 0010514 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_PCICORE_H_ #define LINUX_SSB_PCICORE_H_ #include <linux/types.h> struct pci_dev; #ifdef CONFIG_SSB_DRIVER_PCICORE /* PCI core registers. 
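 *
 * Access sketch (assumes ssb_read32/ssb_write32 from <linux/ssb/ssb.h>;
 * pc is the struct ssb_pcicore declared further down):
 *
 *	u32 ctl = ssb_read32(pc->dev, SSB_PCICORE_CTL);
 *	ctl |= SSB_PCICORE_CTL_RST_OE;	(enable driving PCI_RESET)
 *	ssb_write32(pc->dev, SSB_PCICORE_CTL, ctl);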
*/ #define SSB_PCICORE_CTL 0x0000 /* PCI Control */ #define SSB_PCICORE_CTL_RST_OE 0x00000001 /* PCI_RESET Output Enable */ #define SSB_PCICORE_CTL_RST 0x00000002 /* PCI_RESET driven out to pin */ #define SSB_PCICORE_CTL_CLK_OE 0x00000004 /* Clock gate Output Enable */ #define SSB_PCICORE_CTL_CLK 0x00000008 /* Gate for clock driven out to pin */ #define SSB_PCICORE_ARBCTL 0x0010 /* PCI Arbiter Control */ #define SSB_PCICORE_ARBCTL_INTERN 0x00000001 /* Use internal arbiter */ #define SSB_PCICORE_ARBCTL_EXTERN 0x00000002 /* Use external arbiter */ #define SSB_PCICORE_ARBCTL_PARKID 0x00000006 /* Mask, selects which agent is parked on an idle bus */ #define SSB_PCICORE_ARBCTL_PARKID_LAST 0x00000000 /* Last requestor */ #define SSB_PCICORE_ARBCTL_PARKID_4710 0x00000002 /* 4710 */ #define SSB_PCICORE_ARBCTL_PARKID_EXT0 0x00000004 /* External requestor 0 */ #define SSB_PCICORE_ARBCTL_PARKID_EXT1 0x00000006 /* External requestor 1 */ #define SSB_PCICORE_ISTAT 0x0020 /* Interrupt status */ #define SSB_PCICORE_ISTAT_INTA 0x00000001 /* PCI INTA# */ #define SSB_PCICORE_ISTAT_INTB 0x00000002 /* PCI INTB# */ #define SSB_PCICORE_ISTAT_SERR 0x00000004 /* PCI SERR# (write to clear) */ #define SSB_PCICORE_ISTAT_PERR 0x00000008 /* PCI PERR# (write to clear) */ #define SSB_PCICORE_ISTAT_PME 0x00000010 /* PCI PME# */ #define SSB_PCICORE_IMASK 0x0024 /* Interrupt mask */ #define SSB_PCICORE_IMASK_INTA 0x00000001 /* PCI INTA# */ #define SSB_PCICORE_IMASK_INTB 0x00000002 /* PCI INTB# */ #define SSB_PCICORE_IMASK_SERR 0x00000004 /* PCI SERR# */ #define SSB_PCICORE_IMASK_PERR 0x00000008 /* PCI PERR# */ #define SSB_PCICORE_IMASK_PME 0x00000010 /* PCI PME# */ #define SSB_PCICORE_MBOX 0x0028 /* Backplane to PCI Mailbox */ #define SSB_PCICORE_MBOX_F0_0 0x00000100 /* PCI function 0, INT 0 */ #define SSB_PCICORE_MBOX_F0_1 0x00000200 /* PCI function 0, INT 1 */ #define SSB_PCICORE_MBOX_F1_0 0x00000400 /* PCI function 1, INT 0 */ #define SSB_PCICORE_MBOX_F1_1 0x00000800 /* PCI function 1, INT 1 */ #define SSB_PCICORE_MBOX_F2_0 0x00001000 /* PCI function 2, INT 0 */ #define SSB_PCICORE_MBOX_F2_1 0x00002000 /* PCI function 2, INT 1 */ #define SSB_PCICORE_MBOX_F3_0 0x00004000 /* PCI function 3, INT 0 */ #define SSB_PCICORE_MBOX_F3_1 0x00008000 /* PCI function 3, INT 1 */ #define SSB_PCICORE_BCAST_ADDR 0x0050 /* Backplane Broadcast Address */ #define SSB_PCICORE_BCAST_ADDR_MASK 0x000000FF #define SSB_PCICORE_BCAST_DATA 0x0054 /* Backplane Broadcast Data */ #define SSB_PCICORE_GPIO_IN 0x0060 /* rev >= 2 only */ #define SSB_PCICORE_GPIO_OUT 0x0064 /* rev >= 2 only */ #define SSB_PCICORE_GPIO_ENABLE 0x0068 /* rev >= 2 only */ #define SSB_PCICORE_GPIO_CTL 0x006C /* rev >= 2 only */ #define SSB_PCICORE_SBTOPCI0 0x0100 /* Backplane to PCI translation 0 (sbtopci0) */ #define SSB_PCICORE_SBTOPCI0_MASK 0xFC000000 #define SSB_PCICORE_SBTOPCI1 0x0104 /* Backplane to PCI translation 1 (sbtopci1) */ #define SSB_PCICORE_SBTOPCI1_MASK 0xFC000000 #define SSB_PCICORE_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */ #define SSB_PCICORE_SBTOPCI2_MASK 0xC0000000 #define SSB_PCICORE_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */ #define SSB_PCICORE_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */ #define SSB_PCICORE_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */ #define SSB_PCICORE_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */ #define SSB_PCICORE_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */ /* SBtoPCIx */ #define SSB_PCICORE_SBTOPCI_MEM 0x00000000 #define SSB_PCICORE_SBTOPCI_IO 
0x00000001 #define SSB_PCICORE_SBTOPCI_CFG0 0x00000002 #define SSB_PCICORE_SBTOPCI_CFG1 0x00000003 #define SSB_PCICORE_SBTOPCI_PREF 0x00000004 /* Prefetch enable */ #define SSB_PCICORE_SBTOPCI_BURST 0x00000008 /* Burst enable */ #define SSB_PCICORE_SBTOPCI_MRM 0x00000020 /* Memory Read Multiple */ #define SSB_PCICORE_SBTOPCI_RC 0x00000030 /* Read Command mask (rev >= 11) */ #define SSB_PCICORE_SBTOPCI_RC_READ 0x00000000 /* Memory read */ #define SSB_PCICORE_SBTOPCI_RC_READL 0x00000010 /* Memory read line */ #define SSB_PCICORE_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */ /* PCIcore specific boardflags */ #define SSB_PCICORE_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ struct ssb_pcicore { struct ssb_device *dev; u8 setup_done:1; u8 hostmode:1; u8 cardbusmode:1; }; extern void ssb_pcicore_init(struct ssb_pcicore *pc); /* Enable IRQ routing for a specific device */ extern int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, struct ssb_device *dev); int ssb_pcicore_plat_dev_init(struct pci_dev *d); int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); #else /* CONFIG_SSB_DRIVER_PCICORE */ struct ssb_pcicore { }; static inline void ssb_pcicore_init(struct ssb_pcicore *pc) { } static inline int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, struct ssb_device *dev) { return 0; } static inline int ssb_pcicore_plat_dev_init(struct pci_dev *d) { return -ENODEV; } static inline int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return -ENODEV; } #endif /* CONFIG_SSB_DRIVER_PCICORE */ #endif /* LINUX_SSB_PCICORE_H_ */ ssb/ssb_regs.h 0000644 00000100173 14722070374 0007323 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_REGS_H_ #define LINUX_SSB_REGS_H_ /* SiliconBackplane Address Map. * All regions may not exist on all chips. */ #define SSB_SDRAM_BASE 0x00000000U /* Physical SDRAM */ #define SSB_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */ #define SSB_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */ #define SSB_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */ #define SSB_ENUM_BASE 0x18000000U /* Enumeration space base */ #define SSB_ENUM_LIMIT 0x18010000U /* Enumeration space limit */ #define SSB_FLASH2 0x1c000000U /* Flash Region 2 (region 1 shadowed here) */ #define SSB_FLASH2_SZ 0x02000000U /* Size of Flash Region 2 */ #define SSB_EXTIF_BASE 0x1f000000U /* External Interface region base address */ #define SSB_FLASH1 0x1fc00000U /* Flash Region 1 */ #define SSB_FLASH1_SZ 0x00400000U /* Size of Flash Region 1 */ #define SSB_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */ #define SSB_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */ #define SSB_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), low 32 bits */ #define SSB_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */ #define SSB_EUART (SSB_EXTIF_BASE + 0x00800000) #define SSB_LED (SSB_EXTIF_BASE + 0x00900000) /* Enumeration space constants */ #define SSB_CORE_SIZE 0x1000 /* Size of a core MMIO area */ #define SSB_MAX_NR_CORES ((SSB_ENUM_LIMIT - SSB_ENUM_BASE) / SSB_CORE_SIZE) /* mips address */ #define SSB_EJTAG 0xff200000 /* MIPS EJTAG space (2M) */ /* SSB PCI config space registers. 
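 *
 * Sketch (an assumption, not taken from this header): host drivers slide
 * the BAR0 backplane window onto a core before touching its registers:
 *
 *	pci_write_config_dword(pdev, SSB_BAR0_WIN, core_backplane_addr);
 *	(the core's registers are then reachable through the BAR0 mapping)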
*/ #define SSB_PMCSR 0x44 #define SSB_PE 0x100 #define SSB_BAR0_WIN 0x80 /* Backplane address space 0 */ #define SSB_BAR1_WIN 0x84 /* Backplane address space 1 */ #define SSB_SPROMCTL 0x88 /* SPROM control */ #define SSB_SPROMCTL_WE 0x10 /* SPROM write enable */ #define SSB_BAR1_CONTROL 0x8c /* Address space 1 burst control */ #define SSB_PCI_IRQS 0x90 /* PCI interrupts */ #define SSB_PCI_IRQMASK 0x94 /* PCI IRQ control and mask (pcirev >= 6 only) */ #define SSB_BACKPLANE_IRQS 0x98 /* Backplane Interrupts */ #define SSB_GPIO_IN 0xB0 /* GPIO Input (pcirev >= 3 only) */ #define SSB_GPIO_OUT 0xB4 /* GPIO Output (pcirev >= 3 only) */ #define SSB_GPIO_OUT_ENABLE 0xB8 /* GPIO Output Enable/Disable (pcirev >= 3 only) */ #define SSB_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ #define SSB_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ #define SSB_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ #define SSB_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ #define SSB_BAR0_MAX_RETRIES 50 /* Silicon backplane configuration register definitions */ #define SSB_IPSFLAG 0x0F08 #define SSB_IPSFLAG_IRQ1 0x0000003F /* which sbflags get routed to mips interrupt 1 */ #define SSB_IPSFLAG_IRQ1_SHIFT 0 #define SSB_IPSFLAG_IRQ2 0x00003F00 /* which sbflags get routed to mips interrupt 2 */ #define SSB_IPSFLAG_IRQ2_SHIFT 8 #define SSB_IPSFLAG_IRQ3 0x003F0000 /* which sbflags get routed to mips interrupt 3 */ #define SSB_IPSFLAG_IRQ3_SHIFT 16 #define SSB_IPSFLAG_IRQ4 0x3F000000 /* which sbflags get routed to mips interrupt 4 */ #define SSB_IPSFLAG_IRQ4_SHIFT 24 #define SSB_TPSFLAG 0x0F18 #define SSB_TPSFLAG_BPFLAG 0x0000003F /* Backplane flag # */ #define SSB_TPSFLAG_ALWAYSIRQ 0x00000040 /* IRQ is always sent on the Backplane */ #define SSB_TMERRLOGA 0x0F48 #define SSB_TMERRLOG 0x0F50 #define SSB_ADMATCH3 0x0F60 #define SSB_ADMATCH2 0x0F68 #define SSB_ADMATCH1 0x0F70 #define SSB_IMSTATE 0x0F90 /* SB Initiator Agent State */ #define SSB_IMSTATE_PC 0x0000000f /* Pipe Count */ #define SSB_IMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */ #define SSB_IMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */ #define SSB_IMSTATE_AP_TS 0x00000010 /* Use timeslices only */ #define SSB_IMSTATE_AP_TK 0x00000020 /* Use token only */ #define SSB_IMSTATE_AP_RSV 0x00000030 /* Reserved */ #define SSB_IMSTATE_IBE 0x00020000 /* In Band Error */ #define SSB_IMSTATE_TO 0x00040000 /* Timeout */ #define SSB_IMSTATE_BUSY 0x01800000 /* Busy (Backplane rev >= 2.3 only) */ #define SSB_IMSTATE_REJECT 0x02000000 /* Reject (Backplane rev >= 2.3 only) */ #define SSB_INTVEC 0x0F94 /* SB Interrupt Mask */ #define SSB_INTVEC_PCI 0x00000001 /* Enable interrupts for PCI */ #define SSB_INTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */ #define SSB_INTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */ #define SSB_INTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */ #define SSB_INTVEC_USB 0x00000010 /* Enable interrupts for usb */ #define SSB_INTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */ #define SSB_INTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */ #define SSB_TMSLOW 0x0F98 /* SB Target State Low */ #define SSB_TMSLOW_RESET 0x00000001 /* Reset */ #define SSB_TMSLOW_REJECT 0x00000002 /* Reject (Standard Backplane) */ #define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ #define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ #define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ #define 
SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ #define SSB_TMSLOW_BE 0x80000000 /* BIST Enable */ #define SSB_TMSHIGH 0x0F9C /* SB Target State High */ #define SSB_TMSHIGH_SERR 0x00000001 /* S-error */ #define SSB_TMSHIGH_INT 0x00000002 /* Interrupt */ #define SSB_TMSHIGH_BUSY 0x00000004 /* Busy */ #define SSB_TMSHIGH_TO 0x00000020 /* Timeout. Backplane rev >= 2.3 only */ #define SSB_TMSHIGH_COREFL 0x1FFF0000 /* Core specific flags */ #define SSB_TMSHIGH_COREFL_SHIFT 16 #define SSB_TMSHIGH_DMA64 0x10000000 /* 64bit DMA supported */ #define SSB_TMSHIGH_GCR 0x20000000 /* Gated Clock Request */ #define SSB_TMSHIGH_BISTF 0x40000000 /* BIST Failed */ #define SSB_TMSHIGH_BISTD 0x80000000 /* BIST Done */ #define SSB_BWA0 0x0FA0 #define SSB_IMCFGLO 0x0FA8 #define SSB_IMCFGLO_SERTO 0x00000007 /* Service timeout */ #define SSB_IMCFGLO_REQTO 0x00000070 /* Request timeout */ #define SSB_IMCFGLO_REQTO_SHIFT 4 #define SSB_IMCFGLO_CONNID 0x00FF0000 /* Connection ID */ #define SSB_IMCFGLO_CONNID_SHIFT 16 #define SSB_IMCFGHI 0x0FAC #define SSB_ADMATCH0 0x0FB0 #define SSB_TMCFGLO 0x0FB8 #define SSB_TMCFGHI 0x0FBC #define SSB_BCONFIG 0x0FC0 #define SSB_BSTATE 0x0FC8 #define SSB_ACTCFG 0x0FD8 #define SSB_FLAGST 0x0FE8 #define SSB_IDLOW 0x0FF8 #define SSB_IDLOW_CFGSP 0x00000003 /* Config Space */ #define SSB_IDLOW_ADDRNGE 0x00000038 /* Address Ranges supported */ #define SSB_IDLOW_ADDRNGE_SHIFT 3 #define SSB_IDLOW_SYNC 0x00000040 #define SSB_IDLOW_INITIATOR 0x00000080 #define SSB_IDLOW_MIBL 0x00000F00 /* Minimum Backplane latency */ #define SSB_IDLOW_MIBL_SHIFT 8 #define SSB_IDLOW_MABL 0x0000F000 /* Maximum Backplane latency */ #define SSB_IDLOW_MABL_SHIFT 12 #define SSB_IDLOW_TIF 0x00010000 /* This Initiator is first */ #define SSB_IDLOW_CCW 0x000C0000 /* Cycle counter width */ #define SSB_IDLOW_CCW_SHIFT 18 #define SSB_IDLOW_TPT 0x00F00000 /* Target ports */ #define SSB_IDLOW_TPT_SHIFT 20 #define SSB_IDLOW_INITP 0x0F000000 /* Initiator ports */ #define SSB_IDLOW_INITP_SHIFT 24 #define SSB_IDLOW_SSBREV 0xF0000000 /* Sonics Backplane Revision code */ #define SSB_IDLOW_SSBREV_22 0x00000000 /* <= 2.2 */ #define SSB_IDLOW_SSBREV_23 0x10000000 /* 2.3 */ #define SSB_IDLOW_SSBREV_24 0x40000000 /* ?? Found in BCM4328 */ #define SSB_IDLOW_SSBREV_25 0x50000000 /* ?? Not Found yet */ #define SSB_IDLOW_SSBREV_26 0x60000000 /* ?? Found in some BCM4311/2 */ #define SSB_IDLOW_SSBREV_27 0x70000000 /* ?? Found in some BCM4311/2 */ #define SSB_IDHIGH 0x0FFC /* SB Identification High */ #define SSB_IDHIGH_RCLO 0x0000000F /* Revision Code (low part) */ #define SSB_IDHIGH_CC 0x00008FF0 /* Core Code */ #define SSB_IDHIGH_CC_SHIFT 4 #define SSB_IDHIGH_RCHI 0x00007000 /* Revision Code (high part) */ #define SSB_IDHIGH_RCHI_SHIFT 8 /* yes, shift 8 is right */ #define SSB_IDHIGH_VC 0xFFFF0000 /* Vendor Code */ #define SSB_IDHIGH_VC_SHIFT 16 /* SPROM shadow area. If not otherwise noted, fields are * two bytes wide. Note that the SPROM can _only_ be read * in two-byte quantities. 
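 *
 * A reader therefore iterates u16-wise; minimal sketch (sprom_base is an
 * assumed ioremap'ed mapping of the SPROM shadow, not defined here):
 *
 *	u16 sprom[SSB_SPROMSIZE_WORDS];
 *	int i;
 *	for (i = 0; i < SSB_SPROMSIZE_WORDS; i++)
 *		sprom[i] = ioread16(sprom_base + i * sizeof(u16));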
*/ #define SSB_SPROMSIZE_WORDS 64 #define SSB_SPROMSIZE_BYTES (SSB_SPROMSIZE_WORDS * sizeof(u16)) #define SSB_SPROMSIZE_WORDS_R123 64 #define SSB_SPROMSIZE_WORDS_R4 220 #define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) #define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) #define SSB_SPROMSIZE_WORDS_R10 230 #define SSB_SPROMSIZE_WORDS_R11 234 #define SSB_SPROM_BASE1 0x1000 #define SSB_SPROM_BASE31 0x0800 #define SSB_SPROM_REVISION 0x007E #define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */ #define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */ #define SSB_SPROM_REVISION_CRC_SHIFT 8 /* SPROM Revision 1 */ #define SSB_SPROM1_SPID 0x0004 /* Subsystem Product ID for PCI */ #define SSB_SPROM1_SVID 0x0006 /* Subsystem Vendor ID for PCI */ #define SSB_SPROM1_PID 0x0008 /* Product ID for PCI */ #define SSB_SPROM1_IL0MAC 0x0048 /* 6 bytes MAC address for 802.11b/g */ #define SSB_SPROM1_ET0MAC 0x004E /* 6 bytes MAC address for Ethernet */ #define SSB_SPROM1_ET1MAC 0x0054 /* 6 bytes MAC address for 802.11a */ #define SSB_SPROM1_ETHPHY 0x005A /* Ethernet PHY settings */ #define SSB_SPROM1_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ #define SSB_SPROM1_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ #define SSB_SPROM1_ETHPHY_ET1A_SHIFT 5 #define SSB_SPROM1_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ #define SSB_SPROM1_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ #define SSB_SPROM1_BINF 0x005C /* Board info */ #define SSB_SPROM1_BINF_BREV 0x00FF /* Board Revision */ #define SSB_SPROM1_BINF_CCODE 0x0F00 /* Country Code */ #define SSB_SPROM1_BINF_CCODE_SHIFT 8 #define SSB_SPROM1_BINF_ANTBG 0x3000 /* Available B-PHY and G-PHY antennas */ #define SSB_SPROM1_BINF_ANTBG_SHIFT 12 #define SSB_SPROM1_BINF_ANTA 0xC000 /* Available A-PHY antennas */ #define SSB_SPROM1_BINF_ANTA_SHIFT 14 #define SSB_SPROM1_PA0B0 0x005E #define SSB_SPROM1_PA0B1 0x0060 #define SSB_SPROM1_PA0B2 0x0062 #define SSB_SPROM1_GPIOA 0x0064 /* General Purpose IO pins 0 and 1 */ #define SSB_SPROM1_GPIOA_P0 0x00FF /* Pin 0 */ #define SSB_SPROM1_GPIOA_P1 0xFF00 /* Pin 1 */ #define SSB_SPROM1_GPIOA_P1_SHIFT 8 #define SSB_SPROM1_GPIOB 0x0066 /* General Purpose IO pins 2 and 3 */ #define SSB_SPROM1_GPIOB_P2 0x00FF /* Pin 2 */ #define SSB_SPROM1_GPIOB_P3 0xFF00 /* Pin 3 */ #define SSB_SPROM1_GPIOB_P3_SHIFT 8 #define SSB_SPROM1_MAXPWR 0x0068 /* Power Amplifier Max Power */ #define SSB_SPROM1_MAXPWR_BG 0x00FF /* B-PHY and G-PHY (in dBm Q5.2) */ #define SSB_SPROM1_MAXPWR_A 0xFF00 /* A-PHY (in dBm Q5.2) */ #define SSB_SPROM1_MAXPWR_A_SHIFT 8 #define SSB_SPROM1_PA1B0 0x006A #define SSB_SPROM1_PA1B1 0x006C #define SSB_SPROM1_PA1B2 0x006E #define SSB_SPROM1_ITSSI 0x0070 /* Idle TSSI Target */ #define SSB_SPROM1_ITSSI_BG 0x00FF /* B-PHY and G-PHY*/ #define SSB_SPROM1_ITSSI_A 0xFF00 /* A-PHY */ #define SSB_SPROM1_ITSSI_A_SHIFT 8 #define SSB_SPROM1_BFLLO 0x0072 /* Boardflags (low 16 bits) */ #define SSB_SPROM1_AGAIN 0x0074 /* Antenna Gain (in dBm Q5.2) */ #define SSB_SPROM1_AGAIN_BG 0x00FF /* B-PHY and G-PHY */ #define SSB_SPROM1_AGAIN_BG_SHIFT 0 #define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */ #define SSB_SPROM1_AGAIN_A_SHIFT 8 #define SSB_SPROM1_CCODE 0x0076 /* SPROM Revision 2 (inherits from rev 1) */ #define SSB_SPROM2_BFLHI 0x0038 /* Boardflags (high 16 bits) */ #define SSB_SPROM2_MAXP_A 0x003A /* A-PHY Max Power */ #define SSB_SPROM2_MAXP_A_HI 0x00FF /* Max Power High */ #define SSB_SPROM2_MAXP_A_LO 0xFF00 /* Max Power Low */ #define SSB_SPROM2_MAXP_A_LO_SHIFT 8 #define SSB_SPROM2_PA1LOB0 0x003C /* A-PHY 
PowerAmplifier Low Settings */ #define SSB_SPROM2_PA1LOB1 0x003E /* A-PHY PowerAmplifier Low Settings */ #define SSB_SPROM2_PA1LOB2 0x0040 /* A-PHY PowerAmplifier Low Settings */ #define SSB_SPROM2_PA1HIB0 0x0042 /* A-PHY PowerAmplifier High Settings */ #define SSB_SPROM2_PA1HIB1 0x0044 /* A-PHY PowerAmplifier High Settings */ #define SSB_SPROM2_PA1HIB2 0x0046 /* A-PHY PowerAmplifier High Settings */ #define SSB_SPROM2_OPO 0x0078 /* OFDM Power Offset from CCK Level */ #define SSB_SPROM2_OPO_VALUE 0x00FF #define SSB_SPROM2_OPO_UNUSED 0xFF00 #define SSB_SPROM2_CCODE 0x007C /* Two char Country Code */ /* SPROM Revision 3 (inherits most data from rev 2) */ #define SSB_SPROM3_OFDMAPO 0x002C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ #define SSB_SPROM3_OFDMALPO 0x0030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ #define SSB_SPROM3_OFDMAHPO 0x0034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ #define SSB_SPROM3_GPIOLDC 0x0042 /* GPIO LED Powersave Duty Cycle (4 bytes, BigEndian) */ #define SSB_SPROM3_GPIOLDC_OFF 0x0000FF00 /* Off Count */ #define SSB_SPROM3_GPIOLDC_OFF_SHIFT 8 #define SSB_SPROM3_GPIOLDC_ON 0x00FF0000 /* On Count */ #define SSB_SPROM3_GPIOLDC_ON_SHIFT 16 #define SSB_SPROM3_IL0MAC 0x004A /* 6 bytes MAC address for 802.11b/g */ #define SSB_SPROM3_CCKPO 0x0078 /* CCK Power Offset */ #define SSB_SPROM3_CCKPO_1M 0x000F /* 1M Rate PO */ #define SSB_SPROM3_CCKPO_2M 0x00F0 /* 2M Rate PO */ #define SSB_SPROM3_CCKPO_2M_SHIFT 4 #define SSB_SPROM3_CCKPO_55M 0x0F00 /* 5.5M Rate PO */ #define SSB_SPROM3_CCKPO_55M_SHIFT 8 #define SSB_SPROM3_CCKPO_11M 0xF000 /* 11M Rate PO */ #define SSB_SPROM3_CCKPO_11M_SHIFT 12 #define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */ /* SPROM Revision 4 */ #define SSB_SPROM4_BOARDREV 0x0042 /* Board revision */ #define SSB_SPROM4_BFLLO 0x0044 /* Boardflags (low 16 bits) */ #define SSB_SPROM4_BFLHI 0x0046 /* Board Flags Hi */ #define SSB_SPROM4_BFL2LO 0x0048 /* Board flags 2 (low 16 bits) */ #define SSB_SPROM4_BFL2HI 0x004A /* Board flags 2 Hi */ #define SSB_SPROM4_IL0MAC 0x004C /* 6 byte MAC address for a/b/g/n */ #define SSB_SPROM4_CCODE 0x0052 /* Country Code (2 bytes) */ #define SSB_SPROM4_GPIOA 0x0056 /* Gen. Purpose IO # 0 and 1 */ #define SSB_SPROM4_GPIOA_P0 0x00FF /* Pin 0 */ #define SSB_SPROM4_GPIOA_P1 0xFF00 /* Pin 1 */ #define SSB_SPROM4_GPIOA_P1_SHIFT 8 #define SSB_SPROM4_GPIOB 0x0058 /* Gen. Purpose IO # 2 and 3 */ #define SSB_SPROM4_GPIOB_P2 0x00FF /* Pin 2 */ #define SSB_SPROM4_GPIOB_P3 0xFF00 /* Pin 3 */ #define SSB_SPROM4_GPIOB_P3_SHIFT 8 #define SSB_SPROM4_ETHPHY 0x005A /* Ethernet PHY settings ?? 
*/ #define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ #define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ #define SSB_SPROM4_ETHPHY_ET1A_SHIFT 5 #define SSB_SPROM4_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ #define SSB_SPROM4_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ #define SSB_SPROM4_ANTAVAIL 0x005C /* Antenna available bitfields */ #define SSB_SPROM4_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */ #define SSB_SPROM4_ANTAVAIL_BG_SHIFT 0 #define SSB_SPROM4_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */ #define SSB_SPROM4_ANTAVAIL_A_SHIFT 8 #define SSB_SPROM4_AGAIN01 0x005E /* Antenna Gain (in dBm Q5.2) */ #define SSB_SPROM4_AGAIN0 0x00FF /* Antenna 0 */ #define SSB_SPROM4_AGAIN0_SHIFT 0 #define SSB_SPROM4_AGAIN1 0xFF00 /* Antenna 1 */ #define SSB_SPROM4_AGAIN1_SHIFT 8 #define SSB_SPROM4_AGAIN23 0x0060 #define SSB_SPROM4_AGAIN2 0x00FF /* Antenna 2 */ #define SSB_SPROM4_AGAIN2_SHIFT 0 #define SSB_SPROM4_AGAIN3 0xFF00 /* Antenna 3 */ #define SSB_SPROM4_AGAIN3_SHIFT 8 #define SSB_SPROM4_TXPID2G01 0x0062 /* TX Power Index 2GHz */ #define SSB_SPROM4_TXPID2G0 0x00FF #define SSB_SPROM4_TXPID2G0_SHIFT 0 #define SSB_SPROM4_TXPID2G1 0xFF00 #define SSB_SPROM4_TXPID2G1_SHIFT 8 #define SSB_SPROM4_TXPID2G23 0x0064 /* TX Power Index 2GHz */ #define SSB_SPROM4_TXPID2G2 0x00FF #define SSB_SPROM4_TXPID2G2_SHIFT 0 #define SSB_SPROM4_TXPID2G3 0xFF00 #define SSB_SPROM4_TXPID2G3_SHIFT 8 #define SSB_SPROM4_TXPID5G01 0x0066 /* TX Power Index 5GHz middle subband */ #define SSB_SPROM4_TXPID5G0 0x00FF #define SSB_SPROM4_TXPID5G0_SHIFT 0 #define SSB_SPROM4_TXPID5G1 0xFF00 #define SSB_SPROM4_TXPID5G1_SHIFT 8 #define SSB_SPROM4_TXPID5G23 0x0068 /* TX Power Index 5GHz middle subband */ #define SSB_SPROM4_TXPID5G2 0x00FF #define SSB_SPROM4_TXPID5G2_SHIFT 0 #define SSB_SPROM4_TXPID5G3 0xFF00 #define SSB_SPROM4_TXPID5G3_SHIFT 8 #define SSB_SPROM4_TXPID5GL01 0x006A /* TX Power Index 5GHz low subband */ #define SSB_SPROM4_TXPID5GL0 0x00FF #define SSB_SPROM4_TXPID5GL0_SHIFT 0 #define SSB_SPROM4_TXPID5GL1 0xFF00 #define SSB_SPROM4_TXPID5GL1_SHIFT 8 #define SSB_SPROM4_TXPID5GL23 0x006C /* TX Power Index 5GHz low subband */ #define SSB_SPROM4_TXPID5GL2 0x00FF #define SSB_SPROM4_TXPID5GL2_SHIFT 0 #define SSB_SPROM4_TXPID5GL3 0xFF00 #define SSB_SPROM4_TXPID5GL3_SHIFT 8 #define SSB_SPROM4_TXPID5GH01 0x006E /* TX Power Index 5GHz high subband */ #define SSB_SPROM4_TXPID5GH0 0x00FF #define SSB_SPROM4_TXPID5GH0_SHIFT 0 #define SSB_SPROM4_TXPID5GH1 0xFF00 #define SSB_SPROM4_TXPID5GH1_SHIFT 8 #define SSB_SPROM4_TXPID5GH23 0x0070 /* TX Power Index 5GHz high subband */ #define SSB_SPROM4_TXPID5GH2 0x00FF #define SSB_SPROM4_TXPID5GH2_SHIFT 0 #define SSB_SPROM4_TXPID5GH3 0xFF00 #define SSB_SPROM4_TXPID5GH3_SHIFT 8 /* There are 4 blocks with power info sharing the same layout */ #define SSB_SPROM4_PWR_INFO_CORE0 0x0080 #define SSB_SPROM4_PWR_INFO_CORE1 0x00AE #define SSB_SPROM4_PWR_INFO_CORE2 0x00DC #define SSB_SPROM4_PWR_INFO_CORE3 0x010A #define SSB_SPROM4_2G_MAXP_ITSSI 0x00 /* 2 GHz ITSSI and 2 GHz Max Power */ #define SSB_SPROM4_2G_MAXP 0x00FF #define SSB_SPROM4_2G_ITSSI 0xFF00 #define SSB_SPROM4_2G_ITSSI_SHIFT 8 #define SSB_SPROM4_2G_PA_0 0x02 /* 2 GHz power amp */ #define SSB_SPROM4_2G_PA_1 0x04 #define SSB_SPROM4_2G_PA_2 0x06 #define SSB_SPROM4_2G_PA_3 0x08 #define SSB_SPROM4_5G_MAXP_ITSSI 0x0A /* 5 GHz ITSSI and 5.3 GHz Max Power */ #define SSB_SPROM4_5G_MAXP 0x00FF #define SSB_SPROM4_5G_ITSSI 0xFF00 #define SSB_SPROM4_5G_ITSSI_SHIFT 8 #define SSB_SPROM4_5GHL_MAXP 0x0C /* 5.2 GHz and 5.8 GHz Max Power */ #define 
SSB_SPROM4_5GH_MAXP 0x00FF #define SSB_SPROM4_5GL_MAXP 0xFF00 #define SSB_SPROM4_5GL_MAXP_SHIFT 8 #define SSB_SPROM4_5G_PA_0 0x0E /* 5.3 GHz power amp */ #define SSB_SPROM4_5G_PA_1 0x10 #define SSB_SPROM4_5G_PA_2 0x12 #define SSB_SPROM4_5G_PA_3 0x14 #define SSB_SPROM4_5GL_PA_0 0x16 /* 5.2 GHz power amp */ #define SSB_SPROM4_5GL_PA_1 0x18 #define SSB_SPROM4_5GL_PA_2 0x1A #define SSB_SPROM4_5GL_PA_3 0x1C #define SSB_SPROM4_5GH_PA_0 0x1E /* 5.8 GHz power amp */ #define SSB_SPROM4_5GH_PA_1 0x20 #define SSB_SPROM4_5GH_PA_2 0x22 #define SSB_SPROM4_5GH_PA_3 0x24 /* TODO: Make it deprecated */ #define SSB_SPROM4_MAXP_BG 0x0080 /* Max Power BG in path 1 */ #define SSB_SPROM4_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */ #define SSB_SPROM4_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ #define SSB_SPROM4_ITSSI_BG_SHIFT 8 #define SSB_SPROM4_MAXP_A 0x008A /* Max Power A in path 1 */ #define SSB_SPROM4_MAXP_A_MASK 0x00FF /* Mask for Max Power A */ #define SSB_SPROM4_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ #define SSB_SPROM4_ITSSI_A_SHIFT 8 #define SSB_SPROM4_PA0B0 0x0082 /* The paXbY locations are */ #define SSB_SPROM4_PA0B1 0x0084 /* only guesses */ #define SSB_SPROM4_PA0B2 0x0086 #define SSB_SPROM4_PA1B0 0x008E #define SSB_SPROM4_PA1B1 0x0090 #define SSB_SPROM4_PA1B2 0x0092 /* SPROM Revision 5 (inherits most data from rev 4) */ #define SSB_SPROM5_CCODE 0x0044 /* Country Code (2 bytes) */ #define SSB_SPROM5_BFLLO 0x004A /* Boardflags (low 16 bits) */ #define SSB_SPROM5_BFLHI 0x004C /* Board Flags Hi */ #define SSB_SPROM5_BFL2LO 0x004E /* Board flags 2 (low 16 bits) */ #define SSB_SPROM5_BFL2HI 0x0050 /* Board flags 2 Hi */ #define SSB_SPROM5_IL0MAC 0x0052 /* 6 byte MAC address for a/b/g/n */ #define SSB_SPROM5_GPIOA 0x0076 /* Gen. Purpose IO # 0 and 1 */ #define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */ #define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */ #define SSB_SPROM5_GPIOA_P1_SHIFT 8 #define SSB_SPROM5_GPIOB 0x0078 /* Gen. Purpose IO # 2 and 3 */ #define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */ #define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */ #define SSB_SPROM5_GPIOB_P3_SHIFT 8 /* SPROM Revision 8 */ #define SSB_SPROM8_BOARDREV 0x0082 /* Board revision */ #define SSB_SPROM8_BFLLO 0x0084 /* Board flags (bits 0-15) */ #define SSB_SPROM8_BFLHI 0x0086 /* Board flags (bits 16-31) */ #define SSB_SPROM8_BFL2LO 0x0088 /* Board flags (bits 32-47) */ #define SSB_SPROM8_BFL2HI 0x008A /* Board flags (bits 48-63) */ #define SSB_SPROM8_IL0MAC 0x008C /* 6 byte MAC address */ #define SSB_SPROM8_CCODE 0x0092 /* 2 byte country code */ #define SSB_SPROM8_GPIOA 0x0096 /*Gen. Purpose IO # 0 and 1 */ #define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */ #define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */ #define SSB_SPROM8_GPIOA_P1_SHIFT 8 #define SSB_SPROM8_GPIOB 0x0098 /* Gen. 
Purpose IO # 2 and 3 */ #define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */ #define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */ #define SSB_SPROM8_GPIOB_P3_SHIFT 8 #define SSB_SPROM8_LEDDC 0x009A #define SSB_SPROM8_LEDDC_ON 0xFF00 /* oncount */ #define SSB_SPROM8_LEDDC_ON_SHIFT 8 #define SSB_SPROM8_LEDDC_OFF 0x00FF /* offcount */ #define SSB_SPROM8_LEDDC_OFF_SHIFT 0 #define SSB_SPROM8_ANTAVAIL 0x009C /* Antenna available bitfields*/ #define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */ #define SSB_SPROM8_ANTAVAIL_A_SHIFT 8 #define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */ #define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0 #define SSB_SPROM8_AGAIN01 0x009E /* Antenna Gain (in dBm Q5.2) */ #define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */ #define SSB_SPROM8_AGAIN0_SHIFT 0 #define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */ #define SSB_SPROM8_AGAIN1_SHIFT 8 #define SSB_SPROM8_AGAIN23 0x00A0 #define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */ #define SSB_SPROM8_AGAIN2_SHIFT 0 #define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */ #define SSB_SPROM8_AGAIN3_SHIFT 8 #define SSB_SPROM8_TXRXC 0x00A2 #define SSB_SPROM8_TXRXC_TXCHAIN 0x000f #define SSB_SPROM8_TXRXC_TXCHAIN_SHIFT 0 #define SSB_SPROM8_TXRXC_RXCHAIN 0x00f0 #define SSB_SPROM8_TXRXC_RXCHAIN_SHIFT 4 #define SSB_SPROM8_TXRXC_SWITCH 0xff00 #define SSB_SPROM8_TXRXC_SWITCH_SHIFT 8 #define SSB_SPROM8_RSSIPARM2G 0x00A4 /* RSSI params for 2GHz */ #define SSB_SPROM8_RSSISMF2G 0x000F #define SSB_SPROM8_RSSISMC2G 0x00F0 #define SSB_SPROM8_RSSISMC2G_SHIFT 4 #define SSB_SPROM8_RSSISAV2G 0x0700 #define SSB_SPROM8_RSSISAV2G_SHIFT 8 #define SSB_SPROM8_BXA2G 0x1800 #define SSB_SPROM8_BXA2G_SHIFT 11 #define SSB_SPROM8_RSSIPARM5G 0x00A6 /* RSSI params for 5GHz */ #define SSB_SPROM8_RSSISMF5G 0x000F #define SSB_SPROM8_RSSISMC5G 0x00F0 #define SSB_SPROM8_RSSISMC5G_SHIFT 4 #define SSB_SPROM8_RSSISAV5G 0x0700 #define SSB_SPROM8_RSSISAV5G_SHIFT 8 #define SSB_SPROM8_BXA5G 0x1800 #define SSB_SPROM8_BXA5G_SHIFT 11 #define SSB_SPROM8_TRI25G 0x00A8 /* TX isolation 2.4&5.3GHz */ #define SSB_SPROM8_TRI2G 0x00FF /* TX isolation 2.4GHz */ #define SSB_SPROM8_TRI5G 0xFF00 /* TX isolation 5.3GHz */ #define SSB_SPROM8_TRI5G_SHIFT 8 #define SSB_SPROM8_TRI5GHL 0x00AA /* TX isolation 5.2/5.8GHz */ #define SSB_SPROM8_TRI5GL 0x00FF /* TX isolation 5.2GHz */ #define SSB_SPROM8_TRI5GH 0xFF00 /* TX isolation 5.8GHz */ #define SSB_SPROM8_TRI5GH_SHIFT 8 #define SSB_SPROM8_RXPO 0x00AC /* RX power offsets */ #define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ #define SSB_SPROM8_RXPO2G_SHIFT 0 #define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ #define SSB_SPROM8_RXPO5G_SHIFT 8 #define SSB_SPROM8_FEM2G 0x00AE #define SSB_SPROM8_FEM5G 0x00B0 #define SSB_SROM8_FEM_TSSIPOS 0x0001 #define SSB_SROM8_FEM_TSSIPOS_SHIFT 0 #define SSB_SROM8_FEM_EXTPA_GAIN 0x0006 #define SSB_SROM8_FEM_EXTPA_GAIN_SHIFT 1 #define SSB_SROM8_FEM_PDET_RANGE 0x00F8 #define SSB_SROM8_FEM_PDET_RANGE_SHIFT 3 #define SSB_SROM8_FEM_TR_ISO 0x0700 #define SSB_SROM8_FEM_TR_ISO_SHIFT 8 #define SSB_SROM8_FEM_ANTSWLUT 0xF800 #define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11 #define SSB_SPROM8_THERMAL 0x00B2 #define SSB_SPROM8_THERMAL_OFFSET 0x00ff #define SSB_SPROM8_THERMAL_OFFSET_SHIFT 0 #define SSB_SPROM8_THERMAL_TRESH 0xff00 #define SSB_SPROM8_THERMAL_TRESH_SHIFT 8 /* Temp sense related entries */ #define SSB_SPROM8_RAWTS 0x00B4 #define SSB_SPROM8_RAWTS_RAWTEMP 0x01ff #define SSB_SPROM8_RAWTS_RAWTEMP_SHIFT 0 #define SSB_SPROM8_RAWTS_MEASPOWER 0xfe00 #define SSB_SPROM8_RAWTS_MEASPOWER_SHIFT 9 #define SSB_SPROM8_OPT_CORRX 0x00B6 
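/* Editor's illustrative sketch, not part of the original register list:
 * every multi-bit SPROM field in this file is described by a MASK define
 * plus a matching _SHIFT define, so a field is always decoded as
 * (word & MASK) >> SHIFT. The helper below is a hypothetical example
 * (its name and the caller-supplied AGAIN01 word are assumptions, and it
 * relies on <linux/types.h> being in effect, as it is when this header
 * is pulled in via ssb.h); it extracts the antenna-1 gain byte.
 */
static inline u8 ssb_sprom8_example_again1(u16 again01)
{
	/* Isolate the antenna-1 byte (bits 15:8) and shift it down to bit 0. */
	return (again01 & SSB_SPROM8_AGAIN1) >> SSB_SPROM8_AGAIN1_SHIFT;
}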
#define SSB_SPROM8_OPT_CORRX_TEMP_SLOPE 0x00ff #define SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT 0 #define SSB_SPROM8_OPT_CORRX_TEMPCORRX 0xfc00 #define SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT 10 #define SSB_SPROM8_OPT_CORRX_TEMP_OPTION 0x0300 #define SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT 8 /* FOC: frequency offset correction, HWIQ: H/W IQCAL enable, IQSWP: IQ CAL swap disable */ #define SSB_SPROM8_HWIQ_IQSWP 0x00B8 #define SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR 0x000f #define SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT 0 #define SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP 0x0010 #define SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT 4 #define SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL 0x0020 #define SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT 5 #define SSB_SPROM8_TEMPDELTA 0x00BC #define SSB_SPROM8_TEMPDELTA_PHYCAL 0x00ff #define SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT 0 #define SSB_SPROM8_TEMPDELTA_PERIOD 0x0f00 #define SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT 8 #define SSB_SPROM8_TEMPDELTA_HYSTERESIS 0xf000 #define SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT 12 /* There are 4 blocks with power info sharing the same layout */ #define SSB_SROM8_PWR_INFO_CORE0 0x00C0 #define SSB_SROM8_PWR_INFO_CORE1 0x00E0 #define SSB_SROM8_PWR_INFO_CORE2 0x0100 #define SSB_SROM8_PWR_INFO_CORE3 0x0120 #define SSB_SROM8_2G_MAXP_ITSSI 0x00 #define SSB_SPROM8_2G_MAXP 0x00FF #define SSB_SPROM8_2G_ITSSI 0xFF00 #define SSB_SPROM8_2G_ITSSI_SHIFT 8 #define SSB_SROM8_2G_PA_0 0x02 /* 2GHz power amp settings */ #define SSB_SROM8_2G_PA_1 0x04 #define SSB_SROM8_2G_PA_2 0x06 #define SSB_SROM8_5G_MAXP_ITSSI 0x08 /* 5GHz ITSSI and 5.3GHz Max Power */ #define SSB_SPROM8_5G_MAXP 0x00FF #define SSB_SPROM8_5G_ITSSI 0xFF00 #define SSB_SPROM8_5G_ITSSI_SHIFT 8 #define SSB_SPROM8_5GHL_MAXP 0x0A /* 5.2GHz and 5.8GHz Max Power */ #define SSB_SPROM8_5GH_MAXP 0x00FF #define SSB_SPROM8_5GL_MAXP 0xFF00 #define SSB_SPROM8_5GL_MAXP_SHIFT 8 #define SSB_SROM8_5G_PA_0 0x0C /* 5.3GHz power amp settings */ #define SSB_SROM8_5G_PA_1 0x0E #define SSB_SROM8_5G_PA_2 0x10 #define SSB_SROM8_5GL_PA_0 0x12 /* 5.2GHz power amp settings */ #define SSB_SROM8_5GL_PA_1 0x14 #define SSB_SROM8_5GL_PA_2 0x16 #define SSB_SROM8_5GH_PA_0 0x18 /* 5.8GHz power amp settings */ #define SSB_SROM8_5GH_PA_1 0x1A #define SSB_SROM8_5GH_PA_2 0x1C /* TODO: Make it deprecated */ #define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */ #define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ #define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ #define SSB_SPROM8_ITSSI_BG_SHIFT 8 #define SSB_SPROM8_PA0B0 0x00C2 /* 2GHz power amp settings */ #define SSB_SPROM8_PA0B1 0x00C4 #define SSB_SPROM8_PA0B2 0x00C6 #define SSB_SPROM8_MAXP_A 0x00C8 /* Max Power 5.3GHz */ #define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power 5.3GHz */ #define SSB_SPROM8_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ #define SSB_SPROM8_ITSSI_A_SHIFT 8 #define SSB_SPROM8_MAXP_AHL 0x00CA /* Max Power 5.2/5.8GHz */ #define SSB_SPROM8_MAXP_AH_MASK 0x00FF /* Mask for Max Power 5.8GHz */ #define SSB_SPROM8_MAXP_AL_MASK 0xFF00 /* Mask for Max Power 5.2GHz */ #define SSB_SPROM8_MAXP_AL_SHIFT 8 #define SSB_SPROM8_PA1B0 0x00CC /* 5.3GHz power amp settings */ #define SSB_SPROM8_PA1B1 0x00CE #define SSB_SPROM8_PA1B2 0x00D0 #define SSB_SPROM8_PA1LOB0 0x00D2 /* 5.2GHz power amp settings */ #define SSB_SPROM8_PA1LOB1 0x00D4 #define SSB_SPROM8_PA1LOB2 0x00D6 #define SSB_SPROM8_PA1HIB0 0x00D8 /* 5.8GHz power amp settings */ #define SSB_SPROM8_PA1HIB1 0x00DA #define SSB_SPROM8_PA1HIB2 0x00DC #define SSB_SPROM8_CCK2GPO 0x0140 /* CCK power offset */ #define 
SSB_SPROM8_OFDM2GPO 0x0142 /* 2.4GHz OFDM power offset */ #define SSB_SPROM8_OFDM5GPO 0x0146 /* 5.3GHz OFDM power offset */ #define SSB_SPROM8_OFDM5GLPO 0x014A /* 5.2GHz OFDM power offset */ #define SSB_SPROM8_OFDM5GHPO 0x014E /* 5.8GHz OFDM power offset */ #define SSB_SPROM8_2G_MCSPO 0x0152 #define SSB_SPROM8_5G_MCSPO 0x0162 #define SSB_SPROM8_5GL_MCSPO 0x0172 #define SSB_SPROM8_5GH_MCSPO 0x0182 #define SSB_SPROM8_CDDPO 0x0192 #define SSB_SPROM8_STBCPO 0x0194 #define SSB_SPROM8_BW40PO 0x0196 #define SSB_SPROM8_BWDUPPO 0x0198 /* Values for boardflags_lo read from SPROM */ #define SSB_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistence */ #define SSB_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */ #define SSB_BFL_AIRLINEMODE 0x0004 /* implements GPIO 13 radio disable indication */ #define SSB_BFL_RSSI 0x0008 /* software calculates nrssi slope. */ #define SSB_BFL_ENETSPI 0x0010 /* has ephy roboswitch spi */ #define SSB_BFL_XTAL_NOSLOW 0x0020 /* no slow clock available */ #define SSB_BFL_CCKHIPWR 0x0040 /* can do high power CCK transmission */ #define SSB_BFL_ENETADM 0x0080 /* has ADMtek switch */ #define SSB_BFL_ENETVLAN 0x0100 /* can do vlan */ #define SSB_BFL_AFTERBURNER 0x0200 /* supports Afterburner mode */ #define SSB_BFL_NOPCI 0x0400 /* board leaves PCI floating */ #define SSB_BFL_FEM 0x0800 /* supports the Front End Module */ #define SSB_BFL_EXTLNA 0x1000 /* has an external LNA */ #define SSB_BFL_HGPA 0x2000 /* has high gain PA */ #define SSB_BFL_BTCMOD 0x4000 /* BFL_BTCOEXIST is given in alternate GPIOs */ #define SSB_BFL_ALTIQ 0x8000 /* alternate I/Q settings */ /* Values for boardflags_hi read from SPROM */ #define SSB_BFH_NOPA 0x0001 /* has no PA */ #define SSB_BFH_RSSIINV 0x0002 /* RSSI uses positive slope (not TSSI) */ #define SSB_BFH_PAREF 0x0004 /* uses the PARef LDO */ #define SSB_BFH_3TSWITCH 0x0008 /* uses a triple throw switch shared with bluetooth */ #define SSB_BFH_PHASESHIFT 0x0010 /* can support phase shifter */ #define SSB_BFH_BUCKBOOST 0x0020 /* has buck/booster */ #define SSB_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna with bluetooth */ /* Values for boardflags2_lo read from SPROM */ #define SSB_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */ #define SSB_BFL2_APLL_WAR 0x0002 /* alternative A-band PLL settings implemented */ #define SSB_BFL2_TXPWRCTRL_EN 0x0004 /* permits enabling TX Power Control */ #define SSB_BFL2_2X4_DIV 0x0008 /* 2x4 diversity switch */ #define SSB_BFL2_5G_PWRGAIN 0x0010 /* supports 5G band power gain */ #define SSB_BFL2_PCIEWAR_OVR 0x0020 /* overrides ASPM and Clkreq settings */ #define SSB_BFL2_CAESERS_BRD 0x0040 /* is Caesers board (unused) */ #define SSB_BFL2_BTC3WIRE 0x0080 /* uses 3-wire bluetooth coexist */ #define SSB_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */ #define SSB_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */ #define SSB_BFL2_GPLL_WAR 0x0400 /* alternative G-band PLL settings implemented */ /* Values for SSB_SPROM1_BINF_CCODE */ enum { SSB_SPROM1CCODE_WORLD = 0, SSB_SPROM1CCODE_THAILAND, SSB_SPROM1CCODE_ISRAEL, SSB_SPROM1CCODE_JORDAN, SSB_SPROM1CCODE_CHINA, SSB_SPROM1CCODE_JAPAN, SSB_SPROM1CCODE_USA_CANADA_ANZ, SSB_SPROM1CCODE_EUROPE, SSB_SPROM1CCODE_USA_LOW, SSB_SPROM1CCODE_JAPAN_HIGH, SSB_SPROM1CCODE_ALL, SSB_SPROM1CCODE_NONE, }; /* Address-Match values and masks (SSB_ADMATCHxxx) */ #define SSB_ADM_TYPE 0x00000003 /* Address type */ #define SSB_ADM_TYPE0 0 #define SSB_ADM_TYPE1 1 #define SSB_ADM_TYPE2 2 #define SSB_ADM_AD64 0x00000004 #define 
SSB_ADM_SZ0 0x000000F8 /* Type0 size */ #define SSB_ADM_SZ0_SHIFT 3 #define SSB_ADM_SZ1 0x000001F8 /* Type1 size */ #define SSB_ADM_SZ1_SHIFT 3 #define SSB_ADM_SZ2 0x000001F8 /* Type2 size */ #define SSB_ADM_SZ2_SHIFT 3 #define SSB_ADM_EN 0x00000400 /* Enable */ #define SSB_ADM_NEG 0x00000800 /* Negative decode */ #define SSB_ADM_BASE0 0xFFFFFF00 /* Type0 base address */ #define SSB_ADM_BASE0_SHIFT 8 #define SSB_ADM_BASE1 0xFFFFF000 /* Type1 base address for the core */ #define SSB_ADM_BASE1_SHIFT 12 #define SSB_ADM_BASE2 0xFFFF0000 /* Type2 base address for the core */ #define SSB_ADM_BASE2_SHIFT 16 #endif /* LINUX_SSB_REGS_H_ */ ssb/ssb_embedded.h 0000644 00000001204 14722070374 0010107 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_EMBEDDED_H_ #define LINUX_SSB_EMBEDDED_H_ #include <linux/types.h> #include <linux/ssb/ssb.h> extern int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks); /* Generic GPIO API */ u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask); u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value); u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value); u32 ssb_gpio_control(struct ssb_bus *bus, u32 mask, u32 value); u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value); u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value); #endif /* LINUX_SSB_EMBEDDED_H_ */ ssb/ssb.h 0000644 00000050321 14722070374 0006302 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_H_ #define LINUX_SSB_H_ #include <linux/device.h> #include <linux/list.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/gpio.h> #include <linux/mod_devicetable.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/ssb/ssb_regs.h> struct pcmcia_device; struct ssb_bus; struct ssb_driver; struct ssb_sprom_core_pwr_info { u8 itssi_2g, itssi_5g; u8 maxpwr_2g, maxpwr_5gl, maxpwr_5g, maxpwr_5gh; u16 pa_2g[4], pa_5gl[4], pa_5g[4], pa_5gh[4]; }; struct ssb_sprom { u8 revision; u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */ u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */ u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */ u8 et2mac[6] __aligned(sizeof(u16)); /* MAC address for extra Ethernet */ u8 et0phyaddr; /* MII address for enet0 */ u8 et1phyaddr; /* MII address for enet1 */ u8 et2phyaddr; /* MII address for enet2 */ u8 et0mdcport; /* MDIO for enet0 */ u8 et1mdcport; /* MDIO for enet1 */ u8 et2mdcport; /* MDIO for enet2 */ u16 dev_id; /* Device ID overriding e.g. PCI ID */ u16 board_rev; /* Board revision number from SPROM. */ u16 board_num; /* Board number from SPROM. */ u16 board_type; /* Board type from SPROM. 
*/ u8 country_code; /* Country Code */ char alpha2[2]; /* Country Code as two chars like EU or US */ u8 leddc_on_time; /* LED Powersave Duty Cycle On Count */ u8 leddc_off_time; /* LED Powersave Duty Cycle Off Count */ u8 ant_available_a; /* 5GHz (A-PHY) antenna available bits (up to 4) */ u8 ant_available_bg; /* 2.4GHz (B/G-PHY) antenna available bits (up to 4) */ u16 pa0b0; u16 pa0b1; u16 pa0b2; u16 pa1b0; u16 pa1b1; u16 pa1b2; u16 pa1lob0; u16 pa1lob1; u16 pa1lob2; u16 pa1hib0; u16 pa1hib1; u16 pa1hib2; u8 gpio0; /* GPIO pin 0 */ u8 gpio1; /* GPIO pin 1 */ u8 gpio2; /* GPIO pin 2 */ u8 gpio3; /* GPIO pin 3 */ u8 maxpwr_bg; /* 2.4GHz Amplifier Max Power (in dBm Q5.2) */ u8 maxpwr_al; /* 5.2GHz Amplifier Max Power (in dBm Q5.2) */ u8 maxpwr_a; /* 5.3GHz Amplifier Max Power (in dBm Q5.2) */ u8 maxpwr_ah; /* 5.8GHz Amplifier Max Power (in dBm Q5.2) */ u8 itssi_a; /* Idle TSSI Target for A-PHY */ u8 itssi_bg; /* Idle TSSI Target for B/G-PHY */ u8 tri2g; /* 2.4GHz TX isolation */ u8 tri5gl; /* 5.2GHz TX isolation */ u8 tri5g; /* 5.3GHz TX isolation */ u8 tri5gh; /* 5.8GHz TX isolation */ u8 txpid2g[4]; /* 2GHz TX power index */ u8 txpid5gl[4]; /* 4.9 - 5.1GHz TX power index */ u8 txpid5g[4]; /* 5.1 - 5.5GHz TX power index */ u8 txpid5gh[4]; /* 5.5 - ...GHz TX power index */ s8 rxpo2g; /* 2GHz RX power offset */ s8 rxpo5g; /* 5GHz RX power offset */ u8 rssisav2g; /* 2GHz RSSI params */ u8 rssismc2g; u8 rssismf2g; u8 bxa2g; /* 2GHz BX arch */ u8 rssisav5g; /* 5GHz RSSI params */ u8 rssismc5g; u8 rssismf5g; u8 bxa5g; /* 5GHz BX arch */ u16 cck2gpo; /* CCK power offset */ u32 ofdm2gpo; /* 2.4GHz OFDM power offset */ u32 ofdm5glpo; /* 5.2GHz OFDM power offset */ u32 ofdm5gpo; /* 5.3GHz OFDM power offset */ u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */ u32 boardflags; u32 boardflags2; u32 boardflags3; /* TODO: Switch all drivers to new u32 fields and drop below ones */ u16 boardflags_lo; /* Board flags (bits 0-15) */ u16 boardflags_hi; /* Board flags (bits 16-31) */ u16 boardflags2_lo; /* Board flags (bits 32-47) */ u16 boardflags2_hi; /* Board flags (bits 48-63) */ struct ssb_sprom_core_pwr_info core_pwr_info[4]; /* Antenna gain values for up to 4 antennas * on each band. Values in dBm/4 (Q5.2). Negative gain means the * loss in the connectors is bigger than the gain. 
*/ struct { s8 a0, a1, a2, a3; } antenna_gain; struct { struct { u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; } ghz2; struct { u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; } ghz5; } fem; u16 mcs2gpo[8]; u16 mcs5gpo[8]; u16 mcs5glpo[8]; u16 mcs5ghpo[8]; u8 opo; u8 rxgainerr2ga[3]; u8 rxgainerr5gla[3]; u8 rxgainerr5gma[3]; u8 rxgainerr5gha[3]; u8 rxgainerr5gua[3]; u8 noiselvl2ga[3]; u8 noiselvl5gla[3]; u8 noiselvl5gma[3]; u8 noiselvl5gha[3]; u8 noiselvl5gua[3]; u8 regrev; u8 txchain; u8 rxchain; u8 antswitch; u16 cddpo; u16 stbcpo; u16 bw40po; u16 bwduppo; u8 tempthresh; u8 tempoffset; u16 rawtempsense; u8 measpower; u8 tempsense_slope; u8 tempcorrx; u8 tempsense_option; u8 freqoffset_corr; u8 iqcal_swp_dis; u8 hw_iqcal_en; u8 elna2g; u8 elna5g; u8 phycal_tempdelta; u8 temps_period; u8 temps_hysteresis; u8 measpower1; u8 measpower2; u8 pcieingress_war; /* power per rate from sromrev 9 */ u16 cckbw202gpo; u16 cckbw20ul2gpo; u32 legofdmbw202gpo; u32 legofdmbw20ul2gpo; u32 legofdmbw205glpo; u32 legofdmbw20ul5glpo; u32 legofdmbw205gmpo; u32 legofdmbw20ul5gmpo; u32 legofdmbw205ghpo; u32 legofdmbw20ul5ghpo; u32 mcsbw202gpo; u32 mcsbw20ul2gpo; u32 mcsbw402gpo; u32 mcsbw205glpo; u32 mcsbw20ul5glpo; u32 mcsbw405glpo; u32 mcsbw205gmpo; u32 mcsbw20ul5gmpo; u32 mcsbw405gmpo; u32 mcsbw205ghpo; u32 mcsbw20ul5ghpo; u32 mcsbw405ghpo; u16 mcs32po; u16 legofdm40duppo; u8 sar2g; u8 sar5g; }; /* Information about the PCB the circuitry is soldered on. */ struct ssb_boardinfo { u16 vendor; u16 type; }; struct ssb_device; /* Lowlevel read/write operations on the device MMIO. * Internal, don't use that outside of ssb. */ struct ssb_bus_ops { u8 (*read8)(struct ssb_device *dev, u16 offset); u16 (*read16)(struct ssb_device *dev, u16 offset); u32 (*read32)(struct ssb_device *dev, u16 offset); void (*write8)(struct ssb_device *dev, u16 offset, u8 value); void (*write16)(struct ssb_device *dev, u16 offset, u16 value); void (*write32)(struct ssb_device *dev, u16 offset, u32 value); #ifdef CONFIG_SSB_BLOCKIO void (*block_read)(struct ssb_device *dev, void *buffer, size_t count, u16 offset, u8 reg_width); void (*block_write)(struct ssb_device *dev, const void *buffer, size_t count, u16 offset, u8 reg_width); #endif }; /* Core-ID values. 
*/ #define SSB_DEV_CHIPCOMMON 0x800 #define SSB_DEV_ILINE20 0x801 #define SSB_DEV_SDRAM 0x803 #define SSB_DEV_PCI 0x804 #define SSB_DEV_MIPS 0x805 #define SSB_DEV_ETHERNET 0x806 #define SSB_DEV_V90 0x807 #define SSB_DEV_USB11_HOSTDEV 0x808 #define SSB_DEV_ADSL 0x809 #define SSB_DEV_ILINE100 0x80A #define SSB_DEV_IPSEC 0x80B #define SSB_DEV_PCMCIA 0x80D #define SSB_DEV_INTERNAL_MEM 0x80E #define SSB_DEV_MEMC_SDRAM 0x80F #define SSB_DEV_EXTIF 0x811 #define SSB_DEV_80211 0x812 #define SSB_DEV_MIPS_3302 0x816 #define SSB_DEV_USB11_HOST 0x817 #define SSB_DEV_USB11_DEV 0x818 #define SSB_DEV_USB20_HOST 0x819 #define SSB_DEV_USB20_DEV 0x81A #define SSB_DEV_SDIO_HOST 0x81B #define SSB_DEV_ROBOSWITCH 0x81C #define SSB_DEV_PARA_ATA 0x81D #define SSB_DEV_SATA_XORDMA 0x81E #define SSB_DEV_ETHERNET_GBIT 0x81F #define SSB_DEV_PCIE 0x820 #define SSB_DEV_MIMO_PHY 0x821 #define SSB_DEV_SRAM_CTRLR 0x822 #define SSB_DEV_MINI_MACPHY 0x823 #define SSB_DEV_ARM_1176 0x824 #define SSB_DEV_ARM_7TDMI 0x825 #define SSB_DEV_ARM_CM3 0x82A /* Vendor-ID values */ #define SSB_VENDOR_BROADCOM 0x4243 /* Some kernel subsystems poke with dev->drvdata, so we must use the * following ugly workaround to get from struct device to struct ssb_device */ struct __ssb_dev_wrapper { struct device dev; struct ssb_device *sdev; }; struct ssb_device { /* Having a copy of the ops pointer in each dev struct * is an optimization. */ const struct ssb_bus_ops *ops; struct device *dev, *dma_dev; struct ssb_bus *bus; struct ssb_device_id id; u8 core_index; unsigned int irq; /* Internal-only stuff follows. */ void *drvdata; /* Per-device data */ void *devtypedata; /* Per-devicetype (eg 802.11) data */ }; /* Go from struct device to struct ssb_device. */ static inline struct ssb_device * dev_to_ssb_dev(struct device *dev) { struct __ssb_dev_wrapper *wrap; wrap = container_of(dev, struct __ssb_dev_wrapper, dev); return wrap->sdev; } /* Device specific user data */ static inline void ssb_set_drvdata(struct ssb_device *dev, void *data) { dev->drvdata = data; } static inline void * ssb_get_drvdata(struct ssb_device *dev) { return dev->drvdata; } /* Devicetype specific user data. 
This is per device-type (not per device) */ void ssb_set_devtypedata(struct ssb_device *dev, void *data); static inline void * ssb_get_devtypedata(struct ssb_device *dev) { return dev->devtypedata; } struct ssb_driver { const char *name; const struct ssb_device_id *id_table; int (*probe)(struct ssb_device *dev, const struct ssb_device_id *id); void (*remove)(struct ssb_device *dev); int (*suspend)(struct ssb_device *dev, pm_message_t state); int (*resume)(struct ssb_device *dev); void (*shutdown)(struct ssb_device *dev); struct device_driver drv; }; #define drv_to_ssb_drv(_drv) container_of(_drv, struct ssb_driver, drv) extern int __ssb_driver_register(struct ssb_driver *drv, struct module *owner); #define ssb_driver_register(drv) \ __ssb_driver_register(drv, THIS_MODULE) extern void ssb_driver_unregister(struct ssb_driver *drv); enum ssb_bustype { SSB_BUSTYPE_SSB, /* This SSB bus is the system bus */ SSB_BUSTYPE_PCI, /* SSB is connected to PCI bus */ SSB_BUSTYPE_PCMCIA, /* SSB is connected to PCMCIA bus */ SSB_BUSTYPE_SDIO, /* SSB is connected to SDIO bus */ }; /* board_vendor */ #define SSB_BOARDVENDOR_BCM 0x14E4 /* Broadcom */ #define SSB_BOARDVENDOR_DELL 0x1028 /* Dell */ #define SSB_BOARDVENDOR_HP 0x0E11 /* HP */ /* board_type */ #define SSB_BOARD_BCM94301CB 0x0406 #define SSB_BOARD_BCM94301MP 0x0407 #define SSB_BOARD_BU4309 0x040A #define SSB_BOARD_BCM94309CB 0x040B #define SSB_BOARD_BCM4309MP 0x040C #define SSB_BOARD_BU4306 0x0416 #define SSB_BOARD_BCM94306MP 0x0418 #define SSB_BOARD_BCM4309G 0x0421 #define SSB_BOARD_BCM4306CB 0x0417 #define SSB_BOARD_BCM94306PC 0x0425 /* pcmcia 3.3v 4306 card */ #define SSB_BOARD_BCM94306CBSG 0x042B /* with SiGe PA */ #define SSB_BOARD_PCSG94306 0x042D /* with SiGe PA */ #define SSB_BOARD_BU4704SD 0x042E /* with sdram */ #define SSB_BOARD_BCM94704AGR 0x042F /* dual 11a/11g Router */ #define SSB_BOARD_BCM94308MP 0x0430 /* 11a-only minipci */ #define SSB_BOARD_BU4318 0x0447 #define SSB_BOARD_CB4318 0x0448 #define SSB_BOARD_MPG4318 0x0449 #define SSB_BOARD_MP4318 0x044A #define SSB_BOARD_SD4318 0x044B #define SSB_BOARD_BCM94306P 0x044C /* with SiGe */ #define SSB_BOARD_BCM94303MP 0x044E #define SSB_BOARD_BCM94306MPM 0x0450 #define SSB_BOARD_BCM94306MPL 0x0453 #define SSB_BOARD_PC4303 0x0454 /* pcmcia */ #define SSB_BOARD_BCM94306MPLNA 0x0457 #define SSB_BOARD_BCM94306MPH 0x045B #define SSB_BOARD_BCM94306PCIV 0x045C #define SSB_BOARD_BCM94318MPGH 0x0463 #define SSB_BOARD_BU4311 0x0464 #define SSB_BOARD_BCM94311MC 0x0465 #define SSB_BOARD_BCM94311MCAG 0x0466 /* 4321 boards */ #define SSB_BOARD_BU4321 0x046B #define SSB_BOARD_BU4321E 0x047C #define SSB_BOARD_MP4321 0x046C #define SSB_BOARD_CB2_4321 0x046D #define SSB_BOARD_CB2_4321_AG 0x0066 #define SSB_BOARD_MC4321 0x046E /* 4325 boards */ #define SSB_BOARD_BCM94325DEVBU 0x0490 #define SSB_BOARD_BCM94325BGABU 0x0491 #define SSB_BOARD_BCM94325SDGWB 0x0492 #define SSB_BOARD_BCM94325SDGMDL 0x04AA #define SSB_BOARD_BCM94325SDGMDL2 0x04C6 #define SSB_BOARD_BCM94325SDGMDL3 0x04C9 #define SSB_BOARD_BCM94325SDABGWBA 0x04E1 /* 4322 boards */ #define SSB_BOARD_BCM94322MC 0x04A4 #define SSB_BOARD_BCM94322USB 0x04A8 /* dualband */ #define SSB_BOARD_BCM94322HM 0x04B0 #define SSB_BOARD_BCM94322USB2D 0x04Bf /* single band discrete front end */ /* 4312 boards */ #define SSB_BOARD_BU4312 0x048A #define SSB_BOARD_BCM4312MCGSG 0x04B5 /* chip_package */ #define SSB_CHIPPACK_BCM4712S 1 /* Small 200pin 4712 */ #define SSB_CHIPPACK_BCM4712M 2 /* Medium 225pin 4712 */ #define SSB_CHIPPACK_BCM4712L 0 /* Large 340pin 4712 */ 
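/* Editor's illustrative sketch, not part of the original header: the
 * minimal shape of an SSB driver built from the hooks declared above,
 * guarded with #if 0 so it is never compiled. The driver name, the
 * trivial probe/remove bodies and the module_init() wiring (which would
 * additionally need <linux/module.h>) are assumptions for the example;
 * SSB_DEVICE() and SSB_ANY_REV come from <linux/mod_devicetable.h>.
 */
#if 0
static const struct ssb_device_id example_tbl[] = {
	/* Match any revision of the Broadcom 802.11 core. */
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, SSB_ANY_REV),
	{},
};

static int example_probe(struct ssb_device *dev,
			 const struct ssb_device_id *id)
{
	ssb_set_drvdata(dev, NULL);	/* per-device state would go here */
	return 0;
}

static void example_remove(struct ssb_device *dev)
{
	ssb_set_drvdata(dev, NULL);
}

static struct ssb_driver example_driver = {
	.name		= "ssb-example",
	.id_table	= example_tbl,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return ssb_driver_register(&example_driver);
}
module_init(example_init);
#endif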
#include <linux/ssb/ssb_driver_chipcommon.h> #include <linux/ssb/ssb_driver_mips.h> #include <linux/ssb/ssb_driver_extif.h> #include <linux/ssb/ssb_driver_pci.h> struct ssb_bus { /* The MMIO area. */ void __iomem *mmio; const struct ssb_bus_ops *ops; /* The core currently mapped into the MMIO window. * Not valid on all host-buses. So don't use outside of SSB. */ struct ssb_device *mapped_device; union { /* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */ u8 mapped_pcmcia_seg; /* Current SSB base address window for SDIO. */ u32 sdio_sbaddr; }; /* Lock for core and segment switching. * On PCMCIA-host busses this is used to protect the whole MMIO access. */ spinlock_t bar_lock; /* The host-bus this backplane is running on. */ enum ssb_bustype bustype; /* Pointers to the host-bus. Check bustype before using any of these pointers. */ union { /* Pointer to the PCI bus (only valid if bustype == SSB_BUSTYPE_PCI). */ struct pci_dev *host_pci; /* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */ struct pcmcia_device *host_pcmcia; /* Pointer to the SDIO device (only if bustype == SSB_BUSTYPE_SDIO). */ struct sdio_func *host_sdio; }; /* See enum ssb_quirks */ unsigned int quirks; #ifdef CONFIG_SSB_SPROM /* Mutex to protect the SPROM writing. */ struct mutex sprom_mutex; #endif /* ID information about the Chip. */ u16 chip_id; u8 chip_rev; u16 sprom_offset; u16 sprom_size; /* number of words in sprom */ u8 chip_package; /* List of devices (cores) on the backplane. */ struct ssb_device devices[SSB_MAX_NR_CORES]; u8 nr_devices; /* Software ID number for this bus. */ unsigned int busnumber; /* The ChipCommon device (if available). */ struct ssb_chipcommon chipco; /* The PCI-core device (if available). */ struct ssb_pcicore pcicore; /* The MIPS-core device (if available). */ struct ssb_mipscore mipscore; /* The EXTif-core device (if available). */ struct ssb_extif extif; /* The following structure elements are not available in early * SSB initialization. Though, they are available for regular * registered drivers at any stage. So be careful when * using them in the ssb core code. */ /* ID information about the PCB. */ struct ssb_boardinfo boardinfo; /* Contents of the SPROM. */ struct ssb_sprom sprom; /* If the board has a cardbus slot, this is set to true. */ bool has_cardbus_slot; #ifdef CONFIG_SSB_EMBEDDED /* Lock for GPIO register access. */ spinlock_t gpio_lock; struct platform_device *watchdog; #endif /* EMBEDDED */ #ifdef CONFIG_SSB_DRIVER_GPIO struct gpio_chip gpio; struct irq_domain *irq_domain; #endif /* DRIVER_GPIO */ /* Internal-only stuff follows. Do not touch. */ struct list_head list; /* Is the bus already powered up? */ bool powered_up; int power_warn_count; }; enum ssb_quirks { /* SDIO connected card requires performing a read after writing a 32-bit value */ SSB_QUIRK_SDIO_READ_AFTER_WRITE32 = (1 << 0), }; /* The initialization-invariants. */ struct ssb_init_invariants { /* Versioning information about the PCB. */ struct ssb_boardinfo boardinfo; /* The SPROM information. That's either stored in an * EEPROM or NVRAM on the board. */ struct ssb_sprom sprom; /* If the board has a cardbus slot, this is set to true. */ bool has_cardbus_slot; }; /* Type of function to fetch the invariants. */ typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus, struct ssb_init_invariants *iv); /* Register SoC bus. 
*/ extern int ssb_bus_host_soc_register(struct ssb_bus *bus, unsigned long baseaddr); #ifdef CONFIG_SSB_PCIHOST extern int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci); #endif /* CONFIG_SSB_PCIHOST */ #ifdef CONFIG_SSB_PCMCIAHOST extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus, struct pcmcia_device *pcmcia_dev, unsigned long baseaddr); #endif /* CONFIG_SSB_PCMCIAHOST */ #ifdef CONFIG_SSB_SDIOHOST extern int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *sdio_func, unsigned int quirks); #endif /* CONFIG_SSB_SDIOHOST */ extern void ssb_bus_unregister(struct ssb_bus *bus); /* Does the device have an SPROM? */ extern bool ssb_is_sprom_available(struct ssb_bus *bus); /* Set a fallback SPROM. * See kdoc at the function definition for complete documentation. */ extern int ssb_arch_register_fallback_sprom( int (*sprom_callback)(struct ssb_bus *bus, struct ssb_sprom *out)); /* Suspend a SSB bus. * Call this from the parent bus suspend routine. */ extern int ssb_bus_suspend(struct ssb_bus *bus); /* Resume a SSB bus. * Call this from the parent bus resume routine. */ extern int ssb_bus_resume(struct ssb_bus *bus); extern u32 ssb_clockspeed(struct ssb_bus *bus); /* Is the device enabled in hardware? */ int ssb_device_is_enabled(struct ssb_device *dev); /* Enable a device and pass device-specific SSB_TMSLOW flags. * If no device-specific flags are available, use 0. */ void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags); /* Disable a device in hardware and pass SSB_TMSLOW flags (if any). */ void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags); /* Device MMIO register read/write functions. */ static inline u8 ssb_read8(struct ssb_device *dev, u16 offset) { return dev->ops->read8(dev, offset); } static inline u16 ssb_read16(struct ssb_device *dev, u16 offset) { return dev->ops->read16(dev, offset); } static inline u32 ssb_read32(struct ssb_device *dev, u16 offset) { return dev->ops->read32(dev, offset); } static inline void ssb_write8(struct ssb_device *dev, u16 offset, u8 value) { dev->ops->write8(dev, offset, value); } static inline void ssb_write16(struct ssb_device *dev, u16 offset, u16 value) { dev->ops->write16(dev, offset, value); } static inline void ssb_write32(struct ssb_device *dev, u16 offset, u32 value) { dev->ops->write32(dev, offset, value); } #ifdef CONFIG_SSB_BLOCKIO static inline void ssb_block_read(struct ssb_device *dev, void *buffer, size_t count, u16 offset, u8 reg_width) { dev->ops->block_read(dev, buffer, count, offset, reg_width); } static inline void ssb_block_write(struct ssb_device *dev, const void *buffer, size_t count, u16 offset, u8 reg_width) { dev->ops->block_write(dev, buffer, count, offset, reg_width); } #endif /* CONFIG_SSB_BLOCKIO */ /* The SSB DMA API. Use this API for any DMA operation on the device. * This API basically is a wrapper that calls the correct DMA API for * the host device type the SSB device is attached to. */ /* Translation (routing) bits that need to be ORed to DMA * addresses before they are given to a device. */ extern u32 ssb_dma_translation(struct ssb_device *dev); #define SSB_DMA_TRANSLATION_MASK 0xC0000000 #define SSB_DMA_TRANSLATION_SHIFT 30 static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) { #ifdef CONFIG_SSB_DEBUG printk(KERN_ERR "SSB: BUG! 
Calling DMA API for " "unsupported bustype %d\n", dev->bus->bustype); #endif /* DEBUG */ } #ifdef CONFIG_SSB_PCIHOST /* PCI-host wrapper driver */ extern int ssb_pcihost_register(struct pci_driver *driver); static inline void ssb_pcihost_unregister(struct pci_driver *driver) { pci_unregister_driver(driver); } static inline void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state) { if (sdev->bus->bustype == SSB_BUSTYPE_PCI) pci_set_power_state(sdev->bus->host_pci, state); } #else static inline void ssb_pcihost_unregister(struct pci_driver *driver) { } static inline void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state) { } #endif /* CONFIG_SSB_PCIHOST */ /* If a driver is shut down or suspended, call this to signal * that the bus may be completely powered down. SSB will decide whether * it's really time to power down the bus, based on whether there * are other devices that want to run. */ extern int ssb_bus_may_powerdown(struct ssb_bus *bus); /* Before initializing and enabling a device, call this to power up the bus. * If you want to allow use of dynamic-power-control, pass the flag. * Otherwise static always-on powercontrol will be used. */ extern int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl); extern void ssb_commit_settings(struct ssb_bus *bus); /* Various helper functions */ extern u32 ssb_admatch_base(u32 adm); extern u32 ssb_admatch_size(u32 adm); /* PCI device mapping and fixup routines. * Called from the architecture pcibios init code. * These are only available on SSB_EMBEDDED configurations. */ #ifdef CONFIG_SSB_EMBEDDED int ssb_pcibios_plat_dev_init(struct pci_dev *dev); int ssb_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); #endif /* CONFIG_SSB_EMBEDDED */ #endif /* LINUX_SSB_H_ */ ssb/ssb_driver_extif.h 0000644 00000020204 14722070374 0011051 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Hardware-specific External Interface I/O core definitions * for the BCM47xx family of SiliconBackplane-based chips. * * The External Interface core supports a total of three external chip selects * supporting external interfaces. One of the external chip selects is * used for Flash, one is used for PCMCIA, and the other may be * programmed to support either a synchronous interface or an * asynchronous interface. The asynchronous interface can be used to * support external devices such as UARTs and the BCM2019 Bluetooth * baseband processor. * The external interface core also contains 2 on-chip 16550 UARTs, clock * frequency control, a watchdog interrupt timer, and a GPIO interface. * * Copyright 2005, Broadcom Corporation * Copyright 2006, Michael Buesch */ #ifndef LINUX_SSB_EXTIFCORE_H_ #define LINUX_SSB_EXTIFCORE_H_ /* external interface address space */ #define SSB_EXTIF_PCMCIA_MEMBASE(x) (x) #define SSB_EXTIF_PCMCIA_IOBASE(x) ((x) + 0x100000) #define SSB_EXTIF_PCMCIA_CFGBASE(x) ((x) + 0x200000) #define SSB_EXTIF_CFGIF_BASE(x) ((x) + 0x800000) #define SSB_EXTIF_FLASH_BASE(x) ((x) + 0xc00000) #define SSB_EXTIF_NR_GPIOOUT 5 /* GPIO NOTE: * The multiple instances of output and output enable registers * are present to allow driver software for multiple cores to control * gpio outputs without needing to share a single register pair. * Use the following helper macro to get a register offset value. 
*/ #define SSB_EXTIF_GPIO_OUT(index) ({ \ BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \ SSB_EXTIF_GPIO_OUT_BASE + ((index) * 8); \ }) #define SSB_EXTIF_GPIO_OUTEN(index) ({ \ BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \ SSB_EXTIF_GPIO_OUTEN_BASE + ((index) * 8); \ }) /** EXTIF core registers **/ #define SSB_EXTIF_CTL 0x0000 #define SSB_EXTIF_CTL_UARTEN (1 << 0) /* UART enable */ #define SSB_EXTIF_EXTSTAT 0x0004 #define SSB_EXTIF_EXTSTAT_EMODE (1 << 0) /* Endian mode (ro) */ #define SSB_EXTIF_EXTSTAT_EIRQPIN (1 << 1) /* External interrupt pin (ro) */ #define SSB_EXTIF_EXTSTAT_GPIOIRQPIN (1 << 2) /* GPIO interrupt pin (ro) */ #define SSB_EXTIF_PCMCIA_CFG 0x0010 #define SSB_EXTIF_PCMCIA_MEMWAIT 0x0014 #define SSB_EXTIF_PCMCIA_ATTRWAIT 0x0018 #define SSB_EXTIF_PCMCIA_IOWAIT 0x001C #define SSB_EXTIF_PROG_CFG 0x0020 #define SSB_EXTIF_PROG_WAITCNT 0x0024 #define SSB_EXTIF_FLASH_CFG 0x0028 #define SSB_EXTIF_FLASH_WAITCNT 0x002C #define SSB_EXTIF_WATCHDOG 0x0040 #define SSB_EXTIF_CLOCK_N 0x0044 #define SSB_EXTIF_CLOCK_SB 0x0048 #define SSB_EXTIF_CLOCK_PCI 0x004C #define SSB_EXTIF_CLOCK_MII 0x0050 #define SSB_EXTIF_GPIO_IN 0x0060 #define SSB_EXTIF_GPIO_OUT_BASE 0x0064 #define SSB_EXTIF_GPIO_OUTEN_BASE 0x0068 #define SSB_EXTIF_EJTAG_OUTEN 0x0090 #define SSB_EXTIF_GPIO_INTPOL 0x0094 #define SSB_EXTIF_GPIO_INTMASK 0x0098 #define SSB_EXTIF_UART_DATA 0x0300 #define SSB_EXTIF_UART_TIMER 0x0310 #define SSB_EXTIF_UART_FCR 0x0320 #define SSB_EXTIF_UART_LCR 0x0330 #define SSB_EXTIF_UART_MCR 0x0340 #define SSB_EXTIF_UART_LSR 0x0350 #define SSB_EXTIF_UART_MSR 0x0360 #define SSB_EXTIF_UART_SCRATCH 0x0370 /* pcmcia/prog/flash_config */ #define SSB_EXTCFG_EN (1 << 0) /* enable */ #define SSB_EXTCFG_MODE 0xE /* mode */ #define SSB_EXTCFG_MODE_SHIFT 1 #define SSB_EXTCFG_MODE_FLASH 0x0 /* flash/asynchronous mode */ #define SSB_EXTCFG_MODE_SYNC 0x2 /* synchronous mode */ #define SSB_EXTCFG_MODE_PCMCIA 0x4 /* pcmcia mode */ #define SSB_EXTCFG_DS16 (1 << 4) /* destsize: 0=8bit, 1=16bit */ #define SSB_EXTCFG_BSWAP (1 << 5) /* byteswap */ #define SSB_EXTCFG_CLKDIV 0xC0 /* clock divider */ #define SSB_EXTCFG_CLKDIV_SHIFT 6 #define SSB_EXTCFG_CLKDIV_2 0x0 /* backplane/2 */ #define SSB_EXTCFG_CLKDIV_3 0x40 /* backplane/3 */ #define SSB_EXTCFG_CLKDIV_4 0x80 /* backplane/4 */ #define SSB_EXTCFG_CLKEN (1 << 8) /* clock enable */ #define SSB_EXTCFG_STROBE (1 << 9) /* size/bytestrobe (synch only) */ /* pcmcia_memwait */ #define SSB_PCMCIA_MEMW_0 0x0000003F /* waitcount0 */ #define SSB_PCMCIA_MEMW_1 0x00001F00 /* waitcount1 */ #define SSB_PCMCIA_MEMW_1_SHIFT 8 #define SSB_PCMCIA_MEMW_2 0x001F0000 /* waitcount2 */ #define SSB_PCMCIA_MEMW_2_SHIFT 16 #define SSB_PCMCIA_MEMW_3 0x1F000000 /* waitcount3 */ #define SSB_PCMCIA_MEMW_3_SHIFT 24 /* pcmcia_attrwait */ #define SSB_PCMCIA_ATTW_0 0x0000003F /* waitcount0 */ #define SSB_PCMCIA_ATTW_1 0x00001F00 /* waitcount1 */ #define SSB_PCMCIA_ATTW_1_SHIFT 8 #define SSB_PCMCIA_ATTW_2 0x001F0000 /* waitcount2 */ #define SSB_PCMCIA_ATTW_2_SHIFT 16 #define SSB_PCMCIA_ATTW_3 0x1F000000 /* waitcount3 */ #define SSB_PCMCIA_ATTW_3_SHIFT 24 /* pcmcia_iowait */ #define SSB_PCMCIA_IOW_0 0x0000003F /* waitcount0 */ #define SSB_PCMCIA_IOW_1 0x00001F00 /* waitcount1 */ #define SSB_PCMCIA_IOW_1_SHIFT 8 #define SSB_PCMCIA_IOW_2 0x001F0000 /* waitcount2 */ #define SSB_PCMCIA_IOW_2_SHIFT 16 #define SSB_PCMCIA_IOW_3 0x1F000000 /* waitcount3 */ #define SSB_PCMCIA_IOW_3_SHIFT 24 /* prog_waitcount */ #define SSB_PROG_WCNT_0 0x0000001F /* waitcount0 */ #define SSB_PROG_WCNT_1 0x00001F00 /* waitcount1 */ #define 
SSB_PROG_WCNT_1_SHIFT 8 #define SSB_PROG_WCNT_2 0x001F0000 /* waitcount2 */ #define SSB_PROG_WCNT_2_SHIFT 16 #define SSB_PROG_WCNT_3 0x1F000000 /* waitcount3 */ #define SSB_PROG_WCNT_3_SHIFT 24 #define SSB_PROG_W0 0x0000000C #define SSB_PROG_W1 0x00000A00 #define SSB_PROG_W2 0x00020000 #define SSB_PROG_W3 0x01000000 /* flash_waitcount */ #define SSB_FLASH_WCNT_0 0x0000001F /* waitcount0 */ #define SSB_FLASH_WCNT_1 0x00001F00 /* waitcount1 */ #define SSB_FLASH_WCNT_1_SHIFT 8 #define SSB_FLASH_WCNT_2 0x001F0000 /* waitcount2 */ #define SSB_FLASH_WCNT_2_SHIFT 16 #define SSB_FLASH_WCNT_3 0x1F000000 /* waitcount3 */ #define SSB_FLASH_WCNT_3_SHIFT 24 /* watchdog */ #define SSB_EXTIF_WATCHDOG_CLK 48000000 /* Hz */ #define SSB_EXTIF_WATCHDOG_MAX_TIMER ((1 << 28) - 1) #define SSB_EXTIF_WATCHDOG_MAX_TIMER_MS (SSB_EXTIF_WATCHDOG_MAX_TIMER \ / (SSB_EXTIF_WATCHDOG_CLK / 1000)) #ifdef CONFIG_SSB_DRIVER_EXTIF struct ssb_extif { struct ssb_device *dev; spinlock_t gpio_lock; }; static inline bool ssb_extif_available(struct ssb_extif *extif) { return (extif->dev != NULL); } extern void ssb_extif_get_clockcontrol(struct ssb_extif *extif, u32 *plltype, u32 *n, u32 *m); extern void ssb_extif_timing_init(struct ssb_extif *extif, unsigned long ns); extern u32 ssb_extif_watchdog_timer_set(struct ssb_extif *extif, u32 ticks); /* Extif GPIO pin access */ u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask); u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value); u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value); u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value); u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value); #ifdef CONFIG_SSB_SERIAL extern int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports); #endif /* CONFIG_SSB_SERIAL */ #else /* CONFIG_SSB_DRIVER_EXTIF */ /* extif disabled */ struct ssb_extif { }; static inline bool ssb_extif_available(struct ssb_extif *extif) { return 0; } static inline void ssb_extif_get_clockcontrol(struct ssb_extif *extif, u32 *plltype, u32 *n, u32 *m) { } static inline void ssb_extif_timing_init(struct ssb_extif *extif, unsigned long ns) { } static inline u32 ssb_extif_watchdog_timer_set(struct ssb_extif *extif, u32 ticks) { return 0; } static inline u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask) { return 0; } static inline u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value) { return 0; } static inline u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value) { return 0; } static inline u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value) { return 0; } static inline u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value) { return 0; } #ifdef CONFIG_SSB_SERIAL static inline int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports) { return 0; } #endif /* CONFIG_SSB_SERIAL */ #endif /* CONFIG_SSB_DRIVER_EXTIF */ #endif /* LINUX_SSB_EXTIFCORE_H_ */ ssb/ssb_driver_mips.h 0000644 00000002226 14722070374 0010706 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SSB_MIPSCORE_H_ #define LINUX_SSB_MIPSCORE_H_ #ifdef CONFIG_SSB_DRIVER_MIPS struct ssb_device; struct ssb_serial_port { void *regs; unsigned long clockspeed; unsigned int irq; unsigned int baud_base; unsigned int reg_shift; }; struct ssb_pflash { bool present; u8 buswidth; u32 window; u32 window_size; }; #ifdef CONFIG_SSB_SFLASH struct ssb_sflash { bool present; u32 window; u32 blocksize; u16 numblocks; u32 
size; void *priv; }; #endif struct ssb_mipscore { struct ssb_device *dev; int nr_serial_ports; struct ssb_serial_port serial_ports[4]; struct ssb_pflash pflash; #ifdef CONFIG_SSB_SFLASH struct ssb_sflash sflash; #endif }; extern void ssb_mipscore_init(struct ssb_mipscore *mcore); extern u32 ssb_cpu_clock(struct ssb_mipscore *mcore); extern unsigned int ssb_mips_irq(struct ssb_device *dev); #else /* CONFIG_SSB_DRIVER_MIPS */ struct ssb_mipscore { }; static inline void ssb_mipscore_init(struct ssb_mipscore *mcore) { } static inline unsigned int ssb_mips_irq(struct ssb_device *dev) { return 0; } #endif /* CONFIG_SSB_DRIVER_MIPS */ #endif /* LINUX_SSB_MIPSCORE_H_ */ ssb/ssb_driver_chipcommon.h 0000644 00000072401 14722070374 0012074 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef LINUX_SSB_CHIPCO_H_ #define LINUX_SSB_CHIPCO_H_ /* SonicsSiliconBackplane CHIPCOMMON core hardware definitions * * The chipcommon core provides chip identification, SB control, * jtag, 0/1/2 uarts, clock frequency control, a watchdog interrupt timer, * gpio interface, extbus, and support for serial and parallel flashes. * * Copyright 2005, Broadcom Corporation * Copyright 2006, Michael Buesch <m@bues.ch> */ /** ChipCommon core registers. **/ #define SSB_CHIPCO_CHIPID 0x0000 #define SSB_CHIPCO_IDMASK 0x0000FFFF #define SSB_CHIPCO_REVMASK 0x000F0000 #define SSB_CHIPCO_REVSHIFT 16 #define SSB_CHIPCO_PACKMASK 0x00F00000 #define SSB_CHIPCO_PACKSHIFT 20 #define SSB_CHIPCO_NRCORESMASK 0x0F000000 #define SSB_CHIPCO_NRCORESSHIFT 24 #define SSB_CHIPCO_CAP 0x0004 /* Capabilities */ #define SSB_CHIPCO_CAP_NRUART 0x00000003 /* # of UARTs */ #define SSB_CHIPCO_CAP_MIPSEB 0x00000004 /* MIPS in BigEndian Mode */ #define SSB_CHIPCO_CAP_UARTCLK 0x00000018 /* UART clock select */ #define SSB_CHIPCO_CAP_UARTCLK_INT 0x00000008 /* UARTs are driven by internal divided clock */ #define SSB_CHIPCO_CAP_UARTGPIO 0x00000020 /* UARTs on GPIO 15-12 */ #define SSB_CHIPCO_CAP_EXTBUS 0x000000C0 /* External buses present */ #define SSB_CHIPCO_CAP_FLASHT 0x00000700 /* Flash Type */ #define SSB_CHIPCO_FLASHT_NONE 0x00000000 /* No flash */ #define SSB_CHIPCO_FLASHT_STSER 0x00000100 /* ST serial flash */ #define SSB_CHIPCO_FLASHT_ATSER 0x00000200 /* Atmel serial flash */ #define SSB_CHIPCO_FLASHT_PARA 0x00000700 /* Parallel flash */ #define SSB_CHIPCO_CAP_PLLT 0x00038000 /* PLL Type */ #define SSB_PLLTYPE_NONE 0x00000000 #define SSB_PLLTYPE_1 0x00010000 /* 48Mhz base, 3 dividers */ #define SSB_PLLTYPE_2 0x00020000 /* 48Mhz, 4 dividers */ #define SSB_PLLTYPE_3 0x00030000 /* 25Mhz, 2 dividers */ #define SSB_PLLTYPE_4 0x00008000 /* 48Mhz, 4 dividers */ #define SSB_PLLTYPE_5 0x00018000 /* 25Mhz, 4 dividers */ #define SSB_PLLTYPE_6 0x00028000 /* 100/200 or 120/240 only */ #define SSB_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */ #define SSB_CHIPCO_CAP_PCTL 0x00040000 /* Power Control */ #define SSB_CHIPCO_CAP_OTPS 0x00380000 /* OTP size */ #define SSB_CHIPCO_CAP_OTPS_SHIFT 19 #define SSB_CHIPCO_CAP_OTPS_BASE 5 #define SSB_CHIPCO_CAP_JTAGM 0x00400000 /* JTAG master present */ #define SSB_CHIPCO_CAP_BROM 0x00800000 /* Internal boot ROM active */ #define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */ #define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */ #define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */ #define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */ #define SSB_CHIPCO_CORECTL 0x0008 #define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */ #define SSB_CHIPCO_CORECTL_SE 
0x00000002 /* sync clk out enable (corerev >= 3) */ #define SSB_CHIPCO_CORECTL_UARTCLKEN 0x00000008 /* UART clock enable (rev >= 21) */ #define SSB_CHIPCO_BIST 0x000C #define SSB_CHIPCO_OTPS 0x0010 /* OTP status */ #define SSB_CHIPCO_OTPS_PROGFAIL 0x80000000 #define SSB_CHIPCO_OTPS_PROTECT 0x00000007 #define SSB_CHIPCO_OTPS_HW_PROTECT 0x00000001 #define SSB_CHIPCO_OTPS_SW_PROTECT 0x00000002 #define SSB_CHIPCO_OTPS_CID_PROTECT 0x00000004 #define SSB_CHIPCO_OTPC 0x0014 /* OTP control */ #define SSB_CHIPCO_OTPC_RECWAIT 0xFF000000 #define SSB_CHIPCO_OTPC_PROGWAIT 0x00FFFF00 #define SSB_CHIPCO_OTPC_PRW_SHIFT 8 #define SSB_CHIPCO_OTPC_MAXFAIL 0x00000038 #define SSB_CHIPCO_OTPC_VSEL 0x00000006 #define SSB_CHIPCO_OTPC_SELVL 0x00000001 #define SSB_CHIPCO_OTPP 0x0018 /* OTP prog */ #define SSB_CHIPCO_OTPP_COL 0x000000FF #define SSB_CHIPCO_OTPP_ROW 0x0000FF00 #define SSB_CHIPCO_OTPP_ROW_SHIFT 8 #define SSB_CHIPCO_OTPP_READERR 0x10000000 #define SSB_CHIPCO_OTPP_VALUE 0x20000000 #define SSB_CHIPCO_OTPP_READ 0x40000000 #define SSB_CHIPCO_OTPP_START 0x80000000 #define SSB_CHIPCO_OTPP_BUSY 0x80000000 #define SSB_CHIPCO_IRQSTAT 0x0020 #define SSB_CHIPCO_IRQMASK 0x0024 #define SSB_CHIPCO_IRQ_GPIO 0x00000001 /* gpio intr */ #define SSB_CHIPCO_IRQ_EXT 0x00000002 /* ro: ext intr pin (corerev >= 3) */ #define SSB_CHIPCO_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */ #define SSB_CHIPCO_CHIPCTL 0x0028 /* Rev >= 11 only */ #define SSB_CHIPCO_CHIPSTAT 0x002C /* Rev >= 11 only */ #define SSB_CHIPCO_JCMD 0x0030 /* Rev >= 10 only */ #define SSB_CHIPCO_JCMD_START 0x80000000 #define SSB_CHIPCO_JCMD_BUSY 0x80000000 #define SSB_CHIPCO_JCMD_PAUSE 0x40000000 #define SSB_CHIPCO_JCMD0_ACC_MASK 0x0000F000 #define SSB_CHIPCO_JCMD0_ACC_IRDR 0x00000000 #define SSB_CHIPCO_JCMD0_ACC_DR 0x00001000 #define SSB_CHIPCO_JCMD0_ACC_IR 0x00002000 #define SSB_CHIPCO_JCMD0_ACC_RESET 0x00003000 #define SSB_CHIPCO_JCMD0_ACC_IRPDR 0x00004000 #define SSB_CHIPCO_JCMD0_ACC_PDR 0x00005000 #define SSB_CHIPCO_JCMD0_IRW_MASK 0x00000F00 #define SSB_CHIPCO_JCMD_ACC_MASK 0x000F0000 /* Changes for corerev 11 */ #define SSB_CHIPCO_JCMD_ACC_IRDR 0x00000000 #define SSB_CHIPCO_JCMD_ACC_DR 0x00010000 #define SSB_CHIPCO_JCMD_ACC_IR 0x00020000 #define SSB_CHIPCO_JCMD_ACC_RESET 0x00030000 #define SSB_CHIPCO_JCMD_ACC_IRPDR 0x00040000 #define SSB_CHIPCO_JCMD_ACC_PDR 0x00050000 #define SSB_CHIPCO_JCMD_IRW_MASK 0x00001F00 #define SSB_CHIPCO_JCMD_IRW_SHIFT 8 #define SSB_CHIPCO_JCMD_DRW_MASK 0x0000003F #define SSB_CHIPCO_JIR 0x0034 /* Rev >= 10 only */ #define SSB_CHIPCO_JDR 0x0038 /* Rev >= 10 only */ #define SSB_CHIPCO_JCTL 0x003C /* Rev >= 10 only */ #define SSB_CHIPCO_JCTL_FORCE_CLK 4 /* Force clock */ #define SSB_CHIPCO_JCTL_EXT_EN 2 /* Enable external targets */ #define SSB_CHIPCO_JCTL_EN 1 /* Enable Jtag master */ #define SSB_CHIPCO_FLASHCTL 0x0040 #define SSB_CHIPCO_FLASHCTL_START 0x80000000 #define SSB_CHIPCO_FLASHCTL_BUSY SSB_CHIPCO_FLASHCTL_START #define SSB_CHIPCO_FLASHADDR 0x0044 #define SSB_CHIPCO_FLASHDATA 0x0048 #define SSB_CHIPCO_BCAST_ADDR 0x0050 #define SSB_CHIPCO_BCAST_DATA 0x0054 #define SSB_CHIPCO_GPIOPULLUP 0x0058 /* Rev >= 20 only */ #define SSB_CHIPCO_GPIOPULLDOWN 0x005C /* Rev >= 20 only */ #define SSB_CHIPCO_GPIOIN 0x0060 #define SSB_CHIPCO_GPIOOUT 0x0064 #define SSB_CHIPCO_GPIOOUTEN 0x0068 #define SSB_CHIPCO_GPIOCTL 0x006C #define SSB_CHIPCO_GPIOPOL 0x0070 #define SSB_CHIPCO_GPIOIRQ 0x0074 #define SSB_CHIPCO_WATCHDOG 0x0080 #define SSB_CHIPCO_GPIOTIMER 0x0088 /* LED powersave (corerev >= 16) */ #define SSB_CHIPCO_GPIOTIMER_OFFTIME 
0x0000FFFF #define SSB_CHIPCO_GPIOTIMER_OFFTIME_SHIFT 0 #define SSB_CHIPCO_GPIOTIMER_ONTIME 0xFFFF0000 #define SSB_CHIPCO_GPIOTIMER_ONTIME_SHIFT 16 #define SSB_CHIPCO_GPIOTOUTM 0x008C /* LED powersave (corerev >= 16) */ #define SSB_CHIPCO_CLOCK_N 0x0090 #define SSB_CHIPCO_CLOCK_SB 0x0094 #define SSB_CHIPCO_CLOCK_PCI 0x0098 #define SSB_CHIPCO_CLOCK_M2 0x009C #define SSB_CHIPCO_CLOCK_MIPS 0x00A0 #define SSB_CHIPCO_CLKDIV 0x00A4 /* Rev >= 3 only */ #define SSB_CHIPCO_CLKDIV_SFLASH 0x0F000000 #define SSB_CHIPCO_CLKDIV_SFLASH_SHIFT 24 #define SSB_CHIPCO_CLKDIV_OTP 0x000F0000 #define SSB_CHIPCO_CLKDIV_OTP_SHIFT 16 #define SSB_CHIPCO_CLKDIV_JTAG 0x00000F00 #define SSB_CHIPCO_CLKDIV_JTAG_SHIFT 8 #define SSB_CHIPCO_CLKDIV_UART 0x000000FF #define SSB_CHIPCO_PLLONDELAY 0x00B0 /* Rev >= 4 only */ #define SSB_CHIPCO_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ #define SSB_CHIPCO_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ #define SSB_CHIPCO_SLOWCLKCTL_SRC 0x00000007 /* slow clock source mask */ #define SSB_CHIPCO_SLOWCLKCTL_SRC_LPO 0x00000000 /* source of slow clock is LPO */ #define SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL 0x00000001 /* source of slow clock is crystal */ #define SSB_CHIPCO_SLOECLKCTL_SRC_PCI 0x00000002 /* source of slow clock is PCI */ #define SSB_CHIPCO_SLOWCLKCTL_LPOFREQ 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */ #define SSB_CHIPCO_SLOWCLKCTL_LPOPD 0x00000400 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */ #define SSB_CHIPCO_SLOWCLKCTL_FSLOW 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */ #define SSB_CHIPCO_SLOWCLKCTL_IPLL 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors PLL clock disable requests from core */ #define SSB_CHIPCO_SLOWCLKCTL_ENXTAL 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't disable crystal when appropriate */ #define SSB_CHIPCO_SLOWCLKCTL_XTALPU 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */ #define SSB_CHIPCO_SLOWCLKCTL_CLKDIV 0xFFFF0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */ #define SSB_CHIPCO_SLOWCLKCTL_CLKDIV_SHIFT 16 #define SSB_CHIPCO_SYSCLKCTL 0x00C0 /* Rev >= 3 only */ #define SSB_CHIPCO_SYSCLKCTL_IDLPEN 0x00000001 /* ILPen: Enable Idle Low Power */ #define SSB_CHIPCO_SYSCLKCTL_ALPEN 0x00000002 /* ALPen: Enable Active Low Power */ #define SSB_CHIPCO_SYSCLKCTL_PLLEN 0x00000004 /* ForcePLLOn */ #define SSB_CHIPCO_SYSCLKCTL_FORCEALP 0x00000008 /* Force ALP (or HT if ALPen is not set */ #define SSB_CHIPCO_SYSCLKCTL_FORCEHT 0x00000010 /* Force HT */ #define SSB_CHIPCO_SYSCLKCTL_CLKDIV 0xFFFF0000 /* ClkDiv (ILP = 1/(4+divisor)) */ #define SSB_CHIPCO_SYSCLKCTL_CLKDIV_SHIFT 16 #define SSB_CHIPCO_CLKSTSTR 0x00C4 /* Rev >= 3 only */ #define SSB_CHIPCO_PCMCIA_CFG 0x0100 #define SSB_CHIPCO_PCMCIA_MEMWAIT 0x0104 #define SSB_CHIPCO_PCMCIA_ATTRWAIT 0x0108 #define SSB_CHIPCO_PCMCIA_IOWAIT 0x010C #define SSB_CHIPCO_IDE_CFG 0x0110 #define SSB_CHIPCO_IDE_MEMWAIT 0x0114 #define SSB_CHIPCO_IDE_ATTRWAIT 0x0118 #define SSB_CHIPCO_IDE_IOWAIT 0x011C #define SSB_CHIPCO_PROG_CFG 0x0120 #define SSB_CHIPCO_PROG_WAITCNT 0x0124 #define SSB_CHIPCO_FLASH_CFG 0x0128 #define SSB_CHIPCO_FLASH_WAITCNT 0x012C #define SSB_CHIPCO_CLKCTLST 0x01E0 /* Clock control and status (rev >= 20) */ #define SSB_CHIPCO_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */ #define SSB_CHIPCO_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */ #define SSB_CHIPCO_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */ #define SSB_CHIPCO_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */ #define SSB_CHIPCO_CLKCTLST_HAVEHTREQ 
0x00000010 /* HT available request */ #define SSB_CHIPCO_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ #define SSB_CHIPCO_CLKCTLST_HAVEALP 0x00010000 /* ALP available */ #define SSB_CHIPCO_CLKCTLST_HAVEHT 0x00020000 /* HT available */ #define SSB_CHIPCO_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */ #define SSB_CHIPCO_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ #define SSB_CHIPCO_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */ #define SSB_CHIPCO_UART0_DATA 0x0300 #define SSB_CHIPCO_UART0_IMR 0x0304 #define SSB_CHIPCO_UART0_FCR 0x0308 #define SSB_CHIPCO_UART0_LCR 0x030C #define SSB_CHIPCO_UART0_MCR 0x0310 #define SSB_CHIPCO_UART0_LSR 0x0314 #define SSB_CHIPCO_UART0_MSR 0x0318 #define SSB_CHIPCO_UART0_SCRATCH 0x031C #define SSB_CHIPCO_UART1_DATA 0x0400 #define SSB_CHIPCO_UART1_IMR 0x0404 #define SSB_CHIPCO_UART1_FCR 0x0408 #define SSB_CHIPCO_UART1_LCR 0x040C #define SSB_CHIPCO_UART1_MCR 0x0410 #define SSB_CHIPCO_UART1_LSR 0x0414 #define SSB_CHIPCO_UART1_MSR 0x0418 #define SSB_CHIPCO_UART1_SCRATCH 0x041C /* PMU registers (rev >= 20) */ #define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */ #define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ #define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16 #define SSB_CHIPCO_PMU_CTL_PLL_UPD 0x00000400 #define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ #define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ #define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ #define SSB_CHIPCO_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */ #define SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT 2 #define SSB_CHIPCO_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */ #define SSB_CHIPCO_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */ #define SSB_CHIPCO_PMU_CAP 0x0604 /* PMU capabilities */ #define SSB_CHIPCO_PMU_CAP_REVISION 0x000000FF /* Revision mask */ #define SSB_CHIPCO_PMU_STAT 0x0608 /* PMU status */ #define SSB_CHIPCO_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */ #define SSB_CHIPCO_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? 
*/ #define SSB_CHIPCO_PMU_STAT_HAVEALP 0x00000008 /* ALP available */ #define SSB_CHIPCO_PMU_STAT_HAVEHT 0x00000004 /* HT available */ #define SSB_CHIPCO_PMU_STAT_RESINIT 0x00000003 /* Res init */ #define SSB_CHIPCO_PMU_RES_STAT 0x060C /* PMU res status */ #define SSB_CHIPCO_PMU_RES_PEND 0x0610 /* PMU res pending */ #define SSB_CHIPCO_PMU_TIMER 0x0614 /* PMU timer */ #define SSB_CHIPCO_PMU_MINRES_MSK 0x0618 /* PMU min res mask */ #define SSB_CHIPCO_PMU_MAXRES_MSK 0x061C /* PMU max res mask */ #define SSB_CHIPCO_PMU_RES_TABSEL 0x0620 /* PMU res table sel */ #define SSB_CHIPCO_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */ #define SSB_CHIPCO_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */ #define SSB_CHIPCO_PMU_RES_TIMER 0x062C /* PMU res timer */ #define SSB_CHIPCO_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */ #define SSB_CHIPCO_PMU_WATCHDOG 0x0634 /* PMU watchdog */ #define SSB_CHIPCO_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ #define SSB_CHIPCO_PMU_RES_REQT 0x0644 /* PMU res req timer */ #define SSB_CHIPCO_PMU_RES_REQM 0x0648 /* PMU res req mask */ #define SSB_CHIPCO_CHIPCTL_ADDR 0x0650 #define SSB_CHIPCO_CHIPCTL_DATA 0x0654 #define SSB_CHIPCO_REGCTL_ADDR 0x0658 #define SSB_CHIPCO_REGCTL_DATA 0x065C #define SSB_CHIPCO_PLLCTL_ADDR 0x0660 #define SSB_CHIPCO_PLLCTL_DATA 0x0664 /** PMU PLL registers */ /* PMU rev 0 PLL registers */ #define SSB_PMU0_PLLCTL0 0 #define SSB_PMU0_PLLCTL0_PDIV_MSK 0x00000001 #define SSB_PMU0_PLLCTL0_PDIV_FREQ 25000 /* kHz */ #define SSB_PMU0_PLLCTL1 1 #define SSB_PMU0_PLLCTL1_WILD_IMSK 0xF0000000 /* Wild int mask (low nibble) */ #define SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT 28 #define SSB_PMU0_PLLCTL1_WILD_FMSK 0x0FFFFF00 /* Wild frac mask */ #define SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT 8 #define SSB_PMU0_PLLCTL1_STOPMOD 0x00000040 /* Stop mod */ #define SSB_PMU0_PLLCTL2 2 #define SSB_PMU0_PLLCTL2_WILD_IMSKHI 0x0000000F /* Wild int mask (high nibble) */ #define SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT 0 /* PMU rev 1 PLL registers */ #define SSB_PMU1_PLLCTL0 0 #define SSB_PMU1_PLLCTL0_P1DIV 0x00F00000 /* P1 div */ #define SSB_PMU1_PLLCTL0_P1DIV_SHIFT 20 #define SSB_PMU1_PLLCTL0_P2DIV 0x0F000000 /* P2 div */ #define SSB_PMU1_PLLCTL0_P2DIV_SHIFT 24 #define SSB_PMU1_PLLCTL1 1 #define SSB_PMU1_PLLCTL1_M1DIV 0x000000FF /* M1 div */ #define SSB_PMU1_PLLCTL1_M1DIV_SHIFT 0 #define SSB_PMU1_PLLCTL1_M2DIV 0x0000FF00 /* M2 div */ #define SSB_PMU1_PLLCTL1_M2DIV_SHIFT 8 #define SSB_PMU1_PLLCTL1_M3DIV 0x00FF0000 /* M3 div */ #define SSB_PMU1_PLLCTL1_M3DIV_SHIFT 16 #define SSB_PMU1_PLLCTL1_M4DIV 0xFF000000 /* M4 div */ #define SSB_PMU1_PLLCTL1_M4DIV_SHIFT 24 #define SSB_PMU1_PLLCTL2 2 #define SSB_PMU1_PLLCTL2_M5DIV 0x000000FF /* M5 div */ #define SSB_PMU1_PLLCTL2_M5DIV_SHIFT 0 #define SSB_PMU1_PLLCTL2_M6DIV 0x0000FF00 /* M6 div */ #define SSB_PMU1_PLLCTL2_M6DIV_SHIFT 8 #define SSB_PMU1_PLLCTL2_NDIVMODE 0x000E0000 /* NDIV mode */ #define SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT 17 #define SSB_PMU1_PLLCTL2_NDIVINT 0x1FF00000 /* NDIV int */ #define SSB_PMU1_PLLCTL2_NDIVINT_SHIFT 20 #define SSB_PMU1_PLLCTL3 3 #define SSB_PMU1_PLLCTL3_NDIVFRAC 0x00FFFFFF /* NDIV frac */ #define SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT 0 #define SSB_PMU1_PLLCTL4 4 #define SSB_PMU1_PLLCTL5 5 #define SSB_PMU1_PLLCTL5_CLKDRV 0xFFFFFF00 /* clk drv */ #define SSB_PMU1_PLLCTL5_CLKDRV_SHIFT 8 /* BCM4312 PLL resource numbers. 
*/ #define SSB_PMURES_4312_SWITCHER_BURST 0 #define SSB_PMURES_4312_SWITCHER_PWM 1 #define SSB_PMURES_4312_PA_REF_LDO 2 #define SSB_PMURES_4312_CORE_LDO_BURST 3 #define SSB_PMURES_4312_CORE_LDO_PWM 4 #define SSB_PMURES_4312_RADIO_LDO 5 #define SSB_PMURES_4312_ILP_REQUEST 6 #define SSB_PMURES_4312_BG_FILTBYP 7 #define SSB_PMURES_4312_TX_FILTBYP 8 #define SSB_PMURES_4312_RX_FILTBYP 9 #define SSB_PMURES_4312_XTAL_PU 10 #define SSB_PMURES_4312_ALP_AVAIL 11 #define SSB_PMURES_4312_BB_PLL_FILTBYP 12 #define SSB_PMURES_4312_RF_PLL_FILTBYP 13 #define SSB_PMURES_4312_HT_AVAIL 14 /* BCM4325 PLL resource numbers. */ #define SSB_PMURES_4325_BUCK_BOOST_BURST 0 #define SSB_PMURES_4325_CBUCK_BURST 1 #define SSB_PMURES_4325_CBUCK_PWM 2 #define SSB_PMURES_4325_CLDO_CBUCK_BURST 3 #define SSB_PMURES_4325_CLDO_CBUCK_PWM 4 #define SSB_PMURES_4325_BUCK_BOOST_PWM 5 #define SSB_PMURES_4325_ILP_REQUEST 6 #define SSB_PMURES_4325_ABUCK_BURST 7 #define SSB_PMURES_4325_ABUCK_PWM 8 #define SSB_PMURES_4325_LNLDO1_PU 9 #define SSB_PMURES_4325_LNLDO2_PU 10 #define SSB_PMURES_4325_LNLDO3_PU 11 #define SSB_PMURES_4325_LNLDO4_PU 12 #define SSB_PMURES_4325_XTAL_PU 13 #define SSB_PMURES_4325_ALP_AVAIL 14 #define SSB_PMURES_4325_RX_PWRSW_PU 15 #define SSB_PMURES_4325_TX_PWRSW_PU 16 #define SSB_PMURES_4325_RFPLL_PWRSW_PU 17 #define SSB_PMURES_4325_LOGEN_PWRSW_PU 18 #define SSB_PMURES_4325_AFE_PWRSW_PU 19 #define SSB_PMURES_4325_BBPLL_PWRSW_PU 20 #define SSB_PMURES_4325_HT_AVAIL 21 /* BCM4328 PLL resource numbers. */ #define SSB_PMURES_4328_EXT_SWITCHER_PWM 0 #define SSB_PMURES_4328_BB_SWITCHER_PWM 1 #define SSB_PMURES_4328_BB_SWITCHER_BURST 2 #define SSB_PMURES_4328_BB_EXT_SWITCHER_BURST 3 #define SSB_PMURES_4328_ILP_REQUEST 4 #define SSB_PMURES_4328_RADIO_SWITCHER_PWM 5 #define SSB_PMURES_4328_RADIO_SWITCHER_BURST 6 #define SSB_PMURES_4328_ROM_SWITCH 7 #define SSB_PMURES_4328_PA_REF_LDO 8 #define SSB_PMURES_4328_RADIO_LDO 9 #define SSB_PMURES_4328_AFE_LDO 10 #define SSB_PMURES_4328_PLL_LDO 11 #define SSB_PMURES_4328_BG_FILTBYP 12 #define SSB_PMURES_4328_TX_FILTBYP 13 #define SSB_PMURES_4328_RX_FILTBYP 14 #define SSB_PMURES_4328_XTAL_PU 15 #define SSB_PMURES_4328_XTAL_EN 16 #define SSB_PMURES_4328_BB_PLL_FILTBYP 17 #define SSB_PMURES_4328_RF_PLL_FILTBYP 18 #define SSB_PMURES_4328_BB_PLL_PU 19 /* BCM5354 PLL resource numbers. */ #define SSB_PMURES_5354_EXT_SWITCHER_PWM 0 #define SSB_PMURES_5354_BB_SWITCHER_PWM 1 #define SSB_PMURES_5354_BB_SWITCHER_BURST 2 #define SSB_PMURES_5354_BB_EXT_SWITCHER_BURST 3 #define SSB_PMURES_5354_ILP_REQUEST 4 #define SSB_PMURES_5354_RADIO_SWITCHER_PWM 5 #define SSB_PMURES_5354_RADIO_SWITCHER_BURST 6 #define SSB_PMURES_5354_ROM_SWITCH 7 #define SSB_PMURES_5354_PA_REF_LDO 8 #define SSB_PMURES_5354_RADIO_LDO 9 #define SSB_PMURES_5354_AFE_LDO 10 #define SSB_PMURES_5354_PLL_LDO 11 #define SSB_PMURES_5354_BG_FILTBYP 12 #define SSB_PMURES_5354_TX_FILTBYP 13 #define SSB_PMURES_5354_RX_FILTBYP 14 #define SSB_PMURES_5354_XTAL_PU 15 #define SSB_PMURES_5354_XTAL_EN 16 #define SSB_PMURES_5354_BB_PLL_FILTBYP 17 #define SSB_PMURES_5354_RF_PLL_FILTBYP 18 #define SSB_PMURES_5354_BB_PLL_PU 19 /** Chip specific Chip-Status register contents. */ #define SSB_CHIPCO_CHST_4322_SPROM_EXISTS 0x00000040 /* SPROM present */ #define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003 #define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. 
CIS, no SPROM */ #define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ #define SSB_CHIPCO_CHST_4325_OTP_SEL 2 /* OTP is powered up, no SPROM */ #define SSB_CHIPCO_CHST_4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */ #define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE 0x00000004 #define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE_SHIFT 2 #define SSB_CHIPCO_CHST_4325_RCAL_VALID 0x00000008 #define SSB_CHIPCO_CHST_4325_RCAL_VALID_SHIFT 3 #define SSB_CHIPCO_CHST_4325_RCAL_VALUE 0x000001F0 #define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4 #define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */ /** Macros to determine SPROM presence based on Chip-Status register. */ #define SSB_CHIPCO_CHST_4312_SPROM_PRESENT(status) \ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ SSB_CHIPCO_CHST_4325_OTP_SEL) #define SSB_CHIPCO_CHST_4322_SPROM_PRESENT(status) \ (status & SSB_CHIPCO_CHST_4322_SPROM_EXISTS) #define SSB_CHIPCO_CHST_4325_SPROM_PRESENT(status) \ (((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ SSB_CHIPCO_CHST_4325_DEFCIS_SEL) && \ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \ SSB_CHIPCO_CHST_4325_OTP_SEL)) /** Clockcontrol masks and values **/ /* SSB_CHIPCO_CLOCK_N */ #define SSB_CHIPCO_CLK_N1 0x0000003F /* n1 control */ #define SSB_CHIPCO_CLK_N2 0x00003F00 /* n2 control */ #define SSB_CHIPCO_CLK_N2_SHIFT 8 #define SSB_CHIPCO_CLK_PLLC 0x000F0000 /* pll control */ #define SSB_CHIPCO_CLK_PLLC_SHIFT 16 /* SSB_CHIPCO_CLOCK_SB/PCI/UART */ #define SSB_CHIPCO_CLK_M1 0x0000003F /* m1 control */ #define SSB_CHIPCO_CLK_M2 0x00003F00 /* m2 control */ #define SSB_CHIPCO_CLK_M2_SHIFT 8 #define SSB_CHIPCO_CLK_M3 0x003F0000 /* m3 control */ #define SSB_CHIPCO_CLK_M3_SHIFT 16 #define SSB_CHIPCO_CLK_MC 0x1F000000 /* mux control */ #define SSB_CHIPCO_CLK_MC_SHIFT 24 /* N3M Clock control magic field values */ #define SSB_CHIPCO_CLK_F6_2 0x02 /* A factor of 2 in */ #define SSB_CHIPCO_CLK_F6_3 0x03 /* 6-bit fields like */ #define SSB_CHIPCO_CLK_F6_4 0x05 /* N1, M1 or M3 */ #define SSB_CHIPCO_CLK_F6_5 0x09 #define SSB_CHIPCO_CLK_F6_6 0x11 #define SSB_CHIPCO_CLK_F6_7 0x21 #define SSB_CHIPCO_CLK_F5_BIAS 5 /* 5-bit fields get this added */ #define SSB_CHIPCO_CLK_MC_BYPASS 0x08 #define SSB_CHIPCO_CLK_MC_M1 0x04 #define SSB_CHIPCO_CLK_MC_M1M2 0x02 #define SSB_CHIPCO_CLK_MC_M1M2M3 0x01 #define SSB_CHIPCO_CLK_MC_M1M3 0x11 /* Type 2 Clock control magic field values */ #define SSB_CHIPCO_CLK_T2_BIAS 2 /* n1, n2, m1 & m3 bias */ #define SSB_CHIPCO_CLK_T2M2_BIAS 3 /* m2 bias */ #define SSB_CHIPCO_CLK_T2MC_M1BYP 1 #define SSB_CHIPCO_CLK_T2MC_M2BYP 2 #define SSB_CHIPCO_CLK_T2MC_M3BYP 4 /* Type 6 Clock control magic field values */ #define SSB_CHIPCO_CLK_T6_MMASK 1 /* bits of interest in m */ #define SSB_CHIPCO_CLK_T6_M0 120000000 /* sb clock for m = 0 */ #define SSB_CHIPCO_CLK_T6_M1 100000000 /* sb clock for m = 1 */ #define SSB_CHIPCO_CLK_SB2MIPS_T6(sb) (2 * (sb)) /* Common clock base */ #define SSB_CHIPCO_CLK_BASE1 24000000 /* Half the clock freq */ #define SSB_CHIPCO_CLK_BASE2 12500000 /* Alternate crystal on some PLL's */ /* Clock control values for 200Mhz in 5350 */ #define SSB_CHIPCO_CLK_5350_N 0x0311 #define SSB_CHIPCO_CLK_5350_M 0x04020009 /** Bits in the config registers **/ #define SSB_CHIPCO_CFG_EN 0x0001 /* Enable */ #define SSB_CHIPCO_CFG_EXTM 0x000E /* Extif Mode */ #define SSB_CHIPCO_CFG_EXTM_ASYNC 0x0002 /* Async/Parallel flash */ #define SSB_CHIPCO_CFG_EXTM_SYNC 0x0004 /* Synchronous */ #define SSB_CHIPCO_CFG_EXTM_PCMCIA 0x0008 /* PCMCIA */ #define 
SSB_CHIPCO_CFG_EXTM_IDE 0x000A /* IDE */ #define SSB_CHIPCO_CFG_DS16 0x0010 /* Data size, 0=8bit, 1=16bit */ #define SSB_CHIPCO_CFG_CLKDIV 0x0060 /* Sync: Clock divisor */ #define SSB_CHIPCO_CFG_CLKEN 0x0080 /* Sync: Clock enable */ #define SSB_CHIPCO_CFG_BSTRO 0x0100 /* Sync: Size/Bytestrobe */ /** Flash-specific control/status values */ /* flashcontrol opcodes for ST flashes */ #define SSB_CHIPCO_FLASHCTL_ST_WREN 0x0006 /* Write Enable */ #define SSB_CHIPCO_FLASHCTL_ST_WRDIS 0x0004 /* Write Disable */ #define SSB_CHIPCO_FLASHCTL_ST_RDSR 0x0105 /* Read Status Register */ #define SSB_CHIPCO_FLASHCTL_ST_WRSR 0x0101 /* Write Status Register */ #define SSB_CHIPCO_FLASHCTL_ST_READ 0x0303 /* Read Data Bytes */ #define SSB_CHIPCO_FLASHCTL_ST_PP 0x0302 /* Page Program */ #define SSB_CHIPCO_FLASHCTL_ST_SE 0x02D8 /* Sector Erase */ #define SSB_CHIPCO_FLASHCTL_ST_BE 0x00C7 /* Bulk Erase */ #define SSB_CHIPCO_FLASHCTL_ST_DP 0x00B9 /* Deep Power-down */ #define SSB_CHIPCO_FLASHCTL_ST_RES 0x03AB /* Read Electronic Signature */ #define SSB_CHIPCO_FLASHCTL_ST_CSA 0x1000 /* Keep chip select asserted */ #define SSB_CHIPCO_FLASHCTL_ST_SSE 0x0220 /* Sub-sector Erase */ /* Status register bits for ST flashes */ #define SSB_CHIPCO_FLASHSTA_ST_WIP 0x01 /* Write In Progress */ #define SSB_CHIPCO_FLASHSTA_ST_WEL 0x02 /* Write Enable Latch */ #define SSB_CHIPCO_FLASHSTA_ST_BP 0x1C /* Block Protect */ #define SSB_CHIPCO_FLASHSTA_ST_BP_SHIFT 2 #define SSB_CHIPCO_FLASHSTA_ST_SRWD 0x80 /* Status Register Write Disable */ /* flashcontrol opcodes for Atmel flashes */ #define SSB_CHIPCO_FLASHCTL_AT_READ 0x07E8 #define SSB_CHIPCO_FLASHCTL_AT_PAGE_READ 0x07D2 #define SSB_CHIPCO_FLASHCTL_AT_BUF1_READ /* FIXME */ #define SSB_CHIPCO_FLASHCTL_AT_BUF2_READ /* FIXME */ #define SSB_CHIPCO_FLASHCTL_AT_STATUS 0x01D7 #define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRITE 0x0384 #define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRITE 0x0387 #define SSB_CHIPCO_FLASHCTL_AT_BUF1_ERASE_PRGM 0x0283 /* Erase program */ #define SSB_CHIPCO_FLASHCTL_AT_BUF2_ERASE_PRGM 0x0286 /* Erase program */ #define SSB_CHIPCO_FLASHCTL_AT_BUF1_PROGRAM 0x0288 #define SSB_CHIPCO_FLASHCTL_AT_BUF2_PROGRAM 0x0289 #define SSB_CHIPCO_FLASHCTL_AT_PAGE_ERASE 0x0281 #define SSB_CHIPCO_FLASHCTL_AT_BLOCK_ERASE 0x0250 #define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRER_PRGM 0x0382 /* Write erase program */ #define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRER_PRGM 0x0385 /* Write erase program */ #define SSB_CHIPCO_FLASHCTL_AT_BUF1_LOAD 0x0253 #define SSB_CHIPCO_FLASHCTL_AT_BUF2_LOAD 0x0255 #define SSB_CHIPCO_FLASHCTL_AT_BUF1_COMPARE 0x0260 #define SSB_CHIPCO_FLASHCTL_AT_BUF2_COMPARE 0x0261 #define SSB_CHIPCO_FLASHCTL_AT_BUF1_REPROGRAM 0x0258 #define SSB_CHIPCO_FLASHCTL_AT_BUF2_REPROGRAM 0x0259 /* Status register bits for Atmel flashes */ #define SSB_CHIPCO_FLASHSTA_AT_READY 0x80 #define SSB_CHIPCO_FLASHSTA_AT_MISMATCH 0x40 #define SSB_CHIPCO_FLASHSTA_AT_ID 0x38 #define SSB_CHIPCO_FLASHSTA_AT_ID_SHIFT 3 /** OTP **/ /* OTP regions */ #define SSB_CHIPCO_OTP_HW_REGION SSB_CHIPCO_OTPS_HW_PROTECT #define SSB_CHIPCO_OTP_SW_REGION SSB_CHIPCO_OTPS_SW_PROTECT #define SSB_CHIPCO_OTP_CID_REGION SSB_CHIPCO_OTPS_CID_PROTECT /* OTP regions (Byte offsets from otp size) */ #define SSB_CHIPCO_OTP_SWLIM_OFF (-8) #define SSB_CHIPCO_OTP_CIDBASE_OFF 0 #define SSB_CHIPCO_OTP_CIDLIM_OFF 8 /* Predefined OTP words (Word offset from otp size) */ #define SSB_CHIPCO_OTP_BOUNDARY_OFF (-4) #define SSB_CHIPCO_OTP_HWSIGN_OFF (-3) #define SSB_CHIPCO_OTP_SWSIGN_OFF (-2) #define SSB_CHIPCO_OTP_CIDSIGN_OFF (-1) #define SSB_CHIPCO_OTP_CID_OFF 0 #define 
SSB_CHIPCO_OTP_PKG_OFF 1 #define SSB_CHIPCO_OTP_FID_OFF 2 #define SSB_CHIPCO_OTP_RSV_OFF 3 #define SSB_CHIPCO_OTP_LIM_OFF 4 #define SSB_CHIPCO_OTP_SIGNATURE 0x578A #define SSB_CHIPCO_OTP_MAGIC 0x4E56 struct ssb_device; struct ssb_serial_port; /* Data for the PMU, if available. * Check availability with ((struct ssb_chipcommon)->capabilities & SSB_CHIPCO_CAP_PMU) */ struct ssb_chipcommon_pmu { u8 rev; /* PMU revision */ u32 crystalfreq; /* The active crystal frequency (in kHz) */ }; struct ssb_chipcommon { struct ssb_device *dev; u32 capabilities; u32 status; /* Fast Powerup Delay constant */ u16 fast_pwrup_delay; spinlock_t gpio_lock; struct ssb_chipcommon_pmu pmu; u32 ticks_per_ms; u32 max_timer_ms; }; static inline bool ssb_chipco_available(struct ssb_chipcommon *cc) { return (cc->dev != NULL); } /* Register access */ #define chipco_read32(cc, offset) ssb_read32((cc)->dev, offset) #define chipco_write32(cc, offset, val) ssb_write32((cc)->dev, offset, val) #define chipco_mask32(cc, offset, mask) \ chipco_write32(cc, offset, chipco_read32(cc, offset) & (mask)) #define chipco_set32(cc, offset, set) \ chipco_write32(cc, offset, chipco_read32(cc, offset) | (set)) #define chipco_maskset32(cc, offset, mask, set) \ chipco_write32(cc, offset, (chipco_read32(cc, offset) & (mask)) | (set)) extern void ssb_chipcommon_init(struct ssb_chipcommon *cc); extern void ssb_chipco_suspend(struct ssb_chipcommon *cc); extern void ssb_chipco_resume(struct ssb_chipcommon *cc); extern void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc, u32 *plltype, u32 *n, u32 *m); extern void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc, u32 *plltype, u32 *n, u32 *m); extern void ssb_chipco_timing_init(struct ssb_chipcommon *cc, unsigned long ns_per_cycle); enum ssb_clkmode { SSB_CLKMODE_SLOW, SSB_CLKMODE_FAST, SSB_CLKMODE_DYNAMIC, }; extern void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, enum ssb_clkmode mode); extern u32 ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, u32 ticks); void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value); u32 ssb_chipco_irq_status(struct ssb_chipcommon *cc, u32 mask); /* Chipcommon GPIO pin access. 
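*/

/*
 * Illustrative sketch (not part of the original header): every GPIO accessor
 * below takes a mask of affected pins plus the value to apply to the masked
 * bits. Driving pin 3 high as an output could look like this; the function
 * name is hypothetical, and the two prototypes are repeated from below so
 * the sketch stands alone.
 */
u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value);

static inline void example_drive_gpio3_high(struct ssb_chipcommon *cc)
{
	const u32 pin = 1 << 3;

	if (!ssb_chipco_available(cc))
		return;
	ssb_chipco_gpio_outen(cc, pin, pin);	/* make pin 3 an output */
	ssb_chipco_gpio_out(cc, pin, pin);	/* drive it high */
}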
u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask);
u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_pullup(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_pulldown(struct ssb_chipcommon *cc, u32 mask, u32 value);

#ifdef CONFIG_SSB_SERIAL
extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
				  struct ssb_serial_port *ports);
#endif /* CONFIG_SSB_SERIAL */

/* PMU support */
extern void ssb_pmu_init(struct ssb_chipcommon *cc);

enum ssb_pmu_ldo_volt_id {
	LDO_PAREF = 0,
	LDO_VOLT1,
	LDO_VOLT2,
	LDO_VOLT3,
};

void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
			     enum ssb_pmu_ldo_volt_id id, u32 voltage);
void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on);
void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid);

#endif /* LINUX_SSB_CHIPCO_H_ */
sw842.h 0000644 00000000510 14722070374 0005606 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SW842_H__
#define __SW842_H__

#define SW842_MEM_COMPRESS	(0xf000)

int sw842_compress(const u8 *src, unsigned int srclen, u8 *dst,
		   unsigned int *destlen, void *wmem);
int sw842_decompress(const u8 *src, unsigned int srclen, u8 *dst,
		     unsigned int *destlen);

#endif
hrtimer_defs.h 0000644 00000001224 14722070374 0007375 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HRTIMER_DEFS_H
#define _LINUX_HRTIMER_DEFS_H

#include <linux/ktime.h>

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * The resolution of the clocks. The resolution value is returned in
 * the clock_getres() system call to give application programmers an
 * idea of the (in)accuracy of timers. Timer values are rounded up to
 * this resolution value.
 */
# define HIGH_RES_NSEC		1
# define KTIME_HIGH_RES		(HIGH_RES_NSEC)
# define MONOTONIC_RES_NSEC	HIGH_RES_NSEC
# define KTIME_MONOTONIC_RES	KTIME_HIGH_RES

#else

# define MONOTONIC_RES_NSEC	LOW_RES_NSEC
# define KTIME_MONOTONIC_RES	KTIME_LOW_RES

#endif

#endif
elf.h 0000644 00000003035 14722070374 0005472 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ELF_H
#define _LINUX_ELF_H

#include <asm/elf.h>
#include <uapi/linux/elf.h>

#ifndef elf_read_implies_exec
/* Executables for which elf_read_implies_exec() returns TRUE will have
   the READ_IMPLIES_EXEC personality flag set automatically.
   Override in asm/elf.h as needed. */
# define elf_read_implies_exec(ex, have_pt_gnu_stack)	0
#endif

#ifndef SET_PERSONALITY
#define SET_PERSONALITY(ex) \
	set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#endif

#ifndef SET_PERSONALITY2
#define SET_PERSONALITY2(ex, state) \
	SET_PERSONALITY(ex)
#endif

#if ELF_CLASS == ELFCLASS32
extern Elf32_Dyn _DYNAMIC [];
#define elfhdr		elf32_hdr
#define elf_phdr	elf32_phdr
#define elf_shdr	elf32_shdr
#define elf_note	elf32_note
#define elf_addr_t	Elf32_Off
#define Elf_Half	Elf32_Half
#define Elf_Word	Elf32_Word
#else
extern Elf64_Dyn _DYNAMIC [];
#define elfhdr		elf64_hdr
#define elf_phdr	elf64_phdr
#define elf_shdr	elf64_shdr
#define elf_note	elf64_note
#define elf_addr_t	Elf64_Off
#define Elf_Half	Elf64_Half
#define Elf_Word	Elf64_Word
#endif

/* Optional callbacks to write extra ELF notes.
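*/

/*
 * Illustrative sketch (not part of the original header): an architecture that
 * wants extra notes in core dumps defines ARCH_HAVE_EXTRA_ELF_NOTES in its
 * asm/elf.h and implements the two hooks declared below in arch code, roughly
 * along these lines. The payload here is deliberately empty; dump_emit() is
 * the usual way to write the note data.
 */
struct coredump_params;

#ifdef ARCH_HAVE_EXTRA_ELF_NOTES
int elf_coredump_extra_notes_size(void)
{
	/* Return the total size the extra notes occupy in the core file. */
	return 0;
}

int elf_coredump_extra_notes_write(struct coredump_params *cprm)
{
	/* Emit each note with dump_emit(cprm, buf, len); return 0 on success. */
	return 0;
}
#endif

/* (forward declarations and the default no-op stubs follow)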
*/ struct file; struct coredump_params; #ifndef ARCH_HAVE_EXTRA_ELF_NOTES static inline int elf_coredump_extra_notes_size(void) { return 0; } static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; } #else extern int elf_coredump_extra_notes_size(void); extern int elf_coredump_extra_notes_write(struct coredump_params *cprm); #endif #endif /* _LINUX_ELF_H */ lcm.h 0000644 00000000423 14722070374 0005475 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LCM_H #define _LCM_H #include <linux/compiler.h> unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__; #endif /* _LCM_H */ via-core.h 0000644 00000015153 14722070374 0006435 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * Copyright 2009-2010 Jonathan Corbet <corbet@lwn.net> * Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de> */ #ifndef __VIA_CORE_H__ #define __VIA_CORE_H__ #include <linux/types.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/pci.h> /* * A description of each known serial I2C/GPIO port. */ enum via_port_type { VIA_PORT_NONE = 0, VIA_PORT_I2C, VIA_PORT_GPIO, }; enum via_port_mode { VIA_MODE_OFF = 0, VIA_MODE_I2C, /* Used as I2C port */ VIA_MODE_GPIO, /* Two GPIO ports */ }; enum viafb_i2c_adap { VIA_PORT_26 = 0, VIA_PORT_31, VIA_PORT_25, VIA_PORT_2C, VIA_PORT_3D, }; #define VIAFB_NUM_PORTS 5 struct via_port_cfg { enum via_port_type type; enum via_port_mode mode; u16 io_port; u8 ioport_index; }; /* * Allow subdevs to register suspend/resume hooks. */ #ifdef CONFIG_PM struct viafb_pm_hooks { struct list_head list; int (*suspend)(void *private); int (*resume)(void *private); void *private; }; void viafb_pm_register(struct viafb_pm_hooks *hooks); void viafb_pm_unregister(struct viafb_pm_hooks *hooks); #endif /* CONFIG_PM */ /* * This is the global viafb "device" containing stuff needed by * all subdevs. */ struct viafb_dev { struct pci_dev *pdev; int chip_type; struct via_port_cfg *port_cfg; /* * Spinlock for access to device registers. Not yet * globally used. */ spinlock_t reg_lock; /* * The framebuffer MMIO region. Little, if anything, touches * this memory directly, and certainly nothing outside of the * framebuffer device itself. We *do* have to be able to allocate * chunks of this memory for other devices, though. */ unsigned long fbmem_start; long fbmem_len; void __iomem *fbmem; #if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE) long camera_fbmem_offset; long camera_fbmem_size; #endif /* * The MMIO region for device registers. */ unsigned long engine_start; unsigned long engine_len; void __iomem *engine_mmio; }; /* * Interrupt management. */ void viafb_irq_enable(u32 mask); void viafb_irq_disable(u32 mask); /* * The global interrupt control register and its bits. */ #define VDE_INTERRUPT 0x200 /* Video interrupt flags/masks */ #define VDE_I_DVISENSE 0x00000001 /* DVI sense int status */ #define VDE_I_VBLANK 0x00000002 /* Vertical blank status */ #define VDE_I_MCCFI 0x00000004 /* MCE compl. 
frame int status */ #define VDE_I_VSYNC 0x00000008 /* VGA VSYNC int status */ #define VDE_I_DMA0DDONE 0x00000010 /* DMA 0 descr done */ #define VDE_I_DMA0TDONE 0x00000020 /* DMA 0 transfer done */ #define VDE_I_DMA1DDONE 0x00000040 /* DMA 1 descr done */ #define VDE_I_DMA1TDONE 0x00000080 /* DMA 1 transfer done */ #define VDE_I_C1AV 0x00000100 /* Cap Eng 1 act vid end */ #define VDE_I_HQV0 0x00000200 /* First HQV engine */ #define VDE_I_HQV1 0x00000400 /* Second HQV engine */ #define VDE_I_HQV1EN 0x00000800 /* Second HQV engine enable */ #define VDE_I_C0AV 0x00001000 /* Cap Eng 0 act vid end */ #define VDE_I_C0VBI 0x00002000 /* Cap Eng 0 VBI end */ #define VDE_I_C1VBI 0x00004000 /* Cap Eng 1 VBI end */ #define VDE_I_VSYNC2 0x00008000 /* Sec. Disp. VSYNC */ #define VDE_I_DVISNSEN 0x00010000 /* DVI sense enable */ #define VDE_I_VSYNC2EN 0x00020000 /* Sec Disp VSYNC enable */ #define VDE_I_MCCFIEN 0x00040000 /* MC comp frame int mask enable */ #define VDE_I_VSYNCEN 0x00080000 /* VSYNC enable */ #define VDE_I_DMA0DDEN 0x00100000 /* DMA 0 descr done enable */ #define VDE_I_DMA0TDEN 0x00200000 /* DMA 0 trans done enable */ #define VDE_I_DMA1DDEN 0x00400000 /* DMA 1 descr done enable */ #define VDE_I_DMA1TDEN 0x00800000 /* DMA 1 trans done enable */ #define VDE_I_C1AVEN 0x01000000 /* cap 1 act vid end enable */ #define VDE_I_HQV0EN 0x02000000 /* First hqv engine enable */ #define VDE_I_C1VBIEN 0x04000000 /* Cap 1 VBI end enable */ #define VDE_I_LVDSSI 0x08000000 /* LVDS sense interrupt */ #define VDE_I_C0AVEN 0x10000000 /* Cap 0 act vid end enable */ #define VDE_I_C0VBIEN 0x20000000 /* Cap 0 VBI end enable */ #define VDE_I_LVDSSIEN 0x40000000 /* LVDS Sense enable */ #define VDE_I_ENABLE 0x80000000 /* Global interrupt enable */ #if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE) /* * DMA management. */ int viafb_request_dma(void); void viafb_release_dma(void); /* void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len); */ int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg); /* * DMA Controller registers. */ #define VDMA_MR0 0xe00 /* Mod reg 0 */ #define VDMA_MR_CHAIN 0x01 /* Chaining mode */ #define VDMA_MR_TDIE 0x02 /* Transfer done int enable */ #define VDMA_CSR0 0xe04 /* Control/status */ #define VDMA_C_ENABLE 0x01 /* DMA Enable */ #define VDMA_C_START 0x02 /* Start a transfer */ #define VDMA_C_ABORT 0x04 /* Abort a transfer */ #define VDMA_C_DONE 0x08 /* Transfer is done */ #define VDMA_MARL0 0xe20 /* Mem addr low */ #define VDMA_MARH0 0xe24 /* Mem addr high */ #define VDMA_DAR0 0xe28 /* Device address */ #define VDMA_DQWCR0 0xe2c /* Count (16-byte) */ #define VDMA_TMR0 0xe30 /* Tile mode reg */ #define VDMA_DPRL0 0xe34 /* Not sure */ #define VDMA_DPR_IN 0x08 /* Inbound transfer to FB */ #define VDMA_DPRH0 0xe38 #define VDMA_PMR0 (0xe00 + 0x134) /* Pitch mode */ /* * Useful stuff that probably belongs somewhere global. */ #define VGA_WIDTH 640 #define VGA_HEIGHT 480 #endif /* CONFIG_VIDEO_VIA_CAMERA */ /* * Indexed port operations. Note that these are all multi-op * functions; every invocation will be racy if you're not holding * reg_lock. 
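*/

/*
 * Illustrative sketch (not part of the original header): since each helper
 * below issues two or three port operations, callers are expected to hold
 * reg_lock from struct viafb_dev around them. The register index and bit
 * chosen here are made up, and the helper prototype is repeated so the
 * sketch stands alone.
 */
static inline void via_write_reg_mask(u16 port, u8 index, u8 data, u8 mask);

static inline void example_set_cr_bit(struct viafb_dev *vdev)
{
	unsigned long flags;

	spin_lock_irqsave(&vdev->reg_lock, flags);
	/* Read-modify-write bit 0 of a (made-up) CRTC register 0x17;
	 * 0x3d4 is VIACR, defined just below. */
	via_write_reg_mask(0x3d4, 0x17, 0x01, 0x01);
	spin_unlock_irqrestore(&vdev->reg_lock, flags);
}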
#define VIAStatus	0x3DA  /* Non-indexed port */
#define VIACR		0x3D4
#define VIASR		0x3C4
#define VIAGR		0x3CE
#define VIAAR		0x3C0

static inline u8 via_read_reg(u16 port, u8 index)
{
	outb(index, port);
	return inb(port + 1);
}

static inline void via_write_reg(u16 port, u8 index, u8 data)
{
	outb(index, port);
	outb(data, port + 1);
}

static inline void via_write_reg_mask(u16 port, u8 index, u8 data, u8 mask)
{
	u8 old;

	outb(index, port);
	old = inb(port + 1);
	outb((data & mask) | (old & ~mask), port + 1);
}

#define VIA_MISC_REG_READ	0x03CC
#define VIA_MISC_REG_WRITE	0x03C2

static inline void via_write_misc_reg_mask(u8 data, u8 mask)
{
	u8 old = inb(VIA_MISC_REG_READ);
	outb((data & mask) | (old & ~mask), VIA_MISC_REG_WRITE);
}

#endif /* __VIA_CORE_H__ */
phy/ulpi_phy.h 0000644 00000001417 14722070374 0007357 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/phy/phy.h>

/**
 * Helper that registers PHY for a ULPI device and adds a lookup for binding it
 * and its controller, which is always the parent.
 */
static inline struct phy
*ulpi_phy_create(struct ulpi *ulpi, const struct phy_ops *ops)
{
	struct phy *phy;
	int ret;

	phy = phy_create(&ulpi->dev, NULL, ops);
	if (IS_ERR(phy))
		return phy;

	ret = phy_create_lookup(phy, "usb2-phy", dev_name(ulpi->dev.parent));
	if (ret) {
		phy_destroy(phy);
		return ERR_PTR(ret);
	}

	return phy;
}

/* Remove a PHY that was created with ulpi_phy_create() and its lookup. */
static inline void ulpi_phy_destroy(struct ulpi *ulpi, struct phy *phy)
{
	phy_remove_lookup(phy, "usb2-phy", dev_name(ulpi->dev.parent));
	phy_destroy(phy);
}
phy/tegra/xusb.h 0000644 00000001273 14722070374 0007611 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 */

#ifndef PHY_TEGRA_XUSB_H
#define PHY_TEGRA_XUSB_H

struct tegra_xusb_padctl;
struct device;

struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev);
void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl);

int tegra_xusb_padctl_usb3_save_context(struct tegra_xusb_padctl *padctl,
					unsigned int port);
int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
				    unsigned int port, bool idle);
int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
					   unsigned int port, bool enable);

#endif /* PHY_TEGRA_XUSB_H */
phy/omap_usb.h 0000644 00000004104 14722070374 0007327 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * omap_usb.h -- omap usb2 phy header file
 *
 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#ifndef __DRIVERS_OMAP_USB2_H
#define __DRIVERS_OMAP_USB2_H

#include <linux/io.h>
#include <linux/usb/otg.h>

struct usb_dpll_params {
	u16	m;
	u8	n;
	u8	freq:3;
	u8	sd;
	u32	mf;
};

enum omap_usb_phy_type {
	TYPE_USB2,	/* USB2_PHY, power down in CONTROL_DEV_CONF */
	TYPE_DRA7USB2,	/* USB2 PHY, power and power_aux e.g. DRA7 */
	TYPE_AM437USB2,	/* USB2 PHY, power e.g. AM437x */
};

struct omap_usb {
	struct usb_phy		phy;
	struct phy_companion	*comparator;
	void __iomem		*pll_ctrl_base;
	void __iomem		*phy_base;
	struct device		*dev;
	struct device		*control_dev;
	struct clk		*wkupclk;
	struct clk		*optclk;
	u8			flags;
	enum omap_usb_phy_type	type;
	struct regmap		*syscon_phy_power; /* ctrl. reg. access */
	unsigned int		power_reg; /* power reg.
index within syscon */
	u32			mask;
	u32			power_on;
	u32			power_off;
};

struct usb_phy_data {
	const char	*label;
	u8		flags;
	u32		mask;
	u32		power_on;
	u32		power_off;
};

/* Driver Flags */
#define OMAP_USB2_HAS_START_SRP			(1 << 0)
#define OMAP_USB2_HAS_SET_VBUS			(1 << 1)
#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT	(1 << 2)

#define OMAP_DEV_PHY_PD		BIT(0)
#define OMAP_USB2_PHY_PD	BIT(28)

#define AM437X_USB2_PHY_PD		BIT(0)
#define AM437X_USB2_OTG_PD		BIT(1)
#define AM437X_USB2_OTGVDET_EN		BIT(19)
#define AM437X_USB2_OTGSESSEND_EN	BIT(20)

#define phy_to_omapusb(x)	container_of((x), struct omap_usb, phy)

#if defined(CONFIG_OMAP_USB2) || defined(CONFIG_OMAP_USB2_MODULE)
extern int omap_usb2_set_comparator(struct phy_companion *comparator);
#else
static inline int omap_usb2_set_comparator(struct phy_companion *comparator)
{
	return -ENODEV;
}
#endif

static inline u32 omap_usb_readl(void __iomem *addr, unsigned offset)
{
	return __raw_readl(addr + offset);
}

static inline void omap_usb_writel(void __iomem *addr, unsigned offset,
				   u32 data)
{
	__raw_writel(data, addr + offset);
}

#endif /* __DRIVERS_OMAP_USB_H */
phy/omap_control_phy.h 0000644 00000004631 14722070374 0011103 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * omap_control_phy.h - Header file for the PHY part of control module.
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#ifndef __OMAP_CONTROL_PHY_H__
#define __OMAP_CONTROL_PHY_H__

enum omap_control_phy_type {
	OMAP_CTRL_TYPE_OTGHS = 1,	/* Mailbox OTGHS_CONTROL */
	OMAP_CTRL_TYPE_USB2,	/* USB2_PHY, power down in CONTROL_DEV_CONF */
	OMAP_CTRL_TYPE_PIPE3,	/* PIPE3 PHY, DPLL & separate Rx/Tx power */
	OMAP_CTRL_TYPE_PCIE,	/* RX TX control of ACSPCIE */
	OMAP_CTRL_TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */
	OMAP_CTRL_TYPE_AM437USB2, /* USB2 PHY, power e.g.
AM437x */ }; struct omap_control_phy { struct device *dev; u32 __iomem *otghs_control; u32 __iomem *power; u32 __iomem *power_aux; u32 __iomem *pcie_pcs; struct clk *sys_clk; enum omap_control_phy_type type; }; enum omap_control_usb_mode { USB_MODE_UNDEFINED = 0, USB_MODE_HOST, USB_MODE_DEVICE, USB_MODE_DISCONNECT, }; #define OMAP_CTRL_DEV_PHY_PD BIT(0) #define OMAP_CTRL_DEV_AVALID BIT(0) #define OMAP_CTRL_DEV_BVALID BIT(1) #define OMAP_CTRL_DEV_VBUSVALID BIT(2) #define OMAP_CTRL_DEV_SESSEND BIT(3) #define OMAP_CTRL_DEV_IDDIG BIT(4) #define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK 0x003FC000 #define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT 0xE #define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK 0xFFC00000 #define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT 0x16 #define OMAP_CTRL_PIPE3_PHY_TX_RX_POWERON 0x3 #define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0 #define OMAP_CTRL_PCIE_PCS_MASK 0xff #define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 16 #define OMAP_CTRL_USB2_PHY_PD BIT(28) #define AM437X_CTRL_USB2_PHY_PD BIT(0) #define AM437X_CTRL_USB2_OTG_PD BIT(1) #define AM437X_CTRL_USB2_OTGVDET_EN BIT(19) #define AM437X_CTRL_USB2_OTGSESSEND_EN BIT(20) #if IS_ENABLED(CONFIG_OMAP_CONTROL_PHY) void omap_control_phy_power(struct device *dev, int on); void omap_control_usb_set_mode(struct device *dev, enum omap_control_usb_mode mode); void omap_control_pcie_pcs(struct device *dev, u8 delay); #else static inline void omap_control_phy_power(struct device *dev, int on) { } static inline void omap_control_usb_set_mode(struct device *dev, enum omap_control_usb_mode mode) { } static inline void omap_control_pcie_pcs(struct device *dev, u8 delay) { } #endif #endif /* __OMAP_CONTROL_PHY_H__ */ phy/phy-mipi-dphy.h 0000644 00000014355 14722070374 0010231 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018 Cadence Design Systems Inc. */ #ifndef __PHY_MIPI_DPHY_H_ #define __PHY_MIPI_DPHY_H_ /** * struct phy_configure_opts_mipi_dphy - MIPI D-PHY configuration set * * This structure is used to represent the configuration state of a * MIPI D-PHY phy. */ struct phy_configure_opts_mipi_dphy { /** * @clk_miss: * * Timeout, in picoseconds, for receiver to detect absence of * Clock transitions and disable the Clock Lane HS-RX. * * Maximum value: 60000 ps */ unsigned int clk_miss; /** * @clk_post: * * Time, in picoseconds, that the transmitter continues to * send HS clock after the last associated Data Lane has * transitioned to LP Mode. Interval is defined as the period * from the end of @hs_trail to the beginning of @clk_trail. * * Minimum value: 60000 ps + 52 * @hs_clk_rate period in ps */ unsigned int clk_post; /** * @clk_pre: * * Time, in UI, that the HS clock shall be driven by * the transmitter prior to any associated Data Lane beginning * the transition from LP to HS mode. * * Minimum value: 8 UI */ unsigned int clk_pre; /** * @clk_prepare: * * Time, in picoseconds, that the transmitter drives the Clock * Lane LP-00 Line state immediately before the HS-0 Line * state starting the HS transmission. * * Minimum value: 38000 ps * Maximum value: 95000 ps */ unsigned int clk_prepare; /** * @clk_settle: * * Time interval, in picoseconds, during which the HS receiver * should ignore any Clock Lane HS transitions, starting from * the beginning of @clk_prepare. * * Minimum value: 95000 ps * Maximum value: 300000 ps */ unsigned int clk_settle; /** * @clk_term_en: * * Time, in picoseconds, for the Clock Lane receiver to enable * the HS line termination. 
* * Maximum value: 38000 ps */ unsigned int clk_term_en; /** * @clk_trail: * * Time, in picoseconds, that the transmitter drives the HS-0 * state after the last payload clock bit of a HS transmission * burst. * * Minimum value: 60000 ps */ unsigned int clk_trail; /** * @clk_zero: * * Time, in picoseconds, that the transmitter drives the HS-0 * state prior to starting the Clock. */ unsigned int clk_zero; /** * @d_term_en: * * Time, in picoseconds, for the Data Lane receiver to enable * the HS line termination. * * Maximum value: 35000 ps + 4 * @hs_clk_rate period in ps */ unsigned int d_term_en; /** * @eot: * * Transmitted time interval, in picoseconds, from the start * of @hs_trail or @clk_trail, to the start of the LP- 11 * state following a HS burst. * * Maximum value: 105000 ps + 12 * @hs_clk_rate period in ps */ unsigned int eot; /** * @hs_exit: * * Time, in picoseconds, that the transmitter drives LP-11 * following a HS burst. * * Minimum value: 100000 ps */ unsigned int hs_exit; /** * @hs_prepare: * * Time, in picoseconds, that the transmitter drives the Data * Lane LP-00 Line state immediately before the HS-0 Line * state starting the HS transmission. * * Minimum value: 40000 ps + 4 * @hs_clk_rate period in ps * Maximum value: 85000 ps + 6 * @hs_clk_rate period in ps */ unsigned int hs_prepare; /** * @hs_settle: * * Time interval, in picoseconds, during which the HS receiver * shall ignore any Data Lane HS transitions, starting from * the beginning of @hs_prepare. * * Minimum value: 85000 ps + 6 * @hs_clk_rate period in ps * Maximum value: 145000 ps + 10 * @hs_clk_rate period in ps */ unsigned int hs_settle; /** * @hs_skip: * * Time interval, in picoseconds, during which the HS-RX * should ignore any transitions on the Data Lane, following a * HS burst. The end point of the interval is defined as the * beginning of the LP-11 state following the HS burst. * * Minimum value: 40000 ps * Maximum value: 55000 ps + 4 * @hs_clk_rate period in ps */ unsigned int hs_skip; /** * @hs_trail: * * Time, in picoseconds, that the transmitter drives the * flipped differential state after last payload data bit of a * HS transmission burst * * Minimum value: max(8 * @hs_clk_rate period in ps, * 60000 ps + 4 * @hs_clk_rate period in ps) */ unsigned int hs_trail; /** * @hs_zero: * * Time, in picoseconds, that the transmitter drives the HS-0 * state prior to transmitting the Sync sequence. */ unsigned int hs_zero; /** * @init: * * Time, in microseconds for the initialization period to * complete. * * Minimum value: 100 us */ unsigned int init; /** * @lpx: * * Transmitted length, in picoseconds, of any Low-Power state * period. * * Minimum value: 50000 ps */ unsigned int lpx; /** * @ta_get: * * Time, in picoseconds, that the new transmitter drives the * Bridge state (LP-00) after accepting control during a Link * Turnaround. * * Value: 5 * @lpx */ unsigned int ta_get; /** * @ta_go: * * Time, in picoseconds, that the transmitter drives the * Bridge state (LP-00) before releasing control during a Link * Turnaround. * * Value: 4 * @lpx */ unsigned int ta_go; /** * @ta_sure: * * Time, in picoseconds, that the new transmitter waits after * the LP-10 state before transmitting the Bridge state * (LP-00) during a Link Turnaround. * * Minimum value: @lpx * Maximum value: 2 * @lpx */ unsigned int ta_sure; /** * @wakeup: * * Time, in microseconds, that a transmitter drives a Mark-1 * state prior to a Stop state in order to initiate an exit * from ULPS. 
 *
	 * Minimum value: 1000 us
	 */
	unsigned int		wakeup;

	/**
	 * @hs_clk_rate:
	 *
	 * Clock rate, in Hertz, of the high-speed clock.
	 */
	unsigned long		hs_clk_rate;

	/**
	 * @lp_clk_rate:
	 *
	 * Clock rate, in Hertz, of the low-power clock.
	 */
	unsigned long		lp_clk_rate;

	/**
	 * @lanes:
	 *
	 * Number of active, consecutive, data lanes, starting from
	 * lane 0, used for the transmissions.
	 */
	unsigned char		lanes;
};

int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
				     unsigned int bpp,
				     unsigned int lanes,
				     struct phy_configure_opts_mipi_dphy *cfg);
int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg);

#endif /* __PHY_MIPI_DPHY_H_ */
phy/phy-sun4i-usb.h 0000644 00000000655 14722070374 0010160 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2015 Hans de Goede <hdegoede@redhat.com>
 */

#ifndef PHY_SUN4I_USB_H_
#define PHY_SUN4I_USB_H_

#include "phy.h"

/**
 * sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect
 * @phy: reference to a sun4i usb phy
 * @enabled: whether to enable or disable squelch detect
 */
void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled);

#endif
phy/phy.h 0000644 00000030317 14722070374 0006327 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * phy.h -- generic phy header file
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 *
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#ifndef __DRIVERS_PHY_H
#define __DRIVERS_PHY_H

#include <linux/err.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/phy/phy-mipi-dphy.h>

struct phy;

enum phy_mode {
	PHY_MODE_INVALID,
	PHY_MODE_USB_HOST,
	PHY_MODE_USB_HOST_LS,
	PHY_MODE_USB_HOST_FS,
	PHY_MODE_USB_HOST_HS,
	PHY_MODE_USB_HOST_SS,
	PHY_MODE_USB_DEVICE,
	PHY_MODE_USB_DEVICE_LS,
	PHY_MODE_USB_DEVICE_FS,
	PHY_MODE_USB_DEVICE_HS,
	PHY_MODE_USB_DEVICE_SS,
	PHY_MODE_USB_OTG,
	PHY_MODE_UFS_HS_A,
	PHY_MODE_UFS_HS_B,
	PHY_MODE_PCIE,
	PHY_MODE_ETHERNET,
	PHY_MODE_MIPI_DPHY,
	PHY_MODE_SATA
};

/**
 * union phy_configure_opts - Opaque generic phy configuration
 *
 * @mipi_dphy:	Configuration set applicable for phys supporting
 *		the MIPI_DPHY phy mode.
 */
union phy_configure_opts {
	struct phy_configure_opts_mipi_dphy	mipi_dphy;
};

/**
 * struct phy_ops - set of function pointers for performing phy operations
 * @init: operation to be performed for initializing phy
 * @exit: operation to be performed while exiting
 * @power_on: powering on the phy
 * @power_off: powering off the phy
 * @set_mode: set the mode of the phy
 * @reset: resetting the phy
 * @calibrate: calibrate the phy
 * @release: ops to be performed while the consumer relinquishes the PHY
 * @owner: the module owner containing the ops
 */
struct phy_ops {
	int	(*init)(struct phy *phy);
	int	(*exit)(struct phy *phy);
	int	(*power_on)(struct phy *phy);
	int	(*power_off)(struct phy *phy);
	int	(*set_mode)(struct phy *phy, enum phy_mode mode, int submode);

	/**
	 * @configure:
	 *
	 * Optional.
	 *
	 * Used to change the PHY parameters. phy_init() must have
	 * been called on the phy.
	 *
	 * Returns: 0 if successful, a negative error code otherwise
	 */
	int	(*configure)(struct phy *phy, union phy_configure_opts *opts);

	/**
	 * @validate:
	 *
	 * Optional.
	 *
	 * Used to check that the current set of parameters can be
	 * handled by the phy. Implementations are free to tune the
	 * parameters passed as arguments if needed by some
	 * implementation detail or constraints.
It must not change * any actual configuration of the PHY, so calling it as many * times as deemed fit by the consumer must have no side * effect. * * Returns: 0 if the configuration can be applied, an negative * error code otherwise */ int (*validate)(struct phy *phy, enum phy_mode mode, int submode, union phy_configure_opts *opts); int (*reset)(struct phy *phy); int (*calibrate)(struct phy *phy); void (*release)(struct phy *phy); struct module *owner; }; /** * struct phy_attrs - represents phy attributes * @bus_width: Data path width implemented by PHY * @mode: PHY mode */ struct phy_attrs { u32 bus_width; enum phy_mode mode; }; /** * struct phy - represents the phy device * @dev: phy device * @id: id of the phy device * @ops: function pointers for performing phy operations * @mutex: mutex to protect phy_ops * @init_count: used to protect when the PHY is used by multiple consumers * @power_count: used to protect when the PHY is used by multiple consumers * @attrs: used to specify PHY specific attributes * @pwr: power regulator associated with the phy */ struct phy { struct device dev; int id; const struct phy_ops *ops; struct mutex mutex; int init_count; int power_count; struct phy_attrs attrs; struct regulator *pwr; }; /** * struct phy_provider - represents the phy provider * @dev: phy provider device * @children: can be used to override the default (dev->of_node) child node * @owner: the module owner having of_xlate * @list: to maintain a linked list of PHY providers * @of_xlate: function pointer to obtain phy instance from phy pointer */ struct phy_provider { struct device *dev; struct device_node *children; struct module *owner; struct list_head list; struct phy * (*of_xlate)(struct device *dev, struct of_phandle_args *args); }; /** * struct phy_lookup - PHY association in list of phys managed by the phy driver * @node: list node * @dev_id: the device of the association * @con_id: connection ID string on device * @phy: the phy of the association */ struct phy_lookup { struct list_head node; const char *dev_id; const char *con_id; struct phy *phy; }; #define to_phy(a) (container_of((a), struct phy, dev)) #define of_phy_provider_register(dev, xlate) \ __of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate)) #define devm_of_phy_provider_register(dev, xlate) \ __devm_of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate)) #define of_phy_provider_register_full(dev, children, xlate) \ __of_phy_provider_register(dev, children, THIS_MODULE, xlate) #define devm_of_phy_provider_register_full(dev, children, xlate) \ __devm_of_phy_provider_register(dev, children, THIS_MODULE, xlate) static inline void phy_set_drvdata(struct phy *phy, void *data) { dev_set_drvdata(&phy->dev, data); } static inline void *phy_get_drvdata(struct phy *phy) { return dev_get_drvdata(&phy->dev); } #if IS_ENABLED(CONFIG_GENERIC_PHY) int phy_pm_runtime_get(struct phy *phy); int phy_pm_runtime_get_sync(struct phy *phy); int phy_pm_runtime_put(struct phy *phy); int phy_pm_runtime_put_sync(struct phy *phy); void phy_pm_runtime_allow(struct phy *phy); void phy_pm_runtime_forbid(struct phy *phy); int phy_init(struct phy *phy); int phy_exit(struct phy *phy); int phy_power_on(struct phy *phy); int phy_power_off(struct phy *phy); int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode); #define phy_set_mode(phy, mode) \ phy_set_mode_ext(phy, mode, 0) int phy_configure(struct phy *phy, union phy_configure_opts *opts); int phy_validate(struct phy *phy, enum phy_mode mode, int submode, union phy_configure_opts 
*opts); static inline enum phy_mode phy_get_mode(struct phy *phy) { return phy->attrs.mode; } int phy_reset(struct phy *phy); int phy_calibrate(struct phy *phy); static inline int phy_get_bus_width(struct phy *phy) { return phy->attrs.bus_width; } static inline void phy_set_bus_width(struct phy *phy, int bus_width) { phy->attrs.bus_width = bus_width; } struct phy *phy_get(struct device *dev, const char *string); struct phy *phy_optional_get(struct device *dev, const char *string); struct phy *devm_phy_get(struct device *dev, const char *string); struct phy *devm_phy_optional_get(struct device *dev, const char *string); struct phy *devm_of_phy_get(struct device *dev, struct device_node *np, const char *con_id); struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np, int index); void phy_put(struct phy *phy); void devm_phy_put(struct device *dev, struct phy *phy); struct phy *of_phy_get(struct device_node *np, const char *con_id); struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args *args); struct phy *phy_create(struct device *dev, struct device_node *node, const struct phy_ops *ops); struct phy *devm_phy_create(struct device *dev, struct device_node *node, const struct phy_ops *ops); void phy_destroy(struct phy *phy); void devm_phy_destroy(struct device *dev, struct phy *phy); struct phy_provider *__of_phy_provider_register(struct device *dev, struct device_node *children, struct module *owner, struct phy * (*of_xlate)(struct device *dev, struct of_phandle_args *args)); struct phy_provider *__devm_of_phy_provider_register(struct device *dev, struct device_node *children, struct module *owner, struct phy * (*of_xlate)(struct device *dev, struct of_phandle_args *args)); void of_phy_provider_unregister(struct phy_provider *phy_provider); void devm_of_phy_provider_unregister(struct device *dev, struct phy_provider *phy_provider); int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id); void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id); #else static inline int phy_pm_runtime_get(struct phy *phy) { if (!phy) return 0; return -ENOSYS; } static inline int phy_pm_runtime_get_sync(struct phy *phy) { if (!phy) return 0; return -ENOSYS; } static inline int phy_pm_runtime_put(struct phy *phy) { if (!phy) return 0; return -ENOSYS; } static inline int phy_pm_runtime_put_sync(struct phy *phy) { if (!phy) return 0; return -ENOSYS; } static inline void phy_pm_runtime_allow(struct phy *phy) { return; } static inline void phy_pm_runtime_forbid(struct phy *phy) { return; } static inline int phy_init(struct phy *phy) { if (!phy) return 0; return -ENOSYS; } static inline int phy_exit(struct phy *phy) { if (!phy) return 0; return -ENOSYS; } static inline int phy_power_on(struct phy *phy) { if (!phy) return 0; return -ENOSYS; } static inline int phy_power_off(struct phy *phy) { if (!phy) return 0; return -ENOSYS; } static inline int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode) { if (!phy) return 0; return -ENOSYS; } #define phy_set_mode(phy, mode) \ phy_set_mode_ext(phy, mode, 0) static inline enum phy_mode phy_get_mode(struct phy *phy) { return PHY_MODE_INVALID; } static inline int phy_reset(struct phy *phy) { if (!phy) return 0; return -ENOSYS; } static inline int phy_calibrate(struct phy *phy) { if (!phy) return 0; return -ENOSYS; } static inline int phy_configure(struct phy *phy, union phy_configure_opts *opts) { if (!phy) return 0; return -ENOSYS; } static inline int phy_validate(struct 
phy *phy, enum phy_mode mode, int submode,
				  union phy_configure_opts *opts)
{
	if (!phy)
		return 0;
	return -ENOSYS;
}

static inline int phy_get_bus_width(struct phy *phy)
{
	return -ENOSYS;
}

static inline void phy_set_bus_width(struct phy *phy, int bus_width)
{
	return;
}

static inline struct phy *phy_get(struct device *dev, const char *string)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct phy *phy_optional_get(struct device *dev,
					   const char *string)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct phy *devm_phy_get(struct device *dev, const char *string)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct phy *devm_phy_optional_get(struct device *dev,
						const char *string)
{
	return NULL;
}

static inline struct phy *devm_of_phy_get(struct device *dev,
					  struct device_node *np,
					  const char *con_id)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
						   struct device_node *np,
						   int index)
{
	return ERR_PTR(-ENOSYS);
}

static inline void phy_put(struct phy *phy)
{
}

static inline void devm_phy_put(struct device *dev, struct phy *phy)
{
}

static inline struct phy *of_phy_get(struct device_node *np,
				     const char *con_id)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct phy *of_phy_simple_xlate(struct device *dev,
					      struct of_phandle_args *args)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct phy *phy_create(struct device *dev,
				     struct device_node *node,
				     const struct phy_ops *ops)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct phy *devm_phy_create(struct device *dev,
					  struct device_node *node,
					  const struct phy_ops *ops)
{
	return ERR_PTR(-ENOSYS);
}

static inline void phy_destroy(struct phy *phy)
{
}

static inline void devm_phy_destroy(struct device *dev, struct phy *phy)
{
}

static inline struct phy_provider *__of_phy_provider_register(
	struct device *dev, struct device_node *children, struct module *owner,
	struct phy * (*of_xlate)(struct device *dev,
				 struct of_phandle_args *args))
{
	return ERR_PTR(-ENOSYS);
}

static inline struct phy_provider *__devm_of_phy_provider_register(
	struct device *dev, struct device_node *children, struct module *owner,
	struct phy * (*of_xlate)(struct device *dev,
				 struct of_phandle_args *args))
{
	return ERR_PTR(-ENOSYS);
}

static inline void of_phy_provider_unregister(struct phy_provider *phy_provider)
{
}

static inline void devm_of_phy_provider_unregister(struct device *dev,
						   struct phy_provider *phy_provider)
{
}

static inline int phy_create_lookup(struct phy *phy, const char *con_id,
				    const char *dev_id)
{
	return 0;
}

static inline void phy_remove_lookup(struct phy *phy, const char *con_id,
				     const char *dev_id)
{
}
#endif

#endif /* __DRIVERS_PHY_H */
netfilter_ipv6.h 0000644 00000013544 14722070374 0007672 0 ustar 00
/* IPv6-specific defines for netfilter.
 * (C)1998 Rusty Russell -- This code is GPL.
 * (C)1999 David Jeffery
 *   this header was blatantly ripped from netfilter_ipv4.h
 *   it's amazing what adding a bunch of 6s can do =8^)
 */
#ifndef __LINUX_IP6_NETFILTER_H
#define __LINUX_IP6_NETFILTER_H

#include <uapi/linux/netfilter_ipv6.h>
#include <net/tcp.h>

/* Check for an extension */
static inline int nf_ip6_ext_hdr(u8 nexthdr)
{
	return (nexthdr == IPPROTO_HOPOPTS) ||
	       (nexthdr == IPPROTO_ROUTING) ||
	       (nexthdr == IPPROTO_FRAGMENT) ||
	       (nexthdr == IPPROTO_ESP) ||
	       (nexthdr == IPPROTO_AH) ||
	       (nexthdr == IPPROTO_NONE) ||
	       (nexthdr == IPPROTO_DSTOPTS);
}

/* Extra routing may be needed on local out, as the QUEUE target never returns
 * control to the table.
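*/

/*
 * Illustrative sketch (not part of the original header): callers snapshot the
 * routing-relevant fields before an operation that may rewrite the packet,
 * then re-route when any of them changed. The in-tree users (ip6table_mangle,
 * the nf_queue reroute handler) follow this pattern; the helper name is
 * hypothetical, and nf_ip6_route_me_harder() is declared here because its
 * definition only appears further down in this header.
 */
static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk,
					 struct sk_buff *skb);

static inline int example_reroute_if_changed(struct net *net, struct sock *sk,
					     struct sk_buff *skb,
					     const struct in6_addr *old_saddr,
					     const struct in6_addr *old_daddr,
					     u32 old_mark)
{
	if (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, old_saddr) ||
	    !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, old_daddr) ||
	    skb->mark != old_mark)
		return nf_ip6_route_me_harder(net, sk, skb);
	return 0;
}

/* (the snapshot structure used by the reroute code follows)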
*/ struct ip6_rt_info { struct in6_addr daddr; struct in6_addr saddr; u_int32_t mark; }; struct nf_queue_entry; struct nf_bridge_frag_data; /* * Hook functions for ipv6 to allow xt_* modules to be built-in even * if IPv6 is a module. */ struct nf_ipv6_ops { #if IS_MODULE(CONFIG_IPV6) int (*chk_addr)(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict); int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*dev_get_saddr)(struct net *net, const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); u32 (*cookie_init_sequence)(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp); int (*cookie_v6_check)(const struct ipv6hdr *iph, const struct tcphdr *th, __u32 cookie); #endif void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); #if IS_MODULE(CONFIG_IPV6) int (*br_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *)); #endif }; #ifdef CONFIG_NETFILTER #include <net/addrconf.h> extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return rcu_dereference(nf_ipv6_ops); } static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return 1; return v6_ops->chk_addr(net, addr, dev, strict); #elif IS_BUILTIN(CONFIG_IPV6) return ipv6_chk_addr(net, addr, dev, strict); #else return 1; #endif } int __nf_ip6_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops(); if (v6ops) return v6ops->route(net, dst, fl, strict); return -EHOSTUNREACH; #endif #if IS_BUILTIN(CONFIG_IPV6) return __nf_ip6_route(net, dst, fl, strict); #else return -EHOSTUNREACH; #endif } #include <net/netfilter/ipv6/nf_defrag_ipv6.h> int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *)); static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *)) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return 1; return v6_ops->br_fragment(net, sk, skb, data, output); #elif IS_BUILTIN(CONFIG_IPV6) return br_ip6_fragment(net, sk, skb, data, output); #else return 1; #endif } int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb); static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return -EHOSTUNREACH; return v6_ops->route_me_harder(net, sk, skb); #elif 
IS_BUILTIN(CONFIG_IPV6) return ip6_route_me_harder(net, sk, skb); #else return -EHOSTUNREACH; #endif } static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp) { #if IS_ENABLED(CONFIG_SYN_COOKIES) #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (v6_ops) return v6_ops->cookie_init_sequence(iph, th, mssp); #elif IS_BUILTIN(CONFIG_IPV6) return __cookie_v6_init_sequence(iph, th, mssp); #endif #endif return 0; } static inline int nf_cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th, __u32 cookie) { #if IS_ENABLED(CONFIG_SYN_COOKIES) #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (v6_ops) return v6_ops->cookie_v6_check(iph, th, cookie); #elif IS_BUILTIN(CONFIG_IPV6) return __cookie_v6_check(iph, th, cookie); #endif #endif return 0; } __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); int ipv6_netfilter_init(void); void ipv6_netfilter_fini(void); #else /* CONFIG_NETFILTER */ static inline int ipv6_netfilter_init(void) { return 0; } static inline void ipv6_netfilter_fini(void) { return; } static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; } #endif /* CONFIG_NETFILTER */ #endif /*__LINUX_IP6_NETFILTER_H*/ ti-emif-sram.h 0000644 00000012226 14722070374 0007220 0 ustar 00 /* * TI AM33XX EMIF Routines * * Copyright (C) 2016-2017 Texas Instruments Inc. * Dave Gerlach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #ifndef __LINUX_TI_EMIF_H #define __LINUX_TI_EMIF_H #include <linux/kbuild.h> #include <linux/types.h> #ifndef __ASSEMBLY__ struct emif_regs_amx3 { u32 emif_sdcfg_val; u32 emif_timing1_val; u32 emif_timing2_val; u32 emif_timing3_val; u32 emif_ref_ctrl_val; u32 emif_zqcfg_val; u32 emif_pmcr_val; u32 emif_pmcr_shdw_val; u32 emif_rd_wr_level_ramp_ctrl; u32 emif_rd_wr_exec_thresh; u32 emif_cos_config; u32 emif_priority_to_cos_mapping; u32 emif_connect_id_serv_1_map; u32 emif_connect_id_serv_2_map; u32 emif_ocp_config_val; u32 emif_lpddr2_nvm_tim; u32 emif_lpddr2_nvm_tim_shdw; u32 emif_dll_calib_ctrl_val; u32 emif_dll_calib_ctrl_val_shdw; u32 emif_ddr_phy_ctlr_1; u32 emif_ext_phy_ctrl_vals[120]; }; struct ti_emif_pm_data { void __iomem *ti_emif_base_addr_virt; phys_addr_t ti_emif_base_addr_phys; unsigned long ti_emif_sram_config; struct emif_regs_amx3 *regs_virt; phys_addr_t regs_phys; } __packed __aligned(8); struct ti_emif_pm_functions { u32 save_context; u32 restore_context; u32 run_hw_leveling; u32 enter_sr; u32 exit_sr; u32 abort_sr; } __packed __aligned(8); static inline void ti_emif_asm_offsets(void) { DEFINE(EMIF_SDCFG_VAL_OFFSET, offsetof(struct emif_regs_amx3, emif_sdcfg_val)); DEFINE(EMIF_TIMING1_VAL_OFFSET, offsetof(struct emif_regs_amx3, emif_timing1_val)); DEFINE(EMIF_TIMING2_VAL_OFFSET, offsetof(struct emif_regs_amx3, emif_timing2_val)); DEFINE(EMIF_TIMING3_VAL_OFFSET, offsetof(struct emif_regs_amx3, emif_timing3_val)); DEFINE(EMIF_REF_CTRL_VAL_OFFSET, offsetof(struct emif_regs_amx3, emif_ref_ctrl_val)); DEFINE(EMIF_ZQCFG_VAL_OFFSET, offsetof(struct emif_regs_amx3, emif_zqcfg_val)); DEFINE(EMIF_PMCR_VAL_OFFSET, offsetof(struct emif_regs_amx3, emif_pmcr_val)); DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET, offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val)); DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET, offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl)); DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET, offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh)); DEFINE(EMIF_COS_CONFIG_OFFSET, offsetof(struct emif_regs_amx3, emif_cos_config)); DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET, offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping)); DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET, offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map)); DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET, offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map)); DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET, offsetof(struct emif_regs_amx3, emif_ocp_config_val)); DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET, offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim)); DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET, offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw)); DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET, offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val)); DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET, offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw)); DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET, offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1)); DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET, offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals)); DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3)); BLANK(); DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET, offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt)); DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET, offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys)); DEFINE(EMIF_PM_CONFIG_OFFSET, offsetof(struct ti_emif_pm_data, ti_emif_sram_config)); DEFINE(EMIF_PM_REGS_VIRT_OFFSET, offsetof(struct ti_emif_pm_data, regs_virt)); DEFINE(EMIF_PM_REGS_PHYS_OFFSET, offsetof(struct ti_emif_pm_data, 
regs_phys)); DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data)); BLANK(); DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET, offsetof(struct ti_emif_pm_functions, save_context)); DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET, offsetof(struct ti_emif_pm_functions, restore_context)); DEFINE(EMIF_PM_RUN_HW_LEVELING, offsetof(struct ti_emif_pm_functions, run_hw_leveling)); DEFINE(EMIF_PM_ENTER_SR_OFFSET, offsetof(struct ti_emif_pm_functions, enter_sr)); DEFINE(EMIF_PM_EXIT_SR_OFFSET, offsetof(struct ti_emif_pm_functions, exit_sr)); DEFINE(EMIF_PM_ABORT_SR_OFFSET, offsetof(struct ti_emif_pm_functions, abort_sr)); DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions)); } struct gen_pool; int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst); int ti_emif_get_mem_type(void); #endif #endif /* __LINUX_TI_EMIF_H */ tracepoint.h 0000644 00000043745 14722070374 0007110 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _LINUX_TRACEPOINT_H #define _LINUX_TRACEPOINT_H /* * Kernel Tracepoint API. * * See Documentation/trace/tracepoints.rst. * * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> * * Heavily inspired from the Linux Kernel Markers. */ #include <linux/smp.h> #include <linux/srcu.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/cpumask.h> #include <linux/rcupdate.h> #include <linux/tracepoint-defs.h> struct module; struct tracepoint; struct notifier_block; struct trace_eval_map { const char *system; const char *eval_string; unsigned long eval_value; }; #define TRACEPOINT_DEFAULT_PRIO 10 extern struct srcu_struct tracepoint_srcu; extern int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data); extern int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data, int prio); extern int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data, int prio); extern int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data); static inline int tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe, void *data) { return tracepoint_probe_register_prio_may_exist(tp, probe, data, TRACEPOINT_DEFAULT_PRIO); } extern void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), void *priv); #ifdef CONFIG_MODULES struct tp_module { struct list_head list; struct module *mod; }; bool trace_module_has_bad_taint(struct module *mod); extern int register_tracepoint_module_notifier(struct notifier_block *nb); extern int unregister_tracepoint_module_notifier(struct notifier_block *nb); #else static inline bool trace_module_has_bad_taint(struct module *mod) { return false; } static inline int register_tracepoint_module_notifier(struct notifier_block *nb) { return 0; } static inline int unregister_tracepoint_module_notifier(struct notifier_block *nb) { return 0; } #endif /* CONFIG_MODULES */ /* * tracepoint_synchronize_unregister must be called between the last tracepoint * probe unregistration and the end of module exit to make sure there is no * caller executing a probe when it is freed. */ #ifdef CONFIG_TRACEPOINTS static inline void tracepoint_synchronize_unregister(void) { synchronize_srcu(&tracepoint_srcu); synchronize_rcu(); } #else static inline void tracepoint_synchronize_unregister(void) { } #endif #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS extern int syscall_regfunc(void); extern void syscall_unregfunc(void); #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */ #define PARAMS(args...) 
args #define TRACE_DEFINE_ENUM(x) #define TRACE_DEFINE_SIZEOF(x) #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) { return offset_to_ptr(p); } #define __TRACEPOINT_ENTRY(name) \ asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \ " .balign 4 \n" \ " .long __tracepoint_" #name " - . \n" \ " .previous \n") #else static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) { return *p; } #define __TRACEPOINT_ENTRY(name) \ static tracepoint_ptr_t __tracepoint_ptr_##name __used \ __attribute__((section("__tracepoints_ptrs"))) = \ &__tracepoint_##name #endif #endif /* _LINUX_TRACEPOINT_H */ /* * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include * file ifdef protection. * This is due to the way trace events work. If a file includes two * trace event headers under one "CREATE_TRACE_POINTS" the first include * will override the TRACE_EVENT and break the second include. */ #ifndef DECLARE_TRACE #define TP_PROTO(args...) args #define TP_ARGS(args...) args #define TP_CONDITION(args...) args /* * Individual subsystems may have a separate configuration to * enable their tracepoints. By default, this file will create * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem * wants to be able to disable its tracepoints from being created * it can define NOTRACE before including the tracepoint headers. */ #if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE) #define TRACEPOINTS_ENABLED #endif #ifdef TRACEPOINTS_ENABLED /* * it_func[0] is never NULL because there is at least one element in the array * when the array itself is non NULL. * * Note, the proto and args passed in include "__data" as the first parameter. * The reason for this is to handle the "void" prototype. If a tracepoint * has a "void" prototype, then it is invalid to declare a function * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just * "void *data", whereas the DECLARE_TRACE() will pass in "void *data, proto". */ #define __DO_TRACE(tp, proto, args, cond, rcuidle) \ do { \ struct tracepoint_func *it_func_ptr; \ void *it_func; \ void *__data; \ int __maybe_unused __idx = 0; \ \ if (!(cond)) \ return; \ \ /* srcu can't be used from NMI */ \ WARN_ON_ONCE(rcuidle && in_nmi()); \ \ /* keep srcu and sched-rcu usage consistent */ \ preempt_disable_notrace(); \ \ /* \ * For rcuidle callers, use srcu since sched-rcu \ * doesn't work from the idle path. \ */ \ if (rcuidle) { \ __idx = srcu_read_lock_notrace(&tracepoint_srcu);\ rcu_irq_enter_irqson(); \ } \ \ it_func_ptr = rcu_dereference_raw((tp)->funcs); \ \ if (it_func_ptr) { \ do { \ it_func = (it_func_ptr)->func; \ __data = (it_func_ptr)->data; \ ((void(*)(proto))(it_func))(args); \ } while ((++it_func_ptr)->func); \ } \ \ if (rcuidle) { \ rcu_irq_exit_irqson(); \ srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\ } \ \ preempt_enable_notrace(); \ } while (0) #ifndef MODULE #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \ static inline void trace_##name##_rcuidle(proto) \ { \ if (static_key_false(&__tracepoint_##name.key)) \ __DO_TRACE(&__tracepoint_##name, \ TP_PROTO(data_proto), \ TP_ARGS(data_args), \ TP_CONDITION(cond), 1); \ } #else #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) #endif /* * Make sure the alignment of the structure in the __tracepoints section will * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start.
* * When lockdep is enabled, we make sure to always test if RCU is * "watching" regardless of whether the tracepoint is enabled or not. Tracepoints * require RCU to be active, and it should always warn at the tracepoint * site if it is not watching, as it will need to be active when the * tracepoint is enabled. */ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ if (static_key_false(&__tracepoint_##name.key)) \ __DO_TRACE(&__tracepoint_##name, \ TP_PROTO(data_proto), \ TP_ARGS(data_args), \ TP_CONDITION(cond), 0); \ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ WARN_ON_ONCE(!rcu_is_watching()); \ } \ } \ __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \ static inline int \ register_trace_##name(void (*probe)(data_proto), void *data) \ { \ return tracepoint_probe_register(&__tracepoint_##name, \ (void *)probe, data); \ } \ static inline int \ register_trace_prio_##name(void (*probe)(data_proto), void *data,\ int prio) \ { \ return tracepoint_probe_register_prio(&__tracepoint_##name, \ (void *)probe, data, prio); \ } \ static inline int \ unregister_trace_##name(void (*probe)(data_proto), void *data) \ { \ return tracepoint_probe_unregister(&__tracepoint_##name,\ (void *)probe, data); \ } \ static inline void \ check_trace_callback_type_##name(void (*cb)(data_proto)) \ { \ } \ static inline bool \ trace_##name##_enabled(void) \ { \ return static_key_false(&__tracepoint_##name.key); \ } /* * We have no guarantee that gcc and the linker won't up-align the tracepoint * structures, so we create an array of pointers that will be used for iteration * on the tracepoints. */ #define DEFINE_TRACE_FN(name, reg, unreg) \ static const char __tpstrtab_##name[] \ __attribute__((section("__tracepoints_strings"))) = #name; \ struct tracepoint __tracepoint_##name \ __attribute__((section("__tracepoints"), used)) = \ { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\ __TRACEPOINT_ENTRY(name); #define DEFINE_TRACE(name) \ DEFINE_TRACE_FN(name, NULL, NULL); #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ EXPORT_SYMBOL_GPL(__tracepoint_##name) #define EXPORT_TRACEPOINT_SYMBOL(name) \ EXPORT_SYMBOL(__tracepoint_##name) #else /* !TRACEPOINTS_ENABLED */ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ static inline void trace_##name(proto) \ { } \ static inline void trace_##name##_rcuidle(proto) \ { } \ static inline int \ register_trace_##name(void (*probe)(data_proto), \ void *data) \ { \ return -ENOSYS; \ } \ static inline int \ unregister_trace_##name(void (*probe)(data_proto), \ void *data) \ { \ return -ENOSYS; \ } \ static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \ { \ } \ static inline bool \ trace_##name##_enabled(void) \ { \ return false; \ } #define DEFINE_TRACE_FN(name, reg, unreg) #define DEFINE_TRACE(name) #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) #define EXPORT_TRACEPOINT_SYMBOL(name) #endif /* TRACEPOINTS_ENABLED */ #ifdef CONFIG_TRACING /** * tracepoint_string - register constant persistent string to trace system * @str - a constant persistent string that will be referenced in tracepoints * * If constant strings are being used in tracepoints, it is faster and * more efficient to just save the pointer to the string and reference * that with a printf "%s" instead of saving the string in the ring buffer * and wasting space and time.
* * The problem with the above approach is that userspace tools that read * the binary output of the trace buffers do not have access to the string. * Instead they just show the address of the string which is not very * useful to users. * * With tracepoint_string(), the string will be registered to the tracing * system and exported to userspace via the debugfs/tracing/printk_formats * file that maps the string address to the string text. This way userspace * tools that read the binary buffers have a way to map the pointers to * the ASCII strings they represent. * * The @str used must be a constant string and persistent as it would not * make sense to show a string that no longer exists. But it is still fine * to be used with modules, because when modules are unloaded, if they * had tracepoints, the ring buffers are cleared too. As long as the string * does not change during the life of the module, it is fine to use * tracepoint_string() within a module. */ #define tracepoint_string(str) \ ({ \ static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) #define __tracepoint_string __attribute__((section("__tracepoint_str"), used)) #else /* * tracepoint_string() is used to save the string address for userspace * tracing tools. When tracing isn't configured, there's no need to save * anything. */ # define tracepoint_string(str) str # define __tracepoint_string #endif /* * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype * (void). "void" is a special value in a function prototype and can * not be combined with other arguments. Since the DECLARE_TRACE() * macro adds a data element at the beginning of the prototype, * we need a way to differentiate "(void *data, proto)" from * "(void *data, void)". The second prototype is invalid. * * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype * and "void *__data" as the callback prototype. * * DECLARE_TRACE() passes "proto" as the tracepoint prototype and * "void *__data, proto" as the callback prototype. */ #define DECLARE_TRACE_NOARGS(name) \ __DECLARE_TRACE(name, void, , \ cpu_online(raw_smp_processor_id()), \ void *__data, __data) #define DECLARE_TRACE(name, proto, args) \ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ cpu_online(raw_smp_processor_id()), \ PARAMS(void *__data, proto), \ PARAMS(__data, args)) #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ PARAMS(void *__data, proto), \ PARAMS(__data, args)) #define TRACE_EVENT_FLAGS(event, flag) #define TRACE_EVENT_PERF_PERM(event, expr...) #endif /* DECLARE_TRACE */ #ifndef TRACE_EVENT /* * For use with the TRACE_EVENT macro: * * We define a tracepoint, its arguments, its printk format * and its 'fast binary record' layout. * * Firstly, name your tracepoint via TRACE_EVENT(name : the * 'subsystem_event' notation is fine. * * Think about this whole construct as the * 'trace_sched_switch() function' from now on. * * * TRACE_EVENT(sched_switch, * * * * * A function has a regular function arguments * * prototype, declare it via TP_PROTO(): * * * * TP_PROTO(struct rq *rq, struct task_struct *prev, * struct task_struct *next), * * * * * Define the call signature of the 'function'. * * (Design sidenote: we use this instead of a * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.) * * * * TP_ARGS(rq, prev, next), * * * * * Fast binary tracing: define the trace record via * * TP_STRUCT__entry().
You can think about it like a * * regular C structure local variable definition. * * * * This is how the trace record is structured and will * * be saved into the ring buffer. These are the fields * * that will be exposed to user-space in * * /sys/kernel/debug/tracing/events/<*>/format. * * * * The declared 'local variable' is called '__entry' * * * * __field(pid_t, prev_pid) is equivalent to a standard declaration: * * * * pid_t prev_pid; * * * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to: * * * * char prev_comm[TASK_COMM_LEN]; * * * * TP_STRUCT__entry( * __array( char, prev_comm, TASK_COMM_LEN ) * __field( pid_t, prev_pid ) * __field( int, prev_prio ) * __array( char, next_comm, TASK_COMM_LEN ) * __field( pid_t, next_pid ) * __field( int, next_prio ) * ), * * * * * Assign the entry into the trace record, by embedding * * a full C statement block into TP_fast_assign(). You * * can refer to the trace record as '__entry' - * * otherwise you can put arbitrary C code in here. * * * * Note: this C code will execute every time a trace event * * happens, on an active tracepoint. * * * * TP_fast_assign( * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); * __entry->prev_pid = prev->pid; * __entry->prev_prio = prev->prio; * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); * __entry->next_pid = next->pid; * __entry->next_prio = next->prio; * ), * * * * * Formatted output of a trace record via TP_printk(). * * This is how the tracepoint will appear under ftrace * * plugins that make use of this tracepoint. * * * * (raw-binary tracing won't actually perform this step.) * * * * TP_printk("task %s:%d [%d] ==> %s:%d [%d]", * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, * __entry->next_comm, __entry->next_pid, __entry->next_prio), * * ); * * This macro construct is thus used for the regular printk format * tracing setup, it is used to construct a function pointer based * tracepoint callback (this is used by programmatic plugins and * can also be used by generic instrumentation like SystemTap), and * it is also used to expose a structured trace record in * /sys/kernel/debug/tracing/events/. * * A set of (un)registration functions can be passed to the variant * TRACE_EVENT_FN to perform any (un)registration work. */ #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) #define DEFINE_EVENT(template, name, proto, args) \ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) #define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)\ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) #define DEFINE_EVENT_CONDITION(template, name, proto, \ args, cond) \ DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ PARAMS(args), PARAMS(cond)) #define TRACE_EVENT(name, proto, args, struct, assign, print) \ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) #define TRACE_EVENT_FN(name, proto, args, struct, \ assign, print, reg, unreg) \ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) #define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \ assign, print, reg, unreg) \ DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ PARAMS(args), PARAMS(cond)) #define TRACE_EVENT_CONDITION(name, proto, args, cond, \ struct, assign, print) \ DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ PARAMS(args), PARAMS(cond)) #define TRACE_EVENT_FLAGS(event, flag) #define TRACE_EVENT_PERF_PERM(event, expr...)
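/*
 * A minimal end-to-end sketch of how the macros above are consumed
 * (illustrative only: "sample"/sample_event and the file name are made
 * up for this example; the TP_* layout mirrors the sched_switch
 * walk-through above). In include/trace/events/sample.h:
 *
 *	#undef TRACE_SYSTEM
 *	#define TRACE_SYSTEM sample
 *
 *	#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
 *	#define _TRACE_SAMPLE_H
 *
 *	#include <linux/tracepoint.h>
 *
 *	TRACE_EVENT(sample_event,
 *		TP_PROTO(int value),
 *		TP_ARGS(value),
 *		TP_STRUCT__entry(
 *			__field(int, value)
 *		),
 *		TP_fast_assign(
 *			__entry->value = value;
 *		),
 *		TP_printk("value=%d", __entry->value)
 *	);
 *
 *	#endif
 *	#include <trace/define_trace.h>
 *
 * Exactly one .c file must define CREATE_TRACE_POINTS before including
 * the header so the tracepoint is actually instantiated:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/sample.h>
 *
 * The instrumentation site then calls trace_sample_event(42), and a
 * probe can attach via register_trace_sample_event(probe, data).
 */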
#define DECLARE_EVENT_NOP(name, proto, args) \ static inline void trace_##name(proto) \ { } \ static inline bool trace_##name##_enabled(void) \ { \ return false; \ } #define TRACE_EVENT_NOP(name, proto, args, struct, assign, print) \ DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args)) #define DECLARE_EVENT_CLASS_NOP(name, proto, args, tstruct, assign, print) #define DEFINE_EVENT_NOP(template, name, proto, args) \ DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args)) #endif /* ifdef TRACE_EVENT (see note above) */ hugetlb_cgroup.h 0000644 00000005674 14722070374 0007750 0 ustar 00 /* * Copyright IBM Corporation, 2012 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #ifndef _LINUX_HUGETLB_CGROUP_H #define _LINUX_HUGETLB_CGROUP_H #include <linux/mmdebug.h> struct hugetlb_cgroup; /* * Minimum page order trackable by hugetlb cgroup. * At least 3 pages are necessary for all the tracking information. */ #define HUGETLB_CGROUP_MIN_ORDER 2 #ifdef CONFIG_CGROUP_HUGETLB static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) { VM_BUG_ON_PAGE(!PageHuge(page), page); if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return NULL; return (struct hugetlb_cgroup *)page[2].private; } static inline int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) { VM_BUG_ON_PAGE(!PageHuge(page), page); if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return -1; page[2].private = (unsigned long)h_cg; return 0; } static inline bool hugetlb_cgroup_disabled(void) { return !cgroup_subsys_enabled(hugetlb_cgrp_subsys); } extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr); extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct page *page); extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page); extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg); extern void hugetlb_cgroup_file_init(void) __init; extern void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage); #else static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) { return NULL; } static inline int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) { return 0; } static inline bool hugetlb_cgroup_disabled(void) { return true; } static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr) { return 0; } static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct page *page) { } static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) { } static inline void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg) { } static inline void hugetlb_cgroup_file_init(void) { } static inline void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) { } #endif /* CONFIG_CGROUP_HUGETLB */ #endif bpf_types.h 0000644 00000006225 14722070374 0006723 0 ustar 00 /* SPDX-License-Identifier:
GPL-2.0 */ /* internal file - do not include directly */ #ifdef CONFIG_NET BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter) BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act) BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act) BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) #ifdef CONFIG_CGROUP_BPF BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr) #endif BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local) BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops) BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb) BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg) BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector) #endif #ifdef CONFIG_BPF_EVENTS BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint) BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event) BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint) BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable) #endif #ifdef CONFIG_CGROUP_BPF BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt) #endif #ifdef CONFIG_BPF_LIRC_MODE2 BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2) #endif #ifdef CONFIG_INET BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops) #ifdef CONFIG_CGROUPS BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops) #endif #ifdef CONFIG_CGROUP_BPF BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops) #ifdef CONFIG_PERF_EVENTS BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops) #if defined(CONFIG_BPF_STREAM_PARSER) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) #if defined(CONFIG_XDP_SOCKETS) BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops) #endif #ifdef CONFIG_INET BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops) #endif #endif BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops) notifier.h 0000644 00000020331 14722070374 0006541 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Routines to manage notifier chains for passing status changes to any * interested routines. We need this instead of hard coded call lists so * that modules can poke their nose into the innards. 
The network devices * needed them so here they are for the rest of you. * * Alan Cox <Alan.Cox@linux.org> */ #ifndef _LINUX_NOTIFIER_H #define _LINUX_NOTIFIER_H #include <linux/errno.h> #include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/srcu.h> /* * Notifier chains are of four types: * * Atomic notifier chains: Chain callbacks run in interrupt/atomic * context. Callouts are not allowed to block. * Blocking notifier chains: Chain callbacks run in process context. * Callouts are allowed to block. * Raw notifier chains: There are no restrictions on callbacks, * registration, or unregistration. All locking and protection * must be provided by the caller. * SRCU notifier chains: A variant of blocking notifier chains, with * the same restrictions. * * atomic_notifier_chain_register() may be called from an atomic context, * but blocking_notifier_chain_register() and srcu_notifier_chain_register() * must be called from a process context. Ditto for the corresponding * _unregister() routines. * * atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(), * and srcu_notifier_chain_unregister() _must not_ be called from within * the call chain. * * SRCU notifier chains are an alternative form of blocking notifier chains. * They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for * protection of the chain links. This means there is _very_ low overhead * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. * As compensation, srcu_notifier_chain_unregister() is rather expensive. * SRCU notifier chains should be used when the chain will be called very * often but notifier_blocks will seldom be removed. */ struct notifier_block; typedef int (*notifier_fn_t)(struct notifier_block *nb, unsigned long action, void *data); struct notifier_block { notifier_fn_t notifier_call; struct notifier_block __rcu *next; int priority; }; struct atomic_notifier_head { spinlock_t lock; struct notifier_block __rcu *head; }; struct blocking_notifier_head { struct rw_semaphore rwsem; struct notifier_block __rcu *head; }; struct raw_notifier_head { struct notifier_block __rcu *head; }; struct srcu_notifier_head { struct mutex mutex; struct srcu_struct srcu; struct notifier_block __rcu *head; }; #define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ spin_lock_init(&(name)->lock); \ (name)->head = NULL; \ } while (0) #define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \ init_rwsem(&(name)->rwsem); \ (name)->head = NULL; \ } while (0) #define RAW_INIT_NOTIFIER_HEAD(name) do { \ (name)->head = NULL; \ } while (0) /* srcu_notifier_heads must be cleaned up dynamically */ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); #define srcu_cleanup_notifier_head(name) \ cleanup_srcu_struct(&(name)->srcu); #define ATOMIC_NOTIFIER_INIT(name) { \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .head = NULL } #define BLOCKING_NOTIFIER_INIT(name) { \ .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ .head = NULL } #define RAW_NOTIFIER_INIT(name) { \ .head = NULL } #define SRCU_NOTIFIER_INIT(name, pcpu) \ { \ .mutex = __MUTEX_INITIALIZER(name.mutex), \ .head = NULL, \ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ } #define ATOMIC_NOTIFIER_HEAD(name) \ struct atomic_notifier_head name = \ ATOMIC_NOTIFIER_INIT(name) #define BLOCKING_NOTIFIER_HEAD(name) \ struct blocking_notifier_head name = \ BLOCKING_NOTIFIER_INIT(name) #define RAW_NOTIFIER_HEAD(name) \ struct raw_notifier_head name = \ RAW_NOTIFIER_INIT(name) #ifdef CONFIG_TREE_SRCU #define _SRCU_NOTIFIER_HEAD(name, mod) \ static 
DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \ mod struct srcu_notifier_head name = \ SRCU_NOTIFIER_INIT(name, name##_head_srcu_data) #else #define _SRCU_NOTIFIER_HEAD(name, mod) \ mod struct srcu_notifier_head name = \ SRCU_NOTIFIER_INIT(name, name) #endif #define SRCU_NOTIFIER_HEAD(name) \ _SRCU_NOTIFIER_HEAD(name, /* not static */) #define SRCU_NOTIFIER_HEAD_STATIC(name) \ _SRCU_NOTIFIER_HEAD(name, static) #ifdef __KERNEL__ extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, struct notifier_block *nb); extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh, struct notifier_block *nb); extern int raw_notifier_chain_register(struct raw_notifier_head *nh, struct notifier_block *nb); extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, struct notifier_block *nb); extern int blocking_notifier_chain_cond_register( struct blocking_notifier_head *nh, struct notifier_block *nb); extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, struct notifier_block *nb); extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, struct notifier_block *nb); extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh, struct notifier_block *nb); extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, struct notifier_block *nb); extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v); extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls); extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, unsigned long val, void *v); extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls); extern int raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v); extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls); extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v); extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls); #define NOTIFY_DONE 0x0000 /* Don't care */ #define NOTIFY_OK 0x0001 /* Suits me */ #define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ #define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */ /* * Clean way to return from the notifier and stop further calls. */ #define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK) /* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */ static inline int notifier_from_errno(int err) { if (err) return NOTIFY_STOP_MASK | (NOTIFY_OK - err); return NOTIFY_OK; } /* Restore (negative) errno value from notify return value. */ static inline int notifier_to_errno(int ret) { ret &= ~NOTIFY_STOP_MASK; return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0; } /* * Declared notifiers so far. I can imagine quite a few more chains * over time (eg laptop power reset chains, reboot chain (to clean * device units up), device [un]mount chain, module load/unload chain, * low memory chain, screenblank chain (for plug in modular screenblankers) * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... */ /* CPU notifiers are defined in include/linux/cpu.h. */ /* netdevice notifiers are defined in include/linux/netdevice.h */ /* reboot notifiers are defined in include/linux/reboot.h.
*/ /* Hibernation and suspend events are defined in include/linux/suspend.h. */ /* Virtual Terminal events are defined in include/linux/vt.h. */ #define NETLINK_URELEASE 0x0001 /* Unicast netlink socket released */ /* Console keyboard events. * Note: KBD_KEYCODE is always sent before KBD_UNBOUND_KEYCODE, KBD_UNICODE and * KBD_KEYSYM. */ #define KBD_KEYCODE 0x0001 /* Keyboard keycode, called before any other */ #define KBD_UNBOUND_KEYCODE 0x0002 /* Keyboard keycode which is not bound to any other */ #define KBD_UNICODE 0x0003 /* Keyboard unicode */ #define KBD_KEYSYM 0x0004 /* Keyboard keysym */ #define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */ extern struct blocking_notifier_head reboot_notifier_list; #endif /* __KERNEL__ */ #endif /* _LINUX_NOTIFIER_H */ sed-opal.h 0000644 00000003177 14722070374 0006437 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright © 2016 Intel Corporation * * Authors: * Rafael Antognolli <rafael.antognolli@intel.com> * Scott Bauer <scott.bauer@intel.com> */ #ifndef LINUX_OPAL_H #define LINUX_OPAL_H #include <uapi/linux/sed-opal.h> #include <linux/kernel.h> struct opal_dev; typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send); #ifdef CONFIG_BLK_SED_OPAL void free_opal_dev(struct opal_dev *dev); bool opal_unlock_from_suspend(struct opal_dev *dev); struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv); int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr); static inline bool is_sed_ioctl(unsigned int cmd) { switch (cmd) { case IOC_OPAL_SAVE: case IOC_OPAL_LOCK_UNLOCK: case IOC_OPAL_TAKE_OWNERSHIP: case IOC_OPAL_ACTIVATE_LSP: case IOC_OPAL_SET_PW: case IOC_OPAL_ACTIVATE_USR: case IOC_OPAL_REVERT_TPR: case IOC_OPAL_LR_SETUP: case IOC_OPAL_ADD_USR_TO_LR: case IOC_OPAL_ENABLE_DISABLE_MBR: case IOC_OPAL_ERASE_LR: case IOC_OPAL_SECURE_ERASE_LR: case IOC_OPAL_PSID_REVERT_TPR: case IOC_OPAL_MBR_DONE: case IOC_OPAL_WRITE_SHADOW_MBR: return true; } return false; } #else static inline void free_opal_dev(struct opal_dev *dev) { } static inline bool is_sed_ioctl(unsigned int cmd) { return false; } static inline int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr) { return 0; } static inline bool opal_unlock_from_suspend(struct opal_dev *dev) { return false; } #define init_opal_dev(data, send_recv) NULL #endif /* CONFIG_BLK_SED_OPAL */ #endif /* LINUX_OPAL_H */ prime_numbers.h 0000644 00000002550 14722070374 0007574 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PRIME_NUMBERS_H #define __LINUX_PRIME_NUMBERS_H #include <linux/types.h> bool is_prime_number(unsigned long x); unsigned long next_prime_number(unsigned long x); /** * for_each_prime_number - iterate over each prime up to a value * @prime: the current prime number in this iteration * @max: the upper limit * * Starting from the first prime number 2 iterate over each prime number up to * the @max value. On each iteration, @prime is set to the current prime number. * @max should be less than ULONG_MAX to ensure termination. To begin with * @prime set to 1 on the first iteration, use for_each_prime_number_from() * instead.
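 *
 * As an illustrative sketch (not part of this header), iterating up to
 * 12 visits the primes 2, 3, 5, 7 and 11:
 *
 *	unsigned long p;
 *
 *	for_each_prime_number(p, 12)
 *		pr_info("prime: %lu\n", p);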
*/ #define for_each_prime_number(prime, max) \ for_each_prime_number_from((prime), 2, (max)) /** * for_each_prime_number_from - iterate over each prime up to a value * @prime: the current prime number in this iteration * @from: the initial value * @max: the upper limit * * Starting from @from iterate over each successive prime number up to the * @max value. On each iteration, @prime is set to the current prime number. * @max should be less than ULONG_MAX, and @from less than @max, to ensure * termination. */ #define for_each_prime_number_from(prime, from, max) \ for (prime = (from); prime <= (max); prime = next_prime_number(prime)) #endif /* !__LINUX_PRIME_NUMBERS_H */ userfaultfd_k.h 0000644 00000007305 14722070374 0007566 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/userfaultfd_k.h * * Copyright (C) 2015 Red Hat, Inc. * */ #ifndef _LINUX_USERFAULTFD_K_H #define _LINUX_USERFAULTFD_K_H #ifdef CONFIG_USERFAULTFD #include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */ #include <linux/fcntl.h> /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining * new flags, since they might collide with O_* ones. We want * to re-use O_* flags that couldn't possibly have a meaning * from userfaultfd, in order to leave a free define-space for * shared O_* flags. */ #define UFFD_CLOEXEC O_CLOEXEC #define UFFD_NONBLOCK O_NONBLOCK #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) extern int sysctl_unprivileged_userfaultfd; extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason); extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool *mmap_changing); extern ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long len, bool *mmap_changing); /* mm helpers */ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, struct vm_userfaultfd_ctx vm_ctx) { return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; } static inline bool userfaultfd_missing(struct vm_area_struct *vma) { return vma->vm_flags & VM_UFFD_MISSING; } static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); } extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); extern void dup_userfaultfd_complete(struct list_head *); extern void mremap_userfaultfd_prep(struct vm_area_struct *, struct vm_userfaultfd_ctx *); extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *, unsigned long from, unsigned long to, unsigned long len); extern bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *uf); extern void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf); #else /* CONFIG_USERFAULTFD */ /* mm helpers */ static inline vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) { return VM_FAULT_SIGBUS; } static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, struct vm_userfaultfd_ctx vm_ctx) { return true; } static inline bool userfaultfd_missing(struct vm_area_struct *vma) { return false; } static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return false; } static inline int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *l) { return 0; } static inline void
dup_userfaultfd_complete(struct list_head *l) { } static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma, struct vm_userfaultfd_ctx *ctx) { } static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, unsigned long from, unsigned long to, unsigned long len) { } static inline bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end) { return true; } static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *uf) { return 0; } static inline void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf) { } #endif /* CONFIG_USERFAULTFD */ #endif /* _LINUX_USERFAULTFD_K_H */ cpumask.h 0000644 00000064535 14722070374 0006403 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_CPUMASK_H #define __LINUX_CPUMASK_H /* * Cpumasks provide a bitmap suitable for representing the * set of CPU's in a system, one bit position per CPU number. In general, * only nr_cpu_ids (<= NR_CPUS) bits are valid. */ #include <linux/kernel.h> #include <linux/threads.h> #include <linux/bitmap.h> #include <linux/atomic.h> #include <linux/bug.h> /* Don't assign or return these: may not be this big! */ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; /** * cpumask_bits - get the bits in a cpumask * @maskp: the struct cpumask * * * You should only assume nr_cpu_ids bits of this mask are valid. This is * a macro so it's const-correct. */ #define cpumask_bits(maskp) ((maskp)->bits) /** * cpumask_pr_args - printf args to output a cpumask * @maskp: cpumask to be printed * * Can be used to provide arguments for '%*pb[l]' when printing a cpumask. */ #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp) #if NR_CPUS == 1 #define nr_cpu_ids 1U #else extern unsigned int nr_cpu_ids; #endif #ifdef CONFIG_CPUMASK_OFFSTACK /* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, * not all bits may be allocated. */ #define nr_cpumask_bits nr_cpu_ids #else #define nr_cpumask_bits ((unsigned int)NR_CPUS) #endif /* * The following particular system cpumasks and operations manage * possible, present, active and online cpus. * * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable * cpu_present_mask - has bit 'cpu' set iff cpu is populated * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler * cpu_active_mask - has bit 'cpu' set iff cpu available to migration * * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. * * The cpu_possible_mask is fixed at boot time, as the set of CPU id's * that it is possible might ever be plugged in at anytime during the * life of that system boot. The cpu_present_mask is dynamic(*), * representing which CPUs are currently plugged in. And * cpu_online_mask is the dynamic subset of cpu_present_mask, * indicating those CPUs available for scheduling. * * If HOTPLUG is enabled, then cpu_possible_mask is forced to have * all NR_CPUS bits set, otherwise it is just the set of CPUs that * ACPI reports present at boot. * * If HOTPLUG is enabled, then cpu_present_mask varies dynamically, * depending on what ACPI reports as currently plugged in, otherwise * cpu_present_mask is just a copy of cpu_possible_mask. * * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. * * Subtleties: * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode * assumption that their single CPU is online. 
The UP * cpu_{online,possible,present}_masks are placebos. Changing them * will have no useful effect on the following num_*_cpus() * and cpu_*() macros in the UP case. This ugliness is a UP * optimization - don't waste any instructions or memory references * asking if you're online or how many CPUs there are if there is * only one CPU. */ extern struct cpumask __cpu_possible_mask; extern struct cpumask __cpu_online_mask; extern struct cpumask __cpu_present_mask; extern struct cpumask __cpu_active_mask; #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask) #define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask) #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) extern atomic_t __num_online_cpus; #if NR_CPUS > 1 /** * num_online_cpus() - Read the number of online CPUs * * Despite the fact that __num_online_cpus is of type atomic_t, this * interface gives only a momentary snapshot and is not protected against * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held * region. */ static inline unsigned int num_online_cpus(void) { return atomic_read(&__num_online_cpus); } #define num_possible_cpus() cpumask_weight(cpu_possible_mask) #define num_present_cpus() cpumask_weight(cpu_present_mask) #define num_active_cpus() cpumask_weight(cpu_active_mask) #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) #define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) #else #define num_online_cpus() 1U #define num_possible_cpus() 1U #define num_present_cpus() 1U #define num_active_cpus() 1U #define cpu_online(cpu) ((cpu) == 0) #define cpu_possible(cpu) ((cpu) == 0) #define cpu_present(cpu) ((cpu) == 0) #define cpu_active(cpu) ((cpu) == 0) #endif extern cpumask_t cpus_booted_once_mask; static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) { #ifdef CONFIG_DEBUG_PER_CPU_MAPS WARN_ON_ONCE(cpu >= bits); #endif /* CONFIG_DEBUG_PER_CPU_MAPS */ } /* verify cpu argument to cpumask_* operators */ static inline unsigned int cpumask_check(unsigned int cpu) { cpu_max_bits_warn(cpu, nr_cpumask_bits); return cpu; } #if NR_CPUS == 1 /* Uniprocessor. Assume all masks are "1". */ static inline unsigned int cpumask_first(const struct cpumask *srcp) { return 0; } static inline unsigned int cpumask_last(const struct cpumask *srcp) { return 0; } /* Valid inputs for n are -1 and 0. */ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) { return n+1; } static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) { return n+1; } static inline unsigned int cpumask_next_and(int n, const struct cpumask *srcp, const struct cpumask *andp) { return n+1; } static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) { /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */ return (wrap && n == 0); } /* cpu must be a valid cpu, ie 0, so there's no other choice.
*/ static inline unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) { return 1; } static inline unsigned int cpumask_local_spread(unsigned int i, int node) { return 0; } #define for_each_cpu(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) #define for_each_cpu_not(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) #define for_each_cpu_wrap(cpu, mask, start) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start)) #define for_each_cpu_and(cpu, mask1, mask2) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2) #else /** * cpumask_first - get the first cpu in a cpumask * @srcp: the cpumask pointer * * Returns >= nr_cpu_ids if no cpus set. */ static inline unsigned int cpumask_first(const struct cpumask *srcp) { return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); } /** * cpumask_last - get the last CPU in a cpumask * @srcp: the cpumask pointer * * Returns >= nr_cpumask_bits if no CPUs set. */ static inline unsigned int cpumask_last(const struct cpumask *srcp) { return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits); } unsigned int cpumask_next(int n, const struct cpumask *srcp); /** * cpumask_next_zero - get the next unset cpu in a cpumask * @n: the cpu prior to the place to search (ie. return will be > @n) * @srcp: the cpumask pointer * * Returns >= nr_cpu_ids if no further cpus unset. */ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) { /* -1 is a legal arg here. */ if (n != -1) cpumask_check(n); return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); unsigned int cpumask_local_spread(unsigned int i, int node); /** * for_each_cpu - iterate over every cpu in a mask * @cpu: the (optionally unsigned) integer iterator * @mask: the cpumask pointer * * After the loop, cpu is >= nr_cpu_ids. */ #define for_each_cpu(cpu, mask) \ for ((cpu) = -1; \ (cpu) = cpumask_next((cpu), (mask)), \ (cpu) < nr_cpu_ids;) /** * for_each_cpu_not - iterate over every cpu in a complemented mask * @cpu: the (optionally unsigned) integer iterator * @mask: the cpumask pointer * * After the loop, cpu is >= nr_cpu_ids. */ #define for_each_cpu_not(cpu, mask) \ for ((cpu) = -1; \ (cpu) = cpumask_next_zero((cpu), (mask)), \ (cpu) < nr_cpu_ids;) extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); /** * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location * @cpu: the (optionally unsigned) integer iterator * @mask: the cpumask pointer * @start: the start location * * The implementation does not assume any bit in @mask is set (including @start). * * After the loop, cpu is >= nr_cpu_ids. */ #define for_each_cpu_wrap(cpu, mask, start) \ for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \ (cpu) < nr_cpumask_bits; \ (cpu) = cpumask_next_wrap((cpu), (mask), (start), true)) /** * for_each_cpu_and - iterate over every cpu in both masks * @cpu: the (optionally unsigned) integer iterator * @mask1: the first cpumask pointer * @mask2: the second cpumask pointer * * This saves a temporary CPU mask in many places. It is equivalent to: * struct cpumask tmp; * cpumask_and(&tmp, &mask1, &mask2); * for_each_cpu(cpu, &tmp) * ... * * After the loop, cpu is >= nr_cpu_ids.
*/ #define for_each_cpu_and(cpu, mask1, mask2) \ for ((cpu) = -1; \ (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \ (cpu) < nr_cpu_ids;) #endif /* SMP */ #define CPU_BITS_NONE \ { \ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ } #define CPU_BITS_CPU0 \ { \ [0] = 1UL \ } /** * cpumask_set_cpu - set a cpu in a cpumask * @cpu: cpu number (< nr_cpu_ids) * @dstp: the cpumask pointer */ static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) { set_bit(cpumask_check(cpu), cpumask_bits(dstp)); } static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) { __set_bit(cpumask_check(cpu), cpumask_bits(dstp)); } /** * cpumask_clear_cpu - clear a cpu in a cpumask * @cpu: cpu number (< nr_cpu_ids) * @dstp: the cpumask pointer */ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) { clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); } static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) { __clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); } /** * cpumask_test_cpu - test for a cpu in a cpumask * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * * Returns 1 if @cpu is set in @cpumask, else returns 0 */ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) { return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); } /** * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 * * test_and_set_bit wrapper for cpumasks. */ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) { return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); } /** * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 * * test_and_clear_bit wrapper for cpumasks. 
*/ static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) { return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); } /** * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask * @dstp: the cpumask pointer */ static inline void cpumask_setall(struct cpumask *dstp) { bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits); } /** * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask * @dstp: the cpumask pointer */ static inline void cpumask_clear(struct cpumask *dstp) { bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits); } /** * cpumask_and - *dstp = *src1p & *src2p * @dstp: the cpumask result * @src1p: the first input * @src2p: the second input * * If *@dstp is empty, returns 0, else returns 1 */ static inline int cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } /** * cpumask_or - *dstp = *src1p | *src2p * @dstp: the cpumask result * @src1p: the first input * @src2p: the second input */ static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } /** * cpumask_xor - *dstp = *src1p ^ *src2p * @dstp: the cpumask result * @src1p: the first input * @src2p: the second input */ static inline void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } /** * cpumask_andnot - *dstp = *src1p & ~*src2p * @dstp: the cpumask result * @src1p: the first input * @src2p: the second input * * If *@dstp is empty, returns 0, else returns 1 */ static inline int cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } /** * cpumask_complement - *dstp = ~*srcp * @dstp: the cpumask result * @srcp: the input to invert */ static inline void cpumask_complement(struct cpumask *dstp, const struct cpumask *srcp) { bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); } /** * cpumask_equal - *src1p == *src2p * @src1p: the first input * @src2p: the second input */ static inline bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p) { return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } /** * cpumask_or_equal - *src1p | *src2p == *src3p * @src1p: the first input * @src2p: the second input * @src3p: the third input */ static inline bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p, const struct cpumask *src3p) { return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p), cpumask_bits(src3p), nr_cpumask_bits); } /** * cpumask_intersects - (*src1p & *src2p) != 0 * @src1p: the first input * @src2p: the second input */ static inline bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p) { return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } /** * cpumask_subset - (*src1p & ~*src2p) == 0 * @src1p: the first input * @src2p: the second input * * Returns 1 if *@src1p is a subset of *@src2p, else returns 0 */ static inline int cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p) { return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), 
nr_cpumask_bits); } /** * cpumask_empty - *srcp == 0 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are clear. */ static inline bool cpumask_empty(const struct cpumask *srcp) { return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits); } /** * cpumask_full - *srcp == 0xFFFFFFFF... * @srcp: the cpumask to check that all cpus < nr_cpu_ids are set. */ static inline bool cpumask_full(const struct cpumask *srcp) { return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits); } /** * cpumask_weight - Count of bits in *srcp * @srcp: the cpumask to count bits (< nr_cpu_ids) in. */ static inline unsigned int cpumask_weight(const struct cpumask *srcp) { return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); } /** * cpumask_shift_right - *dstp = *srcp >> n * @dstp: the cpumask result * @srcp: the input to shift * @n: the number of bits to shift by */ static inline void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n) { bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n, nr_cpumask_bits); } /** * cpumask_shift_left - *dstp = *srcp << n * @dstp: the cpumask result * @srcp: the input to shift * @n: the number of bits to shift by */ static inline void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n) { bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n, nr_cpumask_bits); } /** * cpumask_copy - *dstp = *srcp * @dstp: the result * @srcp: the input cpumask */ static inline void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp) { bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); } /** * cpumask_any - pick a "random" cpu from *srcp * @srcp: the input cpumask * * Returns >= nr_cpu_ids if no cpus set. */ #define cpumask_any(srcp) cpumask_first(srcp) /** * cpumask_first_and - return the first cpu from *srcp1 & *srcp2 * @src1p: the first input * @src2p: the second input * * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). */ #define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p)) /** * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2 * @mask1: the first input cpumask * @mask2: the second input cpumask * * Returns >= nr_cpu_ids if no cpus set. */ #define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2)) /** * cpumask_of - the cpumask containing just a given cpu * @cpu: the cpu (<= nr_cpu_ids) */ #define cpumask_of(cpu) (get_cpu_mask(cpu)) /** * cpumask_parse_user - extract a cpumask from a user string * @buf: the buffer to extract from * @len: the length of the buffer * @dstp: the cpumask to set. * * Returns -errno, or 0 for success. */ static inline int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp) { return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); } /** * cpumask_parselist_user - extract a cpumask from a user string * @buf: the buffer to extract from * @len: the length of the buffer * @dstp: the cpumask to set. * * Returns -errno, or 0 for success. */ static inline int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp) { return bitmap_parselist_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); } /** * cpumask_parse - extract a cpumask from a string * @buf: the buffer to extract from * @dstp: the cpumask to set. * * Returns -errno, or 0 for success.
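 *
 * As an illustrative sketch (values not from this file), @buf uses the
 * hex bitmap format of '%*pb', while cpulist_parse() below takes the
 * range-list format of '%*pbl':
 *
 *	cpumask_parse("f", &mask);	 sets CPUs 0-3
 *	cpulist_parse("0-2,4", &mask);	 sets CPUs 0, 1, 2 and 4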
static inline int cpumask_parse(const char *buf, struct cpumask *dstp) { unsigned int len = strchrnul(buf, '\n') - buf; return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); } /** * cpulist_parse - extract a cpumask from a user string of ranges * @buf: the buffer to extract from * @dstp: the cpumask to set. * * Returns -errno, or 0 for success. */ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) { return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); } /** * cpumask_size - size to allocate for a 'struct cpumask' in bytes */ static inline unsigned int cpumask_size(void) { return bitmap_size(nr_cpumask_bits); } /* * cpumask_var_t: struct cpumask for stack usage. * * Oh, the wicked games we play! In order to make kernel coding a * little more difficult, we typedef cpumask_var_t to an array or a * pointer: doing &mask on an array is a noop, so it still works. * * ie. * cpumask_var_t tmpmask; * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) * return -ENOMEM; * * ... use 'tmpmask' like a normal struct cpumask * ... * * free_cpumask_var(tmpmask); * * * However, there is one notable exception. alloc_cpumask_var() allocates * only nr_cpumask_bits bits (on the other hand, a real cpumask_t always has * NR_CPUS bits). Therefore you must not dereference a cpumask_var_t. * * cpumask_var_t tmpmask; * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) * return -ENOMEM; * * var = *tmpmask; * * This code performs an NR_CPUS-length memcpy and can corrupt memory. * cpumask_copy() provides safe copy functionality instead. * * Note that there is another evil here: If you define a cpumask_var_t * as a percpu variable then the way to obtain the address of the cpumask * structure differs, and that influences which this_cpu_* operation needs * to be used. Please use this_cpu_cpumask_var_ptr() in those cases. The * direct use of this_cpu_ptr() or this_cpu_read() will lead to failures * when the other type of cpumask_var_t implementation is configured. * * Please also note that __cpumask_var_read_mostly can be used to declare * a cpumask_var_t variable itself (not its content) as read mostly. */
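/*
 * Editorial example: the allocate/copy/free pattern the comment above
 * prescribes, using the helpers declared just below. demo_snapshot_online()
 * is hypothetical; note that cpumask_copy() is used instead of dereferencing
 * the cpumask_var_t.
 */
static inline int demo_snapshot_online(void)
{
	cpumask_var_t snap;

	if (!zalloc_cpumask_var(&snap, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(snap, cpu_online_mask);	/* copies nr_cpumask_bits only */
	pr_info("%u cpus online\n", cpumask_weight(snap));
	free_cpumask_var(snap);
	return 0;
}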
#ifdef CONFIG_CPUMASK_OFFSTACK typedef struct cpumask *cpumask_var_t; #define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) #define __cpumask_var_read_mostly __read_mostly bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void free_cpumask_var(cpumask_var_t mask); void free_bootmem_cpumask_var(cpumask_var_t mask); static inline bool cpumask_available(cpumask_var_t mask) { return mask != NULL; } #else typedef struct cpumask cpumask_var_t[1]; #define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) #define __cpumask_var_read_mostly static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) { return true; } static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) { return true; } static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) { cpumask_clear(*mask); return true; } static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) { cpumask_clear(*mask); return true; } static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) { } static inline void free_cpumask_var(cpumask_var_t mask) { } static inline void free_bootmem_cpumask_var(cpumask_var_t mask) { } static inline bool cpumask_available(cpumask_var_t mask) { return true; } #endif /* CONFIG_CPUMASK_OFFSTACK */ /* It's common to want to use cpu_all_mask in struct member initializers, * so it has to refer to an address rather than a pointer. */ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); #define cpu_all_mask to_cpumask(cpu_all_bits) /* First bits of cpu_bit_bitmap are in fact unset. */ #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) /* Wrappers for arch boot code to manipulate normally-constant masks */ void init_cpu_present(const struct cpumask *src); void init_cpu_possible(const struct cpumask *src); void init_cpu_online(const struct cpumask *src); static inline void reset_cpu_possible_mask(void) { bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS); } static inline void set_cpu_possible(unsigned int cpu, bool possible) { if (possible) cpumask_set_cpu(cpu, &__cpu_possible_mask); else cpumask_clear_cpu(cpu, &__cpu_possible_mask); } static inline void set_cpu_present(unsigned int cpu, bool present) { if (present) cpumask_set_cpu(cpu, &__cpu_present_mask); else cpumask_clear_cpu(cpu, &__cpu_present_mask); } void set_cpu_online(unsigned int cpu, bool online); static inline void set_cpu_active(unsigned int cpu, bool active) { if (active) cpumask_set_cpu(cpu, &__cpu_active_mask); else cpumask_clear_cpu(cpu, &__cpu_active_mask); } /** * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * * @bitmap: the bitmap * * There are a few places where cpumask_var_t isn't appropriate and * static cpumasks must be used (eg. very early boot), yet we don't * expose the definition of 'struct cpumask'. * * This does the conversion, and can be used as a constant initializer. */ #define to_cpumask(bitmap) \ ((struct cpumask *)(1 ?
(bitmap) \ : (void *)sizeof(__check_is_bitmap(bitmap)))) static inline int __check_is_bitmap(const unsigned long *bitmap) { return 1; } /* * Special-case data structure for "single bit set only" constant CPU masks. * * We pre-generate all the 64 (or 32) possible bit positions, with enough * padding to the left and the right, and return the constant pointer * appropriately offset. */ extern const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; static inline const struct cpumask *get_cpu_mask(unsigned int cpu) { const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; p -= cpu / BITS_PER_LONG; return to_cpumask(p); } #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) #if NR_CPUS <= BITS_PER_LONG #define CPU_BITS_ALL \ { \ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } #else /* NR_CPUS > BITS_PER_LONG */ #define CPU_BITS_ALL \ { \ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } #endif /* NR_CPUS > BITS_PER_LONG */ /** * cpumap_print_to_pagebuf - copies the cpumask into the buffer either * as comma-separated list of cpus or hex values of cpumask * @list: indicates whether the cpumap must be list * @mask: the cpumask to copy * @buf: the buffer to copy into * * Returns the length of the (null-terminated) @buf string, zero if * nothing is copied. */ static inline ssize_t cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) { return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask), nr_cpu_ids); } #if NR_CPUS <= BITS_PER_LONG #define CPU_MASK_ALL \ (cpumask_t) { { \ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } } #else #define CPU_MASK_ALL \ (cpumask_t) { { \ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } } #endif /* NR_CPUS > BITS_PER_LONG */ #define CPU_MASK_NONE \ (cpumask_t) { { \ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ } } #define CPU_MASK_CPU0 \ (cpumask_t) { { \ [0] = 1UL \ } } #endif /* __LINUX_CPUMASK_H */ armada-37xx-rwtm-mailbox.h 0000644 00000000660 14722070374 0011401 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * rWTM BIU Mailbox driver for Armada 37xx * * Author: Marek Behun <marek.behun@nic.cz> */ #ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_ #define _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_ #include <linux/types.h> struct armada_37xx_rwtm_tx_msg { u16 command; u32 args[16]; }; struct armada_37xx_rwtm_rx_msg { u32 retval; u32 status[16]; }; #endif /* _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_ */ ccp.h 0000644 00000044167 14722070374 0005504 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AMD Cryptographic Coprocessor (CCP) driver * * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <thomas.lendacky@amd.com> * Author: Gary R Hook <gary.hook@amd.com> */ #ifndef __CCP_H__ #define __CCP_H__ #include <linux/scatterlist.h> #include <linux/workqueue.h> #include <linux/list.h> #include <crypto/aes.h> #include <crypto/sha.h> struct ccp_device; struct ccp_cmd; #if defined(CONFIG_CRYPTO_DEV_SP_CCP) /** * ccp_present - check if a CCP device is present * * Returns zero if a CCP device is present, -ENODEV otherwise. 
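*/

/*
 * Editorial example: probing for the coprocessor with ccp_present() and
 * ccp_version() before queueing any work. demo_ccp_check() is hypothetical,
 * and in real code it would live after the declarations just below rather
 * than before them.
 */
static inline bool demo_ccp_check(void)
{
	if (ccp_present())		/* returns 0 when a device is present */
		return false;
	pr_info("ccp version: %u\n", ccp_version());
	return true;
}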
int ccp_present(void); #define CCP_VSIZE 16 #define CCP_VMASK ((unsigned int)((1 << CCP_VSIZE) - 1)) #define CCP_VERSION(v, r) ((unsigned int)((v << CCP_VSIZE) \ | (r & CCP_VMASK))) /** * ccp_version - get the version of the CCP * * Returns a positive version number, or zero if no CCP */ unsigned int ccp_version(void); /** * ccp_enqueue_cmd - queue an operation for processing by the CCP * * @cmd: ccp_cmd struct to be processed * * Refer to the ccp_cmd struct below for required fields. * * Queue a cmd to be processed by the CCP. If queueing the cmd * would exceed the defined length of the cmd queue, the cmd will * only be queued if the CCP_CMD_MAY_BACKLOG flag is set, and queueing * it will result in a return code of -EBUSY. * * The callback routine specified in the ccp_cmd struct will be * called to notify the caller of completion (if the cmd was not * backlogged) or advancement out of the backlog. If the cmd has * advanced out of the backlog the "err" value of the callback * will be -EINPROGRESS. Any other "err" value during callback is * the result of the operation. * * The cmd has been successfully queued if: * the return code is -EINPROGRESS or * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set */ int ccp_enqueue_cmd(struct ccp_cmd *cmd); #else /* CONFIG_CRYPTO_DEV_SP_CCP is not enabled */ static inline int ccp_present(void) { return -ENODEV; } static inline unsigned int ccp_version(void) { return 0; } static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_CCP */ /***** AES engine *****/ /** * ccp_aes_type - AES key size * * @CCP_AES_TYPE_128: 128-bit key * @CCP_AES_TYPE_192: 192-bit key * @CCP_AES_TYPE_256: 256-bit key */ enum ccp_aes_type { CCP_AES_TYPE_128 = 0, CCP_AES_TYPE_192, CCP_AES_TYPE_256, CCP_AES_TYPE__LAST, }; /** * ccp_aes_mode - AES operation mode * * @CCP_AES_MODE_ECB: ECB mode * @CCP_AES_MODE_CBC: CBC mode * @CCP_AES_MODE_OFB: OFB mode * @CCP_AES_MODE_CFB: CFB mode * @CCP_AES_MODE_CTR: CTR mode * @CCP_AES_MODE_CMAC: CMAC mode */ enum ccp_aes_mode { CCP_AES_MODE_ECB = 0, CCP_AES_MODE_CBC, CCP_AES_MODE_OFB, CCP_AES_MODE_CFB, CCP_AES_MODE_CTR, CCP_AES_MODE_CMAC, CCP_AES_MODE_GHASH, CCP_AES_MODE_GCTR, CCP_AES_MODE_GCM, CCP_AES_MODE_GMAC, CCP_AES_MODE__LAST, }; /** * ccp_aes_action - AES operation * * @CCP_AES_ACTION_DECRYPT: AES decrypt operation * @CCP_AES_ACTION_ENCRYPT: AES encrypt operation */ enum ccp_aes_action { CCP_AES_ACTION_DECRYPT = 0, CCP_AES_ACTION_ENCRYPT, CCP_AES_ACTION__LAST, }; /* Overloaded field */ #define CCP_AES_GHASHAAD CCP_AES_ACTION_DECRYPT #define CCP_AES_GHASHFINAL CCP_AES_ACTION_ENCRYPT /** * struct ccp_aes_engine - CCP AES operation * @type: AES operation key size * @mode: AES operation mode * @action: AES operation (decrypt/encrypt) * @key: key to be used for this AES operation * @key_len: length in bytes of key * @iv: IV to be used for this AES operation * @iv_len: length in bytes of iv * @src: data to be used for this operation * @dst: data produced by this operation * @src_len: length in bytes of data used for this operation * @cmac_final: indicates final operation when running in CMAC mode * @cmac_key: K1/K2 key used in final CMAC operation * @cmac_key_len: length in bytes of cmac_key * * Variables required to be set when calling ccp_enqueue_cmd(): * - type, mode, action, key, key_len, src, dst, src_len * - iv, iv_len for any mode other than ECB * - cmac_final for CMAC mode * - cmac_key, cmac_key_len for CMAC mode if cmac_final is non-zero * * The iv variable is used as both input
and output. On completion of the * AES operation the new IV overwrites the old IV. */ struct ccp_aes_engine { enum ccp_aes_type type; enum ccp_aes_mode mode; enum ccp_aes_action action; u32 authsize; struct scatterlist *key; u32 key_len; /* In bytes */ struct scatterlist *iv; u32 iv_len; /* In bytes */ struct scatterlist *src, *dst; u64 src_len; /* In bytes */ u32 cmac_final; /* Indicates final cmac cmd */ struct scatterlist *cmac_key; /* K1/K2 cmac key required for * final cmac cmd */ u32 cmac_key_len; /* In bytes */ u32 aad_len; /* In bytes */ }; /***** XTS-AES engine *****/ /** * ccp_xts_aes_unit_size - XTS unit size * * @CCP_XTS_AES_UNIT_SIZE_16: Unit size of 16 bytes * @CCP_XTS_AES_UNIT_SIZE_512: Unit size of 512 bytes * @CCP_XTS_AES_UNIT_SIZE_1024: Unit size of 1024 bytes * @CCP_XTS_AES_UNIT_SIZE_2048: Unit size of 2048 bytes * @CCP_XTS_AES_UNIT_SIZE_4096: Unit size of 4096 bytes */ enum ccp_xts_aes_unit_size { CCP_XTS_AES_UNIT_SIZE_16 = 0, CCP_XTS_AES_UNIT_SIZE_512, CCP_XTS_AES_UNIT_SIZE_1024, CCP_XTS_AES_UNIT_SIZE_2048, CCP_XTS_AES_UNIT_SIZE_4096, CCP_XTS_AES_UNIT_SIZE__LAST, }; /** * struct ccp_xts_aes_engine - CCP XTS AES operation * @action: AES operation (decrypt/encrypt) * @unit_size: unit size of the XTS operation * @key: key to be used for this XTS AES operation * @key_len: length in bytes of key * @iv: IV to be used for this XTS AES operation * @iv_len: length in bytes of iv * @src: data to be used for this operation * @dst: data produced by this operation * @src_len: length in bytes of data used for this operation * @final: indicates final XTS operation * * Variables required to be set when calling ccp_enqueue_cmd(): * - action, unit_size, key, key_len, iv, iv_len, src, dst, src_len, final * * The iv variable is used as both input and output. On completion of the * AES operation the new IV overwrites the old IV. */ struct ccp_xts_aes_engine { enum ccp_aes_type type; enum ccp_aes_action action; enum ccp_xts_aes_unit_size unit_size; struct scatterlist *key; u32 key_len; /* In bytes */ struct scatterlist *iv; u32 iv_len; /* In bytes */ struct scatterlist *src, *dst; u64 src_len; /* In bytes */ u32 final; }; /***** SHA engine *****/ /** * ccp_sha_type - type of SHA operation * * @CCP_SHA_TYPE_1: SHA-1 operation * @CCP_SHA_TYPE_224: SHA-224 operation * @CCP_SHA_TYPE_256: SHA-256 operation */ enum ccp_sha_type { CCP_SHA_TYPE_1 = 1, CCP_SHA_TYPE_224, CCP_SHA_TYPE_256, CCP_SHA_TYPE_384, CCP_SHA_TYPE_512, CCP_SHA_TYPE__LAST, }; /** * struct ccp_sha_engine - CCP SHA operation * @type: Type of SHA operation * @ctx: current hash value * @ctx_len: length in bytes of hash value * @src: data to be used for this operation * @src_len: length in bytes of data used for this operation * @opad: data to be used for final HMAC operation * @opad_len: length in bytes of data used for final HMAC operation * @first: indicates first SHA operation * @final: indicates final SHA operation * @msg_bits: total length of the message in bits used in final SHA operation * * Variables required to be set when calling ccp_enqueue_cmd(): * - type, ctx, ctx_len, src, src_len, final * - msg_bits if final is non-zero * * The ctx variable is used as both input and output. On completion of the * SHA operation the new hash value overwrites the old hash value. 
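*/

/*
 * Editorial example: filling a struct ccp_cmd (defined further below) for one
 * AES-CBC encrypt pass and handing it to ccp_enqueue_cmd(). All demo_* names
 * and the scatterlists are hypothetical; since completion is asynchronous, a
 * real caller must keep the cmd alive until the callback has run.
 */
static void demo_ccp_done(void *data, int err)
{
	if (err && err != -EINPROGRESS)
		pr_err("ccp: cmd failed: %d\n", err);
}

static int demo_aes_cbc_encrypt(struct ccp_cmd *cmd, struct scatterlist *key,
				struct scatterlist *iv, struct scatterlist *src,
				struct scatterlist *dst, u64 len)
{
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_AES;
	cmd->flags = CCP_CMD_MAY_BACKLOG;
	cmd->u.aes.type = CCP_AES_TYPE_128;
	cmd->u.aes.mode = CCP_AES_MODE_CBC;
	cmd->u.aes.action = CCP_AES_ACTION_ENCRYPT;
	cmd->u.aes.key = key;
	cmd->u.aes.key_len = AES_KEYSIZE_128;	/* from <crypto/aes.h> */
	cmd->u.aes.iv = iv;
	cmd->u.aes.iv_len = AES_BLOCK_SIZE;
	cmd->u.aes.src = src;
	cmd->u.aes.dst = dst;
	cmd->u.aes.src_len = len;
	cmd->callback = demo_ccp_done;

	ret = ccp_enqueue_cmd(cmd);
	/* -EINPROGRESS: queued; -EBUSY with MAY_BACKLOG: backlogged but accepted */
	return (ret == -EINPROGRESS || ret == -EBUSY) ? 0 : ret;
}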
struct ccp_sha_engine { enum ccp_sha_type type; struct scatterlist *ctx; u32 ctx_len; /* In bytes */ struct scatterlist *src; u64 src_len; /* In bytes */ struct scatterlist *opad; u32 opad_len; /* In bytes */ u32 first; /* Indicates first sha cmd */ u32 final; /* Indicates final sha cmd */ u64 msg_bits; /* Message length in bits required for * final sha cmd */ }; /***** 3DES engine *****/ enum ccp_des3_mode { CCP_DES3_MODE_ECB = 0, CCP_DES3_MODE_CBC, CCP_DES3_MODE_CFB, CCP_DES3_MODE__LAST, }; enum ccp_des3_type { CCP_DES3_TYPE_168 = 1, CCP_DES3_TYPE__LAST, }; enum ccp_des3_action { CCP_DES3_ACTION_DECRYPT = 0, CCP_DES3_ACTION_ENCRYPT, CCP_DES3_ACTION__LAST, }; /** * struct ccp_des3_engine - CCP 3DES operation * @type: Type of 3DES operation * @mode: cipher mode * @action: 3DES operation (decrypt/encrypt) * @key: key to be used for this 3DES operation * @key_len: length of key (in bytes) * @iv: IV to be used for this 3DES operation * @iv_len: length in bytes of iv * @src: input data to be used for this operation * @src_len: length of input data used for this operation (in bytes) * @dst: output data produced by this operation * * Variables required to be set when calling ccp_enqueue_cmd(): * - type, mode, action, key, key_len, src, dst, src_len * - iv, iv_len for any mode other than ECB * * The iv variable is used as both input and output. On completion of the * 3DES operation the new IV overwrites the old IV. */ struct ccp_des3_engine { enum ccp_des3_type type; enum ccp_des3_mode mode; enum ccp_des3_action action; struct scatterlist *key; u32 key_len; /* In bytes */ struct scatterlist *iv; u32 iv_len; /* In bytes */ struct scatterlist *src, *dst; u64 src_len; /* In bytes */ }; /***** RSA engine *****/ /** * struct ccp_rsa_engine - CCP RSA operation * @key_size: length in bits of RSA key * @exp: RSA exponent * @exp_len: length in bytes of exponent * @mod: RSA modulus * @mod_len: length in bytes of modulus * @src: data to be used for this operation * @dst: data produced by this operation * @src_len: length in bytes of data used for this operation * * Variables required to be set when calling ccp_enqueue_cmd(): * - key_size, exp, exp_len, mod, mod_len, src, dst, src_len */ struct ccp_rsa_engine { u32 key_size; /* In bits */ struct scatterlist *exp; u32 exp_len; /* In bytes */ struct scatterlist *mod; u32 mod_len; /* In bytes */ struct scatterlist *src, *dst; u32 src_len; /* In bytes */ }; /***** Passthru engine *****/ /** * ccp_passthru_bitwise - type of bitwise passthru operation * * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask */ enum ccp_passthru_bitwise { CCP_PASSTHRU_BITWISE_NOOP = 0, CCP_PASSTHRU_BITWISE_AND, CCP_PASSTHRU_BITWISE_OR, CCP_PASSTHRU_BITWISE_XOR, CCP_PASSTHRU_BITWISE_MASK, CCP_PASSTHRU_BITWISE__LAST, }; /** * ccp_passthru_byteswap - type of byteswap passthru operation * * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words */ enum ccp_passthru_byteswap { CCP_PASSTHRU_BYTESWAP_NOOP = 0, CCP_PASSTHRU_BYTESWAP_32BIT, CCP_PASSTHRU_BYTESWAP_256BIT, CCP_PASSTHRU_BYTESWAP__LAST, }; /** * struct ccp_passthru_engine - CCP pass-through operation * @bit_mod: bitwise operation to perform * @byte_swap:
byteswap operation to perform * @mask: mask to be applied to data * @mask_len: length in bytes of mask * @src: data to be used for this operation * @dst: data produced by this operation * @src_len: length in bytes of data used for this operation * @final: indicate final pass-through operation * * Variables required to be set when calling ccp_enqueue_cmd(): * - bit_mod, byte_swap, src, dst, src_len * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP */ struct ccp_passthru_engine { enum ccp_passthru_bitwise bit_mod; enum ccp_passthru_byteswap byte_swap; struct scatterlist *mask; u32 mask_len; /* In bytes */ struct scatterlist *src, *dst; u64 src_len; /* In bytes */ u32 final; }; /** * struct ccp_passthru_nomap_engine - CCP pass-through operation * without performing DMA mapping * @bit_mod: bitwise operation to perform * @byte_swap: byteswap operation to perform * @mask: mask to be applied to data * @mask_len: length in bytes of mask * @src: data to be used for this operation * @dst: data produced by this operation * @src_len: length in bytes of data used for this operation * @final: indicate final pass-through operation * * Variables required to be set when calling ccp_enqueue_cmd(): * - bit_mod, byte_swap, src, dst, src_len * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP */ struct ccp_passthru_nomap_engine { enum ccp_passthru_bitwise bit_mod; enum ccp_passthru_byteswap byte_swap; dma_addr_t mask; u32 mask_len; /* In bytes */ dma_addr_t src_dma, dst_dma; u64 src_len; /* In bytes */ u32 final; }; /***** ECC engine *****/ #define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ #define CCP_ECC_MAX_OPERANDS 6 #define CCP_ECC_MAX_OUTPUTS 3 /** * ccp_ecc_function - type of ECC function * * @CCP_ECC_FUNCTION_MMUL_384BIT: 384-bit modular multiplication * @CCP_ECC_FUNCTION_MADD_384BIT: 384-bit modular addition * @CCP_ECC_FUNCTION_MINV_384BIT: 384-bit multiplicative inverse * @CCP_ECC_FUNCTION_PADD_384BIT: 384-bit point addition * @CCP_ECC_FUNCTION_PMUL_384BIT: 384-bit point multiplication * @CCP_ECC_FUNCTION_PDBL_384BIT: 384-bit point doubling */ enum ccp_ecc_function { CCP_ECC_FUNCTION_MMUL_384BIT = 0, CCP_ECC_FUNCTION_MADD_384BIT, CCP_ECC_FUNCTION_MINV_384BIT, CCP_ECC_FUNCTION_PADD_384BIT, CCP_ECC_FUNCTION_PMUL_384BIT, CCP_ECC_FUNCTION_PDBL_384BIT, }; /** * struct ccp_ecc_modular_math - CCP ECC modular math parameters * @operand_1: first operand for the modular math operation * @operand_1_len: length of the first operand * @operand_2: second operand for the modular math operation * (not used for CCP_ECC_FUNCTION_MINV_384BIT) * @operand_2_len: length of the second operand * (not used for CCP_ECC_FUNCTION_MINV_384BIT) * @result: result of the modular math operation * @result_len: length of the supplied result buffer */ struct ccp_ecc_modular_math { struct scatterlist *operand_1; unsigned int operand_1_len; /* In bytes */ struct scatterlist *operand_2; unsigned int operand_2_len; /* In bytes */ struct scatterlist *result; unsigned int result_len; /* In bytes */ }; /** * struct ccp_ecc_point - CCP ECC point definition * @x: the x coordinate of the ECC point * @x_len: the length of the x coordinate * @y: the y coordinate of the ECC point * @y_len: the length of the y coordinate */ struct ccp_ecc_point { struct scatterlist *x; unsigned int x_len; /* In bytes */ struct scatterlist *y; unsigned int y_len; /* In bytes */ }; /** * struct ccp_ecc_point_math - CCP ECC point math parameters * @point_1: the first point of the ECC point math operation * @point_2: the second point of the ECC 
point math operation * (only used for CCP_ECC_FUNCTION_PADD_384BIT) * @domain_a: the a parameter of the ECC curve * @domain_a_len: the length of the a parameter * @scalar: the scalar parameter for the point math operation * (only used for CCP_ECC_FUNCTION_PMUL_384BIT) * @scalar_len: the length of the scalar parameter * (only used for CCP_ECC_FUNCTION_PMUL_384BIT) * @result: the point resulting from the point math operation */ struct ccp_ecc_point_math { struct ccp_ecc_point point_1; struct ccp_ecc_point point_2; struct scatterlist *domain_a; unsigned int domain_a_len; /* In bytes */ struct scatterlist *scalar; unsigned int scalar_len; /* In bytes */ struct ccp_ecc_point result; }; /** * struct ccp_ecc_engine - CCP ECC operation * @function: ECC function to perform * @mod: ECC modulus * @mod_len: length in bytes of modulus * @mm: modular math parameters * @pm: point math parameters * @ecc_result: result of the ECC operation * * Variables required to be set when calling ccp_enqueue_cmd(): * - function, mod, mod_len * - operand, operand_len, operand_count, output, output_len, output_count * - ecc_result */ struct ccp_ecc_engine { enum ccp_ecc_function function; struct scatterlist *mod; u32 mod_len; /* In bytes */ union { struct ccp_ecc_modular_math mm; struct ccp_ecc_point_math pm; } u; u16 ecc_result; }; /** * ccp_engine - CCP operation identifiers * * @CCP_ENGINE_AES: AES operation * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation * @CCP_ENGINE_DES3: 3DES operation * @CCP_ENGINE_SHA: SHA operation * @CCP_ENGINE_RSA: RSA operation * @CCP_ENGINE_PASSTHRU: pass-through operation * @CCP_ENGINE_ZLIB_DECOMPRESS: unused * @CCP_ENGINE_ECC: ECC operation */ enum ccp_engine { CCP_ENGINE_AES = 0, CCP_ENGINE_XTS_AES_128, CCP_ENGINE_DES3, CCP_ENGINE_SHA, CCP_ENGINE_RSA, CCP_ENGINE_PASSTHRU, CCP_ENGINE_ZLIB_DECOMPRESS, CCP_ENGINE_ECC, CCP_ENGINE__LAST, }; /* Flag values for flags member of ccp_cmd */ #define CCP_CMD_MAY_BACKLOG 0x00000001 #define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002 /** * struct ccp_cmd - CCP operation request * @entry: list element (ccp driver use only) * @work: work element used for callbacks (ccp driver use only) * @ccp: CCP device to be run on * @ret: operation return code (ccp driver use only) * @flags: cmd processing flags * @engine: CCP operation to perform * @engine_error: CCP engine return code * @u: engine specific structures, refer to specific engine struct below * @callback: operation completion callback function * @data: parameter value to be supplied to the callback function * * Variables required to be set when calling ccp_enqueue_cmd(): * - engine, callback * - See the operation structures below for what is required for each * operation. */ struct ccp_cmd { /* The list_head, work_struct, ccp and ret variables are for use * by the CCP driver only. */ struct list_head entry; struct work_struct work; struct ccp_device *ccp; int ret; u32 flags; enum ccp_engine engine; u32 engine_error; union { struct ccp_aes_engine aes; struct ccp_xts_aes_engine xts; struct ccp_des3_engine des3; struct ccp_sha_engine sha; struct ccp_rsa_engine rsa; struct ccp_passthru_engine passthru; struct ccp_passthru_nomap_engine passthru_nomap; struct ccp_ecc_engine ecc; } u; /* Completion callback support */ void (*callback)(void *data, int err); void *data; }; #endif circ_buf.h 0000644 00000002140 14722070374 0006474 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * See Documentation/core-api/circular-buffers.rst for more information.
*/ #ifndef _LINUX_CIRC_BUF_H #define _LINUX_CIRC_BUF_H 1 struct circ_buf { char *buf; int head; int tail; }; /* Return count in buffer. */ #define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1)) /* Return space available, 0..size-1. We always leave one free char as a completely full buffer has head == tail, which is the same as empty. */ #define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size)) /* Return count up to the end of the buffer. Carefully avoid accessing head and tail more than once, so they can change underneath us without returning inconsistent results. */ #define CIRC_CNT_TO_END(head,tail,size) \ ({int end = (size) - (tail); \ int n = ((head) + end) & ((size)-1); \ n < end ? n : end;}) /* Return space available up to the end of the buffer. */ #define CIRC_SPACE_TO_END(head,tail,size) \ ({int end = (size) - 1 - (head); \ int n = (end + (tail)) & ((size)-1); \ n <= end ? n : end+1;}) #endif /* _LINUX_CIRC_BUF_H */ page_counter.h 0000644 00000003701 14722070374 0007377 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGE_COUNTER_H #define _LINUX_PAGE_COUNTER_H #include <linux/atomic.h> #include <linux/kernel.h> #include <asm/page.h> struct page_counter { atomic_long_t usage; unsigned long min; unsigned long low; unsigned long max; struct page_counter *parent; /* effective memory.min and memory.min usage tracking */ unsigned long emin; atomic_long_t min_usage; atomic_long_t children_min_usage; /* effective memory.low and memory.low usage tracking */ unsigned long elow; atomic_long_t low_usage; atomic_long_t children_low_usage; /* legacy */ unsigned long watermark; unsigned long failcnt; }; #if BITS_PER_LONG == 32 #define PAGE_COUNTER_MAX LONG_MAX #else #define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) #endif static inline void page_counter_init(struct page_counter *counter, struct page_counter *parent) { atomic_long_set(&counter->usage, 0); counter->max = PAGE_COUNTER_MAX; counter->parent = parent; } static inline unsigned long page_counter_read(struct page_counter *counter) { return atomic_long_read(&counter->usage); } void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); bool page_counter_try_charge(struct page_counter *counter, unsigned long nr_pages, struct page_counter **fail); void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages); int page_counter_memparse(const char *buf, const char *max, unsigned long *nr_pages); static inline void page_counter_reset_watermark(struct page_counter *counter) { counter->watermark = page_counter_read(counter); } #endif /* _LINUX_PAGE_COUNTER_H */ crash_dump.h 0000644 00000010025 14722070374 0007046 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_CRASH_DUMP_H #define LINUX_CRASH_DUMP_H #include <linux/kexec.h> #include <linux/proc_fs.h> #include <linux/elf.h> #include <uapi/linux/vmcore.h> #include <asm/pgtable.h> /* for pgprot_t */ #ifdef CONFIG_CRASH_DUMP #define ELFCORE_ADDR_MAX (-1ULL) #define ELFCORE_ADDR_ERR (-2ULL) extern unsigned long long elfcorehdr_addr; extern unsigned long long elfcorehdr_size; extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size); extern void elfcorehdr_free(unsigned 
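long long addr);

/*
 * Editorial example for the CIRC_* macros in circ_buf.h above: a
 * single-producer push, following Documentation/core-api/circular-buffers.rst.
 * demo_put() and DEMO_SIZE are hypothetical; the size must be a power of two
 * for the masking arithmetic to hold.
 */
#define DEMO_SIZE 256

static inline int demo_put(struct circ_buf *cb, char c)
{
	if (!CIRC_SPACE(cb->head, cb->tail, DEMO_SIZE))
		return -EAGAIN;		/* full: one slot always stays free */
	cb->buf[cb->head] = c;
	cb->head = (cb->head + 1) & (DEMO_SIZE - 1);
	return 0;
}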
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos); extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); extern int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot); extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, unsigned long, int); extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf); void vmcore_cleanup(void); /* Architecture code defines this if there are other possible ELF * machine types, e.g. on bi-arch capable hardware. */ #ifndef vmcore_elf_check_arch_cross #define vmcore_elf_check_arch_cross(x) 0 #endif /* * Architecture code can redefine this if there are any special checks * needed for 32-bit ELF or 64-bit ELF vmcores. In case of 32-bit * only architecture, vmcore_elf64_check_arch can be set to zero. */ #ifndef vmcore_elf32_check_arch #define vmcore_elf32_check_arch(x) elf_check_arch(x) #endif #ifndef vmcore_elf64_check_arch #define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) #endif /* * is_kdump_kernel() checks whether this kernel is booting after a panic of * the previous kernel or not. This is determined by checking whether the * previous kernel has passed the ELF core header address on the command line. * * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will * return true if CONFIG_CRASH_DUMP=y and the kernel is booting after a panic * of the previous kernel. */ static inline bool is_kdump_kernel(void) { return elfcorehdr_addr != ELFCORE_ADDR_MAX; } /* is_vmcore_usable() checks if the kernel is booting after a panic and * the vmcore region is usable. * * This makes use of the fact that due to alignment -2ULL is not * a valid pointer, much in the vein of IS_ERR(), except that it deals * directly with an unsigned long long rather than a pointer. */ static inline int is_vmcore_usable(void) { return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0; }
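/*
 * Editorial example: the usual guard sequence built from the helpers above.
 * demo_vmcore_init() is hypothetical.
 */
static inline int demo_vmcore_init(void)
{
	if (!is_kdump_kernel())
		return 0;		/* normal boot: no ELF core header passed */
	if (!is_vmcore_usable())
		return -ENODATA;	/* header address present but marked bad */
	/* ... safe to parse the ELF core header at elfcorehdr_addr ... */
	return 0;
}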
/* vmcore_unusable() marks the vmcore as unusable, * without disturbing the logic of is_kdump_kernel() */ static inline void vmcore_unusable(void) { if (is_kdump_kernel()) elfcorehdr_addr = ELFCORE_ADDR_ERR; } #define HAVE_OLDMEM_PFN_IS_RAM 1 extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)); extern void unregister_oldmem_pfn_is_ram(void); #else /* !CONFIG_CRASH_DUMP */ static inline bool is_kdump_kernel(void) { return false; } #endif /* CONFIG_CRASH_DUMP */ extern unsigned long saved_max_pfn; /* Device Dump information to be filled by drivers */ struct vmcoredd_data { char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */ unsigned int size; /* Size of the dump */ /* Driver's registered callback to be invoked to collect dump */ int (*vmcoredd_callback)(struct vmcoredd_data *data, void *buf); }; #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP int vmcore_add_device_dump(struct vmcoredd_data *data); #else static inline int vmcore_add_device_dump(struct vmcoredd_data *data) { return -EOPNOTSUPP; } #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ #ifdef CONFIG_PROC_VMCORE ssize_t read_from_oldmem(char *buf, size_t count, u64 *ppos, int userbuf, bool encrypted); #else static inline ssize_t read_from_oldmem(char *buf, size_t count, u64 *ppos, int userbuf, bool encrypted) { return -EOPNOTSUPP; } #endif /* CONFIG_PROC_VMCORE */ #endif /* LINUX_CRASH_DUMP_H */ mmap_lock.h 0000644 00000002072 14722070374 0006666 0 ustar 00 #ifndef _LINUX_MMAP_LOCK_H #define _LINUX_MMAP_LOCK_H static inline void mmap_init_lock(struct mm_struct *mm) { init_rwsem(&mm->mmap_sem); } static inline void mmap_write_lock(struct mm_struct *mm) { down_write(&mm->mmap_sem); } static inline int mmap_write_lock_killable(struct mm_struct *mm) { return down_write_killable(&mm->mmap_sem); } static inline bool mmap_write_trylock(struct mm_struct *mm) { return down_write_trylock(&mm->mmap_sem) != 0; } static inline void mmap_write_unlock(struct mm_struct *mm) { up_write(&mm->mmap_sem); } static inline void mmap_write_downgrade(struct mm_struct *mm) { downgrade_write(&mm->mmap_sem); } static inline void mmap_read_lock(struct mm_struct *mm) { down_read(&mm->mmap_sem); } static inline int mmap_read_lock_killable(struct mm_struct *mm) { return down_read_killable(&mm->mmap_sem); } static inline bool mmap_read_trylock(struct mm_struct *mm) { return down_read_trylock(&mm->mmap_sem) != 0; } static inline void mmap_read_unlock(struct mm_struct *mm) { up_read(&mm->mmap_sem); } #endif /* _LINUX_MMAP_LOCK_H */ rpmsg/qcom_smd.h 0000644 00000001134 14722070374 0007654 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RPMSG_QCOM_SMD_H #define _LINUX_RPMSG_QCOM_SMD_H #include <linux/device.h> struct qcom_smd_edge; #if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, struct device_node *node); int qcom_smd_unregister_edge(struct qcom_smd_edge *edge); #else static inline struct qcom_smd_edge * qcom_smd_register_edge(struct device *parent, struct device_node *node) { return NULL; } static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) { return 0; } #endif #endif rpmsg/qcom_glink.h 0000644 00000001122 14722070374 0010172 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RPMSG_QCOM_GLINK_H #define _LINUX_RPMSG_QCOM_GLINK_H #include <linux/device.h> struct qcom_glink; #if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM) struct qcom_glink *qcom_glink_smem_register(struct device *parent, struct device_node *node);
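/*
 * Editorial example for the mmap_sem wrappers in mmap_lock.h above: a VMA
 * lookup under the read side of the lock. demo_addr_mapped() is hypothetical;
 * find_vma() is the usual <linux/mm.h> helper.
 */
static inline bool demo_addr_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);		/* first vma with vm_end > addr */
	mapped = vma && addr >= vma->vm_start;	/* is addr actually inside it? */
	mmap_read_unlock(mm);
	return mapped;
}

void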
qcom_glink_smem_unregister(struct qcom_glink *glink); #else static inline struct qcom_glink * qcom_glink_smem_register(struct device *parent, struct device_node *node) { return NULL; } static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {} #endif #endif leds-bd2802.h 0000644 00000000755 14722070374 0006560 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * leds-bd2802.h - RGB LED Driver * * Copyright (C) 2009 Samsung Electronics * Kim Kyuwon <q1.kim@samsung.com> * * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf */ #ifndef _LEDS_BD2802_H_ #define _LEDS_BD2802_H_ struct bd2802_led_platform_data{ int reset_gpio; u8 rgb_time; }; #define RGB_TIME(slopedown, slopeup, waveform) \ ((slopedown) << 6 | (slopeup) << 4 | (waveform)) #endif /* _LEDS_BD2802_H_ */ fd.h 0000644 00000000752 14722070374 0005320 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FD_H #define _LINUX_FD_H #include <uapi/linux/fd.h> #ifdef CONFIG_COMPAT #include <linux/compat.h> struct compat_floppy_struct { compat_uint_t size; compat_uint_t sect; compat_uint_t head; compat_uint_t track; compat_uint_t stretch; unsigned char gap; unsigned char rate; unsigned char spec1; unsigned char fmt_gap; const compat_caddr_t name; }; #define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct) #endif #endif elf-fdpic.h 0000644 00000003756 14722070374 0006567 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* FDPIC ELF load map * * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_ELF_FDPIC_H #define _LINUX_ELF_FDPIC_H #include <uapi/linux/elf-fdpic.h> /* * binfmt binary parameters structure */ struct elf_fdpic_params { struct elfhdr hdr; /* ref copy of ELF header */ struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */ struct elf32_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */ unsigned long elfhdr_addr; /* mapped ELF header user address */ unsigned long ph_addr; /* mapped PT_PHDR user address */ unsigned long map_addr; /* mapped loadmap user address */ unsigned long entry_addr; /* mapped entry user address */ unsigned long stack_size; /* stack size requested (PT_GNU_STACK) */ unsigned long dynamic_addr; /* mapped PT_DYNAMIC user address */ unsigned long load_addr; /* user address at which to map binary */ unsigned long flags; #define ELF_FDPIC_FLAG_ARRANGEMENT 0x0000000f /* PT_LOAD arrangement flags */ #define ELF_FDPIC_FLAG_INDEPENDENT 0x00000000 /* PT_LOADs can be put anywhere */ #define ELF_FDPIC_FLAG_HONOURVADDR 0x00000001 /* PT_LOAD.vaddr must be honoured */ #define ELF_FDPIC_FLAG_CONSTDISP 0x00000002 /* PT_LOADs require constant * displacement */ #define ELF_FDPIC_FLAG_CONTIGUOUS 0x00000003 /* PT_LOADs should be contiguous */ #define ELF_FDPIC_FLAG_EXEC_STACK 0x00000010 /* T if stack to be executable */ #define ELF_FDPIC_FLAG_NOEXEC_STACK 0x00000020 /* T if stack not to be executable */ #define ELF_FDPIC_FLAG_EXECUTABLE 0x00000040 /* T if this object is the executable */ #define ELF_FDPIC_FLAG_PRESENT 0x80000000 /* T if this object is present */ }; #ifdef CONFIG_MMU extern void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params, struct elf_fdpic_params *interp_params, unsigned long *start_stack, unsigned long *start_brk); #endif #endif /* _LINUX_ELF_FDPIC_H */ compat.h 0000644 00000077016 14722070374 0006221 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_COMPAT_H #define _LINUX_COMPAT_H /* * These are the type definitions for the 
architecture specific * syscall compatibility layer. */ #include <linux/types.h> #include <linux/time.h> #include <linux/stat.h> #include <linux/param.h> /* for HZ */ #include <linux/sem.h> #include <linux/socket.h> #include <linux/if.h> #include <linux/fs.h> #include <linux/aio_abi.h> /* for aio_context_t */ #include <linux/uaccess.h> #include <linux/unistd.h> #include <asm/compat.h> #ifdef CONFIG_COMPAT #include <asm/siginfo.h> #include <asm/signal.h> #endif #ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER /* * It may be useful for an architecture to override the definitions of the * COMPAT_SYSCALL_DEFINE0 and COMPAT_SYSCALL_DEFINEx() macros, in particular * to use a different calling convention for syscalls. To allow for that, * the prototypes for the compat_sys_*() functions below will *not* be included * if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. */ #include <asm/syscall_wrapper.h> #endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ #ifndef COMPAT_USE_64BIT_TIME #define COMPAT_USE_64BIT_TIME 0 #endif #ifndef __SC_DELOUSE #define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v)) #endif #ifndef COMPAT_SYSCALL_DEFINE0 #define COMPAT_SYSCALL_DEFINE0(name) \ asmlinkage long compat_sys_##name(void); \ ALLOW_ERROR_INJECTION(compat_sys_##name, ERRNO); \ asmlinkage long compat_sys_##name(void) #endif /* COMPAT_SYSCALL_DEFINE0 */ #define COMPAT_SYSCALL_DEFINE1(name, ...) \ COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) #define COMPAT_SYSCALL_DEFINE2(name, ...) \ COMPAT_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) #define COMPAT_SYSCALL_DEFINE3(name, ...) \ COMPAT_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) #define COMPAT_SYSCALL_DEFINE4(name, ...) \ COMPAT_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__) #define COMPAT_SYSCALL_DEFINE5(name, ...) \ COMPAT_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) #define COMPAT_SYSCALL_DEFINE6(name, ...) \ COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) /* * The asmlinkage stub is aliased to a function named __se_compat_sys_*() which * sign-extends 32-bit ints to longs whenever needed. The actual work is * done within __do_compat_sys_*(). */ #ifndef COMPAT_SYSCALL_DEFINEx #define COMPAT_SYSCALL_DEFINEx(x, name, ...)
\ __diag_push(); \ __diag_ignore(GCC, 8, "-Wattribute-alias", \ "Type aliasing is used to sanitize syscall arguments");\ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ __attribute__((alias(__stringify(__se_compat_sys##name)))); \ ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ { \ long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ __MAP(x,__SC_TEST,__VA_ARGS__); \ return ret; \ } \ __diag_pop(); \ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #endif /* COMPAT_SYSCALL_DEFINEx */ #ifdef CONFIG_COMPAT #ifndef compat_user_stack_pointer #define compat_user_stack_pointer() current_user_stack_pointer() #endif #ifndef compat_sigaltstack /* we'll need that for MIPS */ typedef struct compat_sigaltstack { compat_uptr_t ss_sp; int ss_flags; compat_size_t ss_size; } compat_stack_t; #endif #ifndef COMPAT_MINSIGSTKSZ #define COMPAT_MINSIGSTKSZ MINSIGSTKSZ #endif #define compat_jiffies_to_clock_t(x) \ (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) typedef __compat_uid32_t compat_uid_t; typedef __compat_gid32_t compat_gid_t; struct compat_sel_arg_struct; struct rusage; struct compat_itimerval { struct old_timeval32 it_interval; struct old_timeval32 it_value; }; struct itimerval; int get_compat_itimerval(struct itimerval *, const struct compat_itimerval __user *); int put_compat_itimerval(struct compat_itimerval __user *, const struct itimerval *); struct compat_tms { compat_clock_t tms_utime; compat_clock_t tms_stime; compat_clock_t tms_cutime; compat_clock_t tms_cstime; }; #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) typedef struct { compat_sigset_word sig[_COMPAT_NSIG_WORDS]; } compat_sigset_t; int set_compat_user_sigmask(const compat_sigset_t __user *umask, size_t sigsetsize); struct compat_sigaction { #ifndef __ARCH_HAS_IRIX_SIGACTION compat_uptr_t sa_handler; compat_ulong_t sa_flags; #else compat_uint_t sa_flags; compat_uptr_t sa_handler; #endif #ifdef __ARCH_HAS_SA_RESTORER compat_uptr_t sa_restorer; #endif compat_sigset_t sa_mask __packed; }; typedef union compat_sigval { compat_int_t sival_int; compat_uptr_t sival_ptr; } compat_sigval_t; typedef struct compat_siginfo { int si_signo; #ifndef __ARCH_HAS_SWAPPED_SIGINFO int si_errno; int si_code; #else int si_code; int si_errno; #endif union { int _pad[128/sizeof(int) - 3]; /* kill() */ struct { compat_pid_t _pid; /* sender's pid */ __compat_uid32_t _uid; /* sender's uid */ } _kill; /* POSIX.1b timers */ struct { compat_timer_t _tid; /* timer id */ int _overrun; /* overrun count */ compat_sigval_t _sigval; /* same as below */ } _timer; /* POSIX.1b signals */ struct { compat_pid_t _pid; /* sender's pid */ __compat_uid32_t _uid; /* sender's uid */ compat_sigval_t _sigval; } _rt; /* SIGCHLD */ struct { compat_pid_t _pid; /* which child */ __compat_uid32_t _uid; /* sender's uid */ int _status; /* exit code */ compat_clock_t _utime; compat_clock_t _stime; } _sigchld; #ifdef CONFIG_X86_X32_ABI /* SIGCHLD (x32 version) */ struct { compat_pid_t _pid; /* which child */ __compat_uid32_t _uid; /* sender's uid */ int _status; /* exit code */ compat_s64 _utime; compat_s64 _stime; } _sigchld_x32; #endif /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ struct { compat_uptr_t _addr; /* faulting 
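/*
 * Editorial example: what a user of the COMPAT_SYSCALL_DEFINEx() machinery
 * above actually writes. 'demo' is a hypothetical syscall; the expansion
 * yields compat_sys_demo() aliased to __se_compat_sys_demo(), which
 * sign-extends the 32-bit arguments before calling __do_compat_sys_demo().
 * Wiring the entry into the syscall table is a separate step.
 */
COMPAT_SYSCALL_DEFINE2(demo, compat_ulong_t, flags, compat_uptr_t, uptr)
{
	void __user *p = compat_ptr(uptr);	/* widen the 32-bit user pointer */

	if (flags)
		return -EINVAL;
	return p ? 0 : -EFAULT;
}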
insn/memory ref. */ #ifdef __ARCH_SI_TRAPNO int _trapno; /* TRAP # which caused the signal */ #endif #define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \ sizeof(short) : __alignof__(compat_uptr_t)) union { /* * used when si_code=BUS_MCEERR_AR or * used when si_code=BUS_MCEERR_AO */ short int _addr_lsb; /* Valid LSB of the reported address. */ /* used when si_code=SEGV_BNDERR */ struct { char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD]; compat_uptr_t _lower; compat_uptr_t _upper; } _addr_bnd; /* used when si_code=SEGV_PKUERR */ struct { char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD]; u32 _pkey; } _addr_pkey; }; } _sigfault; /* SIGPOLL */ struct { compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */ int _fd; } _sigpoll; struct { compat_uptr_t _call_addr; /* calling user insn */ int _syscall; /* triggering system call number */ unsigned int _arch; /* AUDIT_ARCH_* of syscall */ } _sigsys; } _sifields; } compat_siginfo_t; /* * These functions operate on 32- or 64-bit specs depending on * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments. */ extern int compat_get_timespec(struct timespec *, const void __user *); extern int compat_put_timespec(const struct timespec *, void __user *); extern int compat_get_timeval(struct timeval *, const void __user *); extern int compat_put_timeval(const struct timeval *, void __user *); struct compat_iovec { compat_uptr_t iov_base; compat_size_t iov_len; }; struct compat_rlimit { compat_ulong_t rlim_cur; compat_ulong_t rlim_max; }; struct compat_rusage { struct old_timeval32 ru_utime; struct old_timeval32 ru_stime; compat_long_t ru_maxrss; compat_long_t ru_ixrss; compat_long_t ru_idrss; compat_long_t ru_isrss; compat_long_t ru_minflt; compat_long_t ru_majflt; compat_long_t ru_nswap; compat_long_t ru_inblock; compat_long_t ru_oublock; compat_long_t ru_msgsnd; compat_long_t ru_msgrcv; compat_long_t ru_nsignals; compat_long_t ru_nvcsw; compat_long_t ru_nivcsw; }; extern int put_compat_rusage(const struct rusage *, struct compat_rusage __user *); struct compat_siginfo; struct __compat_aio_sigset; struct compat_dirent { u32 d_ino; compat_off_t d_off; u16 d_reclen; char d_name[256]; }; struct compat_ustat { compat_daddr_t f_tfree; compat_ino_t f_tinode; char f_fname[6]; char f_fpack[6]; }; #define COMPAT_SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3) typedef struct compat_sigevent { compat_sigval_t sigev_value; compat_int_t sigev_signo; compat_int_t sigev_notify; union { compat_int_t _pad[COMPAT_SIGEV_PAD_SIZE]; compat_int_t _tid; struct { compat_uptr_t _function; compat_uptr_t _attribute; } _sigev_thread; } _sigev_un; } compat_sigevent_t; struct compat_ifmap { compat_ulong_t mem_start; compat_ulong_t mem_end; unsigned short base_addr; unsigned char irq; unsigned char dma; unsigned char port; }; struct compat_if_settings { unsigned int type; /* Type of physical device or protocol */ unsigned int size; /* Size of the data allocated by the caller */ compat_uptr_t ifs_ifsu; /* union of pointers */ }; struct compat_ifreq { union { char ifrn_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ } ifr_ifrn; union { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; struct sockaddr ifru_netmask; struct sockaddr ifru_hwaddr; short ifru_flags; compat_int_t ifru_ivalue; compat_int_t ifru_mtu; struct compat_ifmap ifru_map; char ifru_slave[IFNAMSIZ]; /* Just fits the size */ char ifru_newname[IFNAMSIZ]; compat_caddr_t ifru_data; struct compat_if_settings ifru_settings; } ifr_ifru; }; struct compat_ifconf { compat_int_t ifc_len; /* size of buffer */ compat_caddr_t ifcbuf; }; struct compat_robust_list { compat_uptr_t next; }; struct compat_robust_list_head { struct compat_robust_list list; compat_long_t futex_offset; compat_uptr_t list_op_pending; }; #ifdef CONFIG_COMPAT_OLD_SIGACTION struct compat_old_sigaction { compat_uptr_t sa_handler; compat_old_sigset_t sa_mask; compat_ulong_t sa_flags; compat_uptr_t sa_restorer; }; #endif struct compat_keyctl_kdf_params { compat_uptr_t hashname; compat_uptr_t otherinfo; __u32 otherinfolen; __u32 __spare[8]; }; struct compat_statfs; struct compat_statfs64; struct compat_old_linux_dirent; struct compat_linux_dirent; struct linux_dirent64; struct compat_msghdr; struct compat_mmsghdr; struct compat_sysinfo; struct compat_sysctl_args; struct compat_kexec_segment; struct compat_mq_attr; struct compat_msgbuf; #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) #define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG) long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, unsigned long bitmap_size); long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, unsigned long bitmap_size); int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from); int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from); int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event); static inline int old_timeval32_compare(struct old_timeval32 *lhs, struct old_timeval32 *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; if (lhs->tv_sec > rhs->tv_sec) return 1; return lhs->tv_usec - rhs->tv_usec; } static inline int old_timespec32_compare(struct old_timespec32 *lhs, struct old_timespec32 *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; if (lhs->tv_sec > rhs->tv_sec) return 1; return lhs->tv_nsec - rhs->tv_nsec; } extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat); /* * Defined inline such that size can be compile time constant, which avoids * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct */ static inline int put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, unsigned int size) { /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */ #ifdef __BIG_ENDIAN compat_sigset_t v; switch (_NSIG_WORDS) { case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; /* fall through */ case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2]; /* fall through */ case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1]; /* fall through */ case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0]; } return copy_to_user(compat, &v, size) ? -EFAULT : 0; #else return copy_to_user(compat, set, size) ? 
-EFAULT : 0; #endif } extern int compat_ptrace_request(struct task_struct *child, compat_long_t request, compat_ulong_t addr, compat_ulong_t data); extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t addr, compat_ulong_t data); struct epoll_event; /* fortunately, this one is fixed-layout */ extern ssize_t compat_rw_copy_check_uvector(int type, const struct compat_iovec __user *uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, struct iovec **ret_pointer); extern void __user *compat_alloc_user_space(unsigned long len); int compat_restore_altstack(const compat_stack_t __user *uss); int __compat_save_altstack(compat_stack_t __user *, unsigned long); #define compat_save_altstack_ex(uss, sp) do { \ compat_stack_t __user *__uss = uss; \ struct task_struct *t = current; \ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ put_user_ex(t->sas_ss_size, &__uss->ss_size); \ if (t->sas_ss_flags & SS_AUTODISARM) \ sas_ss_reset(t); \ } while (0); /* * These syscall function prototypes are kept in the same order as * include/uapi/asm-generic/unistd.h. Deprecated or obsolete system calls * go below. * * Please note that these prototypes here are only provided for information * purposes, for static analysis, and for linking from the syscall table. * These functions should not be called elsewhere from kernel code. * * As the syscall calling convention may be different from the default * for architectures overriding the syscall calling convention, do not * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled. */ #ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, u32 __user *iocb); asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, compat_long_t min_nr, compat_long_t nr, struct io_event __user *events, struct old_timespec32 __user *timeout, const struct __compat_aio_sigset __user *usig); asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id, compat_long_t min_nr, compat_long_t nr, struct io_event __user *events, struct __kernel_timespec __user *timeout, const struct __compat_aio_sigset __user *usig); /* fs/cookies.c */ asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); /* fs/eventpoll.c */ asmlinkage long compat_sys_epoll_pwait(int epfd, struct epoll_event __user *events, int maxevents, int timeout, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize); /* fs/fcntl.c */ asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd, compat_ulong_t arg); asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd, compat_ulong_t arg); /* fs/ioctl.c */ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, compat_ulong_t arg); /* fs/namespace.c */ asmlinkage long compat_sys_mount(const char __user *dev_name, const char __user *dir_name, const char __user *type, compat_ulong_t flags, const void __user *data); /* fs/open.c */ asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_statfs __user *buf); asmlinkage long compat_sys_statfs64(const char __user *pathname, compat_size_t sz, struct compat_statfs64 __user *buf); asmlinkage long compat_sys_fstatfs(unsigned int fd, struct compat_statfs __user *buf); asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct 
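/*
 * Editorial example of the classic compat_alloc_user_space() pattern declared
 * above: widen a 32-bit layout into a native structure placed on the user
 * stack before calling native code. struct demo_compat, struct demo_native
 * and demo_compat_wrap() are hypothetical stand-ins for a real ABI pair.
 */
struct demo_native { unsigned long a; unsigned long b; };
struct demo_compat { compat_ulong_t a; compat_ulong_t b; };

static inline long demo_compat_wrap(struct demo_compat __user *u32p)
{
	struct demo_compat c;
	struct demo_native __user *np;

	if (copy_from_user(&c, u32p, sizeof(c)))
		return -EFAULT;
	np = compat_alloc_user_space(sizeof(*np));
	if (put_user((unsigned long)c.a, &np->a) ||
	    put_user((unsigned long)c.b, &np->b))
		return -EFAULT;
	/* ... pass np to the native handler ... */
	return 0;
}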
compat_statfs64 __user *buf); asmlinkage long compat_sys_truncate(const char __user *, compat_off_t); asmlinkage long compat_sys_ftruncate(unsigned int, compat_off_t); /* No generic prototype for truncate64, ftruncate64, fallocate */ asmlinkage long compat_sys_openat(int dfd, const char __user *filename, int flags, umode_t mode); /* fs/readdir.c */ asmlinkage long compat_sys_getdents(unsigned int fd, struct compat_linux_dirent __user *dirent, unsigned int count); /* fs/read_write.c */ asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd, const struct compat_iovec __user *vec, compat_ulong_t vlen); asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd, const struct compat_iovec __user *vec, compat_ulong_t vlen); /* No generic prototype for pread64 and pwrite64 */ asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd, const struct compat_iovec __user *vec, compat_ulong_t vlen, u32 pos_low, u32 pos_high); asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd, const struct compat_iovec __user *vec, compat_ulong_t vlen, u32 pos_low, u32 pos_high); #ifdef __ARCH_WANT_COMPAT_SYS_PREADV64 asmlinkage long compat_sys_preadv64(unsigned long fd, const struct compat_iovec __user *vec, unsigned long vlen, loff_t pos); #endif #ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64 asmlinkage long compat_sys_pwritev64(unsigned long fd, const struct compat_iovec __user *vec, unsigned long vlen, loff_t pos); #endif /* fs/sendfile.c */ asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, compat_size_t count); asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, compat_size_t count); /* fs/select.c */ asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct old_timespec32 __user *tsp, void __user *sig); asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct __kernel_timespec __user *tsp, void __user *sig); asmlinkage long compat_sys_ppoll_time32(struct pollfd __user *ufds, unsigned int nfds, struct old_timespec32 __user *tsp, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize); asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds, unsigned int nfds, struct __kernel_timespec __user *tsp, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize); /* fs/signalfd.c */ asmlinkage long compat_sys_signalfd4(int ufd, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize, int flags); /* fs/splice.c */ asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *, unsigned int nr_segs, unsigned int flags); /* fs/stat.c */ asmlinkage long compat_sys_newfstatat(unsigned int dfd, const char __user *filename, struct compat_stat __user *statbuf, int flag); asmlinkage long compat_sys_newfstat(unsigned int fd, struct compat_stat __user *statbuf); /* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */ /* kernel/exit.c */ asmlinkage long compat_sys_waitid(int, compat_pid_t, struct compat_siginfo __user *, int, struct compat_rusage __user *); /* kernel/futex.c */ asmlinkage long compat_sys_set_robust_list(struct compat_robust_list_head __user *head, compat_size_t len); asmlinkage long compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, compat_size_t __user *len_ptr); /* kernel/itimer.c */ asmlinkage long 
compat_sys_getitimer(int which, struct compat_itimerval __user *it); asmlinkage long compat_sys_setitimer(int which, struct compat_itimerval __user *in, struct compat_itimerval __user *out); /* kernel/kexec.c */ asmlinkage long compat_sys_kexec_load(compat_ulong_t entry, compat_ulong_t nr_segments, struct compat_kexec_segment __user *, compat_ulong_t flags); /* kernel/posix-timers.c */ asmlinkage long compat_sys_timer_create(clockid_t which_clock, struct compat_sigevent __user *timer_event_spec, timer_t __user *created_timer_id); /* kernel/ptrace.c */ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, compat_long_t addr, compat_long_t data); /* kernel/sched/core.c */ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr); asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr); /* kernel/signal.c */ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, compat_stack_t __user *uoss_ptr); asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize); #ifndef CONFIG_ODD_RT_SIGACTION asmlinkage long compat_sys_rt_sigaction(int, const struct compat_sigaction __user *, struct compat_sigaction __user *, compat_size_t); #endif asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_sigset_t __user *oset, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigtimedwait_time32(compat_sigset_t __user *uthese, struct compat_siginfo __user *uinfo, struct old_timespec32 __user *uts, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese, struct compat_siginfo __user *uinfo, struct __kernel_timespec __user *uts, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo); /* No generic prototype for rt_sigreturn */ /* kernel/sys.c */ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf); asmlinkage long compat_sys_getrlimit(unsigned int resource, struct compat_rlimit __user *rlim); asmlinkage long compat_sys_setrlimit(unsigned int resource, struct compat_rlimit __user *rlim); asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); /* kernel/time.c */ asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); /* kernel/timer.c */ asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); /* ipc/mqueue.c */ asmlinkage long compat_sys_mq_open(const char __user *u_name, int oflag, compat_mode_t mode, struct compat_mq_attr __user *u_attr); asmlinkage long compat_sys_mq_notify(mqd_t mqdes, const struct compat_sigevent __user *u_notification); asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, const struct compat_mq_attr __user *u_mqstat, struct compat_mq_attr __user *u_omqstat); /* ipc/msg.c */ asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr); asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg); asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, int msgflg); /* ipc/sem.c */ asmlinkage long compat_sys_semctl(int semid, int 
semnum, int cmd, int arg); /* ipc/shm.c */ asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); /* net/socket.c */ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, unsigned flags, struct sockaddr __user *addr, int __user *addrlen); asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, char __user *optval, unsigned int optlen); asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, char __user *optval, int __user *optlen); asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags); asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags); /* mm/filemap.c: No generic prototype for readahead */ /* security/keys/keyctl.c */ asmlinkage long compat_sys_keyctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5); /* arch/example/kernel/sys_example.c */ asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, const compat_uptr_t __user *envp); /* mm/fadvise.c: No generic prototype for fadvise64_64 */ /* mm/, CONFIG_MMU only */ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, compat_ulong_t mode, compat_ulong_t __user *nmask, compat_ulong_t maxnode, compat_ulong_t flags); asmlinkage long compat_sys_get_mempolicy(int __user *policy, compat_ulong_t __user *nmask, compat_ulong_t maxnode, compat_ulong_t addr, compat_ulong_t flags); asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, compat_ulong_t maxnode); asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, const compat_ulong_t __user *new_nodes); asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages, __u32 __user *pages, const int __user *nodes, int __user *status, int flags); asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo); asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, struct __kernel_timespec __user *timeout); asmlinkage long compat_sys_recvmmsg_time32(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, struct old_timespec32 __user *timeout); asmlinkage long compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, struct compat_rusage __user *ru); asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, int, const char __user *); asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, struct file_handle __user *handle, int flags); asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags); asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, const struct compat_iovec __user *lvec, compat_ulong_t liovcnt, const struct compat_iovec __user *rvec, compat_ulong_t riovcnt, compat_ulong_t flags); asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid, const struct compat_iovec __user *lvec, compat_ulong_t liovcnt, const struct compat_iovec __user *rvec, compat_ulong_t riovcnt, compat_ulong_t flags); asmlinkage long compat_sys_execveat(int dfd, const char __user *filename, const compat_uptr_t __user *argv, const compat_uptr_t __user *envp, int flags); asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd, const struct compat_iovec 
__user *vec, compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
		const struct compat_iovec __user *vec,
		compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
asmlinkage long compat_sys_preadv64v2(unsigned long fd,
		const struct compat_iovec __user *vec,
		unsigned long vlen, loff_t pos, rwf_t flags);
#endif

#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
		const struct compat_iovec __user *vec,
		unsigned long vlen, loff_t pos, rwf_t flags);
#endif

/*
 * Deprecated system calls which are still defined in
 * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch
 */

/* __ARCH_WANT_SYSCALL_NO_AT */
asmlinkage long compat_sys_open(const char __user *filename, int flags,
				umode_t mode);

/* __ARCH_WANT_SYSCALL_NO_FLAGS */
asmlinkage long compat_sys_signalfd(int ufd,
				    const compat_sigset_t __user *sigmask,
				    compat_size_t sigsetsize);

/* __ARCH_WANT_SYSCALL_OFF_T */
asmlinkage long compat_sys_newstat(const char __user *filename,
				   struct compat_stat __user *statbuf);
asmlinkage long compat_sys_newlstat(const char __user *filename,
				    struct compat_stat __user *statbuf);

/* __ARCH_WANT_SYSCALL_DEPRECATED */
asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
		compat_ulong_t __user *outp, compat_ulong_t __user *exp,
		struct old_timeval32 __user *tvp);
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
				unsigned flags);
asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);

/* obsolete: fs/readdir.c */
asmlinkage long compat_sys_old_readdir(unsigned int fd,
				       struct compat_old_linux_dirent __user *,
				       unsigned int count);

/* obsolete: fs/select.c */
asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);

/* obsolete: ipc */
asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);

/* obsolete: kernel/signal.c */
#ifdef __ARCH_WANT_SYS_SIGPENDING
asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset,
				       compat_old_sigset_t __user *oset);
#endif

#ifdef CONFIG_COMPAT_OLD_SIGACTION
asmlinkage long compat_sys_sigaction(int sig,
				     const struct compat_old_sigaction __user *act,
				     struct compat_old_sigaction __user *oact);
#endif

/* obsolete: net/socket.c */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);

#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */

/*
 * For most but not all architectures, "am I in a compat syscall?" and
 * "am I a compat task?" are the same question. For architectures on which
 * they aren't the same question, arch code can override in_compat_syscall.
 */

#ifndef in_compat_syscall
static inline bool in_compat_syscall(void) { return is_compat_task(); }
#endif

/**
 * ns_to_old_timeval32 - Compat version of ns_to_timeval
 * @nsec: the nanoseconds value to be converted
 *
 * Returns the old_timeval32 representation of the nsec parameter.
 */
static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
{
	struct timeval tv;
	struct old_timeval32 ctv;

	tv = ns_to_timeval(nsec);
	ctv.tv_sec = tv.tv_sec;
	ctv.tv_usec = tv.tv_usec;

	return ctv;
}
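/*
 * Editor's illustration (added in editing; not part of the original
 * header): the in_compat_syscall() distinction documented above matters
 * when one syscall body serves both the native and the compat entry
 * points and must pick a user-visible layout at run time. The names
 * foo_info, compat_foo_info and do_foo_info are hypothetical; the
 * pattern is only a sketch, kept under #if 0 so it is never compiled.
 */
#if 0
static int do_foo_info(const struct foo_info *info, void __user *ubuf)
{
	if (in_compat_syscall()) {
		struct compat_foo_info cinfo = {
			.value = info->value,	/* narrowed to the 32-bit layout */
		};

		return copy_to_user(ubuf, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;
	}

	return copy_to_user(ubuf, info, sizeof(*info)) ? -EFAULT : 0;
}
#endif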
/*
 * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz())
 * directly. Instead, use one of the functions which work equivalently, such
 * as the kcompat_sys_xyzyyz() functions prototyped below.
 */
int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
			 struct compat_statfs64 __user * buf);
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
			  struct compat_statfs64 __user * buf);

#else /* !CONFIG_COMPAT */

#define is_compat_task() (0)
/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */
#define in_compat_syscall in_compat_syscall
static inline bool in_compat_syscall(void) { return false; }

#endif /* CONFIG_COMPAT */

#endif /* _LINUX_COMPAT_H */
splice.h 0000644 00000006014 14722070374 0006203 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * Function declarations and data structures related to the splice
 * implementation.
 *
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 */
#ifndef SPLICE_H
#define SPLICE_H

#include <linux/pipe_fs_i.h>

/*
 * Flags passed in from splice/tee/vmsplice
 */
#define SPLICE_F_MOVE	(0x01)	/* move pages instead of copying */
#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
				 /* we may still block on the fd we splice */
				 /* from/to, of course) */
#define SPLICE_F_MORE	(0x04)	/* expect more data */
#define SPLICE_F_GIFT	(0x08)	/* pages passed in are a gift */

#define SPLICE_F_ALL (SPLICE_F_MOVE|SPLICE_F_NONBLOCK|SPLICE_F_MORE|SPLICE_F_GIFT)

/*
 * Passed to the actors
 */
struct splice_desc {
	size_t total_len;		/* remaining length */
	unsigned int len;		/* current length */
	unsigned int flags;		/* splice flags */
	/*
	 * actor() private data
	 */
	union {
		void __user *userptr;	/* memory to write to */
		struct file *file;	/* file to read/write */
		void *data;		/* cookie */
	} u;
	loff_t pos;			/* file position */
	loff_t *opos;			/* sendfile: output position */
	size_t num_spliced;		/* number of bytes already spliced */
	bool need_wakeup;		/* need to wake up writer */
};

struct partial_page {
	unsigned int offset;
	unsigned int len;
	unsigned long private;
};

/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
	struct page **pages;		/* page map */
	struct partial_page *partial;	/* pages[] may not be contig */
	int nr_pages;			/* number of populated pages in map */
	unsigned int nr_pages_max;	/* pages[] & partial[] arrays size */
	const struct pipe_buf_operations *ops;/* ops associated with output pipe */
	void (*spd_release)(struct splice_pipe_desc *, unsigned int);
};

typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);
typedef int (splice_direct_actor)(struct pipe_inode_info *,
				  struct splice_desc *);

extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
				loff_t *, size_t, unsigned int,
				splice_actor *);
extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
				  struct splice_desc *, splice_actor *);
extern ssize_t splice_to_pipe(struct pipe_inode_info *,
			      struct splice_pipe_desc *);
extern ssize_t add_to_pipe(struct pipe_inode_info *,
			   struct pipe_buffer *);
extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
				      splice_direct_actor *);

/*
 * for dynamic pipe sizing
 */
extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
extern void splice_shrink_spd(struct splice_pipe_desc *);

extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
extern const struct pipe_buf_operations default_pipe_buf_ops;
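/*
 * Editor's illustration (added in editing; not part of the original
 * header): how an actor plugs into splice_from_pipe(). A filesystem's
 * ->splice_write() commonly wraps splice_from_pipe() with an actor that
 * consumes one pipe_buffer at a time; sd->u.file and sd->pos identify
 * the output. my_splice_actor/my_splice_write are hypothetical names
 * and the actor body is only sketched; kept under #if 0.
 */
#if 0
static int my_splice_actor(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf, struct splice_desc *sd)
{
	/*
	 * Consume up to sd->len bytes from buf, writing them to
	 * sd->u.file at sd->pos. Return the number of bytes consumed
	 * (<= sd->len) or a negative errno; the core advances the pipe
	 * and the splice_desc accordingly.
	 */
	return sd->len;
}

static ssize_t my_splice_write(struct pipe_inode_info *pipe, struct file *out,
			       loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, my_splice_actor);
}
#endif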
extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags);
extern long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags);

#endif
ramfs.h 0000644 00000001223 14722070374 0006031 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RAMFS_H
#define _LINUX_RAMFS_H

struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
	 umode_t mode, dev_t dev);
extern int ramfs_init_fs_context(struct fs_context *fc);

#ifdef CONFIG_MMU
static inline int
ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
	return 0;
}
#else
extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
#endif

extern const struct fs_parameter_description ramfs_fs_parameters;
extern const struct file_operations ramfs_file_operations;
extern const struct vm_operations_struct generic_file_vm_ops;

#endif
time64.h 0000644 00000011144 14722070374 0006034 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TIME64_H
#define _LINUX_TIME64_H

#include <linux/math64.h>

typedef __s64 time64_t;
typedef __u64 timeu64_t;

#include <uapi/linux/time.h>

struct timespec64 {
	time64_t	tv_sec;			/* seconds */
	long		tv_nsec;		/* nanoseconds */
};

struct itimerspec64 {
	struct timespec64 it_interval;
	struct timespec64 it_value;
};

/* Parameters used to convert the timespec values: */
#define MSEC_PER_SEC	1000L
#define USEC_PER_MSEC	1000L
#define NSEC_PER_USEC	1000L
#define NSEC_PER_MSEC	1000000L
#define USEC_PER_SEC	1000000L
#define NSEC_PER_SEC	1000000000L
#define FSEC_PER_SEC	1000000000000000LL

/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX			((s64)~((u64)1 << 63))
#define TIME64_MIN			(-TIME64_MAX - 1)

#define KTIME_MAX			((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX			(KTIME_MAX / NSEC_PER_SEC)

/*
 * Limits for settimeofday():
 *
 * To prevent setting the time close to the wraparound point time setting
 * is limited so a reasonable uptime can be accommodated. Uptime of 30 years
 * should be really sufficient, which means the cutoff is 2232. At that
 * point the cutoff is just a small part of the larger problem.
 */
#define TIME_UPTIME_SEC_MAX	(30LL * 365 * 24 * 3600)
#define TIME_SETTOD_SEC_MAX	(KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX)

static inline int timespec64_equal(const struct timespec64 *a,
				   const struct timespec64 *b)
{
	return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}

/*
 * lhs < rhs:  return <0
 * lhs == rhs: return 0
 * lhs > rhs:  return >0
 */
static inline int timespec64_compare(const struct timespec64 *lhs,
				     const struct timespec64 *rhs)
{
	if (lhs->tv_sec < rhs->tv_sec)
		return -1;
	if (lhs->tv_sec > rhs->tv_sec)
		return 1;
	return lhs->tv_nsec - rhs->tv_nsec;
}

extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);

static inline struct timespec64 timespec64_add(struct timespec64 lhs,
					       struct timespec64 rhs)
{
	struct timespec64 ts_delta;
	set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
				  lhs.tv_nsec + rhs.tv_nsec);
	return ts_delta;
}

/*
 * sub = lhs - rhs, in normalized form
 */
static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
					       struct timespec64 rhs)
{
	struct timespec64 ts_delta;
	set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
				  lhs.tv_nsec - rhs.tv_nsec);
	return ts_delta;
}

/*
 * Returns true if the timespec64 is normalized, false if denormalized:
 */
static inline bool timespec64_valid(const struct timespec64 *ts)
{
	/* Dates before 1970 are bogus */
	if (ts->tv_sec < 0)
		return false;
	/* Can't have more nanoseconds than a second */
	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return false;
	return true;
}

static inline bool timespec64_valid_strict(const struct timespec64 *ts)
{
	if (!timespec64_valid(ts))
		return false;
	/* Disallow values that could overflow ktime_t */
	if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
		return false;
	return true;
}

static inline bool timespec64_valid_settod(const struct timespec64 *ts)
{
	if (!timespec64_valid(ts))
		return false;
	/* Disallow values which cause overflow issues vs. CLOCK_REALTIME */
	if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX)
		return false;
	return true;
}

/**
 * timespec64_to_ns - Convert timespec64 to nanoseconds
 * @ts: pointer to the timespec64 variable to be converted
 *
 * Returns the scalar nanosecond representation of the timespec64
 * parameter.
 */
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
	/* Prevent multiplication overflow */
	if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
		return KTIME_MAX;

	return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}

/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec: the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
extern struct timespec64 ns_to_timespec64(const s64 nsec);

/**
 * timespec64_add_ns - Adds nanoseconds to a timespec64
 * @a: pointer to timespec64 to be incremented
 * @ns: unsigned nanoseconds value to be added
 *
 * This must always be inlined because it's used from the x86-64 vdso,
 * which cannot call other kernel functions.
 */
static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
{
	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
	a->tv_nsec = ns;
}
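/*
 * Editor's illustration (added in editing; not part of the original
 * header): typical use of the helpers above to measure an interval.
 * The function name elapsed_ns is hypothetical; kept under #if 0 so it
 * is never compiled.
 */
#if 0
static s64 elapsed_ns(const struct timespec64 *start,
		      const struct timespec64 *end)
{
	struct timespec64 delta;

	/* Guard against a reversed interval */
	if (timespec64_compare(end, start) < 0)
		return 0;

	delta = timespec64_sub(*end, *start);	/* normalized difference */
	return timespec64_to_ns(&delta);	/* saturates at KTIME_MAX */
}
#endif

/*
 * timespec64_add_safe assumes both values are positive and checks for
 * overflow. It will return TIME64_MAX in case of overflow.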
*/ extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, const struct timespec64 rhs); #endif /* _LINUX_TIME64_H */ hdmi.h 0000644 00000030414 14722070374 0005646 0 ustar 00 /* * Copyright (C) 2012 Avionic Design GmbH * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __LINUX_HDMI_H_ #define __LINUX_HDMI_H_ #include <linux/types.h> #include <linux/device.h> enum hdmi_packet_type { HDMI_PACKET_TYPE_NULL = 0x00, HDMI_PACKET_TYPE_AUDIO_CLOCK_REGEN = 0x01, HDMI_PACKET_TYPE_AUDIO_SAMPLE = 0x02, HDMI_PACKET_TYPE_GENERAL_CONTROL = 0x03, HDMI_PACKET_TYPE_ACP = 0x04, HDMI_PACKET_TYPE_ISRC1 = 0x05, HDMI_PACKET_TYPE_ISRC2 = 0x06, HDMI_PACKET_TYPE_ONE_BIT_AUDIO_SAMPLE = 0x07, HDMI_PACKET_TYPE_DST_AUDIO = 0x08, HDMI_PACKET_TYPE_HBR_AUDIO_STREAM = 0x09, HDMI_PACKET_TYPE_GAMUT_METADATA = 0x0a, /* + enum hdmi_infoframe_type */ }; enum hdmi_infoframe_type { HDMI_INFOFRAME_TYPE_VENDOR = 0x81, HDMI_INFOFRAME_TYPE_AVI = 0x82, HDMI_INFOFRAME_TYPE_SPD = 0x83, HDMI_INFOFRAME_TYPE_AUDIO = 0x84, HDMI_INFOFRAME_TYPE_DRM = 0x87, }; #define HDMI_IEEE_OUI 0x000c03 #define HDMI_FORUM_IEEE_OUI 0xc45dd8 #define HDMI_INFOFRAME_HEADER_SIZE 4 #define HDMI_AVI_INFOFRAME_SIZE 13 #define HDMI_SPD_INFOFRAME_SIZE 25 #define HDMI_AUDIO_INFOFRAME_SIZE 10 #define HDMI_DRM_INFOFRAME_SIZE 26 #define HDMI_INFOFRAME_SIZE(type) \ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE) struct hdmi_any_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; }; enum hdmi_colorspace { HDMI_COLORSPACE_RGB, HDMI_COLORSPACE_YUV422, HDMI_COLORSPACE_YUV444, HDMI_COLORSPACE_YUV420, HDMI_COLORSPACE_RESERVED4, HDMI_COLORSPACE_RESERVED5, HDMI_COLORSPACE_RESERVED6, HDMI_COLORSPACE_IDO_DEFINED, }; enum hdmi_scan_mode { HDMI_SCAN_MODE_NONE, HDMI_SCAN_MODE_OVERSCAN, HDMI_SCAN_MODE_UNDERSCAN, HDMI_SCAN_MODE_RESERVED, }; enum hdmi_colorimetry { HDMI_COLORIMETRY_NONE, HDMI_COLORIMETRY_ITU_601, HDMI_COLORIMETRY_ITU_709, HDMI_COLORIMETRY_EXTENDED, }; enum hdmi_picture_aspect { HDMI_PICTURE_ASPECT_NONE, HDMI_PICTURE_ASPECT_4_3, HDMI_PICTURE_ASPECT_16_9, HDMI_PICTURE_ASPECT_64_27, HDMI_PICTURE_ASPECT_256_135, HDMI_PICTURE_ASPECT_RESERVED, }; enum hdmi_active_aspect { HDMI_ACTIVE_ASPECT_16_9_TOP = 2, HDMI_ACTIVE_ASPECT_14_9_TOP = 3, HDMI_ACTIVE_ASPECT_16_9_CENTER = 4, HDMI_ACTIVE_ASPECT_PICTURE = 8, HDMI_ACTIVE_ASPECT_4_3 = 9, HDMI_ACTIVE_ASPECT_16_9 = 10, HDMI_ACTIVE_ASPECT_14_9 = 11, HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13, HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14, 
HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15, }; enum hdmi_extended_colorimetry { HDMI_EXTENDED_COLORIMETRY_XV_YCC_601, HDMI_EXTENDED_COLORIMETRY_XV_YCC_709, HDMI_EXTENDED_COLORIMETRY_S_YCC_601, HDMI_EXTENDED_COLORIMETRY_OPYCC_601, HDMI_EXTENDED_COLORIMETRY_OPRGB, /* The following EC values are only defined in CEA-861-F. */ HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM, HDMI_EXTENDED_COLORIMETRY_BT2020, HDMI_EXTENDED_COLORIMETRY_RESERVED, }; enum hdmi_quantization_range { HDMI_QUANTIZATION_RANGE_DEFAULT, HDMI_QUANTIZATION_RANGE_LIMITED, HDMI_QUANTIZATION_RANGE_FULL, HDMI_QUANTIZATION_RANGE_RESERVED, }; /* non-uniform picture scaling */ enum hdmi_nups { HDMI_NUPS_UNKNOWN, HDMI_NUPS_HORIZONTAL, HDMI_NUPS_VERTICAL, HDMI_NUPS_BOTH, }; enum hdmi_ycc_quantization_range { HDMI_YCC_QUANTIZATION_RANGE_LIMITED, HDMI_YCC_QUANTIZATION_RANGE_FULL, }; enum hdmi_content_type { HDMI_CONTENT_TYPE_GRAPHICS, HDMI_CONTENT_TYPE_PHOTO, HDMI_CONTENT_TYPE_CINEMA, HDMI_CONTENT_TYPE_GAME, }; enum hdmi_metadata_type { HDMI_STATIC_METADATA_TYPE1 = 1, }; enum hdmi_eotf { HDMI_EOTF_TRADITIONAL_GAMMA_SDR, HDMI_EOTF_TRADITIONAL_GAMMA_HDR, HDMI_EOTF_SMPTE_ST2084, HDMI_EOTF_BT_2100_HLG, }; struct hdmi_avi_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; enum hdmi_colorspace colorspace; enum hdmi_scan_mode scan_mode; enum hdmi_colorimetry colorimetry; enum hdmi_picture_aspect picture_aspect; enum hdmi_active_aspect active_aspect; bool itc; enum hdmi_extended_colorimetry extended_colorimetry; enum hdmi_quantization_range quantization_range; enum hdmi_nups nups; unsigned char video_code; enum hdmi_ycc_quantization_range ycc_quantization_range; enum hdmi_content_type content_type; unsigned char pixel_repeat; unsigned short top_bar; unsigned short bottom_bar; unsigned short left_bar; unsigned short right_bar; }; /* DRM Infoframe as per CTA 861.G spec */ struct hdmi_drm_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; enum hdmi_eotf eotf; enum hdmi_metadata_type metadata_type; struct { u16 x, y; } display_primaries[3]; struct { u16 x, y; } white_point; u16 max_display_mastering_luminance; u16 min_display_mastering_luminance; u16 max_cll; u16 max_fall; }; int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame); ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, size_t size); ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame, void *buffer, size_t size); int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame); int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame); ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer, size_t size); ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame, void *buffer, size_t size); int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame); enum hdmi_spd_sdi { HDMI_SPD_SDI_UNKNOWN, HDMI_SPD_SDI_DSTB, HDMI_SPD_SDI_DVDP, HDMI_SPD_SDI_DVHS, HDMI_SPD_SDI_HDDVR, HDMI_SPD_SDI_DVC, HDMI_SPD_SDI_DSC, HDMI_SPD_SDI_VCD, HDMI_SPD_SDI_GAME, HDMI_SPD_SDI_PC, HDMI_SPD_SDI_BD, HDMI_SPD_SDI_SACD, HDMI_SPD_SDI_HDDVD, HDMI_SPD_SDI_PMP, }; struct hdmi_spd_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; char vendor[8]; char product[16]; enum hdmi_spd_sdi sdi; }; int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame, const char *vendor, const char *product); ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer, size_t size); ssize_t 
hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame, void *buffer, size_t size); int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame); enum hdmi_audio_coding_type { HDMI_AUDIO_CODING_TYPE_STREAM, HDMI_AUDIO_CODING_TYPE_PCM, HDMI_AUDIO_CODING_TYPE_AC3, HDMI_AUDIO_CODING_TYPE_MPEG1, HDMI_AUDIO_CODING_TYPE_MP3, HDMI_AUDIO_CODING_TYPE_MPEG2, HDMI_AUDIO_CODING_TYPE_AAC_LC, HDMI_AUDIO_CODING_TYPE_DTS, HDMI_AUDIO_CODING_TYPE_ATRAC, HDMI_AUDIO_CODING_TYPE_DSD, HDMI_AUDIO_CODING_TYPE_EAC3, HDMI_AUDIO_CODING_TYPE_DTS_HD, HDMI_AUDIO_CODING_TYPE_MLP, HDMI_AUDIO_CODING_TYPE_DST, HDMI_AUDIO_CODING_TYPE_WMA_PRO, HDMI_AUDIO_CODING_TYPE_CXT, }; enum hdmi_audio_sample_size { HDMI_AUDIO_SAMPLE_SIZE_STREAM, HDMI_AUDIO_SAMPLE_SIZE_16, HDMI_AUDIO_SAMPLE_SIZE_20, HDMI_AUDIO_SAMPLE_SIZE_24, }; enum hdmi_audio_sample_frequency { HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM, HDMI_AUDIO_SAMPLE_FREQUENCY_32000, HDMI_AUDIO_SAMPLE_FREQUENCY_44100, HDMI_AUDIO_SAMPLE_FREQUENCY_48000, HDMI_AUDIO_SAMPLE_FREQUENCY_88200, HDMI_AUDIO_SAMPLE_FREQUENCY_96000, HDMI_AUDIO_SAMPLE_FREQUENCY_176400, HDMI_AUDIO_SAMPLE_FREQUENCY_192000, }; enum hdmi_audio_coding_type_ext { /* Refer to Audio Coding Type (CT) field in Data Byte 1 */ HDMI_AUDIO_CODING_TYPE_EXT_CT, /* * The next three CXT values are defined in CEA-861-E only. * They do not exist in older versions, and in CEA-861-F they are * defined as 'Not in use'. */ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC, HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2, HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND, /* The following CXT values are only defined in CEA-861-F. */ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC, HDMI_AUDIO_CODING_TYPE_EXT_DRA, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND, HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10, }; struct hdmi_audio_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; unsigned char channels; enum hdmi_audio_coding_type coding_type; enum hdmi_audio_sample_size sample_size; enum hdmi_audio_sample_frequency sample_frequency; enum hdmi_audio_coding_type_ext coding_type_ext; unsigned char channel_allocation; unsigned char level_shift_value; bool downmix_inhibit; }; int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame); ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, void *buffer, size_t size); ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame, void *buffer, size_t size); int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame); enum hdmi_3d_structure { HDMI_3D_STRUCTURE_INVALID = -1, HDMI_3D_STRUCTURE_FRAME_PACKING = 0, HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE, HDMI_3D_STRUCTURE_LINE_ALTERNATIVE, HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL, HDMI_3D_STRUCTURE_L_DEPTH, HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH, HDMI_3D_STRUCTURE_TOP_AND_BOTTOM, HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8, }; struct hdmi_vendor_infoframe { enum hdmi_infoframe_type type; unsigned char version; unsigned char length; unsigned int oui; u8 vic; enum hdmi_3d_structure s3d_struct; unsigned int s3d_ext_data; }; /* HDR Metadata as per 861.G spec */ struct hdr_static_metadata { __u8 eotf; __u8 metadata_type; __u16 max_cll; __u16 max_fall; __u16 min_cll; }; /** * struct hdr_sink_metadata - HDR sink metadata * * Metadata Information read from Sink's EDID */ struct hdr_sink_metadata { /** * @metadata_type: Static_Metadata_Descriptor_ID. */ __u32 metadata_type; /** * @hdmi_type1: HDR Metadata Infoframe. 
 */
	union {
		struct hdr_static_metadata hdmi_type1;
	};
};

int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
				   void *buffer, size_t size);
ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
					void *buffer, size_t size);
int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame);

union hdmi_vendor_any_infoframe {
	struct {
		enum hdmi_infoframe_type type;
		unsigned char version;
		unsigned char length;
		unsigned int oui;
	} any;
	struct hdmi_vendor_infoframe hdmi;
};

/**
 * union hdmi_infoframe - overall union of all abstract infoframe representations
 * @any: generic infoframe
 * @avi: avi infoframe
 * @spd: spd infoframe
 * @vendor: union of all vendor infoframes
 * @audio: audio infoframe
 * @drm: Dynamic Range and Mastering infoframe
 *
 * This is used by the generic pack function. This works since all infoframes
 * have the same header which also indicates which type of infoframe should be
 * packed.
 */
union hdmi_infoframe {
	struct hdmi_any_infoframe any;
	struct hdmi_avi_infoframe avi;
	struct hdmi_spd_infoframe spd;
	union hdmi_vendor_any_infoframe vendor;
	struct hdmi_audio_infoframe audio;
	struct hdmi_drm_infoframe drm;
};

ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
			    size_t size);
ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
				 void *buffer, size_t size);
int hdmi_infoframe_check(union hdmi_infoframe *frame);
int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
			  const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
			const union hdmi_infoframe *frame);

#endif /* __LINUX_HDMI_H_ */
fsi-occ.h 0000644 00000001251 14722070374 0006245 0 ustar 00 // SPDX-License-Identifier: GPL-2.0

#ifndef LINUX_FSI_OCC_H
#define LINUX_FSI_OCC_H

struct device;

#define OCC_RESP_CMD_IN_PRG		0xFF
#define OCC_RESP_SUCCESS		0
#define OCC_RESP_CMD_INVAL		0x11
#define OCC_RESP_CMD_LEN_INVAL		0x12
#define OCC_RESP_DATA_INVAL		0x13
#define OCC_RESP_CHKSUM_ERR		0x14
#define OCC_RESP_INT_ERR		0x15
#define OCC_RESP_BAD_STATE		0x16
#define OCC_RESP_CRIT_EXCEPT		0xE0
#define OCC_RESP_CRIT_INIT		0xE1
#define OCC_RESP_CRIT_WATCHDOG		0xE2
#define OCC_RESP_CRIT_OCB		0xE3
#define OCC_RESP_CRIT_HW		0xE4

int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
		   void *response, size_t *resp_len);

#endif /* LINUX_FSI_OCC_H */
keyctl.h 0000644 00000002374 14722070374 0006224 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */
/* keyctl kernel bits
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#ifndef __LINUX_KEYCTL_H
#define __LINUX_KEYCTL_H

#include <uapi/linux/keyctl.h>

struct kernel_pkey_query {
	__u32		supported_ops;	/* Which ops are supported */
	__u32		key_size;	/* Size of the key in bits */
	__u16		max_data_size;	/* Maximum size of raw data to sign in bytes */
	__u16		max_sig_size;	/* Maximum size of signature in bytes */
	__u16		max_enc_size;	/* Maximum size of encrypted blob in bytes */
	__u16		max_dec_size;	/* Maximum size of decrypted blob in bytes */
};

enum kernel_pkey_operation {
	kernel_pkey_encrypt,
	kernel_pkey_decrypt,
	kernel_pkey_sign,
	kernel_pkey_verify,
};

struct kernel_pkey_params {
	struct key	*key;
	const char	*encoding;	/* Encoding (eg. "oaep" or "raw" for none) */
	const char	*hash_algo;	/* Digest algorithm used (eg.
"sha1") or NULL if N/A */ char *info; /* Modified info string to be released later */ __u32 in_len; /* Input data size */ union { __u32 out_len; /* Output buffer size (enc/dec/sign) */ __u32 in2_len; /* 2nd input data size (verify) */ }; enum kernel_pkey_operation op : 8; }; #endif /* __LINUX_KEYCTL_H */ kexec.h 0000644 00000027225 14722070374 0006032 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_KEXEC_H #define LINUX_KEXEC_H #define IND_DESTINATION_BIT 0 #define IND_INDIRECTION_BIT 1 #define IND_DONE_BIT 2 #define IND_SOURCE_BIT 3 #define IND_DESTINATION (1 << IND_DESTINATION_BIT) #define IND_INDIRECTION (1 << IND_INDIRECTION_BIT) #define IND_DONE (1 << IND_DONE_BIT) #define IND_SOURCE (1 << IND_SOURCE_BIT) #define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE) #if !defined(__ASSEMBLY__) #include <linux/crash_core.h> #include <asm/io.h> #include <uapi/linux/kexec.h> #ifdef CONFIG_KEXEC_CORE #include <linux/list.h> #include <linux/compat.h> #include <linux/ioport.h> #include <linux/module.h> #include <asm/kexec.h> /* Verify architecture specific macros are defined */ #ifndef KEXEC_SOURCE_MEMORY_LIMIT #error KEXEC_SOURCE_MEMORY_LIMIT not defined #endif #ifndef KEXEC_DESTINATION_MEMORY_LIMIT #error KEXEC_DESTINATION_MEMORY_LIMIT not defined #endif #ifndef KEXEC_CONTROL_MEMORY_LIMIT #error KEXEC_CONTROL_MEMORY_LIMIT not defined #endif #ifndef KEXEC_CONTROL_MEMORY_GFP #define KEXEC_CONTROL_MEMORY_GFP (GFP_KERNEL | __GFP_NORETRY) #endif #ifndef KEXEC_CONTROL_PAGE_SIZE #error KEXEC_CONTROL_PAGE_SIZE not defined #endif #ifndef KEXEC_ARCH #error KEXEC_ARCH not defined #endif #ifndef KEXEC_CRASH_CONTROL_MEMORY_LIMIT #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT KEXEC_CONTROL_MEMORY_LIMIT #endif #ifndef KEXEC_CRASH_MEM_ALIGN #define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE #endif #define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME /* * This structure is used to hold the arguments that are used when loading * kernel binaries. */ typedef unsigned long kimage_entry_t; struct kexec_segment { /* * This pointer can point to user memory if kexec_load() system * call is used or will point to kernel memory if * kexec_file_load() system call is used. * * Use ->buf when expecting to deal with user memory and use ->kbuf * when expecting to deal with kernel memory. */ union { void __user *buf; void *kbuf; }; size_t bufsz; unsigned long mem; size_t memsz; }; #ifdef CONFIG_COMPAT struct compat_kexec_segment { compat_uptr_t buf; compat_size_t bufsz; compat_ulong_t mem; /* User space sees this as a (void *) ... */ compat_size_t memsz; }; #endif #ifdef CONFIG_KEXEC_FILE struct purgatory_info { /* * Pointer to elf header at the beginning of kexec_purgatory. * Note: kexec_purgatory is read only */ const Elf_Ehdr *ehdr; /* * Temporary, modifiable buffer for sechdrs used for relocation. * This memory can be freed post image load. */ Elf_Shdr *sechdrs; /* * Temporary, modifiable buffer for stripped purgatory used for * relocation. This memory can be freed post image load. 
*/ void *purgatory_buf; }; struct kimage; typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size); typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf, unsigned long kernel_len, char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len); typedef int (kexec_cleanup_t)(void *loader_data); #ifdef CONFIG_KEXEC_SIG typedef int (kexec_verify_sig_t)(const char *kernel_buf, unsigned long kernel_len); #endif struct kexec_file_ops { kexec_probe_t *probe; kexec_load_t *load; kexec_cleanup_t *cleanup; #ifdef CONFIG_KEXEC_SIG kexec_verify_sig_t *verify_sig; #endif }; extern const struct kexec_file_ops * const kexec_file_loaders[]; int kexec_image_probe_default(struct kimage *image, void *buf, unsigned long buf_len); int kexec_image_post_load_cleanup_default(struct kimage *image); /* * If kexec_buf.mem is set to this value, kexec_locate_mem_hole() * will try to allocate free memory. Arch may overwrite it. */ #ifndef KEXEC_BUF_MEM_UNKNOWN #define KEXEC_BUF_MEM_UNKNOWN 0 #endif /** * struct kexec_buf - parameters for finding a place for a buffer in memory * @image: kexec image in which memory to search. * @buffer: Contents which will be copied to the allocated memory. * @bufsz: Size of @buffer. * @mem: On return will have address of the buffer in memory. * @memsz: Size for the buffer in memory. * @buf_align: Minimum alignment needed. * @buf_min: The buffer can't be placed below this address. * @buf_max: The buffer can't be placed above this address. * @top_down: Allocate from top of memory. */ struct kexec_buf { struct kimage *image; void *buffer; unsigned long bufsz; unsigned long mem; unsigned long memsz; unsigned long buf_align; unsigned long buf_min; unsigned long buf_max; bool top_down; }; int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf); int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, void *buf, unsigned int size, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len); void * __weak arch_kexec_kernel_image_load(struct kimage *image); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); /* Alignment required for elf header segment */ #define ELF_CORE_HEADER_ALIGN 4096 struct crash_mem_range { u64 start, end; }; struct crash_mem { unsigned int max_nr_ranges; unsigned int nr_ranges; struct crash_mem_range ranges[0]; }; extern int crash_exclude_mem_range(struct crash_mem *mem, unsigned long long mstart, unsigned long long mend); extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map, void **addr, unsigned long *sz); #ifndef arch_kexec_apply_relocations_add /* * arch_kexec_apply_relocations_add - apply relocations of type RELA * @pi: Purgatory to be relocated. * @section: Section relocations applying to. * @relsec: Section containing RELAs. * @symtab: Corresponding symtab. * * Return: 0 on success, negative errno on error. */ static inline int arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, const Elf_Shdr *relsec, const Elf_Shdr *symtab) { pr_err("RELA relocation unsupported.\n"); return -ENOEXEC; } #endif #ifndef arch_kexec_apply_relocations /* * arch_kexec_apply_relocations - apply relocations of type REL * @pi: Purgatory to be relocated. * @section: Section relocations applying to. * @relsec: Section containing RELs. * @symtab: Corresponding symtab. 
* * Return: 0 on success, negative errno on error. */ static inline int arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section, const Elf_Shdr *relsec, const Elf_Shdr *symtab) { pr_err("REL relocation unsupported.\n"); return -ENOEXEC; } #endif #endif /* CONFIG_KEXEC_FILE */ #ifdef CONFIG_KEXEC_ELF struct kexec_elf_info { /* * Where the ELF binary contents are kept. * Memory managed by the user of the struct. */ const char *buffer; const struct elfhdr *ehdr; const struct elf_phdr *proghdrs; }; int kexec_build_elf_info(const char *buf, size_t len, struct elfhdr *ehdr, struct kexec_elf_info *elf_info); int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, struct kexec_elf_info *elf_info, struct kexec_buf *kbuf, unsigned long *lowest_load_addr); void kexec_free_elf_info(struct kexec_elf_info *elf_info); int kexec_elf_probe(const char *buf, unsigned long len); #endif struct kimage { kimage_entry_t head; kimage_entry_t *entry; kimage_entry_t *last_entry; unsigned long start; struct page *control_code_page; struct page *swap_page; void *vmcoreinfo_data_copy; /* locates in the crash memory */ unsigned long nr_segments; struct kexec_segment segment[KEXEC_SEGMENT_MAX]; struct list_head control_pages; struct list_head dest_pages; struct list_head unusable_pages; /* Address of next control page to allocate for crash kernels. */ unsigned long control_page; /* Flags to indicate special processing */ unsigned int type : 1; #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 unsigned int preserve_context : 1; /* If set, we are using file mode kexec syscall */ unsigned int file_mode:1; #ifdef ARCH_HAS_KIMAGE_ARCH struct kimage_arch arch; #endif #ifdef CONFIG_KEXEC_FILE /* Additional fields for file based kexec syscall */ void *kernel_buf; unsigned long kernel_buf_len; void *initrd_buf; unsigned long initrd_buf_len; char *cmdline_buf; unsigned long cmdline_buf_len; /* File operations provided by image loader */ const struct kexec_file_ops *fops; /* Image loader handling the kernel can store a pointer here */ void *image_loader_data; /* Information for loading purgatory */ struct purgatory_info purgatory_info; #endif #ifdef CONFIG_IMA_KEXEC /* Virtual address of IMA measurement buffer for kexec syscall */ void *ima_buffer; #endif }; /* kexec interface functions */ extern void machine_kexec(struct kimage *image); extern int machine_kexec_prepare(struct kimage *image); extern void machine_kexec_cleanup(struct kimage *image); extern int kernel_kexec(void); extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); extern void __crash_kexec(struct pt_regs *); extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); int kexec_crash_loaded(void); void crash_save_cpu(struct pt_regs *regs, int cpu); extern int kimage_crash_copy_vmcoreinfo(struct kimage *image); extern struct kimage *kexec_image; extern struct kimage *kexec_crash_image; extern int kexec_load_disabled; #ifndef kexec_flush_icache_page #define kexec_flush_icache_page(page) #endif /* List of defined/legal kexec flags */ #ifndef CONFIG_KEXEC_JUMP #define KEXEC_FLAGS KEXEC_ON_CRASH #else #define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) #endif /* List of defined/legal kexec file flags */ #define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \ KEXEC_FILE_NO_INITRAMFS) /* Location of a reserved region to hold the crash kernel. 
 */
extern struct resource crashk_res;
extern struct resource crashk_low_res;
extern note_buf_t __percpu *crash_notes;

/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;

int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);

void arch_kexec_protect_crashkres(void);
void arch_kexec_unprotect_crashkres(void);

#ifndef page_to_boot_pfn
static inline unsigned long page_to_boot_pfn(struct page *page)
{
	return page_to_pfn(page);
}
#endif

#ifndef boot_pfn_to_page
static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
{
	return pfn_to_page(boot_pfn);
}
#endif

#ifndef phys_to_boot_phys
static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
{
	return phys;
}
#endif

#ifndef boot_phys_to_phys
static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
{
	return boot_phys;
}
#endif

static inline unsigned long virt_to_boot_phys(void *addr)
{
	return phys_to_boot_phys(__pa((unsigned long)addr));
}

static inline void *boot_phys_to_virt(unsigned long entry)
{
	return phys_to_virt(boot_phys_to_phys(entry));
}

#ifndef arch_kexec_post_alloc_pages
static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { return 0; }
#endif

#ifndef arch_kexec_pre_free_pages
static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
#endif

#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
static inline int kexec_crash_loaded(void) { return 0; }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */

#endif /* !defined(__ASSEMBLY__) */

#endif /* LINUX_KEXEC_H */
torture.h 0000644 00000006332 14722070374 0006433 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Common functions for in-kernel torture tests.
 *
 * Copyright IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_TORTURE_H
#define __LINUX_TORTURE_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>

/* Definitions for a non-string torture-test module parameter. */
#define torture_param(type, name, init, msg) \
	static type name = init; \
	module_param(name, type, 0444); \
	MODULE_PARM_DESC(name, msg);

#define TORTURE_FLAG "-torture:"
#define TOROUT_STRING(s) \
	pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s)
#define VERBOSE_TOROUT_STRING(s) \
	do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
#define VERBOSE_TOROUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
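/*
 * Editor's illustration (added in editing; not part of the original
 * header): how a torture-test module typically declares its parameters
 * and banner output with the macros above. The parameter names and the
 * "example" torture_type are hypothetical; kept under #if 0 so it is
 * never compiled.
 */
#if 0
torture_param(int, nwriters, -1, "Number of writer threads, -1 to auto-scale");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "example";

static int __init example_torture_init(void)
{
	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;
	VERBOSE_TOROUT_STRING("example_torture_init: starting up");
	/* ... create test kthreads with torture_create_kthread() ... */
	torture_init_end();
	return 0;
}
#endif

/* Definitions for online/offline exerciser.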
*/ typedef void torture_ofl_func(void); bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_offl, int *min_onl, int *max_onl); bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_onl, int *min_onl, int *max_onl); int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f); void torture_onoff_stats(void); bool torture_onoff_failures(void); /* Low-rider random number generator. */ struct torture_random_state { unsigned long trs_state; long trs_count; }; #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 } #define DEFINE_TORTURE_RANDOM_PERCPU(name) \ DEFINE_PER_CPU(struct torture_random_state, name) unsigned long torture_random(struct torture_random_state *trsp); /* Task shuffler, which causes CPUs to occasionally go idle. */ void torture_shuffle_task_register(struct task_struct *tp); int torture_shuffle_init(long shuffint); /* Test auto-shutdown handling. */ void torture_shutdown_absorb(const char *title); int torture_shutdown_init(int ssecs, void (*cleanup)(void)); /* Task stuttering, which forces load/no-load transitions. */ bool stutter_wait(const char *title); int torture_stutter_init(int s, int sgap); /* Initialization and cleanup. */ bool torture_init_begin(char *ttype, int v); void torture_init_end(void); bool torture_cleanup_begin(void); void torture_cleanup_end(void); bool torture_must_stop(void); bool torture_must_stop_irq(void); void torture_kthread_stopping(char *title); int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m, char *f, struct task_struct **tp); void _torture_stop_kthread(char *m, struct task_struct **tp); #define torture_create_kthread(n, arg, tp) \ _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \ "Failed to create " #n, &(tp)) #define torture_stop_kthread(n, tp) \ _torture_stop_kthread("Stopping " #n " task", &(tp)) #ifdef CONFIG_PREEMPTION #define torture_preempt_schedule() preempt_schedule() #else #define torture_preempt_schedule() #endif #endif /* __LINUX_TORTURE_H */ mux/driver.h 0000644 00000006003 14722070374 0007026 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * mux/driver.h - definitions for the multiplexer driver interface * * Copyright (C) 2017 Axentia Technologies AB * * Author: Peter Rosin <peda@axentia.se> */ #ifndef _LINUX_MUX_DRIVER_H #define _LINUX_MUX_DRIVER_H #include <dt-bindings/mux/mux.h> #include <linux/device.h> #include <linux/semaphore.h> struct mux_chip; struct mux_control; /** * struct mux_control_ops - Mux controller operations for a mux chip. * @set: Set the state of the given mux controller. */ struct mux_control_ops { int (*set)(struct mux_control *mux, int state); }; /** * struct mux_control - Represents a mux controller. * @lock: Protects the mux controller state. * @chip: The mux chip that is handling this mux controller. * @cached_state: The current mux controller state, or -1 if none. * @states: The number of mux controller states. * @idle_state: The mux controller state to use when inactive, or one * of MUX_IDLE_AS_IS and MUX_IDLE_DISCONNECT. * * Mux drivers may only change @states and @idle_state, and may only do so * between allocation and registration of the mux controller. Specifically, * @cached_state is internal to the mux core and should never be written by * mux drivers. 
*/ struct mux_control { struct semaphore lock; /* protects the state of the mux */ struct mux_chip *chip; int cached_state; unsigned int states; int idle_state; }; /** * struct mux_chip - Represents a chip holding mux controllers. * @controllers: Number of mux controllers handled by the chip. * @mux: Array of mux controllers that are handled. * @dev: Device structure. * @id: Used to identify the device internally. * @ops: Mux controller operations. */ struct mux_chip { unsigned int controllers; struct mux_control *mux; struct device dev; int id; const struct mux_control_ops *ops; }; #define to_mux_chip(x) container_of((x), struct mux_chip, dev) /** * mux_chip_priv() - Get the extra memory reserved by mux_chip_alloc(). * @mux_chip: The mux-chip to get the private memory from. * * Return: Pointer to the private memory reserved by the allocator. */ static inline void *mux_chip_priv(struct mux_chip *mux_chip) { return &mux_chip->mux[mux_chip->controllers]; } struct mux_chip *mux_chip_alloc(struct device *dev, unsigned int controllers, size_t sizeof_priv); int mux_chip_register(struct mux_chip *mux_chip); void mux_chip_unregister(struct mux_chip *mux_chip); void mux_chip_free(struct mux_chip *mux_chip); struct mux_chip *devm_mux_chip_alloc(struct device *dev, unsigned int controllers, size_t sizeof_priv); int devm_mux_chip_register(struct device *dev, struct mux_chip *mux_chip); /** * mux_control_get_index() - Get the index of the given mux controller * @mux: The mux-control to get the index for. * * Return: The index of the mux controller within the mux chip the mux * controller is a part of. */ static inline unsigned int mux_control_get_index(struct mux_control *mux) { return mux - mux->chip->mux; } #endif /* _LINUX_MUX_DRIVER_H */ mux/consumer.h 0000644 00000001561 14722070374 0007372 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * mux/consumer.h - definitions for the multiplexer consumer interface * * Copyright (C) 2017 Axentia Technologies AB * * Author: Peter Rosin <peda@axentia.se> */ #ifndef _LINUX_MUX_CONSUMER_H #define _LINUX_MUX_CONSUMER_H #include <linux/compiler.h> struct device; struct mux_control; unsigned int mux_control_states(struct mux_control *mux); int __must_check mux_control_select(struct mux_control *mux, unsigned int state); int __must_check mux_control_try_select(struct mux_control *mux, unsigned int state); int mux_control_deselect(struct mux_control *mux); struct mux_control *mux_control_get(struct device *dev, const char *mux_name); void mux_control_put(struct mux_control *mux); struct mux_control *devm_mux_control_get(struct device *dev, const char *mux_name); #endif /* _LINUX_MUX_CONSUMER_H */ limits.h 0000644 00000002070 14722070374 0006223 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_LIMITS_H #define _LINUX_LIMITS_H #include <uapi/linux/limits.h> #include <linux/types.h> #define USHRT_MAX ((unsigned short)~0U) #define SHRT_MAX ((short)(USHRT_MAX >> 1)) #define SHRT_MIN ((short)(-SHRT_MAX - 1)) #define INT_MAX ((int)(~0U >> 1)) #define INT_MIN (-INT_MAX - 1) #define UINT_MAX (~0U) #define LONG_MAX ((long)(~0UL >> 1)) #define LONG_MIN (-LONG_MAX - 1) #define ULONG_MAX (~0UL) #define LLONG_MAX ((long long)(~0ULL >> 1)) #define LLONG_MIN (-LLONG_MAX - 1) #define ULLONG_MAX (~0ULL) #define SIZE_MAX (~(size_t)0) #define PHYS_ADDR_MAX (~(phys_addr_t)0) #define U8_MAX ((u8)~0U) #define S8_MAX ((s8)(U8_MAX >> 1)) #define S8_MIN ((s8)(-S8_MAX - 1)) #define U16_MAX ((u16)~0U) #define S16_MAX ((s16)(U16_MAX >> 1)) #define S16_MIN ((s16)(-S16_MAX - 
1)) #define U32_MAX ((u32)~0U) #define S32_MAX ((s32)(U32_MAX >> 1)) #define S32_MIN ((s32)(-S32_MAX - 1)) #define U64_MAX ((u64)~0ULL) #define S64_MAX ((s64)(U64_MAX >> 1)) #define S64_MIN ((s64)(-S64_MAX - 1)) #endif /* _LINUX_LIMITS_H */ tty_flip.h 0000644 00000003167 14722070374 0006564 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_FLIP_H #define _LINUX_TTY_FLIP_H extern int tty_buffer_set_limit(struct tty_port *port, int limit); extern int tty_buffer_space_avail(struct tty_port *port); extern int tty_buffer_request_room(struct tty_port *port, size_t size); extern int tty_insert_flip_string_flags(struct tty_port *port, const unsigned char *chars, const char *flags, size_t size); extern int tty_insert_flip_string_fixed_flag(struct tty_port *port, const unsigned char *chars, char flag, size_t size); extern int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars, size_t size); extern void tty_flip_buffer_push(struct tty_port *port); int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag); static inline int tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag) { struct tty_buffer *tb = port->buf.tail; int change; change = (tb->flags & TTYB_NORMAL) && (flag != TTY_NORMAL); if (!change && tb->used < tb->size) { if (~tb->flags & TTYB_NORMAL) *flag_buf_ptr(tb, tb->used) = flag; *char_buf_ptr(tb, tb->used++) = ch; return 1; } return __tty_insert_flip_char(port, ch, flag); } static inline int tty_insert_flip_string(struct tty_port *port, const unsigned char *chars, size_t size) { return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size); } extern void tty_buffer_lock_exclusive(struct tty_port *port); extern void tty_buffer_unlock_exclusive(struct tty_port *port); int tty_insert_flip_string_and_push_buffer(struct tty_port *port, const unsigned char *chars, size_t cnt); #endif /* _LINUX_TTY_FLIP_H */ ppp-comp.h 0000644 00000005717 14722070374 0006470 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * ppp-comp.h - Definitions for doing PPP packet compression. * * Copyright 1994-1998 Paul Mackerras. */ #ifndef _NET_PPP_COMP_H #define _NET_PPP_COMP_H #include <uapi/linux/ppp-comp.h> struct module; /* * The following symbols control whether we include code for * various compression methods. */ #ifndef DO_BSD_COMPRESS #define DO_BSD_COMPRESS 1 /* by default, include BSD-Compress */ #endif #ifndef DO_DEFLATE #define DO_DEFLATE 1 /* by default, include Deflate */ #endif #define DO_PREDICTOR_1 0 #define DO_PREDICTOR_2 0 /* * Structure giving methods for compression/decompression. 
*/ struct compressor { int compress_proto; /* CCP compression protocol number */ /* Allocate space for a compressor (transmit side) */ void *(*comp_alloc) (unsigned char *options, int opt_len); /* Free space used by a compressor */ void (*comp_free) (void *state); /* Initialize a compressor */ int (*comp_init) (void *state, unsigned char *options, int opt_len, int unit, int opthdr, int debug); /* Reset a compressor */ void (*comp_reset) (void *state); /* Compress a packet */ int (*compress) (void *state, unsigned char *rptr, unsigned char *obuf, int isize, int osize); /* Return compression statistics */ void (*comp_stat) (void *state, struct compstat *stats); /* Allocate space for a decompressor (receive side) */ void *(*decomp_alloc) (unsigned char *options, int opt_len); /* Free space used by a decompressor */ void (*decomp_free) (void *state); /* Initialize a decompressor */ int (*decomp_init) (void *state, unsigned char *options, int opt_len, int unit, int opthdr, int mru, int debug); /* Reset a decompressor */ void (*decomp_reset) (void *state); /* Decompress a packet. */ int (*decompress) (void *state, unsigned char *ibuf, int isize, unsigned char *obuf, int osize); /* Update state for an incompressible packet received */ void (*incomp) (void *state, unsigned char *ibuf, int icnt); /* Return decompression statistics */ void (*decomp_stat) (void *state, struct compstat *stats); /* Used in locking compressor modules */ struct module *owner; /* Extra skb space needed by the compressor algorithm */ unsigned int comp_extra; }; /* * The return value from decompress routine is the length of the * decompressed packet if successful, otherwise DECOMP_ERROR * or DECOMP_FATALERROR if an error occurred. * * We need to make this distinction so that we can disable certain * useful functionality, namely sending a CCP reset-request as a result * of an error detected after decompression. This is to avoid infringing * a patent held by Motorola. * Don't you just lurve software patents. */ #define DECOMP_ERROR -1 /* error detected before decomp. */ #define DECOMP_FATALERROR -2 /* error detected after decomp. */ extern int ppp_register_compressor(struct compressor *); extern void ppp_unregister_compressor(struct compressor *); #endif /* _NET_PPP_COMP_H */ nfs_iostat.h 0000644 00000010275 14722070374 0007101 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * User-space visible declarations for NFS client per-mount * point statistics * * Copyright (C) 2005, 2006 Chuck Lever <cel@netapp.com> * * NFS client per-mount statistics provide information about the * health of the NFS client and the health of each NFS mount point. * Generally these are not for detailed problem diagnosis, but * simply to indicate that there is a problem. * * These counters are not meant to be human-readable, but are meant * to be integrated into system monitoring tools such as "sar" and * "iostat". As such, the counters are sampled by the tools over * time, and are never zeroed after a file system is mounted. * Moving averages can be computed by the tools by taking the * difference between two instantaneous samples and dividing that * by the time between the samples. */ #ifndef _LINUX_NFS_IOSTAT #define _LINUX_NFS_IOSTAT #define NFS_IOSTAT_VERS "1.1" /* * NFS byte counters * * 1. SERVER - the number of payload bytes read from or written * to the server by the NFS client via an NFS READ or WRITE * request. * * 2. NORMAL - the number of bytes read or written by applications * via the read(2) and write(2) system call interfaces. 
* * 3. DIRECT - the number of bytes read or written from files * opened with the O_DIRECT flag. * * These counters give a view of the data throughput into and out * of the NFS client. Comparing the number of bytes requested by * an application with the number of bytes the client requests from * the server can provide an indication of client efficiency * (per-op, cache hits, etc). * * These counters can also help characterize which access methods * are in use. DIRECT by itself shows whether there is any O_DIRECT * traffic. NORMAL + DIRECT shows how much data is going through * the system call interface. A large amount of SERVER traffic * without much NORMAL or DIRECT traffic shows that applications * are using mapped files. * * NFS page counters * * These count the number of pages read or written via nfs_readpage(), * nfs_readpages(), or their write equivalents. * * NB: When adding new byte counters, please include the measured * units in the name of each byte counter to help users of this * interface determine what exactly is being counted. */ enum nfs_stat_bytecounters { NFSIOS_NORMALREADBYTES = 0, NFSIOS_NORMALWRITTENBYTES, NFSIOS_DIRECTREADBYTES, NFSIOS_DIRECTWRITTENBYTES, NFSIOS_SERVERREADBYTES, NFSIOS_SERVERWRITTENBYTES, NFSIOS_READPAGES, NFSIOS_WRITEPAGES, __NFSIOS_BYTESMAX, }; /* * NFS event counters * * These counters provide a low-overhead way of monitoring client * activity without enabling NFS trace debugging. The counters * show the rate at which VFS requests are made, and how often the * client invalidates its data and attribute caches. This allows * system administrators to monitor such things as how close-to-open * is working, and answer questions such as "why are there so many * GETATTR requests on the wire?" * * They also count anomalous events such as short reads and writes, * silly renames due to close-after-delete, and operations that * change the size of a file (such operations can often be the * source of data corruption if applications aren't using file * locking properly).
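 *
 * The counters are exported through /proc/self/mountstats; a monitoring
 * tool derives rates by differencing two samples. A hypothetical sketch
 * (sample_now/sample_prev/elapsed_seconds are illustrative):
 *
 *	rate = (sample_now[NFSIOS_VFSOPEN] -
 *		sample_prev[NFSIOS_VFSOPEN]) / elapsed_seconds;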
*/ enum nfs_stat_eventcounters { NFSIOS_INODEREVALIDATE = 0, NFSIOS_DENTRYREVALIDATE, NFSIOS_DATAINVALIDATE, NFSIOS_ATTRINVALIDATE, NFSIOS_VFSOPEN, NFSIOS_VFSLOOKUP, NFSIOS_VFSACCESS, NFSIOS_VFSUPDATEPAGE, NFSIOS_VFSREADPAGE, NFSIOS_VFSREADPAGES, NFSIOS_VFSWRITEPAGE, NFSIOS_VFSWRITEPAGES, NFSIOS_VFSGETDENTS, NFSIOS_VFSSETATTR, NFSIOS_VFSFLUSH, NFSIOS_VFSFSYNC, NFSIOS_VFSLOCK, NFSIOS_VFSRELEASE, NFSIOS_CONGESTIONWAIT, NFSIOS_SETATTRTRUNC, NFSIOS_EXTENDWRITE, NFSIOS_SILLYRENAME, NFSIOS_SHORTREAD, NFSIOS_SHORTWRITE, NFSIOS_DELAY, NFSIOS_PNFS_READ, NFSIOS_PNFS_WRITE, __NFSIOS_COUNTSMAX, }; /* * NFS local caching servicing counters */ enum nfs_stat_fscachecounters { NFSIOS_FSCACHE_PAGES_READ_OK, NFSIOS_FSCACHE_PAGES_READ_FAIL, NFSIOS_FSCACHE_PAGES_WRITTEN_OK, NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, NFSIOS_FSCACHE_PAGES_UNCACHED, __NFSIOS_FSCACHEMAX, }; #endif /* _LINUX_NFS_IOSTAT */ khugepaged.h 0000644 00000005236 14722070374 0007035 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KHUGEPAGED_H #define _LINUX_KHUGEPAGED_H #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern struct attribute_group khugepaged_attr_group; extern int khugepaged_init(void); extern void khugepaged_destroy(void); extern int start_stop_khugepaged(void); extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags); extern void khugepaged_min_free_kbytes_update(void); #ifdef CONFIG_SHMEM extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr); #else static inline void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) { } #endif #define khugepaged_enabled() \ (transparent_hugepage_flags & \ ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))) #define khugepaged_always() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_FLAG)) #define khugepaged_req_madv() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) #define khugepaged_defrag() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)) static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) { if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags)) return __khugepaged_enter(mm); return 0; } static inline void khugepaged_exit(struct mm_struct *mm) { if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) __khugepaged_exit(mm); } static inline int khugepaged_enter(struct vm_area_struct *vma, unsigned long vm_flags) { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) if ((khugepaged_always() || (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) && !(vm_flags & VM_NOHUGEPAGE) && !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) if (__khugepaged_enter(vma->vm_mm)) return -ENOMEM; return 0; } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) { return 0; } static inline void khugepaged_exit(struct mm_struct *mm) { } static inline int khugepaged_enter(struct vm_area_struct *vma, unsigned long vm_flags) { return 0; } static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags) { return 0; } static inline void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) { } static inline void khugepaged_min_free_kbytes_update(void) { } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_KHUGEPAGED_H */ dlm_plock.h 0000644 00000001024 14722070374 
0006664 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. */ #ifndef __DLM_PLOCK_DOT_H__ #define __DLM_PLOCK_DOT_H__ #include <uapi/linux/dlm_plock.h> int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, int cmd, struct file_lock *fl); int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl); int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl); #endif quota.h 0000644 00000045316 14722070374 0006065 0 ustar 00 /* * Copyright (c) 1982, 1986 Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Robert Elz at The University of Melbourne. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _LINUX_QUOTA_ #define _LINUX_QUOTA_ #include <linux/list.h> #include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/percpu_counter.h> #include <linux/dqblk_xfs.h> #include <linux/dqblk_v1.h> #include <linux/dqblk_v2.h> #include <linux/atomic.h> #include <linux/uidgid.h> #include <linux/projid.h> #include <uapi/linux/quota.h> #undef USRQUOTA #undef GRPQUOTA #undef PRJQUOTA enum quota_type { USRQUOTA = 0, /* element used for user quotas */ GRPQUOTA = 1, /* element used for group quotas */ PRJQUOTA = 2, /* element used for project quotas */ }; /* Masks for quota types when used as a bitmask */ #define QTYPE_MASK_USR (1 << USRQUOTA) #define QTYPE_MASK_GRP (1 << GRPQUOTA) #define QTYPE_MASK_PRJ (1 << PRJQUOTA) typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ typedef long long qsize_t; /* Type in which we store sizes */ struct kqid { /* Type in which we store the quota identifier */ union { kuid_t uid; kgid_t gid; kprojid_t projid; }; enum quota_type type; /* USRQUOTA (uid) or GRPQUOTA (gid) or PRJQUOTA (projid) */ }; extern bool qid_eq(struct kqid left, struct kqid right); extern bool qid_lt(struct kqid left, struct kqid right); extern qid_t from_kqid(struct user_namespace *to, struct kqid qid); extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid); extern bool qid_valid(struct kqid qid); /** * make_kqid - Map a user-namespace, type, qid tuple into a kqid. * @from: User namespace that the qid is in * @type: The type of quota * @qid: Quota identifier * * Maps a user-namespace, type, qid tuple into a kernel internal * kqid, and returns that kqid. * * When there is no mapping defined for the user-namespace, type, * qid tuple an invalid kqid is returned. Callers are expected to * test for and handle invalid kqids being returned. * Invalid kqids may be tested for using qid_valid(). */ static inline struct kqid make_kqid(struct user_namespace *from, enum quota_type type, qid_t qid) { struct kqid kqid; kqid.type = type; switch (type) { case USRQUOTA: kqid.uid = make_kuid(from, qid); break; case GRPQUOTA: kqid.gid = make_kgid(from, qid); break; case PRJQUOTA: kqid.projid = make_kprojid(from, qid); break; default: BUG(); } return kqid; } /** * make_kqid_invalid - Explicitly make an invalid kqid * @type: The type of quota identifier * * Returns an invalid kqid with the specified type. */ static inline struct kqid make_kqid_invalid(enum quota_type type) { struct kqid kqid; kqid.type = type; switch (type) { case USRQUOTA: kqid.uid = INVALID_UID; break; case GRPQUOTA: kqid.gid = INVALID_GID; break; case PRJQUOTA: kqid.projid = INVALID_PROJID; break; default: BUG(); } return kqid; } /** * make_kqid_uid - Make a kqid from a kuid * @uid: The kuid to make the quota identifier from */ static inline struct kqid make_kqid_uid(kuid_t uid) { struct kqid kqid; kqid.type = USRQUOTA; kqid.uid = uid; return kqid; } /** * make_kqid_gid - Make a kqid from a kgid * @gid: The kgid to make the quota identifier from */ static inline struct kqid make_kqid_gid(kgid_t gid) { struct kqid kqid; kqid.type = GRPQUOTA; kqid.gid = gid; return kqid; } /** * make_kqid_projid - Make a kqid from a projid * @projid: The kprojid to make the quota identifier from */ static inline struct kqid make_kqid_projid(kprojid_t projid) { struct kqid kqid; kqid.type = PRJQUOTA; kqid.projid = projid; return kqid; } /** * qid_has_mapping - Report if a qid maps into a user namespace.
* @ns: The user namespace to see if a value maps into. * @qid: The kernel internal quota identifier to test. */ static inline bool qid_has_mapping(struct user_namespace *ns, struct kqid qid) { return from_kqid(ns, qid) != (qid_t) -1; } extern spinlock_t dq_data_lock; /* Maximal numbers of writes for quota operation (insert/delete/update) * (over all VFS formats) */ #define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC) #define DQUOT_INIT_REWRITE max(V1_INIT_REWRITE, V2_INIT_REWRITE) #define DQUOT_DEL_ALLOC max(V1_DEL_ALLOC, V2_DEL_ALLOC) #define DQUOT_DEL_REWRITE max(V1_DEL_REWRITE, V2_DEL_REWRITE) /* * Data for one user/group kept in memory */ struct mem_dqblk { qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */ qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */ qsize_t dqb_curspace; /* current used space */ qsize_t dqb_rsvspace; /* current reserved space for delalloc*/ qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */ qsize_t dqb_isoftlimit; /* preferred inode limit */ qsize_t dqb_curinodes; /* current # allocated inodes */ time64_t dqb_btime; /* time limit for excessive disk use */ time64_t dqb_itime; /* time limit for excessive inode use */ }; /* * Data for one quotafile kept in memory */ struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; /* Id of the dqi_format - used when turning * quotas on after remount RW */ struct list_head dqi_dirty_list; /* List of dirty dquots [dq_list_lock] */ unsigned long dqi_flags; /* DQF_ flags [dq_data_lock] */ unsigned int dqi_bgrace; /* Space grace time [dq_data_lock] */ unsigned int dqi_igrace; /* Inode grace time [dq_data_lock] */ qsize_t dqi_max_spc_limit; /* Maximum space limit [static] */ qsize_t dqi_max_ino_limit; /* Maximum inode limit [static] */ void *dqi_priv; }; struct super_block; /* Mask for flags passed to userspace */ #define DQF_GETINFO_MASK (DQF_ROOT_SQUASH | DQF_SYS_FILE) /* Mask for flags modifiable from userspace */ #define DQF_SETINFO_MASK DQF_ROOT_SQUASH enum { DQF_INFO_DIRTY_B = DQF_PRIVATE, }; #define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */ extern void mark_info_dirty(struct super_block *sb, int type); static inline int info_dirty(struct mem_dqinfo *info) { return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags); } enum { DQST_LOOKUPS, DQST_DROPS, DQST_READS, DQST_WRITES, DQST_CACHE_HITS, DQST_ALLOC_DQUOTS, DQST_FREE_DQUOTS, DQST_SYNCS, _DQST_DQSTAT_LAST }; struct dqstats { unsigned long stat[_DQST_DQSTAT_LAST]; struct percpu_counter counter[_DQST_DQSTAT_LAST]; }; extern struct dqstats dqstats; static inline void dqstats_inc(unsigned int type) { percpu_counter_inc(&dqstats.counter[type]); } static inline void dqstats_dec(unsigned int type) { percpu_counter_dec(&dqstats.counter[type]); } #define DQ_MOD_B 0 /* dquot modified since read */ #define DQ_BLKS_B 1 /* uid/gid has been warned about blk limit */ #define DQ_INODES_B 2 /* uid/gid has been warned about inode limit */ #define DQ_FAKE_B 3 /* no limits, only usage */ #define DQ_READ_B 4 /* dquot was read into memory */ #define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */ #define DQ_RELEASING_B 6 /* dquot is in releasing_dquots list waiting * to be cleaned up */ #define DQ_LASTSET_B 7 /* Following 6 bits (see QIF_) are reserved\ * for the mask of entries set via SETQUOTA\ * quotactl. They are set under dq_data_lock\ * and the quota format handling dquot can\ * clear them when it sees fit.
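 *
 * The DQ_*_B values are bit numbers within dquot->dq_flags, used with
 * the atomic bitops. A hypothetical sketch:
 *
 *	if (test_bit(DQ_FAKE_B, &dquot->dq_flags))
 *		return;		(no limits set, only usage tracking)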
*/ struct dquot { struct hlist_node dq_hash; /* Hash list in memory [dq_list_lock] */ struct list_head dq_inuse; /* List of all quotas [dq_list_lock] */ struct list_head dq_free; /* Free list element [dq_list_lock] */ struct list_head dq_dirty; /* List of dirty dquots [dq_list_lock] */ struct mutex dq_lock; /* dquot IO lock */ spinlock_t dq_dqb_lock; /* Lock protecting dq_dqb changes */ atomic_t dq_count; /* Use count */ struct super_block *dq_sb; /* superblock this applies to */ struct kqid dq_id; /* ID this applies to (uid, gid, projid) */ loff_t dq_off; /* Offset of dquot on disk [dq_lock, stable once set] */ unsigned long dq_flags; /* See DQ_* */ struct mem_dqblk dq_dqb; /* Diskquota usage [dq_dqb_lock] */ }; /* Operations which must be implemented by each quota format */ struct quota_format_ops { int (*check_quota_file)(struct super_block *sb, int type); /* Detect whether file is in our format */ int (*read_file_info)(struct super_block *sb, int type); /* Read main info about file - called on quotaon() */ int (*write_file_info)(struct super_block *sb, int type); /* Write main info about file */ int (*free_file_info)(struct super_block *sb, int type); /* Called on quotaoff() */ int (*read_dqblk)(struct dquot *dquot); /* Read structure for one user */ int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */ int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */ int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structure in the quota file */ }; /* Operations working with dquots */ struct dquot_operations { int (*write_dquot) (struct dquot *); /* Ordinary dquot write */ struct dquot *(*alloc_dquot)(struct super_block *, int); /* Allocate memory for new dquot */ void (*destroy_dquot)(struct dquot *); /* Free memory for dquot */ int (*acquire_dquot) (struct dquot *); /* Quota is going to be created on disk */ int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */ int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */ int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */ /* get reserved quota for delayed alloc, value returned is managed by * quota code only */ qsize_t *(*get_reserved_space) (struct inode *); int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */ /* Get number of inodes that were charged for a given inode */ int (*get_inode_usage) (struct inode *, qsize_t *); /* Get next ID with active quota structure */ int (*get_next_id) (struct super_block *sb, struct kqid *qid); }; struct path; /* Structure for communicating via ->get_dqblk() & ->set_dqblk() */ struct qc_dqblk { int d_fieldmask; /* mask of fields to change in ->set_dqblk() */ u64 d_spc_hardlimit; /* absolute limit on used space */ u64 d_spc_softlimit; /* preferred limit on used space */ u64 d_ino_hardlimit; /* maximum # allocated inodes */ u64 d_ino_softlimit; /* preferred inode limit */ u64 d_space; /* Space owned by the user */ u64 d_ino_count; /* # inodes owned by the user */ s64 d_ino_timer; /* zero if within inode limits */ /* if not, we refuse service */ s64 d_spc_timer; /* similar to above; for space */ int d_ino_warns; /* # warnings issued wrt num inodes */ int d_spc_warns; /* # warnings issued wrt used space */ u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */ u64 d_rt_spc_softlimit; /* preferred limit on RT space */ u64 d_rt_space; /* realtime space owned */ s64 d_rt_spc_timer; /* similar to above; for RT space */ 
int d_rt_spc_warns; /* # warnings issued wrt RT space */ }; /* * Field specifiers for ->set_dqblk() in struct qc_dqblk and also for * ->set_info() in struct qc_info */ #define QC_INO_SOFT (1<<0) #define QC_INO_HARD (1<<1) #define QC_SPC_SOFT (1<<2) #define QC_SPC_HARD (1<<3) #define QC_RT_SPC_SOFT (1<<4) #define QC_RT_SPC_HARD (1<<5) #define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \ QC_RT_SPC_SOFT | QC_RT_SPC_HARD) #define QC_SPC_TIMER (1<<6) #define QC_INO_TIMER (1<<7) #define QC_RT_SPC_TIMER (1<<8) #define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER) #define QC_SPC_WARNS (1<<9) #define QC_INO_WARNS (1<<10) #define QC_RT_SPC_WARNS (1<<11) #define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS) #define QC_SPACE (1<<12) #define QC_INO_COUNT (1<<13) #define QC_RT_SPACE (1<<14) #define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE) #define QC_FLAGS (1<<15) #define QCI_SYSFILE (1 << 0) /* Quota file is hidden from userspace */ #define QCI_ROOT_SQUASH (1 << 1) /* Root squash turned on */ #define QCI_ACCT_ENABLED (1 << 2) /* Quota accounting enabled */ #define QCI_LIMITS_ENFORCED (1 << 3) /* Quota limits enforced */ /* Structures for communicating via ->get_state */ struct qc_type_state { unsigned int flags; /* Flags QCI_* */ unsigned int spc_timelimit; /* Time after which space softlimit is * enforced */ unsigned int ino_timelimit; /* Ditto for inode softlimit */ unsigned int rt_spc_timelimit; /* Ditto for real-time space */ unsigned int spc_warnlimit; /* Limit for number of space warnings */ unsigned int ino_warnlimit; /* Ditto for inodes */ unsigned int rt_spc_warnlimit; /* Ditto for real-time space */ unsigned long long ino; /* Inode number of quota file */ blkcnt_t blocks; /* Number of 512-byte blocks in the file */ blkcnt_t nextents; /* Number of extents in the file */ }; struct qc_state { unsigned int s_incoredqs; /* Number of dquots in core */ struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */ }; /* Structure for communicating via ->set_info */ struct qc_info { int i_fieldmask; /* mask of fields to change in ->set_info() */ unsigned int i_flags; /* Flags QCI_* */ unsigned int i_spc_timelimit; /* Time after which space softlimit is * enforced */ unsigned int i_ino_timelimit; /* Ditto for inode softlimit */ unsigned int i_rt_spc_timelimit;/* Ditto for real-time space */ unsigned int i_spc_warnlimit; /* Limit for number of space warnings */ unsigned int i_ino_warnlimit; /* Limit for number of inode warnings */ unsigned int i_rt_spc_warnlimit; /* Ditto for real-time space */ }; /* Operations handling requests from userspace */ struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, const struct path *); int (*quota_off)(struct super_block *, int); int (*quota_enable)(struct super_block *, unsigned int); int (*quota_disable)(struct super_block *, unsigned int); int (*quota_sync)(struct super_block *, int); int (*set_info)(struct super_block *, int, struct qc_info *); int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); }; struct quota_format_type { int qf_fmt_id; /* Quota format id */ const struct quota_format_ops *qf_ops; /* Operations of format */ struct module *qf_owner; /* Module implementing quota format */ 
struct quota_format_type *qf_next; }; /** * Quota state flags - they actually come in two flavors - for users and groups. * * Actual typed flags layout: * USRQUOTA GRPQUOTA * DQUOT_USAGE_ENABLED 0x0001 0x0002 * DQUOT_LIMITS_ENABLED 0x0004 0x0008 * DQUOT_SUSPENDED 0x0010 0x0020 * * Following bits are used for non-typed flags: * DQUOT_QUOTA_SYS_FILE 0x0040 * DQUOT_NEGATIVE_USAGE 0x0080 */ enum { _DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */ _DQUOT_LIMITS_ENABLED, /* Enforce quota limits for users */ _DQUOT_SUSPENDED, /* User diskquotas are off, but * we have necessary info in * memory to turn them on */ _DQUOT_STATE_FLAGS }; #define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED * MAXQUOTAS) #define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED * MAXQUOTAS) #define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED * MAXQUOTAS) #define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \ DQUOT_SUSPENDED) /* Other quota flags */ #define DQUOT_STATE_LAST (_DQUOT_STATE_FLAGS * MAXQUOTAS) #define DQUOT_QUOTA_SYS_FILE (1 << DQUOT_STATE_LAST) /* Quota file is a special * system file and user cannot * touch it. Filesystem is * responsible for setting * S_NOQUOTA, S_NOATIME flags */ #define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1)) /* Allow negative quota usage */ /* Do not track dirty dquots in a list */ #define DQUOT_NOLIST_DIRTY (1 << (DQUOT_STATE_LAST + 2)) static inline unsigned int dquot_state_flag(unsigned int flags, int type) { return flags << type; } static inline unsigned int dquot_generic_flag(unsigned int flags, int type) { return (flags >> type) & DQUOT_STATE_FLAGS; } /* Bitmap of quota types where flag is set in flags */ static __always_inline unsigned dquot_state_types(unsigned flags, unsigned flag) { BUILD_BUG_ON_NOT_POWER_OF_2(flag); return (flags / flag) & ((1 << MAXQUOTAS) - 1); } #ifdef CONFIG_QUOTA_NETLINK_INTERFACE extern void quota_send_warning(struct kqid qid, dev_t dev, const char warntype); #else static inline void quota_send_warning(struct kqid qid, dev_t dev, const char warntype) { return; } #endif /* CONFIG_QUOTA_NETLINK_INTERFACE */ struct quota_info { unsigned int flags; /* Flags for diskquotas on this device */ struct rw_semaphore dqio_sem; /* Lock quota file while I/O in progress */ struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ }; int register_quota_format(struct quota_format_type *fmt); void unregister_quota_format(struct quota_format_type *fmt); struct quota_module_name { int qm_fmt_id; char *qm_mod_name; }; #define INIT_QUOTA_MODULE_NAMES {\ {QFMT_VFS_OLD, "quota_v1"},\ {QFMT_VFS_V0, "quota_v2"},\ {QFMT_VFS_V1, "quota_v2"},\ {0, NULL}} #endif /* _QUOTA_ */ interval_tree_generic.h 0000644 00000015317 14722070374 0011271 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Interval Trees (C) 2012 Michel Lespinasse <walken@google.com> include/linux/interval_tree_generic.h */ #include <linux/rbtree_augmented.h> /* * Template for implementing interval trees * * ITSTRUCT: struct type of the interval tree nodes * ITRB: name of struct rb_node field within ITSTRUCT * ITTYPE: type of the interval endpoints * ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree * ITSTART(n): start endpoint of ITSTRUCT node n * ITLAST(n): last endpoint of ITSTRUCT node n * ITSTATIC: 'static' or empty * ITPREFIX: prefix to use for the inline tree definitions * * Note - before 
using this, please consider if the generic version * (interval_tree.h) would work for you... */ #define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, \ ITSTART, ITLAST, ITSTATIC, ITPREFIX) \ \ /* Callbacks for augmented rbtree insert and remove */ \ \ RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \ ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \ \ /* Insert / remove interval nodes from the tree */ \ \ ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \ struct rb_root_cached *root) \ { \ struct rb_node **link = &root->rb_root.rb_node, *rb_parent = NULL; \ ITTYPE start = ITSTART(node), last = ITLAST(node); \ ITSTRUCT *parent; \ bool leftmost = true; \ \ while (*link) { \ rb_parent = *link; \ parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \ if (parent->ITSUBTREE < last) \ parent->ITSUBTREE = last; \ if (start < ITSTART(parent)) \ link = &parent->ITRB.rb_left; \ else { \ link = &parent->ITRB.rb_right; \ leftmost = false; \ } \ } \ \ node->ITSUBTREE = last; \ rb_link_node(&node->ITRB, rb_parent, link); \ rb_insert_augmented_cached(&node->ITRB, root, \ leftmost, &ITPREFIX ## _augment); \ } \ \ ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \ struct rb_root_cached *root) \ { \ rb_erase_augmented_cached(&node->ITRB, root, &ITPREFIX ## _augment); \ } \ \ /* \ * Iterate over intervals intersecting [start;last] \ * \ * Note that a node's interval intersects [start;last] iff: \ * Cond1: ITSTART(node) <= last \ * and \ * Cond2: start <= ITLAST(node) \ */ \ \ static ITSTRUCT * \ ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ { \ while (true) { \ /* \ * Loop invariant: start <= node->ITSUBTREE \ * (Cond2 is satisfied by one of the subtree nodes) \ */ \ if (node->ITRB.rb_left) { \ ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \ ITSTRUCT, ITRB); \ if (start <= left->ITSUBTREE) { \ /* \ * Some nodes in left subtree satisfy Cond2. \ * Iterate to find the leftmost such node N. \ * If it also satisfies Cond1, that's the \ * match we are looking for. Otherwise, there \ * is no matching interval as nodes to the \ * right of N can't satisfy Cond1 either. \ */ \ node = left; \ continue; \ } \ } \ if (ITSTART(node) <= last) { /* Cond1 */ \ if (start <= ITLAST(node)) /* Cond2 */ \ return node; /* node is leftmost match */ \ if (node->ITRB.rb_right) { \ node = rb_entry(node->ITRB.rb_right, \ ITSTRUCT, ITRB); \ if (start <= node->ITSUBTREE) \ continue; \ } \ } \ return NULL; /* No match */ \ } \ } \ \ ITSTATIC ITSTRUCT * \ ITPREFIX ## _iter_first(struct rb_root_cached *root, \ ITTYPE start, ITTYPE last) \ { \ ITSTRUCT *node, *leftmost; \ \ if (!root->rb_root.rb_node) \ return NULL; \ \ /* \ * Fastpath range intersection/overlap between A: [a0, a1] and \ * B: [b0, b1] is given by: \ * \ * a0 <= b1 && b0 <= a1 \ * \ * ... where A holds the lock range and B holds the smallest \ * 'start' and largest 'last' in the tree. For the latter, we \ * rely on the root node, which by augmented interval tree \ * property, holds the largest value in its last-in-subtree. \ * This allows mitigating some of the tree walk overhead \ * for non-intersecting ranges, maintained and consulted in O(1).
\ */ \ node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \ if (node->ITSUBTREE < start) \ return NULL; \ \ leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \ if (ITSTART(leftmost) > last) \ return NULL; \ \ return ITPREFIX ## _subtree_search(node, start, last); \ } \ \ ITSTATIC ITSTRUCT * \ ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ { \ struct rb_node *rb = node->ITRB.rb_right, *prev; \ \ while (true) { \ /* \ * Loop invariants: \ * Cond1: ITSTART(node) <= last \ * rb == node->ITRB.rb_right \ * \ * First, search right subtree if suitable \ */ \ if (rb) { \ ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \ if (start <= right->ITSUBTREE) \ return ITPREFIX ## _subtree_search(right, \ start, last); \ } \ \ /* Move up the tree until we come from a node's left child */ \ do { \ rb = rb_parent(&node->ITRB); \ if (!rb) \ return NULL; \ prev = &node->ITRB; \ node = rb_entry(rb, ITSTRUCT, ITRB); \ rb = node->ITRB.rb_right; \ } while (prev == rb); \ \ /* Check if the node intersects [start;last] */ \ if (last < ITSTART(node)) /* !Cond1 */ \ return NULL; \ else if (start <= ITLAST(node)) /* Cond2 */ \ return node; \ } \ } wait.h 0000644 00000122543 14722070374 0005676 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_WAIT_H #define _LINUX_WAIT_H /* * Linux wait queue related types and methods */ #include <linux/list.h> #include <linux/stddef.h> #include <linux/spinlock.h> #include <asm/current.h> #include <uapi/linux/wait.h> typedef struct wait_queue_entry wait_queue_entry_t; typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); /* wait_queue_entry::flags */ #define WQ_FLAG_EXCLUSIVE 0x01 #define WQ_FLAG_WOKEN 0x02 #define WQ_FLAG_BOOKMARK 0x04 #define WQ_FLAG_CUSTOM 0x08 #define WQ_FLAG_DONE 0x10 /* * A single wait-queue entry structure: */ struct wait_queue_entry { unsigned int flags; void *private; wait_queue_func_t func; struct list_head entry; }; struct wait_queue_head { spinlock_t lock; struct list_head head; }; typedef struct wait_queue_head wait_queue_head_t; struct task_struct; /* * Macros for declaration and initialisation of the datatypes */ #define __WAITQUEUE_INITIALIZER(name, tsk) { \ .private = tsk, \ .func = default_wake_function, \ .entry = { NULL, NULL } } #define DECLARE_WAITQUEUE(name, tsk) \ struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk) #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .head = { &(name).head, &(name).head } } #define DECLARE_WAIT_QUEUE_HEAD(name) \ struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name) extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *); #define init_waitqueue_head(wq_head) \ do { \ static struct lock_class_key __key; \ \ __init_waitqueue_head((wq_head), #wq_head, &__key); \ } while (0) #ifdef CONFIG_LOCKDEP # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ ({ init_waitqueue_head(&name); name; }) # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \ struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) #else # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) #endif static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p) { wq_entry->flags = 0; wq_entry->private = p; wq_entry->func = default_wake_function; } static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func) { wq_entry->flags = 0; wq_entry->private = NULL; wq_entry->func = func; } /** * waitqueue_active -- locklessly test for waiters on the queue * @wq_head: the waitqueue to test for waiters * * returns true if the wait list is not empty * * NOTE: this function is lockless and requires care, incorrect usage _will_ * lead to sporadic and non-obvious failure. * * Use either while holding wait_queue_head::lock or when used for wakeups * with an extra smp_mb() like:: * * CPU0 - waker CPU1 - waiter * * for (;;) { * @cond = true; prepare_to_wait(&wq_head, &wait, state); * smp_mb(); // smp_mb() from set_current_state() * if (waitqueue_active(wq_head)) if (@cond) * wake_up(wq_head); break; * schedule(); * } * finish_wait(&wq_head, &wait); * * Because without the explicit smp_mb() it's possible for the * waitqueue_active() load to get hoisted over the @cond store such that we'll * observe an empty wait list while the waiter might not observe @cond. * * Also note that this 'optimization' trades a spin_lock() for an smp_mb(), * which (when the lock is uncontended) are of roughly equal cost. */ static inline int waitqueue_active(struct wait_queue_head *wq_head) { return !list_empty(&wq_head->head); } /** * wq_has_single_sleeper - check if there is only one sleeper * @wq_head: wait queue head * * Returns true if wq_head has only one sleeper on the list. * * Please refer to the comment for waitqueue_active. */ static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head) { return list_is_singular(&wq_head->head); } /** * wq_has_sleeper - check if there are any waiting processes * @wq_head: wait queue head * * Returns true if wq_head has waiting processes * * Please refer to the comment for waitqueue_active. */ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head) { /* * We need to be sure we are in sync with the * add_wait_queue modifications to the wait queue. * * This memory barrier should be paired with one on the * waiting side.
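 *
 * A minimal sketch of the waker side (cond and wq are illustrative);
 * the smp_mb() below orders the waker's @cond store before the
 * waitqueue_active() load:
 *
 *	WRITE_ONCE(cond, true);
 *	if (wq_has_sleeper(&wq))
 *		wake_up(&wq);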
*/ smp_mb(); return waitqueue_active(wq_head); } extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { list_add(&wq_entry->entry, &wq_head->head); } /* * Used for wake-one threads: */ static inline void __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { wq_entry->flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue(wq_head, wq_entry); } static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { list_add_tail(&wq_entry->entry, &wq_head->head); } static inline void __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { wq_entry->flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_entry_tail(wq_head, wq_entry); } static inline void __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { list_del(&wq_entry->entry); } void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head, unsigned int mode, void *key, wait_queue_entry_t *bookmark); void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr); void __wake_up_pollfree(struct wait_queue_head *wq_head); #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) /* * Wakeup macros to be used to report events to the targets. */ #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m)) #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m)) #define wake_up_poll(x, m) \ __wake_up(x, TASK_NORMAL, 1, poll_to_key(m)) #define wake_up_locked_poll(x, m) \ __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m)) #define wake_up_interruptible_poll(x, m) \ __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m)) #define wake_up_interruptible_sync_poll(x, m) \ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m)) /** * wake_up_pollfree - signal that a polled waitqueue is going away * @wq_head: the wait queue head * * In the very rare cases where a ->poll() implementation uses a waitqueue whose * lifetime is tied to a task rather than to the 'struct file' being polled, * this function must be called before the waitqueue is freed so that * non-blocking polls (e.g. epoll) are notified that the queue is going away. * * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. 
via * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU. */ static inline void wake_up_pollfree(struct wait_queue_head *wq_head) { /* * For performance reasons, we don't always take the queue lock here. * Therefore, we might race with someone removing the last entry from * the queue, and proceed while they still hold the queue lock. * However, rcu_read_lock() is required to be held in such cases, so we * can safely proceed with an RCU-delayed free. */ if (waitqueue_active(wq_head)) __wake_up_pollfree(wq_head); } #define ___wait_cond_timeout(condition) \ ({ \ bool __cond = (condition); \ if (__cond && !__ret) \ __ret = 1; \ __cond || !__ret; \ }) #define ___wait_is_interruptible(state) \ (!__builtin_constant_p(state) || \ state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \ extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags); /* * The below macro ___wait_event() has an explicit shadow of the __ret * variable when used from the wait_event_*() macros. * * This is so that both can use the ___wait_cond_timeout() construct * to wrap the condition. * * The type inconsistency of the wait_event_*() __ret variable is also * on purpose; we use long where we can return timeout values and int * otherwise. */ #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \ ({ \ __label__ __out; \ struct wait_queue_entry __wq_entry; \ long __ret = ret; /* explicit shadow */ \ \ init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \ for (;;) { \ long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\ \ if (condition) \ break; \ \ if (___wait_is_interruptible(state) && __int) { \ __ret = __int; \ goto __out; \ } \ \ cmd; \ } \ finish_wait(&wq_head, &__wq_entry); \ __out: __ret; \ }) #define __wait_event(wq_head, condition) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ schedule()) /** * wait_event - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. */ #define wait_event(wq_head, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __wait_event(wq_head, condition); \ } while (0) #define __io_wait_event(wq_head, condition) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ io_schedule()) /* * io_wait_event() -- like wait_event() but with io_schedule() */ #define io_wait_event(wq_head, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __io_wait_event(wq_head, condition); \ } while (0) #define __wait_event_freezable(wq_head, condition) \ ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ freezable_schedule()) /** * wait_event_freezable - sleep (or freeze) until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute * to system load) until the @condition evaluates to true. The * @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. 
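 *
 * A minimal sketch for a freezable kthread (data_ready is illustrative):
 *
 *	err = wait_event_freezable(wq, kthread_should_stop() ||
 *				       READ_ONCE(data_ready));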
*/ #define wait_event_freezable(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_freezable(wq_head, condition); \ __ret; \ }) #define __wait_event_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_UNINTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_freezable_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_INTERRUPTIBLE, 0, timeout, \ __ret = freezable_schedule_timeout(__ret)) /* * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid * increasing load and is freezable. */ #define wait_event_freezable_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \ cmd1; schedule(); cmd2) /* * Just like wait_event_cmd(), except it sets exclusive flag */ #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \ do { \ if (condition) \ break; \ __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \ } while (0) #define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ cmd1; schedule(); cmd2) /** * wait_event_cmd - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @cmd1: the command will be executed before sleep * @cmd2: the command will be executed after sleep * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. */ #define wait_event_cmd(wq_head, condition, cmd1, cmd2) \ do { \ if (condition) \ break; \ __wait_event_cmd(wq_head, condition, cmd1, cmd2); \ } while (0) #define __wait_event_interruptible(wq_head, condition) \ ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ schedule()) /** * wait_event_interruptible - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. 
* The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_interruptible(wq_head, condition); \ __ret; \ }) #define __wait_event_interruptible_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_INTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was * interrupted by a signal. */ #define wait_event_interruptible_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_interruptible_timeout(wq_head, \ condition, timeout); \ __ret; \ }) #define __wait_event_hrtimeout(wq_head, condition, timeout, state) \ ({ \ int __ret = 0; \ struct hrtimer_sleeper __t; \ \ hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ HRTIMER_MODE_REL); \ if ((timeout) != KTIME_MAX) { \ hrtimer_set_expires_range_ns(&__t.timer, timeout, \ current->timer_slack_ns); \ hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \ } \ \ __ret = ___wait_event(wq_head, condition, state, 0, 0, \ if (!__t.task) { \ __ret = -ETIME; \ break; \ } \ schedule()); \ \ hrtimer_cancel(&__t.timer); \ destroy_hrtimer_on_stack(&__t.timer); \ __ret; \ }) /** * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, as a ktime_t * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function returns 0 if @condition became true, or -ETIME if the timeout * elapsed. */ #define wait_event_hrtimeout(wq_head, condition, timeout) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \ TASK_UNINTERRUPTIBLE); \ __ret; \ }) /** * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, as a ktime_t * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. 
* The @condition is checked each time the waitqueue @wq is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function returns 0 if @condition became true, -ERESTARTSYS if it was * interrupted by a signal, or -ETIME if the timeout elapsed. */ #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ ({ \ long __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_hrtimeout(wq, condition, timeout, \ TASK_INTERRUPTIBLE); \ __ret; \ }) #define __wait_event_interruptible_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ schedule()) #define wait_event_interruptible_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_interruptible_exclusive(wq, condition); \ __ret; \ }) #define __wait_event_killable_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \ schedule()) #define wait_event_killable_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_killable_exclusive(wq, condition); \ __ret; \ }) #define __wait_event_freezable_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ freezable_schedule()) #define wait_event_freezable_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_freezable_exclusive(wq, condition); \ __ret; \ }) /** * wait_event_idle - wait for a condition without contributing to system load * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * */ #define wait_event_idle(wq_head, condition) \ do { \ might_sleep(); \ if (!(condition)) \ ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \ } while (0) /** * wait_event_idle_exclusive - wait for a condition without contributing to system load * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. * The @condition is checked each time the waitqueue @wq_head is woken up. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus if other processes wait on the same list, when this * process is woken further processes are not considered. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * */ #define wait_event_idle_exclusive(wq_head, condition) \ do { \ might_sleep(); \ if (!(condition)) \ ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \ } while (0) #define __wait_event_idle_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_IDLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up.
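 *
 * A minimal sketch that waits up to one second without adding to the
 * load average (dev->ready is illustrative):
 *
 *	if (!wait_event_idle_timeout(wq, READ_ONCE(dev->ready), HZ))
 *		return -ETIMEDOUT;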
* * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_idle_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_IDLE, 1, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus if other processes wait on the same list, when this * process is woken further processes are not considered. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\ __ret; \ }) extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *); extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *); #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ ({ \ int __ret; \ DEFINE_WAIT(__wait); \ if (exclusive) \ __wait.flags |= WQ_FLAG_EXCLUSIVE; \ do { \ __ret = fn(&(wq), &__wait); \ if (__ret) \ break; \ } while (!(condition)); \ __remove_wait_queue(&(wq), &__wait); \ __set_current_state(TASK_RUNNING); \ __ret; \ }) /** * wait_event_interruptible_locked - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock()/spin_unlock() * functions which must match the way they are locked/unlocked outside * of this macro. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_locked(wq, condition) \ ((condition) \ ? 
0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr)) /** * wait_event_interruptible_locked_irq - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() * functions which must match the way they are locked/unlocked outside * of this macro. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_locked_irq(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq)) /** * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock()/spin_unlock() * functions which must match the way they are locked/unlocked outside * of this macro. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus if other processes wait on the same list, when this * process is woken further processes are not considered. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_exclusive_locked(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr)) /** * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() * functions which must match the way they are locked/unlocked outside * of this macro. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus if other processes wait on the same list, when this * process is woken further processes are not considered. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition.
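 *
 * A minimal usage sketch (cond is illustrative; note it is the
 * waitqueue's own lock that is held):
 *
 *	spin_lock_irq(&wq.lock);
 *	err = wait_event_interruptible_exclusive_locked_irq(wq, cond);
 *	spin_unlock_irq(&wq.lock);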
* * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq)) #define __wait_event_killable(wq, condition) \ ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule()) /** * wait_event_killable - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_KILLABLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_killable(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_killable(wq_head, condition); \ __ret; \ }) #define __wait_event_killable_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_KILLABLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_KILLABLE) until the * @condition evaluates to true or a kill signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was * interrupted by a kill signal. * * Only kill signals interrupt this process. */ #define wait_event_killable_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_killable_timeout(wq_head, \ condition, timeout); \ __ret; \ }) #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ spin_unlock_irq(&lock); \ cmd; \ schedule(); \ spin_lock_irq(&lock)) /** * wait_event_lock_irq_cmd - sleep until a condition gets true. The * condition is checked under the lock. This * is expected to be called with the lock * taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before cmd * and schedule() and reacquired afterwards. * @cmd: a command which is invoked outside the critical section before * sleep * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before invoking the cmd and going to sleep and is reacquired * afterwards. 
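 *
 * A minimal sketch, for illustration only (the waitqueue, the flag,
 * the lock and the kick_hw() helper are hypothetical):
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq_cmd(my_wq, my_done, my_lock, kick_hw(dev));
 *	... operate on the now-ready state under my_lock ...
 *	spin_unlock_irq(&my_lock);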
*/ #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \ do { \ if (condition) \ break; \ __wait_event_lock_irq(wq_head, condition, lock, cmd); \ } while (0) /** * wait_event_lock_irq - sleep until a condition gets true. The * condition is checked under the lock. This * is expected to be called with the lock * taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. */ #define wait_event_lock_irq(wq_head, condition, lock) \ do { \ if (condition) \ break; \ __wait_event_lock_irq(wq_head, condition, lock, ); \ } while (0) #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \ ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ spin_unlock_irq(&lock); \ cmd; \ schedule(); \ spin_lock_irq(&lock)) /** * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true. * The condition is checked under the lock. This is expected to * be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before cmd and * schedule() and reacquired afterwards. * @cmd: a command which is invoked outside the critical section before * sleep * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before invoking the cmd and going to sleep and is reacquired * afterwards. * * The macro will return -ERESTARTSYS if it was interrupted by a signal * and 0 if @condition evaluated to true. */ #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \ ({ \ int __ret = 0; \ if (!(condition)) \ __ret = __wait_event_interruptible_lock_irq(wq_head, \ condition, lock, cmd); \ __ret; \ }) /** * wait_event_interruptible_lock_irq - sleep until a condition gets true. * The condition is checked under the lock. This is expected * to be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. * * The macro will return -ERESTARTSYS if it was interrupted by a signal * and 0 if @condition evaluated to true. 
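 *
 * Illustrative use only (the request queue and its fields are
 * hypothetical):
 *
 *	spin_lock_irq(&q->lock);
 *	err = wait_event_interruptible_lock_irq(q->wq,
 *						!list_empty(&q->reqs),
 *						q->lock);
 *	if (!err)
 *		req = list_first_entry(&q->reqs, struct my_req, node);
 *	spin_unlock_irq(&q->lock);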
*/
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock,);	\
	__ret;								\
})

#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),	\
		      state, 0, timeout,				\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
						  timeout)			\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_INTERRUPTIBLE);			\
	__ret;									\
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_UNINTERRUPTIBLE);			\
	__ret;									\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	struct wait_queue_entry name = {				\
		.private = current,					\
		.func = function,					\
		.entry = LIST_HEAD_INIT((name).entry),			\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->entry);				\
		(wait)->flags = 0;					\
	} while (0)
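/*
 * The helpers above allow the classic open-coded wait loop; a sketch for
 * illustration only (the wait queue head and the condition are
 * hypothetical, and wait_event() is normally preferable where it fits):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */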
#endif /* _LINUX_WAIT_H */

nd.h 0000644 00000013030 14722070374 0005321 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __LINUX_ND_H__
#define __LINUX_ND_H__
#include <linux/fs.h>
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>

enum nvdimm_event {
	NVDIMM_REVALIDATE_POISON,
};

enum nvdimm_claim_class {
	NVDIMM_CCLASS_NONE,
	NVDIMM_CCLASS_BTT,
	NVDIMM_CCLASS_BTT2,
	NVDIMM_CCLASS_PFN,
	NVDIMM_CCLASS_DAX,
	NVDIMM_CCLASS_UNKNOWN,
};

struct nd_device_driver {
	struct device_driver drv;
	unsigned long type;
	int (*probe)(struct device *dev);
	int (*remove)(struct device *dev);
	void (*shutdown)(struct device *dev);
	void (*notify)(struct device *dev, enum nvdimm_event event);
};

static inline struct nd_device_driver *to_nd_device_driver(
		struct device_driver *drv)
{
	return container_of(drv, struct nd_device_driver, drv);
}

/**
 * struct nd_namespace_common - core infrastructure of a namespace
 * @force_raw: ignore other personalities for the namespace (e.g. btt)
 * @dev: device model node
 * @claim: when set, another personality has taken ownership of the namespace
 * @claim_class: restrict claim type to a given class
 * @rw_bytes: access the raw namespace capacity with byte-aligned transfers
 */
struct nd_namespace_common {
	int force_raw;
	struct device dev;
	struct device *claim;
	enum nvdimm_claim_class claim_class;
	int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
			void *buf, size_t size, int rw, unsigned long flags);
};

static inline struct nd_namespace_common *to_ndns(struct device *dev)
{
	return container_of(dev, struct nd_namespace_common, dev);
}

/**
 * struct nd_namespace_io - device representation of a persistent memory range
 * @dev: namespace device created by the nd region driver
 * @res: struct resource conversion of a NFIT SPA table
 * @size: cached resource_size(@res) for fast path size checks
 * @addr: virtual address to access the namespace range
 * @bb: badblocks list for the namespace range
 */
struct nd_namespace_io {
	struct nd_namespace_common common;
	struct resource res;
	resource_size_t size;
	void *addr;
	struct badblocks bb;
};

/**
 * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
 * @nsio: device and system physical address range to drive
 * @lbasize: logical sector size for the namespace in block-device-mode
 * @alt_name: namespace name supplied in the dimm label
 * @uuid: namespace uuid supplied in the dimm label
 * @id: ida allocated id
 */
struct nd_namespace_pmem {
	struct nd_namespace_io nsio;
	unsigned long lbasize;
	char *alt_name;
	u8 *uuid;
	int id;
};

/**
 * struct nd_namespace_blk - namespace for dimm-bounded persistent memory
 * @alt_name: namespace name supplied in the dimm label
 * @uuid: namespace uuid supplied in the dimm label
 * @id: ida allocated id
 * @lbasize: blk namespaces have a native sector size when btt not present
 * @size: sum of all the resource ranges allocated to this namespace
 * @num_resources: number of dpa extents to claim
 * @res: discontiguous dpa extents for given dimm
 */
struct nd_namespace_blk {
	struct nd_namespace_common common;
	char *alt_name;
	u8 *uuid;
	int id;
	unsigned long lbasize;
	resource_size_t size;
	int num_resources;
	struct resource **res;
};

static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
{
	return container_of(dev, struct nd_namespace_io, common.dev);
}

static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	return container_of(nsio, struct nd_namespace_pmem, nsio);
}

static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev)
{
	return container_of(dev, struct nd_namespace_blk, common.dev);
}
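/*
 * Skeleton of a driver binding against the bus types above, for
 * illustration only (my_probe/my_remove are placeholders, the
 * ND_DRIVER_* flags come from <linux/ndctl.h>, and module_nd_driver()
 * is defined further down in this header):
 *
 *	static struct nd_device_driver my_driver = {
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.drv = {
 *			.name = "my_driver",
 *		},
 *		.type = ND_DRIVER_NAMESPACE_IO,
 *	};
 *	module_nd_driver(my_driver);
 */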
/**
 * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
 * @ndns: device to read
 * @offset: namespace-relative starting offset
 * @buf: buffer to fill
 * @size: transfer length
 *
 * @buf is up-to-date upon return from this routine.
 */
static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size,
		unsigned long flags)
{
	return ndns->rw_bytes(ndns, offset, buf, size, READ, flags);
}

/**
 * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
 * @ndns: device to write to
 * @offset: namespace-relative starting offset
 * @buf: buffer to drain
 * @size: transfer length
 *
 * NVDIMM namespaces do not implement sectors internally. Depending on
 * the @ndns, the contents of @buf may be in cpu cache, platform buffers,
 * or on backing memory media upon return from this routine. Flushing
 * to media is handled internal to the @ndns driver, if at all.
 */
static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size,
		unsigned long flags)
{
	return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags);
}

#define MODULE_ALIAS_ND_DEVICE(type) \
	MODULE_ALIAS("nd:t" __stringify(type) "*")
#define ND_DEVICE_MODALIAS_FMT "nd:t%d"

struct nd_region;
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
		struct module *module, const char *mod_name);
static inline void nd_driver_unregister(struct nd_device_driver *drv)
{
	driver_unregister(&drv->drv);
}
#define nd_driver_register(driver) \
	__nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
#define module_nd_driver(driver) \
	module_driver(driver, nd_driver_register, nd_driver_unregister)
#endif /* __LINUX_ND_H__ */

i2c-algo-pcf.h 0000644 00000002324 14722070374 0007067 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */
/* ------------------------------------------------------------------------- */
/* adap-pcf.h i2c driver algorithms for PCF8584 adapters                     */
/* ------------------------------------------------------------------------- */
/* Copyright (C) 1995-97 Simon G.
Vogl
   1998-99 Hans Berglund */
/* ------------------------------------------------------------------------- */
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
   Frodo Looijaard <frodol@dds.nl> */

#ifndef _LINUX_I2C_ALGO_PCF_H
#define _LINUX_I2C_ALGO_PCF_H

struct i2c_algo_pcf_data {
	void *data;		/* private data for lolevel routines */
	void (*setpcf) (void *data, int ctl, int val);
	int  (*getpcf) (void *data, int ctl);
	int  (*getown) (void *data);
	int  (*getclock) (void *data);
	void (*waitforpin) (void *data);

	void (*xfer_begin) (void *data);
	void (*xfer_end) (void *data);

	/* Multi-master lost arbitration back-off delay (msecs)
	 * This should be set by the bus adapter or knowledgeable client
	 * if bus is multi-mastered, else zero
	 */
	unsigned long lab_mdelay;
};

int i2c_pcf_add_bus(struct i2c_adapter *);

#endif /* _LINUX_I2C_ALGO_PCF_H */

migrate.h 0000644 00000013723 14722070374 0006361 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (PageTransHuge(page)) {
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
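/*
 * Sketch of the intended calling convention, for illustration only (the
 * allocator and release callbacks are hypothetical; pages must have been
 * isolated onto the list beforehand):
 *
 *	LIST_HEAD(pagelist);
 *	... isolate candidate pages onto &pagelist ...
 *	err = migrate_pages(&pagelist, alloc_target_page, free_target_page,
 *			    0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */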
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}
static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for the PAE architecture, where an unsigned long might not
 * have enough bits to store the full physical address plus the flags.
 * So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both src and dst array must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and the caller must not change the dst
	 * array after migrate_vma_pages() returns.
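	 *
	 * An illustrative call sequence only (how src/dst are populated
	 * is driver-specific and error handling is elided):
	 *
	 *	struct migrate_vma args = {
	 *		.vma = vma, .start = start, .end = end,
	 *		.src = src_pfns, .dst = dst_pfns,
	 *	};
	 *
	 *	if (migrate_vma_setup(&args))
	 *		goto err;
	 *	... allocate destination pages and fill args.dst ...
	 *	migrate_vma_pages(&args);
	 *	... copy data for entries that are migrating ...
	 *	migrate_vma_finalize(&args);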
*/ unsigned long *dst; unsigned long *src; unsigned long cpages; unsigned long npages; unsigned long start; unsigned long end; }; int migrate_vma_setup(struct migrate_vma *args); void migrate_vma_pages(struct migrate_vma *migrate); void migrate_vma_finalize(struct migrate_vma *migrate); #endif /* CONFIG_MIGRATION */ #endif /* _LINUX_MIGRATE_H */ timerfd.h 0000644 00000000774 14722070374 0006365 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/timerfd.h * * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> * */ #ifndef _LINUX_TIMERFD_H #define _LINUX_TIMERFD_H #include <uapi/linux/timerfd.h> #define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK) /* Flags for timerfd_create. */ #define TFD_CREATE_FLAGS TFD_SHARED_FCNTL_FLAGS /* Flags for timerfd_settime. */ #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET) #endif /* _LINUX_TIMERFD_H */ coredump.h 0000644 00000001435 14722070374 0006544 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_COREDUMP_H #define _LINUX_COREDUMP_H #include <linux/types.h> #include <linux/mm.h> #include <linux/fs.h> #include <asm/siginfo.h> /* * These are the only things you should do on a core-file: use only these * functions to write out all the necessary info. */ struct coredump_params; extern int dump_skip(struct coredump_params *cprm, size_t nr); extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); extern void dump_truncate(struct coredump_params *cprm); #ifdef CONFIG_COREDUMP extern void do_coredump(const kernel_siginfo_t *siginfo); #else static inline void do_coredump(const kernel_siginfo_t *siginfo) {} #endif #endif /* _LINUX_COREDUMP_H */ tsacct_kern.h 0000644 00000002316 14722070374 0007225 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * tsacct_kern.h - kernel header for system accounting over taskstats interface * * Copyright (C) Jay Lan SGI */ #ifndef _LINUX_TSACCT_KERN_H #define _LINUX_TSACCT_KERN_H #include <linux/taskstats.h> #ifdef CONFIG_TASKSTATS extern void bacct_add_tsk(struct user_namespace *user_ns, struct pid_namespace *pid_ns, struct taskstats *stats, struct task_struct *tsk); #else static inline void bacct_add_tsk(struct user_namespace *user_ns, struct pid_namespace *pid_ns, struct taskstats *stats, struct task_struct *tsk) {} #endif /* CONFIG_TASKSTATS */ #ifdef CONFIG_TASK_XACCT extern void xacct_add_tsk(struct taskstats *stats, struct task_struct *p); extern void acct_update_integrals(struct task_struct *tsk); extern void acct_account_cputime(struct task_struct *tsk); extern void acct_clear_integrals(struct task_struct *tsk); #else static inline void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) {} static inline void acct_update_integrals(struct task_struct *tsk) {} static inline void acct_account_cputime(struct task_struct *tsk) {} static inline void acct_clear_integrals(struct task_struct *tsk) {} #endif /* CONFIG_TASK_XACCT */ #endif cpuhotplug.h 0000644 00000030514 14722070374 0007120 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CPUHOTPLUG_H #define __CPUHOTPLUG_H #include <linux/types.h> /* * CPU-up CPU-down * * BP AP BP AP * * OFFLINE OFFLINE * | ^ * v | * BRINGUP_CPU->AP_OFFLINE BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead) * | AP_OFFLINE * v (IRQ-off) ,---------------^ * AP_ONLNE | (stop_machine) * | TEARDOWN_CPU <- AP_ONLINE_IDLE * | ^ * v | * AP_ACTIVE AP_ACTIVE */ enum cpuhp_state { CPUHP_INVALID = -1, CPUHP_OFFLINE = 0, 
CPUHP_CREATE_THREADS, CPUHP_PERF_PREPARE, CPUHP_PERF_X86_PREPARE, CPUHP_PERF_X86_AMD_UNCORE_PREP, CPUHP_PERF_POWER, CPUHP_PERF_SUPERH, CPUHP_X86_HPET_DEAD, CPUHP_X86_APB_DEAD, CPUHP_X86_MCE_DEAD, CPUHP_VIRT_NET_DEAD, CPUHP_SLUB_DEAD, CPUHP_MM_WRITEBACK_DEAD, CPUHP_MM_VMSTAT_DEAD, CPUHP_SOFTIRQ_DEAD, CPUHP_NET_MVNETA_DEAD, CPUHP_CPUIDLE_DEAD, CPUHP_ARM64_FPSIMD_DEAD, CPUHP_ARM_OMAP_WAKE_DEAD, CPUHP_IRQ_POLL_DEAD, CPUHP_BLOCK_SOFTIRQ_DEAD, CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, CPUHP_FS_BUFF_DEAD, CPUHP_PRINTK_DEAD, CPUHP_MM_MEMCQ_DEAD, CPUHP_PERCPU_CNT_DEAD, CPUHP_RADIX_DEAD, CPUHP_PAGE_ALLOC_DEAD, CPUHP_NET_DEV_DEAD, CPUHP_PCI_XGENE_DEAD, CPUHP_IOMMU_INTEL_DEAD, CPUHP_LUSTRE_CFS_DEAD, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_PADATA_DEAD, CPUHP_RANDOM_PREPARE, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, CPUHP_PROFILE_PREPARE, CPUHP_X2APIC_PREPARE, CPUHP_SMPCFD_PREPARE, CPUHP_RELAY_PREPARE, CPUHP_SLAB_PREPARE, CPUHP_MD_RAID5_PREPARE, CPUHP_RCUTREE_PREP, CPUHP_CPUIDLE_COUPLED_PREPARE, CPUHP_POWERPC_PMAC_PREPARE, CPUHP_POWERPC_MMU_CTX_PREPARE, CPUHP_XEN_PREPARE, CPUHP_XEN_EVTCHN_PREPARE, CPUHP_ARM_SHMOBILE_SCU_PREPARE, CPUHP_SH_SH3X_PREPARE, CPUHP_NET_FLOW_PREPARE, CPUHP_TOPOLOGY_PREPARE, CPUHP_NET_IUCV_PREPARE, CPUHP_ARM_BL_PREPARE, CPUHP_TRACE_RB_PREPARE, CPUHP_MM_ZS_PREPARE, CPUHP_MM_ZSWP_MEM_PREPARE, CPUHP_MM_ZSWP_POOL_PREPARE, CPUHP_KVM_PPC_BOOK3S_PREPARE, CPUHP_ZCOMP_PREPARE, CPUHP_TIMERS_PREPARE, CPUHP_MIPS_SOC_PREPARE, CPUHP_BP_PREPARE_DYN, CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, CPUHP_AP_SCHED_STARTING, CPUHP_AP_RCUTREE_DYING, CPUHP_AP_IRQ_GIC_STARTING, CPUHP_AP_IRQ_HIP04_STARTING, CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_MICROCODE_LOADER, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_PERF_X86_AMD_IBS_STARTING, CPUHP_AP_PERF_X86_CQM_STARTING, CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_PERF_XTENSA_STARTING, CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, CPUHP_AP_PERF_ARM_ACPI_STARTING, CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_OMAP_DM_TIMER_STARTING, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, CPUHP_AP_JCORE_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_TEGRA_TIMER_STARTING, CPUHP_AP_ARMADA_TIMER_STARTING, CPUHP_AP_MARCO_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_ARC_TIMER_STARTING, CPUHP_AP_RISCV_TIMER_STARTING, CPUHP_AP_CSKY_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, CPUHP_AP_KVM_ARM_VGIC_STARTING, CPUHP_AP_KVM_ARM_TIMER_STARTING, /* Must be the last timer callback */ CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_ARM_XEN_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_SMPCFD_DYING, CPUHP_AP_HRTIMERS_DYING, CPUHP_AP_X86_TBOOT_DYING, CPUHP_AP_ARM_CACHE_B15_RAC_DYING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, CPUHP_AP_ONLINE_IDLE, CPUHP_AP_SMPBOOT_THREADS, CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_IRQ_AFFINITY_ONLINE, CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, CPUHP_AP_X86_INTEL_EPB_ONLINE, CPUHP_AP_PERF_ONLINE, CPUHP_AP_PERF_X86_ONLINE, CPUHP_AP_PERF_X86_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, 
	CPUHP_AP_PERF_X86_RAPL_ONLINE,
	CPUHP_AP_PERF_X86_CQM_ONLINE,
	CPUHP_AP_PERF_X86_CSTATE_ONLINE,
	CPUHP_AP_PERF_S390_CF_ONLINE,
	CPUHP_AP_PERF_S390_SF_ONLINE,
	CPUHP_AP_PERF_ARM_CCI_ONLINE,
	CPUHP_AP_PERF_ARM_CCN_ONLINE,
	CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
	CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
	CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
	CPUHP_AP_PERF_ARM_L2X0_ONLINE,
	CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
	CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
	CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
	CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
	CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
	CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
	CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
	CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
	CPUHP_AP_WATCHDOG_ONLINE,
	CPUHP_AP_WORKQUEUE_ONLINE,
	CPUHP_AP_RANDOM_ONLINE,
	CPUHP_AP_RCUTREE_ONLINE,
	CPUHP_AP_BASE_CACHEINFO_ONLINE,
	CPUHP_AP_ONLINE_DYN,
	CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
	CPUHP_AP_X86_HPET_ONLINE,
	CPUHP_AP_X86_KVM_CLK_ONLINE,
	CPUHP_AP_ACTIVE,
	CPUHP_ONLINE,
};

int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu), bool multi_instance);

int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
				   bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance);
/**
 * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
 * @state:	The state for which the calls are installed
 * @name:	Name of the callback (will be used in debug output)
 * @startup:	startup callback function
 * @teardown:	teardown callback function
 *
 * Installs the callback functions and invokes the startup callback on
 * the present cpus which have already reached the @state.
 */
static inline int cpuhp_setup_state(enum cpuhp_state state,
				    const char *name,
				    int (*startup)(unsigned int cpu),
				    int (*teardown)(unsigned int cpu))
{
	return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}

static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
					       const char *name,
					       int (*startup)(unsigned int cpu),
					       int (*teardown)(unsigned int cpu))
{
	return __cpuhp_setup_state_cpuslocked(state, name, true, startup,
					      teardown, false);
}

/**
 * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
 *			       callbacks
 * @state:	The state for which the calls are installed
 * @name:	Name of the callback.
 * @startup:	startup callback function
 * @teardown:	teardown callback function
 *
 * Same as @cpuhp_setup_state except that no calls are invoked during
 * installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
 */
static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
					    const char *name,
					    int (*startup)(unsigned int cpu),
					    int (*teardown)(unsigned int cpu))
{
	return __cpuhp_setup_state(state, name, false, startup, teardown,
				   false);
}

static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
						     const char *name,
						     int (*startup)(unsigned int cpu),
						     int (*teardown)(unsigned int cpu))
{
	return __cpuhp_setup_state_cpuslocked(state, name, false, startup,
					      teardown, false);
}

/**
 * cpuhp_setup_state_multi - Add callbacks for multi state
 * @state:	The state for which the calls are installed
 * @name:	Name of the callback.
 * @startup:	startup callback function
 * @teardown:	teardown callback function
 *
 * Sets the internal multi_instance flag and prepares a state to work as a multi
 * instance callback. No callbacks are invoked at this point. The callbacks are
 * invoked once an instance for this state is registered via
 * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
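 *
 * A minimal sketch, for illustration only (the callbacks, which take
 * (unsigned int cpu, struct hlist_node *node), and the object embedding
 * the node are hypothetical):
 *
 *	static enum cpuhp_state my_state;
 *
 *	my_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "my:online",
 *					   my_cpu_online, my_cpu_offline);
 *	...
 *	cpuhp_state_add_instance(my_state, &my_obj->node);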
 */
static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
					  const char *name,
					  int (*startup)(unsigned int cpu,
							 struct hlist_node *node),
					  int (*teardown)(unsigned int cpu,
							  struct hlist_node *node))
{
	return __cpuhp_setup_state(state, name, false,
				   (void *) startup,
				   (void *) teardown, true);
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke);
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node, bool invoke);

/**
 * cpuhp_state_add_instance - Add an instance for a state and invoke startup
 *                            callback.
 * @state:	The state for which the instance is installed
 * @node:	The node for this individual state.
 *
 * Installs the instance for the @state and invokes the startup callback on
 * the present cpus which have already reached the @state. The @state must have
 * been earlier marked as multi-instance by @cpuhp_setup_state_multi.
 */
static inline int cpuhp_state_add_instance(enum cpuhp_state state,
					   struct hlist_node *node)
{
	return __cpuhp_state_add_instance(state, node, true);
}

/**
 * cpuhp_state_add_instance_nocalls - Add an instance for a state without
 *                                    invoking the startup callback.
 * @state:	The state for which the instance is installed
 * @node:	The node for this individual state.
 *
 * Installs the instance for the @state. The @state must have been earlier
 * marked as multi-instance by @cpuhp_setup_state_multi.
 */
static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
						   struct hlist_node *node)
{
	return __cpuhp_state_add_instance(state, node, false);
}

static inline int
cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
					    struct hlist_node *node)
{
	return __cpuhp_state_add_instance_cpuslocked(state, node, false);
}

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);

/**
 * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown
 * @state:	The state for which the calls are removed
 *
 * Removes the callback functions and invokes the teardown callback on
 * the present cpus which have already reached the @state.
 */
static inline void cpuhp_remove_state(enum cpuhp_state state)
{
	__cpuhp_remove_state(state, true);
}

/**
 * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
 *				teardown
 * @state:	The state for which the calls are removed
 */
static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
{
	__cpuhp_remove_state(state, false);
}

static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
{
	__cpuhp_remove_state_cpuslocked(state, false);
}

/**
 * cpuhp_remove_multi_state - Remove hotplug multi state callback
 * @state:	The state for which the calls are removed
 *
 * Removes the callback functions from a multi state. This is the reverse of
 * cpuhp_setup_state_multi(). All instances should have been removed before
 * invoking this function.
 */
static inline void cpuhp_remove_multi_state(enum cpuhp_state state)
{
	__cpuhp_remove_state(state, false);
}

int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke);

/**
 * cpuhp_state_remove_instance - Remove hotplug instance from state and invoke
 *                               the teardown callback
 * @state:	The state from which the instance is removed
 * @node:	The node for this individual state.
 *
 * Removes the instance and invokes the teardown callback on the present cpus
 * which have already reached the @state.
 */
static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
					      struct hlist_node *node)
{
	return __cpuhp_state_remove_instance(state, node, true);
}

/**
 * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
 *					 without invoking the teardown callback
 * @state:	The state from which the instance is removed
 * @node:	The node for this individual state.
 *
 * Removes the instance without invoking the teardown callback.
 */
static inline int cpuhp_state_remove_instance_nocalls(enum cpuhp_state state,
						      struct hlist_node *node)
{
	return __cpuhp_state_remove_instance(state, node, false);
}

#ifdef CONFIG_SMP
void cpuhp_online_idle(enum cpuhp_state state);
#else
static inline void cpuhp_online_idle(enum cpuhp_state state) { }
#endif

#endif

wkup_m3_ipc.h 0000644 00000003402 14722070374 0007142 0 ustar 00 /*
 * TI Wakeup M3 for AMx3 SoCs Power Management Routines
 *
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
 * Dave Gerlach <d-gerlach@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_WKUP_M3_IPC_H
#define _LINUX_WKUP_M3_IPC_H

#define WKUP_M3_DEEPSLEEP	1
#define WKUP_M3_STANDBY		2
#define WKUP_M3_IDLE		3

#include <linux/mailbox_client.h>

struct wkup_m3_ipc_ops;

struct wkup_m3_ipc {
	struct rproc *rproc;

	void __iomem *ipc_mem_base;
	struct device *dev;

	int mem_type;
	unsigned long resume_addr;
	int state;

	struct completion sync_complete;
	struct mbox_client mbox_client;
	struct mbox_chan *mbox;

	struct wkup_m3_ipc_ops *ops;
	int is_rtc_only;
};

struct wkup_m3_wakeup_src {
	int irq_nr;
	char src[10];
};

struct wkup_m3_ipc_ops {
	void (*set_mem_type)(struct wkup_m3_ipc *m3_ipc, int mem_type);
	void (*set_resume_address)(struct wkup_m3_ipc *m3_ipc, void *addr);
	int (*prepare_low_power)(struct wkup_m3_ipc *m3_ipc, int state);
	int (*finish_low_power)(struct wkup_m3_ipc *m3_ipc);
	int (*request_pm_status)(struct wkup_m3_ipc *m3_ipc);
	const char *(*request_wake_src)(struct wkup_m3_ipc *m3_ipc);
	void (*set_rtc_only)(struct wkup_m3_ipc *m3_ipc);
};

struct wkup_m3_ipc *wkup_m3_ipc_get(void);
void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc);
void wkup_m3_set_rtc_only_mode(void);
#endif /* _LINUX_WKUP_M3_IPC_H */

pmu.h 0000644 00000004706 14722070374 0005531 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * Definitions for talking to the PMU.  The PMU is a microcontroller
 * which controls battery charging and system power on PowerBook 3400
 * and 2400 models as well as the RTC and various other things.
 *
 * Copyright (C) 1998 Paul Mackerras.
*/ #ifndef _LINUX_PMU_H #define _LINUX_PMU_H #include <linux/rtc.h> #include <uapi/linux/pmu.h> extern int find_via_pmu(void); extern int pmu_request(struct adb_request *req, void (*done)(struct adb_request *), int nbytes, ...); extern int pmu_queue_request(struct adb_request *req); extern void pmu_poll(void); extern void pmu_poll_adb(void); /* For use by xmon */ extern void pmu_wait_complete(struct adb_request *req); /* For use before switching interrupts off for a long time; * warning: not stackable */ #if defined(CONFIG_ADB_PMU) extern void pmu_suspend(void); extern void pmu_resume(void); #else static inline void pmu_suspend(void) {} static inline void pmu_resume(void) {} #endif extern void pmu_enable_irled(int on); extern time64_t pmu_get_time(void); extern int pmu_set_rtc_time(struct rtc_time *tm); extern void pmu_restart(void); extern void pmu_shutdown(void); extern void pmu_unlock(void); extern int pmu_present(void); extern int pmu_get_model(void); extern void pmu_backlight_set_sleep(int sleep); #define PMU_MAX_BATTERIES 2 /* values for pmu_power_flags */ #define PMU_PWR_AC_PRESENT 0x00000001 /* values for pmu_battery_info.flags */ #define PMU_BATT_PRESENT 0x00000001 #define PMU_BATT_CHARGING 0x00000002 #define PMU_BATT_TYPE_MASK 0x000000f0 #define PMU_BATT_TYPE_SMART 0x00000010 /* Smart battery */ #define PMU_BATT_TYPE_HOOPER 0x00000020 /* 3400/3500 */ #define PMU_BATT_TYPE_COMET 0x00000030 /* 2400 */ struct pmu_battery_info { unsigned int flags; unsigned int charge; /* current charge */ unsigned int max_charge; /* maximum charge */ signed int amperage; /* current, positive if charging */ unsigned int voltage; /* voltage */ unsigned int time_remaining; /* remaining time */ }; extern int pmu_battery_count; extern struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES]; extern unsigned int pmu_power_flags; /* Backlight */ extern void pmu_backlight_init(void); /* some code needs to know if the PMU was suspended for hibernation */ #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) extern int pmu_sys_suspended; #else /* if power management is not configured it can't be suspended */ #define pmu_sys_suspended 0 #endif #endif /* _LINUX_PMU_H */ ext2_fs.h 0000644 00000001707 14722070374 0006302 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/ext2_fs.h * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/include/linux/minix_fs.h * * Copyright (C) 1991, 1992 Linus Torvalds */ #ifndef _LINUX_EXT2_FS_H #define _LINUX_EXT2_FS_H #include <linux/types.h> #include <linux/magic.h> #define EXT2_NAME_LEN 255 /* * Maximal count of links to a file */ #define EXT2_LINK_MAX 32000 #define EXT2_SB_MAGIC_OFFSET 0x38 #define EXT2_SB_BLOCKS_OFFSET 0x04 #define EXT2_SB_BSIZE_OFFSET 0x18 static inline u64 ext2_image_size(void *ext2_sb) { __u8 *p = ext2_sb; if (*(__le16 *)(p + EXT2_SB_MAGIC_OFFSET) != cpu_to_le16(EXT2_SUPER_MAGIC)) return 0; return (u64)le32_to_cpup((__le32 *)(p + EXT2_SB_BLOCKS_OFFSET)) << le32_to_cpup((__le32 *)(p + EXT2_SB_BSIZE_OFFSET)); } #endif /* _LINUX_EXT2_FS_H */ of_iommu.h 0000644 00000001465 14722070374 0006543 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __OF_IOMMU_H #define __OF_IOMMU_H #include <linux/device.h> #include <linux/iommu.h> #include <linux/of.h> #ifdef CONFIG_OF_IOMMU extern int of_get_dma_window(struct device_node *dn, const char *prefix, int index, unsigned long *busno, dma_addr_t *addr, size_t 
*size); extern const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np); #else static inline int of_get_dma_window(struct device_node *dn, const char *prefix, int index, unsigned long *busno, dma_addr_t *addr, size_t *size) { return -EINVAL; } static inline const struct iommu_ops *of_iommu_configure(struct device *dev, struct device_node *master_np) { return NULL; } #endif /* CONFIG_OF_IOMMU */ #endif /* __OF_IOMMU_H */ ucs2_string.h 0000644 00000001226 14722070374 0007166 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UCS2_STRING_H_ #define _LINUX_UCS2_STRING_H_ #include <linux/types.h> /* for size_t */ #include <linux/stddef.h> /* for NULL */ typedef u16 ucs2_char_t; unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength); unsigned long ucs2_strlen(const ucs2_char_t *s); unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); unsigned long ucs2_utf8size(const ucs2_char_t *src); unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength); #endif /* _LINUX_UCS2_STRING_H_ */ file.h 0000644 00000004366 14722070374 0005653 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Wrapper functions for accessing the file_struct fd array. */ #ifndef __LINUX_FILE_H #define __LINUX_FILE_H #include <linux/compiler.h> #include <linux/types.h> #include <linux/posix_types.h> struct file; extern void fput(struct file *); extern void fput_many(struct file *, unsigned int); struct file_operations; struct vfsmount; struct dentry; struct inode; struct path; extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *, const char *, int flags, const struct file_operations *); extern struct file *alloc_file_clone(struct file *, int flags, const struct file_operations *); static inline void fput_light(struct file *file, int fput_needed) { if (fput_needed) fput(file); } struct fd { struct file *file; unsigned int flags; }; #define FDPUT_FPUT 1 #define FDPUT_POS_UNLOCK 2 static inline void fdput(struct fd fd) { if (fd.flags & FDPUT_FPUT) fput(fd.file); } extern struct file *fget(unsigned int fd); extern struct file *fget_many(unsigned int fd, unsigned int refs); extern struct file *fget_raw(unsigned int fd); extern unsigned long __fdget(unsigned int fd); extern unsigned long __fdget_raw(unsigned int fd); extern unsigned long __fdget_pos(unsigned int fd); extern void __f_unlock_pos(struct file *); static inline struct fd __to_fd(unsigned long v) { return (struct fd){(struct file *)(v & ~3),v & 3}; } static inline struct fd fdget(unsigned int fd) { return __to_fd(__fdget(fd)); } static inline struct fd fdget_raw(unsigned int fd) { return __to_fd(__fdget_raw(fd)); } static inline struct fd fdget_pos(int fd) { return __to_fd(__fdget_pos(fd)); } static inline void fdput_pos(struct fd f) { if (f.flags & FDPUT_POS_UNLOCK) __f_unlock_pos(f.file); fdput(f); } extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); extern int replace_fd(unsigned fd, struct file *file, unsigned flags); extern void set_close_on_exec(unsigned int fd, int flag); extern bool get_close_on_exec(unsigned int fd); extern int get_unused_fd_flags(unsigned flags); extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); extern void flush_delayed_fput(void); extern void __fput_sync(struct file *); #endif /* __LINUX_FILE_H */ psi.h 0000644 00000003033 14722070374 0005515 0 ustar 00 #ifndef 
_LINUX_PSI_H #define _LINUX_PSI_H #include <linux/jump_label.h> #include <linux/psi_types.h> #include <linux/sched.h> #include <linux/poll.h> struct seq_file; struct css_set; #ifdef CONFIG_PSI extern struct static_key_false psi_disabled; extern struct psi_group psi_system; void psi_init(void); void psi_task_change(struct task_struct *task, int clear, int set); void psi_memstall_tick(struct task_struct *task, int cpu); void psi_memstall_enter(unsigned long *flags); void psi_memstall_leave(unsigned long *flags); int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res); #ifdef CONFIG_CGROUPS int psi_cgroup_alloc(struct cgroup *cgrp); void psi_cgroup_free(struct cgroup *cgrp); void cgroup_move_task(struct task_struct *p, struct css_set *to); struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf, size_t nbytes, enum psi_res res); void psi_trigger_destroy(struct psi_trigger *t); __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file, poll_table *wait); #endif #else /* CONFIG_PSI */ static inline void psi_init(void) {} static inline void psi_memstall_enter(unsigned long *flags) {} static inline void psi_memstall_leave(unsigned long *flags) {} #ifdef CONFIG_CGROUPS static inline int psi_cgroup_alloc(struct cgroup *cgrp) { return 0; } static inline void psi_cgroup_free(struct cgroup *cgrp) { } static inline void cgroup_move_task(struct task_struct *p, struct css_set *to) { rcu_assign_pointer(p->cgroups, to); } #endif #endif /* CONFIG_PSI */ #endif /* _LINUX_PSI_H */ dnotify.h 0000644 00000002027 14722070374 0006400 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DNOTIFY_H #define _LINUX_DNOTIFY_H /* * Directory notification for Linux * * Copyright (C) 2000,2002 Stephen Rothwell */ #include <linux/fs.h> struct dnotify_struct { struct dnotify_struct * dn_next; __u32 dn_mask; int dn_fd; struct file * dn_filp; fl_owner_t dn_owner; }; #ifdef __KERNEL__ #ifdef CONFIG_DNOTIFY #define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\ FS_MODIFY | FS_MODIFY_CHILD |\ FS_ACCESS | FS_ACCESS_CHILD |\ FS_ATTRIB | FS_ATTRIB_CHILD |\ FS_CREATE | FS_DN_RENAME |\ FS_MOVED_FROM | FS_MOVED_TO) extern int dir_notify_enable; extern void dnotify_flush(struct file *, fl_owner_t); extern int fcntl_dirnotify(int, struct file *, unsigned long); #else static inline void dnotify_flush(struct file *filp, fl_owner_t id) { } static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) { return -EINVAL; } #endif /* CONFIG_DNOTIFY */ #endif /* __KERNEL __ */ #endif /* _LINUX_DNOTIFY_H */ sunxi-rsb.h 0000644 00000005613 14722070374 0006662 0 ustar 00 /* * Allwinner Reduced Serial Bus Driver * * Copyright (c) 2015 Chen-Yu Tsai * * Author: Chen-Yu Tsai <wens@csie.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #ifndef _SUNXI_RSB_H #define _SUNXI_RSB_H #include <linux/device.h> #include <linux/regmap.h> #include <linux/types.h> struct sunxi_rsb; /** * struct sunxi_rsb_device - Basic representation of an RSB device * @dev: Driver model representation of the device. * @ctrl: RSB controller managing the bus hosting this device. 
* @rtaddr: This device's runtime address * @hwaddr: This device's hardware address */ struct sunxi_rsb_device { struct device dev; struct sunxi_rsb *rsb; int irq; u8 rtaddr; u16 hwaddr; }; static inline struct sunxi_rsb_device *to_sunxi_rsb_device(struct device *d) { return container_of(d, struct sunxi_rsb_device, dev); } static inline void *sunxi_rsb_device_get_drvdata(const struct sunxi_rsb_device *rdev) { return dev_get_drvdata(&rdev->dev); } static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev, void *data) { dev_set_drvdata(&rdev->dev, data); } /** * struct sunxi_rsb_driver - RSB slave device driver * @driver: RSB device drivers should initialize name and owner field of * this structure. * @probe: binds this driver to a RSB device. * @remove: unbinds this driver from the RSB device. */ struct sunxi_rsb_driver { struct device_driver driver; int (*probe)(struct sunxi_rsb_device *rdev); int (*remove)(struct sunxi_rsb_device *rdev); }; static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d) { return container_of(d, struct sunxi_rsb_driver, driver); } int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv); /** * sunxi_rsb_driver_unregister() - unregister an RSB client driver * @rdrv: the driver to unregister */ static inline void sunxi_rsb_driver_unregister(struct sunxi_rsb_driver *rdrv) { if (rdrv) driver_unregister(&rdrv->driver); } #define module_sunxi_rsb_driver(__sunxi_rsb_driver) \ module_driver(__sunxi_rsb_driver, sunxi_rsb_driver_register, \ sunxi_rsb_driver_unregister) struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); /** * devm_regmap_init_sunxi_rsb(): Initialise managed register map * * @rdev: Device that will be interacted with * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer * to a struct regmap. The regmap will be automatically freed by the * device management code. 
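 *
 * Typical use from a driver's probe() callback, for illustration only
 * (my_regmap_config is a hypothetical struct regmap_config):
 *
 *	regmap = devm_regmap_init_sunxi_rsb(rdev, &my_regmap_config);
 *	if (IS_ERR(regmap))
 *		return PTR_ERR(regmap);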
*/ #define devm_regmap_init_sunxi_rsb(rdev, config) \ __regmap_lockdep_wrapper(__devm_regmap_init_sunxi_rsb, #config, \ rdev, config) #endif /* _SUNXI_RSB_H */ pci-ep-cfs.h 0000644 00000001667 14722070374 0006663 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /** * PCI Endpoint ConfigFS header file * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <kishon@ti.com> */ #ifndef __LINUX_PCI_EP_CFS_H #define __LINUX_PCI_EP_CFS_H #include <linux/configfs.h> #ifdef CONFIG_PCI_ENDPOINT_CONFIGFS struct config_group *pci_ep_cfs_add_epc_group(const char *name); void pci_ep_cfs_remove_epc_group(struct config_group *group); struct config_group *pci_ep_cfs_add_epf_group(const char *name); void pci_ep_cfs_remove_epf_group(struct config_group *group); #else static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name) { return 0; } static inline void pci_ep_cfs_remove_epc_group(struct config_group *group) { } static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name) { return 0; } static inline void pci_ep_cfs_remove_epf_group(struct config_group *group) { } #endif #endif /* __LINUX_PCI_EP_CFS_H */ pm.h 0000644 00000102421 14722070374 0005337 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * pm.h - Power management interface * * Copyright (C) 2000 Andrew Henroid */ #ifndef _LINUX_PM_H #define _LINUX_PM_H #include <linux/list.h> #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/timer.h> #include <linux/hrtimer.h> #include <linux/completion.h> /* * Callbacks for platform drivers to implement. */ extern void (*pm_power_off)(void); extern void (*pm_power_off_prepare)(void); struct device; /* we have a circular dep with device.h */ #ifdef CONFIG_VT_CONSOLE_SLEEP extern void pm_vt_switch_required(struct device *dev, bool required); extern void pm_vt_switch_unregister(struct device *dev); #else static inline void pm_vt_switch_required(struct device *dev, bool required) { } static inline void pm_vt_switch_unregister(struct device *dev) { } #endif /* CONFIG_VT_CONSOLE_SLEEP */ /* * Device power management */ struct device; #ifdef CONFIG_PM extern const char power_group_name[]; /* = "power" */ #else #define power_group_name NULL #endif typedef struct pm_message { int event; } pm_message_t; /** * struct dev_pm_ops - device PM callbacks. * * @prepare: The principal role of this callback is to prevent new children of * the device from being registered after it has returned (the driver's * subsystem and generally the rest of the kernel is supposed to prevent * new calls to the probe method from being made too once @prepare() has * succeeded). If @prepare() detects a situation it cannot handle (e.g. * registration of a child already in progress), it may return -EAGAIN, so * that the PM core can execute it once again (e.g. after a new child has * been registered) to recover from the race condition. * This method is executed for all kinds of suspend transitions and is * followed by one of the suspend callbacks: @suspend(), @freeze(), or * @poweroff(). If the transition is a suspend to memory or standby (that * is, not related to hibernation), the return value of @prepare() may be * used to indicate to the PM core to leave the device in runtime suspend * if applicable. 
Namely, if @prepare() returns a positive number, the PM * core will understand that as a declaration that the device appears to be * runtime-suspended and it may be left in that state during the entire * transition and during the subsequent resume if all of its descendants * are left in runtime suspend too. If that happens, @complete() will be * executed directly after @prepare() and it must ensure the proper * functioning of the device after the system resume. * The PM core executes subsystem-level @prepare() for all devices before * starting to invoke suspend callbacks for any of them, so generally * devices may be assumed to be functional or to respond to runtime resume * requests while @prepare() is being executed. However, device drivers * may NOT assume anything about the availability of user space at that * time and it is NOT valid to request firmware from within @prepare() * (it's too late to do that). It also is NOT valid to allocate * substantial amounts of memory from @prepare() in the GFP_KERNEL mode. * [To work around these limitations, drivers may register suspend and * hibernation notifiers to be executed before the freezing of tasks.] * * @complete: Undo the changes made by @prepare(). This method is executed for * all kinds of resume transitions, following one of the resume callbacks: * @resume(), @thaw(), @restore(). Also called if the state transition * fails before the driver's suspend callback: @suspend(), @freeze() or * @poweroff(), can be executed (e.g. if the suspend callback fails for one * of the other devices that the PM core has unsuccessfully attempted to * suspend earlier). * The PM core executes subsystem-level @complete() after it has executed * the appropriate resume callbacks for all devices. If the corresponding * @prepare() at the beginning of the suspend transition returned a * positive number and the device was left in runtime suspend (without * executing any suspend and resume callbacks for it), @complete() will be * the only callback executed for the device during resume. In that case, * @complete() must be prepared to do whatever is necessary to ensure the * proper functioning of the device after the system resume. To this end, * @complete() can check the power.direct_complete flag of the device to * learn whether (unset) or not (set) the previous suspend and resume * callbacks have been executed for it. * * @suspend: Executed before putting the system into a sleep state in which the * contents of main memory are preserved. The exact action to perform * depends on the device's subsystem (PM domain, device type, class or bus * type), but generally the device must be quiescent after subsystem-level * @suspend() has returned, so that it doesn't do any I/O or DMA. * Subsystem-level @suspend() is executed for all devices after invoking * subsystem-level @prepare() for all of them. * * @suspend_late: Continue operations started by @suspend(). For a number of * devices @suspend_late() may point to the same callback routine as the * runtime suspend callback. * * @resume: Executed after waking the system up from a sleep state in which the * contents of main memory were preserved. The exact action to perform * depends on the device's subsystem, but generally the driver is expected * to start working again, responding to hardware events and software * requests (the device itself may be left in a low-power state, waiting * for a runtime resume to occur). 
The state of the device at the time its * driver's @resume() callback is run depends on the platform and subsystem * the device belongs to. On most platforms, there are no restrictions on * availability of resources like clocks during @resume(). * Subsystem-level @resume() is executed for all devices after invoking * subsystem-level @resume_noirq() for all of them. * * @resume_early: Prepare to execute @resume(). For a number of devices * @resume_early() may point to the same callback routine as the runtime * resume callback. * * @freeze: Hibernation-specific, executed before creating a hibernation image. * Analogous to @suspend(), but it should not enable the device to signal * wakeup events or change its power state. The majority of subsystems * (with the notable exception of the PCI bus type) expect the driver-level * @freeze() to save the device settings in memory to be used by @restore() * during the subsequent resume from hibernation. * Subsystem-level @freeze() is executed for all devices after invoking * subsystem-level @prepare() for all of them. * * @freeze_late: Continue operations started by @freeze(). Analogous to * @suspend_late(), but it should not enable the device to signal wakeup * events or change its power state. * * @thaw: Hibernation-specific, executed after creating a hibernation image OR * if the creation of an image has failed. Also executed after a failing * attempt to restore the contents of main memory from such an image. * Undo the changes made by the preceding @freeze(), so the device can be * operated in the same way as immediately before the call to @freeze(). * Subsystem-level @thaw() is executed for all devices after invoking * subsystem-level @thaw_noirq() for all of them. It also may be executed * directly after @freeze() in case of a transition error. * * @thaw_early: Prepare to execute @thaw(). Undo the changes made by the * preceding @freeze_late(). * * @poweroff: Hibernation-specific, executed after saving a hibernation image. * Analogous to @suspend(), but it need not save the device's settings in * memory. * Subsystem-level @poweroff() is executed for all devices after invoking * subsystem-level @prepare() for all of them. * * @poweroff_late: Continue operations started by @poweroff(). Analogous to * @suspend_late(), but it need not save the device's settings in memory. * * @restore: Hibernation-specific, executed after restoring the contents of main * memory from a hibernation image, analogous to @resume(). * * @restore_early: Prepare to execute @restore(), analogous to @resume_early(). * * @suspend_noirq: Complete the actions started by @suspend(). Carry out any * additional operations required for suspending the device that might be * racing with its driver's interrupt handler, which is guaranteed not to * run while @suspend_noirq() is being executed. * It generally is expected that the device will be in a low-power state * (appropriate for the target system sleep state) after subsystem-level * @suspend_noirq() has returned successfully. If the device can generate * system wakeup signals and is enabled to wake up the system, it should be * configured to do so at that time. However, depending on the platform * and device's subsystem, @suspend() or @suspend_late() may be allowed to * put the device into the low-power state and configure it to generate * wakeup signals, in which case it generally is not necessary to define * @suspend_noirq(). 
* * @resume_noirq: Prepare for the execution of @resume() by carrying out any * operations required for resuming the device that might be racing with * its driver's interrupt handler, which is guaranteed not to run while * @resume_noirq() is being executed. * * @freeze_noirq: Complete the actions started by @freeze(). Carry out any * additional operations required for freezing the device that might be * racing with its driver's interrupt handler, which is guaranteed not to * run while @freeze_noirq() is being executed. * The power state of the device should not be changed by either @freeze(), * or @freeze_late(), or @freeze_noirq() and it should not be configured to * signal system wakeup by any of these callbacks. * * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any * operations required for thawing the device that might be racing with its * driver's interrupt handler, which is guaranteed not to run while * @thaw_noirq() is being executed. * * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to * @suspend_noirq(), but it need not save the device's settings in memory. * * @restore_noirq: Prepare for the execution of @restore() by carrying out any * operations required for thawing the device that might be racing with its * driver's interrupt handler, which is guaranteed not to run while * @restore_noirq() is being executed. Analogous to @resume_noirq(). * * @runtime_suspend: Prepare the device for a condition in which it won't be * able to communicate with the CPU(s) and RAM due to power management. * This need not mean that the device should be put into a low-power state. * For example, if the device is behind a link which is about to be turned * off, the device may remain at full power. If the device does go to low * power and is capable of generating runtime wakeup events, remote wakeup * (i.e., a hardware mechanism allowing the device to request a change of * its power state via an interrupt) should be enabled for it. * * @runtime_resume: Put the device into the fully active state in response to a * wakeup event generated by hardware or at the request of software. If * necessary, put the device into the full-power state and restore its * registers, so that it is fully operational. * * @runtime_idle: Device appears to be inactive and it might be put into a * low-power state if all of the necessary conditions are satisfied. * Check these conditions, and return 0 if it's appropriate to let the PM * core queue a suspend request for the device. * * Several device power state transitions are externally visible, affecting * the state of pending I/O queues and (for drivers that touch hardware) * interrupts, wakeups, DMA, and other hardware state. There may also be * internal transitions to various low-power modes which are transparent * to the rest of the driver stack (such as a driver that's ON gating off * clocks which are not in active use). * * The externally visible transitions are handled with the help of callbacks * included in this structure in such a way that, typically, two levels of * callbacks are involved. First, the PM core executes callbacks provided by PM * domains, device types, classes and bus types. They are the subsystem-level * callbacks expected to execute callbacks provided by device drivers, although * they may choose not to do that. 
If the driver callbacks are executed, they * have to collaborate with the subsystem-level callbacks to achieve the goals * appropriate for the given system transition, given transition phase and the * subsystem the device belongs to. * * All of the above callbacks, except for @complete(), return error codes. * However, the error codes returned by @resume(), @thaw(), @restore(), * @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do not cause the PM * core to abort the resume transition during which they are returned. The * error codes returned in those cases are only printed to the system logs for * debugging purposes. Still, it is recommended that drivers only return error * codes from their resume methods in case of an unrecoverable failure (i.e. * when the device being handled refuses to resume and becomes unusable) to * allow the PM core to be modified in the future, so that it can avoid * attempting to handle devices that failed to resume and their children. * * It is allowed to unregister devices while the above callbacks are being * executed. However, a callback routine MUST NOT try to unregister the device * it was called for, although it may unregister children of that device (for * example, if it detects that a child was unplugged while the system was * asleep). * * There also are callbacks related to runtime power management of devices. * Again, as a rule these callbacks are executed by the PM core for subsystems * (PM domains, device types, classes and bus types) and the subsystem-level * callbacks are expected to invoke the driver callbacks. Moreover, the exact * actions to be performed by a device driver's callbacks generally depend on * the platform and subsystem the device belongs to. * * Refer to Documentation/power/runtime_pm.rst for more information about the * role of the @runtime_suspend(), @runtime_resume() and @runtime_idle() * callbacks in device runtime power management. 
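 *
 * As a brief illustration (not part of this header), a driver that only
 * needs basic system sleep handling might supply just a suspend and a
 * resume routine; foo_suspend() and foo_resume() below are hypothetical
 * driver helpers that quiesce and reactivate the hardware:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		... quiesce the hardware, save context ...
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		... restore context, restart I/O ...
 *		return 0;
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * SIMPLE_DEV_PM_OPS(), defined later in this file, reuses the same pair
 * of routines for the suspend/resume, freeze/thaw and poweroff/restore
 * phases.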
*/ struct dev_pm_ops { int (*prepare)(struct device *dev); void (*complete)(struct device *dev); int (*suspend)(struct device *dev); int (*resume)(struct device *dev); int (*freeze)(struct device *dev); int (*thaw)(struct device *dev); int (*poweroff)(struct device *dev); int (*restore)(struct device *dev); int (*suspend_late)(struct device *dev); int (*resume_early)(struct device *dev); int (*freeze_late)(struct device *dev); int (*thaw_early)(struct device *dev); int (*poweroff_late)(struct device *dev); int (*restore_early)(struct device *dev); int (*suspend_noirq)(struct device *dev); int (*resume_noirq)(struct device *dev); int (*freeze_noirq)(struct device *dev); int (*thaw_noirq)(struct device *dev); int (*poweroff_noirq)(struct device *dev); int (*restore_noirq)(struct device *dev); int (*runtime_suspend)(struct device *dev); int (*runtime_resume)(struct device *dev); int (*runtime_idle)(struct device *dev); }; #ifdef CONFIG_PM_SLEEP #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ .suspend = suspend_fn, \ .resume = resume_fn, \ .freeze = suspend_fn, \ .thaw = resume_fn, \ .poweroff = suspend_fn, \ .restore = resume_fn, #else #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #endif #ifdef CONFIG_PM_SLEEP #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ .suspend_late = suspend_fn, \ .resume_early = resume_fn, \ .freeze_late = suspend_fn, \ .thaw_early = resume_fn, \ .poweroff_late = suspend_fn, \ .restore_early = resume_fn, #else #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #endif #ifdef CONFIG_PM_SLEEP #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ .suspend_noirq = suspend_fn, \ .resume_noirq = resume_fn, \ .freeze_noirq = suspend_fn, \ .thaw_noirq = resume_fn, \ .poweroff_noirq = suspend_fn, \ .restore_noirq = resume_fn, #else #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #endif #ifdef CONFIG_PM #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ .runtime_suspend = suspend_fn, \ .runtime_resume = resume_fn, \ .runtime_idle = idle_fn, #else #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) #endif /* * Use this if you want to use the same suspend and resume callbacks for suspend * to RAM and hibernation. */ #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ const struct dev_pm_ops name = { \ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } /* * Use this for defining a set of PM operations to be used in all situations * (system suspend, hibernation or runtime PM). * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should * be different from the corresponding runtime PM callbacks, .runtime_suspend(), * and .runtime_resume(), because .runtime_suspend() always works on an already * quiescent device, while .suspend() should assume that the device may be doing * something when it is called (it should ensure that the device will be * quiescent after it has returned). Therefore it's better to point the "late" * suspend and "early" resume callback pointers, .suspend_late() and * .resume_early(), to the same routines as .runtime_suspend() and * .runtime_resume(), respectively (and analogously for hibernation). 
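 *
 * A minimal sketch of that recommended arrangement, using the helper
 * macros defined above (foo_suspend(), foo_resume(), foo_runtime_suspend()
 * and foo_runtime_resume() are hypothetical driver routines):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_runtime_suspend,
 *					     foo_runtime_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   NULL)
 *	};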
*/ #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ const struct dev_pm_ops name = { \ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ } /* * PM_EVENT_ messages * * The following PM_EVENT_ messages are defined for the internal use of the PM * core, in order to provide a mechanism allowing the high level suspend and * hibernation code to convey the necessary information to the device PM core * code: * * ON No transition. * * FREEZE System is going to hibernate, call ->prepare() and ->freeze() * for all devices. * * SUSPEND System is going to suspend, call ->prepare() and ->suspend() * for all devices. * * HIBERNATE Hibernation image has been saved, call ->prepare() and * ->poweroff() for all devices. * * QUIESCE Contents of main memory are going to be restored from a (loaded) * hibernation image, call ->prepare() and ->freeze() for all * devices. * * RESUME System is resuming, call ->resume() and ->complete() for all * devices. * * THAW Hibernation image has been created, call ->thaw() and * ->complete() for all devices. * * RESTORE Contents of main memory have been restored from a hibernation * image, call ->restore() and ->complete() for all devices. * * RECOVER Creation of a hibernation image or restoration of the main * memory contents from a hibernation image has failed, call * ->thaw() and ->complete() for all devices. * * The following PM_EVENT_ messages are defined for internal use by * kernel subsystems. They are never issued by the PM core. * * USER_SUSPEND Manual selective suspend was issued by userspace. * * USER_RESUME Manual selective resume was issued by userspace. * * REMOTE_WAKEUP Remote-wakeup request was received from the device. * * AUTO_SUSPEND Automatic (device idle) runtime suspend was * initiated by the subsystem. * * AUTO_RESUME Automatic (device needed) runtime resume was * requested by a driver. 
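 *
 * As a rough illustration only (foo_quiesce() is a hypothetical driver
 * helper, and PMSG_IS_AUTO() is defined below), code that receives a
 * pm_message_t typically dispatches on its event field:
 *
 *	if (PMSG_IS_AUTO(msg) || msg.event == PM_EVENT_SUSPEND)
 *		foo_quiesce(dev);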
*/ #define PM_EVENT_INVALID (-1) #define PM_EVENT_ON 0x0000 #define PM_EVENT_FREEZE 0x0001 #define PM_EVENT_SUSPEND 0x0002 #define PM_EVENT_HIBERNATE 0x0004 #define PM_EVENT_QUIESCE 0x0008 #define PM_EVENT_RESUME 0x0010 #define PM_EVENT_THAW 0x0020 #define PM_EVENT_RESTORE 0x0040 #define PM_EVENT_RECOVER 0x0080 #define PM_EVENT_USER 0x0100 #define PM_EVENT_REMOTE 0x0200 #define PM_EVENT_AUTO 0x0400 #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) #define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) #define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME) #define PM_EVENT_REMOTE_RESUME (PM_EVENT_REMOTE | PM_EVENT_RESUME) #define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) #define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) #define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, }) #define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) #define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) #define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) #define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) #define PMSG_USER_SUSPEND ((struct pm_message) \ { .event = PM_EVENT_USER_SUSPEND, }) #define PMSG_USER_RESUME ((struct pm_message) \ { .event = PM_EVENT_USER_RESUME, }) #define PMSG_REMOTE_RESUME ((struct pm_message) \ { .event = PM_EVENT_REMOTE_RESUME, }) #define PMSG_AUTO_SUSPEND ((struct pm_message) \ { .event = PM_EVENT_AUTO_SUSPEND, }) #define PMSG_AUTO_RESUME ((struct pm_message) \ { .event = PM_EVENT_AUTO_RESUME, }) #define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) /* * Device run-time power management status. * * These status labels are used internally by the PM core to indicate the * current status of a device with respect to the PM core operations. They do * not reflect the actual power state of the device or its status as seen by the * driver. * * RPM_ACTIVE Device is fully operational. Indicates that the device * bus type's ->runtime_resume() callback has completed * successfully. * * RPM_SUSPENDED Device bus type's ->runtime_suspend() callback has * completed successfully. The device is regarded as * suspended. * * RPM_RESUMING Device bus type's ->runtime_resume() callback is being * executed. * * RPM_SUSPENDING Device bus type's ->runtime_suspend() callback is being * executed. */ enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING, RPM_SUSPENDED, RPM_SUSPENDING, }; /* * Device run-time power management request types. * * RPM_REQ_NONE Do nothing. 
* * RPM_REQ_IDLE Run the device bus type's ->runtime_idle() callback * * RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback * * RPM_REQ_AUTOSUSPEND Same as RPM_REQ_SUSPEND, but not until the device has * been inactive for as long as power.autosuspend_delay * * RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback */ enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE, RPM_REQ_SUSPEND, RPM_REQ_AUTOSUSPEND, RPM_REQ_RESUME, }; struct wakeup_source; struct wake_irq; struct pm_domain_data; struct pm_subsys_data { spinlock_t lock; unsigned int refcount; #ifdef CONFIG_PM_CLK struct list_head clock_list; #endif #ifdef CONFIG_PM_GENERIC_DOMAINS struct pm_domain_data *domain_data; #endif }; /* * Driver flags to control system suspend/resume behavior. * * These flags can be set by device drivers at the probe time. They need not be * cleared by the drivers as the driver core will take care of that. * * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device. * SMART_PREPARE: Check the return value of the driver's ->prepare callback. * SMART_SUSPEND: No need to resume the device from runtime suspend. * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible. * * Setting SMART_PREPARE instructs bus types and PM domains which may want * system suspend/resume callbacks to be skipped for the device to return 0 from * their ->prepare callbacks if the driver's ->prepare callback returns 0 (in * other words, the system suspend/resume callbacks can only be skipped for the * device if its driver doesn't object against that). This flag has no effect * if NEVER_SKIP is set. * * Setting SMART_SUSPEND instructs bus types and PM domains which may want to * runtime resume the device upfront during system suspend that doing so is not * necessary from the driver's perspective. It also may cause them to skip * invocations of the ->suspend_late and ->suspend_noirq callbacks provided by * the driver if they decide to leave the device in runtime suspend. * * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the * driver prefers the device to be left in suspend after system resume. 
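 *
 * A minimal sketch of setting these flags at probe time (assuming the
 * dev_pm_set_driver_flags() helper provided by kernels that define the
 * flags below):
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_PREPARE |
 *				     DPM_FLAG_SMART_SUSPEND);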
*/ #define DPM_FLAG_NEVER_SKIP BIT(0) #define DPM_FLAG_SMART_PREPARE BIT(1) #define DPM_FLAG_SMART_SUSPEND BIT(2) #define DPM_FLAG_LEAVE_SUSPENDED BIT(3) struct dev_pm_info { pm_message_t power_state; unsigned int can_wakeup:1; unsigned int async_suspend:1; bool in_dpm_list:1; /* Owned by the PM core */ bool is_prepared:1; /* Owned by the PM core */ bool is_suspended:1; /* Ditto */ bool is_noirq_suspended:1; bool is_late_suspended:1; bool no_pm:1; bool early_init:1; /* Owned by the PM core */ bool direct_complete:1; /* Owned by the PM core */ u32 driver_flags; spinlock_t lock; #ifdef CONFIG_PM_SLEEP struct list_head entry; struct completion completion; struct wakeup_source *wakeup; bool wakeup_path:1; bool syscore:1; bool no_pm_callbacks:1; /* Owned by the PM core */ unsigned int must_resume:1; /* Owned by the PM core */ unsigned int may_skip_resume:1; /* Set by subsystems */ #else unsigned int should_wakeup:1; #endif #ifdef CONFIG_PM struct hrtimer suspend_timer; u64 timer_expires; struct work_struct work; wait_queue_head_t wait_queue; struct wake_irq *wakeirq; atomic_t usage_count; atomic_t child_count; unsigned int disable_depth:3; unsigned int idle_notification:1; unsigned int request_pending:1; unsigned int deferred_resume:1; unsigned int needs_force_resume:1; unsigned int runtime_auto:1; bool ignore_children:1; unsigned int no_callbacks:1; unsigned int irq_safe:1; unsigned int use_autosuspend:1; unsigned int timer_autosuspends:1; unsigned int memalloc_noio:1; unsigned int links_count; enum rpm_request request; enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; u64 last_busy; u64 active_time; u64 suspended_time; u64 accounting_timestamp; #endif struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ void (*set_latency_tolerance)(struct device *, s32); struct dev_pm_qos *qos; }; extern int dev_pm_get_subsys_data(struct device *dev); extern void dev_pm_put_subsys_data(struct device *dev); /** * struct dev_pm_domain - power management domain representation. * * @ops: Power management operations associated with this domain. * @detach: Called when removing a device from the domain. * @activate: Called before executing probe routines for bus types and drivers. * @sync: Called after successful driver probe. * @dismiss: Called after unsuccessful driver probe and after driver removal. * * Power domains provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions instead of * subsystem-level and driver-level callbacks. */ struct dev_pm_domain { struct dev_pm_ops ops; void (*detach)(struct device *dev, bool power_off); int (*activate)(struct device *dev); void (*sync)(struct device *dev); void (*dismiss)(struct device *dev); }; /* * The PM_EVENT_ messages are also used by drivers implementing the legacy * suspend framework, based on the ->suspend() and ->resume() callbacks common * for suspend and hibernation transitions, according to the rules below. */ /* Necessary, because several drivers use PM_EVENT_PRETHAW */ #define PM_EVENT_PRETHAW PM_EVENT_QUIESCE /* * One transition is triggered by resume(), after a suspend() call; the * message is implicit: * * ON Driver starts working again, responding to hardware events * and software requests. The hardware may have gone through * a power-off reset, or it may have maintained state from the * previous suspend() which the driver will rely on while * resuming. 
On most platforms, there are no restrictions on * availability of resources like clocks during resume(). * * Other transitions are triggered by messages sent using suspend(). All * these transitions quiesce the driver, so that I/O queues are inactive. * That commonly entails turning off IRQs and DMA; there may be rules * about how to quiesce that are specific to the bus or the device's type. * (For example, network drivers mark the link state.) Other details may * differ according to the message: * * SUSPEND Quiesce, enter a low power device state appropriate for * the upcoming system state (such as PCI_D3hot), and enable * wakeup events as appropriate. * * HIBERNATE Enter a low power device state appropriate for the hibernation * state (eg. ACPI S4) and enable wakeup events as appropriate. * * FREEZE Quiesce operations so that a consistent image can be saved; * but do NOT otherwise enter a low power device state, and do * NOT emit system wakeup events. * * PRETHAW Quiesce as if for FREEZE; additionally, prepare for restoring * the system from a snapshot taken after an earlier FREEZE. * Some drivers will need to reset their hardware state instead * of preserving it, to ensure that it's never mistaken for the * state which that earlier snapshot had set up. * * A minimally power-aware driver treats all messages as SUSPEND, fully * reinitializes its device during resume() -- whether or not it was reset * during the suspend/resume cycle -- and can't issue wakeup events. * * More power-aware drivers may also use low power states at runtime as * well as during system sleep states like PM_SUSPEND_STANDBY. They may * be able to use wakeup events to exit from runtime low-power states, * or from system low-power states such as standby or suspend-to-RAM. */ #ifdef CONFIG_PM_SLEEP extern void device_pm_lock(void); extern void dpm_resume_start(pm_message_t state); extern void dpm_resume_end(pm_message_t state); extern void dpm_resume_noirq(pm_message_t state); extern void dpm_resume_early(pm_message_t state); extern void dpm_resume(pm_message_t state); extern void dpm_complete(pm_message_t state); extern void device_pm_unlock(void); extern int dpm_suspend_end(pm_message_t state); extern int dpm_suspend_start(pm_message_t state); extern int dpm_suspend_noirq(pm_message_t state); extern int dpm_suspend_late(pm_message_t state); extern int dpm_suspend(pm_message_t state); extern int dpm_prepare(pm_message_t state); extern void __suspend_report_result(const char *function, void *fn, int ret); #define suspend_report_result(fn, ret) \ do { \ __suspend_report_result(__func__, fn, ret); \ } while (0) extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)); extern int pm_generic_prepare(struct device *dev); extern int pm_generic_suspend_late(struct device *dev); extern int pm_generic_suspend_noirq(struct device *dev); extern int pm_generic_suspend(struct device *dev); extern int pm_generic_resume_early(struct device *dev); extern int pm_generic_resume_noirq(struct device *dev); extern int pm_generic_resume(struct device *dev); extern int pm_generic_freeze_noirq(struct device *dev); extern int pm_generic_freeze_late(struct device *dev); extern int pm_generic_freeze(struct device *dev); extern int pm_generic_thaw_noirq(struct device *dev); extern int pm_generic_thaw_early(struct device *dev); extern int pm_generic_thaw(struct device *dev); extern int pm_generic_restore_noirq(struct device *dev); extern int 
pm_generic_restore_early(struct device *dev); extern int pm_generic_restore(struct device *dev); extern int pm_generic_poweroff_noirq(struct device *dev); extern int pm_generic_poweroff_late(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); extern bool dev_pm_may_skip_resume(struct device *dev); extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); #else /* !CONFIG_PM_SLEEP */ #define device_pm_lock() do {} while (0) #define device_pm_unlock() do {} while (0) static inline int dpm_suspend_start(pm_message_t state) { return 0; } #define suspend_report_result(fn, ret) do {} while (0) static inline int device_pm_wait_for_dev(struct device *a, struct device *b) { return 0; } static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) { } #define pm_generic_prepare NULL #define pm_generic_suspend_late NULL #define pm_generic_suspend_noirq NULL #define pm_generic_suspend NULL #define pm_generic_resume_early NULL #define pm_generic_resume_noirq NULL #define pm_generic_resume NULL #define pm_generic_freeze_noirq NULL #define pm_generic_freeze_late NULL #define pm_generic_freeze NULL #define pm_generic_thaw_noirq NULL #define pm_generic_thaw_early NULL #define pm_generic_thaw NULL #define pm_generic_restore_noirq NULL #define pm_generic_restore_early NULL #define pm_generic_restore NULL #define pm_generic_poweroff_noirq NULL #define pm_generic_poweroff_late NULL #define pm_generic_poweroff NULL #define pm_generic_complete NULL #endif /* !CONFIG_PM_SLEEP */ /* How to reorder dpm_list after device_move() */ enum dpm_order { DPM_ORDER_NONE, DPM_ORDER_DEV_AFTER_PARENT, DPM_ORDER_PARENT_BEFORE_DEV, DPM_ORDER_DEV_LAST, }; #endif /* _LINUX_PM_H */ mm_types.h 0000644 00000057154 14722070374 0006574 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MM_TYPES_H #define _LINUX_MM_TYPES_H #include <linux/mm_types_task.h> #include <linux/auxvec.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/rbtree.h> #include <linux/rwsem.h> #include <linux/completion.h> #include <linux/cpumask.h> #include <linux/uprobes.h> #include <linux/page-flags-layout.h> #include <linux/workqueue.h> #include <asm/mmu.h> #ifndef AT_VECTOR_SIZE_ARCH #define AT_VECTOR_SIZE_ARCH 0 #endif #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) struct address_space; struct mem_cgroup; /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is we are using the page for at the * moment. Note that we have no way to track which tasks are using * a page, though if it is a pagecache page, rmap structures can tell us * who is mapping it. * * If you allocate the page using alloc_pages(), you can use some of the * space in struct page for your own purposes. The five words in the main * union are available, except for bit 0 of the first word which must be * kept clear. Many users use this word to store a pointer to an object * which is guaranteed to be aligned. If you use the same storage as * page->mapping, you must restore it to NULL before freeing the page. * * If your page will not be mapped to userspace, you can also use the four * bytes in the mapcount union, but you must call page_mapcount_reset() * before freeing it. * * If you want to use the refcount field, it must be used in such a way * that other CPUs temporarily incrementing and then decrementing the * refcount does not cause problems. 
On receiving the page from * alloc_pages(), the refcount will be positive. * * If you allocate pages of order > 0, you can use some of the fields * in each subpage, but you may need to restore some of their values * afterwards. * * SLUB uses cmpxchg_double() to atomically update its freelist and * counters. That requires that freelist & counters be adjacent and * double-word aligned. We align all struct pages to double-word * boundaries, and ensure that 'freelist' is aligned within the * struct. */ #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE #define _struct_page_alignment __aligned(2 * sizeof(unsigned long)) #else #define _struct_page_alignment #endif struct page { unsigned long flags; /* Atomic flags, some possibly * updated asynchronously */ /* * Five words (20/40 bytes) are available in this union. * WARNING: bit 0 of the first word is used for PageTail(). That * means the other users of this union MUST NOT use the bit to * avoid collision and false-positive PageTail(). */ union { struct { /* Page cache and anonymous pages */ /** * @lru: Pageout list, eg. active_list protected by * pgdat->lru_lock. Sometimes used as a generic list * by the page owner. */ struct list_head lru; /* See page-flags.h for PAGE_MAPPING_FLAGS */ struct address_space *mapping; pgoff_t index; /* Our offset within mapping. */ /** * @private: Mapping-private opaque data. * Usually used for buffer_heads if PagePrivate. * Used for swp_entry_t if PageSwapCache. * Indicates order in the buddy system if PageBuddy. */ unsigned long private; }; struct { /* page_pool used by netstack */ /** * @dma_addr: might require a 64-bit value on * 32-bit architectures. */ unsigned long dma_addr[2]; }; struct { /* slab, slob and slub */ union { struct list_head slab_list; struct { /* Partial pages */ struct page *next; #ifdef CONFIG_64BIT int pages; /* Nr of pages left */ int pobjects; /* Approximate count */ #else short int pages; short int pobjects; #endif }; }; struct kmem_cache *slab_cache; /* not slob */ /* Double-word boundary */ void *freelist; /* first free object */ union { void *s_mem; /* slab: first object */ unsigned long counters; /* SLUB */ struct { /* SLUB */ unsigned inuse:16; unsigned objects:15; unsigned frozen:1; }; }; }; struct { /* Tail pages of compound page */ unsigned long compound_head; /* Bit zero is set */ /* First tail page only */ unsigned char compound_dtor; unsigned char compound_order; atomic_t compound_mapcount; }; struct { /* Second tail page of compound page */ unsigned long _compound_pad_1; /* compound_head */ unsigned long _compound_pad_2; /* For both global and memcg */ struct list_head deferred_list; }; struct { /* Page table pages */ unsigned long _pt_pad_1; /* compound_head */ pgtable_t pmd_huge_pte; /* protected by page->ptl */ unsigned long _pt_pad_2; /* mapping */ union { struct mm_struct *pt_mm; /* x86 pgds only */ atomic_t pt_frag_refcount; /* powerpc */ }; #if ALLOC_SPLIT_PTLOCKS spinlock_t *ptl; #else spinlock_t ptl; #endif }; struct { /* ZONE_DEVICE pages */ /** @pgmap: Points to the hosting device page map. */ struct dev_pagemap *pgmap; void *zone_device_data; /* * ZONE_DEVICE private pages are counted as being * mapped so the next 3 words hold the mapping, index, * and private fields from the source anonymous or * page cache page while the page is migrated to device * private memory. * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also * use the mapping, index, and private fields when * pmem backed DAX files are mapped. */ }; /** @rcu_head: You can use this to free a page by RCU. 
*/ struct rcu_head rcu_head; }; union { /* This union is 4 bytes in size. */ /* * If the page can be mapped to userspace, encodes the number * of times this page is referenced by a page table. */ atomic_t _mapcount; /* * If the page is neither PageSlab nor mappable to userspace, * the value stored here may help determine what this page * is used for. See page-flags.h for a list of page types * which are currently stored here. */ unsigned int page_type; unsigned int active; /* SLAB */ int units; /* SLOB */ }; /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */ atomic_t _refcount; #ifdef CONFIG_MEMCG struct mem_cgroup *mem_cgroup; #endif /* * On machines where all RAM is mapped into kernel address space, * we can simply calculate the virtual address. On machines with * highmem some memory is mapped into kernel virtual memory * dynamically, so we need a place to store that address. * Note that this field could be 16 bits on x86 ... ;) * * Architectures with slow multiplication can define * WANT_PAGE_VIRTUAL in asm/page.h */ #if defined(WANT_PAGE_VIRTUAL) void *virtual; /* Kernel virtual address (NULL if not kmapped, ie. highmem) */ #endif /* WANT_PAGE_VIRTUAL */ #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS int _last_cpupid; #endif } _struct_page_alignment; static inline atomic_t *compound_mapcount_ptr(struct page *page) { return &page[1].compound_mapcount; } /* * Used for sizing the vmemmap region on some architectures */ #define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page))) #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) #define page_private(page) ((page)->private) #define set_page_private(page, v) ((page)->private = (v)) struct page_frag_cache { void * va; #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) __u16 offset; __u16 size; #else __u32 offset; #endif /* we maintain a pagecount bias, so that we dont dirty cache line * containing page->_refcount every time we allocate a fragment. */ unsigned int pagecnt_bias; bool pfmemalloc; }; typedef unsigned long vm_flags_t; /* * A region containing a mapping of a non-memory backed file under NOMMU * conditions. These are held in a global tree and are pinned by the VMAs that * map parts of them. */ struct vm_region { struct rb_node vm_rb; /* link in global region tree */ vm_flags_t vm_flags; /* VMA vm_flags */ unsigned long vm_start; /* start address of region */ unsigned long vm_end; /* region initialised to here */ unsigned long vm_top; /* region allocated to here */ unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ struct file *vm_file; /* the backing file or NULL */ struct file *vm_prfile; /* the virtual backing file or NULL */ int vm_usage; /* region usage count (access under nommu_region_sem) */ bool vm_icache_flushed : 1; /* true if the icache has been flushed for * this region */ }; #ifdef CONFIG_USERFAULTFD #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, }) struct vm_userfaultfd_ctx { struct userfaultfd_ctx *ctx; }; #else /* CONFIG_USERFAULTFD */ #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {}) struct vm_userfaultfd_ctx {}; #endif /* CONFIG_USERFAULTFD */ /* * This struct defines a memory VMM memory area. There is one of these * per VM-area/task. A VM area is any part of the process virtual memory * space that has a special rule for the page-fault handlers (ie a shared * library, the executable area etc). */ struct vm_area_struct { /* The first cache line has the info for VMA tree walking. 
 */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	struct file *vm_prfile;		/* shadow of vm_file */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

struct kioctx_table;
struct mm_struct {
	struct {
		struct vm_area_struct *mmap;	/* list of VMAs */
		struct rb_root mm_rb;
		u64 vmacache_seqnum;		/* per-thread vmacache */
#ifdef CONFIG_MMU
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		unsigned long mmap_base;	/* base of mmap area */
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		unsigned long task_size;	/* size of task vm space */
		unsigned long highest_vm_end;	/* highest vma end address */
		pgd_t * pgd;

#ifdef CONFIG_MEMBARRIER
		/**
		 * @membarrier_state: Flags controlling membarrier behavior.
		 *
		 * This field is close to @pgd to hopefully fit in the same
		 * cache-line, which needs to be touched by switch_mm().
		 */
		atomic_t membarrier_state;
#endif

		/**
		 * @mm_users: The number of users including userspace.
		 *
		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
		 * drops to 0 (i.e. when the task exits and there are no other
		 * temporary reference holders), we also release a reference on
		 * @mm_count (which may then free the &struct mm_struct if
		 * @mm_count also drops to 0).
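		 *
		 * A minimal usage sketch (these helpers live outside this
		 * header, in linux/sched/mm.h):
		 *
		 *	if (mmget_not_zero(mm)) {
		 *		... operate on mm ...
		 *		mmput(mm);
		 *	}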
*/ atomic_t mm_users; /** * @mm_count: The number of references to &struct mm_struct * (@mm_users count as 1). * * Use mmgrab()/mmdrop() to modify. When this drops to 0, the * &struct mm_struct is freed. */ atomic_t mm_count; #ifdef CONFIG_MMU atomic_long_t pgtables_bytes; /* PTE page table pages */ #endif int map_count; /* number of VMAs */ spinlock_t page_table_lock; /* Protects page tables and some * counters */ struct rw_semaphore mmap_sem; struct list_head mmlist; /* List of maybe swapped mm's. These * are globally strung together off * init_mm.mmlist, and are protected * by mmlist_lock */ unsigned long hiwater_rss; /* High-watermark of RSS usage */ unsigned long hiwater_vm; /* High-water virtual memory usage */ unsigned long total_vm; /* Total pages mapped */ unsigned long locked_vm; /* Pages that have PG_mlocked set */ atomic64_t pinned_vm; /* Refcount permanently increased */ unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ unsigned long stack_vm; /* VM_STACK */ unsigned long def_flags; spinlock_t arg_lock; /* protect the below fields */ unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; unsigned long arg_start, arg_end, env_start, env_end; unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ /* * Special counters, in some configurations protected by the * page_table_lock, in other configurations by being atomic. */ struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; /* Architecture-specific MM context */ mm_context_t context; unsigned long flags; /* Must use atomic bitops to access */ struct core_state *core_state; /* coredumping support */ #ifdef CONFIG_AIO spinlock_t ioctx_lock; struct kioctx_table __rcu *ioctx_table; #endif #ifdef CONFIG_MEMCG /* * "owner" points to a task that is regarded as the canonical * user/owner of this mm. All of the following must be true in * order for it to be changed: * * current == mm->owner * current->mm != mm * new_owner->mm == mm * new_owner->alloc_lock is held */ struct task_struct __rcu *owner; #endif struct user_namespace *user_ns; /* store ref to file /proc/<pid>/exe symlink points to */ struct file __rcu *exe_file; #ifdef CONFIG_MMU_NOTIFIER struct mmu_notifier_mm *mmu_notifier_mm; #endif #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS pgtable_t pmd_huge_pte; /* protected by page_table_lock */ #endif #ifdef CONFIG_NUMA_BALANCING /* * numa_next_scan is the next time that the PTEs will be marked * pte_numa. NUMA hinting faults will gather statistics and * migrate pages to new nodes if necessary. */ unsigned long numa_next_scan; /* Restart point for scanning and setting pte_numa */ unsigned long numa_scan_offset; /* numa_scan_seq prevents two threads setting pte_numa */ int numa_scan_seq; #endif /* * An operation with batched TLB flushing is going on. Anything * that can move process memory needs to flush the TLB when * moving a PROT_NONE or PROT_NUMA mapped page. */ atomic_t tlb_flush_pending; #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH /* See flush_tlb_batched_pending() */ bool tlb_flush_batched; #endif struct uprobes_state uprobes_state; #ifdef CONFIG_HUGETLB_PAGE atomic_long_t hugetlb_usage; #endif struct work_struct async_put_work; } __randomize_layout; /* * The mm_cpumask needs to be at the end of mm_struct, because it * is dynamically sized based on nr_cpu_ids. 
 */
	unsigned long cpu_bitmap[];
};

extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
	unsigned long cpu_bitmap = (unsigned long)mm;

	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return (struct cpumask *)&mm->cpu_bitmap;
}

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
				unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
				unsigned long start, unsigned long end);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *	spin_lock(&ptl)
	 *	mm_tlb_flush_pending();
	 *	....
	 *	spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}

struct vm_fault;

/**
 * typedef vm_fault_t - Return type for page fault handlers.
 *
 * Page fault handlers return a bitmask of %VM_FAULT values.
 */
typedef __bitwise unsigned int vm_fault_t;

/**
 * enum vm_fault_reason - Page fault handlers return a bitmask of
 * these values to tell the core VM what happened when handling the
 * fault.
Used to decide whether a process gets delivered SIGBUS or * just gets major/minor fault counters bumped up. * * @VM_FAULT_OOM: Out Of Memory * @VM_FAULT_SIGBUS: Bad access * @VM_FAULT_MAJOR: Page read from storage * @VM_FAULT_WRITE: Special case for get_user_pages * @VM_FAULT_HWPOISON: Hit poisoned small page * @VM_FAULT_HWPOISON_LARGE: Hit poisoned large page. Index encoded * in upper bits * @VM_FAULT_SIGSEGV: segmentation fault * @VM_FAULT_NOPAGE: ->fault installed the pte, not return page * @VM_FAULT_LOCKED: ->fault locked the returned page * @VM_FAULT_RETRY: ->fault blocked, must retry * @VM_FAULT_FALLBACK: huge page fault failed, fall back to small * @VM_FAULT_DONE_COW: ->fault has fully handled COW * @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs * fsync() to complete (for synchronous page faults * in DAX) * @VM_FAULT_HINDEX_MASK: mask HINDEX value * */ enum vm_fault_reason { VM_FAULT_OOM = (__force vm_fault_t)0x000001, VM_FAULT_SIGBUS = (__force vm_fault_t)0x000002, VM_FAULT_MAJOR = (__force vm_fault_t)0x000004, VM_FAULT_WRITE = (__force vm_fault_t)0x000008, VM_FAULT_HWPOISON = (__force vm_fault_t)0x000010, VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020, VM_FAULT_SIGSEGV = (__force vm_fault_t)0x000040, VM_FAULT_NOPAGE = (__force vm_fault_t)0x000100, VM_FAULT_LOCKED = (__force vm_fault_t)0x000200, VM_FAULT_RETRY = (__force vm_fault_t)0x000400, VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800, VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000, VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000, VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000, }; /* Encode hstate index for a hwpoisoned large page */ #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16)) #define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf) #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \ VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \ VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK) #define VM_FAULT_RESULT_TRACE \ { VM_FAULT_OOM, "OOM" }, \ { VM_FAULT_SIGBUS, "SIGBUS" }, \ { VM_FAULT_MAJOR, "MAJOR" }, \ { VM_FAULT_WRITE, "WRITE" }, \ { VM_FAULT_HWPOISON, "HWPOISON" }, \ { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \ { VM_FAULT_SIGSEGV, "SIGSEGV" }, \ { VM_FAULT_NOPAGE, "NOPAGE" }, \ { VM_FAULT_LOCKED, "LOCKED" }, \ { VM_FAULT_RETRY, "RETRY" }, \ { VM_FAULT_FALLBACK, "FALLBACK" }, \ { VM_FAULT_DONE_COW, "DONE_COW" }, \ { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" } struct vm_special_mapping { const char *name; /* The name, e.g. "[vdso]". */ /* * If .fault is not provided, this points to a * NULL-terminated array of pages that back the special mapping. * * This must not be NULL unless .fault is provided. */ struct page **pages; /* * If non-NULL, then this is called to resolve page faults * on the special mapping. If used, .pages is not checked. */ vm_fault_t (*fault)(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf); int (*mremap)(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma); }; enum tlb_flush_reason { TLB_FLUSH_ON_TASK_SWITCH, TLB_REMOTE_SHOOTDOWN, TLB_LOCAL_SHOOTDOWN, TLB_LOCAL_MM_SHOOTDOWN, TLB_REMOTE_SEND_IPI, NR_TLB_FLUSH_REASONS, }; /* * A swap entry has to fit into a "unsigned long", as the entry is hidden * in the "index" field of the swapper address space. 
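 *
 * Entries are packed and unpacked with the swp_entry(), swp_type() and
 * swp_offset() helpers (those live in linux/swapops.h, not here); purely
 * as an illustration:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	... swp_type(entry) and swp_offset(entry) recover the two parts ...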
*/ typedef struct { unsigned long val; } swp_entry_t; #endif /* _LINUX_MM_TYPES_H */ relay.h 0000644 00000021541 14722070374 0006042 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/relay.h * * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com) * * CONFIG_RELAY definitions and declarations */ #ifndef _LINUX_RELAY_H #define _LINUX_RELAY_H #include <linux/types.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/wait.h> #include <linux/list.h> #include <linux/irq_work.h> #include <linux/bug.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/kref.h> #include <linux/percpu.h> /* * Tracks changes to rchan/rchan_buf structs */ #define RELAYFS_CHANNEL_VERSION 7 /* * Per-cpu relay channel buffer */ struct rchan_buf { void *start; /* start of channel buffer */ void *data; /* start of current sub-buffer */ size_t offset; /* current offset into sub-buffer */ size_t subbufs_produced; /* count of sub-buffers produced */ size_t subbufs_consumed; /* count of sub-buffers consumed */ struct rchan *chan; /* associated channel */ wait_queue_head_t read_wait; /* reader wait queue */ struct irq_work wakeup_work; /* reader wakeup */ struct dentry *dentry; /* channel file dentry */ struct kref kref; /* channel buffer refcount */ struct page **page_array; /* array of current buffer pages */ unsigned int page_count; /* number of current buffer pages */ unsigned int finalized; /* buffer has been finalized */ size_t *padding; /* padding counts per sub-buffer */ size_t prev_padding; /* temporary variable */ size_t bytes_consumed; /* bytes consumed in cur read subbuf */ size_t early_bytes; /* bytes consumed before VFS inited */ unsigned int cpu; /* this buf's cpu */ } ____cacheline_aligned; /* * Relay channel data structure */ struct rchan { u32 version; /* the version of this struct */ size_t subbuf_size; /* sub-buffer size */ size_t n_subbufs; /* number of sub-buffers per buffer */ size_t alloc_size; /* total buffer size allocated */ struct rchan_callbacks *cb; /* client callbacks */ struct kref kref; /* channel refcount */ void *private_data; /* for user-defined data */ size_t last_toobig; /* tried to log event > subbuf size */ struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */ int is_global; /* One global buffer ? */ struct list_head list; /* for channel list */ struct dentry *parent; /* parent dentry passed to open */ int has_base_filename; /* has a filename associated? */ char base_filename[NAME_MAX]; /* saved base filename */ }; /* * Relay channel client callbacks */ struct rchan_callbacks { /* * subbuf_start - called on buffer-switch to a new sub-buffer * @buf: the channel buffer containing the new sub-buffer * @subbuf: the start of the new sub-buffer * @prev_subbuf: the start of the previous sub-buffer * @prev_padding: unused space at the end of previous sub-buffer * * The client should return 1 to continue logging, 0 to stop * logging. * * NOTE: subbuf_start will also be invoked when the buffer is * created, so that the first sub-buffer can be initialized * if necessary. In this case, prev_subbuf will be NULL. * * NOTE: the client can reserve bytes at the beginning of the new * sub-buffer by calling subbuf_start_reserve() in this callback. 
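 *
 * A minimal implementation, in the spirit of the example in
 * Documentation/filesystems/relay.txt, that simply stops logging once
 * the buffer is full (relay_buf_full() is declared later in this file):
 *
 *	static int subbuf_start(struct rchan_buf *buf, void *subbuf,
 *				void *prev_subbuf, size_t prev_padding)
 *	{
 *		if (relay_buf_full(buf))
 *			return 0;
 *		return 1;
 *	}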
*/ int (*subbuf_start) (struct rchan_buf *buf, void *subbuf, void *prev_subbuf, size_t prev_padding); /* * buf_mapped - relay buffer mmap notification * @buf: the channel buffer * @filp: relay file pointer * * Called when a relay file is successfully mmapped */ void (*buf_mapped)(struct rchan_buf *buf, struct file *filp); /* * buf_unmapped - relay buffer unmap notification * @buf: the channel buffer * @filp: relay file pointer * * Called when a relay file is successfully unmapped */ void (*buf_unmapped)(struct rchan_buf *buf, struct file *filp); /* * create_buf_file - create file to represent a relay channel buffer * @filename: the name of the file to create * @parent: the parent of the file to create * @mode: the mode of the file to create * @buf: the channel buffer * @is_global: outparam - set non-zero if the buffer should be global * * Called during relay_open(), once for each per-cpu buffer, * to allow the client to create a file to be used to * represent the corresponding channel buffer. If the file is * created outside of relay, the parent must also exist in * that filesystem. * * The callback should return the dentry of the file created * to represent the relay buffer. * * Setting the is_global outparam to a non-zero value will * cause relay_open() to create a single global buffer rather * than the default set of per-cpu buffers. * * See Documentation/filesystems/relay.txt for more info. */ struct dentry *(*create_buf_file)(const char *filename, struct dentry *parent, umode_t mode, struct rchan_buf *buf, int *is_global); /* * remove_buf_file - remove file representing a relay channel buffer * @dentry: the dentry of the file to remove * * Called during relay_close(), once for each per-cpu buffer, * to allow the client to remove a file used to represent a * channel buffer. * * The callback should return 0 if successful, negative if not. */ int (*remove_buf_file)(struct dentry *dentry); }; /* * CONFIG_RELAY kernel API, kernel/relay.c */ struct rchan *relay_open(const char *base_filename, struct dentry *parent, size_t subbuf_size, size_t n_subbufs, struct rchan_callbacks *cb, void *private_data); extern int relay_late_setup_files(struct rchan *chan, const char *base_filename, struct dentry *parent); extern void relay_close(struct rchan *chan); extern void relay_flush(struct rchan *chan); extern void relay_subbufs_consumed(struct rchan *chan, unsigned int cpu, size_t consumed); extern void relay_reset(struct rchan *chan); extern int relay_buf_full(struct rchan_buf *buf); extern size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length); /** * relay_write - write data into the channel * @chan: relay channel * @data: data to be written * @length: number of bytes to write * * Writes data into the current cpu's channel buffer. * * Protects the buffer by disabling interrupts. Use this * if you might be logging from interrupt context. Try * __relay_write() if you know you won't be logging from * interrupt context. */ static inline void relay_write(struct rchan *chan, const void *data, size_t length) { unsigned long flags; struct rchan_buf *buf; local_irq_save(flags); buf = *this_cpu_ptr(chan->buf); if (unlikely(buf->offset + length > chan->subbuf_size)) length = relay_switch_subbuf(buf, length); memcpy(buf->data + buf->offset, data, length); buf->offset += length; local_irq_restore(flags); } /** * __relay_write - write data into the channel * @chan: relay channel * @data: data to be written * @length: number of bytes to write * * Writes data into the current cpu's channel buffer. 
* * Protects the buffer by disabling preemption. Use * relay_write() if you might be logging from interrupt * context. */ static inline void __relay_write(struct rchan *chan, const void *data, size_t length) { struct rchan_buf *buf; buf = *get_cpu_ptr(chan->buf); if (unlikely(buf->offset + length > buf->chan->subbuf_size)) length = relay_switch_subbuf(buf, length); memcpy(buf->data + buf->offset, data, length); buf->offset += length; put_cpu_ptr(chan->buf); } /** * relay_reserve - reserve slot in channel buffer * @chan: relay channel * @length: number of bytes to reserve * * Returns pointer to reserved slot, NULL if full. * * Reserves a slot in the current cpu's channel buffer. * Does not protect the buffer at all - caller must provide * appropriate synchronization. */ static inline void *relay_reserve(struct rchan *chan, size_t length) { void *reserved = NULL; struct rchan_buf *buf = *get_cpu_ptr(chan->buf); if (unlikely(buf->offset + length > buf->chan->subbuf_size)) { length = relay_switch_subbuf(buf, length); if (!length) goto end; } reserved = buf->data + buf->offset; buf->offset += length; end: put_cpu_ptr(chan->buf); return reserved; } /** * subbuf_start_reserve - reserve bytes at the start of a sub-buffer * @buf: relay channel buffer * @length: number of bytes to reserve * * Helper function used to reserve bytes at the beginning of * a sub-buffer in the subbuf_start() callback. */ static inline void subbuf_start_reserve(struct rchan_buf *buf, size_t length) { BUG_ON(length >= buf->chan->subbuf_size - 1); buf->offset = length; } /* * exported relay file operations, kernel/relay.c */ extern const struct file_operations relay_file_operations; #ifdef CONFIG_RELAY int relay_prepare_cpu(unsigned int cpu); #else #define relay_prepare_cpu NULL #endif #endif /* _LINUX_RELAY_H */ shm.h 0000644 00000001710 14722070374 0005511 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SHM_H_ #define _LINUX_SHM_H_ #include <linux/list.h> #include <asm/page.h> #include <uapi/linux/shm.h> #include <asm/shmparam.h> struct file; #ifdef CONFIG_SYSVIPC struct sysv_shm { struct list_head shm_clist; }; long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr, unsigned long shmlba); bool is_file_shm_hugepages(struct file *file); void exit_shm(struct task_struct *task); #define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist) #else struct sysv_shm { /* empty */ }; static inline long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr, unsigned long shmlba) { return -ENOSYS; } static inline bool is_file_shm_hugepages(struct file *file) { return false; } static inline void exit_shm(struct task_struct *task) { } static inline void shm_init_task(struct task_struct *task) { } #endif #endif /* _LINUX_SHM_H_ */ taskstats_kern.h 0000644 00000001675 14722070374 0007774 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* taskstats_kern.h - kernel header for per-task statistics interface * * Copyright (C) Shailabh Nagar, IBM Corp. 2006 * (C) Balbir Singh, IBM Corp. 
2006 */ #ifndef _LINUX_TASKSTATS_KERN_H #define _LINUX_TASKSTATS_KERN_H #include <linux/taskstats.h> #include <linux/sched/signal.h> #include <linux/slab.h> #ifdef CONFIG_TASKSTATS extern struct kmem_cache *taskstats_cache; extern struct mutex taskstats_exit_mutex; static inline void taskstats_tgid_free(struct signal_struct *sig) { if (sig->stats) kmem_cache_free(taskstats_cache, sig->stats); } extern void taskstats_exit(struct task_struct *, int group_dead); extern void taskstats_init_early(void); #else static inline void taskstats_exit(struct task_struct *tsk, int group_dead) {} static inline void taskstats_tgid_free(struct signal_struct *sig) {} static inline void taskstats_init_early(void) {} #endif /* CONFIG_TASKSTATS */ #endif dns_resolver.h 0000644 00000002556 14722070374 0007440 0 ustar 00 /* * DNS Resolver upcall management for CIFS DFS and AFS * Handles host name to IP address resolution and DNS query for AFSDB RR. * * Copyright (c) International Business Machines Corp., 2008 * Author(s): Steve French (sfrench@us.ibm.com) * Wang Lei (wang840925@gmail.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _LINUX_DNS_RESOLVER_H #define _LINUX_DNS_RESOLVER_H #include <uapi/linux/dns_resolver.h> struct net; extern int dns_query(struct net *net, const char *type, const char *name, size_t namelen, const char *options, char **_result, time64_t *_expiry, bool invalidate); #endif /* _LINUX_DNS_RESOLVER_H */ nmi.h 0000644 00000015563 14722070374 0005520 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/nmi.h */ #ifndef LINUX_NMI_H #define LINUX_NMI_H #include <linux/sched.h> #include <asm/irq.h> #if defined(CONFIG_HAVE_NMI_WATCHDOG) #include <asm/nmi.h> #endif #ifdef CONFIG_LOCKUP_DETECTOR void lockup_detector_init(void); void lockup_detector_soft_poweroff(void); void lockup_detector_cleanup(void); bool is_hardlockup(void); extern int watchdog_user_enabled; extern int nmi_watchdog_user_enabled; extern int soft_watchdog_user_enabled; extern int watchdog_thresh; extern unsigned long watchdog_enabled; extern struct cpumask watchdog_cpumask; extern unsigned long *watchdog_cpumask_bits; #ifdef CONFIG_SMP extern int sysctl_softlockup_all_cpu_backtrace; extern int sysctl_hardlockup_all_cpu_backtrace; #else #define sysctl_softlockup_all_cpu_backtrace 0 #define sysctl_hardlockup_all_cpu_backtrace 0 #endif /* !CONFIG_SMP */ #else /* CONFIG_LOCKUP_DETECTOR */ static inline void lockup_detector_init(void) { } static inline void lockup_detector_soft_poweroff(void) { } static inline void lockup_detector_cleanup(void) { } #endif /* !CONFIG_LOCKUP_DETECTOR */ #ifdef CONFIG_SOFTLOCKUP_DETECTOR extern void touch_softlockup_watchdog_sched(void); extern void touch_softlockup_watchdog(void); extern void touch_softlockup_watchdog_sync(void); extern void touch_all_softlockup_watchdogs(void); extern unsigned int 
softlockup_panic; extern int lockup_detector_online_cpu(unsigned int cpu); extern int lockup_detector_offline_cpu(unsigned int cpu); #else /* CONFIG_SOFTLOCKUP_DETECTOR */ static inline void touch_softlockup_watchdog_sched(void) { } static inline void touch_softlockup_watchdog(void) { } static inline void touch_softlockup_watchdog_sync(void) { } static inline void touch_all_softlockup_watchdogs(void) { } #define lockup_detector_online_cpu NULL #define lockup_detector_offline_cpu NULL #endif /* CONFIG_SOFTLOCKUP_DETECTOR */ #ifdef CONFIG_DETECT_HUNG_TASK void reset_hung_task_detector(void); #else static inline void reset_hung_task_detector(void) { } #endif /* * The run state of the lockup detectors is controlled by the content of the * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. * * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and * 'soft_watchdog_user_enabled' are variables that are only used as an * 'interface' between the parameters in /proc/sys/kernel and the internal * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is * handled differently because its value is not boolean, and the lockup * detectors are 'suspended' while 'watchdog_thresh' is equal to zero. */ #define NMI_WATCHDOG_ENABLED_BIT 0 #define SOFT_WATCHDOG_ENABLED_BIT 1 #define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) #define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) #if defined(CONFIG_HARDLOCKUP_DETECTOR) extern void hardlockup_detector_disable(void); extern unsigned int hardlockup_panic; #else static inline void hardlockup_detector_disable(void) {} #endif #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) # define NMI_WATCHDOG_SYSCTL_PERM 0644 #else # define NMI_WATCHDOG_SYSCTL_PERM 0444 #endif #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) extern void arch_touch_nmi_watchdog(void); extern void hardlockup_detector_perf_stop(void); extern void hardlockup_detector_perf_restart(void); extern void hardlockup_detector_perf_disable(void); extern void hardlockup_detector_perf_enable(void); extern void hardlockup_detector_perf_cleanup(void); extern int hardlockup_detector_perf_init(void); #else static inline void hardlockup_detector_perf_stop(void) { } static inline void hardlockup_detector_perf_restart(void) { } static inline void hardlockup_detector_perf_disable(void) { } static inline void hardlockup_detector_perf_enable(void) { } static inline void hardlockup_detector_perf_cleanup(void) { } # if !defined(CONFIG_HAVE_NMI_WATCHDOG) static inline int hardlockup_detector_perf_init(void) { return -ENODEV; } static inline void arch_touch_nmi_watchdog(void) {} # else static inline int hardlockup_detector_perf_init(void) { return 0; } # endif #endif void watchdog_nmi_stop(void); void watchdog_nmi_start(void); int watchdog_nmi_probe(void); int watchdog_nmi_enable(unsigned int cpu); void watchdog_nmi_disable(unsigned int cpu); void lockup_detector_reconfigure(void); /** * touch_nmi_watchdog - restart NMI watchdog timeout. * * If the architecture supports the NMI watchdog, touch_nmi_watchdog() * may be used to reset the timeout - for code which intentionally * disables interrupts for a long time. This call is stateless. */ static inline void touch_nmi_watchdog(void) { arch_touch_nmi_watchdog(); touch_softlockup_watchdog(); } /* * Create trigger_all_cpu_backtrace() out of the arch-provided * base function.
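 * Return whether such support was available, to allow calling code to
 * fall back to some other mechanism. As a caller-side sketch
 * (illustrative, not from this header), an error path might do:
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();
 *
 * settling for a local stack dump when no arch support exists.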
*/ #ifdef arch_trigger_cpumask_backtrace static inline bool trigger_all_cpu_backtrace(void) { arch_trigger_cpumask_backtrace(cpu_online_mask, false); return true; } static inline bool trigger_allbutself_cpu_backtrace(void) { arch_trigger_cpumask_backtrace(cpu_online_mask, true); return true; } static inline bool trigger_cpumask_backtrace(struct cpumask *mask) { arch_trigger_cpumask_backtrace(mask, false); return true; } static inline bool trigger_single_cpu_backtrace(int cpu) { arch_trigger_cpumask_backtrace(cpumask_of(cpu), false); return true; } /* generic implementation */ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self, void (*raise)(cpumask_t *mask)); bool nmi_cpu_backtrace(struct pt_regs *regs); #else static inline bool trigger_all_cpu_backtrace(void) { return false; } static inline bool trigger_allbutself_cpu_backtrace(void) { return false; } static inline bool trigger_cpumask_backtrace(struct cpumask *mask) { return false; } static inline bool trigger_single_cpu_backtrace(int cpu) { return false; } #endif #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF u64 hw_nmi_get_sample_period(int watchdog_thresh); #endif #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) void watchdog_update_hrtimer_threshold(u64 period); #else static inline void watchdog_update_hrtimer_threshold(u64 period) { } #endif struct ctl_table; extern int proc_watchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); extern int proc_nmi_watchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); extern int proc_soft_watchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); extern int proc_watchdog_thresh(struct ctl_table *, int , void __user *, size_t *, loff_t *); extern int proc_watchdog_cpumask(struct ctl_table *, int, void __user *, size_t *, loff_t *); #ifdef CONFIG_HAVE_ACPI_APEI_NMI #include <asm/nmi.h> #endif #endif cm4000_cs.h 0000644 00000000307 14722070374 0006313 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _CM4000_H_ #define _CM4000_H_ #include <uapi/linux/cm4000_cs.h> #define DEVICE_NAME "cmm" #define MODULE_NAME "cm4000_cs" #endif /* _CM4000_H_ */ memory.h 0000644 00000011371 14722070374 0006236 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/memory.h - generic memory definition * * This is mainly for topological representation. We define the * basic "struct memory_block" here, which can be embedded in per-arch * definitions or NUMA information. * * Basic handling of the devices is done in drivers/base/memory.c * and system devices are handled in drivers/base/sys.c. * * Memory blocks are exported via sysfs in the class/memory/devices/ * directory. * */ #ifndef _LINUX_MEMORY_H_ #define _LINUX_MEMORY_H_ #include <linux/node.h> #include <linux/compiler.h> #include <linux/mutex.h> #include <linux/notifier.h> #define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS) struct memory_block { unsigned long start_section_nr; unsigned long state; /* serialized by the dev->lock */ int section_count; /* serialized by mem_sysfs_mutex */ int online_type; /* for passing data to online routine */ int phys_device; /* to which fru does this belong?
*/ void *hw; /* optional pointer to fw/hw data */ int (*phys_callback)(struct memory_block *); struct device dev; int nid; /* NID for this memory block */ }; int arch_get_memory_phys_device(unsigned long start_pfn); unsigned long memory_block_size_bytes(void); int set_memory_block_size_order(unsigned int order); /* These states are exposed to userspace as text strings in sysfs */ #define MEM_ONLINE (1<<0) /* exposed to userspace */ #define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */ #define MEM_OFFLINE (1<<2) /* exposed to userspace */ #define MEM_GOING_ONLINE (1<<3) #define MEM_CANCEL_ONLINE (1<<4) #define MEM_CANCEL_OFFLINE (1<<5) struct memory_notify { unsigned long start_pfn; unsigned long nr_pages; int status_change_nid_normal; int status_change_nid_high; int status_change_nid; }; /* * During pageblock isolation, count the number of pages within the * range [start_pfn, start_pfn + nr_pages) which are owned by code * in the notifier chain. */ #define MEM_ISOLATE_COUNT (1<<0) struct memory_isolate_notify { unsigned long start_pfn; /* Start of range to check */ unsigned int nr_pages; /* # pages in range to check */ unsigned int pages_found; /* # pages owned found by callbacks */ }; struct notifier_block; struct mem_section; /* * Priorities for the hotplug memory callback routines (stored in decreasing * order in the callback chain) */ #define SLAB_CALLBACK_PRI 1 #define IPC_CALLBACK_PRI 10 #ifndef CONFIG_MEMORY_HOTPLUG_SPARSE static inline void memory_dev_init(void) { return; } static inline int register_memory_notifier(struct notifier_block *nb) { return 0; } static inline void unregister_memory_notifier(struct notifier_block *nb) { } static inline int memory_notify(unsigned long val, void *v) { return 0; } static inline int register_memory_isolate_notifier(struct notifier_block *nb) { return 0; } static inline void unregister_memory_isolate_notifier(struct notifier_block *nb) { } static inline int memory_isolate_notify(unsigned long val, void *v) { return 0; } #else extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); extern int register_memory_isolate_notifier(struct notifier_block *nb); extern void unregister_memory_isolate_notifier(struct notifier_block *nb); int create_memory_block_devices(unsigned long start, unsigned long size); void remove_memory_block_devices(unsigned long start, unsigned long size); extern void memory_dev_init(void); extern int memory_notify(unsigned long val, void *v); extern int memory_isolate_notify(unsigned long val, void *v); extern struct memory_block *find_memory_block(struct mem_section *); typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); extern int walk_memory_blocks(unsigned long start, unsigned long size, void *arg, walk_memory_blocks_func_t func); extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func); #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ #ifdef CONFIG_MEMORY_HOTPLUG #define hotplug_memory_notifier(fn, pri) ({ \ static __meminitdata struct notifier_block fn##_mem_nb =\ { .notifier_call = fn, .priority = pri };\ register_memory_notifier(&fn##_mem_nb); \ }) #define register_hotmemory_notifier(nb) register_memory_notifier(nb) #define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb) #else #define hotplug_memory_notifier(fn, pri) ({ 0; }) /* These aren't inline functions due to a GCC bug. 
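 * Either way, a client registers its callback through the same macro;
 * a rough sketch (the callback name is hypothetical):
 *
 *	static int example_mem_notify(struct notifier_block *nb,
 *				      unsigned long action, void *arg)
 *	{
 *		struct memory_notify *mn = arg;
 *
 *		(react to MEM_GOING_ONLINE, MEM_OFFLINE, etc. using mn)
 *		return NOTIFY_OK;
 *	}
 *
 *	hotplug_memory_notifier(example_mem_notify, IPC_CALLBACK_PRI);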
*/ #define register_hotmemory_notifier(nb) ({ (void)(nb); 0; }) #define unregister_hotmemory_notifier(nb) ({ (void)(nb); }) #endif /* * Kernel text modification mutex, used for code patching. Users of this lock * can sleep. */ extern struct mutex text_mutex; #endif /* _LINUX_MEMORY_H_ */ irqflags.h 0000644 00000012137 14722070374 0006537 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/irqflags.h * * IRQ flags tracing: follow the state of the hardirq and softirq flags and * provide callbacks for transitions between ON and OFF states. * * This file gets included from lowlevel asm headers too, to provide * wrapped versions of the local_irq_*() APIs, based on the * raw_local_irq_*() macros from the lowlevel headers. */ #ifndef _LINUX_TRACE_IRQFLAGS_H #define _LINUX_TRACE_IRQFLAGS_H #include <linux/typecheck.h> #include <asm/irqflags.h> /* Currently trace_softirqs_on/off is used only by lockdep */ #ifdef CONFIG_PROVE_LOCKING extern void trace_softirqs_on(unsigned long ip); extern void trace_softirqs_off(unsigned long ip); extern void lockdep_hardirqs_on(unsigned long ip); extern void lockdep_hardirqs_off(unsigned long ip); #else static inline void trace_softirqs_on(unsigned long ip) { } static inline void trace_softirqs_off(unsigned long ip) { } static inline void lockdep_hardirqs_on(unsigned long ip) { } static inline void lockdep_hardirqs_off(unsigned long ip) { } #endif #ifdef CONFIG_TRACE_IRQFLAGS extern void trace_hardirqs_on(void); extern void trace_hardirqs_off(void); # define trace_hardirq_context(p) ((p)->hardirq_context) # define trace_softirq_context(p) ((p)->softirq_context) # define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) # define trace_hardirq_enter() \ do { \ current->hardirq_context++; \ } while (0) # define trace_hardirq_exit() \ do { \ current->hardirq_context--; \ } while (0) # define lockdep_softirq_enter() \ do { \ current->softirq_context++; \ } while (0) # define lockdep_softirq_exit() \ do { \ current->softirq_context--; \ } while (0) #else # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) # define trace_hardirq_context(p) 0 # define trace_softirq_context(p) 0 # define trace_hardirqs_enabled(p) 0 # define trace_softirqs_enabled(p) 0 # define trace_hardirq_enter() do { } while (0) # define trace_hardirq_exit() do { } while (0) # define lockdep_softirq_enter() do { } while (0) # define lockdep_softirq_exit() do { } while (0) #endif #if defined(CONFIG_IRQSOFF_TRACER) || \ defined(CONFIG_PREEMPT_TRACER) extern void stop_critical_timings(void); extern void start_critical_timings(void); #else # define stop_critical_timings() do { } while (0) # define start_critical_timings() do { } while (0) #endif /* * Wrap the arch provided IRQ routines to provide appropriate checks. 
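 *
 * The usual pattern built on top of these wrappers, via the
 * local_irq_*() macros defined further down, is roughly:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	(critical section with interrupts disabled)
 *	local_irq_restore(flags);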
*/ #define raw_local_irq_disable() arch_local_irq_disable() #define raw_local_irq_enable() arch_local_irq_enable() #define raw_local_irq_save(flags) \ do { \ typecheck(unsigned long, flags); \ flags = arch_local_irq_save(); \ } while (0) #define raw_local_irq_restore(flags) \ do { \ typecheck(unsigned long, flags); \ arch_local_irq_restore(flags); \ } while (0) #define raw_local_save_flags(flags) \ do { \ typecheck(unsigned long, flags); \ flags = arch_local_save_flags(); \ } while (0) #define raw_irqs_disabled_flags(flags) \ ({ \ typecheck(unsigned long, flags); \ arch_irqs_disabled_flags(flags); \ }) #define raw_irqs_disabled() (arch_irqs_disabled()) #define raw_safe_halt() arch_safe_halt() /* * The local_irq_*() APIs are equal to the raw_local_irq*() * if !TRACE_IRQFLAGS. */ #ifdef CONFIG_TRACE_IRQFLAGS #define local_irq_enable() \ do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) #define local_irq_disable() \ do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) #define local_irq_save(flags) \ do { \ raw_local_irq_save(flags); \ trace_hardirqs_off(); \ } while (0) #define local_irq_restore(flags) \ do { \ if (raw_irqs_disabled_flags(flags)) { \ raw_local_irq_restore(flags); \ trace_hardirqs_off(); \ } else { \ trace_hardirqs_on(); \ raw_local_irq_restore(flags); \ } \ } while (0) #define safe_halt() \ do { \ trace_hardirqs_on(); \ raw_safe_halt(); \ } while (0) #else /* !CONFIG_TRACE_IRQFLAGS */ #define local_irq_enable() do { raw_local_irq_enable(); } while (0) #define local_irq_disable() do { raw_local_irq_disable(); } while (0) #define local_irq_save(flags) \ do { \ raw_local_irq_save(flags); \ } while (0) #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) #define safe_halt() do { raw_safe_halt(); } while (0) #endif /* CONFIG_TRACE_IRQFLAGS */ #define local_save_flags(flags) raw_local_save_flags(flags) /* * Some architectures don't define arch_irqs_disabled(), so even if either * definition would be fine we need to use different ones for the time being * to avoid build issues. */ #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT #define irqs_disabled() \ ({ \ unsigned long _flags; \ raw_local_save_flags(_flags); \ raw_irqs_disabled_flags(_flags); \ }) #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ #define irqs_disabled() raw_irqs_disabled() #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) #endif agpgart.h 0000644 00000007504 14722070374 0006356 0 ustar 00 /* * AGPGART module version 0.99 * Copyright (C) 1999 Jeff Hartmann * Copyright (C) 1999 Precision Insight, Inc. * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #ifndef _AGP_H #define _AGP_H 1 #include <linux/mutex.h> #include <linux/agp_backend.h> #include <uapi/linux/agpgart.h> #define AGPGART_MINOR 175 struct agp_info { struct agp_version version; /* version of the driver */ u32 bridge_id; /* bridge vendor/device */ u32 agp_mode; /* mode info of bridge */ unsigned long aper_base;/* base of aperture */ size_t aper_size; /* size of aperture */ size_t pg_total; /* max pages (swap + system) */ size_t pg_system; /* max pages (system) */ size_t pg_used; /* current pages used */ }; struct agp_setup { u32 agp_mode; /* mode info of bridge */ }; /* * The "prot" down below needs still a "sleep" flag somehow ... */ struct agp_segment { off_t pg_start; /* starting page to populate */ size_t pg_count; /* number of pages */ int prot; /* prot flags for mmap */ }; struct agp_segment_priv { off_t pg_start; size_t pg_count; pgprot_t prot; }; struct agp_region { pid_t pid; /* pid of process */ size_t seg_count; /* number of segments */ struct agp_segment *seg_list; }; struct agp_allocate { int key; /* tag of allocation */ size_t pg_count; /* number of pages */ u32 type; /* 0 == normal, other devspec */ u32 physical; /* device specific (some devices * need a phys address of the * actual page behind the gatt * table) */ }; struct agp_bind { int key; /* tag of allocation */ off_t pg_start; /* starting page to populate */ }; struct agp_unbind { int key; /* tag of allocation */ u32 priority; /* priority for paging out */ }; struct agp_client { struct agp_client *next; struct agp_client *prev; pid_t pid; int num_segments; struct agp_segment_priv **segments; }; struct agp_controller { struct agp_controller *next; struct agp_controller *prev; pid_t pid; int num_clients; struct agp_memory *pool; struct agp_client *clients; }; #define AGP_FF_ALLOW_CLIENT 0 #define AGP_FF_ALLOW_CONTROLLER 1 #define AGP_FF_IS_CLIENT 2 #define AGP_FF_IS_CONTROLLER 3 #define AGP_FF_IS_VALID 4 struct agp_file_private { struct agp_file_private *next; struct agp_file_private *prev; pid_t my_pid; unsigned long access_flags; /* long req'd for set_bit --RR */ }; struct agp_front_data { struct mutex agp_mutex; struct agp_controller *current_controller; struct agp_controller *controllers; struct agp_file_private *file_priv_list; bool used_by_controller; bool backend_acquired; }; #endif /* _AGP_H */ seg6.h 0000644 00000000171 14722070374 0005566 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SEG6_H #define _LINUX_SEG6_H #include <uapi/linux/seg6.h> #endif cdev.h 0000644 00000001515 14722070374 0005646 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CDEV_H #define _LINUX_CDEV_H #include <linux/kobject.h> #include <linux/kdev_t.h> #include <linux/list.h> #include <linux/device.h> struct file_operations; struct inode; struct module; struct cdev { struct kobject kobj; struct module *owner; const struct file_operations *ops; struct list_head list; dev_t dev; unsigned int count; } __randomize_layout; void cdev_init(struct cdev *, const struct file_operations *); struct cdev *cdev_alloc(void); void cdev_put(struct cdev *p); int cdev_add(struct cdev *, dev_t, unsigned); void cdev_set_parent(struct cdev *p, struct kobject *kobj); int cdev_device_add(struct cdev *cdev, struct device *dev); void 
cdev_device_del(struct cdev *cdev, struct device *dev); void cdev_del(struct cdev *); void cd_forget(struct inode *); #endif lru_cache.h 0000644 00000027503 14722070374 0006657 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* lru_cache.c This file is part of DRBD by Philipp Reisner and Lars Ellenberg. Copyright (C) 2003-2008, LINBIT Information Technologies GmbH. Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>. Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. */ #ifndef LRU_CACHE_H #define LRU_CACHE_H #include <linux/list.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/string.h> /* for memset */ #include <linux/seq_file.h> /* This header file (and its .c file; kernel-doc of functions see there) define a helper framework to easily keep track of index:label associations, and changes to an "active set" of objects, as well as pending transactions, to persistently record those changes. We use an LRU policy if it is necessary to "cool down" a region currently in the active set before we can "heat" a previously unused region. Because of this latter property, it is called "lru_cache". As it actually Tracks Objects in an Active SeT, we could also call it toast (incidentally that is what may happen to the data on the backend storage upon next resync, if we don't get it right). What for? We replicate IO (more or less synchronously) to local and remote disk. For crash recovery after replication node failure, we need to resync all regions that have been the target of in-flight WRITE IO (in use, or "hot", regions), as we don't know whether or not those WRITEs have made it to stable storage. To avoid a "full resync", we need to persistently track these regions. This is known as "write intent log", and can be implemented as on-disk (coarse or fine grained) bitmap, or other meta data. To avoid the overhead of frequent extra writes to this meta data area, usually the condition is softened to regions that _may_ have been the target of in-flight WRITE IO, e.g. by only lazily clearing the on-disk write-intent bitmap, trading frequency of meta data transactions against amount of (possibly unnecessary) resync traffic. If we set a hard limit on the area that may be "hot" at any given time, we limit the amount of resync traffic needed for crash recovery. For recovery after replication link failure, we need to resync all blocks that have been changed on the other replica in the meantime, or, if both replicas have been changed independently [*], all blocks that have been changed on either replica in the meantime. [*] usually as a result of a cluster split-brain and insufficient protection. But there are valid use cases to do this on purpose. Tracking those blocks can be implemented as "dirty bitmap". Having it fine-grained reduces the amount of resync traffic. It should also be persistent, to allow for reboots (or crashes) while the replication link is down. There are various possible implementations for persistently storing write intent log information, three of which are mentioned here. "Chunk dirtying" The on-disk "dirty bitmap" may be re-used as "write-intent" bitmap as well.
To reduce the frequency of bitmap updates for write-intent log purposes, one could dirty "chunks" (of some size) at a time of the (fine grained) on-disk bitmap, while keeping the in-memory "dirty" bitmap as clean as possible, flushing it to disk again when a previously "hot" (and on-disk dirtied as full chunk) area "cools down" again (no IO in flight anymore, and none expected in the near future either). "Explicit (coarse) write intent bitmap" Another implementation could choose a (probably coarse) explicit bitmap, for write-intent log purposes, additionally to the fine grained dirty bitmap. "Activity log" Yet another implementation may keep track of the hot regions, by starting with an empty set, and writing down a journal of region numbers that have become "hot", or have "cooled down" again. To be able to use a ring buffer for this journal of changes to the active set, we not only record the actual changes to that set, but also record the unchanging members of the set in a round robin fashion. To do so, we use a fixed (but configurable) number of slots which we can identify by index, and associate region numbers (labels) with these indices. For each transaction recording a change to the active set, we record the change itself (index: -old_label, +new_label), and which index is associated with which label (index: current_label) within a certain sliding window that is moved further over the available indices with each such transaction. Thus, for crash recovery, if the ringbuffer is sufficiently large, we can accurately reconstruct the active set. Sufficiently large depends only on maximum number of active objects, and the size of the sliding window recording "index: current_label" associations within each transaction. This is what we call the "activity log". Currently we need one activity log transaction per single label change, which does not give much benefit over the "dirty chunks of bitmap" approach, other than potentially fewer seeks. We plan to change the transaction format to support multiple changes per transaction, which then would reduce several (disjoint, "random") updates to the bitmap into one transaction to the activity log ring buffer. */ /* this defines an element in a tracked set * .colision is for hash table lookup. * When we process a new IO request, we know its sector, thus can deduce the * region number (label) easily. To do the label -> object lookup without a * full list walk, we use a simple hash table. * * .list is on one of three lists: * in_use: currently in use (refcnt > 0, lc_number != LC_FREE) * lru: unused but ready to be reused or recycled * (lc_refcnt == 0, lc_number != LC_FREE), * free: unused but ready to be recycled * (lc_refcnt == 0, lc_number == LC_FREE), * * an element is said to be "in the active set", * if either on "in_use" or "lru", i.e. lc_number != LC_FREE. * * DRBD currently (May 2009) only uses 61 elements on the resync lru_cache * (total memory usage 2 pages), and up to 3833 elements on the act_log * lru_cache, totalling ~215 kB for 64bit architecture, ~53 pages. * * We usually do not actually free these objects again, but only "recycle" * them, as the change "index: -old_label, +LC_FREE" would need a transaction * as well. Which also means that using a kmem_cache to allocate the objects * from wastes some resources. * But it avoids high order page allocations in kmalloc.
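 *
 * As a rough usage sketch (every name here is hypothetical, not
 * DRBD's actual code):
 *
 *	struct example_extent {
 *		struct lc_element lce;
 *	};
 *
 *	lc = lc_create("example", example_cache, 8, 64,
 *		       sizeof(struct example_extent),
 *		       offsetof(struct example_extent, lce));
 *	e = lc_get(lc, region_nr);
 *	if (e) {
 *		struct example_extent *ext =
 *			lc_entry(e, struct example_extent, lce);
 *		(use ext, then drop the reference)
 *		lc_put(lc, e);
 *	}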
*/ struct lc_element { struct hlist_node colision; struct list_head list; /* LRU list or free list */ unsigned refcnt; /* back "pointer" into lc_cache->element[index], * for paranoia, and for "lc_element_to_index" */ unsigned lc_index; /* if we want to track a larger set of objects, * it needs to become arch independent u64 */ unsigned lc_number; /* special label when on free list */ #define LC_FREE (~0U) /* for pending changes */ unsigned lc_new_number; }; struct lru_cache { /* the least recently used item is kept at lru->prev */ struct list_head lru; struct list_head free; struct list_head in_use; struct list_head to_be_changed; /* the pre-created kmem cache to allocate the objects from */ struct kmem_cache *lc_cache; /* size of tracked objects, used to memset(,0,) them in lc_reset */ size_t element_size; /* offset of struct lc_element member in the tracked object */ size_t element_off; /* number of elements (indices) */ unsigned int nr_elements; /* Arbitrary limit on maximum tracked objects. Practical limit is much * lower due to allocation failures, probably. For typical use cases, * nr_elements should be a few thousand at most. * This also limits the maximum value of lc_element.lc_index, allowing the * 8 high bits of .lc_index to be overloaded with flags in the future. */ #define LC_MAX_ACTIVE (1<<24) /* allow to accumulate a few (index:label) changes, * but no more than max_pending_changes */ unsigned int max_pending_changes; /* number of elements currently on to_be_changed list */ unsigned int pending_changes; /* statistics */ unsigned used; /* number of elements currently on in_use list */ unsigned long hits, misses, starving, locked, changed; /* see below: flag-bits for lru_cache */ unsigned long flags; void *lc_private; const char *name; /* nr_elements there */ struct hlist_head *lc_slot; struct lc_element **lc_element; }; /* flag-bits for lru_cache */ enum { /* debugging aid, to catch concurrent access early. * user needs to guarantee exclusive access by proper locking! */ __LC_PARANOIA, /* annotate that the set is "dirty", possibly accumulating further * changes, until a transaction is finally triggered */ __LC_DIRTY, /* Locked, no further changes allowed. * Also used to serialize changing transactions. */ __LC_LOCKED, /* if we need to change the set, but currently there is no free nor * unused element available, we are "starving", and must not give out * further references, to guarantee that eventually some refcnt will * drop to zero and we will be able to make progress again, changing * the set, writing the transaction. * if the statistics say we are frequently starving, * nr_elements is too small.
*/ __LC_STARVING, }; #define LC_PARANOIA (1<<__LC_PARANOIA) #define LC_DIRTY (1<<__LC_DIRTY) #define LC_LOCKED (1<<__LC_LOCKED) #define LC_STARVING (1<<__LC_STARVING) extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache, unsigned max_pending_changes, unsigned e_count, size_t e_size, size_t e_off); extern void lc_reset(struct lru_cache *lc); extern void lc_destroy(struct lru_cache *lc); extern void lc_set(struct lru_cache *lc, unsigned int enr, int index); extern void lc_del(struct lru_cache *lc, struct lc_element *element); extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr); extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr); extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr); extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr); extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e); extern void lc_committed(struct lru_cache *lc); struct seq_file; extern void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc); extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext, void (*detail) (struct seq_file *, struct lc_element *)); /** * lc_try_lock_for_transaction - can be used to stop lc_get() from changing the tracked set * @lc: the lru cache to operate on * * Allows (expects) the set to be "dirty". Note that the reference counts and * order on the active and lru lists may still change. Used to serialize * changing transactions. Returns true if we acquired the lock. */ static inline int lc_try_lock_for_transaction(struct lru_cache *lc) { return !test_and_set_bit(__LC_LOCKED, &lc->flags); } /** * lc_try_lock - variant to stop lc_get() from changing the tracked set * @lc: the lru cache to operate on * * Note that the reference counts and order on the active and lru lists may * still change. Only works on a "clean" set. Returns true if we acquired the * lock, which means there are no pending changes, and any further attempt to * change the set will not succeed until the next lc_unlock(). */ extern int lc_try_lock(struct lru_cache *lc); /** * lc_unlock - unlock @lc, allow lc_get() to change the set again * @lc: the lru cache to operate on */ static inline void lc_unlock(struct lru_cache *lc) { clear_bit(__LC_DIRTY, &lc->flags); clear_bit_unlock(__LC_LOCKED, &lc->flags); } extern bool lc_is_used(struct lru_cache *lc, unsigned int enr); #define lc_entry(ptr, type, member) \ container_of(ptr, type, member) extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i); extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e); #endif cacheinfo.h 0000644 00000006503 14722070374 0006646 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CACHEINFO_H #define _LINUX_CACHEINFO_H #include <linux/bitops.h> #include <linux/cpumask.h> #include <linux/smp.h> struct device_node; struct attribute; enum cache_type { CACHE_TYPE_NOCACHE = 0, CACHE_TYPE_INST = BIT(0), CACHE_TYPE_DATA = BIT(1), CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA, CACHE_TYPE_UNIFIED = BIT(2), }; extern unsigned int coherency_max_size; /** * struct cacheinfo - represent a cache leaf node * @id: This cache's id. It is unique among caches with the same (type, level).
* @type: type of the cache - data, inst or unified * @level: represents the hierarchy in the multi-level cache * @coherency_line_size: size of each cache line usually representing * the minimum amount of data that gets transferred from memory * @number_of_sets: total number of sets, a set is a collection of cache * lines sharing the same index * @ways_of_associativity: number of ways in which a particular memory * block can be placed in the cache * @physical_line_partition: number of physical cache lines sharing the * same cachetag * @size: Total size of the cache * @shared_cpu_map: logical cpumask representing all the cpus sharing * this cache node * @attributes: bitfield representing various cache attributes * @fw_token: Unique value used to determine if different cacheinfo * structures represent a single hardware cache instance. * @disable_sysfs: indicates whether this node is visible to the user via * sysfs or not * @priv: pointer to any private data structure specific to particular * cache design * * While @of_node, @disable_sysfs and @priv are used for internal book * keeping, the remaining members form the core properties of the cache */ struct cacheinfo { unsigned int id; enum cache_type type; unsigned int level; unsigned int coherency_line_size; unsigned int number_of_sets; unsigned int ways_of_associativity; unsigned int physical_line_partition; unsigned int size; cpumask_t shared_cpu_map; unsigned int attributes; #define CACHE_WRITE_THROUGH BIT(0) #define CACHE_WRITE_BACK BIT(1) #define CACHE_WRITE_POLICY_MASK \ (CACHE_WRITE_THROUGH | CACHE_WRITE_BACK) #define CACHE_READ_ALLOCATE BIT(2) #define CACHE_WRITE_ALLOCATE BIT(3) #define CACHE_ALLOCATE_POLICY_MASK \ (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE) #define CACHE_ID BIT(4) void *fw_token; bool disable_sysfs; void *priv; }; struct cpu_cacheinfo { struct cacheinfo *info_list; unsigned int num_levels; unsigned int num_leaves; bool cpu_map_populated; }; struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); int init_cache_level(unsigned int cpu); int populate_cache_leaves(unsigned int cpu); int cache_setup_acpi(unsigned int cpu); #ifndef CONFIG_ACPI_PPTT /* * acpi_find_last_cache_level is only called on ACPI enabled * platforms using the PPTT for topology. This means that if * the platform supports other firmware configuration methods * we need to stub out the call when ACPI is disabled. * ACPI enabled platforms not using PPTT won't be making calls * to this function so we need not worry about them. */ static inline int acpi_find_last_cache_level(unsigned int cpu) { return 0; } #else int acpi_find_last_cache_level(unsigned int cpu); #endif const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); #endif /* _LINUX_CACHEINFO_H */ tty_ldisc.h 0000644 00000017733 14722070374 0006734 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_LDISC_H #define _LINUX_TTY_LDISC_H /* * This structure defines the interface between the tty line discipline * implementation and the tty routines. The following routines can be * defined; unless noted otherwise, they are optional, and can be * filled in with a null pointer. * * int (*open)(struct tty_struct *); * * This function is called when the line discipline is associated * with the tty. The line discipline can use this as an * opportunity to initialize any state needed by the ldisc routines. 
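 *
 * A minimal sketch (the discipline and its state struct are
 * hypothetical, for illustration only):
 *
 *	static int example_ldisc_open(struct tty_struct *tty)
 *	{
 *		struct example_state *st;
 *
 *		st = kzalloc(sizeof(*st), GFP_KERNEL);
 *		if (!st)
 *			return -ENOMEM;
 *		tty->disc_data = st;
 *		return 0;
 *	}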
* * void (*close)(struct tty_struct *); * * This function is called when the line discipline is being * shutdown, either because the tty is being closed or because * the tty is being changed to use a new line discipline * * void (*flush_buffer)(struct tty_struct *tty); * * This function instructs the line discipline to clear its * buffers of any input characters it may have queued to be * delivered to the user mode process. * * ssize_t (*read)(struct tty_struct * tty, struct file * file, * unsigned char * buf, size_t nr); * * This function is called when the user requests to read from * the tty. The line discipline will return whatever characters * it has buffered up for the user. If this function is not * defined, the user will receive an EIO error. * * ssize_t (*write)(struct tty_struct * tty, struct file * file, * const unsigned char * buf, size_t nr); * * This function is called when the user requests to write to the * tty. The line discipline will deliver the characters to the * low-level tty device for transmission, optionally performing * some processing on the characters first. If this function is * not defined, the user will receive an EIO error. * * int (*ioctl)(struct tty_struct * tty, struct file * file, * unsigned int cmd, unsigned long arg); * * This function is called when the user requests an ioctl which * is not handled by the tty layer or the low-level tty driver. * It is intended for ioctls which affect line discipline * operation. Note that the search order for ioctls is (1) tty * layer, (2) tty low-level driver, (3) line discipline. So a * low-level driver can "grab" an ioctl request before the line * discipline has a chance to see it. * * int (*compat_ioctl)(struct tty_struct * tty, struct file * file, * unsigned int cmd, unsigned long arg); * * Process ioctl calls from a 32-bit process on a 64-bit system * * NOTE: only ioctls that are neither "pointer to compatible * structure" nor tty-generic. Something private that takes * an integer or a pointer to a wordsize-sensitive structure * belongs here, but most ldiscs will happily leave * it NULL. * * void (*set_termios)(struct tty_struct *tty, struct ktermios * old); * * This function notifies the line discipline that a change has * been made to the termios structure. * * int (*poll)(struct tty_struct * tty, struct file * file, * poll_table *wait); * * This function is called when a user attempts to select/poll on a * tty device. It is solely the responsibility of the line * discipline to handle poll requests. * * void (*receive_buf)(struct tty_struct *, const unsigned char *cp, * char *fp, int count); * * This function is called by the low-level tty driver to send * characters received by the hardware to the line discipline for * processing. <cp> is a pointer to the buffer of input * characters received by the device. <fp> is a pointer to a * pointer of flag bytes which indicate whether a character was * received with a parity error, etc. <fp> may be NULL to indicate * all data received is TTY_NORMAL. * * void (*write_wakeup)(struct tty_struct *); * * This function is called by the low-level tty driver to signal * that the line discipline should try to send more characters to the * low-level driver for transmission. If the line discipline does * not have any more data to send, it can just return. If the line * discipline does have some data to send, please schedule a tasklet * or workqueue to do the real data transfer. Do not send data in * this hook, as it may lead to a deadlock.
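 *
 * A sketch of that deferral, reusing the hypothetical example_state
 * above and assuming it embeds a struct work_struct tx_work:
 *
 *	static void example_write_wakeup(struct tty_struct *tty)
 *	{
 *		struct example_state *st = tty->disc_data;
 *
 *		schedule_work(&st->tx_work);
 *	}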
* * int (*hangup)(struct tty_struct *) * * Called on a hangup. Tells the discipline that it should * cease I/O to the tty driver. Can sleep. The driver should * seek to perform this action quickly but should wait until * any pending driver I/O is completed. * * void (*dcd_change)(struct tty_struct *tty, unsigned int status) * * Tells the discipline that the DCD pin has changed its status. * Used exclusively by the N_PPS (Pulse-Per-Second) line discipline. * * int (*receive_buf2)(struct tty_struct *, const unsigned char *cp, * char *fp, int count); * * This function is called by the low-level tty driver to send * characters received by the hardware to the line discipline for * processing. <cp> is a pointer to the buffer of input * characters received by the device. <fp> is a pointer to a * pointer of flag bytes which indicate whether a character was * received with a parity error, etc. <fp> may be NULL to indicate * all data received is TTY_NORMAL. * If assigned, prefer this function for automatic flow control. */ #include <linux/fs.h> #include <linux/wait.h> #include <linux/atomic.h> /* * the semaphore definition */ struct ld_semaphore { atomic_long_t count; raw_spinlock_t wait_lock; unsigned int wait_readers; struct list_head read_wait; struct list_head write_wait; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif }; extern void __init_ldsem(struct ld_semaphore *sem, const char *name, struct lock_class_key *key); #define init_ldsem(sem) \ do { \ static struct lock_class_key __key; \ \ __init_ldsem((sem), #sem, &__key); \ } while (0) extern int ldsem_down_read(struct ld_semaphore *sem, long timeout); extern int ldsem_down_read_trylock(struct ld_semaphore *sem); extern int ldsem_down_write(struct ld_semaphore *sem, long timeout); extern int ldsem_down_write_trylock(struct ld_semaphore *sem); extern void ldsem_up_read(struct ld_semaphore *sem); extern void ldsem_up_write(struct ld_semaphore *sem); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout); extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, long timeout); #else # define ldsem_down_read_nested(sem, subclass, timeout) \ ldsem_down_read(sem, timeout) # define ldsem_down_write_nested(sem, subclass, timeout) \ ldsem_down_write(sem, timeout) #endif struct tty_ldisc_ops { int magic; char *name; int num; int flags; /* * The following routines are called from above. */ int (*open)(struct tty_struct *); void (*close)(struct tty_struct *); void (*flush_buffer)(struct tty_struct *tty); ssize_t (*read)(struct tty_struct *tty, struct file *file, unsigned char __user *buf, size_t nr); ssize_t (*write)(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr); int (*ioctl)(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); int (*compat_ioctl)(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); void (*set_termios)(struct tty_struct *tty, struct ktermios *old); __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); int (*hangup)(struct tty_struct *tty); /* * The following routines are called from below.
*/ void (*receive_buf)(struct tty_struct *, const unsigned char *cp, char *fp, int count); void (*write_wakeup)(struct tty_struct *); void (*dcd_change)(struct tty_struct *, unsigned int); int (*receive_buf2)(struct tty_struct *, const unsigned char *cp, char *fp, int count); struct module *owner; int refcount; }; struct tty_ldisc { struct tty_ldisc_ops *ops; struct tty_struct *tty; }; #define TTY_LDISC_MAGIC 0x5403 #define LDISC_FLAG_DEFINED 0x00000001 #define MODULE_ALIAS_LDISC(ldisc) \ MODULE_ALIAS("tty-ldisc-" __stringify(ldisc)) #endif /* _LINUX_TTY_LDISC_H */ qcom-geni-se.h 0000644 00000030331 14722070374 0007207 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */ #ifndef _LINUX_QCOM_GENI_SE #define _LINUX_QCOM_GENI_SE /* Transfer mode supported by GENI Serial Engines */ enum geni_se_xfer_mode { GENI_SE_INVALID, GENI_SE_FIFO, GENI_SE_DMA, }; /* Protocols supported by GENI Serial Engines */ enum geni_se_protocol_type { GENI_SE_NONE, GENI_SE_SPI, GENI_SE_UART, GENI_SE_I2C, GENI_SE_I3C, }; struct geni_wrapper; struct clk; /** * struct geni_se - GENI Serial Engine * @base: Base Address of the Serial Engine's register block * @dev: Pointer to the Serial Engine device * @wrapper: Pointer to the parent QUP Wrapper core * @clk: Handle to the core serial engine clock * @num_clk_levels: Number of valid clock levels in clk_perf_tbl * @clk_perf_tbl: Table of clock frequency input to serial engine clock */ struct geni_se { void __iomem *base; struct device *dev; struct geni_wrapper *wrapper; struct clk *clk; unsigned int num_clk_levels; unsigned long *clk_perf_tbl; }; /* Common SE registers */ #define GENI_FORCE_DEFAULT_REG 0x20 #define SE_GENI_STATUS 0x40 #define GENI_SER_M_CLK_CFG 0x48 #define GENI_SER_S_CLK_CFG 0x4c #define GENI_FW_REVISION_RO 0x68 #define SE_GENI_CLK_SEL 0x7c #define SE_GENI_DMA_MODE_EN 0x258 #define SE_GENI_M_CMD0 0x600 #define SE_GENI_M_CMD_CTRL_REG 0x604 #define SE_GENI_M_IRQ_STATUS 0x610 #define SE_GENI_M_IRQ_EN 0x614 #define SE_GENI_M_IRQ_CLEAR 0x618 #define SE_GENI_S_CMD0 0x630 #define SE_GENI_S_CMD_CTRL_REG 0x634 #define SE_GENI_S_IRQ_STATUS 0x640 #define SE_GENI_S_IRQ_EN 0x644 #define SE_GENI_S_IRQ_CLEAR 0x648 #define SE_GENI_TX_FIFOn 0x700 #define SE_GENI_RX_FIFOn 0x780 #define SE_GENI_TX_FIFO_STATUS 0x800 #define SE_GENI_RX_FIFO_STATUS 0x804 #define SE_GENI_TX_WATERMARK_REG 0x80c #define SE_GENI_RX_WATERMARK_REG 0x810 #define SE_GENI_RX_RFR_WATERMARK_REG 0x814 #define SE_GENI_IOS 0x908 #define SE_DMA_TX_IRQ_STAT 0xc40 #define SE_DMA_TX_IRQ_CLR 0xc44 #define SE_DMA_TX_FSM_RST 0xc58 #define SE_DMA_RX_IRQ_STAT 0xd40 #define SE_DMA_RX_IRQ_CLR 0xd44 #define SE_DMA_RX_FSM_RST 0xd58 #define SE_HW_PARAM_0 0xe24 #define SE_HW_PARAM_1 0xe28 /* GENI_FORCE_DEFAULT_REG fields */ #define FORCE_DEFAULT BIT(0) /* GENI_STATUS fields */ #define M_GENI_CMD_ACTIVE BIT(0) #define S_GENI_CMD_ACTIVE BIT(12) /* GENI_SER_M_CLK_CFG/GENI_SER_S_CLK_CFG */ #define SER_CLK_EN BIT(0) #define CLK_DIV_MSK GENMASK(15, 4) #define CLK_DIV_SHFT 4 /* GENI_FW_REVISION_RO fields */ #define FW_REV_PROTOCOL_MSK GENMASK(15, 8) #define FW_REV_PROTOCOL_SHFT 8 /* GENI_CLK_SEL fields */ #define CLK_SEL_MSK GENMASK(2, 0) /* SE_GENI_DMA_MODE_EN */ #define GENI_DMA_MODE_EN BIT(0) /* GENI_M_CMD0 fields */ #define M_OPCODE_MSK GENMASK(31, 27) #define M_OPCODE_SHFT 27 #define M_PARAMS_MSK GENMASK(26, 0) /* GENI_M_CMD_CTRL_REG */ #define M_GENI_CMD_CANCEL BIT(2) #define M_GENI_CMD_ABORT BIT(1) #define M_GENI_DISABLE BIT(0) /* GENI_S_CMD0 fields 
*/ #define S_OPCODE_MSK GENMASK(31, 27) #define S_OPCODE_SHFT 27 #define S_PARAMS_MSK GENMASK(26, 0) /* GENI_S_CMD_CTRL_REG */ #define S_GENI_CMD_CANCEL BIT(2) #define S_GENI_CMD_ABORT BIT(1) #define S_GENI_DISABLE BIT(0) /* GENI_M_IRQ_EN fields */ #define M_CMD_DONE_EN BIT(0) #define M_CMD_OVERRUN_EN BIT(1) #define M_ILLEGAL_CMD_EN BIT(2) #define M_CMD_FAILURE_EN BIT(3) #define M_CMD_CANCEL_EN BIT(4) #define M_CMD_ABORT_EN BIT(5) #define M_TIMESTAMP_EN BIT(6) #define M_RX_IRQ_EN BIT(7) #define M_GP_SYNC_IRQ_0_EN BIT(8) #define M_GP_IRQ_0_EN BIT(9) #define M_GP_IRQ_1_EN BIT(10) #define M_GP_IRQ_2_EN BIT(11) #define M_GP_IRQ_3_EN BIT(12) #define M_GP_IRQ_4_EN BIT(13) #define M_GP_IRQ_5_EN BIT(14) #define M_IO_DATA_DEASSERT_EN BIT(22) #define M_IO_DATA_ASSERT_EN BIT(23) #define M_RX_FIFO_RD_ERR_EN BIT(24) #define M_RX_FIFO_WR_ERR_EN BIT(25) #define M_RX_FIFO_WATERMARK_EN BIT(26) #define M_RX_FIFO_LAST_EN BIT(27) #define M_TX_FIFO_RD_ERR_EN BIT(28) #define M_TX_FIFO_WR_ERR_EN BIT(29) #define M_TX_FIFO_WATERMARK_EN BIT(30) #define M_SEC_IRQ_EN BIT(31) #define M_COMMON_GENI_M_IRQ_EN (GENMASK(6, 1) | \ M_IO_DATA_DEASSERT_EN | \ M_IO_DATA_ASSERT_EN | M_RX_FIFO_RD_ERR_EN | \ M_RX_FIFO_WR_ERR_EN | M_TX_FIFO_RD_ERR_EN | \ M_TX_FIFO_WR_ERR_EN) /* GENI_S_IRQ_EN fields */ #define S_CMD_DONE_EN BIT(0) #define S_CMD_OVERRUN_EN BIT(1) #define S_ILLEGAL_CMD_EN BIT(2) #define S_CMD_FAILURE_EN BIT(3) #define S_CMD_CANCEL_EN BIT(4) #define S_CMD_ABORT_EN BIT(5) #define S_GP_SYNC_IRQ_0_EN BIT(8) #define S_GP_IRQ_0_EN BIT(9) #define S_GP_IRQ_1_EN BIT(10) #define S_GP_IRQ_2_EN BIT(11) #define S_GP_IRQ_3_EN BIT(12) #define S_GP_IRQ_4_EN BIT(13) #define S_GP_IRQ_5_EN BIT(14) #define S_IO_DATA_DEASSERT_EN BIT(22) #define S_IO_DATA_ASSERT_EN BIT(23) #define S_RX_FIFO_RD_ERR_EN BIT(24) #define S_RX_FIFO_WR_ERR_EN BIT(25) #define S_RX_FIFO_WATERMARK_EN BIT(26) #define S_RX_FIFO_LAST_EN BIT(27) #define S_COMMON_GENI_S_IRQ_EN (GENMASK(5, 1) | GENMASK(13, 9) | \ S_RX_FIFO_RD_ERR_EN | S_RX_FIFO_WR_ERR_EN) /* GENI_/TX/RX/RX_RFR/_WATERMARK_REG fields */ #define WATERMARK_MSK GENMASK(5, 0) /* GENI_TX_FIFO_STATUS fields */ #define TX_FIFO_WC GENMASK(27, 0) /* GENI_RX_FIFO_STATUS fields */ #define RX_LAST BIT(31) #define RX_LAST_BYTE_VALID_MSK GENMASK(30, 28) #define RX_LAST_BYTE_VALID_SHFT 28 #define RX_FIFO_WC_MSK GENMASK(24, 0) /* SE_GENI_IOS fields */ #define IO2_DATA_IN BIT(1) #define RX_DATA_IN BIT(0) /* SE_DMA_TX_IRQ_STAT Register fields */ #define TX_DMA_DONE BIT(0) #define TX_EOT BIT(1) #define TX_SBE BIT(2) #define TX_RESET_DONE BIT(3) /* SE_DMA_RX_IRQ_STAT Register fields */ #define RX_DMA_DONE BIT(0) #define RX_EOT BIT(1) #define RX_SBE BIT(2) #define RX_RESET_DONE BIT(3) #define RX_FLUSH_DONE BIT(4) #define RX_GENI_GP_IRQ GENMASK(10, 5) #define RX_GENI_CANCEL_IRQ BIT(11) #define RX_GENI_GP_IRQ_EXT GENMASK(13, 12) /* SE_HW_PARAM_0 fields */ #define TX_FIFO_WIDTH_MSK GENMASK(29, 24) #define TX_FIFO_WIDTH_SHFT 24 #define TX_FIFO_DEPTH_MSK GENMASK(21, 16) #define TX_FIFO_DEPTH_SHFT 16 /* SE_HW_PARAM_1 fields */ #define RX_FIFO_WIDTH_MSK GENMASK(29, 24) #define RX_FIFO_WIDTH_SHFT 24 #define RX_FIFO_DEPTH_MSK GENMASK(21, 16) #define RX_FIFO_DEPTH_SHFT 16 #define HW_VER_MAJOR_MASK GENMASK(31, 28) #define HW_VER_MAJOR_SHFT 28 #define HW_VER_MINOR_MASK GENMASK(27, 16) #define HW_VER_MINOR_SHFT 16 #define HW_VER_STEP_MASK GENMASK(15, 0) #define GENI_SE_VERSION_MAJOR(ver) ((ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT) #define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT) #define 
GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK) /* QUP SE VERSION value for major number 2 and minor number 5 */ #define QUP_SE_VERSION_2_5 0x20050000 #if IS_ENABLED(CONFIG_QCOM_GENI_SE) u32 geni_se_get_qup_hw_version(struct geni_se *se); /** * geni_se_read_proto() - Read the protocol configured for a serial engine * @se: Pointer to the concerned serial engine. * * Return: Protocol value as configured in the serial engine. */ static inline u32 geni_se_read_proto(struct geni_se *se) { u32 val; val = readl_relaxed(se->base + GENI_FW_REVISION_RO); return (val & FW_REV_PROTOCOL_MSK) >> FW_REV_PROTOCOL_SHFT; } /** * geni_se_setup_m_cmd() - Setup the primary sequencer * @se: Pointer to the concerned serial engine. * @cmd: Command/Operation to setup in the primary sequencer. * @params: Parameter for the sequencer command. * * This function is used to configure the primary sequencer with the * command and its associated parameters. */ static inline void geni_se_setup_m_cmd(struct geni_se *se, u32 cmd, u32 params) { u32 m_cmd; m_cmd = (cmd << M_OPCODE_SHFT) | (params & M_PARAMS_MSK); writel_relaxed(m_cmd, se->base + SE_GENI_M_CMD0); } /** * geni_se_setup_s_cmd() - Setup the secondary sequencer * @se: Pointer to the concerned serial engine. * @cmd: Command/Operation to setup in the secondary sequencer. * @params: Parameter for the sequencer command. * * This function is used to configure the secondary sequencer with the * command and its associated parameters. */ static inline void geni_se_setup_s_cmd(struct geni_se *se, u32 cmd, u32 params) { u32 s_cmd; s_cmd = readl_relaxed(se->base + SE_GENI_S_CMD0); s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK); s_cmd |= (cmd << S_OPCODE_SHFT); s_cmd |= (params & S_PARAMS_MSK); writel_relaxed(s_cmd, se->base + SE_GENI_S_CMD0); } /** * geni_se_cancel_m_cmd() - Cancel the command configured in the primary * sequencer * @se: Pointer to the concerned serial engine. * * This function is used to cancel the currently configured command in the * primary sequencer. */ static inline void geni_se_cancel_m_cmd(struct geni_se *se) { writel_relaxed(M_GENI_CMD_CANCEL, se->base + SE_GENI_M_CMD_CTRL_REG); } /** * geni_se_cancel_s_cmd() - Cancel the command configured in the secondary * sequencer * @se: Pointer to the concerned serial engine. * * This function is used to cancel the currently configured command in the * secondary sequencer. */ static inline void geni_se_cancel_s_cmd(struct geni_se *se) { writel_relaxed(S_GENI_CMD_CANCEL, se->base + SE_GENI_S_CMD_CTRL_REG); } /** * geni_se_abort_m_cmd() - Abort the command configured in the primary sequencer * @se: Pointer to the concerned serial engine. * * This function is used to force abort the currently configured command in the * primary sequencer. */ static inline void geni_se_abort_m_cmd(struct geni_se *se) { writel_relaxed(M_GENI_CMD_ABORT, se->base + SE_GENI_M_CMD_CTRL_REG); } /** * geni_se_abort_s_cmd() - Abort the command configured in the secondary * sequencer * @se: Pointer to the concerned serial engine. * * This function is used to force abort the currently configured command in the * secondary sequencer. */ static inline void geni_se_abort_s_cmd(struct geni_se *se) { writel_relaxed(S_GENI_CMD_ABORT, se->base + SE_GENI_S_CMD_CTRL_REG); } /** * geni_se_get_tx_fifo_depth() - Get the TX fifo depth of the serial engine * @se: Pointer to the concerned serial engine. * * This function is used to get the depth i.e. number of elements in the * TX fifo of the serial engine. * * Return: TX fifo depth in units of FIFO words. 
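 *
 * For instance, a protocol driver might size a transfer buffer from
 * the depth and width together (a rough sketch, variable names
 * illustrative):
 *
 *	tx_depth = geni_se_get_tx_fifo_depth(se);
 *	tx_width = geni_se_get_tx_fifo_width(se);
 *	tx_bytes = tx_depth * tx_width / BITS_PER_BYTE;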
*/ static inline u32 geni_se_get_tx_fifo_depth(struct geni_se *se) { u32 val; val = readl_relaxed(se->base + SE_HW_PARAM_0); return (val & TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT; } /** * geni_se_get_tx_fifo_width() - Get the TX fifo width of the serial engine * @se: Pointer to the concerned serial engine. * * This function is used to get the width i.e. word size per element in the * TX fifo of the serial engine. * * Return: TX fifo width in bits */ static inline u32 geni_se_get_tx_fifo_width(struct geni_se *se) { u32 val; val = readl_relaxed(se->base + SE_HW_PARAM_0); return (val & TX_FIFO_WIDTH_MSK) >> TX_FIFO_WIDTH_SHFT; } /** * geni_se_get_rx_fifo_depth() - Get the RX fifo depth of the serial engine * @se: Pointer to the concerned serial engine. * * This function is used to get the depth i.e. number of elements in the * RX fifo of the serial engine. * * Return: RX fifo depth in units of FIFO words */ static inline u32 geni_se_get_rx_fifo_depth(struct geni_se *se) { u32 val; val = readl_relaxed(se->base + SE_HW_PARAM_1); return (val & RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT; } void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr); void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode); void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words, bool msb_to_lsb, bool tx_cfg, bool rx_cfg); int geni_se_resources_off(struct geni_se *se); int geni_se_resources_on(struct geni_se *se); int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl); int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq, unsigned int *index, unsigned long *res_freq, bool exact); int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len, dma_addr_t *iova); int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, dma_addr_t *iova); void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); #endif #endif kgdb.h 0000644 00000025611 14722070374 0005637 0 ustar 00 /* * This provides the callbacks and functions that KGDB needs to share between * the core, I/O and arch-specific portions. * * Author: Amit Kale <amitkale@linsyssoft.com> and * Tom Rini <trini@kernel.crashing.org> * * 2001-2004 (c) Amit S. Kale and 2003-2005 (c) MontaVista Software, Inc. * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #ifndef _KGDB_H_ #define _KGDB_H_ #include <linux/linkage.h> #include <linux/init.h> #include <linux/atomic.h> #ifdef CONFIG_HAVE_ARCH_KGDB #include <asm/kgdb.h> #endif #ifdef CONFIG_KGDB struct pt_regs; /** * kgdb_skipexception - (optional) exit kgdb_handle_exception early * @exception: Exception vector number * @regs: Current &struct pt_regs. * * On some architectures it is required to skip a breakpoint * exception when it occurs after a breakpoint has been removed. * This can be implemented in the architecture specific portion of kgdb. */ extern int kgdb_skipexception(int exception, struct pt_regs *regs); struct tasklet_struct; struct task_struct; struct uart_port; /** * kgdb_breakpoint - compiled in breakpoint * * This will be implemented as a static inline per architecture. This * function is called by the kgdb core to execute an architecture * specific trap to cause kgdb to enter the exception processing. 
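 *
 * A hedged example of a typical call site (the predicate is
 * hypothetical): compile a breakpoint into a suspect path so that an
 * attached gdb stops exactly there:
 *
 *	if (unlikely(corruption_detected))	// illustrative condition
 *		kgdb_breakpoint();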
* */ void kgdb_breakpoint(void); extern int kgdb_connected; extern int kgdb_io_module_registered; extern atomic_t kgdb_setting_breakpoint; extern atomic_t kgdb_cpu_doing_single_step; extern struct task_struct *kgdb_usethread; extern struct task_struct *kgdb_contthread; enum kgdb_bptype { BP_BREAKPOINT = 0, BP_HARDWARE_BREAKPOINT, BP_WRITE_WATCHPOINT, BP_READ_WATCHPOINT, BP_ACCESS_WATCHPOINT, BP_POKE_BREAKPOINT, }; enum kgdb_bpstate { BP_UNDEFINED = 0, BP_REMOVED, BP_SET, BP_ACTIVE }; struct kgdb_bkpt { unsigned long bpt_addr; unsigned char saved_instr[BREAK_INSTR_SIZE]; enum kgdb_bptype type; enum kgdb_bpstate state; }; struct dbg_reg_def_t { char *name; int size; int offset; }; #ifndef DBG_MAX_REG_NUM #define DBG_MAX_REG_NUM 0 #else extern struct dbg_reg_def_t dbg_reg_def[]; extern char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs); extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs); #endif #ifndef KGDB_MAX_BREAKPOINTS # define KGDB_MAX_BREAKPOINTS 1000 #endif #define KGDB_HW_BREAKPOINT 1 /* * Functions each KGDB-supporting architecture must provide: */ /** * kgdb_arch_init - Perform any architecture specific initialization. * * This function will handle the initialization of any architecture * specific callbacks. */ extern int kgdb_arch_init(void); /** * kgdb_arch_exit - Perform any architecture specific uninitialization. * * This function will handle the uninitialization of any architecture * specific callbacks, for dynamic registration and unregistration. */ extern void kgdb_arch_exit(void); /** * pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs * @gdb_regs: A pointer to hold the registers in the order GDB wants. * @regs: The &struct pt_regs of the current process. * * Convert the pt_regs in @regs into the format for registers that * GDB expects, stored in @gdb_regs. */ extern void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs); /** * sleeping_thread_to_gdb_regs - Convert a sleeping task's saved regs to GDB regs * @gdb_regs: A pointer to hold the registers in the order GDB wants. * @p: The &struct task_struct of the desired process. * * Convert the register values of the sleeping process in @p to * the format that GDB expects. * This function is called when kgdb does not have access to the * &struct pt_regs and therefore it should fill the gdb registers * @gdb_regs with what has been saved in &struct thread_struct * thread field during switch_to. */ extern void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p); /** * gdb_regs_to_pt_regs - Convert GDB regs to ptrace regs. * @gdb_regs: A pointer to hold the registers we've received from GDB. * @regs: A pointer to a &struct pt_regs to hold these values in. * * Convert the GDB regs in @gdb_regs into the pt_regs, and store them * in @regs. */ extern void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs); /** * kgdb_arch_handle_exception - Handle architecture specific GDB packets. * @vector: The error vector of the exception that happened. * @signo: The signal number of the exception that happened. * @err_code: The error code of the exception that happened. * @remcom_in_buffer: The buffer of the packet we have read. * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into. * @regs: The &struct pt_regs of the current process. * * This function MUST handle the 'c' and 's' command packets, * as well as packets to set / remove a hardware breakpoint, if used. * If there are additional packets which the hardware needs to handle, * they are handled here.
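 *
 * A hedged sketch of the dispatch such an implementation performs
 * (packet letters follow the GDB remote protocol; address parsing uses
 * the kgdb_hex2long() helper declared later in this header):
 *
 *	char *ptr = &remcom_in_buffer[1];
 *	unsigned long addr;
 *
 *	switch (remcom_in_buffer[0]) {
 *	case 'c':				// continue, optionally at addr
 *		if (kgdb_hex2long(&ptr, &addr))
 *			kgdb_arch_set_pc(regs, addr);
 *		return 0;			// exit the kgdb callback
 *	default:
 *		return -1;			// let the core process it
 *	}
 *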
The code should return -1 if it wants to * process more packets, and a %0 or %1 if it wants to exit from the * kgdb callback. */ extern int kgdb_arch_handle_exception(int vector, int signo, int err_code, char *remcom_in_buffer, char *remcom_out_buffer, struct pt_regs *regs); /** * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU * @ignored: This parameter is only here to match the prototype. * * If you're using the default implementation of kgdb_roundup_cpus() * this function will be called per CPU. If you don't implement * kgdb_call_nmi_hook() a default will be used. */ extern void kgdb_call_nmi_hook(void *ignored); /** * kgdb_roundup_cpus - Get other CPUs into a holding pattern * * On SMP systems, we need to get the attention of the other CPUs * and get them into a known state. This should do what is needed * to get the other CPUs to call kgdb_wait(). Note that on some arches, * the NMI approach is not used for rounding up all the CPUs. Normally * those architectures can just not implement this and get the default. * * On non-SMP systems, this is not called. */ extern void kgdb_roundup_cpus(void); /** * kgdb_arch_set_pc - Generic callback to set the program counter * @regs: Current &struct pt_regs. * @pc: The new value for the program counter * * This function handles updating the program counter and requires an * architecture specific implementation. */ extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc); /* Optional functions. */ extern int kgdb_validate_break_address(unsigned long addr); extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt); extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt); /** * kgdb_arch_late - Perform any architecture specific late initialization. * * This function will handle the late initialization of any * architecture specific callbacks. This is an optional function for * handling things like late initialization of hw breakpoints. The * default implementation does nothing. */ extern void kgdb_arch_late(void); /** * struct kgdb_arch - Describe architecture specific values. * @gdb_bpt_instr: The instruction to trigger a breakpoint. * @flags: Flags for the breakpoint, currently just %KGDB_HW_BREAKPOINT. * @set_breakpoint: Allow an architecture to specify how to set a software * breakpoint. * @remove_breakpoint: Allow an architecture to specify how to remove a * software breakpoint. * @set_hw_breakpoint: Allow an architecture to specify how to set a hardware * breakpoint. * @remove_hw_breakpoint: Allow an architecture to specify how to remove a * hardware breakpoint. * @disable_hw_break: Allow an architecture to specify how to disable * hardware breakpoints for a single cpu. * @remove_all_hw_break: Allow an architecture to specify how to remove all * hardware breakpoints. * @correct_hw_break: Allow an architecture to specify how to correct the * hardware debug registers. * @enable_nmi: Manage NMI-triggered entry to KGDB */ struct kgdb_arch { unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE]; unsigned long flags; int (*set_breakpoint)(unsigned long, char *); int (*remove_breakpoint)(unsigned long, char *); int (*set_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); void (*disable_hw_break)(struct pt_regs *regs); void (*remove_all_hw_break)(void); void (*correct_hw_break)(void); void (*enable_nmi)(bool on); }; /** * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. * @name: Name of the I/O driver.
* @read_char: Pointer to a function that will return one char. * @write_char: Pointer to a function that will write one char. * @flush: Pointer to a function that will flush any pending writes. * @init: Pointer to a function that will initialize the device. * @pre_exception: Pointer to a function that will do any prep work for * the I/O driver. * @post_exception: Pointer to a function that will do any cleanup work * for the I/O driver. * @is_console: 1 if the end device is a console 0 if the I/O device is * not a console */ struct kgdb_io { const char *name; int (*read_char) (void); void (*write_char) (u8); void (*flush) (void); int (*init) (void); void (*pre_exception) (void); void (*post_exception) (void); int is_console; }; extern const struct kgdb_arch arch_kgdb_ops; extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs); #ifdef CONFIG_SERIAL_KGDB_NMI extern int kgdb_register_nmi_console(void); extern int kgdb_unregister_nmi_console(void); extern bool kgdb_nmi_poll_knock(void); #else static inline int kgdb_register_nmi_console(void) { return 0; } static inline int kgdb_unregister_nmi_console(void) { return 0; } static inline bool kgdb_nmi_poll_knock(void) { return 1; } #endif extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops); extern struct kgdb_io *dbg_io_ops; extern int kgdb_hex2long(char **ptr, unsigned long *long_val); extern char *kgdb_mem2hex(char *mem, char *buf, int count); extern int kgdb_hex2mem(char *buf, char *mem, int count); extern int kgdb_isremovedbreak(unsigned long addr); extern void kgdb_schedule_breakpoint(void); extern int kgdb_handle_exception(int ex_vector, int signo, int err_code, struct pt_regs *regs); extern int kgdb_nmicallback(int cpu, void *regs); extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code, atomic_t *snd_rdy); extern void gdbstub_exit(int status); extern int kgdb_single_step; extern atomic_t kgdb_active; #define in_dbg_master() \ (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) extern bool dbg_is_early; extern void __init dbg_late_init(void); extern void kgdb_panic(const char *msg); #else /* ! CONFIG_KGDB */ #define in_dbg_master() (0) #define dbg_late_init() static inline void kgdb_panic(const char *msg) {} #endif /* ! CONFIG_KGDB */ #endif /* _KGDB_H_ */ sram.h 0000644 00000001514 14722070374 0005666 0 ustar 00 /* * Generic SRAM Driver Interface * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef __LINUX_SRAM_H__ #define __LINUX_SRAM_H__ struct gen_pool; #ifdef CONFIG_SRAM_EXEC void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size); #else static inline void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size) { return NULL; } #endif /* CONFIG_SRAM_EXEC */ #endif /* __LINUX_SRAM_H__ */ time32.h 0000644 00000013413 14722070374 0006030 0 ustar 00 #ifndef _LINUX_TIME32_H #define _LINUX_TIME32_H /* * These are all interfaces based on the old time_t definition * that overflows in 2038 on 32-bit architectures. 
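 * (A signed 32-bit time_t holds at most 2^31 - 1 seconds past the 1970
 * epoch, which is 03:14:07 UTC on 2038-01-19; the TIME_T_MAX macro below
 * evaluates to exactly that bound on such architectures.)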
New code * should use the replacements based on time64_t and timespec64. * * Any interfaces in here that become unused as we migrate * code to time64_t should get removed. */ #include <linux/time64.h> #include <linux/timex.h> #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) typedef s32 old_time32_t; struct old_timespec32 { old_time32_t tv_sec; s32 tv_nsec; }; struct old_timeval32 { old_time32_t tv_sec; s32 tv_usec; }; struct old_itimerspec32 { struct old_timespec32 it_interval; struct old_timespec32 it_value; }; struct old_utimbuf32 { old_time32_t actime; old_time32_t modtime; }; struct old_timex32 { u32 modes; s32 offset; s32 freq; s32 maxerror; s32 esterror; s32 status; s32 constant; s32 precision; s32 tolerance; struct old_timeval32 time; s32 tick; s32 ppsfreq; s32 jitter; s32 shift; s32 stabil; s32 jitcnt; s32 calcnt; s32 errcnt; s32 stbcnt; s32 tai; s32:32; s32:32; s32:32; s32:32; s32:32; s32:32; s32:32; s32:32; s32:32; s32:32; s32:32; }; extern int get_old_timespec32(struct timespec64 *, const void __user *); extern int put_old_timespec32(const struct timespec64 *, void __user *); extern int get_old_itimerspec32(struct itimerspec64 *its, const struct old_itimerspec32 __user *uits); extern int put_old_itimerspec32(const struct itimerspec64 *its, struct old_itimerspec32 __user *uits); struct __kernel_timex; int get_old_timex32(struct __kernel_timex *, const struct old_timex32 __user *); int put_old_timex32(struct old_timex32 __user *, const struct __kernel_timex *); #if __BITS_PER_LONG == 64 /* timespec64 is defined as timespec here */ static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) { return *(const struct timespec *)&ts64; } static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) { return *(const struct timespec64 *)&ts; } #else static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) { struct timespec ret; ret.tv_sec = (time_t)ts64.tv_sec; ret.tv_nsec = ts64.tv_nsec; return ret; } static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) { struct timespec64 ret; ret.tv_sec = ts.tv_sec; ret.tv_nsec = ts.tv_nsec; return ret; } #endif static inline int timespec_equal(const struct timespec *a, const struct timespec *b) { return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); } /* * lhs < rhs: return <0 * lhs == rhs: return 0 * lhs > rhs: return >0 */ static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; if (lhs->tv_sec > rhs->tv_sec) return 1; return lhs->tv_nsec - rhs->tv_nsec; } /* * Returns true if the timespec is normalized, false if denormalized: */ static inline bool timespec_valid(const struct timespec *ts) { /* Dates before 1970 are bogus */ if (ts->tv_sec < 0) return false; /* Can't have more nanoseconds than a second */ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) return false; return true; } /** * timespec_to_ns - Convert timespec to nanoseconds * @ts: pointer to the timespec variable to be converted * * Returns the scalar nanosecond representation of the timespec * parameter. */ static inline s64 timespec_to_ns(const struct timespec *ts) { return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; } /** * ns_to_timespec - Convert nanoseconds to timespec * @nsec: the nanoseconds value to be converted * * Returns the timespec representation of the nsec parameter.
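 *
 * Hedged round-trip example (values illustrative; uses timespec_to_ns()
 * defined above):
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 500000000 };
 *	s64 ns = timespec_to_ns(&ts);			// 1500000000
 *	struct timespec back = ns_to_timespec(ns);	// { 1, 500000000 }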
*/ extern struct timespec ns_to_timespec(const s64 nsec); /** * timespec_add_ns - Adds nanoseconds to a timespec * @a: pointer to timespec to be incremented * @ns: unsigned nanoseconds value to be added * * This must always be inlined because it's used from the x86-64 vdso, * which cannot call other kernel functions. */ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns) { a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); a->tv_nsec = ns; } static inline unsigned long mktime(const unsigned int year, const unsigned int mon, const unsigned int day, const unsigned int hour, const unsigned int min, const unsigned int sec) { return mktime64(year, mon, day, hour, min, sec); } static inline bool timeval_valid(const struct timeval *tv) { /* Dates before 1970 are bogus */ if (tv->tv_sec < 0) return false; /* Can't have more microseconds than a second */ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) return false; return true; } /** * timeval_to_ns - Convert timeval to nanoseconds * @tv: pointer to the timeval variable to be converted * * Returns the scalar nanosecond representation of the timeval * parameter. */ static inline s64 timeval_to_ns(const struct timeval *tv) { return ((s64) tv->tv_sec * NSEC_PER_SEC) + tv->tv_usec * NSEC_PER_USEC; } /** * ns_to_timeval - Convert nanoseconds to timeval * @nsec: the nanoseconds value to be converted * * Returns the timeval representation of the nsec parameter. */ extern struct timeval ns_to_timeval(const s64 nsec); extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec); /* * Old names for the 32-bit time_t interfaces, these will be removed * when everything uses the new names. */ #define compat_time_t old_time32_t #define compat_timeval old_timeval32 #define compat_timespec old_timespec32 #define compat_itimerspec old_itimerspec32 #define ns_to_compat_timeval ns_to_old_timeval32 #define get_compat_itimerspec64 get_old_itimerspec32 #define put_compat_itimerspec64 put_old_itimerspec32 #define compat_get_timespec64 get_old_timespec32 #define compat_put_timespec64 put_old_timespec32 #endif swap_cgroup.h 0000644 00000001713 14722070374 0007256 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SWAP_CGROUP_H #define __LINUX_SWAP_CGROUP_H #include <linux/swap.h> #ifdef CONFIG_MEMCG_SWAP extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, unsigned short old, unsigned short new); extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, unsigned int nr_ents); extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); extern int swap_cgroup_swapon(int type, unsigned long max_pages); extern void swap_cgroup_swapoff(int type); #else static inline unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, unsigned int nr_ents) { return 0; } static inline unsigned short lookup_swap_cgroup_id(swp_entry_t ent) { return 0; } static inline int swap_cgroup_swapon(int type, unsigned long max_pages) { return 0; } static inline void swap_cgroup_swapoff(int type) { return; } #endif /* CONFIG_MEMCG_SWAP */ #endif /* __LINUX_SWAP_CGROUP_H */ mcb.h 0000644 00000007325 14722070374 0005473 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * MEN Chameleon Bus.
* * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de) * Author: Johannes Thumshirn <johannes.thumshirn@men.de> */ #ifndef _LINUX_MCB_H #define _LINUX_MCB_H #include <linux/mod_devicetable.h> #include <linux/device.h> #include <linux/irqreturn.h> #define CHAMELEON_FILENAME_LEN 12 struct mcb_driver; struct mcb_device; /** * struct mcb_bus - MEN Chameleon Bus * * @dev: bus device * @carrier: pointer to carrier device * @bus_nr: mcb bus number * @get_irq: callback to get IRQ number * @revision: the FPGA's revision number * @model: the FPGA's model number * @minor: the FPGA's minor number * @name: the FPGA's name */ struct mcb_bus { struct device dev; struct device *carrier; int bus_nr; u8 revision; char model; u8 minor; char name[CHAMELEON_FILENAME_LEN + 1]; int (*get_irq)(struct mcb_device *dev); }; static inline struct mcb_bus *to_mcb_bus(struct device *dev) { return container_of(dev, struct mcb_bus, dev); } /** * struct mcb_device - MEN Chameleon Bus device * * @dev: device in kernel representation * @bus: mcb bus the device is plugged to * @driver: associated mcb_driver * @id: mcb device id * @inst: instance in Chameleon table * @group: group in Chameleon table * @var: variant in Chameleon table * @bar: BAR in Chameleon table * @rev: revision in Chameleon table * @irq: IRQ resource * @mem: memory resource * @dma_dev: device to use for DMA allocations */ struct mcb_device { struct device dev; struct mcb_bus *bus; struct mcb_driver *driver; u16 id; int inst; int group; int var; int bar; int rev; struct resource irq; struct resource mem; struct device *dma_dev; }; static inline struct mcb_device *to_mcb_device(struct device *dev) { return container_of(dev, struct mcb_device, dev); } /** * struct mcb_driver - MEN Chameleon Bus device driver * * @driver: device_driver * @id_table: mcb id table * @probe: probe callback * @remove: remove callback * @shutdown: shutdown callback */ struct mcb_driver { struct device_driver driver; const struct mcb_device_id *id_table; int (*probe)(struct mcb_device *mdev, const struct mcb_device_id *id); void (*remove)(struct mcb_device *mdev); void (*shutdown)(struct mcb_device *mdev); }; static inline struct mcb_driver *to_mcb_driver(struct device_driver *drv) { return container_of(drv, struct mcb_driver, driver); } static inline void *mcb_get_drvdata(struct mcb_device *dev) { return dev_get_drvdata(&dev->dev); } static inline void mcb_set_drvdata(struct mcb_device *dev, void *data) { dev_set_drvdata(&dev->dev, data); } extern int __must_check __mcb_register_driver(struct mcb_driver *drv, struct module *owner, const char *mod_name); #define mcb_register_driver(driver) \ __mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) extern void mcb_unregister_driver(struct mcb_driver *driver); #define module_mcb_driver(__mcb_driver) \ module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver); extern void mcb_bus_add_devices(const struct mcb_bus *bus); extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev); extern struct mcb_bus *mcb_alloc_bus(struct device *carrier); extern struct mcb_bus *mcb_bus_get(struct mcb_bus *bus); extern void mcb_bus_put(struct mcb_bus *bus); extern struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus); extern void mcb_free_dev(struct mcb_device *dev); extern void mcb_release_bus(struct mcb_bus *bus); extern struct resource *mcb_request_mem(struct mcb_device *dev, const char *name); extern void mcb_release_mem(struct resource *mem); extern int mcb_get_irq(struct mcb_device *dev); extern struct resource
*mcb_get_resource(struct mcb_device *dev, unsigned int type); #endif /* _LINUX_MCB_H */ ahci_platform.h 0000644 00000003054 14722070374 0007535 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * AHCI SATA platform driver * * Copyright 2004-2005 Red Hat, Inc. * Jeff Garzik <jgarzik@pobox.com> * Copyright 2010 MontaVista Software, LLC. * Anton Vorontsov <avorontsov@ru.mvista.com> */ #ifndef _AHCI_PLATFORM_H #define _AHCI_PLATFORM_H #include <linux/compiler.h> struct device; struct ata_port_info; struct ahci_host_priv; struct platform_device; struct scsi_host_template; int ahci_platform_enable_phys(struct ahci_host_priv *hpriv); void ahci_platform_disable_phys(struct ahci_host_priv *hpriv); int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv); void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv); int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); void ahci_platform_disable_resources(struct ahci_host_priv *hpriv); struct ahci_host_priv *ahci_platform_get_resources( struct platform_device *pdev, unsigned int flags); int ahci_platform_init_host(struct platform_device *pdev, struct ahci_host_priv *hpriv, const struct ata_port_info *pi_template, struct scsi_host_template *sht); void ahci_platform_shutdown(struct platform_device *pdev); int ahci_platform_suspend_host(struct device *dev); int ahci_platform_resume_host(struct device *dev); int ahci_platform_suspend(struct device *dev); int ahci_platform_resume(struct device *dev); #define AHCI_PLATFORM_GET_RESETS 0x01 #endif /* _AHCI_PLATFORM_H */ context_tracking.h 0000644 00000010675 14722070374 0010302 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CONTEXT_TRACKING_H #define _LINUX_CONTEXT_TRACKING_H #include <linux/sched.h> #include <linux/vtime.h> #include <linux/context_tracking_state.h> #include <asm/ptrace.h> #ifdef CONFIG_CONTEXT_TRACKING extern void context_tracking_cpu_set(int cpu); /* Called with interrupts disabled. */ extern void __context_tracking_enter(enum ctx_state state); extern void __context_tracking_exit(enum ctx_state state); extern void context_tracking_enter(enum ctx_state state); extern void context_tracking_exit(enum ctx_state state); extern void context_tracking_user_enter(void); extern void context_tracking_user_exit(void); static inline void user_enter(void) { if (context_tracking_is_enabled()) context_tracking_enter(CONTEXT_USER); } static inline void user_exit(void) { if (context_tracking_is_enabled()) context_tracking_exit(CONTEXT_USER); } /* Called with interrupts disabled. */ static inline void user_enter_irqoff(void) { if (context_tracking_is_enabled()) __context_tracking_enter(CONTEXT_USER); } static inline void user_exit_irqoff(void) { if (context_tracking_is_enabled()) __context_tracking_exit(CONTEXT_USER); } static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; if (!context_tracking_is_enabled()) return 0; prev_ctx = this_cpu_read(context_tracking.state); if (prev_ctx != CONTEXT_KERNEL) context_tracking_exit(prev_ctx); return prev_ctx; } static inline void exception_exit(enum ctx_state prev_ctx) { if (context_tracking_is_enabled()) { if (prev_ctx != CONTEXT_KERNEL) context_tracking_enter(prev_ctx); } } /** * ct_state() - return the current context tracking state if known * * Returns the current cpu's context tracking state if context tracking * is enabled. 
If context tracking is disabled, returns * CONTEXT_DISABLED. This should be used primarily for debugging. */ static inline enum ctx_state ct_state(void) { return context_tracking_is_enabled() ? this_cpu_read(context_tracking.state) : CONTEXT_DISABLED; } #else static inline void user_enter(void) { } static inline void user_exit(void) { } static inline void user_enter_irqoff(void) { } static inline void user_exit_irqoff(void) { } static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } #endif /* !CONFIG_CONTEXT_TRACKING */ #define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond)) #ifdef CONFIG_CONTEXT_TRACKING_FORCE extern void context_tracking_init(void); #else static inline void context_tracking_init(void) { } #endif /* CONFIG_CONTEXT_TRACKING_FORCE */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN /* must be called with irqs disabled */ static inline void guest_enter_irqoff(void) { if (vtime_accounting_cpu_enabled()) vtime_guest_enter(current); else current->flags |= PF_VCPU; if (context_tracking_is_enabled()) __context_tracking_enter(CONTEXT_GUEST); /* KVM does not hold any references to RCU-protected data when it * switches CPU into a guest mode. In fact switching to a guest mode * is very similar to exiting to userspace from the RCU point of view. In * addition the CPU may stay in guest mode for quite a long time (up to * one time slice). Let's treat guest mode as a quiescent state, just like * we do with user-mode execution. */ if (!context_tracking_cpu_is_enabled()) rcu_virt_note_context_switch(smp_processor_id()); } static inline void guest_exit_irqoff(void) { if (context_tracking_is_enabled()) __context_tracking_exit(CONTEXT_GUEST); if (vtime_accounting_cpu_enabled()) vtime_guest_exit(current); else current->flags &= ~PF_VCPU; } #else static inline void guest_enter_irqoff(void) { /* * This is running in ioctl context so it's safe * to assume that it's the stime pending cputime * to flush. */ vtime_account_system(current); current->flags |= PF_VCPU; rcu_virt_note_context_switch(smp_processor_id()); } static inline void guest_exit_irqoff(void) { /* Flush the cputime we spent on the guest */ vtime_account_system(current); current->flags &= ~PF_VCPU; } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ static inline void guest_enter(void) { unsigned long flags; local_irq_save(flags); guest_enter_irqoff(); local_irq_restore(flags); } static inline void guest_exit(void) { unsigned long flags; local_irq_save(flags); guest_exit_irqoff(); local_irq_restore(flags); } #endif of_pdt.h 0000644 00000002213 14722070374 0006174 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Definitions for building a device tree by calling into the * Open Firmware PROM. * * Copyright (C) 2010 Andres Salomon <dilinger@queued.net> */ #ifndef _LINUX_OF_PDT_H #define _LINUX_OF_PDT_H /* overridable operations for calling into the PROM */ struct of_pdt_ops { /* * buf should be 32 bytes; return 0 on success. * If prev is NULL, the first property will be returned.
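 *
 * A hedged sketch of walking a node's properties under this contract
 * (handle_prop() is a hypothetical consumer; iteration is assumed to
 * stop on the first non-zero return):
 *
 *	char buf[32];
 *	char *prev = NULL;
 *	while (ops->nextprop(node, prev, buf) == 0) {
 *		handle_prop(node, buf);
 *		prev = buf;
 *	}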
*/ int (*nextprop)(phandle node, char *prev, char *buf); /* for both functions, return proplen on success; -1 on error */ int (*getproplen)(phandle node, const char *prop); int (*getproperty)(phandle node, const char *prop, char *buf, int bufsize); /* phandles are 0 if no child or sibling exists */ phandle (*getchild)(phandle parent); phandle (*getsibling)(phandle node); /* return 0 on success; fill in 'len' with number of bytes in path */ int (*pkg2path)(phandle node, char *buf, const int buflen, int *len); }; extern void *prom_early_alloc(unsigned long size); /* for building the device tree */ extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); #endif /* _LINUX_OF_PDT_H */ purgatory.h 0000644 00000001115 14722070374 0006755 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PURGATORY_H #define _LINUX_PURGATORY_H #include <linux/types.h> #include <crypto/sha.h> #include <uapi/linux/kexec.h> struct kexec_sha_region { unsigned long start; unsigned long len; }; /* * These forward declarations serve two purposes: * * 1) Make sparse happy when checking arch/purgatory * 2) Document that these are required to be global so the symbol * lookup in kexec works */ extern struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX]; extern u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE]; #endif stop_machine.h 0000644 00000011362 14722070374 0007377 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_STOP_MACHINE #define _LINUX_STOP_MACHINE #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/smp.h> #include <linux/list.h> /* * stop_cpu[s]() is simplistic per-cpu maximum priority cpu * monopolization mechanism. The caller can specify a non-sleeping * function to be executed on a single or multiple cpus preempting all * other processes and monopolizing those cpus until it finishes. * * Resources for this mechanism are preallocated when a cpu is brought * up and requests are guaranteed to be served as long as the target * cpus are online. 
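 *
 * A minimal usage sketch (callback and argument are hypothetical):
 *
 *	static int bump(void *arg)
 *	{
 *		(*(int *)arg)++;	// runs with the target cpu monopolized
 *		return 0;
 *	}
 *
 *	int v = 0;
 *	int ret = stop_one_cpu(3, bump, &v);	// ret is bump()'s return value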
*/ typedef int (*cpu_stop_fn_t)(void *arg); #ifdef CONFIG_SMP struct cpu_stop_work { struct list_head list; /* cpu_stopper->works */ cpu_stop_fn_t fn; void *arg; struct cpu_stop_done *done; }; int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg); bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf); int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); void stop_machine_park(int cpu); void stop_machine_unpark(int cpu); void stop_machine_yield(const struct cpumask *cpumask); #else /* CONFIG_SMP */ #include <linux/workqueue.h> struct cpu_stop_work { struct work_struct work; cpu_stop_fn_t fn; void *arg; }; static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) { int ret = -ENOENT; preempt_disable(); if (cpu == smp_processor_id()) ret = fn(arg); preempt_enable(); return ret; } static void stop_one_cpu_nowait_workfn(struct work_struct *work) { struct cpu_stop_work *stwork = container_of(work, struct cpu_stop_work, work); preempt_disable(); stwork->fn(stwork->arg); preempt_enable(); } static inline bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf) { if (cpu == smp_processor_id()) { INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn); work_buf->fn = fn; work_buf->arg = arg; schedule_work(&work_buf->work); return true; } return false; } static inline int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) { if (cpumask_test_cpu(raw_smp_processor_id(), cpumask)) return stop_one_cpu(raw_smp_processor_id(), fn, arg); return -ENOENT; } static inline int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) { return stop_cpus(cpumask, fn, arg); } #endif /* CONFIG_SMP */ /* * stop_machine "Bogolock": stop the entire machine, disable * interrupts. This is a very heavy lock, which is equivalent to * grabbing every spinlock (and more). So the "read" side to such a * lock is anything which disables preemption. */ #if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) /** * stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn() * @cpus: the cpus to run the @fn() on (NULL = any online cpu) * * Description: This causes a thread to be scheduled on every cpu, * each of which disables interrupts. The result is that no one is * holding a spinlock or inside any other preempt-disabled region when * @fn() runs. * * This can be thought of as a very heavy write lock, equivalent to * grabbing every spinlock in the kernel. * * Protects against CPU hotplug. */ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); /** * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn() * @cpus: the cpus to run the @fn() on (NULL = any online cpu) * * Same as above. Must be called from within a cpus_read_lock() protected * region. Avoids nested calls to cpus_read_lock().
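 *
 * Hedged example for a caller that already holds the hotplug lock
 * (apply_patch() and patch_data are placeholders):
 *
 *	cpus_read_lock();
 *	ret = stop_machine_cpuslocked(apply_patch, &patch_data, NULL);
 *	cpus_read_unlock();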
*/ int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); #else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { unsigned long flags; int ret; local_irq_save(flags); ret = fn(data); local_irq_restore(flags); return ret; } static __always_inline int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { return stop_machine_cpuslocked(fn, data, cpus); } static __always_inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { return stop_machine(fn, data, cpus); } #endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ #endif /* _LINUX_STOP_MACHINE */ list_sort.h 0000644 00000000463 14722070374 0006750 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_LIST_SORT_H #define _LINUX_LIST_SORT_H #include <linux/types.h> struct list_head; __attribute__((nonnull(2,3))) void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, struct list_head *a, struct list_head *b)); #endif openvswitch.h 0000644 00000000623 14722070374 0007275 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2007-2011 Nicira Networks. */ #ifndef _LINUX_OPENVSWITCH_H #define _LINUX_OPENVSWITCH_H 1 #include <uapi/linux/openvswitch.h> #define OVS_CLONE_ATTR_EXEC 0 /* Specify an u32 value. When nonzero, * actions in clone will not change flow * keys. False otherwise. */ #endif /* _LINUX_OPENVSWITCH_H */ tick.h 0000644 00000022647 14722070374 0005670 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Tick related global functions */ #ifndef _LINUX_TICK_H #define _LINUX_TICK_H #include <linux/clockchips.h> #include <linux/irqflags.h> #include <linux/percpu.h> #include <linux/context_tracking_state.h> #include <linux/cpumask.h> #include <linux/sched.h> #ifdef CONFIG_GENERIC_CLOCKEVENTS extern void __init tick_init(void); /* Should be core only, but ARM BL switcher requires it */ extern void tick_suspend_local(void); /* Should be core only, but XEN resume magic and ARM BL switcher require it */ extern void tick_resume_local(void); extern void tick_handover_do_timer(void); extern void tick_cleanup_dead_cpu(int cpu); #else /* CONFIG_GENERIC_CLOCKEVENTS */ static inline void tick_init(void) { } static inline void tick_suspend_local(void) { } static inline void tick_resume_local(void) { } static inline void tick_handover_do_timer(void) { } static inline void tick_cleanup_dead_cpu(int cpu) { } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ #if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND) extern void tick_freeze(void); extern void tick_unfreeze(void); #else static inline void tick_freeze(void) { } static inline void tick_unfreeze(void) { } #endif #ifdef CONFIG_TICK_ONESHOT extern void tick_irq_enter(void); # ifndef arch_needs_cpu # define arch_needs_cpu() (0) # endif # else static inline void tick_irq_enter(void) { } #endif #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu); #else static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { } #endif enum tick_broadcast_mode { TICK_BROADCAST_OFF, TICK_BROADCAST_ON, TICK_BROADCAST_FORCE, }; enum tick_broadcast_state { TICK_BROADCAST_EXIT, TICK_BROADCAST_ENTER, }; #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST extern void tick_broadcast_control(enum 
tick_broadcast_mode mode); #else static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } #endif /* BROADCAST */ #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU) extern void tick_offline_cpu(unsigned int cpu); #else static inline void tick_offline_cpu(unsigned int cpu) { } #endif #ifdef CONFIG_GENERIC_CLOCKEVENTS extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); #else static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; } #endif static inline void tick_broadcast_enable(void) { tick_broadcast_control(TICK_BROADCAST_ON); } static inline void tick_broadcast_disable(void) { tick_broadcast_control(TICK_BROADCAST_OFF); } static inline void tick_broadcast_force(void) { tick_broadcast_control(TICK_BROADCAST_FORCE); } static inline int tick_broadcast_enter(void) { return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER); } static inline void tick_broadcast_exit(void) { tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT); } enum tick_dep_bits { TICK_DEP_BIT_POSIX_TIMER = 0, TICK_DEP_BIT_PERF_EVENTS = 1, TICK_DEP_BIT_SCHED = 2, TICK_DEP_BIT_CLOCK_UNSTABLE = 3, TICK_DEP_BIT_RCU = 4 }; #define TICK_DEP_MASK_NONE 0 #define TICK_DEP_MASK_POSIX_TIMER (1 << TICK_DEP_BIT_POSIX_TIMER) #define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS) #define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED) #define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE) #define TICK_DEP_MASK_RCU (1 << TICK_DEP_BIT_RCU) #ifdef CONFIG_NO_HZ_COMMON extern bool tick_nohz_enabled; extern bool tick_nohz_tick_stopped(void); extern bool tick_nohz_tick_stopped_cpu(int cpu); extern void tick_nohz_idle_stop_tick(void); extern void tick_nohz_idle_retain_tick(void); extern void tick_nohz_idle_restart_tick(void); extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); extern bool tick_nohz_idle_got_tick(void); extern ktime_t tick_nohz_get_next_hrtimer(void); extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next); extern unsigned long tick_nohz_get_idle_calls(void); extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); static inline void tick_nohz_idle_stop_tick_protected(void) { local_irq_disable(); tick_nohz_idle_stop_tick(); local_irq_enable(); } #else /* !CONFIG_NO_HZ_COMMON */ #define tick_nohz_enabled (0) static inline int tick_nohz_tick_stopped(void) { return 0; } static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; } static inline void tick_nohz_idle_stop_tick(void) { } static inline void tick_nohz_idle_retain_tick(void) { } static inline void tick_nohz_idle_restart_tick(void) { } static inline void tick_nohz_idle_enter(void) { } static inline void tick_nohz_idle_exit(void) { } static inline bool tick_nohz_idle_got_tick(void) { return false; } static inline ktime_t tick_nohz_get_next_hrtimer(void) { /* Next wake up is the tick period, assume it starts now */ return ktime_add(ktime_get(), TICK_NSEC); } static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) { *delta_next = TICK_NSEC; return *delta_next; } static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } static inline void tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ #ifdef 
CONFIG_NO_HZ_FULL extern bool tick_nohz_full_running; extern cpumask_var_t tick_nohz_full_mask; static inline bool tick_nohz_full_enabled(void) { if (!context_tracking_is_enabled()) return false; return tick_nohz_full_running; } static inline bool tick_nohz_full_cpu(int cpu) { if (!tick_nohz_full_enabled()) return false; return cpumask_test_cpu(cpu, tick_nohz_full_mask); } static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { if (tick_nohz_full_enabled()) cpumask_or(mask, mask, tick_nohz_full_mask); } extern void tick_nohz_dep_set(enum tick_dep_bits bit); extern void tick_nohz_dep_clear(enum tick_dep_bits bit); extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit); extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit); extern void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit); extern void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit); extern void tick_nohz_dep_set_signal(struct signal_struct *signal, enum tick_dep_bits bit); extern void tick_nohz_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit); extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu); /* * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases * on top of static keys. */ static inline void tick_dep_set(enum tick_dep_bits bit) { if (tick_nohz_full_enabled()) tick_nohz_dep_set(bit); } static inline void tick_dep_clear(enum tick_dep_bits bit) { if (tick_nohz_full_enabled()) tick_nohz_dep_clear(bit); } static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { if (tick_nohz_full_cpu(cpu)) tick_nohz_dep_set_cpu(cpu, bit); } static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { if (tick_nohz_full_cpu(cpu)) tick_nohz_dep_clear_cpu(cpu, bit); } static inline void tick_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) { if (tick_nohz_full_enabled()) tick_nohz_dep_set_task(tsk, bit); } static inline void tick_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) { if (tick_nohz_full_enabled()) tick_nohz_dep_clear_task(tsk, bit); } static inline void tick_dep_set_signal(struct signal_struct *signal, enum tick_dep_bits bit) { if (tick_nohz_full_enabled()) tick_nohz_dep_set_signal(signal, bit); } static inline void tick_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit) { if (tick_nohz_full_enabled()) tick_nohz_dep_clear_signal(signal, bit); } extern void tick_nohz_full_kick_cpu(int cpu); extern void __tick_nohz_task_switch(void); extern void __init tick_nohz_full_setup(cpumask_var_t cpumask); #else static inline bool tick_nohz_full_enabled(void) { return false; } static inline bool tick_nohz_full_cpu(int cpu) { return false; } static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { } static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { } static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; } static inline void tick_dep_set(enum tick_dep_bits bit) { } static inline void tick_dep_clear(enum tick_dep_bits bit) { } static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { } static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { } static inline void tick_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit) { } static inline void tick_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit) { } static inline void tick_dep_set_signal(struct 
signal_struct *signal, enum tick_dep_bits bit) { } static inline void tick_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit) { } static inline void tick_nohz_full_kick_cpu(int cpu) { } static inline void __tick_nohz_task_switch(void) { } static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { } #endif static inline void tick_nohz_task_switch(void) { if (tick_nohz_full_enabled()) __tick_nohz_task_switch(); } #endif iio/machine.h 0000644 00000002101 14722070374 0007101 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O in kernel access map definitions for board files. * * Copyright (c) 2011 Jonathan Cameron */ #ifndef __LINUX_IIO_MACHINE_H__ #define __LINUX_IIO_MACHINE_H__ /** * struct iio_map - description of link between consumer and device channels * @adc_channel_label: Label used to identify the channel on the provider. * This is matched against the datasheet_name element * of struct iio_chan_spec. * @consumer_dev_name: Name to uniquely identify the consumer device. * @consumer_channel: Unique name used to identify the channel on the * consumer side. * @consumer_data: Data about the channel for use by the consumer driver. */ struct iio_map { const char *adc_channel_label; const char *consumer_dev_name; const char *consumer_channel; void *consumer_data; }; #define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \ { \ .adc_channel_label = _provider_channel, \ .consumer_dev_name = _consumer_dev_name, \ .consumer_channel = _consumer_channel, \ } #endif iio/sysfs.h 0000644 00000011130 14722070374 0006646 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O core * *Copyright (c) 2008 Jonathan Cameron * * General attributes */ #ifndef _INDUSTRIAL_IO_SYSFS_H_ #define _INDUSTRIAL_IO_SYSFS_H_ struct iio_chan_spec; /** * struct iio_dev_attr - iio specific device attribute * @dev_attr: underlying device attribute * @address: associated register address * @l: list head for maintaining list of dynamically created attrs * @c: specification for the underlying channel */ struct iio_dev_attr { struct device_attribute dev_attr; u64 address; struct list_head l; struct iio_chan_spec const *c; }; #define to_iio_dev_attr(_dev_attr) \ container_of(_dev_attr, struct iio_dev_attr, dev_attr) ssize_t iio_read_const_attr(struct device *dev, struct device_attribute *attr, char *len); /** * struct iio_const_attr - constant device specific attribute * often used for things like available modes * @string: attribute string * @dev_attr: underlying device attribute */ struct iio_const_attr { const char *string; struct device_attribute dev_attr; }; #define to_iio_const_attr(_dev_attr) \ container_of(_dev_attr, struct iio_const_attr, dev_attr) /* Some attributes will be hard coded (device dependent) and not require an address, in these cases pass a negative */ #define IIO_ATTR(_name, _mode, _show, _store, _addr) \ { .dev_attr = __ATTR(_name, _mode, _show, _store), \ .address = _addr } #define IIO_ATTR_RO(_name, _addr) \ { .dev_attr = __ATTR_RO(_name), \ .address = _addr } #define IIO_ATTR_WO(_name, _addr) \ { .dev_attr = __ATTR_WO(_name), \ .address = _addr } #define IIO_ATTR_RW(_name, _addr) \ { .dev_attr = __ATTR_RW(_name), \ .address = _addr } #define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \ struct iio_dev_attr iio_dev_attr_##_name \ = IIO_ATTR(_name, _mode, _show, _store, _addr) #define IIO_DEVICE_ATTR_RO(_name, _addr) \ struct iio_dev_attr iio_dev_attr_##_name \ = IIO_ATTR_RO(_name, _addr) #define 
IIO_DEVICE_ATTR_WO(_name, _addr) \ struct iio_dev_attr iio_dev_attr_##_name \ = IIO_ATTR_WO(_name, _addr) #define IIO_DEVICE_ATTR_RW(_name, _addr) \ struct iio_dev_attr iio_dev_attr_##_name \ = IIO_ATTR_RW(_name, _addr) #define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \ struct iio_dev_attr iio_dev_attr_##_vname \ = IIO_ATTR(_name, _mode, _show, _store, _addr) #define IIO_CONST_ATTR(_name, _string) \ struct iio_const_attr iio_const_attr_##_name \ = { .string = _string, \ .dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)} #define IIO_CONST_ATTR_NAMED(_vname, _name, _string) \ struct iio_const_attr iio_const_attr_##_vname \ = { .string = _string, \ .dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)} /* Generic attributes of onetype or another */ /** * IIO_DEV_ATTR_SAMP_FREQ - sets any internal clock frequency * @_mode: sysfs file mode/permissions * @_show: output method for the attribute * @_store: input method for the attribute **/ #define IIO_DEV_ATTR_SAMP_FREQ(_mode, _show, _store) \ IIO_DEVICE_ATTR(sampling_frequency, _mode, _show, _store, 0) /** * IIO_DEV_ATTR_SAMP_FREQ_AVAIL - list available sampling frequencies * @_show: output method for the attribute * * May be mode dependent on some devices **/ #define IIO_DEV_ATTR_SAMP_FREQ_AVAIL(_show) \ IIO_DEVICE_ATTR(sampling_frequency_available, S_IRUGO, _show, NULL, 0) /** * IIO_CONST_ATTR_SAMP_FREQ_AVAIL - list available sampling frequencies * @_string: frequency string for the attribute * * Constant version **/ #define IIO_CONST_ATTR_SAMP_FREQ_AVAIL(_string) \ IIO_CONST_ATTR(sampling_frequency_available, _string) /** * IIO_DEV_ATTR_INT_TIME_AVAIL - list available integration times * @_show: output method for the attribute **/ #define IIO_DEV_ATTR_INT_TIME_AVAIL(_show) \ IIO_DEVICE_ATTR(integration_time_available, S_IRUGO, _show, NULL, 0) /** * IIO_CONST_ATTR_INT_TIME_AVAIL - list available integration times * @_string: frequency string for the attribute * * Constant version **/ #define IIO_CONST_ATTR_INT_TIME_AVAIL(_string) \ IIO_CONST_ATTR(integration_time_available, _string) #define IIO_DEV_ATTR_TEMP_RAW(_show) \ IIO_DEVICE_ATTR(in_temp_raw, S_IRUGO, _show, NULL, 0) #define IIO_CONST_ATTR_TEMP_OFFSET(_string) \ IIO_CONST_ATTR(in_temp_offset, _string) #define IIO_CONST_ATTR_TEMP_SCALE(_string) \ IIO_CONST_ATTR(in_temp_scale, _string) #endif /* _INDUSTRIAL_IO_SYSFS_H_ */ iio/trigger_consumer.h 0000644 00000003224 14722070374 0011062 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O core, trigger consumer functions * * Copyright (c) 2008-2011 Jonathan Cameron */ #ifndef __LINUX_IIO_TRIGGER_CONSUMER_H__ #define __LINUX_IIO_TRIGGER_CONSUMER_H__ #include <linux/interrupt.h> #include <linux/types.h> struct iio_dev; struct iio_trigger; /** * struct iio_poll_func - poll function pair * * @indio_dev: data specific to device (passed into poll func) * @h: the function that is actually run on trigger * @thread: threaded interrupt part * @type: the type of interrupt (basically if oneshot) * @name: name used to identify the trigger consumer. * @irq: the corresponding irq as allocated from the * trigger pool * @timestamp: some devices need a timestamp grabbed as soon * as possible after the trigger - hence handler * passes it via here. 
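 *
 * Typical allocation in a driver (a hedged sketch; the threaded handler
 * name is illustrative):
 *
 *	pf = iio_alloc_pollfunc(iio_pollfunc_store_time,	// top half
 *				my_trigger_handler,		// threaded part
 *				IRQF_ONESHOT,
 *				indio_dev,
 *				"%s_consumer%d",
 *				indio_dev->name, indio_dev->id);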
**/ struct iio_poll_func { struct iio_dev *indio_dev; irqreturn_t (*h)(int irq, void *p); irqreturn_t (*thread)(int irq, void *p); int type; char *name; int irq; s64 timestamp; }; struct iio_poll_func *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), int type, struct iio_dev *indio_dev, const char *fmt, ...); void iio_dealloc_pollfunc(struct iio_poll_func *pf); irqreturn_t iio_pollfunc_store_time(int irq, void *p); void iio_trigger_notify_done(struct iio_trigger *trig); /* * Two functions for common case where all that happens is a pollfunc * is attached and detached from a trigger */ int iio_triggered_buffer_postenable(struct iio_dev *indio_dev); int iio_triggered_buffer_predisable(struct iio_dev *indio_dev); #endif iio/buffer-dmaengine.h 0000644 00000000614 14722070374 0010702 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2014-2015 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> */ #ifndef __IIO_DMAENGINE_H__ #define __IIO_DMAENGINE_H__ struct iio_buffer; struct device; struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, const char *channel); void iio_dmaengine_buffer_free(struct iio_buffer *buffer); #endif iio/types.h 0000644 00000002706 14722070374 0006654 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* industrial I/O data types needed both in and out of kernel * * Copyright (c) 2008 Jonathan Cameron */ #ifndef _IIO_TYPES_H_ #define _IIO_TYPES_H_ #include <uapi/linux/iio/types.h> enum iio_event_info { IIO_EV_INFO_ENABLE, IIO_EV_INFO_VALUE, IIO_EV_INFO_HYSTERESIS, IIO_EV_INFO_PERIOD, IIO_EV_INFO_HIGH_PASS_FILTER_3DB, IIO_EV_INFO_LOW_PASS_FILTER_3DB, }; #define IIO_VAL_INT 1 #define IIO_VAL_INT_PLUS_MICRO 2 #define IIO_VAL_INT_PLUS_NANO 3 #define IIO_VAL_INT_PLUS_MICRO_DB 4 #define IIO_VAL_INT_MULTIPLE 5 #define IIO_VAL_FRACTIONAL 10 #define IIO_VAL_FRACTIONAL_LOG2 11 enum iio_available_type { IIO_AVAIL_LIST, IIO_AVAIL_RANGE, }; enum iio_chan_info_enum { IIO_CHAN_INFO_RAW = 0, IIO_CHAN_INFO_PROCESSED, IIO_CHAN_INFO_SCALE, IIO_CHAN_INFO_OFFSET, IIO_CHAN_INFO_CALIBSCALE, IIO_CHAN_INFO_CALIBBIAS, IIO_CHAN_INFO_PEAK, IIO_CHAN_INFO_PEAK_SCALE, IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, IIO_CHAN_INFO_AVERAGE_RAW, IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY, IIO_CHAN_INFO_SAMP_FREQ, IIO_CHAN_INFO_FREQUENCY, IIO_CHAN_INFO_PHASE, IIO_CHAN_INFO_HARDWAREGAIN, IIO_CHAN_INFO_HYSTERESIS, IIO_CHAN_INFO_INT_TIME, IIO_CHAN_INFO_ENABLE, IIO_CHAN_INFO_CALIBHEIGHT, IIO_CHAN_INFO_CALIBWEIGHT, IIO_CHAN_INFO_DEBOUNCE_COUNT, IIO_CHAN_INFO_DEBOUNCE_TIME, IIO_CHAN_INFO_CALIBEMISSIVITY, IIO_CHAN_INFO_OVERSAMPLING_RATIO, }; #endif /* _IIO_TYPES_H_ */ iio/sw_trigger.h 0000644 00000003357 14722070374 0007667 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O software trigger interface * * Copyright (c) 2015 Intel Corporation */ #ifndef __IIO_SW_TRIGGER #define __IIO_SW_TRIGGER #include <linux/module.h> #include <linux/device.h> #include <linux/iio/iio.h> #include <linux/configfs.h> #define module_iio_sw_trigger_driver(__iio_sw_trigger_type) \ module_driver(__iio_sw_trigger_type, iio_register_sw_trigger_type, \ iio_unregister_sw_trigger_type) struct iio_sw_trigger_ops; struct iio_sw_trigger_type { const char *name; struct module *owner; const struct iio_sw_trigger_ops *ops; struct list_head list; struct config_group *group; }; struct iio_sw_trigger { struct iio_trigger *trigger; struct iio_sw_trigger_type *trigger_type; struct 
config_group group; }; struct iio_sw_trigger_ops { struct iio_sw_trigger* (*probe)(const char *); int (*remove)(struct iio_sw_trigger *); }; static inline struct iio_sw_trigger *to_iio_sw_trigger(struct config_item *item) { return container_of(to_config_group(item), struct iio_sw_trigger, group); } int iio_register_sw_trigger_type(struct iio_sw_trigger_type *tt); void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt); struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *); void iio_sw_trigger_destroy(struct iio_sw_trigger *); int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt); void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt); static inline void iio_swt_group_init_type_name(struct iio_sw_trigger *t, const char *name, const struct config_item_type *type) { #if IS_ENABLED(CONFIG_CONFIGFS_FS) config_group_init_type_name(&t->group, name, type); #endif } #endif /* __IIO_SW_TRIGGER */ iio/accel/kxcjk_1013.h 0000644 00000000413 14722070374 0010326 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * KXCJK-1013 3-axis accelerometer Interface * Copyright (c) 2014, Intel Corporation. */ #ifndef __IIO_KXCJK_1013_H__ #define __IIO_KXCJK_1013_H__ struct kxcjk_1013_platform_data { bool active_high_intr; }; #endif iio/hw-consumer.h 0000644 00000001253 14722070374 0007753 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Industrial I/O in kernel hardware consumer interface * * Copyright 2017 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> */ #ifndef LINUX_IIO_HW_CONSUMER_H #define LINUX_IIO_HW_CONSUMER_H struct iio_hw_consumer; struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev); void iio_hw_consumer_free(struct iio_hw_consumer *hwc); struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev); void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc); int iio_hw_consumer_enable(struct iio_hw_consumer *hwc); void iio_hw_consumer_disable(struct iio_hw_consumer *hwc); #endif iio/driver.h 0000644 00000001276 14722070374 0007004 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O in kernel access map interface. * * Copyright (c) 2011 Jonathan Cameron */ #ifndef _IIO_INKERN_H_ #define _IIO_INKERN_H_ struct iio_dev; struct iio_map; /** * iio_map_array_register() - tell the core about inkernel consumers * @indio_dev: provider device * @map: array of mappings specifying association of channel with client */ int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *map); /** * iio_map_array_unregister() - tell the core to remove consumer mappings for * the given provider device * @indio_dev: provider device */ int iio_map_array_unregister(struct iio_dev *indio_dev); #endif iio/adc/ad_sigma_delta.h 0000644 00000013613 14722070374 0011153 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Support code for Analog Devices Sigma-Delta ADCs * * Copyright 2012 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> */ #ifndef __AD_SIGMA_DELTA_H__ #define __AD_SIGMA_DELTA_H__ enum ad_sigma_delta_mode { AD_SD_MODE_CONTINUOUS = 0, AD_SD_MODE_SINGLE = 1, AD_SD_MODE_IDLE = 2, AD_SD_MODE_POWERDOWN = 3, }; /** * struct ad_sd_calib_data - Calibration data for Sigma Delta devices * @mode: Calibration mode. * @channel: Calibration channel.
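 *
 * A sketch of a per-chip calibration table as consumed by
 * ad_sd_calibrate_all() below (the MYDEV_* mode and channel constants
 * are hypothetical placeholders, not part of this header):
 *
 *	static const struct ad_sd_calib_data mydev_calib[] = {
 *		{ MYDEV_MODE_CAL_ZERO_SCALE, MYDEV_CHAN_AIN1 },
 *		{ MYDEV_MODE_CAL_FULL_SCALE, MYDEV_CHAN_AIN1 },
 *	};
 *
 *	ret = ad_sd_calibrate_all(&st->sd, mydev_calib,
 *				  ARRAY_SIZE(mydev_calib));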
*/ struct ad_sd_calib_data { unsigned int mode; unsigned int channel; }; struct ad_sigma_delta; struct iio_dev; /** * struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options * @set_channel: Will be called to select the current channel, may be NULL. * @set_mode: Will be called to select the current mode, may be NULL. * @postprocess_sample: Is called for each sampled data word, can be used to * modify or drop the sample data; may be NULL. * @has_registers: true if the device has writable and readable registers, false * if there is just one read-only sample data shift register. * @addr_shift: Shift of the register address in the communications register. * @read_mask: Mask for the communications register having the read bit set. * @data_reg: Address of the data register, if 0 the default address of 0x3 will * be used. */ struct ad_sigma_delta_info { int (*set_channel)(struct ad_sigma_delta *, unsigned int channel); int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode); int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample); bool has_registers; unsigned int addr_shift; unsigned int read_mask; unsigned int data_reg; }; /** * struct ad_sigma_delta - Sigma Delta device struct * @spi: The spi device associated with the Sigma Delta device. * @trig: The IIO trigger associated with the Sigma Delta device. * * Most of the fields are private to the sigma delta library code and should not * be accessed by individual drivers. */ struct ad_sigma_delta { struct spi_device *spi; struct iio_trigger *trig; /* private: */ struct completion completion; bool irq_dis; bool bus_locked; bool keep_cs_asserted; uint8_t comm; const struct ad_sigma_delta_info *info; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines.
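 * The ____cacheline_aligned annotation on data[] below gives the buffer
 * its own cache line, so an in-flight DMA transfer cannot clobber (or be
 * clobbered by) the neighbouring fields of this struct.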
*/ uint8_t data[4] ____cacheline_aligned; }; static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd, unsigned int channel) { if (sd->info->set_channel) return sd->info->set_channel(sd, channel); return 0; } static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd, unsigned int mode) { if (sd->info->set_mode) return sd->info->set_mode(sd, mode); return 0; } static inline int ad_sigma_delta_postprocess_sample(struct ad_sigma_delta *sd, unsigned int raw_sample) { if (sd->info->postprocess_sample) return sd->info->postprocess_sample(sd, raw_sample); return 0; } void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, uint8_t comm); int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, unsigned int size, unsigned int val); int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, unsigned int size, unsigned int *val); int ad_sd_reset(struct ad_sigma_delta *sigma_delta, unsigned int reset_length); int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, int *val); int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta, const struct ad_sd_calib_data *cd, unsigned int n); int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev, struct spi_device *spi, const struct ad_sigma_delta_info *info); int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev); void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev); int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig); #define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ _storagebits, _shift, _extend_name, _type, _mask_all) \ { \ .type = (_type), \ .differential = (_channel2 == -1 ? 0 : 1), \ .indexed = 1, \ .channel = (_channel1), \ .channel2 = (_channel2), \ .address = (_address), \ .extend_name = (_extend_name), \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_OFFSET), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ .info_mask_shared_by_all = _mask_all, \ .scan_index = (_si), \ .scan_type = { \ .sign = 'u', \ .realbits = (_bits), \ .storagebits = (_storagebits), \ .shift = (_shift), \ .endianness = IIO_BE, \ }, \ } #define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ _storagebits, _shift) \ __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ _storagebits, _shift, NULL, IIO_VOLTAGE, \ BIT(IIO_CHAN_INFO_SAMP_FREQ)) #define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \ _storagebits, _shift) \ __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \ _storagebits, _shift, "shorted", IIO_VOLTAGE, \ BIT(IIO_CHAN_INFO_SAMP_FREQ)) #define AD_SD_CHANNEL(_si, _channel, _address, _bits, \ _storagebits, _shift) \ __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ _storagebits, _shift, NULL, IIO_VOLTAGE, \ BIT(IIO_CHAN_INFO_SAMP_FREQ)) #define AD_SD_CHANNEL_NO_SAMP_FREQ(_si, _channel, _address, _bits, \ _storagebits, _shift) \ __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ _storagebits, _shift, NULL, IIO_VOLTAGE, 0) #define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \ __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \ _storagebits, _shift, NULL, IIO_TEMP, \ BIT(IIO_CHAN_INFO_SAMP_FREQ)) #define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \ _shift) \ __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ _storagebits, _shift, "supply", IIO_VOLTAGE, \ BIT(IIO_CHAN_INFO_SAMP_FREQ)) #endif iio/adc/stm32-dfsdm-adc.h 0000644 00000001031 14722070374 0011015 0 ustar 00 /* 
SPDX-License-Identifier: GPL-2.0 */ /* * This file describes the STM32 DFSDM IIO driver API for the audio part * * Copyright (C) 2017, STMicroelectronics - All Rights Reserved * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>. */ #ifndef STM32_DFSDM_ADC_H #define STM32_DFSDM_ADC_H #include <linux/iio/iio.h> int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev, int (*cb)(const void *data, size_t size, void *private), void *private); int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev); #endif iio/buffer.h 0000644 00000003126 14722070374 0006756 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O core - generic buffer interfaces. * * Copyright (c) 2008 Jonathan Cameron */ #ifndef _IIO_BUFFER_GENERIC_H_ #define _IIO_BUFFER_GENERIC_H_ #include <linux/sysfs.h> #include <linux/iio/iio.h> struct iio_buffer; void iio_buffer_set_attrs(struct iio_buffer *buffer, const struct attribute **attrs); int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data); /** * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers * @indio_dev: iio_dev structure for device. * @data: sample data * @timestamp: timestamp for the sample data * * Pushes data to the IIO device's buffers. If timestamps are enabled for the * device the function will store the supplied timestamp as the last element in * the sample data buffer before pushing it to the device buffers. The sample * data buffer needs to be large enough to hold the additional timestamp * (usually the buffer should be indio_dev->scan_bytes bytes large). * * Returns 0 on success, a negative error code otherwise. */ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev, void *data, int64_t timestamp) { if (indio_dev->scan_timestamp) { size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1; ((int64_t *)data)[ts_offset] = timestamp; } return iio_push_to_buffers(indio_dev, data); } bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, const unsigned long *mask); void iio_device_attach_buffer(struct iio_dev *indio_dev, struct iio_buffer *buffer); #endif /* _IIO_BUFFER_GENERIC_H_ */ iio/triggered_event.h 0000644 00000000540 14722070374 0010657 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IIO_TRIGGERED_EVENT_H_ #define _LINUX_IIO_TRIGGERED_EVENT_H_ #include <linux/interrupt.h> int iio_triggered_event_setup(struct iio_dev *indio_dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p)); void iio_triggered_event_cleanup(struct iio_dev *indio_dev); #endif iio/buffer-dma.h 0000644 00000011115 14722070374 0007512 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2013-2015 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de> */ #ifndef __INDUSTRIALIO_DMA_BUFFER_H__ #define __INDUSTRIALIO_DMA_BUFFER_H__ #include <linux/list.h> #include <linux/kref.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/iio/buffer.h> struct iio_dma_buffer_queue; struct iio_dma_buffer_ops; struct device; struct iio_buffer_block { u32 size; u32 bytes_used; }; /** * enum iio_block_state - State of a struct iio_dma_buffer_block * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue * @IIO_BLOCK_STATE_DEAD: Block has been marked to be freed */ enum iio_block_state { IIO_BLOCK_STATE_DEQUEUED, IIO_BLOCK_STATE_QUEUED, IIO_BLOCK_STATE_ACTIVE, IIO_BLOCK_STATE_DONE, IIO_BLOCK_STATE_DEAD, }; /** * struct iio_dma_buffer_block - IIO buffer block * @head: List head * @size: Total size of the block in bytes * @bytes_used: Number of bytes that contain valid data * @vaddr: Virtual address of the block's memory * @phys_addr: Physical address of the block's memory * @queue: Parent DMA buffer queue * @kref: kref used to manage the lifetime of the block * @state: Current state of the block */ struct iio_dma_buffer_block { /* May only be accessed by the owner of the block */ struct list_head head; size_t bytes_used; /* * Set during allocation, constant thereafter. May be accessed read-only * by anybody holding a reference to the block. */ void *vaddr; dma_addr_t phys_addr; size_t size; struct iio_dma_buffer_queue *queue; /* Must not be accessed outside the core. */ struct kref kref; /* * Must not be accessed outside the core. Access needs to hold * queue->list_lock if the block is not owned by the core. */ enum iio_block_state state; }; /** * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer * @blocks: Buffer blocks used for fileio * @active_block: Block being used in read() * @pos: Read offset in the active block * @block_size: Size of each block */ struct iio_dma_buffer_queue_fileio { struct iio_dma_buffer_block *blocks[2]; struct iio_dma_buffer_block *active_block; size_t pos; size_t block_size; }; /** * struct iio_dma_buffer_queue - DMA buffer base structure * @buffer: IIO buffer base structure * @dev: Parent device * @ops: DMA buffer callbacks * @lock: Protects the incoming list, active and the fields in the fileio * substruct * @list_lock: Protects lists that contain blocks which can be modified in * atomic context as well as blocks on those lists.
This is the outgoing queue * list and typically also a list of active blocks in the part that handles * the DMA controller * @incoming: List of buffers on the incoming queue * @outgoing: List of buffers on the outgoing queue * @active: Whether the buffer is currently active * @fileio: FileIO state */ struct iio_dma_buffer_queue { struct iio_buffer buffer; struct device *dev; const struct iio_dma_buffer_ops *ops; struct mutex lock; spinlock_t list_lock; struct list_head incoming; struct list_head outgoing; bool active; struct iio_dma_buffer_queue_fileio fileio; }; /** * struct iio_dma_buffer_ops - DMA buffer callback operations * @submit: Called when a block is submitted to the DMA controller * @abort: Should abort all pending transfers */ struct iio_dma_buffer_ops { int (*submit)(struct iio_dma_buffer_queue *queue, struct iio_dma_buffer_block *block); void (*abort)(struct iio_dma_buffer_queue *queue); }; void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block); void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, struct list_head *list); int iio_dma_buffer_enable(struct iio_buffer *buffer, struct iio_dev *indio_dev); int iio_dma_buffer_disable(struct iio_buffer *buffer, struct iio_dev *indio_dev); int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, char __user *user_buffer); size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length); int iio_dma_buffer_request_update(struct iio_buffer *buffer); int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, struct device *dma_dev, const struct iio_dma_buffer_ops *ops); void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue); void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue); #endif iio/gyro/itg3200.h 0000644 00000010115 14722070374 0007551 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * itg3200.h -- support InvenSense ITG3200 * Digital 3-Axis Gyroscope driver * * Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de> * Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de> * Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de> */ #ifndef I2C_ITG3200_H_ #define I2C_ITG3200_H_ #include <linux/iio/iio.h> /* Register with I2C address (34h) */ #define ITG3200_REG_ADDRESS 0x00 /* Sample rate divider * Range: 0 to 255 * Default value: 0x00 */ #define ITG3200_REG_SAMPLE_RATE_DIV 0x15 /* Digital low pass filter settings */ #define ITG3200_REG_DLPF 0x16 /* DLPF full scale range */ #define ITG3200_DLPF_FS_SEL_2000 0x18 /* Bandwidth (Hz) and internal sample rate * (kHz) of DLPF */ #define ITG3200_DLPF_256_8 0x00 #define ITG3200_DLPF_188_1 0x01 #define ITG3200_DLPF_98_1 0x02 #define ITG3200_DLPF_42_1 0x03 #define ITG3200_DLPF_20_1 0x04 #define ITG3200_DLPF_10_1 0x05 #define ITG3200_DLPF_5_1 0x06 #define ITG3200_DLPF_CFG_MASK 0x07 /* Configuration for interrupt operations */ #define ITG3200_REG_IRQ_CONFIG 0x17 /* Logic level */ #define ITG3200_IRQ_ACTIVE_LOW 0x80 #define ITG3200_IRQ_ACTIVE_HIGH 0x00 /* Drive type */ #define ITG3200_IRQ_OPEN_DRAIN 0x40 #define ITG3200_IRQ_PUSH_PULL 0x00 /* Latch mode */ #define ITG3200_IRQ_LATCH_UNTIL_CLEARED 0x20 #define ITG3200_IRQ_LATCH_50US_PULSE 0x00 /* Latch clear method */ #define ITG3200_IRQ_LATCH_CLEAR_ANY 0x10 #define ITG3200_IRQ_LATCH_CLEAR_STATUS 0x00 /* Enable interrupt when device is ready */ #define ITG3200_IRQ_DEVICE_RDY_ENABLE 0x04 /* 
Enable interrupt when data is available */ #define ITG3200_IRQ_DATA_RDY_ENABLE 0x01 /* Determine the status of ITG-3200 interrupts */ #define ITG3200_REG_IRQ_STATUS 0x1A /* Status of 'device is ready'-interrupt */ #define ITG3200_IRQ_DEVICE_RDY_STATUS 0x04 /* Status of 'data is available'-interrupt */ #define ITG3200_IRQ_DATA_RDY_STATUS 0x01 /* Sensor registers */ #define ITG3200_REG_TEMP_OUT_H 0x1B #define ITG3200_REG_TEMP_OUT_L 0x1C #define ITG3200_REG_GYRO_XOUT_H 0x1D #define ITG3200_REG_GYRO_XOUT_L 0x1E #define ITG3200_REG_GYRO_YOUT_H 0x1F #define ITG3200_REG_GYRO_YOUT_L 0x20 #define ITG3200_REG_GYRO_ZOUT_H 0x21 #define ITG3200_REG_GYRO_ZOUT_L 0x22 /* Power management */ #define ITG3200_REG_POWER_MANAGEMENT 0x3E /* Reset device and internal registers to the * power-up-default settings */ #define ITG3200_RESET 0x80 /* Enable low power sleep mode */ #define ITG3200_SLEEP 0x40 /* Put the corresponding gyroscope in standby mode */ #define ITG3200_STANDBY_GYRO_X 0x20 #define ITG3200_STANDBY_GYRO_Y 0x10 #define ITG3200_STANDBY_GYRO_Z 0x08 /* Determine the device clock source */ #define ITG3200_CLK_INTERNAL 0x00 #define ITG3200_CLK_GYRO_X 0x01 #define ITG3200_CLK_GYRO_Y 0x02 #define ITG3200_CLK_GYRO_Z 0x03 #define ITG3200_CLK_EXT_32K 0x04 #define ITG3200_CLK_EXT_19M 0x05 /** * struct itg3200 - device instance specific data * @i2c: actual i2c_client * @trig: data ready trigger from itg3200 pin * @orientation: mounting matrix relative to main hardware **/ struct itg3200 { struct i2c_client *i2c; struct iio_trigger *trig; struct iio_mount_matrix orientation; }; enum ITG3200_SCAN_INDEX { ITG3200_SCAN_TEMP, ITG3200_SCAN_GYRO_X, ITG3200_SCAN_GYRO_Y, ITG3200_SCAN_GYRO_Z, ITG3200_SCAN_ELEMENTS, }; int itg3200_write_reg_8(struct iio_dev *indio_dev, u8 reg_address, u8 val); int itg3200_read_reg_8(struct iio_dev *indio_dev, u8 reg_address, u8 *val); #ifdef CONFIG_IIO_BUFFER void itg3200_remove_trigger(struct iio_dev *indio_dev); int itg3200_probe_trigger(struct iio_dev *indio_dev); int itg3200_buffer_configure(struct iio_dev *indio_dev); void itg3200_buffer_unconfigure(struct iio_dev *indio_dev); #else /* CONFIG_IIO_BUFFER */ static inline void itg3200_remove_trigger(struct iio_dev *indio_dev) { } static inline int itg3200_probe_trigger(struct iio_dev *indio_dev) { return 0; } static inline int itg3200_buffer_configure(struct iio_dev *indio_dev) { return 0; } static inline void itg3200_buffer_unconfigure(struct iio_dev *indio_dev) { } #endif /* CONFIG_IIO_BUFFER */ #endif /* ITG3200_H_ */ iio/events.h 0000644 00000004146 14722070374 0007014 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O - event passing to userspace * * Copyright (c) 2008-2011 Jonathan Cameron */ #ifndef _IIO_EVENTS_H_ #define _IIO_EVENTS_H_ #include <linux/iio/types.h> #include <uapi/linux/iio/events.h> /** * IIO_EVENT_CODE() - create event identifier * @chan_type: Type of the channel. Should be one of enum iio_chan_type. * @diff: Whether the event is for a differential channel or not. * @modifier: Modifier for the channel. Should be one of enum iio_modifier. * @direction: Direction of the event. One of enum iio_event_direction. * @type: Type of the event. Should be one of enum iio_event_type. * @chan: Channel number for non-differential channels. * @chan1: First channel number for differential channels. * @chan2: Second channel number for differential channels.
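 *
 * A usage sketch: the two lines below build the same identifier for a
 * rising threshold event on unmodified voltage channel 2 (the second one
 * uses the IIO_UNMOD_EVENT_CODE() helper defined further down), typically
 * handed to iio_push_event() afterwards:
 *
 *	ev = IIO_EVENT_CODE(IIO_VOLTAGE, 0, 0, IIO_EV_DIR_RISING,
 *			    IIO_EV_TYPE_THRESH, 2, 0, 0);
 *	ev = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2, IIO_EV_TYPE_THRESH,
 *				  IIO_EV_DIR_RISING);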
*/ #define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \ type, chan, chan1, chan2) \ (((u64)type << 56) | ((u64)diff << 55) | \ ((u64)direction << 48) | ((u64)modifier << 40) | \ ((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \ ((u16)chan)) /** * IIO_MOD_EVENT_CODE() - create event identifier for modified channels * @chan_type: Type of the channel. Should be one of enum iio_chan_type. * @number: Channel number. * @modifier: Modifier for the channel. Should be one of enum iio_modifier. * @type: Type of the event. Should be one of enum iio_event_type. * @direction: Direction of the event. One of enum iio_event_direction. */ #define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \ type, direction) \ IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0) /** * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels * @chan_type: Type of the channel. Should be one of enum iio_chan_type. * @number: Channel number. * @type: Type of the event. Should be one of enum iio_event_type. * @direction: Direction of the event. One of enum iio_event_direction. */ #define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \ IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0) #endif iio/consumer.h 0000644 00000033363 14722070374 0007346 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O in kernel consumer interface * * Copyright (c) 2011 Jonathan Cameron */ #ifndef _IIO_INKERN_CONSUMER_H_ #define _IIO_INKERN_CONSUMER_H_ #include <linux/types.h> #include <linux/iio/types.h> struct iio_dev; struct iio_chan_spec; struct device; /** * struct iio_channel - everything needed for a consumer to use a channel * @indio_dev: Device on which the channel exists. * @channel: Full description of the channel. * @data: Data about the channel used by consumer. */ struct iio_channel { struct iio_dev *indio_dev; const struct iio_chan_spec *channel; void *data; }; /** * iio_channel_get() - get description of all that is needed to access channel. * @dev: Pointer to consumer device. Device name must match * the name of the device as provided in the iio_map * with which the desired provider to consumer mapping * was registered. * @consumer_channel: Unique name to identify the channel on the consumer * side. This typically describes the channel's use within * the consumer. E.g. 'battery_voltage' */ struct iio_channel *iio_channel_get(struct device *dev, const char *consumer_channel); /** * iio_channel_release() - release channels obtained via iio_channel_get * @chan: The channel to be released. */ void iio_channel_release(struct iio_channel *chan); /** * devm_iio_channel_get() - Resource managed version of iio_channel_get(). * @dev: Pointer to consumer device. Device name must match * the name of the device as provided in the iio_map * with which the desired provider to consumer mapping * was registered. * @consumer_channel: Unique name to identify the channel on the consumer * side. This typically describes the channel's use within * the consumer. E.g. 'battery_voltage' * * Returns a valid pointer for the iio channel, or a pointer-encoded * negative errno if it is not able to get the iio channel. * * The allocated iio channel is automatically released when the device is * unbound. */ struct iio_channel *devm_iio_channel_get(struct device *dev, const char *consumer_channel); /** * devm_iio_channel_release() - Resource managed version of * iio_channel_release(). * @dev: Pointer to consumer device for which resource * is allocated.
* @chan: The channel to be released. */ void devm_iio_channel_release(struct device *dev, struct iio_channel *chan); /** * iio_channel_get_all() - get all channels associated with a client * @dev: Pointer to consumer device. * * Returns an array of iio_channel structures terminated with one with * a null iio_dev pointer. * This function is used by fairly generic consumers to get all the * channels registered as having this consumer. */ struct iio_channel *iio_channel_get_all(struct device *dev); /** * iio_channel_release_all() - reverse iio_channel_get_all * @chan: Array of channels to be released. */ void iio_channel_release_all(struct iio_channel *chan); /** * devm_iio_channel_get_all() - Resource managed version of * iio_channel_get_all(). * @dev: Pointer to consumer device. * * Returns an array of iio_channel structures terminated with one with * a null iio_dev pointer, or a pointer-encoded negative errno if it is * not able to get the iio channels. * * This function is used by fairly generic consumers to get all the * channels registered as having this consumer. * * The allocated iio channels are automatically released when the device is * unbound. */ struct iio_channel *devm_iio_channel_get_all(struct device *dev); /** * devm_iio_channel_release_all() - Resource managed version of * iio_channel_release_all(). * @dev: Pointer to consumer device for which resource * is allocated. * @chan: Array of channels to be released. */ void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan); struct iio_cb_buffer; /** * iio_channel_get_all_cb() - register callback for triggered capture * @dev: Pointer to client device. * @cb: Callback function. * @private: Private data passed to callback. * * NB right now we have no ability to mux data from multiple devices. * So if the channels requested come from different devices this will * fail. */ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, int (*cb)(const void *data, void *private), void *private); /** * iio_channel_cb_set_buffer_watermark() - set the buffer watermark. * @cb_buffer: The callback buffer from whom we want the channel * information. * @watermark: buffer watermark in bytes. * * This function allows one to configure the buffer watermark. */ int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer, size_t watermark); /** * iio_channel_release_all_cb() - release and unregister the callback. * @cb_buffer: The callback buffer that was allocated. */ void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buffer); /** * iio_channel_start_all_cb() - start the flow of data through callback. * @cb_buff: The callback buffer we are starting. */ int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff); /** * iio_channel_stop_all_cb() - stop the flow of data through the callback. * @cb_buff: The callback buffer we are stopping. */ void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff); /** * iio_channel_cb_get_channels() - get access to the underlying channels. * @cb_buffer: The callback buffer from whom we want the channel * information. * * This function allows one to obtain information about the channels. * Whilst this may allow direct reading if all buffers are disabled, the * primary aim is to allow drivers that are consuming a channel to query * things like scaling of the channel. */ struct iio_channel *iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer); /** * iio_channel_cb_get_iio_dev() - get access to the underlying device.
* @cb_buffer: The callback buffer from whom we want the device * information. * * This function allows one to obtain information about the device. * The primary aim is to allow drivers that are consuming a device to query * things like current trigger. */ struct iio_dev *iio_channel_cb_get_iio_dev(const struct iio_cb_buffer *cb_buffer); /** * iio_read_channel_raw() - read from a given channel * @chan: The channel being queried. * @val: Value read back. * * Note raw reads from iio channels are in adc counts and hence * scale will need to be applied if standard units required. */ int iio_read_channel_raw(struct iio_channel *chan, int *val); /** * iio_read_channel_average_raw() - read from a given channel * @chan: The channel being queried. * @val: Value read back. * * Note raw reads from iio channels are in adc counts and hence * scale will need to be applied if standard units required. * * In contrast to the normal iio_read_channel_raw this function * returns the average of multiple reads. */ int iio_read_channel_average_raw(struct iio_channel *chan, int *val); /** * iio_read_channel_processed() - read processed value from a given channel * @chan: The channel being queried. * @val: Value read back. * * Returns an error code or 0. * * This function will read a processed value from a channel. A processed value * means that this value will have the correct unit and not some device internal * representation. If the device does not support reporting a processed value * the function will query the raw value and the channel's scale and offset and * do the appropriate transformation. */ int iio_read_channel_processed(struct iio_channel *chan, int *val); /** * iio_write_channel_attribute() - Write values to the device attribute. * @chan: The channel being queried. * @val: Value being written. * @val2: Value being written. val2 use depends on attribute type. * @attribute: info attribute to be written. * * Returns an error code or 0. */ int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2, enum iio_chan_info_enum attribute); /** * iio_read_channel_attribute() - Read values from the device attribute. * @chan: The channel being queried. * @val: Value read back. * @val2: Value read back. Val2 use depends on attribute type. * @attribute: info attribute to be read. * * Returns an error code if it failed. Else returns a description of what is in val * and val2, such as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val * + val2/1e6 */ int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2, enum iio_chan_info_enum attribute); /** * iio_write_channel_raw() - write to a given channel * @chan: The channel being queried. * @val: Value being written. * * Note raw writes to iio channels are in dac counts and hence * scale will need to be applied if standard units required. */ int iio_write_channel_raw(struct iio_channel *chan, int val); /** * iio_read_max_channel_raw() - read maximum available raw value from a given * channel, i.e. the maximum possible value. * @chan: The channel being queried. * @val: Value read back. * * Note raw reads from iio channels are in adc counts and hence * scale will need to be applied if standard units are required. */ int iio_read_max_channel_raw(struct iio_channel *chan, int *val); /** * iio_read_avail_channel_raw() - read available raw values from a given channel * @chan: The channel being queried. * @vals: Available values read back. * @length: Number of entries in vals.
* * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST. * * For ranges, three vals are always returned; min, step and max. * For lists, all the possible values are enumerated. * * Note raw available values from iio channels are in adc counts and * hence scale will need to be applied if standard units are required. */ int iio_read_avail_channel_raw(struct iio_channel *chan, const int **vals, int *length); /** * iio_read_avail_channel_attribute() - read available channel attribute values * @chan: The channel being queried. * @vals: Available values read back. * @type: Type of values read back. * @length: Number of entries in vals. * @attribute: info attribute to be read back. * * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST. */ int iio_read_avail_channel_attribute(struct iio_channel *chan, const int **vals, int *type, int *length, enum iio_chan_info_enum attribute); /** * iio_get_channel_type() - get the type of a channel * @channel: The channel being queried. * @type: The type of the channel. * * returns the enum iio_chan_type of the channel */ int iio_get_channel_type(struct iio_channel *channel, enum iio_chan_type *type); /** * iio_read_channel_offset() - read the offset value for a channel * @chan: The channel being queried. * @val: First part of value read back. * @val2: Second part of value read back. * * Note returns a description of what is in val and val2, such * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val * + val2/1e6 */ int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2); /** * iio_read_channel_scale() - read the scale value for a channel * @chan: The channel being queried. * @val: First part of value read back. * @val2: Second part of value read back. * * Note returns a description of what is in val and val2, such * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val * + val2/1e6 */ int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2); /** * iio_convert_raw_to_processed() - Converts a raw value to a processed value * @chan: The channel being queried * @raw: The raw IIO value to convert * @processed: The result of the conversion * @scale: Scale factor to apply during the conversion * * Returns an error code or 0. * * This function converts a raw value to processed value for a specific channel. * A raw value is the device internal representation of a sample and the value * returned by iio_read_channel_raw, so the unit of that value is device * dependent. A processed value on the other hand is a value that has a * normalized unit according to the IIO specification. * * The scale factor allows one to increase the precision of the returned value. For * a scale factor of 1 the function will return the result in the normal IIO * unit for the channel type. E.g. millivolts for voltage channels; if you want * nanovolts instead pass 1000000 as the scale factor. */ int iio_convert_raw_to_processed(struct iio_channel *chan, int raw, int *processed, unsigned int scale); /** * iio_get_channel_ext_info_count() - get number of ext_info attributes * connected to the channel. * @chan: The channel being queried * * Returns the number of ext_info attributes */ unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan); /** * iio_read_channel_ext_info() - read ext_info attribute from a given channel * @chan: The channel being queried. * @attr: The ext_info attribute to read. * @buf: Where to store the attribute value. Assumed to hold * at least PAGE_SIZE bytes.
* * Returns the number of bytes written to buf (perhaps w/o zero termination; * it need not even be a string), or an error code. */ ssize_t iio_read_channel_ext_info(struct iio_channel *chan, const char *attr, char *buf); /** * iio_write_channel_ext_info() - write ext_info attribute to a given channel * @chan: The channel being queried. * @attr: The ext_info attribute to write. * @buf: The new attribute value. Strings need to be zero- * terminated, but the terminator should not be included * in the below len. * @len: The size of the new attribute value. * * Returns the number of accepted bytes, which should be the same as len. * An error code can also be returned. */ ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr, const char *buf, size_t len); #endif iio/dac/max517.h 0000644 00000000414 14722070374 0007253 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * MAX517 DAC driver * * Copyright 2011 Roland Stigge <stigge@antcom.de> */ #ifndef IIO_DAC_MAX517_H_ #define IIO_DAC_MAX517_H_ struct max517_platform_data { u16 vref_mv[8]; }; #endif /* IIO_DAC_MAX517_H_ */ iio/dac/ad5504.h 0000644 00000000361 14722070374 0007134 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AD5504 SPI DAC driver * * Copyright 2011 Analog Devices Inc. */ #ifndef SPI_AD5504_H_ #define SPI_AD5504_H_ struct ad5504_platform_data { u16 vref_mv; }; #endif /* SPI_AD5504_H_ */ iio/dac/ad5791.h 0000644 00000001047 14722070374 0007146 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AD5791 SPI DAC driver * * Copyright 2011 Analog Devices Inc. */ #ifndef SPI_AD5791_H_ #define SPI_AD5791_H_ /** * struct ad5791_platform_data - platform specific information * @vref_pos_mv: Vdd Positive Analog Supply Voltage (mV) * @vref_neg_mv: Vdd Negative Analog Supply Voltage (mV) * @use_rbuf_gain2: ext. amplifier connected in gain of two configuration */ struct ad5791_platform_data { u16 vref_pos_mv; u16 vref_neg_mv; bool use_rbuf_gain2; }; #endif /* SPI_AD5791_H_ */ iio/dac/ad5421.h 0000644 00000001510 14722070374 0007127 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IIO_DAC_AD5421_H__ #define __IIO_DAC_AD5421_H__ /** * enum ad5421_current_range - Current range the AD5421 is configured for. * @AD5421_CURRENT_RANGE_4mA_20mA: 4 mA to 20 mA (RANGE1,0 pins = 00) * @AD5421_CURRENT_RANGE_3mA8_21mA: 3.8 mA to 21 mA (RANGE1,0 pins = x1) * @AD5421_CURRENT_RANGE_3mA2_24mA: 3.2 mA to 24 mA (RANGE1,0 pins = 10) */ enum ad5421_current_range { AD5421_CURRENT_RANGE_4mA_20mA, AD5421_CURRENT_RANGE_3mA8_21mA, AD5421_CURRENT_RANGE_3mA2_24mA, }; /** * struct ad5421_platform_data - AD5421 DAC driver platform data * @external_vref: whether an external reference voltage is used or not * @current_range: Current range the AD5421 is configured for */ struct ad5421_platform_data { bool external_vref; enum ad5421_current_range current_range; }; #endif iio/dac/mcp4725.h 0000644 00000001331 14722070374 0007331 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * MCP4725 DAC driver * * Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net> */ #ifndef IIO_DAC_MCP4725_H_ #define IIO_DAC_MCP4725_H_ /** * struct mcp4725_platform_data - MCP4725/6 DAC specific data. * @use_vref: Whether an external reference voltage on Vref pin should be used. * Additional vref-supply must be specified when used. * @vref_buffered: Controls buffering of the external reference voltage. * * Vref related settings are available only on MCP4726.
See * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information. */ struct mcp4725_platform_data { bool use_vref; bool vref_buffered; }; #endif /* IIO_DAC_MCP4725_H_ */ iio/magnetometer/ak8975.h 0000644 00000000666 14722070374 0011132 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IIO_MAGNETOMETER_AK8975_H__ #define __IIO_MAGNETOMETER_AK8975_H__ #include <linux/iio/iio.h> /** * struct ak8975_platform_data - AK8975 magnetometer driver platform data * @eoc_gpio: data ready event gpio * @orientation: mounting matrix relative to main hardware */ struct ak8975_platform_data { int eoc_gpio; struct iio_mount_matrix orientation; }; #endif iio/kfifo_buf.h 0000644 00000000564 14722070374 0007442 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_IIO_KFIFO_BUF_H__ #define __LINUX_IIO_KFIFO_BUF_H__ struct iio_buffer; struct device; struct iio_buffer *iio_kfifo_allocate(void); void iio_kfifo_free(struct iio_buffer *r); struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev); void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r); #endif iio/sw_device.h 0000644 00000003310 14722070374 0007450 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O software device interface * * Copyright (c) 2016 Intel Corporation */ #ifndef __IIO_SW_DEVICE #define __IIO_SW_DEVICE #include <linux/module.h> #include <linux/device.h> #include <linux/iio/iio.h> #include <linux/configfs.h> #define module_iio_sw_device_driver(__iio_sw_device_type) \ module_driver(__iio_sw_device_type, iio_register_sw_device_type, \ iio_unregister_sw_device_type) struct iio_sw_device_ops; struct iio_sw_device_type { const char *name; struct module *owner; const struct iio_sw_device_ops *ops; struct list_head list; struct config_group *group; }; struct iio_sw_device { struct iio_dev *device; struct iio_sw_device_type *device_type; struct config_group group; }; struct iio_sw_device_ops { struct iio_sw_device* (*probe)(const char *); int (*remove)(struct iio_sw_device *); }; static inline struct iio_sw_device *to_iio_sw_device(struct config_item *item) { return container_of(to_config_group(item), struct iio_sw_device, group); } int iio_register_sw_device_type(struct iio_sw_device_type *dt); void iio_unregister_sw_device_type(struct iio_sw_device_type *dt); struct iio_sw_device *iio_sw_device_create(const char *, const char *); void iio_sw_device_destroy(struct iio_sw_device *); int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt); void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt); static inline void iio_swd_group_init_type_name(struct iio_sw_device *d, const char *name, const struct config_item_type *type) { #if IS_ENABLED(CONFIG_CONFIGFS_FS) config_group_init_type_name(&d->group, name, type); #endif } #endif /* __IIO_SW_DEVICE */ iio/common/cros_ec_sensors_core.h 0000644 00000014460 14722070374 0013201 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * ChromeOS EC sensor hub * * Copyright (C) 2016 Google, Inc */ #ifndef __CROS_EC_SENSORS_CORE_H #define __CROS_EC_SENSORS_CORE_H #include <linux/iio/iio.h> #include <linux/irqreturn.h> #include <linux/platform_data/cros_ec_commands.h> #include <linux/platform_data/cros_ec_proto.h> enum { CROS_EC_SENSOR_X, CROS_EC_SENSOR_Y, CROS_EC_SENSOR_Z, CROS_EC_SENSOR_MAX_AXIS, }; /* EC returns sensor values using signed 16 bit registers */ #define CROS_EC_SENSOR_BITS 16 /* * 4 16 bit channels are allowed. * Good enough for current sensors, they use up to 3 16 bit vectors. 
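 *
 * Resulting layout sketch for a 3-axis sensor within the 16-byte sample
 * (derived from the sizes stated above):
 *
 *	bytes 0..5    three s16 axis values
 *	bytes 6..7    padding, so the timestamp stays 8-byte aligned
 *	bytes 8..15   s64 timestamp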
*/ #define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2) /* Minimum sampling period to use when device is suspending */ #define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000 /* 1 second */ /** * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver * @ec: cros EC device structure * @cmd_lock: lock used to prevent simultaneous access to the * commands. * @msg: cros EC command structure * @param: motion sensor parameters structure * @resp: motion sensor response structure * @type: type of motion sensor * @loc: location where the motion sensor is placed * @calib: calibration parameters. Note that trigger * captured data will always provide the calibrated * data * @samples: static array to hold data from a single capture. * For each channel we need 2 bytes, except for * the timestamp. The timestamp is always last and * is always 8-byte aligned. * @read_ec_sensors_data: function used for accessing sensors values * @curr_sampl_freq: current sampling period */ struct cros_ec_sensors_core_state { struct cros_ec_device *ec; struct mutex cmd_lock; struct cros_ec_command *msg; struct ec_params_motion_sense param; struct ec_response_motion_sense *resp; enum motionsensor_type type; enum motionsensor_location loc; struct calib_data { s16 offset; u16 scale; } calib[CROS_EC_SENSOR_MAX_AXIS]; s8 sign[CROS_EC_SENSOR_MAX_AXIS]; u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8); int (*read_ec_sensors_data)(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); int curr_sampl_freq; /* Table of known available frequencies: 0, Min and Max in mHz */ int frequencies[3]; }; /** * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory * @indio_dev: pointer to IIO device * @scan_mask: bitmap of the sensor indices to scan * @data: location to store data * * This is the safe function for reading the EC data. It guarantees that the * data sampled was not modified by the EC while being read. * * Return: 0 on success, -errno on failure. */ int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); /** * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol * @indio_dev: pointer to IIO device * @scan_mask: bitmap of the sensor indices to scan * @data: location to store data * * Return: 0 on success, -errno on failure. */ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); struct platform_device; /** * cros_ec_sensors_core_init() - basic initialization of the core structure * @pdev: platform device created for the sensors * @indio_dev: iio device structure of the device * @physical_device: true if the device refers to a physical device * * Return: 0 on success, -errno on failure. */ int cros_ec_sensors_core_init(struct platform_device *pdev, struct iio_dev *indio_dev, bool physical_device); /** * cros_ec_sensors_capture() - the trigger handler function * @irq: the interrupt number. * @p: a pointer to the poll function. * * On a trigger event occurring, if the pollfunc is attached then this * handler is called as a threaded interrupt (and hence may sleep). It * is responsible for grabbing data from the device and pushing it into * the associated buffer. * * Return: IRQ_HANDLED */ irqreturn_t cros_ec_sensors_capture(int irq, void *p); /** * cros_ec_motion_send_host_cmd() - send motion sense host command * @st: pointer to state information for device * @opt_length: optional length to reduce the response size, useful on the data * path.
Otherwise, the maximal allowed response size is used * * When called, the sub-command is assumed to be set in param->cmd. * * Return: 0 on success, -errno on failure. */ int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st, u16 opt_length); /** * cros_ec_sensors_core_read() - function to request a value from the sensor * @st: pointer to state information for device * @chan: channel specification structure table * @val: will contain one element making up the returned value * @val2: will contain another element making up the returned value * @mask: specifies which values to be requested * * Return: the type of value returned by the device */ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int *val, int *val2, long mask); /** * cros_ec_sensors_core_read_avail() - get available values * @indio_dev: pointer to state information for device * @chan: channel specification structure table * @vals: list of available values * @type: type of data returned * @length: number of data returned in the array * @mask: specifies which values to be requested * * Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST */ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, const int **vals, int *type, int *length, long mask); /** * cros_ec_sensors_core_write() - function to write a value to the sensor * @st: pointer to state information for device * @chan: channel specification structure table * @val: first part of value to write * @val2: second part of value to write * @mask: specifies which values to write * * Return: the type of value returned by the device */ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask); extern const struct dev_pm_ops cros_ec_sensors_pm_ops; /* List of extended channel specification for all sensors */ extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[]; #endif /* __CROS_EC_SENSORS_CORE_H */ iio/common/st_sensors.h 0000644 00000022150 14722070374 0011175 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * STMicroelectronics sensors library driver * * Copyright 2012-2013 STMicroelectronics Inc. 
* * Denis Ciocca <denis.ciocca@st.com> */ #ifndef ST_SENSORS_H #define ST_SENSORS_H #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/irqreturn.h> #include <linux/iio/trigger.h> #include <linux/bitops.h> #include <linux/regulator/consumer.h> #include <linux/regmap.h> #include <linux/platform_data/st_sensors_pdata.h> /* * Buffer size max case: 2bytes per channel, 3 channels in total + * 8bytes timestamp channel (s64) */ #define ST_SENSORS_MAX_BUFFER_SIZE (ALIGN(2 * 3, sizeof(s64)) + \ sizeof(s64)) #define ST_SENSORS_ODR_LIST_MAX 10 #define ST_SENSORS_FULLSCALE_AVL_MAX 10 #define ST_SENSORS_NUMBER_ALL_CHANNELS 4 #define ST_SENSORS_ENABLE_ALL_AXIS 0x07 #define ST_SENSORS_SCAN_X 0 #define ST_SENSORS_SCAN_Y 1 #define ST_SENSORS_SCAN_Z 2 #define ST_SENSORS_DEFAULT_POWER_ON_VALUE 0x01 #define ST_SENSORS_DEFAULT_POWER_OFF_VALUE 0x00 #define ST_SENSORS_DEFAULT_WAI_ADDRESS 0x0f #define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20 #define ST_SENSORS_DEFAULT_AXIS_MASK 0x07 #define ST_SENSORS_DEFAULT_AXIS_N_BIT 3 #define ST_SENSORS_DEFAULT_STAT_ADDR 0x27 #define ST_SENSORS_MAX_NAME 17 #define ST_SENSORS_MAX_4WAI 8 #define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \ ch2, s, endian, rbits, sbits, addr) \ { \ .type = device_type, \ .modified = mod, \ .info_mask_separate = mask, \ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = index, \ .channel2 = ch2, \ .address = addr, \ .scan_type = { \ .sign = s, \ .realbits = rbits, \ .shift = sbits - rbits, \ .storagebits = sbits, \ .endianness = endian, \ }, \ } #define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \ IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \ st_sensors_sysfs_sampling_frequency_avail) #define ST_SENSORS_DEV_ATTR_SCALE_AVAIL(name) \ IIO_DEVICE_ATTR(name, S_IRUGO, \ st_sensors_sysfs_scale_avail, NULL , 0); struct st_sensor_odr_avl { unsigned int hz; u8 value; }; struct st_sensor_odr { u8 addr; u8 mask; struct st_sensor_odr_avl odr_avl[ST_SENSORS_ODR_LIST_MAX]; }; struct st_sensor_power { u8 addr; u8 mask; u8 value_off; u8 value_on; }; struct st_sensor_axis { u8 addr; u8 mask; }; struct st_sensor_fullscale_avl { unsigned int num; u8 value; unsigned int gain; unsigned int gain2; }; struct st_sensor_fullscale { u8 addr; u8 mask; struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX]; }; struct st_sensor_sim { u8 addr; u8 value; }; /** * struct st_sensor_bdu - ST sensor device block data update * @addr: address of the register. * @mask: mask to write the block data update flag. */ struct st_sensor_bdu { u8 addr; u8 mask; }; /** * struct st_sensor_das - ST sensor device data alignment selection * @addr: address of the register. * @mask: mask to write the das flag for left alignment. */ struct st_sensor_das { u8 addr; u8 mask; }; /** * struct st_sensor_int_drdy - ST sensor device drdy line parameters * @addr: address of INT drdy register. * @mask: mask to enable drdy line. * @addr_od: address to enable/disable Open Drain on the INT line. * @mask_od: mask to enable/disable Open Drain on the INT line. */ struct st_sensor_int_drdy { u8 addr; u8 mask; u8 addr_od; u8 mask_od; }; /** * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt * struct int1 - data-ready configuration register for INT1 pin. * struct int2 - data-ready configuration register for INT2 pin. * @addr_ihl: address to enable/disable active low on the INT lines. * @mask_ihl: mask to enable/disable active low on the INT lines. * struct stat_drdy - status register of DRDY (data ready) interrupt. 
* struct ig1 - represents the Interrupt Generator 1 of sensors. * @en_addr: address of the enable ig1 register. * @en_mask: mask to write the on/off value for enable. */ struct st_sensor_data_ready_irq { struct st_sensor_int_drdy int1; struct st_sensor_int_drdy int2; u8 addr_ihl; u8 mask_ihl; struct { u8 addr; u8 mask; } stat_drdy; struct { u8 en_addr; u8 en_mask; } ig1; }; /** * struct st_sensor_settings - ST specific sensor settings * @wai: Contents of WhoAmI register. * @wai_addr: The address of WhoAmI register. * @sensors_supported: List of sensors supported by this struct. * @ch: IIO channels for the sensor. * @odr: Output data rate register and ODR list available. * @pw: Power register of the sensor. * @enable_axis: Enable one or more axes of the sensor. * @fs: Full scale register and full scale list available. * @bdu: Block data update register. * @das: Data Alignment Selection register. * @drdy_irq: Data ready register of the sensor. * @sim: SPI serial interface mode register of the sensor. * @multi_read_bit: Whether or not to use a particular bit for [I2C/SPI] multi-read. * @bootime: samples to discard when the sensor passes from power-down to power-up. */ struct st_sensor_settings { u8 wai; u8 wai_addr; char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; struct iio_chan_spec *ch; int num_ch; struct st_sensor_odr odr; struct st_sensor_power pw; struct st_sensor_axis enable_axis; struct st_sensor_fullscale fs; struct st_sensor_bdu bdu; struct st_sensor_das das; struct st_sensor_data_ready_irq drdy_irq; struct st_sensor_sim sim; bool multi_read_bit; unsigned int bootime; }; /** * struct st_sensor_data - ST sensor device status * @dev: Pointer to instance of struct device (I2C or SPI). * @trig: The trigger in use by the core driver. * @mount_matrix: The mounting matrix of the sensor. * @sensor_settings: Pointer to the specific sensor settings in use. * @current_fullscale: Maximum range of measure by the sensor. * @vdd: Pointer to sensor's Vdd power supply * @vdd_io: Pointer to sensor's Vdd-IO power supply * @regmap: Pointer to specific sensor regmap configuration. * @enabled: Status of the sensor (false->off, true->on). * @odr: Output data rate of the sensor [Hz]. * @num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * @int_pin_open_drain: Set the interrupt/DRDY to open drain. * @irq: the IRQ number. * @edge_irq: the IRQ triggers on edges and needs special handling. * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. * @buffer_data: Data used by buffer part.
* @odr_lock: Local lock for preventing concurrent ODR accesses/changes */ struct st_sensor_data { struct device *dev; struct iio_trigger *trig; struct iio_mount_matrix *mount_matrix; struct st_sensor_settings *sensor_settings; struct st_sensor_fullscale_avl *current_fullscale; struct regulator *vdd; struct regulator *vdd_io; struct regmap *regmap; bool enabled; unsigned int odr; unsigned int num_data_channels; u8 drdy_int_pin; bool int_pin_open_drain; int irq; bool edge_irq; bool hw_irq_trigger; s64 hw_timestamp; char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned; struct mutex odr_lock; }; #ifdef CONFIG_IIO_BUFFER irqreturn_t st_sensors_trigger_handler(int irq, void *p); #endif #ifdef CONFIG_IIO_TRIGGER int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops); void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); int st_sensors_validate_device(struct iio_trigger *trig, struct iio_dev *indio_dev); #else static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops) { return 0; } static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev) { return; } #define st_sensors_validate_device NULL #endif int st_sensors_init_sensor(struct iio_dev *indio_dev, struct st_sensors_platform_data *pdata); int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable); int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable); int st_sensors_power_enable(struct iio_dev *indio_dev); void st_sensors_power_disable(struct iio_dev *indio_dev); int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev, unsigned reg, unsigned writeval, unsigned *readval); int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr); int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable); int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale); int st_sensors_read_info_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *ch, int *val); int st_sensors_get_settings_index(const char *name, const struct st_sensor_settings *list, const int list_length); int st_sensors_verify_id(struct iio_dev *indio_dev); ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, struct device_attribute *attr, char *buf); ssize_t st_sensors_sysfs_scale_avail(struct device *dev, struct device_attribute *attr, char *buf); #ifdef CONFIG_OF void st_sensors_of_name_probe(struct device *dev, const struct of_device_id *match, char *name, int len); #else static inline void st_sensors_of_name_probe(struct device *dev, const struct of_device_id *match, char *name, int len) { } #endif #endif /* ST_SENSORS_H */ iio/common/st_sensors_spi.h 0000644 00000000660 14722070374 0012052 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * STMicroelectronics sensors spi library driver * * Copyright 2012-2013 STMicroelectronics Inc. * * Denis Ciocca <denis.ciocca@st.com> */ #ifndef ST_SENSORS_SPI_H #define ST_SENSORS_SPI_H #include <linux/spi/spi.h> #include <linux/iio/common/st_sensors.h> int st_sensors_spi_configure(struct iio_dev *indio_dev, struct spi_device *spi); #endif /* ST_SENSORS_SPI_H */ iio/common/st_sensors_i2c.h 0000644 00000001164 14722070374 0011734 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * STMicroelectronics sensors i2c library driver * * Copyright 2012-2013 STMicroelectronics Inc. 
* * Denis Ciocca <denis.ciocca@st.com> */ #ifndef ST_SENSORS_I2C_H #define ST_SENSORS_I2C_H #include <linux/i2c.h> #include <linux/iio/common/st_sensors.h> #include <linux/of.h> int st_sensors_i2c_configure(struct iio_dev *indio_dev, struct i2c_client *client); #ifdef CONFIG_ACPI int st_sensors_match_acpi_device(struct device *dev); #else static inline int st_sensors_match_acpi_device(struct device *dev) { return -ENODEV; } #endif #endif /* ST_SENSORS_I2C_H */ iio/common/ssp_sensors.h 0000644 00000003357 14722070374 0011364 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved. */ #ifndef _SSP_SENSORS_H_ #define _SSP_SENSORS_H_ #include <linux/iio/iio.h> #define SSP_TIME_SIZE 4 #define SSP_ACCELEROMETER_SIZE 6 #define SSP_GYROSCOPE_SIZE 6 #define SSP_BIO_HRM_RAW_SIZE 8 #define SSP_BIO_HRM_RAW_FAC_SIZE 36 #define SSP_BIO_HRM_LIB_SIZE 8 /** * enum ssp_sensor_type - SSP sensor type */ enum ssp_sensor_type { SSP_ACCELEROMETER_SENSOR = 0, SSP_GYROSCOPE_SENSOR, SSP_GEOMAGNETIC_UNCALIB_SENSOR, SSP_GEOMAGNETIC_RAW, SSP_GEOMAGNETIC_SENSOR, SSP_PRESSURE_SENSOR, SSP_GESTURE_SENSOR, SSP_PROXIMITY_SENSOR, SSP_TEMPERATURE_HUMIDITY_SENSOR, SSP_LIGHT_SENSOR, SSP_PROXIMITY_RAW, SSP_ORIENTATION_SENSOR, SSP_STEP_DETECTOR, SSP_SIG_MOTION_SENSOR, SSP_GYRO_UNCALIB_SENSOR, SSP_GAME_ROTATION_VECTOR, SSP_ROTATION_VECTOR, SSP_STEP_COUNTER, SSP_BIO_HRM_RAW, SSP_BIO_HRM_RAW_FAC, SSP_BIO_HRM_LIB, SSP_SENSOR_MAX, }; struct ssp_data; /** * struct ssp_sensor_data - Sensor object * @process_data: Callback to feed sensor data. * @type: Used sensor type. * @buffer: Received data buffer. */ struct ssp_sensor_data { int (*process_data)(struct iio_dev *indio_dev, void *buf, int64_t timestamp); enum ssp_sensor_type type; u8 *buffer; }; void ssp_register_consumer(struct iio_dev *indio_dev, enum ssp_sensor_type type); int ssp_enable_sensor(struct ssp_data *data, enum ssp_sensor_type type, u32 delay); int ssp_disable_sensor(struct ssp_data *data, enum ssp_sensor_type type); u32 ssp_get_sensor_delay(struct ssp_data *data, enum ssp_sensor_type); int ssp_change_delay(struct ssp_data *data, enum ssp_sensor_type type, u32 delay); #endif /* _SSP_SENSORS_H_ */ iio/configfs.h 0000644 00000000403 14722070374 0007276 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Industrial I/O configfs support * * Copyright (c) 2015 Intel Corporation */ #ifndef __IIO_CONFIGFS #define __IIO_CONFIGFS extern struct configfs_subsystem iio_configfs_subsys; #endif /* __IIO_CONFIGFS */ iio/frequency/ad9523.h 0000644 00000012543 14722070374 0010420 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AD9523 SPI Low Jitter Clock Generator * * Copyright 2012 Analog Devices Inc. */ #ifndef IIO_FREQUENCY_AD9523_H_ #define IIO_FREQUENCY_AD9523_H_ enum outp_drv_mode { TRISTATE, LVPECL_8mA, LVDS_4mA, LVDS_7mA, HSTL0_16mA, HSTL1_8mA, CMOS_CONF1, CMOS_CONF2, CMOS_CONF3, CMOS_CONF4, CMOS_CONF5, CMOS_CONF6, CMOS_CONF7, CMOS_CONF8, CMOS_CONF9 }; enum ref_sel_mode { NONEREVERTIVE_STAY_ON_REFB, REVERT_TO_REFA, SELECT_REFA, SELECT_REFB, EXT_REF_SEL }; /** * struct ad9523_channel_spec - Output channel configuration * * @channel_num: Output channel number. * @divider_output_invert_en: Invert the polarity of the output clock. * @sync_ignore_en: Ignore chip-level SYNC signal. * @low_power_mode_en: Reduce power used in the differential output modes. * @use_alt_clock_src: Channel divider uses alternative clk source. * @output_dis: Disables, powers down the entire channel. 
* @driver_mode: Output driver mode (logic level family). * @divider_phase: Divider initial phase after a SYNC. Range 0..63 LSB = 1/2 of a period of the divider input clock. * @channel_divider: 10-bit channel divider. * @extended_name: Optional descriptive channel name. */ struct ad9523_channel_spec { unsigned channel_num; bool divider_output_invert_en; bool sync_ignore_en; bool low_power_mode_en; /* CH0..CH3 VCXO, CH4..CH9 VCO2 */ bool use_alt_clock_src; bool output_dis; enum outp_drv_mode driver_mode; unsigned char divider_phase; unsigned short channel_divider; char extended_name[16]; }; enum pll1_rzero_resistor { RZERO_883_OHM, RZERO_677_OHM, RZERO_341_OHM, RZERO_135_OHM, RZERO_10_OHM, RZERO_USE_EXT_RES = 8, }; enum rpole2_resistor { RPOLE2_900_OHM, RPOLE2_450_OHM, RPOLE2_300_OHM, RPOLE2_225_OHM, }; enum rzero_resistor { RZERO_3250_OHM, RZERO_2750_OHM, RZERO_2250_OHM, RZERO_2100_OHM, RZERO_3000_OHM, RZERO_2500_OHM, RZERO_2000_OHM, RZERO_1850_OHM, }; enum cpole1_capacitor { CPOLE1_0_PF, CPOLE1_8_PF, CPOLE1_16_PF, CPOLE1_24_PF, _CPOLE1_24_PF, /* place holder */ CPOLE1_32_PF, CPOLE1_40_PF, CPOLE1_48_PF, }; /** * struct ad9523_platform_data - platform specific information * * @vcxo_freq: External VCXO frequency in Hz * @refa_diff_rcv_en: REFA differential/single-ended input selection. * @refb_diff_rcv_en: REFB differential/single-ended input selection. * @zd_in_diff_en: Zero Delay differential/single-ended input selection. * @osc_in_diff_en: OSC differential/ single-ended input selection. * @refa_cmos_neg_inp_en: REFA single-ended neg./pos. input enable. * @refb_cmos_neg_inp_en: REFB single-ended neg./pos. input enable. * @zd_in_cmos_neg_inp_en: Zero Delay single-ended neg./pos. input enable. * @osc_in_cmos_neg_inp_en: OSC single-ended neg./pos. input enable. * @refa_r_div: PLL1 10-bit REFA R divider. * @refb_r_div: PLL1 10-bit REFB R divider. * @pll1_feedback_div: PLL1 10-bit Feedback N divider. * @pll1_charge_pump_current_nA: Magnitude of PLL1 charge pump current (nA). * @zero_delay_mode_internal_en: Internal, external Zero Delay mode selection. * @osc_in_feedback_en: PLL1 feedback path, local feedback from * the OSC_IN receiver or zero delay mode * @pll1_loop_filter_rzero: PLL1 Loop Filter Zero Resistor selection. * @ref_mode: Reference selection mode. * @pll2_charge_pump_current_nA: Magnitude of PLL2 charge pump current (nA). * @pll2_ndiv_a_cnt: PLL2 Feedback N-divider, A Counter, range 0..4. * @pll2_ndiv_b_cnt: PLL2 Feedback N-divider, B Counter, range 0..63. * @pll2_freq_doubler_en: PLL2 frequency doubler enable. * @pll2_r2_div: PLL2 R2 divider, range 0..31. * @pll2_vco_div_m1: VCO1 divider, range 3..5. * @pll2_vco_div_m2: VCO2 divider, range 3..5. * @rpole2: PLL2 loop filter Rpole resistor value. * @rzero: PLL2 loop filter Rzero resistor value. * @cpole1: PLL2 loop filter Cpole capacitor value. * @rzero_bypass_en: PLL2 loop filter Rzero bypass enable. * @num_channels: Array size of struct ad9523_channel_spec. * @channels: Pointer to channel array. * @name: Optional alternative iio device name. 
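 *
 * A board-file sketch of providing this platform data; all values and the
 * my_* / "ref_clk" names below are illustrative, not taken from any real
 * design:
 *
 *	static struct ad9523_channel_spec my_ad9523_channels[] = {
 *		{
 *			.channel_num = 0,
 *			.driver_mode = LVDS_7mA,
 *			.channel_divider = 10,
 *			.extended_name = "ref_clk",
 *		},
 *	};
 *
 *	static struct ad9523_platform_data my_ad9523_pdata = {
 *		.vcxo_freq = 122880000,
 *		.refa_diff_rcv_en = true,
 *		.ref_mode = REVERT_TO_REFA,
 *		.num_channels = ARRAY_SIZE(my_ad9523_channels),
 *		.channels = my_ad9523_channels,
 *	};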
*/ struct ad9523_platform_data { unsigned long vcxo_freq; /* Differential/ Single-Ended Input Configuration */ bool refa_diff_rcv_en; bool refb_diff_rcv_en; bool zd_in_diff_en; bool osc_in_diff_en; /* * Valid if differential input disabled * if false defaults to pos input */ bool refa_cmos_neg_inp_en; bool refb_cmos_neg_inp_en; bool zd_in_cmos_neg_inp_en; bool osc_in_cmos_neg_inp_en; /* PLL1 Setting */ unsigned short refa_r_div; unsigned short refb_r_div; unsigned short pll1_feedback_div; unsigned short pll1_charge_pump_current_nA; bool zero_delay_mode_internal_en; bool osc_in_feedback_en; enum pll1_rzero_resistor pll1_loop_filter_rzero; /* Reference */ enum ref_sel_mode ref_mode; /* PLL2 Setting */ unsigned int pll2_charge_pump_current_nA; unsigned char pll2_ndiv_a_cnt; unsigned char pll2_ndiv_b_cnt; bool pll2_freq_doubler_en; unsigned char pll2_r2_div; unsigned char pll2_vco_div_m1; /* 3..5 */ unsigned char pll2_vco_div_m2; /* 3..5 */ /* Loop Filter PLL2 */ enum rpole2_resistor rpole2; enum rzero_resistor rzero; enum cpole1_capacitor cpole1; bool rzero_bypass_en; /* Output Channel Configuration */ int num_channels; struct ad9523_channel_spec *channels; char name[SPI_NAME_SIZE]; }; #endif /* IIO_FREQUENCY_AD9523_H_ */ iio/frequency/adf4350.h 0000644 00000010660 14722070374 0010555 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * ADF4350/ADF4351 SPI PLL driver * * Copyright 2012-2013 Analog Devices Inc. */ #ifndef IIO_PLL_ADF4350_H_ #define IIO_PLL_ADF4350_H_ /* Registers */ #define ADF4350_REG0 0 #define ADF4350_REG1 1 #define ADF4350_REG2 2 #define ADF4350_REG3 3 #define ADF4350_REG4 4 #define ADF4350_REG5 5 /* REG0 Bit Definitions */ #define ADF4350_REG0_FRACT(x) (((x) & 0xFFF) << 3) #define ADF4350_REG0_INT(x) (((x) & 0xFFFF) << 15) /* REG1 Bit Definitions */ #define ADF4350_REG1_MOD(x) (((x) & 0xFFF) << 3) #define ADF4350_REG1_PHASE(x) (((x) & 0xFFF) << 15) #define ADF4350_REG1_PRESCALER (1 << 27) /* REG2 Bit Definitions */ #define ADF4350_REG2_COUNTER_RESET_EN (1 << 3) #define ADF4350_REG2_CP_THREESTATE_EN (1 << 4) #define ADF4350_REG2_POWER_DOWN_EN (1 << 5) #define ADF4350_REG2_PD_POLARITY_POS (1 << 6) #define ADF4350_REG2_LDP_6ns (1 << 7) #define ADF4350_REG2_LDP_10ns (0 << 7) #define ADF4350_REG2_LDF_FRACT_N (0 << 8) #define ADF4350_REG2_LDF_INT_N (1 << 8) #define ADF4350_REG2_CHARGE_PUMP_CURR_uA(x) (((((x)-312) / 312) & 0xF) << 9) #define ADF4350_REG2_DOUBLE_BUFF_EN (1 << 13) #define ADF4350_REG2_10BIT_R_CNT(x) ((x) << 14) #define ADF4350_REG2_RDIV2_EN (1 << 24) #define ADF4350_REG2_RMULT2_EN (1 << 25) #define ADF4350_REG2_MUXOUT(x) ((x) << 26) #define ADF4350_REG2_NOISE_MODE(x) (((unsigned)(x)) << 29) #define ADF4350_MUXOUT_THREESTATE 0 #define ADF4350_MUXOUT_DVDD 1 #define ADF4350_MUXOUT_GND 2 #define ADF4350_MUXOUT_R_DIV_OUT 3 #define ADF4350_MUXOUT_N_DIV_OUT 4 #define ADF4350_MUXOUT_ANALOG_LOCK_DETECT 5 #define ADF4350_MUXOUT_DIGITAL_LOCK_DETECT 6 /* REG3 Bit Definitions */ #define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3) #define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16) #define ADF4350_REG3_12BIT_CSR_EN (1 << 18) #define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21) #define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22) #define ADF4351_REG3_BAND_SEL_CLOCK_MODE_HIGH (1 << 23) /* REG4 Bit Definitions */ #define ADF4350_REG4_OUTPUT_PWR(x) ((x) << 3) #define ADF4350_REG4_RF_OUT_EN (1 << 5) #define ADF4350_REG4_AUX_OUTPUT_PWR(x) ((x) << 6) #define ADF4350_REG4_AUX_OUTPUT_EN (1 << 8) #define ADF4350_REG4_AUX_OUTPUT_FUND (1 << 9) #define ADF4350_REG4_AUX_OUTPUT_DIV (0 << 
9) #define ADF4350_REG4_MUTE_TILL_LOCK_EN (1 << 10) #define ADF4350_REG4_VCO_PWRDOWN_EN (1 << 11) #define ADF4350_REG4_8BIT_BAND_SEL_CLKDIV(x) ((x) << 12) #define ADF4350_REG4_RF_DIV_SEL(x) ((x) << 20) #define ADF4350_REG4_FEEDBACK_DIVIDED (0 << 23) #define ADF4350_REG4_FEEDBACK_FUND (1 << 23) /* REG5 Bit Definitions */ #define ADF4350_REG5_LD_PIN_MODE_LOW (0 << 22) #define ADF4350_REG5_LD_PIN_MODE_DIGITAL (1 << 22) #define ADF4350_REG5_LD_PIN_MODE_HIGH (3 << 22) /* Specifications */ #define ADF4350_MAX_OUT_FREQ 4400000000ULL /* Hz */ #define ADF4350_MIN_OUT_FREQ 137500000 /* Hz */ #define ADF4351_MIN_OUT_FREQ 34375000 /* Hz */ #define ADF4350_MIN_VCO_FREQ 2200000000ULL /* Hz */ #define ADF4350_MAX_FREQ_45_PRESC 3000000000ULL /* Hz */ #define ADF4350_MAX_FREQ_PFD 32000000 /* Hz */ #define ADF4350_MAX_BANDSEL_CLK 125000 /* Hz */ #define ADF4350_MAX_FREQ_REFIN 250000000 /* Hz */ #define ADF4350_MAX_MODULUS 4095 #define ADF4350_MAX_R_CNT 1023 /** * struct adf4350_platform_data - platform specific information * @name: Optional device name. * @clkin: REFin frequency in Hz. * @channel_spacing: Channel spacing in Hz (influences MODULUS). * @power_up_frequency: Optional, If set in Hz the PLL tunes to the desired * frequency on probe. * @ref_div_factor: Optional, if set the driver skips dynamic calculation * and uses this default value instead. * @ref_doubler_en: Enables reference doubler. * @ref_div2_en: Enables reference divider. * @r2_user_settings: User defined settings for ADF4350/1 REGISTER_2. * @r3_user_settings: User defined settings for ADF4350/1 REGISTER_3. * @r4_user_settings: User defined settings for ADF4350/1 REGISTER_4. * @gpio_lock_detect: Optional, if set with a valid GPIO number, * pll lock state is tested upon read. * If not used - set to -1. */ struct adf4350_platform_data { char name[32]; unsigned long clkin; unsigned long channel_spacing; unsigned long long power_up_frequency; unsigned short ref_div_factor; /* 10-bit R counter */ bool ref_doubler_en; bool ref_div2_en; unsigned r2_user_settings; unsigned r3_user_settings; unsigned r4_user_settings; int gpio_lock_detect; }; #endif /* IIO_PLL_ADF4350_H_ */ iio/triggered_buffer.h 0000644 00000001415 14722070374 0011011 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_ #define _LINUX_IIO_TRIGGERED_BUFFER_H_ #include <linux/interrupt.h> struct iio_dev; struct iio_buffer_setup_ops; int iio_triggered_buffer_setup(struct iio_dev *indio_dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), const struct iio_buffer_setup_ops *setup_ops); void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev); int devm_iio_triggered_buffer_setup(struct device *dev, struct iio_dev *indio_dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), const struct iio_buffer_setup_ops *ops); void devm_iio_triggered_buffer_cleanup(struct device *dev, struct iio_dev *indio_dev); #endif iio/trigger.h 0000644 00000012334 14722070374 0007151 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O core, trigger handling functions * * Copyright (c) 2008 Jonathan Cameron */ #include <linux/irq.h> #include <linux/module.h> #include <linux/atomic.h> #ifndef _IIO_TRIGGER_H_ #define _IIO_TRIGGER_H_ #ifdef CONFIG_IIO_TRIGGER struct iio_subirq { bool enabled; }; struct iio_dev; struct iio_trigger; /** * struct iio_trigger_ops - operations structure for an iio_trigger. 
 * @set_trigger_state:	switch on/off the trigger on demand
 * @try_reenable:	function to reenable the trigger when the
 *			use count is zero (may be NULL)
 * @validate_device:	function to validate the device when the
 *			current trigger gets changed.
 *
 * This is typically static const within a driver and shared by
 * instances of a given device.
 **/
struct iio_trigger_ops {
	int (*set_trigger_state)(struct iio_trigger *trig, bool state);
	int (*try_reenable)(struct iio_trigger *trig);
	int (*validate_device)(struct iio_trigger *trig,
			       struct iio_dev *indio_dev);
};

/**
 * struct iio_trigger - industrial I/O trigger device
 * @ops:		[DRIVER] operations structure
 * @owner:		[INTERN] owner of this driver module
 * @id:			[INTERN] unique id number
 * @name:		[DRIVER] unique name
 * @dev:		[DRIVER] associated device (if relevant)
 * @list:		[INTERN] used in maintenance of global trigger list
 * @alloc_list:		[DRIVER] used for driver specific trigger list
 * @use_count:		[INTERN] use count for the trigger.
 * @subirq_chip:	[INTERN] associated 'virtual' irq chip.
 * @subirq_base:	[INTERN] base number for irqs provided by trigger.
 * @subirqs:		[INTERN] information about the 'child' irqs.
 * @pool:		[INTERN] bitmap of irqs currently in use.
 * @pool_lock:		[INTERN] protection of the irq pool.
 * @attached_own_device:[INTERN] if we are using our own device as trigger,
 *			i.e. if we registered a poll function to the same
 *			device as the one providing the trigger.
 **/
struct iio_trigger {
	const struct iio_trigger_ops *ops;
	struct module *owner;
	int id;
	const char *name;
	struct device dev;
	struct list_head list;
	struct list_head alloc_list;
	atomic_t use_count;
	struct irq_chip subirq_chip;
	int subirq_base;
	struct iio_subirq subirqs[CONFIG_IIO_CONSUMERS_PER_TRIGGER];
	unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
	struct mutex pool_lock;
	bool attached_own_device;
};

static inline struct iio_trigger *to_iio_trigger(struct device *d)
{
	return container_of(d, struct iio_trigger, dev);
}

static inline void iio_trigger_put(struct iio_trigger *trig)
{
	module_put(trig->owner);
	put_device(&trig->dev);
}

static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
{
	get_device(&trig->dev);
	__module_get(trig->owner);
	return trig;
}

/**
 * iio_trigger_set_drvdata() - Set trigger driver data
 * @trig: IIO trigger structure
 * @data: Driver specific data
 *
 * Allows attaching an arbitrary pointer to an IIO trigger, which can later be
 * retrieved by iio_trigger_get_drvdata().
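 *
 * A minimal sketch (dev, st and struct my_state are hypothetical driver
 * locals, not part of this header):
 *
 *	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
 *				      indio_dev->id);
 *	iio_trigger_set_drvdata(trig, st);
 *
 * and later, for example in one of the trigger's ops callbacks:
 *
 *	struct my_state *st = iio_trigger_get_drvdata(trig);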
*/ static inline void iio_trigger_set_drvdata(struct iio_trigger *trig, void *data) { dev_set_drvdata(&trig->dev, data); } /** * iio_trigger_get_drvdata() - Get trigger driver data * @trig: IIO trigger structure * * Returns the data previously set with iio_trigger_set_drvdata() */ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig) { return dev_get_drvdata(&trig->dev); } /** * iio_trigger_register() - register a trigger with the IIO core * @trig_info: trigger to be registered **/ #define iio_trigger_register(trig_info) \ __iio_trigger_register((trig_info), THIS_MODULE) int __iio_trigger_register(struct iio_trigger *trig_info, struct module *this_mod); #define devm_iio_trigger_register(dev, trig_info) \ __devm_iio_trigger_register((dev), (trig_info), THIS_MODULE) int __devm_iio_trigger_register(struct device *dev, struct iio_trigger *trig_info, struct module *this_mod); /** * iio_trigger_unregister() - unregister a trigger from the core * @trig_info: trigger to be unregistered **/ void iio_trigger_unregister(struct iio_trigger *trig_info); void devm_iio_trigger_unregister(struct device *dev, struct iio_trigger *trig_info); /** * iio_trigger_set_immutable() - set an immutable trigger on destination * * @indio_dev: IIO device structure containing the device * @trig: trigger to assign to device * **/ int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig); /** * iio_trigger_poll() - called on a trigger occurring * @trig: trigger which occurred * * Typically called in relevant hardware interrupt handler. **/ void iio_trigger_poll(struct iio_trigger *trig); void iio_trigger_poll_chained(struct iio_trigger *trig); irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private); __printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...); void iio_trigger_free(struct iio_trigger *trig); /** * iio_trigger_using_own() - tells us if we use our own HW trigger ourselves * @indio_dev: device to check */ bool iio_trigger_using_own(struct iio_dev *indio_dev); int iio_trigger_validate_own_device(struct iio_trigger *trig, struct iio_dev *indio_dev); #else struct iio_trigger; struct iio_trigger_ops; #endif #endif /* _IIO_TRIGGER_H_ */ iio/imu/adis.h 0000644 00000016644 14722070374 0007230 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Common library for ADIS16XXX devices * * Copyright 2012 Analog Devices Inc. 
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#ifndef __IIO_ADIS_H__
#define __IIO_ADIS_H__

#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/iio/types.h>

#define ADIS_WRITE_REG(reg) ((0x80 | (reg)))
#define ADIS_READ_REG(reg) ((reg) & 0x7f)

#define ADIS_PAGE_SIZE 0x80
#define ADIS_REG_PAGE_ID 0x00

struct adis;
struct adis_burst;

/**
 * struct adis_data - ADIS chip variant specific data
 * @read_delay: SPI delay for read operations in us
 * @write_delay: SPI delay for write operations in us
 * @cs_change_delay: SPI delay between CS changes in us
 * @glob_cmd_reg: Register address of the GLOB_CMD register
 * @msc_ctrl_reg: Register address of the MSC_CTRL register
 * @diag_stat_reg: Register address of the DIAG_STAT register
 * @self_test_mask: Bitmask written to MSC_CTRL to start the self test
 * @self_test_no_autoclear: True if the self test bit does not auto-clear
 * @startup_delay: Device startup delay in ms
 * @status_error_msgs: Array of error messages
 * @status_error_mask: Bitmask of errors supported by the device
 * @enable_irq: Optional hook to enable/disable the device interrupt,
 *	overriding the default behaviour
 * @has_paging: True if the device uses paged registers
 */
struct adis_data {
	unsigned int read_delay;
	unsigned int write_delay;
	unsigned int cs_change_delay;

	unsigned int glob_cmd_reg;
	unsigned int msc_ctrl_reg;
	unsigned int diag_stat_reg;

	unsigned int self_test_mask;
	bool self_test_no_autoclear;
	unsigned int startup_delay;

	const char * const *status_error_msgs;
	unsigned int status_error_mask;

	int (*enable_irq)(struct adis *adis, bool enable);

	bool has_paging;
};

struct adis {
	struct spi_device	*spi;
	struct iio_trigger	*trig;

	const struct adis_data	*data;
	struct adis_burst	*burst;

	struct mutex		txrx_lock;
	struct spi_message	msg;
	struct spi_transfer	*xfer;
	unsigned int		current_page;
	void			*buffer;

	uint8_t			tx[10] ____cacheline_aligned;
	uint8_t			rx[4];
};

int adis_init(struct adis *adis, struct iio_dev *indio_dev,
	struct spi_device *spi, const struct adis_data *data);
int adis_reset(struct adis *adis);

int adis_write_reg(struct adis *adis, unsigned int reg,
	unsigned int val, unsigned int size);
int adis_read_reg(struct adis *adis, unsigned int reg,
	unsigned int *val, unsigned int size);

/**
 * adis_write_reg_8() - Write single byte to a register
 * @adis: The adis device
 * @reg: The address of the register to be written
 * @val: The value to write
 */
static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
	uint8_t val)
{
	return adis_write_reg(adis, reg, val, 1);
}

/**
 * adis_write_reg_16() - Write 2 bytes to a pair of registers
 * @adis: The adis device
 * @reg: The address of the lower of the two registers
 * @val: Value to be written
 */
static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
	uint16_t val)
{
	return adis_write_reg(adis, reg, val, 2);
}

/**
 * adis_write_reg_32() - Write 4 bytes to four registers
 * @adis: The adis device
 * @reg: The address of the lower of the four registers
 * @val: Value to be written
 */
static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
	uint32_t val)
{
	return adis_write_reg(adis, reg, val, 4);
}

/**
 * adis_read_reg_16() - Read 2 bytes from a 16-bit register
 * @adis: The adis device
 * @reg: The address of the lower of the two registers
 * @val: The value read back from the device
 */
static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
	uint16_t *val)
{
	unsigned int tmp;
	int ret;

	ret = adis_read_reg(adis, reg, &tmp, 2);
	*val = tmp;

	return ret;
}

/**
 * adis_read_reg_32() - Read 4 bytes from a 32-bit register
 * @adis: The adis device
 * @reg: The address of the lower of the two registers
 * @val: The value read back from the device
 */
static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
	uint32_t *val)
{
	unsigned int tmp;
	int ret;

	ret = adis_read_reg(adis, reg, &tmp, 4);
	*val = tmp;

	return ret;
}

int adis_enable_irq(struct adis *adis, bool enable);
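/*
 * Example (a minimal sketch, error handling shortened): reading a chip
 * variant's 16-bit DIAG_STAT register through the sized helper above.
 *
 *	uint16_t stat;
 *	int ret;
 *
 *	ret = adis_read_reg_16(adis, adis->data->diag_stat_reg, &stat);
 *	if (ret)
 *		return ret;
 */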
int adis_check_status(struct adis *adis);
int adis_initial_startup(struct adis *adis);

int adis_single_conversion(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int error_mask,
	int *val);

#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \
	.type = IIO_VOLTAGE, \
	.indexed = 1, \
	.channel = (chan), \
	.extend_name = name, \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
		BIT(IIO_CHAN_INFO_SCALE), \
	.info_mask_shared_by_all = info_all, \
	.address = (addr), \
	.scan_index = (si), \
	.scan_type = { \
		.sign = 'u', \
		.realbits = (bits), \
		.storagebits = 16, \
		.endianness = IIO_BE, \
	}, \
}

#define ADIS_SUPPLY_CHAN(addr, si, info_all, bits) \
	ADIS_VOLTAGE_CHAN(addr, si, 0, "supply", info_all, bits)

#define ADIS_AUX_ADC_CHAN(addr, si, info_all, bits) \
	ADIS_VOLTAGE_CHAN(addr, si, 1, NULL, info_all, bits)

#define ADIS_TEMP_CHAN(addr, si, info_all, bits) { \
	.type = IIO_TEMP, \
	.indexed = 1, \
	.channel = 0, \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
		BIT(IIO_CHAN_INFO_SCALE) | \
		BIT(IIO_CHAN_INFO_OFFSET), \
	.info_mask_shared_by_all = info_all, \
	.address = (addr), \
	.scan_index = (si), \
	.scan_type = { \
		.sign = 'u', \
		.realbits = (bits), \
		.storagebits = 16, \
		.endianness = IIO_BE, \
	}, \
}

#define ADIS_MOD_CHAN(_type, mod, addr, si, info_sep, info_all, bits) { \
	.type = (_type), \
	.modified = 1, \
	.channel2 = IIO_MOD_ ## mod, \
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
		info_sep, \
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
	.info_mask_shared_by_all = info_all, \
	.address = (addr), \
	.scan_index = (si), \
	.scan_type = { \
		.sign = 's', \
		.realbits = (bits), \
		.storagebits = 16, \
		.endianness = IIO_BE, \
	}, \
}

#define ADIS_ACCEL_CHAN(mod, addr, si, info_sep, info_all, bits) \
	ADIS_MOD_CHAN(IIO_ACCEL, mod, addr, si, info_sep, info_all, bits)

#define ADIS_GYRO_CHAN(mod, addr, si, info_sep, info_all, bits) \
	ADIS_MOD_CHAN(IIO_ANGL_VEL, mod, addr, si, info_sep, info_all, bits)

#define ADIS_INCLI_CHAN(mod, addr, si, info_sep, info_all, bits) \
	ADIS_MOD_CHAN(IIO_INCLI, mod, addr, si, info_sep, info_all, bits)

#define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \
	ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits)

#ifdef CONFIG_IIO_ADIS_LIB_BUFFER

/**
 * struct adis_burst - ADIS data for burst transfers
 * @en:		burst mode enabled
 * @reg_cmd:	register command that triggers burst
 * @extra_len:	extra length to account in the SPI RX buffer
 */
struct adis_burst {
	bool		en;
	unsigned int	reg_cmd;
	unsigned int	extra_len;
};

int adis_setup_buffer_and_trigger(struct adis *adis,
	struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *));
void adis_cleanup_buffer_and_trigger(struct adis *adis,
	struct iio_dev *indio_dev);

int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
void adis_remove_trigger(struct adis *adis);

int adis_update_scan_mode(struct iio_dev *indio_dev,
	const unsigned long *scan_mask);

#else /* CONFIG_IIO_ADIS_LIB_BUFFER */

static inline int adis_setup_buffer_and_trigger(struct adis *adis,
	struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *))
{
	return 0;
}

static inline void adis_cleanup_buffer_and_trigger(struct adis *adis,
	struct iio_dev *indio_dev)
{
}

static inline int adis_probe_trigger(struct adis *adis,
	struct iio_dev *indio_dev)
{
	return 0;
}

static inline void adis_remove_trigger(struct adis *adis)
{
}

#define adis_update_scan_mode NULL

#endif /* CONFIG_IIO_ADIS_LIB_BUFFER */

#ifdef CONFIG_DEBUG_FS

int adis_debugfs_reg_access(struct iio_dev *indio_dev, unsigned int
reg, unsigned int writeval, unsigned int *readval); #else #define adis_debugfs_reg_access NULL #endif #endif iio/timer/stm32-timer-trigger.h 0000644 00000003577 14722070374 0012366 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) STMicroelectronics 2016 * * Author: Benjamin Gaignard <benjamin.gaignard@st.com> */ #ifndef _STM32_TIMER_TRIGGER_H_ #define _STM32_TIMER_TRIGGER_H_ #define TIM1_TRGO "tim1_trgo" #define TIM1_TRGO2 "tim1_trgo2" #define TIM1_CH1 "tim1_ch1" #define TIM1_CH2 "tim1_ch2" #define TIM1_CH3 "tim1_ch3" #define TIM1_CH4 "tim1_ch4" #define TIM2_TRGO "tim2_trgo" #define TIM2_CH1 "tim2_ch1" #define TIM2_CH2 "tim2_ch2" #define TIM2_CH3 "tim2_ch3" #define TIM2_CH4 "tim2_ch4" #define TIM3_TRGO "tim3_trgo" #define TIM3_CH1 "tim3_ch1" #define TIM3_CH2 "tim3_ch2" #define TIM3_CH3 "tim3_ch3" #define TIM3_CH4 "tim3_ch4" #define TIM4_TRGO "tim4_trgo" #define TIM4_CH1 "tim4_ch1" #define TIM4_CH2 "tim4_ch2" #define TIM4_CH3 "tim4_ch3" #define TIM4_CH4 "tim4_ch4" #define TIM5_TRGO "tim5_trgo" #define TIM5_CH1 "tim5_ch1" #define TIM5_CH2 "tim5_ch2" #define TIM5_CH3 "tim5_ch3" #define TIM5_CH4 "tim5_ch4" #define TIM6_TRGO "tim6_trgo" #define TIM7_TRGO "tim7_trgo" #define TIM8_TRGO "tim8_trgo" #define TIM8_TRGO2 "tim8_trgo2" #define TIM8_CH1 "tim8_ch1" #define TIM8_CH2 "tim8_ch2" #define TIM8_CH3 "tim8_ch3" #define TIM8_CH4 "tim8_ch4" #define TIM9_TRGO "tim9_trgo" #define TIM9_CH1 "tim9_ch1" #define TIM9_CH2 "tim9_ch2" #define TIM10_OC1 "tim10_oc1" #define TIM11_OC1 "tim11_oc1" #define TIM12_TRGO "tim12_trgo" #define TIM12_CH1 "tim12_ch1" #define TIM12_CH2 "tim12_ch2" #define TIM13_OC1 "tim13_oc1" #define TIM14_OC1 "tim14_oc1" #define TIM15_TRGO "tim15_trgo" #define TIM16_OC1 "tim16_oc1" #define TIM17_OC1 "tim17_oc1" #if IS_REACHABLE(CONFIG_IIO_STM32_TIMER_TRIGGER) bool is_stm32_timer_trigger(struct iio_trigger *trig); #else static inline bool is_stm32_timer_trigger(struct iio_trigger *trig) { #if IS_ENABLED(CONFIG_IIO_STM32_TIMER_TRIGGER) pr_warn_once("stm32-timer-trigger not linked in\n"); #endif return false; } #endif #endif iio/timer/stm32-lptim-trigger.h 0000644 00000001264 14722070374 0012362 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) STMicroelectronics 2017 * * Author: Fabrice Gasnier <fabrice.gasnier@st.com> */ #ifndef _STM32_LPTIM_TRIGGER_H_ #define _STM32_LPTIM_TRIGGER_H_ #include <linux/iio/iio.h> #include <linux/iio/trigger.h> #define LPTIM1_OUT "lptim1_out" #define LPTIM2_OUT "lptim2_out" #define LPTIM3_OUT "lptim3_out" #if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER) bool is_stm32_lptim_trigger(struct iio_trigger *trig); #else static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig) { #if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER) pr_warn_once("stm32 lptim_trigger not linked in\n"); #endif return false; } #endif #endif iio/iio.h 0000644 00000064446 14722070374 0006301 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* The industrial I/O core * * Copyright (c) 2008 Jonathan Cameron */ #ifndef _INDUSTRIAL_IO_H_ #define _INDUSTRIAL_IO_H_ #include <linux/device.h> #include <linux/cdev.h> #include <linux/iio/types.h> #include <linux/of.h> /* IIO TODO LIST */ /* * Provide means of adjusting timer accuracy. * Currently assumes nano seconds. 
*/ enum iio_shared_by { IIO_SEPARATE, IIO_SHARED_BY_TYPE, IIO_SHARED_BY_DIR, IIO_SHARED_BY_ALL }; enum iio_endian { IIO_CPU, IIO_BE, IIO_LE, }; struct iio_chan_spec; struct iio_dev; /** * struct iio_chan_spec_ext_info - Extended channel info attribute * @name: Info attribute name * @shared: Whether this attribute is shared between all channels. * @read: Read callback for this info attribute, may be NULL. * @write: Write callback for this info attribute, may be NULL. * @private: Data private to the driver. */ struct iio_chan_spec_ext_info { const char *name; enum iio_shared_by shared; ssize_t (*read)(struct iio_dev *, uintptr_t private, struct iio_chan_spec const *, char *buf); ssize_t (*write)(struct iio_dev *, uintptr_t private, struct iio_chan_spec const *, const char *buf, size_t len); uintptr_t private; }; /** * struct iio_enum - Enum channel info attribute * @items: An array of strings. * @num_items: Length of the item array. * @set: Set callback function, may be NULL. * @get: Get callback function, may be NULL. * * The iio_enum struct can be used to implement enum style channel attributes. * Enum style attributes are those which have a set of strings which map to * unsigned integer values. The IIO enum helper code takes care of mapping * between value and string as well as generating a "_available" file which * contains a list of all available items. The set callback will be called when * the attribute is updated. The last parameter is the index to the newly * activated item. The get callback will be used to query the currently active * item and is supposed to return the index for it. */ struct iio_enum { const char * const *items; unsigned int num_items; int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int); int (*get)(struct iio_dev *, const struct iio_chan_spec *); }; ssize_t iio_enum_available_read(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, char *buf); ssize_t iio_enum_read(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, char *buf); ssize_t iio_enum_write(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, const char *buf, size_t len); /** * IIO_ENUM() - Initialize enum extended channel attribute * @_name: Attribute name * @_shared: Whether the attribute is shared between all channels * @_e: Pointer to an iio_enum struct * * This should usually be used together with IIO_ENUM_AVAILABLE() */ #define IIO_ENUM(_name, _shared, _e) \ { \ .name = (_name), \ .shared = (_shared), \ .read = iio_enum_read, \ .write = iio_enum_write, \ .private = (uintptr_t)(_e), \ } /** * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute * @_name: Attribute name ("_available" will be appended to the name) * @_e: Pointer to an iio_enum struct * * Creates a read only attribute which lists all the available enum items in a * space separated list. 
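 *
 * For instance, a hedged sketch of wiring up a "mode" control; the my_*
 * callbacks and items are hypothetical, not part of this header:
 *
 *	static const char * const my_modes[] = { "normal", "turbo" };
 *
 *	static const struct iio_enum my_mode_enum = {
 *		.items = my_modes,
 *		.num_items = ARRAY_SIZE(my_modes),
 *		.get = my_get_mode,
 *		.set = my_set_mode,
 *	};
 *
 *	static const struct iio_chan_spec_ext_info my_ext_info[] = {
 *		IIO_ENUM("mode", IIO_SEPARATE, &my_mode_enum),
 *		IIO_ENUM_AVAILABLE("mode", &my_mode_enum),
 *		{ },
 *	};
 *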
This should usually be used together with IIO_ENUM() */ #define IIO_ENUM_AVAILABLE(_name, _e) \ { \ .name = (_name "_available"), \ .shared = IIO_SHARED_BY_TYPE, \ .read = iio_enum_available_read, \ .private = (uintptr_t)(_e), \ } /** * struct iio_mount_matrix - iio mounting matrix * @rotation: 3 dimensional space rotation matrix defining sensor alignment with * main hardware */ struct iio_mount_matrix { const char *rotation[9]; }; ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, const struct iio_chan_spec *chan, char *buf); int iio_read_mount_matrix(struct device *dev, const char *propname, struct iio_mount_matrix *matrix); typedef const struct iio_mount_matrix * (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev, const struct iio_chan_spec *chan); /** * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute * @_shared: Whether the attribute is shared between all channels * @_get: Pointer to an iio_get_mount_matrix_t accessor */ #define IIO_MOUNT_MATRIX(_shared, _get) \ { \ .name = "mount_matrix", \ .shared = (_shared), \ .read = iio_show_mount_matrix, \ .private = (uintptr_t)(_get), \ } /** * struct iio_event_spec - specification for a channel event * @type: Type of the event * @dir: Direction of the event * @mask_separate: Bit mask of enum iio_event_info values. Attributes * set in this mask will be registered per channel. * @mask_shared_by_type: Bit mask of enum iio_event_info values. Attributes * set in this mask will be shared by channel type. * @mask_shared_by_dir: Bit mask of enum iio_event_info values. Attributes * set in this mask will be shared by channel type and * direction. * @mask_shared_by_all: Bit mask of enum iio_event_info values. Attributes * set in this mask will be shared by all channels. */ struct iio_event_spec { enum iio_event_type type; enum iio_event_direction dir; unsigned long mask_separate; unsigned long mask_shared_by_type; unsigned long mask_shared_by_dir; unsigned long mask_shared_by_all; }; /** * struct iio_chan_spec - specification of a single channel * @type: What type of measurement is the channel making. * @channel: What number do we wish to assign the channel. * @channel2: If there is a second number for a differential * channel then this is it. If modified is set then the * value here specifies the modifier. * @address: Driver specific identifier. * @scan_index: Monotonic index to give ordering in scans when read * from a buffer. * @scan_type: struct describing the scan type * @scan_type.sign: 's' or 'u' to specify signed or unsigned * @scan_type.realbits: Number of valid bits of data * @scan_type.storagebits: Realbits + padding * @scan_type.shift: Shift right by this before masking out * realbits. * @scan_type.repeat: Number of times real/storage bits repeats. * When the repeat element is more than 1, then * the type element in sysfs will show a repeat * value. Otherwise, the number of repetitions * is omitted. * @scan_type.endianness: little or big endian * @info_mask_separate: What information is to be exported that is specific to * this channel. * @info_mask_separate_available: What availability information is to be * exported that is specific to this channel. * @info_mask_shared_by_type: What information is to be exported that is shared * by all channels of the same type. * @info_mask_shared_by_type_available: What availability information is to be * exported that is shared by all channels of the same * type. 
* @info_mask_shared_by_dir: What information is to be exported that is shared * by all channels of the same direction. * @info_mask_shared_by_dir_available: What availability information is to be * exported that is shared by all channels of the same * direction. * @info_mask_shared_by_all: What information is to be exported that is shared * by all channels. * @info_mask_shared_by_all_available: What availability information is to be * exported that is shared by all channels. * @event_spec: Array of events which should be registered for this * channel. * @num_event_specs: Size of the event_spec array. * @ext_info: Array of extended info attributes for this channel. * The array is NULL terminated, the last element should * have its name field set to NULL. * @extend_name: Allows labeling of channel attributes with an * informative name. Note this has no effect codes etc, * unlike modifiers. * @datasheet_name: A name used in in-kernel mapping of channels. It should * correspond to the first name that the channel is referred * to by in the datasheet (e.g. IND), or the nearest * possible compound name (e.g. IND-INC). * @modified: Does a modifier apply to this channel. What these are * depends on the channel type. Modifier is set in * channel2. Examples are IIO_MOD_X for axial sensors about * the 'x' axis. * @indexed: Specify the channel has a numerical index. If not, * the channel index number will be suppressed for sysfs * attributes but not for event codes. * @output: Channel is output. * @differential: Channel is differential. */ struct iio_chan_spec { enum iio_chan_type type; int channel; int channel2; unsigned long address; int scan_index; struct { char sign; u8 realbits; u8 storagebits; u8 shift; u8 repeat; enum iio_endian endianness; } scan_type; long info_mask_separate; long info_mask_separate_available; long info_mask_shared_by_type; long info_mask_shared_by_type_available; long info_mask_shared_by_dir; long info_mask_shared_by_dir_available; long info_mask_shared_by_all; long info_mask_shared_by_all_available; const struct iio_event_spec *event_spec; unsigned int num_event_specs; const struct iio_chan_spec_ext_info *ext_info; const char *extend_name; const char *datasheet_name; unsigned modified:1; unsigned indexed:1; unsigned output:1; unsigned differential:1; }; /** * iio_channel_has_info() - Checks whether a channel supports a info attribute * @chan: The channel to be queried * @type: Type of the info attribute to be checked * * Returns true if the channels supports reporting values for the given info * attribute type, false otherwise. */ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, enum iio_chan_info_enum type) { return (chan->info_mask_separate & BIT(type)) | (chan->info_mask_shared_by_type & BIT(type)) | (chan->info_mask_shared_by_dir & BIT(type)) | (chan->info_mask_shared_by_all & BIT(type)); } /** * iio_channel_has_available() - Checks if a channel has an available attribute * @chan: The channel to be queried * @type: Type of the available attribute to be checked * * Returns true if the channel supports reporting available values for the * given attribute type, false otherwise. 
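 *
 * A minimal sketch (the helper name is hypothetical) of using this
 * predicate in a driver:
 *
 *	static bool my_chan_lists_scale(const struct iio_chan_spec *chan)
 *	{
 *		return iio_channel_has_available(chan, IIO_CHAN_INFO_SCALE);
 *	}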
*/ static inline bool iio_channel_has_available(const struct iio_chan_spec *chan, enum iio_chan_info_enum type) { return (chan->info_mask_separate_available & BIT(type)) | (chan->info_mask_shared_by_type_available & BIT(type)) | (chan->info_mask_shared_by_dir_available & BIT(type)) | (chan->info_mask_shared_by_all_available & BIT(type)); } #define IIO_CHAN_SOFT_TIMESTAMP(_si) { \ .type = IIO_TIMESTAMP, \ .channel = -1, \ .scan_index = _si, \ .scan_type = { \ .sign = 's', \ .realbits = 64, \ .storagebits = 64, \ }, \ } s64 iio_get_time_ns(const struct iio_dev *indio_dev); unsigned int iio_get_time_res(const struct iio_dev *indio_dev); /* Device operating modes */ #define INDIO_DIRECT_MODE 0x01 #define INDIO_BUFFER_TRIGGERED 0x02 #define INDIO_BUFFER_SOFTWARE 0x04 #define INDIO_BUFFER_HARDWARE 0x08 #define INDIO_EVENT_TRIGGERED 0x10 #define INDIO_HARDWARE_TRIGGERED 0x20 #define INDIO_ALL_BUFFER_MODES \ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE) #define INDIO_ALL_TRIGGERED_MODES \ (INDIO_BUFFER_TRIGGERED \ | INDIO_EVENT_TRIGGERED \ | INDIO_HARDWARE_TRIGGERED) #define INDIO_MAX_RAW_ELEMENTS 4 struct iio_trigger; /* forward declaration */ /** * struct iio_info - constant information about device * @event_attrs: event control attributes * @attrs: general purpose device attributes * @read_raw: function to request a value from the device. * mask specifies which value. Note 0 means a reading of * the channel in question. Return value will specify the * type of value returned by the device. val and val2 will * contain the elements making up the returned value. * @read_raw_multi: function to return values from the device. * mask specifies which value. Note 0 means a reading of * the channel in question. Return value will specify the * type of value returned by the device. vals pointer * contain the elements making up the returned value. * max_len specifies maximum number of elements * vals pointer can contain. val_len is used to return * length of valid elements in vals. * @read_avail: function to return the available values from the device. * mask specifies which value. Note 0 means the available * values for the channel in question. Return value * specifies if a IIO_AVAIL_LIST or a IIO_AVAIL_RANGE is * returned in vals. The type of the vals are returned in * type and the number of vals is returned in length. For * ranges, there are always three vals returned; min, step * and max. For lists, all possible values are enumerated. * @write_raw: function to write a value to the device. * Parameters are the same as for read_raw. * @write_raw_get_fmt: callback function to query the expected * format/precision. If not set by the driver, write_raw * returns IIO_VAL_INT_PLUS_MICRO. * @read_event_config: find out if the event is enabled. * @write_event_config: set if the event is enabled. * @read_event_value: read a configuration value associated with the event. * @write_event_value: write a configuration value for the event. * @validate_trigger: function to validate the trigger when the * current trigger gets changed. * @update_scan_mode: function to configure device and scan buffer when * channels have changed * @debugfs_reg_access: function to read or write register value of device * @of_xlate: function pointer to obtain channel specifier index. * When #iio-cells is greater than '0', the driver could * provide a custom of_xlate function that reads the * *args* and returns the appropriate index in registered * IIO channels array. 
* @hwfifo_set_watermark: function pointer to set the current hardware * fifo watermark level; see hwfifo_* entries in * Documentation/ABI/testing/sysfs-bus-iio for details on * how the hardware fifo operates * @hwfifo_flush_to_buffer: function pointer to flush the samples stored * in the hardware fifo to the device buffer. The driver * should not flush more than count samples. The function * must return the number of samples flushed, 0 if no * samples were flushed or a negative integer if no samples * were flushed and there was an error. **/ struct iio_info { const struct attribute_group *event_attrs; const struct attribute_group *attrs; int (*read_raw)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask); int (*read_raw_multi)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int max_len, int *vals, int *val_len, long mask); int (*read_avail)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, const int **vals, int *type, int *length, long mask); int (*write_raw)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask); int (*write_raw_get_fmt)(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, long mask); int (*read_event_config)(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, enum iio_event_type type, enum iio_event_direction dir); int (*write_event_config)(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, enum iio_event_type type, enum iio_event_direction dir, int state); int (*read_event_value)(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, enum iio_event_type type, enum iio_event_direction dir, enum iio_event_info info, int *val, int *val2); int (*write_event_value)(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, enum iio_event_type type, enum iio_event_direction dir, enum iio_event_info info, int val, int val2); int (*validate_trigger)(struct iio_dev *indio_dev, struct iio_trigger *trig); int (*update_scan_mode)(struct iio_dev *indio_dev, const unsigned long *scan_mask); int (*debugfs_reg_access)(struct iio_dev *indio_dev, unsigned reg, unsigned writeval, unsigned *readval); int (*of_xlate)(struct iio_dev *indio_dev, const struct of_phandle_args *iiospec); int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val); int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev, unsigned count); }; /** * struct iio_buffer_setup_ops - buffer setup related callbacks * @preenable: [DRIVER] function to run prior to marking buffer enabled * @postenable: [DRIVER] function to run after marking buffer enabled * @predisable: [DRIVER] function to run prior to marking buffer * disabled * @postdisable: [DRIVER] function to run after marking buffer disabled * @validate_scan_mask: [DRIVER] function callback to check whether a given * scan mask is valid for the device. 
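 *
 * A hedged sketch of a driver's setup ops; my_enable_hw() and
 * my_disable_hw() are hypothetical helpers standing in for device
 * specifics:
 *
 *	static int my_buffer_postenable(struct iio_dev *indio_dev)
 *	{
 *		return my_enable_hw(iio_priv(indio_dev));
 *	}
 *
 *	static int my_buffer_predisable(struct iio_dev *indio_dev)
 *	{
 *		return my_disable_hw(iio_priv(indio_dev));
 *	}
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.postenable = my_buffer_postenable,
 *		.predisable = my_buffer_predisable,
 *	};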
*/ struct iio_buffer_setup_ops { int (*preenable)(struct iio_dev *); int (*postenable)(struct iio_dev *); int (*predisable)(struct iio_dev *); int (*postdisable)(struct iio_dev *); bool (*validate_scan_mask)(struct iio_dev *indio_dev, const unsigned long *scan_mask); }; /** * struct iio_dev - industrial I/O device * @id: [INTERN] used to identify device internally * @driver_module: [INTERN] used to make it harder to undercut users * @modes: [DRIVER] operating modes supported by device * @currentmode: [DRIVER] current operating mode * @dev: [DRIVER] device structure, should be assigned a parent * and owner * @event_interface: [INTERN] event chrdevs associated with interrupt lines * @buffer: [DRIVER] any buffer present * @buffer_list: [INTERN] list of all buffers currently attached * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux * @mlock: [DRIVER] lock used to prevent simultaneous device state * changes * @available_scan_masks: [DRIVER] optional array of allowed bitmasks * @masklength: [INTERN] the length of the mask established from * channels * @active_scan_mask: [INTERN] union of all scan masks requested by buffers * @scan_timestamp: [INTERN] set if any buffers have requested timestamp * @scan_index_timestamp:[INTERN] cache of the index to the timestamp * @trig: [INTERN] current device trigger (buffer modes) * @trig_readonly: [INTERN] mark the current trigger immutable * @pollfunc: [DRIVER] function run on trigger being received * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table * @num_channels: [DRIVER] number of channels specified in @channels. * @channel_attr_list: [INTERN] keep track of automatically created channel * attributes * @chan_attr_group: [INTERN] group for all attrs in base directory * @name: [DRIVER] name of the device. * @info: [DRIVER] callbacks and constant info from driver * @clock_id: [INTERN] timestamping clock posix identifier * @info_exist_lock: [INTERN] lock to prevent use during removal * @setup_ops: [DRIVER] callbacks to call before and after buffer * enable/disable * @chrdev: [INTERN] associated character device * @groups: [INTERN] attribute groups * @groupcounter: [INTERN] index of next attribute group * @flags: [INTERN] file ops related flags including busy flag. * @debugfs_dentry: [INTERN] device specific debugfs dentry. * @cached_reg_addr: [INTERN] cached register address for debugfs reads. 
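 *
 * A typical probe sequence, sketched with hypothetical my_* names (the
 * exact fields a driver fills vary):
 *
 *	indio_dev = devm_iio_device_alloc(dev, sizeof(struct my_priv));
 *	if (!indio_dev)
 *		return -ENOMEM;
 *
 *	indio_dev->name = "my_sensor";
 *	indio_dev->info = &my_info;
 *	indio_dev->modes = INDIO_DIRECT_MODE;
 *	indio_dev->channels = my_channels;
 *	indio_dev->num_channels = ARRAY_SIZE(my_channels);
 *
 *	return devm_iio_device_register(dev, indio_dev);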
*/ struct iio_dev { int id; struct module *driver_module; int modes; int currentmode; struct device dev; struct iio_event_interface *event_interface; struct iio_buffer *buffer; struct list_head buffer_list; int scan_bytes; struct mutex mlock; const unsigned long *available_scan_masks; unsigned masklength; const unsigned long *active_scan_mask; bool scan_timestamp; unsigned scan_index_timestamp; struct iio_trigger *trig; bool trig_readonly; struct iio_poll_func *pollfunc; struct iio_poll_func *pollfunc_event; struct iio_chan_spec const *channels; int num_channels; struct list_head channel_attr_list; struct attribute_group chan_attr_group; const char *name; const struct iio_info *info; clockid_t clock_id; struct mutex info_exist_lock; const struct iio_buffer_setup_ops *setup_ops; struct cdev chrdev; #define IIO_MAX_GROUPS 6 const struct attribute_group *groups[IIO_MAX_GROUPS + 1]; int groupcounter; unsigned long flags; #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_dentry; unsigned cached_reg_addr; #endif }; const struct iio_chan_spec *iio_find_channel_from_si(struct iio_dev *indio_dev, int si); /** * iio_device_register() - register a device with the IIO subsystem * @indio_dev: Device structure filled by the device driver **/ #define iio_device_register(indio_dev) \ __iio_device_register((indio_dev), THIS_MODULE) int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod); void iio_device_unregister(struct iio_dev *indio_dev); /** * devm_iio_device_register - Resource-managed iio_device_register() * @dev: Device to allocate iio_dev for * @indio_dev: Device structure filled by the device driver * * Managed iio_device_register. The IIO device registered with this * function is automatically unregistered on driver detach. This function * calls iio_device_register() internally. Refer to that function for more * information. * * If an iio_dev registered with this function needs to be unregistered * separately, devm_iio_device_unregister() must be used. * * RETURNS: * 0 on success, negative error number on failure. */ #define devm_iio_device_register(dev, indio_dev) \ __devm_iio_device_register((dev), (indio_dev), THIS_MODULE) int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, struct module *this_mod); void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); int iio_device_claim_direct_mode(struct iio_dev *indio_dev); void iio_device_release_direct_mode(struct iio_dev *indio_dev); extern struct bus_type iio_bus_type; /** * iio_device_put() - reference counted deallocation of struct device * @indio_dev: IIO device structure containing the device **/ static inline void iio_device_put(struct iio_dev *indio_dev) { if (indio_dev) put_device(&indio_dev->dev); } /** * iio_device_get_clock() - Retrieve current timestamping clock for the device * @indio_dev: IIO device structure containing the device */ static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) { return indio_dev->clock_id; } /** * dev_to_iio_dev() - Get IIO device struct from a device struct * @dev: The device embedded in the IIO device * * Note: The device must be a IIO device, otherwise the result is undefined. 
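 *
 * For example (a sketch; struct my_priv and its mode field are
 * hypothetical), in a sysfs callback where only the struct device is at
 * hand:
 *
 *	static ssize_t mode_show(struct device *dev,
 *				 struct device_attribute *attr, char *buf)
 *	{
 *		struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 *		struct my_priv *priv = iio_priv(indio_dev);
 *
 *		return snprintf(buf, PAGE_SIZE, "%d\n", priv->mode);
 *	}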
*/ static inline struct iio_dev *dev_to_iio_dev(struct device *dev) { return container_of(dev, struct iio_dev, dev); } /** * iio_device_get() - increment reference count for the device * @indio_dev: IIO device structure * * Returns: The passed IIO device **/ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev) { return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL; } /** * iio_device_set_drvdata() - Set device driver data * @indio_dev: IIO device structure * @data: Driver specific data * * Allows to attach an arbitrary pointer to an IIO device, which can later be * retrieved by iio_device_get_drvdata(). */ static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data) { dev_set_drvdata(&indio_dev->dev, data); } /** * iio_device_get_drvdata() - Get device driver data * @indio_dev: IIO device structure * * Returns the data previously set with iio_device_set_drvdata() */ static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev) { return dev_get_drvdata(&indio_dev->dev); } /* Can we make this smaller? */ #define IIO_ALIGN L1_CACHE_BYTES struct iio_dev *iio_device_alloc(int sizeof_priv); static inline void *iio_priv(const struct iio_dev *indio_dev) { return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN); } static inline struct iio_dev *iio_priv_to_dev(void *priv) { return (struct iio_dev *)((char *)priv - ALIGN(sizeof(struct iio_dev), IIO_ALIGN)); } void iio_device_free(struct iio_dev *indio_dev); int devm_iio_device_match(struct device *dev, void *res, void *data); struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv); void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev); struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, const char *fmt, ...); void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig); /** * iio_buffer_enabled() - helper function to test if the buffer is enabled * @indio_dev: IIO device structure for device **/ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev) { return indio_dev->currentmode & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE); } /** * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry * @indio_dev: IIO device structure for device **/ #if defined(CONFIG_DEBUG_FS) static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) { return indio_dev->debugfs_dentry; } #else static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) { return NULL; } #endif ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals); int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, int *fract); /** * IIO_DEGREE_TO_RAD() - Convert degree to rad * @deg: A value in degree * * Returns the given value converted from degree to rad */ #define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL) /** * IIO_RAD_TO_DEGREE() - Convert rad to degree * @rad: A value in rad * * Returns the given value converted from rad to degree */ #define IIO_RAD_TO_DEGREE(rad) \ (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL) /** * IIO_G_TO_M_S_2() - Convert g to meter / second**2 * @g: A value in g * * Returns the given value converted from g to meter / second**2 */ #define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL) /** * IIO_M_S_2_TO_G() - Convert meter / second**2 to g * @ms2: A value in meter / second**2 * * Returns the given value converted from meter / second**2 to g */ #define IIO_M_S_2_TO_G(ms2) 
	(((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)

#endif /* _INDUSTRIAL_IO_H_ */
iio/buffer_impl.h 0000644 00000012271 14722070374 0010000 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IIO_BUFFER_GENERIC_IMPL_H_
#define _IIO_BUFFER_GENERIC_IMPL_H_

#include <linux/sysfs.h>
#include <linux/kref.h>

#ifdef CONFIG_IIO_BUFFER

struct iio_dev;
struct iio_buffer;

/**
 * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
 *   configured. It has a fixed value which will be buffer specific.
 */
#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0)

/**
 * struct iio_buffer_access_funcs - access functions for buffers.
 * @store_to:		actually store stuff to the buffer
 * @read_first_n:	try to get a specified number of bytes (must exist)
 * @data_available:	indicates how much data is available for reading from
 *			the buffer.
 * @request_update:	if a parameter change has been marked, update underlying
 *			storage.
 * @set_bytes_per_datum: set number of bytes per datum
 * @set_length:		set number of datums in buffer
 * @enable:		called if the buffer is attached to a device and the
 *			device starts sampling. Calls are balanced with
 *			@disable.
 * @disable:		called if the buffer is attached to a device and the
 *			device stops sampling. Calls are balanced with @enable.
 * @release:		called when the last reference to the buffer is dropped,
 *			should free all resources allocated by the buffer.
 * @modes:		Supported operating modes by this buffer type
 * @flags:		A bitmask combination of INDIO_BUFFER_FLAG_*
 *
 * The purpose of this structure is to make the buffer element modular, as
 * even for a given driver different use cases may require different buffer
 * designs (space efficiency vs speed for example).
 *
 * It is worth noting that a given buffer implementation may only support a
 * small proportion of these functions. The core code 'should' cope fine with
 * any of them not existing.
 **/
struct iio_buffer_access_funcs {
	int (*store_to)(struct iio_buffer *buffer, const void *data);
	int (*read_first_n)(struct iio_buffer *buffer, size_t n, char __user *buf);
	size_t (*data_available)(struct iio_buffer *buffer);

	int (*request_update)(struct iio_buffer *buffer);

	int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
	int (*set_length)(struct iio_buffer *buffer, unsigned int length);

	int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
	int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);

	void (*release)(struct iio_buffer *buffer);

	unsigned int modes;
	unsigned int flags;
};

/**
 * struct iio_buffer - general buffer structure
 *
 * Note that the internals of this structure should only be of interest to
 * those writing new buffer implementations.
 */
struct iio_buffer {
	/** @length: Number of datums in buffer. */
	unsigned int length;

	/** @bytes_per_datum: Size of individual datum including timestamp. */
	size_t bytes_per_datum;

	/**
	 * @access: Buffer access functions associated with the
	 * implementation.
	 */
	const struct iio_buffer_access_funcs *access;

	/** @scan_mask: Bitmask used in masking scan mode elements. */
	long *scan_mask;

	/** @demux_list: List of operations required to demux the scan. */
	struct list_head demux_list;

	/** @pollq: Wait queue to allow for polling on the buffer. */
	wait_queue_head_t pollq;

	/** @watermark: Number of datums to wait for poll/read. */
	unsigned int watermark;

	/* private: */
	/*
	 * @scan_el_attrs: Control of scan elements if that scan mode
	 * control method is used.
*/ struct attribute_group *scan_el_attrs; /* @scan_timestamp: Does the scan mode include a timestamp. */ bool scan_timestamp; /* @scan_el_dev_attr_list: List of scan element related attributes. */ struct list_head scan_el_dev_attr_list; /* @buffer_group: Attributes of the buffer group. */ struct attribute_group buffer_group; /* * @scan_el_group: Attribute group for those attributes not * created from the iio_chan_info array. */ struct attribute_group scan_el_group; /* @stufftoread: Flag to indicate new data. */ bool stufftoread; /* @attrs: Standard attributes of the buffer. */ const struct attribute **attrs; /* @demux_bounce: Buffer for doing gather from incoming scan. */ void *demux_bounce; /* @buffer_list: Entry in the device's list of current buffers. */ struct list_head buffer_list; /* @ref: Reference count of the buffer. */ struct kref ref; }; /** * iio_update_buffers() - add or remove buffer from active list * @indio_dev: device to add buffer to * @insert_buffer: buffer to insert * @remove_buffer: buffer to remove * * Note this will tear down all the buffering and build it up again */ int iio_update_buffers(struct iio_dev *indio_dev, struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer); /** * iio_buffer_init() - Initialize the buffer structure * @buffer: buffer to be initialized **/ void iio_buffer_init(struct iio_buffer *buffer); struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer); void iio_buffer_put(struct iio_buffer *buffer); #else /* CONFIG_IIO_BUFFER */ static inline void iio_buffer_get(struct iio_buffer *buffer) {} static inline void iio_buffer_put(struct iio_buffer *buffer) {} #endif /* CONFIG_IIO_BUFFER */ #endif /* _IIO_BUFFER_GENERIC_IMPL_H_ */ virtio_caif.h 0000644 00000000754 14722070374 0007227 0 ustar 00 /* * Copyright (C) ST-Ericsson AB 2012 * Author: Sjur Brændeland <sjur.brandeland@stericsson.com> * * This header is BSD licensed so * anyone can use the definitions to implement compatible remote processors */ #ifndef VIRTIO_CAIF_H #define VIRTIO_CAIF_H #include <linux/types.h> struct virtio_caif_transf_config { u16 headroom; u16 tailroom; u32 mtu; u8 reserved[4]; }; struct virtio_caif_config { struct virtio_caif_transf_config uplink, downlink; u8 reserved[8]; }; #endif task_io_accounting.h 0000644 00000002207 14722070374 0010567 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * task_io_accounting: a structure which is used for recording a single task's * IO statistics. * * Don't include this header file directly - it is designed to be dragged in via * sched.h. * * Blame Andrew Morton for all this. */ struct task_io_accounting { #ifdef CONFIG_TASK_XACCT /* bytes read */ u64 rchar; /* bytes written */ u64 wchar; /* # of read syscalls */ u64 syscr; /* # of write syscalls */ u64 syscw; #endif /* CONFIG_TASK_XACCT */ #ifdef CONFIG_TASK_IO_ACCOUNTING /* * The number of bytes which this task has caused to be read from * storage. */ u64 read_bytes; /* * The number of bytes which this task has caused, or shall cause to be * written to disk. */ u64 write_bytes; /* * A task can cause "negative" IO too. If this task truncates some * dirty pagecache, some IO which another task has been accounted for * (in its write_bytes) will not be happening. We _could_ just * subtract that from the truncating task's write_bytes, but there is * information loss in doing that.
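 *
 * A worked example: task A dirties 64 KiB of pagecache, so 64 KiB is
 * added to A's write_bytes; task B then truncates those pages before
 * writeback, so 64 KiB is added to B's cancelled_write_bytes. A
 * consumer wanting a per-task net writeout figure computes
 * write_bytes - cancelled_write_bytes.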
*/ u64 cancelled_write_bytes; #endif /* CONFIG_TASK_IO_ACCOUNTING */ }; mbcache.h 0000644 00000004240 14722070374 0006305 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MBCACHE_H #define _LINUX_MBCACHE_H #include <linux/hash.h> #include <linux/list_bl.h> #include <linux/list.h> #include <linux/atomic.h> #include <linux/fs.h> struct mb_cache; /* Cache entry flags */ enum { MBE_REFERENCED_B = 0, MBE_REUSABLE_B }; struct mb_cache_entry { /* List of entries in cache - protected by cache->c_list_lock */ struct list_head e_list; /* * Hash table list - protected by hash chain bitlock. The entry is * guaranteed to be hashed while e_refcnt > 0. */ struct hlist_bl_node e_hash_list; /* * Entry refcount. Once it reaches zero, entry is unhashed and freed. * While refcount > 0, the entry is guaranteed to stay in the hash and * e.g. mb_cache_entry_try_delete() will fail. */ atomic_t e_refcnt; /* Key in hash - stable during lifetime of the entry */ u32 e_key; unsigned long e_flags; /* User provided value - stable during lifetime of the entry */ u64 e_value; }; struct mb_cache *mb_cache_create(int bucket_bits); void mb_cache_destroy(struct mb_cache *cache); int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, u64 value, bool reusable); void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry); void mb_cache_entry_wait_unused(struct mb_cache_entry *entry); static inline void mb_cache_entry_put(struct mb_cache *cache, struct mb_cache_entry *entry) { unsigned int cnt = atomic_dec_return(&entry->e_refcnt); if (cnt > 0) { if (cnt <= 2) wake_up_var(&entry->e_refcnt); return; } __mb_cache_entry_free(cache, entry); } struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache, u32 key, u64 value); void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value); struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, u64 value); struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, u32 key); struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache, struct mb_cache_entry *entry); void mb_cache_entry_touch(struct mb_cache *cache, struct mb_cache_entry *entry); #endif /* _LINUX_MBCACHE_H */ ihex.h 0000644 00000004237 14722070374 0005666 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Compact binary representation of ihex records. Some devices need their * firmware loaded in strange orders rather than a single big blob, but * actually parsing ihex-as-text within the kernel seems silly. Thus,... */ #ifndef __LINUX_IHEX_H__ #define __LINUX_IHEX_H__ #include <linux/types.h> #include <linux/firmware.h> #include <linux/device.h> /* Intel HEX files actually limit the length to 256 bytes, but we have drivers which would benefit from using separate records which are longer than that, so we extend to 16 bits of length */ struct ihex_binrec { __be32 addr; __be16 len; uint8_t data[0]; } __attribute__((packed)); static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p) { return be16_to_cpu(p->len) + sizeof(*p); } /* Find the next record, taking into account the 4-byte alignment */ static inline const struct ihex_binrec * __ihex_next_binrec(const struct ihex_binrec *rec) { const void *p = rec; return p + ALIGN(ihex_binrec_size(rec), 4); } static inline const struct ihex_binrec * ihex_next_binrec(const struct ihex_binrec *rec) { rec = __ihex_next_binrec(rec); return be16_to_cpu(rec->len) ? rec : NULL; } /* Check that ihex_next_binrec() won't take us off the end of the image... 
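 *
 * Once validated, a driver walks the records roughly like this (a
 * sketch only; write_to_device() stands in for a hypothetical
 * device-specific helper):
 *
 *	const struct ihex_binrec *rec = (const void *)fw->data;
 *	while (rec) {
 *		write_to_device(be32_to_cpu(rec->addr), rec->data,
 *				be16_to_cpu(rec->len));
 *		rec = ihex_next_binrec(rec);
 *	}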
*/ static inline int ihex_validate_fw(const struct firmware *fw) { const struct ihex_binrec *end, *rec; rec = (const void *)fw->data; end = (const void *)&fw->data[fw->size - sizeof(*end)]; for (; rec <= end; rec = __ihex_next_binrec(rec)) { /* Zero length marks end of records */ if (rec == end && !be16_to_cpu(rec->len)) return 0; } return -EINVAL; } /* Request firmware and validate it so that we can trust we won't * run off the end while reading records... */ static inline int request_ihex_firmware(const struct firmware **fw, const char *fw_name, struct device *dev) { const struct firmware *lfw; int ret; ret = request_firmware(&lfw, fw_name, dev); if (ret) return ret; ret = ihex_validate_fw(lfw); if (ret) { dev_err(dev, "Firmware \"%s\" not valid IHEX records\n", fw_name); release_firmware(lfw); return ret; } *fw = lfw; return 0; } #endif /* __LINUX_IHEX_H__ */ utsname.h 0000644 00000003444 14722070374 0006404 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UTSNAME_H #define _LINUX_UTSNAME_H #include <linux/sched.h> #include <linux/kref.h> #include <linux/nsproxy.h> #include <linux/ns_common.h> #include <linux/err.h> #include <uapi/linux/utsname.h> enum uts_proc { UTS_PROC_OSTYPE, UTS_PROC_OSRELEASE, UTS_PROC_VERSION, UTS_PROC_HOSTNAME, UTS_PROC_DOMAINNAME, }; struct user_namespace; extern struct user_namespace init_user_ns; struct uts_namespace { struct kref kref; struct new_utsname name; struct user_namespace *user_ns; struct ucounts *ucounts; struct ns_common ns; } __randomize_layout; extern struct uts_namespace init_uts_ns; #ifdef CONFIG_UTS_NS static inline void get_uts_ns(struct uts_namespace *ns) { kref_get(&ns->kref); } extern struct uts_namespace *copy_utsname(unsigned long flags, struct user_namespace *user_ns, struct uts_namespace *old_ns); extern void free_uts_ns(struct kref *kref); static inline void put_uts_ns(struct uts_namespace *ns) { kref_put(&ns->kref, free_uts_ns); } void uts_ns_init(void); #else static inline void get_uts_ns(struct uts_namespace *ns) { } static inline void put_uts_ns(struct uts_namespace *ns) { } static inline struct uts_namespace *copy_utsname(unsigned long flags, struct user_namespace *user_ns, struct uts_namespace *old_ns) { if (flags & CLONE_NEWUTS) return ERR_PTR(-EINVAL); return old_ns; } static inline void uts_ns_init(void) { } #endif #ifdef CONFIG_PROC_SYSCTL extern void uts_proc_notify(enum uts_proc proc); #else static inline void uts_proc_notify(enum uts_proc proc) { } #endif static inline struct new_utsname *utsname(void) { return &current->nsproxy->uts_ns->name; } static inline struct new_utsname *init_utsname(void) { return &init_uts_ns.name; } extern struct rw_semaphore uts_sem; #endif /* _LINUX_UTSNAME_H */ clk/mxs.h 0000644 00000000326 14722070374 0006304 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 Freescale Semiconductor, Inc.
*/ #ifndef __LINUX_CLK_MXS_H #define __LINUX_CLK_MXS_H int mxs_saif_clkmux_select(unsigned int clkmux); #endif clk/mmp.h 0000644 00000000733 14722070374 0006270 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CLK_MMP_H #define __CLK_MMP_H #include <linux/types.h> extern void pxa168_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys, phys_addr_t apbc_phys); extern void pxa910_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys, phys_addr_t apbc_phys, phys_addr_t apbcp_phys); extern void mmp2_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys, phys_addr_t apbc_phys); #endif clk/analogbits-wrpll-cln28hpc.h 0000644 00000005473 14722070374 0012405 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018-2019 SiFive, Inc. * Wesley Terpstra * Paul Walmsley */ #ifndef __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H #define __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H #include <linux/types.h> /* DIVQ_VALUES: number of valid DIVQ values */ #define DIVQ_VALUES 6 /* * Bit definitions for struct wrpll_cfg.flags * * WRPLL_FLAGS_BYPASS_FLAG: if set, the PLL is either in bypass, or should be * programmed to enter bypass * WRPLL_FLAGS_RESET_FLAG: if set, the PLL is in reset * WRPLL_FLAGS_INT_FEEDBACK_FLAG: if set, the PLL is configured for internal * feedback mode * WRPLL_FLAGS_EXT_FEEDBACK_FLAG: if set, the PLL is configured for external * feedback mode (not yet supported by this driver) */ #define WRPLL_FLAGS_BYPASS_SHIFT 0 #define WRPLL_FLAGS_BYPASS_MASK BIT(WRPLL_FLAGS_BYPASS_SHIFT) #define WRPLL_FLAGS_RESET_SHIFT 1 #define WRPLL_FLAGS_RESET_MASK BIT(WRPLL_FLAGS_RESET_SHIFT) #define WRPLL_FLAGS_INT_FEEDBACK_SHIFT 2 #define WRPLL_FLAGS_INT_FEEDBACK_MASK BIT(WRPLL_FLAGS_INT_FEEDBACK_SHIFT) #define WRPLL_FLAGS_EXT_FEEDBACK_SHIFT 3 #define WRPLL_FLAGS_EXT_FEEDBACK_MASK BIT(WRPLL_FLAGS_EXT_FEEDBACK_SHIFT) /** * struct wrpll_cfg - WRPLL configuration values * @divr: reference divider value (6 bits), as presented to the PLL signals * @divf: feedback divider value (9 bits), as presented to the PLL signals * @divq: output divider value (3 bits), as presented to the PLL signals * @flags: PLL configuration flags. See above for more information * @range: PLL loop filter range. See below for more information * @output_rate_cache: cached output rates, swept across DIVQ * @parent_rate: PLL refclk rate for which values are valid * @max_r: maximum possible R divider value, given @parent_rate * @init_r: initial R divider value to start the search from * * @divr, @divf, @divq, @range represent what the PLL expects to see * on its input signals. Thus @divr and @divf are the actual divisors * minus one. @divq is a power-of-two divider; for example, 1 = * divide-by-2 and 6 = divide-by-64. 0 is an invalid @divq value. * * When initially passing a struct wrpll_cfg record, the * record should be zero-initialized with the exception of the @flags * field. The only flag bits that need to be set are either * WRPLL_FLAGS_INT_FEEDBACK or WRPLL_FLAGS_EXT_FEEDBACK.
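 *
 * A minimal usage sketch (rates are illustrative; error handling
 * elided):
 *
 *	struct wrpll_cfg cfg = { .flags = WRPLL_FLAGS_INT_FEEDBACK_MASK };
 *	unsigned long parent_rate = 33330000UL;
 *	unsigned long rate = 0;
 *	if (!wrpll_configure_for_rate(&cfg, 1000000000U, parent_rate))
 *		rate = wrpll_calc_output_rate(&cfg, parent_rate);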
*/ struct wrpll_cfg { u8 divr; u8 divq; u8 range; u8 flags; u16 divf; /* private: */ u32 output_rate_cache[DIVQ_VALUES]; unsigned long parent_rate; u8 max_r; u8 init_r; }; int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate, unsigned long parent_rate); unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c); unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c, unsigned long parent_rate); #endif /* __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H */ clk/tegra.h 0000644 00000005327 14722070374 0006605 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. */ #ifndef __LINUX_CLK_TEGRA_H_ #define __LINUX_CLK_TEGRA_H_ #include <linux/types.h> #include <linux/bug.h> /* * Tegra CPU clock and reset control ops * * wait_for_reset: * keep waiting until the CPU in reset state * put_in_reset: * put the CPU in reset state * out_of_reset: * release the CPU from reset state * enable_clock: * CPU clock un-gate * disable_clock: * CPU clock gate * rail_off_ready: * CPU is ready for rail off * suspend: * save the clock settings when CPU go into low-power state * resume: * restore the clock settings when CPU exit low-power state */ struct tegra_cpu_car_ops { void (*wait_for_reset)(u32 cpu); void (*put_in_reset)(u32 cpu); void (*out_of_reset)(u32 cpu); void (*enable_clock)(u32 cpu); void (*disable_clock)(u32 cpu); #ifdef CONFIG_PM_SLEEP bool (*rail_off_ready)(void); void (*suspend)(void); void (*resume)(void); #endif }; extern struct tegra_cpu_car_ops *tegra_cpu_car_ops; static inline void tegra_wait_cpu_in_reset(u32 cpu) { if (WARN_ON(!tegra_cpu_car_ops->wait_for_reset)) return; tegra_cpu_car_ops->wait_for_reset(cpu); } static inline void tegra_put_cpu_in_reset(u32 cpu) { if (WARN_ON(!tegra_cpu_car_ops->put_in_reset)) return; tegra_cpu_car_ops->put_in_reset(cpu); } static inline void tegra_cpu_out_of_reset(u32 cpu) { if (WARN_ON(!tegra_cpu_car_ops->out_of_reset)) return; tegra_cpu_car_ops->out_of_reset(cpu); } static inline void tegra_enable_cpu_clock(u32 cpu) { if (WARN_ON(!tegra_cpu_car_ops->enable_clock)) return; tegra_cpu_car_ops->enable_clock(cpu); } static inline void tegra_disable_cpu_clock(u32 cpu) { if (WARN_ON(!tegra_cpu_car_ops->disable_clock)) return; tegra_cpu_car_ops->disable_clock(cpu); } #ifdef CONFIG_PM_SLEEP static inline bool tegra_cpu_rail_off_ready(void) { if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready)) return false; return tegra_cpu_car_ops->rail_off_ready(); } static inline void tegra_cpu_clock_suspend(void) { if (WARN_ON(!tegra_cpu_car_ops->suspend)) return; tegra_cpu_car_ops->suspend(); } static inline void tegra_cpu_clock_resume(void) { if (WARN_ON(!tegra_cpu_car_ops->resume)) return; tegra_cpu_car_ops->resume(); } #endif extern void tegra210_xusb_pll_hw_control_enable(void); extern void tegra210_xusb_pll_hw_sequence_start(void); extern void tegra210_sata_pll_hw_control_enable(void); extern void tegra210_sata_pll_hw_sequence_start(void); extern void tegra210_set_sata_pll_seq_sw(bool state); extern void tegra210_put_utmipll_in_iddq(void); extern void tegra210_put_utmipll_out_iddq(void); extern int tegra210_clk_handle_mbist_war(unsigned int id); #endif /* __LINUX_CLK_TEGRA_H_ */ clk/sunxi-ng.h 0000644 00000001060 14722070374 0007241 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved. 
*/ #ifndef _LINUX_CLK_SUNXI_NG_H_ #define _LINUX_CLK_SUNXI_NG_H_ #include <linux/errno.h> #ifdef CONFIG_SUNXI_CCU int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode); int sunxi_ccu_get_mmc_timing_mode(struct clk *clk); #else static inline int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode) { return -ENOTSUPP; } static inline int sunxi_ccu_get_mmc_timing_mode(struct clk *clk) { return -ENOTSUPP; } #endif #endif clk/at91_pmc.h 0000644 00000026447 14722070374 0007126 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/clk/at91_pmc.h * * Copyright (C) 2005 Ivan Kokshaysky * Copyright (C) SAN People * * Power Management Controller (PMC) - System peripherals registers. * Based on AT91RM9200 datasheet revision E. */ #ifndef AT91_PMC_H #define AT91_PMC_H #define AT91_PMC_SCER 0x00 /* System Clock Enable Register */ #define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */ #define AT91_PMC_SCSR 0x08 /* System Clock Status Register */ #define AT91_PMC_PCK (1 << 0) /* Processor Clock */ #define AT91RM9200_PMC_UDP (1 << 1) /* USB Devcice Port Clock [AT91RM9200 only] */ #define AT91RM9200_PMC_MCKUDP (1 << 2) /* USB Device Port Master Clock Automatic Disable on Suspend [AT91RM9200 only] */ #define AT91RM9200_PMC_UHP (1 << 4) /* USB Host Port Clock [AT91RM9200 only] */ #define AT91SAM926x_PMC_UHP (1 << 6) /* USB Host Port Clock [AT91SAM926x only] */ #define AT91SAM926x_PMC_UDP (1 << 7) /* USB Devcice Port Clock [AT91SAM926x only] */ #define AT91_PMC_PCK0 (1 << 8) /* Programmable Clock 0 */ #define AT91_PMC_PCK1 (1 << 9) /* Programmable Clock 1 */ #define AT91_PMC_PCK2 (1 << 10) /* Programmable Clock 2 */ #define AT91_PMC_PCK3 (1 << 11) /* Programmable Clock 3 */ #define AT91_PMC_PCK4 (1 << 12) /* Programmable Clock 4 [AT572D940HF only] */ #define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */ #define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */ #define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */ #define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */ #define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */ #define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */ #define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */ #define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */ #define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */ #define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */ #define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */ #define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */ #define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */ #define AT91_PMC_WAITMODE (1 << 2) /* Wait Mode Command */ #define AT91_PMC_MOSCRCEN (1 << 3) /* Main On-Chip RC Oscillator Enable [some SAM9] */ #define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */ #define AT91_PMC_KEY_MASK (0xff << 16) #define AT91_PMC_KEY (0x37 << 16) /* MOR Writing Key */ #define AT91_PMC_MOSCSEL (1 << 24) /* Main Oscillator Selection [some SAM9] */ #define AT91_PMC_CFDEN (1 << 25) /* Clock Failure Detector Enable [some SAM9] */ #define AT91_CKGR_MCFR 0x24 /* Main Clock Frequency Register */ #define AT91_PMC_MAINF (0xffff << 0) /* Main Clock Frequency */ #define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */ #define AT91_CKGR_PLLAR 0x28 /* PLL A Register */ #define AT91_CKGR_PLLBR 0x2c /* PLL B Register */ #define AT91_PMC_DIV (0xff << 0) /* Divider */ #define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */ #define 
AT91_PMC_OUT (3 << 14) /* PLL Clock Frequency Range */ #define AT91_PMC_MUL (0x7ff << 16) /* PLL Multiplier */ #define AT91_PMC_MUL_GET(n) ((n) >> 16 & 0x7ff) #define AT91_PMC3_MUL (0x7f << 18) /* PLL Multiplier [SAMA5 only] */ #define AT91_PMC3_MUL_GET(n) ((n) >> 18 & 0x7f) #define AT91_PMC_USBDIV (3 << 28) /* USB Divisor (PLLB only) */ #define AT91_PMC_USBDIV_1 (0 << 28) #define AT91_PMC_USBDIV_2 (1 << 28) #define AT91_PMC_USBDIV_4 (2 << 28) #define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */ #define AT91_PMC_CPU_CKR 0x28 /* CPU Clock Register */ #define AT91_PMC_MCKR 0x30 /* Master Clock Register */ #define AT91_PMC_CSS (3 << 0) /* Master Clock Selection */ #define AT91_PMC_CSS_SLOW (0 << 0) #define AT91_PMC_CSS_MAIN (1 << 0) #define AT91_PMC_CSS_PLLA (2 << 0) #define AT91_PMC_CSS_PLLB (3 << 0) #define AT91_PMC_CSS_UPLL (3 << 0) /* [some SAM9 only] */ #define PMC_PRES_OFFSET 2 #define AT91_PMC_PRES (7 << PMC_PRES_OFFSET) /* Master Clock Prescaler */ #define AT91_PMC_PRES_1 (0 << PMC_PRES_OFFSET) #define AT91_PMC_PRES_2 (1 << PMC_PRES_OFFSET) #define AT91_PMC_PRES_4 (2 << PMC_PRES_OFFSET) #define AT91_PMC_PRES_8 (3 << PMC_PRES_OFFSET) #define AT91_PMC_PRES_16 (4 << PMC_PRES_OFFSET) #define AT91_PMC_PRES_32 (5 << PMC_PRES_OFFSET) #define AT91_PMC_PRES_64 (6 << PMC_PRES_OFFSET) #define PMC_ALT_PRES_OFFSET 4 #define AT91_PMC_ALT_PRES (7 << PMC_ALT_PRES_OFFSET) /* Master Clock Prescaler [alternate location] */ #define AT91_PMC_ALT_PRES_1 (0 << PMC_ALT_PRES_OFFSET) #define AT91_PMC_ALT_PRES_2 (1 << PMC_ALT_PRES_OFFSET) #define AT91_PMC_ALT_PRES_4 (2 << PMC_ALT_PRES_OFFSET) #define AT91_PMC_ALT_PRES_8 (3 << PMC_ALT_PRES_OFFSET) #define AT91_PMC_ALT_PRES_16 (4 << PMC_ALT_PRES_OFFSET) #define AT91_PMC_ALT_PRES_32 (5 << PMC_ALT_PRES_OFFSET) #define AT91_PMC_ALT_PRES_64 (6 << PMC_ALT_PRES_OFFSET) #define AT91_PMC_MDIV (3 << 8) /* Master Clock Division */ #define AT91RM9200_PMC_MDIV_1 (0 << 8) /* [AT91RM9200 only] */ #define AT91RM9200_PMC_MDIV_2 (1 << 8) #define AT91RM9200_PMC_MDIV_3 (2 << 8) #define AT91RM9200_PMC_MDIV_4 (3 << 8) #define AT91SAM9_PMC_MDIV_1 (0 << 8) /* [SAM9 only] */ #define AT91SAM9_PMC_MDIV_2 (1 << 8) #define AT91SAM9_PMC_MDIV_4 (2 << 8) #define AT91SAM9_PMC_MDIV_6 (3 << 8) /* [some SAM9 only] */ #define AT91SAM9_PMC_MDIV_3 (3 << 8) /* [some SAM9 only] */ #define AT91_PMC_PDIV (1 << 12) /* Processor Clock Division [some SAM9 only] */ #define AT91_PMC_PDIV_1 (0 << 12) #define AT91_PMC_PDIV_2 (1 << 12) #define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */ #define AT91_PMC_PLLADIV2_OFF (0 << 12) #define AT91_PMC_PLLADIV2_ON (1 << 12) #define AT91_PMC_H32MXDIV BIT(24) #define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ #define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */ #define AT91_PMC_USBS_PLLA (0 << 0) #define AT91_PMC_USBS_UPLL (1 << 0) #define AT91_PMC_USBS_PLLB (1 << 0) /* [AT91SAMN12 only] */ #define AT91_PMC_OHCIUSBDIV (0xF << 8) /* Divider for USB OHCI Clock */ #define AT91_PMC_OHCIUSBDIV_1 (0x0 << 8) #define AT91_PMC_OHCIUSBDIV_2 (0x1 << 8) #define AT91_PMC_SMD 0x3c /* Soft Modem Clock Register [some SAM9 only] */ #define AT91_PMC_SMDS (0x1 << 0) /* SMD input clock selection */ #define AT91_PMC_SMD_DIV (0x1f << 8) /* SMD input clock divider */ #define AT91_PMC_SMDDIV(n) (((n) << 8) & AT91_PMC_SMD_DIV) #define AT91_PMC_PCKR(n) (0x40 + ((n) * 4)) /* Programmable Clock 0-N Registers */ #define AT91_PMC_ALT_PCKR_CSS (0x7 << 0) /* Programmable Clock Source Selection [alternate length] */ #define 
AT91_PMC_CSS_MASTER (4 << 0) /* [some SAM9 only] */ #define AT91_PMC_CSSMCK (0x1 << 8) /* CSS or Master Clock Selection */ #define AT91_PMC_CSSMCK_CSS (0 << 8) #define AT91_PMC_CSSMCK_MCK (1 << 8) #define AT91_PMC_IER 0x60 /* Interrupt Enable Register */ #define AT91_PMC_IDR 0x64 /* Interrupt Disable Register */ #define AT91_PMC_SR 0x68 /* Status Register */ #define AT91_PMC_MOSCS (1 << 0) /* MOSCS Flag */ #define AT91_PMC_LOCKA (1 << 1) /* PLLA Lock */ #define AT91_PMC_LOCKB (1 << 2) /* PLLB Lock */ #define AT91_PMC_MCKRDY (1 << 3) /* Master Clock */ #define AT91_PMC_LOCKU (1 << 6) /* UPLL Lock [some SAM9] */ #define AT91_PMC_OSCSEL (1 << 7) /* Slow Oscillator Selection [some SAM9] */ #define AT91_PMC_PCK0RDY (1 << 8) /* Programmable Clock 0 */ #define AT91_PMC_PCK1RDY (1 << 9) /* Programmable Clock 1 */ #define AT91_PMC_PCK2RDY (1 << 10) /* Programmable Clock 2 */ #define AT91_PMC_PCK3RDY (1 << 11) /* Programmable Clock 3 */ #define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */ #define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ #define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ #define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */ #define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ #define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */ #define AT91_PMC_FSTT(n) BIT(n) #define AT91_PMC_RTTAL BIT(16) #define AT91_PMC_RTCAL BIT(17) /* RTC Alarm Enable */ #define AT91_PMC_USBAL BIT(18) /* USB Resume Enable */ #define AT91_PMC_SDMMC_CD BIT(19) /* SDMMC Card Detect Enable */ #define AT91_PMC_LPM BIT(20) /* Low-power Mode */ #define AT91_PMC_RXLP_MCE BIT(24) /* Backup UART Receive Enable */ #define AT91_PMC_ACC_CE BIT(25) /* ACC Enable */ #define AT91_PMC_FSPR 0x74 /* Fast Startup Polarity Reg */ #define AT91_PMC_FS_INPUT_MASK 0x7ff #define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */ #define AT91_PMC_PROT 0xe4 /* Write Protect Mode Register [some SAM9] */ #define AT91_PMC_WPEN (0x1 << 0) /* Write Protect Enable */ #define AT91_PMC_WPKEY (0xffffff << 8) /* Write Protect Key */ #define AT91_PMC_PROTKEY (0x504d43 << 8) /* Activation Code */ #define AT91_PMC_WPSR 0xe8 /* Write Protect Status Register [some SAM9] */ #define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */ #define AT91_PMC_WPVSRC (0xffff << 8) /* Write Protect Violation Source */ #define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only]*/ #define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Enable Register 1 */ #define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */ #define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */ #define AT91_PMC_PCR_PID_MASK 0x3f #define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */ #define AT91_PMC_PCR_GCKDIV_MASK GENMASK(27, 20) #define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */ #define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */ #define AT91_PMC_AUDIO_PLL0 0x14c #define AT91_PMC_AUDIO_PLL_PLLEN (1 << 0) #define AT91_PMC_AUDIO_PLL_PADEN (1 << 1) #define AT91_PMC_AUDIO_PLL_PMCEN (1 << 2) #define AT91_PMC_AUDIO_PLL_RESETN (1 << 3) #define AT91_PMC_AUDIO_PLL_ND_OFFSET 8 #define AT91_PMC_AUDIO_PLL_ND_MASK (0x7f << AT91_PMC_AUDIO_PLL_ND_OFFSET) #define AT91_PMC_AUDIO_PLL_ND(n) ((n) << AT91_PMC_AUDIO_PLL_ND_OFFSET) #define AT91_PMC_AUDIO_PLL_QDPMC_OFFSET 16 #define AT91_PMC_AUDIO_PLL_QDPMC_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET) #define AT91_PMC_AUDIO_PLL_QDPMC(n) ((n) << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET) #define 
AT91_PMC_AUDIO_PLL1 0x150 #define AT91_PMC_AUDIO_PLL_FRACR_MASK 0x3fffff #define AT91_PMC_AUDIO_PLL_QDPAD_OFFSET 24 #define AT91_PMC_AUDIO_PLL_QDPAD_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET) #define AT91_PMC_AUDIO_PLL_QDPAD(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET) #define AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET AT91_PMC_AUDIO_PLL_QDPAD_OFFSET #define AT91_PMC_AUDIO_PLL_QDPAD_DIV_MASK (0x3 << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET) #define AT91_PMC_AUDIO_PLL_QDPAD_DIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET) #define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET 26 #define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX 0x1f #define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MASK (AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET) #define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET) #endif clk/zynq.h 0000644 00000000637 14722070374 0006503 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2013 Xilinx Inc. * Copyright (C) 2012 National Instruments */ #ifndef __LINUX_CLK_ZYNQ_H_ #define __LINUX_CLK_ZYNQ_H_ #include <linux/spinlock.h> void zynq_clock_init(void); struct clk *clk_register_zynq_pll(const char *name, const char *parent, void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index, spinlock_t *lock); #endif clk/clk-conf.h 0000644 00000000773 14722070374 0007177 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Samsung Electronics Co., Ltd. * Sylwester Nawrocki <s.nawrocki@samsung.com> */ #ifndef __CLK_CONF_H #define __CLK_CONF_H #include <linux/types.h> struct device_node; #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) int of_clk_set_defaults(struct device_node *node, bool clk_supplier); #else static inline int of_clk_set_defaults(struct device_node *node, bool clk_supplier) { return 0; } #endif #endif /* __CLK_CONF_H */ clk/ti.h 0000644 00000027145 14722070374 0006121 0 ustar 00 /* * TI clock drivers support * * Copyright (C) 2013 Texas Instruments, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #ifndef __LINUX_CLK_TI_H__ #define __LINUX_CLK_TI_H__ #include <linux/clk-provider.h> #include <linux/clkdev.h> /** * struct clk_omap_reg - OMAP register declaration * @offset: offset from the master IP module base address * @index: index of the master IP module */ struct clk_omap_reg { void __iomem *ptr; u16 offset; u8 index; u8 flags; }; /** * struct dpll_data - DPLL registers and integration data * @mult_div1_reg: register containing the DPLL M and N bitfields * @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg * @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg * @clk_bypass: struct clk_hw pointer to the clock's bypass clock input * @clk_ref: struct clk_hw pointer to the clock's reference clock input * @control_reg: register containing the DPLL mode bitfield * @enable_mask: mask of the DPLL mode bitfield in @control_reg * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate() * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate() * @last_rounded_m4xen: cache of the last M4X result of * omap4_dpll_regm4xen_round_rate() * @last_rounded_lpmode: cache of the last lpmode result of * omap4_dpll_lpmode_recalc() * @max_multiplier: maximum valid non-bypass multiplier value (actual) * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate() * @min_divider: minimum valid non-bypass divider value (actual) * @max_divider: maximum valid non-bypass divider value (actual) * @max_rate: maximum clock rate for the DPLL * @modes: possible values of @enable_mask * @autoidle_reg: register containing the DPLL autoidle mode bitfield * @idlest_reg: register containing the DPLL idle status bitfield * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg * @dcc_mask: mask of the DPLL DCC correction bitfield @mult_div1_reg * @dcc_rate: rate atleast which DCC @dcc_mask must be set * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs * @flags: DPLL type/features (see below) * * Possible values for @flags: * DPLL_J_TYPE: "J-type DPLL" (only some 36xx, 4xxx DPLLs) * * @freqsel_mask is only used on the OMAP34xx family and AM35xx. * * XXX Some DPLLs have multiple bypass inputs, so it's not technically * correct to only have one @clk_bypass pointer. * * XXX The runtime-variable fields (@last_rounded_rate, @last_rounded_m, * @last_rounded_n) should be separated from the runtime-fixed fields * and placed into a different structure, so that the runtime-fixed data * can be placed into read-only space. 
*/ struct dpll_data { struct clk_omap_reg mult_div1_reg; u32 mult_mask; u32 div1_mask; struct clk_hw *clk_bypass; struct clk_hw *clk_ref; struct clk_omap_reg control_reg; u32 enable_mask; unsigned long last_rounded_rate; u16 last_rounded_m; u8 last_rounded_m4xen; u8 last_rounded_lpmode; u16 max_multiplier; u8 last_rounded_n; u8 min_divider; u16 max_divider; unsigned long max_rate; u8 modes; struct clk_omap_reg autoidle_reg; struct clk_omap_reg idlest_reg; u32 autoidle_mask; u32 freqsel_mask; u32 idlest_mask; u32 dco_mask; u32 sddiv_mask; u32 dcc_mask; unsigned long dcc_rate; u32 lpmode_mask; u32 m4xen_mask; u8 auto_recal_bit; u8 recal_en_bit; u8 recal_st_bit; u8 flags; }; struct clk_hw_omap; /** * struct clk_hw_omap_ops - OMAP clk ops * @find_idlest: find idlest register information for a clock * @find_companion: find companion clock register information for a clock, * basically converts CM_ICLKEN* <-> CM_FCLKEN* * @allow_idle: enables autoidle hardware functionality for a clock * @deny_idle: prevent autoidle hardware functionality for a clock */ struct clk_hw_omap_ops { void (*find_idlest)(struct clk_hw_omap *oclk, struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val); void (*find_companion)(struct clk_hw_omap *oclk, struct clk_omap_reg *other_reg, u8 *other_bit); void (*allow_idle)(struct clk_hw_omap *oclk); void (*deny_idle)(struct clk_hw_omap *oclk); }; /** * struct clk_hw_omap - OMAP struct clk * @node: list_head connecting this clock into the full clock list * @enable_reg: register to write to enable the clock (see @enable_bit) * @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg) * @flags: see "struct clk.flags possibilities" above * @clksel_reg: for clksel clks, register va containing src/divisor select * @dpll_data: for DPLLs, pointer to struct dpll_data for this clock * @clkdm_name: clockdomain name that this clock is contained in * @clkdm: pointer to struct clockdomain, resolved from @clkdm_name at runtime * @ops: clock ops for this clock */ struct clk_hw_omap { struct clk_hw hw; struct list_head node; unsigned long fixed_rate; u8 fixed_div; struct clk_omap_reg enable_reg; u8 enable_bit; u8 flags; struct clk_omap_reg clksel_reg; struct dpll_data *dpll_data; const char *clkdm_name; struct clockdomain *clkdm; const struct clk_hw_omap_ops *ops; u32 context; int autoidle_count; }; /* * struct clk_hw_omap.flags possibilities * * XXX document the rest of the clock flags here * * ENABLE_REG_32BIT: (OMAP1 only) clock control register must be accessed * with 32bit ops, by default OMAP1 uses 16bit ops. * CLOCK_IDLE_CONTROL: (OMAP1 only) clock has autoidle support. * CLOCK_NO_IDLE_PARENT: (OMAP1 only) when clock is enabled, its parent * clock is put to no-idle mode. * ENABLE_ON_INIT: Clock is enabled on init. * INVERT_ENABLE: By default, clock enable bit behavior is '1' enable, '0' * disable. This inverts the behavior making '0' enable and '1' disable. * CLOCK_CLKOUTX2: (OMAP4 only) DPLL CLKOUT and CLKOUTX2 GATE_CTRL * bits share the same register. This flag allows the * omap4_dpllmx*() code to determine which GATE_CTRL bit field * should be used. This is a temporary solution - a better approach * would be to associate clock type-specific data with the clock, * similar to the struct dpll_data approach. 
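 *
 * For example, an OMAP1 clock whose control register must be accessed
 * with 32-bit operations and whose parent must be kept out of idle
 * while the clock is enabled would set
 * (ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT) in its flags field.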
*/ #define ENABLE_REG_32BIT (1 << 0) /* Use 32-bit access */ #define CLOCK_IDLE_CONTROL (1 << 1) #define CLOCK_NO_IDLE_PARENT (1 << 2) #define ENABLE_ON_INIT (1 << 3) /* Enable upon framework init */ #define INVERT_ENABLE (1 << 4) /* 0 enables, 1 disables */ #define CLOCK_CLKOUTX2 (1 << 5) /* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */ #define DPLL_LOW_POWER_STOP 0x1 #define DPLL_LOW_POWER_BYPASS 0x5 #define DPLL_LOCKED 0x7 /* DPLL Type and DCO Selection Flags */ #define DPLL_J_TYPE 0x1 /* Static memmap indices */ enum { TI_CLKM_CM = 0, TI_CLKM_CM2, TI_CLKM_PRM, TI_CLKM_SCRM, TI_CLKM_CTRL, TI_CLKM_CTRL_AUX, TI_CLKM_PLLSS, CLK_MAX_MEMMAPS }; /** * struct ti_clk_ll_ops - low-level ops for clocks * @clk_readl: pointer to register read function * @clk_writel: pointer to register write function * @clk_rmw: pointer to register read-modify-write function * @clkdm_clk_enable: pointer to clockdomain enable function * @clkdm_clk_disable: pointer to clockdomain disable function * @clkdm_lookup: pointer to clockdomain lookup function * @cm_wait_module_ready: pointer to CM module wait ready function * @cm_split_idlest_reg: pointer to CM module function to split idlest reg * * Low-level ops are generally used by the basic clock types (clk-gate, * clk-mux, clk-divider etc.) to provide support for various low-level * hadrware interfaces (direct MMIO, regmap etc.), and is initialized * by board code. Low-level ops also contain some other platform specific * operations not provided directly by clock drivers. */ struct ti_clk_ll_ops { u32 (*clk_readl)(const struct clk_omap_reg *reg); void (*clk_writel)(u32 val, const struct clk_omap_reg *reg); void (*clk_rmw)(u32 val, u32 mask, const struct clk_omap_reg *reg); int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk); int (*clkdm_clk_disable)(struct clockdomain *clkdm, struct clk *clk); struct clockdomain * (*clkdm_lookup)(const char *name); int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg, u8 idlest_shift); int (*cm_split_idlest_reg)(struct clk_omap_reg *idlest_reg, s16 *prcm_inst, u8 *idlest_reg_id); }; #define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw) bool omap2_clk_is_hw_omap(struct clk_hw *hw); int omap2_clk_disable_autoidle_all(void); int omap2_clk_enable_autoidle_all(void); int omap2_clk_allow_idle(struct clk *clk); int omap2_clk_deny_idle(struct clk *clk); unsigned long omap2_dpllcore_recalc(struct clk_hw *hw, unsigned long parent_rate); int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate, unsigned long parent_rate); void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw); void omap2xxx_clkt_vps_init(void); unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk); void ti_dt_clk_init_retry_clks(void); void ti_dt_clockdomains_setup(void); int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops); struct regmap; int omap2_clk_provider_init(struct device_node *parent, int index, struct regmap *syscon, void __iomem *mem); void omap2_clk_legacy_provider_init(int index, void __iomem *mem); int omap3430_dt_clk_init(void); int omap3630_dt_clk_init(void); int am35xx_dt_clk_init(void); int dm814x_dt_clk_init(void); int dm816x_dt_clk_init(void); int omap4xxx_dt_clk_init(void); int omap5xxx_dt_clk_init(void); int dra7xx_dt_clk_init(void); int am33xx_dt_clk_init(void); int am43xx_dt_clk_init(void); int omap2420_dt_clk_init(void); int omap2430_dt_clk_init(void); struct ti_clk_features { u32 flags; long fint_min; long fint_max; long fint_band1_max; long fint_band2_min; u8 
dpll_bypass_vals; u8 cm_idlest_val; }; #define TI_CLK_DPLL_HAS_FREQSEL BIT(0) #define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1) #define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2) #define TI_CLK_ERRATA_I810 BIT(3) #define TI_CLK_CLKCTRL_COMPAT BIT(4) #define TI_CLK_DEVICE_TYPE_GP BIT(5) void ti_clk_setup_features(struct ti_clk_features *features); const struct ti_clk_features *ti_clk_get_features(void); int omap3_noncore_dpll_save_context(struct clk_hw *hw); void omap3_noncore_dpll_restore_context(struct clk_hw *hw); int omap3_core_dpll_save_context(struct clk_hw *hw); void omap3_core_dpll_restore_context(struct clk_hw *hw); extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; #ifdef CONFIG_ATAGS int omap3430_clk_legacy_init(void); int omap3430es1_clk_legacy_init(void); int omap36xx_clk_legacy_init(void); int am35xx_clk_legacy_init(void); #else static inline int omap3430_clk_legacy_init(void) { return -ENXIO; } static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; } static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; } static inline int am35xx_clk_legacy_init(void) { return -ENXIO; } #endif #endif clk/renesas.h 0000644 00000001751 14722070374 0007140 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ * * Copyright 2013 Ideas On Board SPRL * Copyright 2013, 2014 Horms Solutions Ltd. * * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Contact: Simon Horman <horms@verge.net.au> */ #ifndef __LINUX_CLK_RENESAS_H_ #define __LINUX_CLK_RENESAS_H_ #include <linux/types.h> struct device; struct device_node; struct generic_pm_domain; void cpg_mstp_add_clk_domain(struct device_node *np); #ifdef CONFIG_CLK_RENESAS_CPG_MSTP int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev); void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev); #else #define cpg_mstp_attach_dev NULL #define cpg_mstp_detach_dev NULL #endif #ifdef CONFIG_CLK_RENESAS_CPG_MSSR int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev); void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev); #else #define cpg_mssr_attach_dev NULL #define cpg_mssr_detach_dev NULL #endif #endif clk/davinci.h 0000644 00000002672 14722070374 0007120 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * Clock drivers for TI DaVinci PLL and PSC controllers * * Copyright (C) 2018 David Lechner <david@lechnology.com> */ #ifndef __LINUX_CLK_DAVINCI_PLL_H___ #define __LINUX_CLK_DAVINCI_PLL_H___ #include <linux/device.h> #include <linux/regmap.h> /* function for registering clocks in early boot */ #ifdef CONFIG_ARCH_DAVINCI_DA830 int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); #endif #ifdef CONFIG_ARCH_DAVINCI_DA850 int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); #endif #ifdef CONFIG_ARCH_DAVINCI_DM355 int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); int dm355_psc_init(struct device *dev, void __iomem *base); #endif #ifdef CONFIG_ARCH_DAVINCI_DM365 int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); int dm365_psc_init(struct device *dev, void __iomem *base); #endif #ifdef CONFIG_ARCH_DAVINCI_DM644x int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip); int dm644x_psc_init(struct device *dev, void __iomem *base); #endif #ifdef CONFIG_ARCH_DAVINCI_DM646x int dm646x_pll1_init(struct device 
*dev, void __iomem *base, struct regmap *cfgchip); int dm646x_psc_init(struct device *dev, void __iomem *base); #endif #endif /* __LINUX_CLK_DAVINCI_PLL_H___ */ reciprocal_div.h 0000644 00000006434 14722070374 0007717 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RECIPROCAL_DIV_H #define _LINUX_RECIPROCAL_DIV_H #include <linux/types.h> /* * This algorithm is based on the paper "Division by Invariant * Integers Using Multiplication" by Torbjörn Granlund and Peter * L. Montgomery. * * The assembler implementation from Agner Fog, which this code is * based on, can be found here: * http://www.agner.org/optimize/asmlib.zip * * This optimization for A/B is helpful if the divisor B is mostly * runtime invariant. The reciprocal of B is calculated in the * slow-path with reciprocal_value(). The fast-path can then just use * a much faster multiplication operation with a variable dividend A * to calculate the division A/B. */ struct reciprocal_value { u32 m; u8 sh1, sh2; }; /* "reciprocal_value" and "reciprocal_divide" together implement the basic * version of the algorithm described in Figure 4.1 of the paper. */ struct reciprocal_value reciprocal_value(u32 d); static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R) { u32 t = (u32)(((u64)a * R.m) >> 32); return (t + ((a - t) >> R.sh1)) >> R.sh2; } struct reciprocal_value_adv { u32 m; u8 sh, exp; bool is_wide_m; }; /* "reciprocal_value_adv" implements the advanced version of the algorithm * described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The * exception case could be easily handled before calling "reciprocal_value_adv". * * The advanced version requires more complex calculation to get the reciprocal * multiplier and other control variables, but then could reduce the required * emulation operations. * * It makes no sense to use this advanced version for host divide emulation, * those extra complexities for calculating multiplier etc could completely * waive our saving on emulation operations. * * However, it makes sense to use it for JIT divide code generation for which * we are willing to trade performance of JITed code with that of host. As shown * by the following pseudo code, the required emulation operations could go down * from 6 (the basic version) to 3 or 4. * * To use the result of "reciprocal_value_adv", suppose we want to calculate * n/d, the pseudo C code will be: * * struct reciprocal_value_adv rvalue; * u8 pre_shift, exp; * * // handle exception case. * if (d >= (1U << 31)) { * result = n >= d; * return; * } * * rvalue = reciprocal_value_adv(d, 32) * exp = rvalue.exp; * if (rvalue.is_wide_m && !(d & 1)) { * // floor(log2(d & (2^32 -d))) * pre_shift = fls(d & -d) - 1; * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift); * } else { * pre_shift = 0; * } * * // code generation starts. * if (imm == 1U << exp) { * result = n >> exp; * } else if (rvalue.is_wide_m) { * // pre_shift must be zero when reached here. 
* t = (n * rvalue.m) >> 32; * result = n - t; * result >>= 1; * result += t; * result >>= rvalue.sh - 1; * } else { * if (pre_shift) * result = n >> pre_shift; * result = ((u64)result * rvalue.m) >> 32; * result >>= rvalue.sh; * } */ struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec); #endif /* _LINUX_RECIPROCAL_DIV_H */ spinlock_api_smp.h 0000644 00000012635 14722070374 0010264 0 ustar 00 #ifndef __LINUX_SPINLOCK_API_SMP_H #define __LINUX_SPINLOCK_API_SMP_H #ifndef __LINUX_SPINLOCK_H # error "please don't include this file directly" #endif /* * include/linux/spinlock_api_smp.h * * spinlock API declarations on SMP (and debug) * (implemented in kernel/spinlock.c) * * portions Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the General Public License (GPL). */ int in_lock_functions(unsigned long addr); #define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x)) void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) __acquires(lock); void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) __acquires(lock); void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) __acquires(lock); unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) __acquires(lock); unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) __acquires(lock); int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock); int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock); void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) __releases(lock); #ifdef CONFIG_INLINE_SPIN_LOCK #define _raw_spin_lock(lock) __raw_spin_lock(lock) #endif #ifdef CONFIG_INLINE_SPIN_LOCK_BH #define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock) #endif #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ #define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock) #endif #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE #define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock) #endif #ifdef CONFIG_INLINE_SPIN_TRYLOCK #define _raw_spin_trylock(lock) __raw_spin_trylock(lock) #endif #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH #define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock) #endif #ifndef CONFIG_UNINLINE_SPIN_UNLOCK #define _raw_spin_unlock(lock) __raw_spin_unlock(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH #define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ #define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock) #endif #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE #define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags) #endif static inline int __raw_spin_trylock(raw_spinlock_t *lock) { preempt_disable(); if (do_raw_spin_trylock(lock)) { spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; } preempt_enable(); return 0; } /* * If lockdep is enabled then we use the non-preemption spin-ops * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are * not re-enabled during lock-acquire (which the preempt-spin-ops do): */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) static inline unsigned long 
__raw_spin_lock_irqsave(raw_spinlock_t *lock) { unsigned long flags; local_irq_save(flags); preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); /* * On lockdep we dont want the hand-coded irq-enable of * do_raw_spin_lock_flags() code, because lockdep assumes * that interrupts are not re-enabled during lock-acquire: */ #ifdef CONFIG_LOCKDEP LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); #else do_raw_spin_lock_flags(lock, &flags); #endif return flags; } static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) { local_irq_disable(); preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) { __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } static inline void __raw_spin_lock(raw_spinlock_t *lock) { preempt_disable(); spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } #endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ static inline void __raw_spin_unlock(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); preempt_enable(); } static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) { spin_release(&lock->dep_map, 1, _RET_IP_); do_raw_spin_unlock(lock); __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); } static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) { __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); if (do_raw_spin_trylock(lock)) { spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); return 1; } __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); return 0; } #include <linux/rwlock_api_smp.h> #endif /* __LINUX_SPINLOCK_API_SMP_H */ irq_cpustat.h 0000644 00000001515 14722070374 0007263 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __irq_cpustat_h #define __irq_cpustat_h /* * Contains default mappings for irq_cpustat_t, used by almost every * architecture. Some arch (like s390) have per cpu hardware pages and * they define their own mappings for irq_stat. * * Keith Owens <kaos@ocs.com.au> July 2000. */ /* * Simple wrappers reducing source bloat. Define all irq_stat fields * here, even ones that are arch dependent. That way we get common * definitions instead of differing sets for each arch. */ #ifndef __ARCH_IRQ_STAT DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); /* defined in asm/hardirq.h */ #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat.member, cpu)) #endif /* arch dependent irq_stat fields */ #define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */ #endif /* __irq_cpustat_h */ soundcard.h 0000644 00000003137 14722070374 0006711 0 ustar 00 /* * Copyright by Hannu Savolainen 1993-1997 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 2. 
* Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef SOUNDCARD_H #define SOUNDCARD_H # include <asm/byteorder.h> #include <uapi/linux/soundcard.h> # if defined(__BIG_ENDIAN) # define AFMT_S16_NE AFMT_S16_BE # elif defined(__LITTLE_ENDIAN) # define AFMT_S16_NE AFMT_S16_LE # else # error "could not determine byte order" # endif #endif timecounter.h 0000644 00000010215 14722070374 0007260 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/include/linux/timecounter.h * * based on code that migrated away from * linux/include/linux/clocksource.h */ #ifndef _LINUX_TIMECOUNTER_H #define _LINUX_TIMECOUNTER_H #include <linux/types.h> /* simplify initialization of mask field */ #define CYCLECOUNTER_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) /** * struct cyclecounter - hardware abstraction for a free running counter * Provides completely state-free accessors to the underlying hardware. * Depending on which hardware it reads, the cycle counter may wrap * around quickly. Locking rules (if necessary) have to be defined * by the implementor and user of specific instances of this API. * * @read: returns the current cycle value * @mask: bitmask for two's complement * subtraction of non 64 bit counters, * see CYCLECOUNTER_MASK() helper macro * @mult: cycle to nanosecond multiplier * @shift: cycle to nanosecond divisor (power of two) */ struct cyclecounter { u64 (*read)(const struct cyclecounter *cc); u64 mask; u32 mult; u32 shift; }; /** * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds * Contains the state needed by timecounter_read() to detect * cycle counter wrap around. Initialize with * timecounter_init(). Also used to convert cycle counts into the * corresponding nanosecond counts with timecounter_cyc2time(). Users * of this code are responsible for initializing the underlying * cycle counter hardware, locking issues and reading the time * more often than the cycle counter wraps around. The nanosecond * counter will only wrap around after ~585 years. * * @cc: the cycle counter used by this instance * @cycle_last: most recent cycle counter value seen by * timecounter_read() * @nsec: continuously increasing count * @mask: bit mask for maintaining the 'frac' field * @frac: accumulated fractional nanoseconds */ struct timecounter { const struct cyclecounter *cc; u64 cycle_last; u64 nsec; u64 mask; u64 frac; }; /** * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds * @cc: Pointer to cycle counter. 
* @cycles: Cycles * @mask: bit mask for maintaining the 'frac' field * @frac: pointer to storage for the fractional nanoseconds. */ static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, u64 cycles, u64 mask, u64 *frac) { u64 ns = (u64) cycles; ns = (ns * cc->mult) + *frac; *frac = ns & mask; return ns >> cc->shift; } /** * timecounter_adjtime - Shifts the time of the clock. * @delta: Desired change in nanoseconds. */ static inline void timecounter_adjtime(struct timecounter *tc, s64 delta) { tc->nsec += delta; } /** * timecounter_init - initialize a time counter * @tc: Pointer to time counter which is to be initialized/reset * @cc: A cycle counter, ready to be used. * @start_tstamp: Arbitrary initial time stamp. * * After this call the current cycle register (roughly) corresponds to * the initial time stamp. Every call to timecounter_read() increments * the time stamp counter by the number of elapsed nanoseconds. */ extern void timecounter_init(struct timecounter *tc, const struct cyclecounter *cc, u64 start_tstamp); /** * timecounter_read - return nanoseconds elapsed since timecounter_init() * plus the initial time stamp * @tc: Pointer to time counter. * * In other words, keeps track of time since the same epoch as * the function which generated the initial time stamp. */ extern u64 timecounter_read(struct timecounter *tc); /** * timecounter_cyc2time - convert a cycle counter to same * time base as values returned by * timecounter_read() * @tc: Pointer to time counter. * @cycle_tstamp: a value returned by tc->cc->read() * * Cycle counts that are converted correctly as long as they * fall into the interval [-1/2 max cycle count, +1/2 max cycle count], * with "max cycle count" == cs->mask+1. * * This allows conversion of cycle counter values which were generated * in the past. */ extern u64 timecounter_cyc2time(struct timecounter *tc, u64 cycle_tstamp); #endif component.h 0000644 00000010103 14722070374 0006720 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef COMPONENT_H #define COMPONENT_H #include <linux/stddef.h> struct device; /** * struct component_ops - callbacks for component drivers * * Components are registered with component_add() and unregistered with * component_del(). */ struct component_ops { /** * @bind: * * Called through component_bind_all() when the aggregate driver is * ready to bind the overall driver. */ int (*bind)(struct device *comp, struct device *master, void *master_data); /** * @unbind: * * Called through component_unbind_all() when the aggregate driver is * ready to bind the overall driver, or when component_bind_all() fails * part-ways through and needs to unbind some already bound components. */ void (*unbind)(struct device *comp, struct device *master, void *master_data); }; int component_add(struct device *, const struct component_ops *); int component_add_typed(struct device *dev, const struct component_ops *ops, int subcomponent); void component_del(struct device *, const struct component_ops *); int component_bind_all(struct device *master, void *master_data); void component_unbind_all(struct device *master, void *master_data); struct master; /** * struct component_master_ops - callback for the aggregate driver * * Aggregate drivers are registered with component_master_add_with_match() and * unregistered with component_master_del(). */ struct component_master_ops { /** * @bind: * * Called when all components or the aggregate driver, as specified in * the match list passed to component_master_add_with_match(), are * ready. 
Usually there are 3 steps to bind an aggregate driver: * * 1. Allocate a structure for the aggregate driver. * * 2. Bind all components to the aggregate driver by calling * component_bind_all() with the aggregate driver structure as opaque * pointer data. * * 3. Register the aggregate driver with the subsystem to publish its * interfaces. * * Note that the lifetime of the aggregate driver does not align with * any of the underlying &struct device instances. Therefore devm cannot * be used and all resources acquired or allocated in this callback must * be explicitly released in the @unbind callback. */ int (*bind)(struct device *master); /** * @unbind: * * Called when either the aggregate driver, using * component_master_del(), or one of its components, using * component_del(), is unregistered. */ void (*unbind)(struct device *master); }; void component_master_del(struct device *, const struct component_master_ops *); struct component_match; int component_master_add_with_match(struct device *, const struct component_master_ops *, struct component_match *); void component_match_add_release(struct device *master, struct component_match **matchptr, void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void *compare_data); void component_match_add_typed(struct device *master, struct component_match **matchptr, int (*compare_typed)(struct device *, int, void *), void *compare_data); /** * component_match_add - add a component match entry * @master: device with the aggregate driver * @matchptr: pointer to the list of component matches * @compare: compare function to match against all components * @compare_data: opaque pointer passed to the @compare function * * Adds a new component match to the list stored in @matchptr, which the @master * aggregate driver needs to function. The list of component matches pointed to * by @matchptr must be initialized to NULL before adding the first match. This * only matches against components added with component_add(). * * The allocated match list in @matchptr is automatically released using devm * actions. * * See also component_match_add_release() and component_match_add_typed(). */ static inline void component_match_add(struct device *master, struct component_match **matchptr, int (*compare)(struct device *, void *), void *compare_data) { component_match_add_release(master, matchptr, NULL, compare, compare_data); } #endif fsl/bestcomm/fec.h 0000644 00000003555 14722070374 0010065 0 ustar 00 /* * Header for Bestcomm FEC tasks driver * * * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com> * Copyright (C) 2003-2004 MontaVista, Software, Inc. * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ #ifndef __BESTCOMM_FEC_H__ #define __BESTCOMM_FEC_H__ struct bcom_fec_bd { u32 status; u32 skb_pa; }; #define BCOM_FEC_TX_BD_TFD 0x08000000ul /* transmit frame done */ #define BCOM_FEC_TX_BD_TC 0x04000000ul /* transmit CRC */ #define BCOM_FEC_TX_BD_ABC 0x02000000ul /* append bad CRC */ #define BCOM_FEC_RX_BD_L 0x08000000ul /* buffer is last in frame */ #define BCOM_FEC_RX_BD_BC 0x00800000ul /* DA is broadcast */ #define BCOM_FEC_RX_BD_MC 0x00400000ul /* DA is multicast and not broadcast */ #define BCOM_FEC_RX_BD_LG 0x00200000ul /* Rx frame length violation */ #define BCOM_FEC_RX_BD_NO 0x00100000ul /* Rx non-octet aligned frame */ #define BCOM_FEC_RX_BD_CR 0x00040000ul /* Rx CRC error */ #define BCOM_FEC_RX_BD_OV 0x00020000ul /* overrun */ #define BCOM_FEC_RX_BD_TR 0x00010000ul /* Rx frame truncated */ #define BCOM_FEC_RX_BD_LEN_MASK 0x000007fful /* mask for length of received frame */ #define BCOM_FEC_RX_BD_ERRORS (BCOM_FEC_RX_BD_LG | BCOM_FEC_RX_BD_NO | \ BCOM_FEC_RX_BD_CR | BCOM_FEC_RX_BD_OV | BCOM_FEC_RX_BD_TR) extern struct bcom_task * bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize); extern int bcom_fec_rx_reset(struct bcom_task *tsk); extern void bcom_fec_rx_release(struct bcom_task *tsk); extern struct bcom_task * bcom_fec_tx_init(int queue_len, phys_addr_t fifo); extern int bcom_fec_tx_reset(struct bcom_task *tsk); extern void bcom_fec_tx_release(struct bcom_task *tsk); #endif /* __BESTCOMM_FEC_H__ */ fsl/bestcomm/gen_bd.h 0000644 00000002216 14722070374 0010537 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Header for Bestcomm General Buffer Descriptor tasks driver * * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com> * Copyright (C) 2006 AppSpec Computer Technologies Corp. * Jeff Gibbons <jeff.gibbons@appspec.com> */ #ifndef __BESTCOMM_GEN_BD_H__ #define __BESTCOMM_GEN_BD_H__ struct bcom_gen_bd { u32 status; u32 buf_pa; }; extern struct bcom_task * bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo, int initiator, int ipr, int maxbufsize); extern int bcom_gen_bd_rx_reset(struct bcom_task *tsk); extern void bcom_gen_bd_rx_release(struct bcom_task *tsk); extern struct bcom_task * bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo, int initiator, int ipr); extern int bcom_gen_bd_tx_reset(struct bcom_task *tsk); extern void bcom_gen_bd_tx_release(struct bcom_task *tsk); /* PSC support utility wrappers */ struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len, phys_addr_t fifo, int maxbufsize); struct bcom_task * bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo); #endif /* __BESTCOMM_GEN_BD_H__ */ fsl/bestcomm/ata.h 0000644 00000001423 14722070374 0010065 0 ustar 00 /* * Header for Bestcomm ATA task driver * * * Copyright (C) 2006 Freescale - John Rigby * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ #ifndef __BESTCOMM_ATA_H__ #define __BESTCOMM_ATA_H__ struct bcom_ata_bd { u32 status; u32 src_pa; u32 dst_pa; }; extern struct bcom_task * bcom_ata_init(int queue_len, int maxbufsize); extern void bcom_ata_rx_prepare(struct bcom_task *tsk); extern void bcom_ata_tx_prepare(struct bcom_task *tsk); extern void bcom_ata_reset_bd(struct bcom_task *tsk); extern void bcom_ata_release(struct bcom_task *tsk); #endif /* __BESTCOMM_ATA_H__ */ fsl/bestcomm/sram.h 0000644 00000002377 14722070374 0010273 0 ustar 00 /* * Handling of an SRAM zone for BestComm * * * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #ifndef __BESTCOMM_SRAM_H__ #define __BESTCOMM_SRAM_H__ #include <asm/rheap.h> #include <asm/mmu.h> #include <linux/spinlock.h> /* Structure used internally */ /* The internals are here for the inline functions' * sake, certainly not for the user to mess with! */ struct bcom_sram { phys_addr_t base_phys; void *base_virt; unsigned int size; rh_info_t *rh; spinlock_t lock; }; extern struct bcom_sram *bcom_sram; /* Public API */ extern int bcom_sram_init(struct device_node *sram_node, char *owner); extern void bcom_sram_cleanup(void); extern void* bcom_sram_alloc(int size, int align, phys_addr_t *phys); extern void bcom_sram_free(void *ptr); static inline phys_addr_t bcom_sram_va2pa(void *va) { return bcom_sram->base_phys + (unsigned long)(va - bcom_sram->base_virt); } static inline void *bcom_sram_pa2va(phys_addr_t pa) { return bcom_sram->base_virt + (unsigned long)(pa - bcom_sram->base_phys); } #endif /* __BESTCOMM_SRAM_H__ */ fsl/bestcomm/bestcomm.h 0000644 00000013100 14722070374 0011124 0 ustar 00 /* * Public header for the MPC52xx processor BestComm driver * * * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> * Copyright (C) 2005 Varma Electronics Oy, * ( by Andrey Volkov <avolkov@varma-el.com> ) * Copyright (C) 2003-2004 MontaVista Software, Inc. * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #ifndef __BESTCOMM_H__ #define __BESTCOMM_H__ /** * struct bcom_bd - Structure describing a generic BestComm buffer descriptor * @status: The current status of this buffer. Exact meaning depends on the * task type * @data: An array of u32 extra data. Size of array is task dependent. * * Note: Don't dereference a bcom_bd pointer as an array. The size of the * bcom_bd is variable. Use bcom_get_bd() instead. */ struct bcom_bd { u32 status; u32 data[0]; /* variable payload size */ }; /* ======================================================================== */ /* Generic task management */ /* ======================================================================== */ /** * struct bcom_task - Structure describing a loaded BestComm task * * This structure is never built by the driver itself. It's built and * filled by the intermediate layer of the BestComm API, the task-dependent * support code. * * Most likely you don't need to poke around inside this structure.
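 *
 * A typical producer/consumer cycle in a client driver looks roughly
 * like this (an illustrative sketch; error handling and the
 * task-specific status bits are omitted):
 *
 *	if (!bcom_queue_full(tsk)) {
 *		bd = bcom_prepare_next_buffer(tsk);
 *		bd->data[0] = buf_pa;	// meaning of data[] is task-specific
 *		bcom_submit_next_buffer(tsk, cookie);
 *	}
 *
 *	// ... later, typically from the task's IRQ handler:
 *	while (bcom_buffer_done(tsk))
 *		cookie = bcom_retrieve_buffer(tsk, &status, NULL);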
The fields are exposed in the header just for the sake of the inline * functions below. */ struct bcom_task { unsigned int tasknum; unsigned int flags; int irq; struct bcom_bd *bd; phys_addr_t bd_pa; void **cookie; unsigned short index; unsigned short outdex; unsigned int num_bd; unsigned int bd_size; void* priv; }; #define BCOM_FLAGS_NONE 0x00000000ul #define BCOM_FLAGS_ENABLE_TASK (1ul << 0) /** * bcom_enable - Enable a BestComm task * @tsk: The BestComm task structure * * This function makes sure the given task is enabled and can be run * by the BestComm engine as needed. */ extern void bcom_enable(struct bcom_task *tsk); /** * bcom_disable - Disable a BestComm task * @tsk: The BestComm task structure * * This function disables a given task, making sure it's not executed * by the BestComm engine. */ extern void bcom_disable(struct bcom_task *tsk); /** * bcom_get_task_irq - Returns the IRQ number of a BestComm task * @tsk: The BestComm task structure */ static inline int bcom_get_task_irq(struct bcom_task *tsk) { return tsk->irq; } /* ======================================================================== */ /* BD based tasks helpers */ /* ======================================================================== */ #define BCOM_BD_READY 0x40000000ul /** _bcom_next_index - Get next input index. * @tsk: pointer to task structure * * Support function; device drivers should not call this. */ static inline int _bcom_next_index(struct bcom_task *tsk) { return ((tsk->index + 1) == tsk->num_bd) ? 0 : tsk->index + 1; } /** _bcom_next_outdex - Get next output index. * @tsk: pointer to task structure * * Support function; device drivers should not call this. */ static inline int _bcom_next_outdex(struct bcom_task *tsk) { return ((tsk->outdex + 1) == tsk->num_bd) ? 0 : tsk->outdex + 1; } /** * bcom_queue_empty - Checks if a BestComm task BD queue is empty * @tsk: The BestComm task structure */ static inline int bcom_queue_empty(struct bcom_task *tsk) { return tsk->index == tsk->outdex; } /** * bcom_queue_full - Checks if a BestComm task BD queue is full * @tsk: The BestComm task structure */ static inline int bcom_queue_full(struct bcom_task *tsk) { return tsk->outdex == _bcom_next_index(tsk); } /** * bcom_get_bd - Get a BD from the queue * @tsk: The BestComm task structure * @index: Index of the BD to fetch */ static inline struct bcom_bd *bcom_get_bd(struct bcom_task *tsk, unsigned int index) { /* A cast to (void*) so the address can be incremented by the * real size instead of by sizeof(struct bcom_bd) */ return ((void *)tsk->bd) + (index * tsk->bd_size); } /** * bcom_buffer_done - Checks if a BestComm task has a completed buffer * @tsk: The BestComm task structure */ static inline int bcom_buffer_done(struct bcom_task *tsk) { struct bcom_bd *bd; if (bcom_queue_empty(tsk)) return 0; bd = bcom_get_bd(tsk, tsk->outdex); return !(bd->status & BCOM_BD_READY); } /** * bcom_prepare_next_buffer - clear status of next available buffer.
* @tsk: The BestComm task structure * * Returns pointer to next buffer descriptor */ static inline struct bcom_bd * bcom_prepare_next_buffer(struct bcom_task *tsk) { struct bcom_bd *bd; bd = bcom_get_bd(tsk, tsk->index); bd->status = 0; /* cleanup last status */ return bd; } /** * bcom_submit_next_buffer - Hand the next prepared buffer to the engine * @tsk: The BestComm task structure * @cookie: Opaque driver data (e.g. an skb), handed back by * bcom_retrieve_buffer() once this buffer completes */ static inline void bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie) { struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index); tsk->cookie[tsk->index] = cookie; mb(); /* ensure the bd is really up-to-date */ bd->status |= BCOM_BD_READY; tsk->index = _bcom_next_index(tsk); if (tsk->flags & BCOM_FLAGS_ENABLE_TASK) bcom_enable(tsk); } /** * bcom_retrieve_buffer - Fetch the oldest completed buffer of a task * @tsk: The BestComm task structure * @p_status: If not NULL, receives the final BD status * @p_bd: If not NULL, receives a pointer to the BD itself * * Returns the cookie that was passed to bcom_submit_next_buffer() for * this buffer. */ static inline void * bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd) { void *cookie = tsk->cookie[tsk->outdex]; struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex); if (p_status) *p_status = bd->status; if (p_bd) *p_bd = bd; tsk->outdex = _bcom_next_outdex(tsk); return cookie; } #endif /* __BESTCOMM_H__ */ fsl/bestcomm/bestcomm_priv.h 0000644 00000023703 14722070374 0012176 0 ustar 00 /* * Private header for the MPC52xx processor BestComm driver * * By private, we mean that drivers should not use it directly. It's meant * to be used by the BestComm engine driver itself and by the intermediate * layer between the core and the drivers. * * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> * Copyright (C) 2005 Varma Electronics Oy, * ( by Andrey Volkov <avolkov@varma-el.com> ) * Copyright (C) 2003-2004 MontaVista Software, Inc. * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #ifndef __BESTCOMM_PRIV_H__ #define __BESTCOMM_PRIV_H__ #include <linux/spinlock.h> #include <linux/of.h> #include <asm/io.h> #include <asm/mpc52xx.h> #include "sram.h" /* ======================================================================== */ /* Engine related stuff */ /* ======================================================================== */ /* Zones sizes and needed alignments */ #define BCOM_MAX_TASKS 16 #define BCOM_MAX_VAR 24 #define BCOM_MAX_INC 8 #define BCOM_MAX_FDT 64 #define BCOM_MAX_CTX 20 #define BCOM_CTX_SIZE (BCOM_MAX_CTX * sizeof(u32)) #define BCOM_CTX_ALIGN 0x100 #define BCOM_VAR_SIZE (BCOM_MAX_VAR * sizeof(u32)) #define BCOM_INC_SIZE (BCOM_MAX_INC * sizeof(u32)) #define BCOM_VAR_ALIGN 0x80 #define BCOM_FDT_SIZE (BCOM_MAX_FDT * sizeof(u32)) #define BCOM_FDT_ALIGN 0x100 /** * struct bcom_tdt - Task Descriptor Table Entry */ struct bcom_tdt { u32 start; u32 stop; u32 var; u32 fdt; u32 exec_status; /* used internally by BestComm engine */ u32 mvtp; /* used internally by BestComm engine */ u32 context; u32 litbase; }; /** * struct bcom_engine * * This holds all info needed globally to handle the engine */ struct bcom_engine { struct device_node *ofnode; struct mpc52xx_sdma __iomem *regs; phys_addr_t regs_base; struct bcom_tdt *tdt; u32 *ctx; u32 *var; u32 *fdt; spinlock_t lock; }; extern struct bcom_engine *bcom_eng; /* ======================================================================== */ /* Tasks related stuff */ /* ======================================================================== */ /* Tasks image header */ #define BCOM_TASK_MAGIC 0x4243544B /* 'BCTK' */ struct bcom_task_header { u32 magic; u8 desc_size; /* the size fields */ u8 var_size; /* are given in number */ u8 inc_size; /* of 32-bit words */ u8 first_var; u8 reserved[8]; }; /* Descriptors structure & co
*/ #define BCOM_DESC_NOP 0x000001f8 #define BCOM_LCD_MASK 0x80000000 #define BCOM_DRD_EXTENDED 0x40000000 #define BCOM_DRD_INITIATOR_SHIFT 21 /* Tasks pragma */ #define BCOM_PRAGMA_BIT_RSV 7 /* reserved pragma bit */ #define BCOM_PRAGMA_BIT_PRECISE_INC 6 /* increment 0=when possible, */ /* 1=iter end */ #define BCOM_PRAGMA_BIT_RST_ERROR_NO 5 /* don't reset errors on */ /* task enable */ #define BCOM_PRAGMA_BIT_PACK 4 /* pack data enable */ #define BCOM_PRAGMA_BIT_INTEGER 3 /* data alignment */ /* 0=frac(msb), 1=int(lsb) */ #define BCOM_PRAGMA_BIT_SPECREAD 2 /* XLB speculative read */ #define BCOM_PRAGMA_BIT_CW 1 /* write line buffer enable */ #define BCOM_PRAGMA_BIT_RL 0 /* read line buffer enable */ /* Looks like XLB speculative read generates XLB errors when a buffer * is at the end of the physical memory. i.e. when accessing the * lasts words, the engine tries to prefetch the next but there is no * next ... */ #define BCOM_STD_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \ (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \ (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \ (0 << BCOM_PRAGMA_BIT_PACK) | \ (0 << BCOM_PRAGMA_BIT_INTEGER) | \ (0 << BCOM_PRAGMA_BIT_SPECREAD) | \ (1 << BCOM_PRAGMA_BIT_CW) | \ (1 << BCOM_PRAGMA_BIT_RL)) #define BCOM_PCI_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \ (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \ (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \ (0 << BCOM_PRAGMA_BIT_PACK) | \ (1 << BCOM_PRAGMA_BIT_INTEGER) | \ (0 << BCOM_PRAGMA_BIT_SPECREAD) | \ (1 << BCOM_PRAGMA_BIT_CW) | \ (1 << BCOM_PRAGMA_BIT_RL)) #define BCOM_ATA_PRAGMA BCOM_STD_PRAGMA #define BCOM_CRC16_DP_0_PRAGMA BCOM_STD_PRAGMA #define BCOM_CRC16_DP_1_PRAGMA BCOM_STD_PRAGMA #define BCOM_FEC_RX_BD_PRAGMA BCOM_STD_PRAGMA #define BCOM_FEC_TX_BD_PRAGMA BCOM_STD_PRAGMA #define BCOM_GEN_DP_0_PRAGMA BCOM_STD_PRAGMA #define BCOM_GEN_DP_1_PRAGMA BCOM_STD_PRAGMA #define BCOM_GEN_DP_2_PRAGMA BCOM_STD_PRAGMA #define BCOM_GEN_DP_3_PRAGMA BCOM_STD_PRAGMA #define BCOM_GEN_DP_BD_0_PRAGMA BCOM_STD_PRAGMA #define BCOM_GEN_DP_BD_1_PRAGMA BCOM_STD_PRAGMA #define BCOM_GEN_RX_BD_PRAGMA BCOM_STD_PRAGMA #define BCOM_GEN_TX_BD_PRAGMA BCOM_STD_PRAGMA #define BCOM_GEN_LPC_PRAGMA BCOM_STD_PRAGMA #define BCOM_PCI_RX_PRAGMA BCOM_PCI_PRAGMA #define BCOM_PCI_TX_PRAGMA BCOM_PCI_PRAGMA /* Initiators number */ #define BCOM_INITIATOR_ALWAYS 0 #define BCOM_INITIATOR_SCTMR_0 1 #define BCOM_INITIATOR_SCTMR_1 2 #define BCOM_INITIATOR_FEC_RX 3 #define BCOM_INITIATOR_FEC_TX 4 #define BCOM_INITIATOR_ATA_RX 5 #define BCOM_INITIATOR_ATA_TX 6 #define BCOM_INITIATOR_SCPCI_RX 7 #define BCOM_INITIATOR_SCPCI_TX 8 #define BCOM_INITIATOR_PSC3_RX 9 #define BCOM_INITIATOR_PSC3_TX 10 #define BCOM_INITIATOR_PSC2_RX 11 #define BCOM_INITIATOR_PSC2_TX 12 #define BCOM_INITIATOR_PSC1_RX 13 #define BCOM_INITIATOR_PSC1_TX 14 #define BCOM_INITIATOR_SCTMR_2 15 #define BCOM_INITIATOR_SCLPC 16 #define BCOM_INITIATOR_PSC5_RX 17 #define BCOM_INITIATOR_PSC5_TX 18 #define BCOM_INITIATOR_PSC4_RX 19 #define BCOM_INITIATOR_PSC4_TX 20 #define BCOM_INITIATOR_I2C2_RX 21 #define BCOM_INITIATOR_I2C2_TX 22 #define BCOM_INITIATOR_I2C1_RX 23 #define BCOM_INITIATOR_I2C1_TX 24 #define BCOM_INITIATOR_PSC6_RX 25 #define BCOM_INITIATOR_PSC6_TX 26 #define BCOM_INITIATOR_IRDA_RX 25 #define BCOM_INITIATOR_IRDA_TX 26 #define BCOM_INITIATOR_SCTMR_3 27 #define BCOM_INITIATOR_SCTMR_4 28 #define BCOM_INITIATOR_SCTMR_5 29 #define BCOM_INITIATOR_SCTMR_6 30 #define BCOM_INITIATOR_SCTMR_7 31 /* Initiators priorities */ #define BCOM_IPR_ALWAYS 7 #define BCOM_IPR_SCTMR_0 2 #define BCOM_IPR_SCTMR_1 2 #define BCOM_IPR_FEC_RX 6 #define 
BCOM_IPR_FEC_TX 5 #define BCOM_IPR_ATA_RX 7 #define BCOM_IPR_ATA_TX 7 #define BCOM_IPR_SCPCI_RX 2 #define BCOM_IPR_SCPCI_TX 2 #define BCOM_IPR_PSC3_RX 2 #define BCOM_IPR_PSC3_TX 2 #define BCOM_IPR_PSC2_RX 2 #define BCOM_IPR_PSC2_TX 2 #define BCOM_IPR_PSC1_RX 2 #define BCOM_IPR_PSC1_TX 2 #define BCOM_IPR_SCTMR_2 2 #define BCOM_IPR_SCLPC 2 #define BCOM_IPR_PSC5_RX 2 #define BCOM_IPR_PSC5_TX 2 #define BCOM_IPR_PSC4_RX 2 #define BCOM_IPR_PSC4_TX 2 #define BCOM_IPR_I2C2_RX 2 #define BCOM_IPR_I2C2_TX 2 #define BCOM_IPR_I2C1_RX 2 #define BCOM_IPR_I2C1_TX 2 #define BCOM_IPR_PSC6_RX 2 #define BCOM_IPR_PSC6_TX 2 #define BCOM_IPR_IRDA_RX 2 #define BCOM_IPR_IRDA_TX 2 #define BCOM_IPR_SCTMR_3 2 #define BCOM_IPR_SCTMR_4 2 #define BCOM_IPR_SCTMR_5 2 #define BCOM_IPR_SCTMR_6 2 #define BCOM_IPR_SCTMR_7 2 /* ======================================================================== */ /* API */ /* ======================================================================== */ extern struct bcom_task *bcom_task_alloc(int bd_count, int bd_size, int priv_size); extern void bcom_task_free(struct bcom_task *tsk); extern int bcom_load_image(int task, u32 *task_image); extern void bcom_set_initiator(int task, int initiator); #define TASK_ENABLE 0x8000 /** * bcom_disable_prefetch - Hook to disable bus prefetching * * ATA DMA and the original MPC5200 need this due to silicon bugs. At the * moment disabling prefetch is a one-way street. There is no mechanism * in place to turn prefetch back on after it has been disabled. There is * no reason it couldn't be done, it would just be more complex to implement. */ static inline void bcom_disable_prefetch(void) { u16 regval; regval = in_be16(&bcom_eng->regs->PtdCntrl); out_be16(&bcom_eng->regs->PtdCntrl, regval | 1); }; static inline void bcom_enable_task(int task) { u16 reg; reg = in_be16(&bcom_eng->regs->tcr[task]); out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE); } static inline void bcom_disable_task(int task) { u16 reg = in_be16(&bcom_eng->regs->tcr[task]); out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE); } static inline u32 * bcom_task_desc(int task) { return bcom_sram_pa2va(bcom_eng->tdt[task].start); } static inline int bcom_task_num_descs(int task) { return (bcom_eng->tdt[task].stop - bcom_eng->tdt[task].start)/sizeof(u32) + 1; } static inline u32 * bcom_task_var(int task) { return bcom_sram_pa2va(bcom_eng->tdt[task].var); } static inline u32 * bcom_task_inc(int task) { return &bcom_task_var(task)[BCOM_MAX_VAR]; } static inline int bcom_drd_is_extended(u32 desc) { return (desc) & BCOM_DRD_EXTENDED; } static inline int bcom_desc_is_drd(u32 desc) { return !(desc & BCOM_LCD_MASK) && desc != BCOM_DESC_NOP; } static inline int bcom_desc_initiator(u32 desc) { return (desc >> BCOM_DRD_INITIATOR_SHIFT) & 0x1f; } static inline void bcom_set_desc_initiator(u32 *desc, int initiator) { *desc = (*desc & ~(0x1f << BCOM_DRD_INITIATOR_SHIFT)) | ((initiator & 0x1f) << BCOM_DRD_INITIATOR_SHIFT); } static inline void bcom_set_task_pragma(int task, int pragma) { u32 *fdt = &bcom_eng->tdt[task].fdt; *fdt = (*fdt & ~0xff) | pragma; } static inline void bcom_set_task_auto_start(int task, int next_task) { u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; out_be16(tcr, (in_be16(tcr) & ~0xff) | 0x00c0 | next_task); } static inline void bcom_set_tcr_initiator(int task, int initiator) { u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; out_be16(tcr, (in_be16(tcr) & ~0x1f00) | ((initiator & 0x1f) << 8)); } #endif /* __BESTCOMM_PRIV_H__ */ fsl/edac.h 0000644 00000000233 14722070374 0006401 
0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef FSL_EDAC_H #define FSL_EDAC_H struct mpc85xx_edac_pci_plat_data { struct device_node *of_node; }; #endif fsl/mc.h 0000644 00000041522 14722070374 0006112 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Freescale Management Complex (MC) bus public interface * * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. * Author: German Rivera <German.Rivera@freescale.com> * */ #ifndef _FSL_MC_H_ #define _FSL_MC_H_ #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/interrupt.h> #define FSL_MC_VENDOR_FREESCALE 0x1957 struct irq_domain; struct msi_domain_info; struct fsl_mc_device; struct fsl_mc_io; /** * struct fsl_mc_driver - MC object device driver object * @driver: Generic device driver * @match_id_table: table of supported device matching IDs * @probe: Function called when a device is added * @remove: Function called when a device is removed * @shutdown: Function called at shutdown time to quiesce the device * @suspend: Function called when a device is stopped * @resume: Function called when a device is resumed * * Generic DPAA device driver object for device drivers that are registered * with a DPRC bus. This structure is to be embedded in each device-specific * driver structure. */ struct fsl_mc_driver { struct device_driver driver; const struct fsl_mc_device_id *match_id_table; int (*probe)(struct fsl_mc_device *dev); int (*remove)(struct fsl_mc_device *dev); void (*shutdown)(struct fsl_mc_device *dev); int (*suspend)(struct fsl_mc_device *dev, pm_message_t state); int (*resume)(struct fsl_mc_device *dev); }; #define to_fsl_mc_driver(_drv) \ container_of(_drv, struct fsl_mc_driver, driver) /** * enum fsl_mc_pool_type - Types of allocatable MC bus resources * * Entries in this enum are used as indices in the array of resource * pools of an fsl_mc_bus object. */ enum fsl_mc_pool_type { FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */ FSL_MC_POOL_IRQ, /* * NOTE: New resource pool types must be added before this entry */ FSL_MC_NUM_POOL_TYPES }; /** * struct fsl_mc_resource - MC generic resource * @type: type of resource * @id: unique MC resource ID within the resources of the same type * @data: pointer to resource-specific data if the resource is currently * allocated, or NULL if the resource is not currently allocated. * @parent_pool: pointer to the parent resource pool from which this * resource is allocated. * @node: Node in the free list of the corresponding resource pool * * NOTE: This structure is to be embedded as a field of specific * MC resource structures.
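 *
 * For example (a sketch; struct fsl_mc_device_irq just below is the
 * in-tree instance of this pattern), a type-specific resource wraps it
 * and uses container_of() to get back to the wrapper:
 *
 *	struct foo_resource {			// hypothetical wrapper
 *		u32 foo_state;
 *		struct fsl_mc_resource resource;
 *	};
 *	#define to_foo(_res) container_of(_res, struct foo_resource, resource)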
*/ struct fsl_mc_resource { enum fsl_mc_pool_type type; s32 id; void *data; struct fsl_mc_resource_pool *parent_pool; struct list_head node; }; /** * struct fsl_mc_device_irq - MC object device message-based interrupt * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs() * @mc_dev: MC object device that owns this interrupt * @dev_irq_index: device-relative IRQ index * @resource: MC generic resource associated with the interrupt */ struct fsl_mc_device_irq { struct msi_desc *msi_desc; struct fsl_mc_device *mc_dev; u8 dev_irq_index; struct fsl_mc_resource resource; }; #define to_fsl_mc_irq(_mc_resource) \ container_of(_mc_resource, struct fsl_mc_device_irq, resource) /* Opened state - Indicates that an object is open by at least one owner */ #define FSL_MC_OBJ_STATE_OPEN 0x00000001 /* Plugged state - Indicates that the object is plugged */ #define FSL_MC_OBJ_STATE_PLUGGED 0x00000002 /** * Shareability flag - Object flag indicating no memory shareability. * the object generates memory accesses that are non coherent with other * masters; * user is responsible for proper memory handling through IOMMU configuration. */ #define FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 /** * struct fsl_mc_obj_desc - Object descriptor * @type: Type of object: NULL terminated string * @id: ID of logical object resource * @vendor: Object vendor identifier * @ver_major: Major version number * @ver_minor: Minor version number * @irq_count: Number of interrupts supported by the object * @region_count: Number of mappable regions supported by the object * @state: Object state: combination of FSL_MC_OBJ_STATE_ states * @label: Object label: NULL terminated string * @flags: Object's flags */ struct fsl_mc_obj_desc { char type[16]; int id; u16 vendor; u16 ver_major; u16 ver_minor; u8 irq_count; u8 region_count; u32 state; char label[16]; u16 flags; }; /** * Bit masks for a MC object device (struct fsl_mc_device) flags */ #define FSL_MC_IS_DPRC 0x0001 /** * struct fsl_mc_device - MC object device object * @dev: Linux driver model device object * @dma_mask: Default DMA mask * @flags: MC object device flags * @icid: Isolation context ID for the device * @mc_handle: MC handle for the corresponding MC object opened * @mc_io: Pointer to MC IO object assigned to this device or * NULL if none. * @obj_desc: MC description of the DPAA device * @regions: pointer to array of MMIO region entries * @irqs: pointer to array of pointers to interrupts allocated to this device * @resource: generic resource associated with this MC object device, if any. * * Generic device object for MC object devices that are "attached" to a * MC bus. * * NOTES: * - For a non-DPRC object its icid is the same as its parent DPRC's icid. * - The SMMU notifier callback gets invoked after device_add() has been * called for an MC object device, but before the device-specific probe * callback gets called. * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC * portals. For all other MC objects, their device drivers are responsible for * allocating MC portals for them by calling fsl_mc_portal_allocate(). * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are * treated as resources that can be allocated/deallocated from the * corresponding resource pool in the object's parent DPRC, using the * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects * are known as "allocatable" objects. For them, the corresponding * fsl_mc_device's 'resource' points to the associated resource object. 
* For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI), * 'resource' is NULL. */ struct fsl_mc_device { struct device dev; u64 dma_mask; u16 flags; u16 icid; u16 mc_handle; struct fsl_mc_io *mc_io; struct fsl_mc_obj_desc obj_desc; struct resource *regions; struct fsl_mc_device_irq **irqs; struct fsl_mc_resource *resource; struct device_link *consumer_link; }; #define to_fsl_mc_device(_dev) \ container_of(_dev, struct fsl_mc_device, dev) #define MC_CMD_NUM_OF_PARAMS 7 struct mc_cmd_header { u8 src_id; u8 flags_hw; u8 status; u8 flags_sw; __le16 token; __le16 cmd_id; }; struct fsl_mc_command { __le64 header; __le64 params[MC_CMD_NUM_OF_PARAMS]; }; enum mc_cmd_status { MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */ }; /* * MC command flags */ /* High priority flag */ #define MC_CMD_FLAG_PRI 0x80 /* Command completion flag */ #define MC_CMD_FLAG_INTR_DIS 0x01 static inline __le64 mc_encode_cmd_header(u16 cmd_id, u32 cmd_flags, u16 token) { __le64 header = 0; struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header; hdr->cmd_id = cpu_to_le16(cmd_id); hdr->token = cpu_to_le16(token); hdr->status = MC_CMD_STATUS_READY; if (cmd_flags & MC_CMD_FLAG_PRI) hdr->flags_hw = MC_CMD_FLAG_PRI; if (cmd_flags & MC_CMD_FLAG_INTR_DIS) hdr->flags_sw = MC_CMD_FLAG_INTR_DIS; return header; } static inline u16 mc_cmd_hdr_read_token(struct fsl_mc_command *cmd) { struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header; u16 token = le16_to_cpu(hdr->token); return token; } struct mc_rsp_create { __le32 object_id; }; struct mc_rsp_api_ver { __le16 major_ver; __le16 minor_ver; }; static inline u32 mc_cmd_read_object_id(struct fsl_mc_command *cmd) { struct mc_rsp_create *rsp_params; rsp_params = (struct mc_rsp_create *)cmd->params; return le32_to_cpu(rsp_params->object_id); } static inline void mc_cmd_read_api_version(struct fsl_mc_command *cmd, u16 *major_ver, u16 *minor_ver) { struct mc_rsp_api_ver *rsp_params; rsp_params = (struct mc_rsp_api_ver *)cmd->params; *major_ver = le16_to_cpu(rsp_params->major_ver); *minor_ver = le16_to_cpu(rsp_params->minor_ver); } /** * Bit masks for a MC I/O object (struct fsl_mc_io) flags */ #define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001 /** * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command() * @dev: device associated with this Mc I/O object * @flags: flags for mc_send_command() * @portal_size: MC command portal size in bytes * @portal_phys_addr: MC command portal physical address * @portal_virt_addr: MC command portal virtual address * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal. * * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not * set: * @mutex: Mutex to serialize mc_send_command() calls that use the same MC * portal, if the fsl_mc_io object was created with the * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. 
mc_send_command() calls for this * fsl_mc_io object must be made only from non-atomic context. * * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is * set: * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC * portal, if the fsl_mc_io object was created with the * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this * fsl_mc_io object can be made from atomic or non-atomic context. */ struct fsl_mc_io { struct device *dev; u16 flags; u32 portal_size; phys_addr_t portal_phys_addr; void __iomem *portal_virt_addr; struct fsl_mc_device *dpmcp_dev; union { /* * This field is only meaningful if the * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set */ struct mutex mutex; /* serializes mc_send_command() */ /* * This field is only meaningful if the * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set */ spinlock_t spinlock; /* serializes mc_send_command() */ }; }; int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd); #ifdef CONFIG_FSL_MC_BUS #define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type) #else /* If fsl-mc bus is not present device cannot belong to fsl-mc bus */ #define dev_is_fsl_mc(_dev) (0) #endif /* Macro to check if a device is a container device */ #define fsl_mc_is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & \ FSL_MC_IS_DPRC) /* Macro to get the container device of a MC device */ #define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \ (_dev) : (_dev)->parent) /* * module_fsl_mc_driver() - Helper macro for drivers that don't do * anything special in module init/exit. This eliminates a lot of * boilerplate. Each module may only use this macro once, and * calling it replaces module_init() and module_exit() */ #define module_fsl_mc_driver(__fsl_mc_driver) \ module_driver(__fsl_mc_driver, fsl_mc_driver_register, \ fsl_mc_driver_unregister) /* * Macro to avoid include chaining to get THIS_MODULE */ #define fsl_mc_driver_register(drv) \ __fsl_mc_driver_register(drv, THIS_MODULE) int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver, struct module *owner); void fsl_mc_driver_unregister(struct fsl_mc_driver *driver); int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, u16 mc_io_flags, struct fsl_mc_io **new_mc_io); void fsl_mc_portal_free(struct fsl_mc_io *mc_io); int fsl_mc_portal_reset(struct fsl_mc_io *mc_io); int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, enum fsl_mc_pool_type pool_type, struct fsl_mc_device **new_mc_adev); void fsl_mc_object_free(struct fsl_mc_device *mc_adev); struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); extern struct bus_type fsl_mc_bus_type; extern struct device_type fsl_mc_bus_dprc_type; extern struct device_type fsl_mc_bus_dpni_type; extern struct device_type fsl_mc_bus_dpio_type; extern struct device_type fsl_mc_bus_dpsw_type; extern struct device_type fsl_mc_bus_dpbp_type; extern struct device_type fsl_mc_bus_dpcon_type; extern struct device_type fsl_mc_bus_dpmcp_type; extern struct device_type fsl_mc_bus_dpmac_type; extern struct device_type fsl_mc_bus_dprtc_type; extern struct device_type fsl_mc_bus_dpseci_type; static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dprc_type; } static inline bool is_fsl_mc_bus_dpni(const struct 
fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dpni_type; } static inline bool is_fsl_mc_bus_dpio(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dpio_type; } static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dpsw_type; } static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dpbp_type; } static inline bool is_fsl_mc_bus_dpcon(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dpcon_type; } static inline bool is_fsl_mc_bus_dpmcp(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dpmcp_type; } static inline bool is_fsl_mc_bus_dpmac(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dpmac_type; } static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dprtc_type; } static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dpseci_type; } /* * Data Path Buffer Pool (DPBP) API * Contains initialization APIs and runtime control APIs for DPBP */ int dpbp_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpbp_id, u16 *token); int dpbp_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpbp_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpbp_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpbp_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); /** * struct dpbp_attr - Structure representing DPBP attributes * @id: DPBP object ID * @bpid: Hardware buffer pool ID; should be used as an argument in * acquire/release operations on buffers */ struct dpbp_attr { int id; u16 bpid; }; int dpbp_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, struct dpbp_attr *attr); /* Data Path Concentrator (DPCON) API * Contains initialization APIs and runtime control APIs for DPCON */ /** * Use it to disable notifications; see dpcon_set_notification() */ #define DPCON_INVALID_DPIO_ID (int)(-1) int dpcon_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpcon_id, u16 *token); int dpcon_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpcon_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpcon_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpcon_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); /** * struct dpcon_attr - Structure representing DPCON attributes * @id: DPCON object ID * @qbman_ch_id: Channel ID to be used by dequeue operation * @num_priorities: Number of priorities for the DPCON channel (1-8) */ struct dpcon_attr { int id; u16 qbman_ch_id; u8 num_priorities; }; int dpcon_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, struct dpcon_attr *attr); /** * struct dpcon_notification_cfg - Structure representing notification params * @dpio_id: DPIO object ID; must be configured with a notification channel; * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; * @priority: Priority selection within the DPIO channel; valid values * are 0-7, depending on the number of priorities in that channel * @user_ctx: User context value provided with each CDAN message */ struct dpcon_notification_cfg { int dpio_id; u8 priority; u64 user_ctx; }; int dpcon_set_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, struct dpcon_notification_cfg *cfg); #endif /* _FSL_MC_H_ */ fsl/ptp_qoriq.h 0000644 00000017054 14722070374 
0007534 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2010 OMICRON electronics GmbH * Copyright 2018 NXP */ #ifndef __PTP_QORIQ_H__ #define __PTP_QORIQ_H__ #include <linux/io.h> #include <linux/interrupt.h> #include <linux/ptp_clock_kernel.h> /* * QorIQ PTP registers */ struct ctrl_regs { u32 tmr_ctrl; /* Timer control register */ u32 tmr_tevent; /* Timestamp event register */ u32 tmr_temask; /* Timer event mask register */ u32 tmr_pevent; /* Timestamp event register */ u32 tmr_pemask; /* Timer event mask register */ u32 tmr_stat; /* Timestamp status register */ u32 tmr_cnt_h; /* Timer counter high register */ u32 tmr_cnt_l; /* Timer counter low register */ u32 tmr_add; /* Timer drift compensation addend register */ u32 tmr_acc; /* Timer accumulator register */ u32 tmr_prsc; /* Timer prescale */ u8 res1[4]; u32 tmroff_h; /* Timer offset high */ u32 tmroff_l; /* Timer offset low */ }; struct alarm_regs { u32 tmr_alarm1_h; /* Timer alarm 1 high register */ u32 tmr_alarm1_l; /* Timer alarm 1 low register */ u32 tmr_alarm2_h; /* Timer alarm 2 high register */ u32 tmr_alarm2_l; /* Timer alarm 2 low register */ }; struct fiper_regs { u32 tmr_fiper1; /* Timer fixed period interval 1 */ u32 tmr_fiper2; /* Timer fixed period interval 2 */ u32 tmr_fiper3; /* Timer fixed period interval 3 */ }; struct etts_regs { u32 tmr_etts1_h; /* Timestamp of general purpose external trigger 1, high */ u32 tmr_etts1_l; /* Timestamp of general purpose external trigger 1, low */ u32 tmr_etts2_h; /* Timestamp of general purpose external trigger 2, high */ u32 tmr_etts2_l; /* Timestamp of general purpose external trigger 2, low */ }; struct ptp_qoriq_registers { struct ctrl_regs __iomem *ctrl_regs; struct alarm_regs __iomem *alarm_regs; struct fiper_regs __iomem *fiper_regs; struct etts_regs __iomem *etts_regs; }; /* Offset definitions for the four register groups */ #define ETSEC_CTRL_REGS_OFFSET 0x0 #define ETSEC_ALARM_REGS_OFFSET 0x40 #define ETSEC_FIPER_REGS_OFFSET 0x80 #define ETSEC_ETTS_REGS_OFFSET 0xa0 #define CTRL_REGS_OFFSET 0x80 #define ALARM_REGS_OFFSET 0xb8 #define FIPER_REGS_OFFSET 0xd0 #define ETTS_REGS_OFFSET 0xe0 /* Bit definitions for the TMR_CTRL register */ #define ALM1P (1<<31) /* Alarm1 output polarity */ #define ALM2P (1<<30) /* Alarm2 output polarity */ #define FIPERST (1<<28) /* FIPER start indication */ #define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */ #define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */ #define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */ #define TCLK_PERIOD_MASK (0x3ff) #define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */ #define FRD (1<<14) /* FIPER Realignment Disable */ #define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */ #define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */ #define ETEP2 (1<<9) /* External trigger 2 edge polarity */ #define ETEP1 (1<<8) /* External trigger 1 edge polarity */ #define COPH (1<<7) /* Generated clock output phase. */ #define CIPH (1<<6) /* External oscillator input clock phase */ #define TMSR (1<<5) /* Timer soft reset. */ #define BYP (1<<3) /* Bypass drift compensated clock */ #define TE (1<<2) /* 1588 timer enable.
*/ #define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */ #define CKSEL_MASK (0x3) /* Bit definitions for the TMR_TEVENT register */ #define ETS2 (1<<25) /* External trigger 2 timestamp sampled */ #define ETS1 (1<<24) /* External trigger 1 timestamp sampled */ #define ALM2 (1<<17) /* Current time = alarm time register 2 */ #define ALM1 (1<<16) /* Current time = alarm time register 1 */ #define PP1 (1<<7) /* periodic pulse generated on FIPER1 */ #define PP2 (1<<6) /* periodic pulse generated on FIPER2 */ #define PP3 (1<<5) /* periodic pulse generated on FIPER3 */ /* Bit definitions for the TMR_TEMASK register */ #define ETS2EN (1<<25) /* External trigger 2 timestamp enable */ #define ETS1EN (1<<24) /* External trigger 1 timestamp enable */ #define ALM2EN (1<<17) /* Timer ALM2 event enable */ #define ALM1EN (1<<16) /* Timer ALM1 event enable */ #define PP1EN (1<<7) /* Periodic pulse event 1 enable */ #define PP2EN (1<<6) /* Periodic pulse event 2 enable */ /* Bit definitions for the TMR_PEVENT register */ #define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */ #define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */ #define RXP (1<<0) /* PTP frame has been received */ /* Bit definitions for the TMR_PEMASK register */ #define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */ #define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */ #define RXPEN (1<<0) /* Receive PTP packet event enable */ /* Bit definitions for the TMR_STAT register */ #define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ #define STAT_VEC_MASK (0x3f) #define ETS1_VLD (1<<24) #define ETS2_VLD (1<<25) /* Bit definitions for the TMR_PRSC register */ #define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ #define PRSC_OCK_MASK (0xffff) #define DRIVER "ptp_qoriq" #define N_EXT_TS 2 #define DEFAULT_CKSEL 1 #define DEFAULT_TMR_PRSC 2 #define DEFAULT_FIPER1_PERIOD 1000000000 #define DEFAULT_FIPER2_PERIOD 100000 struct ptp_qoriq { void __iomem *base; struct ptp_qoriq_registers regs; spinlock_t lock; /* protects regs */ struct ptp_clock *clock; struct ptp_clock_info caps; struct resource *rsrc; struct dentry *debugfs_root; struct device *dev; bool extts_fifo_support; int irq; int phc_index; u64 alarm_interval; /* for periodic alarm */ u64 alarm_value; u32 tclk_period; /* nanoseconds */ u32 tmr_prsc; u32 tmr_add; u32 cksel; u32 tmr_fiper1; u32 tmr_fiper2; u32 (*read)(unsigned __iomem *addr); void (*write)(unsigned __iomem *addr, u32 val); }; static inline u32 qoriq_read_be(unsigned __iomem *addr) { return ioread32be(addr); } static inline void qoriq_write_be(unsigned __iomem *addr, u32 val) { iowrite32be(val, addr); } static inline u32 qoriq_read_le(unsigned __iomem *addr) { return ioread32(addr); } static inline void qoriq_write_le(unsigned __iomem *addr, u32 val) { iowrite32(val, addr); } irqreturn_t ptp_qoriq_isr(int irq, void *priv); int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base, const struct ptp_clock_info *caps); void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq); int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm); int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta); int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts); int ptp_qoriq_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts); int ptp_qoriq_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on); #ifdef CONFIG_DEBUG_FS void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq); void ptp_qoriq_remove_debugfs(struct 
ptp_qoriq *ptp_qoriq); #else static inline void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq) { } static inline void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq) { } #endif #endif fsl/guts.h 0000644 00000030401 14722070374 0006467 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /** * Freecale 85xx and 86xx Global Utilties register set * * Authors: Jeff Brown * Timur Tabi <timur@freescale.com> * * Copyright 2004,2007,2012 Freescale Semiconductor, Inc */ #ifndef __FSL_GUTS_H__ #define __FSL_GUTS_H__ #include <linux/types.h> #include <linux/io.h> /** * Global Utility Registers. * * Not all registers defined in this structure are available on all chips, so * you are expected to know whether a given register actually exists on your * chip before you access it. * * Also, some registers are similar on different chips but have slightly * different names. In these cases, one name is chosen to avoid extraneous * #ifdefs. */ struct ccsr_guts { u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and * Control Register */ u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ u8 res018[0x20 - 0x18]; u32 porcir; /* 0x.0020 - POR Configuration Information * Register */ u8 res024[0x30 - 0x24]; u32 gpiocr; /* 0x.0030 - GPIO Control Register */ u8 res034[0x40 - 0x34]; u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data * Register */ u8 res044[0x50 - 0x44]; u32 gpindr; /* 0x.0050 - General-Purpose Input Data * Register */ u8 res054[0x60 - 0x54]; u32 pmuxcr; /* 0x.0060 - Alternate Function Signal * Multiplex Control */ u32 pmuxcr2; /* 0x.0064 - Alternate function signal * multiplex control 2 */ u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ u8 res06c[0x70 - 0x6c]; u32 devdisr; /* 0x.0070 - Device Disable Control */ #define CCSR_GUTS_DEVDISR_TB1 0x00001000 #define CCSR_GUTS_DEVDISR_TB0 0x00004000 u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ u8 res078[0x7c - 0x78]; u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control * Register */ u32 powmgtcsr; /* 0x.0080 - Power Management Status and * Control Register */ u32 pmrccr; /* 0x.0084 - Power Management Reset Counter * Configuration Register */ u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter * Configuration Register */ u32 pmcdr; /* 0x.008c - 4Power management clock disable * register */ u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ u32 rstrscr; /* 0x.0094 - Reset Request Status and * Control Register */ u32 ectrstcr; /* 0x.0098 - Exception reset control register */ u32 autorstsr; /* 0x.009c - Automatic reset status register */ u32 pvr; /* 0x.00a0 - Processor Version Register */ u32 svr; /* 0x.00a4 - System Version Register */ u8 res0a8[0xb0 - 0xa8]; u32 rstcr; /* 0x.00b0 - Reset Control Register */ u8 res0b4[0xc0 - 0xb4]; u32 iovselsr; /* 0x.00c0 - I/O voltage select status register Called 'elbcvselcr' on 86xx SOCs */ u8 res0c4[0x100 - 0xc4]; u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers There are 16 registers */ u8 res140[0x224 - 0x140]; u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ u8 res22c[0x604 - 0x22c]; u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ u8 res608[0x800 - 0x608]; u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ u8 res804[0x900 - 
0x804]; u32 ircr; /* 0x.0900 - Infrared Control Register */ u8 res904[0x908 - 0x904]; u32 dmacr; /* 0x.0908 - DMA Control Register */ u8 res90c[0x914 - 0x90c]; u32 elbccr; /* 0x.0914 - eLBC Control Register */ u8 res918[0xb20 - 0x918]; u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ u8 resb2c[0xe00 - 0xb2c]; u32 clkocr; /* 0x.0e00 - Clock Out Select Register */ u8 rese04[0xe10 - 0xe04]; u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ u8 rese14[0xe20 - 0xe14]; u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override * register */ u8 rese28[0xf04 - 0xe28]; u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ u8 resf0c[0xf2c - 0xf0c]; u32 itcr; /* 0x.0f2c - Internal transaction control * register */ u8 resf30[0xf40 - 0xf30]; u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ } __attribute__ ((packed)); /* Alternate function signal multiplex control */ #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) #ifdef CONFIG_PPC_86xx #define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ #define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ /* * Set the DMACR register in the GUTS * * The DMACR register determines the source of initiated transfers for each * channel on each DMA controller. Rather than have a bunch of repetitive * macros for the bit patterns, we just have a function that calculates * them. * * guts: Pointer to GUTS structure * co: The DMA controller (0 or 1) * ch: The channel on the DMA controller (0, 1, 2, or 3) * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) */ static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts, unsigned int co, unsigned int ch, unsigned int device) { unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift); } #define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000 #define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */ #define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */ #define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */ #define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */ #define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */ #define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */ #define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */ #define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */ #define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */ #define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */ #define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */ #define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008 #define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004 #define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002 #define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001 /* * Set the DMA external control bits in the GUTS * * The DMA external control bits in the PMUXCR are only meaningful for * channels 0 and 3. Any other channels are ignored. 
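 *
 * For example (sketch; 'guts' is assumed to be an ioremap()ed pointer
 * to the global utilities block), asserting the external-control bit
 * for DMA controller 0, channel 3:
 *
 *	guts_set_pmuxcr_dma(guts, 0, 3, 1);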
* * guts: Pointer to GUTS structure * co: The DMA controller (0 or 1) * ch: The channel on the DMA controller (0, 1, 2, or 3) * value: the new value for the bit (0 or 1) */ static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts, unsigned int co, unsigned int ch, unsigned int value) { if ((ch == 0) || (ch == 3)) { unsigned int shift = 2 * (co + 1) - (ch & 1) - 1; clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift); } } #define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000 #define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000 #define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000 #define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25 #define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000 #define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \ (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT) #define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16 #define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000 #define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT) #define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF #define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK) #endif struct ccsr_rcpm_v1 { u8 res0000[4]; __be32 cdozsr; /* 0x0004 Core Doze Status Register */ u8 res0008[4]; __be32 cdozcr; /* 0x000c Core Doze Control Register */ u8 res0010[4]; __be32 cnapsr; /* 0x0014 Core Nap Status Register */ u8 res0018[4]; __be32 cnapcr; /* 0x001c Core Nap Control Register */ u8 res0020[4]; __be32 cdozpsr; /* 0x0024 Core Doze Previous Status Register */ u8 res0028[4]; __be32 cnappsr; /* 0x002c Core Nap Previous Status Register */ u8 res0030[4]; __be32 cwaitsr; /* 0x0034 Core Wait Status Register */ u8 res0038[4]; __be32 cwdtdsr; /* 0x003c Core Watchdog Detect Status Register */ __be32 powmgtcsr; /* 0x0040 PM Control&Status Register */ #define RCPM_POWMGTCSR_SLP 0x00020000 u8 res0044[12]; __be32 ippdexpcr; /* 0x0050 IP Powerdown Exception Control Register */ u8 res0054[16]; __be32 cpmimr; /* 0x0064 Core PM IRQ Mask Register */ u8 res0068[4]; __be32 cpmcimr; /* 0x006c Core PM Critical IRQ Mask Register */ u8 res0070[4]; __be32 cpmmcmr; /* 0x0074 Core PM Machine Check Mask Register */ u8 res0078[4]; __be32 cpmnmimr; /* 0x007c Core PM NMI Mask Register */ u8 res0080[4]; __be32 ctbenr; /* 0x0084 Core Time Base Enable Register */ u8 res0088[4]; __be32 ctbckselr; /* 0x008c Core Time Base Clock Select Register */ u8 res0090[4]; __be32 ctbhltcr; /* 0x0094 Core Time Base Halt Control Register */ u8 res0098[4]; __be32 cmcpmaskcr; /* 0x00a4 Core Machine Check Mask Register */ }; struct ccsr_rcpm_v2 { u8 res_00[12]; __be32 tph10sr0; /* Thread PH10 Status Register */ u8 res_10[12]; __be32 tph10setr0; /* Thread PH10 Set Control Register */ u8 res_20[12]; __be32 tph10clrr0; /* Thread PH10 Clear Control Register */ u8 res_30[12]; __be32 tph10psr0; /* Thread PH10 Previous Status Register */ u8 res_40[12]; __be32 twaitsr0; /* Thread Wait Status Register */ u8 res_50[96]; __be32 pcph15sr; /* Physical Core PH15 Status Register */ __be32 pcph15setr; /* Physical Core PH15 Set Control Register */ __be32 pcph15clrr; /* Physical Core PH15 Clear Control Register */ __be32 pcph15psr; /* Physical Core PH15 Prev Status Register */ u8 res_c0[16]; __be32 pcph20sr; /* Physical Core PH20 Status Register */ __be32 pcph20setr; /* Physical Core PH20 Set Control Register */ __be32 pcph20clrr; /* Physical Core PH20 Clear Control Register */ __be32 pcph20psr; /* Physical Core PH20 Prev Status Register */ __be32 pcpw20sr; /* Physical Core PW20 Status Register */ u8 res_e0[12]; __be32 pcph30sr; /* Physical Core PH30 Status Register */ __be32 pcph30setr; /* Physical Core 
PH30 Set Control Register */ __be32 pcph30clrr; /* Physical Core PH30 Clear Control Register */ __be32 pcph30psr; /* Physical Core PH30 Prev Status Register */ u8 res_100[32]; __be32 ippwrgatecr; /* IP Power Gating Control Register */ u8 res_124[12]; __be32 powmgtcsr; /* Power Management Control & Status Reg */ #define RCPM_POWMGTCSR_LPM20_RQ 0x00100000 #define RCPM_POWMGTCSR_LPM20_ST 0x00000200 #define RCPM_POWMGTCSR_P_LPM20_ST 0x00000100 u8 res_134[12]; __be32 ippdexpcr[4]; /* IP Powerdown Exception Control Reg */ u8 res_150[12]; __be32 tpmimr0; /* Thread PM Interrupt Mask Reg */ u8 res_160[12]; __be32 tpmcimr0; /* Thread PM Crit Interrupt Mask Reg */ u8 res_170[12]; __be32 tpmmcmr0; /* Thread PM Machine Check Interrupt Mask Reg */ u8 res_180[12]; __be32 tpmnmimr0; /* Thread PM NMI Mask Reg */ u8 res_190[12]; __be32 tmcpmaskcr0; /* Thread Machine Check Mask Control Reg */ __be32 pctbenr; /* Physical Core Time Base Enable Reg */ __be32 pctbclkselr; /* Physical Core Time Base Clock Select */ __be32 tbclkdivr; /* Time Base Clock Divider Register */ u8 res_1ac[4]; __be32 ttbhltcr[4]; /* Thread Time Base Halt Control Register */ __be32 clpcl10sr; /* Cluster PCL10 Status Register */ __be32 clpcl10setr; /* Cluster PCL30 Set Control Register */ __be32 clpcl10clrr; /* Cluster PCL30 Clear Control Register */ __be32 clpcl10psr; /* Cluster PCL30 Prev Status Register */ __be32 cddslpsetr; /* Core Domain Deep Sleep Set Register */ __be32 cddslpclrr; /* Core Domain Deep Sleep Clear Register */ __be32 cdpwroksetr; /* Core Domain Power OK Set Register */ __be32 cdpwrokclrr; /* Core Domain Power OK Clear Register */ __be32 cdpwrensr; /* Core Domain Power Enable Status Register */ __be32 cddslsr; /* Core Domain Deep Sleep Status Register */ u8 res_1e8[8]; __be32 dslpcntcr[8]; /* Deep Sleep Counter Cfg Register */ u8 res_300[3568]; }; #endif fsl/ftm.h 0000644 00000005564 14722070374 0006307 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 #ifndef __FSL_FTM_H__ #define __FSL_FTM_H__ #define FTM_SC 0x0 /* Status And Control */ #define FTM_CNT 0x4 /* Counter */ #define FTM_MOD 0x8 /* Modulo */ #define FTM_CNTIN 0x4C /* Counter Initial Value */ #define FTM_STATUS 0x50 /* Capture And Compare Status */ #define FTM_MODE 0x54 /* Features Mode Selection */ #define FTM_SYNC 0x58 /* Synchronization */ #define FTM_OUTINIT 0x5C /* Initial State For Channels Output */ #define FTM_OUTMASK 0x60 /* Output Mask */ #define FTM_COMBINE 0x64 /* Function For Linked Channels */ #define FTM_DEADTIME 0x68 /* Deadtime Insertion Control */ #define FTM_EXTTRIG 0x6C /* FTM External Trigger */ #define FTM_POL 0x70 /* Channels Polarity */ #define FTM_FMS 0x74 /* Fault Mode Status */ #define FTM_FILTER 0x78 /* Input Capture Filter Control */ #define FTM_FLTCTRL 0x7C /* Fault Control */ #define FTM_QDCTRL 0x80 /* Quadrature Decoder Control And Status */ #define FTM_CONF 0x84 /* Configuration */ #define FTM_FLTPOL 0x88 /* FTM Fault Input Polarity */ #define FTM_SYNCONF 0x8C /* Synchronization Configuration */ #define FTM_INVCTRL 0x90 /* FTM Inverting Control */ #define FTM_SWOCTRL 0x94 /* FTM Software Output Control */ #define FTM_PWMLOAD 0x98 /* FTM PWM Load */ #define FTM_SC_CLK_MASK_SHIFT 3 #define FTM_SC_CLK_MASK (3 << FTM_SC_CLK_MASK_SHIFT) #define FTM_SC_TOF 0x80 #define FTM_SC_TOIE 0x40 #define FTM_SC_CPWMS 0x20 #define FTM_SC_CLKS 0x18 #define FTM_SC_PS_1 0x0 #define FTM_SC_PS_2 0x1 #define FTM_SC_PS_4 0x2 #define FTM_SC_PS_8 0x3 #define FTM_SC_PS_16 0x4 #define FTM_SC_PS_32 0x5 #define FTM_SC_PS_64 0x6 #define FTM_SC_PS_128 0x7 
#define FTM_MODE_FAULTIE	0x80
#define FTM_MODE_FAULTM		0x60
#define FTM_MODE_CAPTEST	0x10
#define FTM_MODE_PWMSYNC	0x8
#define FTM_MODE_WPDIS		0x4
#define FTM_MODE_INIT		0x2
#define FTM_MODE_FTMEN		0x1

/* NXP Errata: The PHAFLTREN and PHBFLTREN bits are tied to zero internally
 * and these bits cannot be set. Flextimer cannot use Filter in
 * Quadrature Decoder Mode.
 * https://community.nxp.com/thread/467648#comment-1010319
 */
#define FTM_QDCTRL_PHAFLTREN	0x80
#define FTM_QDCTRL_PHBFLTREN	0x40
#define FTM_QDCTRL_PHAPOL	0x20
#define FTM_QDCTRL_PHBPOL	0x10
#define FTM_QDCTRL_QUADMODE	0x8
#define FTM_QDCTRL_QUADDIR	0x4
#define FTM_QDCTRL_TOFDIR	0x2
#define FTM_QDCTRL_QUADEN	0x1

#define FTM_FMS_FAULTF	0x80
#define FTM_FMS_WPEN	0x40
#define FTM_FMS_FAULTIN	0x10
#define FTM_FMS_FAULTF3	0x8
#define FTM_FMS_FAULTF2	0x4
#define FTM_FMS_FAULTF1	0x2
#define FTM_FMS_FAULTF0	0x1

#define FTM_CSC_BASE	0xC
#define FTM_CSC_MSB	0x20
#define FTM_CSC_MSA	0x10
#define FTM_CSC_ELSB	0x8
#define FTM_CSC_ELSA	0x4
#define FTM_CSC(_channel)	(FTM_CSC_BASE + ((_channel) * 8))

#define FTM_CV_BASE	0x10
#define FTM_CV(_channel)	(FTM_CV_BASE + ((_channel) * 8))

#define FTM_PS_MAX	7

#endif
netfilter_arp/arp_tables.h 0000644 00000004372 14722070374 0011703 0 ustar 00
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *	Format of an ARP firewall descriptor
 *
 *	src, tgt, src_mask, tgt_mask, arpop, arpop_mask are always stored in
 *	network byte order.
 *	flags are stored in host byte order (of course).
 */
#ifndef _ARPTABLES_H
#define _ARPTABLES_H

#include <linux/if.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <uapi/linux/netfilter_arp/arp_tables.h>

/* Standard entry. */
struct arpt_standard {
	struct arpt_entry entry;
	struct xt_standard_target target;
};

struct arpt_error {
	struct arpt_entry entry;
	struct xt_error_target target;
};

#define ARPT_ENTRY_INIT(__size)						\
{									\
	.target_offset	= sizeof(struct arpt_entry),			\
	.next_offset	= (__size),					\
}

#define ARPT_STANDARD_INIT(__verdict)					\
{									\
	.entry		= ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \
	.target		= XT_TARGET_INIT(XT_STANDARD_TARGET,		\
					 sizeof(struct xt_standard_target)), \
	.target.verdict	= -(__verdict) - 1,				\
}

#define ARPT_ERROR_INIT							\
{									\
	.entry		= ARPT_ENTRY_INIT(sizeof(struct arpt_error)),	\
	.target		= XT_TARGET_INIT(XT_ERROR_TARGET,		\
					 sizeof(struct xt_error_target)), \
	.target.errorname = "ERROR",					\
}

extern void *arpt_alloc_initial_table(const struct xt_table *);
int arpt_register_table(struct net *net, const struct xt_table *table,
			const struct arpt_replace *repl,
			const struct nf_hook_ops *ops, struct xt_table **res);
void arpt_unregister_table(struct net *net, struct xt_table *table);
void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
				    const struct nf_hook_ops *ops);
extern unsigned int arpt_do_table(struct sk_buff *skb,
				  const struct nf_hook_state *state,
				  struct xt_table *table);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

struct compat_arpt_entry {
	struct arpt_arp arp;
	__u16 target_offset;
	__u16 next_offset;
	compat_uint_t comefrom;
	struct compat_xt_counters counters;
	unsigned char elems[0];
};

static inline struct xt_entry_target *
compat_arpt_get_target(struct compat_arpt_entry *e)
{
	return (void *)e + e->target_offset;
}
#endif /* CONFIG_COMPAT */
#endif /* _ARPTABLES_H */
security.h 0000644 00000147052 14722070374 0006603 0 ustar 00
/*
 * Linux Security plug
 *
 * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
 * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
 * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
 * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
 * Copyright (C) 2016 Mellanox Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Due to this file being licensed under the GPL there is controversy over
 * whether this permits you to write a module that #includes this file
 * without placing your module under the GPL. Please consult a lawyer for
 * advice before doing this.
 *
 */
#ifndef __LINUX_SECURITY_H
#define __LINUX_SECURITY_H

#include <linux/key.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mm.h>

struct linux_binprm;
struct cred;
struct rlimit;
struct kernel_siginfo;
struct sembuf;
struct kern_ipc_perm;
struct audit_context;
struct super_block;
struct inode;
struct dentry;
struct file;
struct vfsmount;
struct path;
struct qstr;
struct iattr;
struct fown_struct;
struct file_operations;
struct msg_msg;
struct xattr;
struct kernfs_node;
struct xfrm_sec_ctx;
struct mm_struct;
struct fs_context;
struct fs_parameter;
enum fs_value_type;

/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
/* If capable should audit the security request */
#define CAP_OPT_NOAUDIT BIT(1)
/* If capable is being called by a setid function */
#define CAP_OPT_INSETID BIT(2)

/* LSM Agnostic defines for fs_context::lsm_flags */
#define SECURITY_LSM_NATIVE_LABELS	1

struct ctl_table;
struct audit_krule;
struct user_namespace;
struct timezone;

enum lsm_event {
	LSM_POLICY_CHANGE,
};

/*
 * These are reasons that can be passed to the security_locked_down()
 * LSM hook. Lockdown reasons that protect kernel integrity (ie, the
 * ability for userland to modify kernel code) are placed before
 * LOCKDOWN_INTEGRITY_MAX. Lockdown reasons that protect kernel
 * confidentiality (ie, the ability for userland to extract
 * information from the running kernel that would otherwise be
 * restricted) are placed before LOCKDOWN_CONFIDENTIALITY_MAX.
 *
 * LSM authors should note that the semantics of any given lockdown
 * reason are not guaranteed to be stable - the same reason may block
 * one set of features in one kernel release, and a slightly different
 * set of features in a later kernel release. LSMs that seek to expose
 * lockdown policy at any level of granularity other than "none",
 * "integrity" or "confidentiality" are responsible for either
 * ensuring that they expose a consistent level of functionality to
 * userland, or ensuring that userland is aware that this is
 * potentially a moving target. It is easy to misuse this information
 * in a way that could break userspace. Please be careful not to do
 * so.
 *
 * If you add to this, remember to extend lockdown_reasons in
 * security/lockdown/lockdown.c.
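 */

/*
 * Illustrative sketch, not part of the original header: the note above
 * says LSMs should expose lockdown policy only as "none", "integrity"
 * or "confidentiality". One hedged way to bucket a reason is to compare
 * it against the *_MAX sentinels of the enum that follows; plain ints
 * are used here only because the enum is declared just below, and the
 * function name is made up for illustration.
 */
static inline const char *example_lockdown_level(int reason,
						 int integrity_max,
						 int confidentiality_max)
{
	if (reason == 0)			/* LOCKDOWN_NONE */
		return "none";
	if (reason < integrity_max)		/* integrity-protecting reasons */
		return "integrity";
	if (reason < confidentiality_max)	/* confidentiality-protecting */
		return "confidentiality";
	return "invalid";
}

/* The lockdown reasons themselves: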
*/ enum lockdown_reason { LOCKDOWN_NONE, LOCKDOWN_MODULE_SIGNATURE, LOCKDOWN_DEV_MEM, LOCKDOWN_EFI_TEST, LOCKDOWN_KEXEC, LOCKDOWN_HIBERNATION, LOCKDOWN_PCI_ACCESS, LOCKDOWN_IOPORT, LOCKDOWN_MSR, LOCKDOWN_ACPI_TABLES, LOCKDOWN_PCMCIA_CIS, LOCKDOWN_TIOCSSERIAL, LOCKDOWN_MODULE_PARAMETERS, LOCKDOWN_MMIOTRACE, LOCKDOWN_DEBUGFS, LOCKDOWN_XMON_WR, LOCKDOWN_KGDB, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, LOCKDOWN_BPF_READ, LOCKDOWN_PERF, LOCKDOWN_TRACEFS, LOCKDOWN_XMON_RW, LOCKDOWN_CONFIDENTIALITY_MAX, }; /* These functions are in security/commoncap.c */ extern int cap_capable(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts); extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz); extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); extern int cap_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); extern int cap_bprm_set_creds(struct linux_binprm *bprm); extern int cap_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); extern int cap_inode_removexattr(struct dentry *dentry, const char *name); extern int cap_inode_need_killpriv(struct dentry *dentry); extern int cap_inode_killpriv(struct dentry *dentry); extern int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc); extern int cap_mmap_addr(unsigned long addr); extern int cap_mmap_file(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags); extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); extern int cap_task_setscheduler(struct task_struct *p); extern int cap_task_setioprio(struct task_struct *p, int ioprio); extern int cap_task_setnice(struct task_struct *p, int nice); extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); struct msghdr; struct sk_buff; struct sock; struct sockaddr; struct socket; struct flowi; struct dst_entry; struct xfrm_selector; struct xfrm_policy; struct xfrm_state; struct xfrm_user_sec_ctx; struct seq_file; struct sctp_endpoint; #ifdef CONFIG_MMU extern unsigned long mmap_min_addr; extern unsigned long dac_mmap_min_addr; #else #define mmap_min_addr 0UL #define dac_mmap_min_addr 0UL #endif /* * Values used in the task_security_ops calls */ /* setuid or setgid, id0 == uid or gid */ #define LSM_SETID_ID 1 /* setreuid or setregid, id0 == real, id1 == eff */ #define LSM_SETID_RE 2 /* setresuid or setresgid, id0 == real, id1 == eff, uid2 == saved */ #define LSM_SETID_RES 4 /* setfsuid or setfsgid, id0 == fsuid or fsgid */ #define LSM_SETID_FS 8 /* Flags for security_task_prlimit(). 
*/ #define LSM_PRLIMIT_READ 1 #define LSM_PRLIMIT_WRITE 2 /* forward declares to avoid warnings */ struct sched_param; struct request_sock; /* bprm->unsafe reasons */ #define LSM_UNSAFE_SHARE 1 #define LSM_UNSAFE_PTRACE 2 #define LSM_UNSAFE_NO_NEW_PRIVS 4 #ifdef CONFIG_MMU extern int mmap_min_addr_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif /* security_inode_init_security callback function to write xattrs */ typedef int (*initxattrs) (struct inode *inode, const struct xattr *xattr_array, void *fs_data); /* Keep the kernel_load_data_id enum in sync with kernel_read_file_id */ #define __data_id_enumify(ENUM, dummy) LOADING_ ## ENUM, #define __data_id_stringify(dummy, str) #str, enum kernel_load_data_id { __kernel_read_file_id(__data_id_enumify) }; static const char * const kernel_load_data_str[] = { __kernel_read_file_id(__data_id_stringify) }; static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id) { if ((unsigned)id >= LOADING_MAX_ID) return kernel_load_data_str[LOADING_UNKNOWN]; return kernel_load_data_str[id]; } #ifdef CONFIG_SECURITY int call_blocking_lsm_notifier(enum lsm_event event, void *data); int register_blocking_lsm_notifier(struct notifier_block *nb); int unregister_blocking_lsm_notifier(struct notifier_block *nb); /* prototypes */ extern int security_init(void); extern int early_security_init(void); /* Security operations */ int security_binder_set_context_mgr(const struct cred *mgr); int security_binder_transaction(const struct cred *from, const struct cred *to); int security_binder_transfer_binder(const struct cred *from, const struct cred *to); int security_binder_transfer_file(const struct cred *from, const struct cred *to, struct file *file); int security_ptrace_access_check(struct task_struct *child, unsigned int mode); int security_ptrace_traceme(struct task_struct *parent); int security_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); int security_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); int security_capable(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts); int security_quotactl(int cmds, int type, int id, struct super_block *sb); int security_quota_on(struct dentry *dentry); int security_syslog(int type); int security_settime64(const struct timespec64 *ts, const struct timezone *tz); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); int security_bprm_set_creds(struct linux_binprm *bprm); int security_bprm_check(struct linux_binprm *bprm); void security_bprm_committing_creds(struct linux_binprm *bprm); void security_bprm_committed_creds(struct linux_binprm *bprm); int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); int security_sb_alloc(struct super_block *sb); void security_sb_free(struct super_block *sb); void security_free_mnt_opts(void **mnt_opts); int security_sb_eat_lsm_opts(char *options, void **mnt_opts); int security_sb_remount(struct super_block *sb, void *mnt_opts); int security_sb_kern_mount(struct super_block *sb); int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); int security_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data); 
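/*
 * Usage sketch, not part of the original header: callers consult these
 * hooks before acting and abort on a non-zero return. A hypothetical
 * wrapper around security_sb_statfs() might look like this; the
 * function name is made up for illustration.
 */
static inline int example_statfs_checked(struct dentry *dentry)
{
	int rc = security_sb_statfs(dentry);	/* LSM veto point */

	if (rc)
		return rc;	/* denied: propagate the LSM's error code */
	/* allowed: the caller would go on to do the real statfs work */
	return 0;
}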
int security_sb_umount(struct vfsmount *mnt, int flags); int security_sb_pivotroot(const struct path *old_path, const struct path *new_path); int security_sb_set_mnt_opts(struct super_block *sb, void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags); int security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags); int security_add_mnt_opt(const char *option, const char *val, int len, void **mnt_opts); int security_move_mount(const struct path *from_path, const struct path *to_path); int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, void **ctx, u32 *ctxlen); int security_dentry_create_files_as(struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, struct cred *new); int security_path_notify(const struct path *path, u64 mask, unsigned int obj_type); int security_inode_alloc(struct inode *inode); void security_inode_free(struct inode *inode); int security_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, initxattrs initxattrs, void *fs_data); int security_old_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const char **name, void **value, size_t *len); int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode); int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int security_inode_unlink(struct inode *dir, struct dentry *dentry); int security_inode_symlink(struct inode *dir, struct dentry *dentry, const char *old_name); int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); int security_inode_rmdir(struct inode *dir, struct dentry *dentry); int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev); int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags); int security_inode_readlink(struct dentry *dentry); int security_inode_follow_link(struct dentry *dentry, struct inode *inode, bool rcu); int security_inode_permission(struct inode *inode, int mask); int security_inode_setattr(struct dentry *dentry, struct iattr *attr); int security_inode_getattr(const struct path *path); int security_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); void security_inode_post_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); int security_inode_getxattr(struct dentry *dentry, const char *name); int security_inode_listxattr(struct dentry *dentry); int security_inode_removexattr(struct dentry *dentry, const char *name); int security_inode_need_killpriv(struct dentry *dentry); int security_inode_killpriv(struct dentry *dentry); int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc); int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags); int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); void security_inode_getsecid(struct inode *inode, u32 *secid); int security_inode_copy_up(struct dentry *src, struct cred **new); int security_inode_copy_up_xattr(const char *name); int security_kernfs_init_security(struct kernfs_node *kn_dir, struct kernfs_node *kn); int security_file_permission(struct file *file, int mask); int security_file_alloc(struct 
file *file); void security_file_free(struct file *file); int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg); int security_file_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg); int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags); int security_mmap_addr(unsigned long addr); int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot); int security_file_lock(struct file *file, unsigned int cmd); int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg); void security_file_set_fowner(struct file *file); int security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig); int security_file_receive(struct file *file); int security_file_open(struct file *file); int security_task_alloc(struct task_struct *task, unsigned long clone_flags); void security_task_free(struct task_struct *task); int security_cred_alloc_blank(struct cred *cred, gfp_t gfp); void security_cred_free(struct cred *cred); int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); void security_transfer_creds(struct cred *new, const struct cred *old); void security_cred_getsecid(const struct cred *c, u32 *secid); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); int security_kernel_module_request(char *kmod_name); int security_kernel_load_data(enum kernel_load_data_id id); int security_kernel_read_file(struct file *file, enum kernel_read_file_id id); int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id id); int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); int security_task_getsid(struct task_struct *p); void security_task_getsecid(struct task_struct *p, u32 *secid); int security_task_setnice(struct task_struct *p, int nice); int security_task_setioprio(struct task_struct *p, int ioprio); int security_task_getioprio(struct task_struct *p); int security_task_prlimit(const struct cred *cred, const struct cred *tcred, unsigned int flags); int security_task_setrlimit(struct task_struct *p, unsigned int resource, struct rlimit *new_rlim); int security_task_setscheduler(struct task_struct *p); int security_task_getscheduler(struct task_struct *p); int security_task_movememory(struct task_struct *p); int security_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred); int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); void security_task_to_inode(struct task_struct *p, struct inode *inode); int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid); int security_msg_msg_alloc(struct msg_msg *msg); void security_msg_msg_free(struct msg_msg *msg); int security_msg_queue_alloc(struct kern_ipc_perm *msq); void security_msg_queue_free(struct kern_ipc_perm *msq); int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg); int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd); int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, struct msg_msg *msg, int msqflg); int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg, struct task_struct *target, long type, 
int mode); int security_shm_alloc(struct kern_ipc_perm *shp); void security_shm_free(struct kern_ipc_perm *shp); int security_shm_associate(struct kern_ipc_perm *shp, int shmflg); int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd); int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg); int security_sem_alloc(struct kern_ipc_perm *sma); void security_sem_free(struct kern_ipc_perm *sma); int security_sem_associate(struct kern_ipc_perm *sma, int semflg); int security_sem_semctl(struct kern_ipc_perm *sma, int cmd); int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, unsigned nsops, int alter); void security_d_instantiate(struct dentry *dentry, struct inode *inode); int security_getprocattr(struct task_struct *p, const char *lsm, char *name, char **value); int security_setprocattr(const char *lsm, const char *name, void *value, size_t size); int security_netlink_send(struct sock *sk, struct sk_buff *skb); int security_ismaclabel(const char *name); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); void security_release_secctx(char *secdata, u32 seclen); void security_inode_invalidate_secctx(struct inode *inode); int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); int security_locked_down(enum lockdown_reason what); int security_lock_kernel_down(const char *where, enum lockdown_reason level); #else /* CONFIG_SECURITY */ static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data) { return 0; } static inline int register_blocking_lsm_notifier(struct notifier_block *nb) { return 0; } static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb) { return 0; } static inline void security_free_mnt_opts(void **mnt_opts) { } /* * This is the default capabilities functionality. Most of these functions * are just stubbed out, but a few must call the proper capable code. 
*/ static inline int security_init(void) { return 0; } static inline int early_security_init(void) { return 0; } static inline int security_binder_set_context_mgr(const struct cred *mgr) { return 0; } static inline int security_binder_transaction(const struct cred *from, const struct cred *to) { return 0; } static inline int security_binder_transfer_binder(const struct cred *from, const struct cred *to) { return 0; } static inline int security_binder_transfer_file(const struct cred *from, const struct cred *to, struct file *file) { return 0; } static inline int security_ptrace_access_check(struct task_struct *child, unsigned int mode) { return cap_ptrace_access_check(child, mode); } static inline int security_ptrace_traceme(struct task_struct *parent) { return cap_ptrace_traceme(parent); } static inline int security_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { return cap_capget(target, effective, inheritable, permitted); } static inline int security_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted) { return cap_capset(new, old, effective, inheritable, permitted); } static inline int security_capable(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts) { return cap_capable(cred, ns, cap, opts); } static inline int security_quotactl(int cmds, int type, int id, struct super_block *sb) { return 0; } static inline int security_quota_on(struct dentry *dentry) { return 0; } static inline int security_syslog(int type) { return 0; } static inline int security_settime64(const struct timespec64 *ts, const struct timezone *tz) { return cap_settime(ts, tz); } static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) { return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages)); } static inline int security_bprm_set_creds(struct linux_binprm *bprm) { return cap_bprm_set_creds(bprm); } static inline int security_bprm_check(struct linux_binprm *bprm) { return 0; } static inline void security_bprm_committing_creds(struct linux_binprm *bprm) { } static inline void security_bprm_committed_creds(struct linux_binprm *bprm) { } static inline int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc) { return 0; } static inline int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { return -ENOPARAM; } static inline int security_sb_alloc(struct super_block *sb) { return 0; } static inline void security_sb_free(struct super_block *sb) { } static inline int security_sb_eat_lsm_opts(char *options, void **mnt_opts) { return 0; } static inline int security_sb_remount(struct super_block *sb, void *mnt_opts) { return 0; } static inline int security_sb_kern_mount(struct super_block *sb) { return 0; } static inline int security_sb_show_options(struct seq_file *m, struct super_block *sb) { return 0; } static inline int security_sb_statfs(struct dentry *dentry) { return 0; } static inline int security_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data) { return 0; } static inline int security_sb_umount(struct vfsmount *mnt, int flags) { return 0; } static inline int security_sb_pivotroot(const struct path *old_path, const struct path *new_path) { return 0; } static inline int security_sb_set_mnt_opts(struct super_block *sb, void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) { 
	return 0;
}

static inline int security_add_mnt_opt(const char *option, const char *val,
				       int len, void **mnt_opts)
{
	return 0;
}

static inline int security_move_mount(const struct path *from_path,
				      const struct path *to_path)
{
	return 0;
}

static inline int security_path_notify(const struct path *path, u64 mask,
				       unsigned int obj_type)
{
	return 0;
}

static inline int security_inode_alloc(struct inode *inode)
{
	return 0;
}

static inline void security_inode_free(struct inode *inode)
{
}

static inline int security_dentry_init_security(struct dentry *dentry,
						int mode,
						const struct qstr *name,
						void **ctx,
						u32 *ctxlen)
{
	return -EOPNOTSUPP;
}

static inline int security_dentry_create_files_as(struct dentry *dentry,
						  int mode, struct qstr *name,
						  const struct cred *old,
						  struct cred *new)
{
	return 0;
}

static inline int security_inode_init_security(struct inode *inode,
						struct inode *dir,
						const struct qstr *qstr,
						const initxattrs xattrs,
						void *fs_data)
{
	return 0;
}

static inline int security_old_inode_init_security(struct inode *inode,
						   struct inode *dir,
						   const struct qstr *qstr,
						   const char **name,
						   void **value, size_t *len)
{
	return -EOPNOTSUPP;
}

static inline int security_inode_create(struct inode *dir,
					struct dentry *dentry,
					umode_t mode)
{
	return 0;
}

static inline int security_inode_link(struct dentry *old_dentry,
				      struct inode *dir,
				      struct dentry *new_dentry)
{
	return 0;
}

static inline int security_inode_unlink(struct inode *dir,
					struct dentry *dentry)
{
	return 0;
}

static inline int security_inode_symlink(struct inode *dir,
					 struct dentry *dentry,
					 const char *old_name)
{
	return 0;
}

static inline int security_inode_mkdir(struct inode *dir,
				       struct dentry *dentry,
				       umode_t mode)
{
	return 0;
}

static inline int security_inode_rmdir(struct inode *dir,
				       struct dentry *dentry)
{
	return 0;
}

static inline int security_inode_mknod(struct inode *dir,
				       struct dentry *dentry,
				       umode_t mode, dev_t dev)
{
	return 0;
}

static inline int security_inode_rename(struct inode *old_dir,
					struct dentry *old_dentry,
					struct inode *new_dir,
					struct dentry *new_dentry,
					unsigned int flags)
{
	return 0;
}

static inline int security_inode_readlink(struct dentry *dentry)
{
	return 0;
}

static inline int security_inode_follow_link(struct dentry *dentry,
					     struct inode *inode,
					     bool rcu)
{
	return 0;
}

static inline int security_inode_permission(struct inode *inode, int mask)
{
	return 0;
}

static inline int security_inode_setattr(struct dentry *dentry,
					 struct iattr *attr)
{
	return 0;
}

static inline int security_inode_getattr(const struct path *path)
{
	return 0;
}

static inline int security_inode_setxattr(struct dentry *dentry,
		const char *name, const void *value, size_t size, int flags)
{
	return cap_inode_setxattr(dentry, name, value, size, flags);
}

static inline void security_inode_post_setxattr(struct dentry *dentry,
		const char *name, const void *value, size_t size, int flags)
{
}

static inline int security_inode_getxattr(struct dentry *dentry,
			const char *name)
{
	return 0;
}

static inline int security_inode_listxattr(struct dentry *dentry)
{
	return 0;
}

static inline int security_inode_removexattr(struct dentry *dentry,
			const char *name)
{
	return cap_inode_removexattr(dentry, name);
}

static inline int security_inode_need_killpriv(struct dentry *dentry)
{
	return cap_inode_need_killpriv(dentry);
}

static inline int security_inode_killpriv(struct dentry *dentry)
{
	return
cap_inode_killpriv(dentry); } static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) { return cap_inode_getsecurity(inode, name, buffer, alloc); } static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) { return -EOPNOTSUPP; } static inline int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) { return 0; } static inline void security_inode_getsecid(struct inode *inode, u32 *secid) { *secid = 0; } static inline int security_inode_copy_up(struct dentry *src, struct cred **new) { return 0; } static inline int security_kernfs_init_security(struct kernfs_node *kn_dir, struct kernfs_node *kn) { return 0; } static inline int security_inode_copy_up_xattr(const char *name) { return -EOPNOTSUPP; } static inline int security_file_permission(struct file *file, int mask) { return 0; } static inline int security_file_alloc(struct file *file) { return 0; } static inline void security_file_free(struct file *file) { } static inline int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return 0; } static inline int security_file_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { return 0; } static inline int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags) { return 0; } static inline int security_mmap_addr(unsigned long addr) { return cap_mmap_addr(addr); } static inline int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { return 0; } static inline int security_file_lock(struct file *file, unsigned int cmd) { return 0; } static inline int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { return 0; } static inline void security_file_set_fowner(struct file *file) { return; } static inline int security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig) { return 0; } static inline int security_file_receive(struct file *file) { return 0; } static inline int security_file_open(struct file *file) { return 0; } static inline int security_task_alloc(struct task_struct *task, unsigned long clone_flags) { return 0; } static inline void security_task_free(struct task_struct *task) { } static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) { return 0; } static inline void security_cred_free(struct cred *cred) { } static inline int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp) { return 0; } static inline void security_transfer_creds(struct cred *new, const struct cred *old) { } static inline void security_cred_getsecid(const struct cred *c, u32 *secid) { *secid = 0; } static inline int security_kernel_act_as(struct cred *cred, u32 secid) { return 0; } static inline int security_kernel_create_files_as(struct cred *cred, struct inode *inode) { return 0; } static inline int security_kernel_module_request(char *kmod_name) { return 0; } static inline int security_kernel_load_data(enum kernel_load_data_id id) { return 0; } static inline int security_kernel_read_file(struct file *file, enum kernel_read_file_id id) { return 0; } static inline int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id id) { return 0; } static inline int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags) { return cap_task_fix_setuid(new, old, flags); } static inline int 
security_task_setpgid(struct task_struct *p, pid_t pgid) { return 0; } static inline int security_task_getpgid(struct task_struct *p) { return 0; } static inline int security_task_getsid(struct task_struct *p) { return 0; } static inline void security_task_getsecid(struct task_struct *p, u32 *secid) { *secid = 0; } static inline int security_task_setnice(struct task_struct *p, int nice) { return cap_task_setnice(p, nice); } static inline int security_task_setioprio(struct task_struct *p, int ioprio) { return cap_task_setioprio(p, ioprio); } static inline int security_task_getioprio(struct task_struct *p) { return 0; } static inline int security_task_prlimit(const struct cred *cred, const struct cred *tcred, unsigned int flags) { return 0; } static inline int security_task_setrlimit(struct task_struct *p, unsigned int resource, struct rlimit *new_rlim) { return 0; } static inline int security_task_setscheduler(struct task_struct *p) { return cap_task_setscheduler(p); } static inline int security_task_getscheduler(struct task_struct *p) { return 0; } static inline int security_task_movememory(struct task_struct *p) { return 0; } static inline int security_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred) { return 0; } static inline int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { return cap_task_prctl(option, arg2, arg3, arg4, arg5); } static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) { } static inline int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag) { return 0; } static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid) { *secid = 0; } static inline int security_msg_msg_alloc(struct msg_msg *msg) { return 0; } static inline void security_msg_msg_free(struct msg_msg *msg) { } static inline int security_msg_queue_alloc(struct kern_ipc_perm *msq) { return 0; } static inline void security_msg_queue_free(struct kern_ipc_perm *msq) { } static inline int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg) { return 0; } static inline int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd) { return 0; } static inline int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, struct msg_msg *msg, int msqflg) { return 0; } static inline int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode) { return 0; } static inline int security_shm_alloc(struct kern_ipc_perm *shp) { return 0; } static inline void security_shm_free(struct kern_ipc_perm *shp) { } static inline int security_shm_associate(struct kern_ipc_perm *shp, int shmflg) { return 0; } static inline int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd) { return 0; } static inline int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg) { return 0; } static inline int security_sem_alloc(struct kern_ipc_perm *sma) { return 0; } static inline void security_sem_free(struct kern_ipc_perm *sma) { } static inline int security_sem_associate(struct kern_ipc_perm *sma, int semflg) { return 0; } static inline int security_sem_semctl(struct kern_ipc_perm *sma, int cmd) { return 0; } static inline int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, unsigned nsops, int alter) { return 0; } static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode) { } static inline int security_getprocattr(struct 
task_struct *p, const char *lsm,
					   char *name, char **value)
{
	return -EINVAL;
}

static inline int security_setprocattr(const char *lsm, const char *name,
				       void *value, size_t size)
{
	return -EINVAL;
}

static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}

static inline int security_ismaclabel(const char *name)
{
	return 0;
}

static inline int security_secid_to_secctx(u32 secid, char **secdata,
					   u32 *seclen)
{
	return -EOPNOTSUPP;
}

static inline int security_secctx_to_secid(const char *secdata,
					   u32 seclen,
					   u32 *secid)
{
	return -EOPNOTSUPP;
}

static inline void security_release_secctx(char *secdata, u32 seclen)
{
}

static inline void security_inode_invalidate_secctx(struct inode *inode)
{
}

static inline int security_inode_notifysecctx(struct inode *inode,
					      void *ctx, u32 ctxlen)
{
	return -EOPNOTSUPP;
}

static inline int security_inode_setsecctx(struct dentry *dentry,
					   void *ctx, u32 ctxlen)
{
	return -EOPNOTSUPP;
}

static inline int security_inode_getsecctx(struct inode *inode,
					   void **ctx, u32 *ctxlen)
{
	return -EOPNOTSUPP;
}

static inline int security_locked_down(enum lockdown_reason what)
{
	return 0;
}

static inline int security_lock_kernel_down(const char *where,
					    enum lockdown_reason level)
{
	return 0;
}
#endif	/* CONFIG_SECURITY */

#ifdef CONFIG_SECURITY_NETWORK

int security_unix_stream_connect(struct sock *sock, struct sock *other,
				 struct sock *newsk);
int security_unix_may_send(struct socket *sock, struct socket *other);
int security_socket_create(int family, int type, int protocol, int kern);
int security_socket_post_create(struct socket *sock, int family,
				int type, int protocol, int kern);
int security_socket_socketpair(struct socket *socka, struct socket *sockb);
int security_socket_bind(struct socket *sock, struct sockaddr *address,
			 int addrlen);
int security_socket_connect(struct socket *sock, struct sockaddr *address,
			    int addrlen);
int security_socket_listen(struct socket *sock, int backlog);
int security_socket_accept(struct socket *sock, struct socket *newsock);
int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size);
int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
			    int size, int flags);
int security_socket_getsockname(struct socket *sock);
int security_socket_getpeername(struct socket *sock);
int security_socket_getsockopt(struct socket *sock, int level, int optname);
int security_socket_setsockopt(struct socket *sock, int level, int optname);
int security_socket_shutdown(struct socket *sock, int how);
int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb);
int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
				      int __user *optlen, unsigned len);
int security_socket_getpeersec_dgram(struct socket *sock,
				     struct sk_buff *skb, u32 *secid);
int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
void security_sk_free(struct sock *sk);
void security_sk_clone(const struct sock *sk, struct sock *newsk);
void security_sk_classify_flow(struct sock *sk, struct flowi *fl);
void security_req_classify_flow(const struct request_sock *req,
				struct flowi *fl);
void security_sock_graft(struct sock *sk, struct socket *parent);
int security_inet_conn_request(struct sock *sk,
			       struct sk_buff *skb, struct request_sock *req);
void security_inet_csk_clone(struct sock *newsk,
			     const struct request_sock *req);
void security_inet_conn_established(struct sock *sk,
				    struct sk_buff *skb);
int security_secmark_relabel_packet(u32 secid);
void security_secmark_refcount_inc(void);
void
security_secmark_refcount_dec(void); int security_tun_dev_alloc_security(void **security); void security_tun_dev_free_security(void *security); int security_tun_dev_create(void); int security_tun_dev_attach_queue(void *security); int security_tun_dev_attach(struct sock *sk, void *security); int security_tun_dev_open(void *security); int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb); int security_sctp_bind_connect(struct sock *sk, int optname, struct sockaddr *address, int addrlen); void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk, struct sock *newsk); #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { return 0; } static inline int security_unix_may_send(struct socket *sock, struct socket *other) { return 0; } static inline int security_socket_create(int family, int type, int protocol, int kern) { return 0; } static inline int security_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern) { return 0; } static inline int security_socket_socketpair(struct socket *socka, struct socket *sockb) { return 0; } static inline int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) { return 0; } static inline int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen) { return 0; } static inline int security_socket_listen(struct socket *sock, int backlog) { return 0; } static inline int security_socket_accept(struct socket *sock, struct socket *newsock) { return 0; } static inline int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size) { return 0; } static inline int security_socket_recvmsg(struct socket *sock, struct msghdr *msg, int size, int flags) { return 0; } static inline int security_socket_getsockname(struct socket *sock) { return 0; } static inline int security_socket_getpeername(struct socket *sock) { return 0; } static inline int security_socket_getsockopt(struct socket *sock, int level, int optname) { return 0; } static inline int security_socket_setsockopt(struct socket *sock, int level, int optname) { return 0; } static inline int security_socket_shutdown(struct socket *sock, int how) { return 0; } static inline int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { return 0; } static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval, int __user *optlen, unsigned len) { return -ENOPROTOOPT; } static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { return -ENOPROTOOPT; } static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) { return 0; } static inline void security_sk_free(struct sock *sk) { } static inline void security_sk_clone(const struct sock *sk, struct sock *newsk) { } static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl) { } static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) { } static inline void security_sock_graft(struct sock *sk, struct socket *parent) { } static inline int security_inet_conn_request(struct sock *sk, struct sk_buff *skb, struct request_sock *req) { return 0; } static inline void security_inet_csk_clone(struct sock *newsk, const struct request_sock *req) { } static inline void security_inet_conn_established(struct sock *sk, struct sk_buff *skb) { } static inline int security_secmark_relabel_packet(u32 secid) { return 
0; } static inline void security_secmark_refcount_inc(void) { } static inline void security_secmark_refcount_dec(void) { } static inline int security_tun_dev_alloc_security(void **security) { return 0; } static inline void security_tun_dev_free_security(void *security) { } static inline int security_tun_dev_create(void) { return 0; } static inline int security_tun_dev_attach_queue(void *security) { return 0; } static inline int security_tun_dev_attach(struct sock *sk, void *security) { return 0; } static inline int security_tun_dev_open(void *security) { return 0; } static inline int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb) { return 0; } static inline int security_sctp_bind_connect(struct sock *sk, int optname, struct sockaddr *address, int addrlen) { return 0; } static inline void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk, struct sock *newsk) { } #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey); int security_ib_endport_manage_subnet(void *sec, const char *name, u8 port_num); int security_ib_alloc_security(void **sec); void security_ib_free_security(void *sec); #else /* CONFIG_SECURITY_INFINIBAND */ static inline int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey) { return 0; } static inline int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num) { return 0; } static inline int security_ib_alloc_security(void **sec) { return 0; } static inline void security_ib_free_security(void *sec) { } #endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx); int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid); int security_xfrm_state_delete(struct xfrm_state *x); void security_xfrm_state_free(struct xfrm_state *x); int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi *fl); int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid); void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); #else /* CONFIG_SECURITY_NETWORK_XFRM */ static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp) { return 0; } static inline int security_xfrm_policy_clone(struct xfrm_sec_ctx *old, struct xfrm_sec_ctx **new_ctxp) { return 0; } static inline void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx) { } static inline int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) { return 0; } static inline int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) { return 0; } static inline int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) { return 0; } static inline void security_xfrm_state_free(struct xfrm_state *x) { } static inline int security_xfrm_state_delete(struct xfrm_state *x) { return 0; } static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, 
u8 dir) { return 0; } static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi *fl) { return 1; } static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) { return 0; } static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) { } #endif /* CONFIG_SECURITY_NETWORK_XFRM */ #ifdef CONFIG_SECURITY_PATH int security_path_unlink(const struct path *dir, struct dentry *dentry); int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode); int security_path_rmdir(const struct path *dir, struct dentry *dentry); int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev); int security_path_truncate(const struct path *path); int security_path_symlink(const struct path *dir, struct dentry *dentry, const char *old_name); int security_path_link(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry); int security_path_rename(const struct path *old_dir, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry, unsigned int flags); int security_path_chmod(const struct path *path, umode_t mode); int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid); int security_path_chroot(const struct path *path); #else /* CONFIG_SECURITY_PATH */ static inline int security_path_unlink(const struct path *dir, struct dentry *dentry) { return 0; } static inline int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode) { return 0; } static inline int security_path_rmdir(const struct path *dir, struct dentry *dentry) { return 0; } static inline int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { return 0; } static inline int security_path_truncate(const struct path *path) { return 0; } static inline int security_path_symlink(const struct path *dir, struct dentry *dentry, const char *old_name) { return 0; } static inline int security_path_link(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry) { return 0; } static inline int security_path_rename(const struct path *old_dir, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry, unsigned int flags) { return 0; } static inline int security_path_chmod(const struct path *path, umode_t mode) { return 0; } static inline int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid) { return 0; } static inline int security_path_chroot(const struct path *path) { return 0; } #endif /* CONFIG_SECURITY_PATH */ #ifdef CONFIG_KEYS #ifdef CONFIG_SECURITY int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); void security_key_free(struct key *key); int security_key_permission(key_ref_t key_ref, const struct cred *cred, unsigned perm); int security_key_getsecurity(struct key *key, char **_buffer); #else static inline int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags) { return 0; } static inline void security_key_free(struct key *key) { } static inline int security_key_permission(key_ref_t key_ref, const struct cred *cred, unsigned perm) { return 0; } static inline int security_key_getsecurity(struct key *key, char **_buffer) { *_buffer = NULL; return 0; } #endif #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT #ifdef CONFIG_SECURITY int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule); int 
security_audit_rule_known(struct audit_krule *krule); int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule); void security_audit_rule_free(void *lsmrule); #else static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule) { return 0; } static inline int security_audit_rule_known(struct audit_krule *krule) { return 0; } static inline int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule) { return 0; } static inline void security_audit_rule_free(void *lsmrule) { } #endif /* CONFIG_SECURITY */ #endif /* CONFIG_AUDIT */ #ifdef CONFIG_SECURITYFS extern struct dentry *securityfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); struct dentry *securityfs_create_symlink(const char *name, struct dentry *parent, const char *target, const struct inode_operations *iops); extern void securityfs_remove(struct dentry *dentry); #else /* CONFIG_SECURITYFS */ static inline struct dentry *securityfs_create_dir(const char *name, struct dentry *parent) { return ERR_PTR(-ENODEV); } static inline struct dentry *securityfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { return ERR_PTR(-ENODEV); } static inline struct dentry *securityfs_create_symlink(const char *name, struct dentry *parent, const char *target, const struct inode_operations *iops) { return ERR_PTR(-ENODEV); } static inline void securityfs_remove(struct dentry *dentry) {} #endif #ifdef CONFIG_BPF_SYSCALL union bpf_attr; struct bpf_map; struct bpf_prog; struct bpf_prog_aux; #ifdef CONFIG_SECURITY extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size); extern int security_bpf_map(struct bpf_map *map, fmode_t fmode); extern int security_bpf_prog(struct bpf_prog *prog); extern int security_bpf_map_alloc(struct bpf_map *map); extern void security_bpf_map_free(struct bpf_map *map); extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux); extern void security_bpf_prog_free(struct bpf_prog_aux *aux); #else static inline int security_bpf(int cmd, union bpf_attr *attr, unsigned int size) { return 0; } static inline int security_bpf_map(struct bpf_map *map, fmode_t fmode) { return 0; } static inline int security_bpf_prog(struct bpf_prog *prog) { return 0; } static inline int security_bpf_map_alloc(struct bpf_map *map) { return 0; } static inline void security_bpf_map_free(struct bpf_map *map) { } static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux) { return 0; } static inline void security_bpf_prog_free(struct bpf_prog_aux *aux) { } #endif /* CONFIG_SECURITY */ #endif /* CONFIG_BPF_SYSCALL */ #endif /* ! 
__LINUX_SECURITY_H */ pl320-ipc.h 0000644 00000000321 14722070374 0006330 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* */ int pl320_ipc_transmit(u32 *data); int pl320_ipc_register_notifier(struct notifier_block *nb); int pl320_ipc_unregister_notifier(struct notifier_block *nb); kallsyms.h 0000644 00000010421 14722070374 0006560 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* Rewritten and vastly simplified by Rusty Russell for in-kernel * module loader: * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation */ #ifndef _LINUX_KALLSYMS_H #define _LINUX_KALLSYMS_H #include <linux/errno.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/mm.h> #include <linux/module.h> #include <asm/sections.h> #define KSYM_NAME_LEN 128 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) struct cred; struct module; static inline int is_kernel_inittext(unsigned long addr) { if (addr >= (unsigned long)_sinittext && addr <= (unsigned long)_einittext) return 1; return 0; } static inline int is_kernel_text(unsigned long addr) { if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || arch_is_kernel_text(addr)) return 1; return in_gate_area_no_mm(addr); } static inline int is_kernel(unsigned long addr) { if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) return 1; return in_gate_area_no_mm(addr); } static inline int is_ksym_addr(unsigned long addr) { if (IS_ENABLED(CONFIG_KALLSYMS_ALL)) return is_kernel(addr); return is_kernel_text(addr) || is_kernel_inittext(addr); } static inline void *dereference_symbol_descriptor(void *ptr) { #ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR struct module *mod; ptr = dereference_kernel_function_descriptor(ptr); if (is_ksym_addr((unsigned long)ptr)) return ptr; preempt_disable(); mod = __module_address((unsigned long)ptr); preempt_enable(); if (mod) ptr = dereference_module_function_descriptor(mod, ptr); #endif return ptr; } #ifdef CONFIG_KALLSYMS /* Lookup the address for a symbol. Returns 0 if not found. */ unsigned long kallsyms_lookup_name(const char *name); /* Call a function on each kallsyms symbol in the core kernel */ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, unsigned long), void *data); extern int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset); /* Lookup an address. modname is set to NULL if it's in the kernel. */ const char *kallsyms_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf); /* Look up a kernel symbol and return it in a text buffer. */ extern int sprint_symbol(char *buffer, unsigned long address); extern int sprint_symbol_no_offset(char *buffer, unsigned long address); extern int sprint_backtrace(char *buffer, unsigned long address); int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); /* How and when do we show kallsyms values? 
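 */

/*
 * Illustrative sketch, not part of the original header: resolving a
 * kernel text address into "name+offset/size [module]" form with
 * sprint_symbol(), bounded by KSYM_SYMBOL_LEN. The function name is
 * hypothetical.
 */
static inline void example_print_symbol(unsigned long addr)
{
	char buf[KSYM_SYMBOL_LEN];

	sprint_symbol(buf, addr);	/* fills buf, returns string length */
	printk("resolved: %s\n", buf);
}

/* kallsyms_show_value() answers the policy question above: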
*/ extern bool kallsyms_show_value(const struct cred *cred); #else /* !CONFIG_KALLSYMS */ static inline unsigned long kallsyms_lookup_name(const char *name) { return 0; } static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, unsigned long), void *data) { return 0; } static inline int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { return 0; } static inline const char *kallsyms_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf) { return NULL; } static inline int sprint_symbol(char *buffer, unsigned long addr) { *buffer = '\0'; return 0; } static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr) { *buffer = '\0'; return 0; } static inline int sprint_backtrace(char *buffer, unsigned long addr) { *buffer = '\0'; return 0; } static inline int lookup_symbol_name(unsigned long addr, char *symname) { return -ERANGE; } static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name) { return -ERANGE; } static inline bool kallsyms_show_value(const struct cred *cred) { return false; } #endif /*CONFIG_KALLSYMS*/ static inline void print_ip_sym(unsigned long ip) { printk("[<%px>] %pS\n", (void *) ip, (void *) ip); } #endif /*_LINUX_KALLSYMS_H*/ linkmode.h 0000644 00000003711 14722070374 0006527 0 ustar 00 #ifndef __LINKMODE_H #define __LINKMODE_H #include <linux/bitmap.h> #include <linux/ethtool.h> #include <uapi/linux/ethtool.h> static inline void linkmode_zero(unsigned long *dst) { bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); } static inline void linkmode_copy(unsigned long *dst, const unsigned long *src) { bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS); } static inline void linkmode_and(unsigned long *dst, const unsigned long *a, const unsigned long *b) { bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); } static inline void linkmode_or(unsigned long *dst, const unsigned long *a, const unsigned long *b) { bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); } static inline bool linkmode_empty(const unsigned long *src) { return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS); } static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1, const unsigned long *src2) { return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); } static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) { __set_bit(nr, addr); } static inline void linkmode_set_bit_array(const int *array, int array_size, unsigned long *addr) { int i; for (i = 0; i < array_size; i++) linkmode_set_bit(array[i], addr); } static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr) { __clear_bit(nr, addr); } static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr, int set) { if (set) linkmode_set_bit(nr, addr); else linkmode_clear_bit(nr, addr); } static inline void linkmode_change_bit(int nr, volatile unsigned long *addr) { __change_bit(nr, addr); } static inline int linkmode_test_bit(int nr, volatile unsigned long *addr) { return test_bit(nr, addr); } static inline int linkmode_equal(const unsigned long *src1, const unsigned long *src2) { return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); } #endif /* __LINKMODE_H */ input-polldev.h 0000644 00000004032 14722070374 0007524 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _INPUT_POLLDEV_H #define _INPUT_POLLDEV_H /* * Copyright (c) 2007 Dmitry 
Torokhov */ #include <linux/input.h> #include <linux/workqueue.h> /** * struct input_polled_dev - simple polled input device * @private: private driver data. * @open: driver-supplied method that prepares device for polling * (enables the device and maybe flushes device state). * @close: driver-supplied method that is called when device is no * longer being polled. Used to put device into low power mode. * @poll: driver-supplied method that polls the device and posts * input events (mandatory). * @poll_interval: specifies how often the poll() method should be called. * Defaults to 500 msec unless overridden when registering the device. * @poll_interval_max: specifies upper bound for the poll interval. * Defaults to the initial value of @poll_interval. * @poll_interval_min: specifies lower bound for the poll interval. * Defaults to 0. * @input: input device structure associated with the polled device. * Must be properly initialized by the driver (id, name, phys, bits). * * A polled input device provides a skeleton for supporting simple input * devices that do not raise interrupts but have to be periodically * scanned or polled to detect changes in their state. */ struct input_polled_dev { void *private; void (*open)(struct input_polled_dev *dev); void (*close)(struct input_polled_dev *dev); void (*poll)(struct input_polled_dev *dev); unsigned int poll_interval; /* msec */ unsigned int poll_interval_max; /* msec */ unsigned int poll_interval_min; /* msec */ struct input_dev *input; /* private: */ struct delayed_work work; bool devres_managed; }; struct input_polled_dev *input_allocate_polled_device(void); struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev); void input_free_polled_device(struct input_polled_dev *dev); int input_register_polled_device(struct input_polled_dev *dev); void input_unregister_polled_device(struct input_polled_dev *dev); #endif pstore_ram.h 0000644 00000007502 14722070374 0007102 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com> * Copyright (C) 2011 Kees Cook <keescook@chromium.org> * Copyright (C) 2011 Google, Inc. */ #ifndef __LINUX_PSTORE_RAM_H__ #define __LINUX_PSTORE_RAM_H__ #include <linux/compiler.h> #include <linux/device.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/pstore.h> #include <linux/types.h> /* * Choose whether access to the RAM zone requires locking or not. If a zone * can be written to from different CPUs like with ftrace for example, then * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required. */ #define PRZ_FLAG_NO_LOCK BIT(0) /* * If a PRZ should only have a single-boot lifetime, this marks it as * getting wiped after its contents get copied out after boot.
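 *
 * Illustrative sketch only (paddr, size, sig, ecc_info and memtype are
 * hypothetical; persistent_ram_new() is declared later in this header):
 * a dmesg zone that must not survive into the next boot could be
 * created with
 *
 *	prz = persistent_ram_new(paddr, size, sig, &ecc_info,
 *				 memtype, PRZ_FLAG_ZAP_OLD, "dmesg");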
*/ #define PRZ_FLAG_ZAP_OLD BIT(1) struct persistent_ram_buffer; struct rs_control; struct persistent_ram_ecc_info { int block_size; int ecc_size; int symsize; int poly; uint16_t *par; }; /** * struct persistent_ram_zone - Details of a persistent RAM zone (PRZ) * used as a pstore backend * * @paddr: physical address of the mapped RAM area * @size: size of mapping * @label: unique name of this PRZ * @type: frontend type for this PRZ * @flags: holds PRZ_FLAGS_* bits * * @buffer_lock: * locks access to @buffer "size" bytes and "start" offset * @buffer: * pointer to actual RAM area managed by this PRZ * @buffer_size: * bytes in @buffer->data (not including any trailing ECC bytes) * * @par_buffer: * pointer into @buffer->data containing ECC bytes for @buffer->data * @par_header: * pointer into @buffer->data containing ECC bytes for @buffer header * (i.e. all fields up to @data) * @rs_decoder: * RSLIB instance for doing ECC calculations * @corrected_bytes: * ECC corrected bytes accounting since boot * @bad_blocks: * ECC uncorrectable bytes accounting since boot * @ecc_info: * ECC configuration details * * @old_log: * saved copy of @buffer->data prior to most recent wipe * @old_log_size: * bytes contained in @old_log * */ struct persistent_ram_zone { phys_addr_t paddr; size_t size; void *vaddr; char *label; enum pstore_type_id type; u32 flags; raw_spinlock_t buffer_lock; struct persistent_ram_buffer *buffer; size_t buffer_size; char *par_buffer; char *par_header; struct rs_control *rs_decoder; int corrected_bytes; int bad_blocks; struct persistent_ram_ecc_info ecc_info; char *old_log; size_t old_log_size; }; struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, u32 sig, struct persistent_ram_ecc_info *ecc_info, unsigned int memtype, u32 flags, char *label); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); int persistent_ram_write(struct persistent_ram_zone *prz, const void *s, unsigned int count); int persistent_ram_write_user(struct persistent_ram_zone *prz, const void __user *s, unsigned int count); void persistent_ram_save_old(struct persistent_ram_zone *prz); size_t persistent_ram_old_size(struct persistent_ram_zone *prz); void *persistent_ram_old(struct persistent_ram_zone *prz); void persistent_ram_free_old(struct persistent_ram_zone *prz); ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, char *str, size_t len); /* * Ramoops platform data * @mem_size memory size for ramoops * @mem_address physical memory address to contain ramoops */ #define RAMOOPS_FLAG_FTRACE_PER_CPU BIT(0) struct ramoops_platform_data { unsigned long mem_size; phys_addr_t mem_address; unsigned int mem_type; unsigned long record_size; unsigned long console_size; unsigned long ftrace_size; unsigned long pmsg_size; int dump_oops; u32 flags; struct persistent_ram_ecc_info ecc_info; }; #endif atm_suni.h 0000644 00000000375 14722070374 0006547 0 ustar 00 /* atm_suni.h - Driver-specific declarations of the SUNI driver (for use by driver-specific utilities) */ /* Written 1998,2000 by Werner Almesberger, EPFL ICA */ #ifndef LINUX_ATM_SUNI_H #define LINUX_ATM_SUNI_H /* everything obsoleted */ #endif serdev.h 0000644 00000022716 14722070374 0006223 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org> */ #ifndef _LINUX_SERDEV_H #define _LINUX_SERDEV_H #include <linux/types.h> #include <linux/device.h> #include <linux/termios.h> #include 
<linux/delay.h> struct serdev_controller; struct serdev_device; /* * serdev device structures */ /** * struct serdev_device_ops - Callback operations for a serdev device * @receive_buf: Function called with data received from device; * returns number of bytes accepted; may sleep. * @write_wakeup: Function called when ready to transmit more data; must * not sleep. */ struct serdev_device_ops { int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); void (*write_wakeup)(struct serdev_device *); }; /** * struct serdev_device - Basic representation of a serdev device * @dev: Driver model representation of the device. * @nr: Device number on serdev bus. * @ctrl: serdev controller managing this device. * @ops: Device operations. * @write_comp: Completion used by serdev_device_write() internally * @write_lock: Lock to serialize access when writing data */ struct serdev_device { struct device dev; int nr; struct serdev_controller *ctrl; const struct serdev_device_ops *ops; struct completion write_comp; struct mutex write_lock; }; static inline struct serdev_device *to_serdev_device(struct device *d) { return container_of(d, struct serdev_device, dev); } /** * struct serdev_device_driver - serdev slave device driver * @driver: serdev device drivers should initialize name field of this * structure. * @probe: binds this driver to a serdev device. * @remove: unbinds this driver from the serdev device. */ struct serdev_device_driver { struct device_driver driver; int (*probe)(struct serdev_device *); void (*remove)(struct serdev_device *); }; static inline struct serdev_device_driver *to_serdev_device_driver(struct device_driver *d) { return container_of(d, struct serdev_device_driver, driver); } enum serdev_parity { SERDEV_PARITY_NONE, SERDEV_PARITY_EVEN, SERDEV_PARITY_ODD, }; /* * serdev controller structures */ struct serdev_controller_ops { int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t); void (*write_flush)(struct serdev_controller *); int (*write_room)(struct serdev_controller *); int (*open)(struct serdev_controller *); void (*close)(struct serdev_controller *); void (*set_flow_control)(struct serdev_controller *, bool); int (*set_parity)(struct serdev_controller *, enum serdev_parity); unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); void (*wait_until_sent)(struct serdev_controller *, long); int (*get_tiocm)(struct serdev_controller *); int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int); }; /** * struct serdev_controller - interface to the serdev controller * @dev: Driver model representation of the device. * @nr: number identifier for this controller/bus. * @serdev: Pointer to slave device for this controller. * @ops: Controller operations. */ struct serdev_controller { struct device dev; unsigned int nr; struct serdev_device *serdev; const struct serdev_controller_ops *ops; }; static inline struct serdev_controller *to_serdev_controller(struct device *d) { return container_of(d, struct serdev_controller, dev); } static inline void *serdev_device_get_drvdata(const struct serdev_device *serdev) { return dev_get_drvdata(&serdev->dev); } static inline void serdev_device_set_drvdata(struct serdev_device *serdev, void *data) { dev_set_drvdata(&serdev->dev, data); } /** * serdev_device_put() - decrement serdev device refcount * @serdev: serdev device.
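 *
 * Illustrative sketch only: dropping the reference on a registration
 * failure path (ctrl is a hypothetical controller; the helpers used
 * here are declared later in this header),
 *
 *	serdev = serdev_device_alloc(ctrl);
 *	if (serdev && serdev_device_add(serdev))
 *		serdev_device_put(serdev);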
*/ static inline void serdev_device_put(struct serdev_device *serdev) { if (serdev) put_device(&serdev->dev); } static inline void serdev_device_set_client_ops(struct serdev_device *serdev, const struct serdev_device_ops *ops) { serdev->ops = ops; } static inline void *serdev_controller_get_drvdata(const struct serdev_controller *ctrl) { return ctrl ? dev_get_drvdata(&ctrl->dev) : NULL; } static inline void serdev_controller_set_drvdata(struct serdev_controller *ctrl, void *data) { dev_set_drvdata(&ctrl->dev, data); } /** * serdev_controller_put() - decrement controller refcount * @ctrl: serdev controller. */ static inline void serdev_controller_put(struct serdev_controller *ctrl) { if (ctrl) put_device(&ctrl->dev); } struct serdev_device *serdev_device_alloc(struct serdev_controller *); int serdev_device_add(struct serdev_device *); void serdev_device_remove(struct serdev_device *); struct serdev_controller *serdev_controller_alloc(struct device *, size_t); int serdev_controller_add(struct serdev_controller *); void serdev_controller_remove(struct serdev_controller *); static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl) { struct serdev_device *serdev = ctrl->serdev; if (!serdev || !serdev->ops->write_wakeup) return; serdev->ops->write_wakeup(serdev); } static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl, const unsigned char *data, size_t count) { struct serdev_device *serdev = ctrl->serdev; if (!serdev || !serdev->ops->receive_buf) return 0; return serdev->ops->receive_buf(serdev, data, count); } #if IS_ENABLED(CONFIG_SERIAL_DEV_BUS) int serdev_device_open(struct serdev_device *); void serdev_device_close(struct serdev_device *); int devm_serdev_device_open(struct device *, struct serdev_device *); unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); void serdev_device_set_flow_control(struct serdev_device *, bool); int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); void serdev_device_wait_until_sent(struct serdev_device *, long); int serdev_device_get_tiocm(struct serdev_device *); int serdev_device_set_tiocm(struct serdev_device *, int, int); void serdev_device_write_wakeup(struct serdev_device *); int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long); void serdev_device_write_flush(struct serdev_device *); int serdev_device_write_room(struct serdev_device *); /* * serdev device driver functions */ int __serdev_device_driver_register(struct serdev_device_driver *, struct module *); #define serdev_device_driver_register(sdrv) \ __serdev_device_driver_register(sdrv, THIS_MODULE) /** * serdev_device_driver_unregister() - unregister a serdev client driver * @sdrv: the driver to unregister */ static inline void serdev_device_driver_unregister(struct serdev_device_driver *sdrv) { if (sdrv) driver_unregister(&sdrv->driver); } #define module_serdev_device_driver(__serdev_device_driver) \ module_driver(__serdev_device_driver, serdev_device_driver_register, \ serdev_device_driver_unregister) #else static inline int serdev_device_open(struct serdev_device *sdev) { return -ENODEV; } static inline void serdev_device_close(struct serdev_device *sdev) {} static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev, unsigned int baudrate) { return 0; } static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} static inline int serdev_device_write_buf(struct serdev_device *serdev, const unsigned char
*buf, size_t count) { return -ENODEV; } static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} static inline int serdev_device_get_tiocm(struct serdev_device *serdev) { return -ENOTSUPP; } static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear) { return -ENOTSUPP; } static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf, size_t count, unsigned long timeout) { return -ENODEV; } static inline void serdev_device_write_flush(struct serdev_device *sdev) {} static inline int serdev_device_write_room(struct serdev_device *sdev) { return 0; } #define serdev_device_driver_register(x) #define serdev_device_driver_unregister(x) #endif /* CONFIG_SERIAL_DEV_BUS */ static inline bool serdev_device_get_cts(struct serdev_device *serdev) { int status = serdev_device_get_tiocm(serdev); return !!(status & TIOCM_CTS); } static inline int serdev_device_wait_for_cts(struct serdev_device *serdev, bool state, int timeout_ms) { unsigned long timeout; bool signal; timeout = jiffies + msecs_to_jiffies(timeout_ms); while (time_is_after_jiffies(timeout)) { signal = serdev_device_get_cts(serdev); if (signal == state) return 0; usleep_range(1000, 2000); } return -ETIMEDOUT; } static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enable) { if (enable) return serdev_device_set_tiocm(serdev, TIOCM_RTS, 0); else return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS); } int serdev_device_set_parity(struct serdev_device *serdev, enum serdev_parity parity); /* * serdev hooks into TTY core */ struct tty_port; struct tty_driver; #ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT struct device *serdev_tty_port_register(struct tty_port *port, struct device *parent, struct tty_driver *drv, int idx); int serdev_tty_port_unregister(struct tty_port *port); #else static inline struct device *serdev_tty_port_register(struct tty_port *port, struct device *parent, struct tty_driver *drv, int idx) { return ERR_PTR(-ENODEV); } static inline int serdev_tty_port_unregister(struct tty_port *port) { return -ENODEV; } #endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ #endif /*_LINUX_SERDEV_H */ dmapool.h 0000644 00000003450 14722070374 0006360 0 ustar 00 /* * include/linux/dmapool.h * * Allocation pools for DMAable (coherent) memory. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. 
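 *
 * Typical usage is sketched below (illustrative only; the device,
 * sizes and names are hypothetical, the calls are declared later in
 * this header):
 *
 *	pool = dma_pool_create("mydev-bufs", dev, 64, 8, 0);
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
 *	...
 *	dma_pool_free(pool, buf, dma_handle);
 *	dma_pool_destroy(pool);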
*/ #ifndef LINUX_DMAPOOL_H #define LINUX_DMAPOOL_H #include <linux/scatterlist.h> #include <asm/io.h> struct device; #ifdef CONFIG_HAS_DMA struct dma_pool *dma_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t allocation); void dma_pool_destroy(struct dma_pool *pool); void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle); void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); /* * Managed DMA pool */ struct dma_pool *dmam_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t allocation); void dmam_pool_destroy(struct dma_pool *pool); #else /* !CONFIG_HAS_DMA */ static inline struct dma_pool *dma_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t allocation) { return NULL; } static inline void dma_pool_destroy(struct dma_pool *pool) { } static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) { return NULL; } static inline void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr) { } static inline struct dma_pool *dmam_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t allocation) { return NULL; } static inline void dmam_pool_destroy(struct dma_pool *pool) { } #endif /* !CONFIG_HAS_DMA */ static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) { return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle); } #endif psp-sev.h 0000644 00000042045 14722070374 0006325 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AMD Secure Encrypted Virtualization (SEV) driver interface * * Copyright (C) 2016-2017 Advanced Micro Devices, Inc. * * Author: Brijesh Singh <brijesh.singh@amd.com> * * SEV API spec is available at https://developer.amd.com/sev */ #ifndef __PSP_SEV_H__ #define __PSP_SEV_H__ #include <uapi/linux/psp-sev.h> #ifdef CONFIG_X86 #include <linux/mem_encrypt.h> #define __psp_pa(x) __sme_pa(x) #else #define __psp_pa(x) __pa(x) #endif #define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ /** * SEV platform state */ enum sev_state { SEV_STATE_UNINIT = 0x0, SEV_STATE_INIT = 0x1, SEV_STATE_WORKING = 0x2, SEV_STATE_MAX }; /** * SEV platform and guest management commands */ enum sev_cmd { /* platform commands */ SEV_CMD_INIT = 0x001, SEV_CMD_SHUTDOWN = 0x002, SEV_CMD_FACTORY_RESET = 0x003, SEV_CMD_PLATFORM_STATUS = 0x004, SEV_CMD_PEK_GEN = 0x005, SEV_CMD_PEK_CSR = 0x006, SEV_CMD_PEK_CERT_IMPORT = 0x007, SEV_CMD_PDH_CERT_EXPORT = 0x008, SEV_CMD_PDH_GEN = 0x009, SEV_CMD_DF_FLUSH = 0x00A, SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B, SEV_CMD_GET_ID = 0x00C, /* Guest commands */ SEV_CMD_DECOMMISSION = 0x020, SEV_CMD_ACTIVATE = 0x021, SEV_CMD_DEACTIVATE = 0x022, SEV_CMD_GUEST_STATUS = 0x023, /* Guest launch commands */ SEV_CMD_LAUNCH_START = 0x030, SEV_CMD_LAUNCH_UPDATE_DATA = 0x031, SEV_CMD_LAUNCH_UPDATE_VMSA = 0x032, SEV_CMD_LAUNCH_MEASURE = 0x033, SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034, SEV_CMD_LAUNCH_FINISH = 0x035, /* Guest migration commands (outgoing) */ SEV_CMD_SEND_START = 0x040, SEV_CMD_SEND_UPDATE_DATA = 0x041, SEV_CMD_SEND_UPDATE_VMSA = 0x042, SEV_CMD_SEND_FINISH = 0x043, /* Guest migration commands (incoming) */ SEV_CMD_RECEIVE_START = 0x050, SEV_CMD_RECEIVE_UPDATE_DATA = 0x051, SEV_CMD_RECEIVE_UPDATE_VMSA = 0x052, SEV_CMD_RECEIVE_FINISH = 0x053, /* Guest debug commands */ SEV_CMD_DBG_DECRYPT = 0x060, SEV_CMD_DBG_ENCRYPT = 0x061, SEV_CMD_MAX, }; /** * struct sev_data_init - INIT command parameters * * @flags: processing flags * 
@tmr_address: system physical address used for SEV-ES * @tmr_len: len of tmr_address */ struct sev_data_init { u32 flags; /* In */ u32 reserved; /* In */ u64 tmr_address; /* In */ u32 tmr_len; /* In */ } __packed; /** * struct sev_data_pek_csr - PEK_CSR command parameters * * @address: PEK certificate chain * @len: len of certificate */ struct sev_data_pek_csr { u64 address; /* In */ u32 len; /* In/Out */ } __packed; /** * struct sev_data_cert_import - PEK_CERT_IMPORT command parameters * * @pek_address: PEK certificate chain * @pek_len: len of PEK certificate * @oca_address: OCA certificate chain * @oca_len: len of OCA certificate */ struct sev_data_pek_cert_import { u64 pek_cert_address; /* In */ u32 pek_cert_len; /* In */ u32 reserved; /* In */ u64 oca_cert_address; /* In */ u32 oca_cert_len; /* In */ } __packed; /** * struct sev_data_download_firmware - DOWNLOAD_FIRMWARE command parameters * * @address: physical address of firmware image * @len: len of the firmware image */ struct sev_data_download_firmware { u64 address; /* In */ u32 len; /* In */ } __packed; /** * struct sev_data_get_id - GET_ID command parameters * * @address: physical address of region to place unique CPU ID(s) * @len: len of the region */ struct sev_data_get_id { u64 address; /* In */ u32 len; /* In/Out */ } __packed; /** * struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters * * @pdh_address: PDH certificate address * @pdh_len: len of PDH certificate * @cert_chain_address: PDH certificate chain * @cert_chain_len: len of PDH certificate chain */ struct sev_data_pdh_cert_export { u64 pdh_cert_address; /* In */ u32 pdh_cert_len; /* In/Out */ u32 reserved; /* In */ u64 cert_chain_address; /* In */ u32 cert_chain_len; /* In/Out */ } __packed; /** * struct sev_data_decommission - DECOMMISSION command parameters * * @handle: handle of the VM to decommission */ struct sev_data_decommission { u32 handle; /* In */ } __packed; /** * struct sev_data_activate - ACTIVATE command parameters * * @handle: handle of the VM to activate * @asid: asid assigned to the VM */ struct sev_data_activate { u32 handle; /* In */ u32 asid; /* In */ } __packed; /** * struct sev_data_deactivate - DEACTIVATE command parameters * * @handle: handle of the VM to deactivate */ struct sev_data_deactivate { u32 handle; /* In */ } __packed; /** * struct sev_data_guest_status - SEV GUEST_STATUS command parameters * * @handle: handle of the VM to retrieve status * @policy: policy information for the VM * @asid: current ASID of the VM * @state: current state of the VM */ struct sev_data_guest_status { u32 handle; /* In */ u32 policy; /* Out */ u32 asid; /* Out */ u8 state; /* Out */ } __packed; /** * struct sev_data_launch_start - LAUNCH_START command parameters * * @handle: handle assigned to the VM * @policy: guest launch policy * @dh_cert_address: physical address of DH certificate blob * @dh_cert_len: len of DH certificate blob * @session_address: physical address of session parameters * @session_len: len of session parameters */ struct sev_data_launch_start { u32 handle; /* In/Out */ u32 policy; /* In */ u64 dh_cert_address; /* In */ u32 dh_cert_len; /* In */ u32 reserved; /* In */ u64 session_address; /* In */ u32 session_len; /* In */ } __packed; /** * struct sev_data_launch_update_data - LAUNCH_UPDATE_DATA command parameter * * @handle: handle of the VM to update * @len: len of memory to be encrypted * @address: physical address of memory region to encrypt */ struct sev_data_launch_update_data { u32 handle; /* In */ u32 reserved; 
u64 address; /* In */ u32 len; /* In */ } __packed; /** * struct sev_data_launch_update_vmsa - LAUNCH_UPDATE_VMSA command * * @handle: handle of the VM * @address: physical address of memory region to encrypt * @len: len of memory region to encrypt */ struct sev_data_launch_update_vmsa { u32 handle; /* In */ u32 reserved; u64 address; /* In */ u32 len; /* In */ } __packed; /** * struct sev_data_launch_measure - LAUNCH_MEASURE command parameters * * @handle: handle of the VM to process * @address: physical address containing the measurement blob * @len: len of measurement blob */ struct sev_data_launch_measure { u32 handle; /* In */ u32 reserved; u64 address; /* In */ u32 len; /* In/Out */ } __packed; /** * struct sev_data_launch_secret - LAUNCH_SECRET command parameters * * @handle: handle of the VM to process * @hdr_address: physical address containing the packet header * @hdr_len: len of packet header * @guest_address: system physical address of guest memory region * @guest_len: len of guest_paddr * @trans_address: physical address of transport memory buffer * @trans_len: len of transport memory buffer */ struct sev_data_launch_secret { u32 handle; /* In */ u32 reserved1; u64 hdr_address; /* In */ u32 hdr_len; /* In */ u32 reserved2; u64 guest_address; /* In */ u32 guest_len; /* In */ u32 reserved3; u64 trans_address; /* In */ u32 trans_len; /* In */ } __packed; /** * struct sev_data_launch_finish - LAUNCH_FINISH command parameters * * @handle: handle of the VM to process */ struct sev_data_launch_finish { u32 handle; /* In */ } __packed; /** * struct sev_data_send_start - SEND_START command parameters * * @handle: handle of the VM to process * @policy: policy information for the VM * @pdh_cert_address: physical address containing PDH certificate * @pdh_cert_len: len of PDH certificate * @plat_certs_address: physical address containing platform certificate * @plat_certs_len: len of platform certificate * @amd_certs_address: physical address containing AMD certificate * @amd_certs_len: len of AMD certificate * @session_address: physical address containing Session data * @session_len: len of session data */ struct sev_data_send_start { u32 handle; /* In */ u32 policy; /* Out */ u64 pdh_cert_address; /* In */ u32 pdh_cert_len; /* In */ u32 reserved1; u64 plat_cert_address; /* In */ u32 plat_cert_len; /* In */ u32 reserved2; u64 amd_cert_address; /* In */ u32 amd_cert_len; /* In */ u32 reserved3; u64 session_address; /* In */ u32 session_len; /* In/Out */ } __packed; /** * struct sev_data_send_update - SEND_UPDATE_DATA command * * @handle: handle of the VM to process * @hdr_address: physical address containing packet header * @hdr_len: len of packet header * @guest_address: physical address of guest memory region to send * @guest_len: len of guest memory region to send * @trans_address: physical address of host memory region * @trans_len: len of host memory region */ struct sev_data_send_update_data { u32 handle; /* In */ u32 reserved1; u64 hdr_address; /* In */ u32 hdr_len; /* In/Out */ u32 reserved2; u64 guest_address; /* In */ u32 guest_len; /* In */ u32 reserved3; u64 trans_address; /* In */ u32 trans_len; /* In */ } __packed; /** * struct sev_data_send_update - SEND_UPDATE_VMSA command * * @handle: handle of the VM to process * @hdr_address: physical address containing packet header * @hdr_len: len of packet header * @guest_address: physical address of guest memory region to send * @guest_len: len of guest memory region to send * @trans_address: physical address of host memory region * 
@trans_len: len of host memory region */ struct sev_data_send_update_vmsa { u32 handle; /* In */ u64 hdr_address; /* In */ u32 hdr_len; /* In/Out */ u32 reserved2; u64 guest_address; /* In */ u32 guest_len; /* In */ u32 reserved3; u64 trans_address; /* In */ u32 trans_len; /* In */ } __packed; /** * struct sev_data_send_finish - SEND_FINISH command parameters * * @handle: handle of the VM to process */ struct sev_data_send_finish { u32 handle; /* In */ } __packed; /** * struct sev_data_receive_start - RECEIVE_START command parameters * * @handle: handle of the VM to perform receive operation * @pdh_cert_address: system physical address containing PDH certificate blob * @pdh_cert_len: len of PDH certificate blob * @session_address: system physical address containing session blob * @session_len: len of session blob */ struct sev_data_receive_start { u32 handle; /* In/Out */ u32 policy; /* In */ u64 pdh_cert_address; /* In */ u32 pdh_cert_len; /* In */ u32 reserved1; u64 session_address; /* In */ u32 session_len; /* In */ } __packed; /** * struct sev_data_receive_update_data - RECEIVE_UPDATE_DATA command parameters * * @handle: handle of the VM to update * @hdr_address: physical address containing packet header blob * @hdr_len: len of packet header * @guest_address: system physical address of guest memory region * @guest_len: len of guest memory region * @trans_address: system physical address of transport buffer * @trans_len: len of transport buffer */ struct sev_data_receive_update_data { u32 handle; /* In */ u32 reserved1; u64 hdr_address; /* In */ u32 hdr_len; /* In */ u32 reserved2; u64 guest_address; /* In */ u32 guest_len; /* In */ u32 reserved3; u64 trans_address; /* In */ u32 trans_len; /* In */ } __packed; /** * struct sev_data_receive_update_vmsa - RECEIVE_UPDATE_VMSA command parameters * * @handle: handle of the VM to update * @hdr_address: physical address containing packet header blob * @hdr_len: len of packet header * @guest_address: system physical address of guest memory region * @guest_len: len of guest memory region * @trans_address: system physical address of transport buffer * @trans_len: len of transport buffer */ struct sev_data_receive_update_vmsa { u32 handle; /* In */ u32 reserved1; u64 hdr_address; /* In */ u32 hdr_len; /* In */ u32 reserved2; u64 guest_address; /* In */ u32 guest_len; /* In */ u32 reserved3; u64 trans_address; /* In */ u32 trans_len; /* In */ } __packed; /** * struct sev_data_receive_finish - RECEIVE_FINISH command parameters * * @handle: handle of the VM to finish */ struct sev_data_receive_finish { u32 handle; /* In */ } __packed; /** * struct sev_data_dbg - DBG_ENCRYPT/DBG_DECRYPT command parameters * * @handle: handle of the VM to perform debug operation * @src_addr: source address of data to operate on * @dst_addr: destination address of data to operate on * @len: len of data to operate on */ struct sev_data_dbg { u32 handle; /* In */ u32 reserved; u64 src_addr; /* In */ u64 dst_addr; /* In */ u32 len; /* In */ } __packed; #ifdef CONFIG_CRYPTO_DEV_SP_PSP /** * sev_platform_init - perform SEV INIT command * * @error: SEV command return code * * Returns: * 0 if the SEV successfully processed the command * -%ENODEV if the SEV device is not available * -%ENOTSUPP if the SEV does not support SEV * -%ETIMEDOUT if the SEV command timed out * -%EIO if the SEV returned a non-zero return code */ int sev_platform_init(int *error); /** * sev_platform_status - perform SEV PLATFORM_STATUS command * * @status: sev_user_data_status structure to be processed * 
@error: SEV command return code * * Returns: * 0 if the SEV successfully processed the command * -%ENODEV if the SEV device is not available * -%ENOTSUPP if the SEV does not support SEV * -%ETIMEDOUT if the SEV command timed out * -%EIO if the SEV returned a non-zero return code */ int sev_platform_status(struct sev_user_data_status *status, int *error); /** * sev_issue_cmd_external_user - issue SEV command by other driver with a file * handle. * * This function can be used by other drivers to issue a SEV command on * behalf of userspace. The caller must pass a valid SEV file descriptor * so that we know that it has access to SEV device. * * @filep - SEV device file pointer * @cmd - command to issue * @data - command buffer * @error: SEV command return code * * Returns: * 0 if the SEV successfully processed the command * -%ENODEV if the SEV device is not available * -%ENOTSUPP if the SEV does not support SEV * -%ETIMEDOUT if the SEV command timed out * -%EIO if the SEV returned a non-zero return code * -%EINVAL if the SEV file descriptor is not valid */ int sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int *error); /** * sev_guest_deactivate - perform SEV DEACTIVATE command * * @deactivate: sev_data_deactivate structure to be processed * @sev_ret: sev command return code * * Returns: * 0 if the sev successfully processed the command * -%ENODEV if the sev device is not available * -%ENOTSUPP if the sev does not support SEV * -%ETIMEDOUT if the sev command timed out * -%EIO if the sev returned a non-zero return code */ int sev_guest_deactivate(struct sev_data_deactivate *data, int *error); /** * sev_guest_activate - perform SEV ACTIVATE command * * @activate: sev_data_activate structure to be processed * @sev_ret: sev command return code * * Returns: * 0 if the sev successfully processed the command * -%ENODEV if the sev device is not available * -%ENOTSUPP if the sev does not support SEV * -%ETIMEDOUT if the sev command timed out * -%EIO if the sev returned a non-zero return code */ int sev_guest_activate(struct sev_data_activate *data, int *error); /** * sev_guest_df_flush - perform SEV DF_FLUSH command * * @sev_ret: sev command return code * * Returns: * 0 if the sev successfully processed the command * -%ENODEV if the sev device is not available * -%ENOTSUPP if the sev does not support SEV * -%ETIMEDOUT if the sev command timed out * -%EIO if the sev returned a non-zero return code */ int sev_guest_df_flush(int *error); /** * sev_guest_decommission - perform SEV DECOMMISSION command * * @decommission: sev_data_decommission structure to be processed * @sev_ret: sev command return code * * Returns: * 0 if the sev successfully processed the command * -%ENODEV if the sev device is not available * -%ENOTSUPP if the sev does not support SEV * -%ETIMEDOUT if the sev command timed out * -%EIO if the sev returned a non-zero return code */ int sev_guest_decommission(struct sev_data_decommission *data, int *error); void *psp_copy_user_blob(u64 __user uaddr, u32 len); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; } static inline int sev_platform_init(int *error) { return -ENODEV; } static inline int sev_guest_deactivate(struct sev_data_deactivate *data, int *error) { return -ENODEV; } static inline int sev_guest_decommission(struct sev_data_decommission *data, int *error) { return -ENODEV; } static inline int sev_guest_activate(struct sev_data_activate *data, int *error) { 
return -ENODEV; } static inline int sev_guest_df_flush(int *error) { return -ENODEV; } static inline int sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int *error) { return -ENODEV; } static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ syslog.h 0000644 00000002362 14722070374 0006246 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Syslog internals * * Copyright 2010 Canonical, Ltd. * Author: Kees Cook <kees.cook@canonical.com> */ #ifndef _LINUX_SYSLOG_H #define _LINUX_SYSLOG_H /* Close the log. Currently a NOP. */ #define SYSLOG_ACTION_CLOSE 0 /* Open the log. Currently a NOP. */ #define SYSLOG_ACTION_OPEN 1 /* Read from the log. */ #define SYSLOG_ACTION_READ 2 /* Read all messages remaining in the ring buffer. */ #define SYSLOG_ACTION_READ_ALL 3 /* Read and clear all messages remaining in the ring buffer */ #define SYSLOG_ACTION_READ_CLEAR 4 /* Clear ring buffer. */ #define SYSLOG_ACTION_CLEAR 5 /* Disable printk's to console */ #define SYSLOG_ACTION_CONSOLE_OFF 6 /* Enable printk's to console */ #define SYSLOG_ACTION_CONSOLE_ON 7 /* Set level of messages printed to console */ #define SYSLOG_ACTION_CONSOLE_LEVEL 8 /* Return number of unread characters in the log buffer */ #define SYSLOG_ACTION_SIZE_UNREAD 9 /* Return size of the log buffer */ #define SYSLOG_ACTION_SIZE_BUFFER 10 #define SYSLOG_FROM_READER 0 #define SYSLOG_FROM_PROC 1 int do_syslog(int type, char __user *buf, int count, int source); #endif /* _LINUX_SYSLOG_H */ compiler-gcc.h 0000644 00000011627 14722070374 0007276 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_TYPES_H #error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." #endif /* * Common definitions for all gcc versions go here. */ #define GCC_VERSION (__GNUC__ * 10000 \ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) #if GCC_VERSION < 40600 # error Sorry, your compiler is too old - please upgrade it. #elif defined(CONFIG_ARM64) && GCC_VERSION < 50100 /* * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293 * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk */ # error Sorry, your version of GCC is too old - please use 5.1 or newer. #endif /* * This macro obfuscates arithmetic on a variable address so that gcc * shouldn't recognize the original var, and make assumptions about it. * * This is needed because the C standard makes it undefined to do * pointer arithmetic on "objects" outside their boundaries and the * gcc optimizers assume this is the case. In particular they * assume such arithmetic does not wrap. * * A miscompilation has been observed because of this on PPC. * To work around it we hide the relationship of the pointer and the object * using this macro. * * Versions of the ppc64 compiler before 4.1 had a bug where use of * RELOC_HIDE could trash r30. The bug can be worked around by changing * the inline assembly constraint from =g to =r, in this particular * case either is valid. 
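 *
 * Sketch of a typical use, hiding the per-CPU offset arithmetic from
 * the optimizer (var and cpu are hypothetical):
 *
 *	ptr = RELOC_HIDE(&var, __per_cpu_offset[cpu]);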
*/ #define RELOC_HIDE(ptr, off) \ ({ \ unsigned long __ptr; \ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ (typeof(ptr)) (__ptr + (off)); \ }) /* * A trick to suppress uninitialized variable warning without generating any * code */ #define uninitialized_var(x) x = x #ifdef CONFIG_RETPOLINE #define __noretpoline __attribute__((__indirect_branch__("keep"))) #endif #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) #define __compiletime_object_size(obj) __builtin_object_size(obj, 0) #define __compiletime_warning(message) __attribute__((__warning__(message))) #define __compiletime_error(message) __attribute__((__error__(message))) #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) #define __latent_entropy __attribute__((latent_entropy)) #endif /* * calling noreturn functions, __builtin_unreachable() and __builtin_trap() * confuse the stack allocation in gcc, leading to overly large stack * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365 * * Adding an empty inline assembly before it works around the problem */ #define barrier_before_unreachable() asm volatile("") /* * Mark a position in code as unreachable. This can be used to * suppress control flow warnings after asm blocks that transfer * control elsewhere. */ #define unreachable() \ do { \ annotate_unreachable(); \ barrier_before_unreachable(); \ __builtin_unreachable(); \ } while (0) #if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__) #define __randomize_layout __attribute__((randomize_layout)) #define __no_randomize_layout __attribute__((no_randomize_layout)) /* This anon struct can add padding, so only enable it under randstruct. */ #define randomized_struct_fields_start struct { #define randomized_struct_fields_end } __randomize_layout; #endif /* * GCC 'asm goto' miscompiles certain code sequences: * * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 * * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. * * (asm goto is automatically volatile - the naming reflects this.) */ #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) /* * sparse (__CHECKER__) pretends to be gcc, but can't do constant * folding in __builtin_bswap*() (yet), so don't set these for it. */ #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ #if GCC_VERSION >= 40800 #define __HAVE_BUILTIN_BSWAP16__ #endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ #if GCC_VERSION >= 70000 #define KASAN_ABI_VERSION 5 #elif GCC_VERSION >= 50000 #define KASAN_ABI_VERSION 4 #elif GCC_VERSION >= 40902 #define KASAN_ABI_VERSION 3 #endif #if __has_attribute(__no_sanitize_address__) #define __no_sanitize_address __attribute__((no_sanitize_address)) #else #define __no_sanitize_address #endif #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif /* * Turn individual warnings and errors on and off locally, depending * on version. 
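 *
 * For example (a sketch using the generic __diag_push()/__diag_ignore()/
 * __diag_pop() wrappers from <linux/compiler_types.h>):
 *
 *	__diag_push();
 *	__diag_ignore(GCC, 8, "-Wattribute-alias",
 *		      "the alias here is intentional");
 *	...
 *	__diag_pop();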
*/ #define __diag_GCC(version, severity, s) \ __diag_GCC_ ## version(__diag_GCC_ ## severity s) /* Severity used in pragma directives */ #define __diag_GCC_ignore ignored #define __diag_GCC_warn warning #define __diag_GCC_error error #define __diag_str1(s) #s #define __diag_str(s) __diag_str1(s) #define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) #if GCC_VERSION >= 80000 #define __diag_GCC_8(s) __diag(s) #else #define __diag_GCC_8(s) #endif mailbox/zynqmp-ipi-message.h 0000644 00000000766 14722070374 0012126 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ZYNQMP_IPI_MESSAGE_H_ #define _LINUX_ZYNQMP_IPI_MESSAGE_H_ /** * struct zynqmp_ipi_message - ZynqMP IPI message structure * @len: Length of message * @data: message payload * * This is the structure for data used in mbox_send_message * the maximum length of data buffer is fixed to 32 bytes. * Client is supposed to be aware of this. */ struct zynqmp_ipi_message { size_t len; u8 data[0]; }; #endif /* _LINUX_ZYNQMP_IPI_MESSAGE_H_ */ mailbox/brcm-message.h 0000644 00000002243 14722070374 0010724 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Broadcom * * Common header for Broadcom mailbox messages which is shared across * Broadcom SoCs and Broadcom mailbox client drivers. */ #ifndef _LINUX_BRCM_MESSAGE_H_ #define _LINUX_BRCM_MESSAGE_H_ #include <linux/scatterlist.h> enum brcm_message_type { BRCM_MESSAGE_UNKNOWN = 0, BRCM_MESSAGE_BATCH, BRCM_MESSAGE_SPU, BRCM_MESSAGE_SBA, BRCM_MESSAGE_MAX, }; struct brcm_sba_command { u64 cmd; u64 *cmd_dma; dma_addr_t cmd_dma_addr; #define BRCM_SBA_CMD_TYPE_A BIT(0) #define BRCM_SBA_CMD_TYPE_B BIT(1) #define BRCM_SBA_CMD_TYPE_C BIT(2) #define BRCM_SBA_CMD_HAS_RESP BIT(3) #define BRCM_SBA_CMD_HAS_OUTPUT BIT(4) u64 flags; dma_addr_t resp; size_t resp_len; dma_addr_t data; size_t data_len; }; struct brcm_message { enum brcm_message_type type; union { struct { struct brcm_message *msgs; unsigned int msgs_queued; unsigned int msgs_count; } batch; struct { struct scatterlist *src; struct scatterlist *dst; } spu; struct { struct brcm_sba_command *cmds; unsigned int cmds_count; } sba; }; void *ctx; int error; }; #endif /* _LINUX_BRCM_MESSAGE_H_ */ mailbox/mtk-cmdq-mailbox.h 0000644 00000003232 14722070374 0011524 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2018 MediaTek Inc. 
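 *
 * Shared definitions for the MediaTek command queue (CMDQ) mailbox:
 * the instruction encoding (enum cmdq_code) and the packet layout
 * (struct cmdq_pkt) used by its clients.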
* */ #ifndef __MTK_CMDQ_MAILBOX_H__ #define __MTK_CMDQ_MAILBOX_H__ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> #define CMDQ_INST_SIZE 8 /* instruction is 64-bit */ #define CMDQ_SUBSYS_SHIFT 16 #define CMDQ_OP_CODE_SHIFT 24 #define CMDQ_JUMP_PASS CMDQ_INST_SIZE #define CMDQ_WFE_UPDATE BIT(31) #define CMDQ_WFE_WAIT BIT(15) #define CMDQ_WFE_WAIT_VALUE 0x1 /** cmdq event maximum */ #define CMDQ_MAX_EVENT 0x3ff /* * CMDQ_CODE_MASK: * set write mask * format: op mask * CMDQ_CODE_WRITE: * write value into target register * format: op subsys address value * CMDQ_CODE_JUMP: * jump by offset * format: op offset * CMDQ_CODE_WFE: * wait for event and clear * it is just clear if no wait * format: [wait] op event update:1 to_wait:1 wait:1 * [clear] op event update:1 to_wait:0 wait:0 * CMDQ_CODE_EOC: * end of command * format: op irq_flag */ enum cmdq_code { CMDQ_CODE_MASK = 0x02, CMDQ_CODE_WRITE = 0x04, CMDQ_CODE_JUMP = 0x10, CMDQ_CODE_WFE = 0x20, CMDQ_CODE_EOC = 0x40, }; enum cmdq_cb_status { CMDQ_CB_NORMAL = 0, CMDQ_CB_ERROR }; struct cmdq_cb_data { enum cmdq_cb_status sta; void *data; }; typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data); struct cmdq_task_cb { cmdq_async_flush_cb cb; void *data; }; struct cmdq_pkt { void *va_base; dma_addr_t pa_base; size_t cmd_buf_size; /* command occupied size */ size_t buf_size; /* real buffer size */ struct cmdq_task_cb cb; struct cmdq_task_cb async_cb; void *cl; }; #endif /* __MTK_CMDQ_MAILBOX_H__ */ filter.h 0000644 00000103451 14722070374 0006214 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Linux Socket Filter Data Structures */ #ifndef __LINUX_FILTER_H__ #define __LINUX_FILTER_H__ #include <stdarg.h> #include <linux/atomic.h> #include <linux/refcount.h> #include <linux/compat.h> #include <linux/skbuff.h> #include <linux/linkage.h> #include <linux/printk.h> #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/capability.h> #include <linux/cryptohash.h> #include <linux/set_memory.h> #include <linux/kallsyms.h> #include <linux/if_vlan.h> #include <linux/vmalloc.h> #include <net/sch_generic.h> #include <asm/byteorder.h> #include <uapi/linux/filter.h> #include <uapi/linux/bpf.h> struct sk_buff; struct sock; struct seccomp_data; struct bpf_prog_aux; struct xdp_rxq_info; struct xdp_buff; struct sock_reuseport; struct ctl_table; struct ctl_table_header; /* ArgX, context and stack frame pointer register positions. Note, * Arg1, Arg2, Arg3, etc are used as argument mappings of function * calls in BPF_CALL instruction. */ #define BPF_REG_ARG1 BPF_REG_1 #define BPF_REG_ARG2 BPF_REG_2 #define BPF_REG_ARG3 BPF_REG_3 #define BPF_REG_ARG4 BPF_REG_4 #define BPF_REG_ARG5 BPF_REG_5 #define BPF_REG_CTX BPF_REG_6 #define BPF_REG_FP BPF_REG_10 /* Additional register mappings for converted user programs. */ #define BPF_REG_A BPF_REG_0 #define BPF_REG_X BPF_REG_7 #define BPF_REG_TMP BPF_REG_2 /* scratch reg */ #define BPF_REG_D BPF_REG_8 /* data, callee-saved */ #define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */ /* Kernel hidden auxiliary/helper register. 
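 * It is used only by the kernel itself, e.g. for constant blinding
 * rewrites, and is never addressable from a loaded program.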
*/ #define BPF_REG_AX MAX_BPF_REG #define MAX_BPF_EXT_REG (MAX_BPF_REG + 1) #define MAX_BPF_JIT_REG MAX_BPF_EXT_REG /* unused opcode to mark special call to bpf_tail_call() helper */ #define BPF_TAIL_CALL 0xf0 /* unused opcode to mark call to interpreter with arguments */ #define BPF_CALL_ARGS 0xe0 /* unused opcode to mark speculation barrier for mitigating * Speculative Store Bypass */ #define BPF_NOSPEC 0xc0 /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. */ #define BPF_SYM_ELF_TYPE 't' /* BPF program can access up to 512 bytes of stack space. */ #define MAX_BPF_STACK 512 /* Helper macros for filter block array initializers. */ /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ #define BPF_ALU64_REG(OP, DST, SRC) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = 0 }) #define BPF_ALU32_REG(OP, DST, SRC) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = 0 }) /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ #define BPF_ALU64_IMM(OP, DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_ALU32_IMM(OP, DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ #define BPF_ENDIAN(TYPE, DST, LEN) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = LEN }) /* Short form of mov, dst_reg = src_reg */ #define BPF_MOV64_REG(DST, SRC) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = 0 }) #define BPF_MOV32_REG(DST, SRC) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = 0 }) /* Short form of mov, dst_reg = imm32 */ #define BPF_MOV64_IMM(DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) #define BPF_MOV32_IMM(DST, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) /* Special form of mov32, used for doing explicit zero extension on dst. 
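 * For instance, BPF_ZEXT_REG(BPF_REG_0) encodes a 32-bit mov with
 * dst == src == R0 and imm == 1, which insn_is_zext() below treats
 * as "r0 = (u32) r0".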
*/ #define BPF_ZEXT_REG(DST) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = DST, \ .off = 0, \ .imm = 1 }) static inline bool insn_is_zext(const struct bpf_insn *insn) { return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1; } /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ #define BPF_LD_IMM64(DST, IMM) \ BPF_LD_IMM64_RAW(DST, 0, IMM) #define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ ((struct bpf_insn) { \ .code = BPF_LD | BPF_DW | BPF_IMM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = (__u32) (IMM) }), \ ((struct bpf_insn) { \ .code = 0, /* zero is reserved opcode */ \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = ((__u64) (IMM)) >> 32 }) /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ #define BPF_LD_MAP_FD(DST, MAP_FD) \ BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = IMM }) #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \ .dst_reg = DST, \ .src_reg = SRC, \ .off = 0, \ .imm = IMM }) /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ #define BPF_LD_ABS(SIZE, IMM) \ ((struct bpf_insn) { \ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = IMM }) /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */ #define BPF_LD_IND(SIZE, SRC, IMM) \ ((struct bpf_insn) { \ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \ .dst_reg = 0, \ .src_reg = SRC, \ .off = 0, \ .imm = IMM }) /* Memory load, dst_reg = *(uint *) (src_reg + off16) */ #define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* Memory store, *(uint *) (dst_reg + off16) = src_reg */ #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */ #define BPF_STX_XADD(SIZE, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ ((struct bpf_insn) { \ .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ #define BPF_JMP_REG(OP, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) /* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ #define BPF_JMP32_REG(OP, DST, SRC, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) /* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. 
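 * Sketch: BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 7, 1) skips the next
 * instruction when the lower 32 bits of R1 equal 7.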
*/ #define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ .off = OFF, \ .imm = IMM }) /* Unconditional jumps, goto pc + off16 */ #define BPF_JMP_A(OFF) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_JA, \ .dst_reg = 0, \ .src_reg = 0, \ .off = OFF, \ .imm = 0 }) /* Relative call */ #define BPF_CALL_REL(TGT) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ .dst_reg = 0, \ .src_reg = BPF_PSEUDO_CALL, \ .off = 0, \ .imm = TGT }) /* Function call */ #define BPF_CAST_CALL(x) \ ((u64 (*)(u64, u64, u64, u64, u64))(x)) #define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = ((FUNC) - __bpf_call_base) }) /* Raw code statement block */ #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ ((struct bpf_insn) { \ .code = CODE, \ .dst_reg = DST, \ .src_reg = SRC, \ .off = OFF, \ .imm = IMM }) /* Program exit */ #define BPF_EXIT_INSN() \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_EXIT, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = 0 }) /* Speculation barrier */ #define BPF_ST_NOSPEC() \ ((struct bpf_insn) { \ .code = BPF_ST | BPF_NOSPEC, \ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ .imm = 0 }) /* Internal classic blocks for direct assignment */ #define __BPF_STMT(CODE, K) \ ((struct sock_filter) BPF_STMT(CODE, K)) #define __BPF_JUMP(CODE, K, JT, JF) \ ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF)) #define bytes_to_bpf_size(bytes) \ ({ \ int bpf_size = -EINVAL; \ \ if (bytes == sizeof(u8)) \ bpf_size = BPF_B; \ else if (bytes == sizeof(u16)) \ bpf_size = BPF_H; \ else if (bytes == sizeof(u32)) \ bpf_size = BPF_W; \ else if (bytes == sizeof(u64)) \ bpf_size = BPF_DW; \ \ bpf_size; \ }) #define bpf_size_to_bytes(bpf_size) \ ({ \ int bytes = -EINVAL; \ \ if (bpf_size == BPF_B) \ bytes = sizeof(u8); \ else if (bpf_size == BPF_H) \ bytes = sizeof(u16); \ else if (bpf_size == BPF_W) \ bytes = sizeof(u32); \ else if (bpf_size == BPF_DW) \ bytes = sizeof(u64); \ \ bytes; \ }) #define BPF_SIZEOF(type) \ ({ \ const int __size = bytes_to_bpf_size(sizeof(type)); \ BUILD_BUG_ON(__size < 0); \ __size; \ }) #define BPF_FIELD_SIZEOF(type, field) \ ({ \ const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \ BUILD_BUG_ON(__size < 0); \ __size; \ }) #define BPF_LDST_BYTES(insn) \ ({ \ const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \ WARN_ON(__size < 0); \ __size; \ }) #define __BPF_MAP_0(m, v, ...) v #define __BPF_MAP_1(m, v, t, a, ...) m(t, a) #define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__) #define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__) #define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__) #define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__) #define __BPF_REG_0(...) __BPF_PAD(5) #define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4) #define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3) #define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2) #define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1) #define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__) #define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__) #define __BPF_REG(n, ...) 
__BPF_REG_##n(__VA_ARGS__) #define __BPF_CAST(t, a) \ (__force t) \ (__force \ typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \ (unsigned long)0, (t)0))) a #define __BPF_V void #define __BPF_N #define __BPF_DECL_ARGS(t, a) t a #define __BPF_DECL_REGS(t, a) u64 a #define __BPF_PAD(n) \ __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \ u64, __ur_3, u64, __ur_4, u64, __ur_5) #define BPF_CALL_x(x, attr, name, ...) \ static __always_inline \ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \ { \ return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\ } \ static __always_inline \ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)) #define __NOATTR #define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__) #define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__) #define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__) #define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__) #define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__) #define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__) #define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__) #define bpf_ctx_range(TYPE, MEMBER) \ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 #if BITS_PER_LONG == 64 # define bpf_ctx_range_ptr(TYPE, MEMBER) \ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 #else # define bpf_ctx_range_ptr(TYPE, MEMBER) \ offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1 #endif /* BITS_PER_LONG == 64 */ #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ ({ \ BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \ *(PTR_SIZE) = (SIZE); \ offsetof(TYPE, MEMBER); \ }) #ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. */ struct compat_sock_fprog { u16 len; compat_uptr_t filter; /* struct sock_filter * */ }; #endif struct sock_fprog_kern { u16 len; struct sock_filter *filter; }; struct bpf_binary_header { u32 pages; /* Some arches need word alignment for their instructions */ u8 image[] __aligned(4); }; struct bpf_prog { u16 pages; /* Number of allocated pages */ u16 jited:1, /* Is our filter JIT'ed? */ jit_requested:1,/* archs need to JIT the prog */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? */ blinded:1, /* Was blinded */ is_func:1, /* program is a bpf function */ kprobe_override:1, /* Do we override a kprobe? */ has_callchain_buf:1, /* callchain buffer allocated? 
*/ enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ u32 jited_len; /* Size of jited insns in bytes */ u8 tag[BPF_TAG_SIZE]; struct bpf_prog_aux *aux; /* Auxiliary fields */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ unsigned int (*bpf_func)(const void *ctx, const struct bpf_insn *insn); /* Instructions for interpreter */ union { struct sock_filter insns[0]; struct bpf_insn insnsi[0]; }; }; struct sk_filter { refcount_t refcnt; struct rcu_head rcu; struct bpf_prog *prog; }; DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); #define BPF_PROG_RUN(prog, ctx) ({ \ u32 ret; \ cant_sleep(); \ if (static_branch_unlikely(&bpf_stats_enabled_key)) { \ struct bpf_prog_stats *stats; \ u64 start = sched_clock(); \ ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \ stats = this_cpu_ptr(prog->aux->stats); \ u64_stats_update_begin(&stats->syncp); \ stats->cnt++; \ stats->nsecs += sched_clock() - start; \ u64_stats_update_end(&stats->syncp); \ } else { \ ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \ } \ ret; }) #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN struct bpf_skb_data_end { struct qdisc_skb_cb qdisc_cb; void *data_meta; void *data_end; }; struct bpf_redirect_info { u32 flags; u32 tgt_index; void *tgt_value; struct bpf_map *map; struct bpf_map *map_to_flush; u32 kern_flags; }; DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); /* flags for bpf_redirect_info kern_flags */ #define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ /* Compute the linear packet data range [data, data_end) which * will be accessed by various program types (cls_bpf, act_bpf, * lwt, ...). Subsystems allowing direct data access must (!) * ensure that cb[] area can be written to when BPF program is * invoked (otherwise cb[] save/restore is necessary). */ static inline void bpf_compute_data_pointers(struct sk_buff *skb) { struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); cb->data_meta = skb->data - skb_metadata_len(skb); cb->data_end = skb->data + skb_headlen(skb); } /* Similar to bpf_compute_data_pointers(), except that the original * cb->data_end is saved through *saved_data_end so that it can later * be restored with bpf_restore_data_end(). */ static inline void bpf_compute_and_save_data_end( struct sk_buff *skb, void **saved_data_end) { struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; *saved_data_end = cb->data_end; cb->data_end = skb->data + skb_headlen(skb); } /* Restore data saved by bpf_compute_and_save_data_end(). */ static inline void bpf_restore_data_end( struct sk_buff *skb, void *saved_data_end) { struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; cb->data_end = saved_data_end; } static inline u8 *bpf_skb_cb(struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta * data between tail calls. Since this also needs to work with * tc, that scratch memory is mapped to qdisc_skb_cb's data area. * * In some socket filter cases, the cb unfortunately needs to be * saved/restored so that protocol specific skb->cb[] data won't * be lost. In any case, due to unprivileged eBPF programs * attached to sockets, we need to clear the bpf_skb_cb() area * to not leak previous contents to user space.
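 *
 * Illustrative sketch (not part of the original header): on the BPF
 * program side the same scratch area is visible as __sk_buff::cb, so a
 * pair of tail-called programs can hand a value over like this, where
 * "jmp_table" is a hypothetical BPF_MAP_TYPE_PROG_ARRAY:
 *
 *	skb->cb[0] = 42;
 *	bpf_tail_call(skb, &jmp_table, 0);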
*/ BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN); BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != FIELD_SIZEOF(struct qdisc_skb_cb, data)); return qdisc_skb_cb(skb)->data; } static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, struct sk_buff *skb) { u8 *cb_data = bpf_skb_cb(skb); u8 cb_saved[BPF_SKB_CB_LEN]; u32 res; if (unlikely(prog->cb_access)) { memcpy(cb_saved, cb_data, sizeof(cb_saved)); memset(cb_data, 0, sizeof(cb_saved)); } res = BPF_PROG_RUN(prog, skb); if (unlikely(prog->cb_access)) memcpy(cb_data, cb_saved, sizeof(cb_saved)); return res; } static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, struct sk_buff *skb) { u32 res; preempt_disable(); res = __bpf_prog_run_save_cb(prog, skb); preempt_enable(); return res; } static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, struct sk_buff *skb) { u8 *cb_data = bpf_skb_cb(skb); u32 res; if (unlikely(prog->cb_access)) memset(cb_data, 0, BPF_SKB_CB_LEN); preempt_disable(); res = BPF_PROG_RUN(prog, skb); preempt_enable(); return res; } static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, struct xdp_buff *xdp) { /* Caller needs to hold rcu_read_lock() (!), otherwise program * can be released while still running, or map elements could be * freed early while still having concurrent users. XDP fastpath * already takes rcu_read_lock() when fetching the program, so * it's not necessary here anymore. */ return BPF_PROG_RUN(prog, xdp); } static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) { return prog->len * sizeof(struct bpf_insn); } static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) { return round_up(bpf_prog_insn_size(prog) + sizeof(__be64) + 1, SHA_MESSAGE_BYTES); } static inline unsigned int bpf_prog_size(unsigned int proglen) { return max(sizeof(struct bpf_prog), offsetof(struct bpf_prog, insns[proglen])); } static inline bool bpf_prog_was_classic(const struct bpf_prog *prog) { /* When classic BPF programs have been loaded and the arch * does not have a classic BPF JIT (anymore), they have been * converted via bpf_migrate_filter() to eBPF and thus always * have an unspec program type. 
*/ return prog->type == BPF_PROG_TYPE_UNSPEC; } static inline u32 bpf_ctx_off_adjust_machine(u32 size) { const u32 size_machine = sizeof(unsigned long); if (size > size_machine && size % size_machine == 0) size = size_machine; return size; } static inline bool bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) { return size <= size_default && (size & (size - 1)) == 0; } static inline u8 bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default) { u8 access_off = off & (size_default - 1); #ifdef __LITTLE_ENDIAN return access_off; #else return size_default - (access_off + size); #endif } #define bpf_ctx_wide_access_ok(off, size, type, field) \ (size == sizeof(__u64) && \ off >= offsetof(type, field) && \ off + sizeof(__u64) <= offsetofend(type, field) && \ off % sizeof(__u64) == 0) #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON if (!fp->jited) { set_vm_flush_reset_perms(fp); set_memory_ro((unsigned long)fp, fp->pages); } #endif } static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { set_vm_flush_reset_perms(hdr); set_memory_ro((unsigned long)hdr, hdr->pages); set_memory_x((unsigned long)hdr, hdr->pages); } static inline struct bpf_binary_header * bpf_jit_binary_hdr(const struct bpf_prog *fp) { unsigned long real_start = (unsigned long)fp->bpf_func; unsigned long addr = real_start & PAGE_MASK; return (void *)addr; } int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); static inline int sk_filter(struct sock *sk, struct sk_buff *skb) { return sk_filter_trim_cap(sk, skb, 1); } struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); bool bpf_opcode_in_insntable(u8 code); void bpf_prog_free_linfo(struct bpf_prog *prog); void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, const u32 *insn_to_jit_off); int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog); void bpf_prog_free_jited_linfo(struct bpf_prog *prog); void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog); struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, gfp_t gfp_extra_flags); void __bpf_prog_free(struct bpf_prog *fp); static inline void bpf_prog_unlock_free(struct bpf_prog *fp) { __bpf_prog_free(fp); } typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter, unsigned int flen); int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, bpf_aux_classic_check_t trans, bool save_orig); void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); void sk_reuseport_prog_free(struct bpf_prog *prog); int sk_detach_filter(struct sock *sk); int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); #define __bpf_call_base_args \ ((u64 (*)(u64, u64, u64, u64, u64, const struct 
bpf_insn *)) \ (void *)__bpf_call_base) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); bool bpf_helper_changes_pkt_data(void *func); static inline bool bpf_dump_raw_ok(const struct cred *cred) { /* Reconstruction of call-sites is dependent on kallsyms, * thus make dumping subject to the same restriction. */ return kallsyms_show_value(cred); } struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); void bpf_clear_redirect_map(struct bpf_map *map); static inline bool xdp_return_frame_no_direct(void) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT; } static inline void xdp_set_return_frame_no_direct(void) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT; } static inline void xdp_clear_return_frame_no_direct(void) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT; } static inline int xdp_ok_fwd_dev(const struct net_device *fwd, unsigned int pktlen) { unsigned int len; if (unlikely(!(fwd->flags & IFF_UP))) return -ENETDOWN; len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; if (pktlen > len) return -EMSGSIZE; return 0; } /* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the * same cpu context. Further for best results no more than a single map * for the do_redirect/do_flush pair should be used. This limitation is * because we only track one map and force a flush when the map changes. * This does not appear to be a real limitation for existing software. */ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, struct bpf_prog *prog); int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *prog); void xdp_do_flush_map(void); void bpf_warn_invalid_xdp_action(u32 act); #ifdef CONFIG_INET struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, u32 hash); #else static inline struct sock * bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, u32 hash) { return NULL; } #endif #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; extern int bpf_jit_harden; extern int bpf_jit_kallsyms; extern long bpf_jit_limit; extern long bpf_jit_limit_max; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); struct bpf_binary_header * bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, unsigned int alignment, bpf_jit_fill_hole_t bpf_fill_ill_insns); void bpf_jit_binary_free(struct bpf_binary_header *hdr); u64 bpf_jit_alloc_exec_limit(void); void *bpf_jit_alloc_exec(unsigned long size); void bpf_jit_free_exec(void *addr); void bpf_jit_free(struct bpf_prog *fp); int bpf_jit_get_func_addr(const struct bpf_prog *prog, const struct bpf_insn *insn, bool extra_pass, u64 *func_addr, bool *func_addr_fixed); struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) { pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen, proglen, pass, image, current->comm, task_pid_nr(current)); if (image) print_hex_dump(KERN_ERR, "JIT code: ",
DUMP_PREFIX_OFFSET, 16, 1, image, proglen, false); } static inline bool bpf_jit_is_ebpf(void) { # ifdef CONFIG_HAVE_EBPF_JIT return true; # else return false; # endif } static inline bool ebpf_jit_enabled(void) { return bpf_jit_enable && bpf_jit_is_ebpf(); } static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) { return fp->jited && bpf_jit_is_ebpf(); } static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) { /* These are the prerequisites, should someone ever have the * idea to call blinding outside of them, we make sure to * bail out. */ if (!bpf_jit_is_ebpf()) return false; if (!prog->jit_requested) return false; if (!bpf_jit_harden) return false; if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN)) return false; return true; } static inline bool bpf_jit_kallsyms_enabled(void) { /* There are a couple of corner cases where kallsyms should * not be enabled f.e. on hardening. */ if (bpf_jit_harden) return false; if (!bpf_jit_kallsyms) return false; if (bpf_jit_kallsyms == 1) return true; return false; } const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char *sym); bool is_bpf_text_address(unsigned long addr); int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym); static inline const char * bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym) { const char *ret = __bpf_address_lookup(addr, size, off, sym); if (ret && modname) *modname = NULL; return ret; } void bpf_prog_kallsyms_add(struct bpf_prog *fp); void bpf_prog_kallsyms_del(struct bpf_prog *fp); void bpf_get_prog_name(const struct bpf_prog *prog, char *sym); #else /* CONFIG_BPF_JIT */ static inline bool ebpf_jit_enabled(void) { return false; } static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) { return false; } static inline void bpf_jit_free(struct bpf_prog *fp) { bpf_prog_unlock_free(fp); } static inline bool bpf_jit_kallsyms_enabled(void) { return false; } static inline const char * __bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char *sym) { return NULL; } static inline bool is_bpf_text_address(unsigned long addr) { return false; } static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym) { return -ERANGE; } static inline const char * bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym) { return NULL; } static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) { } static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) { } static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym) { sym[0] = '\0'; } #endif /* CONFIG_BPF_JIT */ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp); #define BPF_ANC BIT(15) static inline bool bpf_needs_clear_a(const struct sock_filter *first) { switch (first->code) { case BPF_RET | BPF_K: case BPF_LD | BPF_W | BPF_LEN: return false; case BPF_LD | BPF_W | BPF_ABS: case BPF_LD | BPF_H | BPF_ABS: case BPF_LD | BPF_B | BPF_ABS: if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X) return true; return false; default: return true; } } static inline u16 bpf_anc_helper(const struct sock_filter *ftest) { BUG_ON(ftest->code & BPF_ANC); switch (ftest->code) { case BPF_LD | BPF_W | BPF_ABS: case BPF_LD | BPF_H | BPF_ABS: case BPF_LD | BPF_B | BPF_ABS: #define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ return BPF_ANC | SKF_AD_##CODE switch (ftest->k) { BPF_ANCILLARY(PROTOCOL); 
BPF_ANCILLARY(PKTTYPE); BPF_ANCILLARY(IFINDEX); BPF_ANCILLARY(NLATTR); BPF_ANCILLARY(NLATTR_NEST); BPF_ANCILLARY(MARK); BPF_ANCILLARY(QUEUE); BPF_ANCILLARY(HATYPE); BPF_ANCILLARY(RXHASH); BPF_ANCILLARY(CPU); BPF_ANCILLARY(ALU_XOR_X); BPF_ANCILLARY(VLAN_TAG); BPF_ANCILLARY(VLAN_TAG_PRESENT); BPF_ANCILLARY(PAY_OFFSET); BPF_ANCILLARY(RANDOM); BPF_ANCILLARY(VLAN_TPID); } /* Fallthrough. */ default: return ftest->code; } } void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size); static inline void *bpf_load_pointer(const struct sk_buff *skb, int k, unsigned int size, void *buffer) { if (k >= 0) return skb_header_pointer(skb, k, size, buffer); return bpf_internal_load_pointer_neg_helper(skb, k, size); } static inline int bpf_tell_extensions(void) { return SKF_AD_MAX; } struct bpf_sock_addr_kern { struct sock *sk; struct sockaddr *uaddr; /* Temporary "register" to make indirect stores to nested structures * defined above. We need three registers to make such a store, but * only two (src and dst) are available at convert_ctx_access time */ u64 tmp_reg; void *t_ctx; /* Attach type specific context. */ }; struct bpf_sock_ops_kern { struct sock *sk; u32 op; union { u32 args[4]; u32 reply; u32 replylong[4]; }; u32 is_fullsock; u64 temp; /* temp and everything after is not * initialized to 0 before calling * the BPF program. New fields that * should be initialized to 0 should * be inserted before temp. * temp is scratch storage used by * sock_ops_convert_ctx_access * as temporary storage of a register. */ }; struct bpf_sysctl_kern { struct ctl_table_header *head; struct ctl_table *table; void *cur_val; size_t cur_len; void *new_val; size_t new_len; int new_updated; int write; loff_t *ppos; /* Temporary "register" for indirect stores to ppos. */ u64 tmp_reg; }; struct bpf_sockopt_kern { struct sock *sk; u8 *optval; u8 *optval_end; s32 level; s32 optname; s32 optlen; s32 retval; }; #endif /* __LINUX_FILTER_H__ */ mdio-bitbang.h 0000644 00000002175 14722070374 0007264 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MDIO_BITBANG_H #define __LINUX_MDIO_BITBANG_H #include <linux/phy.h> struct module; struct mdiobb_ctrl; struct mdiobb_ops { struct module *owner; /* Set the Management Data Clock high if level is one, * low if level is zero. */ void (*set_mdc)(struct mdiobb_ctrl *ctrl, int level); /* Configure the Management Data I/O pin as an input if * "output" is zero, or an output if "output" is one. */ void (*set_mdio_dir)(struct mdiobb_ctrl *ctrl, int output); /* Set the Management Data I/O pin high if "value" is one, * low if "value" is zero. This may only be called * when the MDIO pin is configured as an output. */ void (*set_mdio_data)(struct mdiobb_ctrl *ctrl, int value); /* Retrieve the state of the Management Data I/O pin. */ int (*get_mdio_data)(struct mdiobb_ctrl *ctrl); }; struct mdiobb_ctrl { const struct mdiobb_ops *ops; }; /* The returned bus is not yet registered with the phy layer. */ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl); /* The bus must already have been unregistered. */ void free_mdio_bitbang(struct mii_bus *bus); #endif tracepoint-defs.h 0000644 00000002032 14722070374 0010007 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef TRACEPOINT_DEFS_H #define TRACEPOINT_DEFS_H 1 /* * File can be included directly by headers that only want to access * tracepoint->key to guard out of line trace calls, or the definition of * trace_print_flags{_u64}. Otherwise linux/tracepoint.h should be used.
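 *
 * Illustrative sketch (an assumption, not from this header): with only
 * the static key visible, a call site can guard an out-of-line trace
 * call as follows, where "foo" and the out-of-line helper are
 * hypothetical names:
 *
 *	if (static_key_false(&__tracepoint_foo.key))
 *		trace_foo_out_of_line(args);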
*/ #include <linux/atomic.h> #include <linux/static_key.h> struct trace_print_flags { unsigned long mask; const char *name; }; struct trace_print_flags_u64 { unsigned long long mask; const char *name; }; struct tracepoint_func { void *func; void *data; int prio; }; struct tracepoint { const char *name; /* Tracepoint name */ struct static_key key; int (*regfunc)(void); void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; }; #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS typedef const int tracepoint_ptr_t; #else typedef struct tracepoint * const tracepoint_ptr_t; #endif struct bpf_raw_event_map { struct tracepoint *tp; void *bpf_func; u32 num_args; u32 writable_size; } __aligned(32); #endif cgroup_subsys.h 0000644 00000002263 14722070374 0007635 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * List of cgroup subsystems. * * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. */ /* * This file *must* be included with SUBSYS() defined. */ #if IS_ENABLED(CONFIG_CPUSETS) SUBSYS(cpuset) #endif #if IS_ENABLED(CONFIG_CGROUP_SCHED) SUBSYS(cpu) #endif #if IS_ENABLED(CONFIG_CGROUP_CPUACCT) SUBSYS(cpuacct) #endif #if IS_ENABLED(CONFIG_BLK_CGROUP) SUBSYS(io) #endif #if IS_ENABLED(CONFIG_MEMCG) SUBSYS(memory) #endif #if IS_ENABLED(CONFIG_CGROUP_DEVICE) SUBSYS(devices) #endif #if IS_ENABLED(CONFIG_CGROUP_FREEZER) SUBSYS(freezer) #endif #if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID) SUBSYS(net_cls) #endif #if IS_ENABLED(CONFIG_CGROUP_PERF) SUBSYS(perf_event) #endif #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) SUBSYS(net_prio) #endif #if IS_ENABLED(CONFIG_CGROUP_HUGETLB) SUBSYS(hugetlb) #endif #if IS_ENABLED(CONFIG_CGROUP_PIDS) SUBSYS(pids) #endif #if IS_ENABLED(CONFIG_CGROUP_RDMA) SUBSYS(rdma) #endif /* * The following subsystems are not supported on the default hierarchy. */ #if IS_ENABLED(CONFIG_CGROUP_DEBUG) SUBSYS(debug) #endif /* * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. */ sm501.h 0000644 00000010022 14722070374 0005563 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* include/linux/sm501.h * * Copyright (c) 2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * Vincent Sanders <vince@simtec.co.uk> */ extern int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to); extern unsigned long sm501_set_clock(struct device *dev, int clksrc, unsigned long freq); extern unsigned long sm501_find_clock(struct device *dev, int clksrc, unsigned long req_freq); /* sm501_misc_control * * Modify the SM501's MISC_CONTROL register */ extern int sm501_misc_control(struct device *dev, unsigned long set, unsigned long clear); /* sm501_modify_reg * * Modify a register in the SM501 which may be shared with other * drivers. 
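 *
 * Illustrative call (the register offset and bit masks are hypothetical
 * placeholders, not real SM501 definitions): set and clear bits in one
 * shared register in a single, serialized update:
 *
 *	sm501_modify_reg(dev, SM501_SOME_REG, BITS_TO_SET, BITS_TO_CLEAR);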
*/ extern unsigned long sm501_modify_reg(struct device *dev, unsigned long reg, unsigned long set, unsigned long clear); /* Platform data definitions */ #define SM501FB_FLAG_USE_INIT_MODE (1<<0) #define SM501FB_FLAG_DISABLE_AT_EXIT (1<<1) #define SM501FB_FLAG_USE_HWCURSOR (1<<2) #define SM501FB_FLAG_USE_HWACCEL (1<<3) #define SM501FB_FLAG_PANEL_NO_FPEN (1<<4) #define SM501FB_FLAG_PANEL_NO_VBIASEN (1<<5) #define SM501FB_FLAG_PANEL_INV_FPEN (1<<6) #define SM501FB_FLAG_PANEL_INV_VBIASEN (1<<7) struct sm501_platdata_fbsub { struct fb_videomode *def_mode; unsigned int def_bpp; unsigned long max_mem; unsigned int flags; }; enum sm501_fb_routing { SM501_FB_OWN = 0, /* CRT=>CRT, Panel=>Panel */ SM501_FB_CRT_PANEL = 1, /* Panel=>CRT, Panel=>Panel */ }; /* sm501_platdata_fb flag field bit definitions */ #define SM501_FBPD_SWAP_FB_ENDIAN (1<<0) /* need to endian swap */ /* sm501_platdata_fb * * configuration data for the framebuffer driver */ struct sm501_platdata_fb { enum sm501_fb_routing fb_route; unsigned int flags; struct sm501_platdata_fbsub *fb_crt; struct sm501_platdata_fbsub *fb_pnl; }; /* gpio i2c * * Note, we have to pass in the bus number, as the number used will be * passed to the i2c-gpio driver's platform_device.id, subsequently used * to register the i2c bus. */ struct sm501_platdata_gpio_i2c { unsigned int bus_num; unsigned int pin_sda; unsigned int pin_scl; int udelay; int timeout; }; /* sm501_initdata * * use for initialising values that may not have been setup * before the driver is loaded. */ struct sm501_reg_init { unsigned long set; unsigned long mask; }; #define SM501_USE_USB_HOST (1<<0) #define SM501_USE_USB_SLAVE (1<<1) #define SM501_USE_SSP0 (1<<2) #define SM501_USE_SSP1 (1<<3) #define SM501_USE_UART0 (1<<4) #define SM501_USE_UART1 (1<<5) #define SM501_USE_FBACCEL (1<<6) #define SM501_USE_AC97 (1<<7) #define SM501_USE_I2S (1<<8) #define SM501_USE_GPIO (1<<9) #define SM501_USE_ALL (0xffffffff) struct sm501_initdata { struct sm501_reg_init gpio_low; struct sm501_reg_init gpio_high; struct sm501_reg_init misc_timing; struct sm501_reg_init misc_control; unsigned long devices; unsigned long mclk; /* non-zero to modify */ unsigned long m1xclk; /* non-zero to modify */ }; /* sm501_init_gpio * * default gpio settings */ struct sm501_init_gpio { struct sm501_reg_init gpio_data_low; struct sm501_reg_init gpio_data_high; struct sm501_reg_init gpio_ddr_low; struct sm501_reg_init gpio_ddr_high; }; #define SM501_FLAG_SUSPEND_OFF (1<<4) /* sm501_platdata * * This is passed with the platform device to allow the board * to control the behaviour of the SM501 driver(s) which attach * to the device. * */ struct sm501_platdata { struct sm501_initdata *init; struct sm501_init_gpio *init_gpiop; struct sm501_platdata_fb *fb; int flags; int gpio_base; int (*get_power)(struct device *dev); int (*set_power)(struct device *dev, unsigned int on); struct sm501_platdata_gpio_i2c *gpio_i2c; unsigned int gpio_i2c_nr; }; #if defined(CONFIG_PPC32) #define smc501_readl(addr) ioread32be((addr)) #define smc501_writel(val, addr) iowrite32be((val), (addr)) #else #define smc501_readl(addr) readl(addr) #define smc501_writel(val, addr) writel(val, addr) #endif pinctrl/machine.h 0000644 00000011765 14722070374 0010014 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Machine interface for the pinctrl subsystem. 
* * Copyright (C) 2011 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * Based on bits of regulator core, gpio core and clk core * * Author: Linus Walleij <linus.walleij@linaro.org> */ #ifndef __LINUX_PINCTRL_MACHINE_H #define __LINUX_PINCTRL_MACHINE_H #include <linux/bug.h> #include <linux/pinctrl/pinctrl-state.h> enum pinctrl_map_type { PIN_MAP_TYPE_INVALID, PIN_MAP_TYPE_DUMMY_STATE, PIN_MAP_TYPE_MUX_GROUP, PIN_MAP_TYPE_CONFIGS_PIN, PIN_MAP_TYPE_CONFIGS_GROUP, }; /** * struct pinctrl_map_mux - mapping table content for MAP_TYPE_MUX_GROUP * @group: the name of the group whose mux function is to be configured. This * field may be left NULL, and the first applicable group for the function * will be used. * @function: the mux function to select for the group */ struct pinctrl_map_mux { const char *group; const char *function; }; /** * struct pinctrl_map_configs - mapping table content for MAP_TYPE_CONFIGS_* * @group_or_pin: the name of the pin or group whose configuration parameters * are to be configured. * @configs: a pointer to an array of config parameters/values to program into * hardware. Each individual pin controller defines the format and meaning * of config parameters. * @num_configs: the number of entries in array @configs */ struct pinctrl_map_configs { const char *group_or_pin; unsigned long *configs; unsigned num_configs; }; /** * struct pinctrl_map - boards/machines shall provide this map for devices * @dev_name: the name of the device using this specific mapping, the name * must be the same as in your struct device*. If this name is set to the * same name as the pin controller's own dev_name(), the map entry will be * hogged by the driver itself upon registration * @name: the name of this specific map entry for the particular machine. * This is the parameter passed to pinctrl_lookup_state() * @type: the type of mapping table entry * @ctrl_dev_name: the name of the device controlling this specific mapping, * the name must be the same as in your struct device*.
This field is not * used for PIN_MAP_TYPE_DUMMY_STATE * @data: Data specific to the mapping type */ struct pinctrl_map { const char *dev_name; const char *name; enum pinctrl_map_type type; const char *ctrl_dev_name; union { struct pinctrl_map_mux mux; struct pinctrl_map_configs configs; } data; }; /* Convenience macros to create mapping table entries */ #define PIN_MAP_DUMMY_STATE(dev, state) \ { \ .dev_name = dev, \ .name = state, \ .type = PIN_MAP_TYPE_DUMMY_STATE, \ } #define PIN_MAP_MUX_GROUP(dev, state, pinctrl, grp, func) \ { \ .dev_name = dev, \ .name = state, \ .type = PIN_MAP_TYPE_MUX_GROUP, \ .ctrl_dev_name = pinctrl, \ .data.mux = { \ .group = grp, \ .function = func, \ }, \ } #define PIN_MAP_MUX_GROUP_DEFAULT(dev, pinctrl, grp, func) \ PIN_MAP_MUX_GROUP(dev, PINCTRL_STATE_DEFAULT, pinctrl, grp, func) #define PIN_MAP_MUX_GROUP_HOG(dev, state, grp, func) \ PIN_MAP_MUX_GROUP(dev, state, dev, grp, func) #define PIN_MAP_MUX_GROUP_HOG_DEFAULT(dev, grp, func) \ PIN_MAP_MUX_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, func) #define PIN_MAP_CONFIGS_PIN(dev, state, pinctrl, pin, cfgs) \ { \ .dev_name = dev, \ .name = state, \ .type = PIN_MAP_TYPE_CONFIGS_PIN, \ .ctrl_dev_name = pinctrl, \ .data.configs = { \ .group_or_pin = pin, \ .configs = cfgs, \ .num_configs = ARRAY_SIZE(cfgs), \ }, \ } #define PIN_MAP_CONFIGS_PIN_DEFAULT(dev, pinctrl, pin, cfgs) \ PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_DEFAULT, pinctrl, pin, cfgs) #define PIN_MAP_CONFIGS_PIN_HOG(dev, state, pin, cfgs) \ PIN_MAP_CONFIGS_PIN(dev, state, dev, pin, cfgs) #define PIN_MAP_CONFIGS_PIN_HOG_DEFAULT(dev, pin, cfgs) \ PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_DEFAULT, dev, pin, cfgs) #define PIN_MAP_CONFIGS_GROUP(dev, state, pinctrl, grp, cfgs) \ { \ .dev_name = dev, \ .name = state, \ .type = PIN_MAP_TYPE_CONFIGS_GROUP, \ .ctrl_dev_name = pinctrl, \ .data.configs = { \ .group_or_pin = grp, \ .configs = cfgs, \ .num_configs = ARRAY_SIZE(cfgs), \ }, \ } #define PIN_MAP_CONFIGS_GROUP_DEFAULT(dev, pinctrl, grp, cfgs) \ PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, pinctrl, grp, cfgs) #define PIN_MAP_CONFIGS_GROUP_HOG(dev, state, grp, cfgs) \ PIN_MAP_CONFIGS_GROUP(dev, state, dev, grp, cfgs) #define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \ PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs) #ifdef CONFIG_PINCTRL extern int pinctrl_register_mappings(const struct pinctrl_map *map, unsigned num_maps); extern void pinctrl_provide_dummies(void); #else static inline int pinctrl_register_mappings(const struct pinctrl_map *map, unsigned num_maps) { return 0; } static inline void pinctrl_provide_dummies(void) { } #endif /* !CONFIG_PINCTRL */ #endif pinctrl/pinctrl.h 0000644 00000016614 14722070374 0010061 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Interface the pinctrl subsystem * * Copyright (C) 2011 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * This interface is used in the core to keep track of pins. 
* * Author: Linus Walleij <linus.walleij@linaro.org> */ #ifndef __LINUX_PINCTRL_PINCTRL_H #define __LINUX_PINCTRL_PINCTRL_H #include <linux/radix-tree.h> #include <linux/list.h> #include <linux/seq_file.h> #include <linux/pinctrl/pinctrl-state.h> #include <linux/pinctrl/devinfo.h> struct device; struct pinctrl_dev; struct pinctrl_map; struct pinmux_ops; struct pinconf_ops; struct pin_config_item; struct gpio_chip; struct device_node; /** * struct pinctrl_pin_desc - boards/machines provide information on their * pins, pads or other muxable units in this struct * @number: unique pin number from the global pin number space * @name: a name for this pin * @drv_data: driver-defined per-pin data. pinctrl core does not touch this */ struct pinctrl_pin_desc { unsigned number; const char *name; void *drv_data; }; /* Convenience macro to define a single named or anonymous pin descriptor */ #define PINCTRL_PIN(a, b) { .number = a, .name = b } #define PINCTRL_PIN_ANON(a) { .number = a } /** * struct pinctrl_gpio_range - each pin controller can provide subranges of * the GPIO number space to be handled by the controller * @node: list node for internal use * @name: a name for the chip in this range * @id: an ID number for the chip in this range * @base: base offset of the GPIO range * @pin_base: base pin number of the GPIO range if pins == NULL * @pins: enumeration of pins in GPIO range or NULL * @npins: number of pins in the GPIO range, including the base number * @gc: an optional pointer to a gpio_chip */ struct pinctrl_gpio_range { struct list_head node; const char *name; unsigned int id; unsigned int base; unsigned int pin_base; unsigned const *pins; unsigned int npins; struct gpio_chip *gc; }; /** * struct pinctrl_ops - global pin control operations, to be implemented by * pin controller drivers. * @get_groups_count: Returns the count of total number of groups registered. * @get_group_name: return the group name of the pin group * @get_group_pins: return an array of pins corresponding to a certain * group selector @pins, and the size of the array in @num_pins * @pin_dbg_show: optional debugfs display hook that will provide per-device * info for a certain pin in debugfs * @dt_node_to_map: parse a device tree "pin configuration node", and create * mapping table entries for it. These are returned through the @map and * @num_maps output parameters. This function is optional, and may be * omitted for pinctrl drivers that do not support device tree. * @dt_free_map: free mapping table entries created via @dt_node_to_map. The * top-level @map pointer must be freed, along with any dynamically * allocated members of the mapping table entries themselves. This * function is optional, and may be omitted for pinctrl drivers that do * not support device tree. 
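 *
 * Minimal sketch of an implementation (illustrative only; the "foo"
 * names are hypothetical and only the group callbacks are shown):
 *
 *	static int foo_get_groups_count(struct pinctrl_dev *pctldev)
 *	{
 *		return ARRAY_SIZE(foo_groups);
 *	}
 *
 *	static const struct pinctrl_ops foo_pctlops = {
 *		.get_groups_count = foo_get_groups_count,
 *		.get_group_name = foo_get_group_name,
 *		.get_group_pins = foo_get_group_pins,
 *	};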
*/ struct pinctrl_ops { int (*get_groups_count) (struct pinctrl_dev *pctldev); const char *(*get_group_name) (struct pinctrl_dev *pctldev, unsigned selector); int (*get_group_pins) (struct pinctrl_dev *pctldev, unsigned selector, const unsigned **pins, unsigned *num_pins); void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset); int (*dt_node_to_map) (struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps); void (*dt_free_map) (struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps); }; /** * struct pinctrl_desc - pin controller descriptor, register this to pin * control subsystem * @name: name for the pin controller * @pins: an array of pin descriptors describing all the pins handled by * this pin controller * @npins: number of descriptors in the array, usually just ARRAY_SIZE() * of the pins field above * @pctlops: pin control operation vtable, to support global concepts like * grouping of pins, this is optional. * @pmxops: pinmux operations vtable, if you support pinmuxing in your driver * @confops: pin config operations vtable, if you support pin configuration in * your driver * @owner: module providing the pin controller, used for refcounting * @num_custom_params: Number of driver-specific custom parameters to be parsed * from the hardware description * @custom_params: List of driver-specific custom parameters to be parsed from * the hardware description * @custom_conf_items: Information on how to print @custom_params in debugfs, * must be the same size as the @custom_params, i.e. @num_custom_params * @link_consumers: If true create a device link between pinctrl and its * consumers (i.e. the devices requesting pin control states). This is * sometimes necessary to ascertain the right suspend/resume order for * example.
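 *
 * Minimal sketch of a descriptor (illustrative only; the "foo" names
 * are hypothetical) as it would be handed to pinctrl_register_and_init():
 *
 *	static const struct pinctrl_pin_desc foo_pins[] = {
 *		PINCTRL_PIN(0, "A1"),
 *		PINCTRL_PIN(1, "A2"),
 *	};
 *
 *	static struct pinctrl_desc foo_desc = {
 *		.name = "foo-pinctrl",
 *		.pins = foo_pins,
 *		.npins = ARRAY_SIZE(foo_pins),
 *		.pctlops = &foo_pctlops,
 *		.owner = THIS_MODULE,
 *	};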
*/ struct pinctrl_desc { const char *name; const struct pinctrl_pin_desc *pins; unsigned int npins; const struct pinctrl_ops *pctlops; const struct pinmux_ops *pmxops; const struct pinconf_ops *confops; struct module *owner; #ifdef CONFIG_GENERIC_PINCONF unsigned int num_custom_params; const struct pinconf_generic_params *custom_params; const struct pin_config_item *custom_conf_items; #endif bool link_consumers; }; /* External interface to pin controller */ extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data, struct pinctrl_dev **pctldev); extern int pinctrl_enable(struct pinctrl_dev *pctldev); /* Please use pinctrl_register_and_init() and pinctrl_enable() instead */ extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data); extern void pinctrl_unregister(struct pinctrl_dev *pctldev); extern int devm_pinctrl_register_and_init(struct device *dev, struct pinctrl_desc *pctldesc, void *driver_data, struct pinctrl_dev **pctldev); /* Please use devm_pinctrl_register_and_init() instead */ extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev, struct pinctrl_desc *pctldesc, void *driver_data); extern void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev); extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range); extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *ranges, unsigned nranges); extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range); extern struct pinctrl_dev *pinctrl_find_and_add_gpio_range(const char *devname, struct pinctrl_gpio_range *range); extern struct pinctrl_gpio_range * pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev, unsigned int pin); extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev, const char *pin_group, const unsigned **pins, unsigned *num_pins); #ifdef CONFIG_OF extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np); #else static inline struct pinctrl_dev *of_pinctrl_get(struct device_node *np) { return NULL; } #endif /* CONFIG_OF */ extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev); extern const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev); extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev); #endif /* __LINUX_PINCTRL_PINCTRL_H */ pinctrl/pinctrl-state.h 0000644 00000003244 14722070374 0011172 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Standard pin control state definitions */ #ifndef __LINUX_PINCTRL_PINCTRL_STATE_H #define __LINUX_PINCTRL_PINCTRL_STATE_H /** * @PINCTRL_STATE_DEFAULT: the state the pinctrl handle shall be put * into as default, usually this means the pins are up and ready to * be used by the device driver. This state is commonly used by * hogs to configure muxing and pins at boot, and also as a state * to go into when returning from sleep and idle in * .pm_runtime_resume() or ordinary .resume() for example. * @PINCTRL_STATE_INIT: normally the pinctrl will be set to "default" * before the driver's probe() function is called. There are some * drivers where that is not appropriate because doing so would * glitch the pins. In those cases you can add an "init" pinctrl * which is the state of the pins before driver probe. After probe * if the pins are still in "init" state they'll be moved to * "default".
* @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into * when the pins are idle. This is a state where the system is relaxed * but not fully sleeping - some power may be on but clocks gated for * example. Could typically be set from a pm_runtime_suspend() or * pm_runtime_idle() operation. * @PINCTRL_STATE_SLEEP: the state the pinctrl handle shall be put into * when the pins are sleeping. This is a state where the system is in * its lowest sleep state. Could typically be set from an * ordinary .suspend() function. */ #define PINCTRL_STATE_DEFAULT "default" #define PINCTRL_STATE_INIT "init" #define PINCTRL_STATE_IDLE "idle" #define PINCTRL_STATE_SLEEP "sleep" #endif /* __LINUX_PINCTRL_PINCTRL_STATE_H */ pinctrl/pinmux.h 0000644 00000007420 14722070374 0007721 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Interface the pinmux subsystem * * Copyright (C) 2011 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * Based on bits of regulator core, gpio core and clk core * * Author: Linus Walleij <linus.walleij@linaro.org> */ #ifndef __LINUX_PINCTRL_PINMUX_H #define __LINUX_PINCTRL_PINMUX_H #include <linux/list.h> #include <linux/seq_file.h> #include <linux/pinctrl/pinctrl.h> struct pinctrl_dev; /** * struct pinmux_ops - pinmux operations, to be implemented by pin controller * drivers that support pinmuxing * @request: called by the core to see if a certain pin can be made * available for muxing. This is called by the core to acquire the pins * before selecting any actual mux setting across a function. The driver * is allowed to answer "no" by returning a negative error code * @free: the reverse function of the request() callback, frees a pin after * being requested * @get_functions_count: returns number of selectable named functions available * in this pinmux driver * @get_function_name: return the function name of the muxing selector, * called by the core to figure out which mux setting it shall map a * certain device to * @get_function_groups: return an array of groups names (in turn * referencing pins) connected to a certain function selector. The group * name can be used with the generic @pinctrl_ops to retrieve the * actual pins affected. The applicable groups will be returned in * @groups and the number of groups in @num_groups * @set_mux: enable a certain muxing function with a certain pin group. The * driver does not need to figure out whether enabling this function * conflicts some other use of the pins in that group, such collisions * are handled by the pinmux subsystem. The @func_selector selects a * certain function whereas @group_selector selects a certain set of pins * to be used. On simple controllers the latter argument may be ignored * @gpio_request_enable: requests and enables GPIO on a certain pin. * Implement this only if you can mux every pin individually as GPIO. The * affected GPIO range is passed along with an offset(pin number) into that * specific GPIO range - function selectors and pin groups are orthogonal * to this, the core will however make sure the pins do not collide. * @gpio_disable_free: free up GPIO muxing on a certain pin, the reverse of * @gpio_request_enable * @gpio_set_direction: Since controllers may need different configurations * depending on whether the GPIO is configured as input or output, * a direction selector function may be implemented as a backing * to the GPIO controllers that need pin muxing. * @strict: do not allow simultaneous use of the same pin for GPIO and another * function. 
Check both gpio_owner and mux_owner strictly before approving * the pin request. */ struct pinmux_ops { int (*request) (struct pinctrl_dev *pctldev, unsigned offset); int (*free) (struct pinctrl_dev *pctldev, unsigned offset); int (*get_functions_count) (struct pinctrl_dev *pctldev); const char *(*get_function_name) (struct pinctrl_dev *pctldev, unsigned selector); int (*get_function_groups) (struct pinctrl_dev *pctldev, unsigned selector, const char * const **groups, unsigned *num_groups); int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector, unsigned group_selector); int (*gpio_request_enable) (struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset); void (*gpio_disable_free) (struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset); int (*gpio_set_direction) (struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset, bool input); bool strict; }; #endif /* __LINUX_PINCTRL_PINMUX_H */ pinctrl/consumer.h 0000644 00000010607 14722070374 0010235 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Consumer interface the pin control subsystem * * Copyright (C) 2012 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * Based on bits of regulator core, gpio core and clk core * * Author: Linus Walleij <linus.walleij@linaro.org> */ #ifndef __LINUX_PINCTRL_CONSUMER_H #define __LINUX_PINCTRL_CONSUMER_H #include <linux/err.h> #include <linux/list.h> #include <linux/seq_file.h> #include <linux/pinctrl/pinctrl-state.h> /* This struct is private to the core and should be regarded as a cookie */ struct pinctrl; struct pinctrl_state; struct device; #ifdef CONFIG_PINCTRL /* External interface to pin control */ extern bool pinctrl_gpio_can_use_line(unsigned gpio); extern int pinctrl_gpio_request(unsigned gpio); extern void pinctrl_gpio_free(unsigned gpio); extern int pinctrl_gpio_direction_input(unsigned gpio); extern int pinctrl_gpio_direction_output(unsigned gpio); extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config); extern struct pinctrl * __must_check pinctrl_get(struct device *dev); extern void pinctrl_put(struct pinctrl *p); extern struct pinctrl_state * __must_check pinctrl_lookup_state( struct pinctrl *p, const char *name); extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s); extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev); extern void devm_pinctrl_put(struct pinctrl *p); #ifdef CONFIG_PM extern int pinctrl_pm_select_default_state(struct device *dev); extern int pinctrl_pm_select_sleep_state(struct device *dev); extern int pinctrl_pm_select_idle_state(struct device *dev); #else static inline int pinctrl_pm_select_default_state(struct device *dev) { return 0; } static inline int pinctrl_pm_select_sleep_state(struct device *dev) { return 0; } static inline int pinctrl_pm_select_idle_state(struct device *dev) { return 0; } #endif #else /* !CONFIG_PINCTRL */ static inline bool pinctrl_gpio_can_use_line(unsigned gpio) { return true; } static inline int pinctrl_gpio_request(unsigned gpio) { return 0; } static inline void pinctrl_gpio_free(unsigned gpio) { } static inline int pinctrl_gpio_direction_input(unsigned gpio) { return 0; } static inline int pinctrl_gpio_direction_output(unsigned gpio) { return 0; } static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config) { return 0; } static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) { return NULL; } static inline void pinctrl_put(struct pinctrl *p) 
{ } static inline struct pinctrl_state * __must_check pinctrl_lookup_state( struct pinctrl *p, const char *name) { return NULL; } static inline int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s) { return 0; } static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) { return NULL; } static inline void devm_pinctrl_put(struct pinctrl *p) { } static inline int pinctrl_pm_select_default_state(struct device *dev) { return 0; } static inline int pinctrl_pm_select_sleep_state(struct device *dev) { return 0; } static inline int pinctrl_pm_select_idle_state(struct device *dev) { return 0; } #endif /* CONFIG_PINCTRL */ static inline struct pinctrl * __must_check pinctrl_get_select( struct device *dev, const char *name) { struct pinctrl *p; struct pinctrl_state *s; int ret; p = pinctrl_get(dev); if (IS_ERR(p)) return p; s = pinctrl_lookup_state(p, name); if (IS_ERR(s)) { pinctrl_put(p); return ERR_CAST(s); } ret = pinctrl_select_state(p, s); if (ret < 0) { pinctrl_put(p); return ERR_PTR(ret); } return p; } static inline struct pinctrl * __must_check pinctrl_get_select_default( struct device *dev) { return pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT); } static inline struct pinctrl * __must_check devm_pinctrl_get_select( struct device *dev, const char *name) { struct pinctrl *p; struct pinctrl_state *s; int ret; p = devm_pinctrl_get(dev); if (IS_ERR(p)) return p; s = pinctrl_lookup_state(p, name); if (IS_ERR(s)) { devm_pinctrl_put(p); return ERR_CAST(s); } ret = pinctrl_select_state(p, s); if (ret < 0) { devm_pinctrl_put(p); return ERR_PTR(ret); } return p; } static inline struct pinctrl * __must_check devm_pinctrl_get_select_default( struct device *dev) { return devm_pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT); } #endif /* __LINUX_PINCTRL_CONSUMER_H */ pinctrl/pinconf-generic.h 0000644 00000022611 14722070374 0011446 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Interface the generic pinconfig portions of the pinctrl subsystem * * Copyright (C) 2011 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * This interface is used in the core to keep track of pins. * * Author: Linus Walleij <linus.walleij@linaro.org> */ #ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H #define __LINUX_PINCTRL_PINCONF_GENERIC_H #include <linux/device.h> #include <linux/pinctrl/machine.h> struct pinctrl_dev; struct pinctrl_map; /** * enum pin_config_param - possible pin configuration parameters * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it * weakly drives the last value on a tristate bus, also known as a "bus * holder", "bus keeper" or "repeater". This allows another device on the * bus to change the value by driving the bus high or low and switching to * tristate. The argument is ignored. * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a * transition from say pull-up to pull-down implies that you disable * pull-up in the process, this setting disables all biasing. * @PIN_CONFIG_BIAS_HIGH_IMPEDANCE: the pin will be set to a high impedance * mode, also known as "third-state" (tristate) or "high-Z" or "floating". * On output pins this effectively disconnects the pin, which is useful * if for example some other pin is going to drive the signal connected * to it for a while. Pins used for input are usually always high * impedance. * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high * impedance to GROUND). If the argument is != 0 pull-down is enabled, * if it is 0, pull-down is total, i.e.
the pin is connected to GROUND. * @PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: the pin will be pulled up or down based * on embedded knowledge of the controller hardware, like current mux * function. The pull direction and possibly strength too will normally * be decided completely inside the hardware block and not be readable * from the kernel side. * If the argument is != 0 pull up/down is enabled, if it is 0, the * configuration is ignored. The proper way to disable it is to use * @PIN_CONFIG_BIAS_DISABLE. * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high * impedance to VDD). If the argument is != 0 pull-up is enabled, * if it is 0, pull-up is total, i.e. the pin is connected to VDD. * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open * collector) which means it is usually wired with other output ports * which are then pulled up with an external resistor. Setting this * config will enable open drain mode, the argument is ignored. * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source * (open emitter). Setting this config will enable open source mode, the * argument is ignored. * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and * low, this is the most typical case and is typically achieved with two * active transistors on the output. Setting this config will enable * push-pull mode, the argument is ignored. * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current * passed as argument. The argument is in mA. * @PIN_CONFIG_DRIVE_STRENGTH_UA: the pin will sink or source at most the current * passed as argument. The argument is in uA. * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, * which means it will wait for signals to settle when reading inputs. The * argument gives the debounce time in usecs. Setting the * argument to zero turns debouncing off. * @PIN_CONFIG_INPUT_ENABLE: enable the pin's input. Note that this does not * affect the pin's ability to drive output. 1 enables input, 0 disables * input. * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in * schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis, * the threshold value is given on a custom format as argument when * setting pins to this mode. * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, * schmitt-trigger mode is disabled. * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power * operation, if several modes of operation are supported these can be * passed in the argument on a custom form, else just use argument 1 * to indicate low power mode, argument 0 turns low power mode off. * @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode * without driving a value there. For most platforms this reduces to * enable the output buffers and then let the pin controller current * configuration (eg. the currently selected mux function) drive values on * the line. Use argument 1 to enable output mode, argument 0 to disable * it. * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a * value on the line. Use argument 1 to indicate high level, argument 0 to * indicate low level. (Please see Documentation/driver-api/pinctl.rst, * section "GPIO mode pitfalls" for a discussion around this parameter.) 
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power * supplies, the argument to this parameter (on a custom format) tells * the driver which alternative power source to use. * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state. * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to * this parameter (on a custom format) tells the driver which alternative * slew rate to use. * @PIN_CONFIG_SKEW_DELAY: if the pin has programmable skew rate (on inputs) * or latch delay (on outputs) this parameter (in a custom format) * specifies the clock skew or latch delay. It typically controls how * many double inverters are put in front of the line. * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if * you need to pass in custom configurations to the pin controller, use * PIN_CONFIG_END+1 as the base offset. * @PIN_CONFIG_MAX: this is the maximum configuration value that can be * presented using the packed format. */ enum pin_config_param { PIN_CONFIG_BIAS_BUS_HOLD, PIN_CONFIG_BIAS_DISABLE, PIN_CONFIG_BIAS_HIGH_IMPEDANCE, PIN_CONFIG_BIAS_PULL_DOWN, PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, PIN_CONFIG_BIAS_PULL_UP, PIN_CONFIG_DRIVE_OPEN_DRAIN, PIN_CONFIG_DRIVE_OPEN_SOURCE, PIN_CONFIG_DRIVE_PUSH_PULL, PIN_CONFIG_DRIVE_STRENGTH, PIN_CONFIG_DRIVE_STRENGTH_UA, PIN_CONFIG_INPUT_DEBOUNCE, PIN_CONFIG_INPUT_ENABLE, PIN_CONFIG_INPUT_SCHMITT, PIN_CONFIG_INPUT_SCHMITT_ENABLE, PIN_CONFIG_LOW_POWER_MODE, PIN_CONFIG_OUTPUT_ENABLE, PIN_CONFIG_OUTPUT, PIN_CONFIG_POWER_SOURCE, PIN_CONFIG_SLEEP_HARDWARE_STATE, PIN_CONFIG_SLEW_RATE, PIN_CONFIG_SKEW_DELAY, PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_END = 0x7F, PIN_CONFIG_MAX = 0xFF, }; /* * Helpful configuration macro to be used in tables etc. */ #define PIN_CONF_PACKED(p, a) ((a << 8) | ((unsigned long) p & 0xffUL)) /* * The following inlines stuffs a configuration parameter and data value * into and out of an unsigned long argument, as used by the generic pin config * system. We put the parameter in the lower 8 bits and the argument in the * upper 24 bits. 
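 *
 * Illustrative round trip (the argument value is arbitrary), using the
 * inlines defined right below:
 *
 *	unsigned long cfg = PIN_CONF_PACKED(PIN_CONFIG_DRIVE_STRENGTH, 12);
 *
 * after which pinconf_to_config_param(cfg) yields
 * PIN_CONFIG_DRIVE_STRENGTH and pinconf_to_config_argument(cfg) yields 12.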
*/ static inline enum pin_config_param pinconf_to_config_param(unsigned long config) { return (enum pin_config_param) (config & 0xffUL); } static inline u32 pinconf_to_config_argument(unsigned long config) { return (u32) ((config >> 8) & 0xffffffUL); } static inline unsigned long pinconf_to_config_packed(enum pin_config_param param, u32 argument) { return PIN_CONF_PACKED(param, argument); } #define PCONFDUMP(a, b, c, d) { \ .param = a, .display = b, .format = c, .has_arg = d \ } struct pin_config_item { const enum pin_config_param param; const char * const display; const char * const format; bool has_arg; }; struct pinconf_generic_params { const char * const property; enum pin_config_param param; u32 default_value; }; int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev, struct device_node *np, struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps, enum pinctrl_map_type type); int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps, enum pinctrl_map_type type); void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps); static inline int pinconf_generic_dt_node_to_map_group( struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps) { return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, PIN_MAP_TYPE_CONFIGS_GROUP); } static inline int pinconf_generic_dt_node_to_map_pin( struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps) { return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, PIN_MAP_TYPE_CONFIGS_PIN); } static inline int pinconf_generic_dt_node_to_map_all( struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps) { /* * passing the type as PIN_MAP_TYPE_INVALID causes the underlying parser * to infer the map type from the DT properties used. */ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, PIN_MAP_TYPE_INVALID); } #endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */ pinctrl/devinfo.h 0000644 00000002675 14722070374 0010042 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Per-device information from the pin control system. * This is the stuff that get included into the device * core. * * Copyright (C) 2012 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * This interface is used in the core to keep track of pins. 
* * Author: Linus Walleij <linus.walleij@linaro.org> */ #ifndef PINCTRL_DEVINFO_H #define PINCTRL_DEVINFO_H #ifdef CONFIG_PINCTRL /* The device core acts as a consumer toward pinctrl */ #include <linux/pinctrl/consumer.h> /** * struct dev_pin_info - pin state container for devices * @p: pinctrl handle for the containing device * @default_state: the default state for the handle, if found * @init_state: the state at probe time, if found * @sleep_state: the state at suspend time, if found * @idle_state: the state at idle (runtime suspend) time, if found */ struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; struct pinctrl_state *init_state; #ifdef CONFIG_PM struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; #endif }; extern int pinctrl_bind_pins(struct device *dev); extern int pinctrl_init_done(struct device *dev); #else struct device; /* Stubs if we're not using pinctrl */ static inline int pinctrl_bind_pins(struct device *dev) { return 0; } static inline int pinctrl_init_done(struct device *dev) { return 0; } #endif /* CONFIG_PINCTRL */ #endif /* PINCTRL_DEVINFO_H */ pinctrl/pinconf.h 0000644 00000004760 14722070374 0010041 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Interface to the pinconfig portions of the pinctrl subsystem * * Copyright (C) 2011 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * This interface is used in the core to keep track of pins. * * Author: Linus Walleij <linus.walleij@linaro.org> */ #ifndef __LINUX_PINCTRL_PINCONF_H #define __LINUX_PINCTRL_PINCONF_H #include <linux/types.h> struct pinctrl_dev; struct seq_file; /** * struct pinconf_ops - pin config operations, to be implemented by * pin configuration capable drivers. * @is_generic: for pin controllers that want to use the generic interface, * this flag tells the framework that it's generic. * @pin_config_get: get the config of a certain pin; if the requested config * is not available on this controller this should return -ENOTSUPP, * and if it is available but disabled it should return -EINVAL * @pin_config_set: configure an individual pin * @pin_config_group_get: get configurations for an entire pin group; should * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get.
* @pin_config_group_set: configure all pins in a group * @pin_config_dbg_show: optional debugfs display hook that will provide * per-device info for a certain pin in debugfs * @pin_config_group_dbg_show: optional debugfs display hook that will provide * per-device info for a certain group in debugfs * @pin_config_config_dbg_show: optional debugfs display hook that will decode * and display a driver's pin configuration parameter */ struct pinconf_ops { #ifdef CONFIG_GENERIC_PINCONF bool is_generic; #endif int (*pin_config_get) (struct pinctrl_dev *pctldev, unsigned pin, unsigned long *config); int (*pin_config_set) (struct pinctrl_dev *pctldev, unsigned pin, unsigned long *configs, unsigned num_configs); int (*pin_config_group_get) (struct pinctrl_dev *pctldev, unsigned selector, unsigned long *config); int (*pin_config_group_set) (struct pinctrl_dev *pctldev, unsigned selector, unsigned long *configs, unsigned num_configs); void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset); void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, unsigned selector); void (*pin_config_config_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s, unsigned long config); }; #endif /* __LINUX_PINCTRL_PINCONF_H */ inotify.h 0000644 00000001311 14722070374 0006400 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Inode based directory notification for Linux * * Copyright (C) 2005 John McCutchan */ #ifndef _LINUX_INOTIFY_H #define _LINUX_INOTIFY_H #include <linux/sysctl.h> #include <uapi/linux/inotify.h> extern struct ctl_table inotify_table[]; /* for sysctl */ #define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \ IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \ IN_MOVED_TO | IN_CREATE | IN_DELETE | \ IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | \ IN_Q_OVERFLOW | IN_IGNORED | IN_ONLYDIR | \ IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD | \ IN_MASK_CREATE | IN_ISDIR | IN_ONESHOT) #endif /* _LINUX_INOTIFY_H */ irqbypass.h 0000644 00000006717 14722070374 0006753 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * IRQ offload/bypass manager * * Copyright (C) 2015 Red Hat, Inc. * Copyright (c) 2015 Linaro Ltd. */ #ifndef IRQBYPASS_H #define IRQBYPASS_H #include <linux/list.h> struct irq_bypass_consumer; /* * Theory of operation * * The IRQ bypass manager is a simple set of lists and callbacks that allows * IRQ producers (ex. physical interrupt sources) to be matched to IRQ * consumers (ex. virtualization hardware that allows IRQ bypass or offload) * via a shared token (ex. eventfd_ctx). Producers and consumers register * independently. When a token match is found, the optional @stop callback * will be called for each participant. The pair will then be connected via * the @add_* callbacks, and finally the optional @start callback will allow * any final coordination. When either participant is unregistered, the * process is repeated using the @del_* callbacks in place of the @add_* * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings * are not supported. 
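 * * Connection sketch (an illustration of the sequence described above, not * extra API): on a token match the manager calls the optional ->stop() on * both sides, wires the pair via prod->add_consumer(prod, cons) and * cons->add_producer(cons, prod), then calls the optional ->start() on both; * unregistering either side repeats the sequence with the del_* callbacks.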
*/ /** * struct irq_bypass_producer - IRQ bypass producer definition * @node: IRQ bypass manager private list management * @token: opaque token to match between producer and consumer (non-NULL) * @irq: Linux IRQ number for the producer device * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) * @stop: Perform any quiesce operations necessary prior to add/del (optional) * @start: Perform any startup operations necessary after add/del (optional) * * The IRQ bypass producer structure represents an interrupt source for * participation in possible host bypass, for instance an interrupt vector * for a physical device assigned to a VM. */ struct irq_bypass_producer { struct list_head node; void *token; int irq; int (*add_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); void (*del_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); void (*stop)(struct irq_bypass_producer *); void (*start)(struct irq_bypass_producer *); }; /** * struct irq_bypass_consumer - IRQ bypass consumer definition * @node: IRQ bypass manager private list management * @token: opaque token to match between producer and consumer (non-NULL) * @add_producer: Connect the IRQ consumer to an IRQ producer * @del_producer: Disconnect the IRQ consumer from an IRQ producer * @stop: Perform any quiesce operations necessary prior to add/del (optional) * @start: Perform any startup operations necessary after add/del (optional) * * The IRQ bypass consumer structure represents an interrupt sink for * participation in possible host bypass, for instance a hypervisor may * support offloads to allow bypassing the host entirely or offload * portions of the interrupt handling to the VM. */ struct irq_bypass_consumer { struct list_head node; void *token; int (*add_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); void (*del_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); void (*stop)(struct irq_bypass_consumer *); void (*start)(struct irq_bypass_consumer *); }; int irq_bypass_register_producer(struct irq_bypass_producer *); void irq_bypass_unregister_producer(struct irq_bypass_producer *); int irq_bypass_register_consumer(struct irq_bypass_consumer *); void irq_bypass_unregister_consumer(struct irq_bypass_consumer *); #endif /* IRQBYPASS_H */ hypervisor.h 0000644 00000001140 14722070374 0007131 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_HYPERVISOR_H #define __LINUX_HYPERVISOR_H /* * Generic Hypervisor support * Juergen Gross <jgross@suse.com> */ #ifdef CONFIG_X86 #include <asm/jailhouse_para.h> #include <asm/x86_init.h> static inline void hypervisor_pin_vcpu(int cpu) { x86_platform.hyper.pin_vcpu(cpu); } #else /* !CONFIG_X86 */ #include <linux/of.h> static inline void hypervisor_pin_vcpu(int cpu) { } static inline bool jailhouse_paravirt(void) { return of_find_compatible_node(NULL, NULL, "jailhouse,cell"); } #endif /* !CONFIG_X86 */ #endif /* __LINUX_HYPERVISOR_H */ drbd_limits.h 0000644 00000017503 14722070374 0007225 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* drbd_limits.h This file is part of DRBD by Philipp Reisner and Lars Ellenberg. */ /* * Our current limitations. * Some of them are hard limits, * some of them are arbitrary range limits that make it easier to provide * feedback about nonsense settings for certain configurable values.
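 * * Convention sketch (inferred from the definitions below, not extra API): * each tunable X gets DRBD_X_MIN / DRBD_X_MAX / DRBD_X_DEF plus a * DRBD_X_SCALE character naming its unit: '1' for a plain count, 'k' for * kilobytes, 's' for sectors. E.g. DRBD_RESYNC_RATE_DEF below is 250 with * scale 'k', i.e. a default resync rate of 250 kByte/s.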
*/ #ifndef DRBD_LIMITS_H #define DRBD_LIMITS_H 1 #define DEBUG_RANGE_CHECK 0 #define DRBD_MINOR_COUNT_MIN 1 #define DRBD_MINOR_COUNT_MAX 255 #define DRBD_MINOR_COUNT_DEF 32 #define DRBD_MINOR_COUNT_SCALE '1' #define DRBD_VOLUME_MAX 65535 #define DRBD_DIALOG_REFRESH_MIN 0 #define DRBD_DIALOG_REFRESH_MAX 600 #define DRBD_DIALOG_REFRESH_SCALE '1' /* valid port number */ #define DRBD_PORT_MIN 1 #define DRBD_PORT_MAX 0xffff #define DRBD_PORT_SCALE '1' /* startup { */ /* if you want more than 3.4 days, disable */ #define DRBD_WFC_TIMEOUT_MIN 0 #define DRBD_WFC_TIMEOUT_MAX 300000 #define DRBD_WFC_TIMEOUT_DEF 0 #define DRBD_WFC_TIMEOUT_SCALE '1' #define DRBD_DEGR_WFC_TIMEOUT_MIN 0 #define DRBD_DEGR_WFC_TIMEOUT_MAX 300000 #define DRBD_DEGR_WFC_TIMEOUT_DEF 0 #define DRBD_DEGR_WFC_TIMEOUT_SCALE '1' #define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0 #define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000 #define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0 #define DRBD_OUTDATED_WFC_TIMEOUT_SCALE '1' /* }*/ /* net { */ /* timeout, unit 0.1 seconds * more than one minute timeout is not useful */ #define DRBD_TIMEOUT_MIN 1 #define DRBD_TIMEOUT_MAX 600 #define DRBD_TIMEOUT_DEF 60 /* 6 seconds */ #define DRBD_TIMEOUT_SCALE '1' /* If backing disk takes longer than disk_timeout, mark the disk as failed */ #define DRBD_DISK_TIMEOUT_MIN 0 /* 0 = disabled */ #define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */ #define DRBD_DISK_TIMEOUT_DEF 0 /* disabled */ #define DRBD_DISK_TIMEOUT_SCALE '1' /* active connection retries when C_WF_CONNECTION */ #define DRBD_CONNECT_INT_MIN 1 #define DRBD_CONNECT_INT_MAX 120 #define DRBD_CONNECT_INT_DEF 10 /* seconds */ #define DRBD_CONNECT_INT_SCALE '1' /* keep-alive probes when idle */ #define DRBD_PING_INT_MIN 1 #define DRBD_PING_INT_MAX 120 #define DRBD_PING_INT_DEF 10 #define DRBD_PING_INT_SCALE '1' /* timeout for the ping packets.*/ #define DRBD_PING_TIMEO_MIN 1 #define DRBD_PING_TIMEO_MAX 300 #define DRBD_PING_TIMEO_DEF 5 #define DRBD_PING_TIMEO_SCALE '1' /* max number of write requests between write barriers */ #define DRBD_MAX_EPOCH_SIZE_MIN 1 #define DRBD_MAX_EPOCH_SIZE_MAX 20000 #define DRBD_MAX_EPOCH_SIZE_DEF 2048 #define DRBD_MAX_EPOCH_SIZE_SCALE '1' /* I don't think that a tcp send buffer of more than 10M is useful */ #define DRBD_SNDBUF_SIZE_MIN 0 #define DRBD_SNDBUF_SIZE_MAX (10<<20) #define DRBD_SNDBUF_SIZE_DEF 0 #define DRBD_SNDBUF_SIZE_SCALE '1' #define DRBD_RCVBUF_SIZE_MIN 0 #define DRBD_RCVBUF_SIZE_MAX (10<<20) #define DRBD_RCVBUF_SIZE_DEF 0 #define DRBD_RCVBUF_SIZE_SCALE '1' /* @4k PageSize -> 128kB - 512MB */ #define DRBD_MAX_BUFFERS_MIN 32 #define DRBD_MAX_BUFFERS_MAX 131072 #define DRBD_MAX_BUFFERS_DEF 2048 #define DRBD_MAX_BUFFERS_SCALE '1' /* @4k PageSize -> 4kB - 512MB */ #define DRBD_UNPLUG_WATERMARK_MIN 1 #define DRBD_UNPLUG_WATERMARK_MAX 131072 #define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16) #define DRBD_UNPLUG_WATERMARK_SCALE '1' /* 0 is disabled. * 200 should be more than enough even for very short timeouts */ #define DRBD_KO_COUNT_MIN 0 #define DRBD_KO_COUNT_MAX 200 #define DRBD_KO_COUNT_DEF 7 #define DRBD_KO_COUNT_SCALE '1' /* } */ /* syncer { */ /* FIXME allow rate to be zero? */ #define DRBD_RESYNC_RATE_MIN 1 /* channel bonding 10 GbE, or other hardware */ #define DRBD_RESYNC_RATE_MAX (4 << 20) #define DRBD_RESYNC_RATE_DEF 250 #define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */ #define DRBD_AL_EXTENTS_MIN 67 /* we use u16 as "slot number", (u16)~0 is "FREE".
* If you use >= 292 kB on-disk ring buffer, * this is the maximum you can use: */ #define DRBD_AL_EXTENTS_MAX 0xfffe #define DRBD_AL_EXTENTS_DEF 1237 #define DRBD_AL_EXTENTS_SCALE '1' #define DRBD_MINOR_NUMBER_MIN -1 #define DRBD_MINOR_NUMBER_MAX ((1 << 20) - 1) #define DRBD_MINOR_NUMBER_DEF -1 #define DRBD_MINOR_NUMBER_SCALE '1' /* } */ /* drbdsetup XY resize -d Z * you are free to reduce the device size to nothing, if you want to. * the upper limit with a 64-bit kernel, enough RAM and flexible meta data * is currently 1 PiB. */ /* DRBD_MAX_SECTORS */ #define DRBD_DISK_SIZE_MIN 0 #define DRBD_DISK_SIZE_MAX (1 * (2LLU << 40)) #define DRBD_DISK_SIZE_DEF 0 /* = disabled = no user size... */ #define DRBD_DISK_SIZE_SCALE 's' /* sectors */ #define DRBD_ON_IO_ERROR_DEF EP_DETACH #define DRBD_FENCING_DEF FP_DONT_CARE #define DRBD_AFTER_SB_0P_DEF ASB_DISCONNECT #define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT #define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT #define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT #define DRBD_ON_NO_DATA_DEF OND_IO_ERROR #define DRBD_ON_CONGESTION_DEF OC_BLOCK #define DRBD_READ_BALANCING_DEF RB_PREFER_LOCAL #define DRBD_MAX_BIO_BVECS_MIN 0 #define DRBD_MAX_BIO_BVECS_MAX 128 #define DRBD_MAX_BIO_BVECS_DEF 0 #define DRBD_MAX_BIO_BVECS_SCALE '1' #define DRBD_C_PLAN_AHEAD_MIN 0 #define DRBD_C_PLAN_AHEAD_MAX 300 #define DRBD_C_PLAN_AHEAD_DEF 20 #define DRBD_C_PLAN_AHEAD_SCALE '1' #define DRBD_C_DELAY_TARGET_MIN 1 #define DRBD_C_DELAY_TARGET_MAX 100 #define DRBD_C_DELAY_TARGET_DEF 10 #define DRBD_C_DELAY_TARGET_SCALE '1' #define DRBD_C_FILL_TARGET_MIN 0 #define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sectors */ #define DRBD_C_FILL_TARGET_DEF 100 /* Try to place 50KiB in socket send buffer during resync */ #define DRBD_C_FILL_TARGET_SCALE 's' /* sectors */ #define DRBD_C_MAX_RATE_MIN 250 #define DRBD_C_MAX_RATE_MAX (4 << 20) #define DRBD_C_MAX_RATE_DEF 102400 #define DRBD_C_MAX_RATE_SCALE 'k' /* kilobytes */ #define DRBD_C_MIN_RATE_MIN 0 #define DRBD_C_MIN_RATE_MAX (4 << 20) #define DRBD_C_MIN_RATE_DEF 250 #define DRBD_C_MIN_RATE_SCALE 'k' /* kilobytes */ #define DRBD_CONG_FILL_MIN 0 #define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */ #define DRBD_CONG_FILL_DEF 0 #define DRBD_CONG_FILL_SCALE 's' /* sectors */ #define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN #define DRBD_CONG_EXTENTS_MAX DRBD_AL_EXTENTS_MAX #define DRBD_CONG_EXTENTS_DEF DRBD_AL_EXTENTS_DEF #define DRBD_CONG_EXTENTS_SCALE DRBD_AL_EXTENTS_SCALE #define DRBD_PROTOCOL_DEF DRBD_PROT_C #define DRBD_DISK_BARRIER_DEF 0 #define DRBD_DISK_FLUSHES_DEF 1 #define DRBD_DISK_DRAIN_DEF 1 #define DRBD_MD_FLUSHES_DEF 1 #define DRBD_TCP_CORK_DEF 1 #define DRBD_AL_UPDATES_DEF 1 /* We used to ignore the discard_zeroes_data setting. * To not change established (and expected) behaviour, * by default assume that, for discard_zeroes_data=0, * we can make that an effective discard_zeroes_data=1, * if we only explicitly zero-out unaligned partial chunks. */ #define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1 /* Some backends pretend to support WRITE SAME, * but fail such requests when they are actually submitted. * This is to tell DRBD to not even try.
*/ #define DRBD_DISABLE_WRITE_SAME_DEF 0 #define DRBD_ALLOW_TWO_PRIMARIES_DEF 0 #define DRBD_ALWAYS_ASBP_DEF 0 #define DRBD_USE_RLE_DEF 1 #define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0 #define DRBD_AL_STRIPES_MIN 1 #define DRBD_AL_STRIPES_MAX 1024 #define DRBD_AL_STRIPES_DEF 1 #define DRBD_AL_STRIPES_SCALE '1' #define DRBD_AL_STRIPE_SIZE_MIN 4 #define DRBD_AL_STRIPE_SIZE_MAX 16777216 #define DRBD_AL_STRIPE_SIZE_DEF 32 #define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */ #define DRBD_SOCKET_CHECK_TIMEO_MIN 0 #define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX #define DRBD_SOCKET_CHECK_TIMEO_DEF 0 #define DRBD_SOCKET_CHECK_TIMEO_SCALE '1' #define DRBD_RS_DISCARD_GRANULARITY_MIN 0 #define DRBD_RS_DISCARD_GRANULARITY_MAX (1<<20) /* 1MiByte */ #define DRBD_RS_DISCARD_GRANULARITY_DEF 0 /* disabled by default */ #define DRBD_RS_DISCARD_GRANULARITY_SCALE '1' /* bytes */ #endif jiffies.h 0000644 00000037252 14722070374 0006353 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_JIFFIES_H #define _LINUX_JIFFIES_H #include <linux/cache.h> #include <linux/math64.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/time.h> #include <linux/timex.h> #include <asm/param.h> /* for HZ */ #include <generated/timeconst.h> /* * The following defines establish the engineering parameters of the PLL * model. The HZ variable establishes the timer interrupt frequency, 100 Hz * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the * nearest power of two in order to avoid hardware multiply operations. */ #if HZ >= 12 && HZ < 24 # define SHIFT_HZ 4 #elif HZ >= 24 && HZ < 48 # define SHIFT_HZ 5 #elif HZ >= 48 && HZ < 96 # define SHIFT_HZ 6 #elif HZ >= 96 && HZ < 192 # define SHIFT_HZ 7 #elif HZ >= 192 && HZ < 384 # define SHIFT_HZ 8 #elif HZ >= 384 && HZ < 768 # define SHIFT_HZ 9 #elif HZ >= 768 && HZ < 1536 # define SHIFT_HZ 10 #elif HZ >= 1536 && HZ < 3072 # define SHIFT_HZ 11 #elif HZ >= 3072 && HZ < 6144 # define SHIFT_HZ 12 #elif HZ >= 6144 && HZ < 12288 # define SHIFT_HZ 13 #else # error Invalid value of HZ. #endif /* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can * improve accuracy by shifting LSH bits, hence calculating: * (NOM << LSH) / DEN * This however means trouble for large NOM, because (NOM << LSH) may no * longer fit in 32 bits. The following way of calculating this gives us * some slack, under the following conditions: * - (NOM / DEN) fits in (32 - LSH) bits. * - (NOM % DEN) fits in (32 - LSH) bits. */ #define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \ + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN)) /* LATCH is used in the interval timer and ftape setup. */ #define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */ extern int register_refined_jiffies(long clock_tick_rate); /* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ #define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) /* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */ #define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ) /* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ #define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) #ifndef __jiffy_arch_data #define __jiffy_arch_data #endif /* * The 64-bit value is not atomic - you MUST NOT read it * without sampling the sequence number in jiffies_lock. * get_jiffies_64() will do this for you as appropriate. 
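 * * A minimal sketch (assuming a 32-bit build): reading "u64 now = jiffies_64;" * directly can tear between the two 32-bit halves mid-update, while * "u64 now = get_jiffies_64();" samples the sequence count in jiffies_lock * for you and is safe.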
*/ extern u64 __cacheline_aligned_in_smp jiffies_64; extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies; #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void); #else static inline u64 get_jiffies_64(void) { return (u64)jiffies; } #endif /* * These inlines deal with timer wrapping correctly. You are * strongly encouraged to use them * 1. Because people otherwise forget * 2. Because if the timer wrap changes in future you won't have to * alter your driver code. * * time_after(a,b) returns true if the time a is after time b. * * Do this with "<0" and ">=0" to only test the sign of the result. A * good compiler would generate better code (and a really good compiler * wouldn't care). Gcc is currently neither. */ #define time_after(a,b) \ (typecheck(unsigned long, a) && \ typecheck(unsigned long, b) && \ ((long)((b) - (a)) < 0)) #define time_before(a,b) time_after(b,a) #define time_after_eq(a,b) \ (typecheck(unsigned long, a) && \ typecheck(unsigned long, b) && \ ((long)((a) - (b)) >= 0)) #define time_before_eq(a,b) time_after_eq(b,a) /* * Calculate whether a is in the range of [b, c]. */ #define time_in_range(a,b,c) \ (time_after_eq(a,b) && \ time_before_eq(a,c)) /* * Calculate whether a is in the range of [b, c). */ #define time_in_range_open(a,b,c) \ (time_after_eq(a,b) && \ time_before(a,c)) /* Same as above, but does so with platform-independent 64-bit types. * These must be used when utilizing jiffies_64 (i.e. the return value of * get_jiffies_64()) */ #define time_after64(a,b) \ (typecheck(__u64, a) && \ typecheck(__u64, b) && \ ((__s64)((b) - (a)) < 0)) #define time_before64(a,b) time_after64(b,a) #define time_after_eq64(a,b) \ (typecheck(__u64, a) && \ typecheck(__u64, b) && \ ((__s64)((a) - (b)) >= 0)) #define time_before_eq64(a,b) time_after_eq64(b,a) #define time_in_range64(a, b, c) \ (time_after_eq64(a, b) && \ time_before_eq64(a, c)) /* * These four macros compare jiffies and 'a' for convenience. */ /* time_is_before_jiffies(a) returns true if a is before jiffies */ #define time_is_before_jiffies(a) time_after(jiffies, a) #define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) /* time_is_after_jiffies(a) returns true if a is after jiffies */ #define time_is_after_jiffies(a) time_before(jiffies, a) #define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a) /* time_is_before_eq_jiffies(a) returns true if a is before or equal to jiffies*/ #define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a) #define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a) /* time_is_after_eq_jiffies(a) returns true if a is after or equal to jiffies*/ #define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a) #define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a) /* * Have the 32 bit jiffies value wrap 5 minutes after boot * so jiffies wrap bugs show up earlier. */ #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) /* * Change timeval to jiffies, trying to avoid the * most obvious overflows... * * And some not so obvious. * * Note that we don't want to return LONG_MAX, because * for various timeout reasons we often end up having * to wait "jiffies+1" in order to guarantee that we wait * at _least_ "jiffies" - so "jiffies+1" had better still * be positive. */ #define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1) extern unsigned long preset_lpj; /* * We want to do realistic conversions of time so we need to use the same * values the update wall clock code uses as the jiffies size.
This value * is: TICK_NSEC (which is defined in timex.h). This * is a constant and is in nanoseconds. We will use scaled math * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and * NSEC_JIFFIE_SC. Note that these defines contain nothing but * constants and so are computed at compile time. SHIFT_HZ (computed in * timex.h) adjusts the scaling for different HZ values. * Scaled math??? What is that? * * Scaled math is a way to do integer math on values that would, * otherwise, either overflow, underflow, or cause undesired div * instructions to appear in the execution path. In short, we "scale" * up the operands so they take more bits (more precision, less * underflow), do the desired operation and then "scale" the result back * by the same amount. If we do the scaling by shifting we avoid the * costly mpy and the dastardly div instructions. * Suppose, for example, we want to convert from seconds to jiffies * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we * might calculate at compile time, however, the result will only have * about 3-4 bits of precision (less for smaller values of HZ). * * So, we scale as follows: * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE); * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE; * Then we make SCALE a power of two so: * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE; * Now we define: * #define SEC_CONV ((NSEC_PER_SEC << SCALE) / NSEC_PER_JIFFIE) * jiff = (sec * SEC_CONV) >> SCALE; * * Often the math we use will expand beyond 32-bits so we tell C how to * do this and pass the 64-bit result of the mpy through the ">> SCALE" * which should take the result back to 32-bits. We want this expansion * to capture as much precision as possible. At the same time we don't * want to overflow so we pick the SCALE to avoid this. In this file, * that means using a different scale for each range of HZ values (as * defined in timex.h). * * For those who want to know, gcc will give a 64-bit result from a "*" * operator if the result is a long long AND at least one of the * operands is cast to long long (usually just prior to the "*" so as * not to confuse it into thinking it really has a 64-bit operand, * which, by the way, it can do, but it takes more code and at least 2 * mpys). * We also need to be aware that one second in nanoseconds is only a * couple of bits away from overflowing a 32-bit word, so we MUST use * 64-bits to get the full range time in nanoseconds. */ /* * Here are the scales we will use. One for seconds, nanoseconds and * microseconds. * * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and * check if the sign bit is set. If not, we bump the shift count by 1. * (Gets an extra bit of precision where we can use it.) * We know it is set for HZ = 1024 and HZ = 100, not for 1000. * Haven't tested others. * The limits of cpp (for #if expressions) allow only long (no long long), * but then we only need the most significant bit.
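 * * Worked example (a sketch, assuming HZ = 100): SHIFT_HZ is 7, so * SEC_JIFFIE_SC = 31 - 7 = 24 and TICK_NSEC = 10000000; SEC_CONVERSION then * evaluates to (NSEC_PER_SEC << 24) / TICK_NSEC = 100 << 24, and converting * 5 seconds is (5 * (100 << 24)) >> 24 = 500 jiffies in 64-bit intermediate * math, with no division in the generated code.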
*/ #define SEC_JIFFIE_SC (31 - SHIFT_HZ) #if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000) #undef SEC_JIFFIE_SC #define SEC_JIFFIE_SC (32 - SHIFT_HZ) #endif #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ TICK_NSEC -1) / (u64)TICK_NSEC)) #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ TICK_NSEC -1) / (u64)TICK_NSEC)) /* * The maximum jiffie value is (LONG_MAX >> 1). Here we translate that * into seconds. The 64-bit case will overflow if we are not careful, * so use the messy SH_DIV macro to do it. Still all constants. */ #if BITS_PER_LONG < 64 # define MAX_SEC_IN_JIFFIES \ (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC) #else /* take care of overflow on 64 bits machines */ # define MAX_SEC_IN_JIFFIES \ (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1) #endif /* * Convert various time units to each other: */ extern unsigned int jiffies_to_msecs(const unsigned long j); extern unsigned int jiffies_to_usecs(const unsigned long j); static inline u64 jiffies_to_nsecs(const unsigned long j) { return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; } extern u64 jiffies64_to_nsecs(u64 j); extern u64 jiffies64_to_msecs(u64 j); extern unsigned long __msecs_to_jiffies(const unsigned int m); #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) /* * HZ is equal to or smaller than 1000, and 1000 is a nice round * multiple of HZ, divide with the factor between them, but round * upwards: */ static inline unsigned long _msecs_to_jiffies(const unsigned int m) { return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); } #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) /* * HZ is larger than 1000, and HZ is a nice round multiple of 1000 - * simply multiply with the factor between them. * * But first make sure the multiplication result cannot overflow: */ static inline unsigned long _msecs_to_jiffies(const unsigned int m) { if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) return MAX_JIFFY_OFFSET; return m * (HZ / MSEC_PER_SEC); } #else /* * Generic case - multiply, round and divide. But first check that if * we are doing a net multiplication, that we wouldn't overflow: */ static inline unsigned long _msecs_to_jiffies(const unsigned int m) { if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) return MAX_JIFFY_OFFSET; return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32; } #endif /** * msecs_to_jiffies: - convert milliseconds to jiffies * @m: time in milliseconds * * conversion is done as follows: * * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET) * * - 'too large' values [that would result in larger than * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. * * - all other values are converted to jiffies by either multiplying * the input value by a factor or dividing it with a factor and * handling any 32-bit overflows. * for the details see __msecs_to_jiffies() * * msecs_to_jiffies() checks for the passed in value being a constant * via __builtin_constant_p() allowing gcc to eliminate most of the * code, __msecs_to_jiffies() is called if the value passed does not * allow constant folding and the actual conversion must be done at * runtime. * the HZ range specific helpers _msecs_to_jiffies() are called both * directly here and from __msecs_to_jiffies() in the case where * constant folding is not possible.
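 * * Usage sketch (illustrative, assuming HZ = 100): a constant * msecs_to_jiffies(1000) folds to 100 at compile time; negative input such * as msecs_to_jiffies(-1) clamps to MAX_JIFFY_OFFSET ('infinite'), as do * values too large to represent. A typical caller elsewhere in the kernel * might be schedule_timeout_interruptible(msecs_to_jiffies(50)).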
*/ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m) { if (__builtin_constant_p(m)) { if ((int)m < 0) return MAX_JIFFY_OFFSET; return _msecs_to_jiffies(m); } else { return __msecs_to_jiffies(m); } } extern unsigned long __usecs_to_jiffies(const unsigned int u); #if !(USEC_PER_SEC % HZ) static inline unsigned long _usecs_to_jiffies(const unsigned int u) { return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); } #else static inline unsigned long _usecs_to_jiffies(const unsigned int u) { return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32) >> USEC_TO_HZ_SHR32; } #endif /** * usecs_to_jiffies: - convert microseconds to jiffies * @u: time in microseconds * * conversion is done as follows: * * - 'too large' values [that would result in larger than * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. * * - all other values are converted to jiffies by either multiplying * the input value by a factor or dividing it with a factor and * handling any 32-bit overflows as for msecs_to_jiffies. * * usecs_to_jiffies() checks for the passed in value being a constant * via __builtin_constant_p() allowing gcc to eliminate most of the * code, __usecs_to_jiffies() is called if the value passed does not * allow constant folding and the actual conversion must be done at * runtime. * the HZ range specific helpers _usecs_to_jiffies() are called both * directly here and from __usecs_to_jiffies() in the case where * constant folding is not possible. */ static __always_inline unsigned long usecs_to_jiffies(const unsigned int u) { if (__builtin_constant_p(u)) { if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) return MAX_JIFFY_OFFSET; return _usecs_to_jiffies(u); } else { return __usecs_to_jiffies(u); } } extern unsigned long timespec64_to_jiffies(const struct timespec64 *value); extern void jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value); static inline unsigned long timespec_to_jiffies(const struct timespec *value) { struct timespec64 ts = timespec_to_timespec64(*value); return timespec64_to_jiffies(&ts); } static inline void jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) { struct timespec64 ts; jiffies_to_timespec64(jiffies, &ts); *value = timespec64_to_timespec(ts); } extern unsigned long timeval_to_jiffies(const struct timeval *value); extern void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value); extern clock_t jiffies_to_clock_t(unsigned long x); static inline clock_t jiffies_delta_to_clock_t(long delta) { return jiffies_to_clock_t(max(0L, delta)); } static inline unsigned int jiffies_delta_to_msecs(long delta) { return jiffies_to_msecs(max(0L, delta)); } extern unsigned long clock_t_to_jiffies(unsigned long x); extern u64 jiffies_64_to_clock_t(u64 x); extern u64 nsec_to_clock_t(u64 x); extern u64 nsecs_to_jiffies64(u64 n); extern unsigned long nsecs_to_jiffies(u64 n); #define TIMESTAMP_SIZE 30 #endif a.out.h 0000644 00000000542 14722070374 0005752 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __A_OUT_GNU_H__ #define __A_OUT_GNU_H__ #include <uapi/linux/a.out.h> #ifndef __ASSEMBLY__ #ifdef linux #include <asm/page.h> #if defined(__i386__) || defined(__mc68000__) #else #ifndef SEGMENT_SIZE #define SEGMENT_SIZE PAGE_SIZE #endif #endif #endif #endif /*__ASSEMBLY__ */ #endif /* __A_OUT_GNU_H__ */ vlynq.h 0000644 00000006344 14722070374 0006103 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org> */ #ifndef __VLYNQ_H__ #define __VLYNQ_H__
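/* * Usage sketch (hypothetical driver, not part of this header): fill a * struct vlynq_driver with a name, an id_table of struct vlynq_device_id * entries (e.g. { .id = 0x0001, .divisor = vlynq_div_auto }) and * probe/remove callbacks, then call vlynq_register_driver() from module * init and vlynq_unregister_driver() from module exit, as declared below. */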
#include <linux/device.h> #include <linux/types.h> struct module; #define VLYNQ_NUM_IRQS 32 struct vlynq_mapping { u32 size; u32 offset; }; enum vlynq_divisor { vlynq_div_auto = 0, vlynq_ldiv1, vlynq_ldiv2, vlynq_ldiv3, vlynq_ldiv4, vlynq_ldiv5, vlynq_ldiv6, vlynq_ldiv7, vlynq_ldiv8, vlynq_rdiv1, vlynq_rdiv2, vlynq_rdiv3, vlynq_rdiv4, vlynq_rdiv5, vlynq_rdiv6, vlynq_rdiv7, vlynq_rdiv8, vlynq_div_external }; struct vlynq_device_id { u32 id; enum vlynq_divisor divisor; unsigned long driver_data; }; struct vlynq_regs; struct vlynq_device { u32 id, dev_id; int local_irq; int remote_irq; enum vlynq_divisor divisor; u32 regs_start, regs_end; u32 mem_start, mem_end; u32 irq_start, irq_end; int irq; int enabled; struct vlynq_regs *local; struct vlynq_regs *remote; struct device dev; }; struct vlynq_driver { char *name; struct vlynq_device_id *id_table; int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id); void (*remove)(struct vlynq_device *dev); struct device_driver driver; }; struct plat_vlynq_ops { int (*on)(struct vlynq_device *dev); void (*off)(struct vlynq_device *dev); }; static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv) { return container_of(drv, struct vlynq_driver, driver); } static inline struct vlynq_device *to_vlynq_device(struct device *device) { return container_of(device, struct vlynq_device, dev); } extern struct bus_type vlynq_bus_type; extern int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner); static inline int vlynq_register_driver(struct vlynq_driver *driver) { return __vlynq_register_driver(driver, THIS_MODULE); } static inline void *vlynq_get_drvdata(struct vlynq_device *dev) { return dev_get_drvdata(&dev->dev); } static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data) { dev_set_drvdata(&dev->dev, data); } static inline u32 vlynq_mem_start(struct vlynq_device *dev) { return dev->mem_start; } static inline u32 vlynq_mem_end(struct vlynq_device *dev) { return dev->mem_end; } static inline u32 vlynq_mem_len(struct vlynq_device *dev) { return dev->mem_end - dev->mem_start + 1; } static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq) { int irq = dev->irq_start + virq; if ((irq < dev->irq_start) || (irq > dev->irq_end)) return -EINVAL; return irq; } static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq) { if ((irq < dev->irq_start) || (irq > dev->irq_end)) return -EINVAL; return irq - dev->irq_start; } extern void vlynq_unregister_driver(struct vlynq_driver *driver); extern int vlynq_enable_device(struct vlynq_device *dev); extern void vlynq_disable_device(struct vlynq_device *dev); extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset, struct vlynq_mapping *mapping); extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset, struct vlynq_mapping *mapping); extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq); extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq); #endif /* __VLYNQ_H__ */ acpi_dma.h 0000644 00000006122 14722070374 0006461 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * ACPI helpers for DMA request / controller * * Based on of_dma.h * * Copyright (C) 2013, Intel Corporation * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> */ #ifndef __LINUX_ACPI_DMA_H #define __LINUX_ACPI_DMA_H #include <linux/list.h> #include <linux/device.h> #include <linux/err.h> #include <linux/dmaengine.h> /** * struct acpi_dma_spec - slave device DMA resources * @chan_id: channel 
unique id * @slave_id: request line unique id * @dev: struct device of the DMA controller to be used in the filter * function */ struct acpi_dma_spec { int chan_id; int slave_id; struct device *dev; }; /** * struct acpi_dma - representation of the registered DMAC * @dma_controllers: linked list node * @dev: struct device of this controller * @acpi_dma_xlate: callback function to find a suitable channel * @data: private data used by a callback function * @base_request_line: first supported request line (CSRT) * @end_request_line: last supported request line (CSRT) */ struct acpi_dma { struct list_head dma_controllers; struct device *dev; struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *); void *data; unsigned short base_request_line; unsigned short end_request_line; }; /* Used with acpi_dma_simple_xlate() */ struct acpi_dma_filter_info { dma_cap_mask_t dma_cap; dma_filter_fn filter_fn; }; #ifdef CONFIG_DMA_ACPI int acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *), void *data); int acpi_dma_controller_free(struct device *dev); int devm_acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *), void *data); void devm_acpi_dma_controller_free(struct device *dev); struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, size_t index); struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, const char *name); struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, struct acpi_dma *adma); #else static inline int acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *), void *data) { return -ENODEV; } static inline int acpi_dma_controller_free(struct device *dev) { return -ENODEV; } static inline int devm_acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *), void *data) { return -ENODEV; } static inline void devm_acpi_dma_controller_free(struct device *dev) { } static inline struct dma_chan *acpi_dma_request_slave_chan_by_index( struct device *dev, size_t index) { return ERR_PTR(-ENODEV); } static inline struct dma_chan *acpi_dma_request_slave_chan_by_name( struct device *dev, const char *name) { return ERR_PTR(-ENODEV); } #define acpi_dma_simple_xlate NULL #endif #define acpi_dma_request_slave_channel acpi_dma_request_slave_chan_by_index #endif /* __LINUX_ACPI_DMA_H */ sunrpc/svc_rdma.h 0000644 00000016500 14722070374 0010035 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the BSD-type * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. 
* * Neither the name of the Network Appliance, Inc. nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Author: Tom Tucker <tom@opengridcomputing.com> */ #ifndef SVC_RDMA_H #define SVC_RDMA_H #include <linux/llist.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/rpc_rdma.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> #define SVCRDMA_DEBUG /* Default and maximum inline threshold sizes */ enum { RPCRDMA_DEF_INLINE_THRESH = 4096, RPCRDMA_MAX_INLINE_THRESH = 65536 }; /* RPC/RDMA parameters and stats */ extern unsigned int svcrdma_ord; extern unsigned int svcrdma_max_requests; extern unsigned int svcrdma_max_bc_requests; extern unsigned int svcrdma_max_req_size; extern atomic_t rdma_stat_recv; extern atomic_t rdma_stat_read; extern atomic_t rdma_stat_write; extern atomic_t rdma_stat_sq_starve; extern atomic_t rdma_stat_rq_starve; extern atomic_t rdma_stat_rq_poll; extern atomic_t rdma_stat_rq_prod; extern atomic_t rdma_stat_sq_poll; extern atomic_t rdma_stat_sq_prod; struct svcxprt_rdma { struct svc_xprt sc_xprt; /* SVC transport structure */ struct rdma_cm_id *sc_cm_id; /* RDMA connection id */ struct list_head sc_accept_q; /* Conn. 
waiting accept */ int sc_ord; /* RDMA read limit */ int sc_max_send_sges; bool sc_snd_w_inv; /* OK to use Send With Invalidate */ atomic_t sc_sq_avail; /* SQEs ready to be consumed */ unsigned int sc_sq_depth; /* Depth of SQ */ __be32 sc_fc_credits; /* Forward credits */ u32 sc_max_requests; /* Max requests */ u32 sc_max_bc_requests;/* Backward credits */ int sc_max_req_size; /* Size of each RQ WR buf */ u8 sc_port_num; struct ib_pd *sc_pd; spinlock_t sc_send_lock; struct list_head sc_send_ctxts; spinlock_t sc_rw_ctxt_lock; struct list_head sc_rw_ctxts; struct list_head sc_rq_dto_q; spinlock_t sc_rq_dto_lock; struct ib_qp *sc_qp; struct ib_cq *sc_rq_cq; struct ib_cq *sc_sq_cq; spinlock_t sc_lock; /* transport lock */ wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ unsigned long sc_flags; struct list_head sc_read_complete_q; struct work_struct sc_work; struct llist_head sc_recv_ctxts; }; /* sc_flags */ #define RDMAXPRT_CONN_PENDING 3 /* * Default connection parameters */ enum { RPCRDMA_LISTEN_BACKLOG = 10, RPCRDMA_MAX_REQUESTS = 64, RPCRDMA_MAX_BC_REQUESTS = 2, }; #define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD struct svc_rdma_recv_ctxt { struct llist_node rc_node; struct list_head rc_list; struct ib_recv_wr rc_recv_wr; struct ib_cqe rc_cqe; struct ib_sge rc_recv_sge; void *rc_recv_buf; struct xdr_buf rc_arg; bool rc_temp; u32 rc_byte_len; unsigned int rc_page_count; unsigned int rc_hdr_count; u32 rc_inv_rkey; unsigned int rc_read_payload_offset; unsigned int rc_read_payload_length; struct page *rc_pages[RPCSVC_MAXPAGES]; }; struct svc_rdma_send_ctxt { struct list_head sc_list; struct ib_send_wr sc_send_wr; struct ib_cqe sc_cqe; void *sc_xprt_buf; int sc_page_count; int sc_cur_sge_no; struct page *sc_pages[RPCSVC_MAXPAGES]; struct ib_sge sc_sges[]; }; /* svc_rdma_backchannel.c */ extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *rctxt); /* svc_rdma_recvfrom.c */ extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma); extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt); extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma); extern void svc_rdma_release_rqst(struct svc_rqst *rqstp); extern int svc_rdma_recvfrom(struct svc_rqst *); /* svc_rdma_rw.c */ extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma); extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head, __be32 *p); extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch, struct xdr_buf *xdr, unsigned int offset, unsigned long length); extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, bool writelist, struct xdr_buf *xdr); /* svc_rdma_sendto.c */ extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma); extern struct svc_rdma_send_ctxt * svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma); extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt); extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr); extern void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt, unsigned int len); extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt, struct xdr_buf *xdr, __be32 *wr_lst); extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned 
int length); /* svc_rdma_transport.c */ extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); extern void svc_sq_reap(struct svcxprt_rdma *); extern void svc_rq_reap(struct svcxprt_rdma *); extern struct svc_xprt_class svc_rdma_class; #ifdef CONFIG_SUNRPC_BACKCHANNEL extern struct svc_xprt_class svc_rdma_bc_class; #endif /* svc_rdma.c */ extern int svc_rdma_init(void); extern void svc_rdma_cleanup(void); #endif sunrpc/svcsock.h 0000644 00000004134 14722070374 0007712 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/svcsock.h * * RPC server socket I/O. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #ifndef SUNRPC_SVCSOCK_H #define SUNRPC_SVCSOCK_H #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svc_xprt.h> /* * RPC server socket. */ struct svc_sock { struct svc_xprt sk_xprt; struct socket * sk_sock; /* berkeley socket layer */ struct sock * sk_sk; /* INET layer */ /* We keep the old state_change and data_ready CB's here */ void (*sk_ostate)(struct sock *); void (*sk_odata)(struct sock *); void (*sk_owspace)(struct sock *); /* private TCP part */ /* On-the-wire fragment header: */ __be32 sk_reclen; /* As we receive a record, this includes the length received so * far (including the fragment header): */ u32 sk_tcplen; /* Total length of the data (not including fragment headers) * received so far in the fragments making up this rpc: */ u32 sk_datalen; struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */ }; static inline u32 svc_sock_reclen(struct svc_sock *svsk) { return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK; } static inline u32 svc_sock_final_rec(struct svc_sock *svsk) { return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT; } /* * Function prototypes. */ void svc_close_net(struct svc_serv *, struct net *); int svc_recv(struct svc_rqst *, long); int svc_send(struct svc_rqst *); void svc_drop(struct svc_rqst *); void svc_sock_update_bufs(struct svc_serv *serv); bool svc_alien_sock(struct net *net, int fd); int svc_addsock(struct svc_serv *serv, const int fd, char *name_return, const size_t len, const struct cred *cred); void svc_init_xprt_sock(void); void svc_cleanup_xprt_sock(void); struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot); void svc_sock_destroy(struct svc_xprt *); /* * svc_makesock socket characteristics */ #define SVC_SOCK_DEFAULTS (0U) #define SVC_SOCK_ANONYMOUS (1U << 0) /* don't register with pmap */ #define SVC_SOCK_TEMPORARY (1U << 1) /* flag socket as temporary */ #endif /* SUNRPC_SVCSOCK_H */ sunrpc/auth_gss.h 0000644 00000004553 14722070374 0010061 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/auth_gss.h * * Declarations for RPCSEC_GSS * * Dug Song <dugsong@monkey.org> * Andy Adamson <andros@umich.edu> * Bruce Fields <bfields@umich.edu> * Copyright (c) 2000 The Regents of the University of Michigan */ #ifndef _LINUX_SUNRPC_AUTH_GSS_H #define _LINUX_SUNRPC_AUTH_GSS_H #ifdef __KERNEL__ #include <linux/refcount.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/gss_api.h> #define RPC_GSS_VERSION 1 #define MAXSEQ 0x80000000 /* maximum legal sequence number, from rfc 2203 */ enum rpc_gss_proc { RPC_GSS_PROC_DATA = 0, RPC_GSS_PROC_INIT = 1, RPC_GSS_PROC_CONTINUE_INIT = 2, RPC_GSS_PROC_DESTROY = 3 }; enum rpc_gss_svc { RPC_GSS_SVC_NONE = 1, RPC_GSS_SVC_INTEGRITY = 2, RPC_GSS_SVC_PRIVACY = 3 }; /* on-the-wire gss cred: */ struct rpc_gss_wire_cred { u32 gc_v; /* version */ u32 gc_proc; /* control 
procedure */ u32 gc_seq; /* sequence number */ u32 gc_svc; /* service */ struct xdr_netobj gc_ctx; /* context handle */ }; /* on-the-wire gss verifier: */ struct rpc_gss_wire_verf { u32 gv_flavor; struct xdr_netobj gv_verf; }; /* return from gss NULL PROC init sec context */ struct rpc_gss_init_res { struct xdr_netobj gr_ctx; /* context handle */ u32 gr_major; /* major status */ u32 gr_minor; /* minor status */ u32 gr_win; /* sequence window */ struct xdr_netobj gr_token; /* token */ }; /* The gss_cl_ctx struct holds all the information the rpcsec_gss client * code needs to know about a single security context. In particular, * gc_gss_ctx is the context handle that is used to do gss-api calls, while * gc_wire_ctx is the context handle that is used to identify the context on * the wire when communicating with a server. */ struct gss_cl_ctx { refcount_t count; enum rpc_gss_proc gc_proc; u32 gc_seq; u32 gc_seq_xmit; spinlock_t gc_seq_lock; struct gss_ctx *gc_gss_ctx; struct xdr_netobj gc_wire_ctx; struct xdr_netobj gc_acceptor; u32 gc_win; unsigned long gc_expiry; struct rcu_head gc_rcu; }; struct gss_upcall_msg; struct gss_cred { struct rpc_cred gc_base; enum rpc_gss_svc gc_service; struct gss_cl_ctx __rcu *gc_ctx; struct gss_upcall_msg *gc_upcall; const char *gc_principal; unsigned long gc_upcall_timestamp; }; #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_AUTH_GSS_H */ sunrpc/rpc_rdma.h 0000644 00000007562 14722070374 0010036 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (c) 2015-2017 Oracle. All rights reserved. * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the BSD-type * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Network Appliance, Inc. nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _LINUX_SUNRPC_RPC_RDMA_H #define _LINUX_SUNRPC_RPC_RDMA_H #include <linux/types.h> #include <linux/bitops.h> #define RPCRDMA_VERSION 1 #define rpcrdma_version cpu_to_be32(RPCRDMA_VERSION) enum { RPCRDMA_V1_DEF_INLINE_SIZE = 1024, }; /* * XDR sizes, in quads */ enum { rpcrdma_fixed_maxsz = 4, rpcrdma_segment_maxsz = 4, rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz, }; /* * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks */ #define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7) #define RPCRDMA_HDRLEN_ERR (sizeof(__be32) * 5) enum rpcrdma_errcode { ERR_VERS = 1, ERR_CHUNK = 2 }; enum rpcrdma_proc { RDMA_MSG = 0, /* An RPC call or reply msg */ RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */ RDMA_MSGP = 2, /* An RPC call or reply msg with padding */ RDMA_DONE = 3, /* Client signals reply completion */ RDMA_ERROR = 4 /* An RPC RDMA encoding error */ }; #define rdma_msg cpu_to_be32(RDMA_MSG) #define rdma_nomsg cpu_to_be32(RDMA_NOMSG) #define rdma_msgp cpu_to_be32(RDMA_MSGP) #define rdma_done cpu_to_be32(RDMA_DONE) #define rdma_error cpu_to_be32(RDMA_ERROR) #define err_vers cpu_to_be32(ERR_VERS) #define err_chunk cpu_to_be32(ERR_CHUNK) /* * Private extension to RPC-over-RDMA Version One. * Message passed during RDMA-CM connection set-up. * * Add new fields at the end, and don't permute existing * fields. */ struct rpcrdma_connect_private { __be32 cp_magic; u8 cp_version; u8 cp_flags; u8 cp_send_size; u8 cp_recv_size; } __packed; #define rpcrdma_cmp_magic __cpu_to_be32(0xf6ab0e18) enum { RPCRDMA_CMP_VERSION = 1, RPCRDMA_CMP_F_SND_W_INV_OK = BIT(0), }; static inline u8 rpcrdma_encode_buffer_size(unsigned int size) { return (size >> 10) - 1; } static inline unsigned int rpcrdma_decode_buffer_size(u8 val) { return ((unsigned int)val + 1) << 10; } #endif /* _LINUX_SUNRPC_RPC_RDMA_H */ sunrpc/types.h 0000644 00000000761 14722070374 0007405 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/types.h * * Generic types and misc stuff for RPC. 
* * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #ifndef _LINUX_SUNRPC_TYPES_H_ #define _LINUX_SUNRPC_TYPES_H_ #include <linux/timer.h> #include <linux/sched/signal.h> #include <linux/workqueue.h> #include <linux/sunrpc/debug.h> #include <linux/list.h> /* * Shorthands */ #define signalled() (signal_pending(current)) #endif /* _LINUX_SUNRPC_TYPES_H_ */ sunrpc/stats.h 0000644 00000004007 14722070374 0007374 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/stats.h * * Client statistics collection for SUN RPC * * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de> */ #ifndef _LINUX_SUNRPC_STATS_H #define _LINUX_SUNRPC_STATS_H #include <linux/proc_fs.h> struct rpc_stat { const struct rpc_program *program; unsigned int netcnt, netudpcnt, nettcpcnt, nettcpconn, netreconn; unsigned int rpccnt, rpcretrans, rpcauthrefresh, rpcgarbage; }; struct svc_stat { struct svc_program * program; unsigned int netcnt, netudpcnt, nettcpcnt, nettcpconn; unsigned int rpccnt, rpcbadfmt, rpcbadauth, rpcbadclnt; }; struct net; #ifdef CONFIG_PROC_FS int rpc_proc_init(struct net *); void rpc_proc_exit(struct net *); #else static inline int rpc_proc_init(struct net *net) { return 0; } static inline void rpc_proc_exit(struct net *net) { } #endif #ifdef MODULE void rpc_modcount(struct inode *, int); #endif #ifdef CONFIG_PROC_FS struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *); void rpc_proc_unregister(struct net *,const char *); void rpc_proc_zero(const struct rpc_program *); struct proc_dir_entry * svc_proc_register(struct net *, struct svc_stat *, const struct file_operations *); void svc_proc_unregister(struct net *, const char *); void svc_seq_show(struct seq_file *, const struct svc_stat *); #else static inline struct proc_dir_entry *rpc_proc_register(struct net *net, struct rpc_stat *s) { return NULL; } static inline void rpc_proc_unregister(struct net *net, const char *p) {} static inline void rpc_proc_zero(const struct rpc_program *p) {} static inline struct proc_dir_entry *svc_proc_register(struct net *net, struct svc_stat *s, const struct file_operations *f) { return NULL; } static inline void svc_proc_unregister(struct net *net, const char *p) {} static inline void svc_seq_show(struct seq_file *seq, const struct svc_stat *st) {} #endif #endif /* _LINUX_SUNRPC_STATS_H */ sunrpc/gss_err.h 0000644 00000013664 14722070374 0007713 0 ustar 00 /* * linux/include/sunrpc/gss_err.h * * Adapted from MIT Kerberos 5-1.2.1 include/gssapi/gssapi.h * * Copyright (c) 2002 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> */ /* * Copyright 1993 by OpenVision Technologies, Inc. * * Permission to use, copy, modify, distribute, and sell this software * and its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appears in all copies and * that both that copyright notice and this permission notice appear in * supporting documentation, and that the name of OpenVision not be used * in advertising or publicity pertaining to distribution of the software * without specific, written prior permission. OpenVision makes no * representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied warranty. 
* * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ #ifndef _LINUX_SUNRPC_GSS_ERR_H #define _LINUX_SUNRPC_GSS_ERR_H #ifdef __KERNEL__ typedef unsigned int OM_uint32; /* * Flag bits for context-level services. */ #define GSS_C_DELEG_FLAG 1 #define GSS_C_MUTUAL_FLAG 2 #define GSS_C_REPLAY_FLAG 4 #define GSS_C_SEQUENCE_FLAG 8 #define GSS_C_CONF_FLAG 16 #define GSS_C_INTEG_FLAG 32 #define GSS_C_ANON_FLAG 64 #define GSS_C_PROT_READY_FLAG 128 #define GSS_C_TRANS_FLAG 256 /* * Credential usage options */ #define GSS_C_BOTH 0 #define GSS_C_INITIATE 1 #define GSS_C_ACCEPT 2 /* * Status code types for gss_display_status */ #define GSS_C_GSS_CODE 1 #define GSS_C_MECH_CODE 2 /* * Expiration time of 2^32-1 seconds means infinite lifetime for a * credential or security context */ #define GSS_C_INDEFINITE ((OM_uint32) 0xfffffffful) /* Major status codes */ #define GSS_S_COMPLETE 0 /* * Some "helper" definitions to make the status code macros obvious. */ #define GSS_C_CALLING_ERROR_OFFSET 24 #define GSS_C_ROUTINE_ERROR_OFFSET 16 #define GSS_C_SUPPLEMENTARY_OFFSET 0 #define GSS_C_CALLING_ERROR_MASK ((OM_uint32) 0377ul) #define GSS_C_ROUTINE_ERROR_MASK ((OM_uint32) 0377ul) #define GSS_C_SUPPLEMENTARY_MASK ((OM_uint32) 0177777ul) /* * The macros that test status codes for error conditions. Note that the * GSS_ERROR() macro has changed slightly from the V1 GSSAPI so that it now * evaluates its argument only once. 
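*
* Illustrative sketch (hypothetical values, not part of this header):
* with maj_stat = GSS_S_NO_CONTEXT | GSS_S_CONTINUE_NEEDED,
* GSS_ERROR(maj_stat) is nonzero because a routine-error bit is set,
* GSS_ROUTINE_ERROR_FIELD(maj_stat) extracts 8 (GSS_S_NO_CONTEXT's code),
* and GSS_SUPPLEMENTARY_INFO(maj_stat) yields GSS_S_CONTINUE_NEEDED.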
*/ #define GSS_CALLING_ERROR(x) \ ((x) & (GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET)) #define GSS_ROUTINE_ERROR(x) \ ((x) & (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET)) #define GSS_SUPPLEMENTARY_INFO(x) \ ((x) & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET)) #define GSS_ERROR(x) \ ((x) & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \ (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))) /* * Now the actual status code definitions */ /* * Calling errors: */ #define GSS_S_CALL_INACCESSIBLE_READ \ (((OM_uint32) 1ul) << GSS_C_CALLING_ERROR_OFFSET) #define GSS_S_CALL_INACCESSIBLE_WRITE \ (((OM_uint32) 2ul) << GSS_C_CALLING_ERROR_OFFSET) #define GSS_S_CALL_BAD_STRUCTURE \ (((OM_uint32) 3ul) << GSS_C_CALLING_ERROR_OFFSET) /* * Routine errors: */ #define GSS_S_BAD_MECH (((OM_uint32) 1ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_NAME (((OM_uint32) 2ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_NAMETYPE (((OM_uint32) 3ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_BINDINGS (((OM_uint32) 4ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_STATUS (((OM_uint32) 5ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_SIG (((OM_uint32) 6ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_NO_CRED (((OM_uint32) 7ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_NO_CONTEXT (((OM_uint32) 8ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_DEFECTIVE_TOKEN (((OM_uint32) 9ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_DEFECTIVE_CREDENTIAL \ (((OM_uint32) 10ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_CREDENTIALS_EXPIRED \ (((OM_uint32) 11ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_CONTEXT_EXPIRED \ (((OM_uint32) 12ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_FAILURE (((OM_uint32) 13ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_BAD_QOP (((OM_uint32) 14ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_UNAUTHORIZED (((OM_uint32) 15ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_UNAVAILABLE (((OM_uint32) 16ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_DUPLICATE_ELEMENT \ (((OM_uint32) 17ul) << GSS_C_ROUTINE_ERROR_OFFSET) #define GSS_S_NAME_NOT_MN \ (((OM_uint32) 18ul) << GSS_C_ROUTINE_ERROR_OFFSET) /* * Supplementary info bits: */ #define GSS_S_CONTINUE_NEEDED (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 0)) #define GSS_S_DUPLICATE_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 1)) #define GSS_S_OLD_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 2)) #define GSS_S_UNSEQ_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 3)) #define GSS_S_GAP_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 4)) /* XXXX these are not part of the GSSAPI C bindings! (but should be) */ #define GSS_CALLING_ERROR_FIELD(x) \ (((x) >> GSS_C_CALLING_ERROR_OFFSET) & GSS_C_CALLING_ERROR_MASK) #define GSS_ROUTINE_ERROR_FIELD(x) \ (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK) #define GSS_SUPPLEMENTARY_INFO_FIELD(x) \ (((x) >> GSS_C_SUPPLEMENTARY_OFFSET) & GSS_C_SUPPLEMENTARY_MASK) /* XXXX This is a necessary evil until the spec is fixed */ #define GSS_S_CRED_UNAVAIL GSS_S_FAILURE #endif /* __KERNEL__ */ #endif /* __LINUX_SUNRPC_GSS_ERR_H */ sunrpc/cache.h 0000644 00000021045 14722070374 0007302 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/sunrpc/cache.h * * Generic code for various authentication-related caches * used by sunrpc clients and servers. 
* * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au> */ #ifndef _LINUX_SUNRPC_CACHE_H_ #define _LINUX_SUNRPC_CACHE_H_ #include <linux/kref.h> #include <linux/slab.h> #include <linux/atomic.h> #include <linux/proc_fs.h> /* * Each cache requires: * - A 'struct cache_detail' which contains information specific to the cache * for common code to use. * - An item structure that must contain a "struct cache_head" * - A lookup function defined using DefineCacheLookup * - A 'put' function that can release a cache item. It will only * be called after cache_put has succeeded, so there is a guarantee * that no references remain. * - A function to calculate a hash of an item's key. * * as well as assorted code fragments (e.g. compare keys) and numbers * (e.g. hash size, goal_age, etc). * * Each cache must be registered so that it can be cleaned regularly. * When the cache is unregistered, it is flushed completely. * * Entries have a ref count and a 'hashed' flag which counts the existence * in the hash table. * We only expire entries when refcount is zero. * Existence in the cache is counted in the refcount. */ /* Every cache item has a common header that is used * for expiring and refreshing entries. * */ struct cache_head { struct hlist_node cache_list; time_t expiry_time; /* After this time, don't use the data */ time_t last_refresh; /* If CACHE_PENDING, this is when upcall was * sent, else this is when update was * received, though it is always set to * be *after* ->flush_time. */ struct kref ref; unsigned long flags; }; #define CACHE_VALID 0 /* Entry contains valid data */ #define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */ #define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/ #define CACHE_CLEANED 3 /* Entry has been cleaned from cache */ #define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ struct cache_detail { struct module * owner; int hash_size; struct hlist_head * hash_table; spinlock_t hash_lock; char *name; void (*cache_put)(struct kref *); int (*cache_upcall)(struct cache_detail *, struct cache_head *); void (*cache_request)(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen); int (*cache_parse)(struct cache_detail *, char *buf, int len); int (*cache_show)(struct seq_file *m, struct cache_detail *cd, struct cache_head *h); void (*warn_no_listener)(struct cache_detail *cd, int has_died); struct cache_head * (*alloc)(void); void (*flush)(void); int (*match)(struct cache_head *orig, struct cache_head *new); void (*init)(struct cache_head *orig, struct cache_head *new); void (*update)(struct cache_head *orig, struct cache_head *new); /* fields below this comment are for internal use * and should not be touched by cache owners */ time_t flush_time; /* flush all cache items with * last_refresh at or earlier * than this. last_refresh * is never set at or earlier * than this.
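* (Worked illustration with hypothetical numbers: if flush_time is 100,
* an item whose last_refresh is 100 or earlier is treated as expired by
* cache_is_expired() below, which tests
* "detail->flush_time >= h->last_refresh"; an item refreshed at 101
* survives the flush.)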
*/ struct list_head others; time_t nextcheck; int entries; /* fields for communication over channel */ struct list_head queue; atomic_t writers; /* how many times is /channel open */ time_t last_close; /* if no writers, when did last close */ time_t last_warn; /* when we last warned about no writers */ union { struct proc_dir_entry *procfs; struct dentry *pipefs; }; struct net *net; }; /* this must be embedded in any request structure that * identifies an object that will want a callback on * a cache fill */ struct cache_req { struct cache_deferred_req *(*defer)(struct cache_req *req); int thread_wait; /* How long (jiffies) we can block the * current thread to wait for updates. */ }; /* this must be embedded in a deferred_request that is being * delayed awaiting cache-fill */ struct cache_deferred_req { struct hlist_node hash; /* on hash chain */ struct list_head recent; /* on fifo */ struct cache_head *item; /* cache item we wait on */ void *owner; /* we might need to discard all deferred requests * owned by someone */ void (*revisit)(struct cache_deferred_req *req, int too_many); }; /* * timestamps kept in the cache are expressed in seconds * since boot. This is the best way to measure differences in * real time. */ static inline time_t seconds_since_boot(void) { struct timespec boot; getboottime(&boot); return get_seconds() - boot.tv_sec; } static inline time_t convert_to_wallclock(time_t sinceboot) { struct timespec boot; getboottime(&boot); return boot.tv_sec + sinceboot; } extern const struct file_operations cache_file_operations_pipefs; extern const struct file_operations content_file_operations_pipefs; extern const struct file_operations cache_flush_operations_pipefs; extern struct cache_head * sunrpc_cache_lookup_rcu(struct cache_detail *detail, struct cache_head *key, int hash); extern struct cache_head * sunrpc_cache_update(struct cache_detail *detail, struct cache_head *new, struct cache_head *old, int hash); extern int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h); extern void cache_clean_deferred(void *owner); static inline struct cache_head *cache_get(struct cache_head *h) { kref_get(&h->ref); return h; } static inline struct cache_head *cache_get_rcu(struct cache_head *h) { if (kref_get_unless_zero(&h->ref)) return h; return NULL; } static inline void cache_put(struct cache_head *h, struct cache_detail *cd) { if (kref_read(&h->ref) <= 2 && h->expiry_time < cd->nextcheck) cd->nextcheck = h->expiry_time; kref_put(&h->ref, cd->cache_put); } static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) { if (!test_bit(CACHE_VALID, &h->flags)) return false; return (h->expiry_time < seconds_since_boot()) || (detail->flush_time >= h->last_refresh); } extern int cache_check(struct cache_detail *detail, struct cache_head *h, struct cache_req *rqstp); extern void cache_flush(void); extern void cache_purge(struct cache_detail *detail); #define NEVER (0x7FFFFFFF) extern void __init cache_initialize(void); extern int cache_register_net(struct cache_detail *cd, struct net *net); extern void cache_unregister_net(struct cache_detail *cd, struct net *net); extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net); extern void cache_destroy_net(struct cache_detail *cd, struct net *net); extern void sunrpc_init_cache_detail(struct cache_detail *cd); extern void sunrpc_destroy_cache_detail(struct cache_detail *cd); extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, umode_t, struct
cache_detail *); extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *); /* Must store cache_detail in seq_file->private if using next three functions */ extern void *cache_seq_start_rcu(struct seq_file *file, loff_t *pos); extern void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos); extern void cache_seq_stop_rcu(struct seq_file *file, void *p); extern void qword_add(char **bpp, int *lp, char *str); extern void qword_addhex(char **bpp, int *lp, char *buf, int blen); extern int qword_get(char **bpp, char *dest, int bufsize); static inline int get_int(char **bpp, int *anint) { char buf[50]; char *ep; int rv; int len = qword_get(bpp, buf, sizeof(buf)); if (len < 0) return -EINVAL; if (len == 0) return -ENOENT; rv = simple_strtol(buf, &ep, 0); if (*ep) return -EINVAL; *anint = rv; return 0; } static inline int get_uint(char **bpp, unsigned int *anint) { char buf[50]; int len = qword_get(bpp, buf, sizeof(buf)); if (len < 0) return -EINVAL; if (len == 0) return -ENOENT; if (kstrtouint(buf, 0, anint)) return -EINVAL; return 0; } static inline int get_time(char **bpp, time_t *time) { char buf[50]; long long ll; int len = qword_get(bpp, buf, sizeof(buf)); if (len < 0) return -EINVAL; if (len == 0) return -ENOENT; if (kstrtoll(buf, 0, &ll)) return -EINVAL; *time = (time_t)ll; return 0; } static inline time_t get_expiry(char **bpp) { time_t rv; struct timespec boot; if (get_time(bpp, &rv)) return 0; if (rv < 0) return 0; getboottime(&boot); return rv - boot.tv_sec; } #endif /* _LINUX_SUNRPC_CACHE_H_ */ sunrpc/gss_krb5_enctypes.h 0000644 00000002345 14722070374 0011672 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Define the string that exports the set of kernel-supported * Kerberos enctypes. This list is sent via upcall to gssd, and * is also exposed via the nfsd /proc API. The consumers generally * treat this as an ordered list, where the first item in the list * is the most preferred. */ #ifndef _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H #define _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H #ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES /* * NB: This list includes encryption types that were deprecated * by RFC 8429 (DES3_CBC_SHA1 and ARCFOUR_HMAC). * * ENCTYPE_AES256_CTS_HMAC_SHA1_96 * ENCTYPE_AES128_CTS_HMAC_SHA1_96 * ENCTYPE_DES3_CBC_SHA1 * ENCTYPE_ARCFOUR_HMAC */ #define KRB5_SUPPORTED_ENCTYPES "18,17,16,23" #else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ /* * NB: This list includes encryption types that were deprecated * by RFC 8429 and RFC 6649. * * ENCTYPE_AES256_CTS_HMAC_SHA1_96 * ENCTYPE_AES128_CTS_HMAC_SHA1_96 * ENCTYPE_DES3_CBC_SHA1 * ENCTYPE_ARCFOUR_HMAC * ENCTYPE_DES_CBC_MD5 * ENCTYPE_DES_CBC_CRC * ENCTYPE_DES_CBC_MD4 */ #define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2" #endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ #endif /* _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H */ sunrpc/metrics.h 0000644 00000007273 14722070374 0007714 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/metrics.h * * Declarations for RPC client per-operation metrics * * Copyright (C) 2005 Chuck Lever <cel@netapp.com> * * RPC client per-operation statistics provide latency and retry * information about each type of RPC procedure in a given RPC program. * These statistics are not for detailed problem diagnosis, but simply * to indicate whether the problem is local or remote. 
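* For example (hypothetical numbers): if a monitoring tool samples
* om_ops at 1000 and again at 1600 ten seconds later, it can report
* (1600 - 1000) / 10 = 60 operations per second over that interval,
* even though the counters themselves are never zeroed.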
* * These counters are not meant to be human-readable, but are meant to be * integrated into system monitoring tools such as "sar" and "iostat". As * such, the counters are sampled by the tools over time, and are never * zeroed after a file system is mounted. Moving averages can be computed * by the tools by taking the difference between two instantaneous samples * and dividing that by the time between the samples. * * The counters are maintained in a single array per RPC client, indexed * by procedure number. There is no need to maintain separate counter * arrays per-CPU because these counters are always modified behind locks. */ #ifndef _LINUX_SUNRPC_METRICS_H #define _LINUX_SUNRPC_METRICS_H #include <linux/seq_file.h> #include <linux/ktime.h> #include <linux/spinlock.h> #define RPC_IOSTATS_VERS "1.1" struct rpc_iostats { spinlock_t om_lock; /* * These counters give an idea about how many request * transmissions are required, on average, to complete that * particular procedure. Some procedures may require more * than one transmission because the server is unresponsive, * the client is retransmitting too aggressively, or the * requests are large and the network is congested. */ unsigned long om_ops, /* count of operations */ om_ntrans, /* count of RPC transmissions */ om_timeouts; /* count of major timeouts */ /* * These count how many bytes are sent and received for a * given RPC procedure type. This indicates how much load a * particular procedure is putting on the network. These * counts include the RPC and ULP headers, and the request * payload. */ unsigned long long om_bytes_sent, /* count of bytes out */ om_bytes_recv; /* count of bytes in */ /* * The length of time an RPC request waits in queue before * transmission, the network + server latency of the request, * and the total time the request spent from init to release * are measured. */ ktime_t om_queue, /* queued for xmit */ om_rtt, /* RPC RTT */ om_execute; /* RPC execution */ /* * The count of operations that complete with tk_status < 0. * These statuses usually indicate error conditions. */ unsigned long om_error_status; } ____cacheline_aligned; struct rpc_task; struct rpc_clnt; /* * EXPORTed functions for managing rpc_iostats structures */ #ifdef CONFIG_PROC_FS struct rpc_iostats * rpc_alloc_iostats(struct rpc_clnt *); void rpc_count_iostats(const struct rpc_task *, struct rpc_iostats *); void rpc_count_iostats_metrics(const struct rpc_task *, struct rpc_iostats *); void rpc_clnt_show_stats(struct seq_file *, struct rpc_clnt *); void rpc_free_iostats(struct rpc_iostats *); #else /* CONFIG_PROC_FS */ static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; } static inline void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats) {} static inline void rpc_count_iostats_metrics(const struct rpc_task *task, struct rpc_iostats *stats) { } static inline void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt) {} static inline void rpc_free_iostats(struct rpc_iostats *stats) {} #endif /* CONFIG_PROC_FS */ #endif /* _LINUX_SUNRPC_METRICS_H */ sunrpc/timer.h 0000644 00000002224 14722070374 0007355 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/timer.h * * Declarations for the RPC transport timer. 
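* A hedged reading of the rpc_rtt fields declared below: srtt is kept
* scaled by 8 (stored "<< 3"), so a stored value of 800 corresponds to
* a smoothed round-trip estimate of 100 time units (illustrative
* numbers only).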
* * Copyright (C) 2002 Trond Myklebust <trond.myklebust@fys.uio.no> */ #ifndef _LINUX_SUNRPC_TIMER_H #define _LINUX_SUNRPC_TIMER_H #include <linux/atomic.h> struct rpc_rtt { unsigned long timeo; /* default timeout value */ unsigned long srtt[5]; /* smoothed round trip time << 3 */ unsigned long sdrtt[5]; /* smoothed medium deviation of RTT */ int ntimeouts[5]; /* Number of timeouts for the last request */ }; extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo); extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m); extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer); static inline void rpc_set_timeo(struct rpc_rtt *rt, int timer, int ntimeo) { int *t; if (!timer) return; t = &rt->ntimeouts[timer-1]; if (ntimeo < *t) { if (*t > 0) (*t)--; } else { if (ntimeo > 8) ntimeo = 8; *t = ntimeo; } } static inline int rpc_ntimeo(struct rpc_rtt *rt, int timer) { if (!timer) return 0; return rt->ntimeouts[timer-1]; } #endif /* _LINUX_SUNRPC_TIMER_H */ sunrpc/addr.h 0000644 00000011707 14722070374 0007155 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/addr.h * * Various routines for copying and comparing sockaddrs and for * converting them to and from presentation format. */ #ifndef _LINUX_SUNRPC_ADDR_H #define _LINUX_SUNRPC_ADDR_H #include <linux/socket.h> #include <linux/in.h> #include <linux/in6.h> #include <net/ipv6.h> size_t rpc_ntop(const struct sockaddr *, char *, const size_t); size_t rpc_pton(struct net *, const char *, const size_t, struct sockaddr *, const size_t); char * rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t); size_t rpc_uaddr2sockaddr(struct net *, const char *, const size_t, struct sockaddr *, const size_t); static inline unsigned short rpc_get_port(const struct sockaddr *sap) { switch (sap->sa_family) { case AF_INET: return ntohs(((struct sockaddr_in *)sap)->sin_port); case AF_INET6: return ntohs(((struct sockaddr_in6 *)sap)->sin6_port); } return 0; } static inline void rpc_set_port(struct sockaddr *sap, const unsigned short port) { switch (sap->sa_family) { case AF_INET: ((struct sockaddr_in *)sap)->sin_port = htons(port); break; case AF_INET6: ((struct sockaddr_in6 *)sap)->sin6_port = htons(port); break; } } #define IPV6_SCOPE_DELIMITER '%' #define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn") static inline bool rpc_cmp_addr4(const struct sockaddr *sap1, const struct sockaddr *sap2) { const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1; const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2; return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; } static inline bool __rpc_copy_addr4(struct sockaddr *dst, const struct sockaddr *src) { const struct sockaddr_in *ssin = (struct sockaddr_in *) src; struct sockaddr_in *dsin = (struct sockaddr_in *) dst; dsin->sin_family = ssin->sin_family; dsin->sin_addr.s_addr = ssin->sin_addr.s_addr; return true; } #if IS_ENABLED(CONFIG_IPV6) static inline bool rpc_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr *sap2) { const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1; const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr)) return false; else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL) return sin1->sin6_scope_id == sin2->sin6_scope_id; return true; } static inline bool __rpc_copy_addr6(struct sockaddr *dst, const struct sockaddr *src) { const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src; struct sockaddr_in6 
*dsin6 = (struct sockaddr_in6 *) dst; dsin6->sin6_family = ssin6->sin6_family; dsin6->sin6_addr = ssin6->sin6_addr; dsin6->sin6_scope_id = ssin6->sin6_scope_id; return true; } #else /* !(IS_ENABLED(CONFIG_IPV6) */ static inline bool rpc_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr *sap2) { return false; } static inline bool __rpc_copy_addr6(struct sockaddr *dst, const struct sockaddr *src) { return false; } #endif /* !(IS_ENABLED(CONFIG_IPV6) */ /** * rpc_cmp_addr - compare the address portion of two sockaddrs. * @sap1: first sockaddr * @sap2: second sockaddr * * Just compares the family and address portion. Ignores port, but * compares the scope if it's a link-local address. * * Returns true if the addrs are equal, false if they aren't. */ static inline bool rpc_cmp_addr(const struct sockaddr *sap1, const struct sockaddr *sap2) { if (sap1->sa_family == sap2->sa_family) { switch (sap1->sa_family) { case AF_INET: return rpc_cmp_addr4(sap1, sap2); case AF_INET6: return rpc_cmp_addr6(sap1, sap2); } } return false; } /** * rpc_cmp_addr_port - compare the address and port number of two sockaddrs. * @sap1: first sockaddr * @sap2: second sockaddr */ static inline bool rpc_cmp_addr_port(const struct sockaddr *sap1, const struct sockaddr *sap2) { if (!rpc_cmp_addr(sap1, sap2)) return false; return rpc_get_port(sap1) == rpc_get_port(sap2); } /** * rpc_copy_addr - copy the address portion of one sockaddr to another * @dst: destination sockaddr * @src: source sockaddr * * Just copies the address portion and family. Ignores port, scope, etc. * Caller is responsible for making certain that dst is large enough to hold * the address in src. Returns true if address family is supported. Returns * false otherwise. */ static inline bool rpc_copy_addr(struct sockaddr *dst, const struct sockaddr *src) { switch (src->sa_family) { case AF_INET: return __rpc_copy_addr4(dst, src); case AF_INET6: return __rpc_copy_addr6(dst, src); } return false; } /** * rpc_get_scope_id - return scopeid for a given sockaddr * @sa: sockaddr to get scopeid from * * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if * not an AF_INET6 address. 
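* A minimal usage sketch for the comparison helpers above (sap1 and
* sap2 are hypothetical sockaddrs the caller already holds, and
* reuse_existing_transport() stands in for caller-specific logic):
*
*	if (rpc_cmp_addr(sap1, sap2) &&
*	    rpc_get_port(sap1) == rpc_get_port(sap2))
*		reuse_existing_transport();
*
* which is exactly the test that rpc_cmp_addr_port() packages up.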
*/ static inline u32 rpc_get_scope_id(const struct sockaddr *sa) { if (sa->sa_family != AF_INET6) return 0; return ((struct sockaddr_in6 *) sa)->sin6_scope_id; } #endif /* _LINUX_SUNRPC_ADDR_H */ sunrpc/svc_xprt.h 0000644 00000015437 14722070374 0010113 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/svc_xprt.h * * RPC server transport I/O */ #ifndef SUNRPC_SVC_XPRT_H #define SUNRPC_SVC_XPRT_H #include <linux/sunrpc/svc.h> struct module; struct svc_xprt_ops { struct svc_xprt *(*xpo_create)(struct svc_serv *, struct net *net, struct sockaddr *, int, int); struct svc_xprt *(*xpo_accept)(struct svc_xprt *); int (*xpo_has_wspace)(struct svc_xprt *); int (*xpo_recvfrom)(struct svc_rqst *); int (*xpo_sendto)(struct svc_rqst *); int (*xpo_read_payload)(struct svc_rqst *, unsigned int, unsigned int); void (*xpo_release_rqst)(struct svc_rqst *); void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); void (*xpo_secure_port)(struct svc_rqst *rqstp); void (*xpo_kill_temp_xprt)(struct svc_xprt *); }; struct svc_xprt_class { const char *xcl_name; struct module *xcl_owner; const struct svc_xprt_ops *xcl_ops; struct list_head xcl_list; u32 xcl_max_payload; int xcl_ident; }; /* * This is embedded in an object that wants a callback before deleting * an xprt; intended for use by NFSv4.1, which needs to know when a * client's tcp connection (and hence possibly a backchannel) goes away. */ struct svc_xpt_user { struct list_head list; void (*callback)(struct svc_xpt_user *); }; struct svc_xprt { struct svc_xprt_class *xpt_class; const struct svc_xprt_ops *xpt_ops; struct kref xpt_ref; struct list_head xpt_list; struct list_head xpt_ready; unsigned long xpt_flags; #define XPT_BUSY 0 /* enqueued/receiving */ #define XPT_CONN 1 /* conn pending */ #define XPT_CLOSE 2 /* dead or dying */ #define XPT_DATA 3 /* data pending */ #define XPT_TEMP 4 /* connected transport */ #define XPT_DEAD 6 /* transport closed */ #define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */ #define XPT_DEFERRED 8 /* deferred request pending */ #define XPT_OLD 9 /* used for xprt aging mark+sweep */ #define XPT_LISTENER 10 /* listening endpoint */ #define XPT_CACHE_AUTH 11 /* cache auth info */ #define XPT_LOCAL 12 /* connection from loopback interface */ #define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */ #define XPT_CONG_CTRL 14 /* has congestion control */ struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ atomic_t xpt_nr_rqsts; /* Number of requests */ struct mutex xpt_mutex; /* to serialize sending data */ spinlock_t xpt_lock; /* protects sk_deferred * and xpt_auth_cache */ void *xpt_auth_cache;/* auth cache */ struct list_head xpt_deferred; /* deferred requests that need * to be revisited */ struct sockaddr_storage xpt_local; /* local address */ size_t xpt_locallen; /* length of address */ struct sockaddr_storage xpt_remote; /* remote peer's address */ size_t xpt_remotelen; /* length of address */ char xpt_remotebuf[INET6_ADDRSTRLEN + 10]; struct list_head xpt_users; /* callbacks on free */ struct net *xpt_net; const struct cred *xpt_cred; struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */ }; static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) { spin_lock(&xpt->xpt_lock); list_del_init(&u->list); spin_unlock(&xpt->xpt_lock); } static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user
*u) { spin_lock(&xpt->xpt_lock); if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) { /* * The connection is about to be deleted soon (or, * worse, may already be deleted--in which case we've * already notified the xpt_users). */ spin_unlock(&xpt->xpt_lock); return -ENOTCONN; } list_add(&u->list, &xpt->xpt_users); spin_unlock(&xpt->xpt_lock); return 0; } int svc_reg_xprt_class(struct svc_xprt_class *); void svc_unreg_xprt_class(struct svc_xprt_class *); void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, struct svc_serv *); int svc_create_xprt(struct svc_serv *, const char *, struct net *, const int, const unsigned short, int, const struct cred *); void svc_xprt_do_enqueue(struct svc_xprt *xprt); void svc_xprt_enqueue(struct svc_xprt *xprt); void svc_xprt_put(struct svc_xprt *xprt); void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt); void svc_close_xprt(struct svc_xprt *xprt); int svc_port_is_privileged(struct sockaddr *sin); int svc_print_xprts(char *buf, int maxlen); struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, struct net *net, const sa_family_t af, const unsigned short port); int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen); void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *xprt); void svc_age_temp_xprts_now(struct svc_serv *, struct sockaddr *); static inline void svc_xprt_get(struct svc_xprt *xprt) { kref_get(&xprt->xpt_ref); } static inline void svc_xprt_set_local(struct svc_xprt *xprt, const struct sockaddr *sa, const size_t salen) { memcpy(&xprt->xpt_local, sa, salen); xprt->xpt_locallen = salen; } static inline void svc_xprt_set_remote(struct svc_xprt *xprt, const struct sockaddr *sa, const size_t salen) { memcpy(&xprt->xpt_remote, sa, salen); xprt->xpt_remotelen = salen; snprintf(xprt->xpt_remotebuf, sizeof(xprt->xpt_remotebuf) - 1, "%pISpc", sa); } static inline unsigned short svc_addr_port(const struct sockaddr *sa) { const struct sockaddr_in *sin = (const struct sockaddr_in *)sa; const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa; switch (sa->sa_family) { case AF_INET: return ntohs(sin->sin_port); case AF_INET6: return ntohs(sin6->sin6_port); } return 0; } static inline size_t svc_addr_len(const struct sockaddr *sa) { switch (sa->sa_family) { case AF_INET: return sizeof(struct sockaddr_in); case AF_INET6: return sizeof(struct sockaddr_in6); } BUG(); } static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt) { return svc_addr_port((const struct sockaddr *)&xprt->xpt_local); } static inline unsigned short svc_xprt_remote_port(const struct svc_xprt *xprt) { return svc_addr_port((const struct sockaddr *)&xprt->xpt_remote); } static inline char *__svc_print_addr(const struct sockaddr *addr, char *buf, const size_t len) { const struct sockaddr_in *sin = (const struct sockaddr_in *)addr; const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr; switch (addr->sa_family) { case AF_INET: snprintf(buf, len, "%pI4, port=%u", &sin->sin_addr, ntohs(sin->sin_port)); break; case AF_INET6: snprintf(buf, len, "%pI6, port=%u", &sin6->sin6_addr, ntohs(sin6->sin6_port)); break; default: snprintf(buf, len, "unknown address type: %d", addr->sa_family); break; } return buf; } #endif /* SUNRPC_SVC_XPRT_H */ sunrpc/svcauth.h 0000644 00000014342 14722070374 0007716 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/svcauth.h * * RPC server-side authentication stuff. 
* * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #ifndef _LINUX_SUNRPC_SVCAUTH_H_ #define _LINUX_SUNRPC_SVCAUTH_H_ #ifdef __KERNEL__ #include <linux/string.h> #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/cache.h> #include <linux/sunrpc/gss_api.h> #include <linux/hash.h> #include <linux/stringhash.h> #include <linux/cred.h> struct svc_cred { kuid_t cr_uid; kgid_t cr_gid; struct group_info *cr_group_info; u32 cr_flavor; /* pseudoflavor */ /* name of form servicetype/hostname@REALM, passed down by * gss-proxy: */ char *cr_raw_principal; /* name of form servicetype@hostname, passed down by * rpc.svcgssd, or computed from the above: */ char *cr_principal; char *cr_targ_princ; struct gss_api_mech *cr_gss_mech; }; static inline void init_svc_cred(struct svc_cred *cred) { cred->cr_group_info = NULL; cred->cr_raw_principal = NULL; cred->cr_principal = NULL; cred->cr_targ_princ = NULL; cred->cr_gss_mech = NULL; } static inline void free_svc_cred(struct svc_cred *cred) { if (cred->cr_group_info) put_group_info(cred->cr_group_info); kfree(cred->cr_raw_principal); kfree(cred->cr_principal); kfree(cred->cr_targ_princ); gss_mech_put(cred->cr_gss_mech); init_svc_cred(cred); } struct svc_rqst; /* forward decl */ struct in6_addr; /* Authentication is done in the context of a domain. * * Currently, the nfs server uses the auth_domain to stand * for the "client" listed in /etc/exports. * * More generally, a domain might represent a group of clients using * a common mechanism for authentication and having a common mapping * between local identity (uid) and network identity. All clients * in a domain have similar general access rights. Each domain can * contain multiple principals which will have different specific rights * based on normal Discretionary Access Control. * * A domain is created by an authentication flavour module based on name * only. Userspace then fills in detail on demand. * * In the case of auth_unix and auth_null, the auth_domain is also * associated with entries in another cache representing the mapping * of ip addresses to the given client. */ struct auth_domain { struct kref ref; struct hlist_node hash; char *name; struct auth_ops *flavour; struct rcu_head rcu_head; }; /* * Each authentication flavour registers an auth_ops * structure. * name is simply the name. * flavour gives the auth flavour. It determines where the flavour is registered * accept() is given a request and should verify it. * It should inspect the authenticator and verifier, and possibly the data. * If there is a problem with the authentication *authp should be set. * The return value of accept() can indicate: * OK - authorised. client and credential are set in rqstp. * reqbuf points to arguments * resbuf points to a good place for results. The verifier * is (probably) already in place. Certainly space is * reserved for it. * DROP - simply drop the request. It may have been deferred * GARBAGE - rpc garbage_args error * SYSERR - rpc system_err error * DENIED - authp holds reason for denial. * COMPLETE - the reply is encoded already and ready to be sent; no * further processing is necessary. (This is used for processing * null procedure calls which are used to set up encryption * contexts.) * * accept is passed the proc number so that it can accept NULL rpc requests * even if it cannot authenticate the client (as is sometimes appropriate). * * release() is given a request after the procedure has been run.
* It should sign/encrypt the results if needed * It should return: * OK - the resbuf is ready to be sent * DROP - the reply should be quietly dropped * DENIED - authp holds a reason for MSG_DENIED * SYSERR - rpc system_err * * domain_release() * This call releases a domain. * set_client() * Given a pending request (struct svc_rqst), finds and assigns * an appropriate 'auth_domain' as the client. */ struct auth_ops { char * name; struct module *owner; int flavour; int (*accept)(struct svc_rqst *rq, __be32 *authp); int (*release)(struct svc_rqst *rq); void (*domain_release)(struct auth_domain *); int (*set_client)(struct svc_rqst *rq); }; #define SVC_GARBAGE 1 #define SVC_SYSERR 2 #define SVC_VALID 3 #define SVC_NEGATIVE 4 #define SVC_OK 5 #define SVC_DROP 6 #define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely * lost so if there is a tcp connection, it * should be closed */ #define SVC_DENIED 8 #define SVC_PENDING 9 #define SVC_COMPLETE 10 struct svc_xprt; extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp); extern int svc_authorise(struct svc_rqst *rqstp); extern int svc_set_client(struct svc_rqst *rqstp); extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops); extern void svc_auth_unregister(rpc_authflavor_t flavor); extern struct auth_domain *unix_domain_find(char *name); extern void auth_domain_put(struct auth_domain *item); extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom); extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new); extern struct auth_domain *auth_domain_find(char *name); extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr); extern int auth_unix_forget_old(struct auth_domain *dom); extern void svcauth_unix_purge(struct net *net); extern void svcauth_unix_info_release(struct svc_xprt *xpt); extern int svcauth_unix_set_client(struct svc_rqst *rqstp); extern int unix_gid_cache_create(struct net *net); extern void unix_gid_cache_destroy(struct net *net); /* * The <stringhash.h> functions are good enough that we don't need to * use hash_32() on them; just extracting the high bits is enough.
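* For illustration (hypothetical key and table size): a cache with a
* 64-entry hash table can index it with hash_str(name, 6), which
* returns a value in [0, 63]; hash_mem() does the same for keys that
* are not NUL-terminated.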
*/ static inline unsigned long hash_str(char const *name, int bits) { return hashlen_hash(hashlen_string(NULL, name)) >> (32 - bits); } static inline unsigned long hash_mem(char const *buf, int length, int bits) { return full_name_hash(NULL, buf, length) >> (32 - bits); } #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_SVCAUTH_H_ */ sunrpc/clnt.h 0000644 00000017746 14722070374 0007214 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/clnt.h * * Declarations for the high-level RPC client interface * * Copyright (C) 1995, 1996, Olaf Kirch <okir@monad.swb.de> */ #ifndef _LINUX_SUNRPC_CLNT_H #define _LINUX_SUNRPC_CLNT_H #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/sched.h> #include <linux/sunrpc/xprt.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/timer.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include <asm/signal.h> #include <linux/path.h> #include <net/ipv6.h> #include <linux/sunrpc/xprtmultipath.h> struct rpc_inode; /* * The high-level client handle */ struct rpc_clnt { atomic_t cl_count; /* Number of references */ unsigned int cl_clid; /* client id */ struct list_head cl_clients; /* Global list of clients */ struct list_head cl_tasks; /* List of tasks */ spinlock_t cl_lock; /* spinlock */ struct rpc_xprt __rcu * cl_xprt; /* transport */ const struct rpc_procinfo *cl_procinfo; /* procedure info */ u32 cl_prog, /* RPC program number */ cl_vers, /* RPC version number */ cl_maxproc; /* max procedure number */ struct rpc_auth * cl_auth; /* authenticator */ struct rpc_stat * cl_stats; /* per-program statistics */ struct rpc_iostats * cl_metrics; /* per-client statistics */ unsigned int cl_softrtry : 1,/* soft timeouts */ cl_softerr : 1,/* Timeouts return errors */ cl_discrtry : 1,/* disconnect before retry */ cl_noretranstimeo: 1,/* No retransmit timeouts */ cl_autobind : 1,/* use getport() */ cl_chatty : 1;/* be verbose */ struct rpc_rtt * cl_rtt; /* RTO estimator data */ const struct rpc_timeout *cl_timeout; /* Timeout strategy */ atomic_t cl_swapper; /* swapfile count */ int cl_nodelen; /* nodename length */ char cl_nodename[UNX_MAXNODENAME+1]; struct rpc_pipe_dir_head cl_pipedir_objects; struct rpc_clnt * cl_parent; /* Points to parent of clones */ struct rpc_rtt cl_rtt_default; struct rpc_timeout cl_timeout_default; const struct rpc_program *cl_program; const char * cl_principal; /* use for machine cred */ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *cl_debugfs; /* debugfs directory */ #endif struct rpc_xprt_iter cl_xpi; const struct cred *cl_cred; struct super_block *pipefs_sb; }; /* * General RPC program info */ #define RPC_MAXVERSION 4 struct rpc_program { const char * name; /* protocol name */ u32 number; /* program number */ unsigned int nrvers; /* number of versions */ const struct rpc_version ** version; /* version array */ struct rpc_stat * stats; /* statistics */ const char * pipe_dir_name; /* path to rpc_pipefs dir */ }; struct rpc_version { u32 number; /* version number */ unsigned int nrprocs; /* number of procs */ const struct rpc_procinfo *procs; /* procedure array */ unsigned int *counts; /* call counts */ }; /* * Procedure information */ struct rpc_procinfo { u32 p_proc; /* RPC procedure number */ kxdreproc_t p_encode; /* XDR encode function */ kxdrdproc_t p_decode; /* XDR decode function */ unsigned int p_arglen; /* argument hdr length (u32) */ unsigned int 
p_replen; /* reply hdr length (u32) */ unsigned int p_timer; /* Which RTT timer to use */ u32 p_statidx; /* Which procedure to account */ const char * p_name; /* name of procedure */ }; #ifdef __KERNEL__ struct rpc_create_args { struct net *net; int protocol; struct sockaddr *address; size_t addrsize; struct sockaddr *saddress; const struct rpc_timeout *timeout; const char *servername; const char *nodename; const struct rpc_program *program; struct rpc_stat *stats; u32 prognumber; /* overrides program->number */ u32 version; rpc_authflavor_t authflavor; u32 nconnect; unsigned long flags; char *client_name; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ const struct cred *cred; }; struct rpc_add_xprt_test { void (*add_xprt_test)(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *calldata); void *data; }; /* Values for "flags" field */ #define RPC_CLNT_CREATE_HARDRTRY (1UL << 0) #define RPC_CLNT_CREATE_AUTOBIND (1UL << 2) #define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3) #define RPC_CLNT_CREATE_NOPING (1UL << 4) #define RPC_CLNT_CREATE_DISCRTRY (1UL << 5) #define RPC_CLNT_CREATE_QUIET (1UL << 6) #define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7) #define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8) #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) #define RPC_CLNT_CREATE_SOFTERR (1UL << 10) struct rpc_clnt *rpc_create(struct rpc_create_args *args); struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, const struct rpc_program *, u32); struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *, rpc_authflavor_t); int rpc_switch_client_transport(struct rpc_clnt *, struct xprt_create *, const struct rpc_timeout *); void rpc_shutdown_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); void rpc_task_release_transport(struct rpc_task *); void rpc_task_release_client(struct rpc_task *); struct rpc_xprt *rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt); int rpcb_create_local(struct net *); void rpcb_put_local(struct net *); int rpcb_register(struct net *, u32, u32, int, unsigned short); int rpcb_v4_register(struct net *net, const u32 program, const u32 version, const struct sockaddr *address, const char *netid); void rpcb_getport_async(struct rpc_task *); void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, unsigned int base, unsigned int len, unsigned int hdrsize); void rpc_call_start(struct rpc_task *); int rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags, const struct rpc_call_ops *tk_ops, void *calldata); int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags); struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags); int rpc_restart_call_prepare(struct rpc_task *); int rpc_restart_call(struct rpc_task *); void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); size_t rpc_max_bc_payload(struct rpc_clnt *); unsigned int rpc_num_bc_slots(struct rpc_clnt *); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt, int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *), void *data); int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, struct 
rpc_xprt_switch *xps, struct rpc_xprt *xprt, void *dummy); int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *, int (*setup)(struct rpc_clnt *, struct rpc_xprt_switch *, struct rpc_xprt *, void *), void *data); void rpc_set_connect_timeout(struct rpc_clnt *clnt, unsigned long connect_timeout, unsigned long reconnect_timeout); int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *, struct rpc_xprt_switch *, struct rpc_xprt *, void *); const char *rpc_proc_name(const struct rpc_task *task); void rpc_clnt_xprt_switch_put(struct rpc_clnt *); void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, const struct sockaddr *sap); void rpc_cleanup_clids(void); static inline int rpc_reply_expected(struct rpc_task *task) { return (task->tk_msg.rpc_proc != NULL) && (task->tk_msg.rpc_proc->p_decode != NULL); } static inline void rpc_task_close_connection(struct rpc_task *task) { if (task->tk_xprt) xprt_force_disconnect(task->tk_xprt); } #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ sunrpc/auth.h 0000644 00000014117 14722070374 0007202 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/auth.h * * Declarations for the RPC client authentication machinery. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #ifndef _LINUX_SUNRPC_AUTH_H #define _LINUX_SUNRPC_AUTH_H #ifdef __KERNEL__ #include <linux/sunrpc/sched.h> #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/xdr.h> #include <linux/atomic.h> #include <linux/rcupdate.h> #include <linux/uidgid.h> #include <linux/utsname.h> /* * Maximum size of AUTH_NONE authentication information, in XDR words. */ #define NUL_CALLSLACK (4) #define NUL_REPLYSLACK (2) /* * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes, * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes. 
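* Worked example (assuming __NEW_UTS_LEN is 64, its usual value on
* Linux): XDR_QUADLEN(64) is 16 quads, so UNX_CALLSLACK below comes to
* 21 + 16 = 37 XDR words reserved for an AUTH_UNIX credential.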
*/ #define UNX_MAXNODENAME __NEW_UTS_LEN #define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME)) #define UNX_NGROUPS 16 struct rpcsec_gss_info; struct auth_cred { const struct cred *cred; const char *principal; /* If present, this is a machine credential */ }; /* * Client user credentials */ struct rpc_auth; struct rpc_credops; struct rpc_cred { struct hlist_node cr_hash; /* hash chain */ struct list_head cr_lru; /* lru garbage collection */ struct rcu_head cr_rcu; struct rpc_auth * cr_auth; const struct rpc_credops *cr_ops; unsigned long cr_expire; /* when to gc */ unsigned long cr_flags; /* various flags */ refcount_t cr_count; /* ref count */ const struct cred *cr_cred; /* per-flavor data */ }; #define RPCAUTH_CRED_NEW 0 #define RPCAUTH_CRED_UPTODATE 1 #define RPCAUTH_CRED_HASHED 2 #define RPCAUTH_CRED_NEGATIVE 3 const struct cred *rpc_machine_cred(void); /* * Client authentication handle */ struct rpc_cred_cache; struct rpc_authops; struct rpc_auth { unsigned int au_cslack; /* call cred size estimate */ unsigned int au_rslack; /* reply cred size estimate */ unsigned int au_verfsize; /* size of reply verifier */ unsigned int au_ralign; /* words before UL header */ unsigned int au_flags; const struct rpc_authops *au_ops; rpc_authflavor_t au_flavor; /* pseudoflavor (note may * differ from the flavor in * au_ops->au_flavor in gss * case) */ refcount_t au_count; /* Reference counter */ struct rpc_cred_cache * au_credcache; /* per-flavor data */ }; /* rpc_auth au_flags */ #define RPCAUTH_AUTH_DATATOUCH 0x00000002 struct rpc_auth_create_args { rpc_authflavor_t pseudoflavor; const char *target_name; }; /* Flags for rpcauth_lookupcred() */ #define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */ /* * Client authentication ops */ struct rpc_authops { struct module *owner; rpc_authflavor_t au_flavor; /* flavor (RPC_AUTH_*) */ char * au_name; struct rpc_auth * (*create)(const struct rpc_auth_create_args *, struct rpc_clnt *); void (*destroy)(struct rpc_auth *); int (*hash_cred)(struct auth_cred *, unsigned int); struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t); int (*list_pseudoflavors)(rpc_authflavor_t *, int); rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *); int (*flavor2info)(rpc_authflavor_t, struct rpcsec_gss_info *); int (*key_timeout)(struct rpc_auth *, struct rpc_cred *); }; struct rpc_credops { const char * cr_name; /* Name of the auth flavour */ int (*cr_init)(struct rpc_auth *, struct rpc_cred *); void (*crdestroy)(struct rpc_cred *); int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); int (*crmarshal)(struct rpc_task *task, struct xdr_stream *xdr); int (*crrefresh)(struct rpc_task *); int (*crvalidate)(struct rpc_task *task, struct xdr_stream *xdr); int (*crwrap_req)(struct rpc_task *task, struct xdr_stream *xdr); int (*crunwrap_resp)(struct rpc_task *task, struct xdr_stream *xdr); int (*crkey_timeout)(struct rpc_cred *); char * (*crstringify_acceptor)(struct rpc_cred *); bool (*crneed_reencode)(struct rpc_task *); }; extern const struct rpc_authops authunix_ops; extern const struct rpc_authops authnull_ops; int __init rpc_init_authunix(void); int __init rpcauth_init_module(void); void rpcauth_remove_module(void); void rpc_destroy_authunix(void); int rpcauth_register(const struct rpc_authops *); int rpcauth_unregister(const struct rpc_authops *); struct rpc_auth * rpcauth_create(const struct rpc_auth_create_args *, struct rpc_clnt *); void 
rpcauth_release(struct rpc_auth *); rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t, struct rpcsec_gss_info *); int rpcauth_get_gssinfo(rpc_authflavor_t, struct rpcsec_gss_info *); int rpcauth_list_flavors(rpc_authflavor_t *, int); struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t); void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); void put_rpccred(struct rpc_cred *); int rpcauth_marshcred(struct rpc_task *task, struct xdr_stream *xdr); int rpcauth_checkverf(struct rpc_task *task, struct xdr_stream *xdr); int rpcauth_wrap_req_encode(struct rpc_task *task, struct xdr_stream *xdr); int rpcauth_wrap_req(struct rpc_task *task, struct xdr_stream *xdr); int rpcauth_unwrap_resp_decode(struct rpc_task *task, struct xdr_stream *xdr); int rpcauth_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr); bool rpcauth_xmit_need_reencode(struct rpc_task *task); int rpcauth_refreshcred(struct rpc_task *); void rpcauth_invalcred(struct rpc_task *); int rpcauth_uptodatecred(struct rpc_task *); int rpcauth_init_credcache(struct rpc_auth *); void rpcauth_destroy_credcache(struct rpc_auth *); void rpcauth_clear_credcache(struct rpc_cred_cache *); char * rpcauth_stringify_acceptor(struct rpc_cred *); static inline struct rpc_cred *get_rpccred(struct rpc_cred *cred) { if (cred != NULL && refcount_inc_not_zero(&cred->cr_count)) return cred; return NULL; } #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_AUTH_H */ sunrpc/gss_asn1.h 0000644 00000006164 14722070374 0007762 0 ustar 00 /* * linux/include/linux/sunrpc/gss_asn1.h * * minimal asn1 for generic encoding/decoding of gss tokens * * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h, * lib/gssapi/krb5/gssapiP_krb5.h, and others * * Copyright (c) 2000 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> */ /* * Copyright 1995 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. 
* */ #include <linux/sunrpc/gss_api.h> #define SIZEOF_INT 4 /* from gssapi_err_generic.h */ #define G_BAD_SERVICE_NAME (-2045022976L) #define G_BAD_STRING_UID (-2045022975L) #define G_NOUSER (-2045022974L) #define G_VALIDATE_FAILED (-2045022973L) #define G_BUFFER_ALLOC (-2045022972L) #define G_BAD_MSG_CTX (-2045022971L) #define G_WRONG_SIZE (-2045022970L) #define G_BAD_USAGE (-2045022969L) #define G_UNKNOWN_QOP (-2045022968L) #define G_NO_HOSTNAME (-2045022967L) #define G_BAD_HOSTNAME (-2045022966L) #define G_WRONG_MECH (-2045022965L) #define G_BAD_TOK_HEADER (-2045022964L) #define G_BAD_DIRECTION (-2045022963L) #define G_TOK_TRUNC (-2045022962L) #define G_REFLECT (-2045022961L) #define G_WRONG_TOKID (-2045022960L) #define g_OID_equal(o1,o2) \ (((o1)->len == (o2)->len) && \ (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0)) u32 g_verify_token_header( struct xdr_netobj *mech, int *body_size, unsigned char **buf_in, int toksize); int g_token_size( struct xdr_netobj *mech, unsigned int body_size); void g_make_token_header( struct xdr_netobj *mech, int body_size, unsigned char **buf); sunrpc/svc.h 0000644 00000043666 14722070374 0007047 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/svc.h * * RPC server declarations. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #ifndef SUNRPC_SVC_H #define SUNRPC_SVC_H #include <linux/in.h> #include <linux/in6.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/svcauth.h> #include <linux/wait.h> #include <linux/mm.h> /* statistics for svc_pool structures */ struct svc_pool_stats { atomic_long_t packets; unsigned long sockets_queued; atomic_long_t threads_woken; atomic_long_t threads_timedout; }; /* * * RPC service thread pool. * * Pool of threads and temporary sockets. Generally there is only * a single one of these per RPC service, but on NUMA machines those * services that can benefit from it (i.e. nfs but not lockd) will * have one pool per NUMA node. This optimisation reduces cross- * node traffic on multi-node NUMA NFS servers. */ struct svc_pool { unsigned int sp_id; /* pool id; also node id on NUMA */ spinlock_t sp_lock; /* protects all fields */ struct list_head sp_sockets; /* pending sockets */ unsigned int sp_nrthreads; /* # of threads in pool */ struct list_head sp_all_threads; /* all server threads */ struct svc_pool_stats sp_stats; /* statistics on pool operation */ #define SP_TASK_PENDING (0) /* still work to do even if no * xprt is queued. */ #define SP_CONGESTED (1) unsigned long sp_flags; } ____cacheline_aligned_in_smp; struct svc_serv; struct svc_serv_ops { /* Callback to use when last thread exits. */ void (*svo_shutdown)(struct svc_serv *, struct net *); /* function for service threads to run */ int (*svo_function)(void *); /* queue up a transport for servicing */ void (*svo_enqueue_xprt)(struct svc_xprt *); /* set up thread (or whatever) execution context */ int (*svo_setup)(struct svc_serv *, struct svc_pool *, int); /* optional module to count when adding threads (pooled svcs only) */ struct module *svo_module; }; /* * RPC service. * * An RPC service is a ``daemon,'' possibly multithreaded, which * receives and processes incoming RPC messages. * It has one or more transport sockets associated with it, and maintains * a list of idle threads waiting for input. * * We currently do not support more than one RPC program per daemon. 
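* (Illustrative example: nfsd on a two-node NUMA machine would create
* a svc_serv with sv_nrpools == 2, one svc_pool per node, so threads
* and pending sockets stay node-local.)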
*/ struct svc_serv { struct svc_program * sv_program; /* RPC program */ struct svc_stat * sv_stats; /* RPC statistics */ spinlock_t sv_lock; unsigned int sv_nrthreads; /* # of server threads */ unsigned int sv_maxconn; /* max connections allowed or * '0' causing max to be based * on number of threads. */ unsigned int sv_max_payload; /* datagram payload size */ unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */ unsigned int sv_xdrsize; /* XDR buffer size */ struct list_head sv_permsocks; /* all permanent sockets */ struct list_head sv_tempsocks; /* all temporary sockets */ int sv_tmpcnt; /* count of temporary sockets */ struct timer_list sv_temptimer; /* timer for aging temporary sockets */ char * sv_name; /* service name */ unsigned int sv_nrpools; /* number of thread pools */ struct svc_pool * sv_pools; /* array of thread pools */ const struct svc_serv_ops *sv_ops; /* server operations */ #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct list_head sv_cb_list; /* queue for callback requests * that arrive over the same * connection */ spinlock_t sv_cb_lock; /* protects the svc_cb_list */ wait_queue_head_t sv_cb_waitq; /* sleep here if there are no * entries in the svc_cb_list */ bool sv_bc_enabled; /* service uses backchannel */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ }; /* * We use sv_nrthreads as a reference count. svc_destroy() drops * this refcount, so we need to bump it up around operations that * change the number of threads. Horrible, but there it is. * Should be called with the "service mutex" held. */ static inline void svc_get(struct svc_serv *serv) { serv->sv_nrthreads++; } /* * Maximum payload size supported by a kernel RPC server. * This is use to determine the max number of pages nfsd is * willing to return in a single READ operation. * * These happen to all be powers of 2, which is not strictly * necessary but helps enforce the real limitation, which is * that they should be multiples of PAGE_SIZE. * * For UDP transports, a block plus NFS,RPC, and UDP headers * has to fit into the IP datagram limit of 64K. The largest * feasible number for all known page sizes is probably 48K, * but we choose 32K here. This is the same as the historical * Linux limit; someone who cares more about NFS/UDP performance * can test a larger number. * * For TCP transports we have more freedom. A size of 1MB is * chosen to match the client limit. Other OSes are known to * have larger limits, but those numbers are probably beyond * the point of diminishing returns. */ #define RPCSVC_MAXPAYLOAD (1*1024*1024u) #define RPCSVC_MAXPAYLOAD_TCP RPCSVC_MAXPAYLOAD #define RPCSVC_MAXPAYLOAD_UDP (32*1024u) extern u32 svc_max_payload(const struct svc_rqst *rqstp); /* * RPC Requsts and replies are stored in one or more pages. * We maintain an array of pages for each server thread. * Requests are copied into these pages as they arrive. Remaining * pages are available to write the reply into. * * Pages are sent using ->sendpage so each server thread needs to * allocate more to replace those used in sending. To help keep track * of these pages we have a receive list where all pages initialy live, * and a send list where pages are moved to when there are to be part * of a reply. * * We use xdr_buf for holding responses as it fits well with NFS * read responses (that have a header, and some data pages, and possibly * a tail) and means we can share some client side routines. * * The xdr_buf.head kvec always points to the first page in the rq_*pages * list. 
The xdr_buf.pages pointer points to the second page on that * list. xdr_buf.tail points to the end of the first page. * This assumes that the non-page part of an rpc reply will fit * in a page - NFSd ensures this. lockd also has no trouble. * * Each request/reply pair can have at most one "payload", plus two pages, * one for the request, and one for the reply. * When using ->sendfile to return read data, we might need one extra page * if the request is not page-aligned. So add another '1'. */ #define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \ + 2 + 1) static inline u32 svc_getnl(struct kvec *iov) { __be32 val, *vp; vp = iov->iov_base; val = *vp++; iov->iov_base = (void*)vp; iov->iov_len -= sizeof(__be32); return ntohl(val); } static inline void svc_putnl(struct kvec *iov, u32 val) { __be32 *vp = iov->iov_base + iov->iov_len; *vp = htonl(val); iov->iov_len += sizeof(__be32); } static inline __be32 svc_getu32(struct kvec *iov) { __be32 val, *vp; vp = iov->iov_base; val = *vp++; iov->iov_base = (void*)vp; iov->iov_len -= sizeof(__be32); return val; } static inline void svc_ungetu32(struct kvec *iov) { __be32 *vp = (__be32 *)iov->iov_base; iov->iov_base = (void *)(vp - 1); iov->iov_len += sizeof(*vp); } static inline void svc_putu32(struct kvec *iov, __be32 val) { __be32 *vp = iov->iov_base + iov->iov_len; *vp = val; iov->iov_len += sizeof(__be32); } /* * The context of a single thread, including the request currently being * processed. */ struct svc_rqst { struct list_head rq_all; /* all threads list */ struct rcu_head rq_rcu_head; /* for RCU deferred kfree */ struct svc_xprt * rq_xprt; /* transport ptr */ struct sockaddr_storage rq_addr; /* peer address */ size_t rq_addrlen; struct sockaddr_storage rq_daddr; /* dest addr of request * - reply from here */ size_t rq_daddrlen; struct svc_serv * rq_server; /* RPC service definition */ struct svc_pool * rq_pool; /* thread pool */ const struct svc_procedure *rq_procinfo;/* procedure info */ struct auth_ops * rq_authop; /* authentication flavour */ struct svc_cred rq_cred; /* auth info */ void * rq_xprt_ctxt; /* transport specific context ptr */ struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ size_t rq_xprt_hlen; /* xprt header len */ struct xdr_buf rq_arg; struct xdr_buf rq_res; struct page *rq_pages[RPCSVC_MAXPAGES + 1]; struct page * *rq_respages; /* points into rq_pages */ struct page * *rq_next_page; /* next reply page to use */ struct page * *rq_page_end; /* one past the last page */ struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ __be32 rq_xid; /* transmission id */ u32 rq_prog; /* program number */ u32 rq_vers; /* program version */ u32 rq_proc; /* procedure number */ u32 rq_prot; /* IP protocol */ int rq_cachetype; /* catering to nfsd */ #define RQ_SECURE (0) /* secure port */ #define RQ_LOCAL (1) /* local request */ #define RQ_USEDEFERRAL (2) /* use deferral */ #define RQ_DROPME (3) /* drop current reply */ #define RQ_SPLICE_OK (4) /* turned off in gss privacy * to prevent encrypting page * cache pages */ #define RQ_VICTIM (5) /* about to be shut down */ #define RQ_BUSY (6) /* request is busy */ #define RQ_DATA (7) /* request has data */ #define RQ_AUTHERR (8) /* Request status is auth error */ unsigned long rq_flags; /* flags field */ ktime_t rq_qtime; /* enqueue time */ void * rq_argp; /* decoded arguments */ void * rq_resp; /* xdr'd results */ void * rq_auth_data; /* flavor-specific data */ int rq_auth_slack; /* extra space xdr code * should leave in head * for krb5i, krb5p.
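 * (svc_reserve_auth() below adds
 * this slack to the reservation).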
*/ int rq_reserved; /* space on socket outq * reserved for this request */ ktime_t rq_stime; /* start time */ struct cache_req rq_chandle; /* handle passed to caches for * request delaying */ /* Catering to nfsd */ struct auth_domain * rq_client; /* RPC peer info */ struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ struct svc_cacherep * rq_cacherep; /* cache info */ struct task_struct *rq_task; /* service thread */ spinlock_t rq_lock; /* per-request lock */ struct net *rq_bc_net; /* pointer to backchannel's * net namespace */ }; #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net) /* * Rigorous type checking on sockaddr type conversions */ static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst) { return (struct sockaddr_in *) &rqst->rq_addr; } static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst) { return (struct sockaddr_in6 *) &rqst->rq_addr; } static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst) { return (struct sockaddr *) &rqst->rq_addr; } static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst) { return (struct sockaddr_in *) &rqst->rq_daddr; } static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst) { return (struct sockaddr_in6 *) &rqst->rq_daddr; } static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst) { return (struct sockaddr *) &rqst->rq_daddr; } /* * Check buffer bounds after decoding arguments */ static inline int xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p) { char *cp = (char *)p; struct kvec *vec = &rqstp->rq_arg.head[0]; return cp >= (char*)vec->iov_base && cp <= (char*)vec->iov_base + vec->iov_len; } static inline int xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p) { struct kvec *vec = &rqstp->rq_res.head[0]; char *cp = (char*)p; vec->iov_len = cp - (char*)vec->iov_base; return vec->iov_len <= PAGE_SIZE; } static inline void svc_free_res_pages(struct svc_rqst *rqstp) { while (rqstp->rq_next_page != rqstp->rq_respages) { struct page **pp = --rqstp->rq_next_page; if (*pp) { put_page(*pp); *pp = NULL; } } } struct svc_deferred_req { u32 prot; /* protocol (UDP or TCP) */ struct svc_xprt *xprt; struct sockaddr_storage addr; /* where reply must go */ size_t addrlen; struct sockaddr_storage daddr; /* where reply must come from */ size_t daddrlen; struct cache_deferred_req handle; size_t xprt_hlen; int argslen; __be32 args[0]; }; struct svc_process_info { union { int (*dispatch)(struct svc_rqst *, __be32 *); struct { unsigned int lovers; unsigned int hivers; } mismatch; }; }; /* * List of RPC programs on the same transport endpoint */ struct svc_program { struct svc_program * pg_next; /* other programs (same xprt) */ u32 pg_prog; /* program number */ unsigned int pg_lovers; /* lowest version */ unsigned int pg_hivers; /* highest version */ unsigned int pg_nvers; /* number of versions */ const struct svc_version **pg_vers; /* version array */ char * pg_name; /* service name */ char * pg_class; /* class name: services sharing authentication */ struct svc_stat * pg_stats; /* rpc statistics */ int (*pg_authenticate)(struct svc_rqst *); __be32 (*pg_init_request)(struct svc_rqst *, const struct svc_program *, struct svc_process_info *); int (*pg_rpcbind_set)(struct net *net, const struct svc_program *, u32 version, int family, unsigned short proto, unsigned short port); }; /* * RPC program version */ struct svc_version { u32 vs_vers; /* version number */ u32 vs_nproc; /* number of procedures */ const struct svc_procedure 
*vs_proc; /* per-procedure info */ unsigned int *vs_count; /* call counts */ u32 vs_xdrsize; /* xdrsize needed for this version */ /* Don't register with rpcbind */ bool vs_hidden; /* Don't care if the rpcbind registration fails */ bool vs_rpcb_optnl; /* Need xprt with congestion control */ bool vs_need_cong_ctrl; /* Override dispatch function (e.g. when caching replies). * A return value of 0 means drop the request. * vs_dispatch == NULL means use default dispatcher. */ int (*vs_dispatch)(struct svc_rqst *, __be32 *); }; /* * RPC procedure info */ struct svc_procedure { /* process the request: */ __be32 (*pc_func)(struct svc_rqst *); /* XDR decode args: */ int (*pc_decode)(struct svc_rqst *, __be32 *data); /* XDR encode result: */ int (*pc_encode)(struct svc_rqst *, __be32 *data); /* XDR free result: */ void (*pc_release)(struct svc_rqst *); unsigned int pc_argsize; /* argument struct size */ unsigned int pc_ressize; /* result struct size */ unsigned int pc_cachetype; /* cache info (NFS) */ unsigned int pc_xdrressize; /* maximum size of XDR reply */ }; /* * Mode for mapping cpus to pools. */ enum { SVC_POOL_AUTO = -1, /* choose one of the others */ SVC_POOL_GLOBAL, /* no mapping, just a single global pool * (legacy & UP mode) */ SVC_POOL_PERCPU, /* one pool per cpu */ SVC_POOL_PERNODE /* one pool per numa node */ }; struct svc_pool_map { int count; /* How many svc_servs use us */ int mode; /* Note: int not enum to avoid * warnings about "enumeration value * not handled in switch" */ unsigned int npools; unsigned int *pool_to; /* maps pool id to cpu or node */ unsigned int *to_pool; /* maps cpu or node to pool id */ }; extern struct svc_pool_map svc_pool_map; /* * Function prototypes. */ int svc_rpcb_setup(struct svc_serv *serv, struct net *net); void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); int svc_bind(struct svc_serv *serv, struct net *net); struct svc_serv *svc_create(struct svc_program *, unsigned int, const struct svc_serv_ops *); struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node); struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node); void svc_rqst_free(struct svc_rqst *); void svc_exit_thread(struct svc_rqst *); unsigned int svc_pool_map_get(void); void svc_pool_map_put(void); struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, const struct svc_serv_ops *); int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int); int svc_pool_stats_open(struct svc_serv *serv, struct file *file); void svc_destroy(struct svc_serv *); void svc_shutdown_net(struct svc_serv *, struct net *); int svc_process(struct svc_rqst *); int bc_svc_process(struct svc_serv *, struct rpc_rqst *, struct svc_rqst *); int svc_register(const struct svc_serv *, struct net *, const int, const unsigned short, const unsigned short); void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages, struct kvec *first, size_t total); char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, void *p, size_t total); __be32 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err); __be32 
svc_generic_init_request(struct svc_rqst *rqstp, const struct svc_program *progp, struct svc_process_info *procinfo); int svc_generic_rpcbind_set(struct net *net, const struct svc_program *progp, u32 version, int family, unsigned short proto, unsigned short port); int svc_rpcbind_set_version(struct net *net, const struct svc_program *progp, u32 version, int family, unsigned short proto, unsigned short port); #define RPC_MAX_ADDRBUFLEN (63U) /* * When we want to reduce the size of the reserved space in the response * buffer, we need to take into account the size of any checksum data that * may be at the end of the packet. This is difficult to determine exactly * for all cases without actually generating the checksum, so we just use a * static value. */ static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space) { svc_reserve(rqstp, space + rqstp->rq_auth_slack); } #endif /* SUNRPC_SVC_H */ sunrpc/xdr.h 0000644 00000040033 14722070374 0007032 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * XDR standard data types and function declarations * * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de> * * Based on: * RFC 4506 "XDR: External Data Representation Standard", May 2006 */ #ifndef _SUNRPC_XDR_H_ #define _SUNRPC_XDR_H_ #ifdef __KERNEL__ #include <linux/uio.h> #include <asm/byteorder.h> #include <asm/unaligned.h> #include <linux/scatterlist.h> struct bio_vec; struct rpc_rqst; /* * Buffer adjustment */ #define XDR_QUADLEN(l) (((l) + 3) >> 2) /* * Generic opaque `network object.' */ #define XDR_MAX_NETOBJ 1024 struct xdr_netobj { unsigned int len; u8 * data; }; /* * Basic structure for transmission/reception of a client XDR message. * Features a header (for a linear buffer containing RPC headers * and the data payload for short messages), and then an array of * pages. * The tail iovec allows you to append data after the page array. Its * main interest is for appending padding to the pages in order to * satisfy the int_32-alignment requirements in RFC1832. * * For the future, we might want to string several of these together * in a list if anybody wants to make use of NFSv4 COMPOUND * operations and/or has a need for scatter/gather involving pages. */ struct xdr_buf { struct kvec head[1], /* RPC header + non-page data */ tail[1]; /* Appended after page data */ struct bio_vec *bvec; struct page ** pages; /* Array of pages */ unsigned int page_base, /* Start of page data */ page_len, /* Length of page data */ flags; /* Flags for data disposition */ #define XDRBUF_READ 0x01 /* target of file read */ #define XDRBUF_WRITE 0x02 /* source of file write */ #define XDRBUF_SPARSE_PAGES 0x04 /* Page array is sparse */ unsigned int buflen, /* Total length of storage buffer */ len; /* Length of XDR encoded message */ }; static inline void xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) { buf->head[0].iov_base = start; buf->head[0].iov_len = len; buf->tail[0].iov_len = 0; buf->pages = NULL; buf->page_len = 0; buf->flags = 0; buf->len = 0; buf->buflen = len; } /* * pre-xdr'ed macros. 
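 *
 * Each expands to a compile-time big-endian constant, so callers can
 * compare raw on-the-wire words without byte-swapping first. A hedged
 * illustration (check_stat() is a hypothetical helper, not part of
 * this header):
 *
 *	static int check_stat(__be32 *p)	/* p: accept_stat word */
 *	{
 *		if (*p == rpc_success)
 *			return 0;
 *		return -EIO;
 *	}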
*/ #define xdr_zero cpu_to_be32(0) #define xdr_one cpu_to_be32(1) #define xdr_two cpu_to_be32(2) #define rpc_auth_null cpu_to_be32(RPC_AUTH_NULL) #define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX) #define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT) #define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS) #define rpc_call cpu_to_be32(RPC_CALL) #define rpc_reply cpu_to_be32(RPC_REPLY) #define rpc_msg_accepted cpu_to_be32(RPC_MSG_ACCEPTED) #define rpc_success cpu_to_be32(RPC_SUCCESS) #define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL) #define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH) #define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL) #define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS) #define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR) #define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY) #define rpc_mismatch cpu_to_be32(RPC_MISMATCH) #define rpc_auth_error cpu_to_be32(RPC_AUTH_ERROR) #define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK) #define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED) #define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED) #define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF) #define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF) #define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) #define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) #define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) /* * Miscellaneous XDR helper functions */ __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int len); __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int len); __be32 *xdr_encode_string(__be32 *p, const char *s); __be32 *xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp, unsigned int maxlen); __be32 *xdr_encode_netobj(__be32 *p, const struct xdr_netobj *); __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *); void xdr_inline_pages(struct xdr_buf *, unsigned int, struct page **, unsigned int, unsigned int); void xdr_terminate_string(struct xdr_buf *, const u32); size_t xdr_buf_pagecount(struct xdr_buf *buf); int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp); void xdr_free_bvec(struct xdr_buf *buf); static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len) { return xdr_encode_opaque(p, s, len); } /* * Decode 64bit quantities (NFSv3 support) */ static inline __be32 * xdr_encode_hyper(__be32 *p, __u64 val) { put_unaligned_be64(val, p); return p + 2; } static inline __be32 * xdr_decode_hyper(__be32 *p, __u64 *valp) { *valp = get_unaligned_be64(p); return p + 2; } static inline __be32 * xdr_decode_opaque_fixed(__be32 *p, void *ptr, unsigned int len) { memcpy(ptr, p, len); return p + XDR_QUADLEN(len); } static inline void xdr_netobj_dup(struct xdr_netobj *dst, struct xdr_netobj *src, gfp_t gfp_mask) { dst->data = kmemdup(src->data, src->len, gfp_mask); dst->len = src->len; } /* * Adjust kvec to reflect end of xdr'ed data (RPC client XDR) */ static inline int xdr_adjust_iovec(struct kvec *iov, __be32 *p) { return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base); } /* * XDR buffer helper functions */ extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); extern void xdr_buf_trim(struct xdr_buf *, unsigned int); extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int); extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); extern int 
write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); /* * Helper structure for copying from an sk_buff. */ struct xdr_skb_reader { struct sk_buff *skb; unsigned int offset; size_t count; __wsum csum; }; typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len); extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *); extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32); extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *); struct xdr_array2_desc; typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem); struct xdr_array2_desc { unsigned int elem_size; unsigned int array_len; unsigned int array_maxlen; xdr_xcode_elem_t xcode; }; extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc); extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc); extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len); /* * Provide some simple tools for XDR buffer overflow-checking etc. */ struct xdr_stream { __be32 *p; /* start of available buffer */ struct xdr_buf *buf; /* XDR buffer to read/write */ __be32 *end; /* end of available buffer space */ struct kvec *iov; /* pointer to the current kvec */ struct kvec scratch; /* Scratch buffer */ struct page **page_ptr; /* pointer to the current page */ unsigned int nwords; /* Remaining decode buffer length */ struct rpc_rqst *rqst; /* For debugging */ }; /* * These are the xdr_stream style generic XDR encode and decode functions. */ typedef void (*kxdreproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, const void *obj); typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *obj); extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, struct rpc_rqst *rqst); extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); extern void xdr_commit_encode(struct xdr_stream *xdr); extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen); extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, unsigned int len); extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr); extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, struct rpc_rqst *rqst); extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, unsigned int len); extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen); extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); /** * xdr_stream_remaining - Return the number of bytes remaining in the stream * @xdr: pointer to struct xdr_stream * * Return value: * Number of bytes remaining in @xdr before xdr->end */ static inline size_t xdr_stream_remaining(const struct xdr_stream *xdr) { return xdr->nwords << 2; } ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size); ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr, size_t maxlen, gfp_t gfp_flags); ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, 
char *str, size_t size); ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, size_t maxlen, gfp_t gfp_flags); /** * xdr_align_size - Calculate padded size of an object * @n: Size of an object being XDR encoded (in bytes) * * Return value: * Size (in bytes) of the object including xdr padding */ static inline size_t xdr_align_size(size_t n) { const size_t mask = sizeof(__u32) - 1; return (n + mask) & ~mask; } /** * xdr_stream_encode_u32 - Encode a 32-bit integer * @xdr: pointer to xdr_stream * @n: integer to encode * * Return values: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ static inline ssize_t xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n) { const size_t len = sizeof(n); __be32 *p = xdr_reserve_space(xdr, len); if (unlikely(!p)) return -EMSGSIZE; *p = cpu_to_be32(n); return len; } /** * xdr_stream_encode_u64 - Encode a 64-bit integer * @xdr: pointer to xdr_stream * @n: 64-bit integer to encode * * Return values: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ static inline ssize_t xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) { const size_t len = sizeof(n); __be32 *p = xdr_reserve_space(xdr, len); if (unlikely(!p)) return -EMSGSIZE; xdr_encode_hyper(p, n); return len; } /** * xdr_stream_encode_opaque_inline - Encode opaque xdr data * @xdr: pointer to xdr_stream * @ptr: pointer to void pointer * @len: size of object * * Return values: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ static inline ssize_t xdr_stream_encode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t len) { size_t count = sizeof(__u32) + xdr_align_size(len); __be32 *p = xdr_reserve_space(xdr, count); if (unlikely(!p)) { *ptr = NULL; return -EMSGSIZE; } xdr_encode_opaque(p, NULL, len); *ptr = ++p; return count; } /** * xdr_stream_encode_opaque_fixed - Encode fixed length opaque xdr data * @xdr: pointer to xdr_stream * @ptr: pointer to opaque data object * @len: size of object pointed to by @ptr * * Return values: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ static inline ssize_t xdr_stream_encode_opaque_fixed(struct xdr_stream *xdr, const void *ptr, size_t len) { __be32 *p = xdr_reserve_space(xdr, len); if (unlikely(!p)) return -EMSGSIZE; xdr_encode_opaque_fixed(p, ptr, len); return xdr_align_size(len); } /** * xdr_stream_encode_opaque - Encode variable length opaque xdr data * @xdr: pointer to xdr_stream * @ptr: pointer to opaque data object * @len: size of object pointed to by @ptr * * Return values: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ static inline ssize_t xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len) { size_t count = sizeof(__u32) + xdr_align_size(len); __be32 *p = xdr_reserve_space(xdr, count); if (unlikely(!p)) return -EMSGSIZE; xdr_encode_opaque(p, ptr, len); return count; } /** * xdr_stream_encode_uint32_array - Encode variable length array of integers * @xdr: pointer to xdr_stream * @array: array of integers * @array_size: number of elements in @array * * Return values: * On success, returns length in bytes of XDR buffer consumed * %-EMSGSIZE on XDR buffer overflow */ static inline ssize_t xdr_stream_encode_uint32_array(struct xdr_stream *xdr, const __u32 *array, size_t array_size) { ssize_t ret = (array_size+1) * sizeof(__u32); __be32 *p = 
xdr_reserve_space(xdr, ret); if (unlikely(!p)) return -EMSGSIZE; *p++ = cpu_to_be32(array_size); for (; array_size > 0; p++, array++, array_size--) *p = cpu_to_be32p(array); return ret; } /** * xdr_stream_decode_u32 - Decode a 32-bit integer * @xdr: pointer to xdr_stream * @ptr: location to store integer * * Return values: * %0 on success * %-EBADMSG on XDR buffer overflow */ static inline ssize_t xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) { const size_t count = sizeof(*ptr); __be32 *p = xdr_inline_decode(xdr, count); if (unlikely(!p)) return -EBADMSG; *ptr = be32_to_cpup(p); return 0; } /** * xdr_stream_decode_opaque_fixed - Decode fixed length opaque xdr data * @xdr: pointer to xdr_stream * @ptr: location to store data * @len: size of buffer pointed to by @ptr * * Return values: * On success, returns size of object stored in @ptr * %-EBADMSG on XDR buffer overflow */ static inline ssize_t xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len) { __be32 *p = xdr_inline_decode(xdr, len); if (unlikely(!p)) return -EBADMSG; xdr_decode_opaque_fixed(p, ptr, len); return len; } /** * xdr_stream_decode_opaque_inline - Decode variable length opaque xdr data * @xdr: pointer to xdr_stream * @ptr: location to store pointer to opaque data * @maxlen: maximum acceptable object size * * Note: the pointer stored in @ptr cannot be assumed valid after the XDR * buffer has been destroyed, or even after calling xdr_inline_decode() * on @xdr. It is therefore expected that the object it points to should * be processed immediately. * * Return values: * On success, returns size of object stored in *@ptr * %-EBADMSG on XDR buffer overflow * %-EMSGSIZE if the size of the object would exceed @maxlen */ static inline ssize_t xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxlen) { __be32 *p; __u32 len; *ptr = NULL; if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) return -EBADMSG; if (len != 0) { p = xdr_inline_decode(xdr, len); if (unlikely(!p)) return -EBADMSG; if (unlikely(len > maxlen)) return -EMSGSIZE; *ptr = p; } return len; } /** * xdr_stream_decode_uint32_array - Decode variable length array of integers * @xdr: pointer to xdr_stream * @array: location to store the integer array or NULL * @array_size: number of elements to store * * Return values: * On success, returns number of elements stored in @array * %-EBADMSG on XDR buffer overflow * %-EMSGSIZE if the size of the array exceeds @array_size */ static inline ssize_t xdr_stream_decode_uint32_array(struct xdr_stream *xdr, __u32 *array, size_t array_size) { __be32 *p; __u32 len; ssize_t retval; if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) return -EBADMSG; if (len > SIZE_MAX / sizeof(*p)) return -EBADMSG; p = xdr_inline_decode(xdr, len * sizeof(*p)); if (unlikely(!p)) return -EBADMSG; if (array == NULL) return len; if (len <= array_size) { if (len < array_size) memset(array+len, 0, (array_size-len)*sizeof(*array)); array_size = len; retval = len; } else retval = -EMSGSIZE; for (; array_size > 0; p++, array++, array_size--) *array = be32_to_cpup(p); return retval; } #endif /* __KERNEL__ */ #endif /* _SUNRPC_XDR_H_ */ sunrpc/xprtsock.h 0000644 00000003426 14722070374 0010117 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/xprtsock.h * * Declarations for the RPC transport socket provider. 
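 *
 * A minimal lifecycle sketch, assuming a caller module (the
 * my_module_* wrappers are hypothetical; only init_socket_xprt() and
 * cleanup_socket_xprt() come from this header):
 *
 *	static int __init my_module_init(void)
 *	{
 *		return init_socket_xprt();
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		cleanup_socket_xprt();
 *	}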
*/ #ifndef _LINUX_SUNRPC_XPRTSOCK_H #define _LINUX_SUNRPC_XPRTSOCK_H #ifdef __KERNEL__ int init_socket_xprt(void); void cleanup_socket_xprt(void); #define RPC_MIN_RESVPORT (1U) #define RPC_MAX_RESVPORT (65535U) #define RPC_DEF_MIN_RESVPORT (665U) #define RPC_DEF_MAX_RESVPORT (1023U) struct sock_xprt { struct rpc_xprt xprt; /* * Network layer */ struct socket * sock; struct sock * inet; struct file * file; /* * State of TCP reply receive */ struct { struct { __be32 fraghdr, xid, calldir; } __attribute__((packed)); u32 offset, len; unsigned long copied; } recv; /* * State of TCP transmit queue */ struct { u32 offset; } xmit; /* * Connection of transports */ unsigned long sock_state; struct delayed_work connect_worker; struct work_struct error_worker; struct work_struct recv_worker; struct mutex recv_mutex; struct sockaddr_storage srcaddr; unsigned short srcport; int xprt_err; /* * UDP socket buffer size parameters */ size_t rcvsize, sndsize; struct rpc_timeout tcp_timeout; /* * Saved socket callback addresses */ void (*old_data_ready)(struct sock *); void (*old_state_change)(struct sock *); void (*old_write_space)(struct sock *); void (*old_error_report)(struct sock *); }; /* * TCP RPC flags */ #define XPRT_SOCK_CONNECTING 1U #define XPRT_SOCK_DATA_READY (2) #define XPRT_SOCK_UPD_TIMEOUT (3) #define XPRT_SOCK_WAKE_ERROR (4) #define XPRT_SOCK_WAKE_WRITE (5) #define XPRT_SOCK_WAKE_PENDING (6) #define XPRT_SOCK_WAKE_DISCONNECT (7) #define XPRT_SOCK_CONNECT_SENT (8) #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ sunrpc/xprt.h 0000644 00000037153 14722070374 0007243 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/xprt.h * * Declarations for the RPC transport interface. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #ifndef _LINUX_SUNRPC_XPRT_H #define _LINUX_SUNRPC_XPRT_H #include <linux/uio.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/ktime.h> #include <linux/kref.h> #include <linux/sunrpc/sched.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/msg_prot.h> #ifdef __KERNEL__ #define RPC_MIN_SLOT_TABLE (2U) #define RPC_DEF_SLOT_TABLE (16U) #define RPC_MAX_SLOT_TABLE_LIMIT (65536U) #define RPC_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE_LIMIT #define RPC_CWNDSHIFT (8U) #define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) #define RPC_INITCWND RPC_CWNDSCALE #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) /* * This describes a timeout strategy */ struct rpc_timeout { unsigned long to_initval, /* initial timeout */ to_maxval, /* max timeout */ to_increment; /* if !exponential */ unsigned int to_retries; /* max # of retries */ unsigned char to_exponential; }; enum rpc_display_format_t { RPC_DISPLAY_ADDR = 0, RPC_DISPLAY_PORT, RPC_DISPLAY_PROTO, RPC_DISPLAY_HEX_ADDR, RPC_DISPLAY_HEX_PORT, RPC_DISPLAY_NETID, RPC_DISPLAY_MAX, }; struct rpc_task; struct rpc_xprt; struct seq_file; struct svc_serv; struct net; /* * This describes a complete RPC request */ struct rpc_rqst { /* * This is the user-visible part */ struct rpc_xprt * rq_xprt; /* RPC client */ struct xdr_buf rq_snd_buf; /* send buffer */ struct xdr_buf rq_rcv_buf; /* recv buffer */ /* * This is the private part */ struct rpc_task * rq_task; /* RPC task data */ struct rpc_cred * rq_cred; /* Bound cred */ __be32 rq_xid; /* request XID */ int rq_cong; /* has incremented xprt->cong */ u32 rq_seqno; /* gss seq no. used on req. 
*/ int rq_enc_pages_num; struct page **rq_enc_pages; /* scratch pages for use by gss privacy code */ void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ union { struct list_head rq_list; /* Slot allocation list */ struct rb_node rq_recv; /* Receive queue */ }; struct list_head rq_xmit; /* Send queue */ struct list_head rq_xmit2; /* Send queue */ void *rq_buffer; /* Call XDR encode buffer */ size_t rq_callsize; void *rq_rbuffer; /* Reply XDR decode buffer */ size_t rq_rcvsize; size_t rq_xmit_bytes_sent; /* total bytes sent */ size_t rq_reply_bytes_recvd; /* total reply bytes */ /* received */ struct xdr_buf rq_private_buf; /* The receive buffer * used in the softirq. */ unsigned long rq_majortimeo; /* major timeout alarm */ unsigned long rq_timeout; /* Current timeout value */ ktime_t rq_rtt; /* round-trip time */ unsigned int rq_retries; /* # of retries */ unsigned int rq_connect_cookie; /* A cookie used to track the state of the transport connection */ atomic_t rq_pin; /* * Partial send handling */ u32 rq_bytes_sent; /* Bytes we have sent */ ktime_t rq_xtime; /* transmit time stamp */ int rq_ntrans; #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct list_head rq_bc_list; /* Callback service list */ unsigned long rq_bc_pa_state; /* Backchannel prealloc state */ struct list_head rq_bc_pa_list; /* Backchannel prealloc list */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ }; #define rq_svec rq_snd_buf.head #define rq_slen rq_snd_buf.len struct rpc_xprt_ops { void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task); void (*free_slot)(struct rpc_xprt *xprt, struct rpc_rqst *req); void (*rpcbind)(struct rpc_task *task); void (*set_port)(struct rpc_xprt *xprt, unsigned short port); void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task); int (*buf_alloc)(struct rpc_task *task); void (*buf_free)(struct rpc_task *task); void (*prepare_request)(struct rpc_rqst *req); int (*send_request)(struct rpc_rqst *req); void (*wait_for_reply_request)(struct rpc_task *task); void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task); void (*release_request)(struct rpc_task *task); void (*close)(struct rpc_xprt *xprt); void (*destroy)(struct rpc_xprt *xprt); void (*set_connect_timeout)(struct rpc_xprt *xprt, unsigned long connect_timeout, unsigned long reconnect_timeout); void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); int (*enable_swap)(struct rpc_xprt *xprt); void (*disable_swap)(struct rpc_xprt *xprt); void (*inject_disconnect)(struct rpc_xprt *xprt); int (*bc_setup)(struct rpc_xprt *xprt, unsigned int min_reqs); size_t (*bc_maxpayload)(struct rpc_xprt *xprt); unsigned int (*bc_num_slots)(struct rpc_xprt *xprt); void (*bc_free_rqst)(struct rpc_rqst *rqst); void (*bc_destroy)(struct rpc_xprt *xprt, unsigned int max_reqs); }; /* * RPC transport identifiers * * To preserve compatibility with the historical use of raw IP protocol * id's for transport selection, UDP and TCP identifiers are specified * with the previous values. No such restriction exists for new transports, * except that they may not collide with these values (17 and 6, * respectively).
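 *
 * As a hedged illustration of how an identifier is consumed (net and
 * sin are placeholders for a namespace and a filled-in sockaddr_in;
 * struct xprt_create and xprt_create_transport() are declared later
 * in this header):
 *
 *	struct xprt_create args = {
 *		.ident   = XPRT_TRANSPORT_TCP,
 *		.net     = net,
 *		.dstaddr = (struct sockaddr *)&sin,
 *		.addrlen = sizeof(sin),
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&args);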
*/ #define XPRT_TRANSPORT_BC (1 << 31) enum xprt_transports { XPRT_TRANSPORT_UDP = IPPROTO_UDP, XPRT_TRANSPORT_TCP = IPPROTO_TCP, XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, XPRT_TRANSPORT_RDMA = 256, XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC, XPRT_TRANSPORT_LOCAL = 257, }; struct rpc_xprt { struct kref kref; /* Reference count */ const struct rpc_xprt_ops *ops; /* transport methods */ const struct rpc_timeout *timeout; /* timeout parms */ struct sockaddr_storage addr; /* server address */ size_t addrlen; /* size of server address */ int prot; /* IP protocol */ unsigned long cong; /* current congestion */ unsigned long cwnd; /* congestion window */ size_t max_payload; /* largest RPC payload size, in bytes */ struct rpc_wait_queue binding; /* requests waiting on rpcbind */ struct rpc_wait_queue sending; /* requests waiting to send */ struct rpc_wait_queue pending; /* requests in flight */ struct rpc_wait_queue backlog; /* waiting for slot */ struct list_head free; /* free slots */ unsigned int max_reqs; /* max number of slots */ unsigned int min_reqs; /* min number of slots */ unsigned int num_reqs; /* total slots */ unsigned long state; /* transport state */ unsigned char resvport : 1; /* use a reserved port */ atomic_t swapper; /* we're swapping over this transport */ unsigned int bind_index; /* bind function index */ /* * Multipath */ struct list_head xprt_switch; /* * Connection of transports */ unsigned long bind_timeout, reestablish_timeout; unsigned int connect_cookie; /* A cookie that gets bumped every time the transport is reconnected */ /* * Disconnection of idle transports */ struct work_struct task_cleanup; struct timer_list timer; unsigned long last_used, idle_timeout, connect_timeout, max_reconnect_timeout; /* * Send stuff */ atomic_long_t queuelen; spinlock_t transport_lock; /* lock transport info */ spinlock_t reserve_lock; /* lock slot table */ spinlock_t queue_lock; /* send/receive queue lock */ u32 xid; /* Next XID value to use */ struct rpc_task * snd_task; /* Task blocked in send */ struct list_head xmit_queue; /* Send queue */ struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct svc_serv *bc_serv; /* The RPC service which will */ /* process the callback */ unsigned int bc_alloc_max; unsigned int bc_alloc_count; /* Total number of preallocs */ atomic_t bc_slot_count; /* Number of allocated slots */ spinlock_t bc_pa_lock; /* Protects the preallocated * items */ struct list_head bc_pa_list; /* List of preallocated * backchannel rpc_rqst's */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ struct rb_root recv_queue; /* Receive queue */ struct { unsigned long bind_count, /* total number of binds */ connect_count, /* total number of connects */ connect_start, /* connect start timestamp */ connect_time, /* jiffies waiting for connect */ sends, /* how many complete requests */ recvs, /* how many complete requests */ bad_xids, /* lookup_rqst didn't find XID */ max_slots; /* max rpc_slots used */ unsigned long long req_u, /* average requests on the wire */ bklog_u, /* backlog queue utilization */ sending_u, /* send q utilization */ pending_u; /* pend q utilization */ } stat; struct net *xprt_net; const char *servername; const char *address_strings[RPC_DISPLAY_MAX]; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *debugfs; /* debugfs directory */ atomic_t inject_disconnect; #endif struct rcu_head rcu; }; #if defined(CONFIG_SUNRPC_BACKCHANNEL) /* * Backchannel flags */ #define RPC_BC_PA_IN_USE 0x0001 /* 
Preallocated backchannel */ /* buffer in use */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ #if defined(CONFIG_SUNRPC_BACKCHANNEL) static inline int bc_prealloc(struct rpc_rqst *req) { return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); } #else static inline int bc_prealloc(struct rpc_rqst *req) { return 0; } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ #define XPRT_CREATE_INFINITE_SLOTS (1U) #define XPRT_CREATE_NO_IDLE_TIMEOUT (1U << 1) struct xprt_create { int ident; /* XPRT_TRANSPORT identifier */ struct net * net; struct sockaddr * srcaddr; /* optional local address */ struct sockaddr * dstaddr; /* remote peer address */ size_t addrlen; const char *servername; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ struct rpc_xprt_switch *bc_xps; unsigned int flags; }; struct xprt_class { struct list_head list; int ident; /* XPRT_TRANSPORT identifier */ struct rpc_xprt * (*setup)(struct xprt_create *); struct module *owner; char name[32]; const char * netid[]; }; /* * Generic internal transport functions */ struct rpc_xprt *xprt_create_transport(struct xprt_create *args); void xprt_connect(struct rpc_task *task); unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt); void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to); void xprt_reserve(struct rpc_task *task); void xprt_retry_reserve(struct rpc_task *task); int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req); void xprt_request_prepare(struct rpc_rqst *req); bool xprt_prepare_transmit(struct rpc_task *task); void xprt_request_enqueue_transmit(struct rpc_task *task); void xprt_request_enqueue_receive(struct rpc_task *task); void xprt_request_wait_receive(struct rpc_task *task); void xprt_request_dequeue_xprt(struct rpc_task *task); bool xprt_request_need_retransmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task); void xprt_end_transmit(struct rpc_task *task); int xprt_adjust_timeout(struct rpc_rqst *req); void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_release(struct rpc_task *task); struct rpc_xprt * xprt_get(struct rpc_xprt *xprt); void xprt_put(struct rpc_xprt *xprt); struct rpc_xprt * xprt_alloc(struct net *net, size_t size, unsigned int num_prealloc, unsigned int max_req); void xprt_free(struct rpc_xprt *); static inline int xprt_enable_swap(struct rpc_xprt *xprt) { return xprt->ops->enable_swap(xprt); } static inline void xprt_disable_swap(struct rpc_xprt *xprt) { xprt->ops->disable_swap(xprt); } /* * Transport switch helper functions */ int xprt_register_transport(struct xprt_class *type); int xprt_unregister_transport(struct xprt_class *type); int xprt_load_transport(const char *); void xprt_wait_for_reply_request_def(struct rpc_task *task); void xprt_wait_for_reply_request_rtt(struct rpc_task *task); void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); void xprt_wait_for_buffer_space(struct rpc_xprt *xprt); bool xprt_write_space(struct rpc_xprt *xprt); void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result); struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); void xprt_update_rtt(struct rpc_task *task); void xprt_complete_rqst(struct rpc_task *task, int copied); void xprt_pin_rqst(struct rpc_rqst *req); void 
xprt_unpin_rqst(struct rpc_rqst *req); void xprt_release_rqst_cong(struct rpc_task *task); bool xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req); void xprt_disconnect_done(struct rpc_xprt *xprt); void xprt_force_disconnect(struct rpc_xprt *xprt); void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *); void xprt_unlock_connect(struct rpc_xprt *, void *); /* * Reserved bit positions in xprt->state */ #define XPRT_LOCKED (0) #define XPRT_CONNECTED (1) #define XPRT_CONNECTING (2) #define XPRT_CLOSE_WAIT (3) #define XPRT_BOUND (4) #define XPRT_BINDING (5) #define XPRT_CLOSING (6) #define XPRT_CONGESTED (9) #define XPRT_CWND_WAIT (10) #define XPRT_WRITE_SPACE (11) #define XPRT_SND_IS_COOKIE (12) static inline void xprt_set_connected(struct rpc_xprt *xprt) { set_bit(XPRT_CONNECTED, &xprt->state); } static inline void xprt_clear_connected(struct rpc_xprt *xprt) { clear_bit(XPRT_CONNECTED, &xprt->state); } static inline int xprt_connected(struct rpc_xprt *xprt) { return test_bit(XPRT_CONNECTED, &xprt->state); } static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt) { return test_and_set_bit(XPRT_CONNECTED, &xprt->state); } static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt) { return test_and_clear_bit(XPRT_CONNECTED, &xprt->state); } static inline void xprt_clear_connecting(struct rpc_xprt *xprt) { smp_mb__before_atomic(); clear_bit(XPRT_CONNECTING, &xprt->state); smp_mb__after_atomic(); } static inline int xprt_connecting(struct rpc_xprt *xprt) { return test_bit(XPRT_CONNECTING, &xprt->state); } static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt) { return test_and_set_bit(XPRT_CONNECTING, &xprt->state); } static inline void xprt_set_bound(struct rpc_xprt *xprt) { test_and_set_bit(XPRT_BOUND, &xprt->state); } static inline int xprt_bound(struct rpc_xprt *xprt) { return test_bit(XPRT_BOUND, &xprt->state); } static inline void xprt_clear_bound(struct rpc_xprt *xprt) { clear_bit(XPRT_BOUND, &xprt->state); } static inline void xprt_clear_binding(struct rpc_xprt *xprt) { smp_mb__before_atomic(); clear_bit(XPRT_BINDING, &xprt->state); smp_mb__after_atomic(); } static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) { return test_and_set_bit(XPRT_BINDING, &xprt->state); } #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) extern unsigned int rpc_inject_disconnect; static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) { if (!rpc_inject_disconnect) return; if (atomic_dec_return(&xprt->inject_disconnect)) return; atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect); xprt->ops->inject_disconnect(xprt); } #else static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) { } #endif #endif /* __KERNEL__*/ #endif /* _LINUX_SUNRPC_XPRT_H */ sunrpc/bc_xprt.h 0000644 00000005436 14722070374 0007706 0 ustar 00 /****************************************************************************** (c) 2008 NetApp. All Rights Reserved. NetApp provides this source code under the GPL v2 License. The GPL v2 license is available at http://opensource.org/licenses/gpl-license.php. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /* * Functions to create and manage the backchannel */ #ifndef _LINUX_SUNRPC_BC_XPRT_H #define _LINUX_SUNRPC_BC_XPRT_H #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/xprt.h> #include <linux/sunrpc/sched.h> #ifdef CONFIG_SUNRPC_BACKCHANNEL struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid); void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied); void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task); void xprt_free_bc_request(struct rpc_rqst *req); int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); /* Socket backchannel transport methods */ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs); void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs); void xprt_free_bc_rqst(struct rpc_rqst *req); unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt); /* * Determine if a shared backchannel is in use */ static inline bool svc_is_backchannel(const struct svc_rqst *rqstp) { return rqstp->rq_server->sv_bc_enabled; } static inline void set_bc_enabled(struct svc_serv *serv) { serv->sv_bc_enabled = true; } #else /* CONFIG_SUNRPC_BACKCHANNEL */ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs) { return 0; } static inline void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs) { } static inline bool svc_is_backchannel(const struct svc_rqst *rqstp) { return false; } static inline void set_bc_enabled(struct svc_serv *serv) { } static inline void xprt_free_bc_request(struct rpc_rqst *req) { } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ #endif /* _LINUX_SUNRPC_BC_XPRT_H */ sunrpc/xprtmultipath.h 0000644 00000004075 14722070374 0011170 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * RPC client multipathing definitions * * Copyright (c) 2015, 2016, Primary Data, Inc. All rights reserved. 
* * Trond Myklebust <trond.myklebust@primarydata.com> */ #ifndef _NET_SUNRPC_XPRTMULTIPATH_H #define _NET_SUNRPC_XPRTMULTIPATH_H struct rpc_xprt_iter_ops; struct rpc_xprt_switch { spinlock_t xps_lock; struct kref xps_kref; unsigned int xps_nxprts; unsigned int xps_nactive; atomic_long_t xps_queuelen; struct list_head xps_xprt_list; struct net * xps_net; const struct rpc_xprt_iter_ops *xps_iter_ops; struct rcu_head xps_rcu; }; struct rpc_xprt_iter { struct rpc_xprt_switch __rcu *xpi_xpswitch; struct rpc_xprt * xpi_cursor; const struct rpc_xprt_iter_ops *xpi_ops; }; struct rpc_xprt_iter_ops { void (*xpi_rewind)(struct rpc_xprt_iter *); struct rpc_xprt *(*xpi_xprt)(struct rpc_xprt_iter *); struct rpc_xprt *(*xpi_next)(struct rpc_xprt_iter *); }; extern struct rpc_xprt_switch *xprt_switch_alloc(struct rpc_xprt *xprt, gfp_t gfp_flags); extern struct rpc_xprt_switch *xprt_switch_get(struct rpc_xprt_switch *xps); extern void xprt_switch_put(struct rpc_xprt_switch *xps); extern void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps); extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps, struct rpc_xprt *xprt); extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps, struct rpc_xprt *xprt); extern void xprt_iter_init(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps); extern void xprt_iter_init_listall(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps); extern void xprt_iter_destroy(struct rpc_xprt_iter *xpi); extern struct rpc_xprt_switch *xprt_iter_xchg_switch( struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *newswitch); extern struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi); extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi); extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi); extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps, const struct sockaddr *sap); #endif sunrpc/svcauth_gss.h 0000644 00000001470 14722070374 0010570 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/svcauth_gss.h * * Bruce Fields <bfields@umich.edu> * Copyright (c) 2002 The Regents of the University of Michigan */ #ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H #define _LINUX_SUNRPC_SVCAUTH_GSS_H #ifdef __KERNEL__ #include <linux/sched.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/svcauth.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/auth_gss.h> int gss_svc_init(void); void gss_svc_shutdown(void); int gss_svc_init_net(struct net *net); void gss_svc_shutdown_net(struct net *net); struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char *name); u32 svcauth_gss_flavor(struct auth_domain *dom); #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ sunrpc/msg_prot.h 0000644 00000014360 14722070374 0010073 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/msg_prot.h * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #ifndef _LINUX_SUNRPC_MSGPROT_H_ #define _LINUX_SUNRPC_MSGPROT_H_ #ifdef __KERNEL__ /* user programs should get these from the rpc header files */ #define RPC_VERSION 2 /* size of an XDR encoding unit in bytes, i.e. 
32bit */ #define XDR_UNIT (4) /* spec defines authentication flavor as an unsigned 32 bit integer */ typedef u32 rpc_authflavor_t; enum rpc_auth_flavors { RPC_AUTH_NULL = 0, RPC_AUTH_UNIX = 1, RPC_AUTH_SHORT = 2, RPC_AUTH_DES = 3, RPC_AUTH_KRB = 4, RPC_AUTH_GSS = 6, RPC_AUTH_MAXFLAVOR = 8, /* pseudoflavors: */ RPC_AUTH_GSS_KRB5 = 390003, RPC_AUTH_GSS_KRB5I = 390004, RPC_AUTH_GSS_KRB5P = 390005, RPC_AUTH_GSS_LKEY = 390006, RPC_AUTH_GSS_LKEYI = 390007, RPC_AUTH_GSS_LKEYP = 390008, RPC_AUTH_GSS_SPKM = 390009, RPC_AUTH_GSS_SPKMI = 390010, RPC_AUTH_GSS_SPKMP = 390011, }; /* Maximum size (in bytes) of an rpc credential or verifier */ #define RPC_MAX_AUTH_SIZE (400) enum rpc_msg_type { RPC_CALL = 0, RPC_REPLY = 1 }; enum rpc_reply_stat { RPC_MSG_ACCEPTED = 0, RPC_MSG_DENIED = 1 }; enum rpc_accept_stat { RPC_SUCCESS = 0, RPC_PROG_UNAVAIL = 1, RPC_PROG_MISMATCH = 2, RPC_PROC_UNAVAIL = 3, RPC_GARBAGE_ARGS = 4, RPC_SYSTEM_ERR = 5, /* internal use only */ RPC_DROP_REPLY = 60000, }; enum rpc_reject_stat { RPC_MISMATCH = 0, RPC_AUTH_ERROR = 1 }; enum rpc_auth_stat { RPC_AUTH_OK = 0, RPC_AUTH_BADCRED = 1, RPC_AUTH_REJECTEDCRED = 2, RPC_AUTH_BADVERF = 3, RPC_AUTH_REJECTEDVERF = 4, RPC_AUTH_TOOWEAK = 5, /* RPCSEC_GSS errors */ RPCSEC_GSS_CREDPROBLEM = 13, RPCSEC_GSS_CTXPROBLEM = 14 }; #define RPC_MAXNETNAMELEN 256 /* * From RFC 1831: * * "A record is composed of one or more record fragments. A record * fragment is a four-byte header followed by 0 to (2**31) - 1 bytes of * fragment data. The bytes encode an unsigned binary number; as with * XDR integers, the byte order is from highest to lowest. The number * encodes two values -- a boolean which indicates whether the fragment * is the last fragment of the record (bit value 1 implies the fragment * is the last fragment) and a 31-bit unsigned binary value which is the * length in bytes of the fragment's data. The boolean value is the * highest-order bit of the header; the length is the 31 low-order bits. * (Note that this record specification is NOT in XDR standard form!)" * * The Linux RPC client always sends its requests in a single record * fragment, limiting the maximum payload size for stream transports to * 2GB. */ typedef __be32 rpc_fraghdr; #define RPC_LAST_STREAM_FRAGMENT (1U << 31) #define RPC_FRAGMENT_SIZE_MASK (~RPC_LAST_STREAM_FRAGMENT) #define RPC_MAX_FRAGMENT_SIZE ((1U << 31) - 1) /* * RPC call and reply header size as number of 32bit words (verifier * size computed separately, see below) */ #define RPC_CALLHDRSIZE (6) #define RPC_REPHDRSIZE (4) /* * Maximum RPC header size, including authentication, * as number of 32bit words (see RFCs 1831, 1832). * * xid 1 xdr unit = 4 bytes * mtype 1 * rpc_version 1 * program 1 * prog_version 1 * procedure 1 * cred { * flavor 1 * length 1 * body<RPC_MAX_AUTH_SIZE> 100 xdr units = 400 bytes * } * verf { * flavor 1 * length 1 * body<RPC_MAX_AUTH_SIZE> 100 xdr units = 400 bytes * } * TOTAL 210 xdr units = 840 bytes */ #define RPC_MAX_HEADER_WITH_AUTH \ (RPC_CALLHDRSIZE + 2*(2+RPC_MAX_AUTH_SIZE/4)) #define RPC_MAX_REPHEADER_WITH_AUTH \ (RPC_REPHDRSIZE + (2 + RPC_MAX_AUTH_SIZE/4)) /* * Well-known netids. 
See: * * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml */ #define RPCBIND_NETID_UDP "udp" #define RPCBIND_NETID_TCP "tcp" #define RPCBIND_NETID_RDMA "rdma" #define RPCBIND_NETID_SCTP "sctp" #define RPCBIND_NETID_UDP6 "udp6" #define RPCBIND_NETID_TCP6 "tcp6" #define RPCBIND_NETID_RDMA6 "rdma6" #define RPCBIND_NETID_SCTP6 "sctp6" #define RPCBIND_NETID_LOCAL "local" /* * Note that RFC 1833 does not put any size restrictions on the * netid string, but all currently defined netid's fit in 5 bytes. */ #define RPCBIND_MAXNETIDLEN (5u) /* * Universal addresses are introduced in RFC 1833 and further spelled * out in RFC 3530. RPCBIND_MAXUADDRLEN defines a maximum byte length * of a universal address for use in allocating buffers and character * arrays. * * Quoting RFC 3530, section 2.2: * * For TCP over IPv4 and for UDP over IPv4, the format of r_addr is the * US-ASCII string: * * h1.h2.h3.h4.p1.p2 * * The prefix, "h1.h2.h3.h4", is the standard textual form for * representing an IPv4 address, which is always four octets long. * Assuming big-endian ordering, h1, h2, h3, and h4, are respectively, * the first through fourth octets each converted to ASCII-decimal. * Assuming big-endian ordering, p1 and p2 are, respectively, the first * and second octets each converted to ASCII-decimal. For example, if a * host, in big-endian order, has an address of 0x0A010307 and there is * a service listening on, in big endian order, port 0x020F (decimal * 527), then the complete universal address is "10.1.3.7.2.15". * * ... * * For TCP over IPv6 and for UDP over IPv6, the format of r_addr is the * US-ASCII string: * * x1:x2:x3:x4:x5:x6:x7:x8.p1.p2 * * The suffix "p1.p2" is the service port, and is computed the same way * as with universal addresses for TCP and UDP over IPv4. The prefix, * "x1:x2:x3:x4:x5:x6:x7:x8", is the standard textual form for * representing an IPv6 address as defined in Section 2.2 of [RFC2373]. * Additionally, the two alternative forms specified in Section 2.2 of * [RFC2373] are also acceptable. */ #include <linux/inet.h> /* Maximum size of the port number part of a universal address */ #define RPCBIND_MAXUADDRPLEN sizeof(".255.255") /* Maximum size of an IPv4 universal address */ #define RPCBIND_MAXUADDR4LEN \ (INET_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN) /* Maximum size of an IPv6 universal address */ #define RPCBIND_MAXUADDR6LEN \ (INET6_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN) /* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... */ #define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_MSGPROT_H_ */ sunrpc/debug.h 0000644 00000005415 14722070374 0007330 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/debug.h * * Debugging support for sunrpc module * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #ifndef _LINUX_SUNRPC_DEBUG_H_ #define _LINUX_SUNRPC_DEBUG_H_ #include <uapi/linux/sunrpc/debug.h> /* * Debugging macros etc */ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) extern unsigned int rpc_debug; extern unsigned int nfs_debug; extern unsigned int nfsd_debug; extern unsigned int nlm_debug; #endif #define dprintk(fmt, ...) \ dfprintk(FACILITY, fmt, ##__VA_ARGS__) #define dprintk_cont(fmt, ...) \ dfprintk_cont(FACILITY, fmt, ##__VA_ARGS__) #define dprintk_rcu(fmt, ...) \ dfprintk_rcu(FACILITY, fmt, ##__VA_ARGS__) #define dprintk_rcu_cont(fmt, ...) 
\ dfprintk_rcu_cont(FACILITY, fmt, ##__VA_ARGS__) #undef ifdebug #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) # define dfprintk(fac, fmt, ...) \ do { \ ifdebug(fac) \ printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ } while (0) # define dfprintk_cont(fac, fmt, ...) \ do { \ ifdebug(fac) \ printk(KERN_CONT fmt, ##__VA_ARGS__); \ } while (0) # define dfprintk_rcu(fac, fmt, ...) \ do { \ ifdebug(fac) { \ rcu_read_lock(); \ printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ rcu_read_unlock(); \ } \ } while (0) # define dfprintk_rcu_cont(fac, fmt, ...) \ do { \ ifdebug(fac) { \ rcu_read_lock(); \ printk(KERN_CONT fmt, ##__VA_ARGS__); \ rcu_read_unlock(); \ } \ } while (0) # define RPC_IFDEBUG(x) x #else # define ifdebug(fac) if (0) # define dfprintk(fac, fmt, ...) do {} while (0) # define dfprintk_cont(fac, fmt, ...) do {} while (0) # define dfprintk_rcu(fac, fmt, ...) do {} while (0) # define dfprintk_rcu_cont(fac, fmt, ...) do {} while (0) # define RPC_IFDEBUG(x) #endif /* * Sysctl interface for RPC debugging */ struct rpc_clnt; struct rpc_xprt; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) void rpc_register_sysctl(void); void rpc_unregister_sysctl(void); void sunrpc_debugfs_init(void); void sunrpc_debugfs_exit(void); void rpc_clnt_debugfs_register(struct rpc_clnt *); void rpc_clnt_debugfs_unregister(struct rpc_clnt *); void rpc_xprt_debugfs_register(struct rpc_xprt *); void rpc_xprt_debugfs_unregister(struct rpc_xprt *); #else static inline void sunrpc_debugfs_init(void) { return; } static inline void sunrpc_debugfs_exit(void) { return; } static inline void rpc_clnt_debugfs_register(struct rpc_clnt *clnt) { return; } static inline void rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) { return; } static inline void rpc_xprt_debugfs_register(struct rpc_xprt *xprt) { return; } static inline void rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt) { return; } #endif #endif /* _LINUX_SUNRPC_DEBUG_H_ */ sunrpc/xprtrdma.h 0000644 00000005717 14722070374 0010100 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the BSD-type * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Network Appliance, Inc. nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LINUX_SUNRPC_XPRTRDMA_H #define _LINUX_SUNRPC_XPRTRDMA_H /* * Constants. Max RPC/NFS header is big enough to account for * additional marshaling buffers passed down by Linux client. * * RDMA header is currently fixed max size, and is big enough for a * fully-chunked NFS message (read chunks are the largest). Note only * a single chunk type per message is supported currently. */ #define RPCRDMA_MIN_SLOT_TABLE (4U) #define RPCRDMA_DEF_SLOT_TABLE (128U) #define RPCRDMA_MAX_SLOT_TABLE (16384U) #define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */ #define RPCRDMA_DEF_INLINE (4096) /* default inline thresh */ #define RPCRDMA_MAX_INLINE (65536) /* max inline thresh */ /* Memory registration strategies, by number. * This is part of a kernel / user space API. Do not remove. */ enum rpcrdma_memreg { RPCRDMA_BOUNCEBUFFERS = 0, RPCRDMA_REGISTER, RPCRDMA_MEMWINDOWS, RPCRDMA_MEMWINDOWS_ASYNC, RPCRDMA_MTHCAFMR, RPCRDMA_FRWR, RPCRDMA_ALLPHYSICAL, RPCRDMA_LAST }; #endif /* _LINUX_SUNRPC_XPRTRDMA_H */ sunrpc/gss_krb5.h 0000644 00000026351 14722070374 0007763 0 ustar 00 /* * linux/include/linux/sunrpc/gss_krb5_types.h * * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h, * lib/gssapi/krb5/gssapiP_krb5.h, and others * * Copyright (c) 2000-2008 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> * Bruce Fields <bfields@umich.edu> */ /* * Copyright 1995 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. 
* */ #include <crypto/skcipher.h> #include <linux/sunrpc/auth_gss.h> #include <linux/sunrpc/gss_err.h> #include <linux/sunrpc/gss_asn1.h> /* Length of constant used in key derivation */ #define GSS_KRB5_K5CLENGTH (5) /* Maximum key length (in bytes) for the supported crypto algorithms*/ #define GSS_KRB5_MAX_KEYLEN (32) /* Maximum checksum function output for the supported crypto algorithms */ #define GSS_KRB5_MAX_CKSUM_LEN (20) /* Maximum blocksize for the supported crypto algorithms */ #define GSS_KRB5_MAX_BLOCKSIZE (16) struct krb5_ctx; struct gss_krb5_enctype { const u32 etype; /* encryption (key) type */ const u32 ctype; /* checksum type */ const char *name; /* "friendly" name */ const char *encrypt_name; /* crypto encrypt name */ const char *cksum_name; /* crypto checksum name */ const u16 signalg; /* signing algorithm */ const u16 sealalg; /* sealing algorithm */ const u32 blocksize; /* encryption blocksize */ const u32 conflen; /* confounder length (normally the same as the blocksize) */ const u32 cksumlength; /* checksum length */ const u32 keyed_cksum; /* is it a keyed cksum? */ const u32 keybytes; /* raw key len, in bytes */ const u32 keylength; /* final key len, in bytes */ u32 (*encrypt) (struct crypto_sync_skcipher *tfm, void *iv, void *in, void *out, int length); /* encryption function */ u32 (*decrypt) (struct crypto_sync_skcipher *tfm, void *iv, void *in, void *out, int length); /* decryption function */ u32 (*mk_key) (const struct gss_krb5_enctype *gk5e, struct xdr_netobj *in, struct xdr_netobj *out); /* complete key generation */ u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, struct page **pages); /* v2 encryption function */ u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len, struct xdr_buf *buf, u32 *headskip, u32 *tailskip); /* v2 decryption function */ }; /* krb5_ctx flags definitions */ #define KRB5_CTX_FLAG_INITIATOR 0x00000001 #define KRB5_CTX_FLAG_CFX 0x00000002 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004 struct krb5_ctx { int initiate; /* 1 = initiating, 0 = accepting */ u32 enctype; u32 flags; const struct gss_krb5_enctype *gk5e; /* enctype-specific info */ struct crypto_sync_skcipher *enc; struct crypto_sync_skcipher *seq; struct crypto_sync_skcipher *acceptor_enc; struct crypto_sync_skcipher *initiator_enc; struct crypto_sync_skcipher *acceptor_enc_aux; struct crypto_sync_skcipher *initiator_enc_aux; u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ u8 cksum[GSS_KRB5_MAX_KEYLEN]; s32 endtime; atomic_t seq_send; atomic64_t seq_send64; struct xdr_netobj mech_used; u8 initiator_sign[GSS_KRB5_MAX_KEYLEN]; u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN]; u8 initiator_seal[GSS_KRB5_MAX_KEYLEN]; u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN]; u8 initiator_integ[GSS_KRB5_MAX_KEYLEN]; u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN]; }; /* The length of the Kerberos GSS token header */ #define GSS_KRB5_TOK_HDR_LEN (16) #define KG_TOK_MIC_MSG 0x0101 #define KG_TOK_WRAP_MSG 0x0201 #define KG2_TOK_INITIAL 0x0101 #define KG2_TOK_RESPONSE 0x0202 #define KG2_TOK_MIC 0x0404 #define KG2_TOK_WRAP 0x0504 #define KG2_TOKEN_FLAG_SENTBYACCEPTOR 0x01 #define KG2_TOKEN_FLAG_SEALED 0x02 #define KG2_TOKEN_FLAG_ACCEPTORSUBKEY 0x04 #define KG2_RESP_FLAG_ERROR 0x0001 #define KG2_RESP_FLAG_DELEG_OK 0x0002 enum sgn_alg { SGN_ALG_DES_MAC_MD5 = 0x0000, SGN_ALG_MD2_5 = 0x0001, SGN_ALG_DES_MAC = 0x0002, SGN_ALG_3 = 0x0003, /* not published */ SGN_ALG_HMAC_MD5 = 0x0011, /* microsoft w2k; no support */ SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004 }; enum seal_alg { SEAL_ALG_NONE = 0xffff, 
SEAL_ALG_DES = 0x0000, SEAL_ALG_1 = 0x0001, /* not published */ SEAL_ALG_MICROSOFT_RC4 = 0x0010,/* microsoft w2k; no support */ SEAL_ALG_DES3KD = 0x0002 }; #define CKSUMTYPE_CRC32 0x0001 #define CKSUMTYPE_RSA_MD4 0x0002 #define CKSUMTYPE_RSA_MD4_DES 0x0003 #define CKSUMTYPE_DESCBC 0x0004 #define CKSUMTYPE_RSA_MD5 0x0007 #define CKSUMTYPE_RSA_MD5_DES 0x0008 #define CKSUMTYPE_NIST_SHA 0x0009 #define CKSUMTYPE_HMAC_SHA1_DES3 0x000c #define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f #define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010 #define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */ /* from gssapi_err_krb5.h */ #define KG_CCACHE_NOMATCH (39756032L) #define KG_KEYTAB_NOMATCH (39756033L) #define KG_TGT_MISSING (39756034L) #define KG_NO_SUBKEY (39756035L) #define KG_CONTEXT_ESTABLISHED (39756036L) #define KG_BAD_SIGN_TYPE (39756037L) #define KG_BAD_LENGTH (39756038L) #define KG_CTX_INCOMPLETE (39756039L) #define KG_CONTEXT (39756040L) #define KG_CRED (39756041L) #define KG_ENC_DESC (39756042L) #define KG_BAD_SEQ (39756043L) #define KG_EMPTY_CCACHE (39756044L) #define KG_NO_CTYPES (39756045L) /* per Kerberos v5 protocol spec crypto types from the wire. * these get mapped to linux kernel crypto routines. */ #define ENCTYPE_NULL 0x0000 #define ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */ #define ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */ #define ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */ #define ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */ /* XXX deprecated? */ #define ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */ #define ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */ #define ENCTYPE_DES_HMAC_SHA1 0x0008 #define ENCTYPE_DES3_CBC_SHA1 0x0010 #define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011 #define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012 #define ENCTYPE_ARCFOUR_HMAC 0x0017 #define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018 #define ENCTYPE_UNKNOWN 0x01ff /* * Constants used for key derivation */ /* for 3DES */ #define KG_USAGE_SEAL (22) #define KG_USAGE_SIGN (23) #define KG_USAGE_SEQ (24) /* from rfc3961 */ #define KEY_USAGE_SEED_CHECKSUM (0x99) #define KEY_USAGE_SEED_ENCRYPTION (0xAA) #define KEY_USAGE_SEED_INTEGRITY (0x55) /* from rfc4121 */ #define KG_USAGE_ACCEPTOR_SEAL (22) #define KG_USAGE_ACCEPTOR_SIGN (23) #define KG_USAGE_INITIATOR_SEAL (24) #define KG_USAGE_INITIATOR_SIGN (25) /* * This compile-time check verifies that we will not exceed the * slack space allotted by the client and server auth_gss code * before they call gss_wrap(). 
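 *
 * As a worked example (computed from the constants defined above; an
 * illustration, not a normative figure), the sum below works out to
 *
 *	16 + 20 + 16 + 16 + 16 + 20 + 4 + 4 + 16 + 20 = 148 bytes
 *
 * of worst-case slack.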
*/ #define GSS_KRB5_MAX_SLACK_NEEDED \ (GSS_KRB5_TOK_HDR_LEN /* gss token header */ \ + GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \ + GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \ + GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \ + GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\ + GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \ + 4 + 4 /* RPC verifier */ \ + GSS_KRB5_TOK_HDR_LEN \ + GSS_KRB5_MAX_CKSUM_LEN) u32 make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, struct xdr_buf *body, int body_offset, u8 *cksumkey, unsigned int usage, struct xdr_netobj *cksumout); u32 make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen, struct xdr_buf *body, int body_offset, u8 *key, unsigned int usage, struct xdr_netobj *cksum); u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *, struct xdr_netobj *); u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *, struct xdr_netobj *); u32 gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset, struct xdr_buf *outbuf, struct page **pages); u32 gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len, struct xdr_buf *buf); u32 krb5_encrypt(struct crypto_sync_skcipher *key, void *iv, void *in, void *out, int length); u32 krb5_decrypt(struct crypto_sync_skcipher *key, void *iv, void *in, void *out, int length); int gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf, int offset, struct page **pages); int gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf, int offset); s32 krb5_make_seq_num(struct krb5_ctx *kctx, struct crypto_sync_skcipher *key, int direction, u32 seqnum, unsigned char *cksum, unsigned char *buf); s32 krb5_get_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, unsigned char *buf, int *direction, u32 *seqnum); int xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen); u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, const struct xdr_netobj *in_constant, gfp_t gfp_mask); u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e, struct xdr_netobj *randombits, struct xdr_netobj *key); u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e, struct xdr_netobj *randombits, struct xdr_netobj *key); u32 gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, struct page **pages); u32 gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len, struct xdr_buf *buf, u32 *plainoffset, u32 *plainlen); int krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_sync_skcipher *cipher, unsigned char *cksum); int krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_sync_skcipher *cipher, s32 seqnum); void gss_krb5_make_confounder(char *p, u32 conflen); sunrpc/gss_api.h 0000644 00000011161 14722070374 0007662 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/gss_api.h * * Somewhat simplified version of the gss api. 
* * Dug Song <dugsong@monkey.org> * Andy Adamson <andros@umich.edu> * Bruce Fields <bfields@umich.edu> * Copyright (c) 2000 The Regents of the University of Michigan */ #ifndef _LINUX_SUNRPC_GSS_API_H #define _LINUX_SUNRPC_GSS_API_H #ifdef __KERNEL__ #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/msg_prot.h> #include <linux/uio.h> /* The mechanism-independent gss-api context: */ struct gss_ctx { struct gss_api_mech *mech_type; void *internal_ctx_id; unsigned int slack, align; }; #define GSS_C_NO_BUFFER ((struct xdr_netobj) 0) #define GSS_C_NO_CONTEXT ((struct gss_ctx *) 0) #define GSS_C_QOP_DEFAULT (0) /*XXX arbitrary length - is this set somewhere? */ #define GSS_OID_MAX_LEN 32 struct rpcsec_gss_oid { unsigned int len; u8 data[GSS_OID_MAX_LEN]; }; /* From RFC 3530 */ struct rpcsec_gss_info { struct rpcsec_gss_oid oid; u32 qop; u32 service; }; /* gss-api prototypes; note that these are somewhat simplified versions of * the prototypes specified in RFC 2744. */ int gss_import_sec_context( const void* input_token, size_t bufsize, struct gss_api_mech *mech, struct gss_ctx **ctx_id, time_t *endtime, gfp_t gfp_mask); u32 gss_get_mic( struct gss_ctx *ctx_id, struct xdr_buf *message, struct xdr_netobj *mic_token); u32 gss_verify_mic( struct gss_ctx *ctx_id, struct xdr_buf *message, struct xdr_netobj *mic_token); u32 gss_wrap( struct gss_ctx *ctx_id, int offset, struct xdr_buf *outbuf, struct page **inpages); u32 gss_unwrap( struct gss_ctx *ctx_id, int offset, int len, struct xdr_buf *inbuf); u32 gss_delete_sec_context( struct gss_ctx **ctx_id); rpc_authflavor_t gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 qop, u32 service); u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor); bool gss_pseudoflavor_to_datatouch(struct gss_api_mech *, u32 pseudoflavor); char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service); struct pf_desc { u32 pseudoflavor; u32 qop; u32 service; char *name; char *auth_domain_name; struct auth_domain *domain; bool datatouch; }; /* Different mechanisms (e.g., krb5 or spkm3) may implement gss-api, and * mechanisms may be dynamically registered or unregistered by modules. */ /* Each mechanism is described by the following struct: */ struct gss_api_mech { struct list_head gm_list; struct module *gm_owner; struct rpcsec_gss_oid gm_oid; char *gm_name; const struct gss_api_ops *gm_ops; /* pseudoflavors supported by this mechanism: */ int gm_pf_num; struct pf_desc * gm_pfs; /* Should the following be a callback operation instead? */ const char *gm_upcall_enctypes; }; /* and must provide the following operations: */ struct gss_api_ops { int (*gss_import_sec_context)( const void *input_token, size_t bufsize, struct gss_ctx *ctx_id, time_t *endtime, gfp_t gfp_mask); u32 (*gss_get_mic)( struct gss_ctx *ctx_id, struct xdr_buf *message, struct xdr_netobj *mic_token); u32 (*gss_verify_mic)( struct gss_ctx *ctx_id, struct xdr_buf *message, struct xdr_netobj *mic_token); u32 (*gss_wrap)( struct gss_ctx *ctx_id, int offset, struct xdr_buf *outbuf, struct page **inpages); u32 (*gss_unwrap)( struct gss_ctx *ctx_id, int offset, int len, struct xdr_buf *buf); void (*gss_delete_sec_context)( void *internal_ctx_id); }; int gss_mech_register(struct gss_api_mech *); void gss_mech_unregister(struct gss_api_mech *); /* returns a mechanism descriptor given an OID, and increments the mechanism's * reference count. 
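 *
 * A minimal lookup/release sketch (assumes the caller has already built
 * a struct rpcsec_gss_oid named "oid"; the name is illustrative only):
 *
 *	struct gss_api_mech *mech = gss_mech_get_by_OID(&oid);
 *	if (mech) {
 *		... use mech->gm_ops here ...
 *		gss_mech_put(mech);
 *	}
 *
 * The gss_mech_put() balances the reference taken by the lookup, per
 * the rule noted below.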
*/ struct gss_api_mech * gss_mech_get_by_OID(struct rpcsec_gss_oid *); /* Given a GSS security tuple, look up a pseudoflavor */ rpc_authflavor_t gss_mech_info2flavor(struct rpcsec_gss_info *); /* Given a pseudoflavor, look up a GSS security tuple */ int gss_mech_flavor2info(rpc_authflavor_t, struct rpcsec_gss_info *); /* Returns a reference to a mechanism, given a name like "krb5" etc. */ struct gss_api_mech *gss_mech_get_by_name(const char *); /* Similar, but get by pseudoflavor. */ struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32); /* Fill in an array with a list of supported pseudoflavors */ int gss_mech_list_pseudoflavors(rpc_authflavor_t *, int); struct gss_api_mech * gss_mech_get(struct gss_api_mech *); /* For every successful gss_mech_get or gss_mech_get_by_* call there must be a * corresponding call to gss_mech_put. */ void gss_mech_put(struct gss_api_mech *); #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_GSS_API_H */ sunrpc/sched.h 0000644 00000023423 14722070374 0007327 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/sunrpc/sched.h * * Scheduling primitives for kernel Sun RPC. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #ifndef _LINUX_SUNRPC_SCHED_H_ #define _LINUX_SUNRPC_SCHED_H_ #include <linux/timer.h> #include <linux/ktime.h> #include <linux/sunrpc/types.h> #include <linux/spinlock.h> #include <linux/wait_bit.h> #include <linux/workqueue.h> #include <linux/sunrpc/xdr.h> /* * This is the actual RPC procedure call info. */ struct rpc_procinfo; struct rpc_message { const struct rpc_procinfo *rpc_proc; /* Procedure information */ void * rpc_argp; /* Arguments */ void * rpc_resp; /* Result */ const struct cred * rpc_cred; /* Credentials */ }; struct rpc_call_ops; struct rpc_wait_queue; struct rpc_wait { struct list_head list; /* wait queue links */ struct list_head links; /* Links to related tasks */ struct list_head timer_list; /* Timer list */ }; /* * This is the RPC task struct */ struct rpc_task { atomic_t tk_count; /* Reference count */ int tk_status; /* result of last operation */ struct list_head tk_task; /* global list of tasks */ /* * callback to be executed after waking up * action next procedure for async tasks */ void (*tk_callback)(struct rpc_task *); void (*tk_action)(struct rpc_task *); unsigned long tk_timeout; /* timeout for rpc_sleep() */ unsigned long tk_runstate; /* Task run status */ struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */ union { struct work_struct tk_work; /* Async task work queue */ struct rpc_wait tk_wait; /* RPC wait */ } u; int tk_rpc_status; /* Result of last RPC operation */ /* * RPC call state */ struct rpc_message tk_msg; /* RPC call info */ void * tk_calldata; /* Caller private data */ const struct rpc_call_ops *tk_ops; /* Caller callbacks */ struct rpc_clnt * tk_client; /* RPC client */ struct rpc_xprt * tk_xprt; /* Transport */ struct rpc_cred * tk_op_cred; /* cred being operated on */ struct rpc_rqst * tk_rqstp; /* RPC request */ struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could * be any workqueue */ ktime_t tk_start; /* RPC task init timestamp */ pid_t tk_owner; /* Process id for batching tasks */ unsigned short tk_flags; /* misc flags */ unsigned short tk_timeouts; /* maj timeouts */ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) unsigned short tk_pid; /* debugging aid */ #endif unsigned char tk_priority : 2,/* Task priority */ tk_garb_retry : 2, tk_cred_retry : 2; }; typedef void (*rpc_action)(struct rpc_task *); struct 
rpc_call_ops { void (*rpc_call_prepare)(struct rpc_task *, void *); void (*rpc_call_done)(struct rpc_task *, void *); void (*rpc_count_stats)(struct rpc_task *, void *); void (*rpc_release)(void *); }; struct rpc_task_setup { struct rpc_task *task; struct rpc_clnt *rpc_client; struct rpc_xprt *rpc_xprt; struct rpc_cred *rpc_op_cred; /* credential being operated on */ const struct rpc_message *rpc_message; const struct rpc_call_ops *callback_ops; void *callback_data; struct workqueue_struct *workqueue; unsigned short flags; signed char priority; }; /* * RPC task flags */ #define RPC_TASK_ASYNC 0x0001 /* is an async task */ #define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */ #define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */ #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ #define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ #define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */ #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ #define RPC_TASK_SENT 0x0800 /* message was sent */ #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ #define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */ #define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */ #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) #define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) #define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) #define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) #define RPC_TASK_RUNNING 0 #define RPC_TASK_QUEUED 1 #define RPC_TASK_ACTIVE 2 #define RPC_TASK_NEED_XMIT 3 #define RPC_TASK_NEED_RECV 4 #define RPC_TASK_MSG_PIN_WAIT 5 #define RPC_TASK_SIGNALLED 6 #define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) #define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) #define rpc_test_and_set_running(t) \ test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) #define rpc_clear_running(t) \ do { \ smp_mb__before_atomic(); \ clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \ smp_mb__after_atomic(); \ } while (0) #define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) #define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) #define rpc_clear_queued(t) \ do { \ smp_mb__before_atomic(); \ clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \ smp_mb__after_atomic(); \ } while (0) #define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) #define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate) /* * Task priorities. * Note: if you change these, you must also change * the task initialization definitions below. 
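 *
 * With the values below, RPC_NR_PRIORITY works out to
 * 1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW = 1 + 2 - (-1) = 4
 * distinct queue levels.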
*/ #define RPC_PRIORITY_LOW (-1) #define RPC_PRIORITY_NORMAL (0) #define RPC_PRIORITY_HIGH (1) #define RPC_PRIORITY_PRIVILEGED (2) #define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW) struct rpc_timer { struct list_head list; unsigned long expires; struct delayed_work dwork; }; /* * RPC synchronization objects */ struct rpc_wait_queue { spinlock_t lock; struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ unsigned char priority; /* current priority */ unsigned char nr; /* # tasks remaining for cookie */ unsigned int qlen; /* total # tasks waiting in queue */ struct rpc_timer timer_list; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) const char * name; #endif }; /* * This is the # requests to send consecutively * from a single cookie. The aim is to improve * performance of NFS operations such as read/write. */ #define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) /* * Function prototypes */ struct rpc_task *rpc_new_task(const struct rpc_task_setup *); struct rpc_task *rpc_run_task(const struct rpc_task_setup *); struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req); void rpc_put_task(struct rpc_task *); void rpc_put_task_async(struct rpc_task *); void rpc_signal_task(struct rpc_task *); void rpc_exit_task(struct rpc_task *); void rpc_exit(struct rpc_task *, int); void rpc_release_calldata(const struct rpc_call_ops *, void *); void rpc_killall_tasks(struct rpc_clnt *); void rpc_execute(struct rpc_task *); void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *); void rpc_init_wait_queue(struct rpc_wait_queue *, const char *); void rpc_destroy_wait_queue(struct rpc_wait_queue *); unsigned long rpc_task_timeout(const struct rpc_task *task); void rpc_sleep_on_timeout(struct rpc_wait_queue *queue, struct rpc_task *task, rpc_action action, unsigned long timeout); void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *, rpc_action action); void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue, struct rpc_task *task, unsigned long timeout, int priority); void rpc_sleep_on_priority(struct rpc_wait_queue *, struct rpc_task *, int priority); void rpc_wake_up_queued_task(struct rpc_wait_queue *, struct rpc_task *); void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *, struct rpc_task *, int); void rpc_wake_up(struct rpc_wait_queue *); struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq, struct rpc_wait_queue *, bool (*)(struct rpc_task *, void *), void *); struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *, bool (*)(struct rpc_task *, void *), void *); void rpc_wake_up_status(struct rpc_wait_queue *, int); void rpc_delay(struct rpc_task *, unsigned long); int rpc_malloc(struct rpc_task *); void rpc_free(struct rpc_task *); int rpciod_up(void); void rpciod_down(void); int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct net; void rpc_show_tasks(struct net *); #endif int rpc_init_mempool(void); void rpc_destroy_mempool(void); extern struct workqueue_struct *rpciod_workqueue; extern struct workqueue_struct *xprtiod_workqueue; void rpc_prepare_task(struct rpc_task *task); static inline int rpc_wait_for_completion_task(struct rpc_task *task) { return __rpc_wait_for_completion_task(task, NULL); } #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || 
IS_ENABLED(CONFIG_TRACEPOINTS) static inline const char * rpc_qname(const struct rpc_wait_queue *q) { return ((q && q->name) ? q->name : "unknown"); } static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, const char *name) { q->name = name; } #else static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, const char *name) { } #endif #if IS_ENABLED(CONFIG_SUNRPC_SWAP) int rpc_clnt_swap_activate(struct rpc_clnt *clnt); void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt); #else static inline int rpc_clnt_swap_activate(struct rpc_clnt *clnt) { return -EINVAL; } static inline void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) { } #endif /* CONFIG_SUNRPC_SWAP */ #endif /* _LINUX_SUNRPC_SCHED_H_ */ sunrpc/rpc_pipe_fs.h 0000644 00000007605 14722070374 0010536 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SUNRPC_RPC_PIPE_FS_H #define _LINUX_SUNRPC_RPC_PIPE_FS_H #ifdef __KERNEL__ #include <linux/workqueue.h> struct rpc_pipe_dir_head { struct list_head pdh_entries; struct dentry *pdh_dentry; }; struct rpc_pipe_dir_object_ops; struct rpc_pipe_dir_object { struct list_head pdo_head; const struct rpc_pipe_dir_object_ops *pdo_ops; void *pdo_data; }; struct rpc_pipe_dir_object_ops { int (*create)(struct dentry *dir, struct rpc_pipe_dir_object *pdo); void (*destroy)(struct dentry *dir, struct rpc_pipe_dir_object *pdo); }; struct rpc_pipe_msg { struct list_head list; void *data; size_t len; size_t copied; int errno; }; struct rpc_pipe_ops { ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t); ssize_t (*downcall)(struct file *, const char __user *, size_t); void (*release_pipe)(struct inode *); int (*open_pipe)(struct inode *); void (*destroy_msg)(struct rpc_pipe_msg *); }; struct rpc_pipe { struct list_head pipe; struct list_head in_upcall; struct list_head in_downcall; int pipelen; int nreaders; int nwriters; #define RPC_PIPE_WAIT_FOR_OPEN 1 int flags; struct delayed_work queue_timeout; const struct rpc_pipe_ops *ops; spinlock_t lock; struct dentry *dentry; }; struct rpc_inode { struct inode vfs_inode; void *private; struct rpc_pipe *pipe; wait_queue_head_t waitq; }; static inline struct rpc_inode * RPC_I(struct inode *inode) { return container_of(inode, struct rpc_inode, vfs_inode); } enum { SUNRPC_PIPEFS_NFS_PRIO, SUNRPC_PIPEFS_RPC_PRIO, }; extern int rpc_pipefs_notifier_register(struct notifier_block *); extern void rpc_pipefs_notifier_unregister(struct notifier_block *); enum { RPC_PIPEFS_MOUNT, RPC_PIPEFS_UMOUNT, }; extern struct dentry *rpc_d_lookup_sb(const struct super_block *sb, const unsigned char *dir_name); extern int rpc_pipefs_init_net(struct net *net); extern void rpc_pipefs_exit_net(struct net *net); extern struct super_block *rpc_get_sb_net(const struct net *net); extern void rpc_put_sb_net(const struct net *net); extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *, char __user *, size_t); extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *); /* returns true if the msg is in-flight, i.e., already eaten by the peer */ static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg) { return (msg->copied != 0 && list_empty(&msg->list)); } struct rpc_clnt; extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *); extern int rpc_remove_client_dir(struct rpc_clnt *); extern void rpc_init_pipe_dir_head(struct rpc_pipe_dir_head *pdh); extern void rpc_init_pipe_dir_object(struct rpc_pipe_dir_object *pdo, const struct 
rpc_pipe_dir_object_ops *pdo_ops, void *pdo_data); extern int rpc_add_pipe_dir_object(struct net *net, struct rpc_pipe_dir_head *pdh, struct rpc_pipe_dir_object *pdo); extern void rpc_remove_pipe_dir_object(struct net *net, struct rpc_pipe_dir_head *pdh, struct rpc_pipe_dir_object *pdo); extern struct rpc_pipe_dir_object *rpc_find_or_alloc_pipe_dir_object( struct net *net, struct rpc_pipe_dir_head *pdh, int (*match)(struct rpc_pipe_dir_object *, void *), struct rpc_pipe_dir_object *(*alloc)(void *), void *data); struct cache_detail; extern struct dentry *rpc_create_cache_dir(struct dentry *, const char *, umode_t umode, struct cache_detail *); extern void rpc_remove_cache_dir(struct dentry *); struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags); void rpc_destroy_pipe_data(struct rpc_pipe *pipe); extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *, struct rpc_pipe *); extern int rpc_unlink(struct dentry *); extern int register_rpc_pipefs(void); extern void unregister_rpc_pipefs(void); extern bool gssd_running(struct net *net); #endif #endif profile.h 0000644 00000005267 14722070374 0006375 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PROFILE_H #define _LINUX_PROFILE_H #include <linux/kernel.h> #include <linux/init.h> #include <linux/cpumask.h> #include <linux/cache.h> #include <asm/errno.h> #define CPU_PROFILING 1 #define SCHED_PROFILING 2 #define SLEEP_PROFILING 3 #define KVM_PROFILING 4 struct proc_dir_entry; struct pt_regs; struct notifier_block; #if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) void create_prof_cpu_mask(void); int create_proc_profile(void); #else static inline void create_prof_cpu_mask(void) { } static inline int create_proc_profile(void) { return 0; } #endif enum profile_type { PROFILE_TASK_EXIT, PROFILE_MUNMAP }; #ifdef CONFIG_PROFILING extern int prof_on __read_mostly; /* init basic kernel profiler */ int profile_init(void); int profile_setup(char *str); void profile_tick(int type); int setup_profiling_timer(unsigned int multiplier); /* * Add multiple profiler hits to a given address: */ void profile_hits(int type, void *ip, unsigned int nr_hits); /* * Single profiler hit: */ static inline void profile_hit(int type, void *ip) { /* * Speedup for the common (no profiling enabled) case: */ if (unlikely(prof_on == type)) profile_hits(type, ip, 1); } struct task_struct; struct mm_struct; /* task is in do_exit() */ void profile_task_exit(struct task_struct * task); /* task is dead, free task struct ? Returns 1 if * the task was taken, 0 if the task should be freed. 
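 *
 * A sketch of the intended calling convention (illustrative only, not a
 * quote of any particular call site; free_task() stands in for whatever
 * reclaim the caller uses):
 *
 *	if (!profile_handoff_task(task))
 *		free_task(task);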
*/ int profile_handoff_task(struct task_struct * task); /* sys_munmap */ void profile_munmap(unsigned long addr); int task_handoff_register(struct notifier_block * n); int task_handoff_unregister(struct notifier_block * n); int profile_event_register(enum profile_type, struct notifier_block * n); int profile_event_unregister(enum profile_type, struct notifier_block * n); struct pt_regs; #else #define prof_on 0 static inline int profile_init(void) { return 0; } static inline void profile_tick(int type) { return; } static inline void profile_hits(int type, void *ip, unsigned int nr_hits) { return; } static inline void profile_hit(int type, void *ip) { return; } static inline int task_handoff_register(struct notifier_block * n) { return -ENOSYS; } static inline int task_handoff_unregister(struct notifier_block * n) { return -ENOSYS; } static inline int profile_event_register(enum profile_type t, struct notifier_block * n) { return -ENOSYS; } static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n) { return -ENOSYS; } #define profile_task_exit(a) do { } while (0) #define profile_handoff_task(a) (0) #define profile_munmap(a) do { } while (0) #endif /* CONFIG_PROFILING */ #endif /* _LINUX_PROFILE_H */ nvram.h 0000644 00000006736 14722070374 0006062 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NVRAM_H #define _LINUX_NVRAM_H #include <linux/errno.h> #include <uapi/linux/nvram.h> #ifdef CONFIG_PPC #include <asm/machdep.h> #endif /** * struct nvram_ops - NVRAM functionality made available to drivers * @read: validate checksum (if any) then load a range of bytes from NVRAM * @write: store a range of bytes to NVRAM then update checksum (if any) * @read_byte: load a single byte from NVRAM * @write_byte: store a single byte to NVRAM * @get_size: return the fixed number of bytes in the NVRAM * * Architectures which provide an nvram ops struct need not implement all * of these methods. If the NVRAM hardware can be accessed only one byte * at a time then it may be sufficient to provide .read_byte and .write_byte. * If the NVRAM has a checksum (and it is to be checked) the .read and * .write methods can be used to implement that efficiently. * * Portable drivers may use the wrapper functions defined here. * The nvram_read() and nvram_write() functions call the .read and .write * methods when available and fall back on the .read_byte and .write_byte * methods otherwise. 
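 *
 * A minimal sketch of the portable wrappers (buffer name and size are
 * hypothetical):
 *
 *	char buf[16];
 *	loff_t pos = 0;
 *	ssize_t n = nvram_read(buf, sizeof(buf), &pos);
 *
 * On success, n is the number of bytes copied and pos has advanced past
 * them; a negative return reports an errno such as -ENODEV.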
*/ struct nvram_ops { ssize_t (*get_size)(void); unsigned char (*read_byte)(int); void (*write_byte)(unsigned char, int); ssize_t (*read)(char *, size_t, loff_t *); ssize_t (*write)(char *, size_t, loff_t *); #if defined(CONFIG_X86) || defined(CONFIG_M68K) long (*initialize)(void); long (*set_checksum)(void); #endif }; extern const struct nvram_ops arch_nvram_ops; static inline ssize_t nvram_get_size(void) { #ifdef CONFIG_PPC if (ppc_md.nvram_size) return ppc_md.nvram_size(); #else if (arch_nvram_ops.get_size) return arch_nvram_ops.get_size(); #endif return -ENODEV; } static inline unsigned char nvram_read_byte(int addr) { #ifdef CONFIG_PPC if (ppc_md.nvram_read_val) return ppc_md.nvram_read_val(addr); #else if (arch_nvram_ops.read_byte) return arch_nvram_ops.read_byte(addr); #endif return 0xFF; } static inline void nvram_write_byte(unsigned char val, int addr) { #ifdef CONFIG_PPC if (ppc_md.nvram_write_val) ppc_md.nvram_write_val(addr, val); #else if (arch_nvram_ops.write_byte) arch_nvram_ops.write_byte(val, addr); #endif } static inline ssize_t nvram_read_bytes(char *buf, size_t count, loff_t *ppos) { ssize_t nvram_size = nvram_get_size(); loff_t i; char *p = buf; if (nvram_size < 0) return nvram_size; for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count) *p = nvram_read_byte(i); *ppos = i; return p - buf; } static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos) { ssize_t nvram_size = nvram_get_size(); loff_t i; char *p = buf; if (nvram_size < 0) return nvram_size; for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count) nvram_write_byte(*p, i); *ppos = i; return p - buf; } static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) { #ifdef CONFIG_PPC if (ppc_md.nvram_read) return ppc_md.nvram_read(buf, count, ppos); #else if (arch_nvram_ops.read) return arch_nvram_ops.read(buf, count, ppos); #endif return nvram_read_bytes(buf, count, ppos); } static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) { #ifdef CONFIG_PPC if (ppc_md.nvram_write) return ppc_md.nvram_write(buf, count, ppos); #else if (arch_nvram_ops.write) return arch_nvram_ops.write(buf, count, ppos); #endif return nvram_write_bytes(buf, count, ppos); } #endif /* _LINUX_NVRAM_H */ mISDNdsp.h 0000644 00000002301 14722070374 0006340 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __mISDNdsp_H__ #define __mISDNdsp_H__ struct mISDN_dsp_element_arg { char *name; char *def; char *desc; }; struct mISDN_dsp_element { char *name; void *(*new)(const char *arg); void (*free)(void *p); void (*process_tx)(void *p, unsigned char *data, int len); void (*process_rx)(void *p, unsigned char *data, int len, unsigned int txlen); int num_args; struct mISDN_dsp_element_arg *args; }; extern int mISDN_dsp_element_register(struct mISDN_dsp_element *elem); extern void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem); struct dsp_features { int hfc_id; /* unique id to identify the chip (or -1) */ int hfc_dtmf; /* set if HFCmulti card supports dtmf */ int hfc_conf; /* set if HFCmulti card supports conferences */ int hfc_loops; /* set if card supports tone loops */ int hfc_echocanhw; /* set if card supports echo cancellation */ int pcm_id; /* unique id to identify the pcm bus (or -1) */ int pcm_slots; /* number of slots on the pcm bus */ int pcm_banks; /* number of IO banks of pcm bus */ int unclocked; /* data is not clocked (has jitter/loss) */ int unordered; /* data is unordered (packets have index) */ }; #endif atm_tcp.h 0000644 00000000777 14722070374 0006361 0
ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* atm_tcp.h - Driver-specific declarations of the ATMTCP driver (for use by driver-specific utilities) */ /* Written 1997-2000 by Werner Almesberger, EPFL LRC/ICA */ #ifndef LINUX_ATM_TCP_H #define LINUX_ATM_TCP_H #include <uapi/linux/atm_tcp.h> struct atm_tcp_ops { int (*attach)(struct atm_vcc *vcc,int itf); int (*create_persistent)(int itf); int (*remove_persistent)(int itf); struct module *owner; }; extern struct atm_tcp_ops atm_tcp_ops; #endif stm.h 0000644 00000010545 14722070374 0005533 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * System Trace Module (STM) infrastructure APIs * Copyright (C) 2014 Intel Corporation. */ #ifndef _STM_H_ #define _STM_H_ #include <linux/device.h> /** * enum stp_packet_type - STP packets that an STM driver sends */ enum stp_packet_type { STP_PACKET_DATA = 0, STP_PACKET_FLAG, STP_PACKET_USER, STP_PACKET_MERR, STP_PACKET_GERR, STP_PACKET_TRIG, STP_PACKET_XSYNC, }; /** * enum stp_packet_flags - STP packet modifiers */ enum stp_packet_flags { STP_PACKET_MARKED = 0x1, STP_PACKET_TIMESTAMPED = 0x2, }; struct stp_policy; struct stm_device; /** * struct stm_data - STM device description and callbacks * @name: device name * @stm: internal structure, only used by stm class code * @sw_start: first STP master available to software * @sw_end: last STP master available to software * @sw_nchannels: number of STP channels per master * @sw_mmiosz: size of one channel's IO space, for mmap, optional * @hw_override: masters in the STP stream will not match the ones * assigned by software, but are up to the STM hardware * @packet: callback that sends an STP packet * @mmio_addr: mmap callback, optional * @link: called when a new stm_source gets linked to us, optional * @unlink: likewise for unlinking, again optional * @set_options: set device-specific options on a channel * * Fill out this structure before calling stm_register_device() to create * an STM device and stm_unregister_device() to destroy it. It will also be * passed back to @packet(), @mmio_addr(), @link(), @unlink() and @set_options() * callbacks. * * Normally, an STM device will have a range of masters available to software * and the rest being statically assigned to various hardware trace sources. * The former is defined by the range [@sw_start..@sw_end] of the device * description. That is, the lowest master that can be allocated to software * writers is @sw_start, and data from this writer will appear as @sw_start * master in the STP stream. * * The @packet callback should adhere to the following rules: * 1) it must return the number of bytes it consumed from the payload; * 2) therefore, if it sent a packet that does not have payload (like FLAG), * it must return zero; * 3) if it does not support the requested packet type/flag combination, * it must return -ENOTSUPP. * * The @unlink callback is called when there are no more active writers so * that the master/channel can be quiesced.
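 *
 * A bare-bones registration sketch (every name and value below is
 * illustrative, not a default):
 *
 *	static struct stm_data example_stm = {
 *		.name         = "example-stm",
 *		.sw_start     = 1,
 *		.sw_end       = 63,
 *		.sw_nchannels = 128,
 *		.packet       = example_packet,
 *	};
 *
 *	err = stm_register_device(parent, &example_stm, THIS_MODULE);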
*/ struct stm_data { const char *name; struct stm_device *stm; unsigned int sw_start; unsigned int sw_end; unsigned int sw_nchannels; unsigned int sw_mmiosz; unsigned int hw_override; ssize_t (*packet)(struct stm_data *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, const unsigned char *); phys_addr_t (*mmio_addr)(struct stm_data *, unsigned int, unsigned int, unsigned int); int (*link)(struct stm_data *, unsigned int, unsigned int); void (*unlink)(struct stm_data *, unsigned int, unsigned int); long (*set_options)(struct stm_data *, unsigned int, unsigned int, unsigned int, unsigned long); }; int stm_register_device(struct device *parent, struct stm_data *stm_data, struct module *owner); void stm_unregister_device(struct stm_data *stm_data); struct stm_source_device; /** * struct stm_source_data - STM source device description and callbacks * @name: device name, will be used for policy lookup * @src: internal structure, only used by stm class code * @nr_chans: number of channels to allocate * @link: called when this source gets linked to an STM device * @unlink: called when this source is about to get unlinked from its STM * * Fill in this structure before calling stm_source_register_device() to * register a source device. Also pass it to unregister and write calls. */ struct stm_source_data { const char *name; struct stm_source_device *src; unsigned int percpu; unsigned int nr_chans; int (*link)(struct stm_source_data *data); void (*unlink)(struct stm_source_data *data); }; int stm_source_register_device(struct device *parent, struct stm_source_data *data); void stm_source_unregister_device(struct stm_source_data *data); int notrace stm_source_write(struct stm_source_data *data, unsigned int chan, const char *buf, size_t count); #endif /* _STM_H_ */ coresight-pmu.h 0000644 00000001735 14722070374 0007517 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> */ #ifndef _LINUX_CORESIGHT_PMU_H #define _LINUX_CORESIGHT_PMU_H #define CORESIGHT_ETM_PMU_NAME "cs_etm" #define CORESIGHT_ETM_PMU_SEED 0x10 /* ETMv3.5/PTM's ETMCR config bit */ #define ETM_OPT_CYCACC 12 #define ETM_OPT_CTXTID 14 #define ETM_OPT_TS 28 #define ETM_OPT_RETSTK 29 /* ETMv4 CONFIGR programming bits for the ETM OPTs */ #define ETM4_CFG_BIT_CYCACC 4 #define ETM4_CFG_BIT_CTXTID 6 #define ETM4_CFG_BIT_TS 11 #define ETM4_CFG_BIT_RETSTK 12 static inline int coresight_get_trace_id(int cpu) { /* * A trace ID of value 0 is invalid, so let's start at some * random value that fits in 7 bits and go from there. Since * the common convention is to have data trace IDs be I(N) + 1, * set instruction trace IDs as a function of the CPU number. */ return (CORESIGHT_ETM_PMU_SEED + (cpu * 2)); } #endif zorro.h 0000644 00000007676 14722070374 0006116 0 ustar 00 /* * linux/zorro.h -- Amiga AutoConfig (Zorro) Bus Definitions * * Copyright (C) 1995--2003 Geert Uytterhoeven * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. 
*/ #ifndef _LINUX_ZORRO_H #define _LINUX_ZORRO_H #include <uapi/linux/zorro.h> #include <linux/device.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/mod_devicetable.h> #include <asm/zorro.h> /* * Zorro devices */ struct zorro_dev { struct ExpansionRom rom; zorro_id id; struct zorro_driver *driver; /* which driver has allocated this device */ struct device dev; /* Generic device interface */ u16 slotaddr; u16 slotsize; char name[64]; struct resource resource; }; #define to_zorro_dev(n) container_of(n, struct zorro_dev, dev) /* * Zorro bus */ extern struct bus_type zorro_bus_type; /* * Zorro device drivers */ struct zorro_driver { struct list_head node; char *name; const struct zorro_device_id *id_table; /* NULL if wants all devices */ int (*probe)(struct zorro_dev *z, const struct zorro_device_id *id); /* New device inserted */ void (*remove)(struct zorro_dev *z); /* Device removed (NULL if not a hot-plug capable driver) */ struct device_driver driver; }; #define to_zorro_driver(drv) container_of(drv, struct zorro_driver, driver) #define zorro_for_each_dev(dev) \ for (dev = &zorro_autocon[0]; dev < zorro_autocon+zorro_num_autocon; dev++) /* New-style probing */ extern int zorro_register_driver(struct zorro_driver *); extern void zorro_unregister_driver(struct zorro_driver *); extern const struct zorro_device_id *zorro_match_device(const struct zorro_device_id *ids, const struct zorro_dev *z); static inline struct zorro_driver *zorro_dev_driver(const struct zorro_dev *z) { return z->driver; } extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */ extern struct zorro_dev *zorro_autocon; /* * Minimal information about a Zorro device, passed from bootinfo * Only available temporarily, i.e. until initmem has been freed! */ struct zorro_dev_init { struct ExpansionRom rom; u16 slotaddr; u16 slotsize; u32 boardaddr; u32 boardsize; }; extern struct zorro_dev_init zorro_autocon_init[ZORRO_NUM_AUTO] __initdata; /* * Zorro Functions */ extern struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from); #define zorro_resource_start(z) ((z)->resource.start) #define zorro_resource_end(z) ((z)->resource.end) #define zorro_resource_len(z) (resource_size(&(z)->resource)) #define zorro_resource_flags(z) ((z)->resource.flags) #define zorro_request_device(z, name) \ request_mem_region(zorro_resource_start(z), zorro_resource_len(z), name) #define zorro_release_device(z) \ release_mem_region(zorro_resource_start(z), zorro_resource_len(z)) /* Similar to the helpers above, these manipulate per-zorro_dev * driver-specific data. They are really just a wrapper around * the generic device structure functions of these calls. */ static inline void *zorro_get_drvdata (struct zorro_dev *z) { return dev_get_drvdata(&z->dev); } static inline void zorro_set_drvdata (struct zorro_dev *z, void *data) { dev_set_drvdata(&z->dev, data); } /* * Bitmask indicating portions of available Zorro II RAM that are unused * by the system. Every bit represents a 64K chunk, for a maximum of 8MB * (128 chunks, physical 0x00200000-0x009fffff). * * If you want to use (= allocate) portions of this RAM, you should clear * the corresponding bits. 
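 *
 * For example (an illustrative address), the 64K chunk containing
 * physical address 0x00280000 has index
 * (0x00280000 - Z2RAM_START) >> Z2RAM_CHUNKSHIFT = 8, so clearing bit 8
 * of zorro_unused_z2ram claims that chunk.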
*/ extern DECLARE_BITMAP(zorro_unused_z2ram, 128); #define Z2RAM_START (0x00200000) #define Z2RAM_END (0x00a00000) #define Z2RAM_SIZE (0x00800000) #define Z2RAM_CHUNKSIZE (0x00010000) #define Z2RAM_CHUNKMASK (0x0000ffff) #define Z2RAM_CHUNKSHIFT (16) #endif /* _LINUX_ZORRO_H */ ds2782_battery.h 0000644 00000000236 14722070374 0007407 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_DS2782_BATTERY_H #define __LINUX_DS2782_BATTERY_H struct ds278x_platform_data { int rsns; }; #endif soc/brcmstb/brcmstb.h 0000644 00000000634 14722070374 0010602 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __BRCMSTB_SOC_H #define __BRCMSTB_SOC_H static inline u32 BRCM_ID(u32 reg) { return reg >> 28 ? reg >> 16 : reg >> 8; } static inline u32 BRCM_REV(u32 reg) { return reg & 0xff; } /* * Helper functions for getting family or product id from the * SoC driver. */ u32 brcmstb_get_family_id(void); u32 brcmstb_get_product_id(void); #endif /* __BRCMSTB_SOC_H */ soc/sunxi/sunxi_sram.h 0000644 00000000770 14722070374 0011051 0 ustar 00 /* * Allwinner SoCs SRAM Controller Driver * * Copyright (C) 2015 Maxime Ripard * * Author: Maxime Ripard <maxime.ripard@free-electrons.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #ifndef _SUNXI_SRAM_H_ #define _SUNXI_SRAM_H_ int sunxi_sram_claim(struct device *dev); int sunxi_sram_release(struct device *dev); #endif /* _SUNXI_SRAM_H_ */ soc/nxp/lpc32xx-misc.h 0000644 00000001475 14722070374 0010557 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Author: Kevin Wells <kevin.wells@nxp.com> * * Copyright (C) 2010 NXP Semiconductors */ #ifndef __SOC_LPC32XX_MISC_H #define __SOC_LPC32XX_MISC_H #include <linux/types.h> #include <linux/phy.h> #ifdef CONFIG_ARCH_LPC32XX extern u32 lpc32xx_return_iram(void __iomem **mapbase, dma_addr_t *dmaaddr); extern void lpc32xx_set_phy_interface_mode(phy_interface_t mode); extern void lpc32xx_loopback_set(resource_size_t mapbase, int state); #else static inline u32 lpc32xx_return_iram(void __iomem **mapbase, dma_addr_t *dmaaddr) { *mapbase = NULL; *dmaaddr = 0; return 0; } static inline void lpc32xx_set_phy_interface_mode(phy_interface_t mode) { } static inline void lpc32xx_loopback_set(resource_size_t mapbase, int state) { } #endif #endif /* __SOC_LPC32XX_MISC_H */ soc/ixp4xx/qmgr.h 0000644 00000005434 14722070374 0007727 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl> */ #ifndef IXP4XX_QMGR_H #define IXP4XX_QMGR_H #include <linux/io.h> #include <linux/kernel.h> #define DEBUG_QMGR 0 #define HALF_QUEUES 32 #define QUEUES 64 #define MAX_QUEUE_LENGTH 4 /* in dwords */ #define QUEUE_STAT1_EMPTY 1 /* queue status bits */ #define QUEUE_STAT1_NEARLY_EMPTY 2 #define QUEUE_STAT1_NEARLY_FULL 4 #define QUEUE_STAT1_FULL 8 #define QUEUE_STAT2_UNDERFLOW 1 #define QUEUE_STAT2_OVERFLOW 2 #define QUEUE_WATERMARK_0_ENTRIES 0 #define QUEUE_WATERMARK_1_ENTRY 1 #define QUEUE_WATERMARK_2_ENTRIES 2 #define QUEUE_WATERMARK_4_ENTRIES 3 #define QUEUE_WATERMARK_8_ENTRIES 4 #define QUEUE_WATERMARK_16_ENTRIES 5 #define QUEUE_WATERMARK_32_ENTRIES 6 #define QUEUE_WATERMARK_64_ENTRIES 7 /* queue interrupt request conditions */ #define QUEUE_IRQ_SRC_EMPTY 0 #define QUEUE_IRQ_SRC_NEARLY_EMPTY 1 #define QUEUE_IRQ_SRC_NEARLY_FULL 2 #define QUEUE_IRQ_SRC_FULL 3 #define QUEUE_IRQ_SRC_NOT_EMPTY 4 #define QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY 5 
#define QUEUE_IRQ_SRC_NOT_NEARLY_FULL 6 #define QUEUE_IRQ_SRC_NOT_FULL 7 struct qmgr_regs { u32 acc[QUEUES][MAX_QUEUE_LENGTH]; /* 0x000 - 0x3FF */ u32 stat1[4]; /* 0x400 - 0x40F */ u32 stat2[2]; /* 0x410 - 0x417 */ u32 statne_h; /* 0x418 - queue nearly empty */ u32 statf_h; /* 0x41C - queue full */ u32 irqsrc[4]; /* 0x420 - 0x42F IRC source */ u32 irqen[2]; /* 0x430 - 0x437 IRQ enabled */ u32 irqstat[2]; /* 0x438 - 0x43F - IRQ access only */ u32 reserved[1776]; u32 sram[2048]; /* 0x2000 - 0x3FFF - config and buffer */ }; void qmgr_put_entry(unsigned int queue, u32 val); u32 qmgr_get_entry(unsigned int queue); int qmgr_stat_empty(unsigned int queue); int qmgr_stat_below_low_watermark(unsigned int queue); int qmgr_stat_full(unsigned int queue); int qmgr_stat_overflow(unsigned int queue); void qmgr_release_queue(unsigned int queue); void qmgr_set_irq(unsigned int queue, int src, void (*handler)(void *pdev), void *pdev); void qmgr_enable_irq(unsigned int queue); void qmgr_disable_irq(unsigned int queue); /* request_ and release_queue() must be called from non-IRQ context */ #if DEBUG_QMGR extern char qmgr_queue_descs[QUEUES][32]; int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, unsigned int nearly_empty_watermark, unsigned int nearly_full_watermark, const char *desc_format, const char* name); #else int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, unsigned int nearly_empty_watermark, unsigned int nearly_full_watermark); #define qmgr_request_queue(queue, len, nearly_empty_watermark, \ nearly_full_watermark, desc_format, name) \ __qmgr_request_queue(queue, len, nearly_empty_watermark, \ nearly_full_watermark) #endif #endif soc/ixp4xx/npe.h 0000644 00000001722 14722070374 0007537 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IXP4XX_NPE_H #define __IXP4XX_NPE_H #include <linux/kernel.h> extern const char *npe_names[]; struct npe_regs { u32 exec_addr, exec_data, exec_status_cmd, exec_count; u32 action_points[4]; u32 watchpoint_fifo, watch_count; u32 profile_count; u32 messaging_status, messaging_control; u32 mailbox_status, /*messaging_*/ in_out_fifo; }; struct npe { struct npe_regs __iomem *regs; int id; int valid; }; static inline const char *npe_name(struct npe *npe) { return npe_names[npe->id]; } int npe_running(struct npe *npe); int npe_send_message(struct npe *npe, const void *msg, const char *what); int npe_recv_message(struct npe *npe, void *msg, const char *what); int npe_send_recv_message(struct npe *npe, void *msg, const char *what); int npe_load_firmware(struct npe *npe, const char *name, struct device *dev); struct npe *npe_request(unsigned id); void npe_release(struct npe *npe); #endif /* __IXP4XX_NPE_H */ soc/dove/pmu.h 0000644 00000000743 14722070374 0007251 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_SOC_DOVE_PMU_H #define LINUX_SOC_DOVE_PMU_H #include <linux/types.h> struct dove_pmu_domain_initdata { u32 pwr_mask; u32 rst_mask; u32 iso_mask; const char *name; }; struct dove_pmu_initdata { void __iomem *pmc_base; void __iomem *pmu_base; int irq; int irq_domain_start; const struct dove_pmu_domain_initdata *domains; }; int dove_init_pmu_legacy(const struct dove_pmu_initdata *); int dove_init_pmu(void); #endif soc/qcom/apr.h 0000644 00000006326 14722070374 0007237 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __QCOM_APR_H_ #define __QCOM_APR_H_ #include <linux/spinlock.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <dt-bindings/soc/qcom,apr.h> extern struct 
bus_type aprbus; #define APR_HDR_LEN(hdr_len) ((hdr_len)/4) /* * HEADER field * version:0:3 * header_size : 4:7 * message_type : 8:9 * reserved: 10:15 */ #define APR_HDR_FIELD(msg_type, hdr_len, ver)\ (((msg_type & 0x3) << 8) | ((hdr_len & 0xF) << 4) | (ver & 0xF)) #define APR_HDR_SIZE sizeof(struct apr_hdr) #define APR_SEQ_CMD_HDR_FIELD APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ APR_HDR_LEN(APR_HDR_SIZE), \ APR_PKT_VER) /* Version */ #define APR_PKT_VER 0x0 /* Command and Response Types */ #define APR_MSG_TYPE_EVENT 0x0 #define APR_MSG_TYPE_CMD_RSP 0x1 #define APR_MSG_TYPE_SEQ_CMD 0x2 #define APR_MSG_TYPE_NSEQ_CMD 0x3 #define APR_MSG_TYPE_MAX 0x04 /* APR Basic Response Message */ #define APR_BASIC_RSP_RESULT 0x000110E8 #define APR_RSP_ACCEPTED 0x000100BE struct aprv2_ibasic_rsp_result_t { uint32_t opcode; uint32_t status; }; /* hdr field Ver [0:3], Size [4:7], Message type [8:10] */ #define APR_HDR_FIELD_VER(h) (h & 0x000F) #define APR_HDR_FIELD_SIZE(h) ((h & 0x00F0) >> 4) #define APR_HDR_FIELD_SIZE_BYTES(h) (((h & 0x00F0) >> 4) * 4) #define APR_HDR_FIELD_MT(h) ((h & 0x0300) >> 8) struct apr_hdr { uint16_t hdr_field; uint16_t pkt_size; uint8_t src_svc; uint8_t src_domain; uint16_t src_port; uint8_t dest_svc; uint8_t dest_domain; uint16_t dest_port; uint32_t token; uint32_t opcode; } __packed; struct apr_pkt { struct apr_hdr hdr; uint8_t payload[]; }; struct apr_resp_pkt { struct apr_hdr hdr; void *payload; int payload_size; }; /* Bits 0 to 15 -- Minor version, Bits 16 to 31 -- Major version */ #define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF) #define APR_SVC_MINOR_VERSION(v) (v & 0xFF) struct apr_device { struct device dev; uint16_t svc_id; uint16_t domain_id; uint32_t version; char name[APR_NAME_SIZE]; spinlock_t lock; struct list_head node; }; #define to_apr_device(d) container_of(d, struct apr_device, dev) struct apr_driver { int (*probe)(struct apr_device *sl); int (*remove)(struct apr_device *sl); int (*callback)(struct apr_device *a, struct apr_resp_pkt *d); struct device_driver driver; const struct apr_device_id *id_table; }; #define to_apr_driver(d) container_of(d, struct apr_driver, driver) /* * use a macro to avoid include chaining to get THIS_MODULE */ #define apr_driver_register(drv) __apr_driver_register(drv, THIS_MODULE) int __apr_driver_register(struct apr_driver *drv, struct module *owner); void apr_driver_unregister(struct apr_driver *drv); /** * module_apr_driver() - Helper macro for registering an aprbus driver * @__apr_driver: apr_driver struct * * Helper macro for aprbus drivers which do not do anything special in * module init/exit. This eliminates a lot of boilerplate. Each module * may only use this macro once, and calling it replaces module_init() * and module_exit(). */ #define module_apr_driver(__apr_driver) \ module_driver(__apr_driver, apr_driver_register, \ apr_driver_unregister) int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt); #endif /* __QCOM_APR_H_ */ soc/qcom/smd-rpm.h 0000644 00000002400 14722070374 0010021 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __QCOM_SMD_RPM_H__ #define __QCOM_SMD_RPM_H__ struct qcom_smd_rpm; #define QCOM_SMD_RPM_ACTIVE_STATE 0 #define QCOM_SMD_RPM_SLEEP_STATE 1 /* * Constants used for addressing resources in the RPM.
*/ #define QCOM_SMD_RPM_BOBB 0x62626f62 #define QCOM_SMD_RPM_BOOST 0x61747362 #define QCOM_SMD_RPM_BUS_CLK 0x316b6c63 #define QCOM_SMD_RPM_BUS_MASTER 0x73616d62 #define QCOM_SMD_RPM_BUS_SLAVE 0x766c7362 #define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63 #define QCOM_SMD_RPM_LDOA 0x616f646c #define QCOM_SMD_RPM_LDOB 0x626F646C #define QCOM_SMD_RPM_MEM_CLK 0x326b6c63 #define QCOM_SMD_RPM_MISC_CLK 0x306b6c63 #define QCOM_SMD_RPM_NCPA 0x6170636E #define QCOM_SMD_RPM_NCPB 0x6270636E #define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f #define QCOM_SMD_RPM_QPIC_CLK 0x63697071 #define QCOM_SMD_RPM_SMPA 0x61706d73 #define QCOM_SMD_RPM_SMPB 0x62706d73 #define QCOM_SMD_RPM_SPDM 0x63707362 #define QCOM_SMD_RPM_VSA 0x00617376 #define QCOM_SMD_RPM_MMAXI_CLK 0x69786d6d #define QCOM_SMD_RPM_IPA_CLK 0x617069 #define QCOM_SMD_RPM_CE_CLK 0x6563 #define QCOM_SMD_RPM_AGGR_CLK 0x72676761 int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, int state, u32 resource_type, u32 resource_id, void *buf, size_t count); #endif soc/qcom/qmi.h 0000644 00000017351 14722070374 0007243 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * Copyright (c) 2017, Linaro Ltd. */ #ifndef __QMI_HELPERS_H__ #define __QMI_HELPERS_H__ #include <linux/completion.h> #include <linux/idr.h> #include <linux/list.h> #include <linux/qrtr.h> #include <linux/types.h> #include <linux/workqueue.h> struct socket; /** * qmi_header - wireformat header of QMI messages * @type: type of message * @txn_id: transaction id * @msg_id: message id * @msg_len: length of message payload following header */ struct qmi_header { u8 type; u16 txn_id; u16 msg_id; u16 msg_len; } __packed; #define QMI_REQUEST 0 #define QMI_RESPONSE 2 #define QMI_INDICATION 4 #define QMI_COMMON_TLV_TYPE 0 enum qmi_elem_type { QMI_EOTI, QMI_OPT_FLAG, QMI_DATA_LEN, QMI_UNSIGNED_1_BYTE, QMI_UNSIGNED_2_BYTE, QMI_UNSIGNED_4_BYTE, QMI_UNSIGNED_8_BYTE, QMI_SIGNED_2_BYTE_ENUM, QMI_SIGNED_4_BYTE_ENUM, QMI_STRUCT, QMI_STRING, }; enum qmi_array_type { NO_ARRAY, STATIC_ARRAY, VAR_LEN_ARRAY, }; /** * struct qmi_elem_info - describes how to encode a single QMI element * @data_type: Data type of this element. * @elem_len: Array length of this element, if an array. * @elem_size: Size of a single instance of this data type. * @array_type: Array type of this element. * @tlv_type: QMI message specific type to identify which element * is present in an incoming message. * @offset: Specifies the offset of the first instance of this * element in the data structure. * @ei_array: Null-terminated array of @qmi_elem_info to describe nested * structures. 
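 *
 * A hedged usage sketch, not part of the original header (the struct,
 * tlv_type value and ei array below are illustrative assumptions): a
 * message with a single u32 field is described by one element plus a
 * QMI_EOTI terminator:
 *
 *	struct demo_resp { u32 value; };
 *	static struct qmi_elem_info demo_resp_ei[] = {
 *		{
 *			.data_type  = QMI_UNSIGNED_4_BYTE,
 *			.elem_len   = 1,
 *			.elem_size  = sizeof(u32),
 *			.array_type = NO_ARRAY,
 *			.tlv_type   = 0x01,
 *			.offset     = offsetof(struct demo_resp, value),
 *		},
 *		{ .data_type = QMI_EOTI },
 *	};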
*/ struct qmi_elem_info { enum qmi_elem_type data_type; u32 elem_len; u32 elem_size; enum qmi_array_type array_type; u8 tlv_type; u32 offset; struct qmi_elem_info *ei_array; }; #define QMI_RESULT_SUCCESS_V01 0 #define QMI_RESULT_FAILURE_V01 1 #define QMI_ERR_NONE_V01 0 #define QMI_ERR_MALFORMED_MSG_V01 1 #define QMI_ERR_NO_MEMORY_V01 2 #define QMI_ERR_INTERNAL_V01 3 #define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5 #define QMI_ERR_INVALID_ID_V01 41 #define QMI_ERR_ENCODING_V01 58 #define QMI_ERR_INCOMPATIBLE_STATE_V01 90 #define QMI_ERR_NOT_SUPPORTED_V01 94 /** * qmi_response_type_v01 - common response header (decoded) * @result: result of the transaction * @error: error value, when @result is QMI_RESULT_FAILURE_V01 */ struct qmi_response_type_v01 { u16 result; u16 error; }; extern struct qmi_elem_info qmi_response_type_v01_ei[]; /** * struct qmi_service - context to track lookup-results * @service: service type * @version: version of the @service * @instance: instance id of the @service * @node: node of the service * @port: port of the service * @priv: handle for client's use * @list_node: list_head for housekeeping */ struct qmi_service { unsigned int service; unsigned int version; unsigned int instance; unsigned int node; unsigned int port; void *priv; struct list_head list_node; }; struct qmi_handle; /** * struct qmi_ops - callbacks for qmi_handle * @new_server: inform client of a new_server lookup-result, returning * successfully from this call causes the library to call * @del_server as the service is removed from the * lookup-result. @priv of the qmi_service can be used by * the client * @del_server: inform client of a del_server lookup-result * @net_reset: inform client that the name service was restarted and * that any state needs to be released * @msg_handler: invoked for incoming messages, allows a client to * override the usual QMI message handler * @bye: inform a client that all clients from a node are gone * @del_client: inform a client that a particular client is gone */ struct qmi_ops { int (*new_server)(struct qmi_handle *qmi, struct qmi_service *svc); void (*del_server)(struct qmi_handle *qmi, struct qmi_service *svc); void (*net_reset)(struct qmi_handle *qmi); void (*msg_handler)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, const void *data, size_t count); void (*bye)(struct qmi_handle *qmi, unsigned int node); void (*del_client)(struct qmi_handle *qmi, unsigned int node, unsigned int port); }; /** * struct qmi_txn - transaction context * @qmi: QMI handle this transaction is associated with * @id: transaction id * @lock: for synchronization between handler and waiter of messages * @completion: completion object as the transaction receives a response * @result: result code for the completed transaction * @ei: description of the QMI encoded response (optional) * @dest: destination buffer to decode message into (optional) */ struct qmi_txn { struct qmi_handle *qmi; u16 id; struct mutex lock; struct completion completion; int result; struct qmi_elem_info *ei; void *dest; }; /** * struct qmi_msg_handler - description of QMI message handler * @type: type of message * @msg_id: message id * @ei: description of the QMI encoded message * @decoded_size: size of the decoded object * @fn: function to invoke as the message is decoded */ struct qmi_msg_handler { unsigned int type; unsigned int msg_id; struct qmi_elem_info *ei; size_t decoded_size; void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded); }; /** * struct qmi_handle - QMI
context * @sock: socket handle * @sock_lock: synchronization of @sock modifications * @sq: sockaddr of @sock * @work: work for handling incoming messages * @wq: workqueue to post @work on * @recv_buf: scratch buffer for handling incoming messages * @recv_buf_size: size of @recv_buf * @lookups: list of registered lookup requests * @lookup_results: list of lookup-results advertised to the client * @services: list of registered services (by this client) * @ops: reference to callbacks * @txns: outstanding transactions * @txn_lock: lock for modifications of @txns * @handlers: list of handlers for incoming messages */ struct qmi_handle { struct socket *sock; struct mutex sock_lock; struct sockaddr_qrtr sq; struct work_struct work; struct workqueue_struct *wq; void *recv_buf; size_t recv_buf_size; struct list_head lookups; struct list_head lookup_results; struct list_head services; struct qmi_ops ops; struct idr txns; struct mutex txn_lock; const struct qmi_msg_handler *handlers; }; int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service, unsigned int version, unsigned int instance); int qmi_add_server(struct qmi_handle *qmi, unsigned int service, unsigned int version, unsigned int instance); int qmi_handle_init(struct qmi_handle *qmi, size_t max_msg_len, const struct qmi_ops *ops, const struct qmi_msg_handler *handlers); void qmi_handle_release(struct qmi_handle *qmi); ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, struct qmi_txn *txn, int msg_id, size_t len, struct qmi_elem_info *ei, const void *c_struct); ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, struct qmi_txn *txn, int msg_id, size_t len, struct qmi_elem_info *ei, const void *c_struct); ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, int msg_id, size_t len, struct qmi_elem_info *ei, const void *c_struct); void *qmi_encode_message(int type, unsigned int msg_id, size_t *len, unsigned int txn_id, struct qmi_elem_info *ei, const void *c_struct); int qmi_decode_message(const void *buf, size_t len, struct qmi_elem_info *ei, void *c_struct); int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn, struct qmi_elem_info *ei, void *c_struct); int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout); void qmi_txn_cancel(struct qmi_txn *txn); #endif soc/qcom/wcnss_ctrl.h 0000644 00000000756 14722070374 0010637 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __WCNSS_CTRL_H__ #define __WCNSS_CTRL_H__ #include <linux/rpmsg.h> #if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL) struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv); #else static inline struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv) { WARN_ON(1); return ERR_PTR(-ENXIO); } #endif #endif soc/qcom/smem_state.h 0000644 00000002452 14722070374 0010612 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __QCOM_SMEM_STATE__ #define __QCOM_SMEM_STATE__ #include <linux/err.h> struct device_node; struct qcom_smem_state; struct qcom_smem_state_ops { int (*update_bits)(void *, u32, u32); }; #ifdef CONFIG_QCOM_SMEM_STATE struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit); void qcom_smem_state_put(struct qcom_smem_state *); int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value); struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data); void
qcom_smem_state_unregister(struct qcom_smem_state *state); #else static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit) { return ERR_PTR(-EINVAL); } static inline void qcom_smem_state_put(struct qcom_smem_state *state) { } static inline int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value) { return -EINVAL; } static inline struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data) { return ERR_PTR(-EINVAL); } static inline void qcom_smem_state_unregister(struct qcom_smem_state *state) { } #endif #endif soc/qcom/llcc-qcom.h 0000644 00000012651 14722070374 0010325 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * */ #include <linux/platform_device.h> #ifndef __LLCC_QCOM__ #define __LLCC_QCOM__ #define LLCC_CPUSS 1 #define LLCC_VIDSC0 2 #define LLCC_VIDSC1 3 #define LLCC_ROTATOR 4 #define LLCC_VOICE 5 #define LLCC_AUDIO 6 #define LLCC_MDMHPGRW 7 #define LLCC_MDM 8 #define LLCC_CMPT 10 #define LLCC_GPUHTW 11 #define LLCC_GPU 12 #define LLCC_MMUHWT 13 #define LLCC_CMPTDMA 15 #define LLCC_DISP 16 #define LLCC_VIDFW 17 #define LLCC_MDMHPFX 20 #define LLCC_MDMPNG 21 #define LLCC_AUDHW 22 /** * llcc_slice_desc - Cache slice descriptor * @slice_id: llcc slice id * @slice_size: Size allocated for the llcc slice */ struct llcc_slice_desc { u32 slice_id; size_t slice_size; }; /** * llcc_slice_config - Data associated with the llcc slice * @usecase_id: Unique id for the client's use case * @slice_id: llcc slice id for each client * @max_cap: The maximum capacity of the cache slice provided in KB * @priority: Priority of the client used to select victim line for replacement * @fixed_size: Boolean indicating if the slice has a fixed capacity * @bonus_ways: Bonus ways are additional ways to be used for any slice, * if client ends up using more than reserved cache ways. Bonus * ways are allocated only if they are not reserved for some * other client. * @res_ways: Reserved ways for the cache slice, the reserved ways cannot * be used by any other client than the one its assigned to. * @cache_mode: Each slice operates as a cache, this controls the mode of the * slice: normal or TCM(Tightly Coupled Memory) * @probe_target_ways: Determines what ways to probe for access hit. When * configured to 1 only bonus and reserved ways are probed. * When configured to 0 all ways in llcc are probed. * @dis_cap_alloc: Disable capacity based allocation for a client * @retain_on_pc: If this bit is set and client has maintained active vote * then the ways assigned to this client are not flushed on power * collapse. 
* @activate_on_init: Activate the slice immediately after it is programmed */ struct llcc_slice_config { u32 usecase_id; u32 slice_id; u32 max_cap; u32 priority; bool fixed_size; u32 bonus_ways; u32 res_ways; u32 cache_mode; u32 probe_target_ways; bool dis_cap_alloc; bool retain_on_pc; bool activate_on_init; }; /** * llcc_drv_data - Data associated with the llcc driver * @regmap: regmap associated with the llcc device * @bcast_regmap: regmap associated with llcc broadcast offset * @cfg: pointer to the data structure for slice configuration * @lock: mutex associated with each slice * @cfg_size: size of the config data table * @max_slices: max slices as read from device tree * @num_banks: Number of llcc banks * @bitmap: Bit map to track the active slice ids * @offsets: Pointer to the bank offsets array * @ecc_irq: interrupt for llcc cache error detection and reporting */ struct llcc_drv_data { struct regmap *regmap; struct regmap *bcast_regmap; const struct llcc_slice_config *cfg; struct mutex lock; u32 cfg_size; u32 max_slices; u32 num_banks; unsigned long *bitmap; u32 *offsets; int ecc_irq; }; /** * llcc_edac_reg_data - llcc edac registers data for each error type * @name: Name of the error * @synd_reg: Syndrome register address * @count_status_reg: Status register address to read the error count * @ways_status_reg: Status register address to read the error ways * @reg_cnt: Number of registers * @count_mask: Mask value to get the error count * @ways_mask: Mask value to get the error ways * @count_shift: Shift value to get the error count * @ways_shift: Shift value to get the error ways */ struct llcc_edac_reg_data { char *name; u64 synd_reg; u64 count_status_reg; u64 ways_status_reg; u32 reg_cnt; u32 count_mask; u32 ways_mask; u8 count_shift; u8 ways_shift; }; #if IS_ENABLED(CONFIG_QCOM_LLCC) /** * llcc_slice_getd - get llcc slice descriptor * @uid: usecase_id of the client */ struct llcc_slice_desc *llcc_slice_getd(u32 uid); /** * llcc_slice_putd - put llcc slice descriptor * @desc: Pointer to llcc slice descriptor */ void llcc_slice_putd(struct llcc_slice_desc *desc); /** * llcc_get_slice_id - get slice id * @desc: Pointer to llcc slice descriptor */ int llcc_get_slice_id(struct llcc_slice_desc *desc); /** * llcc_get_slice_size - llcc slice size * @desc: Pointer to llcc slice descriptor */ size_t llcc_get_slice_size(struct llcc_slice_desc *desc); /** * llcc_slice_activate - Activate the llcc slice * @desc: Pointer to llcc slice descriptor */ int llcc_slice_activate(struct llcc_slice_desc *desc); /** * llcc_slice_deactivate - Deactivate the llcc slice * @desc: Pointer to llcc slice descriptor */ int llcc_slice_deactivate(struct llcc_slice_desc *desc); #else static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid) { return NULL; } static inline void llcc_slice_putd(struct llcc_slice_desc *desc) { } static inline int llcc_get_slice_id(struct llcc_slice_desc *desc) { return -EINVAL; } static inline size_t llcc_get_slice_size(struct llcc_slice_desc *desc) { return 0; } static inline int llcc_slice_activate(struct llcc_slice_desc *desc) { return -EINVAL; } static inline int llcc_slice_deactivate(struct llcc_slice_desc *desc) { return -EINVAL; } #endif #endif soc/qcom/smem.h 0000644 00000000534 14722070374 0007411 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __QCOM_SMEM_H__ #define __QCOM_SMEM_H__ #define QCOM_SMEM_HOST_ANY -1 int qcom_smem_alloc(unsigned host, unsigned item, size_t size); void *qcom_smem_get(unsigned host, unsigned item, size_t *size); int
qcom_smem_get_free_space(unsigned host); phys_addr_t qcom_smem_virt_to_phys(void *p); #endif soc/qcom/mdt_loader.h 0000644 00000001446 14722070374 0010565 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __QCOM_MDT_LOADER_H__ #define __QCOM_MDT_LOADER_H__ #include <linux/types.h> #define QCOM_MDT_TYPE_MASK (7 << 24) #define QCOM_MDT_TYPE_HASH (2 << 24) #define QCOM_MDT_RELOCATABLE BIT(27) struct device; struct firmware; ssize_t qcom_mdt_get_size(const struct firmware *fw); int qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, phys_addr_t *reloc_base); int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, phys_addr_t *reloc_base); void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len); #endif soc/amlogic/meson-canvas.h 0000644 00000003567 14722070374 0011527 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2018 BayLibre, SAS */ #ifndef __SOC_MESON_CANVAS_H #define __SOC_MESON_CANVAS_H #include <linux/kernel.h> #define MESON_CANVAS_WRAP_NONE 0x00 #define MESON_CANVAS_WRAP_X 0x01 #define MESON_CANVAS_WRAP_Y 0x02 #define MESON_CANVAS_BLKMODE_LINEAR 0x00 #define MESON_CANVAS_BLKMODE_32x32 0x01 #define MESON_CANVAS_BLKMODE_64x64 0x02 #define MESON_CANVAS_ENDIAN_SWAP16 0x1 #define MESON_CANVAS_ENDIAN_SWAP32 0x3 #define MESON_CANVAS_ENDIAN_SWAP64 0x7 #define MESON_CANVAS_ENDIAN_SWAP128 0xf struct device; struct meson_canvas; /** * meson_canvas_get() - get a canvas provider instance * * @dev: consumer device pointer */ struct meson_canvas *meson_canvas_get(struct device *dev); /** * meson_canvas_alloc() - take ownership of a canvas * * @canvas: canvas provider instance retrieved from meson_canvas_get() * @canvas_index: will be filled with the canvas ID */ int meson_canvas_alloc(struct meson_canvas *canvas, u8 *canvas_index); /** * meson_canvas_free() - remove ownership from a canvas * * @canvas: canvas provider instance retrieved from meson_canvas_get() * @canvas_index: canvas ID that was obtained via meson_canvas_alloc() */ int meson_canvas_free(struct meson_canvas *canvas, u8 canvas_index); /** * meson_canvas_config() - configure a canvas * * @canvas: canvas provider instance retrieved from meson_canvas_get() * @canvas_index: canvas ID that was obtained via meson_canvas_alloc() * @addr: physical address to the pixel buffer * @stride: width of the buffer * @height: height of the buffer * @wrap: undocumented * @blkmode: block mode (linear, 32x32, 64x64) * @endian: byte swapping (swap16, swap32, swap64, swap128) */ int meson_canvas_config(struct meson_canvas *canvas, u8 canvas_index, u32 addr, u32 stride, u32 height, unsigned int wrap, unsigned int blkmode, unsigned int endian); #endif soc/cirrus/ep93xx.h 0000644 00000002631 14722070374 0010160 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _SOC_EP93XX_H #define _SOC_EP93XX_H struct platform_device; #define EP93XX_CHIP_REV_D0 3 #define EP93XX_CHIP_REV_D1 4 #define EP93XX_CHIP_REV_E0 5 #define EP93XX_CHIP_REV_E1 6 #define EP93XX_CHIP_REV_E2 7 #ifdef CONFIG_ARCH_EP93XX int ep93xx_pwm_acquire_gpio(struct platform_device *pdev); void ep93xx_pwm_release_gpio(struct platform_device *pdev); int ep93xx_ide_acquire_gpio(struct platform_device *pdev); void ep93xx_ide_release_gpio(struct platform_device *pdev); int ep93xx_keypad_acquire_gpio(struct platform_device *pdev); void 
ep93xx_keypad_release_gpio(struct platform_device *pdev); int ep93xx_i2s_acquire(void); void ep93xx_i2s_release(void); unsigned int ep93xx_chip_revision(void); #else static inline int ep93xx_pwm_acquire_gpio(struct platform_device *pdev) { return 0; } static inline void ep93xx_pwm_release_gpio(struct platform_device *pdev) {} static inline int ep93xx_ide_acquire_gpio(struct platform_device *pdev) { return 0; } static inline void ep93xx_ide_release_gpio(struct platform_device *pdev) {} static inline int ep93xx_keypad_acquire_gpio(struct platform_device *pdev) { return 0; } static inline void ep93xx_keypad_release_gpio(struct platform_device *pdev) {} static inline int ep93xx_i2s_acquire(void) { return 0; } static inline void ep93xx_i2s_release(void) {} static inline unsigned int ep93xx_chip_revision(void) { return 0; } #endif #endif soc/ti/knav_dma.h 0000644 00000012741 14722070374 0007710 0 ustar 00 /* * Copyright (C) 2014 Texas Instruments Incorporated * Authors: Sandeep Nair <sandeep_n@ti.com> * Cyril Chemparathy <cyril@ti.com> * Santosh Shilimkar <santosh.shilimkar@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ #define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ #include <linux/dmaengine.h> /* * PKTDMA descriptor manipulation macros for host packet descriptor */ #define MASK(x) (BIT(x) - 1) #define KNAV_DMA_DESC_PKT_LEN_MASK MASK(22) #define KNAV_DMA_DESC_PKT_LEN_SHIFT 0 #define KNAV_DMA_DESC_PS_INFO_IN_SOP BIT(22) #define KNAV_DMA_DESC_PS_INFO_IN_DESC 0 #define KNAV_DMA_DESC_TAG_MASK MASK(8) #define KNAV_DMA_DESC_SAG_HI_SHIFT 24 #define KNAV_DMA_DESC_STAG_LO_SHIFT 16 #define KNAV_DMA_DESC_DTAG_HI_SHIFT 8 #define KNAV_DMA_DESC_DTAG_LO_SHIFT 0 #define KNAV_DMA_DESC_HAS_EPIB BIT(31) #define KNAV_DMA_DESC_NO_EPIB 0 #define KNAV_DMA_DESC_PSLEN_SHIFT 24 #define KNAV_DMA_DESC_PSLEN_MASK MASK(6) #define KNAV_DMA_DESC_ERR_FLAG_SHIFT 20 #define KNAV_DMA_DESC_ERR_FLAG_MASK MASK(4) #define KNAV_DMA_DESC_PSFLAG_SHIFT 16 #define KNAV_DMA_DESC_PSFLAG_MASK MASK(4) #define KNAV_DMA_DESC_RETQ_SHIFT 0 #define KNAV_DMA_DESC_RETQ_MASK MASK(14) #define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22) #define KNAV_DMA_DESC_EFLAGS_MASK MASK(4) #define KNAV_DMA_DESC_EFLAGS_SHIFT 20 #define KNAV_DMA_NUM_EPIB_WORDS 4 #define KNAV_DMA_NUM_PS_WORDS 16 #define KNAV_DMA_NUM_SW_DATA_WORDS 4 #define KNAV_DMA_FDQ_PER_CHAN 4 /* Tx channel scheduling priority */ enum knav_dma_tx_priority { DMA_PRIO_HIGH = 0, DMA_PRIO_MED_H, DMA_PRIO_MED_L, DMA_PRIO_LOW }; /* Rx channel error handling mode during buffer starvation */ enum knav_dma_rx_err_mode { DMA_DROP = 0, DMA_RETRY }; /* Rx flow size threshold configuration */ enum knav_dma_rx_thresholds { DMA_THRESH_NONE = 0, DMA_THRESH_0 = 1, DMA_THRESH_0_1 = 3, DMA_THRESH_0_1_2 = 7 }; /* Descriptor type */ enum knav_dma_desc_type { DMA_DESC_HOST = 0, DMA_DESC_MONOLITHIC = 2 }; /** * struct knav_dma_tx_cfg: Tx channel configuration * @filt_einfo: Filter extended packet info * @filt_pswords: Filter PS words present * @priority: Tx channel scheduling priority */ struct knav_dma_tx_cfg { bool filt_einfo; bool filt_pswords; enum knav_dma_tx_priority priority; };
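/*
 * A hedged usage sketch, not part of the original header: a client fills
 * a struct knav_dma_cfg (declared further below) and opens a channel by
 * name. The device pointer and the "nettx0" channel name are illustrative
 * assumptions.
 *
 *	struct knav_dma_cfg cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.u.tx = {
 *			.filt_einfo   = false,
 *			.filt_pswords = false,
 *			.priority     = DMA_PRIO_MED_L,
 *		},
 *	};
 *	void *chan = knav_dma_open_channel(dev, "nettx0", &cfg);
 *	if (IS_ERR_OR_NULL(chan))
 *		return -ENODEV;
 */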
/** * struct knav_dma_rx_cfg: Rx flow configuration * @einfo_present: Extended packet info present * @psinfo_present: PS words present * @err_mode: Error handling mode during buffer starvation * @desc_type: Host or Monolithic desc * @psinfo_at_sop: PS word located at start of packet * @sop_offset: Start of packet offset * @dst_q: Destination queue for a given flow * @thresh: Rx flow size threshold * @fdq: Free desc Queue array * @sz_thresh0: RX packet size threshold 0 * @sz_thresh1: RX packet size threshold 1 * @sz_thresh2: RX packet size threshold 2 */ struct knav_dma_rx_cfg { bool einfo_present; bool psinfo_present; enum knav_dma_rx_err_mode err_mode; enum knav_dma_desc_type desc_type; bool psinfo_at_sop; unsigned int sop_offset; unsigned int dst_q; enum knav_dma_rx_thresholds thresh; unsigned int fdq[KNAV_DMA_FDQ_PER_CHAN]; unsigned int sz_thresh0; unsigned int sz_thresh1; unsigned int sz_thresh2; }; /** * struct knav_dma_cfg: Pktdma channel configuration * @direction: DMA transfer direction * @u.tx: Tx channel configuration * @u.rx: Rx flow configuration */ struct knav_dma_cfg { enum dma_transfer_direction direction; union { struct knav_dma_tx_cfg tx; struct knav_dma_rx_cfg rx; } u; }; /** * struct knav_dma_desc: Host packet descriptor layout * @desc_info: Descriptor information like id, type, length * @tag_info: Flow tag info written in during RX * @packet_info: Queue Manager, policy, flags etc * @buff_len: Buffer length in bytes * @buff: Buffer pointer * @next_desc: For chaining the descriptors * @orig_len: length since 'buff_len' can be overwritten * @orig_buff: buff pointer since 'buff' can be overwritten * @epib: Extended packet info block * @psdata: Protocol specific * @sw_data: Software private data not touched by h/w */ struct knav_dma_desc { __le32 desc_info; __le32 tag_info; __le32 packet_info; __le32 buff_len; __le32 buff; __le32 next_desc; __le32 orig_len; __le32 orig_buff; __le32 epib[KNAV_DMA_NUM_EPIB_WORDS]; __le32 psdata[KNAV_DMA_NUM_PS_WORDS]; u32 sw_data[KNAV_DMA_NUM_SW_DATA_WORDS]; } ____cacheline_aligned; #if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA) void *knav_dma_open_channel(struct device *dev, const char *name, struct knav_dma_cfg *config); void knav_dma_close_channel(void *channel); int knav_dma_get_flow(void *channel); bool knav_dma_device_ready(void); #else static inline void *knav_dma_open_channel(struct device *dev, const char *name, struct knav_dma_cfg *config) { return (void *) NULL; } static inline void knav_dma_close_channel(void *channel) {} static inline int knav_dma_get_flow(void *channel) { return -EINVAL; } static inline bool knav_dma_device_ready(void) { return false; } #endif #endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */ soc/ti/ti_sci_protocol.h 0000644 00000056543 14722070374 0011327 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * Texas Instruments System Control Interface Protocol * * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon */ #ifndef __TISCI_PROTOCOL_H #define __TISCI_PROTOCOL_H /** * struct ti_sci_version_info - version information structure * @abi_major: Major ABI version. Change here implies risk of backward * compatibility break. * @abi_minor: Minor ABI version. Change here implies new feature addition, * or compatible change in ABI. * @firmware_revision: Firmware revision (not usually used). * @firmware_description: Firmware description (not usually used).
*/ struct ti_sci_version_info { u8 abi_major; u8 abi_minor; u16 firmware_revision; char firmware_description[32]; }; struct ti_sci_handle; /** * struct ti_sci_core_ops - SoC Core Operations * @reboot_device: Reboot the SoC * Returns 0 for successful request (ideally should never return), * else returns corresponding error value. */ struct ti_sci_core_ops { int (*reboot_device)(const struct ti_sci_handle *handle); }; /** * struct ti_sci_dev_ops - Device control operations * @get_device: Command to request for device managed by TISCI * Returns 0 for successful exclusive request, else returns * corresponding error message. * @idle_device: Command to idle a device managed by TISCI * Returns 0 for successful exclusive request, else returns * corresponding error message. * @put_device: Command to release a device managed by TISCI * Returns 0 for successful release, else returns corresponding * error message. * @is_valid: Check if the device ID is a valid ID. * Returns 0 if the ID is valid, else returns corresponding error. * @get_context_loss_count: Command to retrieve context loss counter - this * increments every time the device loses context. Overflow * is possible. * - count: pointer to u32 which will retrieve counter * Returns 0 for successful information request and count has * proper data, else returns corresponding error message. * @is_idle: Reports back about device idle state * - req_state: Returns requested idle state * Returns 0 for successful information request and req_state and * current_state has proper data, else returns corresponding error * message. * @is_stop: Reports back about device stop state * - req_state: Returns requested stop state * - current_state: Returns current stop state * Returns 0 for successful information request and req_state and * current_state has proper data, else returns corresponding error * message. * @is_on: Reports back about device ON (or active) state * - req_state: Returns requested ON state * - current_state: Returns current ON state * Returns 0 for successful information request and req_state and * current_state has proper data, else returns corresponding error * message. * @is_transitioning: Reports back if the device is in the middle of transition * of state. * -current_state: Returns 'true' if currently transitioning. * @set_device_resets: Command to configure resets for device managed by TISCI. * -reset_state: Device specific reset bit field * Returns 0 for successful request, else returns * corresponding error message. * @get_device_resets: Command to read state of resets for device managed * by TISCI. * -reset_state: pointer to u32 which will retrieve resets * Returns 0 for successful request, else returns * corresponding error message. * * NOTE: for all these functions, the following parameters are generic in * nature: * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle * -id: Device Identifier * * Request for the device - NOTE: the client MUST maintain integrity of * usage count by balancing get_device with put_device. No refcounting is * managed by driver for that purpose.
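 *
 * A minimal balanced-usage sketch (not from the original header; dev_id is
 * an illustrative SoC-specific identifier, error handling trimmed):
 *
 *	ret = handle->ops.dev_ops.get_device(handle, dev_id);
 *	if (!ret) {
 *		... use the device ...
 *		handle->ops.dev_ops.put_device(handle, dev_id);
 *	}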
*/ struct ti_sci_dev_ops { int (*get_device)(const struct ti_sci_handle *handle, u32 id); int (*get_device_exclusive)(const struct ti_sci_handle *handle, u32 id); int (*idle_device)(const struct ti_sci_handle *handle, u32 id); int (*idle_device_exclusive)(const struct ti_sci_handle *handle, u32 id); int (*put_device)(const struct ti_sci_handle *handle, u32 id); int (*is_valid)(const struct ti_sci_handle *handle, u32 id); int (*get_context_loss_count)(const struct ti_sci_handle *handle, u32 id, u32 *count); int (*is_idle)(const struct ti_sci_handle *handle, u32 id, bool *requested_state); int (*is_stop)(const struct ti_sci_handle *handle, u32 id, bool *req_state, bool *current_state); int (*is_on)(const struct ti_sci_handle *handle, u32 id, bool *req_state, bool *current_state); int (*is_transitioning)(const struct ti_sci_handle *handle, u32 id, bool *current_state); int (*set_device_resets)(const struct ti_sci_handle *handle, u32 id, u32 reset_state); int (*get_device_resets)(const struct ti_sci_handle *handle, u32 id, u32 *reset_state); }; /** * struct ti_sci_clk_ops - Clock control operations * @get_clock: Request for activation of clock and manage by processor * - needs_ssc: 'true' if Spread Spectrum clock is desired. * - can_change_freq: 'true' if frequency change is desired. * - enable_input_term: 'true' if input termination is desired. * @idle_clock: Request for Idling a clock managed by processor * @put_clock: Release the clock to be auto managed by TISCI * @is_auto: Is the clock being auto managed * - req_state: state indicating if the clock is auto managed * @is_on: Is the clock ON * - req_state: if the clock is requested to be forced ON * - current_state: if the clock is currently ON * @is_off: Is the clock OFF * - req_state: if the clock is requested to be forced OFF * - current_state: if the clock is currently Gated * @set_parent: Set the clock source of a specific device clock * - parent_id: Parent clock identifier to set. * @get_parent: Get the current clock source of a specific device clock * - parent_id: Parent clock identifier which is the parent. * @get_num_parents: Get the number of parents of the current clock source * - num_parents: returns the number of parent clocks. * @get_best_match_freq: Find a best matching frequency for a frequency * range. * - match_freq: Best matching frequency in Hz. * @set_freq: Set the Clock frequency * @get_freq: Get the Clock frequency * - current_freq: Frequency in Hz that the clock is at. * * NOTE: for all these functions, the following parameters are generic in * nature: * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle * -did: Device identifier this request is for * -cid: Clock identifier for the device for this request. * Each device has its own set of clock inputs. This indexes * which clock input to modify. * -min_freq: The minimum allowable frequency in Hz. This is the minimum * allowable programmed frequency and does not account for clock * tolerances and jitter. * -target_freq: The target clock frequency in Hz. A frequency will be * processed as close to this target frequency as possible. * -max_freq: The maximum allowable frequency in Hz. This is the maximum * allowable programmed frequency and does not account for clock * tolerances and jitter. * * Request for the clock - NOTE: the client MUST maintain integrity of * usage count by balancing get_clock with put_clock. No refcounting is * managed by driver for that purpose.
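 *
 * For example (a sketch, not from the original header; IDs and rates are
 * illustrative), a client asking for a 100 MHz clock within roughly one
 * percent margins might call:
 *
 *	ret = handle->ops.clk_ops.set_freq(handle, did, cid,
 *					   99000000, 100000000, 101000000);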
*/ struct ti_sci_clk_ops { int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool needs_ssc, bool can_change_freq, bool enable_input_term); int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid); int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid); int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool *req_state); int (*is_on)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool *req_state, bool *current_state); int (*is_off)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool *req_state, bool *current_state); int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u32 cid, u32 parent_id); int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u32 cid, u32 *parent_id); int (*get_num_parents)(const struct ti_sci_handle *handle, u32 did, u32 cid, u32 *num_parents); int (*get_best_match_freq)(const struct ti_sci_handle *handle, u32 did, u32 cid, u64 min_freq, u64 target_freq, u64 max_freq, u64 *match_freq); int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u32 cid, u64 min_freq, u64 target_freq, u64 max_freq); int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u32 cid, u64 *current_freq); }; /** * struct ti_sci_rm_core_ops - Resource management core operations * @get_range: Get a range of resources belonging to ti sci host. * @get_range_from_shost: Get a range of resources belonging to * specified host id. * - s_host: Host processing entity to which the * resources are allocated * * NOTE: for these functions, all the parameters are consolidated and defined * as below: * - handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle * - dev_id: TISCI device ID. * - subtype: Resource assignment subtype that is being requested * from the given device. * - range_start: Start index of the resource range * - range_num: Number of resources in the range */ struct ti_sci_rm_core_ops { int (*get_range)(const struct ti_sci_handle *handle, u32 dev_id, u8 subtype, u16 *range_start, u16 *range_num); int (*get_range_from_shost)(const struct ti_sci_handle *handle, u32 dev_id, u8 subtype, u8 s_host, u16 *range_start, u16 *range_num); }; /** * struct ti_sci_rm_irq_ops: IRQ management operations * @set_irq: Set an IRQ route between the requested source * and destination * @set_event_map: Set an Event based peripheral irq to Interrupt * Aggregator. * @free_irq: Free an IRQ route between the requested source * and destination. * @free_event_map: Free an event based peripheral irq to Interrupt * Aggregator.
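 *
 * A hedged route-setup sketch (not from the original header; all IDs are
 * illustrative), balanced the same way as the device and clock requests:
 *
 *	ret = handle->ops.rm_irq_ops.set_irq(handle, src_id, src_index,
 *					     dst_id, dst_host_irq);
 *	...
 *	handle->ops.rm_irq_ops.free_irq(handle, src_id, src_index,
 *					dst_id, dst_host_irq);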
*/ struct ti_sci_rm_irq_ops { int (*set_irq)(const struct ti_sci_handle *handle, u16 src_id, u16 src_index, u16 dst_id, u16 dst_host_irq); int (*set_event_map)(const struct ti_sci_handle *handle, u16 src_id, u16 src_index, u16 ia_id, u16 vint, u16 global_event, u8 vint_status_bit); int (*free_irq)(const struct ti_sci_handle *handle, u16 src_id, u16 src_index, u16 dst_id, u16 dst_host_irq); int (*free_event_map)(const struct ti_sci_handle *handle, u16 src_id, u16 src_index, u16 ia_id, u16 vint, u16 global_event, u8 vint_status_bit); }; /* RA config.addr_lo parameter is valid for RM ring configure TI_SCI message */ #define TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID BIT(0) /* RA config.addr_hi parameter is valid for RM ring configure TI_SCI message */ #define TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID BIT(1) /* RA config.count parameter is valid for RM ring configure TI_SCI message */ #define TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID BIT(2) /* RA config.mode parameter is valid for RM ring configure TI_SCI message */ #define TI_SCI_MSG_VALUE_RM_RING_MODE_VALID BIT(3) /* RA config.size parameter is valid for RM ring configure TI_SCI message */ #define TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID BIT(4) /* RA config.order_id parameter is valid for RM ring configure TISCI message */ #define TI_SCI_MSG_VALUE_RM_RING_ORDER_ID_VALID BIT(5) #define TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER \ (TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID | \ TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID | \ TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID | \ TI_SCI_MSG_VALUE_RM_RING_MODE_VALID | \ TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID) /** * struct ti_sci_rm_ringacc_ops - Ring Accelerator Management operations * @config: configure the SoC Navigator Subsystem Ring Accelerator ring * @get_config: get the SoC Navigator Subsystem Ring Accelerator ring * configuration */ struct ti_sci_rm_ringacc_ops { int (*config)(const struct ti_sci_handle *handle, u32 valid_params, u16 nav_id, u16 index, u32 addr_lo, u32 addr_hi, u32 count, u8 mode, u8 size, u8 order_id ); int (*get_config)(const struct ti_sci_handle *handle, u32 nav_id, u32 index, u8 *mode, u32 *addr_lo, u32 *addr_hi, u32 *count, u8 *size, u8 *order_id); }; /** * struct ti_sci_rm_psil_ops - PSI-L thread operations * @pair: pair PSI-L source thread to a destination thread. * If the src_thread is mapped to UDMA tchan, the corresponding channel's * TCHAN_THRD_ID register is updated. * If the dst_thread is mapped to UDMA rchan, the corresponding channel's * RCHAN_THRD_ID register is updated. * @unpair: unpair PSI-L source thread from a destination thread. * If the src_thread is mapped to UDMA tchan, the corresponding channel's * TCHAN_THRD_ID register is cleared. * If the dst_thread is mapped to UDMA rchan, the corresponding channel's * RCHAN_THRD_ID register is cleared. 
*/ struct ti_sci_rm_psil_ops { int (*pair)(const struct ti_sci_handle *handle, u32 nav_id, u32 src_thread, u32 dst_thread); int (*unpair)(const struct ti_sci_handle *handle, u32 nav_id, u32 src_thread, u32 dst_thread); }; /* UDMAP channel types */ #define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR 2 #define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR_SB 3 /* RX only */ #define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR 10 #define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBVR 11 #define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR 12 #define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBVR 13 #define TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST 0 #define TI_SCI_RM_UDMAP_RX_FLOW_DESC_MONO 2 #define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES 1 #define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES 2 #define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES 3 /* UDMAP TX/RX channel valid_params common declarations */ #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID BIT(0) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID BIT(1) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID BIT(2) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID BIT(3) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID BIT(4) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PRIORITY_VALID BIT(5) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_QOS_VALID BIT(6) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ORDER_ID_VALID BIT(7) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_SCHED_PRIORITY_VALID BIT(8) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID BIT(14) /** * Configures a Navigator Subsystem UDMAP transmit channel * * Configures a Navigator Subsystem UDMAP transmit channel registers. * See @ti_sci_msg_rm_udmap_tx_ch_cfg_req */ struct ti_sci_msg_rm_udmap_tx_ch_cfg { u32 valid_params; #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID BIT(9) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID BIT(10) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID BIT(11) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_CREDIT_COUNT_VALID BIT(12) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FDEPTH_VALID BIT(13) u16 nav_id; u16 index; u8 tx_pause_on_err; u8 tx_filt_einfo; u8 tx_filt_pswords; u8 tx_atype; u8 tx_chan_type; u8 tx_supr_tdpkt; u16 tx_fetch_size; u8 tx_credit_count; u16 txcq_qnum; u8 tx_priority; u8 tx_qos; u8 tx_orderid; u16 fdepth; u8 tx_sched_priority; u8 tx_burst_size; }; /** * Configures a Navigator Subsystem UDMAP receive channel * * Configures a Navigator Subsystem UDMAP receive channel registers. * See @ti_sci_msg_rm_udmap_rx_ch_cfg_req */ struct ti_sci_msg_rm_udmap_rx_ch_cfg { u32 valid_params; #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID BIT(9) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID BIT(10) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID BIT(11) #define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID BIT(12) u16 nav_id; u16 index; u16 rx_fetch_size; u16 rxcq_qnum; u8 rx_priority; u8 rx_qos; u8 rx_orderid; u8 rx_sched_priority; u16 flowid_start; u16 flowid_cnt; u8 rx_pause_on_err; u8 rx_atype; u8 rx_chan_type; u8 rx_ignore_short; u8 rx_ignore_long; u8 rx_burst_size; }; /** * Configures a Navigator Subsystem UDMAP receive flow * * Configures a Navigator Subsystem UDMAP receive flow's registers. 
* See @ti_sci_msg_rm_udmap_flow_cfg_req */ struct ti_sci_msg_rm_udmap_flow_cfg { u32 valid_params; #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID BIT(0) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID BIT(1) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID BIT(2) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID BIT(3) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SOP_OFFSET_VALID BIT(4) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID BIT(5) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_VALID BIT(6) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_VALID BIT(7) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_VALID BIT(8) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_VALID BIT(9) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID BIT(10) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID BIT(11) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID BIT(12) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID BIT(13) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID BIT(14) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID BIT(15) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID BIT(16) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID BIT(17) #define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID BIT(18) u16 nav_id; u16 flow_index; u8 rx_einfo_present; u8 rx_psinfo_present; u8 rx_error_handling; u8 rx_desc_type; u16 rx_sop_offset; u16 rx_dest_qnum; u8 rx_src_tag_hi; u8 rx_src_tag_lo; u8 rx_dest_tag_hi; u8 rx_dest_tag_lo; u8 rx_src_tag_hi_sel; u8 rx_src_tag_lo_sel; u8 rx_dest_tag_hi_sel; u8 rx_dest_tag_lo_sel; u16 rx_fdq0_sz0_qnum; u16 rx_fdq1_qnum; u16 rx_fdq2_qnum; u16 rx_fdq3_qnum; u8 rx_ps_location; }; /** * struct ti_sci_rm_udmap_ops - UDMA Management operations * @tx_ch_cfg: configure SoC Navigator Subsystem UDMA transmit channel. * @rx_ch_cfg: configure SoC Navigator Subsystem UDMA receive channel. * @rx_flow_cfg: configure SoC Navigator Subsystem UDMA receive flow. */ struct ti_sci_rm_udmap_ops { int (*tx_ch_cfg)(const struct ti_sci_handle *handle, const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params); int (*rx_ch_cfg)(const struct ti_sci_handle *handle, const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params); int (*rx_flow_cfg)(const struct ti_sci_handle *handle, const struct ti_sci_msg_rm_udmap_flow_cfg *params); }; /** * struct ti_sci_proc_ops - Processor Control operations * @request: Request to control a physical processor.
The requesting host * should be in the processor access list * @release: Relinquish a physical processor control * @handover: Handover a physical processor control to another host * in the permitted list * @set_config: Set base configuration of a processor * @set_control: Setup limited control flags in specific cases * @get_status: Get the state of physical processor * * NOTE: The following parameters are generic in nature for all these ops, * -handle: Pointer to TI SCI handle as retrieved by *ti_sci_get_handle * -pid: Processor ID * -hid: Host ID */ struct ti_sci_proc_ops { int (*request)(const struct ti_sci_handle *handle, u8 pid); int (*release)(const struct ti_sci_handle *handle, u8 pid); int (*handover)(const struct ti_sci_handle *handle, u8 pid, u8 hid); int (*set_config)(const struct ti_sci_handle *handle, u8 pid, u64 boot_vector, u32 cfg_set, u32 cfg_clr); int (*set_control)(const struct ti_sci_handle *handle, u8 pid, u32 ctrl_set, u32 ctrl_clr); int (*get_status)(const struct ti_sci_handle *handle, u8 pid, u64 *boot_vector, u32 *cfg_flags, u32 *ctrl_flags, u32 *status_flags); }; /** * struct ti_sci_ops - Function support for TI SCI * @core_ops: SoC core operations * @dev_ops: Device specific operations * @clk_ops: Clock specific operations * @rm_core_ops: Resource management core operations. * @rm_irq_ops: IRQ management specific operations * @rm_ring_ops: Ring Accelerator management specific operations * @rm_psil_ops: PSI-L thread management specific operations * @rm_udmap_ops: UDMAP management specific operations * @proc_ops: Processor Control specific operations */ struct ti_sci_ops { struct ti_sci_core_ops core_ops; struct ti_sci_dev_ops dev_ops; struct ti_sci_clk_ops clk_ops; struct ti_sci_rm_core_ops rm_core_ops; struct ti_sci_rm_irq_ops rm_irq_ops; struct ti_sci_rm_ringacc_ops rm_ring_ops; struct ti_sci_rm_psil_ops rm_psil_ops; struct ti_sci_rm_udmap_ops rm_udmap_ops; struct ti_sci_proc_ops proc_ops; }; /** * struct ti_sci_handle - Handle returned to TI SCI clients for usage. * @version: structure containing version information * @ops: operations that are made available to TI SCI clients */ struct ti_sci_handle { struct ti_sci_version_info version; struct ti_sci_ops ops; }; #define TI_SCI_RESOURCE_NULL 0xffff /** * struct ti_sci_resource_desc - Description of TI SCI resource instance range. * @start: Start index of the resource. * @num: Number of resources. * @res_map: Bitmap to manage the allocation of these resources. */ struct ti_sci_resource_desc { u16 start; u16 num; unsigned long *res_map; }; /** * struct ti_sci_resource - Structure representing a resource assigned * to a device. * @sets: Number of sets available from this resource type * @lock: Lock to guard the res map in each set. * @desc: Array of resource descriptors.
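 *
 * A hedged allocation sketch (not from the original header; the property
 * name and dev_id value are illustrative), using the helpers declared
 * below:
 *
 *	struct ti_sci_resource *res =
 *		devm_ti_sci_get_of_resource(handle, dev, dev_id,
 *					    "ti,sci-rm-range");
 *	u16 slot = ti_sci_get_free_resource(res);
 *	if (slot == TI_SCI_RESOURCE_NULL)
 *		return -ENODEV;
 *	...
 *	ti_sci_release_resource(res, slot);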
*/ struct ti_sci_resource { u16 sets; raw_spinlock_t lock; struct ti_sci_resource_desc *desc; }; #if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL) const struct ti_sci_handle *ti_sci_get_handle(struct device *dev); int ti_sci_put_handle(const struct ti_sci_handle *handle); const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev); const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np, const char *property); const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev, const char *property); u16 ti_sci_get_free_resource(struct ti_sci_resource *res); void ti_sci_release_resource(struct ti_sci_resource *res, u16 id); u32 ti_sci_get_num_resources(struct ti_sci_resource *res); struct ti_sci_resource * devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, struct device *dev, u32 dev_id, char *of_prop); #else /* CONFIG_TI_SCI_PROTOCOL */ static inline const struct ti_sci_handle *ti_sci_get_handle(struct device *dev) { return ERR_PTR(-EINVAL); } static inline int ti_sci_put_handle(const struct ti_sci_handle *handle) { return -EINVAL; } static inline const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) { return ERR_PTR(-EINVAL); } static inline const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np, const char *property) { return ERR_PTR(-EINVAL); } static inline const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev, const char *property) { return ERR_PTR(-EINVAL); } static inline u16 ti_sci_get_free_resource(struct ti_sci_resource *res) { return TI_SCI_RESOURCE_NULL; } static inline void ti_sci_release_resource(struct ti_sci_resource *res, u16 id) { } static inline u32 ti_sci_get_num_resources(struct ti_sci_resource *res) { return 0; } static inline struct ti_sci_resource * devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, struct device *dev, u32 dev_id, char *of_prop) { return ERR_PTR(-EINVAL); } #endif /* CONFIG_TI_SCI_PROTOCOL */ #endif /* __TISCI_PROTOCOL_H */ soc/ti/knav_qmss.h 0000644 00000006051 14722070374 0010127 0 ustar 00 /* * Keystone Navigator Queue Management Sub-System header * * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com * Author: Sandeep Nair <sandeep_n@ti.com> * Cyril Chemparathy <cyril@ti.com> * Santosh Shilimkar <santosh.shilimkar@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef __SOC_TI_KNAV_QMSS_H__ #define __SOC_TI_KNAV_QMSS_H__ #include <linux/err.h> #include <linux/time.h> #include <linux/atomic.h> #include <linux/device.h> #include <linux/fcntl.h> #include <linux/dma-mapping.h> /* queue types */ #define KNAV_QUEUE_QPEND ((unsigned)-2) /* interruptible qpend queue */ #define KNAV_QUEUE_ACC ((unsigned)-3) /* Accumulated queue */ #define KNAV_QUEUE_GP ((unsigned)-4) /* General purpose queue */ /* queue flags */ #define KNAV_QUEUE_SHARED 0x0001 /* Queue can be shared */ /** * enum knav_queue_ctrl_cmd - queue operations. * @KNAV_QUEUE_GET_ID: Get the ID number for an open queue * @KNAV_QUEUE_FLUSH: forcibly empty a queue if possible * @KNAV_QUEUE_SET_NOTIFIER: Set a notifier callback to a queue handle. 
* @KNAV_QUEUE_ENABLE_NOTIFY: Enable notifier callback for a queue handle. * @KNAV_QUEUE_DISABLE_NOTIFY: Disable notifier callback for a queue handle. * @KNAV_QUEUE_GET_COUNT: Get number of queues. */ enum knav_queue_ctrl_cmd { KNAV_QUEUE_GET_ID, KNAV_QUEUE_FLUSH, KNAV_QUEUE_SET_NOTIFIER, KNAV_QUEUE_ENABLE_NOTIFY, KNAV_QUEUE_DISABLE_NOTIFY, KNAV_QUEUE_GET_COUNT }; /* Queue notifier callback prototype */ typedef void (*knav_queue_notify_fn)(void *arg); /** * struct knav_queue_notify_config: Notifier configuration * @fn: Notifier function * @fn_arg: Notifier function arguments */ struct knav_queue_notify_config { knav_queue_notify_fn fn; void *fn_arg; }; void *knav_queue_open(const char *name, unsigned id, unsigned flags); void knav_queue_close(void *qhandle); int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd, unsigned long arg); dma_addr_t knav_queue_pop(void *qhandle, unsigned *size); int knav_queue_push(void *qhandle, dma_addr_t dma, unsigned size, unsigned flags); void *knav_pool_create(const char *name, int num_desc, int region_id); void knav_pool_destroy(void *ph); int knav_pool_count(void *ph); void *knav_pool_desc_get(void *ph); void knav_pool_desc_put(void *ph, void *desc); int knav_pool_desc_map(void *ph, void *desc, unsigned size, dma_addr_t *dma, unsigned *dma_sz); void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz); dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt); void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma); bool knav_qmss_device_ready(void); #endif /* __SOC_TI_KNAV_QMSS_H__ */ soc/ti/ti-msgmgr.h 0000644 00000002073 14722070374 0010033 0 ustar 00 /* * Texas Instruments' Message Manager * * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ * Nishanth Menon * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef TI_MSGMGR_H #define TI_MSGMGR_H /** * struct ti_msgmgr_message - Message Manager structure * @len: Length of data in the Buffer * @buf: Buffer pointer * * This is the structure for data used in mbox_send_message. * The length of the data buffer used depends on the SoC integration * parameters - each message may be 64 or 128 bytes long, depending * on the SoC. The client is supposed to be aware of this.
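 *
 * A hedged send sketch (not from the original header; the mailbox channel
 * and transmit buffer are assumed to be set up elsewhere):
 *
 *	struct ti_msgmgr_message msg = {
 *		.buf = (u8 *)tx_buf,
 *		.len = tx_len,
 *	};
 *	ret = mbox_send_message(chan, &msg);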
*/ struct ti_msgmgr_message { size_t len; u8 *buf; }; #endif /* TI_MSGMGR_H */ soc/ti/ti_sci_inta_msi.h 0000644 00000001437 14722070374 0011265 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Texas Instruments' K3 TI SCI INTA MSI helper * * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/ * Lokesh Vutla <lokeshvutla@ti.com> */ #ifndef __INCLUDE_LINUX_TI_SCI_INTA_MSI_H #define __INCLUDE_LINUX_TI_SCI_INTA_MSI_H #include <linux/msi.h> #include <linux/soc/ti/ti_sci_protocol.h> struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev, struct ti_sci_resource *res); unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 index); void ti_sci_inta_msi_domain_free_irqs(struct device *dev); #endif /* __INCLUDE_LINUX_TI_SCI_INTA_MSI_H */ soc/actions/owl-sps.h 0000644 00000000353 14722070374 0010554 0 ustar 00 /* * Copyright (c) 2017 Andreas Färber * * SPDX-License-Identifier: GPL-2.0+ */ #ifndef SOC_ACTIONS_OWL_SPS_H #define SOC_ACTIONS_OWL_SPS_H int owl_sps_set_pg(void __iomem *base, u32 pwr_mask, u32 ack_mask, bool enable); #endif soc/renesas/rcar-rst.h 0000644 00000000476 14722070374 0010713 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__ #define __LINUX_SOC_RENESAS_RCAR_RST_H__ #ifdef CONFIG_RST_RCAR int rcar_rst_read_mode_pins(u32 *mode); #else static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; } #endif #endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */ soc/renesas/rcar-sysc.h 0000644 00000000412 14722070374 0011052 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SOC_RENESAS_RCAR_SYSC_H__ #define __LINUX_SOC_RENESAS_RCAR_SYSC_H__ int rcar_sysc_power_down_cpu(unsigned int cpu); int rcar_sysc_power_up_cpu(unsigned int cpu); #endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */ soc/mediatek/infracfg.h 0000644 00000003070 14722070374 0011051 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SOC_MEDIATEK_INFRACFG_H #define __SOC_MEDIATEK_INFRACFG_H #define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0) #define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1) #define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2) #define MT8173_TOP_AXI_PROT_EN_MMAPB_S BIT(6) #define MT8173_TOP_AXI_PROT_EN_L2C_M2 BIT(9) #define MT8173_TOP_AXI_PROT_EN_L2SS_SMI BIT(11) #define MT8173_TOP_AXI_PROT_EN_L2SS_ADD BIT(12) #define MT8173_TOP_AXI_PROT_EN_CCI_M2 BIT(13) #define MT8173_TOP_AXI_PROT_EN_MFG_S BIT(14) #define MT8173_TOP_AXI_PROT_EN_PERI_M0 BIT(15) #define MT8173_TOP_AXI_PROT_EN_PERI_M1 BIT(16) #define MT8173_TOP_AXI_PROT_EN_DEBUGSYS BIT(17) #define MT8173_TOP_AXI_PROT_EN_CQ_DMA BIT(18) #define MT8173_TOP_AXI_PROT_EN_GCPU BIT(19) #define MT8173_TOP_AXI_PROT_EN_IOMMU BIT(20) #define MT8173_TOP_AXI_PROT_EN_MFG_M0 BIT(21) #define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22) #define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23) #define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1) #define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2) #define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8) #define MT7622_TOP_AXI_PROT_EN_ETHSYS (BIT(3) | BIT(17)) #define MT7622_TOP_AXI_PROT_EN_HIF0 (BIT(24) | BIT(25)) #define MT7622_TOP_AXI_PROT_EN_HIF1 (BIT(26) | BIT(27) | \ BIT(28)) #define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \ BIT(7) | BIT(8)) int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask, bool reg_update); int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask, bool reg_update); #endif /*
__SOC_MEDIATEK_INFRACFG_H */ soc/mediatek/mtk-cmdq.h 0000644 00000007270 14722070374 0011015 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2018 MediaTek Inc. * */ #ifndef __MTK_CMDQ_H__ #define __MTK_CMDQ_H__ #include <linux/mailbox_client.h> #include <linux/mailbox/mtk-cmdq-mailbox.h> #include <linux/timer.h> #define CMDQ_NO_TIMEOUT 0xffffffffu struct cmdq_pkt; struct cmdq_client { spinlock_t lock; u32 pkt_cnt; struct mbox_client client; struct mbox_chan *chan; struct timer_list timer; u32 timeout_ms; /* in unit of millisecond */ }; /** * cmdq_mbox_create() - create CMDQ mailbox client and channel * @dev: device of CMDQ mailbox client * @index: index of CMDQ mailbox channel * @timeout: timeout of a pkt execution by GCE, in unit of millisecond, set * CMDQ_NO_TIMEOUT if a timer is not used. * * Return: CMDQ mailbox client pointer */ struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout); /** * cmdq_mbox_destroy() - destroy CMDQ mailbox client and channel * @client: the CMDQ mailbox client */ void cmdq_mbox_destroy(struct cmdq_client *client); /** * cmdq_pkt_create() - create a CMDQ packet * @client: the CMDQ mailbox client * @size: required CMDQ buffer size * * Return: CMDQ packet pointer */ struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size); /** * cmdq_pkt_destroy() - destroy the CMDQ packet * @pkt: the CMDQ packet */ void cmdq_pkt_destroy(struct cmdq_pkt *pkt); /** * cmdq_pkt_write() - append write command to the CMDQ packet * @pkt: the CMDQ packet * @subsys: the CMDQ sub system code * @offset: register offset from CMDQ sub system * @value: the specified target register value * * Return: 0 for success; else the error code is returned */ int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value); /** * cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet * @pkt: the CMDQ packet * @subsys: the CMDQ sub system code * @offset: register offset from CMDQ sub system * @value: the specified target register value * @mask: the specified target register mask * * Return: 0 for success; else the error code is returned */ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value, u32 mask); /** * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet * @pkt: the CMDQ packet * @event: the desired event type to "wait and CLEAR" * * Return: 0 for success; else the error code is returned */ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); /** * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet * @pkt: the CMDQ packet * @event: the desired event to be cleared * * Return: 0 for success; else the error code is returned */ int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); /** * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ * packet and call back at the end of done packet * @pkt: the CMDQ packet * @cb: called at the end of done packet * @data: this data will be passed back to cb * * Return: 0 for success; else the error code is returned * * Trigger CMDQ to asynchronously execute the CMDQ packet and call back * at the end of done packet. Note that this is an ASYNC function. When the * function returns, execution may or may not have finished. */ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb, void *data); /** * cmdq_pkt_flush() - trigger CMDQ to execute the CMDQ packet * @pkt: the CMDQ packet * * Return: 0 for success; else the error code is returned * * Trigger CMDQ to execute the CMDQ packet.
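 *
 * A hedged usage sketch of the packet API above (the device, the subsys
 * code, the register offset and the buffer size are illustrative
 * assumptions, not values defined by this header):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, 4096);
 *
 *	cmdq_pkt_write(pkt, 0x14, 0x100, 0x1);
 *	cmdq_pkt_flush(pkt);
 *	cmdq_pkt_destroy(pkt);
 *	cmdq_mbox_destroy(cl);
 *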
Note that this is a * synchronous flush function. When the function returned, the recorded * commands have been done. */ int cmdq_pkt_flush(struct cmdq_pkt *pkt); #endif /* __MTK_CMDQ_H__ */ soc/samsung/exynos-chipid.h 0000644 00000003212 14722070374 0011745 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2018 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * Exynos - CHIPID support */ #ifndef __LINUX_SOC_EXYNOS_CHIPID_H #define __LINUX_SOC_EXYNOS_CHIPID_H #define EXYNOS_CHIPID_REG_PRO_ID 0x00 #define EXYNOS_SUBREV_MASK (0xf << 4) #define EXYNOS_MAINREV_MASK (0xf << 0) #define EXYNOS_REV_MASK (EXYNOS_SUBREV_MASK | \ EXYNOS_MAINREV_MASK) #define EXYNOS_MASK 0xfffff000 #define EXYNOS_CHIPID_REG_PKG_ID 0x04 /* Bit field definitions for EXYNOS_CHIPID_REG_PKG_ID register */ #define EXYNOS5422_IDS_OFFSET 24 #define EXYNOS5422_IDS_MASK 0xff #define EXYNOS5422_USESG_OFFSET 3 #define EXYNOS5422_USESG_MASK 0x01 #define EXYNOS5422_SG_OFFSET 0 #define EXYNOS5422_SG_MASK 0x07 #define EXYNOS5422_TABLE_OFFSET 8 #define EXYNOS5422_TABLE_MASK 0x03 #define EXYNOS5422_SG_A_OFFSET 17 #define EXYNOS5422_SG_A_MASK 0x0f #define EXYNOS5422_SG_B_OFFSET 21 #define EXYNOS5422_SG_B_MASK 0x03 #define EXYNOS5422_SG_BSIGN_OFFSET 23 #define EXYNOS5422_SG_BSIGN_MASK 0x01 #define EXYNOS5422_BIN2_OFFSET 12 #define EXYNOS5422_BIN2_MASK 0x01 #define EXYNOS_CHIPID_REG_LOT_ID 0x14 #define EXYNOS_CHIPID_REG_AUX_INFO 0x1c /* Bit field definitions for EXYNOS_CHIPID_REG_AUX_INFO register */ #define EXYNOS5422_TMCB_OFFSET 0 #define EXYNOS5422_TMCB_MASK 0x7f #define EXYNOS5422_ARM_UP_OFFSET 8 #define EXYNOS5422_ARM_UP_MASK 0x03 #define EXYNOS5422_ARM_DN_OFFSET 10 #define EXYNOS5422_ARM_DN_MASK 0x03 #define EXYNOS5422_KFC_UP_OFFSET 12 #define EXYNOS5422_KFC_UP_MASK 0x03 #define EXYNOS5422_KFC_DN_OFFSET 14 #define EXYNOS5422_KFC_DN_MASK 0x03 #endif /*__LINUX_SOC_EXYNOS_CHIPID_H */ soc/samsung/exynos-regs-pmu.h 0000644 00000070365 14722070374 0012261 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2010-2015 Samsung Electronics Co., Ltd. * http://www.samsung.com * * EXYNOS - Power management unit definition * * Notice: * This is not a list of all Exynos Power Management Unit SFRs. * There are too many of them, not mentioning subtle differences * between SoCs. For now, put here only the used registers. 
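 *
 * These offsets are normally accessed through the PMU regmap rather than a
 * private ioremap. A short sketch, assuming the regmap is obtained via
 * exynos_get_pmu_regmap() (declared in soc/samsung/exynos-pmu.h later in
 * this archive) and boot_addr is a caller-provided value:
 *
 *	struct regmap *pmu = exynos_get_pmu_regmap();
 *
 *	if (!IS_ERR(pmu))
 *		regmap_write(pmu, S5P_INFORM0, boot_addr);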
*/ #ifndef __LINUX_SOC_EXYNOS_REGS_PMU_H #define __LINUX_SOC_EXYNOS_REGS_PMU_H __FILE__ #define S5P_CENTRAL_SEQ_CONFIGURATION 0x0200 #define S5P_CENTRAL_LOWPWR_CFG (1 << 16) #define S5P_CENTRAL_SEQ_OPTION 0x0208 #define S5P_USE_STANDBY_WFI0 (1 << 16) #define S5P_USE_STANDBY_WFI1 (1 << 17) #define S5P_USE_STANDBY_WFI2 (1 << 19) #define S5P_USE_STANDBY_WFI3 (1 << 20) #define S5P_USE_STANDBY_WFE0 (1 << 24) #define S5P_USE_STANDBY_WFE1 (1 << 25) #define S5P_USE_STANDBY_WFE2 (1 << 27) #define S5P_USE_STANDBY_WFE3 (1 << 28) #define S5P_USE_STANDBY_WFI_ALL \ (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFI1 | \ S5P_USE_STANDBY_WFI2 | S5P_USE_STANDBY_WFI3 | \ S5P_USE_STANDBY_WFE0 | S5P_USE_STANDBY_WFE1 | \ S5P_USE_STANDBY_WFE2 | S5P_USE_STANDBY_WFE3) #define S5P_USE_DELAYED_RESET_ASSERTION BIT(12) #define EXYNOS_CORE_PO_RESET(n) ((1 << 4) << n) #define EXYNOS_WAKEUP_FROM_LOWPWR (1 << 28) #define EXYNOS_SWRESET 0x0400 #define S5P_WAKEUP_STAT 0x0600 /* Value for EXYNOS_EINT_WAKEUP_MASK disabling all external wakeup interrupts */ #define EXYNOS_EINT_WAKEUP_MASK_DISABLED 0xffffffff #define EXYNOS_EINT_WAKEUP_MASK 0x0604 #define S5P_WAKEUP_MASK 0x0608 #define S5P_WAKEUP_MASK2 0x0614 /* MIPI_PHYn_CONTROL, valid for Exynos3250, Exynos4, Exynos5250 and Exynos5433 */ #define EXYNOS4_MIPI_PHY_CONTROL(n) (0x0710 + (n) * 4) /* Phy enable bit, common for all phy registers, not only MIPI */ #define EXYNOS4_PHY_ENABLE (1 << 0) #define EXYNOS4_MIPI_PHY_SRESETN (1 << 1) #define EXYNOS4_MIPI_PHY_MRESETN (1 << 2) #define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1) #define S5P_INFORM0 0x0800 #define S5P_INFORM1 0x0804 #define S5P_INFORM5 0x0814 #define S5P_INFORM6 0x0818 #define S5P_INFORM7 0x081C #define S5P_PMU_SPARE2 0x0908 #define S5P_PMU_SPARE3 0x090C #define EXYNOS_IROM_DATA2 0x0988 #define S5P_ARM_CORE0_LOWPWR 0x1000 #define S5P_DIS_IRQ_CORE0 0x1004 #define S5P_DIS_IRQ_CENTRAL0 0x1008 #define S5P_ARM_CORE1_LOWPWR 0x1010 #define S5P_DIS_IRQ_CORE1 0x1014 #define S5P_DIS_IRQ_CENTRAL1 0x1018 #define S5P_ARM_COMMON_LOWPWR 0x1080 #define S5P_L2_0_LOWPWR 0x10C0 #define S5P_L2_1_LOWPWR 0x10C4 #define S5P_CMU_ACLKSTOP_LOWPWR 0x1100 #define S5P_CMU_SCLKSTOP_LOWPWR 0x1104 #define S5P_CMU_RESET_LOWPWR 0x110C #define S5P_APLL_SYSCLK_LOWPWR 0x1120 #define S5P_MPLL_SYSCLK_LOWPWR 0x1124 #define S5P_VPLL_SYSCLK_LOWPWR 0x1128 #define S5P_EPLL_SYSCLK_LOWPWR 0x112C #define S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR 0x1138 #define S5P_CMU_RESET_GPSALIVE_LOWPWR 0x113C #define S5P_CMU_CLKSTOP_CAM_LOWPWR 0x1140 #define S5P_CMU_CLKSTOP_TV_LOWPWR 0x1144 #define S5P_CMU_CLKSTOP_MFC_LOWPWR 0x1148 #define S5P_CMU_CLKSTOP_G3D_LOWPWR 0x114C #define S5P_CMU_CLKSTOP_LCD0_LOWPWR 0x1150 #define S5P_CMU_CLKSTOP_MAUDIO_LOWPWR 0x1158 #define S5P_CMU_CLKSTOP_GPS_LOWPWR 0x115C #define S5P_CMU_RESET_CAM_LOWPWR 0x1160 #define S5P_CMU_RESET_TV_LOWPWR 0x1164 #define S5P_CMU_RESET_MFC_LOWPWR 0x1168 #define S5P_CMU_RESET_G3D_LOWPWR 0x116C #define S5P_CMU_RESET_LCD0_LOWPWR 0x1170 #define S5P_CMU_RESET_MAUDIO_LOWPWR 0x1178 #define S5P_CMU_RESET_GPS_LOWPWR 0x117C #define S5P_TOP_BUS_LOWPWR 0x1180 #define S5P_TOP_RETENTION_LOWPWR 0x1184 #define S5P_TOP_PWR_LOWPWR 0x1188 #define S5P_LOGIC_RESET_LOWPWR 0x11A0 #define S5P_ONENAND_MEM_LOWPWR 0x11C0 #define S5P_G2D_ACP_MEM_LOWPWR 0x11C8 #define S5P_USBOTG_MEM_LOWPWR 0x11CC #define S5P_HSMMC_MEM_LOWPWR 0x11D0 #define S5P_CSSYS_MEM_LOWPWR 0x11D4 #define S5P_SECSS_MEM_LOWPWR 0x11D8 #define S5P_PAD_RETENTION_DRAM_LOWPWR 0x1200 #define S5P_PAD_RETENTION_MAUDIO_LOWPWR 0x1204 #define S5P_PAD_RETENTION_GPIO_LOWPWR 0x1220 #define 
S5P_PAD_RETENTION_UART_LOWPWR 0x1224 #define S5P_PAD_RETENTION_MMCA_LOWPWR 0x1228 #define S5P_PAD_RETENTION_MMCB_LOWPWR 0x122C #define S5P_PAD_RETENTION_EBIA_LOWPWR 0x1230 #define S5P_PAD_RETENTION_EBIB_LOWPWR 0x1234 #define S5P_PAD_RETENTION_ISOLATION_LOWPWR 0x1240 #define S5P_PAD_RETENTION_ALV_SEL_LOWPWR 0x1260 #define S5P_XUSBXTI_LOWPWR 0x1280 #define S5P_XXTI_LOWPWR 0x1284 #define S5P_EXT_REGULATOR_LOWPWR 0x12C0 #define S5P_GPIO_MODE_LOWPWR 0x1300 #define S5P_GPIO_MODE_MAUDIO_LOWPWR 0x1340 #define S5P_CAM_LOWPWR 0x1380 #define S5P_TV_LOWPWR 0x1384 #define S5P_MFC_LOWPWR 0x1388 #define S5P_G3D_LOWPWR 0x138C #define S5P_LCD0_LOWPWR 0x1390 #define S5P_MAUDIO_LOWPWR 0x1398 #define S5P_GPS_LOWPWR 0x139C #define S5P_GPS_ALIVE_LOWPWR 0x13A0 #define EXYNOS_ARM_CORE0_CONFIGURATION 0x2000 #define EXYNOS_ARM_CORE_CONFIGURATION(_nr) \ (EXYNOS_ARM_CORE0_CONFIGURATION + (0x80 * (_nr))) #define EXYNOS_ARM_CORE_STATUS(_nr) \ (EXYNOS_ARM_CORE_CONFIGURATION(_nr) + 0x4) #define EXYNOS_ARM_CORE_OPTION(_nr) \ (EXYNOS_ARM_CORE_CONFIGURATION(_nr) + 0x8) #define EXYNOS_ARM_COMMON_CONFIGURATION 0x2500 #define EXYNOS_COMMON_CONFIGURATION(_nr) \ (EXYNOS_ARM_COMMON_CONFIGURATION + (0x80 * (_nr))) #define EXYNOS_COMMON_STATUS(_nr) \ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x4) #define EXYNOS_COMMON_OPTION(_nr) \ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8) #define EXYNOS_ARM_L2_CONFIGURATION 0x2600 #define EXYNOS_L2_CONFIGURATION(_nr) \ (EXYNOS_ARM_L2_CONFIGURATION + ((_nr) * 0x80)) #define EXYNOS_L2_STATUS(_nr) \ (EXYNOS_L2_CONFIGURATION(_nr) + 0x4) #define EXYNOS_L2_OPTION(_nr) \ (EXYNOS_L2_CONFIGURATION(_nr) + 0x8) #define EXYNOS_L2_USE_RETENTION BIT(4) #define S5P_PAD_RET_MAUDIO_OPTION 0x3028 #define S5P_PAD_RET_MMC2_OPTION 0x30c8 #define S5P_PAD_RET_GPIO_OPTION 0x3108 #define S5P_PAD_RET_UART_OPTION 0x3128 #define S5P_PAD_RET_MMCA_OPTION 0x3148 #define S5P_PAD_RET_MMCB_OPTION 0x3168 #define S5P_PAD_RET_EBIA_OPTION 0x3188 #define S5P_PAD_RET_EBIB_OPTION 0x31A8 #define S5P_PAD_RET_SPI_OPTION 0x31c8 #define S5P_PS_HOLD_CONTROL 0x330C #define S5P_PS_HOLD_EN (1 << 31) #define S5P_PS_HOLD_OUTPUT_HIGH (3 << 8) #define S5P_CAM_OPTION 0x3C08 #define S5P_MFC_OPTION 0x3C48 #define S5P_G3D_OPTION 0x3C68 #define S5P_LCD0_OPTION 0x3C88 #define S5P_LCD1_OPTION 0x3CA8 #define S5P_ISP_OPTION S5P_LCD1_OPTION #define S5P_CORE_LOCAL_PWR_EN 0x3 #define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8) #define S5P_CORE_AUTOWAKEUP_EN (1 << 31) /* Only for S5Pv210 */ #define S5PV210_EINT_WAKEUP_MASK 0xC004 /* Only for EXYNOS4210 */ #define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154 #define S5P_CMU_RESET_LCD1_LOWPWR 0x1174 #define S5P_MODIMIF_MEM_LOWPWR 0x11C4 #define S5P_PCIE_MEM_LOWPWR 0x11E0 #define S5P_SATA_MEM_LOWPWR 0x11E4 #define S5P_LCD1_LOWPWR 0x1394 /* Only for EXYNOS4x12 */ #define S5P_ISP_ARM_LOWPWR 0x1050 #define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR 0x1054 #define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR 0x1058 #define S5P_CMU_ACLKSTOP_COREBLK_LOWPWR 0x1110 #define S5P_CMU_SCLKSTOP_COREBLK_LOWPWR 0x1114 #define S5P_CMU_RESET_COREBLK_LOWPWR 0x111C #define S5P_MPLLUSER_SYSCLK_LOWPWR 0x1130 #define S5P_CMU_CLKSTOP_ISP_LOWPWR 0x1154 #define S5P_CMU_RESET_ISP_LOWPWR 0x1174 #define S5P_TOP_BUS_COREBLK_LOWPWR 0x1190 #define S5P_TOP_RETENTION_COREBLK_LOWPWR 0x1194 #define S5P_TOP_PWR_COREBLK_LOWPWR 0x1198 #define S5P_OSCCLK_GATE_LOWPWR 0x11A4 #define S5P_LOGIC_RESET_COREBLK_LOWPWR 0x11B0 #define S5P_OSCCLK_GATE_COREBLK_LOWPWR 0x11B4 #define S5P_HSI_MEM_LOWPWR 0x11C4 #define S5P_ROTATOR_MEM_LOWPWR 0x11DC #define S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR 0x123C 
#define S5P_PAD_ISOLATION_COREBLK_LOWPWR 0x1250 #define S5P_GPIO_MODE_COREBLK_LOWPWR 0x1320 #define S5P_TOP_ASB_RESET_LOWPWR 0x1344 #define S5P_TOP_ASB_ISOLATION_LOWPWR 0x1348 #define S5P_ISP_LOWPWR 0x1394 #define S5P_DRAM_FREQ_DOWN_LOWPWR 0x13B0 #define S5P_DDRPHY_DLLOFF_LOWPWR 0x13B4 #define S5P_CMU_SYSCLK_ISP_LOWPWR 0x13B8 #define S5P_CMU_SYSCLK_GPS_LOWPWR 0x13BC #define S5P_LPDDR_PHY_DLL_LOCK_LOWPWR 0x13C0 #define S5P_ARM_L2_0_OPTION 0x2608 #define S5P_ARM_L2_1_OPTION 0x2628 #define S5P_ONENAND_MEM_OPTION 0x2E08 #define S5P_HSI_MEM_OPTION 0x2E28 #define S5P_G2D_ACP_MEM_OPTION 0x2E48 #define S5P_USBOTG_MEM_OPTION 0x2E68 #define S5P_HSMMC_MEM_OPTION 0x2E88 #define S5P_CSSYS_MEM_OPTION 0x2EA8 #define S5P_SECSS_MEM_OPTION 0x2EC8 #define S5P_ROTATOR_MEM_OPTION 0x2F48 /* Only for EXYNOS4412 */ #define S5P_ARM_CORE2_LOWPWR 0x1020 #define S5P_DIS_IRQ_CORE2 0x1024 #define S5P_DIS_IRQ_CENTRAL2 0x1028 #define S5P_ARM_CORE3_LOWPWR 0x1030 #define S5P_DIS_IRQ_CORE3 0x1034 #define S5P_DIS_IRQ_CENTRAL3 0x1038 /* Only for EXYNOS3XXX */ #define EXYNOS3_ARM_CORE0_SYS_PWR_REG 0x1000 #define EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004 #define EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008 #define EXYNOS3_ARM_CORE1_SYS_PWR_REG 0x1010 #define EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG 0x1014 #define EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG 0x1018 #define EXYNOS3_ISP_ARM_SYS_PWR_REG 0x1050 #define EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1054 #define EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1058 #define EXYNOS3_ARM_COMMON_SYS_PWR_REG 0x1080 #define EXYNOS3_ARM_L2_SYS_PWR_REG 0x10C0 #define EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG 0x1100 #define EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG 0x1104 #define EXYNOS3_CMU_RESET_SYS_PWR_REG 0x110C #define EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG 0x1110 #define EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG 0x1114 #define EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG 0x111C #define EXYNOS3_APLL_SYSCLK_SYS_PWR_REG 0x1120 #define EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG 0x1124 #define EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG 0x1128 #define EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG 0x112C #define EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG 0x1130 #define EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG 0x1134 #define EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG 0x1138 #define EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG 0x1140 #define EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1148 #define EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG 0x114C #define EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG 0x1150 #define EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG 0x1154 #define EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG 0x1158 #define EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG 0x1160 #define EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG 0x1168 #define EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG 0x116C #define EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG 0x1170 #define EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG 0x1174 #define EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG 0x1178 #define EXYNOS3_TOP_BUS_SYS_PWR_REG 0x1180 #define EXYNOS3_TOP_RETENTION_SYS_PWR_REG 0x1184 #define EXYNOS3_TOP_PWR_SYS_PWR_REG 0x1188 #define EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG 0x1190 #define EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG 0x1194 #define EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG 0x1198 #define EXYNOS3_LOGIC_RESET_SYS_PWR_REG 0x11A0 #define EXYNOS3_OSCCLK_GATE_SYS_PWR_REG 0x11A4 #define EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG 0x11B0 #define EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG 0x11B4 #define EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200 #define EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG 0x1204 #define EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG 0x1208 #define 
EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG 0x1218 #define EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220 #define EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG 0x1224 #define EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG 0x1228 #define EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG 0x122C #define EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230 #define EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234 #define EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1238 #define EXYNOS3_PAD_ISOLATION_SYS_PWR_REG 0x1240 #define EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG 0x1260 #define EXYNOS3_XUSBXTI_SYS_PWR_REG 0x1280 #define EXYNOS3_XXTI_SYS_PWR_REG 0x1284 #define EXYNOS3_EXT_REGULATOR_SYS_PWR_REG 0x12C0 #define EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG 0x12C4 #define EXYNOS3_GPIO_MODE_SYS_PWR_REG 0x1300 #define EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG 0x1340 #define EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG 0x1344 #define EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG 0x1348 #define EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG 0x1350 #define EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG 0x1354 #define EXYNOS3_CAM_SYS_PWR_REG 0x1380 #define EXYNOS3_MFC_SYS_PWR_REG 0x1388 #define EXYNOS3_G3D_SYS_PWR_REG 0x138C #define EXYNOS3_LCD0_SYS_PWR_REG 0x1390 #define EXYNOS3_ISP_SYS_PWR_REG 0x1394 #define EXYNOS3_MAUDIO_SYS_PWR_REG 0x1398 #define EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG 0x13B0 #define EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG 0x13B4 #define EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG 0x13B8 #define EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG 0x13C0 #define EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG 0x13C4 #define EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG 0x13C8 #define EXYNOS3_ARM_CORE0_OPTION 0x2008 #define EXYNOS3_ARM_CORE_OPTION(_nr) \ (EXYNOS3_ARM_CORE0_OPTION + ((_nr) * 0x80)) #define EXYNOS3_ARM_COMMON_OPTION 0x2408 #define EXYNOS3_ARM_L2_OPTION 0x2608 #define EXYNOS3_TOP_PWR_OPTION 0x2C48 #define EXYNOS3_CORE_TOP_PWR_OPTION 0x2CA8 #define EXYNOS3_XUSBXTI_DURATION 0x341C #define EXYNOS3_XXTI_DURATION 0x343C #define EXYNOS3_EXT_REGULATOR_DURATION 0x361C #define EXYNOS3_EXT_REGULATOR_COREBLK_DURATION 0x363C #define XUSBXTI_DURATION 0x00000BB8 #define XXTI_DURATION XUSBXTI_DURATION #define EXT_REGULATOR_DURATION 0x00001D4C #define EXT_REGULATOR_COREBLK_DURATION EXT_REGULATOR_DURATION /* for XXX_OPTION */ #define EXYNOS3_OPTION_USE_SC_COUNTER (1 << 0) #define EXYNOS3_OPTION_USE_SC_FEEDBACK (1 << 1) #define EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7) /* For EXYNOS5 */ #define EXYNOS5_AUTO_WDTRESET_DISABLE 0x0408 #define EXYNOS5_MASK_WDTRESET_REQUEST 0x040C #define EXYNOS5_USBDRD_PHY_CONTROL 0x0704 #define EXYNOS5_DPTX_PHY_CONTROL 0x0720 #define EXYNOS5_USE_RETENTION BIT(4) #define EXYNOS5_SYS_WDTRESET (1 << 20) #define EXYNOS5_ARM_CORE0_SYS_PWR_REG 0x1000 #define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004 #define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008 #define EXYNOS5_ARM_CORE1_SYS_PWR_REG 0x1010 #define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG 0x1014 #define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG 0x1018 #define EXYNOS5_FSYS_ARM_SYS_PWR_REG 0x1040 #define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG 0x1048 #define EXYNOS5_ISP_ARM_SYS_PWR_REG 0x1050 #define EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1054 #define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1058 #define EXYNOS5_ARM_COMMON_SYS_PWR_REG 0x1080 #define EXYNOS5_ARM_L2_SYS_PWR_REG 0x10C0 #define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG 0x1100 #define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG 0x1104 #define EXYNOS5_CMU_RESET_SYS_PWR_REG 0x110C #define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG 0x1120 #define 
EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG 0x1124 #define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG 0x112C #define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG 0x1130 #define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG 0x1134 #define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG 0x1138 #define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG 0x1140 #define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG 0x1144 #define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG 0x1148 #define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG 0x114C #define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG 0x1150 #define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG 0x1154 #define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG 0x1164 #define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG 0x1170 #define EXYNOS5_TOP_BUS_SYS_PWR_REG 0x1180 #define EXYNOS5_TOP_RETENTION_SYS_PWR_REG 0x1184 #define EXYNOS5_TOP_PWR_SYS_PWR_REG 0x1188 #define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG 0x1190 #define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG 0x1194 #define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG 0x1198 #define EXYNOS5_LOGIC_RESET_SYS_PWR_REG 0x11A0 #define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG 0x11A4 #define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG 0x11B0 #define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG 0x11B4 #define EXYNOS5_USBOTG_MEM_SYS_PWR_REG 0x11C0 #define EXYNOS5_G2D_MEM_SYS_PWR_REG 0x11C8 #define EXYNOS5_USBDRD_MEM_SYS_PWR_REG 0x11CC #define EXYNOS5_SDMMC_MEM_SYS_PWR_REG 0x11D0 #define EXYNOS5_CSSYS_MEM_SYS_PWR_REG 0x11D4 #define EXYNOS5_SECSS_MEM_SYS_PWR_REG 0x11D8 #define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG 0x11DC #define EXYNOS5_INTRAM_MEM_SYS_PWR_REG 0x11E0 #define EXYNOS5_INTROM_MEM_SYS_PWR_REG 0x11E4 #define EXYNOS5_JPEG_MEM_SYS_PWR_REG 0x11E8 #define EXYNOS5_HSI_MEM_SYS_PWR_REG 0x11EC #define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG 0x11F4 #define EXYNOS5_SATA_MEM_SYS_PWR_REG 0x11FC #define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200 #define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG 0x1204 #define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220 #define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG 0x1224 #define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG 0x1228 #define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG 0x122C #define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230 #define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234 #define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG 0x1238 #define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG 0x123C #define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG 0x1240 #define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG 0x1250 #define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG 0x1260 #define EXYNOS5_XUSBXTI_SYS_PWR_REG 0x1280 #define EXYNOS5_XXTI_SYS_PWR_REG 0x1284 #define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG 0x12C0 #define EXYNOS5_GPIO_MODE_SYS_PWR_REG 0x1300 #define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG 0x1320 #define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG 0x1340 #define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG 0x1344 #define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG 0x1348 #define EXYNOS5_GSCL_SYS_PWR_REG 0x1400 #define EXYNOS5_ISP_SYS_PWR_REG 0x1404 #define EXYNOS5_MFC_SYS_PWR_REG 0x1408 #define EXYNOS5_G3D_SYS_PWR_REG 0x140C #define EXYNOS5_DISP1_SYS_PWR_REG 0x1414 #define EXYNOS5_MAU_SYS_PWR_REG 0x1418 #define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG 0x1480 #define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG 0x1484 #define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1488 #define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG 0x148C #define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG 0x1494 #define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG 0x1498 #define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG 0x14C0 #define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG 0x14C4 #define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG 0x14C8 #define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG 0x14CC #define 
EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG 0x14D4 #define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG 0x14D8 #define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG 0x1580 #define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG 0x1584 #define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG 0x1588 #define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG 0x158C #define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG 0x1594 #define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG 0x1598 #define EXYNOS5_ARM_CORE0_OPTION 0x2008 #define EXYNOS5_ARM_CORE1_OPTION 0x2088 #define EXYNOS5_FSYS_ARM_OPTION 0x2208 #define EXYNOS5_ISP_ARM_OPTION 0x2288 #define EXYNOS5_ARM_COMMON_OPTION 0x2408 #define EXYNOS5_ARM_L2_OPTION 0x2608 #define EXYNOS5_TOP_PWR_OPTION 0x2C48 #define EXYNOS5_TOP_PWR_SYSMEM_OPTION 0x2CC8 #define EXYNOS5_JPEG_MEM_OPTION 0x2F48 #define EXYNOS5_GSCL_OPTION 0x4008 #define EXYNOS5_ISP_OPTION 0x4028 #define EXYNOS5_MFC_OPTION 0x4048 #define EXYNOS5_G3D_OPTION 0x4068 #define EXYNOS5_DISP1_OPTION 0x40A8 #define EXYNOS5_MAU_OPTION 0x40C8 #define EXYNOS5_USE_SC_FEEDBACK (1 << 1) #define EXYNOS5_USE_SC_COUNTER (1 << 0) #define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7) #define EXYNOS5_OPTION_USE_STANDBYWFE (1 << 24) #define EXYNOS5_OPTION_USE_STANDBYWFI (1 << 16) #define EXYNOS5_OPTION_USE_RETENTION (1 << 4) #define EXYNOS5420_SWRESET_KFC_SEL 0x3 /* Only for EXYNOS5420 */ #define EXYNOS5420_L2RSTDISABLE_VALUE BIT(3) #define EXYNOS5420_LPI_MASK 0x0004 #define EXYNOS5420_LPI_MASK1 0x0008 #define EXYNOS5420_UFS BIT(8) #define EXYNOS5420_ATB_KFC BIT(13) #define EXYNOS5420_ATB_ISP_ARM BIT(19) #define EXYNOS5420_EMULATION BIT(31) #define EXYNOS5420_ARM_INTR_SPREAD_ENABLE 0x0100 #define EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI 0x0104 #define EXYNOS5420_UP_SCHEDULER 0x0120 #define SPREAD_ENABLE 0xF #define SPREAD_USE_STANDWFI 0xF #define EXYNOS5420_KFC_CORE_RESET0 BIT(8) #define EXYNOS5420_KFC_ETM_RESET0 BIT(20) #define EXYNOS5420_KFC_CORE_RESET(_nr) \ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr)) #define EXYNOS5420_USBDRD1_PHY_CONTROL 0x0708 #define EXYNOS5420_MIPI_PHY_CONTROL(n) (0x0714 + (n) * 4) #define EXYNOS5420_DPTX_PHY_CONTROL 0x0728 #define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020 #define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024 #define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028 #define EXYNOS5420_ARM_CORE3_SYS_PWR_REG 0x1030 #define EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG 0x1034 #define EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG 0x1038 #define EXYNOS5420_KFC_CORE0_SYS_PWR_REG 0x1040 #define EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG 0x1044 #define EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG 0x1048 #define EXYNOS5420_KFC_CORE1_SYS_PWR_REG 0x1050 #define EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG 0x1054 #define EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG 0x1058 #define EXYNOS5420_KFC_CORE2_SYS_PWR_REG 0x1060 #define EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG 0x1064 #define EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG 0x1068 #define EXYNOS5420_KFC_CORE3_SYS_PWR_REG 0x1070 #define EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG 0x1074 #define EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG 0x1078 #define EXYNOS5420_ISP_ARM_SYS_PWR_REG 0x1090 #define EXYNOS5420_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG 0x1094 #define EXYNOS5420_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG 0x1098 #define EXYNOS5420_ARM_COMMON_SYS_PWR_REG 0x10A0 #define EXYNOS5420_KFC_COMMON_SYS_PWR_REG 0x10B0 #define EXYNOS5420_KFC_L2_SYS_PWR_REG 0x10D0 #define EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG 0x1158 #define EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG 0x115C 
#define EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG 0x1160 #define EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG 0x1174 #define EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG 0x1178 #define EXYNOS5420_INTRAM_MEM_SYS_PWR_REG 0x11B8 #define EXYNOS5420_INTROM_MEM_SYS_PWR_REG 0x11BC #define EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1208 #define EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1210 #define EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG 0x1214 #define EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG 0x1218 #define EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG 0x121C #define EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG 0x1220 #define EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG 0x1224 #define EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG 0x1228 #define EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG 0x122C #define EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG 0x1230 #define EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG 0x1234 #define EXYNOS5420_DISP1_SYS_PWR_REG 0x1410 #define EXYNOS5420_MAU_SYS_PWR_REG 0x1414 #define EXYNOS5420_G2D_SYS_PWR_REG 0x1418 #define EXYNOS5420_MSC_SYS_PWR_REG 0x141C #define EXYNOS5420_FSYS_SYS_PWR_REG 0x1420 #define EXYNOS5420_FSYS2_SYS_PWR_REG 0x1424 #define EXYNOS5420_PSGEN_SYS_PWR_REG 0x1428 #define EXYNOS5420_PERIC_SYS_PWR_REG 0x142C #define EXYNOS5420_WCORE_SYS_PWR_REG 0x1430 #define EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG 0x1490 #define EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG 0x1494 #define EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG 0x1498 #define EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG 0x149C #define EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG 0x14A0 #define EXYNOS5420_CMU_CLKSTOP_FSYS2_SYS_PWR_REG 0x14A4 #define EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG 0x14A8 #define EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG 0x14AC #define EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG 0x14B0 #define EXYNOS5420_CMU_SYSCLK_TOPPWR_SYS_PWR_REG 0x14BC #define EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG 0x14D0 #define EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG 0x14D4 #define EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG 0x14D8 #define EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG 0x14DC #define EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG 0x14E0 #define EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG 0x14E4 #define EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG 0x14E8 #define EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG 0x14EC #define EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG 0x14F0 #define EXYNOS5420_CMU_SYSCLK_SYSMEM_TOPPWR_SYS_PWR_REG 0x14F4 #define EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG 0x1570 #define EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG 0x1574 #define EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG 0x1578 #define EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG 0x157C #define EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG 0x1590 #define EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG 0x1594 #define EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG 0x1598 #define EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG 0x159C #define EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG 0x15A0 #define EXYNOS5420_SFR_AXI_CGDIS1 0x15E4 #define EXYNOS5420_ARM_COMMON_OPTION 0x2508 #define EXYNOS5420_KFC_COMMON_OPTION 0x2588 #define EXYNOS5420_LOGIC_RESET_DURATION3 0x2D1C #define EXYNOS5420_PAD_RET_GPIO_OPTION 0x30C8 #define EXYNOS5420_PAD_RET_UART_OPTION 0x30E8 #define EXYNOS5420_PAD_RET_MMCA_OPTION 0x3108 #define EXYNOS5420_PAD_RET_MMCB_OPTION 0x3128 #define EXYNOS5420_PAD_RET_MMCC_OPTION 0x3148 #define EXYNOS5420_PAD_RET_HSI_OPTION 0x3168 #define EXYNOS5420_PAD_RET_SPI_OPTION 0x31C8 #define EXYNOS5420_PAD_RET_DRAM_COREBLK_OPTION 0x31E8 #define EXYNOS_PAD_RET_DRAM_OPTION 0x3008 #define EXYNOS_PAD_RET_MAUDIO_OPTION 0x3028 #define EXYNOS_PAD_RET_JTAG_OPTION 0x3048 #define 
EXYNOS_PAD_RET_EBIA_OPTION 0x3188 #define EXYNOS_PAD_RET_EBIB_OPTION 0x31A8 #define EXYNOS5420_FSYS2_OPTION 0x4168 #define EXYNOS5420_PSGEN_OPTION 0x4188 /* For EXYNOS_CENTRAL_SEQ_OPTION */ #define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16) #define EXYNOS5_USE_STANDBYWFI_ARM_CORE1 BIT(17) #define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24) #define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25) #define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4) #define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5) #define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6) #define EXYNOS5420_ARM_USE_STANDBY_WFI3 BIT(7) #define EXYNOS5420_KFC_USE_STANDBY_WFI0 BIT(8) #define EXYNOS5420_KFC_USE_STANDBY_WFI1 BIT(9) #define EXYNOS5420_KFC_USE_STANDBY_WFI2 BIT(10) #define EXYNOS5420_KFC_USE_STANDBY_WFI3 BIT(11) #define EXYNOS5420_ARM_USE_STANDBY_WFE0 BIT(16) #define EXYNOS5420_ARM_USE_STANDBY_WFE1 BIT(17) #define EXYNOS5420_ARM_USE_STANDBY_WFE2 BIT(18) #define EXYNOS5420_ARM_USE_STANDBY_WFE3 BIT(19) #define EXYNOS5420_KFC_USE_STANDBY_WFE0 BIT(20) #define EXYNOS5420_KFC_USE_STANDBY_WFE1 BIT(21) #define EXYNOS5420_KFC_USE_STANDBY_WFE2 BIT(22) #define EXYNOS5420_KFC_USE_STANDBY_WFE3 BIT(23) #define DUR_WAIT_RESET 0xF #define EXYNOS5420_USE_STANDBY_WFI_ALL (EXYNOS5420_ARM_USE_STANDBY_WFI0 \ | EXYNOS5420_ARM_USE_STANDBY_WFI1 \ | EXYNOS5420_ARM_USE_STANDBY_WFI2 \ | EXYNOS5420_ARM_USE_STANDBY_WFI3 \ | EXYNOS5420_KFC_USE_STANDBY_WFI0 \ | EXYNOS5420_KFC_USE_STANDBY_WFI1 \ | EXYNOS5420_KFC_USE_STANDBY_WFI2 \ | EXYNOS5420_KFC_USE_STANDBY_WFI3) /* For EXYNOS5433 */ #define EXYNOS5433_EINT_WAKEUP_MASK (0x060C) #define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728) #define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028) #define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8) #define EXYNOS5433_PAD_RETENTION_TOP_OPTION (0x3108) #define EXYNOS5433_PAD_RETENTION_UART_OPTION (0x3128) #define EXYNOS5433_PAD_RETENTION_MMC0_OPTION (0x3148) #define EXYNOS5433_PAD_RETENTION_MMC1_OPTION (0x3168) #define EXYNOS5433_PAD_RETENTION_EBIA_OPTION (0x3188) #define EXYNOS5433_PAD_RETENTION_EBIB_OPTION (0x31A8) #define EXYNOS5433_PAD_RETENTION_SPI_OPTION (0x31C8) #define EXYNOS5433_PAD_RETENTION_MIF_OPTION (0x31E8) #define EXYNOS5433_PAD_RETENTION_USBXTI_OPTION (0x3228) #define EXYNOS5433_PAD_RETENTION_BOOTLDO_OPTION (0x3248) #define EXYNOS5433_PAD_RETENTION_UFS_OPTION (0x3268) #define EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION (0x32A8) #endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */ soc/samsung/exynos-pmu.h 0000644 00000001141 14722070374 0011305 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Header for EXYNOS PMU Driver support */ #ifndef __LINUX_SOC_EXYNOS_PMU_H #define __LINUX_SOC_EXYNOS_PMU_H struct regmap; enum sys_powerdown { SYS_AFTR, SYS_LPA, SYS_SLEEP, NUM_SYS_POWERDOWN, }; extern void exynos_sys_powerdown_conf(enum sys_powerdown mode); #ifdef CONFIG_EXYNOS_PMU extern struct regmap *exynos_get_pmu_regmap(void); #else static inline struct regmap *exynos_get_pmu_regmap(void) { return ERR_PTR(-ENODEV); } #endif #endif /* __LINUX_SOC_EXYNOS_PMU_H */ compiler-clang.h 0000644 00000002550 14722070374 0007621 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_TYPES_H #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif /* Compiler specific definitions for Clang compiler */ #define uninitialized_var(x) x = *(&(x)) /* same as gcc, this was present in clang-2.6 so we can assume it works * with any version that can compile the kernel */ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) /* all clang versions usable with the kernel support KASAN ABI version 5 */ #define KASAN_ABI_VERSION 5 #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) /* emulate gcc's __SANITIZE_ADDRESS__ flag */ #define __SANITIZE_ADDRESS__ #define __no_sanitize_address \ __attribute__((no_sanitize("address", "hwaddress"))) #else #define __no_sanitize_address #endif /* * Not all versions of clang implement the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements * __has_builtin allowing us to avoid awkward version * checks. Unfortunately, we don't know which version of gcc clang * pretends to be, so the macro may or may not be defined. */ #if __has_builtin(__builtin_mul_overflow) && \ __has_builtin(__builtin_add_overflow) && \ __has_builtin(__builtin_sub_overflow) #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif genalloc.h 0000644 00000017263 14722070374 0006510 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Basic general purpose allocator for managing special purpose * memory, for example, memory that is not managed by the regular * kmalloc/kfree interface. Uses for this include on-device special * memory, uncached memory, etc. * * It is safe to use the allocator in NMI handlers and other special * unblockable contexts that could otherwise deadlock on locks. This * is implemented by using atomic operations and retries on any * conflicts. The disadvantage is that there may be livelocks in * extreme cases. For better scalability, one allocator can be used * for each CPU. * * The lockless operation only works if there is enough memory * available. If new memory is added to the pool, a lock still has to * be taken. So any user relying on locklessness has to ensure * that sufficient memory is preallocated. * * The basic atomic operation of this allocator is cmpxchg on long. * On architectures that don't have an NMI-safe cmpxchg implementation, * the allocator can NOT be used in an NMI handler. So code that uses the * allocator in an NMI handler should depend on * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. */ #ifndef __GENALLOC_H__ #define __GENALLOC_H__ #include <linux/types.h> #include <linux/spinlock_types.h> #include <linux/atomic.h> struct device; struct device_node; struct gen_pool; /** * typedef genpool_algo_t: Allocation callback function type definition * @map: Pointer to bitmap * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for * @data: optional additional data used by the callback * @pool: the pool being allocated from * @start_addr: start address of the memory chunk being searched */ typedef unsigned long (*genpool_algo_t)(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr); /* * General purpose special memory pool descriptor. */ struct gen_pool { spinlock_t lock; struct list_head chunks; /* list of chunks in this pool */ int min_alloc_order; /* minimum allocation order */ genpool_algo_t algo; /* allocation function */ void *data; const char *name; }; /* * General purpose special memory pool chunk descriptor.
*/ struct gen_pool_chunk { struct list_head next_chunk; /* next chunk in pool */ atomic_long_t avail; phys_addr_t phys_addr; /* physical starting address of memory chunk */ void *owner; /* private data to retrieve at alloc time */ unsigned long start_addr; /* start address of memory chunk */ unsigned long end_addr; /* end address of memory chunk (inclusive) */ unsigned long bits[0]; /* bitmap for allocating memory chunk */ }; /* * gen_pool data descriptor for gen_pool_first_fit_align. */ struct genpool_data_align { int align; /* alignment by bytes for starting address */ }; /* * gen_pool data descriptor for gen_pool_fixed_alloc. */ struct genpool_data_fixed { unsigned long offset; /* The offset of the specific region */ }; extern struct gen_pool *gen_pool_create(int, int); extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long); extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t, size_t, int, void *); static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr, phys_addr_t phys, size_t size, int nid) { return gen_pool_add_owner(pool, addr, phys, size, nid, NULL); } /** * gen_pool_add - add a new chunk of special memory to the pool * @pool: pool to add new memory chunk to * @addr: starting address of memory chunk to add to pool * @size: size in bytes of the memory chunk to add to pool * @nid: node id of the node the chunk structure and bitmap should be * allocated on, or -1 * * Add a new chunk of special memory to the specified pool. * * Returns 0 on success or a -ve errno on failure. */ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size, int nid) { return gen_pool_add_virt(pool, addr, -1, size, nid); } extern void gen_pool_destroy(struct gen_pool *); unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size, genpool_algo_t algo, void *data, void **owner); static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool, size_t size, void **owner) { return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data, owner); } static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, genpool_algo_t algo, void *data) { return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL); } /** * gen_pool_alloc - allocate special memory from the pool * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * * Allocate the requested number of bytes from the specified pool. * Uses the pool allocation function (with first-fit algorithm by default). * Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. 
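 *
 * A minimal setup/use sketch (the minimum allocation order, chunk size,
 * allocation size and the vaddr variable below are illustrative
 * assumptions):
 *
 *	struct gen_pool *pool = gen_pool_create(5, -1);
 *	unsigned long addr;
 *
 *	gen_pool_add(pool, (unsigned long)vaddr, 65536, -1);
 *	addr = gen_pool_alloc(pool, 256);
 *	if (addr)
 *		gen_pool_free(pool, addr, 256);
 *	gen_pool_destroy(pool);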
*/ static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) { return gen_pool_alloc_algo(pool, size, pool->algo, pool->data); } extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma); extern void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size, dma_addr_t *dma, genpool_algo_t algo, void *data); extern void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size, dma_addr_t *dma, int align); extern void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma); extern void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size, dma_addr_t *dma, genpool_algo_t algo, void *data); extern void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size, dma_addr_t *dma, int align); extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size, void **owner); static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) { gen_pool_free_owner(pool, addr, size, NULL); } extern void gen_pool_for_each_chunk(struct gen_pool *, void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *); extern size_t gen_pool_avail(struct gen_pool *); extern size_t gen_pool_size(struct gen_pool *); extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data); extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr); extern struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, int nid, const char *name); extern struct gen_pool *gen_pool_get(struct device *dev, const char *name); bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, size_t size); #ifdef CONFIG_OF extern struct gen_pool *of_gen_pool_get(struct device_node *np, const char *propname, int index); #else static inline struct gen_pool *of_gen_pool_get(struct device_node *np, const char *propname, int index) { return NULL; } #endif #endif /* __GENALLOC_H__ */ kmod.h 0000644 00000002025 14722070374 0005654 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __LINUX_KMOD_H__ #define __LINUX_KMOD_H__ /* * include/linux/kmod.h */ #include <linux/umh.h> #include <linux/gfp.h> #include <linux/stddef.h> #include <linux/errno.h> #include <linux/compiler.h> #include <linux/workqueue.h> #include <linux/sysctl.h> #define KMOD_PATH_LEN 256 #ifdef CONFIG_MODULES extern char modprobe_path[]; /* for sysctl */ /* modprobe exit status on success, -ve on error. Return value * usually useless though. */ extern __printf(2, 3) int __request_module(bool wait, const char *name, ...); #define request_module(mod...) __request_module(true, mod) #define request_module_nowait(mod...) 
__request_module(false, mod) #define try_then_request_module(x, mod...) \ ((x) ?: (__request_module(true, mod), (x))) #else static inline int request_module(const char *name, ...) { return -ENOSYS; } static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; } #define try_then_request_module(x, mod...) (x) #endif #endif /* __LINUX_KMOD_H__ */ smp.h 0000644 00000013756 14722070374 0005536 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SMP_H #define __LINUX_SMP_H /* * Generic SMP support * Alan Cox. <alan@redhat.com> */ #include <linux/errno.h> #include <linux/types.h> #include <linux/list.h> #include <linux/cpumask.h> #include <linux/init.h> #include <linux/llist.h> typedef void (*smp_call_func_t)(void *info); struct __call_single_data { struct llist_node llist; smp_call_func_t func; void *info; unsigned int flags; }; /* Use __aligned() to avoid to use 2 cache lines for 1 csd */ typedef struct __call_single_data call_single_data_t __aligned(sizeof(struct __call_single_data)); /* total number of cpus in this system (may exceed NR_CPUS) */ extern unsigned int total_cpus; int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, int wait); /* * Call a function on all processors */ void on_each_cpu(smp_call_func_t func, void *info, int wait); /* * Call a function on processors specified by mask, which might include * the local one. */ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait); /* * Call a function on each processor for which the supplied function * cond_func returns a positive value. This may include the local * processor. */ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), smp_call_func_t func, void *info, bool wait, gfp_t gfp_flags); void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info), smp_call_func_t func, void *info, bool wait, gfp_t gfp_flags, const struct cpumask *mask); int smp_call_function_single_async(int cpu, struct __call_single_data *csd); #ifdef CONFIG_SMP #include <linux/preempt.h> #include <linux/kernel.h> #include <linux/compiler.h> #include <linux/thread_info.h> #include <asm/smp.h> /* * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. * (defined in asm header): */ /* * stops all CPUs but the current one: */ extern void smp_send_stop(void); /* * sends a 'reschedule' event to another CPU: */ extern void smp_send_reschedule(int cpu); /* * Prepare machine for booting other CPUs. */ extern void smp_prepare_cpus(unsigned int max_cpus); /* * Bring a CPU up */ extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle); /* * Final polishing of CPUs */ extern void smp_cpus_done(unsigned int max_cpus); /* * Call a function on all other processors */ void smp_call_function(smp_call_func_t func, void *info, int wait); void smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait); int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, void *info, int wait); void kick_all_cpus_sync(void); void wake_up_all_idle_cpus(void); /* * Generic and arch helpers */ void __init call_function_init(void); void generic_smp_call_function_single_interrupt(void); #define generic_smp_call_function_interrupt \ generic_smp_call_function_single_interrupt /* * Mark the boot cpu "online" so that it can call console drivers in * printk() and can access its per-cpu storage. 
*/ void smp_prepare_boot_cpu(void); extern unsigned int setup_max_cpus; extern void __init setup_nr_cpu_ids(void); extern void __init smp_init(void); extern int __boot_cpu_id; static inline int get_boot_cpu_id(void) { return __boot_cpu_id; } #else /* !SMP */ static inline void smp_send_stop(void) { } /* * These macros fold the SMP functionality into a single CPU system */ #define raw_smp_processor_id() 0 static inline void up_smp_call_function(smp_call_func_t func, void *info) { } #define smp_call_function(func, info, wait) \ (up_smp_call_function(func, info)) static inline void smp_send_reschedule(int cpu) { } #define smp_prepare_boot_cpu() do {} while (0) #define smp_call_function_many(mask, func, info, wait) \ (up_smp_call_function(func, info)) static inline void call_function_init(void) { } static inline int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, void *info, int wait) { return smp_call_function_single(0, func, info, wait); } static inline void kick_all_cpus_sync(void) { } static inline void wake_up_all_idle_cpus(void) { } #ifdef CONFIG_UP_LATE_INIT extern void __init up_late_init(void); static inline void smp_init(void) { up_late_init(); } #else static inline void smp_init(void) { } #endif static inline int get_boot_cpu_id(void) { return 0; } #endif /* !SMP */ /** * raw_smp_processor_id() - get the current (unstable) CPU id * * For when you know what you are doing and need an unstable * CPU id. */ /** * smp_processor_id() - get the current (stable) CPU id * * This is the normal accessor to the CPU id and should be used * whenever possible. * * The CPU id is stable when: * * - IRQs are disabled; * - preemption is disabled; * - the task is CPU affine. * * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions and WARN * when smp_processor_id() is used when the CPU id is not stable. */ /* * Allow the architecture to differentiate between a stable and unstable read. * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a * regular asm read for the stable. */ #ifndef __smp_processor_id #define __smp_processor_id(x) raw_smp_processor_id(x) #endif #ifdef CONFIG_DEBUG_PREEMPT extern unsigned int debug_smp_processor_id(void); # define smp_processor_id() debug_smp_processor_id() #else # define smp_processor_id() __smp_processor_id() #endif #define get_cpu() ({ preempt_disable(); __smp_processor_id(); }) #define put_cpu() preempt_enable() /* * Callback to arch code if there's nosmp or maxcpus=0 on the * boot command line: */ extern void arch_disable_smp_support(void); extern void arch_enable_nonboot_cpus_begin(void); extern void arch_enable_nonboot_cpus_end(void); void smp_setup_processor_id(void); int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys); /* SMP core functions */ int smpcfd_prepare_cpu(unsigned int cpu); int smpcfd_dead_cpu(unsigned int cpu); int smpcfd_dying_cpu(unsigned int cpu); #endif /* __LINUX_SMP_H */ node.h 0000644 00000011307 14722070374 0005652 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/node.h - generic node definition * * This is mainly for topological representation. We define the * basic 'struct node' here, which can be embedded in per-arch * definitions of processors. * * Basic handling of the devices is done in drivers/base/node.c * and system devices are handled in drivers/base/sys.c. * * Nodes are exported via sysfs in the class/node/devices/ * directory.
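 *
 * A hedged sketch of how an architecture might register its online nodes
 * at boot using the helpers declared below (illustrative only):
 *
 *	int nid;
 *
 *	for_each_online_node(nid)
 *		register_one_node(nid);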
*/ #ifndef _LINUX_NODE_H_ #define _LINUX_NODE_H_ #include <linux/device.h> #include <linux/cpumask.h> #include <linux/list.h> #include <linux/workqueue.h> /** * struct node_hmem_attrs - heterogeneous memory performance attributes * * @read_bandwidth: Read bandwidth in MB/s * @write_bandwidth: Write bandwidth in MB/s * @read_latency: Read latency in nanoseconds * @write_latency: Write latency in nanoseconds */ struct node_hmem_attrs { unsigned int read_bandwidth; unsigned int write_bandwidth; unsigned int read_latency; unsigned int write_latency; }; enum cache_indexing { NODE_CACHE_DIRECT_MAP, NODE_CACHE_INDEXED, NODE_CACHE_OTHER, }; enum cache_write_policy { NODE_CACHE_WRITE_BACK, NODE_CACHE_WRITE_THROUGH, NODE_CACHE_WRITE_OTHER, }; /** * struct node_cache_attrs - system memory caching attributes * * @indexing: The ways memory blocks may be placed in cache * @write_policy: Write back or write through policy * @size: Total size of cache in bytes * @line_size: Number of bytes fetched on a cache miss * @level: The cache hierarchy level */ struct node_cache_attrs { enum cache_indexing indexing; enum cache_write_policy write_policy; u64 size; u16 line_size; u8 level; }; #ifdef CONFIG_HMEM_REPORTING void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs); void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs, unsigned access); #else static inline void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs) { } static inline void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs, unsigned access) { } #endif struct node { struct device dev; struct list_head access_list; #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) struct work_struct node_work; #endif #ifdef CONFIG_HMEM_REPORTING struct list_head cache_attrs; struct device *cache_dev; #endif }; struct memory_block; extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, enum meminit_context context); #else static inline int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, enum meminit_context context) { return 0; } #endif extern void unregister_node(struct node *node); #ifdef CONFIG_NUMA /* Core of the node registration - only memory hotplug should use this */ extern int __register_one_node(int nid); /* Registers an online node */ static inline int register_one_node(int nid) { int error = 0; if (node_online(nid)) { struct pglist_data *pgdat = NODE_DATA(nid); unsigned long start_pfn = pgdat->node_start_pfn; unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; error = __register_one_node(nid); if (error) return error; /* link memory sections under this node */ error = link_mem_sections(nid, start_pfn, end_pfn, MEMINIT_EARLY); } return error; } extern void unregister_one_node(int nid); extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk); extern int register_memory_node_under_compute_node(unsigned int mem_nid, unsigned int cpu_nid, unsigned access); #ifdef CONFIG_HUGETLBFS extern void register_hugetlbfs_with_node(node_registration_func_t doregister, node_registration_func_t unregister); #endif #else static inline int __register_one_node(int nid) { return 0; } 
static inline int register_one_node(int nid) { return 0; } static inline int unregister_one_node(int nid) { return 0; } static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) { return 0; } static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) { return 0; } static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk) { } static inline void register_hugetlbfs_with_node(node_registration_func_t reg, node_registration_func_t unreg) { } #endif #define to_node(device) container_of(device, struct node, dev) #endif /* _LINUX_NODE_H_ */ mailbox_controller.h 0000644 00000013313 14722070374 0010622 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __MAILBOX_CONTROLLER_H #define __MAILBOX_CONTROLLER_H #include <linux/of.h> #include <linux/types.h> #include <linux/hrtimer.h> #include <linux/device.h> #include <linux/completion.h> struct mbox_chan; /** * struct mbox_chan_ops - methods to control mailbox channels * @send_data: The API asks the MBOX controller driver, in atomic * context try to transmit a message on the bus. Returns 0 if * data is accepted for transmission, -EBUSY while rejecting * if the remote hasn't yet read the last data sent. Actual * transmission of data is reported by the controller via * mbox_chan_txdone (if it has some TX ACK irq). It must not * sleep. * @flush: Called when a client requests transmissions to be blocking but * the context doesn't allow sleeping. Typically the controller * will implement a busy loop waiting for the data to flush out. * @startup: Called when a client requests the chan. The controller * could ask clients for additional parameters of communication * to be provided via client's chan_data. This call may * block. After this call the Controller must forward any * data received on the chan by calling mbox_chan_received_data. * The controller may do stuff that need to sleep. * @shutdown: Called when a client relinquishes control of a chan. * This call may block too. The controller must not forward * any received data anymore. * The controller may do stuff that need to sleep. * @last_tx_done: If the controller sets 'txdone_poll', the API calls * this to poll status of last TX. The controller must * give priority to IRQ method over polling and never * set both txdone_poll and txdone_irq. Only in polling * mode 'send_data' is expected to return -EBUSY. * The controller may do stuff that need to sleep/block. * Used only if txdone_poll:=true && txdone_irq:=false * @peek_data: Atomic check for any received data. Return true if controller * has some data to push to the client. False otherwise. */ struct mbox_chan_ops { int (*send_data)(struct mbox_chan *chan, void *data); int (*flush)(struct mbox_chan *chan, unsigned long timeout); int (*startup)(struct mbox_chan *chan); void (*shutdown)(struct mbox_chan *chan); bool (*last_tx_done)(struct mbox_chan *chan); bool (*peek_data)(struct mbox_chan *chan); }; /** * struct mbox_controller - Controller of a class of communication channels * @dev: Device backing this controller * @ops: Operators that work on each communication chan * @chans: Array of channels * @num_chans: Number of channels in the 'chans' array. * @txdone_irq: Indicates if the controller can report to API when * the last transmitted data was read by the remote. * Eg, if it has some TX ACK irq. * @txdone_poll: If the controller can read but not report the TX * done. Ex, some register shows the TX status but * no interrupt rises. Ignored if 'txdone_irq' is set. 
* @txpoll_period: If 'txdone_poll' is in effect, the API polls for * the last TX's status after this many milliseconds * @of_xlate: Controller driver specific mapping of channel via DT * @poll_hrt: API private. hrtimer used to poll for TXDONE on all * channels. * @node: API private. To hook into the list of controllers. */ struct mbox_controller { struct device *dev; const struct mbox_chan_ops *ops; struct mbox_chan *chans; int num_chans; bool txdone_irq; bool txdone_poll; unsigned txpoll_period; struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox, const struct of_phandle_args *sp); /* Internal to API */ struct hrtimer poll_hrt; spinlock_t poll_hrt_lock; struct list_head node; }; /* * The length of the circular buffer for queuing messages from a client. * 'msg_count' tracks the number of buffered messages while 'msg_free' * is the index where the next message would be buffered. * We shouldn't need it too big because every transfer is interrupt * triggered and if we have lots of data to transfer, the interrupt * latencies are going to be the bottleneck, not the buffer length. * Besides, mbox_send_message could be called from atomic context and * the client could also queue another message from the notifier 'tx_done' * of the last transfer done. * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN" * print, it needs to be taken from a config option or some such. */ #define MBOX_TX_QUEUE_LEN 20 /** * struct mbox_chan - s/w representation of a communication chan * @mbox: Pointer to the parent/provider of this channel * @txdone_method: Way to detect TXDone chosen by the API * @cl: Pointer to the current owner of this channel * @tx_complete: Transmission completion * @active_req: Currently active request hook * @msg_count: Number of messages currently queued * @msg_free: Index of the next available message slot * @msg_data: Hook for data packet * @lock: Serialise access to the channel * @con_priv: Hook for controller driver to attach private data */ struct mbox_chan { struct mbox_controller *mbox; unsigned txdone_method; struct mbox_client *cl; struct completion tx_complete; void *active_req; unsigned msg_count, msg_free; void *msg_data[MBOX_TX_QUEUE_LEN]; spinlock_t lock; /* Serialise access to the channel */ void *con_priv; }; int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */ void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */ void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */ void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */ int devm_mbox_controller_register(struct device *dev, struct mbox_controller *mbox); void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox); #endif /* __MAILBOX_CONTROLLER_H */ i8253.h 0000644 00000001451 14722070374 0005476 0 ustar 00 /* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Machine specific IO port address definition for generic.
* Written by Osamu Tomita <tomita@cinet.co.jp> */ #ifndef __LINUX_I8253_H #define __LINUX_I8253_H #include <linux/param.h> #include <linux/spinlock.h> #include <linux/timex.h> /* i8253A PIT registers */ #define PIT_MODE 0x43 #define PIT_CH0 0x40 #define PIT_CH2 0x42 #define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) extern raw_spinlock_t i8253_lock; extern bool i8253_clear_counter_on_shutdown; extern struct clock_event_device i8253_clockevent; extern void clockevent_i8253_init(bool oneshot); extern void setup_pit_timer(void); #endif /* __LINUX_I8253_H */ devfreq.h 0000644 00000032072 14722070374 0006363 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework * for Non-CPU Devices. * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham <myungjoo.ham@samsung.com> */ #ifndef __LINUX_DEVFREQ_H__ #define __LINUX_DEVFREQ_H__ #include <linux/device.h> #include <linux/notifier.h> #include <linux/pm_opp.h> #define DEVFREQ_NAME_LEN 16 /* DEVFREQ governor name */ #define DEVFREQ_GOV_SIMPLE_ONDEMAND "simple_ondemand" #define DEVFREQ_GOV_PERFORMANCE "performance" #define DEVFREQ_GOV_POWERSAVE "powersave" #define DEVFREQ_GOV_USERSPACE "userspace" #define DEVFREQ_GOV_PASSIVE "passive" /* DEVFREQ notifier interface */ #define DEVFREQ_TRANSITION_NOTIFIER (0) /* Transition notifiers of DEVFREQ_TRANSITION_NOTIFIER */ #define DEVFREQ_PRECHANGE (0) #define DEVFREQ_POSTCHANGE (1) struct devfreq; struct devfreq_governor; /** * struct devfreq_dev_status - Data given from devfreq user device to * governors. Represents the performance * statistics. * @total_time: The total time represented by this instance of * devfreq_dev_status * @busy_time: The time that the device was working within the * total_time. * @current_frequency: The operating frequency. * @private_data: An entry not specified by the devfreq framework. * A device and a specific governor may have their * own protocol with private_data. However, because * this is governor-specific, a governor using this * will only be compatible with devices aware of it. */ struct devfreq_dev_status { /* both since the last measure */ unsigned long total_time; unsigned long busy_time; unsigned long current_frequency; void *private_data; }; /* * If this flag is set, the resulting frequency should be at most the * given bound (the bound is the least upper bound; the resulting * frequency should be lower than or equal to it). * If the flag is not set, the resulting frequency should be at least * the bound (greatest lower bound). */ #define DEVFREQ_FLAG_LEAST_UPPER_BOUND 0x1 /** * struct devfreq_dev_profile - Devfreq's user device profile * @initial_freq: The operating frequency when devfreq_add_device() is * called. * @polling_ms: The polling interval in ms. 0 disables polling. * @target: The device should set its operating frequency to * freq, or to the lowest supported frequency above freq. * If freq is higher than any operable frequency, set the * maximum. Before returning, the target function should * set freq to the frequency actually set. * The "flags" parameter's possible values are * explained above with the "DEVFREQ_FLAG_*" macros. * @get_dev_status: The device should provide the current performance * status to devfreq. Governors are recommended not to * use this directly. Instead, governors are recommended * to use devfreq_update_stats() along with * devfreq.last_status. * @get_cur_freq: The device should provide the current frequency * at which it is operating.
* @exit: An optional callback that is called when devfreq * is removing the devfreq object due to an error or * from a devfreq_remove_device() call. If the user * has registered devfreq->nb at a notifier-head, * this is the time to unregister it. * @freq_table: Optional list of frequencies to support statistics; * freq_table must be generated in ascending order. * @max_state: The size of freq_table. */ struct devfreq_dev_profile { unsigned long initial_freq; unsigned int polling_ms; int (*target)(struct device *dev, unsigned long *freq, u32 flags); int (*get_dev_status)(struct device *dev, struct devfreq_dev_status *stat); int (*get_cur_freq)(struct device *dev, unsigned long *freq); void (*exit)(struct device *dev); unsigned long *freq_table; unsigned int max_state; }; /** * struct devfreq - Device devfreq structure * @node: list node - contains the devices with devfreq that have been * registered. * @lock: a mutex to protect accessing devfreq. * @dev: device registered by devfreq class. dev.parent is the device * using devfreq. * @profile: device-specific devfreq profile * @governor: method for choosing the frequency based on usage. * @governor_name: devfreq governor name for use with this devfreq * @nb: notifier block used to notify devfreq object that it should * reevaluate operable frequencies. Devfreq users may register * devfreq.nb with the corresponding notifier call chain. * @work: delayed work for load monitoring. * @previous_freq: previously configured frequency value. * @data: data the devfreq driver passes to governors; a governor * should not change it. * @governor_data: private data for governors; the devfreq core doesn't touch it. * @min_freq: Limit minimum frequency requested by user (0: none) * @max_freq: Limit maximum frequency requested by user (0: none) * @scaling_min_freq: Limit minimum frequency requested by OPP interface * @scaling_max_freq: Limit maximum frequency requested by OPP interface * @stop_polling: devfreq polling status of a device. * @suspend_freq: frequency of a device set during suspend phase. * @resume_freq: frequency of a device set in resume phase. * @suspend_count: suspend requests counter for a device. * @total_trans: Number of devfreq transitions * @trans_table: Statistics of devfreq transitions * @time_in_state: Statistics of devfreq states * @last_stat_updated: The last time stats were updated * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier * * This structure stores the devfreq information for a given device. * * Note that when a governor accesses entries in struct devfreq in its * functions except for the context of callbacks defined in struct * devfreq_governor, the governor should protect its access with the * struct mutex lock in struct devfreq. A governor may use this mutex * to protect its own private data in void *data as well.
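 *
 * As a purely illustrative sketch (not from the original header), a
 * governor honoring that rule brackets its accesses like:
 *
 *	mutex_lock(&devfreq->lock);
 *	... read or update devfreq fields and the governor's *data ...
 *	mutex_unlock(&devfreq->lock);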
*/ struct devfreq { struct list_head node; struct mutex lock; struct device dev; struct devfreq_dev_profile *profile; const struct devfreq_governor *governor; char governor_name[DEVFREQ_NAME_LEN]; struct notifier_block nb; struct delayed_work work; unsigned long previous_freq; struct devfreq_dev_status last_status; void *data; void *governor_data; unsigned long min_freq; unsigned long max_freq; unsigned long scaling_min_freq; unsigned long scaling_max_freq; bool stop_polling; unsigned long suspend_freq; unsigned long resume_freq; atomic_t suspend_count; /* information for device frequency transitions */ unsigned int total_trans; unsigned int *trans_table; unsigned long *time_in_state; unsigned long last_stat_updated; struct srcu_notifier_head transition_notifier_list; }; struct devfreq_freqs { unsigned long old; unsigned long new; }; #if defined(CONFIG_PM_DEVFREQ) extern struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name, void *data); extern int devfreq_remove_device(struct devfreq *devfreq); extern struct devfreq *devm_devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name, void *data); extern void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq); /* Supposed to be called by PM callbacks */ extern int devfreq_suspend_device(struct devfreq *devfreq); extern int devfreq_resume_device(struct devfreq *devfreq); extern void devfreq_suspend(void); extern void devfreq_resume(void); /** * update_devfreq() - Reevaluate the device and configure frequency * @devfreq: the devfreq device * * Note: devfreq->lock must be held */ extern int update_devfreq(struct devfreq *devfreq); /* Helper functions for devfreq user device drivers with OPP. */ extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, u32 flags); extern int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq); extern int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq); extern int devm_devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq); extern void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq); extern int devfreq_register_notifier(struct devfreq *devfreq, struct notifier_block *nb, unsigned int list); extern int devfreq_unregister_notifier(struct devfreq *devfreq, struct notifier_block *nb, unsigned int list); extern int devm_devfreq_register_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list); extern void devm_devfreq_unregister_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list); extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index); #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) /** * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq * and devfreq_add_device * @upthreshold: If the load is over this value, the frequency jumps. * Specify 0 to use the default. Valid values: 0 to 100. * @downdifferential: If the load is under upthreshold - downdifferential, * the governor may consider slowing the frequency down. * Specify 0 to use the default. Valid values: 0 to 100. * downdifferential < upthreshold must hold. * * If the devfreq_simple_ondemand_data pointer fed to the governor is NULL, * the governor uses the default values.
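 *
 * For illustration only (the names and values here are hypothetical),
 * a driver could tune the governor when adding its device:
 *
 *	static struct devfreq_simple_ondemand_data foo_gov_data = {
 *		.upthreshold = 90,
 *		.downdifferential = 10,
 *	};
 *
 *	devfreq = devfreq_add_device(dev, &foo_profile,
 *				     DEVFREQ_GOV_SIMPLE_ONDEMAND,
 *				     &foo_gov_data);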
*/ struct devfreq_simple_ondemand_data { unsigned int upthreshold; unsigned int downdifferential; }; #endif #if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) /** * struct devfreq_passive_data - void *data fed to struct devfreq * and devfreq_add_device * @parent: the devfreq instance of the parent device. * @get_target_freq: Optional callback that returns the desired operating * frequency for the device using the passive governor. * It is called when the passive governor must decide the * next frequency from the new frequency of the parent * devfreq device (the parent uses a governor other than * the passive one). * If the devfreq device has a specific method to decide * the next frequency, it should use this callback. * @this: the devfreq instance of the device itself. * @nb: the notifier block for the DEVFREQ_TRANSITION_NOTIFIER list * * The devfreq_passive_data has to set the devfreq instance of the parent * device, which uses a governor other than the passive one. The 'this' and * 'nb' fields need not be initialized because the devfreq core handles them. */ struct devfreq_passive_data { /* Should be set to the devfreq instance of the parent device */ struct devfreq *parent; /* Optional callback to decide the next frequency of the passive device */ int (*get_target_freq)(struct devfreq *this, unsigned long *freq); /* For the passive governor's internal use. No need to set these. */ struct devfreq *this; struct notifier_block nb; }; #endif #else /* !CONFIG_PM_DEVFREQ */ static inline struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name, void *data) { return ERR_PTR(-ENOSYS); } static inline int devfreq_remove_device(struct devfreq *devfreq) { return 0; } static inline struct devfreq *devm_devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name, void *data) { return ERR_PTR(-ENOSYS); } static inline void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq) { } static inline int devfreq_suspend_device(struct devfreq *devfreq) { return 0; } static inline int devfreq_resume_device(struct devfreq *devfreq) { return 0; } static inline void devfreq_suspend(void) {} static inline void devfreq_resume(void) {} static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, u32 flags) { return ERR_PTR(-EINVAL); } static inline int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) { return -EINVAL; } static inline int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) { return -EINVAL; } static inline int devm_devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) { return -EINVAL; } static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) { } static inline int devfreq_register_notifier(struct devfreq *devfreq, struct notifier_block *nb, unsigned int list) { return 0; } static inline int devfreq_unregister_notifier(struct devfreq *devfreq, struct notifier_block *nb, unsigned int list) { return 0; } static inline int devm_devfreq_register_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list) { return 0; } static inline void devm_devfreq_unregister_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list) { } static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index) { return ERR_PTR(-ENODEV); } static inline int devfreq_update_stats(struct devfreq *df) { return
-EINVAL; } #endif /* CONFIG_PM_DEVFREQ */ #endif /* __LINUX_DEVFREQ_H__ */ rtc.h 0000644 00000022005 14722070374 0005512 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Generic RTC interface. * This version contains the part of the user interface to the Real Time Clock * service. It is used with both the legacy mc146818 and also the EFI RTC. * Struct rtc_time and the first 12 ioctls are by Paul Gortmaker, 1996 - separated out * from <linux/mc146818rtc.h> into this file for 2.4 kernels. * * Copyright (C) 1999 Hewlett-Packard Co. * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com> */ #ifndef _LINUX_RTC_H_ #define _LINUX_RTC_H_ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/nvmem-provider.h> #include <uapi/linux/rtc.h> extern int rtc_month_days(unsigned int month, unsigned int year); extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year); extern int rtc_valid_tm(struct rtc_time *tm); extern time64_t rtc_tm_to_time64(struct rtc_time *tm); extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm); ktime_t rtc_tm_to_ktime(struct rtc_time tm); struct rtc_time rtc_ktime_to_tm(ktime_t kt); /* * rtc_tm_sub - Return the difference in seconds. */ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs) { return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs); } static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) { rtc_time64_to_tm(time, tm); } static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) { *time = rtc_tm_to_time64(tm); return 0; } #include <linux/device.h> #include <linux/seq_file.h> #include <linux/cdev.h> #include <linux/poll.h> #include <linux/mutex.h> #include <linux/timerqueue.h> #include <linux/workqueue.h> extern struct class *rtc_class; /* * For these RTC methods the device parameter is the physical device * on whatever bus holds the hardware (I2C, Platform, SPI, etc), which * was passed to rtc_device_register(). Its driver_data normally holds * device state, including the rtc_device pointer for the RTC. * * Most of these methods are called with rtc_device.ops_lock held, * through the rtc_*(struct rtc_device *, ...) calls. * * The (current) exceptions are mostly filesystem hooks: * - the proc() hook for procfs * - non-ioctl() chardev hooks: open(), release() * * REVISIT: those periodic irq calls *do* have ops_lock when they're * issued through ioctl() ...
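 *
 * As an illustrative sketch only (the foo_* callbacks are hypothetical,
 * not part of the original header), a driver typically fills in just the
 * methods its hardware supports:
 *
 *	static const struct rtc_class_ops foo_rtc_ops = {
 *		.read_time = foo_rtc_read_time,
 *		.set_time = foo_rtc_set_time,
 *		.read_alarm = foo_rtc_read_alarm,
 *		.set_alarm = foo_rtc_set_alarm,
 *		.alarm_irq_enable = foo_rtc_alarm_irq_enable,
 *	};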
*/ struct rtc_class_ops { int (*ioctl)(struct device *, unsigned int, unsigned long); int (*read_time)(struct device *, struct rtc_time *); int (*set_time)(struct device *, struct rtc_time *); int (*read_alarm)(struct device *, struct rtc_wkalrm *); int (*set_alarm)(struct device *, struct rtc_wkalrm *); int (*proc)(struct device *, struct seq_file *); int (*alarm_irq_enable)(struct device *, unsigned int enabled); int (*read_offset)(struct device *, long *offset); int (*set_offset)(struct device *, long offset); }; struct rtc_device; struct rtc_timer { struct timerqueue_node node; ktime_t period; void (*func)(struct rtc_device *rtc); struct rtc_device *rtc; int enabled; }; /* flags */ #define RTC_DEV_BUSY 0 struct rtc_device { struct device dev; struct module *owner; int id; const struct rtc_class_ops *ops; struct mutex ops_lock; struct cdev char_dev; unsigned long flags; unsigned long irq_data; spinlock_t irq_lock; wait_queue_head_t irq_queue; struct fasync_struct *async_queue; int irq_freq; int max_user_freq; struct timerqueue_head timerqueue; struct rtc_timer aie_timer; struct rtc_timer uie_rtctimer; struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ int pie_enabled; struct work_struct irqwork; /* Some hardware can't support UIE mode */ int uie_unsupported; /* Number of nsec it takes to set the RTC clock. This influences when * the set ops are called. An offset: * - of 0.5 s will call RTC set for wall clock time 10.0 s at 9.5 s * - of 1.5 s will call RTC set for wall clock time 10.0 s at 8.5 s * - of -0.5 s will call RTC set for wall clock time 10.0 s at 10.5 s */ long set_offset_nsec; bool registered; /* Old ABI support */ bool nvram_old_abi; struct bin_attribute *nvram; time64_t range_min; timeu64_t range_max; time64_t start_secs; time64_t offset_secs; bool set_start_time; #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL struct work_struct uie_task; struct timer_list uie_timer; /* Those fields are protected by rtc->irq_lock */ unsigned int oldsecs; unsigned int uie_irq_active:1; unsigned int stop_uie_polling:1; unsigned int uie_task_active:1; unsigned int uie_timer_active:1; #endif }; #define to_rtc_device(d) container_of(d, struct rtc_device, dev) /* useful timestamps */ #define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */ #define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ #define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */ #define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ #define RTC_TIMESTAMP_END_9999 253402300799LL /* 9999-12-31 23:59:59 */ extern struct rtc_device *devm_rtc_device_register(struct device *dev, const char *name, const struct rtc_class_ops *ops, struct module *owner); struct rtc_device *devm_rtc_allocate_device(struct device *dev); int __rtc_register_device(struct module *owner, struct rtc_device *rtc); extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec); int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events); extern struct rtc_device *rtc_class_open(const char *name); extern void 
rtc_class_close(struct rtc_device *rtc); extern int rtc_irq_set_state(struct rtc_device *rtc, int enabled); extern int rtc_irq_set_freq(struct rtc_device *rtc, int freq); extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled); void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); void rtc_aie_update_irq(struct rtc_device *rtc); void rtc_uie_update_irq(struct rtc_device *rtc); enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r), struct rtc_device *rtc); int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, ktime_t expires, ktime_t period); void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer); int rtc_read_offset(struct rtc_device *rtc, long *offset); int rtc_set_offset(struct rtc_device *rtc, long offset); void rtc_timer_do_work(struct work_struct *work); static inline bool is_leap_year(unsigned int year) { return (!(year % 4) && (year % 100)) || !(year % 400); } /* Determine if we can call the driver to set the time. Drivers can only be * called to set a second-aligned time value, and the field set_offset_nsec * specifies how far away from the second-aligned time to call the driver. * * This also computes 'to_set', which is the time we are trying to set, and has * a zero in tv_nsec, such that: * to_set - set_offset_nsec == now +/- FUZZ * */ static inline bool rtc_tv_nsec_ok(s64 set_offset_nsec, struct timespec64 *to_set, const struct timespec64 *now) { /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */ const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5; struct timespec64 delay = {.tv_sec = 0, .tv_nsec = set_offset_nsec}; *to_set = timespec64_add(*now, delay); if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) { to_set->tv_nsec = 0; return true; } if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) { to_set->tv_sec++; to_set->tv_nsec = 0; return true; } return false; } #define rtc_register_device(device) \ __rtc_register_device(THIS_MODULE, device) #ifdef CONFIG_RTC_HCTOSYS_DEVICE extern int rtc_hctosys_ret; #else #define rtc_hctosys_ret -ENODEV #endif #ifdef CONFIG_RTC_NVMEM int rtc_nvmem_register(struct rtc_device *rtc, struct nvmem_config *nvmem_config); void rtc_nvmem_unregister(struct rtc_device *rtc); #else static inline int rtc_nvmem_register(struct rtc_device *rtc, struct nvmem_config *nvmem_config) { return 0; } static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {} #endif #ifdef CONFIG_RTC_INTF_SYSFS int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp); int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps); #else static inline int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp) { return 0; } static inline int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps) { return 0; } #endif #endif /* _LINUX_RTC_H_ */ fs_parser.h 0000644 00000011153 14722070374 0006710 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Filesystem parameter description and parser * * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_FS_PARSER_H #define _LINUX_FS_PARSER_H #include <linux/fs_context.h> struct path; struct constant_table { const char *name; int value; }; /* * The type of parameter expected. */ enum fs_parameter_type { __fs_param_wasnt_defined, fs_param_is_flag, fs_param_is_bool, fs_param_is_u32, fs_param_is_u32_octal, fs_param_is_u32_hex, fs_param_is_s32, fs_param_is_u64, fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev, fs_param_is_path, fs_param_is_fd, nr__fs_parameter_type, }; /* * Specification of the type of value a parameter wants. * * Note that the fsparam_flag(), fsparam_string(), fsparam_u32(), ... macros * should be used to generate elements of this type. */ struct fs_parameter_spec { const char *name; u8 opt; /* Option number (returned by fs_parse()) */ enum fs_parameter_type type:8; /* The desired parameter type */ unsigned short flags; #define fs_param_v_optional 0x0001 /* The value is optional */ #define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */ #define fs_param_neg_with_empty 0x0004 /* "xxx=" is negative param */ #define fs_param_deprecated 0x0008 /* The param is deprecated */ }; struct fs_parameter_enum { u8 opt; /* Option number (as fs_parameter_spec::opt) */ char name[14]; u8 value; }; struct fs_parameter_description { const char name[16]; /* Name for logging purposes */ const struct fs_parameter_spec *specs; /* List of param specifications */ const struct fs_parameter_enum *enums; /* Enum values */ }; /* * Result of parse. */ struct fs_parse_result { bool negated; /* T if param was "noxxx" */ bool has_value; /* T if value supplied to param */ union { bool boolean; /* For spec_bool */ int int_32; /* For spec_s32/spec_enum */ unsigned int uint_32; /* For spec_u32{,_octal,_hex}/spec_enum */ u64 uint_64; /* For spec_u64 */ }; }; extern int fs_parse(struct fs_context *fc, const struct fs_parameter_description *desc, struct fs_parameter *value, struct fs_parse_result *result); extern int fs_lookup_param(struct fs_context *fc, struct fs_parameter *param, bool want_bdev, struct path *_path); extern int __lookup_constant(const struct constant_table tbl[], size_t tbl_size, const char *name, int not_found); #define lookup_constant(t, n, nf) __lookup_constant(t, ARRAY_SIZE(t), (n), (nf)) #ifdef CONFIG_VALIDATE_FS_PARSER extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, int low, int high, int special); extern bool fs_validate_description(const struct fs_parameter_description *desc); #else static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, int low, int high, int special) { return true; } static inline bool fs_validate_description(const struct fs_parameter_description *desc) { return true; } #endif /* * Parameter type, name, index and flags element constructors. Use as: * * fsparam_xxxx("foo", Opt_foo) * * If the existing helpers are not enough, direct use of __fsparam() would * work, but any such case is probably a sign that a new helper is needed. * Helpers will remain stable; low-level implementation may change.
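 *
 * A fuller sketch, purely for illustration (the Opt_* constants and all
 * "foo" names are hypothetical, not part of the original header):
 *
 *	enum { Opt_mode, Opt_acl };
 *
 *	static const struct fs_parameter_spec foo_param_specs[] = {
 *		fsparam_u32oct("mode", Opt_mode),
 *		fsparam_flag_no("acl", Opt_acl),
 *		{}
 *	};
 *
 *	static const struct fs_parameter_description foo_fs_parameters = {
 *		.name = "foo",
 *		.specs = foo_param_specs,
 *	};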
*/ #define __fsparam(TYPE, NAME, OPT, FLAGS) \ { \ .name = NAME, \ .opt = OPT, \ .type = TYPE, \ .flags = FLAGS \ } #define fsparam_flag(NAME, OPT) __fsparam(fs_param_is_flag, NAME, OPT, 0) #define fsparam_flag_no(NAME, OPT) \ __fsparam(fs_param_is_flag, NAME, OPT, \ fs_param_neg_with_no) #define fsparam_bool(NAME, OPT) __fsparam(fs_param_is_bool, NAME, OPT, 0) #define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0) #define fsparam_u32oct(NAME, OPT) \ __fsparam(fs_param_is_u32_octal, NAME, OPT, 0) #define fsparam_u32hex(NAME, OPT) \ __fsparam(fs_param_is_u32_hex, NAME, OPT, 0) #define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0) #define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0) #define fsparam_enum(NAME, OPT) __fsparam(fs_param_is_enum, NAME, OPT, 0) #define fsparam_string(NAME, OPT) \ __fsparam(fs_param_is_string, NAME, OPT, 0) #define fsparam_blob(NAME, OPT) __fsparam(fs_param_is_blob, NAME, OPT, 0) #define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0) #define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0) #define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0) #endif /* _LINUX_FS_PARSER_H */ rtnetlink.h 0000644 00000010722 14722070374 0006737 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_RTNETLINK_H #define __LINUX_RTNETLINK_H #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/wait.h> #include <linux/refcount.h> #include <uapi/linux/rtnetlink.h> extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, struct nlmsghdr *nlh, gfp_t flags); extern void rtnl_set_sk_err(struct net *net, u32 group, int error); extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics); extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, long expires, u32 error); void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, gfp_t flags, int *new_nsid, int new_ifindex); struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, unsigned change, u32 event, gfp_t flags, int *new_nsid, int new_ifindex); void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags); /* RTNL is used as a global lock for all changes to network configuration */ extern void rtnl_lock(void); extern void rtnl_unlock(void); extern int rtnl_trylock(void); extern int rtnl_is_locked(void); extern int rtnl_lock_killable(void); extern bool refcount_dec_and_rtnl_lock(refcount_t *r); extern wait_queue_head_t netdev_unregistering_wq; extern struct rw_semaphore pernet_ops_rwsem; extern struct rw_semaphore net_rwsem; #ifdef CONFIG_PROVE_LOCKING extern bool lockdep_rtnl_is_held(void); #else static inline bool lockdep_rtnl_is_held(void) { return true; } #endif /* #ifdef CONFIG_PROVE_LOCKING */ /** * rcu_dereference_rtnl - rcu_dereference with debug checking * @p: The pointer to read, prior to dereferencing * * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() * or RTNL. 
Note: please prefer rtnl_dereference() or rcu_dereference() */ #define rcu_dereference_rtnl(p) \ rcu_dereference_check(p, lockdep_rtnl_is_held()) /** * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking * @p: The pointer to read, prior to dereferencing * * Do an rcu_dereference_bh(p), but check that the caller either holds * rcu_read_lock_bh() or RTNL. Note: please prefer rtnl_dereference() or rcu_dereference_bh() */ #define rcu_dereference_bh_rtnl(p) \ rcu_dereference_bh_check(p, lockdep_rtnl_is_held()) /** * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL * @p: The pointer to read, prior to dereferencing * * Return the value of the specified RCU-protected pointer, but omit * the READ_ONCE(), because the caller holds RTNL. */ #define rtnl_dereference(p) \ rcu_dereference_protected(p, lockdep_rtnl_is_held()) static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) { return rtnl_dereference(dev->ingress_queue); } static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev) { return rcu_dereference(dev->ingress_queue); } struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); #ifdef CONFIG_NET_INGRESS void net_inc_ingress_queue(void); void net_dec_ingress_queue(void); #endif #ifdef CONFIG_NET_EGRESS void net_inc_egress_queue(void); void net_dec_egress_queue(void); #endif void rtnetlink_init(void); void __rtnl_unlock(void); void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); #define ASSERT_RTNL() \ WARN_ONCE(!rtnl_is_locked(), \ "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, struct net_device *filter_dev, int *idx); extern int ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 flags); extern int ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid); extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u16 mode, u32 flags, u32 mask, int nlflags, u32 filter_mask, int (*vlan_fill)(struct sk_buff *skb, struct net_device *dev, u32 filter_mask)); #endif /* __LINUX_RTNETLINK_H */ clock_cooling.h 0000644 00000003230 14722070374 0007526 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/clock_cooling.h * * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com> * * Copyright (C) 2013 Texas Instruments Inc. * Contact: Eduardo Valentin <eduardo.valentin@ti.com> * * Heavily based on cpu_cooling.c. * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org> */ #ifndef __CPU_COOLING_H__ #define __CPU_COOLING_H__ #include <linux/of.h> #include <linux/thermal.h> #include <linux/cpumask.h> #ifdef CONFIG_CLOCK_THERMAL /** * clock_cooling_register - function to create a clock cooling device. * @dev: struct device pointer to the device used as the clock cooling device. * @clock_name: the name of the clock used as the cooling mechanism. */ struct thermal_cooling_device * clock_cooling_register(struct device *dev, const char *clock_name); /** * clock_cooling_unregister - function to remove a clock cooling device. * @cdev: thermal cooling device pointer.
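 *
 * Purely as an illustrative sketch (names are hypothetical; error
 * handling elided), registration and removal typically pair up as:
 *
 *	cdev = clock_cooling_register(dev, "foo_clk");
 *	... use cdev as a thermal cooling device ...
 *	clock_cooling_unregister(cdev);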
*/ void clock_cooling_unregister(struct thermal_cooling_device *cdev); unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, unsigned long freq); #else /* !CONFIG_CLOCK_THERMAL */ static inline struct thermal_cooling_device * clock_cooling_register(struct device *dev, const char *clock_name) { return NULL; } static inline void clock_cooling_unregister(struct thermal_cooling_device *cdev) { } static inline unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, unsigned long freq) { return THERMAL_CSTATE_INVALID; } #endif /* CONFIG_CLOCK_THERMAL */ #endif /* __CPU_COOLING_H__ */ vringh.h 0000644 00000016161 14722070374 0006225 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux host-side vring helpers; for when the kernel needs to access * someone else's vring. * * Copyright IBM Corporation, 2013. * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc. * * Written by: Rusty Russell <rusty@rustcorp.com.au> */ #ifndef _LINUX_VRINGH_H #define _LINUX_VRINGH_H #include <uapi/linux/virtio_ring.h> #include <linux/virtio_byteorder.h> #include <linux/uio.h> #include <linux/slab.h> #include <asm/barrier.h> /* virtio_ring with information needed for host access. */ struct vringh { /* Everything is little endian */ bool little_endian; /* Guest publishes used event idx (note: we always do). */ bool event_indices; /* Can we get away with weak barriers? */ bool weak_barriers; /* Last available index we saw (i.e. where we're up to). */ u16 last_avail_idx; /* Last index we used. */ u16 last_used_idx; /* How many descriptors we've completed since last need_notify(). */ u32 completed; /* The vring (note: it may contain user pointers!) */ struct vring vring; /* The function to call to notify the guest about added buffers */ void (*notify)(struct vringh *); }; /** * struct vringh_config_ops - ops for creating a host vring from a virtio driver * @find_vrhs: find the host vrings and instantiate them * vdev: the virtio_device * nhvrs: the number of host vrings to find * hvrs: on success, includes new host vrings * callbacks: array of driver callbacks, for each host vring * include a NULL entry for vqs that do not need a callback * Returns 0 on success or error status * @del_vrhs: free the host vrings found by find_vrhs(). */ struct virtio_device; typedef void vrh_callback_t(struct virtio_device *, struct vringh *); struct vringh_config_ops { int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs, struct vringh *vrhs[], vrh_callback_t *callbacks[]); void (*del_vrhs)(struct virtio_device *vdev); }; /* The memory the vring can access, and what offset to apply. */ struct vringh_range { u64 start, end_incl; u64 offset; }; /** * struct vringh_iov - iovec mangler. * * Mangles iovec in place, and restores it. * Remaining data is iov + i, of used - i elements. */ struct vringh_iov { struct iovec *iov; size_t consumed; /* Within iov[i] */ unsigned i, used, max_num; }; /** * struct vringh_kiov - kvec mangler. * * Mangles kvec in place, and restores it. * Remaining data is iov + i, of used - i elements. */ struct vringh_kiov { struct kvec *iov; size_t consumed; /* Within iov[i] */ unsigned i, used, max_num; }; /* Flag on max_num to indicate we're kmalloced. */ #define VRINGH_IOV_ALLOCATED 0x8000000 /* Helpers for userspace vrings.
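 *
 * Rough flow, as an illustrative sketch only (error handling trimmed;
 * getrange, buf, len and the iovec arrays are hypothetical, and a
 * getdesc return of 1 is assumed to mean a descriptor was found):
 *
 *	vringh_init_user(&vrh, features, num, true, desc, avail, used);
 *	vringh_iov_init(&riov, r_iovec, ARRAY_SIZE(r_iovec));
 *	vringh_iov_init(&wiov, w_iovec, ARRAY_SIZE(w_iovec));
 *
 *	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
 *	if (err == 1) {
 *		vringh_iov_pull_user(&riov, buf, len);
 *		vringh_complete_user(&vrh, head, bytes_written);
 *	}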
*/ int vringh_init_user(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, struct vring_desc __user *desc, struct vring_avail __user *avail, struct vring_used __user *used); static inline void vringh_iov_init(struct vringh_iov *iov, struct iovec *iovec, unsigned num) { iov->used = iov->i = 0; iov->consumed = 0; iov->max_num = num; iov->iov = iovec; } static inline void vringh_iov_reset(struct vringh_iov *iov) { iov->iov[iov->i].iov_len += iov->consumed; iov->iov[iov->i].iov_base -= iov->consumed; iov->consumed = 0; iov->i = 0; } static inline void vringh_iov_cleanup(struct vringh_iov *iov) { if (iov->max_num & VRINGH_IOV_ALLOCATED) kfree(iov->iov); iov->max_num = iov->used = iov->i = iov->consumed = 0; iov->iov = NULL; } /* Convert a descriptor into iovecs. */ int vringh_getdesc_user(struct vringh *vrh, struct vringh_iov *riov, struct vringh_iov *wiov, bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r), u16 *head); /* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */ ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len); /* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */ ssize_t vringh_iov_push_user(struct vringh_iov *wiov, const void *src, size_t len); /* Mark a descriptor as used. */ int vringh_complete_user(struct vringh *vrh, u16 head, u32 len); int vringh_complete_multi_user(struct vringh *vrh, const struct vring_used_elem used[], unsigned num_used); /* Pretend we've never seen the descriptor (for easy error handling). */ void vringh_abandon_user(struct vringh *vrh, unsigned int num); /* Do we need to fire the eventfd to notify the other side? */ int vringh_need_notify_user(struct vringh *vrh); bool vringh_notify_enable_user(struct vringh *vrh); void vringh_notify_disable_user(struct vringh *vrh); /* Helpers for kernelspace vrings.
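 *
 * The kernel-side flow mirrors the userspace sketch above, except that
 * the ring is addressed with plain pointers and descriptor fetching
 * takes a gfp_t (again purely illustrative):
 *
 *	vringh_init_kern(&vrh, features, num, true, desc, avail, used);
 *	err = vringh_getdesc_kern(&vrh, &riov, &wiov, &head, GFP_KERNEL);
 *	if (err == 1)
 *		vringh_complete_kern(&vrh, head, bytes_written);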
*/ int vringh_init_kern(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, struct vring_desc *desc, struct vring_avail *avail, struct vring_used *used); static inline void vringh_kiov_init(struct vringh_kiov *kiov, struct kvec *kvec, unsigned num) { kiov->used = kiov->i = 0; kiov->consumed = 0; kiov->max_num = num; kiov->iov = kvec; } static inline void vringh_kiov_reset(struct vringh_kiov *kiov) { kiov->iov[kiov->i].iov_len += kiov->consumed; kiov->iov[kiov->i].iov_base -= kiov->consumed; kiov->consumed = 0; kiov->i = 0; } static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov) { if (kiov->max_num & VRINGH_IOV_ALLOCATED) kfree(kiov->iov); kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0; kiov->iov = NULL; } int vringh_getdesc_kern(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, u16 *head, gfp_t gfp); ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len); ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov, const void *src, size_t len); void vringh_abandon_kern(struct vringh *vrh, unsigned int num); int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len); bool vringh_notify_enable_kern(struct vringh *vrh); void vringh_notify_disable_kern(struct vringh *vrh); int vringh_need_notify_kern(struct vringh *vrh); /* Notify the guest about buffers added to the used ring */ static inline void vringh_notify(struct vringh *vrh) { if (vrh->notify) vrh->notify(vrh); } static inline bool vringh_is_little_endian(const struct vringh *vrh) { return vrh->little_endian || virtio_legacy_is_little_endian(); } static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val) { return __virtio16_to_cpu(vringh_is_little_endian(vrh), val); } static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val) { return __cpu_to_virtio16(vringh_is_little_endian(vrh), val); } static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val) { return __virtio32_to_cpu(vringh_is_little_endian(vrh), val); } static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val) { return __cpu_to_virtio32(vringh_is_little_endian(vrh), val); } static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val) { return __virtio64_to_cpu(vringh_is_little_endian(vrh), val); } static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) { return __cpu_to_virtio64(vringh_is_little_endian(vrh), val); } #endif /* _LINUX_VRINGH_H */ crc-itu-t.h 0000644 00000001023 14722070374 0006526 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * crc-itu-t.h - CRC ITU-T V.41 routine * * Implements the standard CRC ITU-T V.41: * Width 16 * Poly 0x1021 (x^16 + x^12 + x^5 + 1) * Init 0 */ #ifndef CRC_ITU_T_H #define CRC_ITU_T_H #include <linux/types.h> extern u16 const crc_itu_t_table[256]; extern u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len); static inline u16 crc_itu_t_byte(u16 crc, const u8 data) { return (crc << 8) ^ crc_itu_t_table[((crc >> 8) ^ data) & 0xff]; } #endif /* CRC_ITU_T_H */ hdlc.h 0000644 00000006300 14722070374 0005634 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic HDLC support routines for Linux * * Copyright (C) 1999-2005 Krzysztof Halasa <khc@pm.waw.pl> */ #ifndef __HDLC_H #define __HDLC_H #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/hdlc/ioctl.h> #include <uapi/linux/hdlc.h> /* This structure is a private property of HDLC protocols.
Hardware drivers have no interest here */ struct hdlc_proto { int (*open)(struct net_device *dev); void (*close)(struct net_device *dev); void (*start)(struct net_device *dev); /* if open & DCD */ void (*stop)(struct net_device *dev); /* if open & !DCD */ void (*detach)(struct net_device *dev); int (*ioctl)(struct net_device *dev, struct ifreq *ifr); __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); int (*netif_rx)(struct sk_buff *skb); netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); struct module *module; struct hdlc_proto *next; /* next protocol in the list */ }; /* Pointed to by netdev_priv(dev) */ typedef struct hdlc_device { /* used by HDLC layer to take control over HDLC device from hw driver*/ int (*attach)(struct net_device *dev, unsigned short encoding, unsigned short parity); /* hardware driver must handle this instead of dev->hard_start_xmit */ netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); /* Things below are for HDLC layer internal use only */ const struct hdlc_proto *proto; int carrier; int open; spinlock_t state_lock; void *state; void *priv; } hdlc_device; /* Exported from hdlc module */ /* Called by hardware driver when a user requests HDLC service */ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); /* Must be used by hardware driver on module startup/exit */ #define register_hdlc_device(dev) register_netdev(dev) void unregister_hdlc_device(struct net_device *dev); void register_hdlc_protocol(struct hdlc_proto *proto); void unregister_hdlc_protocol(struct hdlc_proto *proto); struct net_device *alloc_hdlcdev(void *priv); static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev) { return netdev_priv(dev); } static __inline__ void debug_frame(const struct sk_buff *skb) { int i; for (i=0; i < skb->len; i++) { if (i == 100) { printk("...\n"); return; } printk(" %02X", skb->data[i]); } printk("\n"); } /* Must be called by hardware driver when HDLC device is being opened */ int hdlc_open(struct net_device *dev); /* Must be called by hardware driver when HDLC device is being closed */ void hdlc_close(struct net_device *dev); /* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, size_t size); /* May be used by hardware driver to gain control over HDLC device */ int detach_hdlc_protocol(struct net_device *dev); static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb, struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); skb->dev = dev; skb_reset_mac_header(skb); if (hdlc->proto->type_trans) return hdlc->proto->type_trans(skb, dev); else return htons(ETH_P_HDLC); } #endif /* __HDLC_H */ pm_opp.h 0000644 00000026645 14722070374 0006232 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Generic OPP Interface * * Copyright (C) 2009-2010 Texas Instruments Incorporated. 
* Nishanth Menon * Romit Dasgupta * Kevin Hilman */ #ifndef __LINUX_OPP_H__ #define __LINUX_OPP_H__ #include <linux/err.h> #include <linux/notifier.h> struct clk; struct regulator; struct dev_pm_opp; struct device; struct opp_table; enum dev_pm_opp_event { OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, }; /** * struct dev_pm_opp_supply - Power supply voltage/current values * @u_volt: Target voltage in microvolts corresponding to this OPP * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP * @u_amp: Maximum current drawn by the device in microamperes * * This structure stores the voltage/current values for a single power supply. */ struct dev_pm_opp_supply { unsigned long u_volt; unsigned long u_volt_min; unsigned long u_volt_max; unsigned long u_amp; }; /** * struct dev_pm_opp_info - OPP freq/voltage/current values * @rate: Target clk rate in hz * @supplies: Array of voltage/current values for all power supplies * * This structure stores the freq/voltage/current values for a single OPP. */ struct dev_pm_opp_info { unsigned long rate; struct dev_pm_opp_supply *supplies; }; /** * struct dev_pm_set_opp_data - Set OPP data * @old_opp: Old OPP info * @new_opp: New OPP info * @regulators: Array of regulator pointers * @regulator_count: Number of regulators * @clk: Pointer to clk * @dev: Pointer to the struct device * * This structure contains all information required for setting an OPP. */ struct dev_pm_set_opp_data { struct dev_pm_opp_info old_opp; struct dev_pm_opp_info new_opp; struct regulator **regulators; unsigned int regulator_count; struct clk *clk; struct device *dev; }; #if defined(CONFIG_PM_OPP) struct opp_table *dev_pm_opp_get_opp_table(struct device *dev); struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index); void dev_pm_opp_put_opp_table(struct opp_table *opp_table); unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); int dev_pm_opp_get_opp_count(struct device *dev); unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev); unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev); unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev); struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available); struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, unsigned int level); struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, unsigned long u_volt); struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq); void dev_pm_opp_put(struct dev_pm_opp *opp); int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt); void dev_pm_opp_remove(struct device *dev, unsigned long freq); void dev_pm_opp_remove_all_dynamic(struct device *dev); int dev_pm_opp_enable(struct device *dev, unsigned long freq); int dev_pm_opp_disable(struct device *dev, unsigned long freq); int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb); int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb); struct opp_table 
*dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count); void dev_pm_opp_put_supported_hw(struct opp_table *opp_table); struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name); void dev_pm_opp_put_prop_name(struct opp_table *opp_table); struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); void dev_pm_opp_put_regulators(struct opp_table *opp_table); struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); void dev_pm_opp_detach_genpd(struct opp_table *opp_table); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); void dev_pm_opp_remove_table(struct device *dev); void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); #else static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) { return ERR_PTR(-ENOTSUPP); } static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index) { return ERR_PTR(-ENOTSUPP); } static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {} static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) { return 0; } static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) { return 0; } static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) { return 0; } static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) { return false; } static inline int dev_pm_opp_get_opp_count(struct device *dev) { return 0; } static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) { return 0; } static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) { return 0; } static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) { return 0; } static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) { return 0; } static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, unsigned int level) { return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, unsigned long u_volt) { return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { return ERR_PTR(-ENOTSUPP); } static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) { return -ENOTSUPP; } static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) { } static inline void 
dev_pm_opp_remove_all_dynamic(struct device *dev) { } static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) { return 0; } static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq) { return 0; } static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) { return -ENOTSUPP; } static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb) { return -ENOTSUPP; } static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count) { return ERR_PTR(-ENOTSUPP); } static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)) { return ERR_PTR(-ENOTSUPP); } static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) { return ERR_PTR(-ENOTSUPP); } static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) { return ERR_PTR(-ENOTSUPP); } static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name) { return ERR_PTR(-ENOTSUPP); } static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs) { return ERR_PTR(-ENOTSUPP); } static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate) { return -ENOTSUPP; } static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { return -ENOTSUPP; } static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { return -ENOTSUPP; } static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { return -EINVAL; } static inline void dev_pm_opp_remove_table(struct device *dev) { } static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) { } #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int dev_pm_opp_of_add_table(struct device *dev); int dev_pm_opp_of_add_table_indexed(struct device *dev, int index); void dev_pm_opp_of_remove_table(struct device *dev); int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); void dev_pm_opp_of_register_em(struct cpumask *cpus); #else static inline int dev_pm_opp_of_add_table(struct device *dev) { return -ENOTSUPP; } static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index) { return -ENOTSUPP; } static inline void dev_pm_opp_of_remove_table(struct device *dev) { } static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask 
*cpumask) { return -ENOTSUPP; } static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) { } static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { return -ENOTSUPP; } static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) { return NULL; } static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) { return NULL; } static inline void dev_pm_opp_of_register_em(struct cpumask *cpus) { } static inline int of_get_required_opp_performance_state(struct device_node *np, int index) { return -ENOTSUPP; } #endif #endif /* __LINUX_OPP_H__ */ sound.h 0000644 00000001255 14722070374 0006056 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SOUND_H #define _LINUX_SOUND_H #include <uapi/linux/sound.h> /* * Sound core interface functions */ struct device; extern int register_sound_special(const struct file_operations *fops, int unit); extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev); extern int register_sound_mixer(const struct file_operations *fops, int dev); extern int register_sound_dsp(const struct file_operations *fops, int dev); extern void unregister_sound_special(int unit); extern void unregister_sound_mixer(int unit); extern void unregister_sound_dsp(int unit); #endif /* _LINUX_SOUND_H */ hid-sensor-hub.h 0000644 00000021214 14722070374 0007552 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * HID Sensors Driver * Copyright (c) 2012, Intel Corporation. */ #ifndef _HID_SENSORS_HUB_H #define _HID_SENSORS_HUB_H #include <linux/hid.h> #include <linux/hid-sensor-ids.h> #include <linux/iio/iio.h> #include <linux/iio/trigger.h> /** * struct hid_sensor_hub_attribute_info - Attribute info * @usage_id: Parent usage id of a physical device. * @attrib_id: Attribute id for this attribute. * @report_id: Report id in which this information resides. * @index: Field index in the report. * @units: Measurement unit for this attribute. * @unit_expo: Exponent used in the data. * @size: Size in bytes for data size. * @logical_minimum: Logical minimum value for this attribute. * @logical_maximum: Logical maximum value for this attribute. */ struct hid_sensor_hub_attribute_info { u32 usage_id; u32 attrib_id; s32 report_id; s32 index; s32 units; s32 unit_expo; s32 size; s32 logical_minimum; s32 logical_maximum; }; /** * struct sensor_hub_pending - Synchronous read pending information * @status: Pending status true/false. * @ready: Completion synchronization data. * @usage_id: Usage id for physical device, E.g. Gyro usage id. * @attr_usage_id: Usage Id of a field, E.g. X-AXIS for a gyro. * @raw_size: Response size for a read request. * @raw_data: Placeholder for received response. */ struct sensor_hub_pending { bool status; struct completion ready; u32 usage_id; u32 attr_usage_id; int raw_size; u8 *raw_data; }; /** * struct hid_sensor_hub_device - Stores the hub instance data * @hdev: Stores the hid instance. * @vendor_id: Vendor id of hub device. * @product_id: Product id of hub device. * @usage: Usage id for this hub device instance. * @start_collection_index: Starting index for a phy type collection * @end_collection_index: Last index for a phy type collection * @mutex_ptr: synchronizing mutex pointer. * @pending: Holds information of pending sync read request.
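 *
 * A client driver does not normally allocate this structure itself; the
 * sensor hub core registers one platform device per physical sensor and
 * passes this structure as its platform data. A probe sketch (illustrative
 * only, assuming a platform driver bound to such a client device):
 *
 *	struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);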
*/ struct hid_sensor_hub_device { struct hid_device *hdev; u32 vendor_id; u32 product_id; u32 usage; int start_collection_index; int end_collection_index; struct mutex *mutex_ptr; struct sensor_hub_pending pending; }; /** * struct hid_sensor_hub_callbacks - Client callback functions * @pdev: Platform device instance of the client driver. * @suspend: Suspend callback. * @resume: Resume callback. * @capture_sample: Callback to get a sample. * @send_event: Send notification to indicate all samples are * captured, so they can be processed and the event sent. */ struct hid_sensor_hub_callbacks { struct platform_device *pdev; int (*suspend)(struct hid_sensor_hub_device *hsdev, void *priv); int (*resume)(struct hid_sensor_hub_device *hsdev, void *priv); int (*capture_sample)(struct hid_sensor_hub_device *hsdev, u32 usage_id, size_t raw_len, char *raw_data, void *priv); int (*send_event)(struct hid_sensor_hub_device *hsdev, u32 usage_id, void *priv); }; /** * sensor_hub_device_open() - Open hub device * @hsdev: Hub device instance. * * Used to open the hid device for the sensor hub. */ int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev); /** * sensor_hub_device_close() - Close hub device * @hsdev: Hub device instance. * * Used to close the hid device for the sensor hub. */ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev); /* Registration functions */ /** * sensor_hub_register_callback() - Register client callbacks * @hsdev: Hub device instance. * @usage_id: Usage id of the client (E.g. 0x200076 for Gyro). * @usage_callback: Callback function storage * * Used to register callbacks by client processing drivers. The sensor * hub core driver will call these callbacks to offload processing * of data streams and notifications. */ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev, u32 usage_id, struct hid_sensor_hub_callbacks *usage_callback); /** * sensor_hub_remove_callback() - Remove client callbacks * @hsdev: Hub device instance. * @usage_id: Usage id of the client (E.g. 0x200076 for Gyro). * * If there is a callback registered, this call will remove that * callback, stopping data and event notifications. */ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev, u32 usage_id); /* Hid sensor hub core interfaces */ /** * sensor_hub_input_get_attribute_info() - Get attribute information * @hsdev: Hub device instance. * @type: Type of this attribute, input/output/feature * @usage_id: Attribute usage id of parent physical device as per spec * @attr_usage_id: Attribute usage id as per spec * @info: return information about attribute after parsing report * * Parses the report and returns the attribute information such as report id, * field index, units and exponent etc. */ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, u8 type, u32 usage_id, u32 attr_usage_id, struct hid_sensor_hub_attribute_info *info); /** * sensor_hub_input_attr_get_raw_value() - Synchronous read request * @hsdev: Hub device instance. * @usage_id: Attribute usage id of parent physical device as per spec * @attr_usage_id: Attribute usage id as per spec * @report_id: Report id to look for * @flag: Synchronous or asynchronous read * @is_signed: If true then fields < 32 bits will be sign-extended * * Issues a synchronous or asynchronous read request for an input attribute. * Returns data up to 32 bits.
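 *
 * Example (an illustrative sketch; the usage ids come from
 * <linux/hid-sensor-ids.h>, and @report_id would come from a previously
 * parsed &struct hid_sensor_hub_attribute_info):
 *
 *	int raw = sensor_hub_input_attr_get_raw_value(hsdev,
 *				HID_USAGE_SENSOR_ACCEL_3D,
 *				HID_USAGE_SENSOR_ACCEL_X_AXIS,
 *				report_id, SENSOR_HUB_SYNC, true);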
*/ enum sensor_hub_read_flags { SENSOR_HUB_SYNC, SENSOR_HUB_ASYNC, }; int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, u32 usage_id, u32 attr_usage_id, u32 report_id, enum sensor_hub_read_flags flag, bool is_signed ); /** * sensor_hub_set_feature() - Feature set request * @hsdev: Hub device instance. * @report_id: Report id to look for * @field_index: Field index inside a report * @buffer_size: size of the buffer * @buffer: buffer to use in the feature set * * Used to set a field in feature report. For example this can set polling * interval, sensitivity, activate/deactivate state. */ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, u32 field_index, int buffer_size, void *buffer); /** * sensor_hub_get_feature() - Feature get request * @hsdev: Hub device instance. * @report_id: Report id to look for * @field_index: Field index inside a report * @buffer_size: size of the buffer * @buffer: buffer to copy output * * Used to get a field in feature report. For example this can get polling * interval, sensitivity, activate/deactivate state. On success it returns * number of bytes copied to buffer. On failure, it returns value < 0. */ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, u32 field_index, int buffer_size, void *buffer); /* hid-sensor-attributes */ /* Common hid sensor iio structure */ struct hid_sensor_common { struct hid_sensor_hub_device *hsdev; struct platform_device *pdev; unsigned usage_id; atomic_t data_ready; atomic_t user_requested_state; atomic_t runtime_pm_enable; int poll_interval; int raw_hystersis; int latency_ms; struct iio_trigger *trigger; int timestamp_ns_scale; struct hid_sensor_hub_attribute_info poll; struct hid_sensor_hub_attribute_info report_state; struct hid_sensor_hub_attribute_info power_state; struct hid_sensor_hub_attribute_info sensitivity; struct hid_sensor_hub_attribute_info report_latency; struct work_struct work; }; /* Convert from hid unit expo to regular exponent */ static inline int hid_sensor_convert_exponent(int unit_expo) { if (unit_expo < 0x08) return unit_expo; else if (unit_expo <= 0x0f) return -(0x0f-unit_expo+1); else return 0; } int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, u32 usage_id, struct hid_sensor_common *st); int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st, int val1, int val2); int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st, int *val1, int *val2); int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st, int val1, int val2); int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st, int *val1, int *val2); int hid_sensor_get_usage_index(struct hid_sensor_hub_device *hsdev, u32 report_id, int field_index, u32 usage_id); int hid_sensor_format_scale(u32 usage_id, struct hid_sensor_hub_attribute_info *attr_info, int *val0, int *val1); s32 hid_sensor_read_poll_value(struct hid_sensor_common *st); int64_t hid_sensor_convert_timestamp(struct hid_sensor_common *st, int64_t raw_value); bool hid_sensor_batch_mode_supported(struct hid_sensor_common *st); int hid_sensor_set_report_latency(struct hid_sensor_common *st, int latency); int hid_sensor_get_report_latency(struct hid_sensor_common *st); #endif irq_sim.h 0000644 00000001731 14722070374 0006370 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl> */ #ifndef _LINUX_IRQ_SIM_H #define _LINUX_IRQ_SIM_H #include <linux/irq_work.h> #include <linux/device.h> /* * 
Provides a framework for allocating simulated interrupts which can be * requested like normal irqs and enqueued from process context. */ struct irq_sim_work_ctx { struct irq_work work; unsigned long *pending; }; struct irq_sim_irq_ctx { int irqnum; bool enabled; }; struct irq_sim { struct irq_sim_work_ctx work_ctx; int irq_base; unsigned int irq_count; struct irq_sim_irq_ctx *irqs; }; int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs); int devm_irq_sim_init(struct device *dev, struct irq_sim *sim, unsigned int num_irqs); void irq_sim_fini(struct irq_sim *sim); void irq_sim_fire(struct irq_sim *sim, unsigned int offset); int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset); #endif /* _LINUX_IRQ_SIM_H */ kmsg_dump.h 0000644 00000005553 14722070374 0006721 0 ustar 00 /* * linux/include/kmsg_dump.h * * Copyright (C) 2009 Net Insight AB * * Author: Simon Kagstrom <simon.kagstrom@netinsight.net> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #ifndef _LINUX_KMSG_DUMP_H #define _LINUX_KMSG_DUMP_H #include <linux/errno.h> #include <linux/list.h> /* * Keep this list arranged in rough order of priority. Anything listed after * KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump * is passed to the kernel. */ enum kmsg_dump_reason { KMSG_DUMP_UNDEF, KMSG_DUMP_PANIC, KMSG_DUMP_OOPS, KMSG_DUMP_EMERG, KMSG_DUMP_RESTART, KMSG_DUMP_HALT, KMSG_DUMP_POWEROFF, }; /** * struct kmsg_dumper - kernel crash message dumper structure * @list: Entry in the dumper list (private) * @dump: Call into dumping code which will retrieve the data with * through the record iterator * @max_reason: filter for highest reason number that should be dumped * @registered: Flag that specifies if this is already registered */ struct kmsg_dumper { struct list_head list; void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason); enum kmsg_dump_reason max_reason; bool active; bool registered; /* private state of the kmsg iterator */ u32 cur_idx; u32 next_idx; u64 cur_seq; u64 next_seq; }; #ifdef CONFIG_PRINTK void kmsg_dump(enum kmsg_dump_reason reason); bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, char *line, size_t size, size_t *len); bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, char *line, size_t size, size_t *len); bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, char *buf, size_t size, size_t *len); void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper); void kmsg_dump_rewind(struct kmsg_dumper *dumper); int kmsg_dump_register(struct kmsg_dumper *dumper); int kmsg_dump_unregister(struct kmsg_dumper *dumper); #else static inline void kmsg_dump(enum kmsg_dump_reason reason) { } static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, const char *line, size_t size, size_t *len) { return false; } static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, const char *line, size_t size, size_t *len) { return false; } static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, char *buf, size_t size, size_t *len) { return false; } static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper) { } static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper) { } static inline int kmsg_dump_register(struct kmsg_dumper *dumper) { return -EINVAL; } static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper) { return 
-EINVAL; } #endif #endif /* _LINUX_KMSG_DUMP_H */ device.h 0000644 00000207511 14722070374 0006170 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * device.h - generic, centralized driver model * * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2008-2009 Novell Inc. * * See Documentation/driver-api/driver-model/ for more information. */ #ifndef _DEVICE_H_ #define _DEVICE_H_ #include <linux/ioport.h> #include <linux/kobject.h> #include <linux/klist.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/compiler.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/pm.h> #include <linux/atomic.h> #include <linux/ratelimit.h> #include <linux/uidgid.h> #include <linux/gfp.h> #include <linux/overflow.h> #include <asm/device.h> struct device; struct device_private; struct device_driver; struct driver_private; struct module; struct class; struct subsys_private; struct bus_type; struct device_node; struct fwnode_handle; struct iommu_ops; struct iommu_group; struct iommu_fwspec; struct dev_pin_info; struct iommu_param; struct bus_attribute { struct attribute attr; ssize_t (*show)(struct bus_type *bus, char *buf); ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); }; #define BUS_ATTR_RW(_name) \ struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) #define BUS_ATTR_RO(_name) \ struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) #define BUS_ATTR_WO(_name) \ struct bus_attribute bus_attr_##_name = __ATTR_WO(_name) extern int __must_check bus_create_file(struct bus_type *, struct bus_attribute *); extern void bus_remove_file(struct bus_type *, struct bus_attribute *); /** * struct bus_type - The bus type of the device * * @name: The name of the bus. * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id). * @dev_root: Default device to use as the parent. * @bus_groups: Default attributes of the bus. * @dev_groups: Default attributes of the devices on the bus. * @drv_groups: Default attributes of the device drivers on the bus. * @match: Called, perhaps multiple times, whenever a new device or driver * is added for this bus. It should return a positive value if the * given device can be handled by the given driver and zero * otherwise. It may also return an error code if determining that * the driver supports the device is not possible. In case of * -EPROBE_DEFER it will queue the device for deferred probing. * @uevent: Called when a device is added, removed, or a few other things * that generate uevents to add the environment variables. * @probe: Called when a new device or driver is added to this bus; calls * the matched driver's probe to initialize the device. * @remove: Called when a device is removed from this bus. * @shutdown: Called at shut-down time to quiesce the device. * * @online: Called to put the device back online (after offlining it). * @offline: Called to put the device offline for hot-removal. May fail. * * @suspend: Called when a device on this bus wants to go to sleep mode. * @resume: Called to bring a device on this bus out of sleep mode. * @num_vf: Called to find out how many virtual functions a device on this * bus supports. * @dma_configure: Called to set up DMA configuration on a device on * this bus. * @pm: Power management operations of this bus, which call through to the * matched device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU * driver implementations to a bus and allow the driver to do * bus-specific setup * @p: The private data of the driver core, only the driver core can * touch this. * @lock_key: Lock class key for use by the lock validator * @need_parent_lock: When probing or removing a device on this bus, the * device core should lock the device's parent. * * A bus is a channel between the processor and one or more devices. For the * purposes of the device model, all devices are connected via a bus, even if * it is an internal, virtual, "platform" bus. Buses can plug into each other. * A USB controller is usually a PCI device, for example. The device model * represents the actual connections between buses and the devices they control. * A bus is represented by the bus_type structure. It contains the name, the * default attributes, the bus' methods, PM operations, and the driver core's * private data. */ struct bus_type { const char *name; const char *dev_name; struct device *dev_root; const struct attribute_group **bus_groups; const struct attribute_group **dev_groups; const struct attribute_group **drv_groups; int (*match)(struct device *dev, struct device_driver *drv); int (*uevent)(struct device *dev, struct kobj_uevent_env *env); int (*probe)(struct device *dev); int (*remove)(struct device *dev); void (*shutdown)(struct device *dev); int (*online)(struct device *dev); int (*offline)(struct device *dev); int (*suspend)(struct device *dev, pm_message_t state); int (*resume)(struct device *dev); int (*num_vf)(struct device *dev); int (*dma_configure)(struct device *dev); const struct dev_pm_ops *pm; const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; bool need_parent_lock; }; extern int __must_check bus_register(struct bus_type *bus); extern void bus_unregister(struct bus_type *bus); extern int __must_check bus_rescan_devices(struct bus_type *bus); /* iterator helpers for buses */ struct subsys_dev_iter { struct klist_iter ki; const struct device_type *type; }; void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys, struct device *start, const struct device_type *type); struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); void subsys_dev_iter_exit(struct subsys_dev_iter *iter); int device_match_name(struct device *dev, const void *name); int device_match_of_node(struct device *dev, const void *np); int device_match_fwnode(struct device *dev, const void *fwnode); int device_match_devt(struct device *dev, const void *pdevt); int device_match_acpi_dev(struct device *dev, const void *adev); int device_match_any(struct device *dev, const void *unused); int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, int (*fn)(struct device *dev, void *data)); struct device *bus_find_device(struct bus_type *bus, struct device *start, const void *data, int (*match)(struct device *dev, const void *data)); /** * bus_find_device_by_name - device iterator for locating a particular device * of a specific name. * @bus: bus type * @start: Device to begin with * @name: name of the device to match */ static inline struct device *bus_find_device_by_name(struct bus_type *bus, struct device *start, const char *name) { return bus_find_device(bus, start, name, device_match_name); } /** * bus_find_device_by_of_node : device iterator for locating a particular device * matching the of_node. * @bus: bus type * @np: of_node of the device to match. 
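 *
 * Example (an illustrative sketch; &platform_bus_type is just one possible
 * bus, and @np would come from the caller). The underlying bus_find_device()
 * takes a reference on the returned device, which must be dropped with
 * put_device():
 *
 *	struct device *dev = bus_find_device_by_of_node(&platform_bus_type, np);
 *	if (dev)
 *		put_device(dev);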
*/ static inline struct device * bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np) { return bus_find_device(bus, NULL, np, device_match_of_node); } /** * bus_find_device_by_fwnode : device iterator for locating a particular device * matching the fwnode. * @bus: bus type * @fwnode: fwnode of the device to match. */ static inline struct device * bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode) { return bus_find_device(bus, NULL, fwnode, device_match_fwnode); } /** * bus_find_device_by_devt : device iterator for locating a particular device * matching the device number. * @bus: bus type * @devt: device number (dev_t) of the device to match. */ static inline struct device *bus_find_device_by_devt(struct bus_type *bus, dev_t devt) { return bus_find_device(bus, NULL, &devt, device_match_devt); } /** * bus_find_next_device - Find the next device after a given device in a * given bus. * @bus: bus type * @cur: device to begin the search with. */ static inline struct device * bus_find_next_device(struct bus_type *bus, struct device *cur) { return bus_find_device(bus, cur, NULL, device_match_any); } #ifdef CONFIG_ACPI struct acpi_device; /** * bus_find_device_by_acpi_dev : device iterator for locating a particular device * matching the ACPI COMPANION device. * @bus: bus type * @adev: ACPI COMPANION device to match. */ static inline struct device * bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev) { return bus_find_device(bus, NULL, adev, device_match_acpi_dev); } #else static inline struct device * bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev) { return NULL; } #endif struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, struct device *hint); int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, void *data, int (*fn)(struct device_driver *, void *)); void bus_sort_breadthfirst(struct bus_type *bus, int (*compare)(const struct device *a, const struct device *b)); /* * Bus notifiers: Get notified of addition/removal of devices * and binding/unbinding of drivers to devices. * In the long run, it should be a replacement for the platform * notify hooks. */ struct notifier_block; extern int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb); extern int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb); /* All 8 notifiers below get called with the target struct device * * as an argument. Note that those functions are likely to be called * with the device lock held in the core, so be careful. */ #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */ #define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */ #define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be bound */ #define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */ #define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be unbound */ #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound from the device */ #define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */ extern struct kset *bus_get_kset(struct bus_type *bus); extern struct klist *bus_get_device_klist(struct bus_type *bus); /** * enum probe_type - device driver probe type to try * Device drivers may opt in for special handling of their * respective probe routines. This tells the core what to * expect and prefer.
* * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well * whether probed synchronously or asynchronously. * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose * probing order is not essential for booting the system may * opt into executing their probes asynchronously. * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need * their probe routines to run synchronously with driver and * device registration (with the exception of -EPROBE_DEFER * handling - re-probing always ends up being done asynchronously). * * Note that the end goal is to switch the kernel to use asynchronous * probing by default, so annotating drivers with * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us * to speed up the boot process while we are validating the rest of the * drivers. */ enum probe_type { PROBE_DEFAULT_STRATEGY, PROBE_PREFER_ASYNCHRONOUS, PROBE_FORCE_SYNCHRONOUS, }; /** * struct device_driver - The basic device driver structure * @name: Name of the device driver. * @bus: The bus to which the devices of this driver belong. * @owner: The module owner. * @mod_name: Used for built-in modules. * @suppress_bind_attrs: Disables bind/unbind via sysfs. * @probe_type: Type of the probe (synchronous or asynchronous) to use. * @of_match_table: The open firmware table. * @acpi_match_table: The ACPI match table. * @probe: Called to query the existence of a specific device, * whether this driver can work with it, and bind the driver * to a specific device. * @remove: Called when the device is removed from the system to * unbind a device from this driver. * @shutdown: Called at shut-down time to quiesce the device. * @suspend: Called to put the device to sleep mode. Usually to a * low power state. * @resume: Called to bring a device from sleep mode. * @groups: Default attributes that get created by the driver core * automatically. * @dev_groups: Additional attributes attached to the device instance once * it is bound to the driver. * @pm: Power management operations of the device which matched * this driver. * @coredump: Called when the sysfs entry is written to. The device driver * is expected to call the dev_coredump API resulting in a * uevent. * @p: Driver core's private data, no one other than the driver * core can touch this. * * The device driver-model tracks all of the drivers known to the system. * The main reason for this tracking is to enable the driver core to match * up drivers with new devices. Once drivers are known objects within the * system, however, a number of other things become possible. Device drivers * can export information and configuration variables that are independent * of any specific device.
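 *
 * Example (a minimal, illustrative sketch; the "foo" names are hypothetical):
 *
 *	static struct device_driver foo_driver = {
 *		.name	= "foo",
 *		.bus	= &foo_bus_type,
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *
 *	ret = driver_register(&foo_driver);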
*/ struct device_driver { const char *name; struct bus_type *bus; struct module *owner; const char *mod_name; /* used for built-in modules */ bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ enum probe_type probe_type; const struct of_device_id *of_match_table; const struct acpi_device_id *acpi_match_table; int (*probe) (struct device *dev); int (*remove) (struct device *dev); void (*shutdown) (struct device *dev); int (*suspend) (struct device *dev, pm_message_t state); int (*resume) (struct device *dev); const struct attribute_group **groups; const struct attribute_group **dev_groups; const struct dev_pm_ops *pm; void (*coredump) (struct device *dev); struct driver_private *p; }; extern int __must_check driver_register(struct device_driver *drv); extern void driver_unregister(struct device_driver *drv); extern struct device_driver *driver_find(const char *name, struct bus_type *bus); extern int driver_probe_done(void); extern void wait_for_device_probe(void); /* sysfs interface for exporting driver attributes */ struct driver_attribute { struct attribute attr; ssize_t (*show)(struct device_driver *driver, char *buf); ssize_t (*store)(struct device_driver *driver, const char *buf, size_t count); }; #define DRIVER_ATTR_RW(_name) \ struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) #define DRIVER_ATTR_RO(_name) \ struct driver_attribute driver_attr_##_name = __ATTR_RO(_name) #define DRIVER_ATTR_WO(_name) \ struct driver_attribute driver_attr_##_name = __ATTR_WO(_name) extern int __must_check driver_create_file(struct device_driver *driver, const struct driver_attribute *attr); extern void driver_remove_file(struct device_driver *driver, const struct driver_attribute *attr); int driver_set_override(struct device *dev, const char **override, const char *s, size_t len); extern int __must_check driver_for_each_device(struct device_driver *drv, struct device *start, void *data, int (*fn)(struct device *dev, void *)); struct device *driver_find_device(struct device_driver *drv, struct device *start, const void *data, int (*match)(struct device *dev, const void *data)); /** * driver_find_device_by_name - device iterator for locating a particular device * of a specific name. * @drv: the driver we're iterating * @name: name of the device to match */ static inline struct device *driver_find_device_by_name(struct device_driver *drv, const char *name) { return driver_find_device(drv, NULL, name, device_match_name); } /** * driver_find_device_by_of_node- device iterator for locating a particular device * by of_node pointer. * @drv: the driver we're iterating * @np: of_node pointer to match. */ static inline struct device * driver_find_device_by_of_node(struct device_driver *drv, const struct device_node *np) { return driver_find_device(drv, NULL, np, device_match_of_node); } /** * driver_find_device_by_fwnode- device iterator for locating a particular device * by fwnode pointer. * @drv: the driver we're iterating * @fwnode: fwnode pointer to match. */ static inline struct device * driver_find_device_by_fwnode(struct device_driver *drv, const struct fwnode_handle *fwnode) { return driver_find_device(drv, NULL, fwnode, device_match_fwnode); } /** * driver_find_device_by_devt- device iterator for locating a particular device * by devt. * @drv: the driver we're iterating * @devt: devt pointer to match. 
*/ static inline struct device *driver_find_device_by_devt(struct device_driver *drv, dev_t devt) { return driver_find_device(drv, NULL, &devt, device_match_devt); } static inline struct device *driver_find_next_device(struct device_driver *drv, struct device *start) { return driver_find_device(drv, start, NULL, device_match_any); } #ifdef CONFIG_ACPI /** * driver_find_device_by_acpi_dev : device iterator for locating a particular * device matching the ACPI_COMPANION device. * @drv: the driver we're iterating * @adev: ACPI_COMPANION device to match. */ static inline struct device * driver_find_device_by_acpi_dev(struct device_driver *drv, const struct acpi_device *adev) { return driver_find_device(drv, NULL, adev, device_match_acpi_dev); } #else static inline struct device * driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev) { return NULL; } #endif void driver_deferred_probe_add(struct device *dev); int driver_deferred_probe_check_state(struct device *dev); int driver_deferred_probe_check_state_continue(struct device *dev); /** * struct subsys_interface - interfaces to device functions * @name: name of the device function * @subsys: subsystem of the devices to attach to * @node: the list of functions registered at the subsystem * @add_dev: device hookup to device function handler * @remove_dev: device hookup to device function handler * * Simple interfaces attached to a subsystem. Multiple interfaces can * attach to a subsystem and its devices. Unlike drivers, they do not * exclusively claim or control devices. Interfaces usually represent * a specific functionality of a subsystem/class of devices. */ struct subsys_interface { const char *name; struct bus_type *subsys; struct list_head node; int (*add_dev)(struct device *dev, struct subsys_interface *sif); void (*remove_dev)(struct device *dev, struct subsys_interface *sif); }; int subsys_interface_register(struct subsys_interface *sif); void subsys_interface_unregister(struct subsys_interface *sif); int subsys_system_register(struct bus_type *subsys, const struct attribute_group **groups); int subsys_virtual_register(struct bus_type *subsys, const struct attribute_group **groups); /** * struct class - device classes * @name: Name of the class. * @owner: The module owner. * @class_groups: Default attributes of this class. * @dev_groups: Default attributes of the devices that belong to the class. * @dev_kobj: The kobject that represents this class and links it into the hierarchy. * @dev_uevent: Called when a device is added, removed from this class, or a * few other things that generate uevents to add the environment * variables. * @devnode: Callback to provide the devtmpfs node name. * @class_release: Called to release this class. * @dev_release: Called to release the device. * @shutdown_pre: Called at shut-down time before driver shutdown. * @ns_type: Callbacks so sysfs can determine namespaces. * @namespace: Namespace of the device that belongs to this class. * @get_ownership: Allows class to specify uid/gid of the sysfs directories * for the devices belonging to the class. Usually tied to * device's namespace. * @pm: The default device power management operations of this class. * @p: The private data of the driver core, no one other than the * driver core can touch this. * * A class is a higher-level view of a device that abstracts out low-level * implementation details. Drivers may see a SCSI disk or an ATA disk, but, * at the class level, they are all simply disks.
Classes allow user space * to work with devices based on what they do, rather than how they are * connected or how they work. */ struct class { const char *name; struct module *owner; const struct attribute_group **class_groups; const struct attribute_group **dev_groups; struct kobject *dev_kobj; int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); char *(*devnode)(struct device *dev, umode_t *mode); void (*class_release)(struct class *class); void (*dev_release)(struct device *dev); int (*shutdown_pre)(struct device *dev); const struct kobj_ns_type_operations *ns_type; const void *(*namespace)(struct device *dev); void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid); const struct dev_pm_ops *pm; struct subsys_private *p; }; struct class_dev_iter { struct klist_iter ki; const struct device_type *type; }; extern struct kobject *sysfs_dev_block_kobj; extern struct kobject *sysfs_dev_char_kobj; extern int __must_check __class_register(struct class *class, struct lock_class_key *key); extern void class_unregister(struct class *class); /* This is a #define to keep the compiler from merging different * instances of the __key variable */ #define class_register(class) \ ({ \ static struct lock_class_key __key; \ __class_register(class, &__key); \ }) struct class_compat; struct class_compat *class_compat_register(const char *name); void class_compat_unregister(struct class_compat *cls); int class_compat_create_link(struct class_compat *cls, struct device *dev, struct device *device_link); void class_compat_remove_link(struct class_compat *cls, struct device *dev, struct device *device_link); extern void class_dev_iter_init(struct class_dev_iter *iter, struct class *class, struct device *start, const struct device_type *type); extern struct device *class_dev_iter_next(struct class_dev_iter *iter); extern void class_dev_iter_exit(struct class_dev_iter *iter); extern int class_for_each_device(struct class *class, struct device *start, void *data, int (*fn)(struct device *dev, void *data)); extern struct device *class_find_device(struct class *class, struct device *start, const void *data, int (*match)(struct device *, const void *)); /** * class_find_device_by_name - device iterator for locating a particular device * of a specific name. * @class: class type * @name: name of the device to match */ static inline struct device *class_find_device_by_name(struct class *class, const char *name) { return class_find_device(class, NULL, name, device_match_name); } /** * class_find_device_by_of_node : device iterator for locating a particular device * matching the of_node. * @class: class type * @np: of_node of the device to match. */ static inline struct device * class_find_device_by_of_node(struct class *class, const struct device_node *np) { return class_find_device(class, NULL, np, device_match_of_node); } /** * class_find_device_by_fwnode : device iterator for locating a particular device * matching the fwnode. * @class: class type * @fwnode: fwnode of the device to match. */ static inline struct device * class_find_device_by_fwnode(struct class *class, const struct fwnode_handle *fwnode) { return class_find_device(class, NULL, fwnode, device_match_fwnode); } /** * class_find_device_by_devt : device iterator for locating a particular device * matching the device type. * @class: class type * @devt: device type of the device to match. 
*/ static inline struct device *class_find_device_by_devt(struct class *class, dev_t devt) { return class_find_device(class, NULL, &devt, device_match_devt); } #ifdef CONFIG_ACPI struct acpi_device; /** * class_find_device_by_acpi_dev : device iterator for locating a particular * device matching the ACPI_COMPANION device. * @class: class type * @adev: ACPI_COMPANION device to match. */ static inline struct device * class_find_device_by_acpi_dev(struct class *class, const struct acpi_device *adev) { return class_find_device(class, NULL, adev, device_match_acpi_dev); } #else static inline struct device * class_find_device_by_acpi_dev(struct class *class, const void *adev) { return NULL; } #endif struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *class, struct class_attribute *attr, char *buf); ssize_t (*store)(struct class *class, struct class_attribute *attr, const char *buf, size_t count); }; #define CLASS_ATTR_RW(_name) \ struct class_attribute class_attr_##_name = __ATTR_RW(_name) #define CLASS_ATTR_RO(_name) \ struct class_attribute class_attr_##_name = __ATTR_RO(_name) #define CLASS_ATTR_WO(_name) \ struct class_attribute class_attr_##_name = __ATTR_WO(_name) extern int __must_check class_create_file_ns(struct class *class, const struct class_attribute *attr, const void *ns); extern void class_remove_file_ns(struct class *class, const struct class_attribute *attr, const void *ns); static inline int __must_check class_create_file(struct class *class, const struct class_attribute *attr) { return class_create_file_ns(class, attr, NULL); } static inline void class_remove_file(struct class *class, const struct class_attribute *attr) { return class_remove_file_ns(class, attr, NULL); } /* Simple class attribute that is just a static string */ struct class_attribute_string { struct class_attribute attr; char *str; }; /* Currently read-only only */ #define _CLASS_ATTR_STRING(_name, _mode, _str) \ { __ATTR(_name, _mode, show_class_attr_string, NULL), _str } #define CLASS_ATTR_STRING(_name, _mode, _str) \ struct class_attribute_string class_attr_##_name = \ _CLASS_ATTR_STRING(_name, _mode, _str) extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, char *buf); struct class_interface { struct list_head node; struct class *class; int (*add_dev) (struct device *, struct class_interface *); void (*remove_dev) (struct device *, struct class_interface *); }; extern int __must_check class_interface_register(struct class_interface *); extern void class_interface_unregister(struct class_interface *); extern struct class * __must_check __class_create(struct module *owner, const char *name, struct lock_class_key *key); extern void class_destroy(struct class *cls); /* This is a #define to keep the compiler from merging different * instances of the __key variable */ #define class_create(owner, name) \ ({ \ static struct lock_class_key __key; \ __class_create(owner, name, &__key); \ }) /* * The type of device, "struct device" is embedded in. A class * or bus can contain devices of different types * like "partitions" and "disks", "mouse" and "event". * This identifies the device type and carries type-specific * information, equivalent to the kobj_type of a kobject. * If "name" is specified, the uevent will contain it in * the DEVTYPE variable. 
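 *
 * Example (an illustrative sketch; the "partition" names are hypothetical):
 *
 *	static const struct device_type partition_type = {
 *		.name		= "partition",
 *		.uevent		= partition_uevent,
 *		.release	= partition_release,
 *	};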
*/ struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *dev, struct kobj_uevent_env *env); char *(*devnode)(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid); void (*release)(struct device *dev); const struct dev_pm_ops *pm; }; /* interface for exporting device attributes */ struct device_attribute { struct attribute attr; ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf); ssize_t (*store)(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); }; struct dev_ext_attribute { struct device_attribute attr; void *var; }; ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, char *buf); ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); ssize_t device_show_int(struct device *dev, struct device_attribute *attr, char *buf); ssize_t device_store_int(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, char *buf); ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); #define DEVICE_ATTR(_name, _mode, _show, _store) \ struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) #define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \ struct device_attribute dev_attr_##_name = \ __ATTR_PREALLOC(_name, _mode, _show, _store) #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RW(_name) #define DEVICE_ATTR_RO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RO(_name) #define DEVICE_ATTR_WO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_WO(_name) #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ struct dev_ext_attribute dev_attr_##_name = \ { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) } #define DEVICE_INT_ATTR(_name, _mode, _var) \ struct dev_ext_attribute dev_attr_##_name = \ { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) } #define DEVICE_BOOL_ATTR(_name, _mode, _var) \ struct dev_ext_attribute dev_attr_##_name = \ { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) } #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ struct device_attribute dev_attr_##_name = \ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) extern int device_create_file(struct device *device, const struct device_attribute *entry); extern void device_remove_file(struct device *dev, const struct device_attribute *attr); extern bool device_remove_file_self(struct device *dev, const struct device_attribute *attr); extern int __must_check device_create_bin_file(struct device *dev, const struct bin_attribute *attr); extern void device_remove_bin_file(struct device *dev, const struct bin_attribute *attr); /* device resource management */ typedef void (*dr_release_t)(struct device *dev, void *res); typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); #ifdef CONFIG_DEBUG_DEVRES extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name) __malloc; #define devres_alloc(release, size, gfp) \ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) #define devres_alloc_node(release, size, gfp, nid) \ __devres_alloc_node(release, size, gfp, nid, #release) #else extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid) __malloc; static inline 
void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) { return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); } #endif extern void devres_for_each_res(struct device *dev, dr_release_t release, dr_match_t match, void *match_data, void (*fn)(struct device *, void *, void *), void *data); extern void devres_free(void *res); extern void devres_add(struct device *dev, void *res); extern void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); extern void *devres_get(struct device *dev, void *new_res, dr_match_t match, void *match_data); extern void *devres_remove(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); extern int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); extern int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); /* devres group */ extern void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp); extern void devres_close_group(struct device *dev, void *id); extern void devres_remove_group(struct device *dev, void *id); extern int devres_release_group(struct device *dev, void *id); /* managed devm_k.alloc/kfree for device drivers */ extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; extern __printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) __malloc; extern __printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc; static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { return devm_kmalloc(dev, size, gfp | __GFP_ZERO); } static inline void *devm_kmalloc_array(struct device *dev, size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; return devm_kmalloc(dev, bytes, flags); } static inline void *devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t flags) { return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); } extern void devm_kfree(struct device *dev, const void *p); extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; extern const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp); extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); extern unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order); extern void devm_free_pages(struct device *dev, unsigned long addr); void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res); void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index, resource_size_t *size); /* allows to add/remove a custom action to devres stack */ int devm_add_action(struct device *dev, void (*action)(void *), void *data); void devm_remove_action(struct device *dev, void (*action)(void *), void *data); void devm_release_action(struct device *dev, void (*action)(void *), void *data); static inline int devm_add_action_or_reset(struct device *dev, void (*action)(void *), void *data) { int ret; ret = devm_add_action(dev, action, data); if (ret) action(data); return ret; } /** * devm_alloc_percpu - Resource-managed alloc_percpu * @dev: Device to allocate per-cpu memory for * @type: Type to allocate per-cpu memory for * * Managed alloc_percpu. Per-cpu memory allocated with this function is * automatically freed on driver detach. * * RETURNS: * Pointer to allocated memory on success, NULL on failure. 
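 *
 * Example (an illustrative sketch; "foo_stats" is hypothetical):
 *
 *	struct foo_stats { u64 packets; u64 bytes; };
 *	struct foo_stats __percpu *stats;
 *
 *	stats = devm_alloc_percpu(dev, struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;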
*/ #define devm_alloc_percpu(dev, type) \ ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \ __alignof__(type))) void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, size_t align); void devm_free_percpu(struct device *dev, void __percpu *pdata); struct device_dma_parameters { /* * a low level driver may set these to teach IOMMU code about * sg limitations. */ unsigned int max_segment_size; unsigned long segment_boundary_mask; }; /** * struct device_connection - Device Connection Descriptor * @fwnode: The device node of the connected device * @endpoint: The names of the two devices connected together * @id: Unique identifier for the connection * @list: List head, private, for internal use only * * NOTE: @fwnode is not used together with @endpoint. @fwnode is used when * platform firmware defines the connection. When the connection is registered * with device_connection_add() @endpoint is used instead. */ struct device_connection { struct fwnode_handle *fwnode; const char *endpoint[2]; const char *id; struct list_head list; }; typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep, void *data); void *fwnode_connection_find_match(struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match); void *device_connection_find_match(struct device *dev, const char *con_id, void *data, devcon_match_fn_t match); struct device *device_connection_find(struct device *dev, const char *con_id); void device_connection_add(struct device_connection *con); void device_connection_remove(struct device_connection *con); /** * device_connections_add - Add multiple device connections at once * @cons: Zero terminated array of device connection descriptors */ static inline void device_connections_add(struct device_connection *cons) { struct device_connection *c; for (c = cons; c->endpoint[0]; c++) device_connection_add(c); } /** * device_connections_remove - Remove multiple device connections at once * @cons: Zero terminated array of device connection descriptors */ static inline void device_connections_remove(struct device_connection *cons) { struct device_connection *c; for (c = cons; c->endpoint[0]; c++) device_connection_remove(c); } /** * enum device_link_state - Device link states. * @DL_STATE_NONE: The presence of the drivers is not being tracked. * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present. * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not. * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present). * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present. * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding. */ enum device_link_state { DL_STATE_NONE = -1, DL_STATE_DORMANT = 0, DL_STATE_AVAILABLE, DL_STATE_CONSUMER_PROBE, DL_STATE_ACTIVE, DL_STATE_SUPPLIER_UNBIND, }; /* * Device link flags. * * STATELESS: The core will not remove this link automatically. * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind. * PM_RUNTIME: If set, the runtime PM framework will use this link. * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind. * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds. * MANAGED: The core tracks presence of supplier/consumer drivers (internal). 
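 *
 * Example (an illustrative sketch; device_link_add() is declared further
 * down in this header, and the consumer/supplier device pointers are
 * assumed to come from the caller):
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *	if (!link)
 *		return -EINVAL;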
*/ #define DL_FLAG_STATELESS BIT(0) #define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) #define DL_FLAG_PM_RUNTIME BIT(2) #define DL_FLAG_RPM_ACTIVE BIT(3) #define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4) #define DL_FLAG_AUTOPROBE_CONSUMER BIT(5) #define DL_FLAG_MANAGED BIT(6) /** * struct device_link - Device link representation. * @supplier: The device on the supplier end of the link. * @s_node: Hook to the supplier device's list of links to consumers. * @consumer: The device on the consumer end of the link. * @c_node: Hook to the consumer device's list of links to suppliers. * @status: The state of the link (with respect to the presence of drivers). * @flags: Link flags. * @rpm_active: Whether or not the consumer device is runtime-PM-active. * @kref: Count repeated addition of the same link. * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. * @supplier_preactivated: Supplier has been made active before consumer probe. */ struct device_link { struct device *supplier; struct list_head s_node; struct device *consumer; struct list_head c_node; enum device_link_state status; u32 flags; refcount_t rpm_active; struct kref kref; #ifdef CONFIG_SRCU struct rcu_head rcu_head; #endif bool supplier_preactivated; /* Owned by consumer probe. */ }; /** * enum dl_dev_state - Device driver presence tracking information. * @DL_DEV_NO_DRIVER: There is no driver attached to the device. * @DL_DEV_PROBING: A driver is probing. * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device. * @DL_DEV_UNBINDING: The driver is unbinding from the device. */ enum dl_dev_state { DL_DEV_NO_DRIVER = 0, DL_DEV_PROBING, DL_DEV_DRIVER_BOUND, DL_DEV_UNBINDING, }; /** * struct dev_links_info - Device data related to device links. * @suppliers: List of links to supplier devices. * @consumers: List of links to consumer devices. * @status: Driver status information. */ struct dev_links_info { struct list_head suppliers; struct list_head consumers; enum dl_dev_state status; }; /** * struct device - The basic device structure * @parent: The device's "parent" device, the device to which it is attached. * In most cases, a parent device is some sort of bus or host * controller. If parent is NULL, the device, is a top-level device, * which is not usually what you want. * @p: Holds the private data of the driver core portions of the device. * See the comment of the struct device_private for detail. * @kobj: A top-level, abstract class from which other classes are derived. * @init_name: Initial name of the device. * @type: The type of device. * This identifies the device type and carries type-specific * information. * @mutex: Mutex to synchronize calls to its driver. * @lockdep_mutex: An optional debug lock that a subsystem can use as a * peer lock to gain localized lockdep coverage of the device_lock. * @bus: Type of bus device is on. * @driver: Which driver has allocated this * @platform_data: Platform data specific to the device. * Example: For devices on custom boards, as typical of embedded * and SOC based hardware, Linux often uses platform_data to point * to board-specific structures describing devices and how they * are wired. That can include what ports are available, chip * variants, which GPIO pins act in what additional roles, and so * on. This shrinks the "Board Support Packages" (BSPs) and * minimizes board-specific #ifdefs in drivers. * @driver_data: Private pointer for driver specific info. * @links: Links to suppliers and consumers of this device. * @power: For device power management. 
* See Documentation/driver-api/pm/devices.rst for details. * @pm_domain: Provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions * along with subsystem-level and driver-level callbacks. * @pins: For device pin management. * See Documentation/driver-api/pinctl.rst for details. * @msi_list: Hosts MSI descriptors * @msi_domain: The generic MSI domain this device is using. * @numa_node: NUMA node this device is close to. * @dma_ops: DMA mapping operations for this device. * @dma_mask: Dma mask (if dma'ble device). * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all * hardware supports 64-bit addresses for consistent allocations * such as descriptors. * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA * limit than the device itself supports. * @dma_pfn_offset: offset of the DMA memory range relative to RAM * @dma_parms: A low level driver may set these to teach IOMMU code about * segment limitations. * @dma_pools: Dma pools (if dma'ble device). * @dma_mem: Internal for coherent mem override. * @cma_area: Contiguous memory area for dma allocations * @archdata: For arch-specific additions. * @of_node: Associated device tree node. * @fwnode: Associated device node supplied by platform firmware. * @devt: For creating the sysfs "dev". * @id: device instance * @devres_lock: Spinlock to protect the resource of the device. * @devres_head: The resources list of the device. * @knode_class: The node used to add the device to the class list. * @class: The class of the device. * @groups: Optional attribute groups. * @release: Callback to free the device after all references have * gone away. This should be set by the allocator of the * device (i.e. the bus driver that discovered the device). * @iommu_group: IOMMU group the device belongs to. * @iommu_fwspec: IOMMU-specific properties supplied by firmware. * @iommu_param: Per device generic IOMMU runtime data * * @offline_disabled: If set, the device is permanently online. * @offline: Set after successful invocation of bus type's .offline(). * @of_node_reused: Set if the device-tree node is shared with an ancestor * device. * @dma_coherent: this particular device is dma coherent, even if the * architecture supports non-coherent devices. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. The device structure contains the information * that the device model core needs to model the system. Most subsystems, * however, track additional information about the devices they host. As a * result, it is rare for devices to be represented by bare device structures; * instead, that structure, like kobject structures, is usually embedded within * a higher-level representation of the device. */ struct device { struct kobject kobj; struct device *parent; struct device_private *p; const char *init_name; /* initial name of the device */ const struct device_type *type; struct bus_type *bus; /* type of bus device is on */ struct device_driver *driver; /* which driver has allocated this device */ void *platform_data; /* Platform specific data, device core doesn't touch it */ void *driver_data; /* Driver data, set and get with dev_set_drvdata/dev_get_drvdata */ #ifdef CONFIG_PROVE_LOCKING struct mutex lockdep_mutex; #endif struct mutex mutex; /* mutex to synchronize calls to * its driver.
*/ struct dev_links_info links; struct dev_pm_info power; struct dev_pm_domain *pm_domain; #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN struct irq_domain *msi_domain; #endif #ifdef CONFIG_PINCTRL struct dev_pin_info *pins; #endif #ifdef CONFIG_GENERIC_MSI_IRQ raw_spinlock_t msi_lock; struct list_head msi_list; #endif const struct dma_map_ops *dma_ops; u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for alloc_coherent mappings as not all hardware supports 64 bit addresses for consistent allocations such descriptors. */ u64 bus_dma_mask; /* upstream dma_mask constraint */ unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; struct list_head dma_pools; /* dma pools (if dma'ble) */ #ifdef CONFIG_DMA_DECLARE_COHERENT struct dma_coherent_mem *dma_mem; /* internal for coherent mem override */ #endif #ifdef CONFIG_DMA_CMA struct cma *cma_area; /* contiguous memory area for dma allocations */ #endif /* arch specific additions */ struct dev_archdata archdata; struct device_node *of_node; /* associated device tree node */ struct fwnode_handle *fwnode; /* firmware device node */ #ifdef CONFIG_NUMA int numa_node; /* NUMA node this device is close to */ #endif dev_t devt; /* dev_t, creates the sysfs "dev" */ u32 id; /* device instance */ spinlock_t devres_lock; struct list_head devres_head; struct class *class; const struct attribute_group **groups; /* optional groups */ void (*release)(struct device *dev); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; struct iommu_param *iommu_param; bool offline_disabled:1; bool offline:1; bool of_node_reused:1; #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) bool dma_coherent:1; #endif }; static inline struct device *kobj_to_dev(struct kobject *kobj) { return container_of(kobj, struct device, kobj); } /** * device_iommu_mapped - Returns true when the device DMA is translated * by an IOMMU * @dev: Device to perform the check on */ static inline bool device_iommu_mapped(struct device *dev) { return (dev->iommu_group != NULL); } /* Get the wakeup routines, which depend on struct device */ #include <linux/pm_wakeup.h> static inline const char *dev_name(const struct device *dev) { /* Use the init name until the kobject becomes available */ if (dev->init_name) return dev->init_name; return kobject_name(&dev->kobj); } extern __printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...); #ifdef CONFIG_NUMA static inline int dev_to_node(struct device *dev) { return dev->numa_node; } static inline void set_dev_node(struct device *dev, int node) { dev->numa_node = node; } #else static inline int dev_to_node(struct device *dev) { return NUMA_NO_NODE; } static inline void set_dev_node(struct device *dev, int node) { } #endif static inline struct irq_domain *dev_get_msi_domain(const struct device *dev) { #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN return dev->msi_domain; #else return NULL; #endif } static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) { #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN dev->msi_domain = d; #endif } static inline void *dev_get_drvdata(const struct device *dev) { return dev->driver_data; } static inline void dev_set_drvdata(struct device *dev, void *data) { dev->driver_data = data; } static inline struct pm_subsys_data *dev_to_psd(struct device *dev) { return dev ? 
dev->power.subsys_data : NULL; } static inline unsigned int dev_get_uevent_suppress(const struct device *dev) { return dev->kobj.uevent_suppress; } static inline void dev_set_uevent_suppress(struct device *dev, int val) { dev->kobj.uevent_suppress = val; } static inline int device_is_registered(struct device *dev) { return dev->kobj.state_in_sysfs; } static inline void device_enable_async_suspend(struct device *dev) { if (!dev->power.is_prepared) dev->power.async_suspend = true; } static inline void device_disable_async_suspend(struct device *dev) { if (!dev->power.is_prepared) dev->power.async_suspend = false; } static inline bool device_async_suspend_enabled(struct device *dev) { return !!dev->power.async_suspend; } static inline bool device_pm_not_required(struct device *dev) { return dev->power.no_pm; } static inline void device_set_pm_not_required(struct device *dev) { dev->power.no_pm = true; } static inline void dev_pm_syscore_device(struct device *dev, bool val) { #ifdef CONFIG_PM_SLEEP dev->power.syscore = val; #endif } static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags) { dev->power.driver_flags = flags; } static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags) { return !!(dev->power.driver_flags & flags); } static inline void device_lock(struct device *dev) { mutex_lock(&dev->mutex); } static inline int device_lock_interruptible(struct device *dev) { return mutex_lock_interruptible(&dev->mutex); } static inline int device_trylock(struct device *dev) { return mutex_trylock(&dev->mutex); } static inline void device_unlock(struct device *dev) { mutex_unlock(&dev->mutex); } static inline void device_lock_assert(struct device *dev) { lockdep_assert_held(&dev->mutex); } static inline struct device_node *dev_of_node(struct device *dev) { if (!IS_ENABLED(CONFIG_OF) || !dev) return NULL; return dev->of_node; } void driver_init(void); /* * High level routines for use by the bus drivers */ extern int __must_check device_register(struct device *dev); extern void device_unregister(struct device *dev); extern void device_initialize(struct device *dev); extern int __must_check device_add(struct device *dev); extern void device_del(struct device *dev); extern int device_for_each_child(struct device *dev, void *data, int (*fn)(struct device *dev, void *data)); extern int device_for_each_child_reverse(struct device *dev, void *data, int (*fn)(struct device *dev, void *data)); extern struct device *device_find_child(struct device *dev, void *data, int (*match)(struct device *dev, void *data)); extern struct device *device_find_child_by_name(struct device *parent, const char *name); extern int device_rename(struct device *dev, const char *new_name); extern int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); extern const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp); static inline bool device_supports_offline(struct device *dev) { return dev->bus && dev->bus->offline && dev->bus->online; } extern void lock_device_hotplug(void); extern void unlock_device_hotplug(void); extern int lock_device_hotplug_sysfs(void); extern int device_offline(struct device *dev); extern int device_online(struct device *dev); extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void 
device_set_of_node_from_dev(struct device *dev, const struct device *dev2); void device_set_node(struct device *dev, struct fwnode_handle *fwnode); static inline int dev_num_vf(struct device *dev) { if (dev->bus && dev->bus->num_vf) return dev->bus->num_vf(dev); return 0; } /* * Root device objects for grouping under /sys/devices */ extern struct device *__root_device_register(const char *name, struct module *owner); /* This is a macro to avoid include problems with THIS_MODULE */ #define root_device_register(name) \ __root_device_register(name, THIS_MODULE) extern void root_device_unregister(struct device *root); static inline void *dev_get_platdata(const struct device *dev) { return dev->platform_data; } /* * Manual binding of a device to driver. See drivers/base/bus.c * for information on use. */ extern int __must_check device_bind_driver(struct device *dev); extern void device_release_driver(struct device *dev); extern int __must_check device_attach(struct device *dev); extern int __must_check driver_attach(struct device_driver *drv); extern void device_initial_probe(struct device *dev); extern int __must_check device_reprobe(struct device *dev); extern bool device_is_bound(struct device *dev); /* * Easy functions for dynamically creating devices on the fly */ extern __printf(5, 0) struct device *device_create_vargs(struct class *cls, struct device *parent, dev_t devt, void *drvdata, const char *fmt, va_list vargs); extern __printf(5, 6) struct device *device_create(struct class *cls, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...); extern __printf(6, 7) struct device *device_create_with_groups(struct class *cls, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, ...); extern void device_destroy(struct class *cls, dev_t devt); extern int __must_check device_add_groups(struct device *dev, const struct attribute_group **groups); extern void device_remove_groups(struct device *dev, const struct attribute_group **groups); static inline int __must_check device_add_group(struct device *dev, const struct attribute_group *grp) { const struct attribute_group *groups[] = { grp, NULL }; return device_add_groups(dev, groups); } static inline void device_remove_group(struct device *dev, const struct attribute_group *grp) { const struct attribute_group *groups[] = { grp, NULL }; return device_remove_groups(dev, groups); } extern int __must_check devm_device_add_groups(struct device *dev, const struct attribute_group **groups); extern void devm_device_remove_groups(struct device *dev, const struct attribute_group **groups); extern int __must_check devm_device_add_group(struct device *dev, const struct attribute_group *grp); extern void devm_device_remove_group(struct device *dev, const struct attribute_group *grp); /* * Platform "fixup" functions - allow the platform to have their say * about devices and actions that the general device layer doesn't * know about. */ /* Notify platform of device discovery */ extern int (*platform_notify)(struct device *dev); extern int (*platform_notify_remove)(struct device *dev); /* * get_device - atomically increment the reference count for the device. 
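 *
 * A hedged usage sketch (illustrative only; some_dev is a hypothetical
 * pointer obtained elsewhere): every successful get_device() must be
 * balanced by a put_device() once the caller is done with the device:
 *
 *	struct device *dev = get_device(some_dev);
 *
 *	if (dev) {
 *		... use dev safely here ...
 *		put_device(dev);
 *	}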
* */ extern struct device *get_device(struct device *dev); extern void put_device(struct device *dev); extern bool kill_device(struct device *dev); #ifdef CONFIG_DEVTMPFS extern int devtmpfs_create_node(struct device *dev); extern int devtmpfs_delete_node(struct device *dev); extern int devtmpfs_mount(const char *mntdir); #else static inline int devtmpfs_create_node(struct device *dev) { return 0; } static inline int devtmpfs_delete_node(struct device *dev) { return 0; } static inline int devtmpfs_mount(const char *mountpoint) { return 0; } #endif /* drivers/base/power/shutdown.c */ extern void device_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ extern const char *dev_driver_string(const struct device *dev); /* Device links interface. */ struct device_link *device_link_add(struct device *consumer, struct device *supplier, u32 flags); void device_link_del(struct device_link *link); void device_link_remove(void *consumer, struct device *supplier); #ifndef dev_fmt #define dev_fmt(fmt) fmt #endif #ifdef CONFIG_PRINTK #if defined(__KMSG_CHECKER) && defined(KMSG_COMPONENT) /* generate magic string for scripts/kmsg-doc to parse */ #define _dev_emerg(dev, format, arg...) \ __KMSG_DEV(KERN_EMERG _FMT_ format _ARGS_ dev, ## arg _END_) #define _dev_alert(dev, format, arg...) \ __KMSG_DEV(KERN_ALERT _FMT_ format _ARGS_ dev, ## arg _END_) #define _dev_crit(dev, format, arg...) \ __KMSG_DEV(KERN_CRIT _FMT_ format _ARGS_ dev, ## arg _END_) #define _dev_err(dev, format, arg...) \ __KMSG_DEV(KERN_ERR _FMT_ format _ARGS_ dev, ## arg _END_) #define _dev_warn(dev, format, arg...) \ __KMSG_DEV(KERN_WARNING _FMT_ format _ARGS_ dev, ## arg _END_) #define _dev_notice(dev, format, arg...) \ __KMSG_DEV(KERN_NOTICE _FMT_ format _ARGS_ dev, ## arg _END_) #define _dev_info(dev, format, arg...) \ __KMSG_DEV(KERN_INFO _FMT_ format _ARGS_ dev, ## arg _END_) #elif defined(CONFIG_KMSG_IDS) && defined(KMSG_COMPONENT) extern int dev_printk_hash(const char *level, const struct device *dev, const char *fmt, ...); extern __printf(2,3) int dev_emerg_hash(const struct device *dev, const char *fmt, ...); extern __printf(2,3) int dev_alert_hash(const struct device *dev, const char *fmt, ...); extern __printf(2,3) int dev_crit_hash(const struct device *dev, const char *fmt, ...); extern __printf(2,3) int dev_err_hash(const struct device *dev, const char *fmt, ...); extern __printf(2,3) int dev_warn_hash(const struct device *dev, const char *fmt, ...); extern __printf(2,3) int dev_notice_hash(const struct device *dev, const char *fmt, ...); extern __printf(2,3) int _dev_info_hash(const struct device *dev, const char *fmt, ...); #define dev_printk(level, dev, format, arg...) \ dev_printk_hash(level, dev, "%s: " format, dev_name(dev), ## arg) #define _dev_emerg(dev, format, arg...) \ dev_emerg_hash(dev, "%s: " format, dev_name(dev), ## arg) #define _dev_alert(dev, format, arg...) \ dev_alert_hash(dev, "%s: " format, dev_name(dev), ## arg) #define _dev_crit(dev, format, arg...) \ dev_crit_hash(dev, "%s: " format, dev_name(dev), ## arg) #define _dev_err(dev, format, arg...) \ dev_err_hash(dev, "%s: " format, dev_name(dev), ## arg) #define _dev_warn(dev, format, arg...) \ dev_warn_hash(dev, "%s: " format, dev_name(dev), ## arg) #define _dev_notice(dev, format, arg...) \ dev_notice_hash(dev, "%s: " format, dev_name(dev), ## arg) #define _dev_info(dev, format, arg...) 
\ _dev_info_hash(dev, "%s: " format, dev_name(dev), ## arg) #else /* !defined(CONFIG_KMSG_IDS) */ __printf(3, 0) __cold int dev_vprintk_emit(int level, const struct device *dev, const char *fmt, va_list args); __printf(3, 4) __cold int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); __printf(3, 4) __cold void dev_printk(const char *level, const struct device *dev, const char *fmt, ...); __printf(2, 3) __cold void _dev_emerg(const struct device *dev, const char *fmt, ...); __printf(2, 3) __cold void _dev_alert(const struct device *dev, const char *fmt, ...); __printf(2, 3) __cold void _dev_crit(const struct device *dev, const char *fmt, ...); __printf(2, 3) __cold void _dev_err(const struct device *dev, const char *fmt, ...); __printf(2, 3) __cold void _dev_warn(const struct device *dev, const char *fmt, ...); __printf(2, 3) __cold void _dev_notice(const struct device *dev, const char *fmt, ...); __printf(2, 3) __cold void _dev_info(const struct device *dev, const char *fmt, ...); #endif /* !defined(CONFIG_KMSG_IDS) */ #else /* !defined(CONFIG_PRINTK) */ static inline __printf(3, 0) int dev_vprintk_emit(int level, const struct device *dev, const char *fmt, va_list args) { return 0; } static inline __printf(3, 4) int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) { return 0; } static inline void __dev_printk(const char *level, const struct device *dev, struct va_format *vaf) {} static inline __printf(3, 4) void dev_printk(const char *level, const struct device *dev, const char *fmt, ...) {} static inline __printf(2, 3) void _dev_emerg(const struct device *dev, const char *fmt, ...) {} static inline __printf(2, 3) void _dev_crit(const struct device *dev, const char *fmt, ...) {} static inline __printf(2, 3) void _dev_alert(const struct device *dev, const char *fmt, ...) {} static inline __printf(2, 3) void _dev_err(const struct device *dev, const char *fmt, ...) {} static inline __printf(2, 3) void _dev_warn(const struct device *dev, const char *fmt, ...) {} static inline __printf(2, 3) void _dev_notice(const struct device *dev, const char *fmt, ...) {} static inline __printf(2, 3) void _dev_info(const struct device *dev, const char *fmt, ...) {} #endif /* !defined(CONFIG_PRINTK) */ /* * #defines for all the dev_<level> macros to prefix with whatever * possible use of #define dev_fmt(fmt) ... */ #define dev_emerg(dev, fmt, ...) \ _dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__) #define dev_crit(dev, fmt, ...) \ _dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__) #define dev_alert(dev, fmt, ...) \ _dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__) #define dev_err(dev, fmt, ...) \ _dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__) #define dev_warn(dev, fmt, ...) \ _dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__) #define dev_notice(dev, fmt, ...) \ _dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__) #define dev_info(dev, fmt, ...) \ _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__) #if defined(CONFIG_DYNAMIC_DEBUG) #define dev_dbg(dev, fmt, ...) \ dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) #elif defined(DEBUG) #define dev_dbg(dev, fmt, ...) \ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__) #else #define dev_dbg(dev, fmt, ...) \ ({ \ if (0) \ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ }) #endif #ifdef CONFIG_PRINTK #define dev_level_once(dev_level, dev, fmt, ...) 
\ do { \ static bool __print_once __read_mostly; \ \ if (!__print_once) { \ __print_once = true; \ dev_level(dev, fmt, ##__VA_ARGS__); \ } \ } while (0) #else #define dev_level_once(dev_level, dev, fmt, ...) \ do { \ if (0) \ dev_level(dev, fmt, ##__VA_ARGS__); \ } while (0) #endif #define dev_emerg_once(dev, fmt, ...) \ dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) #define dev_alert_once(dev, fmt, ...) \ dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) #define dev_crit_once(dev, fmt, ...) \ dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) #define dev_err_once(dev, fmt, ...) \ dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) #define dev_warn_once(dev, fmt, ...) \ dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) #define dev_notice_once(dev, fmt, ...) \ dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) #define dev_info_once(dev, fmt, ...) \ dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) #define dev_dbg_once(dev, fmt, ...) \ dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__) #define dev_level_ratelimited(dev_level, dev, fmt, ...) \ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ if (__ratelimit(&_rs)) \ dev_level(dev, fmt, ##__VA_ARGS__); \ } while (0) #define dev_emerg_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__) #define dev_alert_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__) #define dev_crit_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__) #define dev_err_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__) #define dev_warn_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__) #define dev_notice_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__) #define dev_info_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__) #if defined(CONFIG_DYNAMIC_DEBUG) /* descriptor check is first to prevent flooding with "callbacks suppressed" */ #define dev_dbg_ratelimited(dev, fmt, ...) \ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ if (DYNAMIC_DEBUG_BRANCH(descriptor) && \ __ratelimit(&_rs)) \ __dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt), \ ##__VA_ARGS__); \ } while (0) #elif defined(DEBUG) #define dev_dbg_ratelimited(dev, fmt, ...) \ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ if (__ratelimit(&_rs)) \ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ } while (0) #else #define dev_dbg_ratelimited(dev, fmt, ...) \ do { \ if (0) \ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ } while (0) #endif #ifdef VERBOSE_DEBUG #define dev_vdbg dev_dbg #else #define dev_vdbg(dev, fmt, ...) \ ({ \ if (0) \ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ }) #endif /* * dev_WARN*() acts like dev_printk(), but with the key difference of * using WARN/WARN_ONCE to include file/line information and a backtrace. */ #define dev_WARN(dev, format, arg...) \ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg); #define dev_WARN_ONCE(dev, condition, format, arg...) \ WARN_ONCE(condition, "%s %s: " format, \ dev_driver_string(dev), dev_name(dev), ## arg) extern __printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...); /* Create alias, so I can be autoloaded. 
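 *
 * For instance (illustrative values, not taken from a real driver), a
 * module servicing char major 10, minor 130 could declare:
 *
 *	MODULE_ALIAS_CHARDEV(10, 130);
 *
 * so that opening the matching device node can trigger autoloading.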
*/ #define MODULE_ALIAS_CHARDEV(major,minor) \ MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ MODULE_ALIAS("char-major-" __stringify(major) "-*") #ifdef CONFIG_SYSFS_DEPRECATED extern long sysfs_deprecated; #else #define sysfs_deprecated 0 #endif /** * module_driver() - Helper macro for drivers that don't do anything * special in module init/exit. This eliminates a lot of boilerplate. * Each module may only use this macro once, and calling it replaces * module_init() and module_exit(). * * @__driver: driver name * @__register: register function for this driver type * @__unregister: unregister function for this driver type * @...: Additional arguments to be passed to __register and __unregister. * * Use this macro to construct bus specific macros for registering * drivers, and do not use it on its own. */ #define module_driver(__driver, __register, __unregister, ...) \ static int __init __driver##_init(void) \ { \ return __register(&(__driver) , ##__VA_ARGS__); \ } \ module_init(__driver##_init); \ static void __exit __driver##_exit(void) \ { \ __unregister(&(__driver) , ##__VA_ARGS__); \ } \ module_exit(__driver##_exit); /** * builtin_driver() - Helper macro for drivers that don't do anything * special in init and have no exit. This eliminates some boilerplate. * Each driver may only use this macro once, and calling it replaces * device_initcall (or in some cases, the legacy __initcall). This is * meant to be a direct parallel of module_driver() above but without * the __exit stuff that is not used for builtin cases. * * @__driver: driver name * @__register: register function for this driver type * @...: Additional arguments to be passed to __register * * Use this macro to construct bus specific macros for registering * drivers, and do not use it on its own. */ #define builtin_driver(__driver, __register, ...) \ static int __init __driver##_init(void) \ { \ return __register(&(__driver) , ##__VA_ARGS__); \ } \ device_initcall(__driver##_init); #endif /* _DEVICE_H_ */ posix-clock.h 0000644 00000007646 14722070374 0007173 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * posix-clock.h - support for dynamic clock devices * * Copyright (C) 2010 OMICRON electronics GmbH */ #ifndef _LINUX_POSIX_CLOCK_H_ #define _LINUX_POSIX_CLOCK_H_ #include <linux/cdev.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/posix-timers.h> #include <linux/rwsem.h> struct posix_clock; /** * struct posix_clock_operations - functional interface to the clock * * Every posix clock is represented by a character device. Drivers may * optionally offer extended capabilities by implementing the * character device methods. The character device file operations are * first handled by the clock device layer, then passed on to the * driver by calling these functions. 
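 *
 * A minimal sketch (the my_clk_* callbacks are hypothetical and not
 * part of this header) of a driver wiring up the clock operations:
 *
 *	static struct posix_clock_operations my_clk_ops = {
 *		.owner         = THIS_MODULE,
 *		.clock_adjtime = my_clk_adjtime,
 *		.clock_gettime = my_clk_gettime,
 *		.clock_getres  = my_clk_getres,
 *		.clock_settime = my_clk_settime,
 *	};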
* * @owner: The clock driver should set this to THIS_MODULE * @clock_adjtime: Adjust the clock * @clock_gettime: Read the current time * @clock_getres: Get the clock resolution * @clock_settime: Set the current time value * @open: Optional character device open method * @release: Optional character device release method * @ioctl: Optional character device ioctl method * @read: Optional character device read method * @poll: Optional character device poll method */ struct posix_clock_operations { struct module *owner; int (*clock_adjtime)(struct posix_clock *pc, struct __kernel_timex *tx); int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts); int (*clock_getres) (struct posix_clock *pc, struct timespec64 *ts); int (*clock_settime)(struct posix_clock *pc, const struct timespec64 *ts); /* * Optional character device methods: */ long (*ioctl) (struct posix_clock *pc, unsigned int cmd, unsigned long arg); int (*open) (struct posix_clock *pc, fmode_t f_mode); __poll_t (*poll) (struct posix_clock *pc, struct file *file, poll_table *wait); int (*release) (struct posix_clock *pc); ssize_t (*read) (struct posix_clock *pc, uint flags, char __user *buf, size_t cnt); }; /** * struct posix_clock - represents a dynamic posix clock * * @ops: Functional interface to the clock * @cdev: Character device instance for this clock * @dev: Pointer to the clock's device. * @rwsem: Protects the 'zombie' field from concurrent access. * @zombie: If 'zombie' is true, then the hardware has disappeared. * * Drivers should embed their struct posix_clock within a private * structure, obtaining a reference to it during callbacks using * container_of(). * * Drivers should supply an initialized but not exposed struct device * to posix_clock_register(). It is used to manage the lifetime of the * driver's private structure. Its 'release' field should be set to * a release function for this private structure. */ struct posix_clock { struct posix_clock_operations ops; struct cdev cdev; struct device *dev; struct rw_semaphore rwsem; bool zombie; }; /** * posix_clock_register() - register a new clock * @clk: Pointer to the clock. Caller must provide 'ops' field * @dev: Pointer to the initialized device. Caller must provide * 'release' field * * A clock driver calls this function to register itself with the * clock device subsystem. If 'clk' points to dynamically allocated * memory, then the caller must provide a 'release' function to free * that memory. * * Returns zero on success, non-zero otherwise. */ int posix_clock_register(struct posix_clock *clk, struct device *dev); /** * posix_clock_unregister() - unregister a clock * @clk: Clock instance previously registered via posix_clock_register() * * A clock driver calls this function to remove itself from the clock * device subsystem. The posix_clock itself will remain (in an * inactive state) until its reference count drops to zero, at which * point it will be deallocated with its 'release' method. */ void posix_clock_unregister(struct posix_clock *clk); #endif posix_acl_xattr.h 0000644 00000003125 14722070374 0010127 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* File: linux/posix_acl_xattr.h Extended attribute system call representation of Access Control Lists.
Copyright (C) 2000 by Andreas Gruenbacher <a.gruenbacher@computer.org> Copyright (C) 2002 SGI - Silicon Graphics, Inc <linux-xfs@oss.sgi.com> */ #ifndef _POSIX_ACL_XATTR_H #define _POSIX_ACL_XATTR_H #include <uapi/linux/xattr.h> #include <uapi/linux/posix_acl_xattr.h> #include <linux/posix_acl.h> static inline size_t posix_acl_xattr_size(int count) { return (sizeof(struct posix_acl_xattr_header) + (count * sizeof(struct posix_acl_xattr_entry))); } static inline int posix_acl_xattr_count(size_t size) { if (size < sizeof(struct posix_acl_xattr_header)) return -1; size -= sizeof(struct posix_acl_xattr_header); if (size % sizeof(struct posix_acl_xattr_entry)) return -1; return size / sizeof(struct posix_acl_xattr_entry); } #ifdef CONFIG_FS_POSIX_ACL void posix_acl_fix_xattr_from_user(void *value, size_t size); void posix_acl_fix_xattr_to_user(void *value, size_t size); #else static inline void posix_acl_fix_xattr_from_user(void *value, size_t size) { } static inline void posix_acl_fix_xattr_to_user(void *value, size_t size) { } #endif struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns, const void *value, size_t size); int posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl, void *buffer, size_t size); extern const struct xattr_handler posix_acl_access_xattr_handler; extern const struct xattr_handler posix_acl_default_xattr_handler; #endif /* _POSIX_ACL_XATTR_H */ scc.h 0000644 00000005531 14722070374 0005477 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* $Id: scc.h,v 1.29 1997/04/02 14:56:45 jreuter Exp jreuter $ */ #ifndef _SCC_H #define _SCC_H #include <uapi/linux/scc.h> enum {TX_OFF, TX_ON}; /* command for scc_key_trx() */ /* Vector masks in RR2B */ #define VECTOR_MASK 0x06 #define TXINT 0x00 #define EXINT 0x02 #define RXINT 0x04 #define SPINT 0x06 #ifdef CONFIG_SCC_DELAY #define Inb(port) inb_p(port) #define Outb(port, val) outb_p(val, port) #else #define Inb(port) inb(port) #define Outb(port, val) outb(val, port) #endif /* SCC channel control structure for KISS */ struct scc_kiss { unsigned char txdelay; /* Transmit Delay 10 ms/cnt */ unsigned char persist; /* Persistence (0-255) as a % */ unsigned char slottime; /* Delay to wait on persistence hit */ unsigned char tailtime; /* Delay after last byte written */ unsigned char fulldup; /* Full Duplex mode 0=CSMA 1=DUP 2=ALWAYS KEYED */ unsigned char waittime; /* Waittime before any transmit attempt */ unsigned int maxkeyup; /* Maximum time to transmit (seconds) */ unsigned int mintime; /* Minimal offtime after MAXKEYUP timeout (seconds) */ unsigned int idletime; /* Maximum idle time in ALWAYS KEYED mode (seconds) */ unsigned int maxdefer; /* Timer for CSMA channel busy limit */ unsigned char tx_inhibit; /* Transmit is not allowed when set */ unsigned char group; /* Group ID for AX.25 TX interlocking */ unsigned char mode; /* 'normal' or 'hwctrl' mode (unused) */ unsigned char softdcd; /* Use DPLL instead of DCD pin for carrier detect */ }; /* SCC channel structure */ struct scc_channel { int init; /* channel exists? 
*/ struct net_device *dev; /* link to device control structure */ struct net_device_stats dev_stat;/* device statistics */ char brand; /* manufacturer of the board */ long clock; /* used clock */ io_port ctrl; /* I/O address of CONTROL register */ io_port data; /* I/O address of DATA register */ io_port special; /* I/O address of special function port */ int irq; /* Number of Interrupt */ char option; char enhanced; /* Enhanced SCC support */ unsigned char wreg[16]; /* Copy of last written value in WRx */ unsigned char status; /* Copy of R0 at last external interrupt */ unsigned char dcd; /* DCD status */ struct scc_kiss kiss; /* control structure for KISS params */ struct scc_stat stat; /* statistical information */ struct scc_modem modem; /* modem information */ struct sk_buff_head tx_queue; /* next tx buffer */ struct sk_buff *rx_buff; /* pointer to frame currently received */ struct sk_buff *tx_buff; /* pointer to frame currently transmitted */ /* Timer */ struct timer_list tx_t; /* tx timer for this channel */ struct timer_list tx_wdog; /* tx watchdogs */ /* Channel lock */ spinlock_t lock; /* Channel guard lock */ }; #endif /* defined(_SCC_H) */ ftrace_irq.h 0000644 00000001467 14722070374 0007052 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FTRACE_IRQ_H #define _LINUX_FTRACE_IRQ_H #ifdef CONFIG_FTRACE_NMI_ENTER extern void arch_ftrace_nmi_enter(void); extern void arch_ftrace_nmi_exit(void); #else static inline void arch_ftrace_nmi_enter(void) { } static inline void arch_ftrace_nmi_exit(void) { } #endif #ifdef CONFIG_HWLAT_TRACER extern bool trace_hwlat_callback_enabled; extern void trace_hwlat_callback(bool enter); #endif static inline void ftrace_nmi_enter(void) { #ifdef CONFIG_HWLAT_TRACER if (trace_hwlat_callback_enabled) trace_hwlat_callback(true); #endif arch_ftrace_nmi_enter(); } static inline void ftrace_nmi_exit(void) { arch_ftrace_nmi_exit(); #ifdef CONFIG_HWLAT_TRACER if (trace_hwlat_callback_enabled) trace_hwlat_callback(false); #endif } #endif /* _LINUX_FTRACE_IRQ_H */ property.h 0000644 00000034456 14722070374 0006623 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * property.h - Unified device property interface. * * Copyright (C) 2014, Intel Corporation * Authors: Rafael J. 
Wysocki <rafael.j.wysocki@intel.com> * Mika Westerberg <mika.westerberg@linux.intel.com> */ #ifndef _LINUX_PROPERTY_H_ #define _LINUX_PROPERTY_H_ #include <linux/bits.h> #include <linux/fwnode.h> #include <linux/types.h> struct device; enum dev_prop_type { DEV_PROP_U8, DEV_PROP_U16, DEV_PROP_U32, DEV_PROP_U64, DEV_PROP_STRING, DEV_PROP_MAX, }; enum dev_dma_attr { DEV_DMA_NOT_SUPPORTED, DEV_DMA_NON_COHERENT, DEV_DMA_COHERENT, }; struct fwnode_handle *dev_fwnode(struct device *dev); bool device_property_present(struct device *dev, const char *propname); int device_property_read_u8_array(struct device *dev, const char *propname, u8 *val, size_t nval); int device_property_read_u16_array(struct device *dev, const char *propname, u16 *val, size_t nval); int device_property_read_u32_array(struct device *dev, const char *propname, u32 *val, size_t nval); int device_property_read_u64_array(struct device *dev, const char *propname, u64 *val, size_t nval); int device_property_read_string_array(struct device *dev, const char *propname, const char **val, size_t nval); int device_property_read_string(struct device *dev, const char *propname, const char **val); int device_property_match_string(struct device *dev, const char *propname, const char *string); bool fwnode_device_is_available(const struct fwnode_handle *fwnode); bool fwnode_property_present(const struct fwnode_handle *fwnode, const char *propname); int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode, const char *propname, u8 *val, size_t nval); int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode, const char *propname, u16 *val, size_t nval); int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode, const char *propname, u32 *val, size_t nval); int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode, const char *propname, u64 *val, size_t nval); int fwnode_property_read_string_array(const struct fwnode_handle *fwnode, const char *propname, const char **val, size_t nval); int fwnode_property_read_string(const struct fwnode_handle *fwnode, const char *propname, const char **val); int fwnode_property_match_string(const struct fwnode_handle *fwnode, const char *propname, const char *string); int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, const char *prop, const char *nargs_prop, unsigned int nargs, unsigned int index, struct fwnode_reference_args *args); struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode, const char *name, unsigned int index); struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_get_next_parent( struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_get_next_child_node( const struct fwnode_handle *fwnode, struct fwnode_handle *child); struct fwnode_handle *fwnode_get_next_available_child_node( const struct fwnode_handle *fwnode, struct fwnode_handle *child); #define fwnode_for_each_child_node(fwnode, child) \ for (child = fwnode_get_next_child_node(fwnode, NULL); child; \ child = fwnode_get_next_child_node(fwnode, child)) #define fwnode_for_each_available_child_node(fwnode, child) \ for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\ child = fwnode_get_next_available_child_node(fwnode, child)) struct fwnode_handle *device_get_next_child_node( struct device *dev, struct fwnode_handle *child); #define device_for_each_child_node(dev, child) \ for (child = device_get_next_child_node(dev, NULL); child; \ child = device_get_next_child_node(dev, 
child)) struct fwnode_handle *fwnode_get_named_child_node( const struct fwnode_handle *fwnode, const char *childname); struct fwnode_handle *device_get_named_child_node(struct device *dev, const char *childname); struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); void fwnode_handle_put(struct fwnode_handle *fwnode); int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index); unsigned int device_get_child_node_count(struct device *dev); static inline bool device_property_read_bool(struct device *dev, const char *propname) { return device_property_present(dev, propname); } static inline int device_property_read_u8(struct device *dev, const char *propname, u8 *val) { return device_property_read_u8_array(dev, propname, val, 1); } static inline int device_property_read_u16(struct device *dev, const char *propname, u16 *val) { return device_property_read_u16_array(dev, propname, val, 1); } static inline int device_property_read_u32(struct device *dev, const char *propname, u32 *val) { return device_property_read_u32_array(dev, propname, val, 1); } static inline int device_property_read_u64(struct device *dev, const char *propname, u64 *val) { return device_property_read_u64_array(dev, propname, val, 1); } static inline int device_property_count_u8(struct device *dev, const char *propname) { return device_property_read_u8_array(dev, propname, NULL, 0); } static inline int device_property_count_u16(struct device *dev, const char *propname) { return device_property_read_u16_array(dev, propname, NULL, 0); } static inline int device_property_count_u32(struct device *dev, const char *propname) { return device_property_read_u32_array(dev, propname, NULL, 0); } static inline int device_property_count_u64(struct device *dev, const char *propname) { return device_property_read_u64_array(dev, propname, NULL, 0); } static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode, const char *propname) { return fwnode_property_present(fwnode, propname); } static inline int fwnode_property_read_u8(const struct fwnode_handle *fwnode, const char *propname, u8 *val) { return fwnode_property_read_u8_array(fwnode, propname, val, 1); } static inline int fwnode_property_read_u16(const struct fwnode_handle *fwnode, const char *propname, u16 *val) { return fwnode_property_read_u16_array(fwnode, propname, val, 1); } static inline int fwnode_property_read_u32(const struct fwnode_handle *fwnode, const char *propname, u32 *val) { return fwnode_property_read_u32_array(fwnode, propname, val, 1); } static inline int fwnode_property_read_u64(const struct fwnode_handle *fwnode, const char *propname, u64 *val) { return fwnode_property_read_u64_array(fwnode, propname, val, 1); } static inline int fwnode_property_count_u8(const struct fwnode_handle *fwnode, const char *propname) { return fwnode_property_read_u8_array(fwnode, propname, NULL, 0); } static inline int fwnode_property_count_u16(const struct fwnode_handle *fwnode, const char *propname) { return fwnode_property_read_u16_array(fwnode, propname, NULL, 0); } static inline int fwnode_property_count_u32(const struct fwnode_handle *fwnode, const char *propname) { return fwnode_property_read_u32_array(fwnode, propname, NULL, 0); } static inline int fwnode_property_count_u64(const struct fwnode_handle *fwnode, const char *propname) { return fwnode_property_read_u64_array(fwnode, propname, NULL, 0); } /** * struct property_entry - "Built-in" device property representation. * @name: Name of the property. 
* @length: Length of data making up the value. * @is_array: True when the property is an array. * @type: Type of the data in unions. * @pointer: Pointer to the property (an array of items of the given type). * @value: Value of the property (when it is a single item of the given type). */ struct property_entry { const char *name; size_t length; bool is_array; enum dev_prop_type type; union { union { const u8 *u8_data; const u16 *u16_data; const u32 *u32_data; const u64 *u64_data; const char * const *str; } pointer; union { u8 u8_data; u16 u16_data; u32 u32_data; u64 u64_data; const char *str; } value; }; }; /* * Note: the below four initializers for the anonymous union are carefully * crafted to avoid gcc-4.4.4's problems with initialization of anon unions * and structs. */ #define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _Type_, _val_) \ (struct property_entry) { \ .name = _name_, \ .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ .is_array = true, \ .type = DEV_PROP_##_Type_, \ { .pointer = { ._type_##_data = _val_ } }, \ } #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, U8, _val_) #define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \ PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, U16, _val_) #define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \ PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, U32, _val_) #define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \ PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, U64, _val_) #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ (struct property_entry) { \ .name = _name_, \ .length = ARRAY_SIZE(_val_) * sizeof(const char *), \ .is_array = true, \ .type = DEV_PROP_STRING, \ { .pointer = { .str = _val_ } }, \ } #define PROPERTY_ENTRY_INTEGER(_name_, _type_, _Type_, _val_) \ (struct property_entry) { \ .name = _name_, \ .length = sizeof(_type_), \ .type = DEV_PROP_##_Type_, \ { .value = { ._type_##_data = _val_ } }, \ } #define PROPERTY_ENTRY_U8(_name_, _val_) \ PROPERTY_ENTRY_INTEGER(_name_, u8, U8, _val_) #define PROPERTY_ENTRY_U16(_name_, _val_) \ PROPERTY_ENTRY_INTEGER(_name_, u16, U16, _val_) #define PROPERTY_ENTRY_U32(_name_, _val_) \ PROPERTY_ENTRY_INTEGER(_name_, u32, U32, _val_) #define PROPERTY_ENTRY_U64(_name_, _val_) \ PROPERTY_ENTRY_INTEGER(_name_, u64, U64, _val_) #define PROPERTY_ENTRY_STRING(_name_, _val_) \ (struct property_entry) { \ .name = _name_, \ .length = sizeof(const char *), \ .type = DEV_PROP_STRING, \ { .value = { .str = _val_ } }, \ } #define PROPERTY_ENTRY_BOOL(_name_) \ (struct property_entry) { \ .name = _name_, \ } struct property_entry * property_entries_dup(const struct property_entry *properties); void property_entries_free(const struct property_entry *properties); int device_add_properties(struct device *dev, const struct property_entry *properties); void device_remove_properties(struct device *dev); bool device_dma_supported(struct device *dev); enum dev_dma_attr device_get_dma_attr(struct device *dev); const void *device_get_match_data(struct device *dev); int device_get_phy_mode(struct device *dev); void *device_get_mac_address(struct device *dev, char *addr, int alen); int fwnode_get_phy_mode(struct fwnode_handle *fwnode); void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen); struct fwnode_handle *fwnode_graph_get_next_endpoint( const struct fwnode_handle *fwnode, struct fwnode_handle *prev); struct fwnode_handle * fwnode_graph_get_port_parent(const struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_graph_get_remote_port_parent( const struct fwnode_handle 
*fwnode); struct fwnode_handle *fwnode_graph_get_remote_port( const struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_graph_get_remote_endpoint( const struct fwnode_handle *fwnode); struct fwnode_handle * fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port, u32 endpoint); /* * Fwnode lookup flags * * @FWNODE_GRAPH_ENDPOINT_NEXT: In the case of no exact match, look for the * closest endpoint ID greater than the specified * one. * @FWNODE_GRAPH_DEVICE_DISABLED: The device to which the remote * endpoint of the given endpoint belongs * may be disabled. */ #define FWNODE_GRAPH_ENDPOINT_NEXT BIT(0) #define FWNODE_GRAPH_DEVICE_DISABLED BIT(1) struct fwnode_handle * fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode, u32 port, u32 endpoint, unsigned long flags); #define fwnode_graph_for_each_endpoint(fwnode, child) \ for (child = NULL; \ (child = fwnode_graph_get_next_endpoint(fwnode, child)); ) int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint); /* -------------------------------------------------------------------------- */ /* Software fwnode support - when HW description is incomplete or missing */ struct software_node; /** * struct software_node_ref_args - Reference with additional arguments * @node: Reference to a software node * @nargs: Number of elements in @args array * @args: Integer arguments */ struct software_node_ref_args { const struct software_node *node; unsigned int nargs; u64 args[NR_FWNODE_REFERENCE_ARGS]; }; /** * struct software_node_reference - Named software node reference property * @name: Name of the property * @nrefs: Number of elements in @refs array * @refs: Array of references with optional arguments */ struct software_node_reference { const char *name; unsigned int nrefs; const struct software_node_ref_args *refs; }; /** * struct software_node - Software node description * @name: Name of the software node * @parent: Parent of the software node * @properties: Array of device properties * @references: Array of software node reference properties */ struct software_node { const char *name; const struct software_node *parent; const struct property_entry *properties; const struct software_node_reference *references; }; bool is_software_node(const struct fwnode_handle *fwnode); const struct software_node *to_software_node(struct fwnode_handle *fwnode); struct fwnode_handle *software_node_fwnode(const struct software_node *node); const struct software_node * software_node_find_by_name(const struct software_node *parent, const char *name); int software_node_register_nodes(const struct software_node *nodes); void software_node_unregister_nodes(const struct software_node *nodes); int software_node_register(const struct software_node *node); int software_node_notify(struct device *dev, unsigned long action); struct fwnode_handle * fwnode_create_software_node(const struct property_entry *properties, const struct fwnode_handle *parent); void fwnode_remove_software_node(struct fwnode_handle *fwnode); #endif /* _LINUX_PROPERTY_H_ */ netfilter_ipv4.h 0000644 00000002065 14722070374 0007664 0 ustar 00 /* IPv4-specific defines for netfilter. * (C)1998 Rusty Russell -- This code is GPL. */ #ifndef __LINUX_IP_NETFILTER_H #define __LINUX_IP_NETFILTER_H #include <uapi/linux/netfilter_ipv4.h> /* Extra routing may be needed on local out, as the QUEUE target never returns * control to the table.
*/ struct ip_rt_info { __be32 daddr; __be32 saddr; u_int8_t tos; u_int32_t mark; }; int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type); struct nf_queue_entry; #ifdef CONFIG_INET __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); #else static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) { return 0; } static inline int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict) { return -EOPNOTSUPP; } #endif /* CONFIG_INET */ #endif /*__LINUX_IP_NETFILTER_H*/ usbdevice_fs.h 0000644 00000004273 14722070374 0007372 0 ustar 00 // SPDX-License-Identifier: GPL-2.0+ /*****************************************************************************/ /* * usbdevice_fs.h -- USB device file system. * * Copyright (C) 2000 * Thomas Sailer (sailer@ife.ee.ethz.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * History: * 0.1 04.01.2000 Created */ /*****************************************************************************/ #ifndef _LINUX_USBDEVICE_FS_H #define _LINUX_USBDEVICE_FS_H #include <uapi/linux/usbdevice_fs.h> #ifdef CONFIG_COMPAT #include <linux/compat.h> struct usbdevfs_ctrltransfer32 { u8 bRequestType; u8 bRequest; u16 wValue; u16 wIndex; u16 wLength; u32 timeout; /* in milliseconds */ compat_caddr_t data; }; struct usbdevfs_bulktransfer32 { compat_uint_t ep; compat_uint_t len; compat_uint_t timeout; /* in milliseconds */ compat_caddr_t data; }; struct usbdevfs_disconnectsignal32 { compat_int_t signr; compat_caddr_t context; }; struct usbdevfs_urb32 { unsigned char type; unsigned char endpoint; compat_int_t status; compat_uint_t flags; compat_caddr_t buffer; compat_int_t buffer_length; compat_int_t actual_length; compat_int_t start_frame; compat_int_t number_of_packets; compat_int_t error_count; compat_uint_t signr; compat_caddr_t usercontext; /* unused */ struct usbdevfs_iso_packet_desc iso_frame_desc[0]; }; struct usbdevfs_ioctl32 { s32 ifno; s32 ioctl_code; compat_caddr_t data; }; #endif #endif /* _LINUX_USBDEVICE_FS_H */ iocontext.h 0000644 00000011416 14722070374 0006742 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef IOCONTEXT_H #define IOCONTEXT_H #include <linux/radix-tree.h> #include <linux/rcupdate.h> #include <linux/workqueue.h> enum { ICQ_EXITED = 1 << 2, ICQ_DESTROYED = 1 << 3, }; /* * An io_cq (icq) is association between an io_context (ioc) and a * request_queue (q). This is used by elevators which need to track * information per ioc - q pair. * * Elevator can request use of icq by setting elevator_type->icq_size and * ->icq_align. Both size and align must be larger than that of struct * io_cq and elevator can use the tail area for private information. 
The * recommended way to do this is defining a struct which contains io_cq as * the first member followed by private members and using its size and * align. For example, * * struct snail_io_cq { * struct io_cq icq; * int poke_snail; * int feed_snail; * }; * * struct elevator_type snail_elv_type { * .ops = { ... }, * .icq_size = sizeof(struct snail_io_cq), * .icq_align = __alignof__(struct snail_io_cq), * ... * }; * * If icq_size is set, block core will manage icq's. All requests will * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn() * is called and be holding a reference to the associated io_context. * * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is * called and, on destruction, ->elevator_exit_icq_fn(). Both functions * are called with both the associated io_context and queue locks held. * * Elevator is allowed to lookup icq using ioc_lookup_icq() while holding * queue lock but the returned icq is valid only until the queue lock is * released. Elevators can not and should not try to create or destroy * icq's. * * As icq's are linked from both ioc and q, the locking rules are a bit * complex. * * - ioc lock nests inside q lock. * * - ioc->icq_list and icq->ioc_node are protected by ioc lock. * q->icq_list and icq->q_node by q lock. * * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq * itself is protected by q lock. However, both the indexes and icq * itself are also RCU managed and lookup can be performed holding only * the q lock. * * - icq's are not reference counted. They are destroyed when either the * ioc or q goes away. Each request with icq set holds an extra * reference to ioc to ensure it stays until the request is completed. * * - Linking and unlinking icq's are performed while holding both ioc and q * locks. Due to the lock ordering, q exit is simple but ioc exit * requires reverse-order double lock dance. */ struct io_cq { struct request_queue *q; struct io_context *ioc; /* * q_node and ioc_node link io_cq through icq_list of q and ioc * respectively. Both fields are unused once ioc_exit_icq() is * called and shared with __rcu_icq_cache and __rcu_head which are * used for RCU free of io_cq. */ union { struct list_head q_node; struct kmem_cache *__rcu_icq_cache; }; union { struct hlist_node ioc_node; struct rcu_head __rcu_head; }; unsigned int flags; }; /* * I/O subsystem state of the associated processes. It is refcounted * and kmalloc'ed. These could be shared between processes. */ struct io_context { atomic_long_t refcount; atomic_t active_ref; atomic_t nr_tasks; /* all the fields below are protected by this lock */ spinlock_t lock; unsigned short ioprio; /* * For request batching */ int nr_batch_requests; /* Number of requests left in the batch */ unsigned long last_waited; /* Time last woken after wait for request */ struct radix_tree_root icq_tree; struct io_cq __rcu *icq_hint; struct hlist_head icq_list; struct work_struct release_work; }; /** * get_io_context_active - get active reference on ioc * @ioc: ioc of interest * * Only iocs with active reference can issue new IOs. This function * acquires an active reference on @ioc. The caller must already have an * active reference on @ioc. 
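 *
 * Illustrative pairing (a sketch, not mandated by this header): an
 * active reference taken here is dropped with put_io_context_active():
 *
 *	get_io_context_active(ioc);
 *	... issue I/O on behalf of ioc ...
 *	put_io_context_active(ioc);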
*/ static inline void get_io_context_active(struct io_context *ioc) { WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0); WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0); atomic_long_inc(&ioc->refcount); atomic_inc(&ioc->active_ref); } static inline void ioc_task_link(struct io_context *ioc) { get_io_context_active(ioc); WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0); atomic_inc(&ioc->nr_tasks); } struct task_struct; #ifdef CONFIG_BLOCK void put_io_context(struct io_context *ioc); void put_io_context_active(struct io_context *ioc); void exit_io_context(struct task_struct *task); struct io_context *get_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node); #else struct io_context; static inline void put_io_context(struct io_context *ioc) { } static inline void exit_io_context(struct task_struct *task) { } #endif #endif bitfield.h 0000644 00000011615 14722070374 0006511 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> */ #ifndef _LINUX_BITFIELD_H #define _LINUX_BITFIELD_H #include <linux/build_bug.h> #include <asm/byteorder.h> /* * Bitfield access macros * * FIELD_{GET,PREP} macros take as first parameter shifted mask * from which they extract the base mask and shift amount. * Mask must be a compilation time constant. * * Example: * * #define REG_FIELD_A GENMASK(6, 0) * #define REG_FIELD_B BIT(7) * #define REG_FIELD_C GENMASK(15, 8) * #define REG_FIELD_D GENMASK(31, 16) * * Get: * a = FIELD_GET(REG_FIELD_A, reg); * b = FIELD_GET(REG_FIELD_B, reg); * * Set: * reg = FIELD_PREP(REG_FIELD_A, 1) | * FIELD_PREP(REG_FIELD_B, 0) | * FIELD_PREP(REG_FIELD_C, c) | * FIELD_PREP(REG_FIELD_D, 0x40); * * Modify: * reg &= ~REG_FIELD_C; * reg |= FIELD_PREP(REG_FIELD_C, c); */ #define __bf_shf(x) (__builtin_ffsll(x) - 1) #define __scalar_type_to_unsigned_cases(type) \ unsigned type: (unsigned type)0, \ signed type: (unsigned type)0 #define __unsigned_scalar_typeof(x) typeof( \ _Generic((x), \ char: (unsigned char)0, \ __scalar_type_to_unsigned_cases(char), \ __scalar_type_to_unsigned_cases(short), \ __scalar_type_to_unsigned_cases(int), \ __scalar_type_to_unsigned_cases(long), \ __scalar_type_to_unsigned_cases(long long), \ default: (x))) #define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x)) #define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ ({ \ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ _pfx "mask is not constant"); \ BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \ ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \ _pfx "value too large for the field"); \ BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \ __bf_cast_unsigned(_reg, ~0ull), \ _pfx "type of reg too small for mask"); \ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ (1ULL << __bf_shf(_mask))); \ }) /** * FIELD_FIT() - check if value fits in the field * @_mask: shifted mask defining the field's length and position * @_val: value to test against the field * * Return: true if @_val can fit inside @_mask, false if @_val is too big. */ #define FIELD_FIT(_mask, _val) \ ({ \ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ }) /** * FIELD_PREP() - prepare a bitfield element * @_mask: shifted mask defining the field's length and position * @_val: value to put in the field * * FIELD_PREP() masks and shifts up the value. 
The result should * be combined with other fields of the bitfield using logical OR. */ #define FIELD_PREP(_mask, _val) \ ({ \ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ }) /** * FIELD_GET() - extract a bitfield element * @_mask: shifted mask defining the field's length and position * @_reg: value of entire bitfield * * FIELD_GET() extracts the field specified by @_mask from the * bitfield passed in as @_reg by masking and shifting it down. */ #define FIELD_GET(_mask, _reg) \ ({ \ __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ }) extern void __compiletime_error("value doesn't fit into mask") __field_overflow(void); extern void __compiletime_error("bad bitfield mask") __bad_mask(void); static __always_inline u64 field_multiplier(u64 field) { if ((field | (field - 1)) & ((field | (field - 1)) + 1)) __bad_mask(); return field & -field; } static __always_inline u64 field_mask(u64 field) { return field / field_multiplier(field); } #define ____MAKE_OP(type,base,to,from) \ static __always_inline __##type type##_encode_bits(base v, base field) \ { \ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \ __field_overflow(); \ return to((v & field_mask(field)) * field_multiplier(field)); \ } \ static __always_inline __##type type##_replace_bits(__##type old, \ base val, base field) \ { \ return (old & ~to(field)) | type##_encode_bits(val, field); \ } \ static __always_inline void type##p_replace_bits(__##type *p, \ base val, base field) \ { \ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ } \ static __always_inline base type##_get_bits(__##type v, base field) \ { \ return (from(v) & field)/field_multiplier(field); \ } #define __MAKE_OP(size) \ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ ____MAKE_OP(u##size,u##size,,) ____MAKE_OP(u8,u8,,) __MAKE_OP(16) __MAKE_OP(32) __MAKE_OP(64) #undef __MAKE_OP #undef ____MAKE_OP #endif set_memory.h 0000644 00000002372 14722070374 0007112 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2017, Michael Ellerman, IBM Corporation. 
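 *
 * Illustrative sketch (assumes addr is a page-aligned kernel virtual
 * address): on architectures with CONFIG_ARCH_HAS_SET_MEMORY these
 * calls change page permissions, otherwise the stubs below are no-ops:
 *
 *	set_memory_ro(addr, 1);
 *	... the page is now read-only ...
 *	set_memory_rw(addr, 1);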
*/ #ifndef _LINUX_SET_MEMORY_H_ #define _LINUX_SET_MEMORY_H_ #ifdef CONFIG_ARCH_HAS_SET_MEMORY #include <asm/set_memory.h> #else static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } #endif #ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP static inline int set_direct_map_invalid_noflush(struct page *page) { return 0; } static inline int set_direct_map_default_noflush(struct page *page) { return 0; } #endif #ifndef set_mce_nospec static inline int set_mce_nospec(unsigned long pfn, bool unmap) { return 0; } #endif #ifndef clear_mce_nospec static inline int clear_mce_nospec(unsigned long pfn) { return 0; } #endif #ifndef CONFIG_ARCH_HAS_MEM_ENCRYPT static inline int set_memory_encrypted(unsigned long addr, int numpages) { return 0; } static inline int set_memory_decrypted(unsigned long addr, int numpages) { return 0; } #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ #endif /* _LINUX_SET_MEMORY_H_ */ generic-radix-tree.h 0000644 00000014555 14722070374 0010413 0 ustar 00 #ifndef _LINUX_GENERIC_RADIX_TREE_H #define _LINUX_GENERIC_RADIX_TREE_H /** * DOC: Generic radix trees/sparse arrays * * Very simple and minimalistic, supporting arbitrary size entries up to * PAGE_SIZE. * * A genradix is defined with the type it will store, like so: * * static GENRADIX(struct foo) foo_genradix; * * The main operations are: * * - genradix_init(radix) - initialize an empty genradix * * - genradix_free(radix) - free all memory owned by the genradix and * reinitialize it * * - genradix_ptr(radix, idx) - gets a pointer to the entry at idx, returning * NULL if that entry does not exist * * - genradix_ptr_alloc(radix, idx, gfp) - gets a pointer to an entry, * allocating it if necessary * * - genradix_for_each(radix, iter, p) - iterate over each entry in a genradix * * The radix tree allocates one page of entries at a time, so entries may exist * that were never explicitly allocated - they will be initialized to all * zeroes. * * Internally, a genradix is just a radix tree of pages, and indexing works in * terms of byte offsets. The wrappers in this header file use sizeof on the * type the radix contains to calculate a byte offset from the index - see * __idx_to_offset. */ #include <asm/page.h> #include <linux/bug.h> #include <linux/kernel.h> #include <linux/log2.h> struct genradix_root; struct __genradix { struct genradix_root __rcu *root; }; /* * NOTE: currently, sizeof(_type) must not be larger than PAGE_SIZE: */ #define __GENRADIX_INITIALIZER \ { \ .tree = { \ .root = NULL, \ } \ } /* * We use a 0 size array to stash the type we're storing without taking any * space at runtime - then the various accessor macros can use typeof() to get * to it for casts/sizeof - we also force the alignment so that storing a type * with a ridiculous alignment doesn't blow up the alignment or size of the * genradix. 
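 *
 * A short usage sketch (illustrative; struct foo, foo_genradix and f
 * are made-up names echoing the DOC example above):
 *
 *	static GENRADIX(struct foo) foo_genradix;
 *
 *	struct foo *f = genradix_ptr_alloc(&foo_genradix, 7, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;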
*/ #define GENRADIX(_type) \ struct { \ struct __genradix tree; \ _type type[0] __aligned(1); \ } #define DEFINE_GENRADIX(_name, _type) \ GENRADIX(_type) _name = __GENRADIX_INITIALIZER /** * genradix_init - initialize a genradix * @_radix: genradix to initialize * * Does not fail */ #define genradix_init(_radix) \ do { \ *(_radix) = (typeof(*_radix)) __GENRADIX_INITIALIZER; \ } while (0) void __genradix_free(struct __genradix *); /** * genradix_free: free all memory owned by a genradix * @_radix: the genradix to free * * After freeing, @_radix will be reinitialized and empty */ #define genradix_free(_radix) __genradix_free(&(_radix)->tree) static inline size_t __idx_to_offset(size_t idx, size_t obj_size) { if (__builtin_constant_p(obj_size)) BUILD_BUG_ON(obj_size > PAGE_SIZE); else BUG_ON(obj_size > PAGE_SIZE); if (!is_power_of_2(obj_size)) { size_t objs_per_page = PAGE_SIZE / obj_size; return (idx / objs_per_page) * PAGE_SIZE + (idx % objs_per_page) * obj_size; } else { return idx * obj_size; } } #define __genradix_cast(_radix) (typeof((_radix)->type[0]) *) #define __genradix_obj_size(_radix) sizeof((_radix)->type[0]) #define __genradix_idx_to_offset(_radix, _idx) \ __idx_to_offset(_idx, __genradix_obj_size(_radix)) void *__genradix_ptr(struct __genradix *, size_t); /** * genradix_ptr - get a pointer to a genradix entry * @_radix: genradix to access * @_idx: index to fetch * * Returns a pointer to entry at @_idx, or NULL if that entry does not exist. */ #define genradix_ptr(_radix, _idx) \ (__genradix_cast(_radix) \ __genradix_ptr(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx))) void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t); /** * genradix_ptr_alloc - get a pointer to a genradix entry, allocating it * if necessary * @_radix: genradix to access * @_idx: index to fetch * @_gfp: gfp mask * * Returns a pointer to entry at @_idx, or NULL on allocation failure */ #define genradix_ptr_alloc(_radix, _idx, _gfp) \ (__genradix_cast(_radix) \ __genradix_ptr_alloc(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx), \ _gfp)) struct genradix_iter { size_t offset; size_t pos; }; /** * genradix_iter_init - initialize a genradix_iter * @_radix: genradix that will be iterated over * @_idx: index to start iterating from */ #define genradix_iter_init(_radix, _idx) \ ((struct genradix_iter) { \ .pos = (_idx), \ .offset = __genradix_idx_to_offset((_radix), (_idx)),\ }) void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t); /** * genradix_iter_peek - get first entry at or above iterator's current * position * @_iter: a genradix_iter * @_radix: genradix being iterated over * * If no more entries exist at or above @_iter's current position, returns NULL */ #define genradix_iter_peek(_iter, _radix) \ (__genradix_cast(_radix) \ __genradix_iter_peek(_iter, &(_radix)->tree, \ PAGE_SIZE / __genradix_obj_size(_radix))) static inline void __genradix_iter_advance(struct genradix_iter *iter, size_t obj_size) { iter->offset += obj_size; if (!is_power_of_2(obj_size) && (iter->offset & (PAGE_SIZE - 1)) + obj_size > PAGE_SIZE) iter->offset = round_up(iter->offset, PAGE_SIZE); iter->pos++; } #define genradix_iter_advance(_iter, _radix) \ __genradix_iter_advance(_iter, __genradix_obj_size(_radix)) #define genradix_for_each_from(_radix, _iter, _p, _start) \ for (_iter = genradix_iter_init(_radix, _start); \ (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \ genradix_iter_advance(&_iter, _radix)) /** * genradix_for_each - iterate over entry in a genradix * @_radix: 
genradix to iterate over * @_iter: a genradix_iter to track current position * @_p: pointer to genradix entry type * * On every iteration, @_p will point to the current entry, and @_iter.pos * will be the current entry's index. */ #define genradix_for_each(_radix, _iter, _p) \ genradix_for_each_from(_radix, _iter, _p, 0) int __genradix_prealloc(struct __genradix *, size_t, gfp_t); /** * genradix_prealloc - preallocate entries in a generic radix tree * @_radix: genradix to preallocate * @_nr: number of entries to preallocate * @_gfp: gfp mask * * Returns 0 on success, -ENOMEM on failure */ #define genradix_prealloc(_radix, _nr, _gfp) \ __genradix_prealloc(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _nr + 1),\ _gfp) #endif /* _LINUX_GENERIC_RADIX_TREE_H */ netdev_features.h 0000644 00000023315 14722070374 0010112 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Network device features. */ #ifndef _LINUX_NETDEV_FEATURES_H #define _LINUX_NETDEV_FEATURES_H #include <linux/types.h> #include <linux/bitops.h> #include <asm/byteorder.h> typedef u64 netdev_features_t; enum { NETIF_F_SG_BIT, /* Scatter/gather IO. */ NETIF_F_IP_CSUM_BIT, /* Can checksum TCP/UDP over IPv4. */ __UNUSED_NETIF_F_1, NETIF_F_HW_CSUM_BIT, /* Can checksum all the packets. */ NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */ NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */ NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */ NETIF_F_HW_VLAN_CTAG_TX_BIT, /* Transmit VLAN CTAG HW acceleration */ NETIF_F_HW_VLAN_CTAG_RX_BIT, /* Receive VLAN CTAG HW acceleration */ NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */ NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */ NETIF_F_GSO_BIT, /* Enable software GSO. */ NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */ /* do not use LLTX in new drivers */ NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */ NETIF_F_GRO_BIT, /* Generic receive offload */ NETIF_F_LRO_BIT, /* large receive offload */ /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */ NETIF_F_TSO_BIT /* ... TCPv4 segmentation */ = NETIF_F_GSO_SHIFT, NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */ NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ NETIF_F_FSO_BIT, /* ... FCoE segmentation */ NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ NETIF_F_GSO_GRE_CSUM_BIT, /* ... GRE with csum with TSO */ NETIF_F_GSO_IPXIP4_BIT, /* ... IP4 or IP6 over IP4 with TSO */ NETIF_F_GSO_IPXIP6_BIT, /* ... IP4 or IP6 over IP6 with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ NETIF_F_GSO_PARTIAL_BIT, /* ... Only segment inner-most L4 * in hardware and all other * headers in software. */ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */ NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */ NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */ NETIF_F_GSO_UDP_L4_BIT, /* ... 
UDP payload GSO (not UFO) */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ NETIF_F_GSO_UDP_L4_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/ NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ NETIF_F_RXHASH_BIT, /* Receive hashing offload */ NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */ NETIF_F_NOCACHE_COPY_BIT, /* Use no-cache copyfromuser */ NETIF_F_LOOPBACK_BIT, /* Enable loopback */ NETIF_F_RXFCS_BIT, /* Append FCS to skb pkt data */ NETIF_F_RXALL_BIT, /* Receive errored frames too */ NETIF_F_HW_VLAN_STAG_TX_BIT, /* Transmit VLAN STAG HW acceleration */ NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */ NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */ NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */ NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */ NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ /* * Add your fresh new feature above and remember to update * netdev_features_strings[] in net/ethtool/common.c and maybe * some feature mask #defines below. Please also describe it * in Documentation/networking/netdev-features.txt. */ /**/NETDEV_FEATURE_COUNT }; /* copy'n'paste compression ;) */ #define __NETIF_F_BIT(bit) ((netdev_features_t)1 << (bit)) #define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT) #define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC) #define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU) #define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) #define NETIF_F_FSO __NETIF_F(FSO) #define NETIF_F_GRO __NETIF_F(GRO) #define NETIF_F_GRO_HW __NETIF_F(GRO_HW) #define NETIF_F_GSO __NETIF_F(GSO) #define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) #define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) #define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM) #define NETIF_F_HW_VLAN_CTAG_FILTER __NETIF_F(HW_VLAN_CTAG_FILTER) #define NETIF_F_HW_VLAN_CTAG_RX __NETIF_F(HW_VLAN_CTAG_RX) #define NETIF_F_HW_VLAN_CTAG_TX __NETIF_F(HW_VLAN_CTAG_TX) #define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM) #define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM) #define NETIF_F_LLTX __NETIF_F(LLTX) #define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK) #define NETIF_F_LRO __NETIF_F(LRO) #define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL) #define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY) #define NETIF_F_NTUPLE __NETIF_F(NTUPLE) #define NETIF_F_RXCSUM __NETIF_F(RXCSUM) #define NETIF_F_RXHASH __NETIF_F(RXHASH) #define NETIF_F_SCTP_CRC __NETIF_F(SCTP_CRC) #define NETIF_F_SG __NETIF_F(SG) #define NETIF_F_TSO6 __NETIF_F(TSO6) #define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) #define NETIF_F_TSO __NETIF_F(TSO) #define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) #define NETIF_F_RXFCS __NETIF_F(RXFCS) #define NETIF_F_RXALL __NETIF_F(RXALL) #define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) #define NETIF_F_GSO_GRE_CSUM __NETIF_F(GSO_GRE_CSUM) #define NETIF_F_GSO_IPXIP4 __NETIF_F(GSO_IPXIP4) #define NETIF_F_GSO_IPXIP6 __NETIF_F(GSO_IPXIP6) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) #define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID) #define NETIF_F_GSO_PARTIAL 
__NETIF_F(GSO_PARTIAL) #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP) #define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP) #define NETIF_F_GSO_UDP __NETIF_F(GSO_UDP) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) #define NETIF_F_HW_TC __NETIF_F(HW_TC) #define NETIF_F_HW_ESP __NETIF_F(HW_ESP) #define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM) #define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT) #define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD) #define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4) #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) /* Finds the highest set feature bit in the range [0, start-1]; returns -1 if none is set. */ static inline int find_next_netdev_feature(u64 feature, unsigned long start) { /* like BITMAP_LAST_WORD_MASK() for u64 * this sets the most significant 64 - start bits to 0. */ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); return fls64(feature) - 1; } /* This walks from the MSB to the LSB through the set feature bits; * mask_addr should be a u64 and bit an int */ #define for_each_netdev_feature(mask_addr, bit) \ for ((bit) = find_next_netdev_feature((mask_addr), \ NETDEV_FEATURE_COUNT); \ (bit) >= 0; \ (bit) = find_next_netdev_feature((mask_addr), (bit))) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) /* remember that ((t)1 << t_BITS) is undefined in C99 */ #define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \ (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \ ~NETIF_F_NEVER_CHANGE) /* Segmentation offload feature mask */ #define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) /* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set -- * this would be contradictory */ #define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ NETIF_F_HW_CSUM) #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \ NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID) #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ NETIF_F_FSO) /* List of features with software fallbacks. */ #define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \ NETIF_F_GSO_SCTP) /* * If one device supports one of these features, then enable them * for all in netdev_increment_features. */ #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ NETIF_F_SG | NETIF_F_HIGHDMA | \ NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) /* * If one device doesn't support one of these features, then disable it * for all in netdev_increment_features. */ #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) /* * If an upper/master device has these features disabled, they must be disabled * on all lower/slave devices as well.
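 *
 * For example, NETIF_F_UPPER_DISABLES below contains NETIF_F_LRO, so
 * clearing LRO on a bonding or team master also clears LRO on all of
 * its slave devices.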
*/ #define NETIF_F_UPPER_DISABLES NETIF_F_LRO /* changeable features with no special hardware requirements */ #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) #define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_FILTER | \ NETIF_F_HW_VLAN_STAG_RX | \ NETIF_F_HW_VLAN_STAG_TX) #define NETIF_F_GSO_ENCAP_ALL (NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ NETIF_F_GSO_IPXIP4 | \ NETIF_F_GSO_IPXIP6 | \ NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) #endif /* _LINUX_NETDEV_FEATURES_H */ stmmac.h 0000644 00000011450 14722070374 0006210 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /******************************************************************************* Header file for stmmac platform data Copyright (C) 2009 STMicroelectronics Ltd Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #ifndef __STMMAC_PLATFORM_DATA #define __STMMAC_PLATFORM_DATA #include <linux/platform_device.h> #define MTL_MAX_RX_QUEUES 8 #define MTL_MAX_TX_QUEUES 8 #define STMMAC_CH_MAX 8 #define STMMAC_RX_COE_NONE 0 #define STMMAC_RX_COE_TYPE1 1 #define STMMAC_RX_COE_TYPE2 2 /* Define the macros for CSR clock range parameters to be passed by * platform code. * This could also be configured at run time using CPU freq framework. */ /* MDC Clock Selection define */ #define STMMAC_CSR_60_100M 0x0 /* MDC = clk_csr_i/42 */ #define STMMAC_CSR_100_150M 0x1 /* MDC = clk_csr_i/62 */ #define STMMAC_CSR_20_35M 0x2 /* MDC = clk_csr_i/16 */ #define STMMAC_CSR_35_60M 0x3 /* MDC = clk_csr_i/26 */ #define STMMAC_CSR_150_250M 0x4 /* MDC = clk_csr_i/102 */ #define STMMAC_CSR_250_300M 0x5 /* MDC = clk_csr_i/122 */ /* MTL algorithms identifiers */ #define MTL_TX_ALGORITHM_WRR 0x0 #define MTL_TX_ALGORITHM_WFQ 0x1 #define MTL_TX_ALGORITHM_DWRR 0x2 #define MTL_TX_ALGORITHM_SP 0x3 #define MTL_RX_ALGORITHM_SP 0x4 #define MTL_RX_ALGORITHM_WSP 0x5 /* RX/TX Queue Mode */ #define MTL_QUEUE_AVB 0x0 #define MTL_QUEUE_DCB 0x1 /* The MDC clock could be set higher than the IEEE 802.3 * specified frequency limit of 2.5 MHz, by programming a clock divider * of a value different from the ones defined above. The resultant MDIO * clock frequency of 12.5 MHz is applicable for the interfacing chips * supporting higher MDC clocks. * The MDC clock selection macros need to be defined for MDC clock rate * of 12.5 MHz, corresponding to the following selection.
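 *
 * A worked example (the 100 MHz rate is illustrative, not mandated
 * here): with clk_csr_i running at 100 MHz, selecting STMMAC_CSR_I_8
 * (divide by 8) yields MDC = 100 MHz / 8 = 12.5 MHz.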
*/ #define STMMAC_CSR_I_4 0x8 /* clk_csr_i/4 */ #define STMMAC_CSR_I_6 0x9 /* clk_csr_i/6 */ #define STMMAC_CSR_I_8 0xA /* clk_csr_i/8 */ #define STMMAC_CSR_I_10 0xB /* clk_csr_i/10 */ #define STMMAC_CSR_I_12 0xC /* clk_csr_i/12 */ #define STMMAC_CSR_I_14 0xD /* clk_csr_i/14 */ #define STMMAC_CSR_I_16 0xE /* clk_csr_i/16 */ #define STMMAC_CSR_I_18 0xF /* clk_csr_i/18 */ /* AXI DMA Burst length supported */ #define DMA_AXI_BLEN_4 (1 << 1) #define DMA_AXI_BLEN_8 (1 << 2) #define DMA_AXI_BLEN_16 (1 << 3) #define DMA_AXI_BLEN_32 (1 << 4) #define DMA_AXI_BLEN_64 (1 << 5) #define DMA_AXI_BLEN_128 (1 << 6) #define DMA_AXI_BLEN_256 (1 << 7) #define DMA_AXI_BLEN_ALL (DMA_AXI_BLEN_4 | DMA_AXI_BLEN_8 | DMA_AXI_BLEN_16 \ | DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \ | DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256) /* Platform data for platform device structure's platform_data field */ struct stmmac_mdio_bus_data { unsigned int phy_mask; int *irqs; int probed_phy_irq; bool needs_reset; }; struct stmmac_dma_cfg { int pbl; int txpbl; int rxpbl; bool pblx8; int fixed_burst; int mixed_burst; bool aal; }; #define AXI_BLEN 7 struct stmmac_axi { bool axi_lpi_en; bool axi_xit_frm; u32 axi_wr_osr_lmt; u32 axi_rd_osr_lmt; bool axi_kbbe; u32 axi_blen[AXI_BLEN]; bool axi_fb; bool axi_mb; bool axi_rb; }; struct stmmac_rxq_cfg { u8 mode_to_use; u32 chan; u8 pkt_route; bool use_prio; u32 prio; }; struct stmmac_txq_cfg { u32 weight; u8 mode_to_use; /* Credit Based Shaper parameters */ u32 send_slope; u32 idle_slope; u32 high_credit; u32 low_credit; bool use_prio; u32 prio; }; struct plat_stmmacenet_data { int bus_id; int phy_addr; int interface; int phy_interface; struct stmmac_mdio_bus_data *mdio_bus_data; struct device_node *phy_node; struct device_node *phylink_node; struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; int clk_csr; int has_gmac; int enh_desc; int tx_coe; int rx_coe; int bugged_jumbo; int pmt; int force_sf_dma_mode; int force_thresh_dma_mode; int riwt_off; int max_speed; int maxmtu; int multicast_filter_bins; int unicast_filter_entries; int tx_fifo_size; int rx_fifo_size; u32 rx_queues_to_use; u32 tx_queues_to_use; u8 rx_sched_algorithm; u8 tx_sched_algorithm; struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES]; struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES]; void (*fix_mac_speed)(void *priv, unsigned int speed); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); struct mac_device_info *(*setup)(void *priv); void *bsp_priv; struct clk *stmmac_clk; struct clk *pclk; struct clk *clk_ptp_ref; unsigned int clk_ptp_rate; unsigned int clk_ref_rate; s32 ptp_max_adj; struct reset_control *stmmac_rst; struct stmmac_axi *axi; int has_gmac4; bool has_sun8i; bool tso_en; int rss_en; int mac_port_sel_speed; bool en_tx_lpi_clockgating; bool rx_clk_runs_in_lpi; int has_xgmac; bool sph_disable; }; #endif uprobes.h 0000644 00000013777 14722070374 0006411 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _LINUX_UPROBES_H #define _LINUX_UPROBES_H /* * User-space Probes (UProbes) * * Copyright (C) IBM Corporation, 2008-2012 * Authors: * Srikar Dronamraju * Jim Keniston * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra */ #include <linux/errno.h> #include <linux/rbtree.h> #include <linux/types.h> #include <linux/wait.h> struct vm_area_struct; struct mm_struct; struct inode; struct notifier_block; struct page; #define UPROBE_HANDLER_REMOVE 1 #define UPROBE_HANDLER_MASK 1 #define MAX_URETPROBE_DEPTH 64 enum uprobe_filter_ctx {
UPROBE_FILTER_REGISTER, UPROBE_FILTER_UNREGISTER, UPROBE_FILTER_MMAP, }; struct uprobe_consumer { int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); int (*ret_handler)(struct uprobe_consumer *self, unsigned long func, struct pt_regs *regs); bool (*filter)(struct uprobe_consumer *self, enum uprobe_filter_ctx ctx, struct mm_struct *mm); struct uprobe_consumer *next; }; #ifdef CONFIG_UPROBES #include <asm/uprobes.h> enum uprobe_task_state { UTASK_RUNNING, UTASK_SSTEP, UTASK_SSTEP_ACK, UTASK_SSTEP_TRAPPED, }; /* * uprobe_task: Metadata of a task while it singlesteps. */ struct uprobe_task { enum uprobe_task_state state; union { struct { struct arch_uprobe_task autask; unsigned long vaddr; }; struct { struct callback_head dup_xol_work; unsigned long dup_xol_addr; }; }; struct uprobe *active_uprobe; unsigned long xol_vaddr; struct return_instance *return_instances; unsigned int depth; }; struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; /* stack pointer */ unsigned long orig_ret_vaddr; /* original return address */ bool chained; /* true, if instance is nested */ struct return_instance *next; /* keep as stack */ }; enum rp_check { RP_CHECK_CALL, RP_CHECK_CHAIN_CALL, RP_CHECK_RET, }; struct xol_area; struct uprobes_state { struct xol_area *xol_area; }; extern void __init uprobes_init(void); extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern bool is_swbp_insn(uprobe_opcode_t *insn); extern bool is_trap_insn(uprobe_opcode_t *insn); extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs); extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc); extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_mmap(struct vm_area_struct *vma); extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void uprobe_start_dup_mmap(void); extern void uprobe_end_dup_mmap(void); extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm); extern void uprobe_free_utask(struct task_struct *t); extern void uprobe_copy_process(struct task_struct *t, unsigned long flags); extern int uprobe_post_sstep_notifier(struct pt_regs *regs); extern int uprobe_pre_sstep_notifier(struct pt_regs *regs); extern void uprobe_notify_resume(struct pt_regs *regs); extern bool uprobe_deny_signal(void); extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs); extern void uprobe_clear_state(struct mm_struct *mm); extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); extern void 
arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs); extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, void *src, unsigned long len); #else /* !CONFIG_UPROBES */ struct uprobes_state { }; static inline void uprobes_init(void) { } #define uprobe_get_trap_addr(regs) instruction_pointer(regs) static inline int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { return -ENOSYS; } static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc) { return -ENOSYS; } static inline int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add) { return -ENOSYS; } static inline void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { } static inline int uprobe_mmap(struct vm_area_struct *vma) { return 0; } static inline void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } static inline void uprobe_start_dup_mmap(void) { } static inline void uprobe_end_dup_mmap(void) { } static inline void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) { } static inline void uprobe_notify_resume(struct pt_regs *regs) { } static inline bool uprobe_deny_signal(void) { return false; } static inline void uprobe_free_utask(struct task_struct *t) { } static inline void uprobe_copy_process(struct task_struct *t, unsigned long flags) { } static inline void uprobe_clear_state(struct mm_struct *mm) { } #endif /* !CONFIG_UPROBES */ #endif /* _LINUX_UPROBES_H */ pci-p2pdma.h 0000644 00000007721 14722070374 0006666 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * PCI Peer 2 Peer DMA support. * * Copyright (c) 2016-2018, Logan Gunthorpe * Copyright (c) 2016-2017, Microsemi Corporation * Copyright (c) 2017, Christoph Hellwig * Copyright (c) 2018, Eideticom Inc. 
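 *
 * A typical client flow, sketched only from the declarations below
 * (error handling and cleanup omitted): locate a provider with
 * pci_p2pmem_find(), allocate memory with pci_alloc_p2pmem(), map it
 * for DMA with pci_p2pdma_map_sg(), and release it with
 * pci_free_p2pmem().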
*/ #ifndef _LINUX_PCI_P2PDMA_H #define _LINUX_PCI_P2PDMA_H #include <linux/pci.h> struct block_device; struct scatterlist; #ifdef CONFIG_PCI_P2PDMA int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, u64 offset); int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, int num_clients, bool verbose); bool pci_has_p2pmem(struct pci_dev *pdev); struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients); void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size); void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size); pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr); struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, unsigned int *nents, u32 length); void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl); void pci_p2pmem_publish(struct pci_dev *pdev, bool publish); int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma); ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, bool use_p2pdma); #else /* CONFIG_PCI_P2PDMA */ static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, u64 offset) { return -EOPNOTSUPP; } static inline int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, int num_clients, bool verbose) { return -1; } static inline bool pci_has_p2pmem(struct pci_dev *pdev) { return false; } static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients) { return NULL; } static inline void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size) { return NULL; } static inline void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size) { } static inline pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr) { return 0; } static inline struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, unsigned int *nents, u32 length) { return NULL; } static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl) { } static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) { } static inline int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { return 0; } static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { } static inline int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma) { *use_p2pdma = false; return 0; } static inline ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, bool use_p2pdma) { return sprintf(page, "none\n"); } #endif /* CONFIG_PCI_P2PDMA */ static inline int pci_p2pdma_distance(struct pci_dev *provider, struct device *client, bool verbose) { return pci_p2pdma_distance_many(provider, &client, 1, verbose); } static inline struct pci_dev *pci_p2pmem_find(struct device *client) { return pci_p2pmem_find_many(&client, 1); } static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0); } static inline void pci_p2pdma_unmap_sg(struct device *dev, struct scatterlist 
*sg, int nents, enum dma_data_direction dir) { pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0); } #endif /* _LINUX_PCI_P2PDMA_H */ zpool.h 0000644 00000006164 14722070374 0006075 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * zpool memory storage api * * Copyright (C) 2014 Dan Streetman * * This is a common frontend for the zbud and zsmalloc memory * storage pool implementations. Typically, this is used to * store compressed memory. */ #ifndef _ZPOOL_H_ #define _ZPOOL_H_ struct zpool; struct zpool_ops { int (*evict)(struct zpool *pool, unsigned long handle); }; /* * Control how a handle is mapped. It will be ignored if the * implementation does not support it. Its use is optional. * Note that this does not refer to memory protection, it * refers to how the memory will be copied in/out if copying * is necessary during mapping; read-write is the safest as * it copies the existing memory in on map, and copies the * changed memory back out on unmap. Write-only does not copy * in the memory and should only be used for initialization. * If in doubt, use ZPOOL_MM_DEFAULT which is read-write. */ enum zpool_mapmode { ZPOOL_MM_RW, /* normal read-write mapping */ ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */ ZPOOL_MM_WO, /* write-only (no copy-in at map time) */ ZPOOL_MM_DEFAULT = ZPOOL_MM_RW }; bool zpool_has_pool(char *type); struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, const struct zpool_ops *ops); const char *zpool_get_type(struct zpool *pool); void zpool_destroy_pool(struct zpool *pool); bool zpool_malloc_support_movable(struct zpool *pool); int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp, unsigned long *handle); void zpool_free(struct zpool *pool, unsigned long handle); int zpool_shrink(struct zpool *pool, unsigned int pages, unsigned int *reclaimed); void *zpool_map_handle(struct zpool *pool, unsigned long handle, enum zpool_mapmode mm); void zpool_unmap_handle(struct zpool *pool, unsigned long handle); u64 zpool_get_total_size(struct zpool *pool); /** * struct zpool_driver - driver implementation for zpool * @type: name of the driver. * @list: entry in the list of zpool drivers. * @create: create a new pool. * @destroy: destroy a pool. * @malloc: allocate mem from a pool. * @free: free mem from a pool. * @shrink: shrink the pool. * @map: map a handle. * @unmap: unmap a handle. * @total_size: get total size of a pool. * * This is created by a zpool implementation and registered * with zpool. */ struct zpool_driver { char *type; struct module *owner; atomic_t refcount; struct list_head list; void *(*create)(const char *name, gfp_t gfp, const struct zpool_ops *ops, struct zpool *zpool); void (*destroy)(void *pool); bool malloc_support_movable; int (*malloc)(void *pool, size_t size, gfp_t gfp, unsigned long *handle); void (*free)(void *pool, unsigned long handle); int (*shrink)(void *pool, unsigned int pages, unsigned int *reclaimed); void *(*map)(void *pool, unsigned long handle, enum zpool_mapmode mm); void (*unmap)(void *pool, unsigned long handle); u64 (*total_size)(void *pool); }; void zpool_register_driver(struct zpool_driver *driver); int zpool_unregister_driver(struct zpool_driver *driver); bool zpool_evictable(struct zpool *pool); #endif dma-fence-array.h 0000644 00000004217 14722070374 0007662 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * fence-array: aggregates fences to be waited on together * * Copyright (C) 2016 Collabora Ltd * Copyright (C) 2016 Advanced Micro Devices, Inc.
* Authors: * Gustavo Padovan <gustavo@padovan.org> * Christian König <christian.koenig@amd.com> */ #ifndef __LINUX_DMA_FENCE_ARRAY_H #define __LINUX_DMA_FENCE_ARRAY_H #include <linux/dma-fence.h> #include <linux/irq_work.h> /** * struct dma_fence_array_cb - callback helper for fence array * @cb: fence callback structure for signaling * @array: reference to the parent fence array object */ struct dma_fence_array_cb { struct dma_fence_cb cb; struct dma_fence_array *array; }; /** * struct dma_fence_array - fence to represent an array of fences * @base: fence base class * @lock: spinlock for fence handling * @num_fences: number of fences in the array * @num_pending: fences in the array still pending * @fences: array of the fences * @work: internal irq_work function */ struct dma_fence_array { struct dma_fence base; spinlock_t lock; unsigned num_fences; atomic_t num_pending; struct dma_fence **fences; struct irq_work work; }; extern const struct dma_fence_ops dma_fence_array_ops; /** * dma_fence_is_array - check if a fence is from the array subclass * @fence: fence to test * * Return true if it is a dma_fence_array and false otherwise. */ static inline bool dma_fence_is_array(struct dma_fence *fence) { return fence->ops == &dma_fence_array_ops; } /** * to_dma_fence_array - cast a fence to a dma_fence_array * @fence: fence to cast to a dma_fence_array * * Returns NULL if the fence is not a dma_fence_array, * or the dma_fence_array otherwise. */ static inline struct dma_fence_array * to_dma_fence_array(struct dma_fence *fence) { if (fence->ops != &dma_fence_array_ops) return NULL; return container_of(fence, struct dma_fence_array, base); } struct dma_fence_array *dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context, unsigned seqno, bool signal_on_any); bool dma_fence_match_context(struct dma_fence *fence, u64 context); #endif /* __LINUX_DMA_FENCE_ARRAY_H */ altera_uart.h 0000644 00000000615 14722070374 0007230 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * altera_uart.h -- Altera UART driver defines. */ #ifndef __ALTUART_H #define __ALTUART_H struct altera_uart_platform_uart { unsigned long mapbase; /* Physical address base */ unsigned int irq; /* Interrupt vector */ unsigned int uartclk; /* UART clock rate */ unsigned int bus_shift; /* Bus shift (address stride) */ }; #endif /* __ALTUART_H */ dma-fence.h 0000644 00000047322 14722070374 0006552 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Fence mechanism for dma-buf to allow for asynchronous dma access * * Copyright (C) 2012 Canonical Ltd * Copyright (C) 2012 Texas Instruments * * Authors: * Rob Clark <robdclark@gmail.com> * Maarten Lankhorst <maarten.lankhorst@canonical.com> */ #ifndef __LINUX_DMA_FENCE_H #define __LINUX_DMA_FENCE_H #include <linux/err.h> #include <linux/wait.h> #include <linux/list.h> #include <linux/bitops.h> #include <linux/kref.h> #include <linux/sched.h> #include <linux/printk.h> #include <linux/rcupdate.h> struct dma_fence; struct dma_fence_ops; struct dma_fence_cb; /** * struct dma_fence - software synchronization primitive * @refcount: refcount for this fence * @ops: dma_fence_ops associated with this fence * @rcu: used for releasing fence with kfree_rcu * @cb_list: list of all callbacks to call * @lock: spin_lock_irqsave used for locking * @context: execution context this fence belongs to, returned by * dma_fence_context_alloc() * @seqno: the sequence number of this fence inside the execution context, * can be compared to decide which fence would be signaled later.
* @flags: A mask of DMA_FENCE_FLAG_* defined below * @timestamp: Timestamp when the fence was signaled. * @error: Optional, only valid if < 0, must be set before calling * dma_fence_signal, indicates that the fence has completed with an error. * * The flags member must be manipulated and read using the appropriate * atomic ops (bit_*), so taking the spinlock will not be needed most * of the time. * * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the * implementer of the fence for its own purposes. Can be used in different * ways by different fence implementers, so do not rely on this. * * Since atomic bitops are used, a set DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT is * not a guarantee that enable_signaling has actually been called. * Particularly, if the bit was set, but dma_fence_signal was called right * before this bit was set, it would have been able to set the * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that * after dma_fence_signal was called, any enable_signaling call will have either * been completed, or never called at all. */ struct dma_fence { spinlock_t *lock; const struct dma_fence_ops *ops; /* * We clear the callback list on kref_put so that by the time we * release the fence it is unused. No one should be adding to the * cb_list that they don't themselves hold a reference for. * * The lifetime of the timestamp is similarly tied to both the * rcu freelist and the cb_list. The timestamp is only set upon * signaling while simultaneously notifying the cb_list. Ergo, we * only use either the cb_list or the timestamp. Upon destruction, * neither are accessible, and so we can use the rcu. This means * that the cb_list is *only* valid until the signal bit is set, * and to read either you *must* hold a reference to the fence, * and not just the rcu_read_lock. * * Listed in chronological order. */ union { struct list_head cb_list; /* @cb_list replaced by @timestamp on dma_fence_signal() */ ktime_t timestamp; /* @timestamp replaced by @rcu on dma_fence_release() */ struct rcu_head rcu; }; u64 context; u64 seqno; unsigned long flags; struct kref refcount; int error; }; enum dma_fence_flag_bits { DMA_FENCE_FLAG_SIGNALED_BIT, DMA_FENCE_FLAG_TIMESTAMP_BIT, DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, DMA_FENCE_FLAG_USER_BITS, /* must always be last member */ }; typedef void (*dma_fence_func_t)(struct dma_fence *fence, struct dma_fence_cb *cb); /** * struct dma_fence_cb - callback for dma_fence_add_callback() * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list * @func: dma_fence_func_t to call * * This struct will be initialized by dma_fence_add_callback(), additional * data can be passed along by embedding dma_fence_cb in another struct. */ struct dma_fence_cb { struct list_head node; dma_fence_func_t func; }; /** * struct dma_fence_ops - operations implemented for fence * */ struct dma_fence_ops { /** * @use_64bit_seqno: * * True if this dma_fence implementation uses 64bit seqno, false * otherwise. */ bool use_64bit_seqno; /** * @get_driver_name: * * Returns the driver name. This is a callback to allow drivers to * compute the name at runtime, without having to store it permanently * for each fence, or build a cache of some sort. * * This callback is mandatory.
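 *
 * An implementation can be as simple as returning a string literal
 * (illustrative, not required by the API): return "my_driver";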
*/ const char * (*get_driver_name)(struct dma_fence *fence); /** * @get_timeline_name: * * Return the name of the context this fence belongs to. This is a * callback to allow drivers to compute the name at runtime, without * having to store it permanently for each fence, or build a cache of * some sort. * * This callback is mandatory. */ const char * (*get_timeline_name)(struct dma_fence *fence); /** * @enable_signaling: * * Enable software signaling of fence. * * Fence implementations that have the capability for hw->hw * signaling can implement this op to enable the necessary * interrupts, or insert commands into cmdstream, etc, to avoid these * costly operations for the common case where only hw->hw * synchronization is required. This is called in the first * dma_fence_wait() or dma_fence_add_callback() path to let the fence * implementation know that there is another driver waiting on the * signal (ie. hw->sw case). * * This function can be called from atomic context, but not * from irq context, so normal spinlocks can be used. * * A return value of false indicates the fence already passed, * or some failure occurred that made it impossible to enable * signaling. True indicates successful enabling. * * &dma_fence.error may be set in enable_signaling, but only when false * is returned. * * Since many implementations can call dma_fence_signal() even before * @enable_signaling has been called, there's a race window where the * dma_fence_signal() might result in the final fence reference being * released and its memory freed. To avoid this, implementations of this * callback should grab their own reference using dma_fence_get(), to be * released when the fence is signalled (through e.g. the interrupt * handler). * * This callback is optional. If this callback is not present, then the * driver must always have signaling enabled. */ bool (*enable_signaling)(struct dma_fence *fence); /** * @signaled: * * Peek whether the fence is signaled, as a fastpath optimization for * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this * callback does not need to make any guarantees beyond that a fence * which once indicated as signalled must always return true from this * callback. This callback may return false even if the fence has * completed already; in this case the information has not propagated * through the system yet. See also dma_fence_is_signaled(). * * May set &dma_fence.error if returning true. * * This callback is optional. */ bool (*signaled)(struct dma_fence *fence); /** * @wait: * * Custom wait implementation, defaults to dma_fence_default_wait() if * not set. * * The dma_fence_default_wait implementation should work for any fence, as long * as @enable_signaling works correctly. This hook allows drivers to * have an optimized version for the case where a process context is * already available, e.g. if @enable_signaling for the general case * needs to set up a worker thread. * * Must return -ERESTARTSYS if the wait is intr = true and the wait was * interrupted, the remaining jiffies if the fence has signaled, or 0 if the * wait timed out. Can also return other error values on custom implementations, * which should be treated as if the fence is signaled. For example, a hardware * lockup could be reported like that. * * This callback is optional. */ signed long (*wait)(struct dma_fence *fence, bool intr, signed long timeout); /** * @release: * * Called on destruction of fence to release additional resources. * Can be called from irq context. This callback is optional.
If it is * NULL, then dma_fence_free() is instead called as the default * implementation. */ void (*release)(struct dma_fence *fence); /** * @fence_value_str: * * Callback to fill in free-form debug info specific to this fence, like * the sequence number. * * This callback is optional. */ void (*fence_value_str)(struct dma_fence *fence, char *str, int size); /** * @timeline_value_str: * * Fills in the current value of the timeline as a string, like the * sequence number. Note that the specific fence passed to this function * should not matter, drivers should only use it to look up the * corresponding timeline structures. */ void (*timeline_value_str)(struct dma_fence *fence, char *str, int size); }; void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, spinlock_t *lock, u64 context, u64 seqno); void dma_fence_release(struct kref *kref); void dma_fence_free(struct dma_fence *fence); /** * dma_fence_put - decreases refcount of the fence * @fence: fence to reduce refcount of */ static inline void dma_fence_put(struct dma_fence *fence) { if (fence) kref_put(&fence->refcount, dma_fence_release); } /** * dma_fence_get - increases refcount of the fence * @fence: fence to increase refcount of * * Returns the same fence, with refcount increased by 1. */ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) { if (fence) kref_get(&fence->refcount); return fence; } /** * dma_fence_get_rcu - get a fence from a dma_resv_list with * rcu read lock * @fence: fence to increase refcount of * * Function returns NULL if no refcount could be obtained, or the fence. */ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) { if (kref_get_unless_zero(&fence->refcount)) return fence; else return NULL; } /** * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence * @fencep: pointer to fence to increase refcount of * * Function returns NULL if no refcount could be obtained, or the fence. * This function handles acquiring a reference to a fence that may be * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU), * so long as the caller is using RCU on the pointer to the fence. * * An alternative mechanism is to employ a seqlock to protect a bunch of * fences, such as used by struct dma_resv. When using a seqlock, * the seqlock must be taken before and checked after a reference to the * fence is acquired (as shown here). * * The caller is required to hold the RCU read lock. */ static inline struct dma_fence * dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep) { do { struct dma_fence *fence; fence = rcu_dereference(*fencep); if (!fence) return NULL; if (!dma_fence_get_rcu(fence)) continue; /* The atomic_inc_not_zero() inside dma_fence_get_rcu() * provides a full memory barrier upon success (such as now). * This is paired with the write barrier from assigning * to the __rcu protected fence pointer so that if that * pointer still matches the current fence, we know we * have successfully acquired a reference to it. If it no * longer matches, we are holding a reference to some other * reallocated pointer. This is possible if the allocator * is using a freelist like SLAB_TYPESAFE_BY_RCU where the * fence remains valid for the RCU grace period, but it * may be reallocated. When using such allocators, we are * responsible for ensuring the reference we get is to * the right fence, as below.
*/ if (fence == rcu_access_pointer(*fencep)) return rcu_pointer_handoff(fence); dma_fence_put(fence); } while (1); } int dma_fence_signal(struct dma_fence *fence); int dma_fence_signal_locked(struct dma_fence *fence); signed long dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout); int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, dma_fence_func_t func); bool dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb); void dma_fence_enable_sw_signaling(struct dma_fence *fence); /** * dma_fence_is_signaled_locked - Return an indication if the fence * is signaled yet. * @fence: the fence to check * * Returns true if the fence was already signaled, false if not. Since this * function doesn't enable signaling, it is not guaranteed to ever return * true if dma_fence_add_callback(), dma_fence_wait() or * dma_fence_enable_sw_signaling() haven't been called before. * * This function requires &dma_fence.lock to be held. * * See also dma_fence_is_signaled(). */ static inline bool dma_fence_is_signaled_locked(struct dma_fence *fence) { if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return true; if (fence->ops->signaled && fence->ops->signaled(fence)) { dma_fence_signal_locked(fence); return true; } return false; } /** * dma_fence_is_signaled - Return an indication if the fence is signaled yet. * @fence: the fence to check * * Returns true if the fence was already signaled, false if not. Since this * function doesn't enable signaling, it is not guaranteed to ever return * true if dma_fence_add_callback(), dma_fence_wait() or * dma_fence_enable_sw_signaling() haven't been called before. * * It's recommended for seqno fences to call dma_fence_signal when the * operation is complete, it makes it possible to prevent issues from * wraparound between time of issue and time of use by checking the return * value of this function before calling hardware-specific wait instructions. * * See also dma_fence_is_signaled_locked(). */ static inline bool dma_fence_is_signaled(struct dma_fence *fence) { if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return true; if (fence->ops->signaled && fence->ops->signaled(fence)) { dma_fence_signal(fence); return true; } return false; } /** * __dma_fence_is_later - return if f1 is chronologically later than f2 * @f1: the first fence's seqno * @f2: the second fence's seqno from the same context * @ops: dma_fence_ops associated with the seqno * * Returns true if f1 is chronologically later than f2. Both fences must be * from the same context, since a seqno is not common across contexts. */ static inline bool __dma_fence_is_later(u64 f1, u64 f2, const struct dma_fence_ops *ops) { /* This is for backward compatibility with drivers which can only handle * 32bit sequence numbers. Use a 64bit compare when the driver says to * do so. */ if (ops->use_64bit_seqno) return f1 > f2; return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0; } /** * dma_fence_is_later - return if f1 is chronologically later than f2 * @f1: the first fence from the same context * @f2: the second fence from the same context * * Returns true if f1 is chronologically later than f2. Both fences must be * from the same context, since a seqno is not re-used across contexts. 
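 *
 * A worked example of the wraparound handling in __dma_fence_is_later()
 * (illustrative values): on a timeline without 64bit seqnos, f1->seqno
 * == 0x00000001 and f2->seqno == 0xffffffff give
 * (int)(0x00000001 - 0xffffffff) == 2 > 0, so f1 is correctly treated
 * as later even though it is numerically smaller.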
*/ static inline bool dma_fence_is_later(struct dma_fence *f1, struct dma_fence *f2) { if (WARN_ON(f1->context != f2->context)) return false; return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops); } /** * dma_fence_later - return the chronologically later fence * @f1: the first fence from the same context * @f2: the second fence from the same context * * Returns NULL if both fences are signaled, otherwise the fence that would be * signaled last. Both fences must be from the same context, since a seqno is * not re-used across contexts. */ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, struct dma_fence *f2) { if (WARN_ON(f1->context != f2->context)) return NULL; /* * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never * have been set if enable_signaling wasn't called, and enabling that * here is overkill. */ if (dma_fence_is_later(f1, f2)) return dma_fence_is_signaled(f1) ? NULL : f1; else return dma_fence_is_signaled(f2) ? NULL : f2; } /** * dma_fence_get_status_locked - returns the status upon completion * @fence: the dma_fence to query * * Drivers can supply an optional error status condition before they signal * the fence (to indicate whether the fence was completed due to an error * rather than success). The value of the status condition is only valid * if the fence has been signaled, so dma_fence_get_status_locked() first checks * the signal state before reporting the error status. * * Returns 0 if the fence has not yet been signaled, 1 if the fence has * been signaled without an error condition, or a negative error code * if the fence completed with an error. */ static inline int dma_fence_get_status_locked(struct dma_fence *fence) { if (dma_fence_is_signaled_locked(fence)) return fence->error ?: 1; else return 0; } int dma_fence_get_status(struct dma_fence *fence); /** * dma_fence_set_error - flag an error condition on the fence * @fence: the dma_fence * @error: the error to store * * Drivers can supply an optional error status condition before they signal * the fence, to indicate that the fence was completed due to an error * rather than success. This must be set before signaling (so that the value * is visible before any waiters on the signal callback are woken). This * helper exists to help catch erroneous setting of #dma_fence.error. */ static inline void dma_fence_set_error(struct dma_fence *fence, int error) { WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); WARN_ON(error >= 0 || error < -MAX_ERRNO); fence->error = error; } signed long dma_fence_wait_timeout(struct dma_fence *, bool intr, signed long timeout); signed long dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, bool intr, signed long timeout, uint32_t *idx); /** * dma_fence_wait - sleep until the fence gets signaled * @fence: the fence to wait on * @intr: if true, do an interruptible wait * * This function will return -ERESTARTSYS if interrupted by a signal, * or 0 if the fence was signaled. Other error values may be * returned on custom implementations. * * Performs a synchronous wait on this fence. It is assumed the caller * directly or indirectly holds a reference to the fence, otherwise the * fence might be freed before return, resulting in undefined behavior. * * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
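 *
 * A minimal usage sketch (illustrative; assumes the caller holds a
 * reference on fence):
 *
 *	long ret = dma_fence_wait(fence, true);
 *	if (ret == -ERESTARTSYS)
 *		return ret;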
*/ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) { signed long ret; /* Since dma_fence_wait_timeout cannot timeout with * MAX_SCHEDULE_TIMEOUT, only valid return values are * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. */ ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); return ret < 0 ? ret : 0; } struct dma_fence *dma_fence_get_stub(void); u64 dma_fence_context_alloc(unsigned num); #define DMA_FENCE_TRACE(f, fmt, args...) \ do { \ struct dma_fence *__ff = (f); \ if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ pr_info("f %llu#%llu: " fmt, \ __ff->context, __ff->seqno, ##args); \ } while (0) #define DMA_FENCE_WARN(f, fmt, args...) \ do { \ struct dma_fence *__ff = (f); \ pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\ ##args); \ } while (0) #define DMA_FENCE_ERR(f, fmt, args...) \ do { \ struct dma_fence *__ff = (f); \ pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \ ##args); \ } while (0) #endif /* __LINUX_DMA_FENCE_H */ blk-mq-pci.h 0000644 00000000415 14722070374 0006657 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BLK_MQ_PCI_H #define _LINUX_BLK_MQ_PCI_H struct blk_mq_queue_map; struct pci_dev; int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev, int offset); #endif /* _LINUX_BLK_MQ_PCI_H */ sh_eth.h 0000644 00000000561 14722070374 0006177 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_SH_ETH_H__ #define __ASM_SH_ETH_H__ #include <linux/phy.h> #include <linux/if_ether.h> struct sh_eth_plat_data { int phy; int phy_irq; phy_interface_t phy_interface; void (*set_mdio_gate)(void *addr); unsigned char mac_addr[ETH_ALEN]; unsigned no_ether_link:1; unsigned ether_link_active_low:1; }; #endif sfp.h 0000644 00000034164 14722070374 0005523 0 ustar 00 #ifndef LINUX_SFP_H #define LINUX_SFP_H #include <linux/phy.h> struct sfp_eeprom_base { u8 phys_id; u8 phys_ext_id; u8 connector; #if defined __BIG_ENDIAN_BITFIELD u8 e10g_base_er:1; u8 e10g_base_lrm:1; u8 e10g_base_lr:1; u8 e10g_base_sr:1; u8 if_1x_sx:1; u8 if_1x_lx:1; u8 if_1x_copper_active:1; u8 if_1x_copper_passive:1; u8 escon_mmf_1310_led:1; u8 escon_smf_1310_laser:1; u8 sonet_oc192_short_reach:1; u8 sonet_reach_bit1:1; u8 sonet_reach_bit2:1; u8 sonet_oc48_long_reach:1; u8 sonet_oc48_intermediate_reach:1; u8 sonet_oc48_short_reach:1; u8 unallocated_5_7:1; u8 sonet_oc12_smf_long_reach:1; u8 sonet_oc12_smf_intermediate_reach:1; u8 sonet_oc12_short_reach:1; u8 unallocated_5_3:1; u8 sonet_oc3_smf_long_reach:1; u8 sonet_oc3_smf_intermediate_reach:1; u8 sonet_oc3_short_reach:1; u8 e_base_px:1; u8 e_base_bx10:1; u8 e100_base_fx:1; u8 e100_base_lx:1; u8 e1000_base_t:1; u8 e1000_base_cx:1; u8 e1000_base_lx:1; u8 e1000_base_sx:1; u8 fc_ll_v:1; u8 fc_ll_s:1; u8 fc_ll_i:1; u8 fc_ll_l:1; u8 fc_ll_m:1; u8 fc_tech_sa:1; u8 fc_tech_lc:1; u8 fc_tech_electrical_inter_enclosure:1; u8 fc_tech_electrical_intra_enclosure:1; u8 fc_tech_sn:1; u8 fc_tech_sl:1; u8 fc_tech_ll:1; u8 sfp_ct_active:1; u8 sfp_ct_passive:1; u8 unallocated_8_1:1; u8 unallocated_8_0:1; u8 fc_media_tw:1; u8 fc_media_tp:1; u8 fc_media_mi:1; u8 fc_media_tv:1; u8 fc_media_m6:1; u8 fc_media_m5:1; u8 unallocated_9_1:1; u8 fc_media_sm:1; u8 fc_speed_1200:1; u8 fc_speed_800:1; u8 fc_speed_1600:1; u8 fc_speed_400:1; u8 fc_speed_3200:1; u8 fc_speed_200:1; u8 unallocated_10_1:1; u8 fc_speed_100:1; #elif defined __LITTLE_ENDIAN_BITFIELD u8 if_1x_copper_passive:1; u8 if_1x_copper_active:1; u8 if_1x_lx:1; u8 if_1x_sx:1; u8 e10g_base_sr:1; u8 e10g_base_lr:1; u8 e10g_base_lrm:1; u8 
e10g_base_er:1; u8 sonet_oc3_short_reach:1; u8 sonet_oc3_smf_intermediate_reach:1; u8 sonet_oc3_smf_long_reach:1; u8 unallocated_5_3:1; u8 sonet_oc12_short_reach:1; u8 sonet_oc12_smf_intermediate_reach:1; u8 sonet_oc12_smf_long_reach:1; u8 unallocated_5_7:1; u8 sonet_oc48_short_reach:1; u8 sonet_oc48_intermediate_reach:1; u8 sonet_oc48_long_reach:1; u8 sonet_reach_bit2:1; u8 sonet_reach_bit1:1; u8 sonet_oc192_short_reach:1; u8 escon_smf_1310_laser:1; u8 escon_mmf_1310_led:1; u8 e1000_base_sx:1; u8 e1000_base_lx:1; u8 e1000_base_cx:1; u8 e1000_base_t:1; u8 e100_base_lx:1; u8 e100_base_fx:1; u8 e_base_bx10:1; u8 e_base_px:1; u8 fc_tech_electrical_inter_enclosure:1; u8 fc_tech_lc:1; u8 fc_tech_sa:1; u8 fc_ll_m:1; u8 fc_ll_l:1; u8 fc_ll_i:1; u8 fc_ll_s:1; u8 fc_ll_v:1; u8 unallocated_8_0:1; u8 unallocated_8_1:1; u8 sfp_ct_passive:1; u8 sfp_ct_active:1; u8 fc_tech_ll:1; u8 fc_tech_sl:1; u8 fc_tech_sn:1; u8 fc_tech_electrical_intra_enclosure:1; u8 fc_media_sm:1; u8 unallocated_9_1:1; u8 fc_media_m5:1; u8 fc_media_m6:1; u8 fc_media_tv:1; u8 fc_media_mi:1; u8 fc_media_tp:1; u8 fc_media_tw:1; u8 fc_speed_100:1; u8 unallocated_10_1:1; u8 fc_speed_200:1; u8 fc_speed_3200:1; u8 fc_speed_400:1; u8 fc_speed_1600:1; u8 fc_speed_800:1; u8 fc_speed_1200:1; #else #error Unknown Endian #endif u8 encoding; u8 br_nominal; u8 rate_id; u8 link_len[6]; char vendor_name[16]; u8 extended_cc; char vendor_oui[3]; char vendor_pn[16]; char vendor_rev[4]; union { __be16 optical_wavelength; __be16 cable_compliance; struct { #if defined __BIG_ENDIAN_BITFIELD u8 reserved60_2:6; u8 fc_pi_4_app_h:1; u8 sff8431_app_e:1; u8 reserved61:8; #elif defined __LITTLE_ENDIAN_BITFIELD u8 sff8431_app_e:1; u8 fc_pi_4_app_h:1; u8 reserved60_2:6; u8 reserved61:8; #else #error Unknown Endian #endif } __packed passive; struct { #if defined __BIG_ENDIAN_BITFIELD u8 reserved60_4:4; u8 fc_pi_4_lim:1; u8 sff8431_lim:1; u8 fc_pi_4_app_h:1; u8 sff8431_app_e:1; u8 reserved61:8; #elif defined __LITTLE_ENDIAN_BITFIELD u8 sff8431_app_e:1; u8 fc_pi_4_app_h:1; u8 sff8431_lim:1; u8 fc_pi_4_lim:1; u8 reserved60_4:4; u8 reserved61:8; #else #error Unknown Endian #endif } __packed active; } __packed; u8 reserved62; u8 cc_base; } __packed; struct sfp_eeprom_ext { __be16 options; u8 br_max; u8 br_min; char vendor_sn[16]; char datecode[8]; u8 diagmon; u8 enhopts; u8 sff8472_compliance; u8 cc_ext; } __packed; /** * struct sfp_eeprom_id - raw SFP module identification information * @base: base SFP module identification structure * @ext: extended SFP module identification structure * * See the SFF-8472 specification and related documents for the definition * of these structure members. 
This can be obtained from * https://www.snia.org/technology-communities/sff/specifications */ struct sfp_eeprom_id { struct sfp_eeprom_base base; struct sfp_eeprom_ext ext; } __packed; struct sfp_diag { __be16 temp_high_alarm; __be16 temp_low_alarm; __be16 temp_high_warn; __be16 temp_low_warn; __be16 volt_high_alarm; __be16 volt_low_alarm; __be16 volt_high_warn; __be16 volt_low_warn; __be16 bias_high_alarm; __be16 bias_low_alarm; __be16 bias_high_warn; __be16 bias_low_warn; __be16 txpwr_high_alarm; __be16 txpwr_low_alarm; __be16 txpwr_high_warn; __be16 txpwr_low_warn; __be16 rxpwr_high_alarm; __be16 rxpwr_low_alarm; __be16 rxpwr_high_warn; __be16 rxpwr_low_warn; __be16 laser_temp_high_alarm; __be16 laser_temp_low_alarm; __be16 laser_temp_high_warn; __be16 laser_temp_low_warn; __be16 tec_cur_high_alarm; __be16 tec_cur_low_alarm; __be16 tec_cur_high_warn; __be16 tec_cur_low_warn; __be32 cal_rxpwr4; __be32 cal_rxpwr3; __be32 cal_rxpwr2; __be32 cal_rxpwr1; __be32 cal_rxpwr0; __be16 cal_txi_slope; __be16 cal_txi_offset; __be16 cal_txpwr_slope; __be16 cal_txpwr_offset; __be16 cal_t_slope; __be16 cal_t_offset; __be16 cal_v_slope; __be16 cal_v_offset; } __packed; /* SFP EEPROM registers */ enum { SFP_PHYS_ID = 0x00, SFP_PHYS_EXT_ID = 0x01, SFP_CONNECTOR = 0x02, SFP_COMPLIANCE = 0x03, SFP_ENCODING = 0x0b, SFP_BR_NOMINAL = 0x0c, SFP_RATE_ID = 0x0d, SFP_LINK_LEN_SM_KM = 0x0e, SFP_LINK_LEN_SM_100M = 0x0f, SFP_LINK_LEN_50UM_OM2_10M = 0x10, SFP_LINK_LEN_62_5UM_OM1_10M = 0x11, SFP_LINK_LEN_COPPER_1M = 0x12, SFP_LINK_LEN_50UM_OM4_10M = 0x12, SFP_LINK_LEN_50UM_OM3_10M = 0x13, SFP_VENDOR_NAME = 0x14, SFP_VENDOR_OUI = 0x25, SFP_VENDOR_PN = 0x28, SFP_VENDOR_REV = 0x38, SFP_OPTICAL_WAVELENGTH_MSB = 0x3c, SFP_OPTICAL_WAVELENGTH_LSB = 0x3d, SFP_CABLE_SPEC = 0x3c, SFP_CC_BASE = 0x3f, SFP_OPTIONS = 0x40, /* 2 bytes, MSB, LSB */ SFP_BR_MAX = 0x42, SFP_BR_MIN = 0x43, SFP_VENDOR_SN = 0x44, SFP_DATECODE = 0x54, SFP_DIAGMON = 0x5c, SFP_ENHOPTS = 0x5d, SFP_SFF8472_COMPLIANCE = 0x5e, SFP_CC_EXT = 0x5f, SFP_PHYS_ID_SFF = 0x02, SFP_PHYS_ID_SFP = 0x03, SFP_PHYS_EXT_ID_SFP = 0x04, SFP_CONNECTOR_UNSPEC = 0x00, /* codes 01-05 not supportable on SFP, but some modules have single SC */ SFP_CONNECTOR_SC = 0x01, SFP_CONNECTOR_FIBERJACK = 0x06, SFP_CONNECTOR_LC = 0x07, SFP_CONNECTOR_MT_RJ = 0x08, SFP_CONNECTOR_MU = 0x09, SFP_CONNECTOR_SG = 0x0a, SFP_CONNECTOR_OPTICAL_PIGTAIL = 0x0b, SFP_CONNECTOR_MPO_1X12 = 0x0c, SFP_CONNECTOR_MPO_2X16 = 0x0d, SFP_CONNECTOR_HSSDC_II = 0x20, SFP_CONNECTOR_COPPER_PIGTAIL = 0x21, SFP_CONNECTOR_RJ45 = 0x22, SFP_CONNECTOR_NOSEPARATE = 0x23, SFP_CONNECTOR_MXC_2X16 = 0x24, SFP_ENCODING_UNSPEC = 0x00, SFP_ENCODING_8B10B = 0x01, SFP_ENCODING_4B5B = 0x02, SFP_ENCODING_NRZ = 0x03, SFP_ENCODING_8472_MANCHESTER = 0x04, SFP_ENCODING_8472_SONET = 0x05, SFP_ENCODING_8472_64B66B = 0x06, SFP_ENCODING_256B257B = 0x07, SFP_ENCODING_PAM4 = 0x08, SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13), SFP_OPTIONS_PAGING_A2 = BIT(12), SFP_OPTIONS_RETIMER = BIT(11), SFP_OPTIONS_COOLED_XCVR = BIT(10), SFP_OPTIONS_POWER_DECL = BIT(9), SFP_OPTIONS_RX_LINEAR_OUT = BIT(8), SFP_OPTIONS_RX_DECISION_THRESH = BIT(7), SFP_OPTIONS_TUNABLE_TX = BIT(6), SFP_OPTIONS_RATE_SELECT = BIT(5), SFP_OPTIONS_TX_DISABLE = BIT(4), SFP_OPTIONS_TX_FAULT = BIT(3), SFP_OPTIONS_LOS_INVERTED = BIT(2), SFP_OPTIONS_LOS_NORMAL = BIT(1), SFP_DIAGMON_DDM = BIT(6), SFP_DIAGMON_INT_CAL = BIT(5), SFP_DIAGMON_EXT_CAL = BIT(4), SFP_DIAGMON_RXPWR_AVG = BIT(3), SFP_DIAGMON_ADDRMODE = BIT(2), SFP_ENHOPTS_ALARMWARN = BIT(7), SFP_ENHOPTS_SOFT_TX_DISABLE = BIT(6), 
SFP_ENHOPTS_SOFT_TX_FAULT = BIT(5), SFP_ENHOPTS_SOFT_RX_LOS = BIT(4), SFP_ENHOPTS_SOFT_RATE_SELECT = BIT(3), SFP_ENHOPTS_APP_SELECT_SFF8079 = BIT(2), SFP_ENHOPTS_SOFT_RATE_SFF8431 = BIT(1), SFP_SFF8472_COMPLIANCE_NONE = 0x00, SFP_SFF8472_COMPLIANCE_REV9_3 = 0x01, SFP_SFF8472_COMPLIANCE_REV9_5 = 0x02, SFP_SFF8472_COMPLIANCE_REV10_2 = 0x03, SFP_SFF8472_COMPLIANCE_REV10_4 = 0x04, SFP_SFF8472_COMPLIANCE_REV11_0 = 0x05, SFP_SFF8472_COMPLIANCE_REV11_3 = 0x06, SFP_SFF8472_COMPLIANCE_REV11_4 = 0x07, SFP_SFF8472_COMPLIANCE_REV12_0 = 0x08, }; /* SFP Diagnostics */ enum { /* Alarms and warnings stored MSB at lower address than LSB */ SFP_TEMP_HIGH_ALARM = 0x00, SFP_TEMP_LOW_ALARM = 0x02, SFP_TEMP_HIGH_WARN = 0x04, SFP_TEMP_LOW_WARN = 0x06, SFP_VOLT_HIGH_ALARM = 0x08, SFP_VOLT_LOW_ALARM = 0x0a, SFP_VOLT_HIGH_WARN = 0x0c, SFP_VOLT_LOW_WARN = 0x0e, SFP_BIAS_HIGH_ALARM = 0x10, SFP_BIAS_LOW_ALARM = 0x12, SFP_BIAS_HIGH_WARN = 0x14, SFP_BIAS_LOW_WARN = 0x16, SFP_TXPWR_HIGH_ALARM = 0x18, SFP_TXPWR_LOW_ALARM = 0x1a, SFP_TXPWR_HIGH_WARN = 0x1c, SFP_TXPWR_LOW_WARN = 0x1e, SFP_RXPWR_HIGH_ALARM = 0x20, SFP_RXPWR_LOW_ALARM = 0x22, SFP_RXPWR_HIGH_WARN = 0x24, SFP_RXPWR_LOW_WARN = 0x26, SFP_LASER_TEMP_HIGH_ALARM = 0x28, SFP_LASER_TEMP_LOW_ALARM = 0x2a, SFP_LASER_TEMP_HIGH_WARN = 0x2c, SFP_LASER_TEMP_LOW_WARN = 0x2e, SFP_TEC_CUR_HIGH_ALARM = 0x30, SFP_TEC_CUR_LOW_ALARM = 0x32, SFP_TEC_CUR_HIGH_WARN = 0x34, SFP_TEC_CUR_LOW_WARN = 0x36, SFP_CAL_RXPWR4 = 0x38, SFP_CAL_RXPWR3 = 0x3c, SFP_CAL_RXPWR2 = 0x40, SFP_CAL_RXPWR1 = 0x44, SFP_CAL_RXPWR0 = 0x48, SFP_CAL_TXI_SLOPE = 0x4c, SFP_CAL_TXI_OFFSET = 0x4e, SFP_CAL_TXPWR_SLOPE = 0x50, SFP_CAL_TXPWR_OFFSET = 0x52, SFP_CAL_T_SLOPE = 0x54, SFP_CAL_T_OFFSET = 0x56, SFP_CAL_V_SLOPE = 0x58, SFP_CAL_V_OFFSET = 0x5a, SFP_CHKSUM = 0x5f, SFP_TEMP = 0x60, SFP_VCC = 0x62, SFP_TX_BIAS = 0x64, SFP_TX_POWER = 0x66, SFP_RX_POWER = 0x68, SFP_LASER_TEMP = 0x6a, SFP_TEC_CUR = 0x6c, SFP_STATUS = 0x6e, SFP_ALARM0 = 0x70, SFP_ALARM0_TEMP_HIGH = BIT(7), SFP_ALARM0_TEMP_LOW = BIT(6), SFP_ALARM0_VCC_HIGH = BIT(5), SFP_ALARM0_VCC_LOW = BIT(4), SFP_ALARM0_TX_BIAS_HIGH = BIT(3), SFP_ALARM0_TX_BIAS_LOW = BIT(2), SFP_ALARM0_TXPWR_HIGH = BIT(1), SFP_ALARM0_TXPWR_LOW = BIT(0), SFP_ALARM1 = 0x71, SFP_ALARM1_RXPWR_HIGH = BIT(7), SFP_ALARM1_RXPWR_LOW = BIT(6), SFP_WARN0 = 0x74, SFP_WARN0_TEMP_HIGH = BIT(7), SFP_WARN0_TEMP_LOW = BIT(6), SFP_WARN0_VCC_HIGH = BIT(5), SFP_WARN0_VCC_LOW = BIT(4), SFP_WARN0_TX_BIAS_HIGH = BIT(3), SFP_WARN0_TX_BIAS_LOW = BIT(2), SFP_WARN0_TXPWR_HIGH = BIT(1), SFP_WARN0_TXPWR_LOW = BIT(0), SFP_WARN1 = 0x75, SFP_WARN1_RXPWR_HIGH = BIT(7), SFP_WARN1_RXPWR_LOW = BIT(6), SFP_EXT_STATUS = 0x76, SFP_VSL = 0x78, SFP_PAGE = 0x7f, }; struct fwnode_handle; struct ethtool_eeprom; struct ethtool_modinfo; struct sfp_bus; /** * struct sfp_upstream_ops - upstream operations structure * @attach: called when the sfp socket driver is bound to the upstream * (mandatory). * @detach: called when the sfp socket driver is unbound from the upstream * (mandatory). * @module_insert: called after a module has been detected to determine * whether the module is supported for the upstream device. * @module_remove: called after the module has been removed. * @link_down: called when the link is non-operational for whatever * reason. * @link_up: called when the link is operational. * @connect_phy: called when an I2C accessible PHY has been detected * on the module. * @disconnect_phy: called when a module with an I2C accessible PHY has * been removed. */ struct sfp_upstream_ops { void (*attach)(void *priv, struct sfp_bus *bus); void (*detach)(void *priv, struct sfp_bus *bus); int (*module_insert)(void *priv, const struct sfp_eeprom_id *id); void (*module_remove)(void *priv); void (*link_down)(void *priv); void (*link_up)(void *priv); int (*connect_phy)(void *priv, struct phy_device *); void (*disconnect_phy)(void *priv); };
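/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * A MAC driver might provide the two mandatory callbacks plus
 * module_insert like this; all example_* names are hypothetical.
 */
static void example_sfp_attach(void *priv, struct sfp_bus *bus)
{
	/* e.g. record @bus in the driver's private state */
}

static void example_sfp_detach(void *priv, struct sfp_bus *bus)
{
	/* e.g. clear the recorded bus pointer */
}

static int example_sfp_module_insert(void *priv, const struct sfp_eeprom_id *id)
{
	return 0;	/* accept every module in this sketch */
}

static const struct sfp_upstream_ops example_sfp_ops = {
	.attach		= example_sfp_attach,
	.detach		= example_sfp_detach,
	.module_insert	= example_sfp_module_insert,
};

/* typically wired up from the upstream driver's probe path, for instance:
 *	bus = sfp_register_upstream(fwnode, priv, &example_sfp_ops);
 */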
#if IS_ENABLED(CONFIG_SFP) int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support); void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support); phy_interface_t sfp_select_interface(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *link_modes); int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo); int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, u8 *data); void sfp_upstream_start(struct sfp_bus *bus); void sfp_upstream_stop(struct sfp_bus *bus); struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, void *upstream, const struct sfp_upstream_ops *ops); void sfp_unregister_upstream(struct sfp_bus *bus); #else static inline int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support) { return PORT_OTHER; } static inline void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support) { } static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *link_modes) { return PHY_INTERFACE_MODE_NA; } static inline int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo) { return -EOPNOTSUPP; } static inline int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, u8 *data) { return -EOPNOTSUPP; } static inline void sfp_upstream_start(struct sfp_bus *bus) { } static inline void sfp_upstream_stop(struct sfp_bus *bus) { } static inline struct sfp_bus *sfp_register_upstream( struct fwnode_handle *fwnode, void *upstream, const struct sfp_upstream_ops *ops) { return (struct sfp_bus *)-1; } static inline void sfp_unregister_upstream(struct sfp_bus *bus) { } #endif #endif mmdebug.h 0000644 00000004511 14722070374 0006344 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_MM_DEBUG_H #define LINUX_MM_DEBUG_H 1 #include <linux/bug.h> #include <linux/stringify.h> struct page; struct vm_area_struct; struct mm_struct; extern void dump_page(struct page *page, const char *reason); extern void __dump_page(struct page *page, const char *reason); void dump_vma(const struct vm_area_struct *vma); void dump_mm(const struct mm_struct *mm); #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) #define VM_BUG_ON_PAGE(cond, page) \ do { \ if (unlikely(cond)) { \ dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\ BUG(); \ } \ } while (0) #define VM_BUG_ON_VMA(cond, vma) \ do { \ if (unlikely(cond)) { \ dump_vma(vma); \ BUG(); \ } \ } while (0) #define VM_BUG_ON_MM(cond, mm) \ do { \ if (unlikely(cond)) { \ dump_mm(mm); \ BUG(); \ } \ } while (0) #define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \ static bool __section(.data.once) __warned; \ int __ret_warn_once = !!(cond); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\ __warned = true; \ WARN_ON(1); \ } \ unlikely(__ret_warn_once); \ }) #define VM_WARN_ON(cond) (void)WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...)
(void)WARN_ONCE(cond, format) #define VM_WARN(cond, format...) (void)WARN(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) #define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond) #endif #ifdef CONFIG_DEBUG_VIRTUAL #define VIRTUAL_BUG_ON(cond) BUG_ON(cond) #else #define VIRTUAL_BUG_ON(cond) do { } while (0) #endif #ifdef CONFIG_DEBUG_VM_PGFLAGS #define VM_BUG_ON_PGFLAGS(cond, page) VM_BUG_ON_PAGE(cond, page) #else #define VM_BUG_ON_PGFLAGS(cond, page) BUILD_BUG_ON_INVALID(cond) #endif #endif cciss_ioctl.h 0000644 00000002035 14722070374 0007221 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef CCISS_IOCTLH #define CCISS_IOCTLH #include <uapi/linux/cciss_ioctl.h> #ifdef CONFIG_COMPAT /* 32 bit compatible ioctl structs */ typedef struct _IOCTL32_Command_struct { LUNAddr_struct LUN_info; RequestBlock_struct Request; ErrorInfo_struct error_info; WORD buf_size; /* size in bytes of the buf */ __u32 buf; /* 32 bit pointer to data buffer */ } IOCTL32_Command_struct; typedef struct _BIG_IOCTL32_Command_struct { LUNAddr_struct LUN_info; RequestBlock_struct Request; ErrorInfo_struct error_info; DWORD malloc_size; /* < MAX_KMALLOC_SIZE in cciss.c */ DWORD buf_size; /* size in bytes of the buf */ /* < malloc_size * MAXSGENTRIES */ __u32 buf; /* 32 bit pointer to data buffer */ } BIG_IOCTL32_Command_struct; #define CCISS_PASSTHRU32 _IOWR(CCISS_IOC_MAGIC, 11, IOCTL32_Command_struct) #define CCISS_BIG_PASSTHRU32 _IOWR(CCISS_IOC_MAGIC, 18, BIG_IOCTL32_Command_struct) #endif /* CONFIG_COMPAT */ #endif lz4.h 0000644 00000064703 14722070374 0005446 0 ustar 00 /* LZ4 Kernel Interface * * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file is based on the original header file * for LZ4 - Fast LZ compression algorithm. * * LZ4 - Fast LZ compression algorithm * Copyright (C) 2011-2016, Yann Collet. * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * You can contact the author at : * - LZ4 homepage : http://www.lz4.org * - LZ4 source repository : https://github.com/lz4/lz4 */ #ifndef __LZ4_H__ #define __LZ4_H__ #include <linux/types.h> #include <linux/string.h> /* memset, memcpy */ /*-************************************************************************ * CONSTANTS **************************************************************************/ /* * LZ4_MEMORY_USAGE : * Memory usage formula : N->2^N Bytes * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #define LZ4_MEMORY_USAGE 14 #define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ #define LZ4_COMPRESSBOUND(isize) (\ (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \ ? 0 \ : (isize) + ((isize)/255) + 16) #define LZ4_ACCELERATION_DEFAULT 1 #define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) #define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) #define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) #define LZ4HC_MIN_CLEVEL 3 #define LZ4HC_DEFAULT_CLEVEL 9 #define LZ4HC_MAX_CLEVEL 16 #define LZ4HC_DICTIONARY_LOGSIZE 16 #define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE) #define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1) #define LZ4HC_HASH_LOG (LZ4HC_DICTIONARY_LOGSIZE - 1) #define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG) #define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1) /*-************************************************************************ * STREAMING CONSTANTS AND STRUCTURES **************************************************************************/ #define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE - 3)) + 4) #define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long)) #define LZ4_STREAMHCSIZE 262192 #define LZ4_STREAMHCSIZE_SIZET (262192 / sizeof(size_t)) #define LZ4_STREAMDECODESIZE_U64 4 #define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * \ sizeof(unsigned long long)) /* * LZ4_stream_t - information structure to track an LZ4 stream. */ typedef struct { uint32_t hashTable[LZ4_HASH_SIZE_U32]; uint32_t currentOffset; uint32_t initCheck; const uint8_t *dictionary; uint8_t *bufferStart; uint32_t dictSize; } LZ4_stream_t_internal; typedef union { unsigned long long table[LZ4_STREAMSIZE_U64]; LZ4_stream_t_internal internal_donotuse; } LZ4_stream_t; /* * LZ4_streamHC_t - information structure to track an LZ4HC stream. 
*/ typedef struct { unsigned int hashTable[LZ4HC_HASHTABLESIZE]; unsigned short chainTable[LZ4HC_MAXD]; /* next block to continue on current prefix */ const unsigned char *end; /* All index relative to this position */ const unsigned char *base; /* alternate base for extDict */ const unsigned char *dictBase; /* below that point, need extDict */ unsigned int dictLimit; /* below that point, no more dict */ unsigned int lowLimit; /* index from which to continue dict update */ unsigned int nextToUpdate; unsigned int compressionLevel; } LZ4HC_CCtx_internal; typedef union { size_t table[LZ4_STREAMHCSIZE_SIZET]; LZ4HC_CCtx_internal internal_donotuse; } LZ4_streamHC_t; /* * LZ4_streamDecode_t - information structure to track an * LZ4 stream during decompression. * * init this structure using LZ4_setStreamDecode (or memset()) before first use */ typedef struct { const uint8_t *externalDict; size_t extDictSize; const uint8_t *prefixEnd; size_t prefixSize; } LZ4_streamDecode_t_internal; typedef union { unsigned long long table[LZ4_STREAMDECODESIZE_U64]; LZ4_streamDecode_t_internal internal_donotuse; } LZ4_streamDecode_t; /*-************************************************************************ * SIZE OF STATE **************************************************************************/ #define LZ4_MEM_COMPRESS LZ4_STREAMSIZE #define LZ4HC_MEM_COMPRESS LZ4_STREAMHCSIZE /*-************************************************************************ * Compression Functions **************************************************************************/ /** * LZ4_compressBound() - Max. output size in worst case scenarios * @isize: Size of the input data * * Return: Max. size LZ4 may output in a "worst case" scenario * (data not compressible) */ static inline int LZ4_compressBound(size_t isize) { return LZ4_COMPRESSBOUND(isize); } /** * LZ4_compress_default() - Compress data from source to dest * @source: source address of the original data * @dest: output buffer address of the compressed data * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE * @maxOutputSize: full or partial size of buffer 'dest' * which must be already allocated * @wrkmem: address of the working memory. * This requires 'wrkmem' of size LZ4_MEM_COMPRESS. * * Compresses 'inputSize' bytes from buffer 'source' * into already allocated 'dest' buffer of size 'maxOutputSize'. * Compression is guaranteed to succeed if * 'maxOutputSize' >= LZ4_compressBound(inputSize). * It also runs faster, so it's a recommended setting. * If the function cannot compress 'source' into a more limited 'dest' budget, * compression stops *immediately*, and the function result is zero. * As a consequence, 'dest' content is not valid. * * Return: Number of bytes written into buffer 'dest' * (necessarily <= maxOutputSize) or 0 if compression fails */ int LZ4_compress_default(const char *source, char *dest, int inputSize, int maxOutputSize, void *wrkmem);
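/*
 * Editor's note: a minimal kernel-style sketch, not part of the original
 * header. example_lz4_compress() is hypothetical and assumes the usual
 * <linux/mm.h> allocation helpers are available; the caller provides both
 * buffers, and dst should be sized with LZ4_compressBound() when
 * compression must not fail for lack of space.
 */
static int example_lz4_compress(const char *src, int src_len,
				char *dst, int dst_len)
{
	void *wrkmem = kvmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
	int out_len;

	if (!wrkmem)
		return -ENOMEM;

	out_len = LZ4_compress_default(src, dst, src_len, dst_len, wrkmem);
	kvfree(wrkmem);

	/* 0 means the output did not fit into dst_len bytes */
	return out_len ? out_len : -E2BIG;
}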
/** * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param * @source: source address of the original data * @dest: output buffer address of the compressed data * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE * @maxOutputSize: full or partial size of buffer 'dest' * which must be already allocated * @acceleration: acceleration factor * @wrkmem: address of the working memory. * This requires 'wrkmem' of size LZ4_MEM_COMPRESS. * * Same as LZ4_compress_default(), but allows selecting an "acceleration" * factor. The larger the acceleration value, the faster the algorithm, * but also the lower the compression ratio. It's a trade-off. It can be * fine-tuned, with each successive value providing roughly +~3% to speed. * An acceleration value of "1" is the same as regular LZ4_compress_default() * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1. * * Return: Number of bytes written into buffer 'dest' * (necessarily <= maxOutputSize) or 0 if compression fails */ int LZ4_compress_fast(const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration, void *wrkmem); /** * LZ4_compress_destSize() - Compress as much data as possible * from source to dest * @source: source address of the original data * @dest: output buffer address of the compressed data * @sourceSizePtr: will be modified to indicate how many bytes were read * from 'source' to fill 'dest'. New value is necessarily <= old value. * @targetDestSize: Size of buffer 'dest' which must be already allocated * @wrkmem: address of the working memory. * This requires 'wrkmem' of size LZ4_MEM_COMPRESS. * * Reverses the logic: compresses as much data as possible * from the 'source' buffer into an already allocated buffer 'dest' * of size 'targetDestSize'. * This function either compresses the entire 'source' content into 'dest' * if it's large enough, or fills the 'dest' buffer completely with as much * data as possible from 'source'. * * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize) * or 0 if compression fails */ int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr, int targetDestSize, void *wrkmem); /*-************************************************************************ * Decompression Functions **************************************************************************/ /** * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest' * @source: source address of the compressed data * @dest: output buffer address of the uncompressed data * which must be already allocated with 'originalSize' bytes * @originalSize: is the original and therefore uncompressed size * * Decompresses data from 'source' into 'dest'. * This function fully respects memory boundaries for properly formed * compressed data. * It is a bit faster than LZ4_decompress_safe(). * However, it does not provide any protection against intentionally * modified data streams (malicious input). * Use this function in a trusted environment only * (data to decode comes from a trusted source). * * Return: number of bytes read from the source buffer * or a negative result if decompression fails. */ int LZ4_decompress_fast(const char *source, char *dest, int originalSize); /** * LZ4_decompress_safe() - Decompression protected against buffer overflow * @source: source address of the compressed data * @dest: output buffer address of the uncompressed data * which must be already allocated * @compressedSize: is the precise full size of the compressed block * @maxDecompressedSize: is the size of 'dest' buffer * * Decompresses data from 'source' into 'dest'. * If the source stream is detected malformed, the function will * stop decoding and return a negative result. * This function is protected against buffer overflow exploits, * including malicious data packets. It never writes outside the output * buffer, nor reads outside the input buffer. * * Return: number of bytes decompressed into destination buffer * (necessarily <= maxDecompressedSize) * or a negative result in case of error */ int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, int maxDecompressedSize);
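/*
 * Editor's note: a companion sketch, not part of the original header, for
 * the safe decompression path. No working memory is needed on the decode
 * side; example_lz4_decompress() is hypothetical.
 */
static int example_lz4_decompress(const char *src, int src_len,
				  char *dst, int dst_len)
{
	/* returns the decompressed length, or <0 on malformed input */
	int out_len = LZ4_decompress_safe(src, dst, src_len, dst_len);

	return out_len < 0 ? -EINVAL : out_len;
}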
/** * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize' * at position 'source' into buffer 'dest' * @source: source address of the compressed data * @dest: output buffer address of the decompressed data which must be * already allocated * @compressedSize: is the precise full size of the compressed block. * @targetOutputSize: the decompression operation will try * to stop as soon as 'targetOutputSize' has been reached * @maxDecompressedSize: is the size of destination buffer * * This function decompresses a compressed block of size 'compressedSize' * at position 'source' into destination buffer 'dest' * of size 'maxDecompressedSize'. * The function tries to stop the decompressing operation as soon as * 'targetOutputSize' has been reached, reducing decompression time. * This function never writes outside of the output buffer, * and never reads outside of the input buffer. * It is therefore protected against malicious data packets. * * Return: the number of bytes decoded in the destination buffer * (necessarily <= maxDecompressedSize) * or a negative result in case of error * */ int LZ4_decompress_safe_partial(const char *source, char *dest, int compressedSize, int targetOutputSize, int maxDecompressedSize); /*-************************************************************************ * LZ4 HC Compression **************************************************************************/ /** * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm * @src: source address of the original data * @dst: output buffer address of the compressed data * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE * @dstCapacity: full or partial size of buffer 'dst', * which must be already allocated * @compressionLevel: Recommended values are between 4 and 9, although any * value between 1 and LZ4HC_MAX_CLEVEL will work. * Values >LZ4HC_MAX_CLEVEL behave the same as 16. * @wrkmem: address of the working memory. * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS. * * Compress data from 'src' into 'dst', using the more powerful * but slower "HC" algorithm. Compression is guaranteed to succeed if * `dstCapacity >= LZ4_compressBound(srcSize)` * * Return: the number of bytes written into 'dst' or 0 if compression fails. */ int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity, int compressionLevel, void *wrkmem); /** * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure * @compressionLevel: Recommended values are between 4 and 9, although any * value between 1 and LZ4HC_MAX_CLEVEL will work. * Values >LZ4HC_MAX_CLEVEL behave the same as 16. * * An LZ4_streamHC_t structure can be allocated once * and re-used multiple times. * Use this function to init an allocated `LZ4_streamHC_t` structure * and start a new compression. */ void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel); /** * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC_t * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure * @dictionary: dictionary to load * @dictSize: size of dictionary * * Use this function to load a static dictionary into an LZ4_streamHC_t. * Any previous data will be forgotten, only 'dictionary' * will remain in memory.
* Loading a size of 0 is allowed. * * Return: dictionary size, in bytes (necessarily <= 64 KB) */ int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary, int dictSize); /** * LZ4_compress_HC_continue() - Compress 'src' using data from previously * compressed blocks as a dictionary using the HC algorithm * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure * @src: source address of the original data * @dst: output buffer address of the compressed data, * which must be already allocated * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE * @maxDstSize: full or partial size of buffer 'dst' * which must be already allocated * * These functions compress data in successive blocks of any size, using * previous blocks as dictionary. One key assumption is that previous * blocks (up to 64 KB) remain read-accessible while * compressing next blocks. There is an exception for ring buffers, * which can be smaller than 64 KB. * The ring buffer scenario is automatically detected and handled by * LZ4_compress_HC_continue(). * Before starting compression, the state must be properly initialized, * using LZ4_resetStreamHC(). * A first "fictional block" can then be designated as * the initial dictionary, using LZ4_loadDictHC() (Optional). * Then, use LZ4_compress_HC_continue() * to compress each successive block. Previous memory blocks * (including the initial dictionary when present) must remain accessible * and unmodified during compression. * The 'dst' buffer should be sized to handle worst case scenarios, using * LZ4_compressBound(), to ensure operation success. * If, for any reason, previous data blocks can't be preserved unmodified * in memory during the next compression block, * you must save them to a safer memory space, using LZ4_saveDictHC(). * The return value of LZ4_saveDictHC() is the size of the dictionary * effectively saved into 'safeBuffer'. * * Return: Number of bytes written into buffer 'dst' or 0 if compression fails */ int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src, char *dst, int srcSize, int maxDstSize); /** * LZ4_saveDictHC() - Save static dictionary from LZ4_streamHC_t * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure * @safeBuffer: buffer to save dictionary to, must be already allocated * @maxDictSize: size of 'safeBuffer' * * If the previously compressed data block is not guaranteed * to remain available at its memory location, * save it into a safer place (char *safeBuffer). * Note: you don't need to call LZ4_loadDictHC() afterwards; the * dictionary is immediately usable, so you can then call * LZ4_compress_HC_continue(). * * Return: saved dictionary size in bytes (necessarily <= maxDictSize), * or 0 if error. */ int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer, int maxDictSize); /*-********************************************* * Streaming Compression Functions ***********************************************/ /** * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure * @LZ4_stream: pointer to the 'LZ4_stream_t' structure * * An LZ4_stream_t structure can be allocated once * and re-used multiple times. * Use this function to init an allocated `LZ4_stream_t` structure * and start a new compression. */ void LZ4_resetStream(LZ4_stream_t *LZ4_stream); /** * LZ4_loadDict() - Load a static dictionary into LZ4_stream * @streamPtr: pointer to the LZ4_stream_t * @dictionary: dictionary to load * @dictSize: size of dictionary * * Use this function to load a static dictionary into LZ4_stream.
* Any previous data will be forgotten, only 'dictionary' * will remain in memory. * Loading a size of 0 is allowed. * * Return: dictionary size, in bytes (necessarily <= 64 KB) */ int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary, int dictSize); /** * LZ4_saveDict() - Save static dictionary from LZ4_stream * @streamPtr: pointer to the 'LZ4_stream_t' structure * @safeBuffer: buffer to save dictionary to, must be already allocated * @dictSize: size of 'safeBuffer' * * If the previously compressed data block is not guaranteed * to remain available at its memory location, * save it into a safer place (char *safeBuffer). * Note: you don't need to call LZ4_loadDict() afterwards; the * dictionary is immediately usable, so you can then call * LZ4_compress_fast_continue(). * * Return: saved dictionary size in bytes (necessarily <= dictSize), * or 0 if error. */ int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize); /** * LZ4_compress_fast_continue() - Compress 'src' using data from previously * compressed blocks as a dictionary * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure * @src: source address of the original data * @dst: output buffer address of the compressed data, * which must be already allocated * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE * @maxDstSize: full or partial size of buffer 'dst' * which must be already allocated * @acceleration: acceleration factor * * Compress buffer content 'src', using data from previously compressed blocks * as a dictionary to improve compression ratio. * Important: Previous data blocks are assumed to still * be present and unmodified! * If maxDstSize >= LZ4_compressBound(srcSize), * compression is guaranteed to succeed, and runs faster. * * Return: Number of bytes written into buffer 'dst' or 0 if compression fails */ int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src, char *dst, int srcSize, int maxDstSize, int acceleration);
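/*
 * Editor's note: a condensed streaming sketch, not part of the original
 * header, following the sequence described above: reset the state, then
 * feed successive blocks, with earlier blocks implicitly serving as the
 * dictionary. All example_* names are hypothetical and error handling is
 * reduced to the essentials.
 */
static void example_lz4_stream_compress(LZ4_stream_t *state,
					const char *blocks[], int nr_blocks,
					int block_len, char *out, int out_cap)
{
	int i;

	LZ4_resetStream(state);

	for (i = 0; i < nr_blocks; i++) {
		/* blocks[] stays resident, as the comments above require */
		int n = LZ4_compress_fast_continue(state, blocks[i], out,
						   block_len, out_cap,
						   LZ4_ACCELERATION_DEFAULT);
		if (!n)
			break;	/* out_cap too small for this block */
		out += n;
		out_cap -= n;
	}
}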
/** * LZ4_setStreamDecode() - Instruct where to find dictionary * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure * @dictionary: dictionary to use * @dictSize: size of dictionary * * Use this function to instruct where to find the dictionary. * Setting a size of 0 is allowed (same effect as reset). * * Return: 1 if OK, 0 if error */ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, const char *dictionary, int dictSize); /** * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure * @source: source address of the compressed data * @dest: output buffer address of the uncompressed data * which must be already allocated * @compressedSize: is the precise full size of the compressed block * @maxDecompressedSize: is the size of 'dest' buffer * * This decoding function allows decompression of multiple blocks * in "streaming" mode. * Previously decoded blocks *must* remain available at the memory position * where they were decoded (up to 64 KB). * In the case of a ring buffer, the decoding buffer must be either: * - Exactly the same size as the encoding buffer, with the same update rule * (block boundaries at the same positions). In which case, * the decoding & encoding ring buffer can have any size, * including very small ones ( < 64 KB). * - Larger than the encoding buffer, by a minimum of maxBlockSize more bytes. * maxBlockSize is implementation dependent. * It's the maximum size you intend to compress into a single block. * In which case, encoding and decoding buffers do not need * to be synchronized, and the encoding ring buffer can have any size, * including small ones ( < 64 KB). * - _At least_ 64 KB + 8 bytes + maxBlockSize. * In which case, encoding and decoding buffers do not need to be * synchronized, and the encoding ring buffer can have any size, * including larger than the decoding buffer. * Whenever these conditions are not possible, save the last 64KB of decoded * data into a safe buffer, and indicate where it is saved * using LZ4_setStreamDecode(). * * Return: number of bytes decompressed into destination buffer * (necessarily <= maxDecompressedSize) * or a negative result in case of error */ int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, int compressedSize, int maxDecompressedSize); /** * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure * @source: source address of the compressed data * @dest: output buffer address of the uncompressed data * which must be already allocated with 'originalSize' bytes * @originalSize: is the original and therefore uncompressed size * * This decoding function allows decompression of multiple blocks * in "streaming" mode. * Previously decoded blocks *must* remain available at the memory position * where they were decoded (up to 64 KB). * In the case of a ring buffer, the decoding buffer must be either: * - Exactly the same size as the encoding buffer, with the same update rule * (block boundaries at the same positions). In which case, * the decoding & encoding ring buffer can have any size, * including very small ones ( < 64 KB). * - Larger than the encoding buffer, by a minimum of maxBlockSize more bytes. * maxBlockSize is implementation dependent. * It's the maximum size you intend to compress into a single block. * In which case, encoding and decoding buffers do not need * to be synchronized, and the encoding ring buffer can have any size, * including small ones ( < 64 KB). * - _At least_ 64 KB + 8 bytes + maxBlockSize. * In which case, encoding and decoding buffers do not need to be * synchronized, and the encoding ring buffer can have any size, * including larger than the decoding buffer. * Whenever these conditions are not possible, save the last 64KB of decoded * data into a safe buffer, and indicate where it is saved * using LZ4_setStreamDecode(). * * Return: number of bytes decompressed into destination buffer * (necessarily <= originalSize) * or a negative result in case of error */ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, int originalSize);
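/*
 * Editor's note: the matching decode-side sketch, not part of the original
 * header. Decoded blocks stay resident in the output buffer, satisfying
 * the "previously decoded blocks must remain available" rule above; the
 * example_* name is hypothetical and error handling is elided.
 */
static void example_lz4_stream_decompress(LZ4_streamDecode_t *state,
					  const char *in, const int *in_lens,
					  int nr_blocks, char *dst, int dst_cap)
{
	int i;

	LZ4_setStreamDecode(state, NULL, 0);	/* no initial dictionary */

	for (i = 0; i < nr_blocks; i++) {
		int n = LZ4_decompress_safe_continue(state, in, dst,
						     in_lens[i], dst_cap);
		if (n < 0)
			break;	/* malformed block */
		in += in_lens[i];
		dst += n;
		dst_cap -= n;
	}
}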
/** * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode() * followed by LZ4_decompress_safe_continue() * @source: source address of the compressed data * @dest: output buffer address of the uncompressed data * which must be already allocated * @compressedSize: is the precise full size of the compressed block * @maxDecompressedSize: is the size of 'dest' buffer * @dictStart: pointer to the start of the dictionary in memory * @dictSize: size of dictionary * * This decoding function works the same as * a combination of LZ4_setStreamDecode() followed by * LZ4_decompress_safe_continue(). * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. * * Return: number of bytes decompressed into destination buffer * (necessarily <= maxDecompressedSize) * or a negative result in case of error */ int LZ4_decompress_safe_usingDict(const char *source, char *dest, int compressedSize, int maxDecompressedSize, const char *dictStart, int dictSize); /** * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode() * followed by LZ4_decompress_fast_continue() * @source: source address of the compressed data * @dest: output buffer address of the uncompressed data * which must be already allocated with 'originalSize' bytes * @originalSize: is the original and therefore uncompressed size * @dictStart: pointer to the start of the dictionary in memory * @dictSize: size of dictionary * * This decoding function works the same as * a combination of LZ4_setStreamDecode() followed by * LZ4_decompress_fast_continue(). * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. * * Return: number of bytes decompressed into destination buffer * (necessarily <= originalSize) * or a negative result in case of error */ int LZ4_decompress_fast_usingDict(const char *source, char *dest, int originalSize, const char *dictStart, int dictSize); #endif cpufeature.h 0000644 00000003327 14722070374 0007073 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org> */ #ifndef __LINUX_CPUFEATURE_H #define __LINUX_CPUFEATURE_H #ifdef CONFIG_GENERIC_CPU_AUTOPROBE #include <linux/init.h> #include <linux/mod_devicetable.h> #include <asm/cpufeature.h> /* * Macros imported from <asm/cpufeature.h>: * - cpu_feature(x) ordinal value of feature called 'x' * - cpu_have_feature(u32 n) whether feature #n is available * - MAX_CPU_FEATURES upper bound for feature ordinal values * Optional: * - CPU_FEATURE_TYPEFMT format string fragment for printing the cpu type * - CPU_FEATURE_TYPEVAL set of values matching the format string above */ #ifndef CPU_FEATURE_TYPEFMT #define CPU_FEATURE_TYPEFMT "%s" #endif #ifndef CPU_FEATURE_TYPEVAL #define CPU_FEATURE_TYPEVAL ELF_PLATFORM #endif /* * Use module_cpu_feature_match(feature, module_init_function) to * declare that * a) the module shall be probed upon discovery of CPU feature 'feature' * (typically at boot time using udev) * b) the module must not be loaded if CPU feature 'feature' is not present * (not even by manual insmod). * * For a list of legal values for 'feature', please consult the file * 'asm/cpufeature.h' of your favorite architecture. */ #define module_cpu_feature_match(x, __initfunc) \ static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \ { { .feature = cpu_feature(x) }, { } }; \ MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \ \ static int __init cpu_feature_match_ ## x ## _init(void) \ { \ if (!cpu_have_feature(cpu_feature(x))) \ return -ENODEV; \ return __initfunc(); \ } \ module_init(cpu_feature_match_ ## x ## _init) #endif #endif
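/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * A hypothetical module that should auto-load only on CPUs advertising an
 * AES feature flag (the pattern used by arm64 crypto drivers):
 */
static int __init example_aes_driver_init(void)
{
	/* reached only when cpu_have_feature(cpu_feature(AES)) is true */
	return 0;
}
module_cpu_feature_match(AES, example_aes_driver_init);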
blockgroup_lock.h 0000644 00000001452 14722070374 0010104 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BLOCKGROUP_LOCK_H #define _LINUX_BLOCKGROUP_LOCK_H /* * Per-blockgroup locking for ext2 and ext3. * * Simple hashed spinlocking. */ #include <linux/spinlock.h> #include <linux/cache.h> #ifdef CONFIG_SMP #define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32)) #else #define NR_BG_LOCKS 1 #endif struct bgl_lock { spinlock_t lock; } ____cacheline_aligned_in_smp; struct blockgroup_lock { struct bgl_lock locks[NR_BG_LOCKS]; }; static inline void bgl_lock_init(struct blockgroup_lock *bgl) { int i; for (i = 0; i < NR_BG_LOCKS; i++) spin_lock_init(&bgl->locks[i].lock); } static inline spinlock_t * bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group) { return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock; } #endif
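/*
 * Editor's note: a short usage sketch, not part of the original header.
 * Callers hash a block group number to one of the NR_BG_LOCKS spinlocks;
 * example_modify_group() and its parameters are hypothetical.
 */
static inline void example_modify_group(struct blockgroup_lock *bgl,
					unsigned int group)
{
	spinlock_t *lock = bgl_lock_ptr(bgl, group);

	spin_lock(lock);
	/* ... update state associated with block group @group ... */
	spin_unlock(lock);
}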
signal.h 0000644 00000031701 14722070374 0006202 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SIGNAL_H #define _LINUX_SIGNAL_H #include <linux/bug.h> #include <linux/signal_types.h> #include <linux/string.h> struct task_struct; /* for sysctl */ extern int print_fatal_signals; static inline void copy_siginfo(kernel_siginfo_t *to, const kernel_siginfo_t *from) { memcpy(to, from, sizeof(*to)); } static inline void clear_siginfo(kernel_siginfo_t *info) { memset(info, 0, sizeof(*info)); } #define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo)) int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from); int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from); enum siginfo_layout { SIL_KILL, SIL_TIMER, SIL_POLL, SIL_FAULT, SIL_FAULT_MCEERR, SIL_FAULT_BNDERR, SIL_FAULT_PKUERR, SIL_CHLD, SIL_RT, SIL_SYS, }; enum siginfo_layout siginfo_layout(unsigned sig, int si_code); /* * Define some primitives to manipulate sigset_t. */ #ifndef __HAVE_ARCH_SIG_BITOPS #include <linux/bitops.h> /* We don't use <linux/bitops.h> for these because there is no need to be atomic. */ static inline void sigaddset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; if (_NSIG_WORDS == 1) set->sig[0] |= 1UL << sig; else set->sig[sig / _NSIG_BPW] |= 1UL << (sig % _NSIG_BPW); } static inline void sigdelset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; if (_NSIG_WORDS == 1) set->sig[0] &= ~(1UL << sig); else set->sig[sig / _NSIG_BPW] &= ~(1UL << (sig % _NSIG_BPW)); } static inline int sigismember(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; if (_NSIG_WORDS == 1) return 1 & (set->sig[0] >> sig); else return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); } #endif /* __HAVE_ARCH_SIG_BITOPS */ static inline int sigisemptyset(sigset_t *set) { switch (_NSIG_WORDS) { case 4: return (set->sig[3] | set->sig[2] | set->sig[1] | set->sig[0]) == 0; case 2: return (set->sig[1] | set->sig[0]) == 0; case 1: return set->sig[0] == 0; default: BUILD_BUG(); return 0; } } static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2) { switch (_NSIG_WORDS) { case 4: return (set1->sig[3] == set2->sig[3]) && (set1->sig[2] == set2->sig[2]) && (set1->sig[1] == set2->sig[1]) && (set1->sig[0] == set2->sig[0]); case 2: return (set1->sig[1] == set2->sig[1]) && (set1->sig[0] == set2->sig[0]); case 1: return set1->sig[0] == set2->sig[0]; } return 0; } #define sigmask(sig) (1UL << ((sig) - 1)) #ifndef __HAVE_ARCH_SIG_SETOPS #include <linux/string.h> #define _SIG_SET_BINOP(name, op) \ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ { \ unsigned long a0, a1, a2, a3, b0, b1, b2, b3; \ \ switch (_NSIG_WORDS) { \ case 4: \ a3 = a->sig[3]; a2 = a->sig[2]; \ b3 = b->sig[3]; b2 = b->sig[2]; \ r->sig[3] = op(a3, b3); \ r->sig[2] = op(a2, b2); \ /* fall through */ \ case 2: \ a1 = a->sig[1]; b1 = b->sig[1]; \ r->sig[1] = op(a1, b1); \ /* fall through */ \ case 1: \ a0 = a->sig[0]; b0 = b->sig[0]; \ r->sig[0] = op(a0, b0); \ break; \ default: \ BUILD_BUG(); \ } \ } #define _sig_or(x,y) ((x) | (y)) _SIG_SET_BINOP(sigorsets, _sig_or) #define _sig_and(x,y) ((x) & (y)) _SIG_SET_BINOP(sigandsets, _sig_and) #define _sig_andn(x,y) ((x) & ~(y)) _SIG_SET_BINOP(sigandnsets, _sig_andn) #undef _SIG_SET_BINOP #undef _sig_or #undef _sig_and #undef _sig_andn #define _SIG_SET_OP(name, op) \ static inline void name(sigset_t *set) \ { \ switch (_NSIG_WORDS) { \ case 4: set->sig[3] = op(set->sig[3]); \ set->sig[2] = op(set->sig[2]); \ /* fall through */ \ case 2: set->sig[1] = op(set->sig[1]); \ /* fall through */ \ case 1: set->sig[0] = op(set->sig[0]); \ break; \ default: \ BUILD_BUG(); \ } \ } #define _sig_not(x) (~(x)) _SIG_SET_OP(signotset, _sig_not) #undef _SIG_SET_OP #undef _sig_not static inline void sigemptyset(sigset_t *set) { switch (_NSIG_WORDS) { default: memset(set, 0, sizeof(sigset_t)); break; case 2: set->sig[1] = 0; /* fall through */ case 1: set->sig[0] = 0; break; } } static inline void sigfillset(sigset_t *set) { switch (_NSIG_WORDS) { default: memset(set, -1, sizeof(sigset_t)); break; case 2: set->sig[1] = -1; /* fall through */ case 1: set->sig[0] = -1; break; } } /* Some extensions for manipulating the low 32 signals in particular. */ static inline void sigaddsetmask(sigset_t *set, unsigned long mask) { set->sig[0] |= mask; } static inline void sigdelsetmask(sigset_t *set, unsigned long mask) { set->sig[0] &= ~mask; } static inline int sigtestsetmask(sigset_t *set, unsigned long mask) { return (set->sig[0] & mask) != 0; } static inline void siginitset(sigset_t *set, unsigned long mask) { set->sig[0] = mask; switch (_NSIG_WORDS) { default: memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1)); break; case 2: set->sig[1] = 0; case 1: ; } } static inline void siginitsetinv(sigset_t *set, unsigned long mask) { set->sig[0] = ~mask; switch (_NSIG_WORDS) { default: memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1)); break; case 2: set->sig[1] = -1; case 1: ; } } #endif /* __HAVE_ARCH_SIG_SETOPS */ static inline void init_sigpending(struct sigpending *sig) { sigemptyset(&sig->signal); INIT_LIST_HEAD(&sig->list); } extern void flush_sigqueue(struct sigpending *queue); /* Test if 'sig' is a valid signal. Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) { return sig <= _NSIG ? 1 : 0; } struct timespec; struct pt_regs; enum pid_type; extern int next_signal(struct sigpending *pending, sigset_t *mask); extern int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); extern int group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern int sigprocmask(int, sigset_t *, sigset_t *); extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; extern bool get_signal(struct ksignal *ksig); extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void exit_signals(struct task_struct *tsk); extern void kernel_sigaction(int, __sighandler_t); #define SIG_KTHREAD ((__force __sighandler_t)2) #define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3) static inline void allow_signal(int sig) { /* * Kernel threads handle their own signals. Let the signal code * know it'll be handled, so that they don't get converted to * SIGKILL or just silently dropped. */ kernel_sigaction(sig, SIG_KTHREAD); }
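/*
 * Editor's note: a brief sketch, not part of the original header, showing
 * a kernel thread opting in to SIGTERM delivery; example_kthread_fn() is
 * hypothetical and assumes <linux/kthread.h> and <linux/sched/signal.h>.
 */
static int example_kthread_fn(void *data)
{
	allow_signal(SIGTERM);

	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(HZ);
		if (signal_pending(current))
			break;	/* a SIGTERM arrived */
	}
	return 0;
}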
static inline void allow_kernel_signal(int sig) { /* * Kernel threads handle their own signals. Let the signal code * know signals sent by the kernel will be handled, so that they * don't get silently dropped. */ kernel_sigaction(sig, SIG_KTHREAD_KERNEL); } static inline void disallow_signal(int sig) { kernel_sigaction(sig, SIG_IGN); } extern struct kmem_cache *sighand_cachep; extern bool unhandled_signal(struct task_struct *tsk, int sig); /* * In POSIX a signal is sent either to a specific thread (Linux task) * or to the process as a whole (Linux thread group). How the signal * is sent determines whether it's to one thread or the whole group, * which determines which signal mask(s) are involved in blocking it * from being delivered until later. When the signal is delivered, * either it's caught or ignored by a user handler or it has a default * effect that applies to the whole thread group (POSIX process). * * The possible effects an unblocked signal set to SIG_DFL can have are: * ignore - Nothing Happens * terminate - kill the process, i.e. all threads in the group, * similar to exit_group. The group leader (only) reports * WIFSIGNALED status to its parent. * coredump - write a core dump file describing all threads using * the same mm and then kill all those threads * stop - stop all the threads in the group, i.e. TASK_STOPPED state * * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. * Other signals, when not blocked and set to SIG_DFL, behave as follows. * The job control signals also have other special effects. * * +--------------------+------------------+ * | POSIX signal | default action | * +--------------------+------------------+ * | SIGHUP | terminate | * | SIGINT | terminate | * | SIGQUIT | coredump | * | SIGILL | coredump | * | SIGTRAP | coredump | * | SIGABRT/SIGIOT | coredump | * | SIGBUS | coredump | * | SIGFPE | coredump | * | SIGKILL | terminate(+) | * | SIGUSR1 | terminate | * | SIGSEGV | coredump | * | SIGUSR2 | terminate | * | SIGPIPE | terminate | * | SIGALRM | terminate | * | SIGTERM | terminate | * | SIGCHLD | ignore | * | SIGCONT | ignore(*) | * | SIGSTOP | stop(*)(+) | * | SIGTSTP | stop(*) | * | SIGTTIN | stop(*) | * | SIGTTOU | stop(*) | * | SIGURG | ignore | * | SIGXCPU | coredump | * | SIGXFSZ | coredump | * | SIGVTALRM | terminate | * | SIGPROF | terminate | * | SIGPOLL/SIGIO | terminate | * | SIGSYS/SIGUNUSED | coredump | * | SIGSTKFLT | terminate | * | SIGWINCH | ignore | * | SIGPWR | terminate | * | SIGRTMIN-SIGRTMAX | terminate | * +--------------------+------------------+ * | non-POSIX signal | default action | * +--------------------+------------------+ * | SIGEMT | coredump | * +--------------------+------------------+ * * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". * (*) Special job control effects: * When SIGCONT is sent, it resumes the process (all threads in the group) * from TASK_STOPPED state and also clears any pending/queued stop signals * (any of those marked with "stop(*)"). This happens regardless of blocking, * catching, or ignoring SIGCONT. When any stop signal is sent, it clears * any pending/queued SIGCONT signals; this happens regardless of blocking, * catching, or ignoring the stop signal, though (except for SIGSTOP) the * default action of stopping the process may happen later or never.
*/ #ifdef SIGEMT #define SIGEMT_MASK rt_sigmask(SIGEMT) #else #define SIGEMT_MASK 0 #endif #if SIGRTMIN > BITS_PER_LONG #define rt_sigmask(sig) (1ULL << ((sig)-1)) #else #define rt_sigmask(sig) sigmask(sig) #endif #define siginmask(sig, mask) \ ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) #define SIG_KERNEL_ONLY_MASK (\ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) #define SIG_KERNEL_STOP_MASK (\ rt_sigmask(SIGSTOP) | rt_sigmask(SIGTSTP) | \ rt_sigmask(SIGTTIN) | rt_sigmask(SIGTTOU) ) #define SIG_KERNEL_COREDUMP_MASK (\ rt_sigmask(SIGQUIT) | rt_sigmask(SIGILL) | \ rt_sigmask(SIGTRAP) | rt_sigmask(SIGABRT) | \ rt_sigmask(SIGFPE) | rt_sigmask(SIGSEGV) | \ rt_sigmask(SIGBUS) | rt_sigmask(SIGSYS) | \ rt_sigmask(SIGXCPU) | rt_sigmask(SIGXFSZ) | \ SIGEMT_MASK ) #define SIG_KERNEL_IGNORE_MASK (\ rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \ rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) ) #define SIG_SPECIFIC_SICODES_MASK (\ rt_sigmask(SIGILL) | rt_sigmask(SIGFPE) | \ rt_sigmask(SIGSEGV) | rt_sigmask(SIGBUS) | \ rt_sigmask(SIGTRAP) | rt_sigmask(SIGCHLD) | \ rt_sigmask(SIGPOLL) | rt_sigmask(SIGSYS) | \ SIGEMT_MASK ) #define sig_kernel_only(sig) siginmask(sig, SIG_KERNEL_ONLY_MASK) #define sig_kernel_coredump(sig) siginmask(sig, SIG_KERNEL_COREDUMP_MASK) #define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK) #define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK) #define sig_specific_sicodes(sig) siginmask(sig, SIG_SPECIFIC_SICODES_MASK) #define sig_fatal(t, signr) \ (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) void signals_init(void); int restore_altstack(const stack_t __user *); int __save_altstack(stack_t __user *, unsigned long); #define save_altstack_ex(uss, sp) do { \ stack_t __user *__uss = uss; \ struct task_struct *t = current; \ put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \ put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ put_user_ex(t->sas_ss_size, &__uss->ss_size); \ if (t->sas_ss_flags & SS_AUTODISARM) \ sas_ss_reset(t); \ } while (0); #ifdef CONFIG_PROC_FS struct seq_file; extern void render_sigset_t(struct seq_file *, const char *, sigset_t *); #endif #endif /* _LINUX_SIGNAL_H */ input/ad714x.h 0000644 00000002540 14722070374 0007073 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/input/ad714x.h * * AD714x is very flexible: it can be used as buttons, a scrollwheel, a * slider, or a touchpad at the same time, depending on the board. * The platform_data for the device's "struct device" holds this * information. * * Copyright 2009-2011 Analog Devices Inc. */ #ifndef __LINUX_INPUT_AD714X_H__ #define __LINUX_INPUT_AD714X_H__ #define STAGE_NUM 12 #define STAGE_CFGREG_NUM 8 #define SYS_CFGREG_NUM 8 /* board information which needs to be initialized in arch/mach...
 */

struct ad714x_slider_plat {
	int start_stage;
	int end_stage;
	int max_coord;
};

struct ad714x_wheel_plat {
	int start_stage;
	int end_stage;
	int max_coord;
};

struct ad714x_touchpad_plat {
	int x_start_stage;
	int x_end_stage;
	int x_max_coord;

	int y_start_stage;
	int y_end_stage;
	int y_max_coord;
};

struct ad714x_button_plat {
	int keycode;
	unsigned short l_mask;
	unsigned short h_mask;
};

struct ad714x_platform_data {
	int slider_num;
	int wheel_num;
	int touchpad_num;
	int button_num;
	struct ad714x_slider_plat *slider;
	struct ad714x_wheel_plat *wheel;
	struct ad714x_touchpad_plat *touchpad;
	struct ad714x_button_plat *button;
	unsigned short stage_cfg_reg[STAGE_NUM][STAGE_CFGREG_NUM];
	unsigned short sys_cfg_reg[SYS_CFGREG_NUM];
	unsigned long irqflags;
};

#endif

input/elan-i2c-ids.h 0000644 00000004011 14722070374 0010225 0 ustar 00 /*
 * Elan I2C/SMBus Touchpad device whitelist
 *
 * Copyright (c) 2013 ELAN Microelectronics Corp.
 *
 * Author: Duson Lin <dusonlin@emc.com.tw>
 * Author: KT Liao <kt.liao@emc.com.tw>
 * Version: 1.6.3
 *
 * Based on cyapa driver:
 * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
 * copyright (c) 2011-2012 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Trademarks are the property of their respective owners.
 */

#ifndef __ELAN_I2C_IDS_H
#define __ELAN_I2C_IDS_H

#include <linux/mod_devicetable.h>

static const struct acpi_device_id elan_acpi_id[] = {
	{ "ELAN0000", 0 },
	{ "ELAN0100", 0 },
	{ "ELAN0600", 0 },
	{ "ELAN0601", 0 },
	{ "ELAN0602", 0 },
	{ "ELAN0603", 0 },
	{ "ELAN0604", 0 },
	{ "ELAN0605", 0 },
	{ "ELAN0606", 0 },
	{ "ELAN0607", 0 },
	{ "ELAN0608", 0 },
	{ "ELAN0609", 0 },
	{ "ELAN060B", 0 },
	{ "ELAN060C", 0 },
	{ "ELAN060F", 0 },
	{ "ELAN0610", 0 },
	{ "ELAN0611", 0 },
	{ "ELAN0612", 0 },
	{ "ELAN0615", 0 },
	{ "ELAN0616", 0 },
	{ "ELAN0617", 0 },
	{ "ELAN0618", 0 },
	{ "ELAN0619", 0 },
	{ "ELAN061A", 0 },
/*	{ "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
	{ "ELAN061C", 0 },
	{ "ELAN061D", 0 },
	{ "ELAN061E", 0 },
	{ "ELAN061F", 0 },
	{ "ELAN0620", 0 },
	{ "ELAN0621", 0 },
	{ "ELAN0622", 0 },
	{ "ELAN0623", 0 },
	{ "ELAN0624", 0 },
	{ "ELAN0625", 0 },
	{ "ELAN0626", 0 },
	{ "ELAN0627", 0 },
	{ "ELAN0628", 0 },
	{ "ELAN0629", 0 },
	{ "ELAN062A", 0 },
	{ "ELAN062B", 0 },
	{ "ELAN062C", 0 },
	{ "ELAN062D", 0 },
	{ "ELAN062E", 0 }, /* Lenovo V340 Whiskey Lake U */
	{ "ELAN062F", 0 }, /* Lenovo V340 Comet Lake U */
	{ "ELAN0631", 0 },
	{ "ELAN0632", 0 },
	{ "ELAN0633", 0 }, /* Lenovo S145 */
	{ "ELAN0634", 0 }, /* Lenovo V340 Ice lake */
	{ "ELAN0635", 0 }, /* Lenovo V1415-IIL */
	{ "ELAN0636", 0 }, /* Lenovo V1415-Dali */
	{ "ELAN0637", 0 }, /* Lenovo V1415-IGLR */
	{ "ELAN1000", 0 },
	{ }
};

#endif /* __ELAN_I2C_IDS_H */

input/adxl34x.h 0000644 00000026474 14722070374 0007366 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/linux/input/adxl34x.h
 *
 * Digital Accelerometer characteristics are highly application specific
 * and may vary between boards and models. The platform_data for the
 * device's "struct device" holds this information.
 *
 * Copyright 2009 Analog Devices Inc.
 */

#ifndef __LINUX_INPUT_ADXL34X_H__
#define __LINUX_INPUT_ADXL34X_H__

#include <linux/input.h>

struct adxl34x_platform_data {

	/*
	 * X,Y,Z Axis Offset:
	 * offer user offset adjustments in two's complement
	 * form with a scale factor of 15.6 mg/LSB (i.e.
0x7F = +2 g) */ s8 x_axis_offset; s8 y_axis_offset; s8 z_axis_offset; /* * TAP_X/Y/Z Enable: Setting TAP_X, Y, or Z Enable enables X, * Y, or Z participation in Tap detection. A '0' excludes the * selected axis from participation in Tap detection. * Setting the SUPPRESS bit suppresses Double Tap detection if * acceleration greater than tap_threshold is present during the * tap_latency period, i.e. after the first tap but before the * opening of the second tap window. */ #define ADXL_SUPPRESS (1 << 3) #define ADXL_TAP_X_EN (1 << 2) #define ADXL_TAP_Y_EN (1 << 1) #define ADXL_TAP_Z_EN (1 << 0) u8 tap_axis_control; /* * tap_threshold: * holds the threshold value for tap detection/interrupts. * The data format is unsigned. The scale factor is 62.5 mg/LSB * (i.e. 0xFF = +16 g). A zero value may result in undesirable * behavior if Tap/Double Tap is enabled. */ u8 tap_threshold; /* * tap_duration: * is an unsigned time value representing the maximum * time that an event must be above the tap_threshold threshold * to qualify as a tap event. The scale factor is 625 us/LSB. A zero * value will prevent Tap/Double Tap functions from working. */ u8 tap_duration; /* * tap_latency: * is an unsigned time value representing the wait time * from the detection of a tap event to the opening of the time * window tap_window for a possible second tap event. The scale * factor is 1.25 ms/LSB. A zero value will disable the Double Tap * function. */ u8 tap_latency; /* * tap_window: * is an unsigned time value representing the amount * of time after the expiration of tap_latency during which a second * tap can begin. The scale factor is 1.25 ms/LSB. A zero value will * disable the Double Tap function. */ u8 tap_window; /* * act_axis_control: * X/Y/Z Enable: A '1' enables X, Y, or Z participation in activity * or inactivity detection. A '0' excludes the selected axis from * participation. If all of the axes are excluded, the function is * disabled. * AC/DC: A '0' = DC coupled operation and a '1' = AC coupled * operation. In DC coupled operation, the current acceleration is * compared with activity_threshold and inactivity_threshold directly * to determine whether activity or inactivity is detected. In AC * coupled operation for activity detection, the acceleration value * at the start of activity detection is taken as a reference value. * New samples of acceleration are then compared to this * reference value and if the magnitude of the difference exceeds * activity_threshold the device will trigger an activity interrupt. In * AC coupled operation for inactivity detection, a reference value * is used again for comparison and is updated whenever the * device exceeds the inactivity threshold. Once the reference * value is selected, the device compares the magnitude of the * difference between the reference value and the current * acceleration with inactivity_threshold. If the difference is below * inactivity_threshold for a total of inactivity_time, the device is * considered inactive and the inactivity interrupt is triggered. */ #define ADXL_ACT_ACDC (1 << 7) #define ADXL_ACT_X_EN (1 << 6) #define ADXL_ACT_Y_EN (1 << 5) #define ADXL_ACT_Z_EN (1 << 4) #define ADXL_INACT_ACDC (1 << 3) #define ADXL_INACT_X_EN (1 << 2) #define ADXL_INACT_Y_EN (1 << 1) #define ADXL_INACT_Z_EN (1 << 0) u8 act_axis_control; /* * activity_threshold: * holds the threshold value for activity detection. * The data format is unsigned. The scale factor is * 62.5 mg/LSB. 
A zero value may result in undesirable behavior if
	 * Activity interrupt is enabled.
	 */
	u8 activity_threshold;

	/*
	 * inactivity_threshold:
	 * holds the threshold value for inactivity
	 * detection. The data format is unsigned. The scale
	 * factor is 62.5 mg/LSB. A zero value may result in undesirable
	 * behavior if Inactivity interrupt is enabled.
	 */
	u8 inactivity_threshold;

	/*
	 * inactivity_time:
	 * is an unsigned time value representing the
	 * amount of time that acceleration must be below the value in
	 * inactivity_threshold for inactivity to be declared. The scale factor
	 * is 1 second/LSB. Unlike the other interrupt functions, which
	 * operate on unfiltered data, the inactivity function operates on the
	 * filtered output data. At least one output sample must be
	 * generated for the inactivity interrupt to be triggered. This will
	 * result in the function appearing unresponsive if the
	 * inactivity_time register is set with a value less than the time
	 * constant of the Output Data Rate. A zero value will result in an
	 * interrupt when the output data is below inactivity_threshold.
	 */
	u8 inactivity_time;

	/*
	 * free_fall_threshold:
	 * holds the threshold value for Free-Fall detection.
	 * The data format is unsigned. The root-sum-square (RSS) value
	 * of all axes is calculated and compared to the value in
	 * free_fall_threshold to determine if a free fall event may be
	 * occurring. The scale factor is 62.5 mg/LSB. A zero value may
	 * result in undesirable behavior if Free-Fall interrupt is
	 * enabled. Values between 300 and 600 mg (0x05 to 0x09) are
	 * recommended.
	 */
	u8 free_fall_threshold;

	/*
	 * free_fall_time:
	 * is an unsigned time value representing the minimum
	 * time that the RSS value of all axes must be less than
	 * free_fall_threshold to generate a Free-Fall interrupt. The
	 * scale factor is 5 ms/LSB. A zero value may result in
	 * undesirable behavior if Free-Fall interrupt is enabled.
	 * Values between 100 and 350 ms (0x14 to 0x46) are recommended.
	 */
	u8 free_fall_time;

	/*
	 * data_rate:
	 * Selects device bandwidth and output data rate.
	 * RATE = 3200 Hz / (2^(15 - x)). Default value is 0x0A, or 100 Hz
	 * Output Data Rate. An Output Data Rate should be selected that
	 * is appropriate for the communication protocol and frequency
	 * selected. Selecting too high an Output Data Rate with a low
	 * communication speed will result in samples being discarded.
	 */
	u8 data_rate;

	/*
	 * data_range:
	 * FULL_RES: When this bit is set, the device is
	 * in Full-Resolution Mode, where the output resolution increases
	 * with RANGE to maintain a 4 mg/LSB scale factor. When this
	 * bit is cleared the device is in 10-bit Mode and RANGE determines
	 * the maximum g-Range and scale factor.
	 */

#define ADXL_FULL_RES		(1 << 3)
#define ADXL_RANGE_PM_2g	0
#define ADXL_RANGE_PM_4g	1
#define ADXL_RANGE_PM_8g	2
#define ADXL_RANGE_PM_16g	3

	u8 data_range;

	/*
	 * low_power_mode:
	 * A '0' = Normal operation and a '1' = Reduced
	 * power operation with somewhat higher noise.
	 */
	u8 low_power_mode;

	/*
	 * power_mode:
	 * LINK: A '1' with both the activity and inactivity functions
	 * enabled will delay the start of the activity function until
	 * inactivity is detected. Once activity is detected, inactivity
	 * detection will begin and prevent the detection of activity. This
	 * bit serially links the activity and inactivity functions. When '0'
	 * the inactivity and activity functions are concurrent. Additional
	 * information can be found in the ADXL34x datasheet's Application
	 * section under Link Mode.
* AUTO_SLEEP: A '1' sets the ADXL34x to switch to Sleep Mode * when inactivity (acceleration has been below inactivity_threshold * for at least inactivity_time) is detected and the LINK bit is set. * A '0' disables automatic switching to Sleep Mode. See the * Sleep Bit section of the ADXL34x datasheet for more information. */ #define ADXL_LINK (1 << 5) #define ADXL_AUTO_SLEEP (1 << 4) u8 power_mode; /* * fifo_mode: * BYPASS The FIFO is bypassed * FIFO FIFO collects up to 32 values then stops collecting data * STREAM FIFO holds the last 32 data values. Once full, the FIFO's * oldest data is lost as it is replaced with newer data * * DEFAULT should be ADXL_FIFO_STREAM */ #define ADXL_FIFO_BYPASS 0 #define ADXL_FIFO_FIFO 1 #define ADXL_FIFO_STREAM 2 u8 fifo_mode; /* * watermark: * The Watermark feature can be used to reduce the interrupt load * of the system. The FIFO fills up to the value stored in watermark * [1..32] and then generates an interrupt. * A '0' disables the watermark feature. */ u8 watermark; /* * When acceleration measurements are received from the ADXL34x * events are sent to the event subsystem. The following settings * select the event type and event code for new x, y and z axis data * respectively. */ u32 ev_type; /* EV_ABS or EV_REL */ u32 ev_code_x; /* ABS_X,Y,Z or REL_X,Y,Z */ u32 ev_code_y; /* ABS_X,Y,Z or REL_X,Y,Z */ u32 ev_code_z; /* ABS_X,Y,Z or REL_X,Y,Z */ /* * A valid BTN or KEY Code; use tap_axis_control to disable * event reporting */ u32 ev_code_tap[3]; /* EV_KEY {X-Axis, Y-Axis, Z-Axis} */ /* * A valid BTN or KEY Code for Free-Fall or Activity enables * input event reporting. A '0' disables the Free-Fall or * Activity reporting. */ u32 ev_code_ff; /* EV_KEY */ u32 ev_code_act_inactivity; /* EV_KEY */ /* * Use ADXL34x INT2 pin instead of INT1 pin for interrupt output */ u8 use_int2; /* * ADXL346 only ORIENTATION SENSING feature * The orientation function of the ADXL346 reports both 2-D and * 3-D orientation concurrently. */ #define ADXL_EN_ORIENTATION_2D 1 #define ADXL_EN_ORIENTATION_3D 2 #define ADXL_EN_ORIENTATION_2D_3D 3 u8 orientation_enable; /* * The width of the deadzone region between two or more * orientation positions is determined by setting the Deadzone * value. The deadzone region size can be specified with a * resolution of 3.6deg. The deadzone angle represents the total * angle where the orientation is considered invalid. */ #define ADXL_DEADZONE_ANGLE_0p0 0 /* !!!0.0 [deg] */ #define ADXL_DEADZONE_ANGLE_3p6 1 /* 3.6 [deg] */ #define ADXL_DEADZONE_ANGLE_7p2 2 /* 7.2 [deg] */ #define ADXL_DEADZONE_ANGLE_10p8 3 /* 10.8 [deg] */ #define ADXL_DEADZONE_ANGLE_14p4 4 /* 14.4 [deg] */ #define ADXL_DEADZONE_ANGLE_18p0 5 /* 18.0 [deg] */ #define ADXL_DEADZONE_ANGLE_21p6 6 /* 21.6 [deg] */ #define ADXL_DEADZONE_ANGLE_25p2 7 /* 25.2 [deg] */ u8 deadzone_angle; /* * To eliminate most human motion such as walking or shaking, * a Divisor value should be selected to effectively limit the * orientation bandwidth. 
Set the depth of the filter used to
	 * low-pass filter the measured acceleration for stable
	 * orientation sensing
	 */
#define ADXL_LP_FILTER_DIVISOR_2	0
#define ADXL_LP_FILTER_DIVISOR_4	1
#define ADXL_LP_FILTER_DIVISOR_8	2
#define ADXL_LP_FILTER_DIVISOR_16	3
#define ADXL_LP_FILTER_DIVISOR_32	4
#define ADXL_LP_FILTER_DIVISOR_64	5
#define ADXL_LP_FILTER_DIVISOR_128	6
#define ADXL_LP_FILTER_DIVISOR_256	7

	u8 divisor_length;

	u32 ev_codes_orient_2d[4];	/* EV_KEY {+X, -X, +Y, -Y} */
	u32 ev_codes_orient_3d[6];	/* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */
};

#endif

input/touchscreen.h 0000644 00000001373 14722070374 0010410 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2014 Sebastian Reichel <sre@kernel.org>
 */

#ifndef _TOUCHSCREEN_H
#define _TOUCHSCREEN_H

struct input_dev;
struct input_mt_pos;

struct touchscreen_properties {
	unsigned int max_x;
	unsigned int max_y;
	bool invert_x;
	bool invert_y;
	bool swap_x_y;
};

void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
				  struct touchscreen_properties *prop);

void touchscreen_set_mt_pos(struct input_mt_pos *pos,
			    const struct touchscreen_properties *prop,
			    unsigned int x, unsigned int y);

void touchscreen_report_pos(struct input_dev *input,
			    const struct touchscreen_properties *prop,
			    unsigned int x, unsigned int y,
			    bool multitouch);

#endif

input/matrix_keypad.h 0000644 00000005722 14722070374 0010731 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MATRIX_KEYPAD_H
#define _MATRIX_KEYPAD_H

#include <linux/types.h>
#include <linux/input.h>
#include <linux/of.h>

#define MATRIX_MAX_ROWS		32
#define MATRIX_MAX_COLS		32

#define KEY(row, col, val)	((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
				 (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
				 ((val) & 0xffff))

#define KEY_ROW(k)		(((k) >> 24) & 0xff)
#define KEY_COL(k)		(((k) >> 16) & 0xff)
#define KEY_VAL(k)		((k) & 0xffff)

#define MATRIX_SCAN_CODE(row, col, row_shift)	(((row) << (row_shift)) + (col))

/**
 * struct matrix_keymap_data - keymap for matrix keyboards
 * @keymap: pointer to array of uint32 values encoded with KEY() macro
 *	representing keymap
 * @keymap_size: number of entries (initialized) in this keymap
 *
 * This structure is supposed to be used by platform code to supply
 * keymaps to drivers that implement matrix-like keypads/keyboards.
 */
struct matrix_keymap_data {
	const uint32_t *keymap;
	unsigned int keymap_size;
};

/**
 * struct matrix_keypad_platform_data - platform-dependent keypad data
 * @keymap_data: pointer to &matrix_keymap_data
 * @row_gpios: pointer to array of gpio numbers representing rows
 * @col_gpios: pointer to array of gpio numbers representing columns
 * @num_row_gpios: actual number of row gpios used by device
 * @num_col_gpios: actual number of col gpios used by device
 * @col_scan_delay_us: delay, measured in microseconds, that is
 *	needed before we can scan the keypad after activating column gpio
 * @debounce_ms: debounce interval in milliseconds
 * @clustered_irq: may be specified if interrupts of all row/column GPIOs
 *	are bundled to one single irq
 * @clustered_irq_flags: flags that are needed for the clustered irq
 * @active_low: gpio polarity
 * @wakeup: controls whether the device should be set up as wakeup
 *	source
 * @no_autorepeat: disable key autorepeat
 * @drive_inactive_cols: drive inactive columns during scan, rather than
 *	making them inputs.
 *
 * This structure represents platform-specific data that is used by the
 * matrix_keypad driver to perform proper initialization.
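 *
 * As an illustrative sketch (the board_* identifiers are invented), a
 * platform might supply a keymap encoded with the KEY() macro above:
 *
 *	static const uint32_t board_keymap[] = {
 *		KEY(0, 0, KEY_A),
 *		KEY(0, 1, KEY_B),
 *		KEY(1, 0, KEY_ENTER),
 *	};
 *
 *	static const struct matrix_keymap_data board_keymap_data = {
 *		.keymap		= board_keymap,
 *		.keymap_size	= ARRAY_SIZE(board_keymap),
 *	};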
*/ struct matrix_keypad_platform_data { const struct matrix_keymap_data *keymap_data; const unsigned int *row_gpios; const unsigned int *col_gpios; unsigned int num_row_gpios; unsigned int num_col_gpios; unsigned int col_scan_delay_us; /* key debounce interval in milli-second */ unsigned int debounce_ms; unsigned int clustered_irq; unsigned int clustered_irq_flags; bool active_low; bool wakeup; bool no_autorepeat; bool drive_inactive_cols; }; int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data, const char *keymap_name, unsigned int rows, unsigned int cols, unsigned short *keymap, struct input_dev *input_dev); int matrix_keypad_parse_properties(struct device *dev, unsigned int *rows, unsigned int *cols); #define matrix_keypad_parse_of_params matrix_keypad_parse_properties #endif /* _MATRIX_KEYPAD_H */ input/sh_keysc.h 0000644 00000000657 14722070374 0007702 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SH_KEYSC_H__ #define __SH_KEYSC_H__ #define SH_KEYSC_MAXKEYS 64 struct sh_keysc_info { enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3, SH_KEYSC_MODE_4, SH_KEYSC_MODE_5, SH_KEYSC_MODE_6 } mode; int scan_timing; /* 0 -> 7, see KYCR1, SCN[2:0] */ int delay; int kycr2_delay; int keycodes[SH_KEYSC_MAXKEYS]; /* KEYIN * KEYOUT */ }; #endif /* __SH_KEYSC_H__ */ input/gp2ap002a00f.h 0000644 00000001237 14722070374 0007767 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _GP2AP002A00F_H_ #define _GP2AP002A00F_H_ #include <linux/i2c.h> #define GP2A_I2C_NAME "gp2ap002a00f" /** * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data * @vout_gpio: The gpio connected to the object detected pin (VOUT) * @wakeup: Set to true if the proximity can wake the device from suspend * @hw_setup: Callback for setting up hardware such as gpios and vregs * @hw_shutdown: Callback for properly shutting down hardware */ struct gp2a_platform_data { int vout_gpio; bool wakeup; int (*hw_setup)(struct i2c_client *client); int (*hw_shutdown)(struct i2c_client *client); }; #endif input/adp5589.h 0000644 00000013235 14722070374 0007165 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Analog Devices ADP5589/ADP5585 I/O Expander and QWERTY Keypad Controller * * Copyright 2010-2011 Analog Devices Inc. 
*/ #ifndef _ADP5589_H #define _ADP5589_H /* * ADP5589 specific GPI and Keymap defines */ #define ADP5589_KEYMAPSIZE 88 #define ADP5589_GPI_PIN_ROW0 97 #define ADP5589_GPI_PIN_ROW1 98 #define ADP5589_GPI_PIN_ROW2 99 #define ADP5589_GPI_PIN_ROW3 100 #define ADP5589_GPI_PIN_ROW4 101 #define ADP5589_GPI_PIN_ROW5 102 #define ADP5589_GPI_PIN_ROW6 103 #define ADP5589_GPI_PIN_ROW7 104 #define ADP5589_GPI_PIN_COL0 105 #define ADP5589_GPI_PIN_COL1 106 #define ADP5589_GPI_PIN_COL2 107 #define ADP5589_GPI_PIN_COL3 108 #define ADP5589_GPI_PIN_COL4 109 #define ADP5589_GPI_PIN_COL5 110 #define ADP5589_GPI_PIN_COL6 111 #define ADP5589_GPI_PIN_COL7 112 #define ADP5589_GPI_PIN_COL8 113 #define ADP5589_GPI_PIN_COL9 114 #define ADP5589_GPI_PIN_COL10 115 #define GPI_LOGIC1 116 #define GPI_LOGIC2 117 #define ADP5589_GPI_PIN_ROW_BASE ADP5589_GPI_PIN_ROW0 #define ADP5589_GPI_PIN_ROW_END ADP5589_GPI_PIN_ROW7 #define ADP5589_GPI_PIN_COL_BASE ADP5589_GPI_PIN_COL0 #define ADP5589_GPI_PIN_COL_END ADP5589_GPI_PIN_COL10 #define ADP5589_GPI_PIN_BASE ADP5589_GPI_PIN_ROW_BASE #define ADP5589_GPI_PIN_END ADP5589_GPI_PIN_COL_END #define ADP5589_GPIMAPSIZE_MAX (ADP5589_GPI_PIN_END - ADP5589_GPI_PIN_BASE + 1) /* * ADP5585 specific GPI and Keymap defines */ #define ADP5585_KEYMAPSIZE 30 #define ADP5585_GPI_PIN_ROW0 37 #define ADP5585_GPI_PIN_ROW1 38 #define ADP5585_GPI_PIN_ROW2 39 #define ADP5585_GPI_PIN_ROW3 40 #define ADP5585_GPI_PIN_ROW4 41 #define ADP5585_GPI_PIN_ROW5 42 #define ADP5585_GPI_PIN_COL0 43 #define ADP5585_GPI_PIN_COL1 44 #define ADP5585_GPI_PIN_COL2 45 #define ADP5585_GPI_PIN_COL3 46 #define ADP5585_GPI_PIN_COL4 47 #define GPI_LOGIC 48 #define ADP5585_GPI_PIN_ROW_BASE ADP5585_GPI_PIN_ROW0 #define ADP5585_GPI_PIN_ROW_END ADP5585_GPI_PIN_ROW5 #define ADP5585_GPI_PIN_COL_BASE ADP5585_GPI_PIN_COL0 #define ADP5585_GPI_PIN_COL_END ADP5585_GPI_PIN_COL4 #define ADP5585_GPI_PIN_BASE ADP5585_GPI_PIN_ROW_BASE #define ADP5585_GPI_PIN_END ADP5585_GPI_PIN_COL_END #define ADP5585_GPIMAPSIZE_MAX (ADP5585_GPI_PIN_END - ADP5585_GPI_PIN_BASE + 1) struct adp5589_gpi_map { unsigned short pin; unsigned short sw_evt; }; /* scan_cycle_time */ #define ADP5589_SCAN_CYCLE_10ms 0 #define ADP5589_SCAN_CYCLE_20ms 1 #define ADP5589_SCAN_CYCLE_30ms 2 #define ADP5589_SCAN_CYCLE_40ms 3 /* RESET_CFG */ #define RESET_PULSE_WIDTH_500us 0 #define RESET_PULSE_WIDTH_1ms 1 #define RESET_PULSE_WIDTH_2ms 2 #define RESET_PULSE_WIDTH_10ms 3 #define RESET_TRIG_TIME_0ms (0 << 2) #define RESET_TRIG_TIME_1000ms (1 << 2) #define RESET_TRIG_TIME_1500ms (2 << 2) #define RESET_TRIG_TIME_2000ms (3 << 2) #define RESET_TRIG_TIME_2500ms (4 << 2) #define RESET_TRIG_TIME_3000ms (5 << 2) #define RESET_TRIG_TIME_3500ms (6 << 2) #define RESET_TRIG_TIME_4000ms (7 << 2) #define RESET_PASSTHRU_EN (1 << 5) #define RESET1_POL_HIGH (1 << 6) #define RESET1_POL_LOW (0 << 6) #define RESET2_POL_HIGH (1 << 7) #define RESET2_POL_LOW (0 << 7) /* ADP5589 Mask Bits: * C C C C C C C C C C C | R R R R R R R R * 1 9 8 7 6 5 4 3 2 1 0 | 7 6 5 4 3 2 1 0 * 0 * ---------------- BIT ------------------ * 1 1 1 1 1 1 1 1 1 0 0 | 0 0 0 0 0 0 0 0 * 8 7 6 5 4 3 2 1 0 9 8 | 7 6 5 4 3 2 1 0 */ #define ADP_ROW(x) (1 << (x)) #define ADP_COL(x) (1 << (x + 8)) #define ADP5589_ROW_MASK 0xFF #define ADP5589_COL_MASK 0xFF #define ADP5589_COL_SHIFT 8 #define ADP5589_MAX_ROW_NUM 7 #define ADP5589_MAX_COL_NUM 10 /* ADP5585 Mask Bits: * C C C C C | R R R R R R * 4 3 2 1 0 | 5 4 3 2 1 0 * * ---- BIT -- ----------- * 1 0 0 0 0 | 0 0 0 0 0 0 * 0 9 8 7 6 | 5 4 3 2 1 0 */ #define ADP5585_ROW_MASK 0x3F #define 
ADP5585_COL_MASK 0x1F #define ADP5585_ROW_SHIFT 0 #define ADP5585_COL_SHIFT 6 #define ADP5585_MAX_ROW_NUM 5 #define ADP5585_MAX_COL_NUM 4 #define ADP5585_ROW(x) (1 << ((x) & ADP5585_ROW_MASK)) #define ADP5585_COL(x) (1 << (((x) & ADP5585_COL_MASK) + ADP5585_COL_SHIFT)) /* Put one of these structures in i2c_board_info platform_data */ struct adp5589_kpad_platform_data { unsigned keypad_en_mask; /* Keypad (Rows/Columns) enable mask */ const unsigned short *keymap; /* Pointer to keymap */ unsigned short keymapsize; /* Keymap size */ bool repeat; /* Enable key repeat */ bool en_keylock; /* Enable key lock feature (ADP5589 only)*/ unsigned char unlock_key1; /* Unlock Key 1 (ADP5589 only) */ unsigned char unlock_key2; /* Unlock Key 2 (ADP5589 only) */ unsigned char unlock_timer; /* Time in seconds [0..7] between the two unlock keys 0=disable (ADP5589 only) */ unsigned char scan_cycle_time; /* Time between consecutive scan cycles */ unsigned char reset_cfg; /* Reset config */ unsigned short reset1_key_1; /* Reset Key 1 */ unsigned short reset1_key_2; /* Reset Key 2 */ unsigned short reset1_key_3; /* Reset Key 3 */ unsigned short reset2_key_1; /* Reset Key 1 */ unsigned short reset2_key_2; /* Reset Key 2 */ unsigned debounce_dis_mask; /* Disable debounce mask */ unsigned pull_dis_mask; /* Disable all pull resistors mask */ unsigned pullup_en_100k; /* Pull-Up 100k Enable Mask */ unsigned pullup_en_300k; /* Pull-Up 300k Enable Mask */ unsigned pulldown_en_300k; /* Pull-Down 300k Enable Mask */ const struct adp5589_gpi_map *gpimap; unsigned short gpimapsize; const struct adp5589_gpio_platform_data *gpio_data; }; struct i2c_client; /* forward declaration */ struct adp5589_gpio_platform_data { int gpio_start; /* GPIO Chip base # */ int (*setup)(struct i2c_client *client, int gpio, unsigned ngpio, void *context); int (*teardown)(struct i2c_client *client, int gpio, unsigned ngpio, void *context); void *context; }; #endif input/lm8333.h 0000644 00000001242 14722070374 0007012 0 ustar 00 /* * public include for LM8333 keypad driver - same license as driver * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de> */ #ifndef _LM8333_H #define _LM8333_H struct lm8333; struct lm8333_platform_data { /* Keymap data */ const struct matrix_keymap_data *matrix_data; /* Active timeout before enter HALT mode in microseconds */ unsigned active_time; /* Debounce interval in microseconds */ unsigned debounce_time; }; extern int lm8333_read8(struct lm8333 *lm8333, u8 cmd); extern int lm8333_write8(struct lm8333 *lm8333, u8 cmd, u8 val); extern int lm8333_read_block(struct lm8333 *lm8333, u8 cmd, u8 len, u8 *buf); #endif /* _LM8333_H */ input/auo-pixcir-ts.h 0000644 00000001746 14722070374 0010576 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Driver for AUO in-cell touchscreens * * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de> * * based on auo_touch.h from Dell Streak kernel * * Copyright (c) 2008 QUALCOMM Incorporated. * Copyright (c) 2008 QUALCOMM USA, INC. 
 */

#ifndef __AUO_PIXCIR_TS_H__
#define __AUO_PIXCIR_TS_H__

/*
 * Interrupt modes:
 * periodical:		interrupt is asserted periodically
 * compare coordinates:	interrupt is asserted when coordinates change
 * indicate touch:	interrupt is asserted during touch
 */
#define AUO_PIXCIR_INT_PERIODICAL	0x00
#define AUO_PIXCIR_INT_COMP_COORD	0x01
#define AUO_PIXCIR_INT_TOUCH_IND	0x02

/*
 * @gpio_int	interrupt gpio
 * @gpio_rst	reset gpio
 * @int_setting	one of AUO_PIXCIR_INT_*
 * @x_max	x-resolution
 * @y_max	y-resolution
 */
struct auo_pixcir_ts_platdata {
	int gpio_int;
	int gpio_rst;

	int int_setting;

	unsigned int x_max;
	unsigned int y_max;
};

#endif

input/as5011.h 0000644 00000000617 14722070374 0007000 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _AS5011_H
#define _AS5011_H

/*
 * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
 */

struct as5011_platform_data {
	unsigned int button_gpio;
	unsigned int axis_irq;		/* irq number */
	unsigned long axis_irqflags;
	char xp, xn;			/* threshold for x axis */
	char yp, yn;			/* threshold for y axis */
};

#endif /* _AS5011_H */

input/sparse-keymap.h 0000644 00000003660 14722070374 0010650 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _SPARSE_KEYMAP_H
#define _SPARSE_KEYMAP_H

/*
 * Copyright (c) 2009 Dmitry Torokhov
 */

#define KE_END		0	/* Indicates end of keymap */
#define KE_KEY		1	/* Ordinary key/button */
#define KE_SW		2	/* Switch (predetermined value) */
#define KE_VSW		3	/* Switch (value supplied at runtime) */
#define KE_IGNORE	4	/* Known entry that should be ignored */
#define KE_LAST		KE_IGNORE

/**
 * struct key_entry - keymap entry for use in sparse keymap
 * @type: Type of the key entry (KE_KEY, KE_SW, KE_VSW, KE_END);
 *	drivers are allowed to extend the list with their own
 *	private definitions.
 * @code: Device-specific data identifying the button/switch
 * @keycode: KEY_* code assigned to a key/button
 * @sw.code: SW_* code assigned to a switch
 * @sw.value: Value that should be sent in an input event when KE_SW
 *	switch is toggled. KE_VSW switches ignore this field and
 *	expect the driver to supply value for the event.
 *
 * This structure defines an entry in a sparse keymap used by some
 * input devices for which the traditional table-based approach is not
 * suitable.
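 *
 * An illustrative sketch (the entries are invented): a driver might
 * declare
 *
 *	static const struct key_entry my_keymap[] = {
 *		{ KE_KEY,    0x01, { KEY_VOLUMEUP } },
 *		{ KE_KEY,    0x02, { KEY_VOLUMEDOWN } },
 *		{ KE_IGNORE, 0x03 },	// known scancode, deliberately dropped
 *		{ KE_END,    0 },
 *	};
 *
 * and hand it to sparse_keymap_setup() below.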
*/ struct key_entry { int type; /* See KE_* above */ u32 code; union { u16 keycode; /* For KE_KEY */ struct { /* For KE_SW, KE_VSW */ u8 code; u8 value; /* For KE_SW, ignored by KE_VSW */ } sw; }; }; struct key_entry *sparse_keymap_entry_from_scancode(struct input_dev *dev, unsigned int code); struct key_entry *sparse_keymap_entry_from_keycode(struct input_dev *dev, unsigned int code); int sparse_keymap_setup(struct input_dev *dev, const struct key_entry *keymap, int (*setup)(struct input_dev *, struct key_entry *)); void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *ke, unsigned int value, bool autorelease); bool sparse_keymap_report_event(struct input_dev *dev, unsigned int code, unsigned int value, bool autorelease); #endif /* _SPARSE_KEYMAP_H */ input/cy8ctmg110_pdata.h 0000644 00000000406 14722070374 0011033 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CY8CTMG110_PDATA_H #define _LINUX_CY8CTMG110_PDATA_H struct cy8ctmg110_pdata { int reset_pin; /* Reset pin is wired to this GPIO (optional) */ int irq_pin; /* IRQ pin is wired to this GPIO */ }; #endif input/tps6507x-ts.h 0000644 00000001027 14722070374 0010026 0 ustar 00 /* linux/i2c/tps6507x-ts.h * * Functions to access TPS65070 touch screen chip. * * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com) * * * For licencing details see kernel-base/COPYING */ #ifndef __LINUX_I2C_TPS6507X_TS_H #define __LINUX_I2C_TPS6507X_TS_H /* Board specific touch screen initial values */ struct touchscreen_init_data { int poll_period; /* ms */ __u16 min_pressure; /* min reading to be treated as a touch */ __u16 vendor; __u16 product; __u16 version; }; #endif /* __LINUX_I2C_TPS6507X_TS_H */ input/navpoint.h 0000644 00000000364 14722070374 0007723 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com> */ struct navpoint_platform_data { int port; /* PXA SSP port for pxa_ssp_request() */ int gpio; /* GPIO for power on/off */ }; input/cma3000.h 0000644 00000002037 14722070374 0007127 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * VTI CMA3000_Dxx Accelerometer driver * * Copyright (C) 2010 Texas Instruments * Author: Hemanth V <hemanthv@ti.com> */ #ifndef _LINUX_CMA3000_H #define _LINUX_CMA3000_H #define CMAMODE_DEFAULT 0 #define CMAMODE_MEAS100 1 #define CMAMODE_MEAS400 2 #define CMAMODE_MEAS40 3 #define CMAMODE_MOTDET 4 #define CMAMODE_FF100 5 #define CMAMODE_FF400 6 #define CMAMODE_POFF 7 #define CMARANGE_2G 2000 #define CMARANGE_8G 8000 /** * struct cma3000_i2c_platform_data - CMA3000 Platform data * @fuzz_x: Noise on X Axis * @fuzz_y: Noise on Y Axis * @fuzz_z: Noise on Z Axis * @g_range: G range in milli g i.e 2000 or 8000 * @mode: Operating mode * @mdthr: Motion detect threshold value * @mdfftmr: Motion detect and free fall time value * @ffthr: Free fall threshold value */ struct cma3000_platform_data { int fuzz_x; int fuzz_y; int fuzz_z; int g_range; uint8_t mode; uint8_t mdthr; uint8_t mdfftmr; uint8_t ffthr; unsigned long irqflags; }; #endif input/samsung-keypad.h 0000644 00000002132 14722070374 0011010 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Samsung Keypad platform data definitions * * Copyright (C) 2010 Samsung Electronics Co.Ltd * Author: Joonyoung Shim <jy0922.shim@samsung.com> */ #ifndef __SAMSUNG_KEYPAD_H #define __SAMSUNG_KEYPAD_H #include <linux/input/matrix_keypad.h> #define SAMSUNG_MAX_ROWS 8 #define SAMSUNG_MAX_COLS 8 /** * struct samsung_keypad_platdata - Platform device data for 
Samsung Keypad. * @keymap_data: pointer to &matrix_keymap_data. * @rows: number of keypad row supported. * @cols: number of keypad col supported. * @no_autorepeat: disable key autorepeat. * @wakeup: controls whether the device should be set up as wakeup source. * @cfg_gpio: configure the GPIO. * * Initialisation data specific to either the machine or the platform * for the device driver to use or call-back when configuring gpio. */ struct samsung_keypad_platdata { const struct matrix_keymap_data *keymap_data; unsigned int rows; unsigned int cols; bool no_autorepeat; bool wakeup; void (*cfg_gpio)(unsigned int rows, unsigned int cols); }; #endif /* __SAMSUNG_KEYPAD_H */ input/cyttsp.h 0000644 00000001677 14722070374 0007423 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Header file for: * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. * For use with Cypress Txx3xx parts. * Supported parts include: * CY8CTST341 * CY8CTMA340 * * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org> * * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com) */ #ifndef _CYTTSP_H_ #define _CYTTSP_H_ #define CY_SPI_NAME "cyttsp-spi" #define CY_I2C_NAME "cyttsp-i2c" /* Active Power state scanning/processing refresh interval */ #define CY_ACT_INTRVL_DFLT 0x00 /* ms */ /* touch timeout for the Active power */ #define CY_TCH_TMOUT_DFLT 0xFF /* ms */ /* Low Power state scanning/processing refresh interval */ #define CY_LP_INTRVL_DFLT 0x0A /* ms */ /* Active distance in pixels for a gesture to be reported */ #define CY_ACT_DIST_DFLT 0xF8 /* pixels */ #endif /* _CYTTSP_H_ */ input/mt.h 0000644 00000006406 14722070374 0006510 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _INPUT_MT_H #define _INPUT_MT_H /* * Input Multitouch Library * * Copyright (c) 2010 Henrik Rydberg */ #include <linux/input.h> #define TRKID_MAX 0xffff #define INPUT_MT_POINTER 0x0001 /* pointer device, e.g. trackpad */ #define INPUT_MT_DIRECT 0x0002 /* direct device, e.g. 
touchscreen */ #define INPUT_MT_DROP_UNUSED 0x0004 /* drop contacts not seen in frame */ #define INPUT_MT_TRACK 0x0008 /* use in-kernel tracking */ #define INPUT_MT_SEMI_MT 0x0010 /* semi-mt device, finger count handled manually */ /** * struct input_mt_slot - represents the state of an input MT slot * @abs: holds current values of ABS_MT axes for this slot * @frame: last frame at which input_mt_report_slot_state() was called * @key: optional driver designation of this slot */ struct input_mt_slot { int abs[ABS_MT_LAST - ABS_MT_FIRST + 1]; unsigned int frame; unsigned int key; }; /** * struct input_mt - state of tracked contacts * @trkid: stores MT tracking ID for the next contact * @num_slots: number of MT slots the device uses * @slot: MT slot currently being transmitted * @flags: input_mt operation flags * @frame: increases every time input_mt_sync_frame() is called * @red: reduced cost matrix for in-kernel tracking * @slots: array of slots holding current values of tracked contacts */ struct input_mt { int trkid; int num_slots; int slot; unsigned int flags; unsigned int frame; int *red; struct input_mt_slot slots[]; }; static inline void input_mt_set_value(struct input_mt_slot *slot, unsigned code, int value) { slot->abs[code - ABS_MT_FIRST] = value; } static inline int input_mt_get_value(const struct input_mt_slot *slot, unsigned code) { return slot->abs[code - ABS_MT_FIRST]; } static inline bool input_mt_is_active(const struct input_mt_slot *slot) { return input_mt_get_value(slot, ABS_MT_TRACKING_ID) >= 0; } static inline bool input_mt_is_used(const struct input_mt *mt, const struct input_mt_slot *slot) { return slot->frame == mt->frame; } int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots, unsigned int flags); void input_mt_destroy_slots(struct input_dev *dev); static inline int input_mt_new_trkid(struct input_mt *mt) { return mt->trkid++ & TRKID_MAX; } static inline void input_mt_slot(struct input_dev *dev, int slot) { input_event(dev, EV_ABS, ABS_MT_SLOT, slot); } static inline bool input_is_mt_value(int axis) { return axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST; } static inline bool input_is_mt_axis(int axis) { return axis == ABS_MT_SLOT || input_is_mt_value(axis); } bool input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active); void input_mt_report_finger_count(struct input_dev *dev, int count); void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count); void input_mt_drop_unused(struct input_dev *dev); void input_mt_sync_frame(struct input_dev *dev); /** * struct input_mt_pos - contact position * @x: horizontal coordinate * @y: vertical coordinate */ struct input_mt_pos { s16 x, y; }; int input_mt_assign_slots(struct input_dev *dev, int *slots, const struct input_mt_pos *pos, int num_pos, int dmax); int input_mt_get_slot_by_key(struct input_dev *dev, int key); #endif input/kxtj9.h 0000644 00000002220 14722070374 0007127 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Kionix, Inc. * Written by Chris Hudson <chudson@kionix.com> */ #ifndef __KXTJ9_H__ #define __KXTJ9_H__ #define KXTJ9_I2C_ADDR 0x0F struct kxtj9_platform_data { unsigned int min_interval; /* minimum poll interval (in milli-seconds) */ unsigned int init_interval; /* initial poll interval (in milli-seconds) */ /* * By default, x is axis 0, y is axis 1, z is axis 2; these can be * changed to account for sensor orientation within the host device. 
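 *
 * For example (an illustrative sketch, not taken from the driver): for a
 * sensor mounted rotated so that its axis 1 points along the device's X
 * axis, board code might use:
 *
 *	.axis_map_x = 1,	// report the sensor's axis 1 as X
 *	.axis_map_y = 0,	// report the sensor's axis 0 as Y
 *	.axis_map_z = 2,
 *	.negate_y   = true,	// flip Y to match the device's orientation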
 */
	u8 axis_map_x;
	u8 axis_map_y;
	u8 axis_map_z;

	/*
	 * Each axis can be negated to account for sensor orientation within
	 * the host device.
	 */
	bool negate_x;
	bool negate_y;
	bool negate_z;

	/* CTRL_REG1: set resolution, g-range, data ready enable */
	/* Output resolution: 8-bit valid or 12-bit valid */
#define RES_8BIT	0
#define RES_12BIT	(1 << 6)
	u8 res_12bit;
	/* Output g-range: +/-2g, 4g, or 8g */
#define KXTJ9_G_2G	0
#define KXTJ9_G_4G	(1 << 3)
#define KXTJ9_G_8G	(1 << 4)
	u8 g_range;

	int (*init)(void);
	void (*exit)(void);
	int (*power_on)(void);
	int (*power_off)(void);
};

#endif /* __KXTJ9_H__ */

timb_dma.h 0000644 00000002251 14722070374 0006477 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * timb_dma.h timberdale FPGA DMA driver defines
 * Copyright (c) 2010 Intel Corporation
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#ifndef _LINUX_TIMB_DMA_H
#define _LINUX_TIMB_DMA_H

/**
 * struct timb_dma_platform_data_channel - Description of each individual
 *	DMA channel for the timberdale DMA driver
 * @rx: true if this channel handles data in the direction to
 *	the CPU.
 * @bytes_per_line: Number of bytes per line, this is specific for channels
 *	handling video data. For other channels this shall be left to 0.
 * @descriptors: Number of descriptors to allocate for this channel.
 * @descriptor_elements: Number of elements in each descriptor.
 */
struct timb_dma_platform_data_channel {
	bool rx;
	unsigned int bytes_per_line;
	unsigned int descriptors;
	unsigned int descriptor_elements;
};

/**
 * struct timb_dma_platform_data - Platform data of the timberdale DMA driver
 * @nr_channels: Number of defined channels in the channels array.
 * @channels: Definition of each channel.
 */
struct timb_dma_platform_data {
	unsigned nr_channels;
	struct timb_dma_platform_data_channel channels[32];
};

#endif

xz.h 0000644 00000026247 14722070374 0005377 0 ustar 00 /*
 * XZ decompressor
 *
 * Authors: Lasse Collin <lasse.collin@tukaani.org>
 *          Igor Pavlov <http://7-zip.org/>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

#ifndef XZ_H
#define XZ_H

#ifdef __KERNEL__
#	include <linux/stddef.h>
#	include <linux/types.h>
#else
#	include <stddef.h>
#	include <stdint.h>
#endif

/* In Linux, this is used to make extern functions static when needed. */
#ifndef XZ_EXTERN
#	define XZ_EXTERN extern
#endif

/**
 * enum xz_mode - Operation mode
 *
 * @XZ_SINGLE:		Single-call mode. This uses less RAM than
 *			the multi-call modes, because the LZMA2
 *			dictionary doesn't need to be allocated as
 *			part of the decoder state. All required data
 *			structures are allocated at initialization,
 *			so xz_dec_run() cannot return XZ_MEM_ERROR.
 * @XZ_PREALLOC:	Multi-call mode with preallocated LZMA2
 *			dictionary buffer. All data structures are
 *			allocated at initialization, so xz_dec_run()
 *			cannot return XZ_MEM_ERROR.
 * @XZ_DYNALLOC:	Multi-call mode. The LZMA2 dictionary is
 *			allocated once the required size has been
 *			parsed from the stream headers. If the
 *			allocation fails, xz_dec_run() will return
 *			XZ_MEM_ERROR.
 *
 * It is possible to enable support only for a subset of the above
 * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC,
 * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled
 * with support for all operation modes, but the preboot code may
 * be built with fewer features to minimize code size.
 */
enum xz_mode {
	XZ_SINGLE,
	XZ_PREALLOC,
	XZ_DYNALLOC
};

/**
 * enum xz_ret - Return codes
 * @XZ_OK:		Everything is OK so far.
More input or more * output space is required to continue. This * return code is possible only in multi-call mode * (XZ_PREALLOC or XZ_DYNALLOC). * @XZ_STREAM_END: Operation finished successfully. * @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding * is still possible in multi-call mode by simply * calling xz_dec_run() again. * Note that this return value is used only if * XZ_DEC_ANY_CHECK was defined at build time, * which is not used in the kernel. Unsupported * check types return XZ_OPTIONS_ERROR if * XZ_DEC_ANY_CHECK was not defined at build time. * @XZ_MEM_ERROR: Allocating memory failed. This return code is * possible only if the decoder was initialized * with XZ_DYNALLOC. The amount of memory that was * tried to be allocated was no more than the * dict_max argument given to xz_dec_init(). * @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than * allowed by the dict_max argument given to * xz_dec_init(). This return value is possible * only in multi-call mode (XZ_PREALLOC or * XZ_DYNALLOC); the single-call mode (XZ_SINGLE) * ignores the dict_max argument. * @XZ_FORMAT_ERROR: File format was not recognized (wrong magic * bytes). * @XZ_OPTIONS_ERROR: This implementation doesn't support the requested * compression options. In the decoder this means * that the header CRC32 matches, but the header * itself specifies something that we don't support. * @XZ_DATA_ERROR: Compressed data is corrupt. * @XZ_BUF_ERROR: Cannot make any progress. Details are slightly * different between multi-call and single-call * mode; more information below. * * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls * to XZ code cannot consume any input and cannot produce any new output. * This happens when there is no new input available, or the output buffer * is full while at least one output byte is still pending. Assuming your * code is not buggy, you can get this error only when decoding a compressed * stream that is truncated or otherwise corrupt. * * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer * is too small or the compressed input is corrupt in a way that makes the * decoder produce more output than the caller expected. When it is * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR * is used instead of XZ_BUF_ERROR. */ enum xz_ret { XZ_OK, XZ_STREAM_END, XZ_UNSUPPORTED_CHECK, XZ_MEM_ERROR, XZ_MEMLIMIT_ERROR, XZ_FORMAT_ERROR, XZ_OPTIONS_ERROR, XZ_DATA_ERROR, XZ_BUF_ERROR }; /** * struct xz_buf - Passing input and output buffers to XZ code * @in: Beginning of the input buffer. This may be NULL if and only * if in_pos is equal to in_size. * @in_pos: Current position in the input buffer. This must not exceed * in_size. * @in_size: Size of the input buffer * @out: Beginning of the output buffer. This may be NULL if and only * if out_pos is equal to out_size. * @out_pos: Current position in the output buffer. This must not exceed * out_size. * @out_size: Size of the output buffer * * Only the contents of the output buffer from out[out_pos] onward, and * the variables in_pos and out_pos are modified by the XZ code. */ struct xz_buf { const uint8_t *in; size_t in_pos; size_t in_size; uint8_t *out; size_t out_pos; size_t out_size; }; /** * struct xz_dec - Opaque type to hold the XZ decoder state */ struct xz_dec; /** * xz_dec_init() - Allocate and initialize a XZ decoder state * @mode: Operation mode * @dict_max: Maximum size of the LZMA2 dictionary (history buffer) for * multi-call decoding. 
This is ignored in single-call mode * (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes * or 2^n + 2^(n-1) bytes (the latter sizes are less common * in practice), so other values for dict_max don't make sense. * In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB, * 512 KiB, and 1 MiB are probably the only reasonable values, * except for kernel and initramfs images where a bigger * dictionary can be fine and useful. * * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at * once. The caller must provide enough output space or the decoding will * fail. The output space is used as the dictionary buffer, which is why * there is no need to allocate the dictionary as part of the decoder's * internal state. * * Because the output buffer is used as the workspace, streams encoded using * a big dictionary are not a problem in single-call mode. It is enough that * the output buffer is big enough to hold the actual uncompressed data; it * can be smaller than the dictionary size stored in the stream headers. * * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes * of memory is preallocated for the LZMA2 dictionary. This way there is no * risk that xz_dec_run() could run out of memory, since xz_dec_run() will * never allocate any memory. Instead, if the preallocated dictionary is too * small for decoding the given input stream, xz_dec_run() will return * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be * decoded to avoid allocating excessive amount of memory for the dictionary. * * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC): * dict_max specifies the maximum allowed dictionary size that xz_dec_run() * may allocate once it has parsed the dictionary size from the stream * headers. This way excessive allocations can be avoided while still * limiting the maximum memory usage to a sane value to prevent running the * system out of memory when decompressing streams from untrusted sources. * * On success, xz_dec_init() returns a pointer to struct xz_dec, which is * ready to be used with xz_dec_run(). If memory allocation fails, * xz_dec_init() returns NULL. */ XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max); /** * xz_dec_run() - Run the XZ decoder * @s: Decoder state allocated using xz_dec_init() * @b: Input and output buffers * * The possible return values depend on build options and operation mode. * See enum xz_ret for details. * * Note that if an error occurs in single-call mode (return value is not * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the * contents of the output buffer from b->out[b->out_pos] onward are * undefined. This is true even after XZ_BUF_ERROR, because with some filter * chains, there may be a second pass over the output buffer, and this pass * cannot be properly done if the output buffer is truncated. Thus, you * cannot give the single-call decoder a too small buffer and then expect to * get that amount valid data from the beginning of the stream. You must use * the multi-call decoder if you don't want to uncompress the whole stream. */ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b); /** * xz_dec_reset() - Reset an already allocated decoder state * @s: Decoder state allocated using xz_dec_init() * * This function can be used to reset the multi-call decoder state without * freeing and reallocating memory with xz_dec_end() and xz_dec_init(). 
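 *
 * For example (a sketch; decode_one_stream() is a made-up helper that
 * loops on xz_dec_run() with a struct xz_buf), one decoder can be reused
 * for two streams back to back:
 *
 *	struct xz_dec *s = xz_dec_init(XZ_DYNALLOC, 1 << 26);
 *	decode_one_stream(s);
 *	xz_dec_reset(s);
 *	decode_one_stream(s);
 *	xz_dec_end(s);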
 *
 * In single-call mode, xz_dec_reset() is always called in the beginning of
 * xz_dec_run(). Thus, an explicit call to xz_dec_reset() is useful only in
 * multi-call mode.
 */
XZ_EXTERN void xz_dec_reset(struct xz_dec *s);

/**
 * xz_dec_end() - Free the memory allocated for the decoder state
 * @s:		Decoder state allocated using xz_dec_init(). If s is NULL,
 *		this function does nothing.
 */
XZ_EXTERN void xz_dec_end(struct xz_dec *s);

/*
 * Standalone build (userspace build or in-kernel build for boot time use)
 * needs a CRC32 implementation. For normal in-kernel use, the kernel's own
 * CRC32 module is used instead, and users of this module don't need to
 * care about the functions below.
 */
#ifndef XZ_INTERNAL_CRC32
#	ifdef __KERNEL__
#		define XZ_INTERNAL_CRC32 0
#	else
#		define XZ_INTERNAL_CRC32 1
#	endif
#endif

#if XZ_INTERNAL_CRC32
/*
 * This must be called before any other xz_* function to initialize
 * the CRC32 lookup table.
 */
XZ_EXTERN void xz_crc32_init(void);

/*
 * Update CRC32 value using the polynomial from IEEE-802.3. To start a new
 * calculation, the third argument must be zero. To continue the calculation,
 * the previously returned value is passed as the third argument.
 */
XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
#endif
#endif

iscsi_boot_sysfs.h 0000644 00000007324 14722070374 0010315 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Export the iSCSI boot info to userland via sysfs.
 *
 * Copyright (C) 2010 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2010 Mike Christie
 */
#ifndef _ISCSI_BOOT_SYSFS_
#define _ISCSI_BOOT_SYSFS_

/*
 * The text attribute names for each of the kobjects.
 */
enum iscsi_boot_eth_properties_enum {
	ISCSI_BOOT_ETH_INDEX,
	ISCSI_BOOT_ETH_FLAGS,
	ISCSI_BOOT_ETH_IP_ADDR,
	ISCSI_BOOT_ETH_PREFIX_LEN,
	ISCSI_BOOT_ETH_SUBNET_MASK,
	ISCSI_BOOT_ETH_ORIGIN,
	ISCSI_BOOT_ETH_GATEWAY,
	ISCSI_BOOT_ETH_PRIMARY_DNS,
	ISCSI_BOOT_ETH_SECONDARY_DNS,
	ISCSI_BOOT_ETH_DHCP,
	ISCSI_BOOT_ETH_VLAN,
	ISCSI_BOOT_ETH_MAC,
	/* eth_pci_bdf - this is replaced by link to the device itself. */
	ISCSI_BOOT_ETH_HOSTNAME,
	ISCSI_BOOT_ETH_END_MARKER,
};

enum iscsi_boot_tgt_properties_enum {
	ISCSI_BOOT_TGT_INDEX,
	ISCSI_BOOT_TGT_FLAGS,
	ISCSI_BOOT_TGT_IP_ADDR,
	ISCSI_BOOT_TGT_PORT,
	ISCSI_BOOT_TGT_LUN,
	ISCSI_BOOT_TGT_CHAP_TYPE,
	ISCSI_BOOT_TGT_NIC_ASSOC,
	ISCSI_BOOT_TGT_NAME,
	ISCSI_BOOT_TGT_CHAP_NAME,
	ISCSI_BOOT_TGT_CHAP_SECRET,
	ISCSI_BOOT_TGT_REV_CHAP_NAME,
	ISCSI_BOOT_TGT_REV_CHAP_SECRET,
	ISCSI_BOOT_TGT_END_MARKER,
};

enum iscsi_boot_initiator_properties_enum {
	ISCSI_BOOT_INI_INDEX,
	ISCSI_BOOT_INI_FLAGS,
	ISCSI_BOOT_INI_ISNS_SERVER,
	ISCSI_BOOT_INI_SLP_SERVER,
	ISCSI_BOOT_INI_PRI_RADIUS_SERVER,
	ISCSI_BOOT_INI_SEC_RADIUS_SERVER,
	ISCSI_BOOT_INI_INITIATOR_NAME,
	ISCSI_BOOT_INI_END_MARKER,
};

enum iscsi_boot_acpitbl_properties_enum {
	ISCSI_BOOT_ACPITBL_SIGNATURE,
	ISCSI_BOOT_ACPITBL_OEM_ID,
	ISCSI_BOOT_ACPITBL_OEM_TABLE_ID,
};

struct attribute_group;

struct iscsi_boot_kobj {
	struct kobject kobj;
	struct attribute_group *attr_group;
	struct list_head list;

	/*
	 * Pointer to store driver specific info. If set, this will
	 * be freed for the LLD when the kobj release function is called.
	 */
	void *data;

	/*
	 * Driver specific show function.
	 *
	 * The enum of the type. This can be any value of the above
	 * properties.
	 */
	ssize_t (*show) (void *data, int type, char *buf);

	/*
	 * Driver specific visibility function.
	 * The function should return whether the attr should be readable,
	 * writable, or not shown at all.
	 *
	 * The enum of the type. This can be any value of the above
	 * properties.
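	 *
	 * A hedged sketch of such a callback (the my_* names are invented):
	 *
	 *	static umode_t my_tgt_is_visible(void *data, int type)
	 *	{
	 *		switch (type) {
	 *		case ISCSI_BOOT_TGT_NAME:
	 *		case ISCSI_BOOT_TGT_IP_ADDR:
	 *			return S_IRUGO;
	 *		default:
	 *			return 0;	// hide all other attrs
	 *		}
	 *	}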
*/ umode_t (*is_visible) (void *data, int type); /* * Driver specific release function. * * The function should free the data passed in. */ void (*release) (void *data); }; struct iscsi_boot_kset { struct list_head kobj_list; struct kset *kset; }; struct iscsi_boot_kobj * iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); struct iscsi_boot_kobj * iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); struct iscsi_boot_kobj * iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); struct iscsi_boot_kobj * iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show)(void *data, int type, char *buf), umode_t (*is_visible)(void *data, int type), void (*release)(void *data)); struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno); void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset); #endif lantiq.h 0000644 00000000555 14722070374 0006220 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_LANTIQ_H #define __LINUX_LANTIQ_H #ifdef CONFIG_LANTIQ #include <lantiq_soc.h> #else #ifndef LTQ_EARLY_ASC #define LTQ_EARLY_ASC 0 #endif #ifndef CPHYSADDR #define CPHYSADDR(a) 0 #endif static inline struct clk *clk_get_fpi(void) { return NULL; } #endif /* CONFIG_LANTIQ */ #endif /* __LINUX_LANTIQ_H */ netfilter_ipv4/ip_tables.h 0000644 00000004606 14722070374 0011631 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * 25-Jul-1998 Major changes to allow for ip chain table * * 3-Jan-2000 Named tables to allow packet selection for different uses. */ /* * Format of an IP firewall descriptor * * src, dst, src_mask, dst_mask are always stored in network byte order. * flags are stored in host byte order (of course). * Port numbers are stored in HOST byte order. */ #ifndef _IPTABLES_H #define _IPTABLES_H #include <linux/if.h> #include <linux/in.h> #include <linux/init.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <uapi/linux/netfilter_ipv4/ip_tables.h> int ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res); void ipt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops); /* Standard entry. 
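 *
 * A standard entry pairs an ipt_entry with a standard verdict target.
 * As an illustrative sketch (not from the original header), such entries
 * are typically built with the initializers defined below, e.g.:
 *
 *	struct ipt_standard ok = IPT_STANDARD_INIT(NF_ACCEPT);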
*/ struct ipt_standard { struct ipt_entry entry; struct xt_standard_target target; }; struct ipt_error { struct ipt_entry entry; struct xt_error_target target; }; #define IPT_ENTRY_INIT(__size) \ { \ .target_offset = sizeof(struct ipt_entry), \ .next_offset = (__size), \ } #define IPT_STANDARD_INIT(__verdict) \ { \ .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \ .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ sizeof(struct xt_standard_target)), \ .target.verdict = -(__verdict) - 1, \ } #define IPT_ERROR_INIT \ { \ .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \ .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ sizeof(struct xt_error_target)), \ .target.errorname = "ERROR", \ } extern void *ipt_alloc_initial_table(const struct xt_table *); extern unsigned int ipt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); #ifdef CONFIG_COMPAT #include <net/compat.h> struct compat_ipt_entry { struct ipt_ip ip; compat_uint_t nfcache; __u16 target_offset; __u16 next_offset; compat_uint_t comefrom; struct compat_xt_counters counters; unsigned char elems[0]; }; /* Helper functions */ static inline struct xt_entry_target * compat_ipt_get_target(struct compat_ipt_entry *e) { return (void *)e + e->target_offset; } #endif /* CONFIG_COMPAT */ #endif /* _IPTABLES_H */ errno.h 0000644 00000002620 14722070374 0006050 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ERRNO_H #define _LINUX_ERRNO_H #include <uapi/linux/errno.h> /* * These should never be seen by user programs. To return one of ERESTART* * codes, signal_pending() MUST be set. Note that ptrace can observe these * at syscall exit tracing, but they will never be left for the debugged user * process to see. */ #define ERESTARTSYS 512 #define ERESTARTNOINTR 513 #define ERESTARTNOHAND 514 /* restart if no handler.. */ #define ENOIOCTLCMD 515 /* No ioctl command */ #define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ #define EPROBE_DEFER 517 /* Driver requests probe retry */ #define EOPENSTALE 518 /* open found a stale dentry */ #define ENOPARAM 519 /* Parameter not supported */ /* Defined for the NFSv3 protocol */ #define EBADHANDLE 521 /* Illegal NFS file handle */ #define ENOTSYNC 522 /* Update synchronization mismatch */ #define EBADCOOKIE 523 /* Cookie is stale */ #define ENOTSUPP 524 /* Operation is not supported */ #define ETOOSMALL 525 /* Buffer or request is too small */ #define ESERVERFAULT 526 /* An untranslatable error occurred */ #define EBADTYPE 527 /* Type not supported by server */ #define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ #define EIOCBQUEUED 529 /* iocb queued, will get completion event */ #define ERECALLCONFLICT 530 /* conflict with recalled state */ #endif seccomp.h 0000644 00000005707 14722070374 0006365 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SECCOMP_H #define _LINUX_SECCOMP_H #include <uapi/linux/seccomp.h> #define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ SECCOMP_FILTER_FLAG_LOG | \ SECCOMP_FILTER_FLAG_SPEC_ALLOW | \ SECCOMP_FILTER_FLAG_NEW_LISTENER) #ifdef CONFIG_SECCOMP #include <linux/thread_info.h> #include <asm/seccomp.h> struct seccomp_filter; /** * struct seccomp - the state of a seccomp'ed process * * @mode: indicates one of the valid values above for controlled * system calls available to a process. * @filter: must always point to a valid seccomp-filter or NULL as it is * accessed without locking during system call entry. 
* * @filter must only be accessed from the context of current as there * is no read locking. */ struct seccomp { int mode; struct seccomp_filter *filter; }; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER extern int __secure_computing(const struct seccomp_data *sd); static inline int secure_computing(const struct seccomp_data *sd) { if (unlikely(test_thread_flag(TIF_SECCOMP))) return __secure_computing(sd); return 0; } #else extern void secure_computing_strict(int this_syscall); #endif extern long prctl_get_seccomp(void); extern long prctl_set_seccomp(unsigned long, void __user *); static inline int seccomp_mode(struct seccomp *s) { return s->mode; } #else /* CONFIG_SECCOMP */ #include <linux/errno.h> struct seccomp { }; struct seccomp_filter { }; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER static inline int secure_computing(struct seccomp_data *sd) { return 0; } #else static inline void secure_computing_strict(int this_syscall) { return; } #endif static inline long prctl_get_seccomp(void) { return -EINVAL; } static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3) { return -EINVAL; } static inline int seccomp_mode(struct seccomp *s) { return SECCOMP_MODE_DISABLED; } #endif /* CONFIG_SECCOMP */ #ifdef CONFIG_SECCOMP_FILTER extern void put_seccomp_filter(struct task_struct *tsk); extern void get_seccomp_filter(struct task_struct *tsk); #else /* CONFIG_SECCOMP_FILTER */ static inline void put_seccomp_filter(struct task_struct *tsk) { return; } static inline void get_seccomp_filter(struct task_struct *tsk) { return; } #endif /* CONFIG_SECCOMP_FILTER */ #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE) extern long seccomp_get_filter(struct task_struct *task, unsigned long filter_off, void __user *data); extern long seccomp_get_metadata(struct task_struct *task, unsigned long filter_off, void __user *data); #else static inline long seccomp_get_filter(struct task_struct *task, unsigned long n, void __user *data) { return -EINVAL; } static inline long seccomp_get_metadata(struct task_struct *task, unsigned long filter_off, void __user *data) { return -EINVAL; } #endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */ #endif /* _LINUX_SECCOMP_H */ vga_switcheroo.h 0000644 00000021174 14722070374 0007753 0 ustar 00 /* * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs * * Copyright (c) 2010 Red Hat Inc. * Author : Dave Airlie <airlied@redhat.com> * * Copyright (c) 2015 Lukas Wunner <lukas@wunner.de> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS * IN THE SOFTWARE. * */ #ifndef _LINUX_VGA_SWITCHEROO_H_ #define _LINUX_VGA_SWITCHEROO_H_ #include <linux/fb.h> struct pci_dev; /** * enum vga_switcheroo_handler_flags_t - handler flags bitmask * @VGA_SWITCHEROO_CAN_SWITCH_DDC: whether the handler is able to switch the * DDC lines separately. This signals to clients that they should call * drm_get_edid_switcheroo() to probe the EDID * @VGA_SWITCHEROO_NEEDS_EDP_CONFIG: whether the handler is unable to switch * the AUX channel separately. This signals to clients that the active * GPU needs to train the link and communicate the link parameters to the * inactive GPU (mediated by vga_switcheroo). The inactive GPU may then * skip the AUX handshake and set up its output with these pre-calibrated * values (DisplayPort specification v1.1a, section 2.5.3.3) * * Handler flags bitmask. Used by handlers to declare their capabilities upon * registering with vga_switcheroo. */ enum vga_switcheroo_handler_flags_t { VGA_SWITCHEROO_CAN_SWITCH_DDC = (1 << 0), VGA_SWITCHEROO_NEEDS_EDP_CONFIG = (1 << 1), }; /** * enum vga_switcheroo_state - client power state * @VGA_SWITCHEROO_OFF: off * @VGA_SWITCHEROO_ON: on * @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo. * Only used in vga_switcheroo_get_client_state() which in turn is only * called from hda_intel.c * * Client power state. */ enum vga_switcheroo_state { VGA_SWITCHEROO_OFF, VGA_SWITCHEROO_ON, /* below are referred only from vga_switcheroo_get_client_state() */ VGA_SWITCHEROO_NOT_FOUND, }; /** * enum vga_switcheroo_client_id - client identifier * @VGA_SWITCHEROO_UNKNOWN_ID: initial identifier assigned to vga clients. * Determining the id requires the handler, so GPUs are given their * true id in a delayed fashion in vga_switcheroo_enable() * @VGA_SWITCHEROO_IGD: integrated graphics device * @VGA_SWITCHEROO_DIS: discrete graphics device * @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported * * Client identifier. Audio clients use the same identifier & 0x100. */ enum vga_switcheroo_client_id { VGA_SWITCHEROO_UNKNOWN_ID = 0x1000, VGA_SWITCHEROO_IGD = 0, VGA_SWITCHEROO_DIS, VGA_SWITCHEROO_MAX_CLIENTS, }; /** * struct vga_switcheroo_handler - handler callbacks * @init: initialize handler. * Optional. This gets called when vga_switcheroo is enabled, i.e. when * two vga clients have registered. It allows the handler to perform * some delayed initialization that depends on the existence of the * vga clients. Currently only the radeon and amdgpu drivers use this. * The return value is ignored * @switchto: switch outputs to given client. * Mandatory. For muxless machines this should be a no-op. Returning 0 * denotes success, anything else failure (in which case the switch is * aborted) * @switch_ddc: switch DDC lines to given client. * Optional. Should return the previous DDC owner on success or a * negative int on failure * @power_state: cut or reinstate power of given client. * Optional. The return value is ignored * @get_client_id: determine if given pci device is integrated or discrete GPU. * Mandatory * * Handler callbacks. The multiplexer itself. The @switchto and @get_client_id * methods are mandatory, all others may be set to NULL. 
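 *
 * A hedged sketch of a handler registration for a muxless machine (all
 * "demo_" names are invented for illustration):
 *
 *	static enum vga_switcheroo_client_id
 *	demo_get_client_id(struct pci_dev *pdev)
 *	{
 *		return pdev->vendor == PCI_VENDOR_ID_INTEL ?
 *			VGA_SWITCHEROO_IGD : VGA_SWITCHEROO_DIS;
 *	}
 *
 *	static int demo_switchto(enum vga_switcheroo_client_id id)
 *	{
 *		return 0;	- nothing to flip without a mux
 *	}
 *
 *	static const struct vga_switcheroo_handler demo_handler = {
 *		.switchto	= demo_switchto,
 *		.get_client_id	= demo_get_client_id,
 *	};
 *
 *	vga_switcheroo_register_handler(&demo_handler, 0);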
*/ struct vga_switcheroo_handler { int (*init)(void); int (*switchto)(enum vga_switcheroo_client_id id); int (*switch_ddc)(enum vga_switcheroo_client_id id); int (*power_state)(enum vga_switcheroo_client_id id, enum vga_switcheroo_state state); enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev); }; /** * struct vga_switcheroo_client_ops - client callbacks * @set_gpu_state: do the equivalent of suspend/resume for the card. * Mandatory. This should not cut power to the discrete GPU, * which is the job of the handler * @reprobe: poll outputs. * Optional. This gets called after waking the GPU and switching * the outputs to it * @can_switch: check if the device is in a position to switch now. * Mandatory. The client should return false if a user space process * has one of its device files open * @gpu_bound: notify the client id to audio client when the GPU is bound. * * Client callbacks. A client can be either a GPU or an audio device on a GPU. * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be * set to NULL. For audio clients, the @reprobe member is bogus. * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients. */ struct vga_switcheroo_client_ops { void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); void (*reprobe)(struct pci_dev *dev); bool (*can_switch)(struct pci_dev *dev); void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id); }; #if defined(CONFIG_VGA_SWITCHEROO) void vga_switcheroo_unregister_client(struct pci_dev *dev); int vga_switcheroo_register_client(struct pci_dev *dev, const struct vga_switcheroo_client_ops *ops, bool driver_power_control); int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, struct pci_dev *vga_dev); void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info); int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler, enum vga_switcheroo_handler_flags_t handler_flags); void vga_switcheroo_unregister_handler(void); enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void); int vga_switcheroo_lock_ddc(struct pci_dev *pdev); int vga_switcheroo_unlock_ddc(struct pci_dev *pdev); int vga_switcheroo_process_delayed_switch(void); bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev); enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain); void vga_switcheroo_fini_domain_pm_ops(struct device *dev); #else static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} static inline int vga_switcheroo_register_client(struct pci_dev *dev, const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; } static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler, enum vga_switcheroo_handler_flags_t handler_flags) { return 0; } static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, struct pci_dev *vga_dev) { return 0; } static inline void vga_switcheroo_unregister_handler(void) {} static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; } static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; } static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { 
return -ENODEV; } static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; } static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {} #endif #endif /* _LINUX_VGA_SWITCHEROO_H_ */ udp.h 0000644 00000010335 14722070374 0005515 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the UDP protocol. * * Version: @(#)udp.h 1.0.2 04/28/93 * * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> */ #ifndef _LINUX_UDP_H #define _LINUX_UDP_H #include <net/inet_sock.h> #include <linux/skbuff.h> #include <net/netns/hash.h> #include <uapi/linux/udp.h> static inline struct udphdr *udp_hdr(const struct sk_buff *skb) { return (struct udphdr *)skb_transport_header(skb); } static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb) { return (struct udphdr *)skb_inner_transport_header(skb); } #define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256) static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask) { return (num + net_hash_mix(net)) & mask; } struct udp_sock { /* inet_sock has to be the first member */ struct inet_sock inet; #define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0] #define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1] #define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node int pending; /* Any pending frames ? */ unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ encap_enabled:1, /* This socket enabled encap * processing; UDP tunnels and * different encapsulation layer set * this */ gro_enabled:1; /* Can accept GRO packets */ /* * Following member retains the information to create a UDP header * when the socket is uncorked. */ __u16 len; /* total length of pending frames */ __u16 gso_size; /* * Fields specific to UDP-Lite. */ __u16 pcslen; __u16 pcrlen; /* indicator bits used by pcflag: */ #define UDPLITE_BIT 0x1 /* set by udplite proto init function */ #define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */ #define UDPLITE_RECV_CC 0x4 /* set via udplite setsockopt */ __u8 pcflag; /* marks socket as UDP-Lite if > 0 */ __u8 unused[3]; /* * For encapsulation sockets. 
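 *
 * Tunnel drivers normally do not assign these members by hand; as a
 * hedged sketch (my_encap_rcv is an invented callback), they fill a
 * struct udp_tunnel_sock_cfg and let setup_udp_tunnel_sock() from
 * net/udp_tunnel.h wire it up:
 *
 *	struct udp_tunnel_sock_cfg cfg = {
 *		.encap_type	= 1,
 *		.encap_rcv	= my_encap_rcv,
 *	};
 *	setup_udp_tunnel_sock(net, sock, &cfg);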
*/ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb); void (*encap_destroy)(struct sock *sk); /* GRO functions for UDP socket */ struct sk_buff * (*gro_receive)(struct sock *sk, struct list_head *head, struct sk_buff *skb); int (*gro_complete)(struct sock *sk, struct sk_buff *skb, int nhoff); /* udp_recvmsg try to use this before splicing sk_receive_queue */ struct sk_buff_head reader_queue ____cacheline_aligned_in_smp; /* This field is dirtied by udp_recvmsg() */ int forward_deficit; }; #define UDP_MAX_SEGMENTS (1 << 6UL) static inline struct udp_sock *udp_sk(const struct sock *sk) { return (struct udp_sock *)sk; } static inline void udp_set_no_check6_tx(struct sock *sk, bool val) { udp_sk(sk)->no_check6_tx = val; } static inline void udp_set_no_check6_rx(struct sock *sk, bool val) { udp_sk(sk)->no_check6_rx = val; } static inline bool udp_get_no_check6_tx(struct sock *sk) { return udp_sk(sk)->no_check6_tx; } static inline bool udp_get_no_check6_rx(struct sock *sk) { return udp_sk(sk)->no_check6_rx; } static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int gso_size; if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { gso_size = skb_shinfo(skb)->gso_size; put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size); } } static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) { return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4; } #define udp_portaddr_for_each_entry(__sk, list) \ hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node) #define udp_portaddr_for_each_entry_rcu(__sk, list) \ hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node) #define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE) #endif /* _LINUX_UDP_H */ sungem_phy.h 0000644 00000007677 14722070374 0007122 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SUNGEM_PHY_H__ #define __SUNGEM_PHY_H__ struct mii_phy; /* Operations supported by any kind of PHY */ struct mii_phy_ops { int (*init)(struct mii_phy *phy); int (*suspend)(struct mii_phy *phy); int (*setup_aneg)(struct mii_phy *phy, u32 advertise); int (*setup_forced)(struct mii_phy *phy, int speed, int fd); int (*poll_link)(struct mii_phy *phy); int (*read_link)(struct mii_phy *phy); int (*enable_fiber)(struct mii_phy *phy, int autoneg); }; /* Structure used to statically define an mii/gii based PHY */ struct mii_phy_def { u32 phy_id; /* Concatenated ID1 << 16 | ID2 */ u32 phy_id_mask; /* Significant bits */ u32 features; /* Ethtool SUPPORTED_* defines */ int magic_aneg; /* Autoneg does all speed test for us */ const char* name; const struct mii_phy_ops* ops; }; enum { BCM54XX_COPPER, BCM54XX_FIBER, BCM54XX_GBIC, BCM54XX_SGMII, BCM54XX_UNKNOWN, }; /* An instance of a PHY, partially borrowed from mii_if_info */ struct mii_phy { struct mii_phy_def* def; u32 advertising; int mii_id; /* 1: autoneg enabled, 0: disabled */ int autoneg; /* forced speed & duplex (no autoneg) * partner speed & duplex & pause (autoneg) */ int speed; int duplex; int pause; /* Provided by host chip */ struct net_device *dev; int (*mdio_read) (struct net_device *dev, int mii_id, int reg); void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val); void *platform_data; }; /* Pass in a struct mii_phy with dev, mdio_read and mdio_write * filled, the remaining fields will be filled on return */ extern int sungem_phy_probe(struct mii_phy *phy, int mii_id); /* MII definitions missing 
from mii.h */ #define BMCR_SPD2 0x0040 /* Gigabit enable (bcm54xx) */ #define LPA_PAUSE 0x0400 /* More PHY registers (model specific) */ /* MII BCM5201 MULTIPHY interrupt register */ #define MII_BCM5201_INTERRUPT 0x1A #define MII_BCM5201_INTERRUPT_INTENABLE 0x4000 #define MII_BCM5201_AUXMODE2 0x1B #define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008 #define MII_BCM5201_MULTIPHY 0x1E /* MII BCM5201 MULTIPHY register bits */ #define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002 #define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008 /* MII BCM5221 Additional registers */ #define MII_BCM5221_TEST 0x1f #define MII_BCM5221_TEST_ENABLE_SHADOWS 0x0080 #define MII_BCM5221_SHDOW_AUX_STAT2 0x1b #define MII_BCM5221_SHDOW_AUX_STAT2_APD 0x0020 #define MII_BCM5221_SHDOW_AUX_MODE4 0x1a #define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001 #define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004 /* MII BCM5241 Additional registers */ #define MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR 0x0008 /* MII BCM5400 1000-BASET Control register */ #define MII_BCM5400_GB_CONTROL 0x09 #define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200 /* MII BCM5400 AUXCONTROL register */ #define MII_BCM5400_AUXCONTROL 0x18 #define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004 /* MII BCM5400 AUXSTATUS register */ #define MII_BCM5400_AUXSTATUS 0x19 #define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700 #define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT 8 /* 1000BT control (Marvell & BCM54xx at least) */ #define MII_1000BASETCONTROL 0x09 #define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200 #define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100 /* Marvell 88E1011 PHY control */ #define MII_M1011_PHY_SPEC_CONTROL 0x10 #define MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX 0x20 #define MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX 0x40 /* Marvell 88E1011 PHY status */ #define MII_M1011_PHY_SPEC_STATUS 0x11 #define MII_M1011_PHY_SPEC_STATUS_1000 0x8000 #define MII_M1011_PHY_SPEC_STATUS_100 0x4000 #define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 #define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 #define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 #define MII_M1011_PHY_SPEC_STATUS_TX_PAUSE 0x0008 #define MII_M1011_PHY_SPEC_STATUS_RX_PAUSE 0x0004 #endif /* __SUNGEM_PHY_H__ */ hwmon-sysfs.h 0000644 00000005430 14722070374 0007222 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * hwmon-sysfs.h - hardware monitoring chip driver sysfs defines * * Copyright (C) 2005 Yani Ioannou <yani.ioannou@gmail.com> */ #ifndef _LINUX_HWMON_SYSFS_H #define _LINUX_HWMON_SYSFS_H #include <linux/device.h> struct sensor_device_attribute{ struct device_attribute dev_attr; int index; }; #define to_sensor_dev_attr(_dev_attr) \ container_of(_dev_attr, struct sensor_device_attribute, dev_attr) #define SENSOR_ATTR(_name, _mode, _show, _store, _index) \ { .dev_attr = __ATTR(_name, _mode, _show, _store), \ .index = _index } #define SENSOR_ATTR_RO(_name, _func, _index) \ SENSOR_ATTR(_name, 0444, _func##_show, NULL, _index) #define SENSOR_ATTR_RW(_name, _func, _index) \ SENSOR_ATTR(_name, 0644, _func##_show, _func##_store, _index) #define SENSOR_ATTR_WO(_name, _func, _index) \ SENSOR_ATTR(_name, 0200, NULL, _func##_store, _index) #define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \ struct sensor_device_attribute sensor_dev_attr_##_name \ = SENSOR_ATTR(_name, _mode, _show, _store, _index) #define SENSOR_DEVICE_ATTR_RO(_name, _func, _index) \ SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index) #define SENSOR_DEVICE_ATTR_RW(_name, _func, _index) \ SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index) 
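/*
 * Hedged usage sketch (driver and function names invented): a hwmon driver
 * pairs a show routine with one of the wrappers above, e.g.
 *
 *	static ssize_t temp_show(struct device *dev,
 *				 struct device_attribute *attr, char *buf)
 *	{
 *		int channel = to_sensor_dev_attr(attr)->index;
 *
 *		return sprintf(buf, "%d\n", read_temp_mc(channel));
 *	}
 *	static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
 *
 * This expands to a struct sensor_device_attribute named
 * sensor_dev_attr_temp1_input whose ->index is 0, so a single show
 * routine can serve many channels.
 */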
#define SENSOR_DEVICE_ATTR_WO(_name, _func, _index) \ SENSOR_DEVICE_ATTR(_name, 0200, NULL, _func##_store, _index) struct sensor_device_attribute_2 { struct device_attribute dev_attr; u8 index; u8 nr; }; #define to_sensor_dev_attr_2(_dev_attr) \ container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr) #define SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) \ { .dev_attr = __ATTR(_name, _mode, _show, _store), \ .index = _index, \ .nr = _nr } #define SENSOR_ATTR_2_RO(_name, _func, _nr, _index) \ SENSOR_ATTR_2(_name, 0444, _func##_show, NULL, _nr, _index) #define SENSOR_ATTR_2_RW(_name, _func, _nr, _index) \ SENSOR_ATTR_2(_name, 0644, _func##_show, _func##_store, _nr, _index) #define SENSOR_ATTR_2_WO(_name, _func, _nr, _index) \ SENSOR_ATTR_2(_name, 0200, NULL, _func##_store, _nr, _index) #define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \ struct sensor_device_attribute_2 sensor_dev_attr_##_name \ = SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) #define SENSOR_DEVICE_ATTR_2_RO(_name, _func, _nr, _index) \ SENSOR_DEVICE_ATTR_2(_name, 0444, _func##_show, NULL, \ _nr, _index) #define SENSOR_DEVICE_ATTR_2_RW(_name, _func, _nr, _index) \ SENSOR_DEVICE_ATTR_2(_name, 0644, _func##_show, _func##_store, \ _nr, _index) #define SENSOR_DEVICE_ATTR_2_WO(_name, _func, _nr, _index) \ SENSOR_DEVICE_ATTR_2(_name, 0200, NULL, _func##_store, \ _nr, _index) #endif /* _LINUX_HWMON_SYSFS_H */ stacktrace.h 0000644 00000006660 14722070374 0007057 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_STACKTRACE_H #define __LINUX_STACKTRACE_H #include <linux/types.h> #include <asm/errno.h> struct task_struct; struct pt_regs; #ifdef CONFIG_STACKTRACE void stack_trace_print(const unsigned long *trace, unsigned int nr_entries, int spaces); int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, unsigned int nr_entries, int spaces); unsigned int stack_trace_save(unsigned long *store, unsigned int size, unsigned int skipnr); unsigned int stack_trace_save_tsk(struct task_struct *task, unsigned long *store, unsigned int size, unsigned int skipnr); unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, unsigned int size, unsigned int skipnr); unsigned int stack_trace_save_user(unsigned long *store, unsigned int size); /* Internal interfaces. Do not use in generic code */ #ifdef CONFIG_ARCH_STACKWALK /** * stack_trace_consume_fn - Callback for arch_stack_walk() * @cookie: Caller supplied pointer handed back by arch_stack_walk() * @addr: The stack entry address to consume * @reliable: True when the stack entry is reliable. Required by * some printk based consumers. * * Return: True, if the entry was consumed or skipped * False, if there is no space left to store */ typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr, bool reliable); /** * arch_stack_walk - Architecture specific function to walk the stack * @consume_entry: Callback which is invoked by the architecture code for * each entry. 
* @cookie: Caller supplied pointer which is handed back to * @consume_entry * @task: Pointer to a task struct, can be NULL * @regs: Pointer to registers, can be NULL * * ============ ======= ============================================ * task regs * ============ ======= ============================================ * task NULL Stack trace from task (can be current) * current regs Stack trace starting on regs->stackpointer * ============ ======= ============================================ */ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs); int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task); void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, const struct pt_regs *regs); #else /* CONFIG_ARCH_STACKWALK */ struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; int skip; /* input argument: How many entries to skip */ }; extern void save_stack_trace(struct stack_trace *trace); extern void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace); extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); extern int save_stack_trace_tsk_reliable(struct task_struct *tsk, struct stack_trace *trace); extern void save_stack_trace_user(struct stack_trace *trace); #endif /* !CONFIG_ARCH_STACKWALK */ #endif /* CONFIG_STACKTRACE */ #if defined(CONFIG_STACKTRACE) && defined(CONFIG_HAVE_RELIABLE_STACKTRACE) int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store, unsigned int size); #else static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store, unsigned int size) { return -ENOSYS; } #endif #endif /* __LINUX_STACKTRACE_H */ netdevice.h 0000644 00000460450 14722070374 0006702 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the Interfaces handler. * * Version: @(#)dev.h 1.0.10 08/12/93 * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Corey Minyard <wf-rch!minyard@relay.EU.net> * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> * Alan Cox, <alan@lxorguk.ukuu.org.uk> * Bjorn Ekwall. 
<bj0rn@blox.se> * Pekka Riikonen <priikone@poseidon.pspt.fi> * * Moved to /usr/include/linux for NET3 */ #ifndef _LINUX_NETDEVICE_H #define _LINUX_NETDEVICE_H #include <linux/timer.h> #include <linux/bug.h> #include <linux/delay.h> #include <linux/atomic.h> #include <linux/prefetch.h> #include <asm/cache.h> #include <asm/byteorder.h> #include <linux/percpu.h> #include <linux/rculist.h> #include <linux/workqueue.h> #include <linux/dynamic_queue_limits.h> #include <linux/ethtool.h> #include <net/net_namespace.h> #ifdef CONFIG_DCB #include <net/dcbnl.h> #endif #include <net/netprio_cgroup.h> #include <net/xdp.h> #include <linux/netdev_features.h> #include <linux/neighbour.h> #include <uapi/linux/netdevice.h> #include <uapi/linux/if_bonding.h> #include <uapi/linux/pkt_cls.h> #include <linux/hashtable.h> struct netpoll_info; struct device; struct phy_device; struct dsa_port; struct sfp_bus; /* 802.11 specific */ struct wireless_dev; /* 802.15.4 specific */ struct wpan_dev; struct mpls_dev; /* UDP Tunnel offloads */ struct udp_tunnel_info; struct bpf_prog; struct xdp_buff; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); /* Backlog congestion levels */ #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ #define NET_RX_DROP 1 /* packet dropped */ #define MAX_NEST_DEV 8 /* * Transmit return codes: transmit return codes originate from three different * namespaces: * * - qdisc return codes * - driver transmit return codes * - errno values * * Drivers are allowed to return any one of those in their hard_start_xmit() * function. Real network devices commonly used with qdiscs should only return * the driver transmit return codes though - when qdiscs are used, the actual * transmission happens asynchronously, so the value is not propagated to * higher layers. Virtual network devices transmit synchronously; in this case * the driver transmit return codes are consumed by dev_queue_xmit(), and all * others are propagated to higher layers. */ /* qdisc ->enqueue() return codes. */ #define NET_XMIT_SUCCESS 0x00 #define NET_XMIT_DROP 0x01 /* skb dropped */ #define NET_XMIT_CN 0x02 /* congestion notification */ #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It * indicates that the device will soon be dropping packets, or already drops * some packets of the same priority; prompting us to send less aggressively. */ #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) /* Driver transmit return codes */ #define NETDEV_TX_MASK 0xf0 enum netdev_tx { __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ NETDEV_TX_OK = 0x00, /* driver took care of packet */ NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ }; typedef enum netdev_tx netdev_tx_t; /* * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. */ static inline bool dev_xmit_complete(int rc) { /* * Positive cases with an skb consumed by a driver: * - successful transmission (rc == NETDEV_TX_OK) * - error while transmitting (rc < 0) * - error while queueing to a different device (rc & NET_XMIT_MASK) */ if (likely(rc < NET_XMIT_MASK)) return true; return false; } /* * Compute the worst-case header length according to the protocols * used. 
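 *
 * For example, with CONFIG_WLAN enabled but CONFIG_MAC80211_MESH disabled,
 * the block below resolves LL_MAX_HEADER to 96 bytes; enabling any of the
 * IP tunnel options listed afterwards then grows MAX_HEADER to
 * LL_MAX_HEADER + 48.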
*/ #if defined(CONFIG_HYPERV_NET) # define LL_MAX_HEADER 128 #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) # if defined(CONFIG_MAC80211_MESH) # define LL_MAX_HEADER 128 # else # define LL_MAX_HEADER 96 # endif #else # define LL_MAX_HEADER 32 #endif #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) #define MAX_HEADER LL_MAX_HEADER #else #define MAX_HEADER (LL_MAX_HEADER + 48) #endif /* * Old network device statistics. Fields are native words * (unsigned long) so they can be read and written atomically. */ #define NET_DEV_STAT(FIELD) \ union { \ unsigned long FIELD; \ atomic_long_t __##FIELD; \ } struct net_device_stats { NET_DEV_STAT(rx_packets); NET_DEV_STAT(tx_packets); NET_DEV_STAT(rx_bytes); NET_DEV_STAT(tx_bytes); NET_DEV_STAT(rx_errors); NET_DEV_STAT(tx_errors); NET_DEV_STAT(rx_dropped); NET_DEV_STAT(tx_dropped); NET_DEV_STAT(multicast); NET_DEV_STAT(collisions); NET_DEV_STAT(rx_length_errors); NET_DEV_STAT(rx_over_errors); NET_DEV_STAT(rx_crc_errors); NET_DEV_STAT(rx_frame_errors); NET_DEV_STAT(rx_fifo_errors); NET_DEV_STAT(rx_missed_errors); NET_DEV_STAT(tx_aborted_errors); NET_DEV_STAT(tx_carrier_errors); NET_DEV_STAT(tx_fifo_errors); NET_DEV_STAT(tx_heartbeat_errors); NET_DEV_STAT(tx_window_errors); NET_DEV_STAT(rx_compressed); NET_DEV_STAT(tx_compressed); }; #undef NET_DEV_STAT #include <linux/cache.h> #include <linux/skbuff.h> #ifdef CONFIG_RPS #include <linux/static_key.h> extern struct static_key_false rps_needed; extern struct static_key_false rfs_needed; #endif struct neighbour; struct neigh_parms; struct sk_buff; struct netdev_hw_addr { struct list_head list; unsigned char addr[MAX_ADDR_LEN]; unsigned char type; #define NETDEV_HW_ADDR_T_LAN 1 #define NETDEV_HW_ADDR_T_SAN 2 #define NETDEV_HW_ADDR_T_SLAVE 3 #define NETDEV_HW_ADDR_T_UNICAST 4 #define NETDEV_HW_ADDR_T_MULTICAST 5 bool global_use; int sync_cnt; int refcount; int synced; struct rcu_head rcu_head; }; struct netdev_hw_addr_list { struct list_head list; int count; }; #define netdev_hw_addr_list_count(l) ((l)->count) #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) #define netdev_hw_addr_list_for_each(ha, l) \ list_for_each_entry(ha, &(l)->list, list) #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) #define netdev_for_each_uc_addr(ha, dev) \ netdev_hw_addr_list_for_each(ha, &(dev)->uc) #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) #define netdev_for_each_mc_addr(ha, dev) \ netdev_hw_addr_list_for_each(ha, &(dev)->mc) struct hh_cache { unsigned int hh_len; seqlock_t hh_lock; /* cached hardware header; allow for machine alignment needs. */ #define HH_DATA_MOD 16 #define HH_DATA_OFF(__len) \ (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) #define HH_DATA_ALIGN(__len) \ (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; }; /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much. * Alternative is: * dev->hard_header_len ? (dev->hard_header_len + * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 * * We could use other alignment values, but we must maintain the * relationship HH alignment <= LL alignment. 
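 *
 * Worked example: a plain 14-byte Ethernet header with no needed_headroom
 * reserves ((14 & ~15) + 16) = 16 bytes via LL_RESERVED_SPACE() below,
 * while an 18-byte header (e.g. with a VLAN tag) reserves
 * ((18 & ~15) + 16) = 32 bytes, i.e. the reservation is the header length
 * rounded up into a multiple of HH_DATA_MOD (16).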
*/ #define LL_RESERVED_SPACE(dev) \ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) struct header_ops { int (*create) (struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned int len); int (*parse)(const struct sk_buff *skb, unsigned char *haddr); int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); void (*cache_update)(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr); bool (*validate)(const char *ll_header, unsigned int len); __be16 (*parse_protocol)(const struct sk_buff *skb); }; /* These flag bits are private to the generic network queueing * layer; they may not be explicitly referenced by any other * code. */ enum netdev_state_t { __LINK_STATE_START, __LINK_STATE_PRESENT, __LINK_STATE_NOCARRIER, __LINK_STATE_LINKWATCH_PENDING, __LINK_STATE_DORMANT, }; /* * This structure holds boot-time configured netdevice settings. They * are then used in the device probing. */ struct netdev_boot_setup { char name[IFNAMSIZ]; struct ifmap map; }; #define NETDEV_BOOT_SETUP_MAX 8 int __init netdev_boot_setup(char *str); struct gro_list { struct list_head list; int count; }; /* * size of gro hash buckets; must be less than the number of bits in * napi_struct::gro_bitmask */ #define GRO_HASH_BUCKETS 8 /* * Structure for NAPI scheduling similar to tasklet but with weighting */ struct napi_struct { /* The poll_list must only be managed by the entity which * changes the state of the NAPI_STATE_SCHED bit. This means * whoever atomically sets that bit can add this napi_struct * to the per-CPU poll_list, and whoever clears that bit * can remove from the list right before clearing the bit. */ struct list_head poll_list; unsigned long state; int weight; unsigned long gro_bitmask; int (*poll)(struct napi_struct *, int); #ifdef CONFIG_NETPOLL int poll_owner; #endif struct net_device *dev; struct gro_list gro_hash[GRO_HASH_BUCKETS]; struct sk_buff *skb; struct list_head rx_list; /* Pending GRO_NORMAL skbs */ int rx_count; /* length of rx_list */ struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; }; enum { NAPI_STATE_SCHED, /* Poll is scheduled */ NAPI_STATE_MISSED, /* reschedule a napi */ NAPI_STATE_DISABLE, /* Disable pending */ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ }; enum { NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), }; enum gro_result { GRO_MERGED, GRO_MERGED_FREE, GRO_HELD, GRO_NORMAL, GRO_DROP, GRO_CONSUMED, }; typedef enum gro_result gro_result_t; /* * enum rx_handler_result - Possible return values for rx_handlers. * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it * further. * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in * case skb->dev was changed by rx_handler. 
* @RX_HANDLER_EXACT: Force exact delivery, no wildcard. * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. * * rx_handlers are functions called from inside __netif_receive_skb(), to do * special processing of the skb, prior to delivery to protocol handlers. * * Currently, a net_device can only have a single rx_handler registered. Trying * to register a second rx_handler will return -EBUSY. * * To register a rx_handler on a net_device, use netdev_rx_handler_register(). * To unregister a rx_handler on a net_device, use * netdev_rx_handler_unregister(). * * Upon return, rx_handler is expected to tell __netif_receive_skb() what to * do with the skb. * * If the rx_handler consumed the skb in some way, it should return * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for * the skb to be delivered in some other way. * * If the rx_handler changed skb->dev, to divert the skb to another * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the * new device will be called if it exists. * * If the rx_handler decides the skb should be ignored, it should return * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that * are registered on exact device (ptype->dev == skb->dev). * * If the rx_handler didn't change skb->dev, but wants the skb to be normally * delivered, it should return RX_HANDLER_PASS. * * A device without a registered rx_handler will behave as if rx_handler * returned RX_HANDLER_PASS. */ enum rx_handler_result { RX_HANDLER_CONSUMED, RX_HANDLER_ANOTHER, RX_HANDLER_EXACT, RX_HANDLER_PASS, }; typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); void __napi_schedule(struct napi_struct *n); void __napi_schedule_irqoff(struct napi_struct *n); static inline bool napi_disable_pending(struct napi_struct *n) { return test_bit(NAPI_STATE_DISABLE, &n->state); } bool napi_schedule_prep(struct napi_struct *n); /** * napi_schedule - schedule NAPI poll * @n: NAPI context * * Schedule NAPI poll routine to be called if it is not already * running. */ static inline void napi_schedule(struct napi_struct *n) { if (napi_schedule_prep(n)) __napi_schedule(n); } /** * napi_schedule_irqoff - schedule NAPI poll * @n: NAPI context * * Variant of napi_schedule(), assuming hard irqs are masked. */ static inline void napi_schedule_irqoff(struct napi_struct *n) { if (napi_schedule_prep(n)) __napi_schedule_irqoff(n); } /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ static inline bool napi_reschedule(struct napi_struct *napi) { if (napi_schedule_prep(napi)) { __napi_schedule(napi); return true; } return false; } bool napi_complete_done(struct napi_struct *n, int work_done); /** * napi_complete - NAPI processing complete * @n: NAPI context * * Mark NAPI processing as complete. * Consider using napi_complete_done() instead. * Return false if device should avoid rearming interrupts. */ static inline bool napi_complete(struct napi_struct *n) { return napi_complete_done(n, 0); } /** * napi_hash_del - remove a NAPI from global table * @napi: NAPI context * * Warning: caller must observe RCU grace period * before freeing memory containing @napi, if * this function returns true. * Note: core networking stack automatically calls it * from netif_napi_del(). * Drivers might want to call this helper to combine all * the needed RCU grace periods into a single one. 
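 *
 * For context, the scheduling helpers above usually meet in a driver's
 * poll callback; a hedged sketch (my_clean_rx() is an invented helper):
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = my_clean_rx(napi, budget);
 *
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;
 *	}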
*/ bool napi_hash_del(struct napi_struct *napi); /** * napi_disable - prevent NAPI from scheduling * @n: NAPI context * * Stop NAPI from being scheduled on this context. * Waits till any outstanding processing completes. */ void napi_disable(struct napi_struct *n); /** * napi_enable - enable NAPI scheduling * @n: NAPI context * * Resume NAPI from being scheduled on this context. * Must be paired with napi_disable. */ static inline void napi_enable(struct napi_struct *n) { BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); clear_bit(NAPI_STATE_NPSVC, &n->state); } /** * napi_synchronize - wait until NAPI is not running * @n: NAPI context * * Wait until NAPI is done being scheduled on this context. * Waits till any outstanding processing completes but * does not disable future activations. */ static inline void napi_synchronize(const struct napi_struct *n) { if (IS_ENABLED(CONFIG_SMP)) while (test_bit(NAPI_STATE_SCHED, &n->state)) msleep(1); else barrier(); } /** * napi_if_scheduled_mark_missed - if napi is running, set the * NAPIF_STATE_MISSED * @n: NAPI context * * If napi is running, set the NAPIF_STATE_MISSED, and return true if * NAPI is scheduled. **/ static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) { unsigned long val, new; do { val = READ_ONCE(n->state); if (val & NAPIF_STATE_DISABLE) return true; if (!(val & NAPIF_STATE_SCHED)) return false; new = val | NAPIF_STATE_MISSED; } while (cmpxchg(&n->state, val, new) != val); return true; } enum netdev_queue_state_t { __QUEUE_STATE_DRV_XOFF, __QUEUE_STATE_STACK_XOFF, __QUEUE_STATE_FROZEN, }; #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF) #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF) #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN) #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF) #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ QUEUE_STATE_FROZEN) #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \ QUEUE_STATE_FROZEN) /* * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The * netif_tx_* functions below are used to manipulate this flag. The * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit * queue independently. The netif_xmit_*stopped functions below are called * to check if the queue has been stopped by the driver or stack (either * of the XOFF bits are set in the state). Drivers should not need to call * netif_xmit*stopped functions, they should only be using netif_tx_*. 
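 *
 * Hedged driver-side sketch (ring_full() and ring_has_room() are invented
 * helpers): the xmit path stops its queue when the TX ring fills,
 *
 *	if (unlikely(ring_full(ring)))
 *		netif_tx_stop_queue(txq);
 *
 * and the TX-completion path wakes it again once space frees up:
 *
 *	if (netif_tx_queue_stopped(txq) && ring_has_room(ring))
 *		netif_tx_wake_queue(txq);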
*/ struct netdev_queue { /* * read-mostly part */ struct net_device *dev; struct Qdisc __rcu *qdisc; struct Qdisc *qdisc_sleeping; #ifdef CONFIG_SYSFS struct kobject kobj; #endif #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) int numa_node; #endif unsigned long tx_maxrate; /* * Number of TX timeouts for this queue * (/sys/class/net/DEV/Q/trans_timeout) */ unsigned long trans_timeout; /* Subordinate device that the queue has been assigned to */ struct net_device *sb_dev; #ifdef CONFIG_XDP_SOCKETS struct xdp_umem *umem; #endif /* * write-mostly part */ spinlock_t _xmit_lock ____cacheline_aligned_in_smp; int xmit_lock_owner; /* * Time (in jiffies) of last Tx */ unsigned long trans_start; unsigned long state; #ifdef CONFIG_BQL struct dql dql; #endif } ____cacheline_aligned_in_smp; extern int sysctl_fb_tunnels_only_for_init_net; extern int sysctl_devconf_inherit_init_net; static inline bool net_has_fallback_tunnels(const struct net *net) { return net == &init_net || !IS_ENABLED(CONFIG_SYSCTL) || !sysctl_fb_tunnels_only_for_init_net; } static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) { #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) return q->numa_node; #else return NUMA_NO_NODE; #endif } static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) { #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) q->numa_node = node; #endif } #ifdef CONFIG_RPS /* * This structure holds an RPS map which can be of variable length. The * map is an array of CPUs. */ struct rps_map { unsigned int len; struct rcu_head rcu; u16 cpus[0]; }; #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) /* * The rps_dev_flow structure contains the mapping of a flow to a CPU, the * tail pointer for that CPU's input queue at the time of last enqueue, and * a hardware filter index. */ struct rps_dev_flow { u16 cpu; u16 filter; unsigned int last_qtail; }; #define RPS_NO_FILTER 0xffff /* * The rps_dev_flow_table structure contains a table of flow mappings. */ struct rps_dev_flow_table { unsigned int mask; struct rcu_head rcu; struct rps_dev_flow flows[0]; }; #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ ((_num) * sizeof(struct rps_dev_flow))) /* * The rps_sock_flow_table contains mappings of flows to the last CPU * on which they were processed by the application (set in recvmsg). * Each entry is a 32bit value. Upper part is the high-order bits * of flow hash, lower part is CPU number. * rps_cpu_mask is used to partition the space, depending on number of * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f, * meaning we use 32-6=26 bits for the hash. */ struct rps_sock_flow_table { u32 mask; u32 ents[0] ____cacheline_aligned_in_smp; }; #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) #define RPS_NO_CPU 0xffff extern u32 rps_cpu_mask; extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, u32 hash) { if (table && hash) { unsigned int index = hash & table->mask; u32 val = hash & ~rps_cpu_mask; /* We only give a hint, preemption can change CPU under us */ val |= raw_smp_processor_id(); /* The following WRITE_ONCE() is paired with the READ_ONCE() * here, and another one in get_rps_cpu(). 
*/ if (READ_ONCE(table->ents[index]) != val) WRITE_ONCE(table->ents[index], val); } } #ifdef CONFIG_RFS_ACCEL bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, u16 filter_id); #endif #endif /* CONFIG_RPS */ /* This structure contains an instance of an RX queue. */ struct netdev_rx_queue { #ifdef CONFIG_RPS struct rps_map __rcu *rps_map; struct rps_dev_flow_table __rcu *rps_flow_table; #endif struct kobject kobj; struct net_device *dev; struct xdp_rxq_info xdp_rxq; #ifdef CONFIG_XDP_SOCKETS struct xdp_umem *umem; #endif } ____cacheline_aligned_in_smp; /* * RX queue sysfs structures and functions. */ struct rx_queue_attribute { struct attribute attr; ssize_t (*show)(struct netdev_rx_queue *queue, char *buf); ssize_t (*store)(struct netdev_rx_queue *queue, const char *buf, size_t len); }; #ifdef CONFIG_XPS /* * This structure holds an XPS map which can be of variable length. The * map is an array of queues. */ struct xps_map { unsigned int len; unsigned int alloc_len; struct rcu_head rcu; u16 queues[0]; }; #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ - sizeof(struct xps_map)) / sizeof(u16)) /* * This structure holds all XPS maps for device. Maps are indexed by CPU. */ struct xps_dev_maps { struct rcu_head rcu; struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */ }; #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ (_rxqs * (_tcs) * sizeof(struct xps_map *))) #endif /* CONFIG_XPS */ #define TC_MAX_QUEUE 16 #define TC_BITMASK 15 /* HW offloaded queuing disciplines txq count and offset maps */ struct netdev_tc_txq { u16 count; u16 offset; }; #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) /* * This structure is to hold information about the device * configured to run FCoE protocol stack. */ struct netdev_fcoe_hbainfo { char manufacturer[64]; char serial_number[64]; char hardware_version[64]; char driver_version[64]; char optionrom_version[64]; char firmware_version[64]; char model[256]; char model_description[256]; }; #endif #define MAX_PHYS_ITEM_ID_LEN 32 /* This structure holds a unique identifier to identify some * physical item (port for example) used by a netdevice. */ struct netdev_phys_item_id { unsigned char id[MAX_PHYS_ITEM_ID_LEN]; unsigned char id_len; }; static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, struct netdev_phys_item_id *b) { return a->id_len == b->id_len && memcmp(a->id, b->id, a->id_len) == 0; } typedef u16 (*select_queue_fallback_t)(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); enum tc_setup_type { TC_SETUP_QDISC_MQPRIO, TC_SETUP_CLSU32, TC_SETUP_CLSFLOWER, TC_SETUP_CLSMATCHALL, TC_SETUP_CLSBPF, TC_SETUP_BLOCK, TC_SETUP_QDISC_CBS, TC_SETUP_QDISC_RED, TC_SETUP_QDISC_PRIO, TC_SETUP_QDISC_MQ, TC_SETUP_QDISC_ETF, TC_SETUP_ROOT_QDISC, TC_SETUP_QDISC_GRED, TC_SETUP_QDISC_TAPRIO, }; /* These structures hold the attributes of bpf state that are being passed * to the netdevice through the bpf op. */ enum bpf_netdev_command { /* Set or clear a bpf program used in the earliest stages of packet * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee * is responsible for calling bpf_prog_put on any old progs that are * stored. 
In case of error, the callee need not release the new prog * reference, but on success it takes ownership and must bpf_prog_put * when it is no longer used. */ XDP_SETUP_PROG, XDP_SETUP_PROG_HW, XDP_QUERY_PROG, XDP_QUERY_PROG_HW, /* BPF program for offload callbacks, invoked at program load time. */ BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE, XDP_SETUP_XSK_UMEM, }; struct bpf_prog_offload_ops; struct netlink_ext_ack; struct xdp_umem; struct netdev_bpf { enum bpf_netdev_command command; union { /* XDP_SETUP_PROG */ struct { u32 flags; struct bpf_prog *prog; struct netlink_ext_ack *extack; }; /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */ struct { u32 prog_id; /* flags with which program was installed */ u32 prog_flags; }; /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ struct { struct bpf_offloaded_map *offmap; }; /* XDP_SETUP_XSK_UMEM */ struct { struct xdp_umem *umem; u16 queue_id; } xsk; }; }; /* Flags for ndo_xsk_wakeup. */ #define XDP_WAKEUP_RX (1 << 0) #define XDP_WAKEUP_TX (1 << 1) #ifdef CONFIG_XFRM_OFFLOAD struct xfrmdev_ops { int (*xdo_dev_state_add) (struct xfrm_state *x); void (*xdo_dev_state_delete) (struct xfrm_state *x); void (*xdo_dev_state_free) (struct xfrm_state *x); bool (*xdo_dev_offload_ok) (struct sk_buff *skb, struct xfrm_state *x); void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); }; #endif struct dev_ifalias { struct rcu_head rcuhead; char ifalias[]; }; struct devlink; struct tlsdev_ops; /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are * optional and can be filled with a null pointer. * * int (*ndo_init)(struct net_device *dev); * This function is called once when a network device is registered. * The network device can use this for any late stage initialization * or semantic validation. It can fail with an error code which will * be propagated back to register_netdev. * * void (*ndo_uninit)(struct net_device *dev); * This function is called when device is unregistered or when registration * fails. It is not called if init fails. * * int (*ndo_open)(struct net_device *dev); * This function is called when a network device transitions to the up * state. * * int (*ndo_stop)(struct net_device *dev); * This function is called when a network device transitions to the down * state. * * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, * struct net_device *dev); * Called when a packet needs to be transmitted. * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop * the queue before that can happen; it's for obsolete devices and weird * corner cases, but the stack really does a non-trivial amount * of useless work if you return NETDEV_TX_BUSY. * Required; cannot be NULL. * * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, * struct net_device *dev * netdev_features_t features); * Called by core transmit path to determine if device is capable of * performing offload operations on a given packet. This is to give * the device an opportunity to implement any restrictions that cannot * be otherwise expressed by feature flags. The check is called with * the set of features that the stack has calculated and it returns * those the driver believes to be appropriate. * * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, * struct net_device *sb_dev); * Called to decide which queue to use when device supports multiple * transmit queues. 
* * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); * This function is called to allow the device receiver to make * changes to its configuration when multicast or promiscuous is enabled. * * void (*ndo_set_rx_mode)(struct net_device *dev); * This function is called when the device changes its address list * filtering. If the driver handles unicast address filtering, it should set * IFF_UNICAST_FLT in its priv_flags. * * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); * This function is called when the Media Access Control address * needs to be changed. If this interface is not defined, the * MAC address cannot be changed. * * int (*ndo_validate_addr)(struct net_device *dev); * Test if the Media Access Control address is valid for the device. * * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); * Called when a user requests an ioctl which can't be handled by * the generic interface code. If not defined, ioctls return a * not-supported error code. * * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); * Used to set a network device's bus interface parameters. This interface * is retained for legacy reasons; new devices should use the bus * interface (PCI) for low level management. * * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); * Called when a user wants to change the Maximum Transfer Unit * of a device. * * void (*ndo_tx_timeout)(struct net_device *dev); * Callback used when the transmitter has not made any progress * for dev->watchdog ticks. * * void (*ndo_get_stats64)(struct net_device *dev, * struct rtnl_link_stats64 *storage); * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); * Called when a user wants to get the network device usage * statistics. Drivers must do one of the following: * 1. Define @ndo_get_stats64 to fill in a zero-initialised * rtnl_link_stats64 structure passed by the caller. * 2. Define @ndo_get_stats to update a net_device_stats structure * (which should normally be dev->stats) and return a pointer to * it. The structure may be changed asynchronously only if each * field is written atomically. * 3. Update dev->stats asynchronously and atomically, and define * neither operation. * * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) * Return true if this device supports offload stats of this attr_id. * * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, * void *attr_data) * Get statistics for offload operations by attr_id. Write it into the * attr_data pointer. * * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); * If the device supports VLAN filtering this function is called when a * VLAN id is registered. * * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); * If the device supports VLAN filtering this function is called when a * VLAN id is unregistered. * * void (*ndo_poll_controller)(struct net_device *dev); * * SR-IOV management functions. 
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, * u8 qos, __be16 proto); * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, * int max_tx_rate); * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_config)(struct net_device *dev, * int vf, struct ifla_vf_info *ivf); * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); * int (*ndo_set_vf_port)(struct net_device *dev, int vf, * struct nlattr *port[]); * * Enable or disable the VF ability to query its RSS Redirection Table and * Hash Key. This is needed since on some devices VFs share this information * with the PF and querying it may introduce a theoretical security risk. * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, * void *type_data); * Called to set up any 'tc' scheduler, classifier or action on @dev. * This is always called from the stack with the rtnl lock held and netif * tx queues stopped. This allows the netdevice to perform queue * management safely. * * Fiber Channel over Ethernet (FCoE) offload functions. * int (*ndo_fcoe_enable)(struct net_device *dev); * Called when the FCoE protocol stack wants to start using LLD for FCoE * so the underlying device can perform whatever needed configuration or * initialization to support acceleration of FCoE traffic. * * int (*ndo_fcoe_disable)(struct net_device *dev); * Called when the FCoE protocol stack wants to stop using LLD for FCoE * so the underlying device can perform whatever needed clean-ups to * stop supporting acceleration of FCoE traffic. * * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, * struct scatterlist *sgl, unsigned int sgc); * Called when the FCoE Initiator wants to initialize an I/O that * is a possible candidate for Direct Data Placement (DDP). The LLD can * perform necessary setup and returns 1 to indicate the device is set up * successfully to perform DDP on this I/O, otherwise this returns 0. * * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); * Called when the FCoE Initiator/Target is done with the DDPed I/O as * indicated by the FC exchange id 'xid', so the underlying device can * clean up and reuse resources for later DDP requests. * * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, * struct scatterlist *sgl, unsigned int sgc); * Called when the FCoE Target wants to initialize an I/O that * is a possible candidate for Direct Data Placement (DDP). The LLD can * perform necessary setup and returns 1 to indicate the device is set up * successfully to perform DDP on this I/O, otherwise this returns 0. * * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, * struct netdev_fcoe_hbainfo *hbainfo); * Called when the FCoE Protocol stack wants information on the underlying * device. This information is utilized by the FCoE protocol stack to * register attributes with Fiber Channel management service as per the * FC-GS Fabric Device Management Information (FDMI) specification. 
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override the default World
 *	Wide Name (WWN) generation mechanism in the FCoE protocol stack to
 *	pass its own World Wide Port Name (WWPN) or World Wide Node Name
 *	(WWNN) to the FCoE protocol stack to use.
 *
 * RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set a hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 * Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release a previously enslaved netdev.
 *
 * Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update the device configuration to new features. The passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid, u16 flags,
 *		      struct netlink_ext_ack *extack);
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, struct net_device *filter_dev,
 *		       int *idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags, struct netlink_ext_ack *extack)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask,
 *			     int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change the device carrier. Soft-devices (like dummy, team,
 *	etc) which do not represent real hardware may define this to allow
 *	their userspace components to manage their virtual carrier state.
 *	Devices that determine carrier state from physical hardware
 *	properties (e.g. network cables) or protocol-dependent mechanisms
 *	(e.g. USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this
 *	function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid);
 *	Called to get the ID of the physical port of this device. If the
 *	driver does not implement this, it is assumed that the hw is not
 *	able to have multiple net devices on a single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *			         struct netdev_phys_item_id *ppid)
 *	Called to get the parent ID of the physical port of this device.
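 *
 * For orientation, a driver usually implements only a small subset of the
 * callbacks above and leaves the rest NULL. A purely illustrative sketch
 * (the foo_* symbols are hypothetical; eth_mac_addr and eth_validate_addr
 * are the stock Ethernet helpers):
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_rx_mode	= foo_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *
 * with dev->netdev_ops = &foo_netdev_ops; assigned before registration.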
 *
 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
 *			      struct udp_tunnel_info *ti);
 *	Called by the UDP tunnel to notify a driver about the UDP port and
 *	socket address family that a UDP tunnel is listening to. It is called
 *	only when a new port starts listening. The operation is protected by
 *	the RTNL.
 *
 * void (*ndo_udp_tunnel_del)(struct net_device *dev,
 *			      struct udp_tunnel_info *ti);
 *	Called by the UDP tunnel to notify the driver about a UDP port and
 *	socket address family that the UDP tunnel is not listening to anymore.
 *	The operation is protected by the RTNL.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and 'priv' is the structure returned by the add
 *	operation.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *			     int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation on a specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * int (*ndo_change_proto_down)(struct net_device *dev,
 *			        bool proto_down);
 *	This function is used to pass protocol port error state information
 *	to the switch driver. The switch driver can react to the proto_down
 *	by doing a phys down on the associated switch port.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for a given
 *	skb. This is useful for retrieving outer tunnel header parameters
 *	while sampling packets.
 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 *	This function is used to specify the headroom that the skb must
 *	consider when allocating an skb during packet reception. Setting
 *	an appropriate rx headroom value avoids an skb head copy on
 *	forward. Setting a negative value resets the rx headroom to the
 *	default value.
 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
 *	This function is used to set or query state related to XDP on the
 *	netdevice and manage BPF offload. See the definition of
 *	enum bpf_netdev_command for details.
 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
 *		       u32 flags);
 *	This function is used to submit @n XDP packets for transmit on a
 *	netdevice. Returns the number of frames successfully transmitted;
 *	frames that got dropped are freed/returned via xdp_return_frame().
 *	A negative return value means a general error invoking the ndo: no
 *	frames were transmitted and the core caller will free all frames.
 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
 *	This function is used to wake up the softirq, ksoftirqd or kthread
 *	responsible for sending and/or receiving packets on a specific
 *	queue id bound to an AF_XDP socket. The flags field specifies if
 *	only RX, only TX, or both should be woken up using the flags
 *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
 * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
 *	Get the devlink port instance associated with a given netdev.
* Called with a reference on the netdevice and devlink locks only, * rtnl_lock is not held. */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); void (*ndo_uninit)(struct net_device *dev); int (*ndo_open)(struct net_device *dev); int (*ndo_stop)(struct net_device *dev); netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev); netdev_features_t (*ndo_features_check)(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); void (*ndo_change_rx_flags)(struct net_device *dev, int flags); void (*ndo_set_rx_mode)(struct net_device *dev); int (*ndo_set_mac_address)(struct net_device *dev, void *addr); int (*ndo_validate_addr)(struct net_device *dev); int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); int (*ndo_neigh_setup)(struct net_device *dev, struct neigh_parms *); void (*ndo_tx_timeout) (struct net_device *dev); void (*ndo_get_stats64)(struct net_device *dev, struct rtnl_link_stats64 *storage); bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, void *attr_data); struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); int (*ndo_netpoll_setup)(struct net_device *dev, struct netpoll_info *info); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, int queue, u8 *mac); int (*ndo_set_vf_vlan)(struct net_device *dev, int queue, u16 vlan, u8 qos, __be16 proto); int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate); int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); int (*ndo_get_vf_config)(struct net_device *dev, int vf, struct ifla_vf_info *ivf); int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); int (*ndo_get_vf_stats)(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats); int (*ndo_set_vf_port)(struct net_device *dev, int vf, struct nlattr *port[]); int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); int (*ndo_set_vf_guid)(struct net_device *dev, int vf, u64 guid, int guid_type); int (*ndo_set_vf_rss_query_en)( struct net_device *dev, int vf, bool setting); int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, void *type_data); #if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); int (*ndo_fcoe_disable)(struct net_device *dev); int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, struct scatterlist *sgl, unsigned int sgc); int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, struct scatterlist *sgl, unsigned int sgc); int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, struct netdev_fcoe_hbainfo *hbainfo); #endif #if IS_ENABLED(CONFIG_LIBFCOE) #define NETDEV_FCOE_WWNN 0 #define NETDEV_FCOE_WWPN 1 int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); #endif #ifdef CONFIG_RFS_ACCEL int 
(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);
	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u16 flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void			(*ndo_udp_tunnel_add)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void			(*ndo_udp_tunnel_del)(struct net_device *dev,
						      struct udp_tunnel_info *ti);
	void *			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);
	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_change_proto_down)(struct net_device *dev,
							 bool proto_down);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
						       struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
						       int needed_headroom);
	int			(*ndo_bpf)(struct net_device *dev,
					   struct netdev_bpf *bpf);
	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
						struct xdp_frame **xdp,
						u32 flags);
	int			(*ndo_xsk_wakeup)(struct net_device *dev,
						  u32 queue_id, u32 flags);
	struct devlink_port *	(*ndo_get_devlink_port)(struct net_device *dev);
};

/**
 * enum net_device_priv_flags - &struct net_device priv_flags
 *
 * These are the &struct net_device priv_flags; they are only set
 * internally by drivers and used in the kernel. These flags are
 * invisible to userspace; this means that the order of these flags can
 * change during any kernel release.
 *
 * You should have a pretty good reason to be extending these flags.
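 *
 * For illustration only: a driver that implements unicast filtering in
 * hardware advertises it by setting the corresponding bit, e.g.
 *
 *	dev->priv_flags |= IFF_UNICAST_FLT;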
 *
 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
 * @IFF_EBRIDGE: Ethernet bridging device
 * @IFF_BONDING: bonding master or slave
 * @IFF_ISATAP: ISATAP interface (RFC4214)
 * @IFF_WAN_HDLC: WAN HDLC device
 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
 *	release skb->dst
 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
 * @IFF_MACVLAN_PORT: device used as macvlan port
 * @IFF_BRIDGE_PORT: device used as bridge port
 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
 * @IFF_UNICAST_FLT: Supports unicast filtering
 * @IFF_TEAM_PORT: device used as team port
 * @IFF_SUPP_NOFCS: device supports sending custom FCS
 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
 *	change when it's running
 * @IFF_MACVLAN: Macvlan device
 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
 *	underlying stacked devices
 * @IFF_L3MDEV_MASTER: device is an L3 master device
 * @IFF_NO_QUEUE: device can run without qdisc attached
 * @IFF_OPENVSWITCH: device is an Open vSwitch master
 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
 * @IFF_TEAM: device is a team device
 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
 *	entity (i.e. the master device for bridged veth)
 * @IFF_MACSEC: device is a MACsec device
 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
 * @IFF_FAILOVER: device is a failover master device
 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
 * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
 */
enum netdev_priv_flags {
	IFF_802_1Q_VLAN			= 1<<0,
	IFF_EBRIDGE			= 1<<1,
	IFF_BONDING			= 1<<2,
	IFF_ISATAP			= 1<<3,
	IFF_WAN_HDLC			= 1<<4,
	IFF_XMIT_DST_RELEASE		= 1<<5,
	IFF_DONT_BRIDGE			= 1<<6,
	IFF_DISABLE_NETPOLL		= 1<<7,
	IFF_MACVLAN_PORT		= 1<<8,
	IFF_BRIDGE_PORT			= 1<<9,
	IFF_OVS_DATAPATH		= 1<<10,
	IFF_TX_SKB_SHARING		= 1<<11,
	IFF_UNICAST_FLT			= 1<<12,
	IFF_TEAM_PORT			= 1<<13,
	IFF_SUPP_NOFCS			= 1<<14,
	IFF_LIVE_ADDR_CHANGE		= 1<<15,
	IFF_MACVLAN			= 1<<16,
	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
	IFF_L3MDEV_MASTER		= 1<<18,
	IFF_NO_QUEUE			= 1<<19,
	IFF_OPENVSWITCH			= 1<<20,
	IFF_L3MDEV_SLAVE		= 1<<21,
	IFF_TEAM			= 1<<22,
	IFF_RXFH_CONFIGURED		= 1<<23,
	IFF_PHONY_HEADROOM		= 1<<24,
	IFF_MACSEC			= 1<<25,
	IFF_NO_RX_HANDLER		= 1<<26,
	IFF_FAILOVER			= 1<<27,
	IFF_FAILOVER_SLAVE		= 1<<28,
	IFF_L3MDEV_RX_HANDLER		= 1<<29,
	IFF_LIVE_RENAME_OK		= 1<<30,
};

#define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
#define IFF_EBRIDGE			IFF_EBRIDGE
#define IFF_BONDING			IFF_BONDING
#define IFF_ISATAP			IFF_ISATAP
#define IFF_WAN_HDLC			IFF_WAN_HDLC
#define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
#define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
#define IFF_DISABLE_NETPOLL		IFF_DISABLE_NETPOLL
#define IFF_MACVLAN_PORT		IFF_MACVLAN_PORT
#define IFF_BRIDGE_PORT			IFF_BRIDGE_PORT
#define IFF_OVS_DATAPATH		IFF_OVS_DATAPATH
#define IFF_TX_SKB_SHARING		IFF_TX_SKB_SHARING
#define IFF_UNICAST_FLT			IFF_UNICAST_FLT
#define IFF_TEAM_PORT			IFF_TEAM_PORT
#define IFF_SUPP_NOFCS			IFF_SUPP_NOFCS
#define IFF_LIVE_ADDR_CHANGE		IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN			IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE			IFF_NO_QUEUE
#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE		IFF_L3MDEV_SLAVE
#define IFF_TEAM			IFF_TEAM
#define IFF_RXFH_CONFIGURED		IFF_RXFH_CONFIGURED
#define IFF_MACSEC			IFF_MACSEC
#define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
#define IFF_FAILOVER			IFF_FAILOVER
#define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK

/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
	ML_PRIV_NONE,
	ML_PRIV_CAN,
};

/**
 * struct net_device - The DEVICE structure.
 *
 * Actually, this whole structure is a big mistake. It mixes I/O
 * data with strictly "high-level" data, and it has to know about
 * almost every data structure used in the INET module.
 *
 * @name: This is the first field of the "visible" part of this structure
 *	(i.e. as seen by users in the "Space.c" file). It is the name
 *	of the interface.
 *
 * @name_hlist: Device name hash chain, please keep it close to name[]
 * @ifalias: SNMP alias
 * @mem_end: Shared memory end
 * @mem_start: Shared memory start
 * @base_addr: Device I/O address
 * @irq: Device IRQ number
 *
 * @state: Generic network queuing layer state, see netdev_state_t
 * @dev_list: The global list of network devices
 * @napi_list: List entry used for polling NAPI devices
 * @unreg_list: List entry when we are unregistering the
 *	device; see the function unregister_netdev
 * @close_list: List entry used when we are closing the device
 * @ptype_all: Device-specific packet handlers for all protocols
 * @ptype_specific: Device-specific, protocol-specific packet handlers
 *
 * @adj_list: Directly linked devices, like slaves for bonding
 * @features: Currently active device features
 * @hw_features: User-changeable features
 *
 * @wanted_features: User-requested features
 * @vlan_features: Mask of features inheritable by VLAN devices
 *
 * @hw_enc_features: Mask of features inherited by encapsulating devices
 *	This field indicates what encapsulation
 *	offloads the hardware is capable of doing,
 *	and drivers will need to set them appropriately.
 *
 * @mpls_features: Mask of features inheritable by MPLS
 *
 * @ifindex: interface index
 * @group: The group the device belongs to
 *
 * @stats: Statistics struct, which was left as a legacy; use
 *	rtnl_link_stats64 instead
 *
 * @rx_dropped: Dropped packets by core network,
 *	do not use this in drivers
 * @tx_dropped: Dropped packets by core network,
 *	do not use this in drivers
 * @rx_nohandler: nohandler dropped packets by core network on
 *	inactive devices, do not use this in drivers
 * @carrier_up_count: Number of times the carrier has been up
 * @carrier_down_count: Number of times the carrier has been down
 *
 * @wireless_handlers: List of functions to handle Wireless Extensions,
 *	instead of ioctl; see <net/iw_handler.h> for details.
 * @wireless_data: Instance data managed by the core of wireless extensions
 *
 * @netdev_ops: Includes several pointers to callbacks,
 *	if one wants to override the ndo_*() functions
 * @ethtool_ops: Management operations
 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
 *	discovery handling. Necessary for e.g. 6LoWPAN.
 * @header_ops: Includes callbacks for creating, parsing, caching, etc.
 *	of Layer 2 headers.
 *
 * @flags: Interface flags (a la BSD)
 * @priv_flags: Like 'flags' but invisible to userspace,
 *	see if.h for the definitions
 * @gflags: Global flags (kept as legacy)
 * @padded: How much padding added by alloc_netdev()
 * @operstate: RFC2863 operstate
 * @link_mode: Mapping policy to operstate
 * @if_port: Selectable AUI, TP, ...
* @dma: DMA channel * @mtu: Interface MTU value * @min_mtu: Interface Minimum MTU value * @max_mtu: Interface Maximum MTU value * @type: Interface hardware type * @hard_header_len: Maximum hardware header length. * @min_header_len: Minimum hardware header length * * @needed_headroom: Extra headroom the hardware may need, but not in all * cases can this be guaranteed * @needed_tailroom: Extra tailroom the hardware may need, but not in all * cases can this be guaranteed. Some cases also use * LL_MAX_HEADER instead to allocate the skb * * interface address info: * * @perm_addr: Permanent hw address * @addr_assign_type: Hw address assignment type * @addr_len: Hardware address length * @upper_level: Maximum depth level of upper devices. * @lower_level: Maximum depth level of lower devices. * @neigh_priv_len: Used in neigh_alloc() * @dev_id: Used to differentiate devices that share * the same link layer address * @dev_port: Used to differentiate devices that share * the same function * @addr_list_lock: XXX: need comments on this one * @uc_promisc: Counter that indicates promiscuous mode * has been enabled due to the need to listen to * additional unicast addresses in a device that * does not implement ndo_set_rx_mode() * @uc: unicast mac addresses * @mc: multicast mac addresses * @dev_addrs: list of device hw addresses * @queues_kset: Group of all Kobjects in the Tx and RX queues * @promiscuity: Number of times the NIC is told to work in * promiscuous mode; if it becomes 0 the NIC will * exit promiscuous mode * @allmulti: Counter, enables or disables allmulticast mode * * @vlan_info: VLAN info * @dsa_ptr: dsa specific data * @tipc_ptr: TIPC specific data * @atalk_ptr: AppleTalk link * @ip_ptr: IPv4 specific data * @ip6_ptr: IPv6 specific data * @ax25_ptr: AX.25 specific data * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering * * @dev_addr: Hw address (before bcast, * because most packets are unicast) * * @_rx: Array of RX queues * @num_rx_queues: Number of RX queues * allocated at register_netdev() time * @real_num_rx_queues: Number of RX queues currently active in device * * @rx_handler: handler for received packets * @rx_handler_data: XXX: need comments on this one * @miniq_ingress: ingress/clsact qdisc specific data for * ingress processing * @ingress_queue: XXX: need comments on this one * @broadcast: hw bcast address * * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, * indexed by RX queue number. Assigned by driver. * This must only be set if the ndo_rx_flow_steer * operation is defined * @index_hlist: Device index hash chain * * @_tx: Array of TX queues * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time * @real_num_tx_queues: Number of TX queues currently active in device * @qdisc: Root qdisc from userspace point of view * @tx_queue_len: Max frames per queue allowed * @tx_global_lock: XXX: need comments on this one * * @xps_maps: XXX: need comments on this one * @miniq_egress: clsact qdisc specific data for * egress processing * @watchdog_timeo: Represents the timeout that is used by * the watchdog (see dev_watchdog()) * @watchdog_timer: List of timers * * @pcpu_refcnt: Number of references to this device * @todo_list: Delayed register/unregister * @link_watch_list: XXX: need comments on this one * * @reg_state: Register/unregister state machine * @dismantle: Device is going to be freed * @rtnl_link_state: This enum represents the phases of creating * a new link * * @needs_free_netdev: Should unregister perform free_netdev? 
* @priv_destructor: Called from unregister * @npinfo: XXX: need comments on this one * @nd_net: Network namespace this network device is inside * * @ml_priv: Mid-layer private * @ml_priv_type: Mid-layer private type * @lstats: Loopback statistics * @tstats: Tunnel statistics * @dstats: Dummy statistics * @vstats: Virtual ethernet statistics * * @garp_port: GARP * @mrp_port: MRP * * @dev: Class/net/name entry * @sysfs_groups: Space for optional device, statistics and wireless * sysfs groups * * @sysfs_rx_queue_group: Space for optional per-rx queue attributes * @rtnl_link_ops: Rtnl_link_ops * * @gso_max_size: Maximum size of generic segmentation offload * @gso_max_segs: Maximum number of segments that can be passed to the * NIC for GSO * * @dcbnl_ops: Data Center Bridging netlink ops * @num_tc: Number of traffic classes in the net device * @tc_to_txq: XXX: need comments on this one * @prio_tc_map: XXX: need comments on this one * * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp * * @priomap: XXX: need comments on this one * @phydev: Physical device may attach itself * for hardware timestamping * @sfp_bus: attached &struct sfp_bus structure. * @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock * spinlock * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount * @qdisc_xmit_lock_key: lockdep class annotating * netdev_queue->_xmit_lock spinlock * @addr_list_lock_key: lockdep class annotating * net_device->addr_list_lock spinlock * * @proto_down: protocol port state information can be sent to the * switch driver and used to set the phys state of the * switch port. * * @wol_enabled: Wake-on-LAN is enabled * * FIXME: cleanup struct net_device such that network protocol info * moves out. */ struct net_device { char name[IFNAMSIZ]; struct hlist_node name_hlist; struct dev_ifalias __rcu *ifalias; /* * I/O specific fields * FIXME: Merge these and struct ifmap into one */ unsigned long mem_end; unsigned long mem_start; unsigned long base_addr; int irq; /* * Some hardware also needs these fields (state,dev_list, * napi_list,unreg_list,close_list) but they are not * part of the usual set specified in Space.c. 
*/ unsigned long state; struct list_head dev_list; struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; struct list_head ptype_all; struct list_head ptype_specific; struct { struct list_head upper; struct list_head lower; } adj_list; netdev_features_t features; netdev_features_t hw_features; netdev_features_t wanted_features; netdev_features_t vlan_features; netdev_features_t hw_enc_features; netdev_features_t mpls_features; netdev_features_t gso_partial_features; int ifindex; int group; struct net_device_stats stats; atomic_long_t rx_dropped; atomic_long_t tx_dropped; atomic_long_t rx_nohandler; /* Stats to monitor link on/off, flapping */ atomic_t carrier_up_count; atomic_t carrier_down_count; #ifdef CONFIG_WIRELESS_EXT const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; #endif const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; #ifdef CONFIG_NET_L3_MASTER_DEV const struct l3mdev_ops *l3mdev_ops; #endif #if IS_ENABLED(CONFIG_IPV6) const struct ndisc_ops *ndisc_ops; #endif #ifdef CONFIG_XFRM_OFFLOAD const struct xfrmdev_ops *xfrmdev_ops; #endif #if IS_ENABLED(CONFIG_TLS_DEVICE) const struct tlsdev_ops *tlsdev_ops; #endif const struct header_ops *header_ops; unsigned int flags; unsigned int priv_flags; unsigned short gflags; unsigned short padded; unsigned char operstate; unsigned char link_mode; unsigned char if_port; unsigned char dma; /* Note : dev->mtu is often read without holding a lock. * Writers usually hold RTNL. * It is recommended to use READ_ONCE() to annotate the reads, * and to use WRITE_ONCE() to annotate the writes. */ unsigned int mtu; unsigned int min_mtu; unsigned int max_mtu; unsigned short type; unsigned short hard_header_len; unsigned char min_header_len; unsigned short needed_headroom; unsigned short needed_tailroom; /* Interface address info. 
*/ unsigned char perm_addr[MAX_ADDR_LEN]; unsigned char addr_assign_type; unsigned char addr_len; unsigned char upper_level; unsigned char lower_level; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; unsigned char name_assign_type; bool uc_promisc; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; #ifdef CONFIG_SYSFS struct kset *queues_kset; #endif unsigned int promiscuity; unsigned int allmulti; /* Protocol-specific pointers */ #if IS_ENABLED(CONFIG_VLAN_8021Q) struct vlan_info __rcu *vlan_info; #endif #if IS_ENABLED(CONFIG_NET_DSA) struct dsa_port *dsa_ptr; #endif #if IS_ENABLED(CONFIG_TIPC) struct tipc_bearer __rcu *tipc_ptr; #endif #if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK) void *atalk_ptr; #endif struct in_device __rcu *ip_ptr; struct inet6_dev __rcu *ip6_ptr; #if IS_ENABLED(CONFIG_AX25) void *ax25_ptr; #endif struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; #if IS_ENABLED(CONFIG_MPLS_ROUTING) struct mpls_dev __rcu *mpls_ptr; #endif /* * Cache lines mostly used on receive path (including eth_type_trans()) */ /* Interface address info used in eth_type_trans() */ unsigned char *dev_addr; struct netdev_rx_queue *_rx; unsigned int num_rx_queues; unsigned int real_num_rx_queues; struct bpf_prog __rcu *xdp_prog; unsigned long gro_flush_timeout; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; #ifdef CONFIG_NET_CLS_ACT struct mini_Qdisc __rcu *miniq_ingress; #endif struct netdev_queue __rcu *ingress_queue; #ifdef CONFIG_NETFILTER_INGRESS struct nf_hook_entries __rcu *nf_hooks_ingress; #endif unsigned char broadcast[MAX_ADDR_LEN]; #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rx_cpu_rmap; #endif struct hlist_node index_hlist; /* * Cache lines mostly used on transmit path */ struct netdev_queue *_tx ____cacheline_aligned_in_smp; unsigned int num_tx_queues; unsigned int real_num_tx_queues; struct Qdisc __rcu *qdisc; #ifdef CONFIG_NET_SCHED DECLARE_HASHTABLE (qdisc_hash, 4); #endif unsigned int tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo; #ifdef CONFIG_XPS struct xps_dev_maps __rcu *xps_cpus_map; struct xps_dev_maps __rcu *xps_rxqs_map; #endif #ifdef CONFIG_NET_CLS_ACT struct mini_Qdisc __rcu *miniq_egress; #endif /* These may be needed for future network-power-down code. 
*/ struct timer_list watchdog_timer; int __percpu *pcpu_refcnt; struct list_head todo_list; struct list_head link_watch_list; enum { NETREG_UNINITIALIZED=0, NETREG_REGISTERED, /* completed register_netdevice */ NETREG_UNREGISTERING, /* called unregister_netdevice */ NETREG_UNREGISTERED, /* completed unregister todo */ NETREG_RELEASED, /* called free_netdev */ NETREG_DUMMY, /* dummy device for NAPI poll */ } reg_state:8; bool dismantle; enum { RTNL_LINK_INITIALIZED, RTNL_LINK_INITIALIZING, } rtnl_link_state:16; bool needs_free_netdev; void (*priv_destructor)(struct net_device *dev); #ifdef CONFIG_NETPOLL struct netpoll_info __rcu *npinfo; #endif possible_net_t nd_net; /* mid-layer private */ void *ml_priv; enum netdev_ml_priv_type ml_priv_type; union { struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; struct pcpu_dstats __percpu *dstats; }; #if IS_ENABLED(CONFIG_GARP) struct garp_port __rcu *garp_port; #endif #if IS_ENABLED(CONFIG_MRP) struct mrp_port __rcu *mrp_port; #endif struct device dev; const struct attribute_group *sysfs_groups[4]; const struct attribute_group *sysfs_rx_queue_group; const struct rtnl_link_ops *rtnl_link_ops; /* for setting kernel sock attribute on TCP connection setup */ #define GSO_MAX_SIZE 65536 unsigned int gso_max_size; #define GSO_MAX_SEGS 65535 u16 gso_max_segs; #ifdef CONFIG_DCB const struct dcbnl_rtnl_ops *dcbnl_ops; #endif s16 num_tc; struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; u8 prio_tc_map[TC_BITMASK + 1]; #if IS_ENABLED(CONFIG_FCOE) unsigned int fcoe_ddp_xid; #endif #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) struct netprio_map __rcu *priomap; #endif struct phy_device *phydev; struct sfp_bus *sfp_bus; struct lock_class_key qdisc_tx_busylock_key; struct lock_class_key qdisc_running_key; struct lock_class_key qdisc_xmit_lock_key; struct lock_class_key addr_list_lock_key; bool proto_down; unsigned wol_enabled:1; }; #define to_net_dev(d) container_of(d, struct net_device, dev) static inline bool netif_elide_gro(const struct net_device *dev) { if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) return true; return false; } #define NETDEV_ALIGN 32 static inline int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) { return dev->prio_tc_map[prio & TC_BITMASK]; } static inline int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) { if (tc >= dev->num_tc) return -EINVAL; dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; return 0; } int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); void netdev_reset_tc(struct net_device *dev); int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); int netdev_set_num_tc(struct net_device *dev, u8 num_tc); static inline int netdev_get_num_tc(struct net_device *dev) { return dev->num_tc; } void netdev_unbind_sb_channel(struct net_device *dev, struct net_device *sb_dev); int netdev_bind_sb_channel_queue(struct net_device *dev, struct net_device *sb_dev, u8 tc, u16 count, u16 offset); int netdev_set_sb_channel(struct net_device *dev, u16 channel); static inline int netdev_get_sb_channel(struct net_device *dev) { return max_t(int, -dev->num_tc, 0); } static inline struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, unsigned int index) { return &dev->_tx[index]; } static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, const struct sk_buff *skb) { return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); } static inline void netdev_for_each_tx_queue(struct net_device *dev, void (*f)(struct 
net_device *,
						     struct netdev_queue *,
						     void *),
				     void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
		   struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
					 struct sk_buff *skb,
					 struct net_device *sb_dev);

/* returns the headroom that the master device needs to take into account
 * when forwarding to this dev
 */
static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
	return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
}

static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
	if (dev->netdev_ops->ndo_set_rx_headroom)
		dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
}

/* set the device rx headroom to the dev's default */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
	netdev_set_rx_headroom(dev, -1);
}

static inline void *netdev_get_ml_priv(struct net_device *dev,
				       enum netdev_ml_priv_type type)
{
	if (dev->ml_priv_type != type)
		return NULL;

	return dev->ml_priv;
}

static inline void netdev_set_ml_priv(struct net_device *dev,
				      void *ml_priv,
				      enum netdev_ml_priv_type type)
{
	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
	     dev->ml_priv_type, type);
	WARN(!dev->ml_priv_type && dev->ml_priv,
	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");

	dev->ml_priv = ml_priv;
	dev->ml_priv_type = type;
}

/*
 * Net namespace inlines
 */
static inline struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}

static inline void dev_net_set(struct net_device *dev, struct net *net)
{
	write_pnet(&dev->nd_net, net);
}

/**
 * netdev_priv - access network device private data
 * @dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration, a symlink is created during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))

/* Default NAPI poll() weight
 * Device drivers are strongly advised not to use a bigger value
 */
#define NAPI_POLL_WEIGHT 64

/**
 * netif_napi_add - initialize a NAPI context
 * @dev: network device
 * @napi: NAPI context
 * @poll: polling function
 * @weight: default weight
 *
 * netif_napi_add() must be used to initialize a NAPI context prior to calling
 * *any* of the other NAPI-related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 * netif_tx_napi_add - initialize a NAPI context
 * @dev: network device
 * @napi: NAPI context
 * @poll: polling function
 * @weight: default weight
 *
 * This variant of netif_napi_add() should be used from drivers using NAPI
 * to exclusively poll a TX queue.
 * This avoids adding it to napi_hash[] and thus polluting that hash table.
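 *
 * Either variant is used the same way; e.g. a (hypothetical) driver
 * registering its RX poll callback foo_poll() from its setup path:
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 *
 * and later calling napi_enable(&priv->napi) from its ndo_open().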
*/ static inline void netif_tx_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight) { set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); netif_napi_add(dev, napi, poll, weight); } /** * netif_napi_del - remove a NAPI context * @napi: NAPI context * * netif_napi_del() removes a NAPI context from the network device NAPI list */ void netif_napi_del(struct napi_struct *napi); struct napi_gro_cb { /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ void *frag0; /* Length of frag0. */ unsigned int frag0_len; /* This indicates where we are processing relative to skb->data. */ int data_offset; /* This is non-zero if the packet cannot be merged with the new skb. */ u16 flush; /* Save the IP ID here and check when we get to the transport layer */ u16 flush_id; /* Number of segments aggregated. */ u16 count; /* Start offset for remote checksum offload */ u16 gro_remcsum_start; /* jiffies when first packet was created/queued */ unsigned long age; /* Used in ipv6_gro_receive() and foo-over-udp */ u16 proto; /* This is non-zero if the packet may be of the same flow. */ u8 same_flow:1; /* Used in tunnel GRO receive */ u8 encap_mark:1; /* GRO checksum is valid */ u8 csum_valid:1; /* Number of checksums via CHECKSUM_UNNECESSARY */ u8 csum_cnt:3; /* Free the skb? */ u8 free:2; #define NAPI_GRO_FREE 1 #define NAPI_GRO_FREE_STOLEN_HEAD 2 /* Used in foo-over-udp, set in udp[46]_gro_receive */ u8 is_ipv6:1; /* Used in GRE, set in fou/gue_gro_receive */ u8 is_fou:1; /* Used to determine if flush_id can be ignored */ u8 is_atomic:1; /* Number of gro_receive callbacks this packet already went through */ u8 recursion_counter:4; /* 1 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; /* used in skb_gro_receive() slow path */ struct sk_buff *last; }; #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) #define GRO_RECURSION_LIMIT 15 static inline int gro_recursion_inc_test(struct sk_buff *skb) { return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; } typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *); static inline struct sk_buff *call_gro_receive(gro_receive_t cb, struct list_head *head, struct sk_buff *skb) { if (unlikely(gro_recursion_inc_test(skb))) { NAPI_GRO_CB(skb)->flush |= 1; return NULL; } return cb(head, skb); } typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *); static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, struct sock *sk, struct list_head *head, struct sk_buff *skb) { if (unlikely(gro_recursion_inc_test(skb))) { NAPI_GRO_CB(skb)->flush |= 1; return NULL; } return cb(sk, head, skb); } struct packet_type { __be16 type; /* This is really htons(ether_type). */ bool ignore_outgoing; struct net_device *dev; /* NULL is wildcarded here */ int (*func) (struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); void (*list_func) (struct list_head *, struct packet_type *, struct net_device *); bool (*id_match)(struct packet_type *ptype, struct sock *sk); struct net *af_packet_net; void *af_packet_priv; struct list_head list; }; struct offload_callbacks { struct sk_buff *(*gso_segment)(struct sk_buff *skb, netdev_features_t features); struct sk_buff *(*gro_receive)(struct list_head *head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb, int nhoff); }; struct packet_offload { __be16 type; /* This is really htons(ether_type). 
				 */
	u16			 priority;
	struct offload_callbacks callbacks;
	struct list_head	 list;
};

/* often modified stats are per-CPU, others are shared (netdev->stats) */
struct pcpu_sw_netstats {
	u64     rx_packets;
	u64     rx_bytes;
	u64     tx_packets;
	u64     tx_bytes;
	struct u64_stats_sync   syncp;
} __aligned(4 * sizeof(u64));

struct pcpu_lstats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

#define __netdev_alloc_pcpu_stats(type, gfp)				\
({									\
	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
	if (pcpu_stats)	{						\
		int __cpu;						\
		for_each_possible_cpu(__cpu) {				\
			typeof(type) *stat;				\
			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
			u64_stats_init(&stat->syncp);			\
		}							\
	}								\
	pcpu_stats;							\
})

#define netdev_alloc_pcpu_stats(type)					\
	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)

enum netdev_lag_tx_type {
	NETDEV_LAG_TX_TYPE_UNKNOWN,
	NETDEV_LAG_TX_TYPE_RANDOM,
	NETDEV_LAG_TX_TYPE_BROADCAST,
	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
	NETDEV_LAG_TX_TYPE_HASH,
};

enum netdev_lag_hash {
	NETDEV_LAG_HASH_NONE,
	NETDEV_LAG_HASH_L2,
	NETDEV_LAG_HASH_L34,
	NETDEV_LAG_HASH_L23,
	NETDEV_LAG_HASH_E23,
	NETDEV_LAG_HASH_E34,
	NETDEV_LAG_HASH_UNKNOWN,
};

struct netdev_lag_upper_info {
	enum netdev_lag_tx_type tx_type;
	enum netdev_lag_hash hash_type;
};

struct netdev_lag_lower_state_info {
	u8 link_up : 1,
	   tx_enabled : 1;
};

#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
 * and the rtnetlink notification exclusion list in rtnetlink_event() when
 * adding new types.
 */
enum netdev_cmd {
	NETDEV_UP	= 1,	/* For now you can't veto a device up/down */
	NETDEV_DOWN,
	NETDEV_REBOOT,		/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this eg to kick tcp sessions
				   once done */
	NETDEV_CHANGE,		/* Notify device state change */
	NETDEV_REGISTER,
	NETDEV_UNREGISTER,
	NETDEV_CHANGEMTU,	/* notify after mtu change happened */
	NETDEV_CHANGEADDR,	/* notify after the address change */
	NETDEV_PRE_CHANGEADDR,	/* notify before the address change */
	NETDEV_GOING_DOWN,
	NETDEV_CHANGENAME,
	NETDEV_FEAT_CHANGE,
	NETDEV_BONDING_FAILOVER,
	NETDEV_PRE_UP,
	NETDEV_PRE_TYPE_CHANGE,
	NETDEV_POST_TYPE_CHANGE,
	NETDEV_POST_INIT,
	NETDEV_RELEASE,
	NETDEV_NOTIFY_PEERS,
	NETDEV_JOIN,
	NETDEV_CHANGEUPPER,
	NETDEV_RESEND_IGMP,
	NETDEV_PRECHANGEMTU,	/* notify before mtu change happened */
	NETDEV_CHANGEINFODATA,
	NETDEV_BONDING_INFO,
	NETDEV_PRECHANGEUPPER,
	NETDEV_CHANGELOWERSTATE,
	NETDEV_UDP_TUNNEL_PUSH_INFO,
	NETDEV_UDP_TUNNEL_DROP_INFO,
	NETDEV_CHANGE_TX_QUEUE_LEN,
	NETDEV_CVLAN_FILTER_PUSH_INFO,
	NETDEV_CVLAN_FILTER_DROP_INFO,
	NETDEV_SVLAN_FILTER_PUSH_INFO,
	NETDEV_SVLAN_FILTER_DROP_INFO,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);

int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);

struct netdev_notifier_info {
	struct net_device	*dev;
	struct netlink_ext_ack	*extack;
};

struct netdev_notifier_info_ext {
	struct netdev_notifier_info info; /* must be first */
	union {
		u32 mtu;
	} ext;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev; /* new upper dev */
	bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
	void *upper_info; /* upper dev info */
};

struct netdev_notifier_changelowerstate_info {
	struct netdev_notifier_info info; /* must be first
*/ void *lower_state_info; /* is lower dev state */ }; struct netdev_notifier_pre_changeaddr_info { struct netdev_notifier_info info; /* must be first */ const unsigned char *dev_addr; }; static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, struct net_device *dev) { info->dev = dev; info->extack = NULL; } static inline struct net_device * netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) { return info->dev; } static inline struct netlink_ext_ack * netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) { return info->extack; } int call_netdevice_notifiers(unsigned long val, struct net_device *dev); extern rwlock_t dev_base_lock; /* Device list lock */ #define for_each_netdev(net, d) \ list_for_each_entry(d, &(net)->dev_base_head, dev_list) #define for_each_netdev_reverse(net, d) \ list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) #define for_each_netdev_rcu(net, d) \ list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) #define for_each_netdev_safe(net, d, n) \ list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) #define for_each_netdev_continue(net, d) \ list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) #define for_each_netdev_continue_rcu(net, d) \ list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) #define for_each_netdev_in_bond_rcu(bond, slave) \ for_each_netdev_rcu(&init_net, slave) \ if (netdev_master_upper_dev_get_rcu(slave) == (bond)) #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) static inline struct net_device *next_net_device(struct net_device *dev) { struct list_head *lh; struct net *net; net = dev_net(dev); lh = dev->dev_list.next; return lh == &net->dev_base_head ? NULL : net_device_entry(lh); } static inline struct net_device *next_net_device_rcu(struct net_device *dev) { struct list_head *lh; struct net *net; net = dev_net(dev); lh = rcu_dereference(list_next_rcu(&dev->dev_list)); return lh == &net->dev_base_head ? NULL : net_device_entry(lh); } static inline struct net_device *first_net_device(struct net *net) { return list_empty(&net->dev_base_head) ? NULL : net_device_entry(net->dev_base_head.next); } static inline struct net_device *first_net_device_rcu(struct net *net) { struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); return lh == &net->dev_base_head ? 
NULL : net_device_entry(lh); } int netdev_boot_setup_check(struct net_device *dev); unsigned long netdev_boot_base(const char *prefix, int unit); struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *hwaddr); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); void dev_add_pack(struct packet_type *pt); void dev_remove_pack(struct packet_type *pt); void __dev_remove_pack(struct packet_type *pt); void dev_add_offload(struct packet_offload *po); void dev_remove_offload(struct packet_offload *po); int dev_get_iflink(const struct net_device *dev); int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct net_device *__dev_get_by_name(struct net *net, const char *name); int dev_alloc_name(struct net_device *dev, const char *name); int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); void dev_close(struct net_device *dev); void dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); int dev_queue_xmit(struct sk_buff *skb); int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); void unregister_netdevice_many(struct list_head *head); static inline void unregister_netdevice(struct net_device *dev) { unregister_netdevice_queue(dev, NULL); } int netdev_refcnt_read(const struct net_device *dev); void free_netdev(struct net_device *dev); void netdev_freemem(struct net_device *dev); void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); struct net_device *dev_get_by_napi_id(unsigned int napi_id); int netdev_get_name(struct net *net, char *name, int ifindex); int dev_restart(struct net_device *dev); int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); static inline unsigned int skb_gro_offset(const struct sk_buff *skb) { return NAPI_GRO_CB(skb)->data_offset; } static inline unsigned int skb_gro_len(const struct sk_buff *skb) { return skb->len - NAPI_GRO_CB(skb)->data_offset; } static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) { NAPI_GRO_CB(skb)->data_offset += len; } static inline void *skb_gro_header_fast(struct sk_buff *skb, unsigned int offset) { return NAPI_GRO_CB(skb)->frag0 + offset; } static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) { return NAPI_GRO_CB(skb)->frag0_len < hlen; } static inline void skb_gro_frag0_invalidate(struct sk_buff *skb) { NAPI_GRO_CB(skb)->frag0 = NULL; NAPI_GRO_CB(skb)->frag0_len = 0; } static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, 
unsigned int offset) { if (!pskb_may_pull(skb, hlen)) return NULL; skb_gro_frag0_invalidate(skb); return skb->data + offset; } static inline void *skb_gro_network_header(struct sk_buff *skb) { return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + skb_network_offset(skb); } static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len) { if (NAPI_GRO_CB(skb)->csum_valid) NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, csum_partial(start, len, 0)); } /* GRO checksum functions. These are logical equivalents of the normal * checksum functions (in skbuff.h) except that they operate on the GRO * offsets and fields in sk_buff. */ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb); static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb) { return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb)); } static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, bool zero_okay, __sum16 check) { return ((skb->ip_summed != CHECKSUM_PARTIAL || skb_checksum_start_offset(skb) < skb_gro_offset(skb)) && !skb_at_gro_remcsum_start(skb) && NAPI_GRO_CB(skb)->csum_cnt == 0 && (!zero_okay || check)); } static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb, __wsum psum) { if (NAPI_GRO_CB(skb)->csum_valid && !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) return 0; NAPI_GRO_CB(skb)->csum = psum; return __skb_gro_checksum_complete(skb); } static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) { if (NAPI_GRO_CB(skb)->csum_cnt > 0) { /* Consume a checksum from CHECKSUM_UNNECESSARY */ NAPI_GRO_CB(skb)->csum_cnt--; } else { /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we * verified a new top level checksum or an encapsulated one * during GRO. This saves work if we fallback to normal path. 
*/ __skb_incr_checksum_unnecessary(skb); } } #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \ compute_pseudo) \ ({ \ __sum16 __ret = 0; \ if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ __ret = __skb_gro_checksum_validate_complete(skb, \ compute_pseudo(skb, proto)); \ if (!__ret) \ skb_gro_incr_csum_unnecessary(skb); \ __ret; \ }) #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \ __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo) #define skb_gro_checksum_validate_zero_check(skb, proto, check, \ compute_pseudo) \ __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo) #define skb_gro_checksum_simple_validate(skb) \ __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo) static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb) { return (NAPI_GRO_CB(skb)->csum_cnt == 0 && !NAPI_GRO_CB(skb)->csum_valid); } static inline void __skb_gro_checksum_convert(struct sk_buff *skb, __sum16 check, __wsum pseudo) { NAPI_GRO_CB(skb)->csum = ~pseudo; NAPI_GRO_CB(skb)->csum_valid = 1; } #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \ do { \ if (__skb_gro_checksum_convert_check(skb)) \ __skb_gro_checksum_convert(skb, check, \ compute_pseudo(skb, proto)); \ } while (0) struct gro_remcsum { int offset; __wsum delta; }; static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) { grc->offset = 0; grc->delta = 0; } static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, unsigned int off, size_t hdrlen, int start, int offset, struct gro_remcsum *grc, bool nopartial) { __wsum delta; size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); if (!nopartial) { NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; return ptr; } ptr = skb_gro_header_fast(skb, off); if (skb_gro_header_hard(skb, off + plen)) { ptr = skb_gro_header_slow(skb, off + plen, off); if (!ptr) return NULL; } delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum, start, offset); /* Adjust skb->csum since we changed the packet */ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); grc->offset = off + hdrlen + offset; grc->delta = delta; return ptr; } static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, struct gro_remcsum *grc) { void *ptr; size_t plen = grc->offset + sizeof(u16); if (!grc->delta) return; ptr = skb_gro_header_fast(skb, grc->offset); if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) { ptr = skb_gro_header_slow(skb, plen, grc->offset); if (!ptr) return; } remcsum_unadjust((__sum16 *)ptr, grc->delta); } #ifdef CONFIG_XFRM_OFFLOAD static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) { if (PTR_ERR(pp) != -EINPROGRESS) NAPI_GRO_CB(skb)->flush |= flush; } static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, struct sk_buff *pp, int flush, struct gro_remcsum *grc) { if (PTR_ERR(pp) != -EINPROGRESS) { NAPI_GRO_CB(skb)->flush |= flush; skb_gro_remcsum_cleanup(skb, grc); skb->remcsum_offload = 0; } } #else static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) { NAPI_GRO_CB(skb)->flush |= flush; } static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, struct sk_buff *pp, int flush, struct gro_remcsum *grc) { NAPI_GRO_CB(skb)->flush |= flush; skb_gro_remcsum_cleanup(skb, grc); skb->remcsum_offload = 0; } #endif static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned 
short type, const void *daddr, const void *saddr, unsigned int len) { if (!dev->header_ops || !dev->header_ops->create) return 0; return dev->header_ops->create(skb, dev, type, daddr, saddr, len); } static inline int dev_parse_header(const struct sk_buff *skb, unsigned char *haddr) { const struct net_device *dev = skb->dev; if (!dev->header_ops || !dev->header_ops->parse) return 0; return dev->header_ops->parse(skb, haddr); } static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) { const struct net_device *dev = skb->dev; if (!dev->header_ops || !dev->header_ops->parse_protocol) return 0; return dev->header_ops->parse_protocol(skb); } /* ll_header must have at least hard_header_len allocated */ static inline bool dev_validate_header(const struct net_device *dev, char *ll_header, int len) { if (likely(len >= dev->hard_header_len)) return true; if (len < dev->min_header_len) return false; if (capable(CAP_SYS_RAWIO)) { memset(ll_header + len, 0, dev->hard_header_len - len); return true; } if (dev->header_ops && dev->header_ops->validate) return dev->header_ops->validate(ll_header, len); return false; } typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len, int size); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); static inline int unregister_gifconf(unsigned int family) { return register_gifconf(family, NULL); } #ifdef CONFIG_NET_FLOW_LIMIT #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ struct sd_flow_limit { u64 count; unsigned int num_buckets; unsigned int history_head; u16 history[FLOW_LIMIT_HISTORY]; u8 buckets[]; }; extern int netdev_flow_limit_table_len; #endif /* CONFIG_NET_FLOW_LIMIT */ /* * Incoming packets are placed on per-CPU queues */ struct softnet_data { struct list_head poll_list; struct sk_buff_head process_queue; /* stats */ unsigned int processed; unsigned int time_squeeze; unsigned int received_rps; #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; #endif #ifdef CONFIG_NET_FLOW_LIMIT struct sd_flow_limit __rcu *flow_limit; #endif struct Qdisc *output_queue; struct Qdisc **output_queue_tailp; struct sk_buff *completion_queue; #ifdef CONFIG_XFRM_OFFLOAD struct sk_buff_head xfrm_backlog; #endif /* written and read only by owning cpu: */ struct { u16 recursion; u8 more; } xmit; #ifdef CONFIG_RPS /* input_queue_head should be written by cpu owning this struct, * and only read by other cpus. Worth using a cache line. 
*/ unsigned int input_queue_head ____cacheline_aligned_in_smp; /* Elements below can be accessed between CPUs for RPS/RFS */ call_single_data_t csd ____cacheline_aligned_in_smp; struct softnet_data *rps_ipi_next; unsigned int cpu; unsigned int input_queue_tail; #endif unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; }; static inline void input_queue_head_incr(struct softnet_data *sd) { #ifdef CONFIG_RPS sd->input_queue_head++; #endif } static inline void input_queue_tail_incr_save(struct softnet_data *sd, unsigned int *qtail) { #ifdef CONFIG_RPS *qtail = ++sd->input_queue_tail; #endif } DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); static inline int dev_recursion_level(void) { return this_cpu_read(softnet_data.xmit.recursion); } #define XMIT_RECURSION_LIMIT 8 static inline bool dev_xmit_recursion(void) { return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > XMIT_RECURSION_LIMIT); } static inline void dev_xmit_recursion_inc(void) { __this_cpu_inc(softnet_data.xmit.recursion); } static inline void dev_xmit_recursion_dec(void) { __this_cpu_dec(softnet_data.xmit.recursion); } void __netif_schedule(struct Qdisc *q); void netif_schedule_queue(struct netdev_queue *txq); static inline void netif_tx_schedule_all(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) netif_schedule_queue(netdev_get_tx_queue(dev, i)); } static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) { clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } /** * netif_start_queue - allow transmit * @dev: network device * * Allow upper layers to call the device hard_start_xmit routine. */ static inline void netif_start_queue(struct net_device *dev) { netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); } static inline void netif_tx_start_all_queues(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); netif_tx_start_queue(txq); } } void netif_tx_wake_queue(struct netdev_queue *dev_queue); /** * netif_wake_queue - restart transmit * @dev: network device * * Allow upper layers to call the device hard_start_xmit routine. * Used for flow control when transmit resources are available. */ static inline void netif_wake_queue(struct net_device *dev) { netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); } static inline void netif_tx_wake_all_queues(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); netif_tx_wake_queue(txq); } } static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) { set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } /** * netif_stop_queue - stop transmitting packets * @dev: network device * * Stop upper layers calling the device hard_start_xmit routine. * Used for flow control when transmit resources are unavailable. */ static inline void netif_stop_queue(struct net_device *dev) { netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); } void netif_tx_stop_all_queues(struct net_device *dev); void netdev_update_lockdep_key(struct net_device *dev); static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) { return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } /** * netif_queue_stopped - test if transmit queue is flow-blocked * @dev: network device * * Test if transmit queue on device is currently unable to send.
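 *
 * Together with netif_stop_queue() and netif_wake_queue() this forms
 * the classic single-queue flow control pattern; a minimal sketch
 * (the foo_* names are hypothetical driver code, not a real API):
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		foo_hw_post_skb(priv, skb);
 *		if (foo_hw_ring_full(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 * with the TX completion path doing the reverse:
 *
 *	if (netif_queue_stopped(dev) && !foo_hw_ring_full(priv))
 *		netif_wake_queue(dev);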
*/ static inline bool netif_queue_stopped(const struct net_device *dev) { return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); } static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) { return dev_queue->state & QUEUE_STATE_ANY_XOFF; } static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) { return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; } static inline bool netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) { return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; } /** * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write * @dev_queue: pointer to transmit queue * * BQL enabled drivers might use this helper in their ndo_start_xmit(), * to give appropriate hint to the CPU. */ static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) { #ifdef CONFIG_BQL prefetchw(&dev_queue->dql.num_queued); #endif } /** * netdev_txq_bql_complete_prefetchw - prefetch bql data for write * @dev_queue: pointer to transmit queue * * BQL enabled drivers might use this helper in their TX completion path, * to give appropriate hint to the CPU. */ static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) { #ifdef CONFIG_BQL prefetchw(&dev_queue->dql.limit); #endif } static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, unsigned int bytes) { #ifdef CONFIG_BQL dql_queued(&dev_queue->dql, bytes); if (likely(dql_avail(&dev_queue->dql) >= 0)) return; set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); /* * The XOFF flag must be set before checking the dql_avail below, * because in netdev_tx_completed_queue we update the dql_completed * before checking the XOFF flag. */ smp_mb(); /* check again in case another CPU has just made room avail */ if (unlikely(dql_avail(&dev_queue->dql) >= 0)) clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); #endif } /* Variant of netdev_tx_sent_queue() for drivers that are aware * that they should not test BQL status themselves. * We do want to change __QUEUE_STATE_STACK_XOFF only for the last * skb of a batch. * Returns true if the doorbell must be used to kick the NIC. */ static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, unsigned int bytes, bool xmit_more) { if (xmit_more) { #ifdef CONFIG_BQL dql_queued(&dev_queue->dql, bytes); #endif return netif_tx_queue_stopped(dev_queue); } netdev_tx_sent_queue(dev_queue, bytes); return true; } /** * netdev_sent_queue - report the number of bytes queued to hardware * @dev: network device * @bytes: number of bytes queued to the hardware device queue * * Report the number of bytes queued for sending/completion to the network * device hardware queue. 
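 *
 * A BQL-enabled driver pairs this with netdev_completed_queue(); a
 * minimal sketch for a single-queue device (the surrounding driver
 * code is assumed):
 *
 *	in ndo_start_xmit, after posting the skb to the ring:
 *		netdev_sent_queue(dev, skb->len);
 *
 *	in the TX completion handler, once per batch:
 *		netdev_completed_queue(dev, pkts_done, bytes_done);
 *
 * BQL uses these byte counts to stop and wake the queue so that no
 * more data than necessary sits in the hardware ring.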
@bytes should be a good approximation and should * exactly match netdev_completed_queue() @bytes. */ static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) { netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); } static inline bool __netdev_sent_queue(struct net_device *dev, unsigned int bytes, bool xmit_more) { return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, xmit_more); } static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, unsigned int pkts, unsigned int bytes) { #ifdef CONFIG_BQL if (unlikely(!bytes)) return; dql_completed(&dev_queue->dql, bytes); /* * Without the memory barrier there is a small possibility that * netdev_tx_sent_queue will miss the update and cause the queue to * be stopped forever */ smp_mb(); if (unlikely(dql_avail(&dev_queue->dql) < 0)) return; if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) netif_schedule_queue(dev_queue); #endif } /** * netdev_completed_queue - report bytes and packets completed by device * @dev: network device * @pkts: actual number of packets sent over the medium * @bytes: actual number of bytes sent over the medium * * Report the number of bytes and packets transmitted by the network device * hardware queue over the physical medium; @bytes must exactly match the * @bytes amount passed to netdev_sent_queue() */ static inline void netdev_completed_queue(struct net_device *dev, unsigned int pkts, unsigned int bytes) { netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); } static inline void netdev_tx_reset_queue(struct netdev_queue *q) { #ifdef CONFIG_BQL clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); dql_reset(&q->dql); #endif } /** * netdev_reset_queue - reset the packet and byte counts of a network device * @dev_queue: network device * * Reset the byte and packet counts of a network device and clear the * software flow control OFF bit for this network device */ static inline void netdev_reset_queue(struct net_device *dev_queue) { netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); } /** * netdev_cap_txqueue - check if selected tx queue exceeds device queues * @dev: network device * @queue_index: given tx queue index * * Returns 0 if given tx queue index >= number of device tx queues, * otherwise returns the originally passed tx queue index. */ static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) { if (unlikely(queue_index >= dev->real_num_tx_queues)) { net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", dev->name, queue_index, dev->real_num_tx_queues); return 0; } return queue_index; } /** * netif_running - test if up * @dev: network device * * Test if the device has been brought up. */ static inline bool netif_running(const struct net_device *dev) { return test_bit(__LINK_STATE_START, &dev->state); } /* * Routines to manage the subqueues on a device. We only need start, * stop, and a check if it's stopped. All other device management is * done at the overall netdevice level. * Also test the device if we're multiqueue. */ /** * netif_start_subqueue - allow sending packets on subqueue * @dev: network device * @queue_index: sub queue index * * Start individual transmit queue of a device with multiple transmit queues.
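 *
 * Multiqueue drivers flow control each ring separately; a rough
 * sketch (the ring and foo_* names are hypothetical bookkeeping):
 *
 *	per-ring xmit path:
 *		if (foo_ring_full(ring))
 *			netif_stop_subqueue(dev, ring->index);
 *
 *	per-ring completion path:
 *		if (__netif_subqueue_stopped(dev, ring->index) &&
 *		    foo_ring_has_room(ring))
 *			netif_wake_subqueue(dev, ring->index);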
*/ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) { struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); netif_tx_start_queue(txq); } /** * netif_stop_subqueue - stop sending packets on subqueue * @dev: network device * @queue_index: sub queue index * * Stop individual transmit queue of a device with multiple transmit queues. */ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) { struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); netif_tx_stop_queue(txq); } /** * netif_subqueue_stopped - test status of subqueue * @dev: network device * @queue_index: sub queue index * * Check individual transmit queue of a device with multiple transmit queues. */ static inline bool __netif_subqueue_stopped(const struct net_device *dev, u16 queue_index) { struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); return netif_tx_queue_stopped(txq); } static inline bool netif_subqueue_stopped(const struct net_device *dev, struct sk_buff *skb) { return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); } /** * netif_wake_subqueue - allow sending packets on subqueue * @dev: network device * @queue_index: sub queue index * * Resume individual transmit queue of a device with multiple transmit queues. */ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) { struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); netif_tx_wake_queue(txq); } #ifdef CONFIG_XPS int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, u16 index); int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, u16 index, bool is_rxqs_map); /** * netif_attr_test_mask - Test a CPU or Rx queue set in a mask * @j: CPU/Rx queue index * @mask: bitmask of all cpus/rx queues * @nr_bits: number of bits in the bitmask * * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. */ static inline bool netif_attr_test_mask(unsigned long j, const unsigned long *mask, unsigned int nr_bits) { cpu_max_bits_warn(j, nr_bits); return test_bit(j, mask); } /** * netif_attr_test_online - Test for online CPU/Rx queue * @j: CPU/Rx queue index * @online_mask: bitmask for CPUs/Rx queues that are online * @nr_bits: number of bits in the bitmask * * Returns true if a CPU/Rx queue is online. */ static inline bool netif_attr_test_online(unsigned long j, const unsigned long *online_mask, unsigned int nr_bits) { cpu_max_bits_warn(j, nr_bits); if (online_mask) return test_bit(j, online_mask); return (j < nr_bits); } /** * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask * @n: CPU/Rx queue index * @srcp: the cpumask/Rx queue mask pointer * @nr_bits: number of bits in the bitmask * * Returns >= nr_bits if no further CPUs/Rx queues set. */ static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, unsigned int nr_bits) { /* -1 is a legal arg here. */ if (n != -1) cpu_max_bits_warn(n, nr_bits); if (srcp) return find_next_bit(srcp, nr_bits, n + 1); return n + 1; } /** * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p * @n: CPU/Rx queue index * @src1p: the first CPUs/Rx queues mask pointer * @src2p: the second CPUs/Rx queues mask pointer * @nr_bits: number of bits in the bitmask * * Returns >= nr_bits if no further CPUs/Rx queues set in both. */ static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, const unsigned long *src2p, unsigned int nr_bits) { /* -1 is a legal arg here. 
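 * A full scan therefore starts at n == -1; for instance (use_queue()
 * is a placeholder):
 *
 *	int j = -1;
 *
 *	while ((j = netif_attrmask_next_and(j, src1p, src2p,
 *					    nr_bits)) < nr_bits)
 *		use_queue(j);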
*/ if (n != -1) cpu_max_bits_warn(n, nr_bits); if (src1p && src2p) return find_next_and_bit(src1p, src2p, nr_bits, n + 1); else if (src1p) return find_next_bit(src1p, nr_bits, n + 1); else if (src2p) return find_next_bit(src2p, nr_bits, n + 1); return n + 1; } #else static inline int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, u16 index) { return 0; } static inline int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, u16 index, bool is_rxqs_map) { return 0; } #endif /** * netif_is_multiqueue - test if device has multiple transmit queues * @dev: network device * * Check if device has multiple transmit queues */ static inline bool netif_is_multiqueue(const struct net_device *dev) { return dev->num_tx_queues > 1; } int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); #ifdef CONFIG_SYSFS int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); #else static inline int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxqs) { dev->real_num_rx_queues = rxqs; return 0; } #endif static inline struct netdev_rx_queue * __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) { return dev->_rx + rxq; } #ifdef CONFIG_SYSFS static inline unsigned int get_netdev_rx_queue_index( struct netdev_rx_queue *queue) { struct net_device *dev = queue->dev; int index = queue - dev->_rx; BUG_ON(index >= dev->num_rx_queues); return index; } #endif #define DEFAULT_MAX_NUM_RSS_QUEUES (8) int netif_get_num_default_rss_queues(void); enum skb_free_reason { SKB_REASON_CONSUMED, SKB_REASON_DROPPED, }; void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason); void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); /* * It is not allowed to call kfree_skb() or consume_skb() from hardware * interrupt context or with hardware interrupts being disabled. * (in_irq() || irqs_disabled()) * * We provide four helpers that can be used in the following contexts: * * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, * replacing kfree_skb(skb) * * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. * Typically used in place of consume_skb(skb) in TX completion path * * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, * replacing kfree_skb(skb) * * dev_consume_skb_any(skb) when caller doesn't know its current irq context, * and consumed a packet.
Used in place of consume_skb(skb) */ static inline void dev_kfree_skb_irq(struct sk_buff *skb) { __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED); } static inline void dev_consume_skb_irq(struct sk_buff *skb) { __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED); } static inline void dev_kfree_skb_any(struct sk_buff *skb) { __dev_kfree_skb_any(skb, SKB_REASON_DROPPED); } static inline void dev_consume_skb_any(struct sk_buff *skb) { __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); } void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); int netif_rx(struct sk_buff *skb); int netif_rx_ni(struct sk_buff *skb); int netif_receive_skb(struct sk_buff *skb); int netif_receive_skb_core(struct sk_buff *skb); void netif_receive_skb_list(struct list_head *head); gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); void napi_gro_flush(struct napi_struct *napi, bool flush_old); struct sk_buff *napi_get_frags(struct napi_struct *napi); gro_result_t napi_gro_frags(struct napi_struct *napi); struct packet_offload *gro_find_receive_by_type(__be16 type); struct packet_offload *gro_find_complete_by_type(__be16 type); static inline void napi_free_frags(struct napi_struct *napi) { kfree_skb(napi->skb); napi->skb = NULL; } bool netdev_is_rx_handler_busy(struct net_device *dev); int netdev_rx_handler_register(struct net_device *dev, rx_handler_func_t *rx_handler, void *rx_handler_data); void netdev_rx_handler_unregister(struct net_device *dev); bool dev_valid_name(const char *name); static inline bool is_socket_ioctl_cmd(unsigned int cmd) { return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; } int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_copyout); int dev_ifconf(struct net *net, struct ifconf *, int); int dev_ethtool(struct net *net, struct ifreq *); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *dev, unsigned int flags, struct netlink_ext_ack *extack); int dev_change_flags(struct net_device *dev, unsigned int flags, struct netlink_ext_ack *extack); void __dev_notify_flags(struct net_device *, unsigned int old_flags, unsigned int gchanges); int dev_change_name(struct net_device *, const char *); int dev_set_alias(struct net_device *, const char *, size_t); int dev_get_alias(const struct net_device *, char *, size_t); int dev_change_net_namespace(struct net_device *, struct net *, const char *); int __dev_set_mtu(struct net_device *, int); int dev_validate_mtu(struct net_device *dev, int mtu, struct netlink_ext_ack *extack); int dev_set_mtu_ext(struct net_device *dev, int mtu, struct netlink_ext_ack *extack); int dev_set_mtu(struct net_device *, int); int dev_change_tx_queue_len(struct net_device *, unsigned long); void dev_set_group(struct net_device *, int); int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, struct netlink_ext_ack *extack); int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, struct netlink_ext_ack *extack); int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, struct netlink_ext_ack *extack); int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int dev_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid, 
bool recurse); bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); int dev_change_proto_down(struct net_device *dev, bool proto_down); int dev_change_proto_down_generic(struct net_device *dev, bool proto_down); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags); u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, enum bpf_netdev_command cmd); int xdp_umem_query(struct net_device *dev, u16 queue_id); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb); static __always_inline int ____dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { if (skb_orphan_frags(skb, GFP_ATOMIC) || unlikely(!is_skb_forwardable(dev, skb))) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; } skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); skb->priority = 0; return 0; } bool dev_nit_active(struct net_device *dev); void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); extern int netdev_budget; extern unsigned int netdev_budget_usecs; /* Called by rtnetlink.c:rtnl_unlock() */ void netdev_run_todo(void); /** * dev_put - release reference to device * @dev: network device * * Release reference to device to allow it to be freed. */ static inline void dev_put(struct net_device *dev) { if (dev) this_cpu_dec(*dev->pcpu_refcnt); } /** * dev_hold - get reference to device * @dev: network device * * Hold reference to device to keep it from being freed. */ static inline void dev_hold(struct net_device *dev) { if (dev) this_cpu_inc(*dev->pcpu_refcnt); } /* Carrier loss detection, dial on demand. The functions netif_carrier_on * and _off may be called from IRQ context, but it is the caller * who is responsible for serialization of these calls. * * The name carrier is inappropriate; these functions should really be * called netif_lowerlayer_*() because they represent the state of any * kind of lower layer, not just hardware media. */ void linkwatch_init_dev(struct net_device *dev); void linkwatch_fire_event(struct net_device *dev); void linkwatch_forget_dev(struct net_device *dev); /** * netif_carrier_ok - test if carrier present * @dev: network device * * Check if carrier is present on device */ static inline bool netif_carrier_ok(const struct net_device *dev) { return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); } unsigned long dev_trans_start(struct net_device *dev); void __netdev_watchdog_up(struct net_device *dev); void netif_carrier_on(struct net_device *dev); void netif_carrier_off(struct net_device *dev); /** * netif_dormant_on - mark device as dormant. * @dev: network device * * Mark device as dormant (as per RFC2863). * * The dormant state indicates that the relevant interface is not * actually in a condition to pass packets (i.e., it is not 'up') but is * in a "pending" state, waiting for some external event. For * "on-demand" interfaces, this new state identifies the situation where the * interface is waiting for events to place it in the up state.
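 *
 * For example, a driver whose link needs 802.1X authentication might
 * call netif_dormant_on() once the lower layer is up and
 * netif_dormant_off() when the supplicant succeeds (an illustrative
 * use, not taken from a specific driver).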
*/ static inline void netif_dormant_on(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) linkwatch_fire_event(dev); } /** * netif_dormant_off - set device as not dormant. * @dev: network device * * Device is not in dormant state. */ static inline void netif_dormant_off(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) linkwatch_fire_event(dev); } /** * netif_dormant - test if device is dormant * @dev: network device * * Check if device is dormant. */ static inline bool netif_dormant(const struct net_device *dev) { return test_bit(__LINK_STATE_DORMANT, &dev->state); } /** * netif_oper_up - test if device is operational * @dev: network device * * Check if carrier is operational */ static inline bool netif_oper_up(const struct net_device *dev) { return (dev->operstate == IF_OPER_UP || dev->operstate == IF_OPER_UNKNOWN /* backward compat */); } /** * netif_device_present - is device available or removed * @dev: network device * * Check if device has not been removed from system. */ static inline bool netif_device_present(struct net_device *dev) { return test_bit(__LINK_STATE_PRESENT, &dev->state); } void netif_device_detach(struct net_device *dev); void netif_device_attach(struct net_device *dev); /* * Network interface message level settings */ enum { NETIF_MSG_DRV = 0x0001, NETIF_MSG_PROBE = 0x0002, NETIF_MSG_LINK = 0x0004, NETIF_MSG_TIMER = 0x0008, NETIF_MSG_IFDOWN = 0x0010, NETIF_MSG_IFUP = 0x0020, NETIF_MSG_RX_ERR = 0x0040, NETIF_MSG_TX_ERR = 0x0080, NETIF_MSG_TX_QUEUED = 0x0100, NETIF_MSG_INTR = 0x0200, NETIF_MSG_TX_DONE = 0x0400, NETIF_MSG_RX_STATUS = 0x0800, NETIF_MSG_PKTDATA = 0x1000, NETIF_MSG_HW = 0x2000, NETIF_MSG_WOL = 0x4000, }; #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) { /* use default */ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) return default_msg_enable_bits; if (debug_value == 0) /* no output */ return 0; /* set low N bits */ return (1U << debug_value) - 1; } static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) { spin_lock(&txq->_xmit_lock); /* Pairs with READ_ONCE() in __dev_queue_xmit() */ WRITE_ONCE(txq->xmit_lock_owner, cpu); } static inline bool __netif_tx_acquire(struct netdev_queue *txq) { __acquire(&txq->_xmit_lock); return true; } static inline void __netif_tx_release(struct netdev_queue *txq) { __release(&txq->_xmit_lock); } static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); /* Pairs with READ_ONCE() in __dev_queue_xmit() */ 
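	/* xmit_lock_owner is read locklessly to detect recursive locking */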
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); } static inline bool __netif_tx_trylock(struct netdev_queue *txq) { bool ok = spin_trylock(&txq->_xmit_lock); if (likely(ok)) { /* Pairs with READ_ONCE() in __dev_queue_xmit() */ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); } return ok; } static inline void __netif_tx_unlock(struct netdev_queue *txq) { /* Pairs with READ_ONCE() in __dev_queue_xmit() */ WRITE_ONCE(txq->xmit_lock_owner, -1); spin_unlock(&txq->_xmit_lock); } static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) { /* Pairs with READ_ONCE() in __dev_queue_xmit() */ WRITE_ONCE(txq->xmit_lock_owner, -1); spin_unlock_bh(&txq->_xmit_lock); } static inline void txq_trans_update(struct netdev_queue *txq) { if (txq->xmit_lock_owner != -1) txq->trans_start = jiffies; } /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ static inline void netif_trans_update(struct net_device *dev) { struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); if (txq->trans_start != jiffies) txq->trans_start = jiffies; } /** * netif_tx_lock - grab network device transmit lock * @dev: network device * * Get network device transmit lock */ static inline void netif_tx_lock(struct net_device *dev) { unsigned int i; int cpu; spin_lock(&dev->tx_global_lock); cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); /* We are the only thread of execution doing a * freeze, but we have to grab the _xmit_lock in * order to synchronize with threads which are in * the ->hard_start_xmit() handler and already * checked the frozen bit. */ __netif_tx_lock(txq, cpu); set_bit(__QUEUE_STATE_FROZEN, &txq->state); __netif_tx_unlock(txq); } } static inline void netif_tx_lock_bh(struct net_device *dev) { local_bh_disable(); netif_tx_lock(dev); } static inline void netif_tx_unlock(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); /* No need to grab the _xmit_lock here. If the * queue is not stopped for another reason, we * force a schedule. */ clear_bit(__QUEUE_STATE_FROZEN, &txq->state); netif_schedule_queue(txq); } spin_unlock(&dev->tx_global_lock); } static inline void netif_tx_unlock_bh(struct net_device *dev) { netif_tx_unlock(dev); local_bh_enable(); } #define HARD_TX_LOCK(dev, txq, cpu) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ __netif_tx_lock(txq, cpu); \ } else { \ __netif_tx_acquire(txq); \ } \ } #define HARD_TX_TRYLOCK(dev, txq) \ (((dev->features & NETIF_F_LLTX) == 0) ? 
\ __netif_tx_trylock(txq) : \ __netif_tx_acquire(txq)) #define HARD_TX_UNLOCK(dev, txq) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \ __netif_tx_unlock(txq); \ } else { \ __netif_tx_release(txq); \ } \ } static inline void netif_tx_disable(struct net_device *dev) { unsigned int i; int cpu; local_bh_disable(); cpu = smp_processor_id(); spin_lock(&dev->tx_global_lock); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); __netif_tx_lock(txq, cpu); netif_tx_stop_queue(txq); __netif_tx_unlock(txq); } spin_unlock(&dev->tx_global_lock); local_bh_enable(); } static inline void netif_addr_lock(struct net_device *dev) { spin_lock(&dev->addr_list_lock); } static inline void netif_addr_lock_bh(struct net_device *dev) { spin_lock_bh(&dev->addr_list_lock); } static inline void netif_addr_unlock(struct net_device *dev) { spin_unlock(&dev->addr_list_lock); } static inline void netif_addr_unlock_bh(struct net_device *dev) { spin_unlock_bh(&dev->addr_list_lock); } /* * dev_addrs walker. Should be used only for read access. Call with * rcu_read_lock held. */ #define for_each_dev_addr(dev, ha) \ list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) /* These functions live elsewhere (drivers/net/net_init.c, but related) */ void ether_setup(struct net_device *dev); /* Support for loadable net-drivers */ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *), unsigned int txqs, unsigned int rxqs); int dev_get_valid_name(struct net *net, struct net_device *dev, const char *name); #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ count) int register_netdev(struct net_device *dev); void unregister_netdev(struct net_device *dev); /* General hardware address lists handling functions */ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len); void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len); int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, struct net_device *dev, int (*sync)(struct net_device *, const unsigned char *), int (*unsync)(struct net_device *, const unsigned char *)); int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, struct net_device *dev, int (*sync)(struct net_device *, const unsigned char *, int), int (*unsync)(struct net_device *, const unsigned char *, int)); void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, struct net_device *dev, int (*unsync)(struct net_device *, const unsigned char *, int)); void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, struct net_device *dev, int (*unsync)(struct net_device *, const unsigned char *)); void __hw_addr_init(struct netdev_hw_addr_list *list); /* Functions used for device addresses handling */ static inline void __dev_addr_set(struct net_device *dev, const u8 *addr, size_t len) { memcpy(dev->dev_addr, addr, len); } static inline void dev_addr_set(struct net_device *dev, const u8 *addr) { __dev_addr_set(dev, addr, dev->addr_len); } static inline void dev_addr_mod(struct net_device *dev, unsigned int offset, const u8 *addr, size_t len) { memcpy(&dev->dev_addr[offset], addr, len); } int dev_addr_add(struct net_device *dev, const unsigned char *addr, 
unsigned char addr_type); int dev_addr_del(struct net_device *dev, const unsigned char *addr, unsigned char addr_type); void dev_addr_flush(struct net_device *dev); int dev_addr_init(struct net_device *dev); /* Functions used for unicast address handling */ int dev_uc_add(struct net_device *dev, const unsigned char *addr); int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); int dev_uc_del(struct net_device *dev, const unsigned char *addr); int dev_uc_sync(struct net_device *to, struct net_device *from); int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); void dev_uc_unsync(struct net_device *to, struct net_device *from); void dev_uc_flush(struct net_device *dev); void dev_uc_init(struct net_device *dev); /** * __dev_uc_sync - Synchronize device's unicast list * @dev: device to sync * @sync: function to call if address should be added * @unsync: function to call if address should be removed * * Add newly added addresses to the interface, and release * addresses that have been deleted. */ static inline int __dev_uc_sync(struct net_device *dev, int (*sync)(struct net_device *, const unsigned char *), int (*unsync)(struct net_device *, const unsigned char *)) { return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); } /** * __dev_uc_unsync - Remove synchronized addresses from device * @dev: device to sync * @unsync: function to call if address should be removed * * Remove all addresses that were added to the device by dev_uc_sync(). */ static inline void __dev_uc_unsync(struct net_device *dev, int (*unsync)(struct net_device *, const unsigned char *)) { __hw_addr_unsync_dev(&dev->uc, dev, unsync); } /* Functions used for multicast address handling */ int dev_mc_add(struct net_device *dev, const unsigned char *addr); int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); int dev_mc_del(struct net_device *dev, const unsigned char *addr); int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); int dev_mc_sync(struct net_device *to, struct net_device *from); int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); void dev_mc_unsync(struct net_device *to, struct net_device *from); void dev_mc_flush(struct net_device *dev); void dev_mc_init(struct net_device *dev); /** * __dev_mc_sync - Synchronize device's multicast list * @dev: device to sync * @sync: function to call if address should be added * @unsync: function to call if address should be removed * * Add newly added addresses to the interface, and release * addresses that have been deleted. */ static inline int __dev_mc_sync(struct net_device *dev, int (*sync)(struct net_device *, const unsigned char *), int (*unsync)(struct net_device *, const unsigned char *)) { return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); } /** * __dev_mc_unsync - Remove synchronized addresses from device * @dev: device to sync * @unsync: function to call if address should be removed * * Remove all addresses that were added to the device by dev_mc_sync().
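 *
 * Both helpers are normally driven from a driver's ndo_set_rx_mode;
 * a minimal sketch (foo_add_filter()/foo_del_filter() stand in for
 * hypothetical callbacks that program one hardware filter entry):
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_mc_sync(dev, foo_add_filter, foo_del_filter);
 *	}
 *
 * with __dev_mc_unsync(dev, foo_del_filter) called from ndo_stop to
 * flush the hardware filter table again.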
*/ static inline void __dev_mc_unsync(struct net_device *dev, int (*unsync)(struct net_device *, const unsigned char *)) { __hw_addr_unsync_dev(&dev->mc, dev, unsync); } /* Functions used for secondary unicast and multicast support */ void dev_set_rx_mode(struct net_device *dev); void __dev_set_rx_mode(struct net_device *dev); int dev_set_promiscuity(struct net_device *dev, int inc); int dev_set_allmulti(struct net_device *dev, int inc); void netdev_state_change(struct net_device *dev); void netdev_notify_peers(struct net_device *dev); void netdev_features_change(struct net_device *dev); /* Load a device via the kmod */ void dev_load(struct net *net, const char *name); struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, struct rtnl_link_stats64 *storage); void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, const struct net_device_stats *netdev_stats); extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; extern int weight_p; extern int dev_weight_rx_bias; extern int dev_weight_tx_bias; extern int dev_rx_weight; extern int dev_tx_weight; extern int gro_normal_batch; bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter); struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter); /* iterate through upper list, must be called under RCU read lock */ #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ for (iter = &(dev)->adj_list.upper, \ updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ updev; \ updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) int netdev_walk_all_upper_dev_rcu(struct net_device *dev, int (*fn)(struct net_device *upper_dev, void *data), void *data); bool netdev_has_upper_dev_all_rcu(struct net_device *dev, struct net_device *upper_dev); bool netdev_has_any_upper_dev(struct net_device *dev); void *netdev_lower_get_next_private(struct net_device *dev, struct list_head **iter); void *netdev_lower_get_next_private_rcu(struct net_device *dev, struct list_head **iter); #define netdev_for_each_lower_private(dev, priv, iter) \ for (iter = (dev)->adj_list.lower.next, \ priv = netdev_lower_get_next_private(dev, &(iter)); \ priv; \ priv = netdev_lower_get_next_private(dev, &(iter))) #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ for (iter = &(dev)->adj_list.lower, \ priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ priv; \ priv = netdev_lower_get_next_private_rcu(dev, &(iter))) void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter); #define netdev_for_each_lower_dev(dev, ldev, iter) \ for (iter = (dev)->adj_list.lower.next, \ ldev = netdev_lower_get_next(dev, &(iter)); \ ldev; \ ldev = netdev_lower_get_next(dev, &(iter))) struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, struct list_head **iter); int netdev_walk_all_lower_dev(struct net_device *dev, int (*fn)(struct net_device *lower_dev, void *data), void *data); int netdev_walk_all_lower_dev_rcu(struct net_device *dev, int (*fn)(struct net_device *lower_dev, void *data), void *data); void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); struct net_device *netdev_master_upper_dev_get(struct net_device *dev); struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, struct netlink_ext_ack 
*extack); int netdev_master_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, void *upper_priv, void *upper_info, struct netlink_ext_ack *extack); void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev); int netdev_adjacent_change_prepare(struct net_device *old_dev, struct net_device *new_dev, struct net_device *dev, struct netlink_ext_ack *extack); void netdev_adjacent_change_commit(struct net_device *old_dev, struct net_device *new_dev, struct net_device *dev); void netdev_adjacent_change_abort(struct net_device *old_dev, struct net_device *new_dev, struct net_device *dev); void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); void netdev_lower_state_changed(struct net_device *lower_dev, void *lower_state_info); /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; void netdev_rss_key_fill(void *buffer, size_t len); int skb_checksum_help(struct sk_buff *skb); int skb_crc32c_csum_help(struct sk_buff *skb); int skb_csum_hwoffload_help(struct sk_buff *skb, const netdev_features_t features); struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, netdev_features_t features); struct netdev_bonding_info { ifslave slave; ifbond master; }; struct netdev_notifier_bonding_info { struct netdev_notifier_info info; /* must be first */ struct netdev_bonding_info bonding_info; }; void netdev_bonding_info_change(struct net_device *dev, struct netdev_bonding_info *bonding_info); static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) { return __skb_gso_segment(skb, features, true); } __be16 skb_network_protocol(struct sk_buff *skb, int *depth); static inline bool can_checksum_protocol(netdev_features_t features, __be16 protocol) { if (protocol == htons(ETH_P_FCOE)) return !!(features & NETIF_F_FCOE_CRC); /* Assume this is an IP checksum (not SCTP CRC) */ if (features & NETIF_F_HW_CSUM) { /* Can checksum everything */ return true; } switch (protocol) { case htons(ETH_P_IP): return !!(features & NETIF_F_IP_CSUM); case htons(ETH_P_IPV6): return !!(features & NETIF_F_IPV6_CSUM); default: return false; } } #ifdef CONFIG_BUG void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); #else static inline void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) { } #endif /* rx skb timestamps */ void net_enable_timestamp(void); void net_disable_timestamp(void); #ifdef CONFIG_PROC_FS int __init dev_proc_init(void); #else #define dev_proc_init() 0 #endif static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, struct sk_buff *skb, struct net_device *dev, bool more) { __this_cpu_write(softnet_data.xmit.more, more); return ops->ndo_start_xmit(skb, dev); } static inline bool netdev_xmit_more(void) { return __this_cpu_read(softnet_data.xmit.more); } static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, bool more) { const struct net_device_ops *ops = dev->netdev_ops; netdev_tx_t rc; rc = __netdev_start_xmit(ops, skb, dev, more); if (rc == NETDEV_TX_OK) txq_trans_update(txq); return rc; } int netdev_class_create_file_ns(const struct class_attribute *class_attr, const void *ns); void netdev_class_remove_file_ns(const struct class_attribute 
*class_attr, const void *ns); static inline int netdev_class_create_file(const struct class_attribute *class_attr) { return netdev_class_create_file_ns(class_attr, NULL); } static inline void netdev_class_remove_file(const struct class_attribute *class_attr) { netdev_class_remove_file_ns(class_attr, NULL); } extern const struct kobj_ns_type_operations net_ns_type_operations; const char *netdev_drivername(const struct net_device *dev); void linkwatch_run_queue(void); static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, netdev_features_t f2) { if ((f1 ^ f2) & NETIF_F_HW_CSUM) { if (f1 & NETIF_F_HW_CSUM) f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); else f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); } return f1 & f2; } static inline netdev_features_t netdev_get_wanted_features( struct net_device *dev) { return (dev->features & ~dev->hw_features) | dev->wanted_features; } netdev_features_t netdev_increment_features(netdev_features_t all, netdev_features_t one, netdev_features_t mask); /* Allow TSO being used on stacked device : * Performing the GSO segmentation before last device * is a performance improvement. */ static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, netdev_features_t mask) { return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); } int __netdev_update_features(struct net_device *dev); void netdev_update_features(struct net_device *dev); void netdev_change_features(struct net_device *dev); void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev); netdev_features_t passthru_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); netdev_features_t netif_skb_features(struct sk_buff *skb); static inline bool net_gso_ok(netdev_features_t features, int gso_type) { netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; /* check flags correspondence */ BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) { return net_gso_ok(features, skb_shinfo(skb)->gso_type) && (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); } static 
inline bool netif_needs_gso(struct sk_buff *skb, netdev_features_t features) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && (skb->ip_summed != CHECKSUM_UNNECESSARY))); } static inline void netif_set_gso_max_size(struct net_device *dev, unsigned int size) { dev->gso_max_size = size; } static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, int pulled_hlen, u16 mac_offset, int mac_len) { skb->protocol = protocol; skb->encapsulation = 1; skb_push(skb, pulled_hlen); skb_reset_transport_header(skb); skb->mac_header = mac_offset; skb->network_header = skb->mac_header + mac_len; skb->mac_len = mac_len; } static inline bool netif_is_macsec(const struct net_device *dev) { return dev->priv_flags & IFF_MACSEC; } static inline bool netif_is_macvlan(const struct net_device *dev) { return dev->priv_flags & IFF_MACVLAN; } static inline bool netif_is_macvlan_port(const struct net_device *dev) { return dev->priv_flags & IFF_MACVLAN_PORT; } static inline bool netif_is_bond_master(const struct net_device *dev) { return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; } static inline bool netif_is_bond_slave(const struct net_device *dev) { return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; } static inline bool netif_supports_nofcs(struct net_device *dev) { return dev->priv_flags & IFF_SUPP_NOFCS; } static inline bool netif_has_l3_rx_handler(const struct net_device *dev) { return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; } static inline bool netif_is_l3_master(const struct net_device *dev) { return dev->priv_flags & IFF_L3MDEV_MASTER; } static inline bool netif_is_l3_slave(const struct net_device *dev) { return dev->priv_flags & IFF_L3MDEV_SLAVE; } static inline bool netif_is_bridge_master(const struct net_device *dev) { return dev->priv_flags & IFF_EBRIDGE; } static inline bool netif_is_bridge_port(const struct net_device *dev) { return dev->priv_flags & IFF_BRIDGE_PORT; } static inline bool netif_is_ovs_master(const struct net_device *dev) { return dev->priv_flags & IFF_OPENVSWITCH; } static inline bool netif_is_ovs_port(const struct net_device *dev) { return dev->priv_flags & IFF_OVS_DATAPATH; } static inline bool netif_is_team_master(const struct net_device *dev) { return dev->priv_flags & IFF_TEAM; } static inline bool netif_is_team_port(const struct net_device *dev) { return dev->priv_flags & IFF_TEAM_PORT; } static inline bool netif_is_lag_master(const struct net_device *dev) { return netif_is_bond_master(dev) || netif_is_team_master(dev); } static inline bool netif_is_lag_port(const struct net_device *dev) { return netif_is_bond_slave(dev) || netif_is_team_port(dev); } static inline bool netif_is_rxfh_configured(const struct net_device *dev) { return dev->priv_flags & IFF_RXFH_CONFIGURED; } static inline bool netif_is_failover(const struct net_device *dev) { return dev->priv_flags & IFF_FAILOVER; } static inline bool netif_is_failover_slave(const struct net_device *dev) { return dev->priv_flags & IFF_FAILOVER_SLAVE; } /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ static inline void netif_keep_dst(struct net_device *dev) { dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); } /* return true if dev can't cope with mtu frames that need vlan tag insertion */ static inline bool netif_reduces_vlan_mtu(struct net_device *dev) { /* TODO: reserve and use an additional IFF bit, if we get more users */ return dev->priv_flags & IFF_MACSEC; } extern struct 
pernet_operations __net_initdata loopback_net_ops; /* Logging, debugging and troubleshooting/diagnostic helpers. */ /* netdev_printk helpers, similar to dev_printk */ static inline const char *netdev_name(const struct net_device *dev) { if (!dev->name[0] || strchr(dev->name, '%')) return "(unnamed net_device)"; return dev->name; } static inline bool netdev_unregistering(const struct net_device *dev) { return dev->reg_state == NETREG_UNREGISTERING; } static inline const char *netdev_reg_state(const struct net_device *dev) { switch (dev->reg_state) { case NETREG_UNINITIALIZED: return " (uninitialized)"; case NETREG_REGISTERED: return ""; case NETREG_UNREGISTERING: return " (unregistering)"; case NETREG_UNREGISTERED: return " (unregistered)"; case NETREG_RELEASED: return " (released)"; case NETREG_DUMMY: return " (dummy)"; } WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); return " (unknown)"; } __printf(3, 4) __cold void netdev_printk(const char *level, const struct net_device *dev, const char *format, ...); __printf(2, 3) __cold void netdev_emerg(const struct net_device *dev, const char *format, ...); __printf(2, 3) __cold void netdev_alert(const struct net_device *dev, const char *format, ...); __printf(2, 3) __cold void netdev_crit(const struct net_device *dev, const char *format, ...); __printf(2, 3) __cold void netdev_err(const struct net_device *dev, const char *format, ...); __printf(2, 3) __cold void netdev_warn(const struct net_device *dev, const char *format, ...); __printf(2, 3) __cold void netdev_notice(const struct net_device *dev, const char *format, ...); __printf(2, 3) __cold void netdev_info(const struct net_device *dev, const char *format, ...); #define netdev_level_once(level, dev, fmt, ...) \ do { \ static bool __print_once __read_mostly; \ \ if (!__print_once) { \ __print_once = true; \ netdev_printk(level, dev, fmt, ##__VA_ARGS__); \ } \ } while (0) #define netdev_emerg_once(dev, fmt, ...) \ netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__) #define netdev_alert_once(dev, fmt, ...) \ netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__) #define netdev_crit_once(dev, fmt, ...) \ netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__) #define netdev_err_once(dev, fmt, ...) \ netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__) #define netdev_warn_once(dev, fmt, ...) \ netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__) #define netdev_notice_once(dev, fmt, ...) \ netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__) #define netdev_info_once(dev, fmt, ...) \ netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__) #define MODULE_ALIAS_NETDEV(device) \ MODULE_ALIAS("netdev-" device) #if defined(CONFIG_DYNAMIC_DEBUG) #define netdev_dbg(__dev, format, args...) \ do { \ dynamic_netdev_dbg(__dev, format, ##args); \ } while (0) #elif defined(DEBUG) #define netdev_dbg(__dev, format, args...) \ netdev_printk(KERN_DEBUG, __dev, format, ##args) #else #define netdev_dbg(__dev, format, args...) \ ({ \ if (0) \ netdev_printk(KERN_DEBUG, __dev, format, ##args); \ }) #endif #if defined(VERBOSE_DEBUG) #define netdev_vdbg netdev_dbg #else #define netdev_vdbg(dev, format, args...) \ ({ \ if (0) \ netdev_printk(KERN_DEBUG, dev, format, ##args); \ 0; \ }) #endif /* * netdev_WARN() acts like dev_printk(), but with the key difference * of using a WARN/WARN_ON to get the message out, including the * file/line information and a backtrace. */ #define netdev_WARN(dev, format, args...) 
\ WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ netdev_reg_state(dev), ##args) #define netdev_WARN_ONCE(dev, format, args...) \ WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ netdev_reg_state(dev), ##args) /* netif printk helpers, similar to netdev_printk */ #define netif_printk(priv, type, level, dev, fmt, args...) \ do { \ if (netif_msg_##type(priv)) \ netdev_printk(level, (dev), fmt, ##args); \ } while (0) #define netif_level(level, priv, type, dev, fmt, args...) \ do { \ if (netif_msg_##type(priv)) \ netdev_##level(dev, fmt, ##args); \ } while (0) #define netif_emerg(priv, type, dev, fmt, args...) \ netif_level(emerg, priv, type, dev, fmt, ##args) #define netif_alert(priv, type, dev, fmt, args...) \ netif_level(alert, priv, type, dev, fmt, ##args) #define netif_crit(priv, type, dev, fmt, args...) \ netif_level(crit, priv, type, dev, fmt, ##args) #define netif_err(priv, type, dev, fmt, args...) \ netif_level(err, priv, type, dev, fmt, ##args) #define netif_warn(priv, type, dev, fmt, args...) \ netif_level(warn, priv, type, dev, fmt, ##args) #define netif_notice(priv, type, dev, fmt, args...) \ netif_level(notice, priv, type, dev, fmt, ##args) #define netif_info(priv, type, dev, fmt, args...) \ netif_level(info, priv, type, dev, fmt, ##args) #if defined(CONFIG_DYNAMIC_DEBUG) #define netif_dbg(priv, type, netdev, format, args...) \ do { \ if (netif_msg_##type(priv)) \ dynamic_netdev_dbg(netdev, format, ##args); \ } while (0) #elif defined(DEBUG) #define netif_dbg(priv, type, dev, format, args...) \ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) #else #define netif_dbg(priv, type, dev, format, args...) \ ({ \ if (0) \ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ 0; \ }) #endif /* if @cond then downgrade to debug, else print at @level */ #define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \ do { \ if (cond) \ netif_dbg(priv, type, netdev, fmt, ##args); \ else \ netif_ ## level(priv, type, netdev, fmt, ##args); \ } while (0) #if defined(VERBOSE_DEBUG) #define netif_vdbg netif_dbg #else #define netif_vdbg(priv, type, dev, format, args...) \ ({ \ if (0) \ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ 0; \ }) #endif /* * The list of packet types we will receive (as opposed to discard) * and the routines to invoke. * * Why 16. Because with 16 the only overlap we get on a hash of the * low nibble of the protocol value is RARP/SNAP/X.25. * * 0800 IP * 0001 802.3 * 0002 AX.25 * 0004 802.2 * 8035 RARP * 0005 SNAP * 0805 X.25 * 0806 ARP * 8137 IPX * 0009 Localtalk * 86DD IPv6 */ #define PTYPE_HASH_SIZE (16) #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) extern struct net_device *blackhole_netdev; /* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */ #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD) #define DEV_STATS_ADD(DEV, FIELD, VAL) \ atomic_long_add((VAL), &(DEV)->stats.__##FIELD) #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD) #endif /* _LINUX_NETDEVICE_H */ rcutree.h 0000644 00000003667 14722070374 0006410 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion (tree-based version) * * Copyright IBM Corporation, 2008 * * Author: Dipankar Sarma <dipankar@in.ibm.com> * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm * * Based on the original work by Paul McKenney <paulmck@linux.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
* * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU */ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H void rcu_softirq_qs(void); void rcu_note_context_switch(bool preempt); int rcu_needs_cpu(u64 basem, u64 *nextevt); void rcu_cpu_stall_reset(void); /* * Note a virtualization-based context switch. This is simply a * wrapper around rcu_note_context_switch(), which allows TINY_RCU * to save a few bytes. The caller must have disabled interrupts. */ static inline void rcu_virt_note_context_switch(int cpu) { rcu_note_context_switch(false); } void synchronize_rcu_expedited(void); void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier(void); bool rcu_eqs_special_set(int cpu); unsigned long get_state_synchronize_rcu(void); void cond_synchronize_rcu(unsigned long oldstate); void rcu_idle_enter(void); void rcu_idle_exit(void); void rcu_irq_enter(void); void rcu_irq_exit(void); void rcu_irq_enter_irqson(void); void rcu_irq_exit_irqson(void); void exit_rcu(void); void rcu_scheduler_starting(void); extern int rcu_scheduler_active __read_mostly; void rcu_end_inkernel_boot(void); bool rcu_is_watching(void); #ifndef CONFIG_PREEMPTION void rcu_all_qs(void); #endif /* RCUtree hotplug events */ int rcutree_prepare_cpu(unsigned int cpu); int rcutree_online_cpu(unsigned int cpu); int rcutree_offline_cpu(unsigned int cpu); int rcutree_dead_cpu(unsigned int cpu); int rcutree_dying_cpu(unsigned int cpu); void rcu_cpu_starting(unsigned int cpu); #endif /* __LINUX_RCUTREE_H */ host1x.h 0000644 00000020337 14722070374 0006156 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved. */ #ifndef __LINUX_HOST1X_H #define __LINUX_HOST1X_H #include <linux/device.h> #include <linux/types.h> enum host1x_class { HOST1X_CLASS_HOST1X = 0x1, HOST1X_CLASS_GR2D = 0x51, HOST1X_CLASS_GR2D_SB = 0x52, HOST1X_CLASS_VIC = 0x5D, HOST1X_CLASS_GR3D = 0x60, }; struct host1x_client; /** * struct host1x_client_ops - host1x client operations * @init: host1x client initialization code * @exit: host1x client tear down code */ struct host1x_client_ops { int (*init)(struct host1x_client *client); int (*exit)(struct host1x_client *client); }; /** * struct host1x_client - host1x client structure * @list: list node for the host1x client * @parent: pointer to struct device representing the host1x controller * @dev: pointer to struct device backing this host1x client * @ops: host1x client operations * @class: host1x class represented by this client * @channel: host1x channel associated with this client * @syncpts: array of syncpoints requested for this client * @num_syncpts: number of syncpoints requested for this client */ struct host1x_client { struct list_head list; struct device *parent; struct device *dev; const struct host1x_client_ops *ops; enum host1x_class class; struct host1x_channel *channel; struct host1x_syncpt **syncpts; unsigned int num_syncpts; }; /* * host1x buffer objects */ struct host1x_bo; struct sg_table; struct host1x_bo_ops { struct host1x_bo *(*get)(struct host1x_bo *bo); void (*put)(struct host1x_bo *bo); dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt); void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt); void *(*mmap)(struct host1x_bo *bo); void (*munmap)(struct host1x_bo *bo, void *addr); void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum); void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr); }; struct host1x_bo { const struct host1x_bo_ops 
*ops; }; static inline void host1x_bo_init(struct host1x_bo *bo, const struct host1x_bo_ops *ops) { bo->ops = ops; } static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo) { return bo->ops->get(bo); } static inline void host1x_bo_put(struct host1x_bo *bo) { bo->ops->put(bo); } static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) { return bo->ops->pin(bo, sgt); } static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) { bo->ops->unpin(bo, sgt); } static inline void *host1x_bo_mmap(struct host1x_bo *bo) { return bo->ops->mmap(bo); } static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr) { bo->ops->munmap(bo, addr); } static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum) { return bo->ops->kmap(bo, pagenum); } static inline void host1x_bo_kunmap(struct host1x_bo *bo, unsigned int pagenum, void *addr) { bo->ops->kunmap(bo, pagenum, addr); } /* * host1x syncpoints */ #define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0) #define HOST1X_SYNCPT_HAS_BASE (1 << 1) struct host1x_syncpt_base; struct host1x_syncpt; struct host1x; struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id); u32 host1x_syncpt_id(struct host1x_syncpt *sp); u32 host1x_syncpt_read_min(struct host1x_syncpt *sp); u32 host1x_syncpt_read_max(struct host1x_syncpt *sp); u32 host1x_syncpt_read(struct host1x_syncpt *sp); int host1x_syncpt_incr(struct host1x_syncpt *sp); u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs); int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, u32 *value); struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, unsigned long flags); void host1x_syncpt_free(struct host1x_syncpt *sp); struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp); u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); /* * host1x channel */ struct host1x_channel; struct host1x_job; struct host1x_channel *host1x_channel_request(struct device *dev); struct host1x_channel *host1x_channel_get(struct host1x_channel *channel); void host1x_channel_put(struct host1x_channel *channel); int host1x_job_submit(struct host1x_job *job); /* * host1x job */ struct host1x_reloc { struct { struct host1x_bo *bo; unsigned long offset; } cmdbuf; struct { struct host1x_bo *bo; unsigned long offset; } target; unsigned long shift; }; struct host1x_job { /* When refcount goes to zero, job can be freed */ struct kref ref; /* List entry */ struct list_head list; /* Channel where job is submitted to */ struct host1x_channel *channel; /* client where the job originated */ struct host1x_client *client; /* Gathers and their memory */ struct host1x_job_gather *gathers; unsigned int num_gathers; /* Array of handles to be pinned & unpinned */ struct host1x_reloc *relocs; unsigned int num_relocs; struct host1x_job_unpin_data *unpins; unsigned int num_unpins; dma_addr_t *addr_phys; dma_addr_t *gather_addr_phys; dma_addr_t *reloc_addr_phys; /* Sync point id, number of increments and end related to the submit */ u32 syncpt_id; u32 syncpt_incrs; u32 syncpt_end; /* Maximum time to wait for this job */ unsigned int timeout; /* Index and number of slots used in the push buffer */ unsigned int first_get; unsigned int num_slots; /* Copy of gathers */ size_t gather_copy_size; dma_addr_t gather_copy; u8 *gather_copy_mapped; /* Check if register is marked as an address reg */ int (*is_addr_reg)(struct device *dev, u32 class, u32 reg); /* Check if class belongs to the unit */ int 
(*is_valid_class)(u32 class); /* Request a SETCLASS to this class */ u32 class; /* Add a channel wait for previous ops to complete */ bool serialize; }; struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, u32 num_cmdbufs, u32 num_relocs); void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, unsigned int words, unsigned int offset); struct host1x_job *host1x_job_get(struct host1x_job *job); void host1x_job_put(struct host1x_job *job); int host1x_job_pin(struct host1x_job *job, struct device *dev); void host1x_job_unpin(struct host1x_job *job); /* * subdevice probe infrastructure */ struct host1x_device; /** * struct host1x_driver - host1x logical device driver * @driver: core driver * @subdevs: table of OF device IDs matching subdevices for this driver * @list: list node for the driver * @probe: called when the host1x logical device is probed * @remove: called when the host1x logical device is removed * @shutdown: called when the host1x logical device is shut down */ struct host1x_driver { struct device_driver driver; const struct of_device_id *subdevs; struct list_head list; int (*probe)(struct host1x_device *device); int (*remove)(struct host1x_device *device); void (*shutdown)(struct host1x_device *device); }; static inline struct host1x_driver * to_host1x_driver(struct device_driver *driver) { return container_of(driver, struct host1x_driver, driver); } int host1x_driver_register_full(struct host1x_driver *driver, struct module *owner); void host1x_driver_unregister(struct host1x_driver *driver); #define host1x_driver_register(driver) \ host1x_driver_register_full(driver, THIS_MODULE) struct host1x_device { struct host1x_driver *driver; struct list_head list; struct device dev; struct mutex subdevs_lock; struct list_head subdevs; struct list_head active; struct mutex clients_lock; struct list_head clients; bool registered; struct device_dma_parameters dma_parms; }; static inline struct host1x_device *to_host1x_device(struct device *dev) { return container_of(dev, struct host1x_device, dev); } int host1x_device_init(struct host1x_device *device); int host1x_device_exit(struct host1x_device *device); int host1x_client_register(struct host1x_client *client); int host1x_client_unregister(struct host1x_client *client); struct tegra_mipi_device; struct tegra_mipi_device *tegra_mipi_request(struct device *device, struct device_node *np); void tegra_mipi_free(struct tegra_mipi_device *device); int tegra_mipi_enable(struct tegra_mipi_device *device); int tegra_mipi_disable(struct tegra_mipi_device *device); int tegra_mipi_calibrate(struct tegra_mipi_device *device); #endif nfs.h 0000644 00000002473 14722070374 0005517 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * NFS protocol definitions * * This file contains constants mostly for Version 2 of the protocol, * but also has a couple of NFSv3 bits in (notably the error codes). */ #ifndef _LINUX_NFS_H #define _LINUX_NFS_H #include <linux/sunrpc/msg_prot.h> #include <linux/string.h> #include <uapi/linux/nfs.h> /* * This is the kernel NFS client file handle representation */ #define NFS_MAXFHSIZE 128 struct nfs_fh { unsigned short size; unsigned char data[NFS_MAXFHSIZE]; }; /* * Returns a zero iff the size and data fields match. * Checks only "size" bytes in the data field. 
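 */

/*
 * Editorial sketch, not part of the original header: duplicating a
 * file handle and checking the copy with the helpers defined just
 * below. "demo_clone_fh" is a hypothetical name used purely for
 * illustration.
 */
#include <linux/nfs.h>

static int demo_clone_fh(struct nfs_fh *dst, const struct nfs_fh *src)
{
	nfs_copy_fh(dst, src);		/* copies ->size bytes of ->data */
	/* 0 expected: size and data now match */
	return nfs_compare_fh(dst, src);
}

/* nfs_compare_fh(), as documented above: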
*/ static inline int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b) { return a->size != b->size || memcmp(a->data, b->data, a->size) != 0; } static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source) { target->size = source->size; memcpy(target->data, source->data, source->size); } /* * This is really a general kernel constant, but since nothing like * this is defined in the kernel headers, I have to do it here. */ #define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1)) enum nfs3_stable_how { NFS_UNSTABLE = 0, NFS_DATA_SYNC = 1, NFS_FILE_SYNC = 2, /* used by direct.c to mark verf as invalid */ NFS_INVALID_STABLE_HOW = -1 }; #endif /* _LINUX_NFS_H */ cgroup-defs.h 0000644 00000065567 14722070374 0007164 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/cgroup-defs.h - basic definitions for cgroup * * This file provides basic type and interface. Include this file directly * only if necessary to avoid cyclic dependencies. */ #ifndef _LINUX_CGROUP_DEFS_H #define _LINUX_CGROUP_DEFS_H #include <linux/limits.h> #include <linux/list.h> #include <linux/idr.h> #include <linux/wait.h> #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/refcount.h> #include <linux/percpu-refcount.h> #include <linux/percpu-rwsem.h> #include <linux/u64_stats_sync.h> #include <linux/workqueue.h> #include <linux/bpf-cgroup.h> #include <linux/psi_types.h> #ifdef CONFIG_CGROUPS struct cgroup; struct cgroup_root; struct cgroup_subsys; struct cgroup_taskset; struct kernfs_node; struct kernfs_ops; struct kernfs_open_file; struct seq_file; struct poll_table_struct; #define MAX_CGROUP_TYPE_NAMELEN 32 #define MAX_CGROUP_ROOT_NAMELEN 64 #define MAX_CFTYPE_NAME 64 /* define the enumeration of all cgroup subsystems */ #define SUBSYS(_x) _x ## _cgrp_id, enum cgroup_subsys_id { #include <linux/cgroup_subsys.h> CGROUP_SUBSYS_COUNT, }; #undef SUBSYS /* bits in struct cgroup_subsys_state flags field */ enum { CSS_NO_REF = (1 << 0), /* no reference counting for this css */ CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ CSS_VISIBLE = (1 << 3), /* css is visible to userland */ CSS_DYING = (1 << 4), /* css is dying */ }; /* bits in struct cgroup flags field */ enum { /* Control Group requires release notifications to userspace */ CGRP_NOTIFY_ON_RELEASE, /* * Clone the parent's configuration when creating a new child * cpuset cgroup. For historical reasons, this option can be * specified at mount time and thus is implemented here. */ CGRP_CPUSET_CLONE_CHILDREN, /* Control group has to be frozen. */ CGRP_FREEZE, /* Cgroup is frozen. */ CGRP_FROZEN, }; /* cgroup_root->flags */ enum { CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ /* * Consider namespaces as delegation boundaries. If this flag is * set, controller specific interface files in a namespace root * aren't writeable from inside the namespace. */ CGRP_ROOT_NS_DELEGATE = (1 << 3), /* * Enable cpuset controller in v1 cgroup to use v2 behavior. */ CGRP_ROOT_CPUSET_V2_MODE = (1 << 4), /* * Enable legacy local memory.events. 
*/ CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5), }; /* cftype->flags */ enum { CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ CFTYPE_NS_DELEGATABLE = (1 << 2), /* writeable beyond delegation boundaries */ CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */ /* internal flags, do not use outside cgroup core proper */ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ }; /* * cgroup_file is the handle for a file instance created in a cgroup which * is used, for example, to generate file changed notifications. This can * be obtained by setting cftype->file_offset. */ struct cgroup_file { /* do not access any fields from outside cgroup core */ struct kernfs_node *kn; unsigned long notified_at; struct timer_list notify_timer; }; /* * Per-subsystem/per-cgroup state maintained by the system. This is the * fundamental structural building block that controllers deal with. * * Fields marked with "PI:" are public and immutable and may be accessed * directly without synchronization. */ struct cgroup_subsys_state { /* PI: the cgroup that this css is attached to */ struct cgroup *cgroup; /* PI: the cgroup subsystem that this css is attached to */ struct cgroup_subsys *ss; /* reference count - access via css_[try]get() and css_put() */ struct percpu_ref refcnt; /* siblings list anchored at the parent's ->children */ struct list_head sibling; struct list_head children; /* flush target list anchored at cgrp->rstat_css_list */ struct list_head rstat_css_node; /* * PI: Subsys-unique ID. 0 is unused and root is always 1. The * matching css can be looked up using css_from_id(). */ int id; unsigned int flags; /* * Monotonically increasing unique serial number which defines a * uniform order among all csses. It's guaranteed that all * ->children lists are in the ascending order of ->serial_nr and * used to allow interrupting and resuming iterations. */ u64 serial_nr; /* * Incremented by online self and children. Used to guarantee that * parents are not offlined before their children. */ atomic_t online_cnt; /* percpu_ref killing and RCU release */ struct work_struct destroy_work; struct rcu_work destroy_rwork; /* * PI: the parent css. Placed here for cache proximity to following * fields of the containing structure. */ struct cgroup_subsys_state *parent; }; /* * A css_set is a structure holding pointers to a set of * cgroup_subsys_state objects. This saves space in the task struct * object and speeds up fork()/exit(), since a single inc/dec and a * list_add()/del() can bump the reference count on the entire cgroup * set for a task. */ struct css_set { /* * Set of subsystem states, one for each subsystem. This array is * immutable after creation apart from the init_css_set during * subsystem registration (at boot time). */ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; /* reference count */ refcount_t refcount; /* * For a domain cgroup, the following points to self. If threaded, * to the matching cset of the nearest domain ancestor. The * dom_cset provides access to the domain cgroup and its csses to * which domain level resource consumptions should be charged. 
*/ struct css_set *dom_cset; /* the default cgroup associated with this css_set */ struct cgroup *dfl_cgrp; /* internal task count, protected by css_set_lock */ int nr_tasks; /* * Lists running through all tasks using this cgroup group. * mg_tasks lists tasks which belong to this cset but are in the * process of being migrated out or in. Protected by * css_set_rwsem, but, during migration, once tasks are moved to * mg_tasks, it can be read safely while holding cgroup_mutex. */ struct list_head tasks; struct list_head mg_tasks; struct list_head dying_tasks; /* all css_task_iters currently walking this cset */ struct list_head task_iters; /* * On the default hierarchy, ->subsys[ssid] may point to a css * attached to an ancestor instead of the cgroup this css_set is * associated with. The following node is anchored at * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to * iterate through all css's attached to a given cgroup. */ struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; /* all threaded csets whose ->dom_cset points to this cset */ struct list_head threaded_csets; struct list_head threaded_csets_node; /* * List running through all cgroup groups in the same hash * slot. Protected by css_set_lock */ struct hlist_node hlist; /* * List of cgrp_cset_links pointing at cgroups referenced from this * css_set. Protected by css_set_lock. */ struct list_head cgrp_links; /* * List of csets participating in the on-going migration either as * source or destination. Protected by cgroup_mutex. */ struct list_head mg_src_preload_node; struct list_head mg_dst_preload_node; struct list_head mg_node; /* * If this cset is acting as the source of migration the following * two fields are set. mg_src_cgrp and mg_dst_cgrp are * respectively the source and destination cgroups of the on-going * migration. mg_dst_cset is the destination cset the target tasks * on this cset should be migrated to. Protected by cgroup_mutex. */ struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; /* dead and being drained, ignore for migration */ bool dead; /* For RCU-protected deletion */ struct rcu_head rcu_head; }; struct cgroup_base_stat { struct task_cputime cputime; }; /* * rstat - cgroup scalable recursive statistics. Accounting is done * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the * hierarchy on reads. * * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are * linked into the updated tree. On the following read, propagation only * considers and consumes the updated tree. This makes reading O(the * number of descendants which have been active since last read) instead of * O(the total number of descendants). * * This is important because there can be a lot of (draining) cgroups which * aren't active and stat may be read frequently. The combination can * become very expensive. By propagating selectively, increasing reading * frequency decreases the cost of each read. * * This struct hosts both the fields which implement the above - * updated_children and updated_next - and the fields which track basic * resource statistics on top of it - bsync, bstat and last_bstat. */ struct cgroup_rstat_cpu { /* * ->bsync protects ->bstat. These are the only fields which get * updated in the hot path. */ struct u64_stats_sync bsync; struct cgroup_base_stat bstat; /* * Snapshots at the last reading. These are used to calculate the * deltas to propagate to the global counters.
*/ struct cgroup_base_stat last_bstat; /* * Child cgroups with stat updates on this cpu since the last read * are linked on the parent's ->updated_children through * ->updated_next. * * In addition to being more compact, singly-linked list pointing * to the cgroup makes it unnecessary for each per-cpu struct to * point back to the associated cgroup. * * Protected by per-cpu cgroup_rstat_cpu_lock. */ struct cgroup *updated_children; /* terminated by self cgroup */ struct cgroup *updated_next; /* NULL iff not on the list */ }; struct cgroup_freezer_state { /* Should the cgroup and its descendants be frozen. */ bool freeze; /* Should the cgroup actually be frozen? */ int e_freeze; /* Fields below are protected by css_set_lock */ /* Number of frozen descendant cgroups */ int nr_frozen_descendants; /* * Number of tasks, which are counted as frozen: * frozen, SIGSTOPped, and PTRACEd. */ int nr_frozen_tasks; }; struct cgroup { /* self css with NULL ->ss, points back to this cgroup */ struct cgroup_subsys_state self; unsigned long flags; /* "unsigned long" so bitops work */ /* * idr allocated in-hierarchy ID. * * ID 0 is not used, the ID of the root cgroup is always 1, and a * new cgroup will be assigned with a smallest available ID. * * Allocating/Removing ID must be protected by cgroup_mutex. */ int id; /* * The depth this cgroup is at. The root is at depth zero and each * step down the hierarchy increments the level. This along with * ancestor_ids[] can determine whether a given cgroup is a * descendant of another without traversing the hierarchy. */ int level; /* Maximum allowed descent tree depth */ int max_depth; /* * Keep track of total numbers of visible and dying descent cgroups. * Dying cgroups are cgroups which were deleted by a user, * but are still existing because someone else is holding a reference. * max_descendants is a maximum allowed number of descent cgroups. * * nr_descendants and nr_dying_descendants are protected * by cgroup_mutex and css_set_lock. It's fine to read them holding * any of cgroup_mutex and css_set_lock; for writing both locks * should be held. */ int nr_descendants; int nr_dying_descendants; int max_descendants; /* * Each non-empty css_set associated with this cgroup contributes * one to nr_populated_csets. The counter is zero iff this cgroup * doesn't have any tasks. * * All children which have non-zero nr_populated_csets and/or * nr_populated_children of their own contribute one to either * nr_populated_domain_children or nr_populated_threaded_children * depending on their type. Each counter is zero iff all cgroups * of the type in the subtree proper don't have any tasks. */ int nr_populated_csets; int nr_populated_domain_children; int nr_populated_threaded_children; int nr_threaded_children; /* # of live threaded child cgroups */ struct kernfs_node *kn; /* cgroup kernfs entry */ struct cgroup_file procs_file; /* handle for "cgroup.procs" */ struct cgroup_file events_file; /* handle for "cgroup.events" */ /* * The bitmask of subsystems enabled on the child cgroups. * ->subtree_control is the one configured through * "cgroup.subtree_control" while ->child_ss_mask is the effective * one which may have more subsystems enabled. Controller knobs * are made available iff it's enabled in ->subtree_control. 
*/ u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; /* Private pointers for each registered subsystem */ struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; struct cgroup_root *root; /* * List of cgrp_cset_links pointing at css_sets with tasks in this * cgroup. Protected by css_set_lock. */ struct list_head cset_links; /* * On the default hierarchy, a css_set for a cgroup with some * subsys disabled will point to css's which are associated with * the closest ancestor which has the subsys enabled. The * following lists all css_sets which point to this cgroup's css * for the given subsystem. */ struct list_head e_csets[CGROUP_SUBSYS_COUNT]; /* * If !threaded, self. If threaded, it points to the nearest * domain ancestor. Inside a threaded subtree, cgroups are exempt * from process granularity and no-internal-task constraint. * Domain level resource consumptions which aren't tied to a * specific task are charged to the dom_cgrp. */ struct cgroup *dom_cgrp; struct cgroup *old_dom_cgrp; /* used while enabling threaded */ /* per-cpu recursive resource statistics */ struct cgroup_rstat_cpu __percpu *rstat_cpu; struct list_head rstat_css_list; /* cgroup basic resource statistics */ struct cgroup_base_stat pending_bstat; /* pending from children */ struct cgroup_base_stat bstat; struct prev_cputime prev_cputime; /* for printing out cputime */ /* * list of pidlists, up to two for each namespace (one for procs, one * for tasks); created on demand. */ struct list_head pidlists; struct mutex pidlist_mutex; /* used to wait for offlining of csses */ wait_queue_head_t offline_waitq; /* used to schedule release agent */ struct work_struct release_agent_work; /* used to track pressure stalls */ struct psi_group psi; /* used to store eBPF programs */ struct cgroup_bpf bpf; /* If there is block congestion on this cgroup. */ atomic_t congestion_count; /* Used to store internal freezer state */ struct cgroup_freezer_state freezer; /* ids of the ancestors at each level including self */ int ancestor_ids[]; }; /* * A cgroup_root represents the root of a cgroup hierarchy, and may be * associated with a kernfs_root to form an active hierarchy. This is * internal to cgroup core. Don't access directly from controllers. */ struct cgroup_root { struct kernfs_root *kf_root; /* The bitmask of subsystems attached to this hierarchy */ unsigned int subsys_mask; /* Unique id for this hierarchy. */ int hierarchy_id; /* The root cgroup. Root is destroyed on its release. */ struct cgroup cgrp; /* for cgrp->ancestor_ids[0] */ int cgrp_ancestor_id_storage; /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ atomic_t nr_cgrps; /* A list running through the active hierarchies */ struct list_head root_list; /* Hierarchy-specific flags */ unsigned int flags; /* IDs for cgroups in this hierarchy */ struct idr cgroup_idr; /* The path to use for release notifications. */ char release_agent_path[PATH_MAX]; /* The name for this hierarchy - may be empty */ char name[MAX_CGROUP_ROOT_NAMELEN]; }; /* * struct cftype: handler definitions for cgroup control files * * When reading/writing to a file: * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata * - the 'cftype' of the file is file->f_path.dentry->d_fsdata */ struct cftype { /* * By convention, the name should begin with the name of the * subsystem, followed by a period. Zero length string indicates * end of cftype array.
*/ char name[MAX_CFTYPE_NAME]; unsigned long private; /* * The maximum length of string, excluding trailing nul, that can * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed. */ size_t max_write_len; /* CFTYPE_* flags */ unsigned int flags; /* * If non-zero, should contain the offset from the start of css to * a struct cgroup_file field. cgroup will record the handle of * the created file into it. The recorded handle can be used as * long as the containing css remains accessible. */ unsigned int file_offset; /* * Fields used for internal bookkeeping. Initialized automatically * during registration. */ struct cgroup_subsys *ss; /* NULL for cgroup core files */ struct list_head node; /* anchored at ss->cfts */ struct kernfs_ops *kf_ops; int (*open)(struct kernfs_open_file *of); void (*release)(struct kernfs_open_file *of); /* * read_u64() is a shortcut for the common case of returning a * single integer. Use it in place of read() */ u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft); /* * read_s64() is a signed version of read_u64() */ s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft); /* generic seq_file read interface */ int (*seq_show)(struct seq_file *sf, void *v); /* optional ops, implement all or none */ void *(*seq_start)(struct seq_file *sf, loff_t *ppos); void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos); void (*seq_stop)(struct seq_file *sf, void *v); /* * write_u64() is a shortcut for the common case of accepting * a single integer (as parsed by simple_strtoull) from * userspace. Use in place of write(); return 0 or error. */ int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft, u64 val); /* * write_s64() is a signed version of write_u64() */ int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft, s64 val); /* * write() is the generic write callback which maps directly to * kernfs write operation and overrides all other operations. * Maximum write size is determined by ->max_write_len. Use * of_css/cft() to access the associated css and cft. */ ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off); __poll_t (*poll)(struct kernfs_open_file *of, struct poll_table_struct *pt); #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key lockdep_key; #endif }; /* * Control Group subsystem type. 
* See Documentation/admin-guide/cgroup-v1/cgroups.rst for details */ struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); int (*css_online)(struct cgroup_subsys_state *css); void (*css_offline)(struct cgroup_subsys_state *css); void (*css_released)(struct cgroup_subsys_state *css); void (*css_free)(struct cgroup_subsys_state *css); void (*css_reset)(struct cgroup_subsys_state *css); void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu); int (*css_extra_stat_show)(struct seq_file *seq, struct cgroup_subsys_state *css); int (*can_attach)(struct cgroup_taskset *tset); void (*cancel_attach)(struct cgroup_taskset *tset); void (*attach)(struct cgroup_taskset *tset); void (*post_attach)(void); int (*can_fork)(struct task_struct *task); void (*cancel_fork)(struct task_struct *task); void (*fork)(struct task_struct *task); void (*exit)(struct task_struct *task); void (*release)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); bool early_init:1; /* * If %true, the controller, on the default hierarchy, doesn't show * up in "cgroup.controllers" or "cgroup.subtree_control", is * implicitly enabled on all cgroups on the default hierarchy, and * bypasses the "no internal process" constraint. This is for * utility type controllers which are transparent to userland. * * An implicit controller can be stolen from the default hierarchy * anytime and thus must be okay with offline csses from previous * hierarchies coexisting with csses for the current one. */ bool implicit_on_dfl:1; /* * If %true, the controller supports threaded mode on the default * hierarchy. In a threaded subtree, both process granularity and * no-internal-process constraint are ignored and threaded * controllers should be able to handle that. * * Note that as an implicit controller is automatically enabled on * all cgroups on the default hierarchy, it should also be * threaded. implicit && !threaded is not supported. */ bool threaded:1; /* * If %false, this subsystem is properly hierarchical - * configuration, resource accounting and restriction on a parent * cgroup cover those of its children. If %true, hierarchy support * is broken in some ways - some subsystems ignore hierarchy * completely while others are only implemented half-way. * * It's now disallowed to create nested cgroups if the subsystem is * broken and cgroup core will emit a warning message in such * cases. Eventually, all subsystems will be made properly * hierarchical and this will go away. */ bool broken_hierarchy:1; bool warned_broken_hierarchy:1; /* the following two fields are initialized automatically during boot */ int id; const char *name; /* optional, initialized automatically during boot if not set */ const char *legacy_name; /* link to parent, protected by cgroup_lock() */ struct cgroup_root *root; /* idr for css->id */ struct idr css_idr; /* * List of cftypes. Each entry is the first entry of an array * terminated by zero length name. */ struct list_head cfts; /* * Base cftypes which are automatically registered. The two can * point to the same array. */ struct cftype *dfl_cftypes; /* for the default hierarchy */ struct cftype *legacy_cftypes; /* for the legacy hierarchies */ /* * A subsystem may depend on other subsystems. When such a subsystem * is enabled on a cgroup, the depended-upon subsystems are enabled * together if available. Subsystems enabled due to dependency are * not visible to userland until explicitly enabled.
The following * specifies the mask of subsystems that this one depends on. */ unsigned int depends_on; }; extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; /** * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups * @tsk: target task * * Allows cgroup operations to synchronize against threadgroup changes * using a percpu_rw_semaphore. */ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) { percpu_down_read(&cgroup_threadgroup_rwsem); } /** * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups * @tsk: target task * * Counterpart of cgroup_threadgroup_change_begin(). */ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) { percpu_up_read(&cgroup_threadgroup_rwsem); } #else /* CONFIG_CGROUPS */ #define CGROUP_SUBSYS_COUNT 0 static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) { might_sleep(); } static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} #endif /* CONFIG_CGROUPS */ #ifdef CONFIG_SOCK_CGROUP_DATA /* * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains * per-socket cgroup information except for memcg association. * * On legacy hierarchies, net_prio and net_cls controllers directly set * attributes on each sock which can then be tested by the network layer. * On the default hierarchy, each sock is associated with the cgroup it was * created in and the networking layer can match the cgroup directly. * * To avoid carrying all three cgroup related fields separately in sock, * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer. * On boot, sock_cgroup_data records the cgroup that the sock was created * in so that cgroup2 matches can be made; however, once either net_prio or * net_cls starts being used, the area is overridden to carry prioidx and/or * classid. The two modes are distinguished by whether the lowest bit is * set. A clear bit indicates a cgroup pointer while a set bit indicates * prioidx and classid. * * While userland may start using net_prio or net_cls at any time, once * either is used, cgroup2 matching no longer works. There is no reason to * mix the two and this is in line with how legacy and v2 compatibility is * handled. On mode switch, cgroup references which are already being * pointed to by socks may be leaked. While this can be remedied by adding * synchronization around sock_cgroup_data, given that the number of leaked * cgroups is bounded and highly unlikely to be high, this seems to be the * better trade-off. */ struct sock_cgroup_data { union { #ifdef __LITTLE_ENDIAN struct { u8 is_data : 1; u8 no_refcnt : 1; u8 unused : 6; u8 padding; u16 prioidx; u32 classid; } __packed; #else struct { u32 classid; u16 prioidx; u8 padding; u8 unused : 6; u8 no_refcnt : 1; u8 is_data : 1; } __packed; #endif u64 val; }; }; /* * There's a theoretical window where the following accessors race with * updaters and return part of the previous pointer as the prioidx or * classid. Such races are short-lived and the result isn't critical. */ static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) { /* fallback to 1 which is always the ID of the root cgroup */ return (skcd->is_data & 1) ? skcd->prioidx : 1; } static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd) { /* fallback to 0 which is the unconfigured default classid */ return (skcd->is_data & 1) ? skcd->classid : 0; } /* * If invoked concurrently, the updaters may clobber each other. The * caller is responsible for synchronization.
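 */

/*
 * Editorial sketch, not part of the original header (assumes
 * CONFIG_SOCK_CGROUP_DATA): reading the overloaded fields through the
 * accessors above. Until net_prio or net_cls flips is_data, both fall
 * back to the root-cgroup defaults. "demo_read_skcd" is a hypothetical
 * name used purely for illustration.
 */
#include <linux/cgroup-defs.h>

static void demo_read_skcd(const struct sock_cgroup_data *skcd,
			   u16 *prioidx, u32 *classid)
{
	*prioidx = sock_cgroup_prioidx(skcd);	/* 1 until net_prio is used */
	*classid = sock_cgroup_classid(skcd);	/* 0 until net_cls is used */
}

/* The corresponding updaters, which the caller must serialize: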
*/ static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, u16 prioidx) { struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; if (sock_cgroup_prioidx(&skcd_buf) == prioidx) return; if (!(skcd_buf.is_data & 1)) { skcd_buf.val = 0; skcd_buf.is_data = 1; } skcd_buf.prioidx = prioidx; WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ } static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd, u32 classid) { struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; if (sock_cgroup_classid(&skcd_buf) == classid) return; if (!(skcd_buf.is_data & 1)) { skcd_buf.val = 0; skcd_buf.is_data = 1; } skcd_buf.classid = classid; WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ } #else /* CONFIG_SOCK_CGROUP_DATA */ struct sock_cgroup_data { }; #endif /* CONFIG_SOCK_CGROUP_DATA */ #endif /* _LINUX_CGROUP_DEFS_H */ compaction.h 0000644 00000016527 14722070374 0007072 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_COMPACTION_H #define _LINUX_COMPACTION_H /* * Determines how hard direct compaction should try to succeed. * Lower value means higher priority, analogously to reclaim priority. */ enum compact_priority { COMPACT_PRIO_SYNC_FULL, MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL, COMPACT_PRIO_SYNC_LIGHT, MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT, DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT, COMPACT_PRIO_ASYNC, INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC }; /* Return values for compact_zone() and try_to_compact_pages() */ /* When adding new states, please adjust include/trace/events/compaction.h */ enum compact_result { /* For more detailed tracepoint output - internal to compaction */ COMPACT_NOT_SUITABLE_ZONE, /* * compaction didn't start as it was not possible or direct reclaim * was more suitable */ COMPACT_SKIPPED, /* compaction didn't start as it was deferred due to past failures */ COMPACT_DEFERRED, /* compaction not active last round */ COMPACT_INACTIVE = COMPACT_DEFERRED, /* For more detailed tracepoint output - internal to compaction */ COMPACT_NO_SUITABLE_PAGE, /* compaction should continue to another pageblock */ COMPACT_CONTINUE, /* * The full zone was scanned but compaction wasn't successful in * compacting suitable pages. */ COMPACT_COMPLETE, /* * direct compaction has scanned part of the zone but wasn't successful * in compacting suitable pages. */ COMPACT_PARTIAL_SKIPPED, /* compaction terminated prematurely due to lock contention */ COMPACT_CONTENDED, /* * direct compaction terminated after concluding that the allocation * should now succeed */ COMPACT_SUCCESS, }; struct alloc_context; /* in mm/internal.h */ /* * Number of free order-0 pages that should be available above given watermark * to make sure compaction has reasonable chance of not running out of free * pages that it needs to isolate as migration target during its work. */ static inline unsigned long compact_gap(unsigned int order) { /* * Although all the isolations for migration are temporary, compaction * free scanner may have up to 1 << order pages on its list and then * try to split an (order - 1) free page. At that point, a gap of * 1 << order might not be enough, so it's safer to require twice that * amount. Note that the number of pages on the list is also * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum * that the migrate scanner can have isolated on migrate list, and free * scanner is only invoked when the number of isolated free pages is * lower than that.
But it's not worth complicating the formula here * as a bigger gap for higher orders than strictly necessary can also * improve chances of compaction success. */ return 2UL << order; } #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, struct page **page); extern void reset_isolation_suitable(pg_data_t *pgdat); extern enum compact_result compaction_suitable(struct zone *zone, int order, unsigned int alloc_flags, int classzone_idx); extern void defer_compaction(struct zone *zone, int order); extern bool compaction_deferred(struct zone *zone, int order); extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); extern bool compaction_restarting(struct zone *zone, int order); /* Compaction has made some progress and retrying makes sense */ static inline bool compaction_made_progress(enum compact_result result) { /* * Even though this might sound confusing, this in fact tells us * that the compaction successfully isolated and migrated some * pageblocks. */ if (result == COMPACT_SUCCESS) return true; return false; } /* Compaction has failed and it doesn't make much sense to keep retrying. */ static inline bool compaction_failed(enum compact_result result) { /* All zones were scanned completely and still no result. */ if (result == COMPACT_COMPLETE) return true; return false; } /* Compaction needs reclaim to be performed first, so it can continue. */ static inline bool compaction_needs_reclaim(enum compact_result result) { /* * Compaction backed off due to watermark checks for order-0 * so the regular reclaim has to try harder and reclaim something. */ if (result == COMPACT_SKIPPED) return true; return false; } /* * Compaction has backed off for some reason after doing some work or none * at all. It might be throttling or lock contention. Retrying might still be * worthwhile, but with a higher priority if allowed. */ static inline bool compaction_withdrawn(enum compact_result result) { /* * If compaction is deferred for high-order allocations, it is * because sync compaction recently failed. If this is the case * and the caller requested a THP allocation, we do not want * to heavily disrupt the system, so we fail the allocation * instead of entering direct reclaim. */ if (result == COMPACT_DEFERRED) return true; /* * If compaction in async mode encounters contention or blocks a higher * priority task, we back off early rather than cause stalls. */ if (result == COMPACT_CONTENDED) return true; /* * Page scanners have met but we haven't scanned full zones so this * is a back off in fact.
*/ if (result == COMPACT_PARTIAL_SKIPPED) return true; return false; } bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags); extern int kcompactd_run(int nid); extern void kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); #else static inline void reset_isolation_suitable(pg_data_t *pgdat) { } static inline enum compact_result compaction_suitable(struct zone *zone, int order, int alloc_flags, int classzone_idx) { return COMPACT_SKIPPED; } static inline void defer_compaction(struct zone *zone, int order) { } static inline bool compaction_deferred(struct zone *zone, int order) { return true; } static inline bool compaction_made_progress(enum compact_result result) { return false; } static inline bool compaction_failed(enum compact_result result) { return false; } static inline bool compaction_needs_reclaim(enum compact_result result) { return false; } static inline bool compaction_withdrawn(enum compact_result result) { return true; } static inline int kcompactd_run(int nid) { return 0; } static inline void kcompactd_stop(int nid) { } static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) { } #endif /* CONFIG_COMPACTION */ struct node; #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) extern int compaction_register_node(struct node *node); extern void compaction_unregister_node(struct node *node); #else static inline int compaction_register_node(struct node *node) { return 0; } static inline void compaction_unregister_node(struct node *node) { } #endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */ #endif /* _LINUX_COMPACTION_H */ phy_led_triggers.h 0000644 00000002013 14722070374 0010251 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Copyright (C) 2016 National Instruments Corp. */ #ifndef __PHY_LED_TRIGGERS #define __PHY_LED_TRIGGERS struct phy_device; #ifdef CONFIG_LED_TRIGGER_PHY #include <linux/leds.h> #include <linux/phy.h> #define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 11 #define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \ FIELD_SIZEOF(struct mdio_device, addr)+\ PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) struct phy_led_trigger { struct led_trigger trigger; char name[PHY_LINK_LED_TRIGGER_NAME_SIZE]; unsigned int speed; }; extern int phy_led_triggers_register(struct phy_device *phy); extern void phy_led_triggers_unregister(struct phy_device *phy); extern void phy_led_trigger_change_speed(struct phy_device *phy); #else static inline int phy_led_triggers_register(struct phy_device *phy) { return 0; } static inline void phy_led_triggers_unregister(struct phy_device *phy) { } static inline void phy_led_trigger_change_speed(struct phy_device *phy) { } #endif #endif nls.h 0000644 00000006132 14722070374 0005521 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NLS_H #define _LINUX_NLS_H #include <linux/init.h> /* Unicode has changed over the years. Unicode code points no longer * fit into 16 bits; as of Unicode 5 valid code points range from 0 * to 0x10ffff (17 planes, where each plane holds 65536 code points). * * The original decision to represent Unicode characters as 16-bit * wchar_t values is now outdated. But plane 0 still includes the * most commonly used characters, so we will retain it. The newer * 32-bit unicode_t type can be used when it is necessary to * represent the full Unicode character set. 
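 */

/*
 * Editorial sketch, not part of the original header: decoding one
 * UTF-8 sequence into a full 32-bit code point with the
 * utf8_to_utf32() helper declared below. "demo_first_codepoint" is a
 * hypothetical name used purely for illustration.
 */
#include <linux/nls.h>

static int demo_first_codepoint(const u8 *s, int len, unicode_t *out)
{
	/* returns the octet length of the sequence, or -1 if invalid */
	return utf8_to_utf32(s, len, out);
}

/* The nls.h definitions follow: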
*/ /* Plane-0 Unicode character */ typedef u16 wchar_t; #define MAX_WCHAR_T 0xffff /* Arbitrary Unicode character */ typedef u32 unicode_t; struct nls_table { const char *charset; const char *alias; int (*uni2char) (wchar_t uni, unsigned char *out, int boundlen); int (*char2uni) (const unsigned char *rawstring, int boundlen, wchar_t *uni); const unsigned char *charset2lower; const unsigned char *charset2upper; struct module *owner; struct nls_table *next; }; /* this value holds the maximum number of octets per character for any charset */ #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */ /* Byte order for UTF-16 strings */ enum utf16_endian { UTF16_HOST_ENDIAN, UTF16_LITTLE_ENDIAN, UTF16_BIG_ENDIAN }; /* nls_base.c */ extern int __register_nls(struct nls_table *, struct module *); extern int unregister_nls(struct nls_table *); extern struct nls_table *load_nls(const char *charset); extern void unload_nls(struct nls_table *); extern struct nls_table *load_nls_default(void); #define register_nls(nls) __register_nls((nls), THIS_MODULE) extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu); extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen); extern int utf8s_to_utf16s(const u8 *s, int len, enum utf16_endian endian, wchar_t *pwcs, int maxlen); extern int utf16s_to_utf8s(const wchar_t *pwcs, int len, enum utf16_endian endian, u8 *s, int maxlen); static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c) { unsigned char nc = t->charset2lower[c]; return nc ? nc : c; } static inline unsigned char nls_toupper(struct nls_table *t, unsigned char c) { unsigned char nc = t->charset2upper[c]; return nc ? nc : c; } static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1, const unsigned char *s2, int len) { while (len--) { if (nls_tolower(t, *s1++) != nls_tolower(t, *s2++)) return 1; } return 0; } /* * nls_nullsize - return length of null character for codepage * @codepage - codepage for which to return length of NULL terminator * * Since we can't guarantee that the null terminator will be a particular * length, we have to check against the codepage. If there's a problem * determining it, assume a single-byte NULL terminator. */ static inline int nls_nullsize(const struct nls_table *codepage) { int charlen; char tmp[NLS_MAX_CHARSET_SIZE]; charlen = codepage->uni2char(0, tmp, NLS_MAX_CHARSET_SIZE); return charlen > 0 ?
charlen : 1; } #define MODULE_ALIAS_NLS(name) MODULE_ALIAS("nls_" __stringify(name)) #endif /* _LINUX_NLS_H */ wait_bit.h 0000644 00000026314 14722070374 0006533 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_WAIT_BIT_H #define _LINUX_WAIT_BIT_H /* * Linux wait-bit related types and methods: */ #include <linux/wait.h> struct wait_bit_key { void *flags; int bit_nr; unsigned long timeout; }; struct wait_bit_queue_entry { struct wait_bit_key key; struct wait_queue_entry wq_entry; }; #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ { .flags = word, .bit_nr = bit, } typedef int wait_bit_action_f(struct wait_bit_key *key, int mode); void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit); int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); void wake_up_bit(void *word, int bit); int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode); int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout); int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode); struct wait_queue_head *bit_waitqueue(void *word, int bit); extern void __init wait_bit_init(void); int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); #define DEFINE_WAIT_BIT(name, word, bit) \ struct wait_bit_queue_entry name = { \ .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \ .wq_entry = { \ .private = current, \ .func = wake_bit_function, \ .entry = \ LIST_HEAD_INIT((name).wq_entry.entry), \ }, \ } extern int bit_wait(struct wait_bit_key *key, int mode); extern int bit_wait_io(struct wait_bit_key *key, int mode); extern int bit_wait_timeout(struct wait_bit_key *key, int mode); extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode); /** * wait_on_bit - wait for a bit to be cleared * @word: the word being waited on, a kernel virtual address * @bit: the bit of the word being waited on * @mode: the task state to sleep in * * There is a standard hashed waitqueue table for generic use. This * is the part of the hashtable's accessor API that waits on a bit. * For instance, if one were to have waiters on a bitflag, one would * call wait_on_bit() in threads waiting for the bit to clear. * One uses wait_on_bit() where one is waiting for the bit to clear, * but has no intention of setting it. * Returned value will be zero if the bit was cleared, or non-zero * if the process received a signal and the mode permitted wakeup * on that signal. */ static inline int wait_on_bit(unsigned long *word, int bit, unsigned mode) { might_sleep(); if (!test_bit(bit, word)) return 0; return out_of_line_wait_on_bit(word, bit, bit_wait, mode); } /** * wait_on_bit_io - wait for a bit to be cleared * @word: the word being waited on, a kernel virtual address * @bit: the bit of the word being waited on * @mode: the task state to sleep in * * Use the standard hashed waitqueue table to wait for a bit * to be cleared. This is similar to wait_on_bit(), but calls * io_schedule() instead of schedule() for the actual waiting. * * Returned value will be zero if the bit was cleared, or non-zero * if the process received a signal and the mode permitted wakeup * on that signal. 
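 */

/*
 * Editorial sketch, not part of the original header: a sleeper/waker
 * pair built on wait_on_bit() above and clear_and_wake_up_bit()
 * (defined later in this header). "DEMO_BUSY", "demo_wait_idle" and
 * "demo_mark_idle" are hypothetical names used purely for
 * illustration.
 */
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define DEMO_BUSY	0	/* bit number within the flags word */

static int demo_wait_idle(unsigned long *flags)
{
	/* 0 once the bit is clear; non-zero if a signal woke us */
	return wait_on_bit(flags, DEMO_BUSY, TASK_INTERRUPTIBLE);
}

static void demo_mark_idle(unsigned long *flags)
{
	/* clear with release semantics, then wake any waiters */
	clear_and_wake_up_bit(DEMO_BUSY, flags);
}

/* wait_on_bit_io(), as documented above: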
*/ static inline int wait_on_bit_io(unsigned long *word, int bit, unsigned mode) { might_sleep(); if (!test_bit(bit, word)) return 0; return out_of_line_wait_on_bit(word, bit, bit_wait_io, mode); } /** * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses * @word: the word being waited on, a kernel virtual address * @bit: the bit of the word being waited on * @mode: the task state to sleep in * @timeout: timeout, in jiffies * * Use the standard hashed waitqueue table to wait for a bit * to be cleared. This is similar to wait_on_bit(), except also takes a * timeout parameter. * * Returned value will be zero if the bit was cleared before the * @timeout elapsed, or non-zero if the @timeout elapsed or process * received a signal and the mode permitted wakeup on that signal. */ static inline int wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, unsigned long timeout) { might_sleep(); if (!test_bit(bit, word)) return 0; return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout, mode, timeout); } /** * wait_on_bit_action - wait for a bit to be cleared * @word: the word being waited on, a kernel virtual address * @bit: the bit of the word being waited on * @action: the function used to sleep, which may take special actions * @mode: the task state to sleep in * * Use the standard hashed waitqueue table to wait for a bit * to be cleared, and allow the waiting action to be specified. * This is like wait_on_bit() but allows fine control of how the waiting * is done. * * Returned value will be zero if the bit was cleared, or non-zero * if the process received a signal and the mode permitted wakeup * on that signal. */ static inline int wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action, unsigned mode) { might_sleep(); if (!test_bit(bit, word)) return 0; return out_of_line_wait_on_bit(word, bit, action, mode); } /** * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it * @word: the word being waited on, a kernel virtual address * @bit: the bit of the word being waited on * @mode: the task state to sleep in * * There is a standard hashed waitqueue table for generic use. This * is the part of the hashtable's accessor API that waits on a bit * when one intends to set it, for instance, trying to lock bitflags. * For instance, if one were to have waiters trying to set bitflag * and waiting for it to clear before setting it, one would call * wait_on_bit() in threads waiting to be able to set the bit. * One uses wait_on_bit_lock() where one is waiting for the bit to * clear with the intention of setting it, and when done, clearing it. * * Returns zero if the bit was (eventually) found to be clear and was * set. Returns non-zero if a signal was delivered to the process and * the @mode allows that signal to wake the process. */ static inline int wait_on_bit_lock(unsigned long *word, int bit, unsigned mode) { might_sleep(); if (!test_and_set_bit(bit, word)) return 0; return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode); } /** * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it * @word: the word being waited on, a kernel virtual address * @bit: the bit of the word being waited on * @mode: the task state to sleep in * * Use the standard hashed waitqueue table to wait for a bit * to be cleared and then to atomically set it. This is similar * to wait_on_bit(), but calls io_schedule() instead of schedule() * for the actual waiting. 
* * Returns zero if the bit was (eventually) found to be clear and was * set. Returns non-zero if a signal was delivered to the process and * the @mode allows that signal to wake the process. */ static inline int wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode) { might_sleep(); if (!test_and_set_bit(bit, word)) return 0; return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode); } /** * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it * @word: the word being waited on, a kernel virtual address * @bit: the bit of the word being waited on * @action: the function used to sleep, which may take special actions * @mode: the task state to sleep in * * Use the standard hashed waitqueue table to wait for a bit * to be cleared and then to set it, and allow the waiting action * to be specified. * This is like wait_on_bit() but allows fine control of how the waiting * is done. * * Returns zero if the bit was (eventually) found to be clear and was * set. Returns non-zero if a signal was delivered to the process and * the @mode allows that signal to wake the process. */ static inline int wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action, unsigned mode) { might_sleep(); if (!test_and_set_bit(bit, word)) return 0; return out_of_line_wait_on_bit_lock(word, bit, action, mode); } extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags); extern void wake_up_var(void *var); extern wait_queue_head_t *__var_waitqueue(void *p); #define ___wait_var_event(var, condition, state, exclusive, ret, cmd) \ ({ \ __label__ __out; \ struct wait_queue_head *__wq_head = __var_waitqueue(var); \ struct wait_bit_queue_entry __wbq_entry; \ long __ret = ret; /* explicit shadow */ \ \ init_wait_var_entry(&__wbq_entry, var, \ exclusive ? 
WQ_FLAG_EXCLUSIVE : 0); \ for (;;) { \ long __int = prepare_to_wait_event(__wq_head, \ &__wbq_entry.wq_entry, \ state); \ if (condition) \ break; \ \ if (___wait_is_interruptible(state) && __int) { \ __ret = __int; \ goto __out; \ } \ \ cmd; \ } \ finish_wait(__wq_head, &__wbq_entry.wq_entry); \ __out: __ret; \ }) #define __wait_var_event(var, condition) \ ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ schedule()) #define wait_var_event(var, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __wait_var_event(var, condition); \ } while (0) #define __wait_var_event_killable(var, condition) \ ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \ schedule()) #define wait_var_event_killable(var, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_var_event_killable(var, condition); \ __ret; \ }) #define __wait_var_event_timeout(var, condition, timeout) \ ___wait_var_event(var, ___wait_cond_timeout(condition), \ TASK_UNINTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) #define wait_var_event_timeout(var, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_var_event_timeout(var, condition, timeout); \ __ret; \ }) #define __wait_var_event_interruptible(var, condition) \ ___wait_var_event(var, condition, TASK_INTERRUPTIBLE, 0, 0, \ schedule()) #define wait_var_event_interruptible(var, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_var_event_interruptible(var, condition); \ __ret; \ }) /** * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit * * @bit: the bit of the word being waited on * @word: the word being waited on, a kernel virtual address * * You can use this helper if bitflags are manipulated atomically rather than * non-atomically under a lock. */ static inline void clear_and_wake_up_bit(int bit, void *word) { clear_bit_unlock(bit, word); /* See wake_up_bit() for which memory barrier you need to use. */ smp_mb__after_atomic(); wake_up_bit(word, bit); } #endif /* _LINUX_WAIT_BIT_H */ posix-timers.h 0000644 00000014474 14722070374 0007400 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _linux_POSIX_TIMERS_H #define _linux_POSIX_TIMERS_H #include <linux/spinlock.h> #include <linux/list.h> #include <linux/alarmtimer.h> #include <linux/timerqueue.h> struct kernel_siginfo; struct task_struct; /* * Bit fields within a clockid: * * The most significant 29 bits hold either a pid or a file descriptor. * * Bit 2 indicates whether a cpu clock refers to a thread or a process. * * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3. * * A clockid is invalid if bits 2, 1, and 0 are all set. 
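 */

/*
 * Editorial sketch, not part of the original header: packing and
 * unpacking a per-thread CPU clock id with the helpers defined just
 * below. For tid 42 and CPUCLOCK_PROF, bits 31..3 carry ~42, bit 2
 * marks "thread" and bits 1..0 carry the type.
 * "demo_thread_prof_clock" is a hypothetical name used purely for
 * illustration.
 */
#include <linux/posix-timers.h>

static clockid_t demo_thread_prof_clock(pid_t tid)
{
	clockid_t clk = make_thread_cpuclock(tid, CPUCLOCK_PROF);

	/* round trip: CPUCLOCK_PID(clk) == tid, CPUCLOCK_PERTHREAD(clk) set */
	return clk;
}

/* The clockid bit-field helpers: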
*/ #define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3)) #define CPUCLOCK_PERTHREAD(clock) \ (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0) #define CPUCLOCK_PERTHREAD_MASK 4 #define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK) #define CPUCLOCK_CLOCK_MASK 3 #define CPUCLOCK_PROF 0 #define CPUCLOCK_VIRT 1 #define CPUCLOCK_SCHED 2 #define CPUCLOCK_MAX 3 #define CLOCKFD CPUCLOCK_MAX #define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) static inline clockid_t make_process_cpuclock(const unsigned int pid, const clockid_t clock) { return ((~pid) << 3) | clock; } static inline clockid_t make_thread_cpuclock(const unsigned int tid, const clockid_t clock) { return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK); } static inline clockid_t fd_to_clockid(const int fd) { return make_process_cpuclock((unsigned int) fd, CLOCKFD); } static inline int clockid_to_fd(const clockid_t clk) { return ~(clk >> 3); } #ifdef CONFIG_POSIX_TIMERS /** * cpu_timer - Posix CPU timer representation for k_itimer * @node: timerqueue node to queue in the task/sig * @head: timerqueue head on which this timer is queued * @task: Pointer to target task * @elist: List head for the expiry list * @firing: Timer is currently firing */ struct cpu_timer { struct timerqueue_node node; struct timerqueue_head *head; struct task_struct *task; struct list_head elist; int firing; }; static inline bool cpu_timer_enqueue(struct timerqueue_head *head, struct cpu_timer *ctmr) { ctmr->head = head; return timerqueue_add(head, &ctmr->node); } static inline void cpu_timer_dequeue(struct cpu_timer *ctmr) { if (ctmr->head) { timerqueue_del(ctmr->head, &ctmr->node); ctmr->head = NULL; } } static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr) { return ctmr->node.expires; } static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp) { ctmr->node.expires = exp; } /** * posix_cputimer_base - Container per posix CPU clock * @nextevt: Earliest-expiration cache * @tqhead: timerqueue head for cpu_timers */ struct posix_cputimer_base { u64 nextevt; struct timerqueue_head tqhead; }; /** * posix_cputimers - Container for posix CPU timer related data * @bases: Base container for posix CPU clocks * @timers_active: Timers are queued. * @expiry_active: Timer expiry is active. 
 *			Used for process wide timers to avoid multiple
 *			tasks trying to handle expiry concurrently
 *
 * Used in task_struct and signal_struct
 */
struct posix_cputimers {
	struct posix_cputimer_base	bases[CPUCLOCK_MAX];
	unsigned int			timers_active;
	unsigned int			expiry_active;
};

static inline void posix_cputimers_init(struct posix_cputimers *pct)
{
	memset(pct, 0, sizeof(*pct));
	pct->bases[0].nextevt = U64_MAX;
	pct->bases[1].nextevt = U64_MAX;
	pct->bases[2].nextevt = U64_MAX;
}

void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit);

static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
					       u64 runtime)
{
	pct->bases[CPUCLOCK_SCHED].nextevt = runtime;
}

/* Init task static initializer */
#define INIT_CPU_TIMERBASE(b) {						\
	.nextevt	= U64_MAX,					\
}

#define INIT_CPU_TIMERBASES(b) {					\
	INIT_CPU_TIMERBASE(b[0]),					\
	INIT_CPU_TIMERBASE(b[1]),					\
	INIT_CPU_TIMERBASE(b[2]),					\
}

#define INIT_CPU_TIMERS(s)						\
	.posix_cputimers = {						\
		.bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases),	\
	},
#else
struct posix_cputimers { };
struct cpu_timer { };
#define INIT_CPU_TIMERS(s)
static inline void posix_cputimers_init(struct posix_cputimers *pct) { }
static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
					      u64 cpu_limit) { }
#endif

#define REQUEUE_PENDING 1

/**
 * struct k_itimer - POSIX.1b interval timer structure.
 * @list:		List head for binding the timer to signals->posix_timers
 * @t_hash:		Entry in the posix timer hash table
 * @it_lock:		Lock protecting the timer
 * @kclock:		Pointer to the k_clock struct handling this timer
 * @it_clock:		The posix timer clock id
 * @it_id:		The posix timer id for identifying the timer
 * @it_active:		Marker that timer is active
 * @it_overrun:		The overrun counter for pending signals
 * @it_overrun_last:	The overrun at the time of the last delivered signal
 * @it_requeue_pending:	Indicator that timer waits for being requeued on
 *			signal delivery
 * @it_sigev_notify:	The notify word of sigevent struct for signal delivery
 * @it_interval:	The interval for periodic timers
 * @it_signal:		Pointer to the creator's signal struct
 * @it_pid:		The pid of the process/task targeted by the signal
 * @it_process:		The task to wakeup on clock_nanosleep (CPU timers)
 * @sigq:		Pointer to preallocated sigqueue
 * @it:			Union representing the various posix timer type
 *			internals.
 * @rcu:		RCU head for freeing the timer.
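 *
 * (Note: the overrun counts back timer_getoverrun(2): @it_overrun
 * accumulates expirations that occur while a signal from this timer is
 * still pending, and @it_overrun_last holds the value reported for the
 * last delivered signal.)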
*/ struct k_itimer { struct list_head list; struct hlist_node t_hash; spinlock_t it_lock; const struct k_clock *kclock; clockid_t it_clock; timer_t it_id; int it_active; s64 it_overrun; s64 it_overrun_last; int it_requeue_pending; int it_sigev_notify; ktime_t it_interval; struct signal_struct *it_signal; union { struct pid *it_pid; struct task_struct *it_process; }; struct sigqueue *sigq; union { struct { struct hrtimer timer; } real; struct cpu_timer cpu; struct { struct alarm alarmtimer; } alarm; } it; struct rcu_head rcu; }; void run_posix_cpu_timers(void); void posix_cpu_timers_exit(struct task_struct *task); void posix_cpu_timers_exit_group(struct task_struct *task); void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, u64 *newval, u64 *oldval); void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); void posixtimer_rearm(struct kernel_siginfo *info); #endif msi.h 0000644 00000031536 14722070374 0005523 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_MSI_H #define LINUX_MSI_H #include <linux/kobject.h> #include <linux/list.h> struct msi_msg { u32 address_lo; /* low 32 bits of msi message address */ u32 address_hi; /* high 32 bits of msi message address */ u32 data; /* 16 bits of msi message data */ }; extern int pci_msi_ignore_mask; /* Helper functions */ struct irq_data; struct msi_desc; struct pci_dev; struct platform_msi_priv_data; void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); #ifdef CONFIG_GENERIC_MSI_IRQ void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); #else static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { } #endif typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc, struct msi_msg *msg); /** * platform_msi_desc - Platform device specific msi descriptor data * @msi_priv_data: Pointer to platform private data * @msi_index: The index of the MSI descriptor for multi MSI */ struct platform_msi_desc { struct platform_msi_priv_data *msi_priv_data; u16 msi_index; }; /** * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data * @msi_index: The index of the MSI descriptor */ struct fsl_mc_msi_desc { u16 msi_index; }; /** * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data * @dev_index: TISCI device index */ struct ti_sci_inta_msi_desc { u16 dev_index; }; /** * struct msi_desc - Descriptor structure for MSI based interrupts * @list: List head for management * @irq: The base interrupt number * @nvec_used: The number of vectors used * @dev: Pointer to the device which uses this descriptor * @msg: The last set MSI message cached for reuse * @affinity: Optional pointer to a cpu affinity mask for this descriptor * * @write_msi_msg: Callback that may be called when the MSI message * address or data changes * @write_msi_msg_data: Data parameter for the callback. * * @masked: [PCI MSI/X] Mask bits * @is_msix: [PCI MSI/X] True if MSI-X * @multiple: [PCI MSI/X] log2 num of messages allocated * @multi_cap: [PCI MSI/X] log2 num of messages supported * @maskbit: [PCI MSI/X] Mask-Pending bit supported? 
* @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq * @mask_pos: [PCI MSI] Mask register position * @mask_base: [PCI MSI-X] Mask register base address * @platform: [platform] Platform device specific msi descriptor data * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data * @inta: [INTA] TISCI based INTA specific msi descriptor data */ struct msi_desc { /* Shared device/bus type independent data */ struct list_head list; unsigned int irq; unsigned int nvec_used; struct device *dev; struct msi_msg msg; struct irq_affinity_desc *affinity; #ifdef CONFIG_IRQ_MSI_IOMMU const void *iommu_cookie; #endif void (*write_msi_msg)(struct msi_desc *entry, void *data); void *write_msi_msg_data; union { /* PCI MSI/X specific data */ struct { u32 masked; struct { u8 is_msix : 1; u8 multiple : 3; u8 multi_cap : 3; u8 maskbit : 1; u8 is_64 : 1; u8 is_virtual : 1; u16 entry_nr; unsigned default_irq; } msi_attrib; union { u8 mask_pos; void __iomem *mask_base; }; }; /* * Non PCI variants add their data structure here. New * entries need to use a named structure. We want * proper name spaces for this. The PCI part is * anonymous for now as it would require an immediate * tree wide cleanup. */ struct platform_msi_desc platform; struct fsl_mc_msi_desc fsl_mc; struct ti_sci_inta_msi_desc inta; }; }; /* Helpers to hide struct msi_desc implementation details */ #define msi_desc_to_dev(desc) ((desc)->dev) #define dev_to_msi_list(dev) (&(dev)->msi_list) #define first_msi_entry(dev) \ list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) #define for_each_msi_entry(desc, dev) \ list_for_each_entry((desc), dev_to_msi_list((dev)), list) #define for_each_msi_entry_safe(desc, tmp, dev) \ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) #define for_each_msi_vector(desc, __irq, dev) \ for_each_msi_entry((desc), (dev)) \ if ((desc)->irq) \ for (__irq = (desc)->irq; \ __irq < ((desc)->irq + (desc)->nvec_used); \ __irq++) #ifdef CONFIG_IRQ_MSI_IOMMU static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) { return desc->iommu_cookie; } static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, const void *iommu_cookie) { desc->iommu_cookie = iommu_cookie; } #else static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) { return NULL; } static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, const void *iommu_cookie) { } #endif #ifdef CONFIG_PCI_MSI #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) #define for_each_pci_msi_entry(desc, pdev) \ for_each_msi_entry((desc), &(pdev)->dev) struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); void *msi_desc_to_pci_sysdata(struct msi_desc *desc); void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); #else /* CONFIG_PCI_MSI */ static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc) { return NULL; } static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { } #endif /* CONFIG_PCI_MSI */ struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, const struct irq_affinity_desc *affinity); void free_msi_entry(struct msi_desc *entry); void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); 
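
/*
 * Illustrative sketch (not part of the original header): how the
 * iteration helpers above are meant to be used. The function name is
 * hypothetical; it simply sums up the vectors a device has allocated.
 */
static inline unsigned int msi_count_used_vectors(struct device *dev)
{
	struct msi_desc *desc;
	unsigned int n = 0;

	/* Walk every MSI descriptor hanging off dev->msi_list. */
	for_each_msi_entry(desc, dev)
		n += desc->nvec_used;

	return n;
}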
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);

/*
 * The arch hooks to set up msi irqs. Those functions are
 * implemented as weak symbols so that they /can/ be overridden by
 * architecture specific code if needed.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void arch_restore_msi_irqs(struct pci_dev *dev);

void default_teardown_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);

struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>
#include <asm/msi.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free MSI interrupts
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
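 *
 * (Note: a stacked consumer, e.g. PCI/MSI layered on top of another
 * irqchip domain, typically implements only the hooks it needs and
 * relies on MSI_FLAG_USE_DEF_DOM_OPS to fill in the rest.)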
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};

/* Flags for msi_domain_info */
enum {
	/*
	 * Init non-implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non-implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSIX interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
*/ MSI_FLAG_MUST_REACTIVATE = (1 << 5), /* Is level-triggered capable, using two messages */ MSI_FLAG_LEVEL_CAPABLE = (1 << 6), }; int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force); struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec); void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, irq_write_msi_msg_t write_msi_msg); void platform_msi_domain_free_irqs(struct device *dev); /* When an MSI domain is used as an intermediate domain */ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *args); int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, int virq, int nvec, msi_alloc_info_t *args); struct irq_domain * __platform_msi_create_device_domain(struct device *dev, unsigned int nvec, bool is_tree, irq_write_msi_msg_t write_msi_msg, const struct irq_domain_ops *ops, void *host_data); #define platform_msi_create_device_domain(dev, nvec, write, ops, data) \ __platform_msi_create_device_domain(dev, nvec, false, write, ops, data) #define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \ __platform_msi_create_device_domain(dev, nvec, true, write, ops, data) int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nvec); void *platform_msi_get_host_data(struct irq_domain *domain); #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, struct msi_desc *desc); int pci_msi_domain_check_cap(struct irq_domain *domain, struct msi_domain_info *info, struct device *dev); u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); #else static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) { return NULL; } #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ #endif /* LINUX_MSI_H */ qnx6_fs.h 0000644 00000006425 14722070374 0006316 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Name : qnx6_fs.h * Author : Kai Bankett * Function : qnx6 global filesystem definitions * History : 17-01-2012 created */ #ifndef _LINUX_QNX6_FS_H #define _LINUX_QNX6_FS_H #include <linux/types.h> #include <linux/magic.h> #define QNX6_ROOT_INO 1 /* for di_status */ #define QNX6_FILE_DIRECTORY 0x01 #define QNX6_FILE_DELETED 0x02 #define QNX6_FILE_NORMAL 0x03 #define QNX6_SUPERBLOCK_SIZE 0x200 /* superblock always is 512 bytes */ #define QNX6_SUPERBLOCK_AREA 0x1000 /* area reserved for superblock */ #define QNX6_BOOTBLOCK_SIZE 0x2000 /* heading bootblock area */ #define QNX6_DIR_ENTRY_SIZE 0x20 /* dir entry size of 32 bytes */ #define QNX6_INODE_SIZE 0x80 /* each inode is 128 bytes */ #define QNX6_INODE_SIZE_BITS 7 /* 
inode entry size shift */
#define QNX6_NO_DIRECT_POINTERS	16	/* 16 blockptrs in sbl/inode */
#define QNX6_PTR_MAX_LEVELS	5	/* maximum indirect levels */

/* for filenames */
#define QNX6_SHORT_NAME_MAX	27
#define QNX6_LONG_NAME_MAX	510

/* list of mount options */
#define QNX6_MOUNT_MMI_FS	0x010000 /* mount as Audi MMI 3G fs */

/*
 * This is the original qnx6 inode layout on disk.
 * Each inode is 128 bytes long.
 */
struct qnx6_inode_entry {
	__fs64		di_size;
	__fs32		di_uid;
	__fs32		di_gid;
	__fs32		di_ftime;
	__fs32		di_mtime;
	__fs32		di_atime;
	__fs32		di_ctime;
	__fs16		di_mode;
	__fs16		di_ext_mode;
	__fs32		di_block_ptr[QNX6_NO_DIRECT_POINTERS];
	__u8		di_filelevels;
	__u8		di_status;
	__u8		di_unknown2[2];
	__fs32		di_zero2[6];
};

/*
 * Each directory entry is at most 32 bytes long.
 * If more characters or special characters are required, the name is
 * stored in the longfilenames structure.
 */
struct qnx6_dir_entry {
	__fs32		de_inode;
	__u8		de_size;
	char		de_fname[QNX6_SHORT_NAME_MAX];
};

/*
 * Longfilename direntries have a different structure
 */
struct qnx6_long_dir_entry {
	__fs32		de_inode;
	__u8		de_size;
	__u8		de_unknown[3];
	__fs32		de_long_inode;
	__fs32		de_checksum;
};

struct qnx6_long_filename {
	__fs16		lf_size;
	__u8		lf_fname[QNX6_LONG_NAME_MAX];
};

struct qnx6_root_node {
	__fs64		size;
	__fs32		ptr[QNX6_NO_DIRECT_POINTERS];
	__u8		levels;
	__u8		mode;
	__u8		spare[6];
};

struct qnx6_super_block {
	__fs32		sb_magic;
	__fs32		sb_checksum;
	__fs64		sb_serial;
	__fs32		sb_ctime;	/* time the fs was created */
	__fs32		sb_atime;	/* last access time */
	__fs32		sb_flags;
	__fs16		sb_version1;	/* filesystem version information */
	__fs16		sb_version2;	/* filesystem version information */
	__u8		sb_volumeid[16];
	__fs32		sb_blocksize;
	__fs32		sb_num_inodes;
	__fs32		sb_free_inodes;
	__fs32		sb_num_blocks;
	__fs32		sb_free_blocks;
	__fs32		sb_allocgroup;
	struct qnx6_root_node Inode;
	struct qnx6_root_node Bitmap;
	struct qnx6_root_node Longfile;
	struct qnx6_root_node Unknown;
};

/* The Audi MMI 3G superblock layout is different from plain qnx6 */
struct qnx6_mmi_super_block {
	__fs32		sb_magic;
	__fs32		sb_checksum;
	__fs64		sb_serial;
	__u8		sb_spare0[12];
	__u8		sb_id[12];
	__fs32		sb_blocksize;
	__fs32		sb_num_inodes;
	__fs32		sb_free_inodes;
	__fs32		sb_num_blocks;
	__fs32		sb_free_blocks;
	__u8		sb_spare1[4];
	struct qnx6_root_node Inode;
	struct qnx6_root_node Bitmap;
	struct qnx6_root_node Longfile;
	struct qnx6_root_node Unknown;
};

#endif
crc4.h 0000644 00000000300 14722070374 0005557 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC4_H
#define _LINUX_CRC4_H

#include <linux/types.h>

extern uint8_t crc4(uint8_t c, uint64_t x, int bits);

#endif /* _LINUX_CRC4_H */
bsg-lib.h 0000644 00000003317 14722070374 0006246 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011 Mike Christie
 */
#ifndef _BLK_BSG_
#define _BLK_BSG_

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

struct request;
struct device;
struct scatterlist;
struct request_queue;

typedef int (bsg_job_fn) (struct bsg_job *);
typedef enum blk_eh_timer_return (bsg_timeout_fn)(struct request *);

struct bsg_buffer {
	unsigned int payload_len;
	int sg_cnt;
	struct scatterlist *sg_list;
};

struct bsg_job {
	struct device *dev;

	struct kref kref;

	unsigned int timeout;

	/* Transport/driver specific request/reply structs */
	void *request;
	void *reply;

	unsigned int request_len;
	unsigned int reply_len;
	/*
	 * On entry : reply_len indicates the buffer size allocated for
	 * the reply.
	 *
	 * Upon completion : the message handler must set reply_len
	 * to indicate the size of the reply to be returned to the
	 * caller.
	 */

	/* DMA payloads for the request/response */
	struct bsg_buffer request_payload;
	struct bsg_buffer reply_payload;

	int result;
	unsigned int reply_payload_rcv_len;

	/* BIDI support */
	struct request *bidi_rq;
	struct bio *bidi_bio;

	void *dd_data;		/* Used for driver-specific storage */
};

void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len);
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size);
void bsg_remove_queue(struct request_queue *q);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);

#endif
nodemask.h 0000644 00000042142 14722070374 0006527 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NODEMASK_H
#define __LINUX_NODEMASK_H

/*
 * Nodemasks provide a bitmap suitable for representing the
 * set of Nodes in a system, one bit position per Node number.
 *
 * See detailed comments in the file linux/bitmap.h describing the
 * data type on which these nodemasks are based.
 *
 * For details of nodemask_parse_user(), see bitmap_parse_user() in
 * lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(),
 * also in bitmap.c. For details of node_remap(), see bitmap_bitremap in
 * lib/bitmap.c. For details of nodes_remap(), see bitmap_remap in
 * lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in
 * lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in
 * lib/bitmap.c.
 *
 * The available nodemask operations are:
 *
 * void node_set(node, mask)		turn on bit 'node' in mask
 * void node_clear(node, mask)		turn off bit 'node' in mask
 * void nodes_setall(mask)		set all bits
 * void nodes_clear(mask)		clear all bits
 * int node_isset(node, mask)		true iff bit 'node' set in mask
 * int node_test_and_set(node, mask)	test and set bit 'node' in mask
 *
 * void nodes_and(dst, src1, src2)	dst = src1 & src2  [intersection]
 * void nodes_or(dst, src1, src2)	dst = src1 | src2  [union]
 * void nodes_xor(dst, src1, src2)	dst = src1 ^ src2
 * void nodes_andnot(dst, src1, src2)	dst = src1 & ~src2
 * void nodes_complement(dst, src)	dst = ~src
 *
 * int nodes_equal(mask1, mask2)	Does mask1 == mask2?
 * int nodes_intersects(mask1, mask2)	Do mask1 and mask2 intersect?
 * int nodes_subset(mask1, mask2)	Is mask1 a subset of mask2?
 * int nodes_empty(mask)		Is mask empty (no bits set)?
 * int nodes_full(mask)			Is mask full (all bits set)?
 * int nodes_weight(mask)		Hamming weight - number of set bits
 *
 * void nodes_shift_right(dst, src, n)	Shift right
 * void nodes_shift_left(dst, src, n)	Shift left
 *
 * unsigned int first_node(mask)	Number lowest set bit, or MAX_NUMNODES
 * unsigned int next_node(node, mask)	Next node past 'node', or MAX_NUMNODES
 * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
 *					or MAX_NUMNODES
 * unsigned int first_unset_node(mask)	First node not set in mask, or
 *					MAX_NUMNODES
 *
 * nodemask_t nodemask_of_node(node)	Return nodemask with bit 'node' set
 * NODE_MASK_ALL			Initializer - all bits set
 * NODE_MASK_NONE			Initializer - no bits set
 * unsigned long *nodes_addr(mask)	Array of unsigned long's in mask
 *
 * int nodemask_parse_user(ubuf, ulen, mask)	Parse ascii string as nodemask
 * int nodelist_parse(buf, map)		Parse ascii string as nodelist
 * int node_remap(oldbit, old, new)	newbit = map(old, new)(oldbit)
 * void nodes_remap(dst, src, old, new)	*dst = map(old, new)(src)
 * void nodes_onto(dst, orig, relmap)	*dst = orig relative to relmap
 * void nodes_fold(dst, orig, sz)	dst bits = orig bits mod sz
 *
 * for_each_node_mask(node, mask)	for-loop node over mask
 *
 * int num_online_nodes()		Number of online Nodes
 * int num_possible_nodes()		Number of all possible Nodes
 *
 * int node_random(mask)		Random node with set bit in mask
 *
 * int node_online(node)		Is some node online?
 * int node_possible(node)		Is some node possible?
 *
 * node_set_online(node)		set bit 'node' in node_online_map
 * node_set_offline(node)		clear bit 'node' in node_online_map
 *
 * for_each_node(node)			for-loop node over node_possible_map
 * for_each_online_node(node)		for-loop node over node_online_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  So use a simple one-line #define
 *    for node_isset(), instead of wrapping an inline inside a macro, the
 *    way we do the other calls.
 *
 * NODEMASK_SCRATCH
 * When doing above logical AND, OR, XOR, Remap operations the callers tend to
 * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large,
 * nodemask_t's consume too much stack space.  NODEMASK_SCRATCH is a helper
 * for such situations. See below and CPUMASK_ALLOC also.
 */

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/numa.h>

typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;

/**
 * nodemask_pr_args - printf args to output a nodemask
 * @maskp: nodemask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
 */
#define nodemask_pr_args(maskp)	__nodemask_pr_numnodes(maskp), \
				__nodemask_pr_bits(maskp)
static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
	return m ? MAX_NUMNODES : 0;
}
static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
	return m ? m->bits : NULL;
}

/*
 * The inline keyword gives the compiler room to decide to inline, or
 * not inline a function as it sees best.  However, as these functions
 * are called in both __init and non-__init functions, if they are not
 * inlined we will end up with a section mis-match error (of the type of
 * freeable items not being freed).  So we must use __always_inline here
 * to fix the problem.
If other functions in the future also end up in * this situation they will also need to be annotated as __always_inline */ #define node_set(node, dst) __node_set((node), &(dst)) static __always_inline void __node_set(int node, volatile nodemask_t *dstp) { set_bit(node, dstp->bits); } #define node_clear(node, dst) __node_clear((node), &(dst)) static inline void __node_clear(int node, volatile nodemask_t *dstp) { clear_bit(node, dstp->bits); } #define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES) static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits) { bitmap_fill(dstp->bits, nbits); } #define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES) static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits) { bitmap_zero(dstp->bits, nbits); } /* No static inline type checking - see Subtlety (1) above. */ #define node_isset(node, nodemask) test_bit((node), (nodemask).bits) #define node_test_and_set(node, nodemask) \ __node_test_and_set((node), &(nodemask)) static inline bool __node_test_and_set(int node, nodemask_t *addr) { return test_and_set_bit(node, addr->bits); } #define nodes_and(dst, src1, src2) \ __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES) static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_or(dst, src1, src2) \ __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES) static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_xor(dst, src1, src2) \ __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES) static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_andnot(dst, src1, src2) \ __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES) static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_complement(dst, src) \ __nodes_complement(&(dst), &(src), MAX_NUMNODES) static inline void __nodes_complement(nodemask_t *dstp, const nodemask_t *srcp, unsigned int nbits) { bitmap_complement(dstp->bits, srcp->bits, nbits); } #define nodes_equal(src1, src2) \ __nodes_equal(&(src1), &(src2), MAX_NUMNODES) static inline bool __nodes_equal(const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { return bitmap_equal(src1p->bits, src2p->bits, nbits); } #define nodes_intersects(src1, src2) \ __nodes_intersects(&(src1), &(src2), MAX_NUMNODES) static inline bool __nodes_intersects(const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { return bitmap_intersects(src1p->bits, src2p->bits, nbits); } #define nodes_subset(src1, src2) \ __nodes_subset(&(src1), &(src2), MAX_NUMNODES) static inline bool __nodes_subset(const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { return bitmap_subset(src1p->bits, src2p->bits, nbits); } #define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES) static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits) { return bitmap_empty(srcp->bits, nbits); } #define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES) static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits) { return bitmap_full(srcp->bits, nbits); 
} #define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES) static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits) { return bitmap_weight(srcp->bits, nbits); } #define nodes_shift_right(dst, src, n) \ __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES) static inline void __nodes_shift_right(nodemask_t *dstp, const nodemask_t *srcp, int n, int nbits) { bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); } #define nodes_shift_left(dst, src, n) \ __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES) static inline void __nodes_shift_left(nodemask_t *dstp, const nodemask_t *srcp, int n, int nbits) { bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } /* FIXME: better would be to fix all architectures to never return > MAX_NUMNODES, then the silly min_ts could be dropped. */ #define first_node(src) __first_node(&(src)) static inline unsigned int __first_node(const nodemask_t *srcp) { return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); } #define next_node(n, src) __next_node((n), &(src)) static inline unsigned int __next_node(int n, const nodemask_t *srcp) { return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); } /* * Find the next present node in src, starting after node n, wrapping around to * the first node in src if needed. Returns MAX_NUMNODES if src is empty. */ #define next_node_in(n, src) __next_node_in((n), &(src)) unsigned int __next_node_in(int node, const nodemask_t *srcp); static inline void init_nodemask_of_node(nodemask_t *mask, int node) { nodes_clear(*mask); node_set(node, *mask); } #define nodemask_of_node(node) \ ({ \ typeof(_unused_nodemask_arg_) m; \ if (sizeof(m) == sizeof(unsigned long)) { \ m.bits[0] = 1UL << (node); \ } else { \ init_nodemask_of_node(&m, (node)); \ } \ m; \ }) #define first_unset_node(mask) __first_unset_node(&(mask)) static inline unsigned int __first_unset_node(const nodemask_t *maskp) { return min_t(unsigned int, MAX_NUMNODES, find_first_zero_bit(maskp->bits, MAX_NUMNODES)); } #define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES) #if MAX_NUMNODES <= BITS_PER_LONG #define NODE_MASK_ALL \ ((nodemask_t) { { \ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ } }) #else #define NODE_MASK_ALL \ ((nodemask_t) { { \ [0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL, \ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ } }) #endif #define NODE_MASK_NONE \ ((nodemask_t) { { \ [0 ... 
BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL \ } }) #define nodes_addr(src) ((src).bits) #define nodemask_parse_user(ubuf, ulen, dst) \ __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES) static inline int __nodemask_parse_user(const char __user *buf, int len, nodemask_t *dstp, int nbits) { return bitmap_parse_user(buf, len, dstp->bits, nbits); } #define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES) static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) { return bitmap_parselist(buf, dstp->bits, nbits); } #define node_remap(oldbit, old, new) \ __node_remap((oldbit), &(old), &(new), MAX_NUMNODES) static inline int __node_remap(int oldbit, const nodemask_t *oldp, const nodemask_t *newp, int nbits) { return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); } #define nodes_remap(dst, src, old, new) \ __nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES) static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, const nodemask_t *oldp, const nodemask_t *newp, int nbits) { bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); } #define nodes_onto(dst, orig, relmap) \ __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES) static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, const nodemask_t *relmapp, int nbits) { bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); } #define nodes_fold(dst, orig, sz) \ __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES) static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, int sz, int nbits) { bitmap_fold(dstp->bits, origp->bits, sz, nbits); } #if MAX_NUMNODES > 1 #define for_each_node_mask(node, mask) \ for ((node) = first_node(mask); \ (node >= 0) && (node) < MAX_NUMNODES; \ (node) = next_node((node), (mask))) #else /* MAX_NUMNODES == 1 */ #define for_each_node_mask(node, mask) \ for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++) #endif /* MAX_NUMNODES */ /* * Bitmasks that are kept for all the nodes. */ enum node_states { N_POSSIBLE, /* The node could become online at some point */ N_ONLINE, /* The node is online */ N_NORMAL_MEMORY, /* The node has regular memory */ #ifdef CONFIG_HIGHMEM N_HIGH_MEMORY, /* The node has regular or high memory */ #else N_HIGH_MEMORY = N_NORMAL_MEMORY, #endif N_MEMORY, /* The node has memory(regular, high, movable) */ N_CPU, /* The node has one or more cpus */ NR_NODE_STATES }; /* * The following particular system nodemasks and operations * on them manage all possible and online nodes. 
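 *
 * For example (illustrative only), a caller that wants to visit every
 * node that currently has memory can write:
 *
 *	int nid;
 *
 *	for_each_node_state(nid, N_MEMORY)
 *		setup_node_cache(nid);
 *
 * where setup_node_cache() stands for a hypothetical per-node hook.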
 */
extern nodemask_t node_states[NR_NODE_STATES];

#if MAX_NUMNODES > 1
static inline int node_state(int node, enum node_states state)
{
	return node_isset(node, node_states[state]);
}

static inline void node_set_state(int node, enum node_states state)
{
	__node_set(node, &node_states[state]);
}

static inline void node_clear_state(int node, enum node_states state)
{
	__node_clear(node, &node_states[state]);
}

static inline int num_node_state(enum node_states state)
{
	return nodes_weight(node_states[state]);
}

#define for_each_node_state(__node, __state) \
	for_each_node_mask((__node), node_states[__state])

#define first_online_node	first_node(node_states[N_ONLINE])
#define first_memory_node	first_node(node_states[N_MEMORY])
static inline unsigned int next_online_node(int nid)
{
	return next_node(nid, node_states[N_ONLINE]);
}
static inline unsigned int next_memory_node(int nid)
{
	return next_node(nid, node_states[N_MEMORY]);
}

extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;

static inline void node_set_online(int nid)
{
	node_set_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

static inline void node_set_offline(int nid)
{
	node_clear_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

#else

static inline int node_state(int node, enum node_states state)
{
	return node == 0;
}

static inline void node_set_state(int node, enum node_states state)
{
}

static inline void node_clear_state(int node, enum node_states state)
{
}

static inline int num_node_state(enum node_states state)
{
	return 1;
}

#define for_each_node_state(node, __state) \
	for ( (node) = 0; (node) == 0; (node) = 1)

#define first_online_node	0
#define first_memory_node	0
#define next_online_node(nid)	(MAX_NUMNODES)
#define nr_node_ids		1U
#define nr_online_nodes		1U

#define node_set_online(node)	   node_set_state((node), N_ONLINE)
#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)

#endif

#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
extern int node_random(const nodemask_t *maskp);
#else
static inline int node_random(const nodemask_t *mask)
{
	return 0;
}
#endif

#define node_online_map		node_states[N_ONLINE]
#define node_possible_map	node_states[N_POSSIBLE]

#define num_online_nodes()	num_node_state(N_ONLINE)
#define num_possible_nodes()	num_node_state(N_POSSIBLE)
#define node_online(node)	node_state((node), N_ONLINE)
#define node_possible(node)	node_state((node), N_POSSIBLE)

#define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)

/*
 * For nodemask scratch area.
 * NODEMASK_ALLOC(type, name) allocates an object with a specified type and
 * name.
 */
#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m)			kfree(m)
#else
#define NODEMASK_ALLOC(type, name, gfp_flags)	type _##name, *name = &_##name
#define NODEMASK_FREE(m)			do {} while (0)
#endif

/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
	nodemask_t	mask1;
	nodemask_t	mask2;
};

#define NODEMASK_SCRATCH(x)						\
	NODEMASK_ALLOC(struct nodemask_scratch, x,			\
			GFP_KERNEL | __GFP_NORETRY)
#define NODEMASK_SCRATCH_FREE(x)	NODEMASK_FREE(x)

#endif /* __LINUX_NODEMASK_H */
crypto.h 0000644 00000176007 14722070374 0006256 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S.
Miller (davem@redhat.com) * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> * * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> * and Nettle, by Niels Möller. */ #ifndef _LINUX_CRYPTO_H #define _LINUX_CRYPTO_H #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/bug.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/completion.h> /* * Autoloaded crypto modules should only use a prefixed name to avoid allowing * arbitrary modules to be loaded. Loading from userspace may still need the * unprefixed names, so retains those aliases as well. * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro * expands twice on the same line. Instead, use a separate base name for the * alias. */ #define MODULE_ALIAS_CRYPTO(name) \ __MODULE_INFO(alias, alias_userspace, name); \ __MODULE_INFO(alias, alias_crypto, "crypto-" name) /* * Algorithm masks and types. */ #define CRYPTO_ALG_TYPE_MASK 0x0000000f #define CRYPTO_ALG_TYPE_CIPHER 0x00000001 #define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 #define CRYPTO_ALG_TYPE_AEAD 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_KPP 0x00000008 #define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d #define CRYPTO_ALG_TYPE_HASH 0x0000000e #define CRYPTO_ALG_TYPE_SHASH 0x0000000e #define CRYPTO_ALG_TYPE_AHASH 0x0000000f #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c #define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e #define CRYPTO_ALG_LARVAL 0x00000010 #define CRYPTO_ALG_DEAD 0x00000020 #define CRYPTO_ALG_DYING 0x00000040 #define CRYPTO_ALG_ASYNC 0x00000080 /* * Set this bit if and only if the algorithm requires another algorithm of * the same type to handle corner cases. */ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 /* * Set if the algorithm has passed automated run-time testing. Note that * if there is no run-time testing for a given algorithm it is considered * to have passed. */ #define CRYPTO_ALG_TESTED 0x00000400 /* * Set if the algorithm is an instance that is built from templates. */ #define CRYPTO_ALG_INSTANCE 0x00000800 /* Set this bit if the algorithm provided is hardware accelerated but * not available to userspace via instruction set or so. */ #define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000 /* * Mark a cipher as a service implementation only usable by another * cipher and never by a normal user of the kernel crypto API */ #define CRYPTO_ALG_INTERNAL 0x00002000 /* * Set if the algorithm has a ->setkey() method but can be used without * calling it first, i.e. there is a default key. */ #define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 /* * Don't trigger module loading */ #define CRYPTO_NOLOAD 0x00008000 /* * Transform masks and values (for crt_flags). 
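 *
 * (Note: the CRYPTO_TFM_REQ_* bits below are set by users of the API to
 * steer an implementation, e.g. whether it may sleep or backlog, while
 * the CRYPTO_TFM_RES_* bits are set by implementations to report the
 * outcome of operations such as setkey.)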
*/ #define CRYPTO_TFM_NEED_KEY 0x00000001 #define CRYPTO_TFM_REQ_MASK 0x000fff00 #define CRYPTO_TFM_RES_MASK 0xfff00000 #define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100 #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 #define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 #define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000 #define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000 #define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000 /* * Miscellaneous stuff. */ #define CRYPTO_MAX_ALG_NAME 128 /* * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual * declaration) is used to ensure that the crypto_tfm context structure is * aligned correctly for the given architecture so that there are no alignment * faults for C data types. On architectures that support non-cache coherent * DMA, such as ARM or arm64, it also takes into account the minimal alignment * that is required to ensure that the context struct member does not share any * cachelines with the rest of the struct. This is needed to ensure that cache * maintenance for non-coherent DMA (cache invalidation in particular) does not * affect data that may be accessed by the CPU concurrently. */ #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN))) struct scatterlist; struct crypto_ablkcipher; struct crypto_async_request; struct crypto_blkcipher; struct crypto_tfm; struct crypto_type; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); /** * DOC: Block Cipher Context Data Structures * * These data structures define the operating context for each block cipher * type. */ struct crypto_async_request { struct list_head list; crypto_completion_t complete; void *data; struct crypto_tfm *tfm; u32 flags; }; struct ablkcipher_request { struct crypto_async_request base; unsigned int nbytes; void *info; struct scatterlist *src; struct scatterlist *dst; void *__ctx[] CRYPTO_MINALIGN_ATTR; }; struct blkcipher_desc { struct crypto_blkcipher *tfm; void *info; u32 flags; }; /** * DOC: Block Cipher Algorithm Definitions * * These data structures define modular crypto algorithm implementations, * managed via crypto_register_alg() and crypto_unregister_alg(). */ /** * struct ablkcipher_alg - asynchronous block cipher definition * @min_keysize: Minimum key size supported by the transformation. This is the * smallest key length supported by this transformation algorithm. * This must be set to one of the pre-defined values as this is * not hardware specific. Possible values for this field can be * found via git grep "_MIN_KEY_SIZE" include/crypto/ * @max_keysize: Maximum key size supported by the transformation. This is the * largest key length supported by this transformation algorithm. * This must be set to one of the pre-defined values as this is * not hardware specific. Possible values for this field can be * found via git grep "_MAX_KEY_SIZE" include/crypto/ * @setkey: Set key for the transformation. This function is used to either * program a supplied key into the hardware or store the key in the * transformation context for programming it later. Note that this * function does modify the transformation context. This function can * be called multiple times during the existence of the transformation * object, so one must make sure the key is properly reprogrammed into * the hardware. This function is also responsible for checking the key * length for validity. 
In case a software fallback was put in place in * the @cra_init call, this function might need to use the fallback if * the algorithm doesn't support all of the key sizes. * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt * the supplied scatterlist containing the blocks of data. The crypto * API consumer is responsible for aligning the entries of the * scatterlist properly and making sure the chunks are correctly * sized. In case a software fallback was put in place in the * @cra_init call, this function might need to use the fallback if * the algorithm doesn't support all of the key sizes. In case the * key was stored in transformation context, the key might need to be * re-programmed into the hardware in this function. This function * shall not modify the transformation context, as this function may * be called in parallel with the same transformation object. * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt * and the conditions are exactly the same. * @ivsize: IV size applicable for transformation. The consumer must provide an * IV of exactly that size to perform the encrypt or decrypt operation. * * All fields except @ivsize are mandatory and must be filled. */ struct ablkcipher_alg { int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; }; /** * struct blkcipher_alg - synchronous block cipher definition * @min_keysize: see struct ablkcipher_alg * @max_keysize: see struct ablkcipher_alg * @setkey: see struct ablkcipher_alg * @encrypt: see struct ablkcipher_alg * @decrypt: see struct ablkcipher_alg * @ivsize: see struct ablkcipher_alg * * All fields except @ivsize are mandatory and must be filled. */ struct blkcipher_alg { int (*setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes); int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes); unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; }; /** * struct cipher_alg - single-block symmetric ciphers definition * @cia_min_keysize: Minimum key size supported by the transformation. This is * the smallest key length supported by this transformation * algorithm. This must be set to one of the pre-defined * values as this is not hardware specific. Possible values * for this field can be found via git grep "_MIN_KEY_SIZE" * include/crypto/ * @cia_max_keysize: Maximum key size supported by the transformation. This is * the largest key length supported by this transformation * algorithm. This must be set to one of the pre-defined values * as this is not hardware specific. Possible values for this * field can be found via git grep "_MAX_KEY_SIZE" * include/crypto/ * @cia_setkey: Set key for the transformation. This function is used to either * program a supplied key into the hardware or store the key in the * transformation context for programming it later. Note that this * function does modify the transformation context. This function * can be called multiple times during the existence of the * transformation object, so one must make sure the key is properly * reprogrammed into the hardware. This function is also * responsible for checking the key length for validity. 
* @cia_encrypt: Encrypt a single block. This function is used to encrypt a * single block of data, which must be @cra_blocksize big. This * always operates on a full @cra_blocksize and it is not possible * to encrypt a block of smaller size. The supplied buffers must * therefore also be at least of @cra_blocksize size. Both the * input and output buffers are always aligned to @cra_alignmask. * In case either of the input or output buffer supplied by user * of the crypto API is not aligned to @cra_alignmask, the crypto * API will re-align the buffers. The re-alignment means that a * new buffer will be allocated, the data will be copied into the * new buffer, then the processing will happen on the new buffer, * then the data will be copied back into the original buffer and * finally the new buffer will be freed. In case a software * fallback was put in place in the @cra_init call, this function * might need to use the fallback if the algorithm doesn't support * all of the key sizes. In case the key was stored in * transformation context, the key might need to be re-programmed * into the hardware in this function. This function shall not * modify the transformation context, as this function may be * called in parallel with the same transformation object. * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to * @cia_encrypt, and the conditions are exactly the same. * * All fields are mandatory and must be filled. */ struct cipher_alg { unsigned int cia_min_keysize; unsigned int cia_max_keysize; int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); }; /** * struct compress_alg - compression/decompression algorithm * @coa_compress: Compress a buffer of specified length, storing the resulting * data in the specified buffer. Return the length of the * compressed data in dlen. * @coa_decompress: Decompress the source buffer, storing the uncompressed * data in the specified buffer. The length of the data is * returned in dlen. * * All fields are mandatory. 
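 *
 * (Note: in both callbacks *dlen carries the destination buffer size on
 * input and is updated to the number of bytes actually produced on
 * output.)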
 */
struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};

#ifdef CONFIG_CRYPTO_STATS
/*
 * struct crypto_istat_aead - statistics for AEAD algorithm
 * @encrypt_cnt:	number of encrypt requests
 * @encrypt_tlen:	total data size handled by encrypt requests
 * @decrypt_cnt:	number of decrypt requests
 * @decrypt_tlen:	total data size handled by decrypt requests
 * @err_cnt:		number of errors for AEAD requests
 */
struct crypto_istat_aead {
	atomic64_t encrypt_cnt;
	atomic64_t encrypt_tlen;
	atomic64_t decrypt_cnt;
	atomic64_t decrypt_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_akcipher - statistics for akcipher algorithm
 * @encrypt_cnt:	number of encrypt requests
 * @encrypt_tlen:	total data size handled by encrypt requests
 * @decrypt_cnt:	number of decrypt requests
 * @decrypt_tlen:	total data size handled by decrypt requests
 * @verify_cnt:		number of verify operations
 * @sign_cnt:		number of sign requests
 * @err_cnt:		number of errors for akcipher requests
 */
struct crypto_istat_akcipher {
	atomic64_t encrypt_cnt;
	atomic64_t encrypt_tlen;
	atomic64_t decrypt_cnt;
	atomic64_t decrypt_tlen;
	atomic64_t verify_cnt;
	atomic64_t sign_cnt;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_cipher - statistics for cipher algorithm
 * @encrypt_cnt:	number of encrypt requests
 * @encrypt_tlen:	total data size handled by encrypt requests
 * @decrypt_cnt:	number of decrypt requests
 * @decrypt_tlen:	total data size handled by decrypt requests
 * @err_cnt:		number of errors for cipher requests
 */
struct crypto_istat_cipher {
	atomic64_t encrypt_cnt;
	atomic64_t encrypt_tlen;
	atomic64_t decrypt_cnt;
	atomic64_t decrypt_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_compress - statistics for compress algorithm
 * @compress_cnt:	number of compress requests
 * @compress_tlen:	total data size handled by compress requests
 * @decompress_cnt:	number of decompress requests
 * @decompress_tlen:	total data size handled by decompress requests
 * @err_cnt:		number of errors for compress requests
 */
struct crypto_istat_compress {
	atomic64_t compress_cnt;
	atomic64_t compress_tlen;
	atomic64_t decompress_cnt;
	atomic64_t decompress_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_hash - statistics for hash algorithm
 * @hash_cnt:		number of hash requests
 * @hash_tlen:		total data size hashed
 * @err_cnt:		number of errors for hash requests
 */
struct crypto_istat_hash {
	atomic64_t hash_cnt;
	atomic64_t hash_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_kpp - statistics for KPP algorithm
 * @setsecret_cnt:		number of setsecret operations
 * @generate_public_key_cnt:	number of generate_public_key operations
 * @compute_shared_secret_cnt:	number of compute_shared_secret operations
 * @err_cnt:			number of errors for KPP requests
 */
struct crypto_istat_kpp {
	atomic64_t setsecret_cnt;
	atomic64_t generate_public_key_cnt;
	atomic64_t compute_shared_secret_cnt;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_rng: statistics for RNG algorithm
 * @generate_cnt:	number of RNG generate requests
 * @generate_tlen:	total data size of generated data by the RNG
 * @seed_cnt:		number of times the RNG was seeded
 * @err_cnt:		number of errors for RNG requests
 */
struct crypto_istat_rng {
	atomic64_t generate_cnt;
	atomic64_t generate_tlen;
	atomic64_t seed_cnt;
	atomic64_t err_cnt;
};
#endif /* CONFIG_CRYPTO_STATS */

#define cra_ablkcipher	cra_u.ablkcipher
#define cra_blkcipher	cra_u.blkcipher
#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	       used for fine-tuning the description of the transformation
 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value.
 *		   In case of HASH transformation, it is possible for a smaller
 *		   block than @cra_blocksize to be passed to the crypto API for
 *		   transformation, in case of any other transformation type, an
 *		   error will be returned upon any attempt to transform smaller
 *		   than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
 *		   buffer containing the input data for the algorithm must be
 *		   aligned to this alignment mask. The data buffer for the
 *		   output data must be aligned to this alignment mask. Note that
 *		   the Crypto API will do the re-alignment in software, but
 *		   only under special conditions and there is a performance hit.
 *		   The re-alignment happens at these occasions for different
 *		   @cra_u types: cipher -- For both input data and output data
 *		   buffer; ahash -- For output hash destination buf; shash --
 *		   For output hash destination buf.
 *		   This is needed on hardware which is flawed by design and
 *		   cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with same @cra_name are available to
 *		  the Crypto API, the kernel will use the one with highest
 *		  @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation. This can be any
 *		     arbitrary value, but in the usual case, this contains the
 *		     name of the chip or provider and the name of the
 *		     transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options:
 *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
 *	      &crypto_ahash_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation selected
 *	   by @cra_type and @cra_flags above, the associated structure must be
 *	   filled with callbacks. This field might be empty. This is the case
 *	   for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	      is used to initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation time, right
 *	      after the transformation context was allocated.
 *	In case the cryptographic hardware has some special requirements
 *	which need to be handled by software, this function shall check
 *	for the precise requirement of the transformation and put any
 *	software fallbacks in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	counterpart to @cra_init, used to remove various changes set in
 *	@cra_init.
 * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
 *	definition. See @struct @ablkcipher_alg.
 * @cra_u.blkcipher: Union member which contains a synchronous block cipher
 *	definition. See @struct @blkcipher_alg.
 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
 *	definition. See @struct @cipher_alg.
 * @cra_u.compress: Union member which contains a (de)compression algorithm.
 *	See @struct @compress_alg.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * @stats: union of all possible crypto_istat_xxx structures
 * @stats.aead: statistics for AEAD algorithm
 * @stats.akcipher: statistics for akcipher algorithm
 * @stats.cipher: statistics for cipher algorithm
 * @stats.compress: statistics for compress algorithm
 * @stats.hash: statistics for hash algorithm
 * @stats.rng: statistics for rng algorithm
 * @stats.kpp: statistics for KPP algorithm
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	refcount_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct ablkcipher_alg ablkcipher;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;

#ifdef CONFIG_CRYPTO_STATS
	union {
		struct crypto_istat_aead aead;
		struct crypto_istat_akcipher akcipher;
		struct crypto_istat_cipher cipher;
		struct crypto_istat_compress compress;
		struct crypto_istat_hash hash;
		struct crypto_istat_rng rng;
		struct crypto_istat_kpp kpp;
	} stats;
#endif /* CONFIG_CRYPTO_STATS */
} CRYPTO_MINALIGN_ATTR;

#ifdef CONFIG_CRYPTO_STATS
void crypto_stats_init(struct crypto_alg *alg);
void crypto_stats_get(struct crypto_alg *alg);
void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
#else
static inline void crypto_stats_init(struct crypto_alg *alg) {}
static inline void crypto_stats_get(struct crypto_alg *alg) {}
static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) {}
static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) {}
static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret) {}
static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret) {}
static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret) {}
static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret) {}
static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret) {}
static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) {}
static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) {}
#endif

/*
 * A helper struct for waiting for completion of async crypto ops
 */
struct crypto_wait {
	struct completion completion;
	int err;
};

/*
 * Macro for declaring a crypto op async wait object on stack
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions
 */
void crypto_req_done(struct crypto_async_request *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	}

	return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}
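/*
 * Example (an illustrative sketch, not part of the API): waiting
 * synchronously for an asynchronous request with the helpers above,
 * where "req" is a hypothetical, already-prepared ablkcipher request:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 */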
/* * Algorithm registration interface. */ int crypto_register_alg(struct crypto_alg *alg); int crypto_unregister_alg(struct crypto_alg *alg); int crypto_register_algs(struct crypto_alg *algs, int count); int crypto_unregister_algs(struct crypto_alg *algs, int count); /* * Algorithm query interface. */ int crypto_has_alg(const char *name, u32 type, u32 mask); /* * Transforms: user-instantiated objects which encapsulate algorithms * and core processing logic. Managed via crypto_alloc_*() and * crypto_free_*(), as well as the various helpers below. */ struct ablkcipher_tfm { int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); struct crypto_ablkcipher *base; unsigned int ivsize; unsigned int reqsize; }; struct blkcipher_tfm { void *iv; int (*setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes); int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes); }; struct cipher_tfm { int (*cit_setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); }; struct compress_tfm { int (*cot_compress)(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen); int (*cot_decompress)(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen); }; #define crt_ablkcipher crt_u.ablkcipher #define crt_blkcipher crt_u.blkcipher #define crt_cipher crt_u.cipher #define crt_compress crt_u.compress struct crypto_tfm { u32 crt_flags; union { struct ablkcipher_tfm ablkcipher; struct blkcipher_tfm blkcipher; struct cipher_tfm cipher; struct compress_tfm compress; } crt_u; void (*exit)(struct crypto_tfm *tfm); struct crypto_alg *__crt_alg; void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; }; struct crypto_ablkcipher { struct crypto_tfm base; }; struct crypto_blkcipher { struct crypto_tfm base; }; struct crypto_cipher { struct crypto_tfm base; }; struct crypto_comp { struct crypto_tfm base; }; enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, CRYPTOA_TYPE, CRYPTOA_U32, __CRYPTOA_MAX, }; #define CRYPTOA_MAX (__CRYPTOA_MAX - 1) /* Maximum number of (rtattr) parameters for each template. */ #define CRYPTO_MAX_ATTRS 32 struct crypto_attr_alg { char name[CRYPTO_MAX_ALG_NAME]; }; struct crypto_attr_type { u32 type; u32 mask; }; struct crypto_attr_u32 { u32 num; }; /* * Transform user interface. */ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm); static inline void crypto_free_tfm(struct crypto_tfm *tfm) { return crypto_destroy_tfm(tfm, tfm); } int alg_test(const char *driver, const char *alg, u32 type, u32 mask); /* * Transform helpers which query the underlying algorithm. 
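 *
 * Example (sketch): logging which implementation the crypto API chose
 * for a given transform handle "tfm":
 *
 *	pr_info("alg %s, driver %s, prio %d\n",
 *		crypto_tfm_alg_name(tfm),
 *		crypto_tfm_alg_driver_name(tfm),
 *		crypto_tfm_alg_priority(tfm));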
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
	return mask;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * Asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately before the completion of the operation.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced by the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished if it invoked multiple operations in
 * parallel. This state information is unused by the kernel crypto API.
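 *
 * A minimal request flow might look as follows (an illustrative sketch;
 * error handling is omitted, and "tfm", "src_sg", "dst_sg", "iv", "mycb"
 * and "mydata" are assumed to be set up by the caller):
 *
 *	struct ablkcipher_request *req;
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *					mycb, mydata);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *	...
 *	ablkcipher_request_free(req);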
*/ static inline struct crypto_tfm *crypto_ablkcipher_tfm( struct crypto_ablkcipher *tfm) { return &tfm->base; } /** * crypto_free_ablkcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed */ static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) { crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); } /** * crypto_has_ablkcipher() - Search for the availability of an ablkcipher. * @alg_name: is the cra_name / name or cra_driver_name / driver name of the * ablkcipher * @type: specifies the type of the cipher * @mask: specifies the mask for the cipher * * Return: true when the ablkcipher is known to the kernel crypto API; false * otherwise */ static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, u32 mask) { return crypto_has_alg(alg_name, crypto_skcipher_type(type), crypto_skcipher_mask(mask)); } static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( struct crypto_ablkcipher *tfm) { return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; } /** * crypto_ablkcipher_ivsize() - obtain IV size * @tfm: cipher handle * * The size of the IV for the ablkcipher referenced by the cipher handle is * returned. This IV size may be zero if the cipher does not need an IV. * * Return: IV size in bytes */ static inline unsigned int crypto_ablkcipher_ivsize( struct crypto_ablkcipher *tfm) { return crypto_ablkcipher_crt(tfm)->ivsize; } /** * crypto_ablkcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle * * The block size for the ablkcipher referenced with the cipher handle is * returned. The caller may use that information to allocate appropriate * memory for the data returned by the encryption or decryption operation * * Return: block size of cipher */ static inline unsigned int crypto_ablkcipher_blocksize( struct crypto_ablkcipher *tfm) { return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm)); } static inline unsigned int crypto_ablkcipher_alignmask( struct crypto_ablkcipher *tfm) { return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm)); } static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm) { return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm)); } static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm, u32 flags) { crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags); } static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, u32 flags) { crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); } /** * crypto_ablkcipher_setkey() - set key for cipher * @tfm: cipher handle * @key: buffer holding the key * @keylen: length of the key in bytes * * The caller provided key is set for the ablkcipher referenced by the cipher * handle. * * Note, the key length determines the cipher type. Many block ciphers implement * different cipher modes depending on the key size, such as AES-128 vs AES-192 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 * is performed. * * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); return crt->setkey(crt->base, key, keylen); } /** * crypto_ablkcipher_reqtfm() - obtain cipher handle from request * @req: ablkcipher_request out of which the cipher handle is to be obtained * * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request * data structure. 
* * Return: crypto_ablkcipher handle */ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( struct ablkcipher_request *req) { return __crypto_ablkcipher_cast(req->base.tfm); } /** * crypto_ablkcipher_encrypt() - encrypt plaintext * @req: reference to the ablkcipher_request handle that holds all information * needed to perform the cipher operation * * Encrypt plaintext data using the ablkcipher_request handle. That data * structure and how it is filled with data is discussed with the * ablkcipher_request_* functions. * * Return: 0 if the cipher operation was successful; < 0 if an error occurred */ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); struct crypto_alg *alg = crt->base->base.__crt_alg; unsigned int nbytes = req->nbytes; int ret; crypto_stats_get(alg); ret = crt->encrypt(req); crypto_stats_ablkcipher_encrypt(nbytes, ret, alg); return ret; } /** * crypto_ablkcipher_decrypt() - decrypt ciphertext * @req: reference to the ablkcipher_request handle that holds all information * needed to perform the cipher operation * * Decrypt ciphertext data using the ablkcipher_request handle. That data * structure and how it is filled with data is discussed with the * ablkcipher_request_* functions. * * Return: 0 if the cipher operation was successful; < 0 if an error occurred */ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); struct crypto_alg *alg = crt->base->base.__crt_alg; unsigned int nbytes = req->nbytes; int ret; crypto_stats_get(alg); ret = crt->decrypt(req); crypto_stats_ablkcipher_decrypt(nbytes, ret, alg); return ret; } /** * DOC: Asynchronous Cipher Request Handle * * The ablkcipher_request data structure contains all pointers to data * required for the asynchronous cipher operation. This includes the cipher * handle (which can be used by multiple ablkcipher_request instances), pointer * to plaintext and ciphertext, asynchronous callback function, etc. It acts * as a handle to the ablkcipher_request_* API calls in a similar way as * ablkcipher handle to the crypto_ablkcipher_* API calls. */ /** * crypto_ablkcipher_reqsize() - obtain size of the request data structure * @tfm: cipher handle * * Return: number of bytes */ static inline unsigned int crypto_ablkcipher_reqsize( struct crypto_ablkcipher *tfm) { return crypto_ablkcipher_crt(tfm)->reqsize; } /** * ablkcipher_request_set_tfm() - update cipher handle reference in request * @req: request handle to be modified * @tfm: cipher handle that shall be added to the request handle * * Allow the caller to replace the existing ablkcipher handle in the request * data structure with a different one. */ static inline void ablkcipher_request_set_tfm( struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) { req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base); } static inline struct ablkcipher_request *ablkcipher_request_cast( struct crypto_async_request *req) { return container_of(req, struct ablkcipher_request, base); } /** * ablkcipher_request_alloc() - allocate request data structure * @tfm: cipher handle to be registered with the request * @gfp: memory allocation flag that is handed to kmalloc by the API call. * * Allocate the request data structure that must be used with the ablkcipher * encrypt and decrypt API calls. 
 * During the allocation, the provided ablkcipher handle is registered in
 * the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of memory
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
	struct crypto_ablkcipher *tfm, gfp_t gfp)
{
	struct ablkcipher_request *req;

	req = kmalloc(sizeof(struct ablkcipher_request) +
		      crypto_ablkcipher_reqsize(tfm), gfp);

	if (likely(req))
		ablkcipher_request_set_tfm(req, tfm);

	return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
	kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once
 * the cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template::
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
	struct ablkcipher_request *req,
	u32 flags, crypto_completion_t compl, void *data)
{
	req->base.complete = compl;
	req->base.data = data;
	req->base.flags = flags;
}

/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *	by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the
 * plaintext.
 */
static inline void ablkcipher_request_set_crypt(
	struct ablkcipher_request *req,
	struct scatterlist *src, struct scatterlist *dst,
	unsigned int nbytes, void *iv)
{
	req->src = src;
	req->dst = dst;
	req->nbytes = nbytes;
	req->info = iv;
}

/**
 * DOC: Synchronous Block Cipher API
 *
 * The synchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto).
 *
 * Synchronous calls have a context in the tfm. But since a single tfm can be
 * used in multiple calls and in parallel, this info should not be changeable
 * (unless a lock is used). This applies, for example, to the symmetric key.
 * However, the IV is changeable, so there is an iv field in the
 * blkcipher_tfm structure for the synchronous blkcipher API. So, it's the
 * only state info that can be kept for synchronous calls without using a big
 * lock across a tfm.
 *
 * The block cipher API allows the use of a complete cipher, i.e. a cipher
 * consisting of a template (a block chaining mode) and a single block cipher
 * primitive (e.g. AES).
 *
 * The plaintext data buffer and the ciphertext data buffer are pointed to
 * by using scatter/gather lists. The cipher operation is performed
 * on all segments of the provided scatter/gather lists.
 *
 * The kernel crypto API supports a cipher operation "in-place" which means
 * that the caller may provide the same scatter/gather list for the plaintext
 * and ciphertext. After the completion of the cipher operation, the plaintext
 * data is replaced with the ciphertext data in case of an encryption and vice
 * versa for a decryption. The caller must ensure that the scatter/gather lists
 * for the output data point to sufficiently large buffers, i.e. multiples of
 * the block size of the cipher.
 */

static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_blkcipher *)tfm;
}

static inline struct crypto_blkcipher *crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
	return __crypto_blkcipher_cast(tfm);
}

/**
 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      blkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a block cipher. The returned struct
 * crypto_blkcipher is the cipher handle that is required for any subsequent
 * API invocation for that block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
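 *
 * Example (sketch): allocating a CBC-mode AES handle and checking the
 * result:
 *
 *	struct crypto_blkcipher *tfm;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);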
*/ static inline struct crypto_blkcipher *crypto_alloc_blkcipher( const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; mask |= CRYPTO_ALG_TYPE_MASK; return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); } static inline struct crypto_tfm *crypto_blkcipher_tfm( struct crypto_blkcipher *tfm) { return &tfm->base; } /** * crypto_free_blkcipher() - zeroize and free the block cipher handle * @tfm: cipher handle to be freed */ static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) { crypto_free_tfm(crypto_blkcipher_tfm(tfm)); } /** * crypto_has_blkcipher() - Search for the availability of a block cipher * @alg_name: is the cra_name / name or cra_driver_name / driver name of the * block cipher * @type: specifies the type of the cipher * @mask: specifies the mask for the cipher * * Return: true when the block cipher is known to the kernel crypto API; false * otherwise */ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; mask |= CRYPTO_ALG_TYPE_MASK; return crypto_has_alg(alg_name, type, mask); } /** * crypto_blkcipher_name() - return the name / cra_name from the cipher handle * @tfm: cipher handle * * Return: The character string holding the name of the cipher */ static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) { return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); } static inline struct blkcipher_tfm *crypto_blkcipher_crt( struct crypto_blkcipher *tfm) { return &crypto_blkcipher_tfm(tfm)->crt_blkcipher; } static inline struct blkcipher_alg *crypto_blkcipher_alg( struct crypto_blkcipher *tfm) { return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; } /** * crypto_blkcipher_ivsize() - obtain IV size * @tfm: cipher handle * * The size of the IV for the block cipher referenced by the cipher handle is * returned. This IV size may be zero if the cipher does not need an IV. * * Return: IV size in bytes */ static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) { return crypto_blkcipher_alg(tfm)->ivsize; } /** * crypto_blkcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle * * The block size for the block cipher referenced with the cipher handle is * returned. The caller may use that information to allocate appropriate * memory for the data returned by the encryption or decryption operation. * * Return: block size of cipher */ static inline unsigned int crypto_blkcipher_blocksize( struct crypto_blkcipher *tfm) { return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm)); } static inline unsigned int crypto_blkcipher_alignmask( struct crypto_blkcipher *tfm) { return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm)); } static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm) { return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm)); } static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm, u32 flags) { crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags); } static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, u32 flags) { crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); } /** * crypto_blkcipher_setkey() - set key for cipher * @tfm: cipher handle * @key: buffer holding the key * @keylen: length of the key in bytes * * The caller provided key is set for the block cipher referenced by the cipher * handle. * * Note, the key length determines the cipher type. 
Many block ciphers implement * different cipher modes depending on the key size, such as AES-128 vs AES-192 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 * is performed. * * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, const u8 *key, unsigned int keylen) { return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm), key, keylen); } /** * crypto_blkcipher_encrypt() - encrypt plaintext * @desc: reference to the block cipher handle with meta data * @dst: scatter/gather list that is filled by the cipher operation with the * ciphertext * @src: scatter/gather list that holds the plaintext * @nbytes: number of bytes of the plaintext to encrypt. * * Encrypt plaintext data using the IV set by the caller with a preceding * call of crypto_blkcipher_set_iv. * * The blkcipher_desc data structure must be filled by the caller and can * reside on the stack. The caller must fill desc as follows: desc.tfm is filled * with the block cipher handle; desc.flags is filled with either * CRYPTO_TFM_REQ_MAY_SLEEP or 0. * * Return: 0 if the cipher operation was successful; < 0 if an error occurred */ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { desc->info = crypto_blkcipher_crt(desc->tfm)->iv; return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); } /** * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV * @desc: reference to the block cipher handle with meta data * @dst: scatter/gather list that is filled by the cipher operation with the * ciphertext * @src: scatter/gather list that holds the plaintext * @nbytes: number of bytes of the plaintext to encrypt. * * Encrypt plaintext data with the use of an IV that is solely used for this * cipher operation. Any previously set IV is not used. * * The blkcipher_desc data structure must be filled by the caller and can * reside on the stack. The caller must fill desc as follows: desc.tfm is filled * with the block cipher handle; desc.info is filled with the IV to be used for * the current operation; desc.flags is filled with either * CRYPTO_TFM_REQ_MAY_SLEEP or 0. * * Return: 0 if the cipher operation was successful; < 0 if an error occurred */ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); } /** * crypto_blkcipher_decrypt() - decrypt ciphertext * @desc: reference to the block cipher handle with meta data * @dst: scatter/gather list that is filled by the cipher operation with the * plaintext * @src: scatter/gather list that holds the ciphertext * @nbytes: number of bytes of the ciphertext to decrypt. * * Decrypt ciphertext data using the IV set by the caller with a preceding * call of crypto_blkcipher_set_iv. * * The blkcipher_desc data structure must be filled by the caller as documented * for the crypto_blkcipher_encrypt call above. 
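 *
 * Example (an illustrative sketch; "tfm", "dst_sg" and "src_sg" are the
 * caller's cipher handle and scatterlists):
 *
 *	struct blkcipher_desc desc = {
 *		.tfm = tfm,
 *		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
 *	};
 *
 *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *	err = crypto_blkcipher_decrypt(&desc, dst_sg, src_sg, nbytes);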
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt_iv call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_set_iv() - set IV for cipher
 * @tfm: cipher handle
 * @src: buffer holding the IV
 * @len: length of the IV in bytes
 *
 * The caller provided IV is set for the block cipher referenced by the cipher
 * handle.
 */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
					   const u8 *src, unsigned int len)
{
	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}

/**
 * crypto_blkcipher_get_iv() - obtain IV from cipher
 * @tfm: cipher handle
 * @dst: buffer filled with the IV
 * @len: length of the buffer dst
 *
 * The caller can obtain the IV set for the block cipher referenced by the
 * cipher handle and store it into the user-provided buffer. If the buffer
 * has insufficient space, the IV is truncated to fit the buffer.
 */
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
					   u8 *dst, unsigned int len)
{
	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}

/**
 * DOC: Single Block Cipher API
 *
 * The single block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
 *
 * Using the single block cipher API calls, operations with the basic cipher
 * primitive can be implemented. These cipher primitives exclude any block
 * chaining operations including IV handling.
 *
 * The purpose of this single block cipher API is to support the implementation
 * of templates or other concepts that only need to perform the cipher
 * operation on one block at a time. Templates invoke the underlying cipher
 * primitive block-wise and process either the input or the output data of
 * these cipher operations.
 */

static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_cipher *)tfm;
}

static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
	return __crypto_cipher_cast(tfm);
}

/**
 * crypto_alloc_cipher() - allocate single block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a single block cipher.
The returned struct * crypto_cipher is the cipher handle that is required for any subsequent API * invocation for that single block cipher. * * Return: allocated cipher handle in case of success; IS_ERR() is true in case * of an error, PTR_ERR() returns the error code. */ static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_CIPHER; mask |= CRYPTO_ALG_TYPE_MASK; return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); } static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) { return &tfm->base; } /** * crypto_free_cipher() - zeroize and free the single block cipher handle * @tfm: cipher handle to be freed */ static inline void crypto_free_cipher(struct crypto_cipher *tfm) { crypto_free_tfm(crypto_cipher_tfm(tfm)); } /** * crypto_has_cipher() - Search for the availability of a single block cipher * @alg_name: is the cra_name / name or cra_driver_name / driver name of the * single block cipher * @type: specifies the type of the cipher * @mask: specifies the mask for the cipher * * Return: true when the single block cipher is known to the kernel crypto API; * false otherwise */ static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_CIPHER; mask |= CRYPTO_ALG_TYPE_MASK; return crypto_has_alg(alg_name, type, mask); } static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) { return &crypto_cipher_tfm(tfm)->crt_cipher; } /** * crypto_cipher_blocksize() - obtain block size for cipher * @tfm: cipher handle * * The block size for the single block cipher referenced with the cipher handle * tfm is returned. The caller may use that information to allocate appropriate * memory for the data returned by the encryption or decryption operation * * Return: block size of cipher */ static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) { return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); } static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) { return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); } static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) { return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); } static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, u32 flags) { crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); } static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, u32 flags) { crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); } /** * crypto_cipher_setkey() - set key for cipher * @tfm: cipher handle * @key: buffer holding the key * @keylen: length of the key in bytes * * The caller provided key is set for the single block cipher referenced by the * cipher handle. * * Note, the key length determines the cipher type. Many block ciphers implement * different cipher modes depending on the key size, such as AES-128 vs AES-192 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 * is performed. 
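 *
 * Example (sketch): a 16 byte key selects AES-128 on an "aes" handle:
 *
 *	u8 key[16];
 *
 *	get_random_bytes(key, sizeof(key));
 *	err = crypto_cipher_setkey(tfm, key, sizeof(key));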
* * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, const u8 *key, unsigned int keylen) { return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm), key, keylen); } /** * crypto_cipher_encrypt_one() - encrypt one block of plaintext * @tfm: cipher handle * @dst: points to the buffer that will be filled with the ciphertext * @src: buffer holding the plaintext to be encrypted * * Invoke the encryption operation of one block. The caller must ensure that * the plaintext and ciphertext buffers are at least one block in size. */ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src) { crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm), dst, src); } /** * crypto_cipher_decrypt_one() - decrypt one block of ciphertext * @tfm: cipher handle * @dst: points to the buffer that will be filled with the plaintext * @src: buffer holding the ciphertext to be decrypted * * Invoke the decryption operation of one block. The caller must ensure that * the plaintext and ciphertext buffers are at least one block in size. */ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src) { crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm), dst, src); } static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) { return (struct crypto_comp *)tfm; } static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm) { BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) & CRYPTO_ALG_TYPE_MASK); return __crypto_comp_cast(tfm); } static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_COMPRESS; mask |= CRYPTO_ALG_TYPE_MASK; return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask)); } static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm) { return &tfm->base; } static inline void crypto_free_comp(struct crypto_comp *tfm) { crypto_free_tfm(crypto_comp_tfm(tfm)); } static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_COMPRESS; mask |= CRYPTO_ALG_TYPE_MASK; return crypto_has_alg(alg_name, type, mask); } static inline const char *crypto_comp_name(struct crypto_comp *tfm) { return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); } static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm) { return &crypto_comp_tfm(tfm)->crt_compress; } static inline int crypto_comp_compress(struct crypto_comp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm), src, slen, dst, dlen); } static inline int crypto_comp_decompress(struct crypto_comp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm), src, slen, dst, dlen); } #endif /* _LINUX_CRYPTO_H */ slab.h 0000644 00000052131 14722070374 0005646 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). * * (C) SGI 2006, Christoph Lameter * Cleaned up and restructured to ease the addition of alternative * implementations of SLAB allocators. 
* (C) Linux Foundation 2008-2013 * Unified interface for all slab allocators */ #ifndef _LINUX_SLAB_H #define _LINUX_SLAB_H #include <linux/gfp.h> #include <linux/overflow.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/percpu-refcount.h> /* * Flags to pass to kmem_cache_create(). * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. */ /* DEBUG: Perform (expensive) checks on alloc/free */ #define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U) /* DEBUG: Red zone objs in a cache */ #define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U) /* DEBUG: Poison objects */ #define SLAB_POISON ((slab_flags_t __force)0x00000800U) /* Align objs on cache lines */ #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U) /* Use GFP_DMA memory */ #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U) /* Use GFP_DMA32 memory */ #define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U) /* DEBUG: Store the last owner for bug hunting */ #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U) /* Panic if kmem_cache_create() fails */ #define SLAB_PANIC ((slab_flags_t __force)0x00040000U) /* * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS! * * This delays freeing the SLAB page by a grace period, it does _NOT_ * delay object freeing. This means that if you do kmem_cache_free() * that memory location is free to be reused at any time. Thus it may * be possible to see another object there in the same RCU grace period. * * This feature only ensures the memory location backing the object * stays valid, the trick to using this is relying on an independent * object validation pass. Something like: * * rcu_read_lock() * again: * obj = lockless_lookup(key); * if (obj) { * if (!try_get_ref(obj)) // might fail for free objects * goto again; * * if (obj->key != key) { // not the object we expected * put_ref(obj); * goto again; * } * } * rcu_read_unlock(); * * This is useful if we need to approach a kernel structure obliquely, * from its address obtained without the usual locking. We can lock * the structure to stabilize it and check it's still at the given address, * only if we can be sure that the memory has not been meanwhile reused * for some other kind of object (which our subsystem's lock might corrupt). * * rcu_read_lock before reading the address, then rcu_read_unlock after * taking the spinlock within the structure expected at that address. * * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU. 
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
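 *
 * Example (an illustrative sketch; "struct mystruct" and its "data"
 * field are hypothetical):
 *
 *	struct kmem_cache *mystruct_cache =
 *		KMEM_CACHE_USERCOPY(mystruct, SLAB_HWCACHE_ALIGN, data);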
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
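 *
 * Worked example (informative): with 4 KiB pages, SLUB's defaults give
 * KMALLOC_MIN_SIZE = 8 bytes (KMALLOC_SHIFT_LOW = 3) and
 * KMALLOC_MAX_CACHE_SIZE = 8 KiB (an order-1 page); anything larger
 * bypasses the kmalloc caches and is handed to the page allocator.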
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up on using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
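 *
 * Example (sketch):
 *
 *	void *objs[2];
 *
 *	objs[0] = kmalloc(32, GFP_KERNEL);
 *	objs[1] = kmalloc(64, GFP_KERNEL);
 *	...
 *	kfree_bulk(ARRAY_SIZE(objs), objs);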
*/ static __always_inline void kfree_bulk(size_t size, void **p) { kmem_cache_free_bulk(NULL, size, p); } #ifdef CONFIG_NUMA void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc; void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; #else static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) { return __kmalloc(size, flags); } static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) { return kmem_cache_alloc(s, flags); } #endif #ifdef CONFIG_TRACING extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc; #ifdef CONFIG_NUMA extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size) __assume_slab_alignment __malloc; #else static __always_inline void * kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size) { return kmem_cache_alloc_trace(s, gfpflags, size); } #endif /* CONFIG_NUMA */ #else /* CONFIG_TRACING */ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size) { void *ret = kmem_cache_alloc(s, flags); ret = kasan_kmalloc(s, ret, size, flags); return ret; } static __always_inline void * kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size) { void *ret = kmem_cache_alloc_node(s, gfpflags, node); ret = kasan_kmalloc(s, ret, size, gfpflags); return ret; } #endif /* CONFIG_TRACING */ extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; #ifdef CONFIG_TRACING extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; #else static __always_inline void * kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) { return kmalloc_order(size, flags, order); } #endif static __always_inline void *kmalloc_large(size_t size, gfp_t flags) { unsigned int order = get_order(size); return kmalloc_order_trace(size, flags, order); } /** * kmalloc - allocate memory * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. * * kmalloc is the normal method of allocating memory * for objects smaller than page size in the kernel. * * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN * bytes. For @size of power of two bytes, the alignment is also guaranteed * to be at least to the size. * * The @flags argument may be one of the GFP flags defined at * include/linux/gfp.h and described at * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>` * * The recommended usage of the @flags is described at * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>` * * Below is a brief outline of the most useful GFP flags * * %GFP_KERNEL * Allocate normal kernel ram. May sleep. * * %GFP_NOWAIT * Allocation will not sleep. * * %GFP_ATOMIC * Allocation will not sleep. May use emergency pools. * * %GFP_HIGHUSER * Allocate memory from high memory on behalf of user. * * Also it is possible to set different flags by OR'ing * in one or more of the following additional @flags: * * %__GFP_HIGH * This allocation has high priority and may use emergency pools. * * %__GFP_NOFAIL * Indicate that this allocation is in no way allowed to fail * (think twice before using). * * %__GFP_NORETRY * If memory is not immediately available, * then give up at once. 
* * %__GFP_NOWARN * If allocation fails, don't issue any warnings. * * %__GFP_RETRY_MAYFAIL * Try really hard to satisfy the allocation, but fail * eventually. */ static __always_inline void *kmalloc(size_t size, gfp_t flags) { if (__builtin_constant_p(size)) { #ifndef CONFIG_SLOB unsigned int index; #endif if (size > KMALLOC_MAX_CACHE_SIZE) return kmalloc_large(size, flags); #ifndef CONFIG_SLOB index = kmalloc_index(size); if (!index) return ZERO_SIZE_PTR; return kmem_cache_alloc_trace( kmalloc_caches[kmalloc_type(flags)][index], flags, size); #endif } return __kmalloc(size, flags); } /* * Determine the size used for the nth kmalloc cache. * Return the size, or 0 if a kmalloc cache for that * size does not exist. */ static __always_inline unsigned int kmalloc_size(unsigned int n) { #ifndef CONFIG_SLOB if (n > 2) return 1U << n; if (n == 1 && KMALLOC_MIN_SIZE <= 32) return 96; if (n == 2 && KMALLOC_MIN_SIZE <= 64) return 192; #endif return 0; } static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) { #ifndef CONFIG_SLOB if (__builtin_constant_p(size) && size <= KMALLOC_MAX_CACHE_SIZE) { unsigned int i = kmalloc_index(size); if (!i) return ZERO_SIZE_PTR; return kmem_cache_alloc_node_trace( kmalloc_caches[kmalloc_type(flags)][i], flags, node, size); } #endif return __kmalloc_node(size, flags, node); } int memcg_update_all_caches(int num_memcgs); /** * kmalloc_array - allocate memory for an array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; if (__builtin_constant_p(n) && __builtin_constant_p(size)) return kmalloc(bytes, flags); return __kmalloc(bytes, flags); } /** * kcalloc - allocate memory for an array. The memory is set to zero. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) { return kmalloc_array(n, size, flags | __GFP_ZERO); } /* * kmalloc_track_caller is a special version of kmalloc that records the * calling function of the routine calling it for slab leak tracking instead * of just the calling function (confusing, eh?). * It's useful when the call to kmalloc comes from a widely-used standard * allocator where we care about the real place the memory allocation * request comes from.
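 *
 * Illustrative sketch, not part of the original header (my_zalloc() is a
 * hypothetical wrapper): without the _track_caller variant, every leak
 * report would blame my_zalloc() itself rather than its callers:
 *
 *	static void *my_zalloc(size_t len)
 *	{
 *		return kmalloc_track_caller(len, GFP_KERNEL | __GFP_ZERO);
 *	}
 *
 * _RET_IP_ is evaluated inside my_zalloc(), so the recorded caller is
 * whoever invoked my_zalloc().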
*/ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); #define kmalloc_track_caller(size, flags) \ __kmalloc_track_caller(size, flags, _RET_IP_) static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; if (__builtin_constant_p(n) && __builtin_constant_p(size)) return kmalloc_node(bytes, flags, node); return __kmalloc_node(bytes, flags, node); } static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) { return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); } #ifdef CONFIG_NUMA extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ _RET_IP_) #else /* CONFIG_NUMA */ #define kmalloc_node_track_caller(size, flags, node) \ kmalloc_track_caller(size, flags) #endif /* CONFIG_NUMA */ /* * Shortcuts */ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) { return kmem_cache_alloc(k, flags | __GFP_ZERO); } /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate (see kmalloc). */ static inline void *kzalloc(size_t size, gfp_t flags) { return kmalloc(size, flags | __GFP_ZERO); } /** * kzalloc_node - allocate zeroed memory from a particular memory node. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate (see kmalloc). * @node: memory node from which to allocate */ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) { return kmalloc_node(size, flags | __GFP_ZERO, node); } unsigned int kmem_cache_size(struct kmem_cache *s); void __init kmem_cache_init_late(void); #if defined(CONFIG_SMP) && defined(CONFIG_SLAB) int slab_prepare_cpu(unsigned int cpu); int slab_dead_cpu(unsigned int cpu); #else #define slab_prepare_cpu NULL #define slab_dead_cpu NULL #endif #endif /* _LINUX_SLAB_H */ io-64-nonatomic-hi-lo.h 0000644 00000004630 14722070374 0010557 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IO_64_NONATOMIC_HI_LO_H_ #define _LINUX_IO_64_NONATOMIC_HI_LO_H_ #include <linux/io.h> #include <asm-generic/int-ll64.h> static inline __u64 hi_lo_readq(const volatile void __iomem *addr) { const volatile u32 __iomem *p = addr; u32 low, high; high = readl(p + 1); low = readl(p); return low + ((u64)high << 32); } static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) { writel(val >> 32, addr + 4); writel(val, addr); } static inline __u64 hi_lo_readq_relaxed(const volatile void __iomem *addr) { const volatile u32 __iomem *p = addr; u32 low, high; high = readl_relaxed(p + 1); low = readl_relaxed(p); return low + ((u64)high << 32); } static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) { writel_relaxed(val >> 32, addr + 4); writel_relaxed(val, addr); } #ifndef readq #define readq hi_lo_readq #endif #ifndef writeq #define writeq hi_lo_writeq #endif #ifndef readq_relaxed #define readq_relaxed hi_lo_readq_relaxed #endif #ifndef writeq_relaxed #define writeq_relaxed hi_lo_writeq_relaxed #endif #ifndef ioread64_hi_lo #define ioread64_hi_lo ioread64_hi_lo static inline u64 ioread64_hi_lo(void __iomem *addr) { u32 low, high; high = ioread32(addr + sizeof(u32)); low = ioread32(addr); return low + ((u64)high << 32); } #endif #ifndef iowrite64_hi_lo #define iowrite64_hi_lo iowrite64_hi_lo static inline void iowrite64_hi_lo(u64 val, 
void __iomem *addr) { iowrite32(val >> 32, addr + sizeof(u32)); iowrite32(val, addr); } #endif #ifndef ioread64be_hi_lo #define ioread64be_hi_lo ioread64be_hi_lo static inline u64 ioread64be_hi_lo(void __iomem *addr) { u32 low, high; high = ioread32be(addr); low = ioread32be(addr + sizeof(u32)); return low + ((u64)high << 32); } #endif #ifndef iowrite64be_hi_lo #define iowrite64be_hi_lo iowrite64be_hi_lo static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr) { iowrite32be(val >> 32, addr); iowrite32be(val, addr + sizeof(u32)); } #endif #ifndef ioread64 #define ioread64_is_nonatomic #define ioread64 ioread64_hi_lo #endif #ifndef iowrite64 #define iowrite64_is_nonatomic #define iowrite64 iowrite64_hi_lo #endif #ifndef ioread64be #define ioread64be_is_nonatomic #define ioread64be ioread64be_hi_lo #endif #ifndef iowrite64be #define iowrite64be_is_nonatomic #define iowrite64be iowrite64be_hi_lo #endif #endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ libata.h 0000644 00000202147 14722070374 0006165 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2003-2005 Red Hat, Inc. All rights reserved. * Copyright 2003-2005 Jeff Garzik * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/driver-api/libata.rst */ #ifndef __LINUX_LIBATA_H__ #define __LINUX_LIBATA_H__ #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/io.h> #include <linux/ata.h> #include <linux/workqueue.h> #include <scsi/scsi_host.h> #include <linux/acpi.h> #include <linux/cdrom.h> #include <linux/sched.h> #include <linux/async.h> /* * Define if arch has non-standard setup. This is a _PCI_ standard * not a legacy or ISA standard. */ #ifdef CONFIG_ATA_NONSTANDARD #include <asm/libata-portmap.h> #else #define ATA_PRIMARY_IRQ(dev) 14 #define ATA_SECONDARY_IRQ(dev) 15 #endif /* * compile-time options: to be removed as soon as all the drivers are * converted to the new debugging mechanism */ #undef ATA_DEBUG /* debugging output */ #undef ATA_VERBOSE_DEBUG /* yet more debugging output */ #undef ATA_IRQ_TRAP /* define to ack screaming irqs */ #undef ATA_NDEBUG /* define to disable quick runtime checks */ /* note: prints function name for you */ #ifdef ATA_DEBUG #define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #ifdef ATA_VERBOSE_DEBUG #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #else #define VPRINTK(fmt, args...) #endif /* ATA_VERBOSE_DEBUG */ #else #define DPRINTK(fmt, args...) #define VPRINTK(fmt, args...) #endif /* ATA_DEBUG */ #define BPRINTK(fmt, args...) 
if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) #define ata_print_version_once(dev, version) \ ({ \ static bool __print_once; \ \ if (!__print_once) { \ __print_once = true; \ ata_print_version(dev, version); \ } \ }) /* NEW: debug levels */ #define HAVE_LIBATA_MSG 1 enum { ATA_MSG_DRV = 0x0001, ATA_MSG_INFO = 0x0002, ATA_MSG_PROBE = 0x0004, ATA_MSG_WARN = 0x0008, ATA_MSG_MALLOC = 0x0010, ATA_MSG_CTL = 0x0020, ATA_MSG_INTR = 0x0040, ATA_MSG_ERR = 0x0080, }; #define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV) #define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO) #define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE) #define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN) #define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC) #define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL) #define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR) #define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR) static inline u32 ata_msg_init(int dval, int default_msg_enable_bits) { if (dval < 0 || dval >= (sizeof(u32) * 8)) return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */ if (!dval) return 0; return (1 << dval) - 1; } /* defines only for the constants which don't work well as enums */ #define ATA_TAG_POISON 0xfafbfcfdU enum { /* various global constants */ LIBATA_MAX_PRD = ATA_MAX_PRD / 2, LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */ ATA_DEF_QUEUE = 1, ATA_MAX_QUEUE = 32, ATA_TAG_INTERNAL = ATA_MAX_QUEUE, ATA_SHORT_PAUSE = 16, ATAPI_MAX_DRAIN = 16 << 10, ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, ATA_SHT_EMULATED = 1, ATA_SHT_THIS_ID = -1, /* struct ata_taskfile flags */ ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ ATA_TFLAG_FUA = (1 << 5), /* enable FUA */ ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */ /* struct ata_device stuff */ ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */ ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */ ATA_DFLAG_FLUSH_EXT = (1 << 4), /* do FLUSH_EXT instead of FLUSH */ ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */ ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */ ATA_DFLAG_AN = (1 << 7), /* AN configured */ ATA_DFLAG_TRUSTED = (1 << 8), /* device supports trusted send/recv */ ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */ ATA_DFLAG_CFG_MASK = (1 << 12) - 1, ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */ ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */ ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */ ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */ ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */ ATA_DFLAG_INIT_MASK = (1 << 24) - 1, ATA_DFLAG_DETACH = (1 << 24), ATA_DFLAG_DETACHED = (1 << 25), ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */ ATA_DFLAG_DEVSLP = (1 << 27), /* device supports 
Device Sleep */ ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */ ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */ ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */ ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ ATA_DEV_ATAPI = 3, /* ATAPI device */ ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */ ATA_DEV_PMP = 5, /* SATA port multiplier */ ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ ATA_DEV_SEMB = 7, /* SEMB */ ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ ATA_DEV_ZAC = 9, /* ZAC device */ ATA_DEV_ZAC_UNSUP = 10, /* ZAC device (unsupported) */ ATA_DEV_NONE = 11, /* no device */ /* struct ata_link flags */ ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB, ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */ ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */ ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */ ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */ /* struct ata_port flags */ ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ /* (doesn't imply presence) */ ATA_FLAG_SATA = (1 << 1), ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */ ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD * doesn't handle PIO interrupts */ ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */ ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */ ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */ ATA_FLAG_DEBUGMSG = (1 << 13), ATA_FLAG_FPDMA_AA = (1 << 14), /* driver supports Auto-Activate */ ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */ ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */ ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ ATA_FLAG_AN = (1 << 18), /* controller supports AN */ ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ ATA_FLAG_FPDMA_AUX = (1 << 20), /* controller supports H2DFIS aux field */ ATA_FLAG_EM = (1 << 21), /* driver supports enclosure * management */ ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity * led */ ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ ATA_FLAG_SAS_HOST = (1 << 24), /* SAS host */ /* bits 24:31 of ap->flags are reserved for LLD specific flags */ /* struct ata_port pflags */ ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */ ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */ ATA_PFLAG_UNLOADING = (1 << 9), 
/* driver is being unloaded */ ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */ ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */ ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */ ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ ATA_PFLAG_EXTERNAL = (1 << 22), /* eSATA/external port */ /* struct ata_queued_cmd flags */ ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */ ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */ ATA_QCFLAG_IO = (1 << 3), /* standard IO command */ ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */ ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */ /* host set flags */ ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */ ATA_HOST_STARTED = (1 << 1), /* Host started */ ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */ ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */ /* bits 24:31 of host->flags are reserved for LLD specific flags */ /* various lengths of time */ ATA_TMOUT_BOOT = 30000, /* heuristic */ ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ ATA_TMOUT_INTERNAL_QUICK = 5000, ATA_TMOUT_MAX_PARK = 30000, /* * GoVault needs 2s and iVDR disk HHD424020F7SV00 800ms. 2s * is too much without parallel probing. Use 2s if parallel * probing is available, 800ms otherwise. */ ATA_TMOUT_FF_WAIT_LONG = 2000, ATA_TMOUT_FF_WAIT = 800, /* Spec mandates to wait for ">= 2ms" before checking status * after reset. We wait 150ms, because that was the magic * delay used for ATAPI devices in Hale Landis's ATADRVR, for * the period of time between when the ATA command register is * written, and then status is checked. Because waiting for * "a while" before checking status is fine, post SRST, we * perform this magic delay here as well. * * Old drivers/ide uses the 2mS rule and then waits for ready. */ ATA_WAIT_AFTER_RESET = 150, /* If PMP is supported, we have to do follow-up SRST. As some * PMPs don't send D2H Reg FIS after hardreset, LLDs are * advised to wait only for the following duration before * doing SRST. */ ATA_TMOUT_PMP_SRST_WAIT = 10000, /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might * be a spurious PHY event, so ignore the first PHY event that * occurs within 10s after the policy change.
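 *
 * Illustrative sketch, assumed to mirror the corresponding check in
 * libata-core.c (it is not part of this header): EH can suppress such
 * an event with
 *
 *	if ((link->flags & ATA_LFLAG_CHANGED) &&
 *	    time_before(jiffies, link->last_lpm_change +
 *			msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY)))
 *		return true;
 *
 * i.e. a PHY event arriving inside the window is ignored rather than
 * treated as a hotplug/error condition.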
*/ ATA_TMOUT_SPURIOUS_PHY = 10000, /* ATA bus states */ BUS_UNKNOWN = 0, BUS_DMA = 1, BUS_IDLE = 2, BUS_NOINTR = 3, BUS_NODATA = 4, BUS_TIMER = 5, BUS_PIO = 6, BUS_EDD = 7, BUS_IDENTIFY = 8, BUS_PACKET = 9, /* SATA port states */ PORT_UNKNOWN = 0, PORT_ENABLED = 1, PORT_DISABLED = 2, /* encoding various smaller bitmaps into a single * unsigned long bitmap */ ATA_NR_PIO_MODES = 7, ATA_NR_MWDMA_MODES = 5, ATA_NR_UDMA_MODES = 8, ATA_SHIFT_PIO = 0, ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES, ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES, ATA_SHIFT_PRIO = 6, ATA_PRIO_HIGH = 2, /* size of buffer to pad xfers ending on unaligned boundaries */ ATA_DMA_PAD_SZ = 4, /* ering size */ ATA_ERING_SIZE = 32, /* return values for ->qc_defer */ ATA_DEFER_LINK = 1, ATA_DEFER_PORT = 2, /* desc_len for ata_eh_info and context */ ATA_EH_DESC_LEN = 80, /* reset / recovery action types */ ATA_EH_REVALIDATE = (1 << 0), ATA_EH_SOFTRESET = (1 << 1), /* meaningful only in ->prereset */ ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, ATA_EH_ENABLE_LINK = (1 << 3), ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | ATA_EH_ENABLE_LINK, /* ata_eh_info->flags */ ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ ATA_EHI_QUIET = (1 << 3), /* be quiet */ ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */ ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */ ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */ ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */ ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */ ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, /* mask of flags to transfer *to* the slave link */ ATA_EHI_TO_SLAVE_MASK = ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, /* max tries if error condition is still set after ->error_handler */ ATA_EH_MAX_TRIES = 5, /* sometimes resuming a link requires several retries */ ATA_LINK_RESUME_TRIES = 5, /* how hard are we gonna try to probe/recover devices */ ATA_PROBE_MAX_TRIES = 3, ATA_EH_DEV_TRIES = 3, ATA_EH_PMP_TRIES = 5, ATA_EH_PMP_LINK_TRIES = 3, SATA_PMP_RW_TIMEOUT = 3000, /* PMP read/write timeout */ /* This should match the actual table size of * ata_eh_cmd_timeout_table in libata-eh.c. */ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7, /* Horkage types.
May be set by libata or controller on drives (some horkage may be drive/controller pair dependent) */ ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */ ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */ ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */ ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */ ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */ ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands not multiple of 16 bytes */ ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */ /* ATAPI command types */ ATAPI_READ = 0, /* READs */ ATAPI_WRITE = 1, /* WRITEs */ ATAPI_READ_CD = 2, /* READ CD [MSF] */ ATAPI_PASS_THRU = 3, /* SAT pass-thru */ ATAPI_MISC = 4, /* the rest */ /* Timing constants */ ATA_TIMING_SETUP = (1 << 0), ATA_TIMING_ACT8B = (1 << 1), ATA_TIMING_REC8B = (1 << 2), ATA_TIMING_CYC8B = (1 << 3), ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | ATA_TIMING_CYC8B, ATA_TIMING_ACTIVE = (1 << 4), ATA_TIMING_RECOVER = (1 << 5), ATA_TIMING_DMACK_HOLD = (1 << 6), ATA_TIMING_CYCLE = (1 << 7), ATA_TIMING_UDMA = (1 << 8), ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | ATA_TIMING_REC8B | ATA_TIMING_CYC8B | ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | ATA_TIMING_DMACK_HOLD | ATA_TIMING_CYCLE | ATA_TIMING_UDMA, /* ACPI constants */ ATA_ACPI_FILTER_SETXFER = 1 << 0, ATA_ACPI_FILTER_LOCK = 1 << 1, ATA_ACPI_FILTER_DIPM = 1 << 2, ATA_ACPI_FILTER_FPDMA_OFFSET = 1 << 3, /* FPDMA non-zero offset */ ATA_ACPI_FILTER_FPDMA_AA = 1 << 4, /* FPDMA auto activate */ ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER | ATA_ACPI_FILTER_LOCK | ATA_ACPI_FILTER_DIPM, }; enum ata_xfer_mask { ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1) << ATA_SHIFT_PIO, ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1) << ATA_SHIFT_MWDMA, ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1) << ATA_SHIFT_UDMA, }; enum hsm_task_states { HSM_ST_IDLE, /* no command ongoing */ HSM_ST_FIRST, /* (waiting the device to) write CDB or first data block */ HSM_ST, /* (waiting the device to) transfer data */ HSM_ST_LAST, /*
(waiting the device to) complete command */ HSM_ST_ERR, /* error */ }; enum ata_completion_errors { AC_ERR_OK = 0, /* no error */ AC_ERR_DEV = (1 << 0), /* device reported error */ AC_ERR_HSM = (1 << 1), /* host state machine violation */ AC_ERR_TIMEOUT = (1 << 2), /* timeout */ AC_ERR_MEDIA = (1 << 3), /* media error */ AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */ AC_ERR_HOST_BUS = (1 << 5), /* host bus error */ AC_ERR_SYSTEM = (1 << 6), /* system error */ AC_ERR_INVALID = (1 << 7), /* invalid argument */ AC_ERR_OTHER = (1 << 8), /* unknown */ AC_ERR_NODEV_HINT = (1 << 9), /* polling device detection hint */ AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */ }; /* * Link power management policy: If you alter this, you also need to * alter libata-scsi.c (for the ascii descriptions) */ enum ata_lpm_policy { ATA_LPM_UNKNOWN, ATA_LPM_MAX_POWER, ATA_LPM_MED_POWER, ATA_LPM_MED_POWER_WITH_DIPM, /* Med power + DIPM as win IRST does */ ATA_LPM_MIN_POWER_WITH_PARTIAL, /* Min Power + partial and slumber */ ATA_LPM_MIN_POWER, /* Min power + no partial (slumber only) */ }; enum ata_lpm_hints { ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */ ATA_LPM_HIPM = (1 << 1), /* may use HIPM */ ATA_LPM_WAKE_ONLY = (1 << 2), /* only wake up link */ }; /* forward declarations */ struct scsi_device; struct ata_port_operations; struct ata_port; struct ata_link; struct ata_queued_cmd; /* typedefs */ typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); typedef int (*ata_prereset_fn_t)(struct ata_link *link, unsigned long deadline); typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes, unsigned long deadline); typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); extern struct device_attribute dev_attr_link_power_management_policy; extern struct device_attribute dev_attr_unload_heads; extern struct device_attribute dev_attr_ncq_prio_enable; extern struct device_attribute dev_attr_em_message_type; extern struct device_attribute dev_attr_em_message; extern struct device_attribute dev_attr_sw_activity; enum sw_activity { OFF, BLINK_ON, BLINK_OFF, }; struct ata_taskfile { unsigned long flags; /* ATA_TFLAG_xxx */ u8 protocol; /* ATA_PROT_xxx */ u8 ctl; /* control reg */ u8 hob_feature; /* additional data */ u8 hob_nsect; /* to support LBA48 */ u8 hob_lbal; u8 hob_lbam; u8 hob_lbah; u8 feature; u8 nsect; u8 lbal; u8 lbam; u8 lbah; u8 device; u8 command; /* IO operation */ u32 auxiliary; /* auxiliary field */ /* from SATA 3.1 and */ /* ATA-8 ACS-3 */ }; #ifdef CONFIG_ATA_SFF struct ata_ioports { void __iomem *cmd_addr; void __iomem *data_addr; void __iomem *error_addr; void __iomem *feature_addr; void __iomem *nsect_addr; void __iomem *lbal_addr; void __iomem *lbam_addr; void __iomem *lbah_addr; void __iomem *device_addr; void __iomem *status_addr; void __iomem *command_addr; void __iomem *altstatus_addr; void __iomem *ctl_addr; #ifdef CONFIG_ATA_BMDMA void __iomem *bmdma_addr; #endif /* CONFIG_ATA_BMDMA */ void __iomem *scr_addr; }; #endif /* CONFIG_ATA_SFF */ struct ata_host { spinlock_t lock; struct device *dev; void __iomem * const *iomap; unsigned int n_ports; unsigned int n_tags; /* nr of NCQ tags */ void *private_data; struct ata_port_operations *ops; unsigned long flags; struct kref kref; struct mutex eh_mutex; struct task_struct *eh_owner; struct ata_port *simplex_claimed; /* channel owning the DMA */ struct ata_port *ports[0]; }; struct ata_queued_cmd { struct ata_port *ap; struct ata_device *dev; struct scsi_cmnd *scsicmd; void (*scsidone)(struct 
scsi_cmnd *); struct ata_taskfile tf; u8 cdb[ATAPI_CDB_LEN]; unsigned long flags; /* ATA_QCFLAG_xxx */ unsigned int tag; /* libata core tag */ unsigned int hw_tag; /* driver tag */ unsigned int n_elem; unsigned int orig_n_elem; int dma_dir; unsigned int sect_size; unsigned int nbytes; unsigned int extrabytes; unsigned int curbytes; struct scatterlist sgent; struct scatterlist *sg; struct scatterlist *cursg; unsigned int cursg_ofs; unsigned int err_mask; struct ata_taskfile result_tf; ata_qc_cb_t complete_fn; void *private_data; void *lldd_task; }; struct ata_port_stats { unsigned long unhandled_irq; unsigned long idle_irq; unsigned long rw_reqbuf; }; struct ata_ering_entry { unsigned int eflags; unsigned int err_mask; u64 timestamp; }; struct ata_ering { int cursor; struct ata_ering_entry ring[ATA_ERING_SIZE]; }; struct ata_device { struct ata_link *link; unsigned int devno; /* 0 or 1 */ unsigned int horkage; /* List of broken features */ unsigned long flags; /* ATA_DFLAG_xxx */ struct scsi_device *sdev; /* attached SCSI device */ void *private_data; #ifdef CONFIG_ATA_ACPI union acpi_object *gtf_cache; unsigned int gtf_filter; #endif #ifdef CONFIG_SATA_ZPODD void *zpodd; #endif struct device tdev; /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ u64 n_sectors; /* size of device, if ATA */ u64 n_native_sectors; /* native size, if ATA */ unsigned int class; /* ATA_DEV_xxx */ unsigned long unpark_deadline; u8 pio_mode; u8 dma_mode; u8 xfer_mode; unsigned int xfer_shift; /* ATA_SHIFT_xxx */ unsigned int multi_count; /* sectors count for READ/WRITE MULTIPLE */ unsigned int max_sectors; /* per-device max sectors */ unsigned int cdb_len; /* per-dev xfer mask */ unsigned long pio_mask; unsigned long mwdma_mask; unsigned long udma_mask; /* for CHS addressing */ u16 cylinders; /* Number of cylinders */ u16 heads; /* Number of heads */ u16 sectors; /* Number of sectors per track */ union { u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ } ____cacheline_aligned; /* DEVSLP Timing Variables from Identify Device Data Log */ u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; /* NCQ send and receive log subcommand support */ u8 ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_SIZE]; u8 ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_SIZE]; /* ZAC zone configuration */ u32 zac_zoned_cap; u32 zac_zones_optimal_open; u32 zac_zones_optimal_nonseq; u32 zac_zones_max_open; /* error history */ int spdn_cnt; /* ering is CLEAR_END, read comment above CLEAR_END */ struct ata_ering ering; }; /* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are * cleared to zero on ata_dev_init(). 
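 *
 * Illustrative sketch, assumed to match what ata_dev_init() does (the
 * implementation lives in libata-core.c, not in this header): the whole
 * window is wiped with a single memset() over the offsetof() range
 * defined right below:
 *
 *	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
 *	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
 *
 * which is why n_sectors must remain the first and ering the last field
 * of the to-be-cleared region.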
*/ #define ATA_DEVICE_CLEAR_BEGIN offsetof(struct ata_device, n_sectors) #define ATA_DEVICE_CLEAR_END offsetof(struct ata_device, ering) struct ata_eh_info { struct ata_device *dev; /* offending device */ u32 serror; /* SError from LLDD */ unsigned int err_mask; /* port-wide err_mask */ unsigned int action; /* ATA_EH_* action mask */ unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */ unsigned int flags; /* ATA_EHI_* flags */ unsigned int probe_mask; char desc[ATA_EH_DESC_LEN]; int desc_len; }; struct ata_eh_context { struct ata_eh_info i; int tries[ATA_MAX_DEVICES]; int cmd_timeout_idx[ATA_MAX_DEVICES] [ATA_EH_CMD_TIMEOUT_TABLE_SIZE]; unsigned int classes[ATA_MAX_DEVICES]; unsigned int did_probe_mask; unsigned int unloaded_mask; unsigned int saved_ncq_enabled; u8 saved_xfer_mode[ATA_MAX_DEVICES]; /* timestamp for the last reset attempt or success */ unsigned long last_reset; }; struct ata_acpi_drive { u32 pio; u32 dma; } __packed; struct ata_acpi_gtm { struct ata_acpi_drive drive[2]; u32 flags; } __packed; struct ata_link { struct ata_port *ap; int pmp; /* port multiplier port # */ struct device tdev; unsigned int active_tag; /* active tag on this link */ u32 sactive; /* active NCQ commands */ unsigned int flags; /* ATA_LFLAG_xxx */ u32 saved_scontrol; /* SControl on probe */ unsigned int hw_sata_spd_limit; unsigned int sata_spd_limit; unsigned int sata_spd; /* current SATA PHY speed */ enum ata_lpm_policy lpm_policy; /* record runtime error info, protected by host_set lock */ struct ata_eh_info eh_info; /* EH context */ struct ata_eh_context eh_context; struct ata_device device[ATA_MAX_DEVICES]; unsigned long last_lpm_change; /* when last LPM change happened */ }; #define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag) #define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0]) struct ata_port { struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ struct ata_port_operations *ops; spinlock_t *lock; /* Flags owned by the EH context. Only EH should touch these once the port is active */ unsigned long flags; /* ATA_FLAG_xxx */ /* Flags that change dynamically, protected by ap->lock */ unsigned int pflags; /* ATA_PFLAG_xxx */ unsigned int print_id; /* user visible unique port ID */ unsigned int local_port_no; /* host local port num */ unsigned int port_no; /* 0 based port no. 
inside the host */ #ifdef CONFIG_ATA_SFF struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ u8 ctl; /* cache of ATA control register */ u8 last_ctl; /* Cache last written value */ struct ata_link* sff_pio_task_link; /* link currently used */ struct delayed_work sff_pio_task; #ifdef CONFIG_ATA_BMDMA struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ dma_addr_t bmdma_prd_dma; /* and its DMA mapping */ #endif /* CONFIG_ATA_BMDMA */ #endif /* CONFIG_ATA_SFF */ unsigned int pio_mask; unsigned int mwdma_mask; unsigned int udma_mask; unsigned int cbl; /* cable type; ATA_CBL_xxx */ struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1]; unsigned long sas_tag_allocated; /* for sas tag allocation only */ u64 qc_active; int nr_active_links; /* #links with active qcs */ unsigned int sas_last_tag; /* track next tag hw expects */ struct ata_link link; /* host default link */ struct ata_link *slave_link; /* see ata_slave_link_init() */ int nr_pmp_links; /* nr of available PMP links */ struct ata_link *pmp_link; /* array of PMP links */ struct ata_link *excl_link; /* for PMP qc exclusion */ struct ata_port_stats stats; struct ata_host *host; struct device *dev; struct device tdev; struct mutex scsi_scan_mutex; struct delayed_work hotplug_task; struct work_struct scsi_rescan_task; unsigned int hsm_task_state; u32 msg_enable; struct list_head eh_done_q; wait_queue_head_t eh_wait_q; int eh_tries; struct completion park_req_pending; pm_message_t pm_mesg; enum ata_lpm_policy target_lpm_policy; struct timer_list fastdrain_timer; unsigned long fastdrain_cnt; async_cookie_t cookie; int em_message_type; void *private_data; #ifdef CONFIG_ATA_ACPI struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */ #endif /* owned by EH */ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; }; /* The following initializer overrides a method to NULL whether one of * its parent has the method defined or not. This is equivalent to * ERR_PTR(-ENOENT). Unfortunately, ERR_PTR doesn't render a constant * expression and thus can't be used as an initializer. 
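 *
 * Illustrative sketch, not part of the original header (my_port_ops is
 * hypothetical): a driver masks an inherited method explicitly with
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.qc_prep	= ATA_OP_NULL,
 *	};
 *
 * Here .qc_prep resolves to "no method" even if an ancestor defines
 * one, while every other unset slot is filled in from
 * ata_base_port_ops at host start time.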
*/ #define ATA_OP_NULL (void *)(unsigned long)(-ENOENT) struct ata_port_operations { /* * Command execution */ int (*qc_defer)(struct ata_queued_cmd *qc); int (*check_atapi_dma)(struct ata_queued_cmd *qc); enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc); unsigned int (*qc_issue)(struct ata_queued_cmd *qc); bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); /* * Configuration and exception handling */ int (*cable_detect)(struct ata_port *ap); unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask); void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); void (*dev_config)(struct ata_device *dev); void (*freeze)(struct ata_port *ap); void (*thaw)(struct ata_port *ap); ata_prereset_fn_t prereset; ata_reset_fn_t softreset; ata_reset_fn_t hardreset; ata_postreset_fn_t postreset; ata_prereset_fn_t pmp_prereset; ata_reset_fn_t pmp_softreset; ata_reset_fn_t pmp_hardreset; ata_postreset_fn_t pmp_postreset; void (*error_handler)(struct ata_port *ap); void (*lost_interrupt)(struct ata_port *ap); void (*post_internal_cmd)(struct ata_queued_cmd *qc); void (*sched_eh)(struct ata_port *ap); void (*end_eh)(struct ata_port *ap); /* * Optional features */ int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val); int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val); void (*pmp_attach)(struct ata_port *ap); void (*pmp_detach)(struct ata_port *ap); int (*set_lpm)(struct ata_link *link, enum ata_lpm_policy policy, unsigned hints); /* * Start, stop, suspend and resume */ int (*port_suspend)(struct ata_port *ap, pm_message_t mesg); int (*port_resume)(struct ata_port *ap); int (*port_start)(struct ata_port *ap); void (*port_stop)(struct ata_port *ap); void (*host_stop)(struct ata_host *host); #ifdef CONFIG_ATA_SFF /* * SFF / taskfile oriented ops */ void (*sff_dev_select)(struct ata_port *ap, unsigned int device); void (*sff_set_devctl)(struct ata_port *ap, u8 ctl); u8 (*sff_check_status)(struct ata_port *ap); u8 (*sff_check_altstatus)(struct ata_port *ap); void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf); void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf); void (*sff_exec_command)(struct ata_port *ap, const struct ata_taskfile *tf); unsigned int (*sff_data_xfer)(struct ata_queued_cmd *qc, unsigned char *buf, unsigned int buflen, int rw); void (*sff_irq_on)(struct ata_port *); bool (*sff_irq_check)(struct ata_port *); void (*sff_irq_clear)(struct ata_port *); void (*sff_drain_fifo)(struct ata_queued_cmd *qc); #ifdef CONFIG_ATA_BMDMA void (*bmdma_setup)(struct ata_queued_cmd *qc); void (*bmdma_start)(struct ata_queued_cmd *qc); void (*bmdma_stop)(struct ata_queued_cmd *qc); u8 (*bmdma_status)(struct ata_port *ap); #endif /* CONFIG_ATA_BMDMA */ #endif /* CONFIG_ATA_SFF */ ssize_t (*em_show)(struct ata_port *ap, char *buf); ssize_t (*em_store)(struct ata_port *ap, const char *message, size_t size); ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf); ssize_t (*sw_activity_store)(struct ata_device *dev, enum sw_activity val); ssize_t (*transmit_led_message)(struct ata_port *ap, u32 state, ssize_t size); /* * Obsolete */ void (*phy_reset)(struct ata_port *ap); void (*eng_timeout)(struct ata_port *ap); /* * ->inherits must be the last field and all the preceding * fields must be 
pointers. */ const struct ata_port_operations *inherits; }; struct ata_port_info { unsigned long flags; unsigned long link_flags; unsigned long pio_mask; unsigned long mwdma_mask; unsigned long udma_mask; struct ata_port_operations *port_ops; void *private_data; }; struct ata_timing { unsigned short mode; /* ATA mode */ unsigned short setup; /* t1 */ unsigned short act8b; /* t2 for 8-bit I/O */ unsigned short rec8b; /* t2i for 8-bit I/O */ unsigned short cyc8b; /* t0 for 8-bit I/O */ unsigned short active; /* t2 or tD */ unsigned short recover; /* t2i or tK */ unsigned short dmack_hold; /* tj */ unsigned short cycle; /* t0 */ unsigned short udma; /* t2CYCTYP/2 */ }; /* * Core layer - drivers/ata/libata-core.c */ extern const unsigned long sata_deb_timing_normal[]; extern const unsigned long sata_deb_timing_hotplug[]; extern const unsigned long sata_deb_timing_long[]; extern struct ata_port_operations ata_dummy_port_ops; extern const struct ata_port_info ata_dummy_port_info; static inline bool ata_is_atapi(u8 prot) { return prot & ATA_PROT_FLAG_ATAPI; } static inline bool ata_is_pio(u8 prot) { return prot & ATA_PROT_FLAG_PIO; } static inline bool ata_is_dma(u8 prot) { return prot & ATA_PROT_FLAG_DMA; } static inline bool ata_is_ncq(u8 prot) { return prot & ATA_PROT_FLAG_NCQ; } static inline bool ata_is_data(u8 prot) { return prot & (ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA); } static inline int is_multi_taskfile(struct ata_taskfile *tf) { return (tf->command == ATA_CMD_READ_MULTI) || (tf->command == ATA_CMD_WRITE_MULTI) || (tf->command == ATA_CMD_READ_MULTI_EXT) || (tf->command == ATA_CMD_WRITE_MULTI_EXT) || (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT); } static inline const unsigned long * sata_ehc_deb_timing(struct ata_eh_context *ehc) { if (ehc->i.flags & ATA_EHI_HOTPLUGGED) return sata_deb_timing_hotplug; else return sata_deb_timing_normal; } static inline int ata_port_is_dummy(struct ata_port *ap) { return ap->ops == &ata_dummy_port_ops; } extern int sata_set_spd(struct ata_link *link); extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)); extern int sata_link_debounce(struct ata_link *link, const unsigned long *params, unsigned long deadline); extern int sata_link_resume(struct ata_link *link, const unsigned long *params, unsigned long deadline); extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, bool spm_wakeup); extern int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, unsigned long deadline, bool *online, int (*check_ready)(struct ata_link *)); extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports); extern int ata_slave_link_init(struct ata_port *ap); extern void ata_host_get(struct ata_host *host); extern void ata_host_put(struct ata_host *host); extern int ata_host_start(struct ata_host *host); extern int ata_host_register(struct ata_host *host, struct scsi_host_template *sht); extern int ata_host_activate(struct ata_host *host, int irq, irq_handler_t irq_handler, unsigned long irq_flags, struct scsi_host_template *sht); extern void ata_host_detach(struct ata_host *host); 
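/*
 * Illustrative sketch, not part of the original header: a minimal probe
 * path built from the declarations above; my_port_info, my_irq_handler
 * and my_sht are hypothetical driver objects.
 *
 *	static int my_probe(struct device *dev, int irq)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *		struct ata_host *host;
 *
 *		host = ata_host_alloc_pinfo(dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *		return ata_host_activate(host, irq, my_irq_handler,
 *					 IRQF_SHARED, &my_sht);
 *	}
 *
 * ata_host_activate() bundles ata_host_start(), IRQ setup and
 * ata_host_register(); drivers that need finer control call those
 * steps individually.
 */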
extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *); extern int ata_scsi_detect(struct scsi_host_template *sht); extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, unsigned int cmd, void __user *arg); extern void ata_sas_port_destroy(struct ata_port *); extern struct ata_port *ata_sas_port_alloc(struct ata_host *, struct ata_port_info *, struct Scsi_Host *); extern void ata_sas_async_probe(struct ata_port *ap); extern int ata_sas_sync_probe(struct ata_port *ap); extern int ata_sas_port_init(struct ata_port *); extern int ata_sas_port_start(struct ata_port *ap); extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap); extern void ata_sas_tport_delete(struct ata_port *ap); extern void ata_sas_port_stop(struct ata_port *ap); extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); extern int sata_scr_valid(struct ata_link *link); extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); extern int sata_scr_write(struct ata_link *link, int reg, u32 val); extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); extern bool ata_link_online(struct ata_link *link); extern bool ata_link_offline(struct ata_link *link); #ifdef CONFIG_PM extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); extern void ata_host_resume(struct ata_host *host); extern void ata_sas_port_suspend(struct ata_port *ap); extern void ata_sas_port_resume(struct ata_port *ap); #else static inline void ata_sas_port_suspend(struct ata_port *ap) { } static inline void ata_sas_port_resume(struct ata_port *ap) { } #endif extern int ata_ratelimit(void); extern void ata_msleep(struct ata_port *ap, unsigned int msecs); extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, unsigned long interval, unsigned long timeout); extern int atapi_cmd_type(u8 opcode); extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis); extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); extern unsigned long ata_pack_xfermask(unsigned long pio_mask, unsigned long mwdma_mask, unsigned long udma_mask); extern void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask, unsigned long *mwdma_mask, unsigned long *udma_mask); extern u8 ata_xfer_mask2mode(unsigned long xfer_mask); extern unsigned long ata_xfer_mode2mask(u8 xfer_mode); extern int ata_xfer_mode2shift(unsigned long xfer_mode); extern const char *ata_mode_string(unsigned long xfer_mask); extern unsigned long ata_id_xfermask(const u16 *id); extern int ata_std_qc_defer(struct ata_queued_cmd *qc); extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem); extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); extern void ata_dev_disable(struct ata_device *adev); extern void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); extern void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); extern unsigned int ata_do_dev_read_id(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); extern void ata_qc_complete(struct ata_queued_cmd *qc); extern int 
ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active); extern u64 ata_qc_get_active(struct ata_port *ap); extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd); extern int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]); extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); extern int ata_scsi_slave_config(struct scsi_device *sdev); extern void ata_scsi_slave_destroy(struct scsi_device *sdev); extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth); extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, int queue_depth); extern struct ata_device *ata_dev_pair(struct ata_device *adev); extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q); extern bool sata_lpm_ignore_phy_events(struct ata_link *link); extern int ata_cable_40wire(struct ata_port *ap); extern int ata_cable_80wire(struct ata_port *ap); extern int ata_cable_sata(struct ata_port *ap); extern int ata_cable_ignore(struct ata_port *ap); extern int ata_cable_unknown(struct ata_port *ap); /* Timing helpers */ extern unsigned int ata_pio_need_iordy(const struct ata_device *); extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); extern int ata_timing_compute(struct ata_device *, unsigned short, struct ata_timing *, int, int); extern void ata_timing_merge(const struct ata_timing *, const struct ata_timing *, struct ata_timing *, unsigned int); extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); /* PCI */ #ifdef CONFIG_PCI struct pci_dev; struct pci_bits { unsigned int reg; /* PCI config register to read */ unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ unsigned long mask; unsigned long val; }; extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); extern void ata_pci_shutdown_one(struct pci_dev *pdev); extern void ata_pci_remove_one(struct pci_dev *pdev); #ifdef CONFIG_PM extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev); extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); extern int ata_pci_device_resume(struct pci_dev *pdev); #endif /* CONFIG_PM */ #endif /* CONFIG_PCI */ struct platform_device; extern int ata_platform_remove_one(struct platform_device *pdev); /* * ACPI - drivers/ata/libata-acpi.c */ #ifdef CONFIG_ATA_ACPI static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) { if (ap->pflags & ATA_PFLAG_INIT_GTM_VALID) return &ap->__acpi_init_gtm; return NULL; } int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm); int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm); unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev, const struct ata_acpi_gtm *gtm); int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm); #else static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) { return NULL; } static inline int ata_acpi_stm(const struct ata_port *ap, struct ata_acpi_gtm *stm) { return -ENOSYS; } static inline int ata_acpi_gtm(const struct ata_port *ap, struct ata_acpi_gtm *stm) { return -ENOSYS; } static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, 
const struct ata_acpi_gtm *gtm) { return 0; } static inline int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) { return 0; } #endif /* * EH - drivers/ata/libata-eh.c */ extern void ata_port_schedule_eh(struct ata_port *ap); extern void ata_port_wait_eh(struct ata_port *ap); extern int ata_link_abort(struct ata_link *link); extern int ata_port_abort(struct ata_port *ap); extern int ata_port_freeze(struct ata_port *ap); extern int sata_async_notification(struct ata_port *ap); extern void ata_eh_freeze_port(struct ata_port *ap); extern void ata_eh_thaw_port(struct ata_port *ap); extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); extern void ata_eh_analyze_ncq_error(struct ata_link *link); extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, ata_reset_fn_t softreset, ata_reset_fn_t hardreset, ata_postreset_fn_t postreset); extern void ata_std_error_handler(struct ata_port *ap); extern void ata_std_sched_eh(struct ata_port *ap); extern void ata_std_end_eh(struct ata_port *ap); extern int ata_link_nr_enabled(struct ata_link *link); /* * Base operations to inherit from and initializers for sht * * Operations * * base : Common to all libata drivers. * sata : SATA controllers w/ native interface. * pmp : SATA controllers w/ PMP support. * sff : SFF ATA controllers w/o BMDMA support. * bmdma : SFF ATA controllers w/ BMDMA support. * * sht initializers * * BASE : Common to all libata drivers. The user must set * sg_tablesize and dma_boundary. * PIO : SFF ATA controllers w/ only PIO support. * BMDMA : SFF ATA controllers w/ BMDMA support. sg_tablesize and * dma_boundary are set to BMDMA limits. * NCQ : SATA controllers supporting NCQ. The user must set * sg_tablesize, dma_boundary and can_queue. */ extern const struct ata_port_operations ata_base_port_ops; extern const struct ata_port_operations sata_port_ops; extern struct device_attribute *ata_common_sdev_attrs[]; /* * All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated * by the edge drivers. Because the 'module' field of sht must be the * edge driver's module reference, otherwise the driver can be unloaded * even if the scsi_device is being accessed. 
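 *
 * Illustrative sketch, modelled on how NCQ-capable drivers use these
 * macros (my_sht and "my_drv" are hypothetical, not part of this
 * header):
 *
 *	static struct scsi_host_template my_sht = {
 *		ATA_NCQ_SHT("my_drv"),
 *		.can_queue	= ATA_MAX_QUEUE,
 *		.sg_tablesize	= LIBATA_MAX_PRD,
 *		.dma_boundary	= ATA_DMA_BOUNDARY,
 *	};
 *
 * ATA_BASE_SHT() sets .module = THIS_MODULE, which is what pins the
 * edge driver in memory while a scsi_device is open.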
*/ #define ATA_BASE_SHT(drv_name) \ .module = THIS_MODULE, \ .name = drv_name, \ .ioctl = ata_scsi_ioctl, \ .queuecommand = ata_scsi_queuecmd, \ .can_queue = ATA_DEF_QUEUE, \ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ .this_id = ATA_SHT_THIS_ID, \ .emulated = ATA_SHT_EMULATED, \ .proc_name = drv_name, \ .slave_configure = ata_scsi_slave_config, \ .slave_destroy = ata_scsi_slave_destroy, \ .bios_param = ata_std_bios_param, \ .unlock_native_capacity = ata_scsi_unlock_native_capacity, \ .sdev_attrs = ata_common_sdev_attrs #define ATA_NCQ_SHT(drv_name) \ ATA_BASE_SHT(drv_name), \ .change_queue_depth = ata_scsi_change_queue_depth /* * PMP helpers */ #ifdef CONFIG_SATA_PMP static inline bool sata_pmp_supported(struct ata_port *ap) { return ap->flags & ATA_FLAG_PMP; } static inline bool sata_pmp_attached(struct ata_port *ap) { return ap->nr_pmp_links != 0; } static inline bool ata_is_host_link(const struct ata_link *link) { return link == &link->ap->link || link == link->ap->slave_link; } #else /* CONFIG_SATA_PMP */ static inline bool sata_pmp_supported(struct ata_port *ap) { return false; } static inline bool sata_pmp_attached(struct ata_port *ap) { return false; } static inline bool ata_is_host_link(const struct ata_link *link) { return 1; } #endif /* CONFIG_SATA_PMP */ static inline int sata_srst_pmp(struct ata_link *link) { if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) return SATA_PMP_CTRL_PORT; return link->pmp; } /* * printk helpers */ __printf(3, 4) void ata_port_printk(const struct ata_port *ap, const char *level, const char *fmt, ...); __printf(3, 4) void ata_link_printk(const struct ata_link *link, const char *level, const char *fmt, ...); __printf(3, 4) void ata_dev_printk(const struct ata_device *dev, const char *level, const char *fmt, ...); #define ata_port_err(ap, fmt, ...) \ ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) #define ata_port_warn(ap, fmt, ...) \ ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__) #define ata_port_notice(ap, fmt, ...) \ ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__) #define ata_port_info(ap, fmt, ...) \ ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__) #define ata_port_dbg(ap, fmt, ...) \ ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__) #define ata_link_err(link, fmt, ...) \ ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__) #define ata_link_warn(link, fmt, ...) \ ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__) #define ata_link_notice(link, fmt, ...) \ ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__) #define ata_link_info(link, fmt, ...) \ ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__) #define ata_link_dbg(link, fmt, ...) \ ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__) #define ata_dev_err(dev, fmt, ...) \ ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__) #define ata_dev_warn(dev, fmt, ...) \ ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__) #define ata_dev_notice(dev, fmt, ...) \ ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__) #define ata_dev_info(dev, fmt, ...) \ ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) #define ata_dev_dbg(dev, fmt, ...) 
\ ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__) void ata_print_version(const struct device *dev, const char *version); /* * ata_eh_info helpers */ extern __printf(2, 3) void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...); extern __printf(2, 3) void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...); extern void ata_ehi_clear_desc(struct ata_eh_info *ehi); static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) { ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; ehi->flags |= ATA_EHI_HOTPLUGGED; ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK; ehi->err_mask |= AC_ERR_ATA_BUS; } /* * port description helpers */ extern __printf(2, 3) void ata_port_desc(struct ata_port *ap, const char *fmt, ...); #ifdef CONFIG_PCI extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, const char *name); #endif static inline bool ata_tag_internal(unsigned int tag) { return tag == ATA_TAG_INTERNAL; } static inline bool ata_tag_valid(unsigned int tag) { return tag < ATA_MAX_QUEUE || ata_tag_internal(tag); } #define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \ for ((tag) = 0; (tag) < (max_tag) && \ ({ qc = fn((ap), (tag)); 1; }); (tag)++) \ /* * Internal use only, iterate commands ignoring error handling and * status of 'qc'. */ #define ata_qc_for_each_raw(ap, qc, tag) \ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag) /* * Iterate all potential commands that can be queued */ #define ata_qc_for_each(ap, qc, tag) \ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag) /* * Like ata_qc_for_each, but with the internal tag included */ #define ata_qc_for_each_with_internal(ap, qc, tag) \ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag) /* * device helpers */ static inline unsigned int ata_class_enabled(unsigned int class) { return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || class == ATA_DEV_PMP || class == ATA_DEV_SEMB || class == ATA_DEV_ZAC; } static inline unsigned int ata_class_disabled(unsigned int class) { return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP || class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP || class == ATA_DEV_ZAC_UNSUP; } static inline unsigned int ata_class_absent(unsigned int class) { return !ata_class_enabled(class) && !ata_class_disabled(class); } static inline unsigned int ata_dev_enabled(const struct ata_device *dev) { return ata_class_enabled(dev->class); } static inline unsigned int ata_dev_disabled(const struct ata_device *dev) { return ata_class_disabled(dev->class); } static inline unsigned int ata_dev_absent(const struct ata_device *dev) { return ata_class_absent(dev->class); } /* * link helpers */ static inline int ata_link_max_devices(const struct ata_link *link) { if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) return 2; return 1; } static inline int ata_link_active(struct ata_link *link) { return ata_tag_valid(link->active_tag) || link->sactive; } /* * Iterators * * ATA_LITER_* constants are used to select link iteration mode and * ATA_DITER_* device iteration mode. * * For a custom iteration directly using ata_{link|dev}_next(), if * @link or @dev, respectively, is NULL, the first element is * returned. @dev and @link can be any valid device or link and the * next element according to the iteration mode will be returned. * After the last element, NULL is returned. */ enum ata_link_iter_mode { ATA_LITER_EDGE, /* if present, PMP links only; otherwise, * host link. 
no slave link */ ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */ ATA_LITER_PMP_FIRST, /* PMP links followed by host link, * slave link still comes after host link */ }; enum ata_dev_iter_mode { ATA_DITER_ENABLED, ATA_DITER_ENABLED_REVERSE, ATA_DITER_ALL, ATA_DITER_ALL_REVERSE, }; extern struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, enum ata_link_iter_mode mode); extern struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link, enum ata_dev_iter_mode mode); /* * Shortcut notation for iterations * * ata_for_each_link() iterates over each link of @ap according to * @mode. @link points to the current link in the loop. @link is * NULL after loop termination. ata_for_each_dev() works the same way * except that it iterates over each device of @link. * * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be * specified when using the following shorthand notations. Only the * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be * specified. This not only increases brevity but also makes it * impossible to use ATA_LITER_* for device iteration or vice-versa. */ #define ata_for_each_link(link, ap, mode) \ for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \ (link) = ata_link_next((link), (ap), ATA_LITER_##mode)) #define ata_for_each_dev(dev, link, mode) \ for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \ (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode)) /** * ata_ncq_enabled - Test whether NCQ is enabled * @dev: ATA device to test for * * LOCKING: * spin_lock_irqsave(host lock) * * RETURNS: * 1 if NCQ is enabled for @dev, 0 otherwise. */ static inline int ata_ncq_enabled(struct ata_device *dev) { return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; } static inline bool ata_fpdma_dsm_supported(struct ata_device *dev) { return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] & ATA_LOG_NCQ_SEND_RECV_DSM_TRIM); } static inline bool ata_fpdma_read_log_supported(struct ata_device *dev) { return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET] & ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED); } static inline bool ata_fpdma_zac_mgmt_in_supported(struct ata_device *dev) { return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) && (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET] & ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED); } static inline bool ata_fpdma_zac_mgmt_out_supported(struct ata_device *dev) { return (dev->ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET] & ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT); } static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) { qc->tf.ctl |= ATA_NIEN; } static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap, unsigned int tag) { if (ata_tag_valid(tag)) return &ap->qcmd[tag]; return NULL; } static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap, unsigned int tag) { struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); if (unlikely(!qc) || !ap->ops->error_handler) return qc; if ((qc->flags & (ATA_QCFLAG_ACTIVE | ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE) return qc; return NULL; } static inline unsigned int ata_qc_raw_nbytes(struct ata_queued_cmd *qc) { return qc->nbytes - min(qc->extrabytes, qc->nbytes); } static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf) { memset(tf, 0, sizeof(*tf)); #ifdef CONFIG_ATA_SFF tf->ctl = 
dev->link->ap->ctl; #else tf->ctl = ATA_DEVCTL_OBS; #endif if (dev->devno == 0) tf->device = ATA_DEVICE_OBS; else tf->device = ATA_DEVICE_OBS | ATA_DEV1; } static inline void ata_qc_reinit(struct ata_queued_cmd *qc) { qc->dma_dir = DMA_NONE; qc->sg = NULL; qc->flags = 0; qc->cursg = NULL; qc->cursg_ofs = 0; qc->nbytes = qc->extrabytes = qc->curbytes = 0; qc->n_elem = 0; qc->err_mask = 0; qc->sect_size = ATA_SECT_SIZE; ata_tf_init(qc->dev, &qc->tf); /* init result_tf such that it indicates normal completion */ qc->result_tf.command = ATA_DRDY; qc->result_tf.feature = 0; } static inline int ata_try_flush_cache(const struct ata_device *dev) { return ata_id_wcache_enabled(dev->id) || ata_id_has_flush(dev->id) || ata_id_has_flush_ext(dev->id); } static inline unsigned int ac_err_mask(u8 status) { if (status & (ATA_BUSY | ATA_DRQ)) return AC_ERR_HSM; if (status & (ATA_ERR | ATA_DF)) return AC_ERR_DEV; return 0; } static inline unsigned int __ac_err_mask(u8 status) { unsigned int mask = ac_err_mask(status); if (mask == 0) return AC_ERR_OTHER; return mask; } static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host) { return *(struct ata_port **)&host->hostdata[0]; } static inline int ata_check_ready(u8 status) { if (!(status & ATA_BUSY)) return 1; /* 0xff indicates either no device or device not ready */ if (status == 0xff) return -ENODEV; return 0; } static inline unsigned long ata_deadline(unsigned long from_jiffies, unsigned long timeout_msecs) { return from_jiffies + msecs_to_jiffies(timeout_msecs); } /* Don't open code these in drivers as there are traps. Firstly, the range may change in future hardware and specs; secondly, 0xFF means 'no DMA' but is > UDMA_0. Here be dragons. */ static inline int ata_using_mwdma(struct ata_device *adev) { if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4) return 1; return 0; } static inline int ata_using_udma(struct ata_device *adev) { if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7) return 1; return 0; } static inline int ata_dma_enabled(struct ata_device *adev) { return (adev->dma_mode == 0xFF ?
0 : 1); } /************************************************************************** * PMP - drivers/ata/libata-pmp.c */ #ifdef CONFIG_SATA_PMP extern const struct ata_port_operations sata_pmp_port_ops; extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); extern void sata_pmp_error_handler(struct ata_port *ap); #else /* CONFIG_SATA_PMP */ #define sata_pmp_port_ops sata_port_ops #define sata_pmp_qc_defer_cmd_switch ata_std_qc_defer #define sata_pmp_error_handler ata_std_error_handler #endif /* CONFIG_SATA_PMP */ /************************************************************************** * SFF - drivers/ata/libata-sff.c */ #ifdef CONFIG_ATA_SFF extern const struct ata_port_operations ata_sff_port_ops; extern const struct ata_port_operations ata_bmdma32_port_ops; /* PIO only, sg_tablesize and dma_boundary limits can be removed */ #define ATA_PIO_SHT(drv_name) \ ATA_BASE_SHT(drv_name), \ .sg_tablesize = LIBATA_MAX_PRD, \ .dma_boundary = ATA_DMA_BOUNDARY extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device); extern u8 ata_sff_check_status(struct ata_port *ap); extern void ata_sff_pause(struct ata_port *ap); extern void ata_sff_dma_pause(struct ata_port *ap); extern int ata_sff_busy_sleep(struct ata_port *ap, unsigned long timeout_pat, unsigned long timeout); extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline); extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf); extern void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf, unsigned int buflen, int rw); extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf, unsigned int buflen, int rw); extern void ata_sff_irq_on(struct ata_port *ap); extern void ata_sff_irq_clear(struct ata_port *ap); extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, u8 status, int in_wq); extern void ata_sff_queue_work(struct work_struct *work); extern void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay); extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay); extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); extern unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc); extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance); extern void ata_sff_lost_interrupt(struct ata_port *ap); extern void ata_sff_freeze(struct ata_port *ap); extern void ata_sff_thaw(struct ata_port *ap); extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline); extern unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, u8 *r_err); extern int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, unsigned long deadline); extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes, unsigned long deadline); extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes); extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc); extern void ata_sff_error_handler(struct ata_port *ap); extern void ata_sff_std_ports(struct ata_ioports *ioaddr); #ifdef CONFIG_PCI extern int ata_pci_sff_init_host(struct ata_host *host); extern int 
ata_pci_sff_prepare_host(struct pci_dev *pdev, const struct ata_port_info * const * ppi, struct ata_host **r_host); extern int ata_pci_sff_activate_host(struct ata_host *host, irq_handler_t irq_handler, struct scsi_host_template *sht); extern int ata_pci_sff_init_one(struct pci_dev *pdev, const struct ata_port_info * const * ppi, struct scsi_host_template *sht, void *host_priv, int hflags); #endif /* CONFIG_PCI */ #ifdef CONFIG_ATA_BMDMA extern const struct ata_port_operations ata_bmdma_port_ops; #define ATA_BMDMA_SHT(drv_name) \ ATA_BASE_SHT(drv_name), \ .sg_tablesize = LIBATA_MAX_PRD, \ .dma_boundary = ATA_DMA_BOUNDARY extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc); extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); extern void ata_bmdma_error_handler(struct ata_port *ap); extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc); extern void ata_bmdma_irq_clear(struct ata_port *ap); extern void ata_bmdma_setup(struct ata_queued_cmd *qc); extern void ata_bmdma_start(struct ata_queued_cmd *qc); extern void ata_bmdma_stop(struct ata_queued_cmd *qc); extern u8 ata_bmdma_status(struct ata_port *ap); extern int ata_bmdma_port_start(struct ata_port *ap); extern int ata_bmdma_port_start32(struct ata_port *ap); #ifdef CONFIG_PCI extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev); extern void ata_pci_bmdma_init(struct ata_host *host); extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, const struct ata_port_info * const * ppi, struct ata_host **r_host); extern int ata_pci_bmdma_init_one(struct pci_dev *pdev, const struct ata_port_info * const * ppi, struct scsi_host_template *sht, void *host_priv, int hflags); #endif /* CONFIG_PCI */ #endif /* CONFIG_ATA_BMDMA */ /** * ata_sff_busy_wait - Wait for a port status register * @ap: Port to wait for. * @bits: bits that must be clear * @max: number of 10uS waits to perform * * Waits up to max*10 microseconds for the selected bits in the port's * status register to be cleared. * Returns final value of status register. * * LOCKING: * Inherited from caller. */ static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits, unsigned int max) { u8 status; do { udelay(10); status = ap->ops->sff_check_status(ap); max--; } while (status != 0xff && (status & bits) && (max > 0)); return status; } /** * ata_wait_idle - Wait for a port to be idle. * @ap: Port to wait for. * * Waits up to 10ms for port's BUSY and DRQ signals to clear. * Returns final value of status register. * * LOCKING: * Inherited from caller. 
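 *
 * Example (illustrative sketch, not taken from any in-tree driver; an
 * SFF driver might check for a stuck port before touching the taskfile
 * registers):
 *
 *	u8 status = ata_wait_idle(ap);
 *
 *	if (status & (ATA_BUSY | ATA_DRQ))
 *		return;
 *	ap->ops->sff_dev_select(ap, device);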
*/ static inline u8 ata_wait_idle(struct ata_port *ap) { u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); #ifdef ATA_DEBUG if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", status); #endif return status; } #endif /* CONFIG_ATA_SFF */ #endif /* __LINUX_LIBATA_H__ */ hugetlb.h 0000644 00000047214 14722070374 0006365 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HUGETLB_H #define _LINUX_HUGETLB_H #include <linux/mm_types.h> #include <linux/mmdebug.h> #include <linux/fs.h> #include <linux/hugetlb_inline.h> #include <linux/cgroup.h> #include <linux/page_ref.h> #include <linux/list.h> #include <linux/kref.h> #include <asm/pgtable.h> struct ctl_table; struct user_struct; struct mmu_gather; #ifndef is_hugepd typedef struct { unsigned long pd; } hugepd_t; #define is_hugepd(hugepd) (0) #define __hugepd(x) ((hugepd_t) { (x) }) #endif #ifdef CONFIG_HUGETLB_PAGE #include <linux/mempolicy.h> #include <linux/shm.h> #include <asm/tlbflush.h> struct hugepage_subpool { spinlock_t lock; long count; long max_hpages; /* Maximum huge pages or -1 if no maximum. */ long used_hpages; /* Used count against maximum, includes */ /* both alloced and reserved pages. */ struct hstate *hstate; long min_hpages; /* Minimum huge pages or -1 if no minimum. */ long rsv_hpages; /* Pages reserved against global pool to */ /* satisfy minimum size. */ }; struct resv_map { struct kref refs; spinlock_t lock; struct list_head regions; long adds_in_progress; struct list_head region_cache; long region_cache_count; }; extern struct resv_map *resv_map_alloc(void); void resv_map_release(struct kref *ref); extern spinlock_t hugetlb_lock; extern int hugetlb_max_hstate __read_mostly; #define for_each_hstate(h) \ for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++) struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, long min_hpages); void hugepage_put_subpool(struct hugepage_subpool *spool); void reset_vma_resv_huge_pages(struct vm_area_struct *vma); int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); #ifdef CONFIG_NUMA int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); #endif int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, unsigned long *, long, unsigned int, int *); void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *); void __unmap_hugepage_range_final(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page); void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page); void hugetlb_report_meminfo(struct seq_file *); int hugetlb_report_node_meminfo(int, char *); void hugetlb_show_meminfo(void); unsigned long hugetlb_total_pages(void); vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long
src_addr, struct page **pagep); int hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); bool isolate_huge_page(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); void free_huge_page(struct page *page); void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, pgoff_t idx); pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); extern int sysctl_hugetlb_shm_group; extern struct list_head huge_boot_pages; /* arch callbacks */ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write); struct page *follow_huge_pd(struct vm_area_struct *vma, unsigned long address, hugepd_t hpd, int flags, int pdshift); struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags); struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int flags); struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags); int pmd_huge(pmd_t pmd); int pud_huge(pud_t pud); unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); bool is_hugetlb_entry_migration(pte_t pte); #else /* !CONFIG_HUGETLB_PAGE */ static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { } static inline unsigned long hugetlb_total_pages(void) { return 0; } static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) { return 0; } static inline void adjust_range_if_pmd_sharing_possible( struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { } #define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; }) #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) static inline void hugetlb_report_meminfo(struct seq_file *m) { } #define hugetlb_report_node_meminfo(n, buf) 0 static inline void hugetlb_show_meminfo(void) { } #define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL #define follow_huge_pmd_pte(vma, addr, flags) NULL #define follow_huge_pud(mm, addr, pud, flags) NULL #define follow_huge_pgd(mm, addr, pgd, flags) NULL #define prepare_hugepage_range(file, addr, len) (-EINVAL) #define pmd_huge(x) 0 #define pud_huge(x) 0 #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ src_addr, pagep) ({ BUG(); 0; }) #define huge_pte_offset(mm, address, sz) 0 static inline bool isolate_huge_page(struct page *page, struct list_head *list) { return false; } #define putback_active_hugepage(p) do {} while (0) #define move_hugetlb_state(old, new, reason) do {} while (0) static inline unsigned long hugetlb_change_protection(struct vm_area_struct 
*vma, unsigned long address, unsigned long end, pgprot_t newprot) { return 0; } static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) { BUG(); } static inline void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) { BUG(); } static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) { BUG(); return 0; } #endif /* !CONFIG_HUGETLB_PAGE */ /* * hugepages at the page global directory. If an arch supports * hugepages at the pgd level, it needs to define this. */ #ifndef pgd_huge #define pgd_huge(x) 0 #endif #ifndef p4d_huge #define p4d_huge(x) 0 #endif #ifndef pgd_write static inline int pgd_write(pgd_t pgd) { BUG(); return 0; } #endif #define HUGETLB_ANON_FILE "anon_hugepage" enum { /* * The file will be used as an shm file so shmfs accounting rules * apply */ HUGETLB_SHMFS_INODE = 1, /* * The file is being created on the internal vfs mount and shmfs * accounting rules do not apply */ HUGETLB_ANONHUGE_INODE = 2, }; #ifdef CONFIG_HUGETLBFS struct hugetlbfs_sb_info { long max_inodes; /* inodes allowed */ long free_inodes; /* inodes free */ spinlock_t stat_lock; struct hstate *hstate; struct hugepage_subpool *spool; kuid_t uid; kgid_t gid; umode_t mode; }; static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) { return sb->s_fs_info; } struct hugetlbfs_inode_info { struct shared_policy policy; struct inode vfs_inode; unsigned int seals; }; static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) { return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); } extern const struct file_operations hugetlbfs_file_operations; extern const struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, struct user_struct **user, int creat_flags, int page_size_log); static inline bool is_file_hugepages(struct file *file) { if (file->f_op == &hugetlbfs_file_operations) return true; return is_file_shm_hugepages(file); } static inline struct hstate *hstate_inode(struct inode *i) { return HUGETLBFS_SB(i->i_sb)->hstate; } #else /* !CONFIG_HUGETLBFS */ #define is_file_hugepages(file) false static inline struct file * hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, struct user_struct **user, int creat_flags, int page_size_log) { return ERR_PTR(-ENOSYS); } static inline struct hstate *hstate_inode(struct inode *i) { return NULL; } #endif /* !CONFIG_HUGETLBFS */ #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ #ifdef CONFIG_HUGETLB_PAGE #define HSTATE_NAME_LEN 32 /* Defines one hugetlb page size */ struct hstate { int next_nid_to_alloc; int next_nid_to_free; unsigned int order; unsigned long mask; unsigned long max_huge_pages; unsigned long nr_huge_pages; unsigned long free_huge_pages; unsigned long resv_huge_pages; unsigned long surplus_huge_pages; unsigned long nr_overcommit_huge_pages; struct list_head hugepage_activelist; struct list_head hugepage_freelists[MAX_NUMNODES]; unsigned int nr_huge_pages_node[MAX_NUMNODES]; unsigned int free_huge_pages_node[MAX_NUMNODES]; unsigned int surplus_huge_pages_node[MAX_NUMNODES]; #ifdef CONFIG_CGROUP_HUGETLB /*
cgroup control files */ struct cftype cgroup_files[5]; #endif char name[HSTATE_NAME_LEN]; }; struct huge_bootmem_page { struct list_head list; struct hstate *hstate; }; struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct page *alloc_huge_page_node(struct hstate *h, int nid); struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask); struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address); struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask); int huge_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t idx); /* arch callback */ int __init __alloc_bootmem_huge_page(struct hstate *h); int __init alloc_bootmem_huge_page(struct hstate *h); void __init hugetlb_bad_size(void); void __init hugetlb_add_hstate(unsigned order); struct hstate *size_to_hstate(unsigned long size); #ifndef HUGE_MAX_HSTATE #define HUGE_MAX_HSTATE 1 #endif extern struct hstate hstates[HUGE_MAX_HSTATE]; extern unsigned int default_hstate_idx; #define default_hstate (hstates[default_hstate_idx]) static inline struct hstate *hstate_file(struct file *f) { return hstate_inode(file_inode(f)); } static inline struct hstate *hstate_sizelog(int page_size_log) { if (!page_size_log) return &default_hstate; if (page_size_log < BITS_PER_LONG) return size_to_hstate(1UL << page_size_log); return NULL; } static inline struct hstate *hstate_vma(struct vm_area_struct *vma) { return hstate_file(vma->vm_file); } static inline unsigned long huge_page_size(struct hstate *h) { return (unsigned long)PAGE_SIZE << h->order; } extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma); extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma); static inline unsigned long huge_page_mask(struct hstate *h) { return h->mask; } static inline unsigned int huge_page_order(struct hstate *h) { return h->order; } static inline unsigned huge_page_shift(struct hstate *h) { return h->order + PAGE_SHIFT; } static inline bool hstate_is_gigantic(struct hstate *h) { return huge_page_order(h) >= MAX_ORDER; } static inline unsigned int pages_per_huge_page(struct hstate *h) { return 1 << h->order; } static inline unsigned int blocks_per_huge_page(struct hstate *h) { return huge_page_size(h) / 512; } #include <asm/hugetlb.h> #ifndef arch_make_huge_pte static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, struct page *page, int writable) { return entry; } #endif static inline struct hstate *page_hstate(struct page *page) { VM_BUG_ON_PAGE(!PageHuge(page), page); return size_to_hstate(page_size(page)); } static inline unsigned hstate_index_to_shift(unsigned index) { return hstates[index].order + PAGE_SHIFT; } static inline int hstate_index(struct hstate *h) { return h - hstates; } extern int dissolve_free_huge_page(struct page *page); extern int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn); #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION #ifndef arch_hugetlb_migration_supported static inline bool arch_hugetlb_migration_supported(struct hstate *h) { if ((huge_page_shift(h) == PMD_SHIFT) || (huge_page_shift(h) == PUD_SHIFT) || (huge_page_shift(h) == PGDIR_SHIFT)) return true; else return false; } #endif #else static inline bool arch_hugetlb_migration_supported(struct hstate *h) { return false; } #endif static inline bool hugepage_migration_supported(struct hstate *h) { return 
arch_hugetlb_migration_supported(h); } /* * The movability check is different from the migration check. * It determines whether or not a huge page should be placed in * a movable zone. Movability of any huge page should be * required only if the huge page size is supported for migration. * There won't be any reason for the huge page to be movable if * it is not migratable to start with. Also, the size of the huge * page should be large enough to be placed under a movable zone * and still feasible enough to be migratable. Just the presence * in a movable zone does not make the migration feasible. * * So even though large huge page sizes like the gigantic ones * are migratable, they should not be movable, because it's not * feasible to migrate them from a movable zone. */ static inline bool hugepage_movable_supported(struct hstate *h) { if (!hugepage_migration_supported(h)) return false; if (hstate_is_gigantic(h)) return false; return true; } static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { if (huge_page_size(h) == PMD_SIZE) return pmd_lockptr(mm, (pmd_t *) pte); VM_BUG_ON(huge_page_size(h) == PAGE_SIZE); return &mm->page_table_lock; } #ifndef hugepages_supported /* * Some platforms decide whether they support huge pages at boot * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0 * when there is no such support. */ #define hugepages_supported() (HPAGE_SHIFT != 0) #endif void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); static inline void hugetlb_count_init(struct mm_struct *mm) { atomic_long_set(&mm->hugetlb_usage, 0); } static inline void hugetlb_count_add(long l, struct mm_struct *mm) { atomic_long_add(l, &mm->hugetlb_usage); } static inline void hugetlb_count_sub(long l, struct mm_struct *mm) { atomic_long_sub(l, &mm->hugetlb_usage); } #ifndef set_huge_swap_pte_at static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned long sz) { set_huge_pte_at(mm, addr, ptep, pte); } #endif #ifndef huge_ptep_modify_prot_start #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); } #endif #ifndef huge_ptep_modify_prot_commit #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) { set_huge_pte_at(vma->vm_mm, addr, ptep, pte); } #endif void set_page_huge_active(struct page *page); #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; static inline struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { return NULL; } static inline struct page *alloc_huge_page_node(struct hstate *h, int nid) { return NULL; } static inline struct page * alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask) { return NULL; } static inline struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { return NULL; } static inline int __alloc_bootmem_huge_page(struct hstate *h) { return 0; } static inline struct hstate *hstate_file(struct file *f) { return NULL; } static inline struct hstate *hstate_sizelog(int page_size_log) { return NULL; } static inline struct hstate *hstate_vma(struct vm_area_struct *vma) { return NULL; } static inline struct hstate *page_hstate(struct page
*page) { return NULL; } static inline unsigned long huge_page_size(struct hstate *h) { return PAGE_SIZE; } static inline unsigned long huge_page_mask(struct hstate *h) { return PAGE_MASK; } static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) { return PAGE_SIZE; } static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { return PAGE_SIZE; } static inline unsigned int huge_page_order(struct hstate *h) { return 0; } static inline unsigned int huge_page_shift(struct hstate *h) { return PAGE_SHIFT; } static inline bool hstate_is_gigantic(struct hstate *h) { return false; } static inline unsigned int pages_per_huge_page(struct hstate *h) { return 1; } static inline unsigned hstate_index_to_shift(unsigned index) { return 0; } static inline int hstate_index(struct hstate *h) { return 0; } static inline int dissolve_free_huge_page(struct page *page) { return 0; } static inline int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) { return 0; } static inline bool hugepage_migration_supported(struct hstate *h) { return false; } static inline bool hugepage_movable_supported(struct hstate *h) { return false; } static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { return &mm->page_table_lock; } static inline void hugetlb_count_init(struct mm_struct *mm) { } static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) { } static inline void hugetlb_count_sub(long l, struct mm_struct *mm) { } static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned long sz) { } #endif /* CONFIG_HUGETLB_PAGE */ static inline spinlock_t *huge_pte_lock(struct hstate *h, struct mm_struct *mm, pte_t *pte) { spinlock_t *ptl; ptl = huge_pte_lockptr(h, mm, pte); spin_lock(ptl); return ptl; } #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE static inline bool hugetlb_pmd_shared(pte_t *pte) { return page_count(virt_to_page(pte)) > 1; } #else static inline bool hugetlb_pmd_shared(pte_t *pte) { return false; } #endif #endif /* _LINUX_HUGETLB_H */ mm_inline.h 0000644 00000006566 14722070374 0006707 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_MM_INLINE_H #define LINUX_MM_INLINE_H #include <linux/huge_mm.h> #include <linux/swap.h> /** * page_is_file_cache - should the page be on a file LRU or anon LRU? * @page: the page to test * * Returns 1 if @page is page cache page backed by a regular filesystem, * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed. * Used by functions that manipulate the LRU lists, to sort a page * onto the right LRU list. * * We would like to get this info without a page flag, but the state * needs to survive until the page is last deleted from the LRU, which * could be as far down as __page_cache_release. 
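 *
 * Example (sketch): callers typically use this to pick the base LRU
 * type, exactly as page_lru_base_type() below does:
 *
 *	enum lru_list base = page_is_file_cache(page) ?
 *				LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;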
*/ static inline int page_is_file_cache(struct page *page) { return !PageSwapBacked(page); } static __always_inline void __update_lru_size(struct lruvec *lruvec, enum lru_list lru, enum zone_type zid, int nr_pages) { struct pglist_data *pgdat = lruvec_pgdat(lruvec); __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); __mod_zone_page_state(&pgdat->node_zones[zid], NR_ZONE_LRU_BASE + lru, nr_pages); } static __always_inline void update_lru_size(struct lruvec *lruvec, enum lru_list lru, enum zone_type zid, int nr_pages) { __update_lru_size(lruvec, lru, zid, nr_pages); #ifdef CONFIG_MEMCG mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); #endif } static __always_inline void add_page_to_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); } static __always_inline void add_page_to_lru_list_tail(struct page *page, struct lruvec *lruvec, enum lru_list lru) { update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); list_add_tail(&page->lru, &lruvec->lists[lru]); } static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { list_del(&page->lru); update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); } /** * page_lru_base_type - which LRU list type should a page be on? * @page: the page to test * * Used for LRU list index arithmetic. * * Returns the base LRU type - file or anon - @page should be on. */ static inline enum lru_list page_lru_base_type(struct page *page) { if (page_is_file_cache(page)) return LRU_INACTIVE_FILE; return LRU_INACTIVE_ANON; } /** * page_off_lru - which LRU list was page on? clearing its lru flags. * @page: the page to test * * Returns the LRU list a page was on, as an index into the array of LRU * lists; and clears its Unevictable or Active flags, ready for freeing. */ static __always_inline enum lru_list page_off_lru(struct page *page) { enum lru_list lru; if (PageUnevictable(page)) { __ClearPageUnevictable(page); lru = LRU_UNEVICTABLE; } else { lru = page_lru_base_type(page); if (PageActive(page)) { __ClearPageActive(page); lru += LRU_ACTIVE; } } return lru; } /** * page_lru - which LRU list should a page be on? * @page: the page to test * * Returns the LRU list a page should be on, as an index * into the array of LRU lists. 
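 *
 * Example (sketch, assuming the caller has already resolved @lruvec and
 * holds the appropriate LRU lock):
 *
 *	add_page_to_lru_list(page, lruvec, page_lru(page));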
*/ static __always_inline enum lru_list page_lru(struct page *page) { enum lru_list lru; if (PageUnevictable(page)) lru = LRU_UNEVICTABLE; else { lru = page_lru_base_type(page); if (PageActive(page)) lru += LRU_ACTIVE; } return lru; } #endif seg6_genl.h 0000644 00000000210 14722070374 0006565 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SEG6_GENL_H #define _LINUX_SEG6_GENL_H #include <uapi/linux/seg6_genl.h> #endif blk-mq.h 0000644 00000026630 14722070374 0006115 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef BLK_MQ_H #define BLK_MQ_H #include <linux/blkdev.h> #include <linux/sbitmap.h> #include <linux/srcu.h> struct blk_mq_tags; struct blk_flush_queue; /** * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device */ struct blk_mq_hw_ctx { struct { spinlock_t lock; struct list_head dispatch; unsigned long state; /* BLK_MQ_S_* flags */ } ____cacheline_aligned_in_smp; struct delayed_work run_work; cpumask_var_t cpumask; int next_cpu; int next_cpu_batch; unsigned long flags; /* BLK_MQ_F_* flags */ void *sched_data; struct request_queue *queue; struct blk_flush_queue *fq; void *driver_data; struct sbitmap ctx_map; struct blk_mq_ctx *dispatch_from; unsigned int dispatch_busy; unsigned short type; unsigned short nr_ctx; struct blk_mq_ctx **ctxs; spinlock_t dispatch_wait_lock; wait_queue_entry_t dispatch_wait; atomic_t wait_index; struct blk_mq_tags *tags; struct blk_mq_tags *sched_tags; unsigned long queued; unsigned long run; #define BLK_MQ_MAX_DISPATCH_ORDER 7 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; unsigned int numa_node; unsigned int queue_num; atomic_t nr_active; struct hlist_node cpuhp_dead; struct kobject kobj; unsigned long poll_considered; unsigned long poll_invoked; unsigned long poll_success; #ifdef CONFIG_BLK_DEBUG_FS struct dentry *debugfs_dir; struct dentry *sched_debugfs_dir; #endif struct list_head hctx_list; /* Must be the last member - see also blk_mq_hw_ctx_size(). */ struct srcu_struct srcu[0]; }; struct blk_mq_queue_map { unsigned int *mq_map; unsigned int nr_queues; unsigned int queue_offset; }; enum hctx_type { HCTX_TYPE_DEFAULT, /* all I/O not otherwise accounted for */ HCTX_TYPE_READ, /* just for READ I/O */ HCTX_TYPE_POLL, /* polled I/O of any kind */ HCTX_MAX_TYPES, }; struct blk_mq_tag_set { /* * map[] holds ctx -> hctx mappings, one map exists for each type * that the driver wishes to support. There are no restrictions * on maps being of the same size, and it's perfectly legal to * share maps between types. 
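 *
 * Example (illustrative sketch): a driver with no dedicated read queues
 * could reuse the default mapping for reads; assigning the struct simply
 * shares the underlying mq_map array between the two types:
 *
 *	set->map[HCTX_TYPE_READ] = set->map[HCTX_TYPE_DEFAULT];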
*/ struct blk_mq_queue_map map[HCTX_MAX_TYPES]; unsigned int nr_maps; /* nr entries in map[] */ const struct blk_mq_ops *ops; unsigned int nr_hw_queues; /* nr hw queues across maps */ unsigned int queue_depth; /* max hw supported */ unsigned int reserved_tags; unsigned int cmd_size; /* per-request extra data */ int numa_node; unsigned int timeout; unsigned int flags; /* BLK_MQ_F_* */ void *driver_data; struct blk_mq_tags **tags; struct mutex tag_list_lock; struct list_head tag_list; }; struct blk_mq_queue_data { struct request *rq; bool last; }; typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *); typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *); typedef void (put_budget_fn)(struct blk_mq_hw_ctx *); typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *, unsigned int, unsigned int); typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *, unsigned int); typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, bool); typedef bool (busy_tag_iter_fn)(struct request *, void *, bool); typedef int (poll_fn)(struct blk_mq_hw_ctx *); typedef int (map_queues_fn)(struct blk_mq_tag_set *set); typedef bool (busy_fn)(struct request_queue *); typedef void (complete_fn)(struct request *); typedef void (cleanup_rq_fn)(struct request *); struct blk_mq_ops { /* * Queue request */ queue_rq_fn *queue_rq; /* * If a driver uses bd->last to judge when to submit requests to * hardware, it must define this function. In case of errors that * make us stop issuing further requests, this hook serves the * purpose of kicking the hardware (which the last request otherwise * would have done). */ commit_rqs_fn *commit_rqs; /* * Reserve budget before queue request, once .queue_rq is * run, it is driver's responsibility to release the * reserved budget. Also we have to handle failure case * of .get_budget for avoiding I/O deadlock. */ get_budget_fn *get_budget; put_budget_fn *put_budget; /* * Called on request timeout */ timeout_fn *timeout; /* * Called to poll for completion of a specific tag. */ poll_fn *poll; complete_fn *complete; /* * Called when the block layer side of a hardware queue has been * set up, allowing the driver to allocate/init matching structures. * Ditto for exit/teardown. */ init_hctx_fn *init_hctx; exit_hctx_fn *exit_hctx; /* * Called for every command allocated by the block layer to allow * the driver to set up driver specific data. * * Tag greater than or equal to queue_depth is for setting up * flush request. * * Ditto for exit/teardown. */ init_request_fn *init_request; exit_request_fn *exit_request; /* Called from inside blk_get_request() */ void (*initialize_rq_fn)(struct request *rq); /* * Called before freeing one request which isn't completed yet, * and usually for freeing the driver private data */ cleanup_rq_fn *cleanup_rq; /* * If set, returns whether or not this queue currently is busy */ busy_fn *busy; map_queues_fn *map_queues; #ifdef CONFIG_BLK_DEBUG_FS /* * Used by the debugfs implementation to show driver-specific * information about a request. 
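 *
 * Example (illustrative sketch; the driver and field names are made up):
 *
 *	static void mydrv_show_rq(struct seq_file *m, struct request *rq)
 *	{
 *		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		seq_printf(m, "mydrv: state=%d\n", cmd->state);
 *	}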
*/ void (*show_rq)(struct seq_file *m, struct request *rq); #endif }; enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_TAG_SHARED = 1 << 1, BLK_MQ_F_BLOCKING = 1 << 5, BLK_MQ_F_NO_SCHED = 1 << 6, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_S_SCHED_RESTART = 2, BLK_MQ_MAX_DEPTH = 10240, BLK_MQ_CPU_WORK_BATCH = 8, }; #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \ ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \ ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) #define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \ ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ << BLK_MQ_F_ALLOC_POLICY_START_BIT) struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q, bool elevator_init); struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set, const struct blk_mq_ops *ops, unsigned int queue_depth, unsigned int set_flags); void blk_mq_unregister_dev(struct device *, struct request_queue *); int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); void blk_mq_free_tag_set(struct blk_mq_tag_set *set); void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); bool blk_mq_queue_inflight(struct request_queue *q); enum { /* return when out of requests */ BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), /* allocate from reserved pool */ BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), /* allocate internal/sched tag */ BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2), /* set RQF_PREEMPT */ BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3), }; struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, blk_mq_req_flags_t flags); struct request *blk_mq_alloc_request_hctx(struct request_queue *q, unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); enum { BLK_MQ_UNIQUE_TAG_BITS = 16, BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1, }; u32 blk_mq_unique_tag(struct request *rq); static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag) { return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS; } static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) { return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; } int blk_mq_request_started(struct request *rq); int blk_mq_request_completed(struct request *rq); void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, blk_status_t error); void __blk_mq_end_request(struct request *rq, blk_status_t error); void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); void blk_mq_complete_request(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs); bool blk_mq_queue_stopped(struct request_queue *q); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void 
blk_mq_quiesce_queue(struct request_queue *q); void blk_mq_unquiesce_queue(struct request_queue *q); void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, busy_tag_iter_fn *fn, void *priv); void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset); void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_freeze_queue_start(struct request_queue *q); void blk_mq_freeze_queue_wait(struct request_queue *q); int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, unsigned long timeout); int blk_mq_map_queues(struct blk_mq_queue_map *qmap); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); void blk_mq_quiesce_queue_nowait(struct request_queue *q); unsigned int blk_mq_rq_cpu(struct request *rq); bool __blk_should_fake_timeout(struct request_queue *q); static inline bool blk_should_fake_timeout(struct request_queue *q) { if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) && test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) return __blk_should_fake_timeout(q); return false; } /* * Driver command data is immediately after the request. So subtract request * size to get back to the original request, add request size to get the PDU. */ static inline struct request *blk_mq_rq_from_pdu(void *pdu) { return pdu - sizeof(struct request); } static inline void *blk_mq_rq_to_pdu(struct request *rq) { return rq + 1; } #define queue_for_each_hw_ctx(q, hctx, i) \ for ((i) = 0; (i) < (q)->nr_hw_queues && \ ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++) #define hctx_for_each_ctx(hctx, ctx, i) \ for ((i) = 0; (i) < (hctx)->nr_ctx && \ ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq) { if (rq->tag != -1) return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT); return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) | BLK_QC_T_INTERNAL; } static inline void blk_mq_cleanup_rq(struct request *rq) { if (rq->q->mq_ops->cleanup_rq) rq->q->mq_ops->cleanup_rq(rq); } #endif irqreturn.h 0000644 00000000767 14722070374 0006770 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQRETURN_H #define _LINUX_IRQRETURN_H /** * enum irqreturn * @IRQ_NONE interrupt was not from this device or was not handled * @IRQ_HANDLED interrupt was handled by this device * @IRQ_WAKE_THREAD handler requests to wake the handler thread */ enum irqreturn { IRQ_NONE = (0 << 0), IRQ_HANDLED = (1 << 0), IRQ_WAKE_THREAD = (1 << 1), }; typedef enum irqreturn irqreturn_t; #define IRQ_RETVAL(x) ((x) ? 
IRQ_HANDLED : IRQ_NONE) #endif if_macvlan.h 0000644 00000005254 14722070374 0007030 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IF_MACVLAN_H #define _LINUX_IF_MACVLAN_H #include <linux/if_link.h> #include <linux/if_vlan.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/netlink.h> #include <net/netlink.h> #include <linux/u64_stats_sync.h> struct macvlan_port; #define MACVLAN_MC_FILTER_BITS 8 #define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) struct macvlan_dev { struct net_device *dev; struct list_head list; struct hlist_node hlist; struct macvlan_port *port; struct net_device *lowerdev; void *accel_priv; struct vlan_pcpu_stats __percpu *pcpu_stats; DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); netdev_features_t set_features; enum macvlan_mode mode; u16 flags; unsigned int macaddr_count; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, unsigned int len, bool success, bool multicast) { if (likely(success)) { struct vlan_pcpu_stats *pcpu_stats; pcpu_stats = get_cpu_ptr(vlan->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); pcpu_stats->rx_packets++; pcpu_stats->rx_bytes += len; if (multicast) pcpu_stats->rx_multicast++; u64_stats_update_end(&pcpu_stats->syncp); put_cpu_ptr(vlan->pcpu_stats); } else { this_cpu_inc(vlan->pcpu_stats->rx_errors); } } extern void macvlan_common_setup(struct net_device *dev); extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack); extern void macvlan_dellink(struct net_device *dev, struct list_head *head); extern int macvlan_link_register(struct rtnl_link_ops *ops); #if IS_ENABLED(CONFIG_MACVLAN) static inline struct net_device * macvlan_dev_real_dev(const struct net_device *dev) { struct macvlan_dev *macvlan = netdev_priv(dev); return macvlan->lowerdev; } #else static inline struct net_device * macvlan_dev_real_dev(const struct net_device *dev) { BUG(); return NULL; } #endif static inline void *macvlan_accel_priv(struct net_device *dev) { struct macvlan_dev *macvlan = netdev_priv(dev); return macvlan->accel_priv; } static inline bool macvlan_supports_dest_filter(struct net_device *dev) { struct macvlan_dev *macvlan = netdev_priv(dev); return macvlan->mode == MACVLAN_MODE_PRIVATE || macvlan->mode == MACVLAN_MODE_VEPA || macvlan->mode == MACVLAN_MODE_BRIDGE; } static inline int macvlan_release_l2fw_offload(struct net_device *dev) { struct macvlan_dev *macvlan = netdev_priv(dev); macvlan->accel_priv = NULL; return dev_uc_add(macvlan->lowerdev, dev->dev_addr); } #endif /* _LINUX_IF_MACVLAN_H */ vt.h 0000644 00000001143 14722070374 0005353 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_VT_H #define _LINUX_VT_H #include <uapi/linux/vt.h> /* Virtual Terminal events. 
*/ #define VT_ALLOCATE 0x0001 /* Console got allocated */ #define VT_DEALLOCATE 0x0002 /* Console will be deallocated */ #define VT_WRITE 0x0003 /* A char got output */ #define VT_UPDATE 0x0004 /* A bigger update occurred */ #define VT_PREWRITE 0x0005 /* A char is about to be written to the console */ #ifdef CONFIG_VT_CONSOLE extern int vt_kmsg_redirect(int new); #else static inline int vt_kmsg_redirect(int new) { return 0; } #endif #endif /* _LINUX_VT_H */ rwlock_types.h 0000644 00000002166 14722070374 0007455 0 ustar 00 #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H /* * include/linux/rwlock_types.h - generic rwlock type definitions * and initializers * * portions Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the General Public License (GPL). */ typedef struct { arch_rwlock_t raw_lock; #ifdef CONFIG_DEBUG_SPINLOCK unsigned int magic, owner_cpu; void *owner; #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif } rwlock_t; #define RWLOCK_MAGIC 0xdeaf1eed #ifdef CONFIG_DEBUG_LOCK_ALLOC # define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } #else # define RW_DEP_MAP_INIT(lockname) #endif #ifdef CONFIG_DEBUG_SPINLOCK #define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ .magic = RWLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1, \ RW_DEP_MAP_INIT(lockname) } #else #define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ RW_DEP_MAP_INIT(lockname) } #endif #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) #endif /* __LINUX_RWLOCK_TYPES_H */ extcon.h 0000644 00000024271 14722070374 0006231 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * External Connector (extcon) framework * - linux/include/linux/extcon.h for extcon consumer device driver. * * Copyright (C) 2015 Samsung Electronics * Author: Chanwoo Choi <cw00.choi@samsung.com> * * Copyright (C) 2012 Samsung Electronics * Author: Donggeun Kim <dg77.kim@samsung.com> * Author: MyungJoo Ham <myungjoo.ham@samsung.com> * * based on switch class driver * Copyright (C) 2008 Google, Inc. * Author: Mike Lockwood <lockwood@android.com> */ #ifndef __LINUX_EXTCON_H__ #define __LINUX_EXTCON_H__ #include <linux/device.h> /* * Define the type of supported external connectors */ #define EXTCON_TYPE_USB BIT(0) /* USB connector */ #define EXTCON_TYPE_CHG BIT(1) /* Charger connector */ #define EXTCON_TYPE_JACK BIT(2) /* Jack connector */ #define EXTCON_TYPE_DISP BIT(3) /* Display connector */ #define EXTCON_TYPE_MISC BIT(4) /* Miscellaneous connector */ /* * Define the unique id of supported external connectors */ #define EXTCON_NONE 0 /* USB external connector */ #define EXTCON_USB 1 #define EXTCON_USB_HOST 2 /* * Charging external connector * * When one SDP charger connector was reported, we should also report * the USB connector, which means EXTCON_CHG_USB_SDP should always * appear together with EXTCON_USB. The same as ACA charger connector, * EXTCON_CHG_USB_ACA would normally appear with EXTCON_USB_HOST. * * The EXTCON_CHG_USB_SLOW connector can provide at least 500mA of * current at 5V. The EXTCON_CHG_USB_FAST connector can provide at * least 1A of current at 5V. 
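 *
 * Example (illustrative sketch, provider side; extcon_set_state_sync()
 * comes from linux/extcon-provider.h, not from this header): reporting
 * an SDP charger therefore implies reporting the USB connector too:
 *
 *	extcon_set_state_sync(edev, EXTCON_CHG_USB_SDP, true);
 *	extcon_set_state_sync(edev, EXTCON_USB, true);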
*/ #define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */ #define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */ #define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */ #define EXTCON_CHG_USB_ACA 8 /* Accessory Charger Adapter */ #define EXTCON_CHG_USB_FAST 9 #define EXTCON_CHG_USB_SLOW 10 #define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */ #define EXTCON_CHG_USB_PD 12 /* USB Power Delivery */ /* Jack external connector */ #define EXTCON_JACK_MICROPHONE 20 #define EXTCON_JACK_HEADPHONE 21 #define EXTCON_JACK_LINE_IN 22 #define EXTCON_JACK_LINE_OUT 23 #define EXTCON_JACK_VIDEO_IN 24 #define EXTCON_JACK_VIDEO_OUT 25 #define EXTCON_JACK_SPDIF_IN 26 /* Sony Philips Digital InterFace */ #define EXTCON_JACK_SPDIF_OUT 27 /* Display external connector */ #define EXTCON_DISP_HDMI 40 /* High-Definition Multimedia Interface */ #define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */ #define EXTCON_DISP_DVI 42 /* Digital Visual Interface */ #define EXTCON_DISP_VGA 43 /* Video Graphics Array */ #define EXTCON_DISP_DP 44 /* Display Port */ #define EXTCON_DISP_HMD 45 /* Head-Mounted Display */ /* Miscellaneous external connector */ #define EXTCON_DOCK 60 #define EXTCON_JIG 61 #define EXTCON_MECHANICAL 62 #define EXTCON_NUM 63 /* * Define the properties of supported external connectors. * * When adding the new extcon property, they *must* have * the type/value/default information. Also, you *have to* * modify the EXTCON_PROP_[type]_START/END definitions * which mean the range of the supported properties * for each extcon type. * * The naming style of property * : EXTCON_PROP_[type]_[property name] * * EXTCON_PROP_USB_[property name] : USB property * EXTCON_PROP_CHG_[property name] : Charger property * EXTCON_PROP_JACK_[property name] : Jack property * EXTCON_PROP_DISP_[property name] : Display property */ /* * Properties of EXTCON_TYPE_USB. * * - EXTCON_PROP_USB_VBUS * @type: integer (intval) * @value: 0 (low) or 1 (high) * @default: 0 (low) * - EXTCON_PROP_USB_TYPEC_POLARITY * @type: integer (intval) * @value: 0 (normal) or 1 (flip) * @default: 0 (normal) * - EXTCON_PROP_USB_SS (SuperSpeed) * @type: integer (intval) * @value: 0 (USB/USB2) or 1 (USB3) * @default: 0 (USB/USB2) * */ #define EXTCON_PROP_USB_VBUS 0 #define EXTCON_PROP_USB_TYPEC_POLARITY 1 #define EXTCON_PROP_USB_SS 2 #define EXTCON_PROP_USB_MIN 0 #define EXTCON_PROP_USB_MAX 2 #define EXTCON_PROP_USB_CNT (EXTCON_PROP_USB_MAX - EXTCON_PROP_USB_MIN + 1) /* Properties of EXTCON_TYPE_CHG. */ #define EXTCON_PROP_CHG_MIN 50 #define EXTCON_PROP_CHG_MAX 50 #define EXTCON_PROP_CHG_CNT (EXTCON_PROP_CHG_MAX - EXTCON_PROP_CHG_MIN + 1) /* Properties of EXTCON_TYPE_JACK. */ #define EXTCON_PROP_JACK_MIN 100 #define EXTCON_PROP_JACK_MAX 100 #define EXTCON_PROP_JACK_CNT (EXTCON_PROP_JACK_MAX - EXTCON_PROP_JACK_MIN + 1) /* * Properties of EXTCON_TYPE_DISP. * * - EXTCON_PROP_DISP_HPD (Hot Plug Detect) * @type: integer (intval) * @value: 0 (no hpd) or 1 (hpd) * @default: 0 (no hpd) * */ #define EXTCON_PROP_DISP_HPD 150 /* Properties of EXTCON_TYPE_DISP. */ #define EXTCON_PROP_DISP_MIN 150 #define EXTCON_PROP_DISP_MAX 151 #define EXTCON_PROP_DISP_CNT (EXTCON_PROP_DISP_MAX - EXTCON_PROP_DISP_MIN + 1) /* * Define the type of property's value. * * Define the property's value as union type. Because each property * would need the different data type to store it. 
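 *
 * Example (sketch, consumer side): every property currently defined is
 * read through the integer member:
 *
 *	union extcon_property_value val;
 *
 *	if (!extcon_get_property(edev, EXTCON_USB, EXTCON_PROP_USB_SS, &val))
 *		pr_info("USB SuperSpeed: %d\n", val.intval);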
*/ union extcon_property_value { int intval; /* type : integer (intval) */ }; struct extcon_dev; #if IS_ENABLED(CONFIG_EXTCON) /* * Following APIs get the connected state of each external connector. * The 'id' argument indicates the defined external connector. */ extern int extcon_get_state(struct extcon_dev *edev, unsigned int id); /* * Following APIs get the property of each external connector. * The 'id' argument indicates the defined external connector * and the 'prop' indicates the extcon property. * * And extcon_get_property_capability() gets the capability of the property * for each external connector. They are used to get the capability of the * property of each external connector based on the id and property. */ extern int extcon_get_property(struct extcon_dev *edev, unsigned int id, unsigned int prop, union extcon_property_value *prop_val); extern int extcon_get_property_capability(struct extcon_dev *edev, unsigned int id, unsigned int prop); /* * Following APIs register the notifier block in order to detect * the change of both state and property value for each external connector. * * extcon_register_notifier(*edev, id, *nb) : Register a notifier block * for a specific external connector of the extcon. * extcon_register_notifier_all(*edev, *nb) : Register a notifier block * for all supported external connectors of the extcon. */ extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); extern int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); extern void devm_extcon_unregister_notifier(struct device *dev, struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); extern int extcon_register_notifier_all(struct extcon_dev *edev, struct notifier_block *nb); extern int extcon_unregister_notifier_all(struct extcon_dev *edev, struct notifier_block *nb); extern int devm_extcon_register_notifier_all(struct device *dev, struct extcon_dev *edev, struct notifier_block *nb); extern void devm_extcon_unregister_notifier_all(struct device *dev, struct extcon_dev *edev, struct notifier_block *nb); /* * Following APIs get the extcon_dev from the devicetree or through the * extcon name. */ extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name); extern struct extcon_dev *extcon_find_edev_by_node(struct device_node *node); extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index); /* The following API gets the name of the extcon device.
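 *
 * Example (sketch):
 *
 *	struct extcon_dev *edev = extcon_get_edev_by_phandle(dev, 0);
 *
 *	if (!IS_ERR(edev))
 *		dev_info(dev, "using extcon \"%s\"\n",
 *			 extcon_get_edev_name(edev));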
*/ extern const char *extcon_get_edev_name(struct extcon_dev *edev); #else /* CONFIG_EXTCON */ static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id) { return 0; } static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id, unsigned int prop, union extcon_property_value *prop_val) { return 0; } static inline int extcon_get_property_capability(struct extcon_dev *edev, unsigned int id, unsigned int prop) { return 0; } static inline int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb) { return 0; } static inline int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb) { return 0; } static inline int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev, unsigned int id, struct notifier_block *nb) { return -ENOSYS; } static inline void devm_extcon_unregister_notifier(struct device *dev, struct extcon_dev *edev, unsigned int id, struct notifier_block *nb) { } static inline int extcon_register_notifier_all(struct extcon_dev *edev, struct notifier_block *nb) { return 0; } static inline int extcon_unregister_notifier_all(struct extcon_dev *edev, struct notifier_block *nb) { return 0; } static inline int devm_extcon_register_notifier_all(struct device *dev, struct extcon_dev *edev, struct notifier_block *nb) { return 0; } static inline void devm_extcon_unregister_notifier_all(struct device *dev, struct extcon_dev *edev, struct notifier_block *nb) { } static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) { return ERR_PTR(-ENODEV); } static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node) { return ERR_PTR(-ENODEV); } static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index) { return ERR_PTR(-ENODEV); } #endif /* CONFIG_EXTCON */ /* * The following structure and APIs are deprecated. extcon keeps the * function definitions only to avoid breaking the build. */ struct extcon_specific_cable_nb { struct notifier_block *user_nb; int cable_index; struct extcon_dev *edev; unsigned long previous_value; }; static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj, const char *extcon_name, const char *cable_name, struct notifier_block *nb) { return -EINVAL; } static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj) { return -EINVAL; } #endif /* __LINUX_EXTCON_H__ */ vfio.h 0000644 00000014446 14722070374 0005677 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * VFIO API definition * * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com> */ #ifndef VFIO_H #define VFIO_H #include <linux/iommu.h> #include <linux/mm.h> #include <linux/workqueue.h> #include <linux/poll.h> #include <uapi/linux/vfio.h> /** * struct vfio_device_ops - VFIO bus driver device callbacks * * @open: Called when userspace creates new file descriptor for device * @release: Called when userspace releases file descriptor for device * @read: Perform read(2) on device file descriptor * @write: Perform write(2) on device file descriptor * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_* * operations documented below * @mmap: Perform mmap(2) on a region of the device file descriptor * @request: Request for the bus driver to release the device */ struct vfio_device_ops { char *name; int (*open)(void *device_data); void (*release)(void *device_data); ssize_t (*read)(void *device_data, char __user *buf, size_t count, loff_t *ppos); ssize_t (*write)(void *device_data, const char __user *buf, size_t count, loff_t *size); long (*ioctl)(void *device_data, unsigned int cmd, unsigned long arg); int (*mmap)(void *device_data, struct vm_area_struct *vma); void (*request)(void *device_data, unsigned int count); }; extern struct iommu_group *vfio_iommu_group_get(struct device *dev); extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); extern int vfio_add_group_dev(struct device *dev, const struct vfio_device_ops *ops, void *device_data); extern void *vfio_del_group_dev(struct device *dev); extern struct vfio_device *vfio_device_get_from_dev(struct device *dev); extern void vfio_device_put(struct vfio_device *device); extern void *vfio_device_data(struct vfio_device *device); /** * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks */ struct vfio_iommu_driver_ops { char *name; struct module *owner; void *(*open)(unsigned long arg); void (*release)(void *iommu_data); ssize_t (*read)(void *iommu_data, char __user *buf, size_t count, loff_t *ppos); ssize_t (*write)(void *iommu_data, const char __user *buf, size_t count, loff_t *size); long (*ioctl)(void *iommu_data, unsigned int cmd, unsigned long arg); int (*mmap)(void *iommu_data, struct vm_area_struct *vma); int (*attach_group)(void *iommu_data, struct iommu_group *group); void (*detach_group)(void *iommu_data, struct iommu_group *group); int (*pin_pages)(void *iommu_data, unsigned long *user_pfn, int npage, int prot, unsigned long *phys_pfn); int (*unpin_pages)(void *iommu_data, unsigned long *user_pfn, int npage); int (*register_notifier)(void *iommu_data, unsigned long *events, struct notifier_block *nb); int (*unregister_notifier)(void *iommu_data, struct notifier_block *nb); }; extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); extern void vfio_unregister_iommu_driver( const struct vfio_iommu_driver_ops *ops); /* * External user API */ extern struct vfio_group *vfio_group_get_external_user(struct file *filep); extern void vfio_group_put_external_user(struct vfio_group *group); extern bool vfio_external_group_match_file(struct vfio_group *group, struct file *filep); extern int vfio_external_user_iommu_id(struct vfio_group *group); extern long vfio_external_check_extension(struct vfio_group *group, unsigned long arg); #define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long)) extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage, int prot, unsigned long *phys_pfn); extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, 
int npage); /* each type has independent events */ enum vfio_notify_type { VFIO_IOMMU_NOTIFY = 0, VFIO_GROUP_NOTIFY = 1, }; /* events for VFIO_IOMMU_NOTIFY */ #define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0) /* events for VFIO_GROUP_NOTIFY */ #define VFIO_GROUP_NOTIFY_SET_KVM BIT(0) extern int vfio_register_notifier(struct device *dev, enum vfio_notify_type type, unsigned long *required_events, struct notifier_block *nb); extern int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type, struct notifier_block *nb); struct kvm; extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm); /* * Sub-module helpers */ struct vfio_info_cap { struct vfio_info_cap_header *buf; size_t size; }; extern struct vfio_info_cap_header *vfio_info_cap_add( struct vfio_info_cap *caps, size_t size, u16 id, u16 version); extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); extern int vfio_info_add_capability(struct vfio_info_cap *caps, struct vfio_info_cap_header *cap, size_t size); extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs, int max_irq_type, size_t *data_size); struct pci_dev; #if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH) extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev); extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, unsigned int cmd, unsigned long arg); #else static inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev) { } static inline void vfio_spapr_pci_eeh_release(struct pci_dev *pdev) { } static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, unsigned int cmd, unsigned long arg) { return -ENOTTY; } #endif /* CONFIG_VFIO_SPAPR_EEH */ /* * IRQfd - generic */ struct virqfd { void *opaque; struct eventfd_ctx *eventfd; int (*handler)(void *, void *); void (*thread)(void *, void *); void *data; struct work_struct inject; wait_queue_entry_t wait; poll_table pt; struct work_struct shutdown; struct work_struct flush_inject; struct virqfd **pvirqfd; }; extern int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *), void (*thread)(void *, void *), void *data, struct virqfd **pvirqfd, int fd); extern void vfio_virqfd_disable(struct virqfd **pvirqfd); void vfio_virqfd_flush_thread(struct virqfd **pvirqfd); #endif /* VFIO_H */ ioport.h 0000644 00000026647 14722070374 0006256 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * ioport.h Definitions of routines for detecting, reserving and * allocating system resources. * * Authors: Linus Torvalds */ #ifndef _LINUX_IOPORT_H #define _LINUX_IOPORT_H #ifndef __ASSEMBLY__ #include <linux/compiler.h> #include <linux/types.h> #include <linux/bits.h> /* * Resources are tree-like, allowing * nesting etc.. */ struct resource { resource_size_t start; resource_size_t end; const char *name; unsigned long flags; unsigned long desc; struct resource *parent, *sibling, *child; }; /* * IO resources have these defined flags. * * PCI devices expose these flags to userspace in the "resource" sysfs file, * so don't move them. 
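 *
 * As a small illustration (not part of the original header;
 * devm_ioremap_resource() is assumed from elsewhere in the tree), a
 * driver can key off the type bits of a resource it was handed:
 *
 *	if (resource_type(res) == IORESOURCE_MEM)
 *		base = devm_ioremap_resource(dev, res);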
*/ #define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ #define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */ #define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */ #define IORESOURCE_MEM 0x00000200 #define IORESOURCE_REG 0x00000300 /* Register offsets */ #define IORESOURCE_IRQ 0x00000400 #define IORESOURCE_DMA 0x00000800 #define IORESOURCE_BUS 0x00001000 #define IORESOURCE_PREFETCH 0x00002000 /* No side effects */ #define IORESOURCE_READONLY 0x00004000 #define IORESOURCE_CACHEABLE 0x00008000 #define IORESOURCE_RANGELENGTH 0x00010000 #define IORESOURCE_SHADOWABLE 0x00020000 #define IORESOURCE_SIZEALIGN 0x00040000 /* size indicates alignment */ #define IORESOURCE_STARTALIGN 0x00080000 /* start field is alignment */ #define IORESOURCE_MEM_64 0x00100000 #define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */ #define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */ #define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */ #define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */ #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ #define IORESOURCE_DISABLED 0x10000000 #define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */ #define IORESOURCE_AUTO 0x40000000 #define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ /* I/O resource extended types */ #define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM) /* PnP IRQ specific bits (IORESOURCE_BITS) */ #define IORESOURCE_IRQ_HIGHEDGE (1<<0) #define IORESOURCE_IRQ_LOWEDGE (1<<1) #define IORESOURCE_IRQ_HIGHLEVEL (1<<2) #define IORESOURCE_IRQ_LOWLEVEL (1<<3) #define IORESOURCE_IRQ_SHAREABLE (1<<4) #define IORESOURCE_IRQ_OPTIONAL (1<<5) /* PnP DMA specific bits (IORESOURCE_BITS) */ #define IORESOURCE_DMA_TYPE_MASK (3<<0) #define IORESOURCE_DMA_8BIT (0<<0) #define IORESOURCE_DMA_8AND16BIT (1<<0) #define IORESOURCE_DMA_16BIT (2<<0) #define IORESOURCE_DMA_MASTER (1<<2) #define IORESOURCE_DMA_BYTE (1<<3) #define IORESOURCE_DMA_WORD (1<<4) #define IORESOURCE_DMA_SPEED_MASK (3<<6) #define IORESOURCE_DMA_COMPATIBLE (0<<6) #define IORESOURCE_DMA_TYPEA (1<<6) #define IORESOURCE_DMA_TYPEB (2<<6) #define IORESOURCE_DMA_TYPEF (3<<6) /* PnP memory I/O specific bits (IORESOURCE_BITS) */ #define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ #define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ #define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ #define IORESOURCE_MEM_TYPE_MASK (3<<3) #define IORESOURCE_MEM_8BIT (0<<3) #define IORESOURCE_MEM_16BIT (1<<3) #define IORESOURCE_MEM_8AND16BIT (2<<3) #define IORESOURCE_MEM_32BIT (3<<3) #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ #define IORESOURCE_MEM_EXPANSIONROM (1<<6) /* PnP I/O specific bits (IORESOURCE_BITS) */ #define IORESOURCE_IO_16BIT_ADDR (1<<0) #define IORESOURCE_IO_FIXED (1<<1) #define IORESOURCE_IO_SPARSE (1<<2) /* PCI ROM control bits (IORESOURCE_BITS) */ #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ #define IORESOURCE_ROM_SHADOW (1<<1) /* Use RAM image, not ROM BAR */ /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ #define IORESOURCE_PCI_EA_BEI (1<<5) /* BAR Equivalent Indicator */ /* * I/O Resource Descriptors * * Descriptors are used by walk_iomem_res_desc() and region_intersects() * for searching a specific resource range in the iomem table. 
Assign * a new descriptor when a resource range supports the search interfaces. * Otherwise, resource.desc must be set to IORES_DESC_NONE (0). */ enum { IORES_DESC_NONE = 0, IORES_DESC_CRASH_KERNEL = 1, IORES_DESC_ACPI_TABLES = 2, IORES_DESC_ACPI_NV_STORAGE = 3, IORES_DESC_PERSISTENT_MEMORY = 4, IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, IORES_DESC_RESERVED = 7, }; /* * Flags controlling ioremap() behavior. */ enum { IORES_MAP_SYSTEM_RAM = BIT(0), IORES_MAP_ENCRYPTED = BIT(1), }; /* helpers to define resources */ #define DEFINE_RES_NAMED(_start, _size, _name, _flags) \ { \ .start = (_start), \ .end = (_start) + (_size) - 1, \ .name = (_name), \ .flags = (_flags), \ .desc = IORES_DESC_NONE, \ } #define DEFINE_RES_IO_NAMED(_start, _size, _name) \ DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO) #define DEFINE_RES_IO(_start, _size) \ DEFINE_RES_IO_NAMED((_start), (_size), NULL) #define DEFINE_RES_MEM_NAMED(_start, _size, _name) \ DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM) #define DEFINE_RES_MEM(_start, _size) \ DEFINE_RES_MEM_NAMED((_start), (_size), NULL) #define DEFINE_RES_IRQ_NAMED(_irq, _name) \ DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ) #define DEFINE_RES_IRQ(_irq) \ DEFINE_RES_IRQ_NAMED((_irq), NULL) #define DEFINE_RES_DMA_NAMED(_dma, _name) \ DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA) #define DEFINE_RES_DMA(_dma) \ DEFINE_RES_DMA_NAMED((_dma), NULL) /* PC/ISA/whatever - the normal PC address spaces: IO and memory */ extern struct resource ioport_resource; extern struct resource iomem_resource; extern struct resource *request_resource_conflict(struct resource *root, struct resource *new); extern int request_resource(struct resource *root, struct resource *new); extern int release_resource(struct resource *new); void release_child_resources(struct resource *new); extern void reserve_region_with_split(struct resource *root, resource_size_t start, resource_size_t end, const char *name); extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); extern int insert_resource(struct resource *parent, struct resource *new); extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); extern int remove_resource(struct resource *old); extern void arch_remove_reservations(struct resource *avail); extern int allocate_resource(struct resource *root, struct resource *new, resource_size_t size, resource_size_t min, resource_size_t max, resource_size_t align, resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t), void *alignf_data); struct resource *lookup_resource(struct resource *root, resource_size_t start); int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size); resource_size_t resource_alignment(struct resource *res); static inline resource_size_t resource_size(const struct resource *res) { return res->end - res->start + 1; } static inline unsigned long resource_type(const struct resource *res) { return res->flags & IORESOURCE_TYPE_BITS; } static inline unsigned long resource_ext_type(const struct resource *res) { return res->flags & IORESOURCE_EXT_TYPE_BITS; } /* True iff r1 completely contains r2 */ static inline bool resource_contains(struct resource *r1, struct resource *r2) { if (resource_type(r1) != resource_type(r2)) return false; if (r1->flags & IORESOURCE_UNSET || r2->flags & IORESOURCE_UNSET) return false; return r1->start <= r2->start && r1->end >= r2->end; } /* 
Convenience shorthand with allocation */ #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) #define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED) #define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) #define request_mem_region_exclusive(start,n,name) \ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) #define rename_region(region, newname) do { (region)->name = (newname); } while (0) extern struct resource * __request_region(struct resource *, resource_size_t start, resource_size_t n, const char *name, int flags); /* Compatibility cruft */ #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) #define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) extern void __release_region(struct resource *, resource_size_t, resource_size_t); #ifdef CONFIG_MEMORY_HOTREMOVE extern int release_mem_region_adjustable(struct resource *, resource_size_t, resource_size_t); #endif /* Wrappers for managed devices */ struct device; extern int devm_request_resource(struct device *dev, struct resource *root, struct resource *new); extern void devm_release_resource(struct device *dev, struct resource *new); #define devm_request_region(dev,start,n,name) \ __devm_request_region(dev, &ioport_resource, (start), (n), (name)) #define devm_request_mem_region(dev,start,n,name) \ __devm_request_region(dev, &iomem_resource, (start), (n), (name)) extern struct resource * __devm_request_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n, const char *name); #define devm_release_region(dev, start, n) \ __devm_release_region(dev, &ioport_resource, (start), (n)) #define devm_release_mem_region(dev, start, n) \ __devm_release_region(dev, &iomem_resource, (start), (n)) extern void __devm_release_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n); extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); extern bool iomem_is_exclusive(u64 addr); extern int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)); extern int walk_mem_res(u64 start, u64 end, void *arg, int (*func)(struct resource *, void *)); extern int walk_system_ram_res(u64 start, u64 end, void *arg, int (*func)(struct resource *, void *)); extern int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end, void *arg, int (*func)(struct resource *, void *)); /* True if any part of r1 overlaps r2 */ static inline bool resource_overlaps(struct resource *r1, struct resource *r2) { return (r1->start <= r2->end && r1->end >= r2->start); } struct resource *devm_request_free_mem_region(struct device *dev, struct resource *base, unsigned long size); struct resource *request_free_mem_region(struct resource *base, unsigned long size, const char *name); static inline void irqresource_disabled(struct resource *res, u32 irq) { res->start = irq; res->end = irq; res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET; } #ifdef CONFIG_IO_STRICT_DEVMEM void revoke_devmem(struct resource *res); #else static inline void revoke_devmem(struct resource *res) { }; #endif #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ edd.h 
0000644 00000002065 14722070374 0005462 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/edd.h * Copyright (C) 2002, 2003, 2004 Dell Inc. * by Matt Domsch <Matt_Domsch@dell.com> * * structures and definitions for the int 13h, ax={41,48}h * BIOS Enhanced Disk Drive Services * This is based on the T13 group document D1572 Revision 0 (August 14 2002) * available at http://www.t13.org/docs2002/d1572r0.pdf. It is * very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf * * In a nutshell, arch/{i386,x86_64}/boot/setup.S populates a scratch * table in the boot_params that contains a list of BIOS-enumerated * boot devices. * In arch/{i386,x86_64}/kernel/setup.c, this information is * transferred into the edd structure, and in drivers/firmware/edd.c, that * information is used to identify BIOS boot disk. The code in setup.S * is very sensitive to the size of these structures. */ #ifndef _LINUX_EDD_H #define _LINUX_EDD_H #include <uapi/linux/edd.h> #ifndef __ASSEMBLY__ extern struct edd edd; #endif /*!__ASSEMBLY__ */ #endif /* _LINUX_EDD_H */ zbud.h 0000644 00000001344 14722070374 0005671 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ZBUD_H_ #define _ZBUD_H_ #include <linux/types.h> struct zbud_pool; struct zbud_ops { int (*evict)(struct zbud_pool *pool, unsigned long handle); }; struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops); void zbud_destroy_pool(struct zbud_pool *pool); int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, unsigned long *handle); void zbud_free(struct zbud_pool *pool, unsigned long handle); int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries); void *zbud_map(struct zbud_pool *pool, unsigned long handle); void zbud_unmap(struct zbud_pool *pool, unsigned long handle); u64 zbud_get_pool_size(struct zbud_pool *pool); #endif /* _ZBUD_H_ */ nubus.h 0000644 00000013052 14722070374 0006060 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* nubus.h: various definitions and prototypes for NuBus drivers to use. Originally written by Alan Cox. Hacked to death by C. Scott Ananian and David Huggins-Daines. */ #ifndef LINUX_NUBUS_H #define LINUX_NUBUS_H #include <linux/device.h> #include <asm/nubus.h> #include <uapi/linux/nubus.h> struct proc_dir_entry; struct seq_file; struct nubus_dir { unsigned char *base; unsigned char *ptr; int done; int mask; struct proc_dir_entry *procdir; }; struct nubus_dirent { unsigned char *base; unsigned char type; __u32 data; /* Actually 24 bits used */ int mask; }; struct nubus_board { struct device dev; /* Only 9-E actually exist, though 0-8 are also theoretically possible, and 0 is a special case which represents the motherboard and onboard peripherals (Ethernet, video) */ int slot; /* For slot 0, this is bogus. */ char name[64]; /* Format block */ unsigned char *fblock; /* Root directory (does *not* always equal fblock + doffset!) 
*/ unsigned char *directory; unsigned long slot_addr; /* Offset to root directory (sometimes) */ unsigned long doffset; /* Length over which to compute the crc */ unsigned long rom_length; /* Completely useless most of the time */ unsigned long crc; unsigned char rev; unsigned char format; unsigned char lanes; /* Directory entry in /proc/bus/nubus */ struct proc_dir_entry *procdir; }; struct nubus_rsrc { struct list_head list; /* The functional resource ID */ unsigned char resid; /* These are mostly here for convenience; we could always read them from the ROMs if we wanted to */ unsigned short category; unsigned short type; unsigned short dr_sw; unsigned short dr_hw; /* Functional directory */ unsigned char *directory; /* Much of our info comes from here */ struct nubus_board *board; }; /* This is all NuBus functional resources (used to find devices later on) */ extern struct list_head nubus_func_rsrcs; struct nubus_driver { struct device_driver driver; int (*probe)(struct nubus_board *board); int (*remove)(struct nubus_board *board); }; extern struct bus_type nubus_bus_type; /* Generic NuBus interface functions, modelled after the PCI interface */ #ifdef CONFIG_PROC_FS void nubus_proc_init(void); struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board); struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir, const struct nubus_dirent *ent, struct nubus_board *board); void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, const struct nubus_dirent *ent, unsigned int size); void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, const struct nubus_dirent *ent); #else static inline void nubus_proc_init(void) {} static inline struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board) { return NULL; } static inline struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir, const struct nubus_dirent *ent, struct nubus_board *board) { return NULL; } static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, const struct nubus_dirent *ent, unsigned int size) {} static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, const struct nubus_dirent *ent) {} #endif struct nubus_rsrc *nubus_first_rsrc_or_null(void); struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from); #define for_each_func_rsrc(f) \ for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f)) #define for_each_board_func_rsrc(b, f) \ for_each_func_rsrc(f) if (f->board != b) {} else /* These are somewhat more NuBus-specific. They all return 0 for success and -1 for failure, as you'd expect. 
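 *
 * A minimal sketch (illustrative, not from the original header; 'fres'
 * and inspect() are placeholders):
 *
 *	struct nubus_dir dir;
 *	struct nubus_dirent ent;
 *
 *	if (nubus_get_func_dir(fres, &dir) == 0)
 *		while (nubus_readdir(&dir, &ent) == 0)
 *			inspect(&ent);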
*/ /* The root directory which contains the board and functional directories */ int nubus_get_root_dir(const struct nubus_board *board, struct nubus_dir *dir); /* The board directory */ int nubus_get_board_dir(const struct nubus_board *board, struct nubus_dir *dir); /* The functional directory */ int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir); /* These work on any directory gotten via the above */ int nubus_readdir(struct nubus_dir *dir, struct nubus_dirent *ent); int nubus_find_rsrc(struct nubus_dir *dir, unsigned char rsrc_type, struct nubus_dirent *ent); int nubus_rewinddir(struct nubus_dir *dir); /* Things to do with directory entries */ int nubus_get_subdir(const struct nubus_dirent *ent, struct nubus_dir *dir); void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent, unsigned int len); unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent, unsigned int len); void nubus_seq_write_rsrc_mem(struct seq_file *m, const struct nubus_dirent *dirent, unsigned int len); unsigned char *nubus_dirptr(const struct nubus_dirent *nd); /* Declarations relating to driver model objects */ int nubus_parent_device_register(void); int nubus_device_register(struct nubus_board *board); int nubus_driver_register(struct nubus_driver *ndrv); void nubus_driver_unregister(struct nubus_driver *ndrv); int nubus_proc_show(struct seq_file *m, void *data); static inline void nubus_set_drvdata(struct nubus_board *board, void *data) { dev_set_drvdata(&board->dev, data); } static inline void *nubus_get_drvdata(struct nubus_board *board) { return dev_get_drvdata(&board->dev); } /* Returns a pointer to the "standard" slot space. */ static inline void *nubus_slot_addr(int slot) { return (void *)(0xF0000000 | (slot << 24)); } #endif /* LINUX_NUBUS_H */ atomic.h 0000644 00000004320 14722070374 0006176 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* Atomic operations usable in machine independent code */ #ifndef _LINUX_ATOMIC_H #define _LINUX_ATOMIC_H #include <linux/types.h> #include <asm/atomic.h> #include <asm/barrier.h> /* * Relaxed variants of xchg, cmpxchg and some atomic operations. * * We support four variants: * * - Fully ordered: The default implementation, no suffix required. * - Acquire: Provides ACQUIRE semantics, _acquire suffix. * - Release: Provides RELEASE semantics, _release suffix. * - Relaxed: No ordering guarantees, _relaxed suffix. * * For compound atomics performing both a load and a store, ACQUIRE * semantics apply only to the load and RELEASE semantics only to the * store portion of the operation. Note that a failed cmpxchg_acquire * does -not- imply any memory ordering constraints. * * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. */ /* * The idea here is to build acquire/release variants by adding explicit * barriers on top of the relaxed variant. In the case where the relaxed * variant is already fully ordered, no additional barriers are needed. * * If an architecture overrides __atomic_acquire_fence() it will probably * want to define smp_mb__after_spinlock(). */ #ifndef __atomic_acquire_fence #define __atomic_acquire_fence smp_mb__after_atomic #endif #ifndef __atomic_release_fence #define __atomic_release_fence smp_mb__before_atomic #endif #ifndef __atomic_pre_full_fence #define __atomic_pre_full_fence smp_mb__before_atomic #endif #ifndef __atomic_post_full_fence #define __atomic_post_full_fence smp_mb__after_atomic #endif #define __atomic_op_acquire(op, args...) 
\ ({ \ typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \ __atomic_acquire_fence(); \ __ret; \ }) #define __atomic_op_release(op, args...) \ ({ \ __atomic_release_fence(); \ op##_relaxed(args); \ }) #define __atomic_op_fence(op, args...) \ ({ \ typeof(op##_relaxed(args)) __ret; \ __atomic_pre_full_fence(); \ __ret = op##_relaxed(args); \ __atomic_post_full_fence(); \ __ret; \ }) #include <linux/atomic-fallback.h> #include <asm-generic/atomic-long.h> #endif /* _LINUX_ATOMIC_H */ interrupt.h 0000644 00000055236 14722070374 0006772 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* interrupt.h */ #ifndef _LINUX_INTERRUPT_H #define _LINUX_INTERRUPT_H #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/cpumask.h> #include <linux/irqreturn.h> #include <linux/irqnr.h> #include <linux/hardirq.h> #include <linux/irqflags.h> #include <linux/hrtimer.h> #include <linux/kref.h> #include <linux/workqueue.h> #include <linux/atomic.h> #include <asm/ptrace.h> #include <asm/irq.h> #include <asm/sections.h> /* * These correspond to the IORESOURCE_IRQ_* defines in * linux/ioport.h to select the interrupt line behaviour. When * requesting an interrupt without specifying an IRQF_TRIGGER, the * setting should be assumed to be "as already configured", which * may be as per machine or firmware initialisation. */ #define IRQF_TRIGGER_NONE 0x00000000 #define IRQF_TRIGGER_RISING 0x00000001 #define IRQF_TRIGGER_FALLING 0x00000002 #define IRQF_TRIGGER_HIGH 0x00000004 #define IRQF_TRIGGER_LOW 0x00000008 #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) #define IRQF_TRIGGER_PROBE 0x00000010 /* * These flags are used only by the kernel as part of the * irq handling routines. * * IRQF_SHARED - allow sharing the irq among several devices * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur * IRQF_TIMER - Flag to mark this interrupt as timer interrupt * IRQF_PERCPU - Interrupt is per cpu * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is * registered first in a shared interrupt is considered for * performance reasons) * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. * Used by threaded interrupts which need to keep the * irq line disabled until the threaded handler has been run. * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee * that this interrupt will wake the system from a suspended * state. See Documentation/power/suspend-and-interrupts.rst * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set * IRQF_NO_THREAD - Interrupt cannot be threaded * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device * resume time. * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this * interrupt handler after suspending interrupts. For system * wakeup devices users need to implement wakeup detection in * their interrupt handlers. * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it. * Users will enable it explicitly by enable_irq() or enable_nmi() * later.
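 *
 * As an illustrative example (not part of the original header; the
 * handler names and the "mydev" string are placeholders), a driver
 * sharing a level-triggered line with a threaded handler might pass:
 *
 *	ret = request_threaded_irq(irq, my_quick_check, my_thread_fn,
 *				   IRQF_SHARED | IRQF_ONESHOT, "mydev", dev);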
*/ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 #define __IRQF_TIMER 0x00000200 #define IRQF_PERCPU 0x00000400 #define IRQF_NOBALANCING 0x00000800 #define IRQF_IRQPOLL 0x00001000 #define IRQF_ONESHOT 0x00002000 #define IRQF_NO_SUSPEND 0x00004000 #define IRQF_FORCE_RESUME 0x00008000 #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 #define IRQF_NO_AUTOEN 0x00080000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) /* * These values can be returned by request_any_context_irq() and * describe the context the interrupt will be run in. * * IRQC_IS_HARDIRQ - interrupt runs in hardirq context * IRQC_IS_NESTED - interrupt runs in a nested threaded context */ enum { IRQC_IS_HARDIRQ = 0, IRQC_IS_NESTED, }; typedef irqreturn_t (*irq_handler_t)(int, void *); /** * struct irqaction - per interrupt action descriptor * @handler: interrupt handler function * @name: name of the device * @dev_id: cookie to identify the device * @percpu_dev_id: cookie to identify the device * @next: pointer to the next irqaction for shared interrupts * @irq: interrupt number * @flags: flags (see IRQF_* above) * @thread_fn: interrupt handler function for threaded interrupts * @thread: thread pointer for threaded interrupts * @secondary: pointer to secondary irqaction (force threading) * @thread_flags: flags related to @thread * @thread_mask: bitmask for keeping track of @thread activity * @dir: pointer to the proc/irq/NN/name entry */ struct irqaction { irq_handler_t handler; void *dev_id; void __percpu *percpu_dev_id; struct irqaction *next; irq_handler_t thread_fn; struct task_struct *thread; struct irqaction *secondary; unsigned int irq; unsigned int flags; unsigned long thread_flags; unsigned long thread_mask; const char *name; struct proc_dir_entry *dir; } ____cacheline_internodealigned_in_smp; extern irqreturn_t no_action(int cpl, void *dev_id); /* * If a (PCI) device interrupt is not connected, we set dev->irq to * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we * can distinguish that case from other error returns. * * 0x80000000 is guaranteed to be outside the available range of interrupts * and easy to distinguish from other possible incorrect values.
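 *
 * Illustrative consequence (not spelled out in the original header): a
 * driver probing such a device can treat -ENOTCONN from request_irq()
 * as "no interrupt line wired up" and fall back to polling.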
*/ #define IRQ_NOTCONNECTED (1U << 31) extern int __must_check request_threaded_irq(unsigned int irq, irq_handler_t handler, irq_handler_t thread_fn, unsigned long flags, const char *name, void *dev); static inline int __must_check request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev) { return request_threaded_irq(irq, handler, NULL, flags, name, dev); } extern int __must_check request_any_context_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev_id); extern int __must_check __request_percpu_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *devname, void __percpu *percpu_dev_id); extern int __must_check request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev); static inline int __must_check request_percpu_irq(unsigned int irq, irq_handler_t handler, const char *devname, void __percpu *percpu_dev_id) { return __request_percpu_irq(irq, handler, 0, devname, percpu_dev_id); } extern int __must_check request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *devname, void __percpu *dev); extern const void *free_irq(unsigned int, void *); extern void free_percpu_irq(unsigned int, void __percpu *); extern const void *free_nmi(unsigned int irq, void *dev_id); extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id); struct device; extern int __must_check devm_request_threaded_irq(struct device *dev, unsigned int irq, irq_handler_t handler, irq_handler_t thread_fn, unsigned long irqflags, const char *devname, void *dev_id); static inline int __must_check devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, devname, dev_id); } extern int __must_check devm_request_any_context_irq(struct device *dev, unsigned int irq, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); /* * On lockdep we don't want to enable hardirqs in hardirq * context. Use local_irq_enable_in_hardirq() to annotate * kernel code that has to do this nevertheless (pretty much * the only valid case is for old/broken hardware that is * insanely slow). * * NOTE: in theory this might break fragile code that relies * on hardirq delivery - in practice we don't seem to have such * places left. So the only effect should be slightly increased * irqs-off latencies. */ #ifdef CONFIG_LOCKDEP # define local_irq_enable_in_hardirq() do { } while (0) #else # define local_irq_enable_in_hardirq() local_irq_enable() #endif extern void disable_irq_nosync(unsigned int irq); extern bool disable_hardirq(unsigned int irq); extern void disable_irq(unsigned int irq); extern void disable_percpu_irq(unsigned int irq); extern void enable_irq(unsigned int irq); extern void enable_percpu_irq(unsigned int irq, unsigned int type); extern bool irq_percpu_is_enabled(unsigned int irq); extern void irq_wake_thread(unsigned int irq, void *dev_id); extern void disable_nmi_nosync(unsigned int irq); extern void disable_percpu_nmi(unsigned int irq); extern void enable_nmi(unsigned int irq); extern void enable_percpu_nmi(unsigned int irq, unsigned int type); extern int prepare_percpu_nmi(unsigned int irq); extern void teardown_percpu_nmi(unsigned int irq); /* The following three functions are for core kernel use only.
*/ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); extern void rearm_wake_irq(unsigned int irq); /** * struct irq_affinity_notify - context for notification of IRQ affinity changes * @irq: Interrupt to which notification applies * @kref: Reference count, for internal use * @work: Work item, for internal use * @notify: Function to be called on change. This will be * called in process context. * @release: Function to be called on release. This will be * called in process context. Once registered, the * structure must only be freed when this function is * called or later. */ struct irq_affinity_notify { unsigned int irq; struct kref kref; struct work_struct work; void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); void (*release)(struct kref *ref); }; #define IRQ_AFFINITY_MAX_SETS 4 /** * struct irq_affinity - Description for automatic irq affinity assignments * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of * the MSI(-X) vector space * @post_vectors: Don't apply affinity to @post_vectors at end of * the MSI(-X) vector space * @nr_sets: The number of interrupt sets for which affinity * spreading is required * @set_size: Array holding the size of each interrupt set * @calc_sets: Callback for calculating the number and size * of interrupt sets * @priv: Private data for usage by @calc_sets, usually a * pointer to driver/device specific data. */ struct irq_affinity { unsigned int pre_vectors; unsigned int post_vectors; unsigned int nr_sets; unsigned int set_size[IRQ_AFFINITY_MAX_SETS]; void (*calc_sets)(struct irq_affinity *, unsigned int nvecs); void *priv; }; /** * struct irq_affinity_desc - Interrupt affinity descriptor * @mask: cpumask to hold the affinity assignment * @is_managed: 1 if the interrupt is managed internally */ struct irq_affinity_desc { struct cpumask mask; unsigned int is_managed : 1; }; #if defined(CONFIG_SMP) extern cpumask_var_t irq_default_affinity; /* Internal implementation. Use the helpers below */ extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, bool force); /** * irq_set_affinity - Set the irq affinity of a given irq * @irq: Interrupt to set affinity * @cpumask: cpumask * * Fails if cpumask does not contain an online CPU */ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) { return __irq_set_affinity(irq, cpumask, false); } /** * irq_force_affinity - Force the irq affinity of a given irq * @irq: Interrupt to set affinity * @cpumask: cpumask * * Same as irq_set_affinity, but without checking the mask against * online cpus. * * Solely for low level cpu hotplug code, where we need to make per * cpu interrupts affine before the cpu becomes online.
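 *
 * For instance (illustrative only), CPU bring-up code could do:
 *
 *	irq_force_affinity(irq, cpumask_of(cpu));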
*/ static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) { return __irq_set_affinity(irq, cpumask, true); } extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); struct irq_affinity_desc * irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd); unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, const struct irq_affinity *affd); #else /* CONFIG_SMP */ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) { return -EINVAL; } static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) { return 0; } static inline int irq_can_set_affinity(unsigned int irq) { return 0; } static inline int irq_select_affinity(unsigned int irq) { return 0; } static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) { return -EINVAL; } static inline int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { return 0; } static inline struct irq_affinity_desc * irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd) { return NULL; } static inline unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, const struct irq_affinity *affd) { return maxvec; } #endif /* CONFIG_SMP */ /* * Special lockdep variants of irq disabling/enabling. * These should be used for locking constructs in code that knows * that a particular irq context is disabled and is the only * irq-context user of a lock, so it is safe to take the lock in the * irq-disabled section without disabling hardirqs. * * On !CONFIG_LOCKDEP they are equivalent to the normal * irq disable/enable methods. */ static inline void disable_irq_nosync_lockdep(unsigned int irq) { disable_irq_nosync(irq); #ifdef CONFIG_LOCKDEP local_irq_disable(); #endif } static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) { disable_irq_nosync(irq); #ifdef CONFIG_LOCKDEP local_irq_save(*flags); #endif } static inline void disable_irq_lockdep(unsigned int irq) { disable_irq(irq); #ifdef CONFIG_LOCKDEP local_irq_disable(); #endif } static inline void enable_irq_lockdep(unsigned int irq) { #ifdef CONFIG_LOCKDEP local_irq_enable(); #endif enable_irq(irq); } static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) { #ifdef CONFIG_LOCKDEP local_irq_restore(*flags); #endif enable_irq(irq); } /* IRQ wakeup (PM) control: */ extern int irq_set_irq_wake(unsigned int irq, unsigned int on); static inline int enable_irq_wake(unsigned int irq) { return irq_set_irq_wake(irq, 1); } static inline int disable_irq_wake(unsigned int irq) { return irq_set_irq_wake(irq, 0); } /* * irq_get_irqchip_state/irq_set_irqchip_state specific flags */ enum irqchip_irq_state { IRQCHIP_STATE_PENDING, /* Is interrupt pending? */ IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */ IRQCHIP_STATE_MASKED, /* Is interrupt masked? */ IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high?
*/ }; extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool *state); extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool state); #ifdef CONFIG_IRQ_FORCED_THREADING # ifdef CONFIG_PREEMPT_RT # define force_irqthreads (true) # else extern bool force_irqthreads; # endif #else #define force_irqthreads (0) #endif #ifndef local_softirq_pending #ifndef local_softirq_pending_ref #define local_softirq_pending_ref irq_stat.__softirq_pending #endif #define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref)) #define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x))) #define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x))) #endif /* local_softirq_pending */ /* Some architectures might implement lazy enabling/disabling of * interrupts. In some cases, such as stop_machine, we might want * to ensure that after a local_irq_disable(), interrupts have * really been disabled in hardware. Such architectures need to * implement the following hook. */ #ifndef hard_irq_disable #define hard_irq_disable() do { } while(0) #endif /* Please avoid allocating new softirqs unless you _really_ need high frequency threaded job scheduling. For almost all purposes tasklets are more than enough. E.g. all serial device BHs et al. should be converted to tasklets, not softirqs. */ enum { HI_SOFTIRQ=0, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ, BLOCK_SOFTIRQ, IRQ_POLL_SOFTIRQ, TASKLET_SOFTIRQ, SCHED_SOFTIRQ, HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the numbering. Sigh! */ RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */ NR_SOFTIRQS }; #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) /* Map softirq index to softirq name. Update 'softirq_to_name' in * kernel/softirq.c when adding a new softirq. */ extern const char * const softirq_to_name[NR_SOFTIRQS]; /* softirq mask and active fields moved to irq_cpustat_t in * asm/hardirq.h to get better cache usage. KAO */ struct softirq_action { void (*action)(struct softirq_action *); }; asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); #ifdef __ARCH_HAS_DO_SOFTIRQ void do_softirq_own_stack(void); #else static inline void do_softirq_own_stack(void) { __do_softirq(); } #endif extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); DECLARE_PER_CPU(struct task_struct *, ksoftirqd); static inline struct task_struct *this_cpu_ksoftirqd(void) { return this_cpu_read(ksoftirqd); } /* Tasklets --- the multithreaded analogue of BHs. What distinguishes them from generic softirqs: a given tasklet runs on only one CPU at a time. What distinguishes them from BHs: different tasklets may run simultaneously on different CPUs. Properties: * If tasklet_schedule() is called, the tasklet is guaranteed to be executed on some cpu at least once afterwards. * If the tasklet is already scheduled, but its execution has not yet started, it will be executed only once. * If the tasklet is already running on another CPU (or schedule is called from the tasklet itself), it is rescheduled for later. * A tasklet is strictly serialized with respect to itself, but not with respect to other tasklets. If a client needs intertask synchronization, it must provide it with spinlocks.
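 *
 * A minimal usage sketch (illustrative, not from the original header;
 * my_tasklet_fn and its body are placeholders):
 *
 *	static void my_tasklet_fn(unsigned long data);
 *	static DECLARE_TASKLET_OLD(my_tasklet, my_tasklet_fn);
 *
 * and, from the interrupt handler:
 *
 *	tasklet_schedule(&my_tasklet);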
*/ struct tasklet_struct { struct tasklet_struct *next; unsigned long state; atomic_t count; void (*func)(unsigned long); unsigned long data; }; #define DECLARE_TASKLET_OLD(name, _func) \ struct tasklet_struct name = { \ .count = ATOMIC_INIT(0), \ .func = _func, \ } #define DECLARE_TASKLET_DISABLED_OLD(name, _func) \ struct tasklet_struct name = { \ .count = ATOMIC_INIT(1), \ .func = _func, \ } enum { TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ }; #ifdef CONFIG_SMP static inline int tasklet_trylock(struct tasklet_struct *t) { return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); } static inline void tasklet_unlock(struct tasklet_struct *t) { smp_mb__before_atomic(); clear_bit(TASKLET_STATE_RUN, &(t)->state); } static inline void tasklet_unlock_wait(struct tasklet_struct *t) { while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } } #else #define tasklet_trylock(t) 1 #define tasklet_unlock_wait(t) do { } while (0) #define tasklet_unlock(t) do { } while (0) #endif extern void __tasklet_schedule(struct tasklet_struct *t); static inline void tasklet_schedule(struct tasklet_struct *t) { if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) __tasklet_schedule(t); } extern void __tasklet_hi_schedule(struct tasklet_struct *t); static inline void tasklet_hi_schedule(struct tasklet_struct *t) { if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) __tasklet_hi_schedule(t); } static inline void tasklet_disable_nosync(struct tasklet_struct *t) { atomic_inc(&t->count); smp_mb__after_atomic(); } static inline void tasklet_disable(struct tasklet_struct *t) { tasklet_disable_nosync(t); tasklet_unlock_wait(t); smp_mb(); } static inline void tasklet_enable(struct tasklet_struct *t) { smp_mb__before_atomic(); atomic_dec(&t->count); } extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); /* * Autoprobing for irqs: * * probe_irq_on() and probe_irq_off() provide robust primitives * for accurate IRQ probing during kernel initialization. They are * reasonably simple to use, are not "fooled" by spurious interrupts, * and, unlike other attempts at IRQ probing, they do not get hung on * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards). * * For reasonably foolproof probing, use them as follows: * * 1. clear and/or mask the device's internal interrupt. * 2. sti(); * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs * 4. enable the device and cause it to trigger an interrupt. * 5. wait for the device to interrupt, using non-intrusive polling or a delay. * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple * 7. service the device to clear its pending interrupt. * 8. loop again if paranoia is required. * * probe_irq_on() returns a mask of allocated IRQs. * * probe_irq_off() takes the mask as a parameter, * and returns the irq number which occurred, * or zero if none occurred, or a negative irq number * if more than one irq occurred.
*/ #if !defined(CONFIG_GENERIC_IRQ_PROBE) static inline unsigned long probe_irq_on(void) { return 0; } static inline int probe_irq_off(unsigned long val) { return 0; } static inline unsigned int probe_irq_mask(unsigned long val) { return 0; } #else extern unsigned long probe_irq_on(void); /* returns 0 on failure */ extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */ extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */ #endif #ifdef CONFIG_PROC_FS /* Initialize /proc/irq/ */ extern void init_irq_proc(void); #else static inline void init_irq_proc(void) { } #endif #ifdef CONFIG_IRQ_TIMINGS void irq_timings_enable(void); void irq_timings_disable(void); u64 irq_timings_next_event(u64 now); #endif struct seq_file; int show_interrupts(struct seq_file *p, void *v); int arch_show_interrupts(struct seq_file *p, int prec); extern int early_irq_init(void); extern int arch_probe_nr_irqs(void); extern int arch_early_irq_init(void); /* * We want to know which function is an entrypoint of a hardirq or a softirq. */ #define __irq_entry __attribute__((__section__(".irqentry.text"))) #define __softirq_entry \ __attribute__((__section__(".softirqentry.text"))) #endif printk.h 0000644 00000037662 14722070374 0006250 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __KERNEL_PRINTK__ #define __KERNEL_PRINTK__ #include <stdarg.h> #include <linux/init.h> #include <linux/kern_levels.h> #include <linux/linkage.h> #include <linux/cache.h> extern const char linux_banner[]; extern const char linux_proc_banner[]; #define PRINTK_MAX_SINGLE_HEADER_LEN 2 static inline int printk_get_level(const char *buffer) { if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { switch (buffer[1]) { case '0' ... '7': case 'c': /* KERN_CONT */ return buffer[1]; } } return 0; } static inline const char *printk_skip_level(const char *buffer) { if (printk_get_level(buffer)) return buffer + 2; return buffer; } static inline const char *printk_skip_headers(const char *buffer) { while (printk_get_level(buffer)) buffer = printk_skip_level(buffer); return buffer; } #define CONSOLE_EXT_LOG_MAX 8192 /* printks without a loglevel use this. */ #define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT /* We show everything that is MORE important than this. */ #define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ #define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ #define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ #define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ /* * The default used to be hard-coded at 7 and quiet at 4; both can now * be set from the kernel config.
*/ #define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT #define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET extern int console_printk[]; #define console_loglevel (console_printk[0]) #define default_message_loglevel (console_printk[1]) #define minimum_console_loglevel (console_printk[2]) #define default_console_loglevel (console_printk[3]) static inline void console_silent(void) { console_loglevel = CONSOLE_LOGLEVEL_SILENT; } static inline void console_verbose(void) { if (console_loglevel) console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; } /* strlen("ratelimit") + 1 */ #define DEVKMSG_STR_MAX_SIZE 10 extern char devkmsg_log_str[]; struct ctl_table; extern int suppress_printk; struct va_format { const char *fmt; va_list *va; }; /* * FW_BUG * Add this to a message where you are sure the firmware is buggy or behaves * really stupidly or out of spec. Be aware that the responsible BIOS developer * should be able to fix this issue or at least get a concrete idea of the * problem by reading your message without needing to look at the kernel * code. * * Use it for definite and high priority BIOS bugs. * * FW_WARN * Use it for less clear-cut cases (e.g. could the kernel have messed things * up already?) and medium priority BIOS bugs. * * FW_INFO * Use this one if you want to tell the user or vendor about something * suspicious, but generally harmless, related to the firmware. * * Use it for information or very low priority BIOS bugs. */ #define FW_BUG "[Firmware Bug]: " #define FW_WARN "[Firmware Warn]: " #define FW_INFO "[Firmware Info]: " /* * HW_ERR * Add this to a message for hardware errors, so that the user can report * it to the hardware vendor instead of LKML or the software vendor. */ #define HW_ERR "[Hardware Error]: " /* * DEPRECATED * Add this to a message whenever you want to warn user space about the use * of a deprecated aspect of an API so they can stop using it. */ #define DEPRECATED "[Deprecated]: " /* * Dummy printk for disabled debugging statements to use whilst maintaining * gcc's format checking. */ #define no_printk(fmt, ...) \ ({ \ if (0) \ printk(fmt, ##__VA_ARGS__); \ 0; \ }) #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) void early_printk(const char *fmt, ...); #else static inline __printf(1, 2) __cold void early_printk(const char *s, ...) { } #endif #ifdef CONFIG_PRINTK_NMI extern void printk_nmi_enter(void); extern void printk_nmi_exit(void); extern void printk_nmi_direct_enter(void); extern void printk_nmi_direct_exit(void); #else static inline void printk_nmi_enter(void) { } static inline void printk_nmi_exit(void) { } static inline void printk_nmi_direct_enter(void) { } static inline void printk_nmi_direct_exit(void) { } #endif /* PRINTK_NMI */ #ifdef CONFIG_PRINTK asmlinkage __printf(5, 0) int vprintk_emit(int facility, int level, const char *dict, size_t dictlen, const char *fmt, va_list args); asmlinkage __printf(1, 0) int vprintk(const char *fmt, va_list args); asmlinkage __printf(1, 2) __cold int printk(const char *fmt, ...); /* * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ ! */ __printf(1, 2) __cold int printk_deferred(const char *fmt, ...); /* * Please don't use printk_ratelimit(), because it shares ratelimiting state * with all other unrelated printk_ratelimit() callsites. Instead use * printk_ratelimited() or plain old __ratelimit().
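 *
 * For example (illustrative only; the message and 'qid' are placeholders):
 *
 *	printk_ratelimited(KERN_WARNING "queue %d overflow\n", qid);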
*/ extern int __printk_ratelimit(const char *func); #define printk_ratelimit() __printk_ratelimit(__func__) extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); extern int printk_delay_msec; extern int dmesg_restrict; extern int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf, size_t *lenp, loff_t *ppos); extern void wake_up_klogd(void); char *log_buf_addr_get(void); u32 log_buf_len_get(void); void log_buf_vmcoreinfo_setup(void); void __init setup_log_buf(int early); __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); extern asmlinkage void dump_stack(void) __cold; extern void printk_safe_flush(void); extern void printk_safe_flush_on_panic(void); #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) { return 0; } static inline __printf(1, 2) __cold int printk(const char *s, ...) { return 0; } static inline __printf(1, 2) __cold int printk_deferred(const char *s, ...) { return 0; } static inline int printk_ratelimit(void) { return 0; } static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec) { return false; } static inline void wake_up_klogd(void) { } static inline char *log_buf_addr_get(void) { return NULL; } static inline u32 log_buf_len_get(void) { return 0; } static inline void log_buf_vmcoreinfo_setup(void) { } static inline void setup_log_buf(int early) { } static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...) { } static inline void dump_stack_print_info(const char *log_lvl) { } static inline void show_regs_print_info(const char *log_lvl) { } static inline void dump_stack(void) { } static inline void printk_safe_flush(void) { } static inline void printk_safe_flush_on_panic(void) { } #endif extern int kptr_restrict; #ifndef pr_fmt #define pr_fmt(fmt) fmt #endif #if defined(__KMSG_CHECKER) && defined(KMSG_COMPONENT) /* generate magic string for scripts/kmsg-doc to parse */ #define pr_printk_hash(level, format, ...) \ __KMSG_PRINT(level _FMT_ format _ARGS_ __VA_ARGS__ _END_) #elif defined(CONFIG_KMSG_IDS) && defined(KMSG_COMPONENT) /* format element '%pj' prints the six digit jhash of a string */ #define _pr_printk_hash(pfx, fmt, ...) \ printk(pfx fmt, pfx fmt + __builtin_strlen(pfx), ##__VA_ARGS__) #define pr_printk_hash(level, format, ...) \ _pr_printk_hash(level KMSG_COMPONENT ".%pj: ", format, ##__VA_ARGS__) #else /* !defined(CONFIG_KMSG_IDS) */ #define pr_printk_hash(level, format, ...) \ printk(level pr_fmt(format), ##__VA_ARGS__) #endif /* * These can be used to print at the various log levels. * All of these will print unconditionally, although note that pr_debug() * and other debug macros are compiled out unless either DEBUG is defined * or CONFIG_DYNAMIC_DEBUG is set. */ #define pr_emerg(fmt, ...) \ pr_printk_hash(KERN_EMERG, fmt, ##__VA_ARGS__) #define pr_alert(fmt, ...) \ pr_printk_hash(KERN_ALERT, fmt, ##__VA_ARGS__) #define pr_crit(fmt, ...) \ pr_printk_hash(KERN_CRIT, fmt, ##__VA_ARGS__) #define pr_err(fmt, ...) \ pr_printk_hash(KERN_ERR, fmt, ##__VA_ARGS__) #define pr_warning(fmt, ...) \ pr_printk_hash(KERN_WARNING, fmt, ##__VA_ARGS__) #define pr_warn pr_warning #define pr_notice(fmt, ...) \ pr_printk_hash(KERN_NOTICE, fmt, ##__VA_ARGS__) #define pr_info(fmt, ...) 
\ pr_printk_hash(KERN_INFO, fmt, ##__VA_ARGS__) /* * Like KERN_CONT, pr_cont() should only be used when continuing * a line with no newline ('\n') enclosed. Otherwise it defaults * back to KERN_DEFAULT. */ #define pr_cont(fmt, ...) \ printk(KERN_CONT fmt, ##__VA_ARGS__) /* pr_devel() should produce zero code unless DEBUG is defined */ #ifdef DEBUG #define pr_devel(fmt, ...) \ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_devel(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* If you are writing a driver, please use dev_dbg instead */ #if defined(CONFIG_DYNAMIC_DEBUG) #include <linux/dynamic_debug.h> /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ #define pr_debug(fmt, ...) \ dynamic_pr_debug(fmt, ##__VA_ARGS__) #elif defined(DEBUG) #define pr_debug(fmt, ...) \ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_debug(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* * Print a one-time message (analogous to WARN_ONCE() et al): */ #ifdef CONFIG_PRINTK #define printk_once(fmt, ...) \ ({ \ static bool __section(.data.once) __print_once; \ bool __ret_print_once = !__print_once; \ \ if (!__print_once) { \ __print_once = true; \ printk(fmt, ##__VA_ARGS__); \ } \ unlikely(__ret_print_once); \ }) #define printk_deferred_once(fmt, ...) \ ({ \ static bool __section(.data.once) __print_once; \ bool __ret_print_once = !__print_once; \ \ if (!__print_once) { \ __print_once = true; \ printk_deferred(fmt, ##__VA_ARGS__); \ } \ unlikely(__ret_print_once); \ }) #else #define printk_once(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) #define printk_deferred_once(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) #endif #define pr_emerg_once(fmt, ...) \ printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) #define pr_alert_once(fmt, ...) \ printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) #define pr_crit_once(fmt, ...) \ printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) #define pr_err_once(fmt, ...) \ printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) #define pr_warn_once(fmt, ...) \ printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) #define pr_notice_once(fmt, ...) \ printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info_once(fmt, ...) \ printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) #define pr_cont_once(fmt, ...) \ printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__) #if defined(DEBUG) #define pr_devel_once(fmt, ...) \ printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_devel_once(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* If you are writing a driver, please use dev_dbg instead */ #if defined(DEBUG) #define pr_debug_once(fmt, ...) \ printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_debug_once(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* * ratelimited messages with local ratelimit_state, * no local ratelimit_state used in the !PRINTK case */ #ifdef CONFIG_PRINTK #define printk_ratelimited(fmt, ...) \ ({ \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ \ if (__ratelimit(&_rs)) \ printk(fmt, ##__VA_ARGS__); \ }) #else #define printk_ratelimited(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) #endif #define pr_emerg_ratelimited(fmt, ...) \ printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) #define pr_alert_ratelimited(fmt, ...) \ printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) #define pr_crit_ratelimited(fmt, ...) 
\ printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) #define pr_err_ratelimited(fmt, ...) \ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) #define pr_warn_ratelimited(fmt, ...) \ printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) #define pr_notice_ratelimited(fmt, ...) \ printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info_ratelimited(fmt, ...) \ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) /* no pr_cont_ratelimited, don't do that... */ #if defined(DEBUG) #define pr_devel_ratelimited(fmt, ...) \ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_devel_ratelimited(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* If you are writing a driver, please use dev_dbg instead */ #if defined(CONFIG_DYNAMIC_DEBUG) /* descriptor check is first to prevent flooding with "callbacks suppressed" */ #define pr_debug_ratelimited(fmt, ...) \ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \ if (DYNAMIC_DEBUG_BRANCH(descriptor) && \ __ratelimit(&_rs)) \ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \ } while (0) #elif defined(DEBUG) #define pr_debug_ratelimited(fmt, ...) \ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_debug_ratelimited(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif extern const struct file_operations kmsg_fops; enum { DUMP_PREFIX_NONE, DUMP_PREFIX_ADDRESS, DUMP_PREFIX_OFFSET }; extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii); #ifdef CONFIG_PRINTK extern void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii); #else static inline void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { } static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type, const void *buf, size_t len) { } #endif #if defined(CONFIG_DYNAMIC_DEBUG) #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) #elif defined(DEBUG) #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) #else static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { } #endif /** * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params * @prefix_str: string to prefix each line with; * caller supplies trailing spaces for alignment if desired * @prefix_type: controls whether prefix of an offset, address, or none * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) * @buf: data blob to dump * @len: number of bytes in the @buf * * Calls print_hex_dump(), with log level of KERN_DEBUG, * rowsize of 16, groupsize of 1, and ASCII output included. 
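 *
 * A minimal usage sketch (the buffer and the "raw: " prefix string are
 * hypothetical):
 *
 *	u8 buf[32];
 *	print_hex_dump_bytes("raw: ", DUMP_PREFIX_OFFSET, buf, sizeof(buf));
 *
 * With %DUMP_PREFIX_OFFSET each dumped line is prefixed with the byte
 * offset into @buf rather than a kernel address.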
*/ #define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \ print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true) #ifdef CONFIG_PRINTK extern void __printk_safe_enter(void); extern void __printk_safe_exit(void); /* * The printk_deferred_enter/exit macros are available only as a hack for * some code paths that need to defer all printk console printing. Interrupts * must be disabled for the deferred duration. */ #define printk_deferred_enter __printk_safe_enter #define printk_deferred_exit __printk_safe_exit #else static inline void printk_deferred_enter(void) { } static inline void printk_deferred_exit(void) { } #endif #endif pageblock-flags.h 0000644 00000005337 14722070374 0007754 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Macros for manipulating and testing flags related to a * pageblock_nr_pages number of pages. * * Copyright (C) IBM Corporation, 2006 * * Original author, Mel Gorman * Major cleanups and reduction of bit operations, Andy Whitcroft */ #ifndef PAGEBLOCK_FLAGS_H #define PAGEBLOCK_FLAGS_H #include <linux/types.h> #define PB_migratetype_bits 3 /* Bit indices that affect a whole block of pages */ enum pageblock_bits { PB_migrate, PB_migrate_end = PB_migrate + PB_migratetype_bits - 1, /* 3 bits required for migrate types */ PB_migrate_skip, /* If set, the block is skipped by compaction */ /* * Assume the bits will always align on a word. If this assumption * changes, the get/set pageblock macros need updating. */ NR_PAGEBLOCK_BITS }; #ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE /* Huge page sizes are variable */ extern unsigned int pageblock_order; #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ /* Huge pages are a constant size */ #define pageblock_order HUGETLB_PAGE_ORDER #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ #else /* CONFIG_HUGETLB_PAGE */ /* If huge pages are not used, group by MAX_ORDER_NR_PAGES */ #define pageblock_order (MAX_ORDER-1) #endif /* CONFIG_HUGETLB_PAGE */ #define pageblock_nr_pages (1UL << pageblock_order) /* Forward declaration */ struct page; unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, unsigned long end_bitidx, unsigned long mask); void set_pfnblock_flags_mask(struct page *page, unsigned long flags, unsigned long pfn, unsigned long end_bitidx, unsigned long mask); /* Declarations for getting and setting flags.
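 As a worked example: for the single skip bit, start_bitidx == end_bitidx
 == PB_migrate_skip, so the mask expression below,
 (1 << (end_bitidx - start_bitidx + 1)) - 1, evaluates to
 (1 << 1) - 1 == 0x1, a one-bit field.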
See mm/page_alloc.c */ #define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \ get_pfnblock_flags_mask(page, page_to_pfn(page), \ end_bitidx, \ (1 << (end_bitidx - start_bitidx + 1)) - 1) #define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \ set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \ end_bitidx, \ (1 << (end_bitidx - start_bitidx + 1)) - 1) #ifdef CONFIG_COMPACTION #define get_pageblock_skip(page) \ get_pageblock_flags_group(page, PB_migrate_skip, \ PB_migrate_skip) #define clear_pageblock_skip(page) \ set_pageblock_flags_group(page, 0, PB_migrate_skip, \ PB_migrate_skip) #define set_pageblock_skip(page) \ set_pageblock_flags_group(page, 1, PB_migrate_skip, \ PB_migrate_skip) #else static inline bool get_pageblock_skip(struct page *page) { return false; } static inline void clear_pageblock_skip(struct page *page) { } static inline void set_pageblock_skip(struct page *page) { } #endif /* CONFIG_COMPACTION */ #endif /* PAGEBLOCK_FLAGS_H */ util_macros.h 0000644 00000002255 14722070374 0007250 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HELPER_MACROS_H_ #define _LINUX_HELPER_MACROS_H_ #define __find_closest(x, a, as, op) \ ({ \ typeof(as) __fc_i, __fc_as = (as) - 1; \ typeof(x) __fc_x = (x); \ typeof(*a) const *__fc_a = (a); \ for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \ __fc_a[__fc_i + 1], 2)) \ break; \ } \ (__fc_i); \ }) /** * find_closest - locate the closest element in a sorted array * @x: The reference value. * @a: The array in which to look for the closest element. Must be sorted * in ascending order. * @as: Size of 'a'. * * Returns the index of the element closest to 'x'. */ #define find_closest(x, a, as) __find_closest(x, a, as, <=) /** * find_closest_descending - locate the closest element in a sorted array * @x: The reference value. * @a: The array in which to look for the closest element. Must be sorted * in descending order. * @as: Size of 'a'. * * Similar to find_closest() but 'a' is expected to be sorted in descending * order. */ #define find_closest_descending(x, a, as) __find_closest(x, a, as, >=) #endif page-isolation.h 0000644 00000003235 14722070374 0007641 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PAGEISOLATION_H #define __LINUX_PAGEISOLATION_H #ifdef CONFIG_MEMORY_ISOLATION static inline bool has_isolate_pageblock(struct zone *zone) { return zone->nr_isolate_pageblock; } static inline bool is_migrate_isolate_page(struct page *page) { return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; } static inline bool is_migrate_isolate(int migratetype) { return migratetype == MIGRATE_ISOLATE; } #else static inline bool has_isolate_pageblock(struct zone *zone) { return false; } static inline bool is_migrate_isolate_page(struct page *page) { return false; } static inline bool is_migrate_isolate(int migratetype) { return false; } #endif #define SKIP_HWPOISON 0x1 #define REPORT_FAILURE 0x2 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, int migratetype, int flags); void set_pageblock_migratetype(struct page *page, int migratetype); int move_freepages_block(struct zone *zone, struct page *page, int migratetype, int *num_movable); /* * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. */ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, unsigned migratetype, int flags); /* * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. 
* target range is [start_pfn, end_pfn) */ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, unsigned migratetype); /* * Test whether all pages in [start_pfn, end_pfn) are isolated. */ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, bool skip_hwpoisoned_pages); struct page *alloc_migrate_target(struct page *page, unsigned long private); #endif pci_ids.h 0000644 00000362721 14722070374 0006350 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * PCI Class, Vendor and Device IDs * * Please keep sorted. * * Do not add new entries to this file unless the definitions * are shared between multiple drivers. */ #ifndef _LINUX_PCI_IDS_H #define _LINUX_PCI_IDS_H /* Device classes and subclasses */ #define PCI_CLASS_NOT_DEFINED 0x0000 #define PCI_CLASS_NOT_DEFINED_VGA 0x0001 #define PCI_BASE_CLASS_STORAGE 0x01 #define PCI_CLASS_STORAGE_SCSI 0x0100 #define PCI_CLASS_STORAGE_IDE 0x0101 #define PCI_CLASS_STORAGE_FLOPPY 0x0102 #define PCI_CLASS_STORAGE_IPI 0x0103 #define PCI_CLASS_STORAGE_RAID 0x0104 #define PCI_CLASS_STORAGE_SATA 0x0106 #define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 #define PCI_CLASS_STORAGE_SAS 0x0107 #define PCI_CLASS_STORAGE_EXPRESS 0x010802 #define PCI_CLASS_STORAGE_OTHER 0x0180 #define PCI_BASE_CLASS_NETWORK 0x02 #define PCI_CLASS_NETWORK_ETHERNET 0x0200 #define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 #define PCI_CLASS_NETWORK_FDDI 0x0202 #define PCI_CLASS_NETWORK_ATM 0x0203 #define PCI_CLASS_NETWORK_OTHER 0x0280 #define PCI_BASE_CLASS_DISPLAY 0x03 #define PCI_CLASS_DISPLAY_VGA 0x0300 #define PCI_CLASS_DISPLAY_XGA 0x0301 #define PCI_CLASS_DISPLAY_3D 0x0302 #define PCI_CLASS_DISPLAY_OTHER 0x0380 #define PCI_BASE_CLASS_MULTIMEDIA 0x04 #define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 #define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 #define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 #define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 #define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 #define PCI_BASE_CLASS_MEMORY 0x05 #define PCI_CLASS_MEMORY_RAM 0x0500 #define PCI_CLASS_MEMORY_FLASH 0x0501 #define PCI_CLASS_MEMORY_OTHER 0x0580 #define PCI_BASE_CLASS_BRIDGE 0x06 #define PCI_CLASS_BRIDGE_HOST 0x0600 #define PCI_CLASS_BRIDGE_ISA 0x0601 #define PCI_CLASS_BRIDGE_EISA 0x0602 #define PCI_CLASS_BRIDGE_MC 0x0603 #define PCI_CLASS_BRIDGE_PCI 0x0604 #define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400 #define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE 0x060401 #define PCI_CLASS_BRIDGE_PCMCIA 0x0605 #define PCI_CLASS_BRIDGE_NUBUS 0x0606 #define PCI_CLASS_BRIDGE_CARDBUS 0x0607 #define PCI_CLASS_BRIDGE_RACEWAY 0x0608 #define PCI_CLASS_BRIDGE_OTHER 0x0680 #define PCI_BASE_CLASS_COMMUNICATION 0x07 #define PCI_CLASS_COMMUNICATION_SERIAL 0x0700 #define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701 #define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702 #define PCI_CLASS_COMMUNICATION_MODEM 0x0703 #define PCI_CLASS_COMMUNICATION_OTHER 0x0780 #define PCI_BASE_CLASS_SYSTEM 0x08 #define PCI_CLASS_SYSTEM_PIC 0x0800 #define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010 #define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020 #define PCI_CLASS_SYSTEM_DMA 0x0801 #define PCI_CLASS_SYSTEM_TIMER 0x0802 #define PCI_CLASS_SYSTEM_RTC 0x0803 #define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 #define PCI_CLASS_SYSTEM_SDHCI 0x0805 #define PCI_CLASS_SYSTEM_OTHER 0x0880 #define PCI_BASE_CLASS_INPUT 0x09 #define PCI_CLASS_INPUT_KEYBOARD 0x0900 #define PCI_CLASS_INPUT_PEN 0x0901 #define PCI_CLASS_INPUT_MOUSE 0x0902 #define PCI_CLASS_INPUT_SCANNER 0x0903 #define PCI_CLASS_INPUT_GAMEPORT 0x0904 #define PCI_CLASS_INPUT_OTHER 0x0980 #define PCI_BASE_CLASS_DOCKING 0x0a #define
PCI_CLASS_DOCKING_GENERIC 0x0a00 #define PCI_CLASS_DOCKING_OTHER 0x0a80 #define PCI_BASE_CLASS_PROCESSOR 0x0b #define PCI_CLASS_PROCESSOR_386 0x0b00 #define PCI_CLASS_PROCESSOR_486 0x0b01 #define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02 #define PCI_CLASS_PROCESSOR_ALPHA 0x0b10 #define PCI_CLASS_PROCESSOR_POWERPC 0x0b20 #define PCI_CLASS_PROCESSOR_MIPS 0x0b30 #define PCI_CLASS_PROCESSOR_CO 0x0b40 #define PCI_BASE_CLASS_SERIAL 0x0c #define PCI_CLASS_SERIAL_FIREWIRE 0x0c00 #define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010 #define PCI_CLASS_SERIAL_ACCESS 0x0c01 #define PCI_CLASS_SERIAL_SSA 0x0c02 #define PCI_CLASS_SERIAL_USB 0x0c03 #define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300 #define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 #define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 #define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330 #define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe #define PCI_CLASS_SERIAL_FIBER 0x0c04 #define PCI_CLASS_SERIAL_SMBUS 0x0c05 #define PCI_CLASS_SERIAL_IPMI 0x0c07 #define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700 #define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701 #define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702 #define PCI_BASE_CLASS_WIRELESS 0x0d #define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10 #define PCI_CLASS_WIRELESS_WHCI 0x0d1010 #define PCI_BASE_CLASS_INTELLIGENT 0x0e #define PCI_CLASS_INTELLIGENT_I2O 0x0e00 #define PCI_BASE_CLASS_SATELLITE 0x0f #define PCI_CLASS_SATELLITE_TV 0x0f00 #define PCI_CLASS_SATELLITE_AUDIO 0x0f01 #define PCI_CLASS_SATELLITE_VOICE 0x0f03 #define PCI_CLASS_SATELLITE_DATA 0x0f04 #define PCI_BASE_CLASS_CRYPT 0x10 #define PCI_CLASS_CRYPT_NETWORK 0x1000 #define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001 #define PCI_CLASS_CRYPT_OTHER 0x1080 #define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11 #define PCI_CLASS_SP_DPIO 0x1100 #define PCI_CLASS_SP_OTHER 0x1180 #define PCI_CLASS_OTHERS 0xff /* Vendors and devices. Sort key: vendor first, device next. 
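 A sketch of how these constants are typically consumed in a driver's
 match table (the "my_ids" table name and the choice of IDs here are
 purely illustrative):

	static const struct pci_device_id my_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP) },
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, my_ids);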
*/ #define PCI_VENDOR_ID_LOONGSON 0x0014 #define PCI_VENDOR_ID_TTTECH 0x0357 #define PCI_DEVICE_ID_TTTECH_MC322 0x000a #define PCI_VENDOR_ID_DYNALINK 0x0675 #define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702 #define PCI_VENDOR_ID_UBIQUITI 0x0777 #define PCI_VENDOR_ID_BERKOM 0x0871 #define PCI_DEVICE_ID_BERKOM_A1T 0xffa1 #define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2 #define PCI_DEVICE_ID_BERKOM_A4T 0xffa4 #define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8 #define PCI_VENDOR_ID_COMPAQ 0x0e11 #define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508 #define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc #define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10 #define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32 #define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34 #define PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE 0xae33 #define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35 #define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40 #define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43 #define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011 #define PCI_DEVICE_ID_COMPAQ_CISS 0xb060 #define PCI_DEVICE_ID_COMPAQ_CISSB 0xb178 #define PCI_DEVICE_ID_COMPAQ_CISSC 0x46 #define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130 #define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150 #define PCI_VENDOR_ID_NCR 0x1000 #define PCI_VENDOR_ID_LSI_LOGIC 0x1000 #define PCI_DEVICE_ID_NCR_53C810 0x0001 #define PCI_DEVICE_ID_NCR_53C820 0x0002 #define PCI_DEVICE_ID_NCR_53C825 0x0003 #define PCI_DEVICE_ID_NCR_53C815 0x0004 #define PCI_DEVICE_ID_LSI_53C810AP 0x0005 #define PCI_DEVICE_ID_NCR_53C860 0x0006 #define PCI_DEVICE_ID_LSI_53C1510 0x000a #define PCI_DEVICE_ID_NCR_53C896 0x000b #define PCI_DEVICE_ID_NCR_53C895 0x000c #define PCI_DEVICE_ID_NCR_53C885 0x000d #define PCI_DEVICE_ID_NCR_53C875 0x000f #define PCI_DEVICE_ID_NCR_53C1510 0x0010 #define PCI_DEVICE_ID_LSI_53C895A 0x0012 #define PCI_DEVICE_ID_LSI_53C875A 0x0013 #define PCI_DEVICE_ID_LSI_53C1010_33 0x0020 #define PCI_DEVICE_ID_LSI_53C1010_66 0x0021 #define PCI_DEVICE_ID_LSI_53C1030 0x0030 #define PCI_DEVICE_ID_LSI_1030_53C1035 0x0032 #define PCI_DEVICE_ID_LSI_53C1035 0x0040 #define PCI_DEVICE_ID_NCR_53C875J 0x008f #define PCI_DEVICE_ID_LSI_FC909 0x0621 #define PCI_DEVICE_ID_LSI_FC929 0x0622 #define PCI_DEVICE_ID_LSI_FC929_LAN 0x0623 #define PCI_DEVICE_ID_LSI_FC919 0x0624 #define PCI_DEVICE_ID_LSI_FC919_LAN 0x0625 #define PCI_DEVICE_ID_LSI_FC929X 0x0626 #define PCI_DEVICE_ID_LSI_FC939X 0x0642 #define PCI_DEVICE_ID_LSI_FC949X 0x0640 #define PCI_DEVICE_ID_LSI_FC949ES 0x0646 #define PCI_DEVICE_ID_LSI_FC919X 0x0628 #define PCI_DEVICE_ID_NCR_YELLOWFIN 0x0701 #define PCI_DEVICE_ID_LSI_61C102 0x0901 #define PCI_DEVICE_ID_LSI_63C815 0x1000 #define PCI_DEVICE_ID_LSI_SAS1064 0x0050 #define PCI_DEVICE_ID_LSI_SAS1064R 0x0411 #define PCI_DEVICE_ID_LSI_SAS1066 0x005E #define PCI_DEVICE_ID_LSI_SAS1068 0x0054 #define PCI_DEVICE_ID_LSI_SAS1064A 0x005C #define PCI_DEVICE_ID_LSI_SAS1064E 0x0056 #define PCI_DEVICE_ID_LSI_SAS1066E 0x005A #define PCI_DEVICE_ID_LSI_SAS1068E 0x0058 #define PCI_DEVICE_ID_LSI_SAS1078 0x0060 #define PCI_VENDOR_ID_ATI 0x1002 /* Mach64 */ #define PCI_DEVICE_ID_ATI_68800 0x4158 #define PCI_DEVICE_ID_ATI_215CT222 0x4354 #define PCI_DEVICE_ID_ATI_210888CX 0x4358 #define PCI_DEVICE_ID_ATI_215ET222 0x4554 /* Mach64 / Rage */ #define PCI_DEVICE_ID_ATI_215GB 0x4742 #define PCI_DEVICE_ID_ATI_215GD 0x4744 #define PCI_DEVICE_ID_ATI_215GI 0x4749 #define PCI_DEVICE_ID_ATI_215GP 0x4750 #define PCI_DEVICE_ID_ATI_215GQ 0x4751 #define PCI_DEVICE_ID_ATI_215XL 0x4752 #define PCI_DEVICE_ID_ATI_215GT 0x4754 #define PCI_DEVICE_ID_ATI_215GTB 0x4755 #define PCI_DEVICE_ID_ATI_215_IV 0x4756 #define 
PCI_DEVICE_ID_ATI_215_IW 0x4757 #define PCI_DEVICE_ID_ATI_215_IZ 0x475A #define PCI_DEVICE_ID_ATI_210888GX 0x4758 #define PCI_DEVICE_ID_ATI_215_LB 0x4c42 #define PCI_DEVICE_ID_ATI_215_LD 0x4c44 #define PCI_DEVICE_ID_ATI_215_LG 0x4c47 #define PCI_DEVICE_ID_ATI_215_LI 0x4c49 #define PCI_DEVICE_ID_ATI_215_LM 0x4c4D #define PCI_DEVICE_ID_ATI_215_LN 0x4c4E #define PCI_DEVICE_ID_ATI_215_LR 0x4c52 #define PCI_DEVICE_ID_ATI_215_LS 0x4c53 #define PCI_DEVICE_ID_ATI_264_LT 0x4c54 /* Mach64 VT */ #define PCI_DEVICE_ID_ATI_264VT 0x5654 #define PCI_DEVICE_ID_ATI_264VU 0x5655 #define PCI_DEVICE_ID_ATI_264VV 0x5656 /* Rage128 GL */ #define PCI_DEVICE_ID_ATI_RAGE128_RE 0x5245 #define PCI_DEVICE_ID_ATI_RAGE128_RF 0x5246 #define PCI_DEVICE_ID_ATI_RAGE128_RG 0x5247 /* Rage128 VR */ #define PCI_DEVICE_ID_ATI_RAGE128_RK 0x524b #define PCI_DEVICE_ID_ATI_RAGE128_RL 0x524c #define PCI_DEVICE_ID_ATI_RAGE128_SE 0x5345 #define PCI_DEVICE_ID_ATI_RAGE128_SF 0x5346 #define PCI_DEVICE_ID_ATI_RAGE128_SG 0x5347 #define PCI_DEVICE_ID_ATI_RAGE128_SH 0x5348 #define PCI_DEVICE_ID_ATI_RAGE128_SK 0x534b #define PCI_DEVICE_ID_ATI_RAGE128_SL 0x534c #define PCI_DEVICE_ID_ATI_RAGE128_SM 0x534d #define PCI_DEVICE_ID_ATI_RAGE128_SN 0x534e /* Rage128 Ultra */ #define PCI_DEVICE_ID_ATI_RAGE128_TF 0x5446 #define PCI_DEVICE_ID_ATI_RAGE128_TL 0x544c #define PCI_DEVICE_ID_ATI_RAGE128_TR 0x5452 #define PCI_DEVICE_ID_ATI_RAGE128_TS 0x5453 #define PCI_DEVICE_ID_ATI_RAGE128_TT 0x5454 #define PCI_DEVICE_ID_ATI_RAGE128_TU 0x5455 /* Rage128 M3 */ #define PCI_DEVICE_ID_ATI_RAGE128_LE 0x4c45 #define PCI_DEVICE_ID_ATI_RAGE128_LF 0x4c46 /* Rage128 M4 */ #define PCI_DEVICE_ID_ATI_RAGE128_MF 0x4d46 #define PCI_DEVICE_ID_ATI_RAGE128_ML 0x4d4c /* Rage128 Pro GL */ #define PCI_DEVICE_ID_ATI_RAGE128_PA 0x5041 #define PCI_DEVICE_ID_ATI_RAGE128_PB 0x5042 #define PCI_DEVICE_ID_ATI_RAGE128_PC 0x5043 #define PCI_DEVICE_ID_ATI_RAGE128_PD 0x5044 #define PCI_DEVICE_ID_ATI_RAGE128_PE 0x5045 #define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046 /* Rage128 Pro VR */ #define PCI_DEVICE_ID_ATI_RAGE128_PG 0x5047 #define PCI_DEVICE_ID_ATI_RAGE128_PH 0x5048 #define PCI_DEVICE_ID_ATI_RAGE128_PI 0x5049 #define PCI_DEVICE_ID_ATI_RAGE128_PJ 0x504A #define PCI_DEVICE_ID_ATI_RAGE128_PK 0x504B #define PCI_DEVICE_ID_ATI_RAGE128_PL 0x504C #define PCI_DEVICE_ID_ATI_RAGE128_PM 0x504D #define PCI_DEVICE_ID_ATI_RAGE128_PN 0x504E #define PCI_DEVICE_ID_ATI_RAGE128_PO 0x504F #define PCI_DEVICE_ID_ATI_RAGE128_PP 0x5050 #define PCI_DEVICE_ID_ATI_RAGE128_PQ 0x5051 #define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052 #define PCI_DEVICE_ID_ATI_RAGE128_PS 0x5053 #define PCI_DEVICE_ID_ATI_RAGE128_PT 0x5054 #define PCI_DEVICE_ID_ATI_RAGE128_PU 0x5055 #define PCI_DEVICE_ID_ATI_RAGE128_PV 0x5056 #define PCI_DEVICE_ID_ATI_RAGE128_PW 0x5057 #define PCI_DEVICE_ID_ATI_RAGE128_PX 0x5058 /* Rage128 M4 */ /* Radeon R100 */ #define PCI_DEVICE_ID_ATI_RADEON_QD 0x5144 #define PCI_DEVICE_ID_ATI_RADEON_QE 0x5145 #define PCI_DEVICE_ID_ATI_RADEON_QF 0x5146 #define PCI_DEVICE_ID_ATI_RADEON_QG 0x5147 /* Radeon RV100 (VE) */ #define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 #define PCI_DEVICE_ID_ATI_RADEON_QZ 0x515a /* Radeon R200 (8500) */ #define PCI_DEVICE_ID_ATI_RADEON_QL 0x514c #define PCI_DEVICE_ID_ATI_RADEON_QN 0x514e #define PCI_DEVICE_ID_ATI_RADEON_QO 0x514f #define PCI_DEVICE_ID_ATI_RADEON_Ql 0x516c #define PCI_DEVICE_ID_ATI_RADEON_BB 0x4242 /* Radeon R200 (9100) */ #define PCI_DEVICE_ID_ATI_RADEON_QM 0x514d /* Radeon RV200 (7500) */ #define PCI_DEVICE_ID_ATI_RADEON_QW 0x5157 #define PCI_DEVICE_ID_ATI_RADEON_QX 0x5158 /* Radeon 
NV-100 */ /* Radeon RV250 (9000) */ #define PCI_DEVICE_ID_ATI_RADEON_Id 0x4964 #define PCI_DEVICE_ID_ATI_RADEON_Ie 0x4965 #define PCI_DEVICE_ID_ATI_RADEON_If 0x4966 #define PCI_DEVICE_ID_ATI_RADEON_Ig 0x4967 /* Radeon RV280 (9200) */ #define PCI_DEVICE_ID_ATI_RADEON_Ya 0x5961 #define PCI_DEVICE_ID_ATI_RADEON_Yd 0x5964 /* Radeon R300 (9500) */ /* Radeon R300 (9700) */ #define PCI_DEVICE_ID_ATI_RADEON_ND 0x4e44 #define PCI_DEVICE_ID_ATI_RADEON_NE 0x4e45 #define PCI_DEVICE_ID_ATI_RADEON_NF 0x4e46 #define PCI_DEVICE_ID_ATI_RADEON_NG 0x4e47 /* Radeon R350 (9800) */ /* Radeon RV350 (9600) */ /* Radeon M6 */ #define PCI_DEVICE_ID_ATI_RADEON_LY 0x4c59 #define PCI_DEVICE_ID_ATI_RADEON_LZ 0x4c5a /* Radeon M7 */ #define PCI_DEVICE_ID_ATI_RADEON_LW 0x4c57 #define PCI_DEVICE_ID_ATI_RADEON_LX 0x4c58 /* Radeon M9 */ #define PCI_DEVICE_ID_ATI_RADEON_Ld 0x4c64 #define PCI_DEVICE_ID_ATI_RADEON_Le 0x4c65 #define PCI_DEVICE_ID_ATI_RADEON_Lf 0x4c66 #define PCI_DEVICE_ID_ATI_RADEON_Lg 0x4c67 /* Radeon */ /* RadeonIGP */ #define PCI_DEVICE_ID_ATI_RS100 0xcab0 #define PCI_DEVICE_ID_ATI_RS200 0xcab2 #define PCI_DEVICE_ID_ATI_RS200_B 0xcbb2 #define PCI_DEVICE_ID_ATI_RS250 0xcab3 #define PCI_DEVICE_ID_ATI_RS300_100 0x5830 #define PCI_DEVICE_ID_ATI_RS300_133 0x5831 #define PCI_DEVICE_ID_ATI_RS300_166 0x5832 #define PCI_DEVICE_ID_ATI_RS300_200 0x5833 #define PCI_DEVICE_ID_ATI_RS350_100 0x7830 #define PCI_DEVICE_ID_ATI_RS350_133 0x7831 #define PCI_DEVICE_ID_ATI_RS350_166 0x7832 #define PCI_DEVICE_ID_ATI_RS350_200 0x7833 #define PCI_DEVICE_ID_ATI_RS400_100 0x5a30 #define PCI_DEVICE_ID_ATI_RS400_133 0x5a31 #define PCI_DEVICE_ID_ATI_RS400_166 0x5a32 #define PCI_DEVICE_ID_ATI_RS400_200 0x5a33 #define PCI_DEVICE_ID_ATI_RS480 0x5950 /* ATI IXP Chipset */ #define PCI_DEVICE_ID_ATI_IXP200_IDE 0x4349 #define PCI_DEVICE_ID_ATI_IXP200_SMBUS 0x4353 #define PCI_DEVICE_ID_ATI_IXP300_SMBUS 0x4363 #define PCI_DEVICE_ID_ATI_IXP300_IDE 0x4369 #define PCI_DEVICE_ID_ATI_IXP300_SATA 0x436e #define PCI_DEVICE_ID_ATI_IXP400_SMBUS 0x4372 #define PCI_DEVICE_ID_ATI_IXP400_IDE 0x4376 #define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379 #define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a #define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380 #define PCI_DEVICE_ID_ATI_SBX00_SMBUS 0x4385 #define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c #define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390 #define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c #define PCI_VENDOR_ID_VLSI 0x1004 #define PCI_DEVICE_ID_VLSI_82C592 0x0005 #define PCI_DEVICE_ID_VLSI_82C593 0x0006 #define PCI_DEVICE_ID_VLSI_82C594 0x0007 #define PCI_DEVICE_ID_VLSI_82C597 0x0009 #define PCI_DEVICE_ID_VLSI_82C541 0x000c #define PCI_DEVICE_ID_VLSI_82C543 0x000d #define PCI_DEVICE_ID_VLSI_82C532 0x0101 #define PCI_DEVICE_ID_VLSI_82C534 0x0102 #define PCI_DEVICE_ID_VLSI_82C535 0x0104 #define PCI_DEVICE_ID_VLSI_82C147 0x0105 #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 /* AMD RD890 Chipset */ #define PCI_DEVICE_ID_RD890_IOMMU 0x5a23 #define PCI_VENDOR_ID_ADL 0x1005 #define PCI_DEVICE_ID_ADL_2301 0x2301 #define PCI_VENDOR_ID_NS 0x100b #define PCI_DEVICE_ID_NS_87415 0x0002 #define PCI_DEVICE_ID_NS_87560_LIO 0x000e #define PCI_DEVICE_ID_NS_87560_USB 0x0012 #define PCI_DEVICE_ID_NS_83815 0x0020 #define PCI_DEVICE_ID_NS_83820 0x0022 #define PCI_DEVICE_ID_NS_CS5535_ISA 0x002b #define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d #define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e #define PCI_DEVICE_ID_NS_CS5535_USB 0x002f #define PCI_DEVICE_ID_NS_GX_VIDEO 0x0030 #define PCI_DEVICE_ID_NS_SATURN 0x0035 #define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500 #define 
PCI_DEVICE_ID_NS_SCx200_SMI 0x0501 #define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502 #define PCI_DEVICE_ID_NS_SCx200_AUDIO 0x0503 #define PCI_DEVICE_ID_NS_SCx200_VIDEO 0x0504 #define PCI_DEVICE_ID_NS_SCx200_XBUS 0x0505 #define PCI_DEVICE_ID_NS_SC1100_BRIDGE 0x0510 #define PCI_DEVICE_ID_NS_SC1100_SMI 0x0511 #define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515 #define PCI_DEVICE_ID_NS_87410 0xd001 #define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE 0x0028 #define PCI_VENDOR_ID_TSENG 0x100c #define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 #define PCI_DEVICE_ID_TSENG_W32P_b 0x3205 #define PCI_DEVICE_ID_TSENG_W32P_c 0x3206 #define PCI_DEVICE_ID_TSENG_W32P_d 0x3207 #define PCI_DEVICE_ID_TSENG_ET6000 0x3208 #define PCI_VENDOR_ID_WEITEK 0x100e #define PCI_DEVICE_ID_WEITEK_P9000 0x9001 #define PCI_DEVICE_ID_WEITEK_P9100 0x9100 #define PCI_VENDOR_ID_DEC 0x1011 #define PCI_DEVICE_ID_DEC_BRD 0x0001 #define PCI_DEVICE_ID_DEC_TULIP 0x0002 #define PCI_DEVICE_ID_DEC_TGA 0x0004 #define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009 #define PCI_DEVICE_ID_DEC_TGA2 0x000D #define PCI_DEVICE_ID_DEC_FDDI 0x000F #define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014 #define PCI_DEVICE_ID_DEC_21142 0x0019 #define PCI_DEVICE_ID_DEC_21052 0x0021 #define PCI_DEVICE_ID_DEC_21150 0x0022 #define PCI_DEVICE_ID_DEC_21152 0x0024 #define PCI_DEVICE_ID_DEC_21153 0x0025 #define PCI_DEVICE_ID_DEC_21154 0x0026 #define PCI_DEVICE_ID_DEC_21285 0x1065 #define PCI_DEVICE_ID_COMPAQ_42XX 0x0046 #define PCI_VENDOR_ID_CIRRUS 0x1013 #define PCI_DEVICE_ID_CIRRUS_7548 0x0038 #define PCI_DEVICE_ID_CIRRUS_5430 0x00a0 #define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4 #define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8 #define PCI_DEVICE_ID_CIRRUS_5436 0x00ac #define PCI_DEVICE_ID_CIRRUS_5446 0x00b8 #define PCI_DEVICE_ID_CIRRUS_5480 0x00bc #define PCI_DEVICE_ID_CIRRUS_5462 0x00d0 #define PCI_DEVICE_ID_CIRRUS_5464 0x00d4 #define PCI_DEVICE_ID_CIRRUS_5465 0x00d6 #define PCI_DEVICE_ID_CIRRUS_6729 0x1100 #define PCI_DEVICE_ID_CIRRUS_6832 0x1110 #define PCI_DEVICE_ID_CIRRUS_7543 0x1202 #define PCI_DEVICE_ID_CIRRUS_4610 0x6001 #define PCI_DEVICE_ID_CIRRUS_4612 0x6003 #define PCI_DEVICE_ID_CIRRUS_4615 0x6004 #define PCI_VENDOR_ID_IBM 0x1014 #define PCI_DEVICE_ID_IBM_TR 0x0018 #define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e #define PCI_DEVICE_ID_IBM_CPC710_PCI64 0x00fc #define PCI_DEVICE_ID_IBM_SNIPE 0x0180 #define PCI_DEVICE_ID_IBM_CITRINE 0x028C #define PCI_DEVICE_ID_IBM_GEMSTONE 0xB166 #define PCI_DEVICE_ID_IBM_OBSIDIAN 0x02BD #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1 0x0031 #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219 #define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251 #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 #define PCI_SUBVENDOR_ID_IBM 0x1014 #define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT 0x03d4 #define PCI_VENDOR_ID_UNISYS 0x1018 #define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C #define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */ #define PCI_DEVICE_ID_COMPEX2_100VG 0x0005 #define PCI_VENDOR_ID_WD 0x101c #define PCI_DEVICE_ID_WD_90C 0xc24a #define PCI_VENDOR_ID_AMI 0x101e #define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960 #define PCI_DEVICE_ID_AMI_MEGARAID 0x9010 #define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060 #define PCI_VENDOR_ID_AMD 0x1022 #define PCI_DEVICE_ID_AMD_K8_NB 0x1100 #define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP 0x1101 #define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL 0x1102 #define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103 #define PCI_DEVICE_ID_AMD_10H_NB_HT 0x1200 
#define PCI_DEVICE_ID_AMD_10H_NB_MAP 0x1201 #define PCI_DEVICE_ID_AMD_10H_NB_DRAM 0x1202 #define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203 #define PCI_DEVICE_ID_AMD_10H_NB_LINK 0x1204 #define PCI_DEVICE_ID_AMD_11H_NB_HT 0x1300 #define PCI_DEVICE_ID_AMD_11H_NB_MAP 0x1301 #define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e #define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573 #define PCI_DEVICE_ID_AMD_15H_M60H_NB_F4 0x1574 #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 #define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603 #define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604 #define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605 #define PCI_DEVICE_ID_AMD_16H_NB_F3 0x1533 #define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584 #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 #define PCI_DEVICE_ID_AMD_SCSI 0x2020 #define PCI_DEVICE_ID_AMD_SERENADE 0x36c0 #define PCI_DEVICE_ID_AMD_FE_GATE_7006 0x7006 #define PCI_DEVICE_ID_AMD_FE_GATE_7007 0x7007 #define PCI_DEVICE_ID_AMD_FE_GATE_700C 0x700C #define PCI_DEVICE_ID_AMD_FE_GATE_700E 0x700E #define PCI_DEVICE_ID_AMD_COBRA_7401 0x7401 #define PCI_DEVICE_ID_AMD_VIPER_7409 0x7409 #define PCI_DEVICE_ID_AMD_VIPER_740B 0x740B #define PCI_DEVICE_ID_AMD_VIPER_7410 0x7410 #define PCI_DEVICE_ID_AMD_VIPER_7411 0x7411 #define PCI_DEVICE_ID_AMD_VIPER_7413 0x7413 #define PCI_DEVICE_ID_AMD_VIPER_7440 0x7440 #define PCI_DEVICE_ID_AMD_OPUS_7441 0x7441 #define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 #define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 #define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 #define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 #define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 #define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 #define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 #define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a #define PCI_DEVICE_ID_AMD_8111_SMBUS 0x746b #define PCI_DEVICE_ID_AMD_8111_AUDIO 0x746d #define PCI_DEVICE_ID_AMD_8151_0 0x7454 #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 #define PCI_DEVICE_ID_AMD_NL_USB 0x7912 #define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 #define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093 #define PCI_DEVICE_ID_AMD_CS5536_OHC 0x2094 #define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095 #define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096 #define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097 #define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092 #define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A #define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081 #define PCI_DEVICE_ID_AMD_LX_AES 0x2082 #define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800 #define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b #define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c #define PCI_DEVICE_ID_AMD_KERNCZ_SMBUS 0x790b #define PCI_VENDOR_ID_TRIDENT 0x1023 #define 
PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001 #define PCI_DEVICE_ID_TRIDENT_9320 0x9320 #define PCI_DEVICE_ID_TRIDENT_9388 0x9388 #define PCI_DEVICE_ID_TRIDENT_9397 0x9397 #define PCI_DEVICE_ID_TRIDENT_939A 0x939A #define PCI_DEVICE_ID_TRIDENT_9520 0x9520 #define PCI_DEVICE_ID_TRIDENT_9525 0x9525 #define PCI_DEVICE_ID_TRIDENT_9420 0x9420 #define PCI_DEVICE_ID_TRIDENT_9440 0x9440 #define PCI_DEVICE_ID_TRIDENT_9660 0x9660 #define PCI_DEVICE_ID_TRIDENT_9750 0x9750 #define PCI_DEVICE_ID_TRIDENT_9850 0x9850 #define PCI_DEVICE_ID_TRIDENT_9880 0x9880 #define PCI_DEVICE_ID_TRIDENT_8400 0x8400 #define PCI_DEVICE_ID_TRIDENT_8420 0x8420 #define PCI_DEVICE_ID_TRIDENT_8500 0x8500 #define PCI_VENDOR_ID_AI 0x1025 #define PCI_DEVICE_ID_AI_M1435 0x1435 #define PCI_VENDOR_ID_DELL 0x1028 #define PCI_DEVICE_ID_DELL_RACIII 0x0008 #define PCI_DEVICE_ID_DELL_RAC4 0x0012 #define PCI_DEVICE_ID_DELL_PERC5 0x0015 #define PCI_VENDOR_ID_MATROX 0x102B #define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 #define PCI_DEVICE_ID_MATROX_MIL 0x0519 #define PCI_DEVICE_ID_MATROX_MYS 0x051A #define PCI_DEVICE_ID_MATROX_MIL_2 0x051b #define PCI_DEVICE_ID_MATROX_MYS_AGP 0x051e #define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f #define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10 #define PCI_DEVICE_ID_MATROX_G100_MM 0x1000 #define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001 #define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520 #define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521 #define PCI_DEVICE_ID_MATROX_G400 0x0525 #define PCI_DEVICE_ID_MATROX_G200EV_PCI 0x0530 #define PCI_DEVICE_ID_MATROX_G550 0x2527 #define PCI_DEVICE_ID_MATROX_VIA 0x4536 #define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2 #define PCI_VENDOR_ID_CT 0x102c #define PCI_DEVICE_ID_CT_69000 0x00c0 #define PCI_DEVICE_ID_CT_65545 0x00d8 #define PCI_DEVICE_ID_CT_65548 0x00dc #define PCI_DEVICE_ID_CT_65550 0x00e0 #define PCI_DEVICE_ID_CT_65554 0x00e4 #define PCI_DEVICE_ID_CT_65555 0x00e5 #define PCI_VENDOR_ID_MIRO 0x1031 #define PCI_DEVICE_ID_MIRO_36050 0x5601 #define PCI_DEVICE_ID_MIRO_DC10PLUS 0x7efe #define PCI_DEVICE_ID_MIRO_DC30PLUS 0xd801 #define PCI_VENDOR_ID_NEC 0x1033 #define PCI_DEVICE_ID_NEC_CBUS_1 0x0001 /* PCI-Cbus Bridge */ #define PCI_DEVICE_ID_NEC_LOCAL 0x0002 /* Local Bridge */ #define PCI_DEVICE_ID_NEC_ATM 0x0003 /* ATM LAN Controller */ #define PCI_DEVICE_ID_NEC_R4000 0x0004 /* R4000 Bridge */ #define PCI_DEVICE_ID_NEC_486 0x0005 /* 486 Like Peripheral Bus Bridge */ #define PCI_DEVICE_ID_NEC_ACCEL_1 0x0006 /* Graphic Accelerator */ #define PCI_DEVICE_ID_NEC_UXBUS 0x0007 /* UX-Bus Bridge */ #define PCI_DEVICE_ID_NEC_ACCEL_2 0x0008 /* Graphic Accelerator */ #define PCI_DEVICE_ID_NEC_GRAPH 0x0009 /* PCI-CoreGraph Bridge */ #define PCI_DEVICE_ID_NEC_VL 0x0016 /* PCI-VL Bridge */ #define PCI_DEVICE_ID_NEC_STARALPHA2 0x002c /* STAR ALPHA2 */ #define PCI_DEVICE_ID_NEC_CBUS_2 0x002d /* PCI-Cbus Bridge */ #define PCI_DEVICE_ID_NEC_USB 0x0035 /* PCI-USB Host */ #define PCI_DEVICE_ID_NEC_CBUS_3 0x003b #define PCI_DEVICE_ID_NEC_NAPCCARD 0x003e #define PCI_DEVICE_ID_NEC_PCX2 0x0046 /* PowerVR */ #define PCI_DEVICE_ID_NEC_VRC5476 0x009b #define PCI_DEVICE_ID_NEC_VRC4173 0x00a5 #define PCI_DEVICE_ID_NEC_VRC5477_AC97 0x00a6 #define PCI_DEVICE_ID_NEC_PC9821CS01 0x800c /* PC-9821-CS01 */ #define PCI_DEVICE_ID_NEC_PC9821NRB06 0x800d /* PC-9821NR-B06 */ #define PCI_VENDOR_ID_FD 0x1036 #define PCI_DEVICE_ID_FD_36C70 0x0000 #define PCI_VENDOR_ID_SI 0x1039 #define PCI_DEVICE_ID_SI_5591_AGP 0x0001 #define PCI_DEVICE_ID_SI_6202 0x0002 #define PCI_DEVICE_ID_SI_503 0x0008 #define 
PCI_DEVICE_ID_SI_ACPI 0x0009 #define PCI_DEVICE_ID_SI_SMBUS 0x0016 #define PCI_DEVICE_ID_SI_LPC 0x0018 #define PCI_DEVICE_ID_SI_5597_VGA 0x0200 #define PCI_DEVICE_ID_SI_6205 0x0205 #define PCI_DEVICE_ID_SI_501 0x0406 #define PCI_DEVICE_ID_SI_496 0x0496 #define PCI_DEVICE_ID_SI_300 0x0300 #define PCI_DEVICE_ID_SI_315H 0x0310 #define PCI_DEVICE_ID_SI_315 0x0315 #define PCI_DEVICE_ID_SI_315PRO 0x0325 #define PCI_DEVICE_ID_SI_530 0x0530 #define PCI_DEVICE_ID_SI_540 0x0540 #define PCI_DEVICE_ID_SI_550 0x0550 #define PCI_DEVICE_ID_SI_540_VGA 0x5300 #define PCI_DEVICE_ID_SI_550_VGA 0x5315 #define PCI_DEVICE_ID_SI_620 0x0620 #define PCI_DEVICE_ID_SI_630 0x0630 #define PCI_DEVICE_ID_SI_633 0x0633 #define PCI_DEVICE_ID_SI_635 0x0635 #define PCI_DEVICE_ID_SI_640 0x0640 #define PCI_DEVICE_ID_SI_645 0x0645 #define PCI_DEVICE_ID_SI_646 0x0646 #define PCI_DEVICE_ID_SI_648 0x0648 #define PCI_DEVICE_ID_SI_650 0x0650 #define PCI_DEVICE_ID_SI_651 0x0651 #define PCI_DEVICE_ID_SI_655 0x0655 #define PCI_DEVICE_ID_SI_661 0x0661 #define PCI_DEVICE_ID_SI_730 0x0730 #define PCI_DEVICE_ID_SI_733 0x0733 #define PCI_DEVICE_ID_SI_630_VGA 0x6300 #define PCI_DEVICE_ID_SI_735 0x0735 #define PCI_DEVICE_ID_SI_740 0x0740 #define PCI_DEVICE_ID_SI_741 0x0741 #define PCI_DEVICE_ID_SI_745 0x0745 #define PCI_DEVICE_ID_SI_746 0x0746 #define PCI_DEVICE_ID_SI_755 0x0755 #define PCI_DEVICE_ID_SI_760 0x0760 #define PCI_DEVICE_ID_SI_900 0x0900 #define PCI_DEVICE_ID_SI_961 0x0961 #define PCI_DEVICE_ID_SI_962 0x0962 #define PCI_DEVICE_ID_SI_963 0x0963 #define PCI_DEVICE_ID_SI_965 0x0965 #define PCI_DEVICE_ID_SI_966 0x0966 #define PCI_DEVICE_ID_SI_968 0x0968 #define PCI_DEVICE_ID_SI_1180 0x1180 #define PCI_DEVICE_ID_SI_5511 0x5511 #define PCI_DEVICE_ID_SI_5513 0x5513 #define PCI_DEVICE_ID_SI_5517 0x5517 #define PCI_DEVICE_ID_SI_5518 0x5518 #define PCI_DEVICE_ID_SI_5571 0x5571 #define PCI_DEVICE_ID_SI_5581 0x5581 #define PCI_DEVICE_ID_SI_5582 0x5582 #define PCI_DEVICE_ID_SI_5591 0x5591 #define PCI_DEVICE_ID_SI_5596 0x5596 #define PCI_DEVICE_ID_SI_5597 0x5597 #define PCI_DEVICE_ID_SI_5598 0x5598 #define PCI_DEVICE_ID_SI_5600 0x5600 #define PCI_DEVICE_ID_SI_7012 0x7012 #define PCI_DEVICE_ID_SI_7013 0x7013 #define PCI_DEVICE_ID_SI_7016 0x7016 #define PCI_DEVICE_ID_SI_7018 0x7018 #define PCI_VENDOR_ID_HP 0x103c #define PCI_VENDOR_ID_HP_3PAR 0x1590 #define PCI_DEVICE_ID_HP_VISUALIZE_EG 0x1005 #define PCI_DEVICE_ID_HP_VISUALIZE_FX6 0x1006 #define PCI_DEVICE_ID_HP_VISUALIZE_FX4 0x1008 #define PCI_DEVICE_ID_HP_VISUALIZE_FX2 0x100a #define PCI_DEVICE_ID_HP_TACHYON 0x1028 #define PCI_DEVICE_ID_HP_TACHLITE 0x1029 #define PCI_DEVICE_ID_HP_J2585A 0x1030 #define PCI_DEVICE_ID_HP_J2585B 0x1031 #define PCI_DEVICE_ID_HP_J2973A 0x1040 #define PCI_DEVICE_ID_HP_J2970A 0x1042 #define PCI_DEVICE_ID_HP_DIVA 0x1048 #define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049 #define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A #define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B #define PCI_DEVICE_ID_HP_REO_IOC 0x10f1 #define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b #define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223 #define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226 #define PCI_DEVICE_ID_HP_DIVA_POWERBAR 0x1227 #define PCI_DEVICE_ID_HP_ZX1_IOC 0x122a #define PCI_DEVICE_ID_HP_PCIX_LBA 0x122e #define PCI_DEVICE_ID_HP_SX1000_IOC 0x127c #define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282 #define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290 #define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301 #define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a #define PCI_DEVICE_ID_HP_CISSA 0x3220 #define PCI_DEVICE_ID_HP_CISSC 0x3230 #define PCI_DEVICE_ID_HP_CISSD 
0x3238 #define PCI_DEVICE_ID_HP_CISSE 0x323a #define PCI_DEVICE_ID_HP_CISSF 0x323b #define PCI_DEVICE_ID_HP_CISSH 0x323c #define PCI_DEVICE_ID_HP_CISSI 0x3239 #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 #define PCI_VENDOR_ID_PCTECH 0x1042 #define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000 #define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001 #define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020 #define PCI_VENDOR_ID_ASUSTEK 0x1043 #define PCI_DEVICE_ID_ASUSTEK_0675 0x0675 #define PCI_VENDOR_ID_DPT 0x1044 #define PCI_DEVICE_ID_DPT 0xa400 #define PCI_VENDOR_ID_OPTI 0x1045 #define PCI_DEVICE_ID_OPTI_82C558 0xc558 #define PCI_DEVICE_ID_OPTI_82C621 0xc621 #define PCI_DEVICE_ID_OPTI_82C700 0xc700 #define PCI_DEVICE_ID_OPTI_82C825 0xd568 #define PCI_VENDOR_ID_ELSA 0x1048 #define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000 #define PCI_DEVICE_ID_ELSA_QS3000 0x3000 #define PCI_VENDOR_ID_STMICRO 0x104A #define PCI_DEVICE_ID_STMICRO_USB_HOST 0xCC00 #define PCI_DEVICE_ID_STMICRO_USB_OHCI 0xCC01 #define PCI_DEVICE_ID_STMICRO_USB_OTG 0xCC02 #define PCI_DEVICE_ID_STMICRO_UART_HWFC 0xCC03 #define PCI_DEVICE_ID_STMICRO_UART_NO_HWFC 0xCC04 #define PCI_DEVICE_ID_STMICRO_SOC_DMA 0xCC05 #define PCI_DEVICE_ID_STMICRO_SATA 0xCC06 #define PCI_DEVICE_ID_STMICRO_I2C 0xCC07 #define PCI_DEVICE_ID_STMICRO_SPI_HS 0xCC08 #define PCI_DEVICE_ID_STMICRO_MAC 0xCC09 #define PCI_DEVICE_ID_STMICRO_SDIO_EMMC 0xCC0A #define PCI_DEVICE_ID_STMICRO_SDIO 0xCC0B #define PCI_DEVICE_ID_STMICRO_GPIO 0xCC0C #define PCI_DEVICE_ID_STMICRO_VIP 0xCC0D #define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_DMA 0xCC0E #define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_SRCS 0xCC0F #define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_MSPS 0xCC10 #define PCI_DEVICE_ID_STMICRO_CAN 0xCC11 #define PCI_DEVICE_ID_STMICRO_MLB 0xCC12 #define PCI_DEVICE_ID_STMICRO_DBP 0xCC13 #define PCI_DEVICE_ID_STMICRO_SATA_PHY 0xCC14 #define PCI_DEVICE_ID_STMICRO_ESRAM 0xCC15 #define PCI_DEVICE_ID_STMICRO_VIC 0xCC16 #define PCI_VENDOR_ID_BUSLOGIC 0x104B #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040 #define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130 #define PCI_VENDOR_ID_TI 0x104c #define PCI_DEVICE_ID_TI_TVP4020 0x3d07 #define PCI_DEVICE_ID_TI_4450 0x8011 #define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 #define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033 #define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 #define PCI_DEVICE_ID_TI_X515 0x8036 #define PCI_DEVICE_ID_TI_XX12 0x8039 #define PCI_DEVICE_ID_TI_XX12_FM 0x803b #define PCI_DEVICE_ID_TI_XIO2000A 0x8231 #define PCI_DEVICE_ID_TI_1130 0xac12 #define PCI_DEVICE_ID_TI_1031 0xac13 #define PCI_DEVICE_ID_TI_1131 0xac15 #define PCI_DEVICE_ID_TI_1250 0xac16 #define PCI_DEVICE_ID_TI_1220 0xac17 #define PCI_DEVICE_ID_TI_1221 0xac19 #define PCI_DEVICE_ID_TI_1210 0xac1a #define PCI_DEVICE_ID_TI_1450 0xac1b #define PCI_DEVICE_ID_TI_1225 0xac1c #define PCI_DEVICE_ID_TI_1251A 0xac1d #define PCI_DEVICE_ID_TI_1211 0xac1e #define PCI_DEVICE_ID_TI_1251B 0xac1f #define PCI_DEVICE_ID_TI_4410 0xac41 #define PCI_DEVICE_ID_TI_4451 0xac42 #define PCI_DEVICE_ID_TI_4510 0xac44 #define PCI_DEVICE_ID_TI_4520 0xac46 #define PCI_DEVICE_ID_TI_7510 0xac47 #define PCI_DEVICE_ID_TI_7610 0xac48 #define PCI_DEVICE_ID_TI_7410 0xac49 #define PCI_DEVICE_ID_TI_1410 0xac50 #define PCI_DEVICE_ID_TI_1420 0xac51 #define PCI_DEVICE_ID_TI_1451A 0xac52 #define PCI_DEVICE_ID_TI_1620 0xac54 #define PCI_DEVICE_ID_TI_1520 0xac55 #define PCI_DEVICE_ID_TI_1510 0xac56 #define PCI_DEVICE_ID_TI_X620 0xac8d #define PCI_DEVICE_ID_TI_X420 0xac8e #define PCI_DEVICE_ID_TI_XX20_FM 0xac8f #define 
PCI_DEVICE_ID_TI_DRA74x 0xb500 #define PCI_DEVICE_ID_TI_DRA72x 0xb501 #define PCI_VENDOR_ID_SONY 0x104d /* Winbond have two vendor IDs! See 0x10ad as well */ #define PCI_VENDOR_ID_WINBOND2 0x1050 #define PCI_DEVICE_ID_WINBOND2_89C940F 0x5a5a #define PCI_DEVICE_ID_WINBOND2_6692 0x6692 #define PCI_VENDOR_ID_ANIGMA 0x1051 #define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100 #define PCI_VENDOR_ID_EFAR 0x1055 #define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130 #define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463 #define PCI_VENDOR_ID_MOTOROLA 0x1057 #define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001 #define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002 #define PCI_DEVICE_ID_MOTOROLA_MPC107 0x0004 #define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801 #define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802 #define PCI_DEVICE_ID_MOTOROLA_HAWK 0x4803 #define PCI_DEVICE_ID_MOTOROLA_HARRIER 0x480b #define PCI_DEVICE_ID_MOTOROLA_MPC5200 0x5803 #define PCI_DEVICE_ID_MOTOROLA_MPC5200B 0x5809 #define PCI_VENDOR_ID_PROMISE 0x105a #define PCI_DEVICE_ID_PROMISE_20265 0x0d30 #define PCI_DEVICE_ID_PROMISE_20267 0x4d30 #define PCI_DEVICE_ID_PROMISE_20246 0x4d33 #define PCI_DEVICE_ID_PROMISE_20262 0x4d38 #define PCI_DEVICE_ID_PROMISE_20263 0x0D38 #define PCI_DEVICE_ID_PROMISE_20268 0x4d68 #define PCI_DEVICE_ID_PROMISE_20269 0x4d69 #define PCI_DEVICE_ID_PROMISE_20270 0x6268 #define PCI_DEVICE_ID_PROMISE_20271 0x6269 #define PCI_DEVICE_ID_PROMISE_20275 0x1275 #define PCI_DEVICE_ID_PROMISE_20276 0x5275 #define PCI_DEVICE_ID_PROMISE_20277 0x7275 #define PCI_VENDOR_ID_FOXCONN 0x105b #define PCI_VENDOR_ID_UMC 0x1060 #define PCI_DEVICE_ID_UMC_UM8673F 0x0101 #define PCI_DEVICE_ID_UMC_UM8886BF 0x673a #define PCI_DEVICE_ID_UMC_UM8886A 0x886a #define PCI_VENDOR_ID_PICOPOWER 0x1066 #define PCI_DEVICE_ID_PICOPOWER_PT86C523 0x0002 #define PCI_DEVICE_ID_PICOPOWER_PT86C523BBP 0x8002 #define PCI_VENDOR_ID_MYLEX 0x1069 #define PCI_DEVICE_ID_MYLEX_DAC960_P 0x0001 #define PCI_DEVICE_ID_MYLEX_DAC960_PD 0x0002 #define PCI_DEVICE_ID_MYLEX_DAC960_PG 0x0010 #define PCI_DEVICE_ID_MYLEX_DAC960_LA 0x0020 #define PCI_DEVICE_ID_MYLEX_DAC960_LP 0x0050 #define PCI_DEVICE_ID_MYLEX_DAC960_BA 0xBA56 #define PCI_DEVICE_ID_MYLEX_DAC960_GEM 0xB166 #define PCI_VENDOR_ID_APPLE 0x106b #define PCI_DEVICE_ID_APPLE_BANDIT 0x0001 #define PCI_DEVICE_ID_APPLE_HYDRA 0x000e #define PCI_DEVICE_ID_APPLE_UNI_N_FW 0x0018 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020 #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021 #define PCI_DEVICE_ID_APPLE_UNI_N_GMACP 0x0024 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d #define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032 #define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033 #define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034 #define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b #define PCI_DEVICE_ID_APPLE_K2_ATA100 0x0043 #define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b #define PCI_DEVICE_ID_APPLE_K2_GMAC 0x004c #define PCI_DEVICE_ID_APPLE_SH_ATA 0x0050 #define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051 #define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058 #define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059 #define PCI_DEVICE_ID_APPLE_U4_PCIE 0x005b #define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066 #define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069 #define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a #define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b #define PCI_DEVICE_ID_APPLE_TIGON3 0x1645 #define PCI_VENDOR_ID_YAMAHA 0x1073 #define PCI_DEVICE_ID_YAMAHA_724 0x0004 #define PCI_DEVICE_ID_YAMAHA_724F 0x000d #define PCI_DEVICE_ID_YAMAHA_740 0x000a #define PCI_DEVICE_ID_YAMAHA_740C 
0x000c #define PCI_DEVICE_ID_YAMAHA_744 0x0010 #define PCI_DEVICE_ID_YAMAHA_754 0x0012 #define PCI_VENDOR_ID_QLOGIC 0x1077 #define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016 #define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020 #define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080 #define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216 #define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240 #define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280 #define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100 #define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200 #define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300 #define PCI_DEVICE_ID_QLOGIC_ISP2312 0x2312 #define PCI_DEVICE_ID_QLOGIC_ISP2322 0x2322 #define PCI_DEVICE_ID_QLOGIC_ISP6312 0x6312 #define PCI_DEVICE_ID_QLOGIC_ISP6322 0x6322 #define PCI_DEVICE_ID_QLOGIC_ISP2422 0x2422 #define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432 #define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512 #define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522 #define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422 #define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432 #define PCI_VENDOR_ID_CYRIX 0x1078 #define PCI_DEVICE_ID_CYRIX_5510 0x0000 #define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001 #define PCI_DEVICE_ID_CYRIX_5520 0x0002 #define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100 #define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102 #define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103 #define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104 #define PCI_VENDOR_ID_CONTAQ 0x1080 #define PCI_DEVICE_ID_CONTAQ_82C693 0xc693 #define PCI_VENDOR_ID_OLICOM 0x108d #define PCI_DEVICE_ID_OLICOM_OC2325 0x0012 #define PCI_DEVICE_ID_OLICOM_OC2183 0x0013 #define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 #define PCI_VENDOR_ID_SUN 0x108e #define PCI_DEVICE_ID_SUN_EBUS 0x1000 #define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001 #define PCI_DEVICE_ID_SUN_RIO_EBUS 0x1100 #define PCI_DEVICE_ID_SUN_RIO_GEM 0x1101 #define PCI_DEVICE_ID_SUN_RIO_1394 0x1102 #define PCI_DEVICE_ID_SUN_RIO_USB 0x1103 #define PCI_DEVICE_ID_SUN_GEM 0x2bad #define PCI_DEVICE_ID_SUN_SIMBA 0x5000 #define PCI_DEVICE_ID_SUN_PBM 0x8000 #define PCI_DEVICE_ID_SUN_SCHIZO 0x8001 #define PCI_DEVICE_ID_SUN_SABRE 0xa000 #define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001 #define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 #define PCI_DEVICE_ID_SUN_CASSINI 0xabba #define PCI_VENDOR_ID_NI 0x1093 #define PCI_DEVICE_ID_NI_PCI2322 0xd130 #define PCI_DEVICE_ID_NI_PCI2324 0xd140 #define PCI_DEVICE_ID_NI_PCI2328 0xd150 #define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190 #define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0 #define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0 #define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0 #define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0 #define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1 #define PCI_DEVICE_ID_NI_PCI2322I 0xd250 #define PCI_DEVICE_ID_NI_PCI2324I 0xd270 #define PCI_DEVICE_ID_NI_PCI23216 0xd2b0 #define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080 #define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db #define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd #define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df #define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2 #define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4 #define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6 #define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7 #define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8 #define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea #define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec #define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee #define PCI_VENDOR_ID_CMD 0x1095 #define PCI_DEVICE_ID_CMD_643 0x0643 #define PCI_DEVICE_ID_CMD_646 0x0646 #define PCI_DEVICE_ID_CMD_648 0x0648 #define PCI_DEVICE_ID_CMD_649 0x0649 #define PCI_DEVICE_ID_SII_680 0x0680 #define PCI_DEVICE_ID_SII_3112 0x3112 #define PCI_DEVICE_ID_SII_1210SA 0x0240 #define 
PCI_VENDOR_ID_BROOKTREE 0x109e #define PCI_DEVICE_ID_BROOKTREE_878 0x0878 #define PCI_DEVICE_ID_BROOKTREE_879 0x0879 #define PCI_VENDOR_ID_SGI 0x10a9 #define PCI_DEVICE_ID_SGI_IOC3 0x0003 #define PCI_DEVICE_ID_SGI_LITHIUM 0x1002 #define PCI_VENDOR_ID_WINBOND 0x10ad #define PCI_DEVICE_ID_WINBOND_82C105 0x0105 #define PCI_DEVICE_ID_WINBOND_83C553 0x0565 #define PCI_VENDOR_ID_PLX 0x10b5 #define PCI_DEVICE_ID_PLX_R685 0x1030 #define PCI_DEVICE_ID_PLX_ROMULUS 0x106a #define PCI_DEVICE_ID_PLX_SPCOM800 0x1076 #define PCI_DEVICE_ID_PLX_1077 0x1077 #define PCI_DEVICE_ID_PLX_SPCOM200 0x1103 #define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151 #define PCI_DEVICE_ID_PLX_R753 0x1152 #define PCI_DEVICE_ID_PLX_OLITEC 0x1187 #define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196 #define PCI_DEVICE_ID_PLX_9030 0x9030 #define PCI_DEVICE_ID_PLX_9050 0x9050 #define PCI_DEVICE_ID_PLX_9056 0x9056 #define PCI_DEVICE_ID_PLX_9080 0x9080 #define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 #define PCI_VENDOR_ID_MADGE 0x10b6 #define PCI_DEVICE_ID_MADGE_MK2 0x0002 #define PCI_VENDOR_ID_3COM 0x10b7 #define PCI_DEVICE_ID_3COM_3C985 0x0001 #define PCI_DEVICE_ID_3COM_3C940 0x1700 #define PCI_DEVICE_ID_3COM_3C339 0x3390 #define PCI_DEVICE_ID_3COM_3C359 0x3590 #define PCI_DEVICE_ID_3COM_3C940B 0x80eb #define PCI_DEVICE_ID_3COM_3CR990 0x9900 #define PCI_DEVICE_ID_3COM_3CR990_TX_95 0x9902 #define PCI_DEVICE_ID_3COM_3CR990_TX_97 0x9903 #define PCI_DEVICE_ID_3COM_3CR990B 0x9904 #define PCI_DEVICE_ID_3COM_3CR990_FX 0x9905 #define PCI_DEVICE_ID_3COM_3CR990SVR95 0x9908 #define PCI_DEVICE_ID_3COM_3CR990SVR97 0x9909 #define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a #define PCI_VENDOR_ID_AL 0x10b9 #define PCI_DEVICE_ID_AL_M1533 0x1533 #define PCI_DEVICE_ID_AL_M1535 0x1535 #define PCI_DEVICE_ID_AL_M1541 0x1541 #define PCI_DEVICE_ID_AL_M1563 0x1563 #define PCI_DEVICE_ID_AL_M1621 0x1621 #define PCI_DEVICE_ID_AL_M1631 0x1631 #define PCI_DEVICE_ID_AL_M1632 0x1632 #define PCI_DEVICE_ID_AL_M1641 0x1641 #define PCI_DEVICE_ID_AL_M1644 0x1644 #define PCI_DEVICE_ID_AL_M1647 0x1647 #define PCI_DEVICE_ID_AL_M1651 0x1651 #define PCI_DEVICE_ID_AL_M1671 0x1671 #define PCI_DEVICE_ID_AL_M1681 0x1681 #define PCI_DEVICE_ID_AL_M1683 0x1683 #define PCI_DEVICE_ID_AL_M1689 0x1689 #define PCI_DEVICE_ID_AL_M5219 0x5219 #define PCI_DEVICE_ID_AL_M5228 0x5228 #define PCI_DEVICE_ID_AL_M5229 0x5229 #define PCI_DEVICE_ID_AL_M5451 0x5451 #define PCI_DEVICE_ID_AL_M7101 0x7101 #define PCI_VENDOR_ID_NEOMAGIC 0x10c8 #define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005 #define PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO 0x8006 #define PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO 0x8016 #define PCI_VENDOR_ID_TCONRAD 0x10da #define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508 #define PCI_VENDOR_ID_ROHM 0x10db #define PCI_VENDOR_ID_NVIDIA 0x10de #define PCI_DEVICE_ID_NVIDIA_TNT 0x0020 #define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028 #define PCI_DEVICE_ID_NVIDIA_UTNT2 0x0029 #define PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN 0x002a #define PCI_DEVICE_ID_NVIDIA_VTNT2 0x002C #define PCI_DEVICE_ID_NVIDIA_UVTNT2 0x002D #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 0x0041 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE 0x0042 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x0045 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000 0x004E #define PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS 0x0052 #define 
PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053 #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054 #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055 #define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059 #define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d #define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065 #define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069 #define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084 #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085 #define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089 #define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e #define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800 0x0098 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX 0x0099 #define PCI_DEVICE_ID_NVIDIA_ITNT2 0x00A0 #define PCI_DEVICE_ID_GEFORCE_6800A 0x00c1 #define PCI_DEVICE_ID_GEFORCE_6800A_LE 0x00c2 #define PCI_DEVICE_ID_GEFORCE_GO_6800 0x00c8 #define PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA 0x00c9 #define PCI_DEVICE_ID_QUADRO_FX_GO1400 0x00cc #define PCI_DEVICE_ID_QUADRO_FX_1400 0x00ce #define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1 #define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4 #define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5 #define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9 #define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da #define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5 #define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0 #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1 0x00f1 #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2 #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3 #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9 #define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd #define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 #define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103 #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX 0x0110 #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2 0x0111 #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO 0x0112 #define PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR 0x0113 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT 0x0140 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600 0x0141 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL 0x0145 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540 0x014E #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200 0x014F #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS 0x0150 #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2 0x0151 #define PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA 0x0152 #define PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO 0x0153 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE 0x0161 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200 0x0164 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250 0x0166 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1 0x0167 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1 0x0168 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460 0x0170 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440 0x0171 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420 0x0172 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE 0x0173 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO 0x0174 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO 0x0175 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32 0x0176 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO 0x0177 #define 
PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL 0x0178 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64 0x0179 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_200 0x017A #define PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL 0x017B #define PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL 0x017C #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16 0x017D #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X 0x0181 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X 0x0182 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X 0x0183 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_4000 0x0185 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO 0x0186 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO 0x0187 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL 0x0188 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC 0x0189 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS 0x018A #define PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL 0x018B #define PCI_DEVICE_ID_NVIDIA_IGEFORCE2 0x01a0 #define PCI_DEVICE_ID_NVIDIA_NFORCE 0x01a4 #define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1 #define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4 #define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc #define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1 #define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2 0x0202 #define PCI_DEVICE_ID_NVIDIA_QUADRO_DDC 0x0203 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B 0x0211 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE 0x0212 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT 0x0215 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600 0x0250 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400 0x0251 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200 0x0253 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL 0x0258 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL 0x0259 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL 0x025B #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS 0x0264 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE 0x0265 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS 0x0368 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO 0x0286 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL 0x0288 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL 0x0289 #define PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL 0x028C #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA 0x0301 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800 0x0302 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000 0x0308 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000 0x0309 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA 0x0311 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600 0x0312 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE 0x0314 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600 0x031A #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650 0x031B #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700 0x031C #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200 0x0320 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA 0x0321 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1 0x0322 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE 0x0323 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200 0x0324 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250 0x0325 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500 0x0326 #define 
PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100 0x0327 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32 0x0328 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200 0x0329 #define PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI 0x032A #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500 0x032B #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300 0x032C #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100 0x032D #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA 0x0330 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900 0x0331 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT 0x0332 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA 0x0333 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT 0x0334 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000 0x0338 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700 0x033F #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA 0x0341 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700 0x0342 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE 0x0343 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE 0x0344 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1 0x0347 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E #define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0 0x0360 #define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4 0x0364 #define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS 0x0446 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS 0x0542 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_320M 0x08A0 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 #define PCI_VENDOR_ID_IMS 0x10e0 #define PCI_DEVICE_ID_IMS_TT128 0x9128 #define PCI_DEVICE_ID_IMS_TT3D 0x9135 #define PCI_VENDOR_ID_AMCC 0x10e8 #define PCI_VENDOR_ID_AMPERE 0x1def #define PCI_VENDOR_ID_INTERG 0x10ea #define PCI_DEVICE_ID_INTERG_1682 0x1682 #define PCI_DEVICE_ID_INTERG_2000 0x2000 #define PCI_DEVICE_ID_INTERG_2010 0x2010 #define PCI_DEVICE_ID_INTERG_5000 0x5000 #define PCI_DEVICE_ID_INTERG_5050 0x5050 #define PCI_VENDOR_ID_REALTEK 0x10ec #define PCI_DEVICE_ID_REALTEK_8139 0x8139 #define PCI_VENDOR_ID_XILINX 0x10ee #define PCI_DEVICE_ID_RME_DIGI96 0x3fc0 #define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1 #define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2 #define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3 #define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5 #define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6 #define PCI_VENDOR_ID_INIT 0x1101 #define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */ #define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002 #define PCI_DEVICE_ID_CREATIVE_20K1 0x0005 #define PCI_DEVICE_ID_CREATIVE_20K2 0x000b #define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024 #define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041 #define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042 #define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043 #define PCI_SUBDEVICE_ID_CREATIVE_SB1270 0x0062 #define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000 #define 
PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */ #define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938 #define PCI_VENDOR_ID_TTI 0x1103 #define PCI_DEVICE_ID_TTI_HPT343 0x0003 #define PCI_DEVICE_ID_TTI_HPT366 0x0004 #define PCI_DEVICE_ID_TTI_HPT372 0x0005 #define PCI_DEVICE_ID_TTI_HPT302 0x0006 #define PCI_DEVICE_ID_TTI_HPT371 0x0007 #define PCI_DEVICE_ID_TTI_HPT374 0x0008 #define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 371N variant? */ #define PCI_VENDOR_ID_SIGMA 0x1105 #define PCI_VENDOR_ID_VIA 0x1106 #define PCI_DEVICE_ID_VIA_8763_0 0x0198 #define PCI_DEVICE_ID_VIA_8380_0 0x0204 #define PCI_DEVICE_ID_VIA_3238_0 0x0238 #define PCI_DEVICE_ID_VIA_PT880 0x0258 #define PCI_DEVICE_ID_VIA_PT880ULTRA 0x0308 #define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259 #define PCI_DEVICE_ID_VIA_3269_0 0x0269 #define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282 #define PCI_DEVICE_ID_VIA_3296_0 0x0296 #define PCI_DEVICE_ID_VIA_8363_0 0x0305 #define PCI_DEVICE_ID_VIA_P4M800CE 0x0314 #define PCI_DEVICE_ID_VIA_P4M890 0x0327 #define PCI_DEVICE_ID_VIA_VT3324 0x0324 #define PCI_DEVICE_ID_VIA_VT3336 0x0336 #define PCI_DEVICE_ID_VIA_VT3351 0x0351 #define PCI_DEVICE_ID_VIA_VT3364 0x0364 #define PCI_DEVICE_ID_VIA_8371_0 0x0391 #define PCI_DEVICE_ID_VIA_6415 0x0415 #define PCI_DEVICE_ID_VIA_8501_0 0x0501 #define PCI_DEVICE_ID_VIA_82C561 0x0561 #define PCI_DEVICE_ID_VIA_82C586_1 0x0571 #define PCI_DEVICE_ID_VIA_82C576 0x0576 #define PCI_DEVICE_ID_VIA_82C586_0 0x0586 #define PCI_DEVICE_ID_VIA_82C596 0x0596 #define PCI_DEVICE_ID_VIA_82C597_0 0x0597 #define PCI_DEVICE_ID_VIA_82C598_0 0x0598 #define PCI_DEVICE_ID_VIA_8601_0 0x0601 #define PCI_DEVICE_ID_VIA_8605_0 0x0605 #define PCI_DEVICE_ID_VIA_82C686 0x0686 #define PCI_DEVICE_ID_VIA_82C691_0 0x0691 #define PCI_DEVICE_ID_VIA_82C576_1 0x1571 #define PCI_DEVICE_ID_VIA_82C586_2 0x3038 #define PCI_DEVICE_ID_VIA_82C586_3 0x3040 #define PCI_DEVICE_ID_VIA_82C596_3 0x3050 #define PCI_DEVICE_ID_VIA_82C596B_3 0x3051 #define PCI_DEVICE_ID_VIA_82C686_4 0x3057 #define PCI_DEVICE_ID_VIA_82C686_5 0x3058 #define PCI_DEVICE_ID_VIA_8233_5 0x3059 #define PCI_DEVICE_ID_VIA_8233_0 0x3074 #define PCI_DEVICE_ID_VIA_8633_0 0x3091 #define PCI_DEVICE_ID_VIA_8367_0 0x3099 #define PCI_DEVICE_ID_VIA_8653_0 0x3101 #define PCI_DEVICE_ID_VIA_8622 0x3102 #define PCI_DEVICE_ID_VIA_8235_USB_2 0x3104 #define PCI_DEVICE_ID_VIA_8233C_0 0x3109 #define PCI_DEVICE_ID_VIA_8361 0x3112 #define PCI_DEVICE_ID_VIA_XM266 0x3116 #define PCI_DEVICE_ID_VIA_612X 0x3119 #define PCI_DEVICE_ID_VIA_862X_0 0x3123 #define PCI_DEVICE_ID_VIA_8753_0 0x3128 #define PCI_DEVICE_ID_VIA_8233A 0x3147 #define PCI_DEVICE_ID_VIA_8703_51_0 0x3148 #define PCI_DEVICE_ID_VIA_8237_SATA 0x3149 #define PCI_DEVICE_ID_VIA_XN266 0x3156 #define PCI_DEVICE_ID_VIA_6410 0x3164 #define PCI_DEVICE_ID_VIA_8754C_0 0x3168 #define PCI_DEVICE_ID_VIA_8235 0x3177 #define PCI_DEVICE_ID_VIA_8385_0 0x3188 #define PCI_DEVICE_ID_VIA_8377_0 0x3189 #define PCI_DEVICE_ID_VIA_8378_0 0x3205 #define PCI_DEVICE_ID_VIA_8783_0 0x3208 #define PCI_DEVICE_ID_VIA_8237 0x3227 #define PCI_DEVICE_ID_VIA_8251 0x3287 #define PCI_DEVICE_ID_VIA_8261 0x3402 #define PCI_DEVICE_ID_VIA_8237A 0x3337 #define PCI_DEVICE_ID_VIA_8237S 0x3372 #define PCI_DEVICE_ID_VIA_SATA_EIDE 0x5324 #define PCI_DEVICE_ID_VIA_8231 0x8231 #define PCI_DEVICE_ID_VIA_8231_4 0x8235 #define PCI_DEVICE_ID_VIA_8365_1 0x8305 #define PCI_DEVICE_ID_VIA_CX700 0x8324 #define PCI_DEVICE_ID_VIA_CX700_IDE 0x0581 #define PCI_DEVICE_ID_VIA_VX800 0x8353 #define PCI_DEVICE_ID_VIA_VX855 0x8409 #define PCI_DEVICE_ID_VIA_VX900 0x8410 #define 
PCI_DEVICE_ID_VIA_8371_1 0x8391 #define PCI_DEVICE_ID_VIA_82C598_1 0x8598 #define PCI_DEVICE_ID_VIA_838X_1 0xB188 #define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198 #define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409 #define PCI_DEVICE_ID_VIA_ANON 0xFFFF #define PCI_VENDOR_ID_SIEMENS 0x110A #define PCI_DEVICE_ID_SIEMENS_DSCC4 0x2102 #define PCI_VENDOR_ID_VORTEX 0x1119 #define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000 #define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001 #define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002 #define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003 #define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004 #define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005 #define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006 #define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007 #define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008 #define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009 #define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a #define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b #define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c #define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d #define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100 #define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101 #define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102 #define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103 #define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104 #define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105 #define PCI_VENDOR_ID_EF 0x111a #define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000 #define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002 #define PCI_DEVICE_ID_EF_ATM_LANAI2 0x0003 #define PCI_DEVICE_ID_EF_ATM_LANAIHB 0x0005 #define PCI_VENDOR_ID_IDT 0x111d #define PCI_DEVICE_ID_IDT_IDT77201 0x0001 #define PCI_VENDOR_ID_FORE 0x1127 #define PCI_DEVICE_ID_FORE_PCA200E 0x0300 #define PCI_VENDOR_ID_PHILIPS 0x1131 #define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146 #define PCI_DEVICE_ID_PHILIPS_SAA9730 0x9730 #define PCI_VENDOR_ID_EICON 0x1133 #define PCI_DEVICE_ID_EICON_DIVA20 0xe002 #define PCI_DEVICE_ID_EICON_DIVA20_U 0xe004 #define PCI_DEVICE_ID_EICON_DIVA201 0xe005 #define PCI_DEVICE_ID_EICON_DIVA202 0xe00b #define PCI_DEVICE_ID_EICON_MAESTRA 0xe010 #define PCI_DEVICE_ID_EICON_MAESTRAQ 0xe012 #define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013 #define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014 #define PCI_VENDOR_ID_CISCO 0x1137 #define PCI_VENDOR_ID_ZIATECH 0x1138 #define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 #define PCI_VENDOR_ID_SYSKONNECT 0x1148 #define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 #define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300 #define PCI_DEVICE_ID_SYSKONNECT_YU 0x4320 #define PCI_DEVICE_ID_SYSKONNECT_9DXX 0x4400 #define PCI_DEVICE_ID_SYSKONNECT_9MXX 0x4500 #define PCI_VENDOR_ID_DIGI 0x114f #define PCI_DEVICE_ID_DIGI_DF_M_IOM2_E 0x0070 #define PCI_DEVICE_ID_DIGI_DF_M_E 0x0071 #define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A 0x0072 #define PCI_DEVICE_ID_DIGI_DF_M_A 0x0073 #define PCI_DEVICE_ID_DIGI_NEO_8 0x00B1 #define PCI_DEVICE_ID_NEO_2DB9 0x00C8 #define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9 #define PCI_DEVICE_ID_NEO_2RJ45 0x00CA #define PCI_DEVICE_ID_NEO_2RJ45PRI 0x00CB #define PCIE_DEVICE_ID_NEO_4_IBM 0x00F4 #define PCI_VENDOR_ID_XIRCOM 0x115d #define PCI_DEVICE_ID_XIRCOM_RBM56G 0x0101 #define PCI_DEVICE_ID_XIRCOM_X3201_MDM 0x0103 #define PCI_VENDOR_ID_SERVERWORKS 0x1166 #define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008 #define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009 #define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017 #define PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB 0x0036 #define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103 #define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132 #define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200 #define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201 #define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203 #define 
PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205 #define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211 #define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212 #define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213 #define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214 #define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217 #define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227 #define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408 #define PCI_VENDOR_ID_ALTERA 0x1172 #define PCI_VENDOR_ID_SBE 0x1176 #define PCI_DEVICE_ID_SBE_WANXL100 0x0301 #define PCI_DEVICE_ID_SBE_WANXL200 0x0302 #define PCI_DEVICE_ID_SBE_WANXL400 0x0104 #define PCI_SUBDEVICE_ID_SBE_T3E3 0x0009 #define PCI_SUBDEVICE_ID_SBE_2T3E3_P0 0x0901 #define PCI_SUBDEVICE_ID_SBE_2T3E3_P1 0x0902 #define PCI_VENDOR_ID_TOSHIBA 0x1179 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0101 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2 0x0102 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_3 0x0103 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_5 0x0105 #define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a #define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f #define PCI_DEVICE_ID_TOSHIBA_TOPIC100 0x0617 #define PCI_VENDOR_ID_TOSHIBA_2 0x102f #define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030 #define PCI_DEVICE_ID_TOSHIBA_TC35815_NWU 0x0031 #define PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939 0x0032 #define PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE 0x0105 #define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108 #define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3 #define PCI_VENDOR_ID_ATTO 0x117c #define PCI_VENDOR_ID_RICOH 0x1180 #define PCI_DEVICE_ID_RICOH_RL5C465 0x0465 #define PCI_DEVICE_ID_RICOH_RL5C466 0x0466 #define PCI_DEVICE_ID_RICOH_RL5C475 0x0475 #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 #define PCI_DEVICE_ID_RICOH_R5C822 0x0822 #define PCI_DEVICE_ID_RICOH_R5CE822 0xe822 #define PCI_DEVICE_ID_RICOH_R5CE823 0xe823 #define PCI_DEVICE_ID_RICOH_R5C832 0x0832 #define PCI_DEVICE_ID_RICOH_R5C843 0x0843 #define PCI_VENDOR_ID_DLINK 0x1186 #define PCI_DEVICE_ID_DLINK_DGE510T 0x4c00 #define PCI_VENDOR_ID_ARTOP 0x1191 #define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005 #define PCI_DEVICE_ID_ARTOP_ATP860 0x0006 #define PCI_DEVICE_ID_ARTOP_ATP860R 0x0007 #define PCI_DEVICE_ID_ARTOP_ATP865 0x0008 #define PCI_DEVICE_ID_ARTOP_ATP865R 0x0009 #define PCI_DEVICE_ID_ARTOP_ATP867A 0x000A #define PCI_DEVICE_ID_ARTOP_ATP867B 0x000B #define PCI_DEVICE_ID_ARTOP_AEC7610 0x8002 #define PCI_DEVICE_ID_ARTOP_AEC7612UW 0x8010 #define PCI_DEVICE_ID_ARTOP_AEC7612U 0x8020 #define PCI_DEVICE_ID_ARTOP_AEC7612S 0x8030 #define PCI_DEVICE_ID_ARTOP_AEC7612D 0x8040 #define PCI_DEVICE_ID_ARTOP_AEC7612SUW 0x8050 #define PCI_DEVICE_ID_ARTOP_8060 0x8060 #define PCI_VENDOR_ID_ZEITNET 0x1193 #define PCI_DEVICE_ID_ZEITNET_1221 0x0001 #define PCI_DEVICE_ID_ZEITNET_1225 0x0002 #define PCI_VENDOR_ID_FUJITSU_ME 0x119e #define PCI_DEVICE_ID_FUJITSU_FS155 0x0001 #define PCI_DEVICE_ID_FUJITSU_FS50 0x0003 #define PCI_SUBVENDOR_ID_KEYSPAN 0x11a9 #define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334 #define PCI_VENDOR_ID_MARVELL 0x11ab #define PCI_VENDOR_ID_MARVELL_EXT 0x1b4b #define PCI_DEVICE_ID_MARVELL_GT64111 0x4146 #define PCI_DEVICE_ID_MARVELL_GT64260 0x6430 #define PCI_DEVICE_ID_MARVELL_MV64360 0x6460 #define PCI_DEVICE_ID_MARVELL_MV64460 0x6480 #define PCI_DEVICE_ID_MARVELL_88ALP01_NAND 0x4100 #define PCI_DEVICE_ID_MARVELL_88ALP01_SD 0x4101 #define PCI_DEVICE_ID_MARVELL_88ALP01_CCIC 0x4102 #define PCI_VENDOR_ID_V3 0x11b0 #define PCI_DEVICE_ID_V3_V960 0x0001 #define PCI_DEVICE_ID_V3_V351 0x0002 #define PCI_VENDOR_ID_ATT 0x11c1 #define PCI_DEVICE_ID_ATT_VENUS_MODEM 0x480 #define 
PCI_VENDOR_ID_SPECIALIX 0x11cb #define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004 #define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4 #define PCI_DEVICE_ID_AD1889JS 0x1889 #define PCI_DEVICE_ID_SEGA_BBA 0x1234 #define PCI_VENDOR_ID_ZORAN 0x11de #define PCI_DEVICE_ID_ZORAN_36057 0x6057 #define PCI_DEVICE_ID_ZORAN_36120 0x6120 #define PCI_VENDOR_ID_COMPEX 0x11f6 #define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112 #define PCI_VENDOR_ID_PMC_Sierra 0x11f8 #define PCI_VENDOR_ID_MICROSEMI 0x11f8 #define PCI_VENDOR_ID_RP 0x11fe #define PCI_DEVICE_ID_RP32INTF 0x0001 #define PCI_DEVICE_ID_RP8INTF 0x0002 #define PCI_DEVICE_ID_RP16INTF 0x0003 #define PCI_DEVICE_ID_RP4QUAD 0x0004 #define PCI_DEVICE_ID_RP8OCTA 0x0005 #define PCI_DEVICE_ID_RP8J 0x0006 #define PCI_DEVICE_ID_RP4J 0x0007 #define PCI_DEVICE_ID_RP8SNI 0x0008 #define PCI_DEVICE_ID_RP16SNI 0x0009 #define PCI_DEVICE_ID_RPP4 0x000A #define PCI_DEVICE_ID_RPP8 0x000B #define PCI_DEVICE_ID_RP4M 0x000D #define PCI_DEVICE_ID_RP2_232 0x000E #define PCI_DEVICE_ID_RP2_422 0x000F #define PCI_DEVICE_ID_URP32INTF 0x0801 #define PCI_DEVICE_ID_URP8INTF 0x0802 #define PCI_DEVICE_ID_URP16INTF 0x0803 #define PCI_DEVICE_ID_URP8OCTA 0x0805 #define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C #define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D #define PCI_DEVICE_ID_CRP16INTF 0x0903 #define PCI_VENDOR_ID_CYCLADES 0x120e #define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 #define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101 #define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102 #define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103 #define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104 #define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105 #define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200 #define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201 #define PCI_DEVICE_ID_PC300_RX_2 0x0300 #define PCI_DEVICE_ID_PC300_RX_1 0x0301 #define PCI_DEVICE_ID_PC300_TE_2 0x0310 #define PCI_DEVICE_ID_PC300_TE_1 0x0311 #define PCI_DEVICE_ID_PC300_TE_M_2 0x0320 #define PCI_DEVICE_ID_PC300_TE_M_1 0x0321 #define PCI_VENDOR_ID_ESSENTIAL 0x120f #define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001 #define PCI_VENDOR_ID_O2 0x1217 #define PCI_DEVICE_ID_O2_6729 0x6729 #define PCI_DEVICE_ID_O2_6730 0x673a #define PCI_DEVICE_ID_O2_6832 0x6832 #define PCI_DEVICE_ID_O2_6836 0x6836 #define PCI_DEVICE_ID_O2_6812 0x6872 #define PCI_DEVICE_ID_O2_6933 0x6933 #define PCI_DEVICE_ID_O2_8120 0x8120 #define PCI_DEVICE_ID_O2_8220 0x8220 #define PCI_DEVICE_ID_O2_8221 0x8221 #define PCI_DEVICE_ID_O2_8320 0x8320 #define PCI_DEVICE_ID_O2_8321 0x8321 #define PCI_VENDOR_ID_3DFX 0x121a #define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 #define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002 #define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003 #define PCI_DEVICE_ID_3DFX_VOODOO3 0x0005 #define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009 #define PCI_VENDOR_ID_AVM 0x1244 #define PCI_DEVICE_ID_AVM_B1 0x0700 #define PCI_DEVICE_ID_AVM_C4 0x0800 #define PCI_DEVICE_ID_AVM_A1 0x0a00 #define PCI_DEVICE_ID_AVM_A1_V2 0x0e00 #define PCI_DEVICE_ID_AVM_C2 0x1100 #define PCI_DEVICE_ID_AVM_T1 0x1200 #define PCI_VENDOR_ID_STALLION 0x124d /* Allied Telesyn */ #define PCI_VENDOR_ID_AT 0x1259 #define PCI_SUBDEVICE_ID_AT_2700FX 0x2701 #define PCI_SUBDEVICE_ID_AT_2701FX 0x2703 #define PCI_VENDOR_ID_ESS 0x125d #define PCI_DEVICE_ID_ESS_ESS1968 0x1968 #define PCI_DEVICE_ID_ESS_ESS1978 0x1978 #define PCI_DEVICE_ID_ESS_ALLEGRO_1 0x1988 #define PCI_DEVICE_ID_ESS_ALLEGRO 0x1989 #define PCI_DEVICE_ID_ESS_CANYON3D_2LE 0x1990 #define PCI_DEVICE_ID_ESS_CANYON3D_2 0x1992 #define PCI_DEVICE_ID_ESS_MAESTRO3 0x1998 #define PCI_DEVICE_ID_ESS_MAESTRO3_1 0x1999 #define PCI_DEVICE_ID_ESS_MAESTRO3_HW 0x199a #define 
PCI_DEVICE_ID_ESS_MAESTRO3_2 0x199b #define PCI_VENDOR_ID_SATSAGEM 0x1267 #define PCI_DEVICE_ID_SATSAGEM_NICCY 0x1016 #define PCI_VENDOR_ID_ENSONIQ 0x1274 #define PCI_DEVICE_ID_ENSONIQ_CT5880 0x5880 #define PCI_DEVICE_ID_ENSONIQ_ES1370 0x5000 #define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371 #define PCI_VENDOR_ID_TRANSMETA 0x1279 #define PCI_DEVICE_ID_EFFICEON 0x0060 #define PCI_VENDOR_ID_ROCKWELL 0x127A #define PCI_VENDOR_ID_ITE 0x1283 #define PCI_DEVICE_ID_ITE_8172 0x8172 #define PCI_DEVICE_ID_ITE_8211 0x8211 #define PCI_DEVICE_ID_ITE_8212 0x8212 #define PCI_DEVICE_ID_ITE_8213 0x8213 #define PCI_DEVICE_ID_ITE_8152 0x8152 #define PCI_DEVICE_ID_ITE_8872 0x8872 #define PCI_DEVICE_ID_ITE_IT8330G_0 0xe886 /* formerly Platform Tech */ #define PCI_DEVICE_ID_ESS_ESS0100 0x0100 #define PCI_VENDOR_ID_ALTEON 0x12ae #define PCI_SUBVENDOR_ID_CONNECT_TECH 0x12c4 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232 0x0001 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232 0x0002 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232 0x0003 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485 0x0004 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4 0x0005 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485 0x0006 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2 0x0007 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485 0x0008 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6 0x0009 #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1 0x000A #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1 0x000B #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ 0x000C #define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_PTM 0x000D #define PCI_SUBDEVICE_ID_CONNECT_TECH_NT960PCI 0x0100 #define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2 0x0201 #define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4 0x0202 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232 0x0300 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232 0x0301 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232 0x0302 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1 0x0310 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2 0x0311 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4 0x0312 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2 0x0320 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4 0x0321 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8 0x0322 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485 0x0330 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485 0x0331 #define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485 0x0332 #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 #define PCI_VENDOR_ID_PERICOM 0x12D8 #define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 #define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 #define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 #define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 #define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST16 0x0011 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC 0x0041 #define PCI_SUBVENDOR_ID_CHASE_PCIRAS 0x124D #define PCI_SUBDEVICE_ID_CHASE_PCIRAS4 0xF001 #define PCI_SUBDEVICE_ID_CHASE_PCIRAS8 0xF010 #define PCI_VENDOR_ID_AUREAL 0x12eb #define PCI_DEVICE_ID_AUREAL_VORTEX_1 0x0001 #define PCI_DEVICE_ID_AUREAL_VORTEX_2 0x0002 #define PCI_DEVICE_ID_AUREAL_ADVANTAGE 0x0003 #define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8 #define PCI_DEVICE_ID_LML_33R10 0x8a02 #define PCI_VENDOR_ID_ESDGMBH 0x12fe #define PCI_DEVICE_ID_ESDGMBH_CPCIASIO4 0x0111 #define PCI_VENDOR_ID_CB 0x1307 /* Measurement Computing */ #define PCI_VENDOR_ID_SIIG 
0x131f #define PCI_SUBVENDOR_ID_SIIG 0x131f #define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000 #define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001 #define PCI_DEVICE_ID_SIIG_1S_10x_850 0x1002 #define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010 #define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011 #define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012 #define PCI_DEVICE_ID_SIIG_1P_10x 0x1020 #define PCI_DEVICE_ID_SIIG_2P_10x 0x1021 #define PCI_DEVICE_ID_SIIG_2S_10x_550 0x1030 #define PCI_DEVICE_ID_SIIG_2S_10x_650 0x1031 #define PCI_DEVICE_ID_SIIG_2S_10x_850 0x1032 #define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034 #define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035 #define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036 #define PCI_DEVICE_ID_SIIG_4S_10x_550 0x1050 #define PCI_DEVICE_ID_SIIG_4S_10x_650 0x1051 #define PCI_DEVICE_ID_SIIG_4S_10x_850 0x1052 #define PCI_DEVICE_ID_SIIG_1S_20x_550 0x2000 #define PCI_DEVICE_ID_SIIG_1S_20x_650 0x2001 #define PCI_DEVICE_ID_SIIG_1S_20x_850 0x2002 #define PCI_DEVICE_ID_SIIG_1P_20x 0x2020 #define PCI_DEVICE_ID_SIIG_2P_20x 0x2021 #define PCI_DEVICE_ID_SIIG_2S_20x_550 0x2030 #define PCI_DEVICE_ID_SIIG_2S_20x_650 0x2031 #define PCI_DEVICE_ID_SIIG_2S_20x_850 0x2032 #define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040 #define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041 #define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042 #define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010 #define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011 #define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012 #define PCI_DEVICE_ID_SIIG_4S_20x_550 0x2050 #define PCI_DEVICE_ID_SIIG_4S_20x_650 0x2051 #define PCI_DEVICE_ID_SIIG_4S_20x_850 0x2052 #define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060 #define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061 #define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062 #define PCI_DEVICE_ID_SIIG_8S_20x_550 0x2080 #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081 #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082 #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 #define PCI_VENDOR_ID_RADISYS 0x1331 #define PCI_VENDOR_ID_MICRO_MEMORY 0x1332 #define PCI_DEVICE_ID_MICRO_MEMORY_5415CN 0x5415 #define PCI_DEVICE_ID_MICRO_MEMORY_5425CN 0x5425 #define PCI_DEVICE_ID_MICRO_MEMORY_6155 0x6155 #define PCI_VENDOR_ID_DOMEX 0x134a #define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001 #define PCI_VENDOR_ID_INTASHIELD 0x135a #define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80 #define PCI_DEVICE_ID_INTASHIELD_IS400 0x0dc0 #define PCI_VENDOR_ID_QUATECH 0x135C #define PCI_DEVICE_ID_QUATECH_QSC100 0x0010 #define PCI_DEVICE_ID_QUATECH_DSC100 0x0020 #define PCI_DEVICE_ID_QUATECH_DSC200 0x0030 #define PCI_DEVICE_ID_QUATECH_QSC200 0x0040 #define PCI_DEVICE_ID_QUATECH_ESC100D 0x0050 #define PCI_DEVICE_ID_QUATECH_ESC100M 0x0060 #define PCI_DEVICE_ID_QUATECH_QSCP100 0x0120 #define PCI_DEVICE_ID_QUATECH_DSCP100 0x0130 #define PCI_DEVICE_ID_QUATECH_QSCP200 0x0140 #define PCI_DEVICE_ID_QUATECH_DSCP200 0x0150 #define PCI_DEVICE_ID_QUATECH_QSCLP100 0x0170 #define PCI_DEVICE_ID_QUATECH_DSCLP100 0x0180 #define PCI_DEVICE_ID_QUATECH_DSC100E 0x0181 #define PCI_DEVICE_ID_QUATECH_SSCLP100 0x0190 #define PCI_DEVICE_ID_QUATECH_QSCLP200 0x01A0 #define PCI_DEVICE_ID_QUATECH_DSCLP200 0x01B0 #define PCI_DEVICE_ID_QUATECH_DSC200E 0x01B1 #define PCI_DEVICE_ID_QUATECH_SSCLP200 0x01C0 #define PCI_DEVICE_ID_QUATECH_ESCLP100 0x01E0 #define PCI_DEVICE_ID_QUATECH_SPPXP_100 0x0278 #define PCI_VENDOR_ID_SEALEVEL 0x135e #define PCI_DEVICE_ID_SEALEVEL_U530 0x7101 #define PCI_DEVICE_ID_SEALEVEL_UCOMM2 0x7201 #define PCI_DEVICE_ID_SEALEVEL_UCOMM422 0x7402 #define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202 #define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401 
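/*
 * Usage sketch (an illustration, not part of the upstream ID list): drivers
 * consume the constants above through <linux/pci.h> match tables.
 * PCI_DEVICE(), PCI_DEVICE_SUB(), MODULE_DEVICE_TABLE() and
 * module_pci_driver() are the stock kernel helpers; the demo_* names below
 * are hypothetical.  The PLX 9050 / Keyspan SX2 pairing mirrors how serial
 * drivers match a specific board by its subsystem IDs when the bridge chip
 * itself is generic.
 */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_ids[] = {
	/* Plain vendor/device match. */
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139) },
	/* Subsystem match: generic PLX 9050 bridge, Keyspan SX2 board. */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
			 PCI_SUBVENDOR_ID_KEYSPAN,
			 PCI_SUBDEVICE_ID_KEYSPAN_SX2) },
	{ }	/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Managed enable: no explicit cleanup needed on driver unbind. */
	return pcim_enable_device(pdev);
}

static struct pci_driver demo_driver = {
	.name		= "demo",
	.id_table	= demo_ids,
	.probe		= demo_probe,
};
module_pci_driver(demo_driver);
MODULE_LICENSE("GPL");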
#define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801 #define PCI_DEVICE_ID_SEALEVEL_7803 0x7803 #define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804 #define PCI_VENDOR_ID_HYPERCOPE 0x1365 #define PCI_DEVICE_ID_HYPERCOPE_PLX 0x9050 #define PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO 0x0104 #define PCI_SUBDEVICE_ID_HYPERCOPE_ERGO 0x0106 #define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107 #define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108 #define PCI_VENDOR_ID_DIGIGRAM 0x1369 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_SERIAL_SUBSYSTEM 0xc021 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_CAE_SERIAL_SUBSYSTEM 0xc022 #define PCI_VENDOR_ID_KAWASAKI 0x136b #define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 #define PCI_VENDOR_ID_CNET 0x1371 #define PCI_DEVICE_ID_CNET_GIGACARD 0x434e #define PCI_VENDOR_ID_LMC 0x1376 #define PCI_DEVICE_ID_LMC_HSSI 0x0003 #define PCI_DEVICE_ID_LMC_DS3 0x0004 #define PCI_DEVICE_ID_LMC_SSI 0x0005 #define PCI_DEVICE_ID_LMC_T1 0x0006 #define PCI_VENDOR_ID_NETGEAR 0x1385 #define PCI_DEVICE_ID_NETGEAR_GA620 0x620a #define PCI_VENDOR_ID_APPLICOM 0x1389 #define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001 #define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002 #define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003 #define PCI_VENDOR_ID_MOXA 0x1393 #define PCI_DEVICE_ID_MOXA_RC7000 0x0001 #define PCI_DEVICE_ID_MOXA_CP102 0x1020 #define PCI_DEVICE_ID_MOXA_CP102UL 0x1021 #define PCI_DEVICE_ID_MOXA_CP102U 0x1022 #define PCI_DEVICE_ID_MOXA_C104 0x1040 #define PCI_DEVICE_ID_MOXA_CP104U 0x1041 #define PCI_DEVICE_ID_MOXA_CP104JU 0x1042 #define PCI_DEVICE_ID_MOXA_CP104EL 0x1043 #define PCI_DEVICE_ID_MOXA_CT114 0x1140 #define PCI_DEVICE_ID_MOXA_CP114 0x1141 #define PCI_DEVICE_ID_MOXA_CP118U 0x1180 #define PCI_DEVICE_ID_MOXA_CP118EL 0x1181 #define PCI_DEVICE_ID_MOXA_CP132 0x1320 #define PCI_DEVICE_ID_MOXA_CP132U 0x1321 #define PCI_DEVICE_ID_MOXA_CP134U 0x1340 #define PCI_DEVICE_ID_MOXA_C168 0x1680 #define PCI_DEVICE_ID_MOXA_CP168U 0x1681 #define PCI_DEVICE_ID_MOXA_CP168EL 0x1682 #define PCI_DEVICE_ID_MOXA_CP204J 0x2040 #define PCI_DEVICE_ID_MOXA_C218 0x2180 #define PCI_DEVICE_ID_MOXA_C320 0x3200 #define PCI_VENDOR_ID_CCD 0x1397 #define PCI_DEVICE_ID_CCD_HFC4S 0x08B4 #define PCI_SUBDEVICE_ID_CCD_PMX2S 0x1234 #define PCI_DEVICE_ID_CCD_HFC8S 0x16B8 #define PCI_DEVICE_ID_CCD_2BD0 0x2bd0 #define PCI_DEVICE_ID_CCD_HFCE1 0x30B1 #define PCI_SUBDEVICE_ID_CCD_SPD4S 0x3136 #define PCI_SUBDEVICE_ID_CCD_SPDE1 0x3137 #define PCI_DEVICE_ID_CCD_B000 0xb000 #define PCI_DEVICE_ID_CCD_B006 0xb006 #define PCI_DEVICE_ID_CCD_B007 0xb007 #define PCI_DEVICE_ID_CCD_B008 0xb008 #define PCI_DEVICE_ID_CCD_B009 0xb009 #define PCI_DEVICE_ID_CCD_B00A 0xb00a #define PCI_DEVICE_ID_CCD_B00B 0xb00b #define PCI_DEVICE_ID_CCD_B00C 0xb00c #define PCI_DEVICE_ID_CCD_B100 0xb100 #define PCI_SUBDEVICE_ID_CCD_IOB4ST 0xB520 #define PCI_SUBDEVICE_ID_CCD_IOB8STR 0xB521 #define PCI_SUBDEVICE_ID_CCD_IOB8ST 0xB522 #define PCI_SUBDEVICE_ID_CCD_IOB1E1 0xB523 #define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540 #define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550 #define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552 #define PCI_SUBDEVICE_ID_CCD_JHSE1 0xB553 #define PCI_SUBDEVICE_ID_CCD_JH8S 0xB55B #define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560 #define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562 #define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563 #define PCI_SUBDEVICE_ID_CCD_BNE1D 0xB564 #define PCI_SUBDEVICE_ID_CCD_BNE1DP 0xB565 #define PCI_SUBDEVICE_ID_CCD_BN2S 0xB566 #define PCI_SUBDEVICE_ID_CCD_BN1SM 
0xB567 #define PCI_SUBDEVICE_ID_CCD_BN4SM 0xB568 #define PCI_SUBDEVICE_ID_CCD_BN2SM 0xB569 #define PCI_SUBDEVICE_ID_CCD_BNE1M 0xB56A #define PCI_SUBDEVICE_ID_CCD_BN8SP 0xB56B #define PCI_SUBDEVICE_ID_CCD_HFC4S 0xB620 #define PCI_SUBDEVICE_ID_CCD_HFC8S 0xB622 #define PCI_DEVICE_ID_CCD_B700 0xb700 #define PCI_DEVICE_ID_CCD_B701 0xb701 #define PCI_SUBDEVICE_ID_CCD_HFCE1 0xC523 #define PCI_SUBDEVICE_ID_CCD_OV2S 0xE884 #define PCI_SUBDEVICE_ID_CCD_OV4S 0xE888 #define PCI_SUBDEVICE_ID_CCD_OV8S 0xE998 #define PCI_VENDOR_ID_EXAR 0x13a8 #define PCI_DEVICE_ID_EXAR_XR17C152 0x0152 #define PCI_DEVICE_ID_EXAR_XR17C154 0x0154 #define PCI_DEVICE_ID_EXAR_XR17C158 0x0158 #define PCI_DEVICE_ID_EXAR_XR17V352 0x0352 #define PCI_DEVICE_ID_EXAR_XR17V354 0x0354 #define PCI_DEVICE_ID_EXAR_XR17V358 0x0358 #define PCI_VENDOR_ID_MICROGATE 0x13c0 #define PCI_DEVICE_ID_MICROGATE_USC 0x0010 #define PCI_DEVICE_ID_MICROGATE_SCA 0x0030 #define PCI_VENDOR_ID_3WARE 0x13C1 #define PCI_DEVICE_ID_3WARE_1000 0x1000 #define PCI_DEVICE_ID_3WARE_7000 0x1001 #define PCI_DEVICE_ID_3WARE_9000 0x1002 #define PCI_VENDOR_ID_IOMEGA 0x13ca #define PCI_DEVICE_ID_IOMEGA_BUZ 0x4231 #define PCI_VENDOR_ID_ABOCOM 0x13D1 #define PCI_DEVICE_ID_ABOCOM_2BD1 0x2BD1 #define PCI_VENDOR_ID_SUNDANCE 0x13f0 #define PCI_VENDOR_ID_CMEDIA 0x13f6 #define PCI_DEVICE_ID_CMEDIA_CM8338A 0x0100 #define PCI_DEVICE_ID_CMEDIA_CM8338B 0x0101 #define PCI_DEVICE_ID_CMEDIA_CM8738 0x0111 #define PCI_DEVICE_ID_CMEDIA_CM8738B 0x0112 #define PCI_VENDOR_ID_ADVANTECH 0x13fe #define PCI_VENDOR_ID_MEILHAUS 0x1402 #define PCI_VENDOR_ID_LAVA 0x1407 #define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */ #define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */ #define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */ #define PCI_DEVICE_ID_LAVA_QUATTRO_A 0x0120 /* 2x 16550A, half of 4 port */ #define PCI_DEVICE_ID_LAVA_QUATTRO_B 0x0121 /* 2x 16550A, half of 4 port */ #define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */ #define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */ #define PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */ #define PCI_DEVICE_ID_LAVA_QUAD_A 0x0201 /* 2x 16650, half of 4 port */ #define PCI_DEVICE_ID_LAVA_QUAD_B 0x0202 /* 2x 16650, half of 4 port */ #define PCI_DEVICE_ID_LAVA_SSERIAL 0x0500 /* 1x 16550 */ #define PCI_DEVICE_ID_LAVA_PORT_650 0x0600 /* 1x 16650 */ #define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000 #define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8002 /* The Lava Dual Parallel is */ #define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8003 /* two PCI devices on a card */ #define PCI_DEVICE_ID_LAVA_BOCA_IOPPAR 0x8800 #define PCI_VENDOR_ID_TIMEDIA 0x1409 #define PCI_DEVICE_ID_TIMEDIA_1889 0x7168 #define PCI_VENDOR_ID_ICE 0x1412 #define PCI_DEVICE_ID_ICE_1712 0x1712 #define PCI_DEVICE_ID_VT1724 0x1724 #define PCI_VENDOR_ID_OXSEMI 0x1415 #define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403 #define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000 #define PCI_DEVICE_ID_OXSEMI_PCIe840_G 0xC004 #define PCI_DEVICE_ID_OXSEMI_PCIe952_0 0xC100 #define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G 0xC104 #define PCI_DEVICE_ID_OXSEMI_PCIe952_1 0xC110 #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G 0xC114 #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 #define PCI_DEVICE_ID_OXSEMI_C950 0x950B #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 #define PCI_DEVICE_ID_OXSEMI_16PCI952PP 
0x9523 #define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001 #define PCI_VENDOR_ID_CHELSIO 0x1425 #define PCI_VENDOR_ID_EDIMAX 0x1432 #define PCI_VENDOR_ID_ADLINK 0x144a #define PCI_VENDOR_ID_SAMSUNG 0x144d #define PCI_VENDOR_ID_GIGABYTE 0x1458 #define PCI_VENDOR_ID_AMBIT 0x1468 #define PCI_VENDOR_ID_MYRICOM 0x14c1 #define PCI_VENDOR_ID_MEDIATEK 0x14c3 #define PCI_DEVICE_ID_MEDIATEK_7629 0x7629 #define PCI_VENDOR_ID_TITAN 0x14D2 #define PCI_DEVICE_ID_TITAN_010L 0x8001 #define PCI_DEVICE_ID_TITAN_100L 0x8010 #define PCI_DEVICE_ID_TITAN_110L 0x8011 #define PCI_DEVICE_ID_TITAN_200L 0x8020 #define PCI_DEVICE_ID_TITAN_210L 0x8021 #define PCI_DEVICE_ID_TITAN_400L 0x8040 #define PCI_DEVICE_ID_TITAN_800L 0x8080 #define PCI_DEVICE_ID_TITAN_100 0xA001 #define PCI_DEVICE_ID_TITAN_200 0xA005 #define PCI_DEVICE_ID_TITAN_400 0xA003 #define PCI_DEVICE_ID_TITAN_800B 0xA004 #define PCI_VENDOR_ID_PANACOM 0x14d4 #define PCI_DEVICE_ID_PANACOM_QUADMODEM 0x0400 #define PCI_DEVICE_ID_PANACOM_DUALMODEM 0x0402 #define PCI_VENDOR_ID_SIPACKETS 0x14d9 #define PCI_DEVICE_ID_SP1011 0x0010 #define PCI_VENDOR_ID_AFAVLAB 0x14db #define PCI_DEVICE_ID_AFAVLAB_P028 0x2180 #define PCI_DEVICE_ID_AFAVLAB_P030 0x2182 #define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150 #define PCI_VENDOR_ID_AMPLICON 0x14dc #define PCI_VENDOR_ID_BCM_GVC 0x14a4 #define PCI_VENDOR_ID_BROADCOM 0x14e4 #define PCI_DEVICE_ID_TIGON3_5752 0x1600 #define PCI_DEVICE_ID_TIGON3_5752M 0x1601 #define PCI_DEVICE_ID_NX2_5709 0x1639 #define PCI_DEVICE_ID_NX2_5709S 0x163a #define PCI_DEVICE_ID_TIGON3_5700 0x1644 #define PCI_DEVICE_ID_TIGON3_5701 0x1645 #define PCI_DEVICE_ID_TIGON3_5702 0x1646 #define PCI_DEVICE_ID_TIGON3_5703 0x1647 #define PCI_DEVICE_ID_TIGON3_5704 0x1648 #define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649 #define PCI_DEVICE_ID_NX2_5706 0x164a #define PCI_DEVICE_ID_NX2_5708 0x164c #define PCI_DEVICE_ID_TIGON3_5702FE 0x164d #define PCI_DEVICE_ID_NX2_57710 0x164e #define PCI_DEVICE_ID_NX2_57711 0x164f #define PCI_DEVICE_ID_NX2_57711E 0x1650 #define PCI_DEVICE_ID_TIGON3_5705 0x1653 #define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 #define PCI_DEVICE_ID_TIGON3_5719 0x1657 #define PCI_DEVICE_ID_TIGON3_5721 0x1659 #define PCI_DEVICE_ID_TIGON3_5722 0x165a #define PCI_DEVICE_ID_TIGON3_5723 0x165b #define PCI_DEVICE_ID_TIGON3_5705M 0x165d #define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e #define PCI_DEVICE_ID_NX2_57712 0x1662 #define PCI_DEVICE_ID_NX2_57712E 0x1663 #define PCI_DEVICE_ID_NX2_57712_MF 0x1663 #define PCI_DEVICE_ID_TIGON3_5714 0x1668 #define PCI_DEVICE_ID_TIGON3_5714S 0x1669 #define PCI_DEVICE_ID_TIGON3_5780 0x166a #define PCI_DEVICE_ID_TIGON3_5780S 0x166b #define PCI_DEVICE_ID_TIGON3_5705F 0x166e #define PCI_DEVICE_ID_NX2_57712_VF 0x166f #define PCI_DEVICE_ID_TIGON3_5754M 0x1672 #define PCI_DEVICE_ID_TIGON3_5755M 0x1673 #define PCI_DEVICE_ID_TIGON3_5756 0x1674 #define PCI_DEVICE_ID_TIGON3_5750 0x1676 #define PCI_DEVICE_ID_TIGON3_5751 0x1677 #define PCI_DEVICE_ID_TIGON3_5715 0x1678 #define PCI_DEVICE_ID_TIGON3_5715S 0x1679 #define PCI_DEVICE_ID_TIGON3_5754 0x167a #define PCI_DEVICE_ID_TIGON3_5755 0x167b #define PCI_DEVICE_ID_TIGON3_5751M 0x167d #define PCI_DEVICE_ID_TIGON3_5751F 0x167e #define PCI_DEVICE_ID_TIGON3_5787F 0x167f #define PCI_DEVICE_ID_TIGON3_5761E 0x1680 #define PCI_DEVICE_ID_TIGON3_5761 0x1681 #define PCI_DEVICE_ID_TIGON3_5764 0x1684 #define PCI_DEVICE_ID_NX2_57800 0x168a #define PCI_DEVICE_ID_NX2_57840 0x168d #define PCI_DEVICE_ID_NX2_57810 0x168e #define PCI_DEVICE_ID_TIGON3_5787M 0x1693 #define PCI_DEVICE_ID_TIGON3_5782 0x1696 #define PCI_DEVICE_ID_TIGON3_5784 
0x1698 #define PCI_DEVICE_ID_TIGON3_5786 0x169a #define PCI_DEVICE_ID_TIGON3_5787 0x169b #define PCI_DEVICE_ID_TIGON3_5788 0x169c #define PCI_DEVICE_ID_TIGON3_5789 0x169d #define PCI_DEVICE_ID_NX2_57840_4_10 0x16a1 #define PCI_DEVICE_ID_NX2_57840_2_20 0x16a2 #define PCI_DEVICE_ID_NX2_57840_MF 0x16a4 #define PCI_DEVICE_ID_NX2_57800_MF 0x16a5 #define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 #define PCI_DEVICE_ID_TIGON3_5703X 0x16a7 #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 #define PCI_DEVICE_ID_NX2_5706S 0x16aa #define PCI_DEVICE_ID_NX2_5708S 0x16ac #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae #define PCI_DEVICE_ID_NX2_57810_VF 0x16af #define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6 #define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7 #define PCI_DEVICE_ID_TIGON3_5781 0x16dd #define PCI_DEVICE_ID_TIGON3_5753 0x16f7 #define PCI_DEVICE_ID_TIGON3_5753M 0x16fd #define PCI_DEVICE_ID_TIGON3_5753F 0x16fe #define PCI_DEVICE_ID_TIGON3_5901 0x170d #define PCI_DEVICE_ID_BCM4401B1 0x170c #define PCI_DEVICE_ID_TIGON3_5901_2 0x170e #define PCI_DEVICE_ID_TIGON3_5906 0x1712 #define PCI_DEVICE_ID_TIGON3_5906M 0x1713 #define PCI_DEVICE_ID_BCM4401 0x4401 #define PCI_DEVICE_ID_BCM4401B0 0x4402 #define PCI_VENDOR_ID_TOPIC 0x151f #define PCI_DEVICE_ID_TOPIC_TP560 0x0000 #define PCI_VENDOR_ID_MAINPINE 0x1522 #define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100 #define PCI_VENDOR_ID_ENE 0x1524 #define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510 #define PCI_DEVICE_ID_ENE_CB712_SD 0x0550 #define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551 #define PCI_DEVICE_ID_ENE_CB714_SD 0x0750 #define PCI_DEVICE_ID_ENE_CB714_SD_2 0x0751 #define PCI_DEVICE_ID_ENE_1211 0x1211 #define PCI_DEVICE_ID_ENE_1225 0x1225 #define PCI_DEVICE_ID_ENE_1410 0x1410 #define PCI_DEVICE_ID_ENE_710 0x1411 #define PCI_DEVICE_ID_ENE_712 0x1412 #define PCI_DEVICE_ID_ENE_1420 0x1420 #define PCI_DEVICE_ID_ENE_720 0x1421 #define PCI_DEVICE_ID_ENE_722 0x1422 #define PCI_SUBVENDOR_ID_PERLE 0x155f #define PCI_SUBDEVICE_ID_PCI_RAS4 0xf001 #define PCI_SUBDEVICE_ID_PCI_RAS8 0xf010 #define PCI_VENDOR_ID_SYBA 0x1592 #define PCI_DEVICE_ID_SYBA_2P_EPP 0x0782 #define PCI_DEVICE_ID_SYBA_1P_ECP 0x0783 #define PCI_VENDOR_ID_MORETON 0x15aa #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 #define PCI_VENDOR_ID_VMWARE 0x15ad #define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0 #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 #define PCI_VENDOR_ID_MELLANOX 0x15b3 #define PCI_DEVICE_ID_MELLANOX_CONNECTX3 0x1003 #define PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO 0x1007 #define PCI_DEVICE_ID_MELLANOX_CONNECTIB 0x1011 #define PCI_DEVICE_ID_MELLANOX_CONNECTX4 0x1013 #define PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX 0x1015 #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 #define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 #define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c #define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 #define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 #define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 #define PCI_DEVICE_ID_MELLANOX_HERMON_SDR 0x6340 #define PCI_DEVICE_ID_MELLANOX_HERMON_DDR 0x634a #define PCI_DEVICE_ID_MELLANOX_HERMON_QDR 0x6354 #define PCI_DEVICE_ID_MELLANOX_HERMON_EN 0x6368 #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN 0x6372 #define PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2 0x6732 #define PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2 0x673c #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2 0x6746 #define PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2 0x6750 #define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2 0x675a #define 
PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2 0x6764 #define PCI_DEVICE_ID_MELLANOX_CONNECTX2 0x676e #define PCI_VENDOR_ID_DFI 0x15bd #define PCI_VENDOR_ID_QUICKNET 0x15e2 #define PCI_DEVICE_ID_QUICKNET_XJ 0x0500 /* * ADDI-DATA GmbH communication cards <info@addi-data.com> */ #define PCI_VENDOR_ID_ADDIDATA 0x15B8 #define PCI_DEVICE_ID_ADDIDATA_APCI7500 0x7000 #define PCI_DEVICE_ID_ADDIDATA_APCI7420 0x7001 #define PCI_DEVICE_ID_ADDIDATA_APCI7300 0x7002 #define PCI_DEVICE_ID_ADDIDATA_APCI7500_2 0x7009 #define PCI_DEVICE_ID_ADDIDATA_APCI7420_2 0x700A #define PCI_DEVICE_ID_ADDIDATA_APCI7300_2 0x700B #define PCI_DEVICE_ID_ADDIDATA_APCI7500_3 0x700C #define PCI_DEVICE_ID_ADDIDATA_APCI7420_3 0x700D #define PCI_DEVICE_ID_ADDIDATA_APCI7300_3 0x700E #define PCI_DEVICE_ID_ADDIDATA_APCI7800_3 0x700F #define PCI_DEVICE_ID_ADDIDATA_APCIe7300 0x7010 #define PCI_DEVICE_ID_ADDIDATA_APCIe7420 0x7011 #define PCI_DEVICE_ID_ADDIDATA_APCIe7500 0x7012 #define PCI_DEVICE_ID_ADDIDATA_APCIe7800 0x7013 #define PCI_VENDOR_ID_PDC 0x15e9 #define PCI_VENDOR_ID_FARSITE 0x1619 #define PCI_DEVICE_ID_FARSITE_T2P 0x0400 #define PCI_DEVICE_ID_FARSITE_T4P 0x0440 #define PCI_DEVICE_ID_FARSITE_T1U 0x0610 #define PCI_DEVICE_ID_FARSITE_T2U 0x0620 #define PCI_DEVICE_ID_FARSITE_T4U 0x0640 #define PCI_DEVICE_ID_FARSITE_TE1 0x1610 #define PCI_DEVICE_ID_FARSITE_TE1C 0x1612 #define PCI_VENDOR_ID_ARIMA 0x161f #define PCI_VENDOR_ID_BROCADE 0x1657 #define PCI_DEVICE_ID_BROCADE_CT 0x0014 #define PCI_DEVICE_ID_BROCADE_FC_8G1P 0x0017 #define PCI_DEVICE_ID_BROCADE_CT_FC 0x0021 #define PCI_VENDOR_ID_SIBYTE 0x166d #define PCI_DEVICE_ID_BCM1250_PCI 0x0001 #define PCI_DEVICE_ID_BCM1250_HT 0x0002 #define PCI_VENDOR_ID_ATHEROS 0x168c #define PCI_VENDOR_ID_NETCELL 0x169c #define PCI_DEVICE_ID_REVOLUTION 0x0044 #define PCI_VENDOR_ID_CENATEK 0x16CA #define PCI_DEVICE_ID_CENATEK_IDE 0x0001 #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf #define PCI_DEVICE_ID_SYNOPSYS_EDDA 0xedda #define PCI_VENDOR_ID_USR 0x16ec #define PCI_VENDOR_ID_VITESSE 0x1725 #define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 #define PCI_VENDOR_ID_LINKSYS 0x1737 #define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064 #define PCI_VENDOR_ID_ALTIMA 0x173b #define PCI_DEVICE_ID_ALTIMA_AC1000 0x03e8 #define PCI_DEVICE_ID_ALTIMA_AC1001 0x03e9 #define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea #define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb #define PCI_VENDOR_ID_CAVIUM 0x177d #define PCI_VENDOR_ID_TECHWELL 0x1797 #define PCI_DEVICE_ID_TECHWELL_6800 0x6800 #define PCI_DEVICE_ID_TECHWELL_6801 0x6801 #define PCI_DEVICE_ID_TECHWELL_6804 0x6804 #define PCI_DEVICE_ID_TECHWELL_6816_1 0x6810 #define PCI_DEVICE_ID_TECHWELL_6816_2 0x6811 #define PCI_DEVICE_ID_TECHWELL_6816_3 0x6812 #define PCI_DEVICE_ID_TECHWELL_6816_4 0x6813 #define PCI_VENDOR_ID_BELKIN 0x1799 #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f #define PCI_VENDOR_ID_RDC 0x17f3 #define PCI_DEVICE_ID_RDC_R6020 0x6020 #define PCI_DEVICE_ID_RDC_R6030 0x6030 #define PCI_DEVICE_ID_RDC_R6040 0x6040 #define PCI_DEVICE_ID_RDC_R6060 0x6060 #define PCI_DEVICE_ID_RDC_R6061 0x6061 #define PCI_DEVICE_ID_RDC_D1010 0x1010 #define PCI_VENDOR_ID_GLI 0x17a0 #define PCI_VENDOR_ID_LENOVO 0x17aa #define PCI_VENDOR_ID_QCOM 0x17cb #define PCI_VENDOR_ID_CDNS 0x17cd #define PCI_VENDOR_ID_ARECA 0x17d3 #define PCI_DEVICE_ID_ARECA_1110 0x1110 #define PCI_DEVICE_ID_ARECA_1120 0x1120 #define PCI_DEVICE_ID_ARECA_1130 0x1130 #define PCI_DEVICE_ID_ARECA_1160 0x1160 #define 
PCI_DEVICE_ID_ARECA_1170 0x1170 #define PCI_DEVICE_ID_ARECA_1200 0x1200 #define PCI_DEVICE_ID_ARECA_1201 0x1201 #define PCI_DEVICE_ID_ARECA_1202 0x1202 #define PCI_DEVICE_ID_ARECA_1210 0x1210 #define PCI_DEVICE_ID_ARECA_1220 0x1220 #define PCI_DEVICE_ID_ARECA_1230 0x1230 #define PCI_DEVICE_ID_ARECA_1260 0x1260 #define PCI_DEVICE_ID_ARECA_1270 0x1270 #define PCI_DEVICE_ID_ARECA_1280 0x1280 #define PCI_DEVICE_ID_ARECA_1380 0x1380 #define PCI_DEVICE_ID_ARECA_1381 0x1381 #define PCI_DEVICE_ID_ARECA_1680 0x1680 #define PCI_DEVICE_ID_ARECA_1681 0x1681 #define PCI_VENDOR_ID_S2IO 0x17d5 #define PCI_DEVICE_ID_S2IO_WIN 0x5731 #define PCI_DEVICE_ID_S2IO_UNI 0x5831 #define PCI_DEVICE_ID_HERC_WIN 0x5732 #define PCI_DEVICE_ID_HERC_UNI 0x5832 #define PCI_VENDOR_ID_SITECOM 0x182d #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 #define PCI_VENDOR_ID_TOPSPIN 0x1867 #define PCI_VENDOR_ID_COMMTECH 0x18f7 #define PCI_VENDOR_ID_SILAN 0x1904 #define PCI_VENDOR_ID_RENESAS 0x1912 #define PCI_DEVICE_ID_RENESAS_SH7781 0x0001 #define PCI_DEVICE_ID_RENESAS_SH7780 0x0002 #define PCI_DEVICE_ID_RENESAS_SH7763 0x0004 #define PCI_DEVICE_ID_RENESAS_SH7785 0x0007 #define PCI_DEVICE_ID_RENESAS_SH7786 0x0010 #define PCI_VENDOR_ID_SOLARFLARE 0x1924 #define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0 0x0703 #define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1 0x6703 #define PCI_DEVICE_ID_SOLARFLARE_SFC4000B 0x0710 #define PCI_VENDOR_ID_TDI 0x192E #define PCI_DEVICE_ID_TDI_EHCI 0x0101 #define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */ #define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */ #define PCI_DEVICE_ID_MPC8308 0xc006 #define PCI_DEVICE_ID_MPC8315E 0x00b4 #define PCI_DEVICE_ID_MPC8315 0x00b5 #define PCI_DEVICE_ID_MPC8314E 0x00b6 #define PCI_DEVICE_ID_MPC8314 0x00b7 #define PCI_DEVICE_ID_MPC8378E 0x00c4 #define PCI_DEVICE_ID_MPC8378 0x00c5 #define PCI_DEVICE_ID_MPC8377E 0x00c6 #define PCI_DEVICE_ID_MPC8377 0x00c7 #define PCI_DEVICE_ID_MPC8548E 0x0012 #define PCI_DEVICE_ID_MPC8548 0x0013 #define PCI_DEVICE_ID_MPC8543E 0x0014 #define PCI_DEVICE_ID_MPC8543 0x0015 #define PCI_DEVICE_ID_MPC8547E 0x0018 #define PCI_DEVICE_ID_MPC8545E 0x0019 #define PCI_DEVICE_ID_MPC8545 0x001a #define PCI_DEVICE_ID_MPC8569E 0x0061 #define PCI_DEVICE_ID_MPC8569 0x0060 #define PCI_DEVICE_ID_MPC8568E 0x0020 #define PCI_DEVICE_ID_MPC8568 0x0021 #define PCI_DEVICE_ID_MPC8567E 0x0022 #define PCI_DEVICE_ID_MPC8567 0x0023 #define PCI_DEVICE_ID_MPC8533E 0x0030 #define PCI_DEVICE_ID_MPC8533 0x0031 #define PCI_DEVICE_ID_MPC8544E 0x0032 #define PCI_DEVICE_ID_MPC8544 0x0033 #define PCI_DEVICE_ID_MPC8572E 0x0040 #define PCI_DEVICE_ID_MPC8572 0x0041 #define PCI_DEVICE_ID_MPC8536E 0x0050 #define PCI_DEVICE_ID_MPC8536 0x0051 #define PCI_DEVICE_ID_P2020E 0x0070 #define PCI_DEVICE_ID_P2020 0x0071 #define PCI_DEVICE_ID_P2010E 0x0078 #define PCI_DEVICE_ID_P2010 0x0079 #define PCI_DEVICE_ID_P1020E 0x0100 #define PCI_DEVICE_ID_P1020 0x0101 #define PCI_DEVICE_ID_P1021E 0x0102 #define PCI_DEVICE_ID_P1021 0x0103 #define PCI_DEVICE_ID_P1011E 0x0108 #define PCI_DEVICE_ID_P1011 0x0109 #define PCI_DEVICE_ID_P1022E 0x0110 #define PCI_DEVICE_ID_P1022 0x0111 #define PCI_DEVICE_ID_P1013E 0x0118 #define PCI_DEVICE_ID_P1013 0x0119 #define PCI_DEVICE_ID_P4080E 0x0400 #define PCI_DEVICE_ID_P4080 0x0401 #define PCI_DEVICE_ID_P4040E 0x0408 #define PCI_DEVICE_ID_P4040 0x0409 #define PCI_DEVICE_ID_P2040E 0x0410 #define PCI_DEVICE_ID_P2040 0x0411 #define PCI_DEVICE_ID_P3041E 0x041E #define PCI_DEVICE_ID_P3041 0x041F #define PCI_DEVICE_ID_P5020E 0x0420 #define PCI_DEVICE_ID_P5020 0x0421 #define 
PCI_DEVICE_ID_P5010E 0x0428 #define PCI_DEVICE_ID_P5010 0x0429 #define PCI_DEVICE_ID_MPC8641 0x7010 #define PCI_DEVICE_ID_MPC8641D 0x7011 #define PCI_DEVICE_ID_MPC8610 0x7018 #define PCI_VENDOR_ID_PASEMI 0x1959 #define PCI_VENDOR_ID_ATTANSIC 0x1969 #define PCI_DEVICE_ID_ATTANSIC_L1 0x1048 #define PCI_DEVICE_ID_ATTANSIC_L2 0x2048 #define PCI_VENDOR_ID_JMICRON 0x197B #define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 #define PCI_DEVICE_ID_JMICRON_JMB362 0x2362 #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 #define PCI_DEVICE_ID_JMICRON_JMB364 0x2364 #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 #define PCI_DEVICE_ID_JMICRON_JMB369 0x2369 #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 #define PCI_DEVICE_ID_JMICRON_JMB385_MS 0x2388 #define PCI_DEVICE_ID_JMICRON_JMB388_SD 0x2391 #define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392 #define PCI_DEVICE_ID_JMICRON_JMB390_MS 0x2393 #define PCI_VENDOR_ID_KORENIX 0x1982 #define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 #define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff #define PCI_VENDOR_ID_HUAWEI 0x19e5 #define PCI_VENDOR_ID_NETRONOME 0x19ee #define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 #define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000 #define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000 #define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003 #define PCI_VENDOR_ID_QMI 0x1a32 #define PCI_VENDOR_ID_AZWAVE 0x1a3b #define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4 #define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4 #define PCI_SUBDEVICE_ID_QEMU 0x1100 #define PCI_VENDOR_ID_ASMEDIA 0x1b21 #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 #define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001 #define PCI_VENDOR_ID_AMAZON 0x1d0f #define PCI_VENDOR_ID_ZHAOXIN 0x1d17 #define PCI_VENDOR_ID_HYGON 0x1d94 #define PCI_VENDOR_ID_HXT 0x1dbf #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 #define PCI_VENDOR_ID_TEHUTI 0x1fc9 #define PCI_DEVICE_ID_TEHUTI_3009 0x3009 #define PCI_DEVICE_ID_TEHUTI_3010 0x3010 #define PCI_DEVICE_ID_TEHUTI_3014 0x3014 #define PCI_VENDOR_ID_SUNIX 0x1fd4 #define PCI_DEVICE_ID_SUNIX_1999 0x1999 #define PCI_VENDOR_ID_HINT 0x3388 #define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013 #define PCI_VENDOR_ID_3DLABS 0x3d3d #define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 #define PCI_VENDOR_ID_NETXEN 0x4040 #define PCI_DEVICE_ID_NX2031_10GXSR 0x0001 #define PCI_DEVICE_ID_NX2031_10GCX4 0x0002 #define PCI_DEVICE_ID_NX2031_4GCU 0x0003 #define PCI_DEVICE_ID_NX2031_IMEZ 0x0004 #define PCI_DEVICE_ID_NX2031_HMEZ 0x0005 #define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024 #define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025 #define PCI_DEVICE_ID_NX3031 0x0100 #define PCI_VENDOR_ID_AKS 0x416c #define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 #define PCI_VENDOR_ID_ACCESSIO 0x494f #define PCI_DEVICE_ID_ACCESSIO_WDG_CSM 0x22c0 #define PCI_VENDOR_ID_S3 0x5333 #define PCI_DEVICE_ID_S3_TRIO 0x8811 #define PCI_DEVICE_ID_S3_868 0x8880 #define PCI_DEVICE_ID_S3_968 0x88f0 #define PCI_DEVICE_ID_S3_SAVAGE4 0x8a25 #define PCI_DEVICE_ID_S3_PROSAVAGE8 0x8d04 #define PCI_DEVICE_ID_S3_SONICVIBES 0xca00 #define PCI_VENDOR_ID_DUNORD 0x5544 #define PCI_DEVICE_ID_DUNORD_I3000 0x0001 #define PCI_VENDOR_ID_DCI 0x6666 #define 
PCI_DEVICE_ID_DCI_PCCOM4 0x0001 #define PCI_DEVICE_ID_DCI_PCCOM8 0x0002 #define PCI_DEVICE_ID_DCI_PCCOM2 0x0004 #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_DEVICE_ID_INTEL_EESSC 0x0008 #define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 #define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 #define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 #define PCI_DEVICE_ID_INTEL_PXH_1 0x032A #define PCI_DEVICE_ID_INTEL_PXHV 0x032C #define PCI_DEVICE_ID_INTEL_80332_0 0x0330 #define PCI_DEVICE_ID_INTEL_80332_1 0x0332 #define PCI_DEVICE_ID_INTEL_80333_0 0x0370 #define PCI_DEVICE_ID_INTEL_80333_1 0x0372 #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 #define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807 #define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808 #define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820 #define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821 #define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822 #define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823 #define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824 #define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F #define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E #define PCI_DEVICE_ID_INTEL_I960 0x0960 #define PCI_DEVICE_ID_INTEL_I960RM 0x0962 #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60 #define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062 #define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085 #define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 #define PCI_DEVICE_ID_INTEL_7205_0 0x255d #define PCI_DEVICE_ID_INTEL_82437 0x122d #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e #define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230 #define PCI_DEVICE_ID_INTEL_82371MX 0x1234 #define PCI_DEVICE_ID_INTEL_82441 0x1237 #define PCI_DEVICE_ID_INTEL_82380FB 0x124b #define PCI_DEVICE_ID_INTEL_82439 0x1250 #define PCI_DEVICE_ID_INTEL_LIGHT_RIDGE 0x1513 /* Tbt 1 Gen 1 */ #define PCI_DEVICE_ID_INTEL_EAGLE_RIDGE 0x151a #define PCI_DEVICE_ID_INTEL_LIGHT_PEAK 0x151b #define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C 0x1547 /* Tbt 1 Gen 2 */ #define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C 0x1548 #define PCI_DEVICE_ID_INTEL_PORT_RIDGE 0x1549 #define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_NHI 0x1566 /* Tbt 1 Gen 3 */ #define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE 0x1567 #define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_NHI 0x1568 #define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE 0x1569 #define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI 0x156a /* Thunderbolt 2 */ #define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE 0x156b #define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI 0x156c #define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE 0x156d #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI 0x1575 /* Thunderbolt 3 */ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f #define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d #define 
PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 #define PCI_DEVICE_ID_INTEL_82801AA_5 0x2415 #define PCI_DEVICE_ID_INTEL_82801AA_6 0x2416 #define PCI_DEVICE_ID_INTEL_82801AA_8 0x2418 #define PCI_DEVICE_ID_INTEL_82801AB_0 0x2420 #define PCI_DEVICE_ID_INTEL_82801AB_1 0x2421 #define PCI_DEVICE_ID_INTEL_82801AB_3 0x2423 #define PCI_DEVICE_ID_INTEL_82801AB_5 0x2425 #define PCI_DEVICE_ID_INTEL_82801AB_6 0x2426 #define PCI_DEVICE_ID_INTEL_82801AB_8 0x2428 #define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440 #define PCI_DEVICE_ID_INTEL_82801BA_2 0x2443 #define PCI_DEVICE_ID_INTEL_82801BA_4 0x2445 #define PCI_DEVICE_ID_INTEL_82801BA_6 0x2448 #define PCI_DEVICE_ID_INTEL_82801BA_8 0x244a #define PCI_DEVICE_ID_INTEL_82801BA_9 0x244b #define PCI_DEVICE_ID_INTEL_82801BA_10 0x244c #define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e #define PCI_DEVICE_ID_INTEL_82801E_0 0x2450 #define PCI_DEVICE_ID_INTEL_82801E_11 0x245b #define PCI_DEVICE_ID_INTEL_82801CA_0 0x2480 #define PCI_DEVICE_ID_INTEL_82801CA_3 0x2483 #define PCI_DEVICE_ID_INTEL_82801CA_5 0x2485 #define PCI_DEVICE_ID_INTEL_82801CA_6 0x2486 #define PCI_DEVICE_ID_INTEL_82801CA_10 0x248a #define PCI_DEVICE_ID_INTEL_82801CA_11 0x248b #define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c #define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 #define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 #define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2 #define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 #define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 #define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 #define PCI_DEVICE_ID_INTEL_82801DB_9 0x24c9 #define PCI_DEVICE_ID_INTEL_82801DB_10 0x24ca #define PCI_DEVICE_ID_INTEL_82801DB_11 0x24cb #define PCI_DEVICE_ID_INTEL_82801DB_12 0x24cc #define PCI_DEVICE_ID_INTEL_82801EB_0 0x24d0 #define PCI_DEVICE_ID_INTEL_82801EB_1 0x24d1 #define PCI_DEVICE_ID_INTEL_82801EB_3 0x24d3 #define PCI_DEVICE_ID_INTEL_82801EB_5 0x24d5 #define PCI_DEVICE_ID_INTEL_82801EB_6 0x24d6 #define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db #define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc #define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd #define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1 #define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2 #define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4 #define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6 #define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab #define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac #define PCI_DEVICE_ID_INTEL_82820_HB 0x2500 #define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 #define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 #define PCI_DEVICE_ID_INTEL_82860_HB 0x2531 #define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c #define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 #define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562 #define PCI_DEVICE_ID_INTEL_82865_HB 0x2570 #define PCI_DEVICE_ID_INTEL_82865_IG 0x2572 #define PCI_DEVICE_ID_INTEL_82875_HB 0x2578 #define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580 #define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 #define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590 #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 #define PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0 #define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5 #define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6 #define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770 #define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 #define PCI_DEVICE_ID_INTEL_3000_HB 0x2778 #define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0 #define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 #define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 #define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641 #define 
PCI_DEVICE_ID_INTEL_ICH6_2 0x2642 #define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a #define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d #define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e #define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f #define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670 #define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698 #define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b #define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e #define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8 #define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9 #define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0 #define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc #define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd #define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da #define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd #define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de #define PCI_DEVICE_ID_INTEL_ICH7_21 0x27df #define PCI_DEVICE_ID_INTEL_ICH8_0 0x2810 #define PCI_DEVICE_ID_INTEL_ICH8_1 0x2811 #define PCI_DEVICE_ID_INTEL_ICH8_2 0x2812 #define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814 #define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 #define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e #define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 #define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0 #define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910 #define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917 #define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912 #define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913 #define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914 #define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919 #define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930 #define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 #define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 #define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18 #define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19 #define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a #define PCI_DEVICE_ID_INTEL_I7_MC_TEST 0x2c1c #define PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL 0x2c20 #define PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR 0x2c21 #define PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK 0x2c22 #define PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC 0x2c23 #define PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL 0x2c28 #define PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR 0x2c29 #define PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK 0x2c2a #define PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC 0x2c2b #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL 0x2c30 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33 #define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41 #define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_SAD 0x2c81 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0 0x2c90 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC 0x2ca3 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL 0x2ca8 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR 0x2ca9 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK 0x2caa #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC 0x2cab #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2 0x2d98 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2 0x2d99 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2 0x2d9a #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2 0x2d9c #define 
PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2 0x2da0 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2 0x2da1 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2 0x2da2 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2 0x2da3 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2 0x2da8 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2 0x2da9 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2 0x2daa #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2 0x2dab #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2 0x2db0 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 #define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a #define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b #define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c #define PCI_DEVICE_ID_INTEL_X58_HUB_MGMT 0x342e #define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430 #define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431 #define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432 #define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 #define PCI_DEVICE_ID_INTEL_82854_HB 0x358c #define PCI_DEVICE_ID_INTEL_82854_IG 0x358e #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 #define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582 #define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590 #define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592 #define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595 #define PCI_DEVICE_ID_INTEL_MCH_PA1 0x3596 #define PCI_DEVICE_ID_INTEL_MCH_PB 0x3597 #define PCI_DEVICE_ID_INTEL_MCH_PB1 0x3598 #define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 #define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e #define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c #define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f #define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610 #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c #define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 #define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711 #define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712 #define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713 #define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714 #define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715 #define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716 #define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 #define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 #define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 #define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a #define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f #define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20 #define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21 #define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22 #define PCI_DEVICE_ID_INTEL_IOAT_SNB3 0x3c23 #define PCI_DEVICE_ID_INTEL_IOAT_SNB4 0x3c24 #define PCI_DEVICE_ID_INTEL_IOAT_SNB5 0x3c25 #define PCI_DEVICE_ID_INTEL_IOAT_SNB6 0x3c26 #define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27 #define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e #define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f #define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46 #define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0 #define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1 #define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4 #define 
PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5 #define PCI_DEVICE_ID_INTEL_UNC_QPI0 0x3c41 #define PCI_DEVICE_ID_INTEL_UNC_QPI1 0x3c42 #define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43 #define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44 #define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */ #define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0 #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */ #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */ #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f #define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 #define PCI_DEVICE_ID_INTEL_5100_19 0x65f3 #define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 #define PCI_DEVICE_ID_INTEL_5100_22 0x65f6 #define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 #define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 #define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 #define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff #define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031 #define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032 #define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 #define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 #define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020 #define PCI_DEVICE_ID_INTEL_82437VX 0x7030 #define PCI_DEVICE_ID_INTEL_82439TX 0x7100 #define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110 #define PCI_DEVICE_ID_INTEL_82371AB 0x7111 #define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112 #define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113 #define PCI_DEVICE_ID_INTEL_82810_MC1 0x7120 #define PCI_DEVICE_ID_INTEL_82810_IG1 0x7121 #define PCI_DEVICE_ID_INTEL_82810_MC3 0x7122 #define PCI_DEVICE_ID_INTEL_82810_IG3 0x7123 #define PCI_DEVICE_ID_INTEL_82810E_MC 0x7124 #define PCI_DEVICE_ID_INTEL_82810E_IG 0x7125 #define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180 #define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181 #define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190 #define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191 #define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 #define PCI_DEVICE_ID_INTEL_440MX 0x7195 #define PCI_DEVICE_ID_INTEL_440MX_6 0x7196 #define PCI_DEVICE_ID_INTEL_82443MX_0 0x7198 #define PCI_DEVICE_ID_INTEL_82443MX_1 0x7199 #define PCI_DEVICE_ID_INTEL_82443MX_3 0x719b #define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 #define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2 #define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601 #define PCI_DEVICE_ID_INTEL_HDA_ARL 0x7728 #define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119 #define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a #define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183 #define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186 #define PCI_DEVICE_ID_INTEL_82454GX 0x84c4 #define PCI_DEVICE_ID_INTEL_82450GX 0x84c5 #define PCI_DEVICE_ID_INTEL_82451NX 0x84ca #define PCI_DEVICE_ID_INTEL_82454NX 0x84cb #define PCI_DEVICE_ID_INTEL_84460GX 0x84ea #define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500 #define PCI_DEVICE_ID_INTEL_IXP2800 0x9004 #define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b #define 
PCI_DEVICE_ID_INTEL_S21152BB 0xb152 #define PCI_VENDOR_ID_WANGXUN 0x8088 #define PCI_VENDOR_ID_SCALEMP 0x8686 #define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010 #define PCI_VENDOR_ID_COMPUTONE 0x8e0e #define PCI_DEVICE_ID_COMPUTONE_PG 0x0302 #define PCI_SUBVENDOR_ID_COMPUTONE 0x8e0e #define PCI_SUBDEVICE_ID_COMPUTONE_PG4 0x0001 #define PCI_SUBDEVICE_ID_COMPUTONE_PG8 0x0002 #define PCI_SUBDEVICE_ID_COMPUTONE_PG6 0x0003 #define PCI_VENDOR_ID_KTI 0x8e2e #define PCI_VENDOR_ID_ADAPTEC 0x9004 #define PCI_DEVICE_ID_ADAPTEC_7810 0x1078 #define PCI_DEVICE_ID_ADAPTEC_7821 0x2178 #define PCI_DEVICE_ID_ADAPTEC_38602 0x3860 #define PCI_DEVICE_ID_ADAPTEC_7850 0x5078 #define PCI_DEVICE_ID_ADAPTEC_7855 0x5578 #define PCI_DEVICE_ID_ADAPTEC_3860 0x6038 #define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075 #define PCI_DEVICE_ID_ADAPTEC_7860 0x6078 #define PCI_DEVICE_ID_ADAPTEC_7861 0x6178 #define PCI_DEVICE_ID_ADAPTEC_7870 0x7078 #define PCI_DEVICE_ID_ADAPTEC_7871 0x7178 #define PCI_DEVICE_ID_ADAPTEC_7872 0x7278 #define PCI_DEVICE_ID_ADAPTEC_7873 0x7378 #define PCI_DEVICE_ID_ADAPTEC_7874 0x7478 #define PCI_DEVICE_ID_ADAPTEC_7895 0x7895 #define PCI_DEVICE_ID_ADAPTEC_7880 0x8078 #define PCI_DEVICE_ID_ADAPTEC_7881 0x8178 #define PCI_DEVICE_ID_ADAPTEC_7882 0x8278 #define PCI_DEVICE_ID_ADAPTEC_7883 0x8378 #define PCI_DEVICE_ID_ADAPTEC_7884 0x8478 #define PCI_DEVICE_ID_ADAPTEC_7885 0x8578 #define PCI_DEVICE_ID_ADAPTEC_7886 0x8678 #define PCI_DEVICE_ID_ADAPTEC_7887 0x8778 #define PCI_DEVICE_ID_ADAPTEC_7888 0x8878 #define PCI_VENDOR_ID_ADAPTEC2 0x9005 #define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010 #define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011 #define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013 #define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f #define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050 #define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051 #define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f #define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080 #define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081 #define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083 #define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f #define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0 #define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1 #define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3 #define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf #define PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN 0x0500 #define PCI_DEVICE_ID_ADAPTEC2_SCAMP 0x0503 #define PCI_VENDOR_ID_HOLTEK 0x9412 #define PCI_DEVICE_ID_HOLTEK_6565 0x6565 #define PCI_VENDOR_ID_NETMOS 0x9710 #define PCI_DEVICE_ID_NETMOS_9705 0x9705 #define PCI_DEVICE_ID_NETMOS_9715 0x9715 #define PCI_DEVICE_ID_NETMOS_9735 0x9735 #define PCI_DEVICE_ID_NETMOS_9745 0x9745 #define PCI_DEVICE_ID_NETMOS_9755 0x9755 #define PCI_DEVICE_ID_NETMOS_9805 0x9805 #define PCI_DEVICE_ID_NETMOS_9815 0x9815 #define PCI_DEVICE_ID_NETMOS_9835 0x9835 #define PCI_DEVICE_ID_NETMOS_9845 0x9845 #define PCI_DEVICE_ID_NETMOS_9855 0x9855 #define PCI_DEVICE_ID_NETMOS_9865 0x9865 #define PCI_DEVICE_ID_NETMOS_9900 0x9900 #define PCI_DEVICE_ID_NETMOS_9901 0x9901 #define PCI_DEVICE_ID_NETMOS_9904 0x9904 #define PCI_DEVICE_ID_NETMOS_9912 0x9912 #define PCI_DEVICE_ID_NETMOS_9922 0x9922 #define PCI_VENDOR_ID_3COM_2 0xa727 #define PCI_VENDOR_ID_SOLIDRUN 0xd063 #define PCI_VENDOR_ID_DIGIUM 0xd161 #define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410 #define PCI_SUBVENDOR_ID_EXSYS 0xd84d #define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014 #define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055 #define PCI_VENDOR_ID_TIGERJET 0xe159 #define PCI_DEVICE_ID_TIGERJET_300 0x0001 #define PCI_DEVICE_ID_TIGERJET_100 0x0002 #define PCI_VENDOR_ID_XILINX_RME 0xea60 #define PCI_DEVICE_ID_RME_DIGI32 0x9896 #define 
PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897 #define PCI_DEVICE_ID_RME_DIGI32_8 0x9898 #define PCI_VENDOR_ID_XEN 0x5853 #define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 #define PCI_VENDOR_ID_OCZ 0x1b85 #define PCI_VENDOR_ID_NCUBE 0x10ff #endif /* _LINUX_PCI_IDS_H */ zutil.h 0000644 00000005351 14722070374 0006076 0 ustar 00 /* zutil.h -- internal interface and configuration of the compression library * Copyright (C) 1995-1998 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ /* @(#) $Id: zutil.h,v 1.1 2000/01/01 03:32:23 davem Exp $ */ #ifndef _Z_UTIL_H #define _Z_UTIL_H #include <linux/zlib.h> #include <linux/string.h> #include <linux/kernel.h> typedef unsigned char uch; typedef unsigned short ush; typedef unsigned long ulg; /* common constants */ #define STORED_BLOCK 0 #define STATIC_TREES 1 #define DYN_TREES 2 /* The three kinds of block type */ #define MIN_MATCH 3 #define MAX_MATCH 258 /* The minimum and maximum match lengths */ #define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */ /* target dependencies */ /* Common defaults */ #ifndef OS_CODE # define OS_CODE 0x03 /* assume Unix */ #endif /* functions */ typedef uLong (*check_func) (uLong check, const Byte *buf, uInt len); /* checksum functions */ #define BASE 65521L /* largest prime smaller than 65536 */ #define NMAX 5552 /* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ #define DO1(buf,i) {s1 += buf[i]; s2 += s1;} #define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); #define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); #define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); #define DO16(buf) DO8(buf,0); DO8(buf,8); /* ========================================================================= */ /* Update a running Adler-32 checksum with the bytes buf[0..len-1] and return the updated checksum. If buf is NULL, this function returns the required initial value for the checksum. An Adler-32 checksum is almost as reliable as a CRC32 but can be computed much faster. Usage example: uLong adler = zlib_adler32(0L, NULL, 0); while (read_buffer(buffer, length) != EOF) { adler = zlib_adler32(adler, buffer, length); } if (adler != original_adler) error(); */ static inline uLong zlib_adler32(uLong adler, const Byte *buf, uInt len) { unsigned long s1 = adler & 0xffff; unsigned long s2 = (adler >> 16) & 0xffff; int k; if (buf == NULL) return 1L; while (len > 0) { k = len < NMAX ? len : NMAX; len -= k; while (k >= 16) { DO16(buf); buf += 16; k -= 16; } if (k != 0) do { s1 += *buf++; s2 += s1; } while (--k); s1 %= BASE; s2 %= BASE; } return (s2 << 16) | s1; } #endif /* _Z_UTIL_H */ coresight.h 0000644 00000024310 14722070374 0006712 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. 
*/

#ifndef _LINUX_CORESIGHT_H
#define _LINUX_CORESIGHT_H

#include <linux/device.h>
#include <linux/perf_event.h>
#include <linux/sched.h>

/* Peripheral id registers (0xFD0-0xFEC) */
#define CORESIGHT_PERIPHIDR4	0xfd0
#define CORESIGHT_PERIPHIDR5	0xfd4
#define CORESIGHT_PERIPHIDR6	0xfd8
#define CORESIGHT_PERIPHIDR7	0xfdC
#define CORESIGHT_PERIPHIDR0	0xfe0
#define CORESIGHT_PERIPHIDR1	0xfe4
#define CORESIGHT_PERIPHIDR2	0xfe8
#define CORESIGHT_PERIPHIDR3	0xfeC
/* Component id registers (0xFF0-0xFFC) */
#define CORESIGHT_COMPIDR0	0xff0
#define CORESIGHT_COMPIDR1	0xff4
#define CORESIGHT_COMPIDR2	0xff8
#define CORESIGHT_COMPIDR3	0xffC

#define ETM_ARCH_V3_3		0x23
#define ETM_ARCH_V3_5		0x25
#define PFT_ARCH_V1_0		0x30
#define PFT_ARCH_V1_1		0x31

#define CORESIGHT_UNLOCK	0xc5acce55

extern struct bus_type coresight_bustype;

enum coresight_dev_type {
	CORESIGHT_DEV_TYPE_NONE,
	CORESIGHT_DEV_TYPE_SINK,
	CORESIGHT_DEV_TYPE_LINK,
	CORESIGHT_DEV_TYPE_LINKSINK,
	CORESIGHT_DEV_TYPE_SOURCE,
	CORESIGHT_DEV_TYPE_HELPER,
};

enum coresight_dev_subtype_sink {
	CORESIGHT_DEV_SUBTYPE_SINK_NONE,
	CORESIGHT_DEV_SUBTYPE_SINK_PORT,
	CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
};

enum coresight_dev_subtype_link {
	CORESIGHT_DEV_SUBTYPE_LINK_NONE,
	CORESIGHT_DEV_SUBTYPE_LINK_MERG,
	CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
	CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
};

enum coresight_dev_subtype_source {
	CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
	CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
	CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
	CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
};

enum coresight_dev_subtype_helper {
	CORESIGHT_DEV_SUBTYPE_HELPER_NONE,
	CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
};

/**
 * union coresight_dev_subtype - further characterisation of a type
 * @sink_subtype: type of sink this component is, as defined
 * by @coresight_dev_subtype_sink.
 * @link_subtype: type of link this component is, as defined
 * by @coresight_dev_subtype_link.
 * @source_subtype: type of source this component is, as defined
 * by @coresight_dev_subtype_source.
 * @helper_subtype: type of helper this component is, as defined
 * by @coresight_dev_subtype_helper.
 */
union coresight_dev_subtype {
	/* We have some devices which act as LINK and SINK */
	struct {
		enum coresight_dev_subtype_sink sink_subtype;
		enum coresight_dev_subtype_link link_subtype;
	};
	enum coresight_dev_subtype_source source_subtype;
	enum coresight_dev_subtype_helper helper_subtype;
};

/**
 * struct coresight_platform_data - data harvested from the DT specification
 * @nr_inport: number of input ports for this component.
 * @nr_outport: number of output ports for this component.
 * @conns: Array of nr_outport connections from this component
 */
struct coresight_platform_data {
	int nr_inport;
	int nr_outport;
	struct coresight_connection *conns;
};

/**
 * struct coresight_desc - description of a component required from drivers
 * @type: as defined by @coresight_dev_type.
 * @subtype: as defined by @coresight_dev_subtype.
 * @ops: generic operations for this component, as defined
 * by @coresight_ops.
 * @pdata: platform data collected from DT.
 * @dev: The device entity associated to this component.
 * @groups: operations specific to this component. These will end up
 * in the component's sysfs sub-directory.
 * @name: name for the coresight device, also shown under sysfs.
 */
struct coresight_desc {
	enum coresight_dev_type type;
	union coresight_dev_subtype subtype;
	const struct coresight_ops *ops;
	struct coresight_platform_data *pdata;
	struct device *dev;
	const struct attribute_group **groups;
	const char *name;
};

/**
 * struct coresight_connection - representation of a single connection
 * @outport: a connection's output port number.
 * @child_port: remote component's port number @outport is connected to.
 * @child_fwnode: remote component's fwnode handle.
 * @child_dev: a @coresight_device representation of the component
 * connected to @outport.
 */
struct coresight_connection {
	int outport;
	int child_port;
	struct fwnode_handle *child_fwnode;
	struct coresight_device *child_dev;
};

/**
 * struct coresight_device - representation of a device as used by the framework
 * @pdata: Platform data with device connections associated to this device.
 * @type: as defined by @coresight_dev_type.
 * @subtype: as defined by @coresight_dev_subtype.
 * @ops: generic operations for this component, as defined by @coresight_ops.
 * @dev: The device entity associated to this component.
 * @refcnt: keep track of what is in use.
 * @orphan: true if the component has connections that haven't been linked.
 * @enable: 'true' if component is currently part of an active path.
 * @activated: 'true' only if a _sink_ has been activated. A sink can be
 * activated but not yet enabled. Enabling for a _sink_
 * happens when a source has been selected for it.
 * @ea: Device attribute for sink representation under PMU directory.
 */
struct coresight_device {
	struct coresight_platform_data *pdata;
	enum coresight_dev_type type;
	union coresight_dev_subtype subtype;
	const struct coresight_ops *ops;
	struct device dev;
	atomic_t *refcnt;
	bool orphan;
	bool enable;	/* true only if configured as part of a path */
	/* sink specific fields */
	bool activated;	/* true only if a sink is part of a path */
	struct dev_ext_attribute *ea;
};

/*
 * coresight_dev_list - Mapping for devices to "name" index for device
 * names.
 *
 * @nr_idx: Number of entries already allocated.
 * @pfx: Prefix pattern for device name.
 * @fwnode_list: Array of fwnode_handles associated with each allocated
 * index, up to nr_idx entries.
 */
struct coresight_dev_list {
	int nr_idx;
	const char *pfx;
	struct fwnode_handle **fwnode_list;
};

#define DEFINE_CORESIGHT_DEVLIST(var, dev_pfx)	\
static struct coresight_dev_list (var) = {	\
	.pfx = dev_pfx,				\
	.nr_idx = 0,				\
	.fwnode_list = NULL,			\
}

#define to_coresight_device(d) container_of(d, struct coresight_device, dev)

#define source_ops(csdev)	csdev->ops->source_ops
#define sink_ops(csdev)		csdev->ops->sink_ops
#define link_ops(csdev)		csdev->ops->link_ops
#define helper_ops(csdev)	csdev->ops->helper_ops
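/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): DEFINE_CORESIGHT_DEVLIST() declares a static name-allocation
 * list whose entries back coresight_alloc_device_name(), handing out
 * device names of the form "<pfx><index>". The identifier "example_devs"
 * and the "example" prefix below are hypothetical placeholders.
 */
DEFINE_CORESIGHT_DEVLIST(example_devs, "example");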
/**
 * struct coresight_ops_sink - basic operations for a sink
 * Operations available for sinks
 * @enable: enables the sink.
 * @disable: disables the sink.
 * @alloc_buffer: initialises perf's ring buffer for trace collection.
 * @free_buffer: release memory allocated in @alloc_buffer.
 * @update_buffer: update buffer pointers after a trace session.
 */
struct coresight_ops_sink {
	int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
	int (*disable)(struct coresight_device *csdev);
	void *(*alloc_buffer)(struct coresight_device *csdev,
			      struct perf_event *event, void **pages,
			      int nr_pages, bool overwrite);
	void (*free_buffer)(void *config);
	unsigned long (*update_buffer)(struct coresight_device *csdev,
				       struct perf_output_handle *handle,
				       void *sink_config);
};

/**
 * struct coresight_ops_link - basic operations for a link
 * Operations available for links.
 * @enable: enables flow between iport and oport.
 * @disable: disables flow between iport and oport.
 */
struct coresight_ops_link {
	int (*enable)(struct coresight_device *csdev, int iport, int oport);
	void (*disable)(struct coresight_device *csdev, int iport, int oport);
};

/**
 * struct coresight_ops_source - basic operations for a source
 * Operations available for sources.
 * @cpu_id: returns the value of the CPU number this component
 * is associated with.
 * @trace_id: returns the value of the component's trace ID as known
 * to the HW.
 * @enable: enables tracing for a source.
 * @disable: disables tracing for a source.
 */
struct coresight_ops_source {
	int (*cpu_id)(struct coresight_device *csdev);
	int (*trace_id)(struct coresight_device *csdev);
	int (*enable)(struct coresight_device *csdev,
		      struct perf_event *event, u32 mode);
	void (*disable)(struct coresight_device *csdev,
			struct perf_event *event);
};

/**
 * struct coresight_ops_helper - Operations for a helper device.
 *
 * All operations may pass in device-specific data, which can help the
 * helper device determine what to do.
 *
 * @enable : Enable the device
 * @disable : Disable the device
 */
struct coresight_ops_helper {
	int (*enable)(struct coresight_device *csdev, void *data);
	int (*disable)(struct coresight_device *csdev, void *data);
};

struct coresight_ops {
	const struct coresight_ops_sink *sink_ops;
	const struct coresight_ops_link *link_ops;
	const struct coresight_ops_source *source_ops;
	const struct coresight_ops_helper *helper_ops;
};

#ifdef CONFIG_CORESIGHT
extern struct coresight_device *
coresight_register(struct coresight_desc *desc);
extern void coresight_unregister(struct coresight_device *csdev);
extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
extern int coresight_timeout(void __iomem *addr, u32 offset,
			     int position, int value);
extern int coresight_claim_device(void __iomem *base);
extern int coresight_claim_device_unlocked(void __iomem *base);
extern void coresight_disclaim_device(void __iomem *base);
extern void coresight_disclaim_device_unlocked(void __iomem *base);
extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
					 struct device *dev);
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
static inline void coresight_unregister(struct coresight_device *csdev) {}
static inline int
coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
static inline int coresight_timeout(void __iomem *addr, u32 offset,
				    int position, int value) { return 1; }
static inline int coresight_claim_device_unlocked(void __iomem *base)
{
	return -EINVAL;
}
static inline int coresight_claim_device(void __iomem *base)
{
	return -EINVAL;
}
static inline void coresight_disclaim_device(void __iomem *base) {}
static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
#endif
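/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): a minimal driver-side registration of a sink component using
 * the coresight_desc/coresight_register() interface declared above. The
 * symbols example_sink_ops and example_groups are hypothetical placeholders
 * for a real driver's ops table and sysfs attribute groups.
 */
static inline struct coresight_device *
example_register_sink(struct device *dev,
		      struct coresight_platform_data *pdata,
		      const struct coresight_ops *example_sink_ops,
		      const struct attribute_group **example_groups)
{
	struct coresight_desc desc = { 0 };

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
	desc.ops = example_sink_ops;
	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = example_groups;
	desc.name = "example_sink";

	/* NULL (or an error pointer) means registration failed. */
	return coresight_register(&desc);
}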
extern int coresight_get_cpu(struct device *dev); struct coresight_platform_data *coresight_get_platform_data(struct device *dev); #endif icmp.h 0000644 00000001214 14722070374 0005651 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the ICMP protocol. * * Version: @(#)icmp.h 1.0.3 04/28/93 * * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> */ #ifndef _LINUX_ICMP_H #define _LINUX_ICMP_H #include <linux/skbuff.h> #include <uapi/linux/icmp.h> static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb) { return (struct icmphdr *)skb_transport_header(skb); } #endif /* _LINUX_ICMP_H */ qed/qed_rdma_if.h 0000644 00000045271 14722070374 0007737 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifndef _QED_RDMA_IF_H #define _QED_RDMA_IF_H #include <linux/types.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/qed/qed_if.h> #include <linux/qed/qed_ll2_if.h> #include <linux/qed/rdma_common.h> #define QED_RDMA_MAX_CNQ_SIZE (0xFFFF) /* rdma interface */ enum qed_roce_qp_state { QED_ROCE_QP_STATE_RESET, QED_ROCE_QP_STATE_INIT, QED_ROCE_QP_STATE_RTR, QED_ROCE_QP_STATE_RTS, QED_ROCE_QP_STATE_SQD, QED_ROCE_QP_STATE_ERR, QED_ROCE_QP_STATE_SQE }; enum qed_rdma_tid_type { QED_RDMA_TID_REGISTERED_MR, QED_RDMA_TID_FMR, QED_RDMA_TID_MW }; struct qed_rdma_events { void *context; void (*affiliated_event)(void *context, u8 fw_event_code, void *fw_handle); void (*unaffiliated_event)(void *context, u8 event_code); }; struct qed_rdma_device { u32 vendor_id; u32 vendor_part_id; u32 hw_ver; u64 fw_ver; u64 node_guid; u64 sys_image_guid; u8 max_cnq; u8 max_sge; u8 max_srq_sge; u16 max_inline; u32 max_wqe; u32 max_srq_wqe; u8 max_qp_resp_rd_atomic_resc; u8 max_qp_req_rd_atomic_resc; u64 max_dev_resp_rd_atomic_resc; u32 max_cq; u32 max_qp; u32 max_srq; u32 max_mr; u64 max_mr_size; u32 max_cqe; u32 max_mw; u32 max_fmr; u32 max_mr_mw_fmr_pbl; u64 max_mr_mw_fmr_size; u32 max_pd; u32 max_ah; u8 max_pkey; u16 max_srq_wr; u8 max_stats_queues; u32 dev_caps; /* Ability to support RNR-NAK generation */ #define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1 #define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0 /* Ability to support shutdown port */ #define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1 #define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1 /* Ability to support port active event */ #define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1 #define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2 /* Ability to support port change event */ #define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1 #define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3 /* Ability to support system image GUID */ #define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1 #define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4 /* Ability to support a bad P_Key counter */ #define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1 #define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5 /* Ability to support atomic operations */ #define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1 #define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6 #define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1 #define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7 /* Ability to support modifying the maximum number of * outstanding work requests per QP */ #define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1 #define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8 /* Ability to support automatic path migration */ #define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1 #define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9 /* Ability to support the base memory management extensions */ #define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1 #define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10 #define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1 #define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11 /* Ability to support multiple page sizes per memory region */ #define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1 #define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12 /* Ability to support block list physical buffer list */ #define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1 #define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13 /* Ability to support zero based virtual addresses */ #define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1 #define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14 /* Ability to support local invalidate fencing */ #define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1 #define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15 /* Ability to support 
Loopback on QP */ #define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1 #define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16 u64 page_size_caps; u8 dev_ack_delay; u32 reserved_lkey; u32 bad_pkey_counter; struct qed_rdma_events events; }; enum qed_port_state { QED_RDMA_PORT_UP, QED_RDMA_PORT_DOWN, }; enum qed_roce_capability { QED_ROCE_V1 = 1 << 0, QED_ROCE_V2 = 1 << 1, }; struct qed_rdma_port { enum qed_port_state port_state; int link_speed; u64 max_msg_size; u8 source_gid_table_len; void *source_gid_table_ptr; u8 pkey_table_len; void *pkey_table_ptr; u32 pkey_bad_counter; enum qed_roce_capability capability; }; struct qed_rdma_cnq_params { u8 num_pbl_pages; u64 pbl_ptr; }; /* The CQ Mode affects the CQ doorbell transaction size. * 64/32 bit machines should configure to 32/16 bits respectively. */ enum qed_rdma_cq_mode { QED_RDMA_CQ_MODE_16_BITS, QED_RDMA_CQ_MODE_32_BITS, }; struct qed_roce_dcqcn_params { u8 notification_point; u8 reaction_point; /* fields for notification point */ u32 cnp_send_timeout; /* fields for reaction point */ u32 rl_bc_rate; u16 rl_max_rate; u16 rl_r_ai; u16 rl_r_hai; u16 dcqcn_g; u32 dcqcn_k_us; u32 dcqcn_timeout_us; }; struct qed_rdma_start_in_params { struct qed_rdma_events *events; struct qed_rdma_cnq_params cnq_pbl_list[128]; u8 desired_cnq; enum qed_rdma_cq_mode cq_mode; struct qed_roce_dcqcn_params dcqcn_params; u16 max_mtu; u8 mac_addr[ETH_ALEN]; u8 iwarp_flags; }; struct qed_rdma_add_user_out_params { u16 dpi; void __iomem *dpi_addr; u64 dpi_phys_addr; u32 dpi_size; u16 wid_count; }; enum roce_mode { ROCE_V1, ROCE_V2_IPV4, ROCE_V2_IPV6, MAX_ROCE_MODE }; union qed_gid { u8 bytes[16]; u16 words[8]; u32 dwords[4]; u64 qwords[2]; u32 ipv4_addr; }; struct qed_rdma_register_tid_in_params { u32 itid; enum qed_rdma_tid_type tid_type; u8 key; u16 pd; bool local_read; bool local_write; bool remote_read; bool remote_write; bool remote_atomic; bool mw_bind; u64 pbl_ptr; bool pbl_two_level; u8 pbl_page_size_log; u8 page_size_log; u32 fbo; u64 length; u64 vaddr; bool zbva; bool phy_mr; bool dma_mr; bool dif_enabled; u64 dif_error_addr; }; struct qed_rdma_create_cq_in_params { u32 cq_handle_lo; u32 cq_handle_hi; u32 cq_size; u16 dpi; bool pbl_two_level; u64 pbl_ptr; u16 pbl_num_pages; u8 pbl_page_size_log; u8 cnq_id; u16 int_timeout; }; struct qed_rdma_create_srq_in_params { u64 pbl_base_addr; u64 prod_pair_addr; u16 num_pages; u16 pd_id; u16 page_size; }; struct qed_rdma_destroy_cq_in_params { u16 icid; }; struct qed_rdma_destroy_cq_out_params { u16 num_cq_notif; }; struct qed_rdma_create_qp_in_params { u32 qp_handle_lo; u32 qp_handle_hi; u32 qp_handle_async_lo; u32 qp_handle_async_hi; bool use_srq; bool signal_all; bool fmr_and_reserved_lkey; u16 pd; u16 dpi; u16 sq_cq_id; u16 sq_num_pages; u64 sq_pbl_ptr; u8 max_sq_sges; u16 rq_cq_id; u16 rq_num_pages; u64 rq_pbl_ptr; u16 srq_id; u8 stats_queue; }; struct qed_rdma_create_qp_out_params { u32 qp_id; u16 icid; void *rq_pbl_virt; dma_addr_t rq_pbl_phys; void *sq_pbl_virt; dma_addr_t sq_pbl_phys; }; struct qed_rdma_modify_qp_in_params { u32 modify_flags; #define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1 #define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0 #define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1 #define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1 #define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1 #define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2 #define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1 #define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3 #define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1 #define 
QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4 #define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1 #define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5 #define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1 #define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6 #define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1 #define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7 #define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1 #define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8 #define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1 #define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9 #define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1 #define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10 #define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1 #define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11 #define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1 #define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12 #define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1 #define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13 #define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1 #define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14 enum qed_roce_qp_state new_state; u16 pkey; bool incoming_rdma_read_en; bool incoming_rdma_write_en; bool incoming_atomic_en; bool e2e_flow_control_en; u32 dest_qp; bool lb_indication; u16 mtu; u8 traffic_class_tos; u8 hop_limit_ttl; u32 flow_label; union qed_gid sgid; union qed_gid dgid; u16 udp_src_port; u16 vlan_id; u32 rq_psn; u32 sq_psn; u8 max_rd_atomic_resp; u8 max_rd_atomic_req; u32 ack_timeout; u8 retry_cnt; u8 rnr_retry_cnt; u8 min_rnr_nak_timer; bool sqd_async; u8 remote_mac_addr[6]; u8 local_mac_addr[6]; bool use_local_mac; enum roce_mode roce_mode; }; struct qed_rdma_query_qp_out_params { enum qed_roce_qp_state state; u32 rq_psn; u32 sq_psn; bool draining; u16 mtu; u32 dest_qp; bool incoming_rdma_read_en; bool incoming_rdma_write_en; bool incoming_atomic_en; bool e2e_flow_control_en; union qed_gid sgid; union qed_gid dgid; u32 flow_label; u8 hop_limit_ttl; u8 traffic_class_tos; u32 timeout; u8 rnr_retry; u8 retry_cnt; u8 min_rnr_nak_timer; u16 pkey_index; u8 max_rd_atomic; u8 max_dest_rd_atomic; bool sqd_async; }; struct qed_rdma_create_srq_out_params { u16 srq_id; }; struct qed_rdma_destroy_srq_in_params { u16 srq_id; }; struct qed_rdma_modify_srq_in_params { u32 wqe_limit; u16 srq_id; }; struct qed_rdma_stats_out_params { u64 sent_bytes; u64 sent_pkts; u64 rcv_bytes; u64 rcv_pkts; }; struct qed_rdma_counters_out_params { u64 pd_count; u64 max_pd; u64 dpi_count; u64 max_dpi; u64 cq_count; u64 max_cq; u64 qp_count; u64 max_qp; u64 tid_count; u64 max_tid; }; #define QED_ROCE_TX_HEAD_FAILURE (1) #define QED_ROCE_TX_FRAG_FAILURE (2) enum qed_iwarp_event_type { QED_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */ QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */ QED_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */ QED_IWARP_EVENT_DISCONNECT, QED_IWARP_EVENT_CLOSE, QED_IWARP_EVENT_IRQ_FULL, QED_IWARP_EVENT_RQ_EMPTY, QED_IWARP_EVENT_LLP_TIMEOUT, QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR, QED_IWARP_EVENT_CQ_OVERFLOW, QED_IWARP_EVENT_QP_CATASTROPHIC, QED_IWARP_EVENT_ACTIVE_MPA_REPLY, QED_IWARP_EVENT_LOCAL_ACCESS_ERROR, QED_IWARP_EVENT_REMOTE_OPERATION_ERROR, QED_IWARP_EVENT_TERMINATE_RECEIVED, QED_IWARP_EVENT_SRQ_LIMIT, QED_IWARP_EVENT_SRQ_EMPTY, }; enum qed_tcp_ip_version { QED_TCP_IPV4, QED_TCP_IPV6, }; struct qed_iwarp_cm_info { enum qed_tcp_ip_version ip_version; u32 remote_ip[4]; u32 local_ip[4]; u16 remote_port; u16 
local_port; u16 vlan; u8 ord; u8 ird; u16 private_data_len; const void *private_data; }; struct qed_iwarp_cm_event_params { enum qed_iwarp_event_type event; const struct qed_iwarp_cm_info *cm_info; void *ep_context; /* To be passed to accept call */ int status; }; typedef int (*iwarp_event_handler) (void *context, struct qed_iwarp_cm_event_params *event); struct qed_iwarp_connect_in { iwarp_event_handler event_cb; void *cb_context; struct qed_rdma_qp *qp; struct qed_iwarp_cm_info cm_info; u16 mss; u8 remote_mac_addr[ETH_ALEN]; u8 local_mac_addr[ETH_ALEN]; }; struct qed_iwarp_connect_out { void *ep_context; }; struct qed_iwarp_listen_in { iwarp_event_handler event_cb; void *cb_context; /* passed to event_cb */ u32 max_backlog; enum qed_tcp_ip_version ip_version; u32 ip_addr[4]; u16 port; u16 vlan; }; struct qed_iwarp_listen_out { void *handle; }; struct qed_iwarp_accept_in { void *ep_context; void *cb_context; struct qed_rdma_qp *qp; const void *private_data; u16 private_data_len; u8 ord; u8 ird; }; struct qed_iwarp_reject_in { void *ep_context; void *cb_context; const void *private_data; u16 private_data_len; }; struct qed_iwarp_send_rtr_in { void *ep_context; }; struct qed_roce_ll2_header { void *vaddr; dma_addr_t baddr; size_t len; }; struct qed_roce_ll2_buffer { dma_addr_t baddr; size_t len; }; struct qed_roce_ll2_packet { struct qed_roce_ll2_header header; int n_seg; struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE]; int roce_mode; enum qed_ll2_tx_dest tx_dest; }; enum qed_rdma_type { QED_RDMA_TYPE_ROCE, QED_RDMA_TYPE_IWARP }; struct qed_dev_rdma_info { struct qed_dev_info common; enum qed_rdma_type rdma_type; u8 user_dpm_enabled; }; struct qed_rdma_ops { const struct qed_common_ops *common; int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_rdma_info *info); void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev); int (*rdma_init)(struct qed_dev *dev, struct qed_rdma_start_in_params *iparams); int (*rdma_add_user)(void *rdma_cxt, struct qed_rdma_add_user_out_params *oparams); void (*rdma_remove_user)(void *rdma_cxt, u16 dpi); int (*rdma_stop)(void *rdma_cxt); struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt); struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt); int (*rdma_get_start_sb)(struct qed_dev *cdev); int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev); void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod); int (*rdma_get_rdma_int)(struct qed_dev *cdev, struct qed_int_info *info); int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt); int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd); void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd); int (*rdma_create_cq)(void *rdma_cxt, struct qed_rdma_create_cq_in_params *params, u16 *icid); int (*rdma_destroy_cq)(void *rdma_cxt, struct qed_rdma_destroy_cq_in_params *iparams, struct qed_rdma_destroy_cq_out_params *oparams); struct qed_rdma_qp * (*rdma_create_qp)(void *rdma_cxt, struct qed_rdma_create_qp_in_params *iparams, struct qed_rdma_create_qp_out_params *oparams); int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp, struct qed_rdma_modify_qp_in_params *iparams); int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp, struct qed_rdma_query_qp_out_params *oparams); int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp); int (*rdma_register_tid)(void *rdma_cxt, struct qed_rdma_register_tid_in_params *iparams); int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid); int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid); void (*rdma_free_tid)(void *rdma_cxt, u32 itid); int 
(*rdma_create_srq)(void *rdma_cxt, struct qed_rdma_create_srq_in_params *iparams, struct qed_rdma_create_srq_out_params *oparams); int (*rdma_destroy_srq)(void *rdma_cxt, struct qed_rdma_destroy_srq_in_params *iparams); int (*rdma_modify_srq)(void *rdma_cxt, struct qed_rdma_modify_srq_in_params *iparams); int (*ll2_acquire_connection)(void *rdma_cxt, struct qed_ll2_acquire_data *data); int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle); int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle); void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle); int (*ll2_prepare_tx_packet)(void *rdma_cxt, u8 connection_handle, struct qed_ll2_tx_pkt_info *pkt, bool notify_fw); int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt, u8 connection_handle, dma_addr_t addr, u16 nbytes); int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle, dma_addr_t addr, u16 buf_len, void *cookie, u8 notify_fw); int (*ll2_get_stats)(void *rdma_cxt, u8 connection_handle, struct qed_ll2_stats *p_stats); int (*ll2_set_mac_filter)(struct qed_dev *cdev, u8 *old_mac_address, u8 *new_mac_address); int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset); int (*iwarp_connect)(void *rdma_cxt, struct qed_iwarp_connect_in *iparams, struct qed_iwarp_connect_out *oparams); int (*iwarp_create_listen)(void *rdma_cxt, struct qed_iwarp_listen_in *iparams, struct qed_iwarp_listen_out *oparams); int (*iwarp_accept)(void *rdma_cxt, struct qed_iwarp_accept_in *iparams); int (*iwarp_reject)(void *rdma_cxt, struct qed_iwarp_reject_in *iparams); int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle); int (*iwarp_send_rtr)(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams); }; const struct qed_rdma_ops *qed_get_rdma_ops(void); #endif qed/eth_common.h 0000644 00000034253 14722070374 0007633 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifndef __ETH_COMMON__ #define __ETH_COMMON__ /********************/ /* ETH FW CONSTANTS */ /********************/ #define ETH_HSI_VER_MAJOR 3 #define ETH_HSI_VER_MINOR 10 #define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 #define ETH_CACHE_LINE_SIZE 64 #define ETH_RX_CQE_GAP 32 #define ETH_MAX_RAMROD_PER_CON 8 #define ETH_TX_BD_PAGE_SIZE_BYTES 4096 #define ETH_RX_BD_PAGE_SIZE_BYTES 4096 #define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 #define ETH_RX_NUM_NEXT_PAGE_BDS 2 #define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 #define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 #define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 #define ETH_TX_MAX_LSO_HDR_NBD 4 #define ETH_TX_MIN_BDS_PER_LSO_PKT 3 #define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 #define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 #define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 #define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) #define ETH_TX_MAX_LSO_HDR_BYTES 510 #define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) #define ETH_TX_LSO_WINDOW_MIN_LEN 9700 #define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 #define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 #define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF #define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS #define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) /* Maximum number of buffers, used for RX packet placement */ #define ETH_RX_MAX_BUFF_PER_PKT 5 #define ETH_RX_BD_THRESHOLD 12 /* Num of MAC/VLAN filters */ #define ETH_NUM_MAC_FILTERS 512 #define ETH_NUM_VLAN_FILTERS 512 /* Approx. multicast constants */ #define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 #define ETH_MULTICAST_MAC_BINS 256 #define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) /* Ethernet vport update constants */ #define ETH_FILTER_RULES_COUNT 10 #define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 #define ETH_RSS_KEY_SIZE_REGS 10 #define ETH_RSS_ENGINE_NUM_K2 207 #define ETH_RSS_ENGINE_NUM_BB 127 /* TPA constants */ #define ETH_TPA_MAX_AGGS_NUM 64 #define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 #define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 /* Control frame check constants */ #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 /* GFS constants */ #define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */ /* Destination port mode */ enum dest_port_mode { DEST_PORT_PHY, DEST_PORT_LOOPBACK, DEST_PORT_PHY_LOOPBACK, DEST_PORT_DROP, MAX_DEST_PORT_MODE }; /* Ethernet address type */ enum eth_addr_type { BROADCAST_ADDRESS, MULTICAST_ADDRESS, UNICAST_ADDRESS, UNKNOWN_ADDRESS, MAX_ETH_ADDR_TYPE }; struct eth_tx_1st_bd_flags { u8 bitfields; #define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 #define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 };
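/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * the *_MASK/*_SHIFT pairs embedded in the structures of this file describe
 * sub-fields packed into a plain integer member. A transmit path could mark
 * a first BD as the start of a packet with IP and L4 checksum offload as
 * below; the helper name example_fill_1st_bd_flags is hypothetical.
 */
static inline void example_fill_1st_bd_flags(struct eth_tx_1st_bd_flags *f)
{
	u8 val = 0;

	/* Each field is (value & MASK) shifted into place by SHIFT. */
	val |= (1 & ETH_TX_1ST_BD_FLAGS_START_BD_MASK) <<
	       ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	val |= (1 & ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK) <<
	       ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
	val |= (1 & ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK) <<
	       ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

	f->bitfields = val;
}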
/* The parsing information data for the first tx bd of a given packet */ struct eth_tx_data_1st_bd { __le16 vlan; u8 nbds; struct eth_tx_1st_bd_flags bd_flags; __le16 bitfields; #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 #define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 #define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 #define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF #define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 }; /* The parsing information data for the second tx bd of a given packet */ struct eth_tx_data_2nd_bd { __le16 tunn_ip_size; __le16 bitfields1; #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 #define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 #define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 #define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 #define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 #define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 #define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 #define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 #define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 __le16 bitfields2; #define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF #define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 #define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 }; /* Firmware data for L2-EDPM packet */ struct eth_edpm_fw_data { struct eth_tx_data_1st_bd data_1st_bd; struct eth_tx_data_2nd_bd data_2nd_bd; __le32 reserved; }; /* Tunneling parsing flags */ struct eth_tunnel_parsing_flags { u8 flags; #define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 #define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 #define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 #define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 #define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 #define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 #define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 #define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 #define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 #define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 #define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 #define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 }; /* PMD flow control bits */ struct eth_pmd_flow_flags { u8 flags; #define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 #define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 #define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 #define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 #define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F #define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 }; /* Regular ETH Rx FP CQE */ struct eth_fast_path_rx_reg_cqe { u8 type; u8 bitfields; #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 #define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF #define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 __le16 pkt_len; struct parsing_and_err_flags pars_flags; __le16 vlan_tag; __le32 rss_hash; __le16 len_on_first_bd; u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags; u8 bd_num; u8 reserved; __le16 flow_id; u8 reserved1[11]; struct eth_pmd_flow_flags pmd_flags; }; /* TPA-continue ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_cont_cqe { u8 type; u8 tpa_agg_index; __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; u8 reserved; u8 reserved1; __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; u8 reserved3[3]; struct eth_pmd_flow_flags pmd_flags; }; /* TPA-end ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_end_cqe { u8 type; u8 tpa_agg_index; __le16 total_packet_len; u8 num_of_bds; u8 end_reason; __le16 num_of_coalesced_segs; __le32 ts_delta; __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE]; __le16 reserved1; u8 reserved2; struct eth_pmd_flow_flags pmd_flags; }; /* TPA-start ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_start_cqe { u8 type; u8 bitfields; #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 __le16 seg_len; struct parsing_and_err_flags pars_flags; __le16 vlan_tag; __le32 rss_hash; __le16 len_on_first_bd; u8 placement_offset; struct eth_tunnel_parsing_flags tunnel_pars_flags; u8 tpa_agg_index; u8 header_len; __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; __le16 flow_id; u8 reserved; struct eth_pmd_flow_flags pmd_flags; }; /* The L4 pseudo checksum mode for Ethernet */ enum eth_l4_pseudo_checksum_mode { ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH, ETH_L4_PSEUDO_CSUM_ZERO_LENGTH, MAX_ETH_L4_PSEUDO_CHECKSUM_MODE }; struct eth_rx_bd { struct regpair addr; }; /* Regular ETH Rx SP CQE */ struct eth_slow_path_rx_cqe { u8 type; u8 ramrod_cmd_id; u8 error_flag; u8 reserved[25]; __le16 echo; u8 reserved1; struct eth_pmd_flow_flags pmd_flags; }; /* Union for all ETH Rx CQE types */ union eth_rx_cqe { struct eth_fast_path_rx_reg_cqe fast_path_regular; struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; struct eth_slow_path_rx_cqe slow_path; }; /* ETH Rx CQE type */ enum eth_rx_cqe_type { ETH_RX_CQE_TYPE_UNUSED, ETH_RX_CQE_TYPE_REGULAR, ETH_RX_CQE_TYPE_SLOW_PATH, ETH_RX_CQE_TYPE_TPA_START, ETH_RX_CQE_TYPE_TPA_CONT, ETH_RX_CQE_TYPE_TPA_END, MAX_ETH_RX_CQE_TYPE }; struct eth_rx_pmd_cqe { union eth_rx_cqe cqe; u8 reserved[ETH_RX_CQE_GAP]; }; enum eth_rx_tunn_type { ETH_RX_NO_TUNN, ETH_RX_TUNN_GENEVE, ETH_RX_TUNN_GRE, ETH_RX_TUNN_VXLAN, MAX_ETH_RX_TUNN_TYPE }; /* Aggregation end reason. 
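 * Reported by the firmware in the end_reason field of
 * struct eth_fast_path_rx_tpa_end_cqe above.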
*/ enum eth_tpa_end_reason { ETH_AGG_END_UNUSED, ETH_AGG_END_SP_UPDATE, ETH_AGG_END_MAX_LEN, ETH_AGG_END_LAST_SEG, ETH_AGG_END_TIMEOUT, ETH_AGG_END_NOT_CONSISTENT, ETH_AGG_END_OUT_OF_ORDER, ETH_AGG_END_NON_TPA_SEG, MAX_ETH_TPA_END_REASON }; /* The first tx bd of a given packet */ struct eth_tx_1st_bd { struct regpair addr; __le16 nbytes; struct eth_tx_data_1st_bd data; }; /* The second tx bd of a given packet */ struct eth_tx_2nd_bd { struct regpair addr; __le16 nbytes; struct eth_tx_data_2nd_bd data; }; /* The parsing information data for the third tx bd of a given packet */ struct eth_tx_data_3rd_bd { __le16 lso_mss; __le16 bitfields; #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 #define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF #define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 #define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 #define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 #define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F #define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 u8 tunn_l4_hdr_start_offset_w; u8 tunn_hdr_size_w; }; /* The third tx bd of a given packet */ struct eth_tx_3rd_bd { struct regpair addr; __le16 nbytes; struct eth_tx_data_3rd_bd data; }; /* Complementary information for the regular tx bd of a given packet */ struct eth_tx_data_bd { __le16 reserved0; __le16 bitfields; #define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF #define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 #define ETH_TX_DATA_BD_START_BD_MASK 0x1 #define ETH_TX_DATA_BD_START_BD_SHIFT 8 #define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F #define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 __le16 reserved3; }; /* The common non-special TX BD ring element */ struct eth_tx_bd { struct regpair addr; __le16 nbytes; struct eth_tx_data_bd data; }; union eth_tx_bd_types { struct eth_tx_1st_bd first_bd; struct eth_tx_2nd_bd second_bd; struct eth_tx_3rd_bd third_bd; struct eth_tx_bd reg_bd; }; /* Mstorm Queue Zone */ enum eth_tx_tunn_type { ETH_TX_TUNN_GENEVE, ETH_TX_TUNN_TTAG, ETH_TX_TUNN_GRE, ETH_TX_TUNN_VXLAN, MAX_ETH_TX_TUNN_TYPE }; /* Ystorm Queue Zone */ struct xstorm_eth_queue_zone { struct coalescing_timeset int_coalescing_timeset; u8 reserved[7]; }; /* ETH doorbell data */ struct eth_db_data { u8 params; #define ETH_DB_DATA_DEST_MASK 0x3 #define ETH_DB_DATA_DEST_SHIFT 0 #define ETH_DB_DATA_AGG_CMD_MASK 0x3 #define ETH_DB_DATA_AGG_CMD_SHIFT 2 #define ETH_DB_DATA_BYPASS_EN_MASK 0x1 #define ETH_DB_DATA_BYPASS_EN_SHIFT 4 #define ETH_DB_DATA_RESERVED_MASK 0x1 #define ETH_DB_DATA_RESERVED_SHIFT 5 #define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 #define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 bd_prod; }; /* RSS hash type */ enum rss_hash_type { RSS_HASH_TYPE_DEFAULT = 0, RSS_HASH_TYPE_IPV4 = 1, RSS_HASH_TYPE_TCP_IPV4 = 2, RSS_HASH_TYPE_IPV6 = 3, RSS_HASH_TYPE_TCP_IPV6 = 4, RSS_HASH_TYPE_UDP_IPV4 = 5, RSS_HASH_TYPE_UDP_IPV6 = 6, MAX_RSS_HASH_TYPE }; #endif /* __ETH_COMMON__ */ qed/qed_iov_if.h 0000644 00000004177 14722070374 0007611 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef _QED_IOV_IF_H #define _QED_IOV_IF_H #include <linux/qed/qed_if.h> /* Structs used by PF to control and manipulate child VFs */ struct qed_iov_hv_ops { int (*configure)(struct qed_dev *cdev, int num_vfs_param); int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid); int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid); int (*get_config) (struct qed_dev *cdev, int vf_id, struct ifla_vf_info *ivi); int (*set_link_state) (struct qed_dev *cdev, int vf_id, int link_state); int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val); int (*set_rate) (struct qed_dev *cdev, int vfid, u32 min_rate, u32 max_rate); int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust); }; #endif qed/iwarp_common.h 0000644 00000004061 14722070374 0010167 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
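*/

/* Usage sketch for the qed_iov_hv_ops table above (illustrative only; the
 * qede_dev layout and the ops->iov/cdev accessors are assumptions borrowed
 * from the qede driver and are not defined in this header). A PF driver
 * typically forwards its ndo_set_vf_* callbacks to these ops:
 */
#if 0
static int example_ndo_set_vf_mac(struct net_device *ndev, int vfid, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	/* Delegate the VF MAC change to the qed core for this VF index. */
	return edev->ops->iov->set_mac(edev->cdev, mac, vfid);
}
#endif
/*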
*/ #ifndef __IWARP_COMMON__ #define __IWARP_COMMON__ #include <linux/qed/rdma_common.h> /************************/ /* IWARP FW CONSTANTS */ /************************/ #define IWARP_ACTIVE_MODE 0 #define IWARP_PASSIVE_MODE 1 #define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000) #define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) #define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) #define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) #define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) #define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) #define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) #define IWARP_MAX_QPS (64 * 1024) #endif /* __IWARP_COMMON__ */ qed/qede_rdma.h 0000644 00000006033 14722070374 0007417 0 ustar 00 /* QLogic qedr NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
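*/

/* Layout sketch for the IWARP shared queue page constants above: the RQ PBL
 * region (0x4000 + 0x1000) ends exactly at the SQ PBL offset (0x5000), and
 * the SQ PBL region (0x5000 + 0x3000) ends at the page size (0x8000). A
 * compile-time restatement of that arithmetic (illustrative only, not part
 * of the original header):
 */
#if 0
static_assert(IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET +
	      IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE ==
	      IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET,
	      "RQ PBL region must end at the SQ PBL offset");
static_assert(IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET +
	      IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE ==
	      IWARP_SHARED_QUEUE_PAGE_SIZE,
	      "SQ PBL region must end at the shared page boundary");
#endif
/*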
*/ #ifndef QEDE_ROCE_H #define QEDE_ROCE_H #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/types.h> #include <linux/workqueue.h> struct qedr_dev; struct qed_dev; struct qede_dev; enum qede_rdma_event { QEDE_UP, QEDE_DOWN, QEDE_CHANGE_ADDR, QEDE_CLOSE }; struct qede_rdma_event_work { struct list_head list; struct work_struct work; void *ptr; enum qede_rdma_event event; }; struct qedr_driver { unsigned char name[32]; struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *, struct net_device *); void (*remove)(struct qedr_dev *); void (*notify)(struct qedr_dev *, enum qede_rdma_event); }; /* APIs for RDMA driver to register callback handlers, * which will be invoked when device is added, removed, ifup, ifdown */ int qede_rdma_register_driver(struct qedr_driver *drv); void qede_rdma_unregister_driver(struct qedr_driver *drv); bool qede_rdma_supported(struct qede_dev *dev); #if IS_ENABLED(CONFIG_QED_RDMA) int qede_rdma_dev_add(struct qede_dev *dev, bool recovery); void qede_rdma_dev_event_open(struct qede_dev *dev); void qede_rdma_dev_event_close(struct qede_dev *dev); void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery); void qede_rdma_event_changeaddr(struct qede_dev *edr); #else static inline int qede_rdma_dev_add(struct qede_dev *dev, bool recovery) { return 0; } static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {} static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {} static inline void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery) {} static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {} #endif #endif qed/common_hsi.h 0000644 00000135075 14722070374 0007642 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
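*/

/* Registration sketch for the qedr_driver interface above: the RDMA driver
 * fills in the callback table once and registers it at module init; qede
 * then invokes add()/remove()/notify() as netdevs appear and change state.
 * The example_* callbacks are hypothetical stubs, not the real qedr code.
 */
#if 0
static struct qedr_dev *example_add(struct qed_dev *cdev, struct pci_dev *pdev,
				    struct net_device *ndev)
{
	return NULL;	/* would allocate and set up a qedr_dev here */
}

static void example_remove(struct qedr_dev *qedr) { }
static void example_notify(struct qedr_dev *qedr, enum qede_rdma_event e) { }

static struct qedr_driver example_drv = {
	.name	= "example_rdma",
	.add	= example_add,
	.remove	= example_remove,
	.notify	= example_notify,	/* QEDE_UP/DOWN/CHANGE_ADDR/CLOSE */
};

/* Registered once, e.g. from module_init():
 *	err = qede_rdma_register_driver(&example_drv);
 */
#endif
/*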
*/ #ifndef _COMMON_HSI_H #define _COMMON_HSI_H #include <linux/types.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/slab.h> /* dma_addr_t manip */ #define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) #define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) #define DMA_REGPAIR_LE(x, val) do { \ (x).hi = DMA_HI_LE((val)); \ (x).lo = DMA_LO_LE((val)); \ } while (0) #define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) #define HILO_64(hi, lo) \ HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64) #define HILO_64_REGPAIR(regpair) ({ \ typeof(regpair) __regpair = (regpair); \ HILO_64(__regpair.hi, __regpair.lo); }) #define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) #ifndef __COMMON_HSI__ #define __COMMON_HSI__ /********************************/ /* PROTOCOL COMMON FW CONSTANTS */ /********************************/ #define X_FINAL_CLEANUP_AGG_INT 1 #define EVENT_RING_PAGE_SIZE_BYTES 4096 #define NUM_OF_GLOBAL_QUEUES 128 #define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 #define ISCSI_CDU_TASK_SEG_TYPE 0 #define FCOE_CDU_TASK_SEG_TYPE 0 #define RDMA_CDU_TASK_SEG_TYPE 1 #define FW_ASSERT_GENERAL_ATTN_IDX 32 #define MAX_PINNED_CCFC 32 /* Queue Zone sizes in bytes */ #define TSTORM_QZONE_SIZE 8 #define MSTORM_QZONE_SIZE 16 #define USTORM_QZONE_SIZE 8 #define XSTORM_QZONE_SIZE 8 #define YSTORM_QZONE_SIZE 0 #define PSTORM_QZONE_SIZE 0 #define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 /********************************/ /* CORE (LIGHT L2) FW CONSTANTS */ /********************************/ #define CORE_LL2_MAX_RAMROD_PER_CON 8 #define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096 #define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096 #define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096 #define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1 #define CORE_LL2_TX_MAX_BDS_PER_PACKET 12 #define CORE_SPQE_PAGE_SIZE_BYTES 4096 #define MAX_NUM_LL2_RX_QUEUES 48 #define MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 #define FW_MINOR_VERSION 37 #define FW_REVISION_VERSION 7 #define FW_ENGINEERING_VERSION 0 /***********************/ /* COMMON HW CONSTANTS */ /***********************/ /* PCI functions */ #define MAX_NUM_PORTS_K2 (4) #define MAX_NUM_PORTS_BB (2) #define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) #define MAX_NUM_PFS_K2 (16) #define MAX_NUM_PFS_BB (8) #define MAX_NUM_PFS (MAX_NUM_PFS_K2) #define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ #define MAX_NUM_VFS_K2 (192) #define MAX_NUM_VFS_BB (120) #define MAX_NUM_VFS (MAX_NUM_VFS_K2) #define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB) #define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS) #define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB) #define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS) #define MAX_NUM_VPORTS_K2 (208) #define MAX_NUM_VPORTS_BB (160) #define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2) #define MAX_NUM_L2_QUEUES_K2 (320) #define MAX_NUM_L2_QUEUES_BB (256) #define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2) /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ #define NUM_PHYS_TCS_4PORT_K2 (4) #define NUM_OF_PHYS_TCS (8) #define PURE_LB_TC NUM_OF_PHYS_TCS #define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) #define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) /* CIDs */ #define NUM_OF_CONNECTION_TYPES_E4 (8) #define NUM_OF_LCIDS (320) #define NUM_OF_LTIDS (320) /* Global 
PXP windows (GTT) */ #define NUM_OF_GTT 19 #define GTT_DWORD_SIZE_BITS 10 #define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2) #define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) /* Tools Version */ #define TOOLS_VERSION 10 /*****************/ /* CDU CONSTANTS */ /*****************/ #define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) #define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) #define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) #define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) #define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2) #define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3) #define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4) #define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5) /*****************/ /* DQ CONSTANTS */ /*****************/ /* DEMS */ #define DQ_DEMS_LEGACY 0 #define DQ_DEMS_TOE_MORE_TO_SEND 3 #define DQ_DEMS_TOE_LOCAL_ADV_WND 4 #define DQ_DEMS_ROCE_CQ_CONS 7 /* XCM agg val selection (HW) */ #define DQ_XCM_AGG_VAL_SEL_WORD2 0 #define DQ_XCM_AGG_VAL_SEL_WORD3 1 #define DQ_XCM_AGG_VAL_SEL_WORD4 2 #define DQ_XCM_AGG_VAL_SEL_WORD5 3 #define DQ_XCM_AGG_VAL_SEL_REG3 4 #define DQ_XCM_AGG_VAL_SEL_REG4 5 #define DQ_XCM_AGG_VAL_SEL_REG5 6 #define DQ_XCM_AGG_VAL_SEL_REG6 7 /* XCM agg val selection (FW) */ #define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 #define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 #define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 #define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 #define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 #define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 #define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 /* UCM agg val selection (HW) */ #define DQ_UCM_AGG_VAL_SEL_WORD0 0 #define DQ_UCM_AGG_VAL_SEL_WORD1 1 #define DQ_UCM_AGG_VAL_SEL_WORD2 2 #define DQ_UCM_AGG_VAL_SEL_WORD3 3 #define DQ_UCM_AGG_VAL_SEL_REG0 4 #define DQ_UCM_AGG_VAL_SEL_REG1 5 #define DQ_UCM_AGG_VAL_SEL_REG2 6 #define DQ_UCM_AGG_VAL_SEL_REG3 7 /* UCM agg val selection (FW) */ #define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 #define DQ_UCM_ETH_PMD_RX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD3 #define DQ_UCM_ROCE_CQ_CONS_CMD DQ_UCM_AGG_VAL_SEL_REG0 #define DQ_UCM_ROCE_CQ_PROD_CMD DQ_UCM_AGG_VAL_SEL_REG2 /* TCM agg val selection (HW) */ #define DQ_TCM_AGG_VAL_SEL_WORD0 0 #define DQ_TCM_AGG_VAL_SEL_WORD1 1 #define DQ_TCM_AGG_VAL_SEL_WORD2 2 #define DQ_TCM_AGG_VAL_SEL_WORD3 3 #define DQ_TCM_AGG_VAL_SEL_REG1 4 #define DQ_TCM_AGG_VAL_SEL_REG2 5 #define DQ_TCM_AGG_VAL_SEL_REG6 6 #define DQ_TCM_AGG_VAL_SEL_REG9 7 /* TCM agg val selection (FW) */ #define DQ_TCM_L2B_BD_PROD_CMD \ DQ_TCM_AGG_VAL_SEL_WORD1 #define DQ_TCM_ROCE_RQ_PROD_CMD \ DQ_TCM_AGG_VAL_SEL_WORD0 /* XCM agg counter flag selection (HW) */ #define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 #define 
DQ_XCM_AGG_FLG_SHIFT_BIT15 1 #define DQ_XCM_AGG_FLG_SHIFT_CF12 2 #define DQ_XCM_AGG_FLG_SHIFT_CF13 3 #define DQ_XCM_AGG_FLG_SHIFT_CF18 4 #define DQ_XCM_AGG_FLG_SHIFT_CF19 5 #define DQ_XCM_AGG_FLG_SHIFT_CF22 6 #define DQ_XCM_AGG_FLG_SHIFT_CF23 7 /* XCM agg counter flag selection (FW) */ #define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) #define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) #define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) #define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) #define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) /* UCM agg counter flag selection (HW) */ #define DQ_UCM_AGG_FLG_SHIFT_CF0 0 #define DQ_UCM_AGG_FLG_SHIFT_CF1 1 #define DQ_UCM_AGG_FLG_SHIFT_CF3 2 #define DQ_UCM_AGG_FLG_SHIFT_CF4 3 #define DQ_UCM_AGG_FLG_SHIFT_CF5 4 #define DQ_UCM_AGG_FLG_SHIFT_CF6 5 #define DQ_UCM_AGG_FLG_SHIFT_RULE0EN 6 #define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7 /* UCM agg counter flag selection (FW) */ #define DQ_UCM_ETH_PMD_TX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) #define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) #define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) #define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) #define DQ_UCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF3) #define DQ_UCM_TOE_SLOW_PATH_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4) #define DQ_UCM_TOE_DQ_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5) /* TCM agg counter flag selection (HW) */ #define DQ_TCM_AGG_FLG_SHIFT_CF0 0 #define DQ_TCM_AGG_FLG_SHIFT_CF1 1 #define DQ_TCM_AGG_FLG_SHIFT_CF2 2 #define DQ_TCM_AGG_FLG_SHIFT_CF3 3 #define DQ_TCM_AGG_FLG_SHIFT_CF4 4 #define DQ_TCM_AGG_FLG_SHIFT_CF5 5 #define DQ_TCM_AGG_FLG_SHIFT_CF6 6 #define DQ_TCM_AGG_FLG_SHIFT_CF7 7 /* TCM agg counter flag selection (FW) */ #define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) #define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) /* PWM address mapping */ #define DQ_PWM_OFFSET_DPM_BASE 0x0 #define DQ_PWM_OFFSET_DPM_END 0x27 #define DQ_PWM_OFFSET_XCM16_BASE 0x40 #define DQ_PWM_OFFSET_XCM32_BASE 0x44 #define DQ_PWM_OFFSET_UCM16_BASE 0x48 #define DQ_PWM_OFFSET_UCM32_BASE 0x4C #define DQ_PWM_OFFSET_UCM16_4 0x50 #define DQ_PWM_OFFSET_TCM16_BASE 0x58 #define DQ_PWM_OFFSET_TCM32_BASE 0x5C #define DQ_PWM_OFFSET_XCM_FLAGS 0x68 #define DQ_PWM_OFFSET_UCM_FLAGS 0x69 #define DQ_PWM_OFFSET_TCM_FLAGS 0x6B #define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4) #define 
DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2) #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) #define DQ_REGION_SHIFT (12) /* DPM */ #define DQ_DPM_WQE_BUFF_SIZE (320) /* Conn type ranges */ #define DQ_CONN_TYPE_RANGE_SHIFT (4) /*****************/ /* QM CONSTANTS */ /*****************/ /* Number of TX queues in the QM */ #define MAX_QM_TX_QUEUES_K2 512 #define MAX_QM_TX_QUEUES_BB 448 #define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 /* Number of Other queues in the QM */ #define MAX_QM_OTHER_QUEUES_BB 64 #define MAX_QM_OTHER_QUEUES_K2 128 #define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 /* Number of queues in a PF queue group */ #define QM_PF_QUEUE_GROUP_SIZE 8 /* The size of a single queue element in bytes */ #define QM_PQ_ELEMENT_SIZE 4 /* Base number of Tx PQs in the CM PQ representation. * Should be used when storing PQ IDs in CM PQ registers and context. */ #define CM_TX_PQ_BASE 0x200 /* Number of global Vport/QCN rate limiters */ #define MAX_QM_GLOBAL_RLS 256 /* QM registers data */ #define QM_LINE_CRD_REG_WIDTH 16 #define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) #define QM_BYTE_CRD_REG_WIDTH 24 #define QM_BYTE_CRD_REG_SIGN_BIT BIT((QM_BYTE_CRD_REG_WIDTH - 1)) #define QM_WFQ_CRD_REG_WIDTH 32 #define QM_WFQ_CRD_REG_SIGN_BIT BIT((QM_WFQ_CRD_REG_WIDTH - 1)) #define QM_RL_CRD_REG_WIDTH 32 #define QM_RL_CRD_REG_SIGN_BIT BIT((QM_RL_CRD_REG_WIDTH - 1)) /*****************/ /* CAU CONSTANTS */ /*****************/ #define CAU_FSM_ETH_RX 0 #define CAU_FSM_ETH_TX 1 /* Number of Protocol Indices per Status Block */ #define PIS_PER_SB_E4 12 #define CAU_HC_STOPPED_STATE 3 #define CAU_HC_DISABLE_STATE 4 #define CAU_HC_ENABLE_STATE 0 /*****************/ /* IGU CONSTANTS */ /*****************/ #define MAX_SB_PER_PATH_K2 (368) #define MAX_SB_PER_PATH_BB (288) #define MAX_TOT_SB_PER_PATH \ MAX_SB_PER_PATH_K2 #define MAX_SB_PER_PF_MIMD 129 #define MAX_SB_PER_PF_SIMD 64 #define MAX_SB_PER_VF 64 /* Memory addresses on the BAR for the IGU Sub Block */ #define IGU_MEM_BASE 0x0000 #define IGU_MEM_MSIX_BASE 0x0000 #define IGU_MEM_MSIX_UPPER 0x0101 #define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff #define IGU_MEM_PBA_MSIX_BASE 0x0200 #define IGU_MEM_PBA_MSIX_UPPER 0x0202 #define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff #define IGU_CMD_INT_ACK_BASE 0x0400 #define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ MAX_TOT_SB_PER_PATH - 1) #define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff #define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 #define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1 #define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2 #define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3 #define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4 #define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5 #define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6 #define IGU_CMD_PROD_UPD_BASE 0x0600 #define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ MAX_TOT_SB_PER_PATH - 1) #define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff /*****************/ /* PXP CONSTANTS */ /*****************/ /* Bars for Blocks */ #define PXP_BAR_GRC 0 #define PXP_BAR_TSDM 0 #define PXP_BAR_USDM 0 #define PXP_BAR_XSDM 0 #define PXP_BAR_MSDM 0 #define PXP_BAR_YSDM 0 #define PXP_BAR_PSDM 0 #define PXP_BAR_IGU 0 #define PXP_BAR_DQ 1 /* PTT and GTT */ #define PXP_PER_PF_ENTRY_SIZE 8 #define PXP_NUM_GLOBAL_WINDOWS 243 #define PXP_GLOBAL_ENTRY_SIZE 4 #define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4 #define PXP_PF_WINDOW_ADMIN_START 0 #define 
PXP_PF_WINDOW_ADMIN_LENGTH 0x1000 #define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \ PXP_PF_WINDOW_ADMIN_LENGTH - 1) #define PXP_PF_WINDOW_ADMIN_PER_PF_START 0 #define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \ PXP_PER_PF_ENTRY_SIZE) #define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \ PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1) #define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200 #define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \ PXP_GLOBAL_ENTRY_SIZE) #define PXP_PF_WINDOW_ADMIN_GLOBAL_END \ (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \ PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1) #define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0 #define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4 #define PXP_PF_ME_OPAQUE_ADDR 0x1f8 #define PXP_PF_ME_CONCRETE_ADDR 0x1fc #define PXP_NUM_PF_WINDOWS 12 #define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 #define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS #define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 #define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \ (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) #define PXP_EXTERNAL_BAR_PF_WINDOW_END \ (PXP_EXTERNAL_BAR_PF_WINDOW_START + \ PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1) #define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \ (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1) #define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS #define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000 #define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE) #define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) /* PF BAR */ #define PXP_BAR0_START_GRC 0x0000 #define PXP_BAR0_GRC_LENGTH 0x1C00000 #define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ PXP_BAR0_GRC_LENGTH - 1) #define PXP_BAR0_START_IGU 0x1C00000 #define PXP_BAR0_IGU_LENGTH 0x10000 #define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ PXP_BAR0_IGU_LENGTH - 1) #define PXP_BAR0_START_TSDM 0x1C80000 #define PXP_BAR0_SDM_LENGTH 0x40000 #define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 #define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ PXP_BAR0_SDM_LENGTH - 1) #define PXP_BAR0_START_MSDM 0x1D00000 #define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ PXP_BAR0_SDM_LENGTH - 1) #define PXP_BAR0_START_USDM 0x1D80000 #define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ PXP_BAR0_SDM_LENGTH - 1) #define PXP_BAR0_START_XSDM 0x1E00000 #define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ PXP_BAR0_SDM_LENGTH - 1) #define PXP_BAR0_START_YSDM 0x1E80000 #define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ PXP_BAR0_SDM_LENGTH - 1) #define PXP_BAR0_START_PSDM 0x1F00000 #define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ PXP_BAR0_SDM_LENGTH - 1) #define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1) /* VF BAR */ #define PXP_VF_BAR0 0 #define PXP_VF_BAR0_START_IGU 0 #define PXP_VF_BAR0_IGU_LENGTH 0x3000 #define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ PXP_VF_BAR0_IGU_LENGTH - 1) #define PXP_VF_BAR0_START_DQ 0x3000 #define PXP_VF_BAR0_DQ_LENGTH 0x200 #define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 #define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ PXP_VF_BAR0_DQ_OPAQUE_OFFSET) #define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ + 4) #define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ PXP_VF_BAR0_DQ_LENGTH - 1) #define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 #define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 #define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \ 
PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) #define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 #define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) #define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 #define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) #define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 #define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) #define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 #define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) #define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 #define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) #define PXP_VF_BAR0_START_GRC 0x3E00 #define PXP_VF_BAR0_GRC_LENGTH 0x200 #define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ PXP_VF_BAR0_GRC_LENGTH - 1) #define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 #define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 #define PXP_VF_BAR0_START_IGU2 0x10000 #define PXP_VF_BAR0_IGU2_LENGTH 0xD000 #define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \ PXP_VF_BAR0_IGU2_LENGTH - 1) #define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 #define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 /* ILT Records */ #define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_K2 11000 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) /* Host Interface */ #define PXP_QUEUES_ZONE_MAX_NUM 320 /*****************/ /* PRM CONSTANTS */ /*****************/ #define PRM_DMA_PAD_BYTES_NUM 2 /*****************/ /* SDMs CONSTANTS */ /*****************/ #define SDM_OP_GEN_TRIG_NONE 0 #define SDM_OP_GEN_TRIG_WAKE_THREAD 1 #define SDM_OP_GEN_TRIG_AGG_INT 2 #define SDM_OP_GEN_TRIG_LOADER 4 #define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 #define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 /********************/ /* Completion types */ /********************/ #define SDM_COMP_TYPE_NONE 0 #define SDM_COMP_TYPE_WAKE_THREAD 1 #define SDM_COMP_TYPE_AGG_INT 2 #define SDM_COMP_TYPE_CM 3 #define SDM_COMP_TYPE_LOADER 4 #define SDM_COMP_TYPE_PXP 5 #define SDM_COMP_TYPE_INDICATE_ERROR 6 #define SDM_COMP_TYPE_RELEASE_THREAD 7 #define SDM_COMP_TYPE_RAM 8 #define SDM_COMP_TYPE_INC_ORDER_CNT 9 /*****************/ /* PBF CONSTANTS */ /*****************/ /* Number of PBF command queue lines. Each line is 32B. */ #define PBF_MAX_CMD_LINES 3328 /* Number of BTB blocks. Each block is 256B. 
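 * With BTB_MAX_BLOCKS = 1440 below, that amounts to 1440 * 256 B = 360 KiB
 * of BTB memory in total.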
*/ #define BTB_MAX_BLOCKS 1440 /*****************/ /* PRS CONSTANTS */ /*****************/ #define PRS_GFT_CAM_LINES_NO_MATCH 31 /* Interrupt coalescing TimeSet */ struct coalescing_timeset { u8 value; #define COALESCING_TIMESET_TIMESET_MASK 0x7F #define COALESCING_TIMESET_TIMESET_SHIFT 0 #define COALESCING_TIMESET_VALID_MASK 0x1 #define COALESCING_TIMESET_VALID_SHIFT 7 }; struct common_queue_zone { __le16 ring_drv_data_consumer; __le16 reserved; }; /* ETH Rx producers data */ struct eth_rx_prod_data { __le16 bd_prod; __le16 cqe_prod; }; struct tcp_ulp_connect_done_params { __le16 mss; u8 snd_wnd_scale; u8 flags; #define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1 #define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0 #define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F #define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1 }; struct iscsi_connect_done_results { __le16 icid; __le16 conn_id; struct tcp_ulp_connect_done_params params; }; struct iscsi_eqe_data { __le16 icid; __le16 conn_id; __le16 reserved; u8 error_code; u8 error_pdu_opcode_reserved; #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0 #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1 #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6 #define ISCSI_EQE_DATA_RESERVED0_MASK 0x1 #define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 }; /* Multi function mode */ enum mf_mode { ERROR_MODE /* Unsupported mode */, MF_OVLAN, MF_NPAR, MAX_MF_MODE }; /* Per-protocol connection types */ enum protocol_type { PROTOCOLID_ISCSI, PROTOCOLID_FCOE, PROTOCOLID_ROCE, PROTOCOLID_CORE, PROTOCOLID_ETH, PROTOCOLID_IWARP, PROTOCOLID_RESERVED0, PROTOCOLID_PREROCE, PROTOCOLID_COMMON, PROTOCOLID_RESERVED1, MAX_PROTOCOL_TYPE }; struct regpair { __le32 lo; __le32 hi; }; /* RoCE Destroy Event Data */ struct rdma_eqe_destroy_qp { __le32 cid; u8 reserved[4]; }; /* RDMA Event Data Union */ union rdma_eqe_data { struct regpair async_handle; struct rdma_eqe_destroy_qp rdma_destroy_qp_data; }; /* Ustorm Queue Zone */ struct ustorm_eth_queue_zone { struct coalescing_timeset int_coalescing_timeset; u8 reserved[3]; }; struct ustorm_queue_zone { struct ustorm_eth_queue_zone eth; struct common_queue_zone common; }; /* Status block structure */ struct cau_pi_entry { __le32 prod; #define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF #define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 #define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F #define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 #define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 #define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 #define CAU_PI_ENTRY_RESERVED_MASK 0xFF #define CAU_PI_ENTRY_RESERVED_SHIFT 24 }; /* Status block structure */ struct cau_sb_entry { __le32 data; #define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF #define CAU_SB_ENTRY_SB_PROD_SHIFT 0 #define CAU_SB_ENTRY_STATE0_MASK 0xF #define CAU_SB_ENTRY_STATE0_SHIFT 24 #define CAU_SB_ENTRY_STATE1_MASK 0xF #define CAU_SB_ENTRY_STATE1_SHIFT 28 __le32 params; #define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F #define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 #define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F #define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 #define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 #define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 #define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 #define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 #define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF #define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 #define CAU_SB_ENTRY_VF_VALID_MASK 0x1 #define CAU_SB_ENTRY_VF_VALID_SHIFT 26 #define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF #define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 #define CAU_SB_ENTRY_TPH_MASK 0x1 #define CAU_SB_ENTRY_TPH_SHIFT 31 }; /* Igu 
cleanup bit values to distinguish between clean or producer consumer * update. */ enum command_type_bit { IGU_COMMAND_TYPE_NOP = 0, IGU_COMMAND_TYPE_SET = 1, MAX_COMMAND_TYPE_BIT }; /* Core doorbell data */ struct core_db_data { u8 params; #define CORE_DB_DATA_DEST_MASK 0x3 #define CORE_DB_DATA_DEST_SHIFT 0 #define CORE_DB_DATA_AGG_CMD_MASK 0x3 #define CORE_DB_DATA_AGG_CMD_SHIFT 2 #define CORE_DB_DATA_BYPASS_EN_MASK 0x1 #define CORE_DB_DATA_BYPASS_EN_SHIFT 4 #define CORE_DB_DATA_RESERVED_MASK 0x1 #define CORE_DB_DATA_RESERVED_SHIFT 5 #define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 #define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 spq_prod; }; /* Enum of doorbell aggregative command selection */ enum db_agg_cmd_sel { DB_AGG_CMD_NOP, DB_AGG_CMD_SET, DB_AGG_CMD_ADD, DB_AGG_CMD_MAX, MAX_DB_AGG_CMD_SEL }; /* Enum of doorbell destination */ enum db_dest { DB_DEST_XCM, DB_DEST_UCM, DB_DEST_TCM, DB_NUM_DESTINATIONS, MAX_DB_DEST }; /* Enum of doorbell DPM types */ enum db_dpm_type { DPM_LEGACY, DPM_RDMA, DPM_L2_INLINE, DPM_L2_BD, MAX_DB_DPM_TYPE }; /* Structure for doorbell data, in L2 DPM mode, for 1st db in a DPM burst */ struct db_l2_dpm_data { __le16 icid; __le16 bd_prod; __le32 params; #define DB_L2_DPM_DATA_SIZE_MASK 0x3F #define DB_L2_DPM_DATA_SIZE_SHIFT 0 #define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3 #define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6 #define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF #define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8 #define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF #define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16 #define DB_L2_DPM_DATA_RESERVED0_MASK 0x1 #define DB_L2_DPM_DATA_RESERVED0_SHIFT 27 #define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7 #define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28 #define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1 #define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31 }; /* Structure for SGE in a DPM doorbell of type DPM_L2_BD */ struct db_l2_dpm_sge { struct regpair addr; __le16 nbytes; __le16 bitfields; #define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF #define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 #define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 #define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 #define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 #define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 #define DB_L2_DPM_SGE_RESERVED1_MASK 0xF #define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 __le32 reserved2; }; /* Structure for doorbell address, in legacy mode */ struct db_legacy_addr { __le32 addr; #define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 #define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 #define DB_LEGACY_ADDR_DEMS_MASK 0x7 #define DB_LEGACY_ADDR_DEMS_SHIFT 2 #define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF #define DB_LEGACY_ADDR_ICID_SHIFT 5 }; /* Structure for doorbell address, in PWM mode */ struct db_pwm_addr { __le32 addr; #define DB_PWM_ADDR_RESERVED0_MASK 0x7 #define DB_PWM_ADDR_RESERVED0_SHIFT 0 #define DB_PWM_ADDR_OFFSET_MASK 0x7F #define DB_PWM_ADDR_OFFSET_SHIFT 3 #define DB_PWM_ADDR_WID_MASK 0x3 #define DB_PWM_ADDR_WID_SHIFT 10 #define DB_PWM_ADDR_DPI_MASK 0xFFFF #define DB_PWM_ADDR_DPI_SHIFT 12 #define DB_PWM_ADDR_RESERVED1_MASK 0xF #define DB_PWM_ADDR_RESERVED1_SHIFT 28 }; /* Parameters to RDMA firmware, passed in EDPM doorbell */ struct db_rdma_dpm_params { __le32 params; #define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F #define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 #define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 #define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 #define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF #define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 #define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF #define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 #define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 #define 
DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT		27
#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK		0x1
#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT		28
#define DB_RDMA_DPM_PARAMS_S_FLG_MASK			0x1
#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT			29
#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK		0x1
#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT		30
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK	0x1
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT	31
};

/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
 * DPM burst.
 */
struct db_rdma_dpm_data {
	__le16 icid;
	__le16 prod_val;
	struct db_rdma_dpm_params params;
};

/* Igu interrupt command */
enum igu_int_cmd {
	IGU_INT_ENABLE = 0,
	IGU_INT_DISABLE = 1,
	IGU_INT_NOP = 2,
	IGU_INT_NOP2 = 3,
	MAX_IGU_INT_CMD
};

/* IGU producer or consumer update command */
struct igu_prod_cons_update {
	__le32 sb_id_and_flags;
#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK		0xFFFFFF
#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT		0
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK		0x1
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT		24
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK		0x3
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT		25
#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK	0x1
#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT	27
#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK		0x1
#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT		28
#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK		0x3
#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT		29
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK		0x1
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT		31
	__le32 reserved1;
};

/* Igu segments access for default status block only */
enum igu_seg_access {
	IGU_SEG_ACCESS_REG = 0,
	IGU_SEG_ACCESS_ATTN = 1,
	MAX_IGU_SEG_ACCESS
};

/* Enumeration for L3 type field of parsing_and_err_flags.
 * L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6
 * (This field can be filled according to the last-ethertype)
 */
enum l3_type {
	e_l3_type_unknown,
	e_l3_type_ipv4,
	e_l3_type_ipv6,
	MAX_L3_TYPE
};

/* Enumeration for l4Protocol field of parsing_and_err_flags.
 * L4-protocol: 0 - none, 1 - TCP, 2 - UDP.
 * If the packet is an IPv4 fragment and it is not the first fragment, the
 * protocol-type should be set to none.
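 * For example, a TCP segment carried in a non-first IPv4 fragment is
 * reported as e_l4_protocol_none, because only the first fragment contains
 * the L4 header.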
*/ enum l4_protocol { e_l4_protocol_none, e_l4_protocol_tcp, e_l4_protocol_udp, MAX_L4_PROTOCOL }; /* Parsing and error flags field */ struct parsing_and_err_flags { __le16 flags; #define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 #define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 #define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 #define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 #define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 #define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 #define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 #define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 #define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 #define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 #define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 #define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 #define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 #define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 #define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 #define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 #define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 #define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 #define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 #define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 #define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 #define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 #define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 #define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; /* Parsing error flags bitmap */ struct parsing_err_flags { __le16 flags; #define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 #define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0 #define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1 #define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1 #define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1 #define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2 #define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1 #define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3 #define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1 #define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4 #define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1 #define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5 #define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1 #define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6 #define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1 #define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7 #define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1 #define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8 #define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1 #define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9 #define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1 #define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10 #define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1 #define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11 #define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1 #define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12 #define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1 #define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13 #define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1 #define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14 #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1 #define 
PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 }; /* Pb context */ struct pb_context { __le32 crc[4]; }; /* Concrete Function ID */ struct pxp_concrete_fid { __le16 fid; #define PXP_CONCRETE_FID_PFID_MASK 0xF #define PXP_CONCRETE_FID_PFID_SHIFT 0 #define PXP_CONCRETE_FID_PORT_MASK 0x3 #define PXP_CONCRETE_FID_PORT_SHIFT 4 #define PXP_CONCRETE_FID_PATH_MASK 0x1 #define PXP_CONCRETE_FID_PATH_SHIFT 6 #define PXP_CONCRETE_FID_VFVALID_MASK 0x1 #define PXP_CONCRETE_FID_VFVALID_SHIFT 7 #define PXP_CONCRETE_FID_VFID_MASK 0xFF #define PXP_CONCRETE_FID_VFID_SHIFT 8 }; /* Concrete Function ID */ struct pxp_pretend_concrete_fid { __le16 fid; #define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF #define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 #define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 #define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 #define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 #define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 #define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF #define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 }; /* Function ID */ union pxp_pretend_fid { struct pxp_pretend_concrete_fid concrete_fid; __le16 opaque_fid; }; /* Pxp Pretend Command Register */ struct pxp_pretend_cmd { union pxp_pretend_fid fid; __le16 control; #define PXP_PRETEND_CMD_PATH_MASK 0x1 #define PXP_PRETEND_CMD_PATH_SHIFT 0 #define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 #define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 #define PXP_PRETEND_CMD_PORT_MASK 0x3 #define PXP_PRETEND_CMD_PORT_SHIFT 2 #define PXP_PRETEND_CMD_RESERVED0_MASK 0xF #define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 #define PXP_PRETEND_CMD_RESERVED1_MASK 0xF #define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 #define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 #define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 #define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 #define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 #define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 #define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 #define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 #define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 }; /* PTT Record in PXP Admin Window */ struct pxp_ptt_entry { __le32 offset; #define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF #define PXP_PTT_ENTRY_OFFSET_SHIFT 0 #define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF #define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 struct pxp_pretend_cmd pretend; }; /* VF Zone A Permission Register */ struct pxp_vf_zone_a_permission { __le32 control; #define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF #define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 #define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 #define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 #define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F #define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 #define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF #define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 }; /* Rdif context */ struct rdif_task_context { __le32 initial_ref_tag; __le16 app_tag_value; __le16 app_tag_mask; u8 flags0; #define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 #define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 #define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 #define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 #define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 #define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 #define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 #define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 #define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 #define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 #define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 #define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 #define 
RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 #define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7 u8 partial_dif_data[7]; __le16 partial_crc_value; __le16 partial_checksum_value; __le32 offset_in_io; __le16 flags1; #define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 #define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 #define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 #define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 #define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 #define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 #define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 #define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 #define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 #define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 #define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 #define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 #define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 #define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 #define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 #define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 #define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 #define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 __le16 state; #define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF #define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0 #define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF #define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4 #define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1 #define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8 #define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1 #define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9 #define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF #define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10 #define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 #define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 __le32 reserved2; }; /* Status block structure */ struct status_block_e4 { __le16 pi_array[PIS_PER_SB_E4]; __le32 sb_num; #define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF #define STATUS_BLOCK_E4_SB_NUM_SHIFT 0 #define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F #define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9 #define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF #define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16 __le32 prod_index; #define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF #define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0 #define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF #define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24 }; /* Tdif context */ struct tdif_task_context { __le32 initial_ref_tag; __le16 app_tag_value; __le16 app_tag_mask; __le16 partial_crc_value_b; __le16 partial_checksum_value_b; __le16 stateB; #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0 #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4 #define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1 #define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8 #define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1 #define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9 #define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F #define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 u8 reserved1; u8 flags0; #define 
TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 #define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 #define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 #define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 #define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 #define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 #define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 #define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 #define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 #define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 #define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 #define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 #define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 #define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 __le32 flags1; #define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 #define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 #define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 #define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 #define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 #define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 #define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 #define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 #define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 #define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 #define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 #define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 #define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 #define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 #define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 #define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 #define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 #define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14 #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18 #define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1 #define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22 #define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1 #define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23 #define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF #define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24 #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28 #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29 #define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 #define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30 #define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 #define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 __le32 offset_in_io_b; __le16 partial_crc_value_a; __le16 partial_checksum_value_a; __le32 offset_in_io_a; u8 partial_dif_data_a[8]; u8 partial_dif_data_b[8]; }; /* Timers context */ struct timers_context { __le32 logical_client_0; #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 #define TIMERS_CONTEXT_RESERVED0_MASK 0x1 #define TIMERS_CONTEXT_RESERVED0_SHIFT 27 #define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 #define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 #define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 #define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 #define TIMERS_CONTEXT_RESERVED1_MASK 0x3 #define TIMERS_CONTEXT_RESERVED1_SHIFT 30 __le32 logical_client_1; #define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF 
#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 #define TIMERS_CONTEXT_RESERVED2_MASK 0x1 #define TIMERS_CONTEXT_RESERVED2_SHIFT 27 #define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 #define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 #define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 #define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 #define TIMERS_CONTEXT_RESERVED3_MASK 0x3 #define TIMERS_CONTEXT_RESERVED3_SHIFT 30 __le32 logical_client_2; #define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF #define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 #define TIMERS_CONTEXT_RESERVED4_MASK 0x1 #define TIMERS_CONTEXT_RESERVED4_SHIFT 27 #define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 #define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 #define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 #define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 #define TIMERS_CONTEXT_RESERVED5_MASK 0x3 #define TIMERS_CONTEXT_RESERVED5_SHIFT 30 __le32 host_expiration_fields; #define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF #define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 #define TIMERS_CONTEXT_RESERVED6_MASK 0x1 #define TIMERS_CONTEXT_RESERVED6_SHIFT 27 #define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 #define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 #define TIMERS_CONTEXT_RESERVED7_MASK 0x7 #define TIMERS_CONTEXT_RESERVED7_SHIFT 29 }; /* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */ enum tunnel_next_protocol { e_unknown = 0, e_l2 = 1, e_ipv4 = 2, e_ipv6 = 3, MAX_TUNNEL_NEXT_PROTOCOL }; #endif /* __COMMON_HSI__ */ #endif qed/storage_common.h 0000644 00000011722 14722070374 0010513 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
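 */

/*
 * Illustrative example (not part of the original header): the MASK/SHIFT
 * pairs defined throughout these files are meant to be used through the
 * SET_FIELD()/GET_FIELD() helpers found later in this archive, in
 * qed/qed_if.h. A minimal sketch of arming logical client 0 of the
 * timers_context defined above; the function name and the 'ticks'
 * argument are illustrative only.
 */
static inline void example_arm_timers_lc0(struct timers_context *p_ctx,
					  u32 ticks)
{
	u32 val = le32_to_cpu(p_ctx->logical_client_0);

	/* SET_FIELD() clears the field, then ORs in the new value */
	SET_FIELD(val, TIMERS_CONTEXT_EXPIRATIONTIMELC0, ticks);
	SET_FIELD(val, TIMERS_CONTEXT_VALIDLC0, 1);
	SET_FIELD(val, TIMERS_CONTEXT_ACTIVELC0, 1);

	p_ctx->logical_client_0 = cpu_to_le32(val);
}

/* storage_common.h resumes below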
*/ #ifndef __STORAGE_COMMON__ #define __STORAGE_COMMON__ /*********************/ /* SCSI CONSTANTS */ /*********************/ #define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2) #define BDQ_NUM_RESOURCES (4) #define BDQ_ID_RQ (0) #define BDQ_ID_IMM_DATA (1) #define BDQ_ID_TQ (2) #define BDQ_NUM_IDS (3) #define SCSI_NUM_SGES_SLOW_SGL_THR 8 #define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15) /* SCSI op codes */ #define SCSI_OPCODE_COMPARE_AND_WRITE (0x89) #define SCSI_OPCODE_READ_10 (0x28) #define SCSI_OPCODE_WRITE_6 (0x0A) #define SCSI_OPCODE_WRITE_10 (0x2A) #define SCSI_OPCODE_WRITE_12 (0xAA) #define SCSI_OPCODE_WRITE_16 (0x8A) #define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E) #define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE) #define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E) /* iSCSI Drv opaque */ struct iscsi_drv_opaque { __le16 reserved_zero[3]; __le16 opaque; }; /* Scsi 2B/8B opaque union */ union scsi_opaque { struct regpair fcoe_opaque; struct iscsi_drv_opaque iscsi_opaque; }; /* SCSI buffer descriptor */ struct scsi_bd { struct regpair address; union scsi_opaque opaque; }; /* Scsi Drv BDQ struct */ struct scsi_bdq_ram_drv_data { __le16 external_producer; __le16 reserved0[3]; }; /* SCSI SGE entry */ struct scsi_sge { struct regpair sge_addr; __le32 sge_len; __le32 reserved; }; /* Cached SGEs section */ struct scsi_cached_sges { struct scsi_sge sge[4]; }; /* Scsi Drv CMDQ struct */ struct scsi_drv_cmdq { __le16 cmdq_cons; __le16 reserved0; __le32 reserved1; }; /* Common SCSI init params passed by driver to FW in function init ramrod */ struct scsi_init_func_params { __le16 num_tasks; u8 log_page_size; u8 debug_mode; u8 reserved2[12]; }; /* SCSI RQ/CQ/CMDQ firmware function init parameters */ struct scsi_init_func_queues { struct regpair glbl_q_params_addr; __le16 rq_buffer_size; __le16 cq_num_entries; __le16 cmdq_num_entries; u8 bdq_resource_id; u8 q_validity; #define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 #define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 #define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 #define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 #define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 #define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 #define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1 #define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3 #define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1 #define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4 #define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7 #define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5 __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS]; u8 num_queues; u8 queue_relative_offset; u8 cq_sb_pi; u8 cmdq_sb_pi; u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; u8 reserved1; struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; __le16 cmdq_xoff_threshold; __le16 bdq_xon_threshold[BDQ_NUM_IDS]; __le16 cmdq_xon_threshold; }; /* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */ struct scsi_ram_per_bdq_resource_drv_data { struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; }; /* SCSI SGL types */ enum scsi_sgl_mode { SCSI_TX_SLOW_SGL, SCSI_FAST_SGL, MAX_SCSI_SGL_MODE }; /* SCSI SGL parameters */ struct scsi_sgl_params { struct regpair sgl_addr; __le32 sgl_total_length; __le32 sge_offset; __le16 sgl_num_sges; u8 sgl_index; u8 reserved; }; /* SCSI terminate connection params */ struct scsi_terminate_extra_params { __le16 unsolicited_cq_count; __le16 cmdq_count; u8 reserved[4]; }; /* SCSI Task Queue Element */ struct scsi_tqe { __le16 itid; }; #endif /* __STORAGE_COMMON__ */ qed/qed_ll2_if.h 
0000644 00000016375 14722070374 0007510 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef _QED_LL2_IF_H #define _QED_LL2_IF_H #include <linux/types.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <linux/version.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/qed/qed_if.h> enum qed_ll2_conn_type { QED_LL2_TYPE_FCOE, QED_LL2_TYPE_ISCSI, QED_LL2_TYPE_TEST, QED_LL2_TYPE_OOO, QED_LL2_TYPE_RESERVED2, QED_LL2_TYPE_ROCE, QED_LL2_TYPE_IWARP, QED_LL2_TYPE_RESERVED3, MAX_QED_LL2_RX_CONN_TYPE }; enum qed_ll2_roce_flavor_type { QED_LL2_ROCE, QED_LL2_RROCE, MAX_QED_LL2_ROCE_FLAVOR_TYPE }; enum qed_ll2_tx_dest { QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ QED_LL2_TX_DEST_DROP, /* Light L2 Drop the TX packet */ QED_LL2_TX_DEST_MAX }; enum qed_ll2_error_handle { QED_LL2_DROP_PACKET, QED_LL2_DO_NOTHING, QED_LL2_ASSERT, }; struct qed_ll2_stats { u64 gsi_invalid_hdr; u64 gsi_invalid_pkt_length; u64 gsi_unsupported_pkt_typ; u64 gsi_crcchksm_error; u64 packet_too_big_discard; u64 no_buff_discard; u64 rcv_ucast_bytes; u64 rcv_mcast_bytes; u64 rcv_bcast_bytes; u64 rcv_ucast_pkts; u64 rcv_mcast_pkts; u64 rcv_bcast_pkts; u64 sent_ucast_bytes; u64 sent_mcast_bytes; u64 sent_bcast_bytes; u64 sent_ucast_pkts; u64 sent_mcast_pkts; u64 sent_bcast_pkts; }; struct qed_ll2_comp_rx_data { void *cookie; dma_addr_t rx_buf_addr; u16 parse_flags; u16 err_flags; u16 vlan; bool b_last_packet; u8 connection_handle; union { u16 packet_length; u16 data_length; } length; u32 opaque_data_0; u32 opaque_data_1; /* GSI only */ u32 src_qp; u16 qp_id; union { u8 placement_offset; u8 data_length_error; } u; }; typedef void (*qed_ll2_complete_rx_packet_cb)(void *cxt, struct qed_ll2_comp_rx_data *data); typedef void (*qed_ll2_release_rx_packet_cb)(void *cxt, u8 connection_handle, void *cookie, dma_addr_t rx_buf_addr, bool b_last_packet); typedef void (*qed_ll2_complete_tx_packet_cb)(void *cxt, u8 connection_handle, void *cookie, dma_addr_t first_frag_addr, bool b_last_fragment, bool b_last_packet); typedef void (*qed_ll2_release_tx_packet_cb)(void *cxt, u8 connection_handle, 
void *cookie, dma_addr_t first_frag_addr, bool b_last_fragment, bool b_last_packet); typedef void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle, u32 opaque_data_0, u32 opaque_data_1); struct qed_ll2_cbs { qed_ll2_complete_rx_packet_cb rx_comp_cb; qed_ll2_release_rx_packet_cb rx_release_cb; qed_ll2_complete_tx_packet_cb tx_comp_cb; qed_ll2_release_tx_packet_cb tx_release_cb; qed_ll2_slowpath_cb slowpath_cb; void *cookie; }; struct qed_ll2_acquire_data_inputs { enum qed_ll2_conn_type conn_type; u16 mtu; u16 rx_num_desc; u16 rx_num_ooo_buffers; u8 rx_drop_ttl0_flg; u8 rx_vlan_removal_en; u16 tx_num_desc; u8 tx_max_bds_per_packet; u8 tx_tc; enum qed_ll2_tx_dest tx_dest; enum qed_ll2_error_handle ai_err_packet_too_big; enum qed_ll2_error_handle ai_err_no_buf; bool secondary_queue; u8 gsi_enable; }; struct qed_ll2_acquire_data { struct qed_ll2_acquire_data_inputs input; const struct qed_ll2_cbs *cbs; /* Output container for LL2 connection's handle */ u8 *p_connection_handle; }; struct qed_ll2_tx_pkt_info { void *cookie; dma_addr_t first_frag; enum qed_ll2_tx_dest tx_dest; enum qed_ll2_roce_flavor_type qed_roce_flavor; u16 vlan; u16 l4_hdr_offset_w; /* from start of packet */ u16 first_frag_len; u8 num_of_bds; u8 bd_flags; bool enable_ip_cksum; bool enable_l4_cksum; bool calc_ip_len; bool remove_stag; }; #define QED_LL2_UNUSED_HANDLE (0xff) struct qed_ll2_cb_ops { int (*rx_cb)(void *, struct sk_buff *, u32, u32); int (*tx_cb)(void *, struct sk_buff *, bool); }; struct qed_ll2_params { u16 mtu; bool drop_ttl0_packets; bool rx_vlan_stripping; u8 tx_tc; bool frags_mapped; u8 ll2_mac_address[ETH_ALEN]; }; enum qed_ll2_xmit_flags { /* FIP discovery packet */ QED_LL2_XMIT_FLAGS_FIP_DISCOVERY }; struct qed_ll2_ops { /** * @brief start - initializes ll2 * * @param cdev * @param params - protocol driver configuration for the ll2. * * @return 0 on success, otherwise error value. */ int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params); /** * @brief stop - stops the ll2 * * @param cdev * * @return 0 on success, otherwise error value. */ int (*stop)(struct qed_dev *cdev); /** * @brief start_xmit - transmits an skb over the ll2 interface * * @param cdev * @param skb * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags. * * @return 0 on success, otherwise error value. */ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb, unsigned long xmit_flags); /** * @brief register_cb_ops - protocol driver register the callback for Rx/Tx * packets. Should be called before `start'. * * @param cdev * @param cookie - to be passed to the callback functions. * @param ops - the callback functions to register for Rx / Tx. * * @return 0 on success, otherwise error value. */ void (*register_cb_ops)(struct qed_dev *cdev, const struct qed_ll2_cb_ops *ops, void *cookie); /** * @brief get LL2 related statistics * * @param cdev * @param stats - pointer to struct that would be filled with stats * * @return 0 on success, error otherwise. 
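 *
 * A minimal usage sketch (illustrative; 'ops' points at the qed_ll2_ops
 * table and 'cdev' is the caller's device):
 *
 *	struct qed_ll2_stats stats;
 *
 *	if (!ops->get_stats(cdev, &stats))
 *		pr_info("ll2 rx ucast pkts: %llu\n", stats.rcv_ucast_pkts);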
*/ int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats); }; #ifdef CONFIG_QED_LL2 int qed_ll2_alloc_if(struct qed_dev *); void qed_ll2_dealloc_if(struct qed_dev *); #else static const struct qed_ll2_ops qed_ll2_ops_pass = { .start = NULL, .stop = NULL, .start_xmit = NULL, .register_cb_ops = NULL, .get_stats = NULL, }; static inline int qed_ll2_alloc_if(struct qed_dev *cdev) { return 0; } static inline void qed_ll2_dealloc_if(struct qed_dev *cdev) { } #endif #endif qed/qed_if.h 0000644 00000104177 14722070374 0006735 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
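 */

/*
 * Illustrative example (not part of the original header): filling the
 * qed_ll2_cbs table and acquire parameters declared in qed_ll2_if.h
 * above. The handler, 'my_ctx' and the numeric values are placeholders;
 * the acquire entry point itself lives in the qed core rather than in
 * these exported headers, so only the parameter setup is sketched.
 */
static void example_ll2_rx_comp(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	/* A real handler would return data->rx_buf_addr to the rx chain */
}

static void example_ll2_fill_acquire(struct qed_ll2_acquire_data *data,
				     struct qed_ll2_cbs *cbs,
				     u8 *p_handle, void *my_ctx)
{
	cbs->rx_comp_cb = example_ll2_rx_comp;
	cbs->cookie = my_ctx;

	data->input.conn_type = QED_LL2_TYPE_TEST;
	data->input.mtu = 1500;
	data->input.rx_num_desc = 32;
	data->input.tx_num_desc = 32;
	data->input.tx_dest = QED_LL2_TX_DEST_NW;
	data->cbs = cbs;
	data->p_connection_handle = p_handle;
}

/* qed_if.h resumes below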
*/ #ifndef _QED_IF_H #define _QED_IF_H #include <linux/types.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <asm/byteorder.h> #include <linux/io.h> #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/qed/common_hsi.h> #include <linux/qed/qed_chain.h> #include <linux/io-64-nonatomic-lo-hi.h> enum dcbx_protocol_type { DCBX_PROTOCOL_ISCSI, DCBX_PROTOCOL_FCOE, DCBX_PROTOCOL_ROCE, DCBX_PROTOCOL_ROCE_V2, DCBX_PROTOCOL_ETH, DCBX_MAX_PROTOCOL_TYPE }; #define QED_ROCE_PROTOCOL_INDEX (3) #define QED_LLDP_CHASSIS_ID_STAT_LEN 4 #define QED_LLDP_PORT_ID_STAT_LEN 4 #define QED_DCBX_MAX_APP_PROTOCOL 32 #define QED_MAX_PFC_PRIORITIES 8 #define QED_DCBX_DSCP_SIZE 64 struct qed_dcbx_lldp_remote { u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN]; u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN]; bool enable_rx; bool enable_tx; u32 tx_interval; u32 max_credit; }; struct qed_dcbx_lldp_local { u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN]; u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN]; }; struct qed_dcbx_app_prio { u8 roce; u8 roce_v2; u8 fcoe; u8 iscsi; u8 eth; }; struct qed_dbcx_pfc_params { bool willing; bool enabled; u8 prio[QED_MAX_PFC_PRIORITIES]; u8 max_tc; }; enum qed_dcbx_sf_ieee_type { QED_DCBX_SF_IEEE_ETHTYPE, QED_DCBX_SF_IEEE_TCP_PORT, QED_DCBX_SF_IEEE_UDP_PORT, QED_DCBX_SF_IEEE_TCP_UDP_PORT }; struct qed_app_entry { bool ethtype; enum qed_dcbx_sf_ieee_type sf_ieee; bool enabled; u8 prio; u16 proto_id; enum dcbx_protocol_type proto_type; }; struct qed_dcbx_params { struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL]; u16 num_app_entries; bool app_willing; bool app_valid; bool app_error; bool ets_willing; bool ets_enabled; bool ets_cbs; bool valid; u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES]; u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES]; u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES]; struct qed_dbcx_pfc_params pfc; u8 max_ets_tc; }; struct qed_dcbx_admin_params { struct qed_dcbx_params params; bool valid; }; struct qed_dcbx_remote_params { struct qed_dcbx_params params; bool valid; }; struct qed_dcbx_operational_params { struct qed_dcbx_app_prio app_prio; struct qed_dcbx_params params; bool valid; bool enabled; bool ieee; bool cee; bool local; u32 err; }; struct qed_dcbx_get { struct qed_dcbx_operational_params operational; struct qed_dcbx_lldp_remote lldp_remote; struct qed_dcbx_lldp_local lldp_local; struct qed_dcbx_remote_params remote; struct qed_dcbx_admin_params local; }; enum qed_nvm_images { QED_NVM_IMAGE_ISCSI_CFG, QED_NVM_IMAGE_FCOE_CFG, QED_NVM_IMAGE_NVM_CFG1, QED_NVM_IMAGE_DEFAULT_CFG, QED_NVM_IMAGE_NVM_META, }; struct qed_link_eee_params { u32 tx_lpi_timer; #define QED_EEE_1G_ADV BIT(0) #define QED_EEE_10G_ADV BIT(1) /* Capabilities are represented using QED_EEE_*_ADV values */ u8 adv_caps; u8 lp_adv_caps; bool enable; bool tx_lpi_enable; }; enum qed_led_mode { QED_LED_MODE_OFF, QED_LED_MODE_ON, QED_LED_MODE_RESTORE }; struct qed_mfw_tlv_eth { u16 lso_maxoff_size; bool lso_maxoff_size_set; u16 lso_minseg_size; bool lso_minseg_size_set; u8 prom_mode; bool prom_mode_set; u16 tx_descr_size; bool tx_descr_size_set; u16 rx_descr_size; bool rx_descr_size_set; u16 netq_count; bool netq_count_set; u32 tcp4_offloads; bool tcp4_offloads_set; u32 tcp6_offloads; bool tcp6_offloads_set; u16 tx_descr_qdepth; bool tx_descr_qdepth_set; u16 rx_descr_qdepth; bool rx_descr_qdepth_set; u8 iov_offload; #define QED_MFW_TLV_IOV_OFFLOAD_NONE (0) #define 
QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1) #define QED_MFW_TLV_IOV_OFFLOAD_VEB (2) #define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3) bool iov_offload_set; u8 txqs_empty; bool txqs_empty_set; u8 rxqs_empty; bool rxqs_empty_set; u8 num_txqs_full; bool num_txqs_full_set; u8 num_rxqs_full; bool num_rxqs_full_set; }; #define QED_MFW_TLV_TIME_SIZE 14 struct qed_mfw_tlv_time { bool b_set; u8 month; u8 day; u8 hour; u8 min; u16 msec; u16 usec; }; struct qed_mfw_tlv_fcoe { u8 scsi_timeout; bool scsi_timeout_set; u32 rt_tov; bool rt_tov_set; u32 ra_tov; bool ra_tov_set; u32 ed_tov; bool ed_tov_set; u32 cr_tov; bool cr_tov_set; u8 boot_type; bool boot_type_set; u8 npiv_state; bool npiv_state_set; u32 num_npiv_ids; bool num_npiv_ids_set; u8 switch_name[8]; bool switch_name_set; u16 switch_portnum; bool switch_portnum_set; u8 switch_portid[3]; bool switch_portid_set; u8 vendor_name[8]; bool vendor_name_set; u8 switch_model[8]; bool switch_model_set; u8 switch_fw_version[8]; bool switch_fw_version_set; u8 qos_pri; bool qos_pri_set; u8 port_alias[3]; bool port_alias_set; u8 port_state; #define QED_MFW_TLV_PORT_STATE_OFFLINE (0) #define QED_MFW_TLV_PORT_STATE_LOOP (1) #define QED_MFW_TLV_PORT_STATE_P2P (2) #define QED_MFW_TLV_PORT_STATE_FABRIC (3) bool port_state_set; u16 fip_tx_descr_size; bool fip_tx_descr_size_set; u16 fip_rx_descr_size; bool fip_rx_descr_size_set; u16 link_failures; bool link_failures_set; u8 fcoe_boot_progress; bool fcoe_boot_progress_set; u64 rx_bcast; bool rx_bcast_set; u64 tx_bcast; bool tx_bcast_set; u16 fcoe_txq_depth; bool fcoe_txq_depth_set; u16 fcoe_rxq_depth; bool fcoe_rxq_depth_set; u64 fcoe_rx_frames; bool fcoe_rx_frames_set; u64 fcoe_rx_bytes; bool fcoe_rx_bytes_set; u64 fcoe_tx_frames; bool fcoe_tx_frames_set; u64 fcoe_tx_bytes; bool fcoe_tx_bytes_set; u16 crc_count; bool crc_count_set; u32 crc_err_src_fcid[5]; bool crc_err_src_fcid_set[5]; struct qed_mfw_tlv_time crc_err[5]; u16 losync_err; bool losync_err_set; u16 losig_err; bool losig_err_set; u16 primtive_err; bool primtive_err_set; u16 disparity_err; bool disparity_err_set; u16 code_violation_err; bool code_violation_err_set; u32 flogi_param[4]; bool flogi_param_set[4]; struct qed_mfw_tlv_time flogi_tstamp; u32 flogi_acc_param[4]; bool flogi_acc_param_set[4]; struct qed_mfw_tlv_time flogi_acc_tstamp; u32 flogi_rjt; bool flogi_rjt_set; struct qed_mfw_tlv_time flogi_rjt_tstamp; u32 fdiscs; bool fdiscs_set; u8 fdisc_acc; bool fdisc_acc_set; u8 fdisc_rjt; bool fdisc_rjt_set; u8 plogi; bool plogi_set; u8 plogi_acc; bool plogi_acc_set; u8 plogi_rjt; bool plogi_rjt_set; u32 plogi_dst_fcid[5]; bool plogi_dst_fcid_set[5]; struct qed_mfw_tlv_time plogi_tstamp[5]; u32 plogi_acc_src_fcid[5]; bool plogi_acc_src_fcid_set[5]; struct qed_mfw_tlv_time plogi_acc_tstamp[5]; u8 tx_plogos; bool tx_plogos_set; u8 plogo_acc; bool plogo_acc_set; u8 plogo_rjt; bool plogo_rjt_set; u32 plogo_src_fcid[5]; bool plogo_src_fcid_set[5]; struct qed_mfw_tlv_time plogo_tstamp[5]; u8 rx_logos; bool rx_logos_set; u8 tx_accs; bool tx_accs_set; u8 tx_prlis; bool tx_prlis_set; u8 rx_accs; bool rx_accs_set; u8 tx_abts; bool tx_abts_set; u8 rx_abts_acc; bool rx_abts_acc_set; u8 rx_abts_rjt; bool rx_abts_rjt_set; u32 abts_dst_fcid[5]; bool abts_dst_fcid_set[5]; struct qed_mfw_tlv_time abts_tstamp[5]; u8 rx_rscn; bool rx_rscn_set; u32 rx_rscn_nport[4]; bool rx_rscn_nport_set[4]; u8 tx_lun_rst; bool tx_lun_rst_set; u8 abort_task_sets; bool abort_task_sets_set; u8 tx_tprlos; bool tx_tprlos_set; u8 tx_nos; bool tx_nos_set; u8 rx_nos; bool rx_nos_set; u8 ols; bool ols_set; u8 
lr; bool lr_set; u8 lrr; bool lrr_set; u8 tx_lip; bool tx_lip_set; u8 rx_lip; bool rx_lip_set; u8 eofa; bool eofa_set; u8 eofni; bool eofni_set; u8 scsi_chks; bool scsi_chks_set; u8 scsi_cond_met; bool scsi_cond_met_set; u8 scsi_busy; bool scsi_busy_set; u8 scsi_inter; bool scsi_inter_set; u8 scsi_inter_cond_met; bool scsi_inter_cond_met_set; u8 scsi_rsv_conflicts; bool scsi_rsv_conflicts_set; u8 scsi_tsk_full; bool scsi_tsk_full_set; u8 scsi_aca_active; bool scsi_aca_active_set; u8 scsi_tsk_abort; bool scsi_tsk_abort_set; u32 scsi_rx_chk[5]; bool scsi_rx_chk_set[5]; struct qed_mfw_tlv_time scsi_chk_tstamp[5]; }; struct qed_mfw_tlv_iscsi { u8 target_llmnr; bool target_llmnr_set; u8 header_digest; bool header_digest_set; u8 data_digest; bool data_digest_set; u8 auth_method; #define QED_MFW_TLV_AUTH_METHOD_NONE (1) #define QED_MFW_TLV_AUTH_METHOD_CHAP (2) #define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3) bool auth_method_set; u16 boot_taget_portal; bool boot_taget_portal_set; u16 frame_size; bool frame_size_set; u16 tx_desc_size; bool tx_desc_size_set; u16 rx_desc_size; bool rx_desc_size_set; u8 boot_progress; bool boot_progress_set; u16 tx_desc_qdepth; bool tx_desc_qdepth_set; u16 rx_desc_qdepth; bool rx_desc_qdepth_set; u64 rx_frames; bool rx_frames_set; u64 rx_bytes; bool rx_bytes_set; u64 tx_frames; bool tx_frames_set; u64 tx_bytes; bool tx_bytes_set; }; enum qed_db_rec_width { DB_REC_WIDTH_32B, DB_REC_WIDTH_64B, }; enum qed_db_rec_space { DB_REC_KERNEL, DB_REC_USER, }; #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) #define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \ (void __iomem *)(reg_addr)) #define QED_COALESCE_MAX 0x1FF #define QED_DEFAULT_RX_USECS 12 #define QED_DEFAULT_TX_USECS 48 /* forward */ struct qed_dev; struct qed_eth_pf_params { /* The following parameters are used during HW-init * and these parameters need to be passed as arguments * to update_pf_params routine invoked before slowpath start */ u16 num_cons; /* per-VF number of CIDs */ u8 num_vf_cons; #define ETH_PF_PARAMS_VF_CONS_DEFAULT (32) /* To enable aRFS, a positive number needs to be set prior to HW-init * [as filters require allocated searcher ILT memory]. * This sets the maximal number of configured steering-filters.
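 *
 * For example (illustrative), a driver wanting up to 256 aRFS flows
 * would set, before slowpath_start:
 *
 *	pf_params.eth_pf_params.num_arfs_filters = 256;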
*/ u32 num_arfs_filters; }; struct qed_fcoe_pf_params { /* The following parameters are used during protocol-init */ u64 glbl_q_params_addr; u64 bdq_pbl_base_addr[2]; /* The following parameters are used during HW-init * and these parameters need to be passed as arguments * to update_pf_params routine invoked before slowpath start */ u16 num_cons; u16 num_tasks; /* The following parameters are used during protocol-init */ u16 sq_num_pbl_pages; u16 cq_num_entries; u16 cmdq_num_entries; u16 rq_buffer_log_size; u16 mtu; u16 dummy_icid; u16 bdq_xoff_threshold[2]; u16 bdq_xon_threshold[2]; u16 rq_buffer_size; u8 num_cqs; /* num of global CQs */ u8 log_page_size; u8 gl_rq_pi; u8 gl_cmd_pi; u8 debug_mode; u8 is_target; u8 bdq_pbl_num_entries[2]; }; /* Most of the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; u64 bdq_pbl_base_addr[3]; u16 cq_num_entries; u16 cmdq_num_entries; u32 two_msl_timer; u16 tx_sws_timer; /* The following parameters are used during HW-init * and these parameters need to be passed as arguments * to update_pf_params routine invoked before slowpath start */ u16 num_cons; u16 num_tasks; /* The following parameters are used during protocol-init */ u16 half_way_close_timeout; u16 bdq_xoff_threshold[3]; u16 bdq_xon_threshold[3]; u16 cmdq_xoff_threshold; u16 cmdq_xon_threshold; u16 rq_buffer_size; u8 num_sq_pages_in_ring; u8 num_r2tq_pages_in_ring; u8 num_uhq_pages_in_ring; u8 num_queues; u8 log_page_size; u8 rqe_log_size; u8 max_fin_rt; u8 gl_rq_pi; u8 gl_cmd_pi; u8 debug_mode; u8 ll2_ooo_queue_id; u8 is_target; u8 is_soc_en; u8 soc_num_of_blocks_log; u8 bdq_pbl_num_entries[3]; }; struct qed_rdma_pf_params { /* Supplied to QED during resource allocation (may affect the ILT and * the doorbell BAR).
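 *
 * For example (illustrative), an RDMA-capable driver might request:
 *
 *	pf_params.rdma_pf_params.num_qps = 1024;
 *	pf_params.rdma_pf_params.min_dpis = 8;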
*/ u32 min_dpis; /* number of requested DPIs */ u32 num_qps; /* number of requested Queue Pairs */ u32 num_srqs; /* number of requested SRQ */ u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */ u8 gl_pi; /* protocol index */ /* Will allocate rate limiters to be used with QPs */ u8 enable_dcqcn; }; struct qed_pf_params { struct qed_eth_pf_params eth_pf_params; struct qed_fcoe_pf_params fcoe_pf_params; struct qed_iscsi_pf_params iscsi_pf_params; struct qed_rdma_pf_params rdma_pf_params; }; enum qed_int_mode { QED_INT_MODE_INTA, QED_INT_MODE_MSIX, QED_INT_MODE_MSI, QED_INT_MODE_POLL, }; struct qed_sb_info { struct status_block_e4 *sb_virt; dma_addr_t sb_phys; u32 sb_ack; /* Last given ack */ u16 igu_sb_id; void __iomem *igu_addr; u8 flags; #define QED_SB_INFO_INIT 0x1 #define QED_SB_INFO_SETUP 0x2 struct qed_dev *cdev; }; enum qed_dev_type { QED_DEV_TYPE_BB, QED_DEV_TYPE_AH, }; struct qed_dev_info { unsigned long pci_mem_start; unsigned long pci_mem_end; unsigned int pci_irq; u8 num_hwfns; u8 hw_mac[ETH_ALEN]; /* FW version */ u16 fw_major; u16 fw_minor; u16 fw_rev; u16 fw_eng; /* MFW version */ u32 mfw_rev; #define QED_MFW_VERSION_0_MASK 0x000000FF #define QED_MFW_VERSION_0_OFFSET 0 #define QED_MFW_VERSION_1_MASK 0x0000FF00 #define QED_MFW_VERSION_1_OFFSET 8 #define QED_MFW_VERSION_2_MASK 0x00FF0000 #define QED_MFW_VERSION_2_OFFSET 16 #define QED_MFW_VERSION_3_MASK 0xFF000000 #define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; bool b_arfs_capable; bool b_inter_pf_switch; bool tx_switching; bool rdma_supported; u16 mtu; bool wol_support; bool smart_an; /* MBI version */ u32 mbi_version; #define QED_MBI_VERSION_0_MASK 0x000000FF #define QED_MBI_VERSION_0_OFFSET 0 #define QED_MBI_VERSION_1_MASK 0x0000FF00 #define QED_MBI_VERSION_1_OFFSET 8 #define QED_MBI_VERSION_2_MASK 0x00FF0000 #define QED_MBI_VERSION_2_OFFSET 16 enum qed_dev_type dev_type; /* Output parameters for qede */ bool vxlan_enable; bool gre_enable; bool geneve_enable; u8 abs_pf_id; }; enum qed_sb_type { QED_SB_TYPE_L2_QUEUE, QED_SB_TYPE_CNQ, QED_SB_TYPE_STORAGE, }; enum qed_protocol { QED_PROTOCOL_ETH, QED_PROTOCOL_ISCSI, QED_PROTOCOL_FCOE, }; enum qed_link_mode_bits { QED_LM_FIBRE_BIT = BIT(0), QED_LM_Autoneg_BIT = BIT(1), QED_LM_Asym_Pause_BIT = BIT(2), QED_LM_Pause_BIT = BIT(3), QED_LM_1000baseT_Full_BIT = BIT(4), QED_LM_10000baseT_Full_BIT = BIT(5), QED_LM_10000baseKR_Full_BIT = BIT(6), QED_LM_20000baseKR2_Full_BIT = BIT(7), QED_LM_25000baseKR_Full_BIT = BIT(8), QED_LM_40000baseLR4_Full_BIT = BIT(9), QED_LM_50000baseKR2_Full_BIT = BIT(10), QED_LM_100000baseKR4_Full_BIT = BIT(11), QED_LM_TP_BIT = BIT(12), QED_LM_Backplane_BIT = BIT(13), QED_LM_1000baseKX_Full_BIT = BIT(14), QED_LM_10000baseKX4_Full_BIT = BIT(15), QED_LM_10000baseR_FEC_BIT = BIT(16), QED_LM_40000baseKR4_Full_BIT = BIT(17), QED_LM_40000baseCR4_Full_BIT = BIT(18), QED_LM_40000baseSR4_Full_BIT = BIT(19), QED_LM_25000baseCR_Full_BIT = BIT(20), QED_LM_25000baseSR_Full_BIT = BIT(21), QED_LM_50000baseCR2_Full_BIT = BIT(22), QED_LM_100000baseSR4_Full_BIT = BIT(23), QED_LM_100000baseCR4_Full_BIT = BIT(24), QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25), QED_LM_50000baseSR2_Full_BIT = BIT(26), QED_LM_1000baseX_Full_BIT = BIT(27), QED_LM_10000baseCR_Full_BIT = BIT(28), QED_LM_10000baseSR_Full_BIT = BIT(29), QED_LM_10000baseLR_Full_BIT = BIT(30), QED_LM_10000baseLRM_Full_BIT = BIT(31), QED_LM_COUNT = 32 }; struct qed_link_params { bool link_up; #define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) #define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) #define 
QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) #define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) #define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) #define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) u32 override_flags; bool autoneg; u32 adv_speeds; u32 forced_speed; #define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) #define QED_LINK_PAUSE_RX_ENABLE BIT(1) #define QED_LINK_PAUSE_TX_ENABLE BIT(2) u32 pause_config; #define QED_LINK_LOOPBACK_NONE BIT(0) #define QED_LINK_LOOPBACK_INT_PHY BIT(1) #define QED_LINK_LOOPBACK_EXT_PHY BIT(2) #define QED_LINK_LOOPBACK_EXT BIT(3) #define QED_LINK_LOOPBACK_MAC BIT(4) u32 loopback_mode; struct qed_link_eee_params eee; }; struct qed_link_output { bool link_up; /* In QED_LM_* defs */ u32 supported_caps; u32 advertised_caps; u32 lp_caps; u32 speed; /* In Mb/s */ u8 duplex; /* In DUPLEX defs */ u8 port; /* In PORT defs */ bool autoneg; u32 pause_config; /* EEE - capability & param */ bool eee_supported; bool eee_active; u8 sup_caps; struct qed_link_eee_params eee; }; struct qed_probe_params { enum qed_protocol protocol; u32 dp_module; u8 dp_level; bool is_vf; bool recov_in_prog; }; #define QED_DRV_VER_STR_SIZE 12 struct qed_slowpath_params { u32 int_mode; u8 drv_major; u8 drv_minor; u8 drv_rev; u8 drv_eng; u8 name[QED_DRV_VER_STR_SIZE]; }; #define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */ struct qed_int_info { struct msix_entry *msix; u8 msix_cnt; /* This should be updated by the protocol driver */ u8 used_cnt; }; struct qed_generic_tlvs { #define QED_TLV_IP_CSUM BIT(0) #define QED_TLV_LSO BIT(1) u16 feat_flags; #define QED_TLV_MAC_COUNT 3 u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN]; }; #define QED_I2C_DEV_ADDR_A0 0xA0 #define QED_I2C_DEV_ADDR_A2 0xA2 #define QED_NVM_SIGNATURE 0x12435687 enum qed_nvm_flash_cmd { QED_NVM_FLASH_CMD_FILE_DATA = 0x2, QED_NVM_FLASH_CMD_FILE_START = 0x3, QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4, QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5, QED_NVM_FLASH_CMD_NVM_MAX, }; struct qed_common_cb_ops { void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); void (*link_update)(void *dev, struct qed_link_output *link); void (*schedule_recovery_handler)(void *dev); void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); void (*get_protocol_tlv_data)(void *dev, void *data); }; struct qed_selftest_ops { /** * @brief selftest_interrupt - Perform interrupt test * * @param cdev * * @return 0 on success, error otherwise. */ int (*selftest_interrupt)(struct qed_dev *cdev); /** * @brief selftest_memory - Perform memory test * * @param cdev * * @return 0 on success, error otherwise. */ int (*selftest_memory)(struct qed_dev *cdev); /** * @brief selftest_register - Perform register test * * @param cdev * * @return 0 on success, error otherwise. */ int (*selftest_register)(struct qed_dev *cdev); /** * @brief selftest_clock - Perform clock test * * @param cdev * * @return 0 on success, error otherwise. */ int (*selftest_clock)(struct qed_dev *cdev); /** * @brief selftest_nvram - Perform nvram test * * @param cdev * * @return 0 on success, error otherwise. */ int (*selftest_nvram) (struct qed_dev *cdev); }; struct qed_common_ops { struct qed_selftest_ops *selftest; struct qed_dev* (*probe)(struct pci_dev *dev, struct qed_probe_params *params); void (*remove)(struct qed_dev *cdev); int (*set_power_state)(struct qed_dev *cdev, pci_power_t state); void (*set_name) (struct qed_dev *cdev, char name[]); /* Client drivers need to make this call before slowpath_start. 
* PF params required for the call before slowpath_start is * documented within the qed_pf_params structure definition. */ void (*update_pf_params)(struct qed_dev *cdev, struct qed_pf_params *params); int (*slowpath_start)(struct qed_dev *cdev, struct qed_slowpath_params *params); int (*slowpath_stop)(struct qed_dev *cdev); /* Requests to use `cnt' interrupts for fastpath. * upon success, returns number of interrupts allocated for fastpath. */ int (*set_fp_int)(struct qed_dev *cdev, u16 cnt); /* Fills `info' with pointers required for utilizing interrupts */ int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info); u32 (*sb_init)(struct qed_dev *cdev, struct qed_sb_info *sb_info, void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id, enum qed_sb_type type); u32 (*sb_release)(struct qed_dev *cdev, struct qed_sb_info *sb_info, u16 sb_id, enum qed_sb_type type); void (*simd_handler_config)(struct qed_dev *cdev, void *token, int index, void (*handler)(void *)); void (*simd_handler_clean)(struct qed_dev *cdev, int index); int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); int (*dbg_grc_size)(struct qed_dev *cdev); int (*dbg_all_data) (struct qed_dev *cdev, void *buffer); int (*dbg_all_data_size) (struct qed_dev *cdev); /** * @brief can_link_change - can the instance change the link or not * * @param cdev * * @return true if link-change is allowed, false otherwise. */ bool (*can_link_change)(struct qed_dev *cdev); /** * @brief set_link - set links according to params * * @param cdev * @param params - values used to override the default link configuration * * @return 0 on success, error otherwise. */ int (*set_link)(struct qed_dev *cdev, struct qed_link_params *params); /** * @brief get_link - returns the current link state. * * @param cdev * @param if_link - structure to be filled with current link configuration. */ void (*get_link)(struct qed_dev *cdev, struct qed_link_output *if_link); /** * @brief - drains chip in case Tx completions fail to arrive due to pause. * * @param cdev */ int (*drain)(struct qed_dev *cdev); /** * @brief update_msglvl - update module debug level * * @param cdev * @param dp_module * @param dp_level */ void (*update_msglvl)(struct qed_dev *cdev, u32 dp_module, u8 dp_level); int (*chain_alloc)(struct qed_dev *cdev, enum qed_chain_use_mode intended_use, enum qed_chain_mode mode, enum qed_chain_cnt_type cnt_type, u32 num_elems, size_t elem_size, struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl); void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); /** * @brief nvm_flash - Flash nvm data. * * @param cdev * @param name - file containing the data * * @return 0 on success, error otherwise. */ int (*nvm_flash)(struct qed_dev *cdev, const char *name); /** * @brief nvm_get_image - reads an entire image from nvram * * @param cdev * @param type - type of the request nvram image * @param buf - preallocated buffer to fill with the image * @param len - length of the allocated buffer * * @return 0 on success, error otherwise */ int (*nvm_get_image)(struct qed_dev *cdev, enum qed_nvm_images type, u8 *buf, u16 len); /** * @brief set_coalesce - Configure Rx coalesce value in usec * * @param cdev * @param rx_coal - Rx coalesce value in usec * @param tx_coal - Tx coalesce value in usec * @param qid - Queue index * @param sb_id - Status Block Id * * @return 0 on success, error otherwise. 
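 *
 * Example (illustrative; 'handle' is the rx/tx queue handle returned
 * when the queue was started):
 *
 *	rc = ops->set_coalesce(cdev, QED_DEFAULT_RX_USECS,
 *			       QED_DEFAULT_TX_USECS, handle);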
*/ int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, void *handle); /** * @brief set_led - Configure LED mode * * @param cdev * @param mode - LED mode * * @return 0 on success, error otherwise. */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); /** * @brief db_recovery_add - add doorbell information to the doorbell * recovery mechanism. * * @param cdev * @param db_addr - doorbell address * @param db_data - address of where db_data is stored * @param db_width - doorbell is 32b or 64b * @param db_space - doorbell recovery addresses are user or kernel space */ int (*db_recovery_add)(struct qed_dev *cdev, void __iomem *db_addr, void *db_data, enum qed_db_rec_width db_width, enum qed_db_rec_space db_space); /** * @brief db_recovery_del - remove doorbell information from the doorbell * recovery mechanism. db_data serves as key (db_addr is not unique). * * @param cdev * @param db_addr - doorbell address * @param db_data - address where db_data is stored. Serves as key for the * entry to delete. */ int (*db_recovery_del)(struct qed_dev *cdev, void __iomem *db_addr, void *db_data); /** * @brief recovery_process - Trigger a recovery process * * @param cdev * * @return 0 on success, error otherwise. */ int (*recovery_process)(struct qed_dev *cdev); /** * @brief recovery_prolog - Execute the prolog operations of a recovery process * * @param cdev * * @return 0 on success, error otherwise. */ int (*recovery_prolog)(struct qed_dev *cdev); /** * @brief update_drv_state - API to inform the change in the driver state. * * @param cdev * @param active * */ int (*update_drv_state)(struct qed_dev *cdev, bool active); /** * @brief update_mac - API to inform the change in the mac address * * @param cdev * @param mac * */ int (*update_mac)(struct qed_dev *cdev, u8 *mac); /** * @brief update_mtu - API to inform the change in the mtu * * @param cdev * @param mtu * */ int (*update_mtu)(struct qed_dev *cdev, u16 mtu); /** * @brief update_wol - update of changes in the WoL configuration * * @param cdev * @param enabled - true iff WoL should be enabled. */ int (*update_wol) (struct qed_dev *cdev, bool enabled); /** * @brief read_module_eeprom * * @param cdev * @param buf - buffer * @param dev_addr - PHY device memory region * @param offset - offset into eeprom contents to be read * @param len - buffer length, i.e., max bytes to be read */ int (*read_module_eeprom)(struct qed_dev *cdev, char *buf, u8 dev_addr, u32 offset, u32 len); /** * @brief get_affin_hwfn_idx * * @param cdev */ u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev); /** * @brief read_nvm_cfg - Read NVM config attribute value. * @param cdev * @param buf - buffer * @param cmd - NVM CFG command id * @param entity_id - Entity id * */ int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd, u32 entity_id); /** * @brief read_nvm_cfg_len - Read NVM config attribute length. * @param cdev * @param cmd - NVM CFG command id * * @return config id length, 0 on error. */ int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd); /** * @brief set_grc_config - Configure value for grc config id.
* @param cdev * @param cfg_id - grc config id * @param val - grc config value * */ int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val); }; #define MASK_FIELD(_name, _value) \ ((_value) &= (_name ## _MASK)) #define FIELD_VALUE(_name, _value) \ ((_value & _name ## _MASK) << _name ## _SHIFT) #define SET_FIELD(value, name, flag) \ do { \ (value) &= ~(name ## _MASK << name ## _SHIFT); \ (value) |= (((u64)flag) << (name ## _SHIFT)); \ } while (0) #define GET_FIELD(value, name) \ (((value) >> (name ## _SHIFT)) & name ## _MASK) /* Debug print definitions */ #define DP_ERR(cdev, fmt, ...) \ do { \ pr_err("[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ DP_NAME(cdev) ? DP_NAME(cdev) : "", \ ## __VA_ARGS__); \ } while (0) #define DP_NOTICE(cdev, fmt, ...) \ do { \ if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \ pr_notice("[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ DP_NAME(cdev) ? DP_NAME(cdev) : "", \ ## __VA_ARGS__); \ \ } \ } while (0) #define DP_INFO(cdev, fmt, ...) \ do { \ if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \ pr_notice("[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ DP_NAME(cdev) ? DP_NAME(cdev) : "", \ ## __VA_ARGS__); \ } \ } while (0) #define DP_VERBOSE(cdev, module, fmt, ...) \ do { \ if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \ ((cdev)->dp_module & module))) { \ pr_notice("[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ DP_NAME(cdev) ? DP_NAME(cdev) : "", \ ## __VA_ARGS__); \ } \ } while (0) enum DP_LEVEL { QED_LEVEL_VERBOSE = 0x0, QED_LEVEL_INFO = 0x1, QED_LEVEL_NOTICE = 0x2, QED_LEVEL_ERR = 0x3, }; #define QED_LOG_LEVEL_SHIFT (30) #define QED_LOG_VERBOSE_MASK (0x3fffffff) #define QED_LOG_INFO_MASK (0x40000000) #define QED_LOG_NOTICE_MASK (0x80000000) enum DP_MODULE { QED_MSG_SPQ = 0x10000, QED_MSG_STATS = 0x20000, QED_MSG_DCB = 0x40000, QED_MSG_IOV = 0x80000, QED_MSG_SP = 0x100000, QED_MSG_STORAGE = 0x200000, QED_MSG_CXT = 0x800000, QED_MSG_LL2 = 0x1000000, QED_MSG_ILT = 0x2000000, QED_MSG_RDMA = 0x4000000, QED_MSG_DEBUG = 0x8000000, /* to be added...up to 0x8000000 */ }; enum qed_mf_mode { QED_MF_DEFAULT, QED_MF_OVLAN, QED_MF_NPAR, }; struct qed_eth_stats_common { u64 no_buff_discards; u64 packet_too_big_discard; u64 ttl0_discard; u64 rx_ucast_bytes; u64 rx_mcast_bytes; u64 rx_bcast_bytes; u64 rx_ucast_pkts; u64 rx_mcast_pkts; u64 rx_bcast_pkts; u64 mftag_filter_discards; u64 mac_filter_discards; u64 gft_filter_drop; u64 tx_ucast_bytes; u64 tx_mcast_bytes; u64 tx_bcast_bytes; u64 tx_ucast_pkts; u64 tx_mcast_pkts; u64 tx_bcast_pkts; u64 tx_err_drop_pkts; u64 tpa_coalesced_pkts; u64 tpa_coalesced_events; u64 tpa_aborts_num; u64 tpa_not_coalesced_pkts; u64 tpa_coalesced_bytes; /* port */ u64 rx_64_byte_packets; u64 rx_65_to_127_byte_packets; u64 rx_128_to_255_byte_packets; u64 rx_256_to_511_byte_packets; u64 rx_512_to_1023_byte_packets; u64 rx_1024_to_1518_byte_packets; u64 rx_crc_errors; u64 rx_mac_crtl_frames; u64 rx_pause_frames; u64 rx_pfc_frames; u64 rx_align_errors; u64 rx_carrier_errors; u64 rx_oversize_packets; u64 rx_jabbers; u64 rx_undersize_packets; u64 rx_fragments; u64 tx_64_byte_packets; u64 tx_65_to_127_byte_packets; u64 tx_128_to_255_byte_packets; u64 tx_256_to_511_byte_packets; u64 tx_512_to_1023_byte_packets; u64 tx_1024_to_1518_byte_packets; u64 tx_pause_frames; u64 tx_pfc_frames; u64 brb_truncates; u64 brb_discards; u64 rx_mac_bytes; u64 rx_mac_uc_packets; u64 rx_mac_mc_packets; u64 rx_mac_bc_packets; u64 rx_mac_frames_ok; u64 tx_mac_bytes; u64 tx_mac_uc_packets; u64 tx_mac_mc_packets; u64 tx_mac_bc_packets; u64 tx_mac_ctrl_frames; u64 
link_change_count; }; struct qed_eth_stats_bb { u64 rx_1519_to_1522_byte_packets; u64 rx_1519_to_2047_byte_packets; u64 rx_2048_to_4095_byte_packets; u64 rx_4096_to_9216_byte_packets; u64 rx_9217_to_16383_byte_packets; u64 tx_1519_to_2047_byte_packets; u64 tx_2048_to_4095_byte_packets; u64 tx_4096_to_9216_byte_packets; u64 tx_9217_to_16383_byte_packets; u64 tx_lpi_entry_count; u64 tx_total_collisions; }; struct qed_eth_stats_ah { u64 rx_1519_to_max_byte_packets; u64 tx_1519_to_max_byte_packets; }; struct qed_eth_stats { struct qed_eth_stats_common common; union { struct qed_eth_stats_bb bb; struct qed_eth_stats_ah ah; }; }; #define QED_SB_IDX 0x0002 #define RX_PI 0 #define TX_PI(tc) (RX_PI + 1 + tc) struct qed_sb_cnt_info { /* Original, current, and free SBs for PF */ int orig; int cnt; int free_cnt; /* Original, current and free SBS for child VFs */ int iov_orig; int iov_cnt; int free_cnt_iov; }; static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) { u32 prod = 0; u16 rc = 0; prod = le32_to_cpu(sb_info->sb_virt->prod_index) & STATUS_BLOCK_E4_PROD_INDEX_MASK; if (sb_info->sb_ack != prod) { sb_info->sb_ack = prod; rc |= QED_SB_IDX; } /* Let SB update */ return rc; } /** * * @brief This function creates an update command for interrupts that is * written to the IGU. * * @param sb_info - This is the structure allocated and * initialized per status block. Assumption is * that it was initialized using qed_sb_init * @param int_cmd - Enable/Disable/Nop * @param upd_flg - whether igu consumer should be * updated. * * @return inline void */ static inline void qed_sb_ack(struct qed_sb_info *sb_info, enum igu_int_cmd int_cmd, u8 upd_flg) { struct igu_prod_cons_update igu_ack = { 0 }; igu_ack.sb_id_and_flags = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags); /* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. */ barrier(); } static inline void __internal_ram_wr(void *p_hwfn, void __iomem *addr, int size, u32 *data) { unsigned int i; for (i = 0; i < size / sizeof(*data); i++) DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]); } static inline void internal_ram_wr(void __iomem *addr, int size, u32 *data) { __internal_ram_wr(NULL, addr, size, data); } enum qed_rss_caps { QED_RSS_IPV4 = 0x1, QED_RSS_IPV6 = 0x2, QED_RSS_IPV4_TCP = 0x4, QED_RSS_IPV6_TCP = 0x8, QED_RSS_IPV4_UDP = 0x10, QED_RSS_IPV6_UDP = 0x20, }; #define QED_RSS_IND_TABLE_SIZE 128 #define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */ #endif qed/roce_common.h 0000644 00000004716 14722070374 0010004 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. 
* * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef __ROCE_COMMON__ #define __ROCE_COMMON__ /************************/ /* ROCE FW CONSTANTS */ /************************/ #define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) #define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) #define ROCE_MAX_QPS (32 * 1024) #define ROCE_DCQCN_NP_MAX_QPS (64) #define ROCE_DCQCN_RP_MAX_QPS (64) #define ROCE_LKEY_MW_DIF_EN_BIT (28) /* Affiliated asynchronous events / errors enumeration */ enum roce_async_events_type { ROCE_ASYNC_EVENT_NONE = 0, ROCE_ASYNC_EVENT_COMM_EST = 1, ROCE_ASYNC_EVENT_SQ_DRAINED, ROCE_ASYNC_EVENT_SRQ_LIMIT, ROCE_ASYNC_EVENT_LAST_WQE_REACHED, ROCE_ASYNC_EVENT_CQ_ERR, ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR, ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR, ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR, ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR, ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR, ROCE_ASYNC_EVENT_SRQ_EMPTY, ROCE_ASYNC_EVENT_DESTROY_QP_DONE, ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR, ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR, ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR, MAX_ROCE_ASYNC_EVENTS_TYPE }; #endif /* __ROCE_COMMON__ */ qed/qed_iscsi_if.h 0000644 00000016221 14722070374 0010117 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
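 */

/*
 * Illustrative example (not part of the original header): the canonical
 * status-block sequence built from the qed_if.h helpers above - snapshot
 * the producer with qed_sb_update_sb_idx(), process completions, then
 * ack and re-enable the interrupt with qed_sb_ack(). The function name
 * is a placeholder; IGU_INT_ENABLE comes from common_hsi.h.
 */
static inline void example_sb_poll_done(struct qed_sb_info *sb_info)
{
	/* Latches the latest prod_index; QED_SB_IDX is set if it moved */
	if (qed_sb_update_sb_idx(sb_info) & QED_SB_IDX) {
		/* ... process completions up to the new producer ... */
	}

	/* Write the ack + interrupt-enable command to the IGU */
	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

/* qed_iscsi_if.h resumes below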
*/ #ifndef _QED_ISCSI_IF_H #define _QED_ISCSI_IF_H #include <linux/types.h> #include <linux/qed/qed_if.h> typedef int (*iscsi_event_cb_t) (void *context, u8 fw_event_code, void *fw_handle); struct qed_iscsi_stats { u64 iscsi_rx_bytes_cnt; u64 iscsi_rx_packet_cnt; u64 iscsi_rx_new_ooo_isle_events_cnt; u32 iscsi_cmdq_threshold_cnt; u32 iscsi_rq_threshold_cnt; u32 iscsi_immq_threshold_cnt; u64 iscsi_rx_dropped_pdus_task_not_valid; u64 iscsi_rx_data_pdu_cnt; u64 iscsi_rx_r2t_pdu_cnt; u64 iscsi_rx_total_pdu_cnt; u64 iscsi_tx_go_to_slow_start_event_cnt; u64 iscsi_tx_fast_retransmit_event_cnt; u64 iscsi_tx_data_pdu_cnt; u64 iscsi_tx_r2t_pdu_cnt; u64 iscsi_tx_total_pdu_cnt; u64 iscsi_tx_bytes_cnt; u64 iscsi_tx_packet_cnt; }; struct qed_dev_iscsi_info { struct qed_dev_info common; void __iomem *primary_dbq_rq_addr; void __iomem *secondary_bdq_rq_addr; u8 num_cqs; }; struct qed_iscsi_id_params { u8 mac[ETH_ALEN]; u32 ip[4]; u16 port; }; struct qed_iscsi_params_offload { u8 layer_code; dma_addr_t sq_pbl_addr; u32 initial_ack; struct qed_iscsi_id_params src; struct qed_iscsi_id_params dst; u16 vlan_id; u8 tcp_flags; u8 ip_version; u8 default_cq; u8 ka_max_probe_cnt; u8 dup_ack_theshold; u32 rcv_next; u32 snd_una; u32 snd_next; u32 snd_max; u32 snd_wnd; u32 rcv_wnd; u32 snd_wl1; u32 cwnd; u32 ss_thresh; u16 srtt; u16 rtt_var; u32 ts_recent; u32 ts_recent_age; u32 total_rt; u32 ka_timeout_delta; u32 rt_timeout_delta; u8 dup_ack_cnt; u8 snd_wnd_probe_cnt; u8 ka_probe_cnt; u8 rt_cnt; u32 flow_label; u32 ka_timeout; u32 ka_interval; u32 max_rt_time; u32 initial_rcv_wnd; u8 ttl; u8 tos_or_tc; u16 remote_port; u16 local_port; u16 mss; u8 snd_wnd_scale; u8 rcv_wnd_scale; u16 da_timeout_value; u8 ack_frequency; }; struct qed_iscsi_params_update { u8 update_flag; #define QED_ISCSI_CONN_HD_EN BIT(0) #define QED_ISCSI_CONN_DD_EN BIT(1) #define QED_ISCSI_CONN_INITIAL_R2T BIT(2) #define QED_ISCSI_CONN_IMMEDIATE_DATA BIT(3) u32 max_seq_size; u32 max_recv_pdu_length; u32 max_send_pdu_length; u32 first_seq_length; u32 exp_stat_sn; }; #define MAX_TID_BLOCKS_ISCSI (512) struct qed_iscsi_tid { u32 size; /* In bytes per task */ u32 num_tids_per_block; u8 *blocks[MAX_TID_BLOCKS_ISCSI]; }; struct qed_iscsi_cb_ops { struct qed_common_cb_ops common; }; /** * struct qed_iscsi_ops - qed iSCSI operations. * @common: common operations pointer * @ll2: light L2 operations pointer * @fill_dev_info: fills iSCSI specific information * @param cdev * @param info * @return 0 on success, otherwise error value. * @register_ops: register iscsi operations * @param cdev * @param ops - specified using qed_iscsi_cb_ops * @param cookie - driver private * @start: starts iscsi in FW * @param cdev * @param tasks - qed will fill information about tasks * @return 0 on success, otherwise error value. * @stop: stops iscsi in FW * @param cdev * @return 0 on success, otherwise error value. * @acquire_conn: acquire a new iscsi connection * @param cdev * @param handle - qed will fill handle that should be * used henceforth as identifier of the * connection. * @param p_doorbell - qed will fill the address of the * doorbell. * @return 0 on success, otherwise error value. * @release_conn: release a previously acquired iscsi connection * @param cdev * @param handle - the connection handle. * @return 0 on success, otherwise error value. * @offload_conn: configures an offloaded connection * @param cdev * @param handle - the connection handle. * @param conn_info - the configuration to use for the * offload. * @return 0 on success, otherwise error value.
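 *
 * A typical connection lifecycle (illustrative) chains these ops:
 *
 *	rc = ops->acquire_conn(cdev, &handle, &fw_cid, &p_doorbell);
 *	if (!rc)
 *		rc = ops->offload_conn(cdev, handle, &offload_info);
 *	...
 *	ops->destroy_conn(cdev, handle, abrt_conn);
 *	ops->release_conn(cdev, handle);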
* @update_conn: updates an offloaded connection * @param cdev * @param handle - the connection handle. * @param conn_info - the configuration to use for the * offload. * @return 0 on success, otherwise error value. * @destroy_conn: stops an offloaded connection * @param cdev * @param handle - the connection handle. * @return 0 on success, otherwise error value. * @clear_sq: clears all tasks in the sq * @param cdev * @param handle - the connection handle. * @return 0 on success, otherwise error value. * @get_stats: iSCSI related statistics * @param cdev * @param stats - pointer to struct that would be filled * with stats * @return 0 on success, error otherwise. * @change_mac: Change MAC of interface * @param cdev * @param handle - the connection handle. * @param mac - new MAC to configure. * @return 0 on success, otherwise error value. */ struct qed_iscsi_ops { const struct qed_common_ops *common; const struct qed_ll2_ops *ll2; int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_iscsi_info *info); void (*register_ops)(struct qed_dev *cdev, struct qed_iscsi_cb_ops *ops, void *cookie); int (*start)(struct qed_dev *cdev, struct qed_iscsi_tid *tasks, void *event_context, iscsi_event_cb_t async_event_cb); int (*stop)(struct qed_dev *cdev); int (*acquire_conn)(struct qed_dev *cdev, u32 *handle, u32 *fw_cid, void __iomem **p_doorbell); int (*release_conn)(struct qed_dev *cdev, u32 handle); int (*offload_conn)(struct qed_dev *cdev, u32 handle, struct qed_iscsi_params_offload *conn_info); int (*update_conn)(struct qed_dev *cdev, u32 handle, struct qed_iscsi_params_update *conn_info); int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn); int (*clear_sq)(struct qed_dev *cdev, u32 handle); int (*get_stats)(struct qed_dev *cdev, struct qed_iscsi_stats *stats); int (*change_mac)(struct qed_dev *cdev, u32 handle, const u8 *mac); }; const struct qed_iscsi_ops *qed_get_iscsi_ops(void); void qed_put_iscsi_ops(void); #endif qed/qed_eth_if.h 0000644 00000024546 14722070374 0007576 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
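 */

/*
 * Illustrative example (not part of the original header): an upper-layer
 * storage driver binds to the qed core through qed_get_iscsi_ops(),
 * declared just above, and must pair it with qed_put_iscsi_ops() on
 * teardown. The function name is a placeholder.
 */
static inline const struct qed_iscsi_ops *example_bind_iscsi(void)
{
	const struct qed_iscsi_ops *ops = qed_get_iscsi_ops();

	/* NULL means the qed core is absent or ABI-incompatible */
	if (!ops)
		return NULL;

	/* probe via ops->common->probe(), then ops->fill_dev_info() ... */
	return ops;
}

/* qed_eth_if.h resumes below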
*/ #ifndef _QED_ETH_IF_H #define _QED_ETH_IF_H #include <linux/list.h> #include <linux/if_link.h> #include <linux/qed/eth_common.h> #include <linux/qed/qed_if.h> #include <linux/qed/qed_iov_if.h> /* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */ #define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2) #define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS)) struct qed_queue_start_common_params { /* Should always be relative to entity sending this. */ u8 vport_id; u16 queue_id; /* Relative, but relevant only for PFs */ u8 stats_id; struct qed_sb_info *p_sb; u8 sb_idx; u8 tc; }; struct qed_rxq_start_ret_params { void __iomem *p_prod; void *p_handle; }; struct qed_txq_start_ret_params { void __iomem *p_doorbell; void *p_handle; }; enum qed_filter_config_mode { QED_FILTER_CONFIG_MODE_DISABLE, QED_FILTER_CONFIG_MODE_5_TUPLE, QED_FILTER_CONFIG_MODE_L4_PORT, QED_FILTER_CONFIG_MODE_IP_DEST, QED_FILTER_CONFIG_MODE_IP_SRC, }; struct qed_ntuple_filter_params { /* Physically mapped address containing header of buffer to be used * as filter. */ dma_addr_t addr; /* Length of header in bytes */ u16 length; /* Relative queue-id to receive classified packet */ #define QED_RFS_NTUPLE_QID_RSS ((u16)-1) u16 qid; /* Identifier can either be according to vport-id or vfid */ bool b_is_vf; u8 vport_id; u8 vf_id; /* true iff this filter is to be added. Else to be removed */ bool b_is_add; /* If flow needs to be dropped */ bool b_is_drop; }; struct qed_dev_eth_info { struct qed_dev_info common; u8 num_queues; u8 num_tc; u8 port_mac[ETH_ALEN]; u16 num_vlan_filters; u16 num_mac_filters; /* Legacy VF - this affects the datapath, so qede has to know */ bool is_legacy; /* Might depend on available resources [in case of VF] */ bool xdp_supported; }; struct qed_update_vport_rss_params { void *rss_ind_table[128]; u32 rss_key[10]; u8 rss_caps; }; struct qed_update_vport_params { u8 vport_id; u8 update_vport_active_flg; u8 vport_active_flg; u8 update_tx_switching_flg; u8 tx_switching_flg; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; u8 update_rss_flg; struct qed_update_vport_rss_params rss_params; }; struct qed_start_vport_params { bool remove_inner_vlan; bool handle_ptp_pkts; bool gro_enable; bool drop_ttl0; u8 vport_id; u16 mtu; bool clear_stats; }; enum qed_filter_rx_mode_type { QED_FILTER_RX_MODE_TYPE_REGULAR, QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, QED_FILTER_RX_MODE_TYPE_PROMISC, }; enum qed_filter_xcast_params_type { QED_FILTER_XCAST_TYPE_ADD, QED_FILTER_XCAST_TYPE_DEL, QED_FILTER_XCAST_TYPE_REPLACE, }; struct qed_filter_ucast_params { enum qed_filter_xcast_params_type type; u8 vlan_valid; u16 vlan; u8 mac_valid; unsigned char mac[ETH_ALEN]; }; struct qed_filter_mcast_params { enum qed_filter_xcast_params_type type; u8 num; unsigned char mac[64][ETH_ALEN]; }; enum qed_filter_type { QED_FILTER_TYPE_UCAST, QED_FILTER_TYPE_MCAST, QED_FILTER_TYPE_RX_MODE, QED_MAX_FILTER_TYPES, }; struct qed_tunn_params { u16 vxlan_port; u8 update_vxlan_port; u16 geneve_port; u8 update_geneve_port; }; struct qed_eth_cb_ops { struct qed_common_cb_ops common; void (*force_mac) (void *dev, u8 *mac, bool forced); void (*ports_update)(void *dev, u16 vxlan_port, u16 geneve_port); }; #define QED_MAX_PHC_DRIFT_PPB 291666666 enum qed_ptp_filter_type { QED_PTP_FILTER_NONE, QED_PTP_FILTER_ALL, QED_PTP_FILTER_V1_L4_EVENT, QED_PTP_FILTER_V1_L4_GEN, QED_PTP_FILTER_V2_L4_EVENT, QED_PTP_FILTER_V2_L4_GEN, QED_PTP_FILTER_V2_L2_EVENT, QED_PTP_FILTER_V2_L2_GEN, QED_PTP_FILTER_V2_EVENT, QED_PTP_FILTER_V2_GEN }; enum qed_ptp_hwtstamp_tx_type { QED_PTP_HWTSTAMP_TX_OFF, 
QED_PTP_HWTSTAMP_TX_ON, }; #ifdef CONFIG_DCB /* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration * of dcbnl_rtnl_ops structure. */ struct qed_eth_dcbnl_ops { /* IEEE 802.1Qaz std */ int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets); int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets); int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets); int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app); int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app); /* CEE std */ u8 (*getstate)(struct qed_dev *cdev); u8 (*setstate)(struct qed_dev *cdev, u8 state); void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type, u8 *pgid, u8 *bw_pct, u8 *up_map); void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type, u8 *pgid, u8 *bw_pct, u8 *up_map); void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting); void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting); u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap); int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num); u8 (*getpfcstate)(struct qed_dev *cdev); int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id); u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags); /* DCBX configuration */ u8 (*getdcbx)(struct qed_dev *cdev); void (*setpgtccfgtx)(struct qed_dev *cdev, int prio, u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); void (*setpgtccfgrx)(struct qed_dev *cdev, int prio, u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct); void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct); u8 (*setall)(struct qed_dev *cdev); int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num); void (*setpfcstate)(struct qed_dev *cdev, u8 state); int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up); u8 (*setdcbx)(struct qed_dev *cdev, u8 state); u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags); /* Peer apps */ int (*peer_getappinfo)(struct qed_dev *cdev, struct dcb_peer_app_info *info, u16 *app_count); int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table); /* CEE peer */ int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc); int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg); }; #endif struct qed_eth_ptp_ops { int (*cfg_filters)(struct qed_dev *, enum qed_ptp_filter_type, enum qed_ptp_hwtstamp_tx_type); int (*read_rx_ts)(struct qed_dev *, u64 *); int (*read_tx_ts)(struct qed_dev *, u64 *); int (*read_cc)(struct qed_dev *, u64 *); int (*disable)(struct qed_dev *); int (*adjfreq)(struct qed_dev *, s32); int (*enable)(struct qed_dev *); }; struct qed_eth_ops { const struct qed_common_ops *common; #ifdef CONFIG_QED_SRIOV const struct qed_iov_hv_ops *iov; #endif #ifdef CONFIG_DCB const struct qed_eth_dcbnl_ops *dcb; #endif const struct qed_eth_ptp_ops *ptp; int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); void (*register_ops)(struct qed_dev *cdev, struct qed_eth_cb_ops *ops, void *cookie); bool(*check_mac) (struct qed_dev *cdev, u8 *mac); int (*vport_start)(struct qed_dev *cdev, struct qed_start_vport_params *params); int (*vport_stop)(struct qed_dev *cdev, u8 vport_id); int 
(*vport_update)(struct qed_dev *cdev, struct qed_update_vport_params *params); int (*q_rx_start)(struct qed_dev *cdev, u8 rss_num, struct qed_queue_start_common_params *params, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, struct qed_rxq_start_ret_params *ret_params); int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); int (*q_tx_start)(struct qed_dev *cdev, u8 rss_num, struct qed_queue_start_common_params *params, dma_addr_t pbl_addr, u16 pbl_size, struct qed_txq_start_ret_params *ret_params); int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle); int (*filter_config_rx_mode)(struct qed_dev *cdev, enum qed_filter_rx_mode_type type); int (*filter_config_ucast)(struct qed_dev *cdev, struct qed_filter_ucast_params *params); int (*filter_config_mcast)(struct qed_dev *cdev, struct qed_filter_mcast_params *params); int (*fastpath_stop)(struct qed_dev *cdev); int (*eth_cqe_completion)(struct qed_dev *cdev, u8 rss_id, struct eth_slow_path_rx_cqe *cqe); void (*get_vport_stats)(struct qed_dev *cdev, struct qed_eth_stats *stats); int (*tunn_config)(struct qed_dev *cdev, struct qed_tunn_params *params); int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie, struct qed_ntuple_filter_params *params); int (*configure_arfs_searcher)(struct qed_dev *cdev, enum qed_filter_config_mode mode); int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac); }; const struct qed_eth_ops *qed_get_eth_ops(void); void qed_put_eth_ops(void); #endif qed/fcoe_common.h 0000644 00000061341 14722070374 0007765 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation */ #ifndef __FCOE_COMMON__ #define __FCOE_COMMON__ /*********************/ /* FCOE FW CONSTANTS */ /*********************/ #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 /* The fcoe storm task context protection-information of Ystorm */ struct protection_info_ctx { __le16 flags; #define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 #define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 #define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 #define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 #define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 #define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 #define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF #define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 #define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 #define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 #define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F #define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 u8 dix_block_size; u8 dst_size; }; /* The fcoe storm task context protection-information of Ystorm */ union protection_info_union_ctx { struct protection_info_ctx info; __le32 value; }; /* FCP CMD payload */ struct fcoe_fcp_cmd_payload { __le32 opaque[8]; }; /* FCP RSP payload */ struct fcoe_fcp_rsp_payload { __le32 opaque[6]; }; /* FCP RSP payload */ struct fcp_rsp_payload_padded { struct fcoe_fcp_rsp_payload rsp_payload; __le32 reserved[2]; }; /* FCP RSP payload */ struct fcoe_fcp_xfer_payload { __le32 opaque[3]; }; /* FCP RSP payload */ struct fcp_xfer_payload_padded { struct fcoe_fcp_xfer_payload xfer_payload; __le32 reserved[5]; }; /* Task params */ struct fcoe_tx_data_params { __le32 data_offset; __le32 offset_in_io; u8 flags; #define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 #define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 #define 
FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 #define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 #define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 #define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 #define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F #define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 u8 dif_residual; __le16 seq_cnt; __le16 single_sge_saved_offset; __le16 next_dif_offset; __le16 seq_id; __le16 reserved3; }; /* Middle path parameters: FC header fields provided by the driver */ struct fcoe_tx_mid_path_params { __le32 parameter; u8 r_ctl; u8 type; u8 cs_ctl; u8 df_ctl; __le16 rx_id; __le16 ox_id; }; /* Task params */ struct fcoe_tx_params { struct fcoe_tx_data_params data; struct fcoe_tx_mid_path_params mid_path; }; /* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */ union fcoe_tx_info_union_ctx { struct fcoe_fcp_cmd_payload fcp_cmd_payload; struct fcp_rsp_payload_padded fcp_rsp_payload; struct fcp_xfer_payload_padded fcp_xfer_payload; struct fcoe_tx_params tx_params; }; /* Data sgl */ struct fcoe_slow_sgl_ctx { struct regpair base_sgl_addr; __le16 curr_sge_off; __le16 remainder_num_sges; __le16 curr_sgl_index; __le16 reserved; }; /* Union of DIX SGL \ cached DIX sges */ union fcoe_dix_desc_ctx { struct fcoe_slow_sgl_ctx dix_sgl; struct scsi_sge cached_dix_sge; }; /* The fcoe storm task context of Ystorm */ struct ystorm_fcoe_task_st_ctx { u8 task_type; u8 sgl_mode; #define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 #define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 #define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F #define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 u8 cached_dix_sge; u8 expect_first_xfer; __le32 num_pbf_zero_write; union protection_info_union_ctx protection_info_union; __le32 data_2_trns_rem; struct scsi_sgl_params sgl_params; u8 reserved1[12]; union fcoe_tx_info_union_ctx tx_info_union; union fcoe_dix_desc_ctx dix_desc; struct scsi_cached_sges data_desc; __le16 ox_id; __le16 rx_id; __le32 task_rety_identifier; u8 reserved2[8]; }; struct e4_ystorm_fcoe_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; #define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF #define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; #define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 #define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 #define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 #define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 #define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 #define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 #define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 #define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 #define 
E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 reg0; u8 byte3; u8 byte4; __le16 rx_id; __le16 word2; __le16 word3; __le16 word4; __le16 word5; __le32 reg1; __le32 reg2; }; struct e4_tstorm_fcoe_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; #define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF #define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 #define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 #define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 #define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 #define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 u8 flags1; #define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 #define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 #define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 #define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 #define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 #define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 #define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 #define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; #define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 #define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 #define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 #define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 u8 flags3; #define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 #define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 #define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 #define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 #define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 #define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 #define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 u8 flags4; #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 #define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 #define 
E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 cleanup_state; __le16 last_sent_tid; __le32 rec_rr_tov_exp_timeout; u8 byte3; u8 byte4; __le16 word2; __le16 word3; __le16 word4; __le32 data_offset_end_of_seq; __le32 data_offset_next; }; /* Cached data sges */ struct fcoe_exp_ro { __le32 data_offset; __le32 reserved; }; /* Union of Cleanup address \ expected relative offsets */ union fcoe_cleanup_addr_exp_ro_union { struct regpair abts_rsp_fc_payload_hi; struct fcoe_exp_ro exp_ro; }; /* Fields copied from the ABTS response packet */ struct fcoe_abts_pkt { __le32 abts_rsp_fc_payload_lo; __le16 abts_rsp_rx_id; u8 abts_rsp_rctl; u8 reserved2; }; /* FW read-write (modifiable) part of the fcoe task storm context of Tstorm */ struct fcoe_tstorm_fcoe_task_st_ctx_read_write { union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; __le16 flags; #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 __le16 seq_cnt; u8 seq_id; u8 ooo_rx_seq_id; __le16 rx_id; struct fcoe_abts_pkt abts_data; __le32 e_d_tov_exp_timeout_val; __le16 ooo_rx_seq_cnt; __le16 reserved1; }; /* FW read-only part of the fcoe task storm context of Tstorm */ struct fcoe_tstorm_fcoe_task_st_ctx_read_only { u8 task_type; u8 dev_type; u8 conf_supported; u8 glbl_q_num; __le32 cid; __le32 fcp_cmd_trns_size; __le32 rsrv; }; /* The fcoe task storm context of Tstorm */ struct tstorm_fcoe_task_st_ctx { struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; }; struct e4_mstorm_fcoe_task_ag_ctx { u8 byte0; u8 byte1; __le16 icid; u8 flags0; #define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF #define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 #define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 #define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 #define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 #define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; #define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 #define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 #define
E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 #define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 #define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 #define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 #define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 #define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; #define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 #define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 cleanup_state; __le32 received_bytes; u8 byte3; u8 glbl_q_num; __le16 word1; __le16 tid_to_xfer; __le16 word3; __le16 word4; __le16 word5; __le32 expected_bytes; __le32 reg2; }; /* The fcoe task storm context of Mstorm */ struct mstorm_fcoe_task_st_ctx { struct regpair rsp_buf_addr; __le32 rsrv[2]; struct scsi_sgl_params sgl_params; __le32 data_2_trns_rem; __le32 data_buffer_offset; __le16 parent_id; __le16 flags; #define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF #define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 #define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 #define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 #define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 #define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 #define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 #define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 #define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 #define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 #define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 #define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 #define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 #define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 #define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 #define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 #define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 #define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 #define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 #define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 struct scsi_cached_sges data_desc; }; struct e4_ustorm_fcoe_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; #define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF #define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 #define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 #define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 #define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 #define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 u8 flags1; #define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 #define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 #define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 #define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 #define 
E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 #define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; #define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 #define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 #define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 #define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 u8 flags3; #define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 global_cq_num; __le32 reg3; __le32 reg4; __le32 reg5; }; /* FCoE task context */ struct e4_fcoe_task_context { struct ystorm_fcoe_task_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct tdif_task_context tdif_context; struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context; struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context; struct timers_context timer_context; struct tstorm_fcoe_task_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context; struct mstorm_fcoe_task_st_ctx mstorm_st_context; struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context; struct rdif_task_context rdif_context; }; /* FCoE additional WQE (Sq/XferQ) information */ union fcoe_additional_info_union { __le32 previous_tid; __le32 parent_tid; __le32 burst_length; __le32 seq_rec_updated_offset; }; /* FCoE Ramrod Command IDs */ enum fcoe_completion_status { FCOE_COMPLETION_STATUS_SUCCESS, FCOE_COMPLETION_STATUS_FCOE_VER_ERR, FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, MAX_FCOE_COMPLETION_STATUS }; /* FC address (SID/DID) network presentation */ struct fc_addr_nw { u8 addr_lo; u8 addr_mid; u8 addr_hi; }; /* FCoE connection offload */ struct fcoe_conn_offload_ramrod_data { struct regpair sq_pbl_addr; struct regpair sq_curr_page_addr; struct regpair sq_next_page_addr; struct regpair xferq_pbl_addr; struct regpair xferq_curr_page_addr; struct regpair xferq_next_page_addr; struct regpair respq_pbl_addr; struct regpair respq_curr_page_addr; struct regpair respq_next_page_addr; __le16 dst_mac_addr_lo; __le16 dst_mac_addr_mid; __le16 dst_mac_addr_hi; __le16 src_mac_addr_lo; __le16 src_mac_addr_mid; __le16 src_mac_addr_hi; __le16 tx_max_fc_pay_len; __le16 e_d_tov_timer_val; __le16 rx_max_fc_pay_len; __le16 vlan_tag; #define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF #define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 #define 
FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 __le16 physical_q0; __le16 rec_rr_tov_timer_val; struct fc_addr_nw s_id; u8 max_conc_seqs_c3; struct fc_addr_nw d_id; u8 flags; #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7 __le16 conn_id; u8 def_q_idx; u8 reserved[5]; }; /* FCoE terminate connection request */ struct fcoe_conn_terminate_ramrod_data { struct regpair terminate_params_addr; }; /* FCoE device type */ enum fcoe_device_type { FCOE_TASK_DEV_TYPE_DISK, FCOE_TASK_DEV_TYPE_TAPE, MAX_FCOE_DEVICE_TYPE }; /* Data sgl */ struct fcoe_fast_sgl_ctx { struct regpair sgl_start_addr; __le32 sgl_byte_offset; __le16 task_reuse_cnt; __le16 init_offset_in_first_sge; }; /* FCoE firmware function init */ struct fcoe_init_func_ramrod_data { struct scsi_init_func_params func_params; struct scsi_init_func_queues q_params; __le16 mtu; __le16 sq_num_pages_in_pbl; __le32 reserved[3]; }; /* FCoE: Mode of the connection: Target or Initiator or both */ enum fcoe_mode_type { FCOE_INITIATOR_MODE = 0x0, FCOE_TARGET_MODE = 0x1, FCOE_BOTH_OR_NOT_CHOSEN = 0x3, MAX_FCOE_MODE_TYPE }; /* Per PF FCoE receive path statistics - tStorm RAM structure */ struct fcoe_rx_stat { struct regpair fcoe_rx_byte_cnt; struct regpair fcoe_rx_data_pkt_cnt; struct regpair fcoe_rx_xfer_pkt_cnt; struct regpair fcoe_rx_other_pkt_cnt; __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; __le32 fcoe_silent_drop_pkt_rq_full_cnt; __le32 fcoe_silent_drop_pkt_crc_error_cnt; __le32 fcoe_silent_drop_pkt_task_invalid_cnt; __le32 fcoe_silent_drop_total_pkt_cnt; __le32 rsrv; }; /* FCoE SQE request type */ enum fcoe_sqe_request_type { SEND_FCOE_CMD, SEND_FCOE_MIDPATH, SEND_FCOE_ABTS_REQUEST, FCOE_EXCHANGE_CLEANUP, FCOE_SEQUENCE_RECOVERY, SEND_FCOE_XFER_RDY, SEND_FCOE_RSP, SEND_FCOE_RSP_WITH_SENSE_DATA, SEND_FCOE_TARGET_DATA, SEND_FCOE_INITIATOR_DATA, SEND_FCOE_XFER_CONTINUATION_RDY, SEND_FCOE_TARGET_ABTS_RSP, MAX_FCOE_SQE_REQUEST_TYPE }; /* FCoE statistics request */ struct fcoe_stat_ramrod_data { struct regpair stat_params_addr; }; /* FCoE task type */ enum fcoe_task_type { FCOE_TASK_TYPE_WRITE_INITIATOR, FCOE_TASK_TYPE_READ_INITIATOR, FCOE_TASK_TYPE_MIDPATH, FCOE_TASK_TYPE_UNSOLICITED, FCOE_TASK_TYPE_ABTS, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, FCOE_TASK_TYPE_SEQUENCE_CLEANUP, FCOE_TASK_TYPE_WRITE_TARGET, FCOE_TASK_TYPE_READ_TARGET, FCOE_TASK_TYPE_RSP, FCOE_TASK_TYPE_RSP_SENSE_DATA, FCOE_TASK_TYPE_ABTS_TARGET, FCOE_TASK_TYPE_ENUM_SIZE, MAX_FCOE_TASK_TYPE };
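/* The MASK/SHIFT pairs used throughout these contexts are meant to be used
 * with the GET_FIELD()/SET_FIELD() helpers from common_hsi.h. A minimal
 * sketch (the function name is hypothetical) composing the vlan_tag field
 * of struct fcoe_conn_offload_ramrod_data above from its three sub-fields:
 */
static inline __le16 example_fcoe_vlan_tag(u16 vid, u8 cfi, u8 priority)
{
	u16 tag = 0;

	SET_FIELD(tag, FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID, vid);
	SET_FIELD(tag, FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI, cfi);
	SET_FIELD(tag, FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY, priority);

	return cpu_to_le16(tag);	/* HSI fields are little-endian */
}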
/* Per PF FCoE transmit path statistics - pStorm RAM structure */ struct fcoe_tx_stat { struct regpair fcoe_tx_byte_cnt; struct regpair fcoe_tx_data_pkt_cnt; struct regpair fcoe_tx_xfer_pkt_cnt; struct regpair fcoe_tx_other_pkt_cnt; }; /* FCoE SQ/XferQ element */ struct fcoe_wqe { __le16 task_id; __le16 flags; #define FCOE_WQE_REQ_TYPE_MASK 0xF #define FCOE_WQE_REQ_TYPE_SHIFT 0 #define FCOE_WQE_SGL_MODE_MASK 0x1 #define FCOE_WQE_SGL_MODE_SHIFT 4 #define FCOE_WQE_CONTINUATION_MASK 0x1 #define FCOE_WQE_CONTINUATION_SHIFT 5 #define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 #define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 #define FCOE_WQE_RESERVED_MASK 0x1 #define FCOE_WQE_RESERVED_SHIFT 7 #define FCOE_WQE_NUM_SGES_MASK 0xF #define FCOE_WQE_NUM_SGES_SHIFT 8 #define FCOE_WQE_RESERVED1_MASK 0xF #define FCOE_WQE_RESERVED1_SHIFT 12 union fcoe_additional_info_union additional_info_union; }; /* FCoE XFRQ element */ struct xfrqe_prot_flags { u8 flags; #define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF #define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 #define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 #define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 #define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 #define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 #define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 #define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 }; /* FCoE doorbell data */ struct fcoe_db_data { u8 params; #define FCOE_DB_DATA_DEST_MASK 0x3 #define FCOE_DB_DATA_DEST_SHIFT 0 #define FCOE_DB_DATA_AGG_CMD_MASK 0x3 #define FCOE_DB_DATA_AGG_CMD_SHIFT 2 #define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 #define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 #define FCOE_DB_DATA_RESERVED_MASK 0x1 #define FCOE_DB_DATA_RESERVED_SHIFT 5 #define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 #define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 sq_prod; }; #endif /* __FCOE_COMMON__ */ qed/qed_fcoe_if.h 0000644 00000010056 14722070374 0007721 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _QED_FCOE_IF_H #define _QED_FCOE_IF_H #include <linux/types.h> #include <linux/qed/qed_if.h> struct qed_fcoe_stats { u64 fcoe_rx_byte_cnt; u64 fcoe_rx_data_pkt_cnt; u64 fcoe_rx_xfer_pkt_cnt; u64 fcoe_rx_other_pkt_cnt; u32 fcoe_silent_drop_pkt_cmdq_full_cnt; u32 fcoe_silent_drop_pkt_rq_full_cnt; u32 fcoe_silent_drop_pkt_crc_error_cnt; u32 fcoe_silent_drop_pkt_task_invalid_cnt; u32 fcoe_silent_drop_total_pkt_cnt; u64 fcoe_tx_byte_cnt; u64 fcoe_tx_data_pkt_cnt; u64 fcoe_tx_xfer_pkt_cnt; u64 fcoe_tx_other_pkt_cnt; }; struct qed_dev_fcoe_info { struct qed_dev_info common; void __iomem *primary_bdq_rq_addr; void __iomem *secondary_bdq_rq_addr; u64 wwpn; u64 wwnn; u8 num_cqs; }; struct qed_fcoe_params_offload { dma_addr_t sq_pbl_addr; dma_addr_t sq_curr_page_addr; dma_addr_t sq_next_page_addr; u8 src_mac[ETH_ALEN]; u8 dst_mac[ETH_ALEN]; u16 tx_max_fc_pay_len; u16 e_d_tov_timer_val; u16 rec_tov_timer_val; u16 rx_max_fc_pay_len; u16 vlan_tag; struct fc_addr_nw s_id; u8 max_conc_seqs_c3; struct fc_addr_nw d_id; u8 flags; u8 def_q_idx; }; #define MAX_TID_BLOCKS_FCOE (512) struct qed_fcoe_tid { u32 size; /* In bytes per task */ u32 num_tids_per_block; u8 *blocks[MAX_TID_BLOCKS_FCOE]; }; struct qed_fcoe_cb_ops { struct qed_common_cb_ops common; u32 (*get_login_failures)(void *cookie); }; void qed_fcoe_set_pf_params(struct qed_dev *cdev, struct qed_fcoe_pf_params *params); /** * struct qed_fcoe_ops - qed FCoE operations. * @common: common operations pointer * @fill_dev_info: fills FCoE specific information * @param cdev * @param info * @return 0 on success, otherwise error value. * @register_ops: register FCoE operations * @param cdev * @param ops - specified using qed_fcoe_cb_ops * @param cookie - driver private * @ll2: light L2 operations pointer * @start: starts fcoe in FW * @param cdev * @param tasks - qed will fill information about tasks * return 0 on success, otherwise error value. * @stop: stops fcoe in FW * @param cdev * return 0 on success, otherwise error value. * @acquire_conn: acquire a new fcoe connection * @param cdev * @param handle - qed will fill handle that should be * used henceforth as identifier of the * connection. * @param p_doorbell - qed will fill the address of the * doorbell. * return 0 on success, otherwise error value. * @release_conn: release a previously acquired fcoe connection * @param cdev * @param handle - the connection handle. * return 0 on success, otherwise error value. * @offload_conn: configures an offloaded connection * @param cdev * @param handle - the connection handle. * @param conn_info - the configuration to use for the * offload. * return 0 on success, otherwise error value. * @destroy_conn: stops an offloaded connection * @param cdev * @param handle - the connection handle. * @param terminate_params * return 0 on success, otherwise error value. * @get_stats: gets FCoE related statistics * @param cdev * @param stats - pointer to struct that will be filled * with stats * return 0 on success, error otherwise. */ struct qed_fcoe_ops { const struct qed_common_ops *common; int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_fcoe_info *info); void (*register_ops)(struct qed_dev *cdev, struct qed_fcoe_cb_ops *ops, void *cookie); const struct qed_ll2_ops *ll2; int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks); int (*stop)(struct qed_dev *cdev); int (*acquire_conn)(struct qed_dev *cdev, u32 *handle, u32 *fw_cid, void __iomem **p_doorbell); int (*release_conn)(struct qed_dev *cdev, u32 handle); int (*offload_conn)(struct qed_dev *cdev, u32 handle, struct qed_fcoe_params_offload *conn_info); int (*destroy_conn)(struct qed_dev *cdev, u32 handle, dma_addr_t terminate_params); int (*get_stats)(struct qed_dev *cdev, struct qed_fcoe_stats *stats); }; const struct qed_fcoe_ops *qed_get_fcoe_ops(void); void qed_put_fcoe_ops(void);
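/* A minimal bring-up sketch for the FCoE ops table above (illustrative
 * only: the function name is hypothetical, and a real consumer such as
 * qedf also sizes its task pool from the qed_fcoe_tid information that
 * start() fills in):
 */
static int example_fcoe_bringup(struct qed_dev *cdev,
				const struct qed_fcoe_ops *ops,
				struct qed_fcoe_cb_ops *cb_ops, void *cookie)
{
	struct qed_dev_fcoe_info info;
	struct qed_fcoe_tid tasks;
	int rc;

	/* Learn WWPN/WWNN, number of CQs, BDQ doorbells, etc. */
	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		return rc;

	/* Register driver callbacks before starting the FW */
	ops->register_ops(cdev, cb_ops, cookie);

	/* qed fills 'tasks' with the per-task context block layout */
	return ops->start(cdev, &tasks);
}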
#endif qed/qed_chain.h 0000644 00000047355 14722070374 0007415 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef _QED_CHAIN_H #define _QED_CHAIN_H #include <linux/types.h> #include <asm/byteorder.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/qed/common_hsi.h> enum qed_chain_mode { /* Each page contains a next pointer at its end */ QED_CHAIN_MODE_NEXT_PTR, /* Chain is a single page, a next pointer is not required */ QED_CHAIN_MODE_SINGLE, /* Page pointers are located in a side list */ QED_CHAIN_MODE_PBL, }; enum qed_chain_use_mode { QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ }; enum qed_chain_cnt_type { /* The chain's size/prod/cons are kept in 16-bit variables */ QED_CHAIN_CNT_TYPE_U16, /* The chain's size/prod/cons are kept in 32-bit variables */ QED_CHAIN_CNT_TYPE_U32, }; struct qed_chain_next { struct regpair next_phys; void *next_virt; }; struct qed_chain_pbl_u16 { u16 prod_page_idx; u16 cons_page_idx; }; struct qed_chain_pbl_u32 { u32 prod_page_idx; u32 cons_page_idx; }; struct qed_chain_ext_pbl { dma_addr_t p_pbl_phys; void *p_pbl_virt; }; struct qed_chain_u16 { /* Cyclic index of next element to produce/consume */ u16 prod_idx; u16 cons_idx; }; struct qed_chain_u32 { /* Cyclic index of next element to produce/consume */ u32 prod_idx; u32 cons_idx; }; struct addr_tbl_entry { void *virt_addr; dma_addr_t dma_map; }; struct qed_chain { /* fastpath portion of the chain - required for commands such * as produce / consume. */ /* Point to next element to produce/consume */ void *p_prod_elem; void *p_cons_elem; /* Fastpath portions of the PBL [if exists] */ struct { /* Table for keeping the virtual and physical addresses of the * chain pages, respectively to the physical addresses * in the pbl table. */ struct addr_tbl_entry *pp_addr_tbl; union { struct qed_chain_pbl_u16 u16; struct qed_chain_pbl_u32 u32; } c; } pbl; union { struct qed_chain_u16 chain16; struct qed_chain_u32 chain32; } u; /* Capacity counts only usable elements */ u32 capacity; u32 page_cnt; enum qed_chain_mode mode; /* Elements information for fast calculations */ u16 elem_per_page; u16 elem_per_page_mask; u16 elem_size; u16 next_page_mask; u16 usable_per_page; u8 elem_unusable; u8 cnt_type; /* Slowpath of the chain - required for initialization and destruction, * but isn't involved in regular functionality. */ /* Base address of a pre-allocated buffer for pbl */ struct { dma_addr_t p_phys_table; void *p_virt_table; } pbl_sp; /* Address of first page of the chain - the address is required * for fastpath operation [consume/produce] but only for the SINGLE * flavour which isn't considered fastpath [== SPQ]. */ void *p_virt_addr; dma_addr_t p_phys_addr; /* Total number of elements [for entire chain] */ u32 size; u8 intended_use; bool b_external_pbl; };
#define QED_CHAIN_PBL_ENTRY_SIZE (8) #define QED_CHAIN_PAGE_SIZE (0x1000) #define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) #define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \ (elem_size))) : 0) #define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ ((u32)(ELEMS_PER_PAGE(elem_size) - \ UNUSABLE_ELEMS_PER_PAGE(elem_size, mode))) #define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) #define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) #define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) /* Accessors */ static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) { return p_chain->u.chain16.prod_idx; } static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) { return p_chain->u.chain16.cons_idx; } static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) { return p_chain->u.chain32.cons_idx; } static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) { u16 elem_per_page = p_chain->elem_per_page; u32 prod = p_chain->u.chain16.prod_idx; u32 cons = p_chain->u.chain16.cons_idx; u16 used; if (prod < cons) prod += (u32)U16_MAX + 1; used = (u16)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) used -= prod / elem_per_page - cons / elem_per_page; return (u16)(p_chain->capacity - used); } static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) { u16 elem_per_page = p_chain->elem_per_page; u64 prod = p_chain->u.chain32.prod_idx; u64 cons = p_chain->u.chain32.cons_idx; u32 used; if (prod < cons) prod += (u64)U32_MAX + 1; used = (u32)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) used -= (u32)(prod / elem_per_page - cons / elem_per_page); return p_chain->capacity - used; } static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) { return p_chain->usable_per_page; } static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) { return p_chain->elem_unusable; } static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) { return p_chain->page_cnt; } static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) { return p_chain->pbl_sp.p_phys_table; } /** * @brief qed_chain_advance_page - * * Advance the next element across pages for a linked chain * * @param p_chain * @param p_next_elem * @param idx_to_inc * @param page_to_inc */ static inline void qed_chain_advance_page(struct qed_chain *p_chain, void **p_next_elem, void *idx_to_inc, void *page_to_inc) { struct qed_chain_next *p_next = NULL; u32 page_index = 0; switch (p_chain->mode) { case QED_CHAIN_MODE_NEXT_PTR: p_next = *p_next_elem; *p_next_elem = p_next->next_virt; if (is_chain_u16(p_chain)) *(u16 *)idx_to_inc += p_chain->elem_unusable; else *(u32 *)idx_to_inc += p_chain->elem_unusable; break; case QED_CHAIN_MODE_SINGLE: *p_next_elem = p_chain->p_virt_addr; break; case QED_CHAIN_MODE_PBL: if (is_chain_u16(p_chain)) { if (++(*(u16 *)page_to_inc) == p_chain->page_cnt) *(u16 *)page_to_inc = 0; page_index = *(u16 *)page_to_inc; } else { if (++(*(u32 *)page_to_inc) == p_chain->page_cnt) *(u32 *)page_to_inc = 0; page_index = *(u32 *)page_to_inc; } *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr; } } #define is_unusable_idx(p, idx) \ (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) #define is_unusable_idx_u32(p, idx) \ (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) #define is_unusable_next_idx(p, idx) \ ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \ (p)->usable_per_page) #define is_unusable_next_idx_u32(p, idx) \ ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \ (p)->usable_per_page)
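/* A worked example of the page-geometry macros above (hypothetical numbers;
 * assumes a 64-bit build, where sizeof(struct qed_chain_next) == 16):
 * for 64-byte elements, ELEMS_PER_PAGE(64) = 0x1000 / 64 = 64; in
 * QED_CHAIN_MODE_NEXT_PTR mode, UNUSABLE_ELEMS_PER_PAGE(64, mode) =
 * 1 + (16 - 1) / 64 = 1, so USABLE_ELEMS_PER_PAGE(64, mode) = 63.
 */
static inline u32 example_chain_page_cnt(void)
{
	/* 1000 usable elements of 64 bytes in a next-ptr chain:
	 * DIV_ROUND_UP(1000, 63) = 16 pages.
	 */
	return QED_CHAIN_PAGE_CNT(1000, 64, QED_CHAIN_MODE_NEXT_PTR);
}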
#define test_and_skip(p, idx) \ do { \ if (is_chain_u16(p)) { \ if (is_unusable_idx(p, idx)) \ (p)->u.chain16.idx += (p)->elem_unusable; \ } else { \ if (is_unusable_idx_u32(p, idx)) \ (p)->u.chain32.idx += (p)->elem_unusable; \ } \ } while (0) /** * @brief qed_chain_return_produced - * * A chain in which the driver "Produces" elements should use this API * to indicate previously produced elements are now consumed. * * @param p_chain */ static inline void qed_chain_return_produced(struct qed_chain *p_chain) { if (is_chain_u16(p_chain)) p_chain->u.chain16.cons_idx++; else p_chain->u.chain32.cons_idx++; test_and_skip(p_chain, cons_idx); } /** * @brief qed_chain_produce - * * A chain in which the driver "Produces" elements should use this to get * a pointer to the next element which can be "Produced". It's the driver's * responsibility to validate that the chain has room for a new element. * * @param p_chain * * @return void*, a pointer to next element */ static inline void *qed_chain_produce(struct qed_chain *p_chain) { void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx; if (is_chain_u16(p_chain)) { if ((p_chain->u.chain16.prod_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_prod_idx = &p_chain->u.chain16.prod_idx; p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, p_prod_idx, p_prod_page_idx); } p_chain->u.chain16.prod_idx++; } else { if ((p_chain->u.chain32.prod_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_prod_idx = &p_chain->u.chain32.prod_idx; p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, p_prod_idx, p_prod_page_idx); } p_chain->u.chain32.prod_idx++; } p_ret = p_chain->p_prod_elem; p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) + p_chain->elem_size); return p_ret; } /** * @brief qed_chain_get_capacity - * * Get the maximum number of usable BDs in the chain * * @param p_chain * * @return the chain's capacity in elements */ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain) { return p_chain->capacity; } /** * @brief qed_chain_recycle_consumed - * * Returns an element which was previously consumed; * Increments producers so they could be written to FW. * * @param p_chain */ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain) { test_and_skip(p_chain, prod_idx); if (is_chain_u16(p_chain)) p_chain->u.chain16.prod_idx++; else p_chain->u.chain32.prod_idx++; }
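/* A minimal producer-side sketch (the function name is hypothetical; real
 * users such as qede check for room before producing, since
 * qed_chain_produce() itself does not, and this 16-bit variant assumes a
 * QED_CHAIN_CNT_TYPE_U16 chain):
 */
static inline void *example_chain_produce(struct qed_chain *p_chain)
{
	if (!qed_chain_get_elem_left(p_chain))
		return NULL;	/* chain is full */

	return qed_chain_produce(p_chain);
}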
/** * @brief qed_chain_consume - * * A chain in which the driver utilizes data written by a different source * (i.e., FW) should use this to access passed buffers. * * @param p_chain * * @return void*, a pointer to the next buffer written */ static inline void *qed_chain_consume(struct qed_chain *p_chain) { void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx; if (is_chain_u16(p_chain)) { if ((p_chain->u.chain16.cons_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_cons_idx = &p_chain->u.chain16.cons_idx; p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, p_cons_idx, p_cons_page_idx); } p_chain->u.chain16.cons_idx++; } else { if ((p_chain->u.chain32.cons_idx & p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_cons_idx = &p_chain->u.chain32.cons_idx; p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx; qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, p_cons_idx, p_cons_page_idx); } p_chain->u.chain32.cons_idx++; } p_ret = p_chain->p_cons_elem; p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) + p_chain->elem_size); return p_ret; } /** * @brief qed_chain_reset - Resets the chain to its start state * * @param p_chain pointer to a previously allocated chain */ static inline void qed_chain_reset(struct qed_chain *p_chain) { u32 i; if (is_chain_u16(p_chain)) { p_chain->u.chain16.prod_idx = 0; p_chain->u.chain16.cons_idx = 0; } else { p_chain->u.chain32.prod_idx = 0; p_chain->u.chain32.cons_idx = 0; } p_chain->p_cons_elem = p_chain->p_virt_addr; p_chain->p_prod_elem = p_chain->p_virt_addr; if (p_chain->mode == QED_CHAIN_MODE_PBL) { /* Use (page_cnt - 1) as a reset value for the prod/cons page's * indices, to avoid unnecessary page advancing on the first * call to qed_chain_produce/consume. Instead, the indices * will be advanced to page_cnt and then will be wrapped to 0. */ u32 reset_val = p_chain->page_cnt - 1; if (is_chain_u16(p_chain)) { p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val; p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val; } else { p_chain->pbl.c.u32.prod_page_idx = reset_val; p_chain->pbl.c.u32.cons_page_idx = reset_val; } } switch (p_chain->intended_use) { case QED_CHAIN_USE_TO_CONSUME: /* produce empty elements */ for (i = 0; i < p_chain->capacity; i++) qed_chain_recycle_consumed(p_chain); break; case QED_CHAIN_USE_TO_CONSUME_PRODUCE: case QED_CHAIN_USE_TO_PRODUCE: default: /* Do nothing */ break; } }
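/* A minimal consumer-side sketch (the function name is hypothetical; one
 * plausible pattern for a QED_CHAIN_USE_TO_CONSUME chain, where each
 * element is handed back to the FW via qed_chain_recycle_consumed() once
 * it has been processed):
 */
static inline void example_chain_consume_n(struct qed_chain *p_chain, u16 n)
{
	void *elem;
	u16 i;

	for (i = 0; i < n; i++) {
		elem = qed_chain_consume(p_chain);
		/* ... process the element pointed to by elem ... */
		qed_chain_recycle_consumed(p_chain);
	}
}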
/** * @brief qed_chain_init_params - Initializes a basic chain struct * * @param p_chain * @param page_cnt number of pages in the allocated buffer * @param elem_size size of each element in the chain * @param intended_use * @param mode * @param cnt_type */ static inline void qed_chain_init_params(struct qed_chain *p_chain, u32 page_cnt, u8 elem_size, enum qed_chain_use_mode intended_use, enum qed_chain_mode mode, enum qed_chain_cnt_type cnt_type) { /* chain fixed parameters */ p_chain->p_virt_addr = NULL; p_chain->p_phys_addr = 0; p_chain->elem_size = elem_size; p_chain->intended_use = (u8)intended_use; p_chain->mode = mode; p_chain->cnt_type = (u8)cnt_type; p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); p_chain->next_page_mask = (p_chain->usable_per_page & p_chain->elem_per_page_mask); p_chain->page_cnt = page_cnt; p_chain->capacity = p_chain->usable_per_page * page_cnt; p_chain->size = p_chain->elem_per_page * page_cnt; p_chain->pbl_sp.p_phys_table = 0; p_chain->pbl_sp.p_virt_table = NULL; p_chain->pbl.pp_addr_tbl = NULL; } /** * @brief qed_chain_init_mem - * * Initializes a basic chain struct with its chain buffers * * @param p_chain * @param p_virt_addr virtual address of allocated buffer's beginning * @param p_phys_addr physical address of allocated buffer's beginning * */ static inline void qed_chain_init_mem(struct qed_chain *p_chain, void *p_virt_addr, dma_addr_t p_phys_addr) { p_chain->p_virt_addr = p_virt_addr; p_chain->p_phys_addr = p_phys_addr; } /** * @brief qed_chain_init_pbl_mem - * * Initializes a basic chain struct with its pbl buffers * * @param p_chain * @param p_virt_pbl pointer to a pre-allocated side table which will hold * virtual page addresses. * @param p_phys_pbl pointer to a pre-allocated side table which will hold * physical page addresses. * @param pp_addr_tbl * pointer to a pre-allocated side table which will hold * the virtual addresses of the chain pages. * */ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, void *p_virt_pbl, dma_addr_t p_phys_pbl, struct addr_tbl_entry *pp_addr_tbl) { p_chain->pbl_sp.p_phys_table = p_phys_pbl; p_chain->pbl_sp.p_virt_table = p_virt_pbl; p_chain->pbl.pp_addr_tbl = pp_addr_tbl; } /** * @brief qed_chain_init_next_ptr_elem - * * Initializes a next pointer element * * @param p_chain * @param p_virt_curr virtual address of a chain page of which the next * pointer element is initialized * @param p_virt_next virtual address of the next chain page * @param p_phys_next physical address of the next chain page * */ static inline void qed_chain_init_next_ptr_elem(struct qed_chain *p_chain, void *p_virt_curr, void *p_virt_next, dma_addr_t p_phys_next) { struct qed_chain_next *p_next; u32 size; size = p_chain->elem_size * p_chain->usable_per_page; p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size); DMA_REGPAIR_LE(p_next->next_phys, p_phys_next); p_next->next_virt = p_virt_next; } /** * @brief qed_chain_get_last_elem - * * Returns a pointer to the last element of the chain * * @param p_chain * * @return void* */ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) { struct qed_chain_next *p_next = NULL; void *p_virt_addr = NULL; u32 size, last_page_idx; if (!p_chain->p_virt_addr) goto out; switch (p_chain->mode) { case QED_CHAIN_MODE_NEXT_PTR: size = p_chain->elem_size * p_chain->usable_per_page; p_virt_addr = p_chain->p_virt_addr; p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size); while (p_next->next_virt != p_chain->p_virt_addr) { p_virt_addr = p_next->next_virt; p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size); } break; case QED_CHAIN_MODE_SINGLE: p_virt_addr = p_chain->p_virt_addr; break; case QED_CHAIN_MODE_PBL: last_page_idx = p_chain->page_cnt - 1; p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr; break; } /* p_virt_addr points at this stage to the last page of the chain */ size = p_chain->elem_size * (p_chain->usable_per_page - 1); p_virt_addr = (u8 *)p_virt_addr + size; out: return p_virt_addr; } /** * @brief qed_chain_set_prod - sets the prod to the given value * * @param p_chain * @param prod_idx * @param p_prod_elem */ static inline void qed_chain_set_prod(struct qed_chain *p_chain, u32 prod_idx, void *p_prod_elem) { if (p_chain->mode == QED_CHAIN_MODE_PBL) { u32 cur_prod, page_mask, page_cnt, page_diff; cur_prod = is_chain_u16(p_chain) ?
p_chain->u.chain16.prod_idx : p_chain->u.chain32.prod_idx; /* Assume that number of elements in a page is power of 2 */ page_mask = ~p_chain->elem_per_page_mask; /* Use "cur_prod - 1" and "prod_idx - 1" since producer index * reaches the first element of next page before the page index * is incremented. See qed_chain_produce(). * Index wrap around is not a problem because the difference * between current and given producer indices is always * positive and lower than the chain's capacity. */ page_diff = (((cur_prod - 1) & page_mask) - ((prod_idx - 1) & page_mask)) / p_chain->elem_per_page; page_cnt = qed_chain_get_page_cnt(p_chain); if (is_chain_u16(p_chain)) p_chain->pbl.c.u16.prod_page_idx = (p_chain->pbl.c.u16.prod_page_idx - page_diff + page_cnt) % page_cnt; else p_chain->pbl.c.u32.prod_page_idx = (p_chain->pbl.c.u32.prod_page_idx - page_diff + page_cnt) % page_cnt; } if (is_chain_u16(p_chain)) p_chain->u.chain16.prod_idx = (u16) prod_idx; else p_chain->u.chain32.prod_idx = prod_idx; p_chain->p_prod_elem = p_prod_elem; } /** * @brief qed_chain_pbl_zero_mem - set chain memory to 0 * * @param p_chain */ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) { u32 i, page_cnt; if (p_chain->mode != QED_CHAIN_MODE_PBL) return; page_cnt = qed_chain_get_page_cnt(p_chain); for (i = 0; i < page_cnt; i++) memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0, QED_CHAIN_PAGE_SIZE); } #endif qed/rdma_common.h 0000644 00000004612 14722070374 0007772 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifndef __RDMA_COMMON__ #define __RDMA_COMMON__ /************************/ /* RDMA FW CONSTANTS */ /************************/ #define RDMA_RESERVED_LKEY (0) #define RDMA_RING_PAGE_SIZE (0x1000) #define RDMA_MAX_SGE_PER_SQ_WQE (4) #define RDMA_MAX_SGE_PER_RQ_WQE (4) #define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) #define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) #define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) #define RDMA_MAX_CQS (64 * 1024) #define RDMA_MAX_TIDS (128 * 1024 - 1) #define RDMA_MAX_PDS (64 * 1024) #define RDMA_MAX_XRC_SRQS (1024) #define RDMA_MAX_SRQS (32 * 1024) #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS #define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 #define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB #define RDMA_TASK_TYPE (PROTOCOLID_ROCE) struct rdma_srq_id { __le16 srq_idx; __le16 opaque_fid; }; struct rdma_srq_producers { __le32 sge_prod; __le32 wqe_prod; }; #endif /* __RDMA_COMMON__ */ qed/iscsi_common.h 0000644 00000143566 14722070374 0010175 0 ustar 00 /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifndef __ISCSI_COMMON__ #define __ISCSI_COMMON__ /**********************/ /* ISCSI FW CONSTANTS */ /**********************/ /* iSCSI HSI constants */ #define ISCSI_DEFAULT_MTU (1500) /* KWQ (kernel work queue) layer codes */ #define ISCSI_SLOW_PATH_LAYER_CODE (6) /* iSCSI parameter defaults */ #define ISCSI_DEFAULT_HEADER_DIGEST (0) #define ISCSI_DEFAULT_DATA_DIGEST (0) #define ISCSI_DEFAULT_INITIAL_R2T (1) #define ISCSI_DEFAULT_IMMEDIATE_DATA (1) #define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) #define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) #define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) #define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) /* iSCSI parameter limits */ #define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) #define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) #define ISCSI_MIN_VAL_BURST_LENGTH (0x200) #define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) #define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) #define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) #define ISCSI_AHS_CNTL_SIZE 4 #define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) /* iSCSI reserved params */ #define ISCSI_ITT_ALL_ONES (0xffffffff) #define ISCSI_TTT_ALL_ONES (0xffffffff) #define ISCSI_OPTION_1_OFF_CHIP_TCP 1 #define ISCSI_OPTION_2_ON_CHIP_TCP 2 #define ISCSI_INITIATOR_MODE 0 #define ISCSI_TARGET_MODE 1 /* iSCSI request op codes */ #define ISCSI_OPCODE_NOP_OUT (0) #define ISCSI_OPCODE_SCSI_CMD (1) #define ISCSI_OPCODE_TMF_REQUEST (2) #define ISCSI_OPCODE_LOGIN_REQUEST (3) #define ISCSI_OPCODE_TEXT_REQUEST (4) #define ISCSI_OPCODE_DATA_OUT (5) #define ISCSI_OPCODE_LOGOUT_REQUEST (6) /* iSCSI response/messages op codes */ #define ISCSI_OPCODE_NOP_IN (0x20) #define ISCSI_OPCODE_SCSI_RESPONSE (0x21) #define ISCSI_OPCODE_TMF_RESPONSE (0x22) #define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) #define ISCSI_OPCODE_TEXT_RESPONSE (0x24) #define ISCSI_OPCODE_DATA_IN (0x25) #define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) #define ISCSI_OPCODE_R2T (0x31) #define ISCSI_OPCODE_ASYNC_MSG (0x32) #define ISCSI_OPCODE_REJECT (0x3f) /* iSCSI stages */ #define ISCSI_STAGE_SECURITY_NEGOTIATION (0) #define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) #define ISCSI_STAGE_FULL_FEATURE_PHASE (3) /* iSCSI CQE errors */ #define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) #define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) /* Union of data bd_opaque/ tq_tid */ union bd_opaque_tq_union { __le16 bd_opaque; __le16 tq_tid; }; /* iSCSI CQE error bitmap */ struct cqe_error_bitmap { u8 cqe_error_status_bits; #define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 #define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 #define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 #define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 #define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 #define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 #define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 #define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 #define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 #define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 }; union cqe_error_status { u8 error_status; struct cqe_error_bitmap error_bits; }; /* iSCSI Login Response PDU header */ struct data_hdr { __le32 data[12]; }; struct lun_mapper_addr_reserved { struct regpair lun_mapper_addr; u8 reserved0[8]; }; /* rdif context for dif on immediate */ struct dif_on_immediate_params { __le32 initial_ref_tag; __le16 application_tag; __le16 application_tag_mask; __le16 flags1; #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1 #define
/* rdif context for dif on immediate */ struct dif_on_immediate_params { __le32 initial_ref_tag; __le16 application_tag; __le16 application_tag_mask; __le16 flags1; #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0 #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1 #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2 #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3 #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4 #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5 #define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6 #define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7 #define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3 #define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8 #define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF #define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10 #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 u8 flags0; #define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0 #define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1 #define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2 #define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3 #define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3 #define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4 #define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6 #define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1 #define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7 u8 reserved_zero[5]; }; /* iSCSI dif on immediate mode attributes union */ union dif_configuration_params { struct lun_mapper_addr_reserved lun_mapper_address; struct dif_on_immediate_params def_dif_conf; }; /* Union of data/r2t sequence number */ union iscsi_seq_num { __le16 data_sn; __le16 r2t_sn; }; /* iSCSI DIF flags */ struct iscsi_dif_flags { u8 flags; #define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF #define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 #define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 #define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 #define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 #define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 }; /* The iscsi storm task state of Ystorm */ struct ystorm_iscsi_task_state { struct scsi_cached_sges data_desc; struct scsi_sgl_params sgl_params; __le32 exp_r2t_sn; __le32 buffer_offset; union iscsi_seq_num seq_num; struct iscsi_dif_flags dif_flags; u8 flags; #define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 #define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 #define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 #define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 #define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1 #define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2 #define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F #define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3 }; /* The iscsi storm task retransmit options of Ystorm */ struct ystorm_iscsi_task_rxmit_opt { __le32
fast_rxmit_sge_offset; __le32 scan_start_buffer_offset; __le32 fast_rxmit_buffer_offset; u8 scan_start_sgl_index; u8 fast_rxmit_sgl_index; __le16 reserved; }; /* iSCSI Common PDU header */ struct iscsi_common_hdr { u8 hdr_status; u8 hdr_response; u8 hdr_flags; u8 hdr_first_byte; #define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F #define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 #define ISCSI_COMMON_HDR_IMM_MASK 0x1 #define ISCSI_COMMON_HDR_IMM_SHIFT 6 #define ISCSI_COMMON_HDR_RSRV_MASK 0x1 #define ISCSI_COMMON_HDR_RSRV_SHIFT 7 __le32 hdr_second_dword; #define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun_reserved; __le32 itt; __le32 ttt; __le32 cmdstat_sn; __le32 exp_statcmd_sn; __le32 max_cmd_sn; __le32 data[3]; }; /* iSCSI Command PDU header */ struct iscsi_cmd_hdr { __le16 reserved1; u8 flags_attr; #define ISCSI_CMD_HDR_ATTR_MASK 0x7 #define ISCSI_CMD_HDR_ATTR_SHIFT 0 #define ISCSI_CMD_HDR_RSRV_MASK 0x3 #define ISCSI_CMD_HDR_RSRV_SHIFT 3 #define ISCSI_CMD_HDR_WRITE_MASK 0x1 #define ISCSI_CMD_HDR_WRITE_SHIFT 5 #define ISCSI_CMD_HDR_READ_MASK 0x1 #define ISCSI_CMD_HDR_READ_SHIFT 6 #define ISCSI_CMD_HDR_FINAL_MASK 0x1 #define ISCSI_CMD_HDR_FINAL_SHIFT 7 u8 hdr_first_byte; #define ISCSI_CMD_HDR_OPCODE_MASK 0x3F #define ISCSI_CMD_HDR_OPCODE_SHIFT 0 #define ISCSI_CMD_HDR_IMM_MASK 0x1 #define ISCSI_CMD_HDR_IMM_SHIFT 6 #define ISCSI_CMD_HDR_RSRV1_MASK 0x1 #define ISCSI_CMD_HDR_RSRV1_SHIFT 7 __le32 hdr_second_dword; #define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 expected_transfer_length; __le32 cmd_sn; __le32 exp_stat_sn; __le32 cdb[4]; }; /* iSCSI Command PDU header with Extended CDB (Initiator Mode) */ struct iscsi_ext_cdb_cmd_hdr { __le16 reserved1; u8 flags_attr; #define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 #define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 #define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 #define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 #define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 #define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 #define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 #define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 #define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 #define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 u8 opcode; __le32 hdr_second_dword; #define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF #define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 struct regpair lun; __le32 itt; __le32 expected_transfer_length; __le32 cmd_sn; __le32 exp_stat_sn; struct scsi_sge cdb_sge; }; /* iSCSI login request PDU header */ struct iscsi_login_req_hdr { u8 version_min; u8 version_max; u8 flags_attr; #define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 #define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 #define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 #define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 #define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 #define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 #define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 #define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 #define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 #define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 u8 opcode; __le32 hdr_second_dword; #define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF #define 
ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 isid_tabc; __le16 tsih; __le16 isid_d; __le32 itt; __le16 reserved1; __le16 cid; __le32 cmd_sn; __le32 exp_stat_sn; __le32 reserved2[4]; }; /* iSCSI logout request PDU header */ struct iscsi_logout_req_hdr { __le16 reserved0; u8 reason_code; u8 opcode; __le32 reserved1; __le32 reserved2[2]; __le32 itt; __le16 reserved3; __le16 cid; __le32 cmd_sn; __le32 exp_stat_sn; __le32 reserved4[4]; }; /* iSCSI Data-out PDU header */ struct iscsi_data_out_hdr { __le16 reserved1; u8 flags_attr; #define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F #define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 #define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 #define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; __le32 itt; __le32 ttt; __le32 reserved3; __le32 exp_stat_sn; __le32 reserved4; __le32 data_sn; __le32 buffer_offset; __le32 reserved5; }; /* iSCSI Data-in PDU header */ struct iscsi_data_in_hdr { u8 status_rsvd; u8 reserved1; u8 flags; #define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 #define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 #define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 #define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 #define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 #define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 #define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 #define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 #define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 #define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 #define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 #define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; __le32 itt; __le32 ttt; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le32 data_sn; __le32 buffer_offset; __le32 residual_count; }; /* iSCSI R2T PDU header */ struct iscsi_r2t_hdr { u8 reserved0[3]; u8 opcode; __le32 reserved2; struct regpair lun; __le32 itt; __le32 ttt; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le32 r2t_sn; __le32 buffer_offset; __le32 desired_data_trns_len; }; /* iSCSI NOP-out PDU header */ struct iscsi_nop_out_hdr { __le16 reserved1; u8 flags_attr; #define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F #define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 #define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 #define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; __le32 itt; __le32 ttt; __le32 cmd_sn; __le32 exp_stat_sn; __le32 reserved3; __le32 reserved4; __le32 reserved5; __le32 reserved6; }; /* iSCSI NOP-in PDU header */ struct iscsi_nop_in_hdr { __le16 reserved0; u8 flags_attr; #define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F #define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 #define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 #define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 u8 opcode; __le32 hdr_second_dword; #define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le32 reserved5; __le32 reserved6; __le32 reserved7; }; /* iSCSI Login Response PDU header */ struct iscsi_login_response_hdr { u8 version_active; u8 version_max; u8 flags_attr; #define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 #define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 #define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 #define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 #define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 #define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 #define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 #define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 #define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 #define 
ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 u8 opcode; __le32 hdr_second_dword; #define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 isid_tabc; __le16 tsih; __le16 isid_d; __le32 itt; __le32 reserved1; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le16 reserved2; u8 status_detail; u8 status_class; __le32 reserved4[2]; }; /* iSCSI Logout Response PDU header */ struct iscsi_logout_response_hdr { u8 reserved1; u8 response; u8 flags; u8 opcode; __le32 hdr_second_dword; #define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 reserved2[2]; __le32 itt; __le32 reserved3; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le32 reserved4; __le16 time_2_retain; __le16 time_2_wait; __le32 reserved5[1]; }; /* iSCSI Text Request PDU header */ struct iscsi_text_request_hdr { __le16 reserved0; u8 flags_attr; #define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F #define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 #define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 #define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 #define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 #define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 u8 opcode; __le32 hdr_second_dword; #define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; __le32 cmd_sn; __le32 exp_stat_sn; __le32 reserved4[4]; }; /* iSCSI Text Response PDU header */ struct iscsi_text_response_hdr { __le16 reserved1; u8 flags; #define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F #define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 #define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 #define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 #define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 #define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 u8 opcode; __le32 hdr_second_dword; #define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le32 reserved4[3]; }; /* iSCSI TMF Request PDU header */ struct iscsi_tmf_request_hdr { __le16 reserved0; u8 function; u8 opcode; __le32 hdr_second_dword; #define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 rtt; __le32 cmd_sn; __le32 exp_stat_sn; __le32 ref_cmd_sn; __le32 exp_data_sn; __le32 reserved4[2]; }; struct iscsi_tmf_response_hdr { u8 reserved2; u8 hdr_response; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; #define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; __le32 itt; __le32 reserved1; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le32 reserved4[3]; }; /* iSCSI Response PDU header */ struct iscsi_response_hdr { u8 
hdr_status; u8 hdr_response; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; #define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 snack_tag; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le32 exp_data_sn; __le32 bi_residual_count; __le32 residual_count; }; /* iSCSI Reject PDU header */ struct iscsi_reject_hdr { u8 reserved4; u8 hdr_reason; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; #define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; __le32 all_ones; __le32 reserved2; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le32 data_sn; __le32 reserved3[2]; }; /* iSCSI Asynchronous Message PDU header */ struct iscsi_async_msg_hdr { __le16 reserved0; u8 flags_attr; #define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F #define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 #define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 #define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 u8 opcode; __le32 hdr_second_dword; #define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 all_ones; __le32 reserved1; __le32 stat_sn; __le32 exp_cmd_sn; __le32 max_cmd_sn; __le16 param1_rsrv; u8 async_vcode; u8 async_event; __le16 param3_rsrv; __le16 param2_rsrv; __le32 reserved7; }; /* PDU header part of Ystorm task context */ union iscsi_task_hdr { struct iscsi_common_hdr common; struct data_hdr data; struct iscsi_cmd_hdr cmd; struct iscsi_ext_cdb_cmd_hdr ext_cdb_cmd; struct iscsi_login_req_hdr login_req; struct iscsi_logout_req_hdr logout_req; struct iscsi_data_out_hdr data_out; struct iscsi_data_in_hdr data_in; struct iscsi_r2t_hdr r2t; struct iscsi_nop_out_hdr nop_out; struct iscsi_nop_in_hdr nop_in; struct iscsi_login_response_hdr login_response; struct iscsi_logout_response_hdr logout_response; struct iscsi_text_request_hdr text_request; struct iscsi_text_response_hdr text_response; struct iscsi_tmf_request_hdr tmf_request; struct iscsi_tmf_response_hdr tmf_response; struct iscsi_response_hdr response; struct iscsi_reject_hdr reject; struct iscsi_async_msg_hdr async_msg; }; /* The iscsi storm task context of Ystorm */ struct ystorm_iscsi_task_st_ctx { struct ystorm_iscsi_task_state state; struct ystorm_iscsi_task_rxmit_opt rxmit_opt; union iscsi_task_hdr pdu_hdr; }; struct e4_ystorm_iscsi_task_ag_ctx { u8 reserved; u8 byte1; __le16 word0; u8 flags0; #define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF #define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 #define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 #define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */ #define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7 u8 flags1; #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 #define 
E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 TTT; u8 byte3; u8 byte4; __le16 word1; }; struct e4_mstorm_iscsi_task_ag_ctx { u8 cdu_validation; u8 byte1; __le16 task_cid; u8 flags0; #define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF #define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 #define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 u8 flags1; #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 reg0; u8 byte3; u8 byte4; __le16 word1; }; struct e4_ustorm_iscsi_task_ag_ctx { u8 reserved; u8 state; __le16 icid; u8 flags0; #define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF #define 
E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 #define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5 #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 u8 flags1; #define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 #define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 #define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 #define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 #define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 #define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 #define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 #define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 #define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 #define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 #define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 u8 flags3; #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 rcv_cont_len; __le32 exp_cont_len; __le32 total_data_acked; __le32 exp_data_acked; u8 byte2; u8 byte3; __le16 word1; __le16 next_tid; __le16 word3; __le32 hdr_residual_count; __le32 exp_r2t_sn; }; /* The iscsi storm task context of Mstorm */ struct mstorm_iscsi_task_st_ctx { struct scsi_cached_sges data_desc; struct scsi_sgl_params sgl_params; __le32 rem_task_size; __le32 data_buffer_offset; u8 task_type; struct iscsi_dif_flags dif_flags; __le16 dif_task_icid; struct regpair sense_db; __le32 expected_itt; __le32 reserved1; }; struct iscsi_reg1 { __le32 reg1_map; #define ISCSI_REG1_NUM_SGES_MASK 0xF #define ISCSI_REG1_NUM_SGES_SHIFT 0 #define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF #define ISCSI_REG1_RESERVED1_SHIFT 4 }; struct tqe_opaque { __le16 opaque[2]; }; /* The iscsi storm task context of Ustorm */ struct ustorm_iscsi_task_st_ctx { __le32 rem_rcv_len; __le32 exp_data_transfer_len; __le32 exp_data_sn; struct regpair lun; struct iscsi_reg1 reg1; u8 flags2; #define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F 
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 struct iscsi_dif_flags dif_flags; __le16 reserved3; struct tqe_opaque tqe_opaque_list; __le32 reserved5; __le32 reserved6; __le32 reserved7; u8 task_type; u8 error_flags; #define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 #define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 #define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F #define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 u8 flags; #define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 #define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 #define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 #define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 #define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 u8 cq_rss_number; }; /* iscsi task context */ struct e4_iscsi_task_context { struct ystorm_iscsi_task_st_ctx ystorm_st_context; struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context; struct regpair ystorm_ag_padding[2]; struct tdif_task_context tdif_context; struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context; struct regpair mstorm_ag_padding[2]; struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context; struct mstorm_iscsi_task_st_ctx mstorm_st_context; struct ustorm_iscsi_task_st_ctx ustorm_st_context; struct rdif_task_context rdif_context; }; /* iSCSI connection offload params passed by driver to FW in ISCSI offload * ramrod. 
*/ struct iscsi_conn_offload_params { struct regpair sq_pbl_addr; struct regpair r2tq_pbl_addr; struct regpair xhq_pbl_addr; struct regpair uhq_pbl_addr; __le32 initial_ack; __le16 physical_q0; __le16 physical_q1; u8 flags; #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 #define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 #define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 #define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F #define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 u8 pbl_page_size_log; u8 pbe_page_size_log; u8 default_cq; __le32 stat_sn; }; /* iSCSI connection statistics */ struct iscsi_conn_stats_params { struct regpair iscsi_tcp_tx_packets_cnt; struct regpair iscsi_tcp_tx_bytes_cnt; struct regpair iscsi_tcp_tx_rxmit_cnt; struct regpair iscsi_tcp_rx_packets_cnt; struct regpair iscsi_tcp_rx_bytes_cnt; struct regpair iscsi_tcp_rx_dup_ack_cnt; __le32 iscsi_tcp_rx_chksum_err_cnt; __le32 reserved; }; /* spe message header */ struct iscsi_slow_path_hdr { u8 op_code; u8 flags; #define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF #define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 #define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 #define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 #define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 #define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 }; /* iSCSI connection update params passed by driver to FW in ISCSI update * ramrod. */ struct iscsi_conn_update_ramrod_params { struct iscsi_slow_path_hdr hdr; __le16 conn_id; __le32 fw_cid; u8 flags; #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7 u8 reserved0[3]; __le32 max_seq_size; __le32 max_send_pdu_length; __le32 max_recv_pdu_length; __le32 first_seq_length; __le32 exp_stat_sn; union dif_configuration_params dif_on_imme_params; }; /* iSCSI CQ element */ struct iscsi_cqe_common { __le16 conn_id; u8 cqe_type; union cqe_error_status error_bitmap; __le32 reserved[3]; union iscsi_task_hdr iscsi_hdr; }; /* iSCSI CQ element */ struct iscsi_cqe_solicited { __le16 conn_id; u8 cqe_type; union cqe_error_status error_bitmap; __le16 itid; u8 task_type; u8 fw_dbg_field; u8 caused_conn_err; u8 reserved0[3]; __le32 data_truncated_bytes; union iscsi_task_hdr iscsi_hdr; }; /* iSCSI CQ element */ struct iscsi_cqe_unsolicited { __le16 conn_id; u8 cqe_type; union cqe_error_status error_bitmap; __le16 reserved0; u8 reserved1; u8 unsol_cqe_type; __le16 rqe_opaque; __le16 reserved2[3]; union iscsi_task_hdr iscsi_hdr; };
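/*
 * Usage sketch (illustrative, not part of the original header;
 * example_* is a hypothetical helper): since each *_EN flag above is a
 * one-bit field, enabling header and data digests in an update ramrod
 * reduces to mask<<shift writes. Posting the ramrod itself is omitted.
 */
static inline void
example_enable_digests(struct iscsi_conn_update_ramrod_params *p)
{
	p->flags |= ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK <<
		    ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT;
	p->flags |= ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK <<
		    ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT;
}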
/* iSCSI CQ element */ union iscsi_cqe { struct iscsi_cqe_common cqe_common; struct iscsi_cqe_solicited cqe_solicited; struct iscsi_cqe_unsolicited cqe_unsolicited; }; /* iSCSI CQE type */ enum iscsi_cqes_type { ISCSI_CQE_TYPE_SOLICITED = 1, ISCSI_CQE_TYPE_UNSOLICITED, ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE, ISCSI_CQE_TYPE_TASK_CLEANUP, ISCSI_CQE_TYPE_DUMMY, MAX_ISCSI_CQES_TYPE }; /* iSCSI CQE unsolicited type */ enum iscsi_cqe_unsolicited_type { ISCSI_CQE_UNSOLICITED_NONE, ISCSI_CQE_UNSOLICITED_SINGLE, ISCSI_CQE_UNSOLICITED_FIRST, ISCSI_CQE_UNSOLICITED_MIDDLE, ISCSI_CQE_UNSOLICITED_LAST, MAX_ISCSI_CQE_UNSOLICITED_TYPE }; /* iscsi debug modes */ struct iscsi_debug_modes { u8 flags; #define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 #define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 #define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1 #define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6 #define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1 #define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7 }; /* iSCSI kernel completion queue IDs */ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_INIT_FUNC = 0, ISCSI_EVENT_TYPE_DESTROY_FUNC, ISCSI_EVENT_TYPE_OFFLOAD_CONN, ISCSI_EVENT_TYPE_UPDATE_CONN, ISCSI_EVENT_TYPE_CLEAR_SQ, ISCSI_EVENT_TYPE_TERMINATE_CONN, ISCSI_EVENT_TYPE_MAC_UPDATE_CONN, ISCSI_EVENT_TYPE_COLLECT_STATS_CONN, ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, ISCSI_EVENT_TYPE_ASYN_SYN_RCVD, ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME, ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT, ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2, ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR, ISCSI_EVENT_TYPE_TCP_CONN_ERROR, MAX_ISCSI_EQE_OPCODE };
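/*
 * Usage sketch (illustrative, not part of the original header): a
 * completion handler normally switches on cqe_type before trusting any
 * type-specific member of union iscsi_cqe, roughly:
 */
static inline u16 example_cqe_task_ref(union iscsi_cqe *cqe)
{
	switch (cqe->cqe_common.cqe_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
	case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
		return le16_to_cpu(cqe->cqe_solicited.itid);
	case ISCSI_CQE_TYPE_UNSOLICITED:
		return le16_to_cpu(cqe->cqe_unsolicited.rqe_opaque);
	default:
		return 0; /* cleanup/dummy CQEs carry no task reference */
	}
}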
/* iSCSI EQE and CQE completion status */ enum iscsi_error_types { ISCSI_STATUS_NONE = 0, ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, ISCSI_CONN_ERROR_TASK_CID_MISMATCH, ISCSI_CONN_ERROR_TASK_NOT_VALID, ISCSI_CONN_ERROR_RQ_RING_IS_FULL, ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL, ISCSI_CONN_ERROR_HQE_CACHING_FAILED, ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR, ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, ISCSI_CONN_ERROR_DATA_OVERRUN, ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, ISCSI_CONN_ERROR_IP_OPTIONS_ERROR, ISCSI_CONN_ERROR_PRS_ERRORS, ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION, ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE, ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE, ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE, ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE, ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE, ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE, ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL, ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA, ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN, ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT, ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT, ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT, ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET, ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO, ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN, ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0, ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1, ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2, ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN, ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO, ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE, ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN, ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO, ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL, ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG, ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT, ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX, ISCSI_CONN_ERROR_SENSE_DATA_LENGTH, ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR, ISCSI_CONN_ERROR_INVALID_ITT, ISCSI_ERROR_UNKNOWN, MAX_ISCSI_ERROR_TYPES }; /* iSCSI Ramrod Command IDs */ enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_UNUSED = 0, ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2, ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3, ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4, ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7, ISCSI_RAMROD_CMD_ID_CONN_STATS = 8, MAX_ISCSI_RAMROD_CMD_ID }; /* iSCSI connection MAC update request */ struct iscsi_spe_conn_mac_update { struct iscsi_slow_path_hdr hdr; __le16 conn_id; __le32 fw_cid; __le16 remote_mac_addr_lo; __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; u8 reserved0[2]; }; /* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in * iSCSI offload ramrod. */ struct iscsi_spe_conn_offload { struct iscsi_slow_path_hdr hdr; __le16 conn_id; __le32 fw_cid; struct iscsi_conn_offload_params iscsi; struct tcp_offload_params tcp; }; /* iSCSI and TCP connection (Option 2) offload params passed by driver to FW in * iSCSI offload ramrod.
*/ struct iscsi_spe_conn_offload_option2 { struct iscsi_slow_path_hdr hdr; __le16 conn_id; __le32 fw_cid; struct iscsi_conn_offload_params iscsi; struct tcp_offload_params_opt2 tcp; }; /* iSCSI collect connection statistics request */ struct iscsi_spe_conn_statistics { struct iscsi_slow_path_hdr hdr; __le16 conn_id; __le32 fw_cid; u8 reset_stats; u8 reserved0[7]; struct regpair stats_cnts_addr; }; /* iSCSI connection termination request */ struct iscsi_spe_conn_termination { struct iscsi_slow_path_hdr hdr; __le16 conn_id; __le32 fw_cid; u8 abortive; u8 reserved0[7]; struct regpair queue_cnts_addr; struct regpair query_params_addr; }; /* iSCSI firmware function destroy parameters */ struct iscsi_spe_func_dstry { struct iscsi_slow_path_hdr hdr; __le16 reserved0; __le32 reserved1; }; /* iSCSI firmware function init parameters */ struct iscsi_spe_func_init { struct iscsi_slow_path_hdr hdr; __le16 half_way_close_timeout; u8 num_sq_pages_in_ring; u8 num_r2tq_pages_in_ring; u8 num_uhq_pages_in_ring; u8 ll2_rx_queue_id; u8 flags; #define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1 #define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0 #define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F #define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1 struct iscsi_debug_modes debug_mode; __le16 reserved1; __le32 reserved2; struct scsi_init_func_params func_params; struct scsi_init_func_queues q_params; }; /* iSCSI task type */ enum iscsi_task_type { ISCSI_TASK_TYPE_INITIATOR_WRITE, ISCSI_TASK_TYPE_INITIATOR_READ, ISCSI_TASK_TYPE_MIDPATH, ISCSI_TASK_TYPE_UNSOLIC, ISCSI_TASK_TYPE_EXCHCLEANUP, ISCSI_TASK_TYPE_IRRELEVANT, ISCSI_TASK_TYPE_TARGET_WRITE, ISCSI_TASK_TYPE_TARGET_READ, ISCSI_TASK_TYPE_TARGET_RESPONSE, ISCSI_TASK_TYPE_LOGIN_RESPONSE, ISCSI_TASK_TYPE_TARGET_IMM_W_DIF, MAX_ISCSI_TASK_TYPE }; /* iSCSI DesiredDataTransferLength/ttt union */ union iscsi_ttt_txlen_union { __le32 desired_tx_len; __le32 ttt; }; /* iSCSI uHQ element */ struct iscsi_uhqe { __le32 reg1; #define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF #define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 #define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 #define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 #define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 #define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 #define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 #define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 #define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 #define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 #define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF #define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 __le32 reg2; #define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF #define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 #define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF #define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 }; /* iSCSI WQ element */ struct iscsi_wqe { __le16 task_id; u8 flags; #define ISCSI_WQE_WQE_TYPE_MASK 0x7 #define ISCSI_WQE_WQE_TYPE_SHIFT 0 #define ISCSI_WQE_NUM_SGES_MASK 0xF #define ISCSI_WQE_NUM_SGES_SHIFT 3 #define ISCSI_WQE_RESPONSE_MASK 0x1 #define ISCSI_WQE_RESPONSE_SHIFT 7 struct iscsi_dif_flags prot_flags; __le32 contlen_cdbsize; #define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF #define ISCSI_WQE_CONT_LEN_SHIFT 0 #define ISCSI_WQE_CDB_SIZE_MASK 0xFF #define ISCSI_WQE_CDB_SIZE_SHIFT 24 }; /* iSCSI wqe type */ enum iscsi_wqe_type { ISCSI_WQE_TYPE_NORMAL, ISCSI_WQE_TYPE_TASK_CLEANUP, ISCSI_WQE_TYPE_MIDDLE_PATH, ISCSI_WQE_TYPE_LOGIN, ISCSI_WQE_TYPE_FIRST_R2T_CONT, ISCSI_WQE_TYPE_NONFIRST_R2T_CONT, ISCSI_WQE_TYPE_RESPONSE, MAX_ISCSI_WQE_TYPE }; /* iSCSI xHQ element */ struct iscsi_xhqe { union iscsi_ttt_txlen_union ttt_or_txlen; __le32 exp_stat_sn; struct iscsi_dif_flags prot_flags; u8 total_ahs_length; 
u8 opcode; u8 flags; #define ISCSI_XHQE_FINAL_MASK 0x1 #define ISCSI_XHQE_FINAL_SHIFT 0 #define ISCSI_XHQE_STATUS_BIT_MASK 0x1 #define ISCSI_XHQE_STATUS_BIT_SHIFT 1 #define ISCSI_XHQE_NUM_SGES_MASK 0xF #define ISCSI_XHQE_NUM_SGES_SHIFT 2 #define ISCSI_XHQE_RESERVED0_MASK 0x3 #define ISCSI_XHQE_RESERVED0_SHIFT 6 union iscsi_seq_num seq_num; __le16 reserved1; }; /* Per PF iSCSI receive path statistics - mStorm RAM structure */ struct mstorm_iscsi_stats_drv { struct regpair iscsi_rx_dropped_pdus_task_not_valid; struct regpair iscsi_rx_dup_ack_cnt; }; /* Per PF iSCSI transmit path statistics - pStorm RAM structure */ struct pstorm_iscsi_stats_drv { struct regpair iscsi_tx_bytes_cnt; struct regpair iscsi_tx_packet_cnt; }; /* Per PF iSCSI receive path statistics - tStorm RAM structure */ struct tstorm_iscsi_stats_drv { struct regpair iscsi_rx_bytes_cnt; struct regpair iscsi_rx_packet_cnt; struct regpair iscsi_rx_new_ooo_isle_events_cnt; struct regpair iscsi_rx_tcp_payload_bytes_cnt; struct regpair iscsi_rx_tcp_pkt_cnt; struct regpair iscsi_rx_pure_ack_cnt; __le32 iscsi_cmdq_threshold_cnt; __le32 iscsi_rq_threshold_cnt; __le32 iscsi_immq_threshold_cnt; }; /* Per PF iSCSI receive path statistics - uStorm RAM structure */ struct ustorm_iscsi_stats_drv { struct regpair iscsi_rx_data_pdu_cnt; struct regpair iscsi_rx_r2t_pdu_cnt; struct regpair iscsi_rx_total_pdu_cnt; }; /* Per PF iSCSI transmit path statistics - xStorm RAM structure */ struct xstorm_iscsi_stats_drv { struct regpair iscsi_tx_go_to_slow_start_event_cnt; struct regpair iscsi_tx_fast_retransmit_event_cnt; struct regpair iscsi_tx_pure_ack_cnt; struct regpair iscsi_tx_delayed_ack_cnt; }; /* Per PF iSCSI transmit path statistics - yStorm RAM structure */ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_data_pdu_cnt; struct regpair iscsi_tx_r2t_pdu_cnt; struct regpair iscsi_tx_total_pdu_cnt; struct regpair iscsi_tx_tcp_payload_bytes_cnt; struct regpair iscsi_tx_tcp_pkt_cnt; }; struct e4_tstorm_iscsi_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; #define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF #define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 
0 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; u8 byte3; u8 byte4; __le16 word2; __le16 word3; __le16 word4; __le32 reg1; __le32 reg2; }; /* iSCSI doorbell data */ struct iscsi_db_data { u8 params; #define ISCSI_DB_DATA_DEST_MASK 0x3 #define ISCSI_DB_DATA_DEST_SHIFT 0 #define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 #define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 #define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 #define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 #define ISCSI_DB_DATA_RESERVED_MASK 0x1 #define ISCSI_DB_DATA_RESERVED_SHIFT 5 #define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 #define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 sq_prod; }; #endif /* __ISCSI_COMMON__ */
qed/tcp_common.h
/* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and /or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/ #ifndef __TCP_COMMON__ #define __TCP_COMMON__ /********************/ /* TCP FW CONSTANTS */ /********************/ #define TCP_INVALID_TIMEOUT_VAL -1 /* OOO opaque data received from LL2 */ struct ooo_opaque { __le32 cid; u8 drop_isle; u8 drop_size; u8 ooo_opcode; u8 ooo_isle; }; /* tcp connect mode enum */ enum tcp_connect_mode { TCP_CONNECT_ACTIVE, TCP_CONNECT_PASSIVE, MAX_TCP_CONNECT_MODE }; /* tcp function init parameters */ struct tcp_init_params { __le32 two_msl_timer; __le16 tx_sws_timer; u8 max_fin_rt; u8 reserved[9]; }; /* tcp IPv4/IPv6 enum */ enum tcp_ip_version { TCP_IPV4, TCP_IPV6, MAX_TCP_IP_VERSION }; /* tcp offload parameters */ struct tcp_offload_params { __le16 local_mac_addr_lo; __le16 local_mac_addr_mid; __le16 local_mac_addr_hi; __le16 remote_mac_addr_lo; __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 vlan_id; __le16 flags; #define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 #define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 #define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 #define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3 #define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4 #define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5 #define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6 #define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 #define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7 #define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 #define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8 #define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F #define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9 u8 ip_version; u8 reserved0[3]; __le32 remote_ip[4]; __le32 local_ip[4]; __le32 flow_label; u8 ttl; u8 tos_or_tc; __le16 remote_port; __le16 local_port; __le16 mss; u8 rcv_wnd_scale; u8 connect_mode; __le16 srtt; __le32 ss_thresh; __le32 rcv_wnd; __le32 cwnd; u8 ka_max_probe_cnt; u8 dup_ack_theshold; __le16 reserved1; __le32 ka_timeout; __le32 ka_interval; __le32 max_rt_time; __le32 initial_rcv_wnd; __le32 rcv_next; __le32 snd_una; __le32 snd_next; __le32 snd_max; __le32 snd_wnd; __le32 snd_wl1; __le32 ts_recent; __le32 ts_recent_age; __le32 total_rt; __le32 ka_timeout_delta; __le32 rt_timeout_delta; u8 dup_ack_cnt; u8 snd_wnd_probe_cnt; u8 ka_probe_cnt; u8 rt_cnt; __le16 rtt_var; __le16 fw_internal; u8 snd_wnd_scale; u8 ack_frequency; __le16 da_timeout_value; __le32 reserved3; }; /* tcp offload parameters */ struct tcp_offload_params_opt2 { __le16 local_mac_addr_lo; __le16 local_mac_addr_mid; __le16 local_mac_addr_hi; __le16 remote_mac_addr_lo; __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 vlan_id; __le16 flags; #define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 #define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 #define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 #define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3 #define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF #define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4 u8 ip_version; u8 reserved1[3]; __le32 remote_ip[4]; __le32 local_ip[4]; __le32 flow_label; u8 ttl; u8 tos_or_tc; __le16 remote_port; __le16 local_port; __le16 mss; u8 rcv_wnd_scale; u8 connect_mode; __le16 syn_ip_payload_length; __le32 
syn_phy_addr_lo; __le32 syn_phy_addr_hi; __le32 cwnd; u8 ka_max_probe_cnt; u8 reserved2[3]; __le32 ka_timeout; __le32 ka_interval; __le32 max_rt_time; __le32 reserved3[16]; }; /* tcp segment placement event enum */ enum tcp_seg_placement_event { TCP_EVENT_ADD_PEN, TCP_EVENT_ADD_NEW_ISLE, TCP_EVENT_ADD_ISLE_RIGHT, TCP_EVENT_ADD_ISLE_LEFT, TCP_EVENT_JOIN, TCP_EVENT_DELETE_ISLES, TCP_EVENT_NOP, MAX_TCP_SEG_PLACEMENT_EVENT }; /* tcp update parameters */ struct tcp_update_params { __le16 flags; #define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 #define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 #define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 #define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 #define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 #define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 #define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 #define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 #define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 #define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 #define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 #define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 #define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 #define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 #define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 #define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 #define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 #define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 #define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 #define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 #define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 __le16 remote_mac_addr_lo; __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 mss; u8 ttl; u8 tos_or_tc; __le32 ka_timeout; __le32 ka_interval; __le32 max_rt_time; __le32 flow_label; __le32 initial_rcv_wnd; u8 ka_max_probe_cnt; u8 reserved1[7]; }; /* toe upload parameters */ struct tcp_upload_params { __le32 rcv_next; __le32 snd_una; __le32 snd_next; __le32 snd_max; __le32 snd_wnd; __le32 rcv_wnd; __le32 snd_wl1; __le32 cwnd; __le32 ss_thresh; __le16 srtt; __le16 rtt_var; __le32 ts_time; __le32 ts_recent; __le32 ts_recent_age; __le32 total_rt; __le32 ka_timeout_delta; __le32 rt_timeout_delta; u8 dup_ack_cnt; u8 snd_wnd_probe_cnt; u8 ka_probe_cnt; u8 rt_cnt; __le32 reserved; }; #endif /* __TCP_COMMON__ */
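/*
 * Usage sketch (illustrative, not part of the original header; the
 * timeout units are whatever the firmware contract specifies):
 * tcp_update_params pairs each value with a *_CHANGED flag so the FW
 * only applies fields the driver explicitly marked, e.g.:
 */
static inline void example_tcp_set_ka_timeout(struct tcp_update_params *p,
					      u32 ka_timeout)
{
	p->ka_timeout = cpu_to_le32(ka_timeout);
	p->flags |= cpu_to_le16(TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK <<
				TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT);
}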
rio.h
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RapidIO interconnect services * (RapidIO Interconnect Specification, http://www.rapidio.org) * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> */ #ifndef LINUX_RIO_H #define LINUX_RIO_H #include <linux/types.h> #include <linux/ioport.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/rio_regs.h> #include <linux/mod_devicetable.h> #ifdef CONFIG_RAPIDIO_DMA_ENGINE #include <linux/dmaengine.h> #endif #define RIO_NO_HOPCOUNT -1 #define RIO_INVALID_DESTID 0xffff #define RIO_MAX_MPORTS 8 #define RIO_MAX_MPORT_RESOURCES 16 #define RIO_MAX_DEV_RESOURCES 16 #define RIO_MAX_MPORT_NAME 40 #define RIO_GLOBAL_TABLE 0xff /* Indicates access of a switch's global routing table if it has multiple (or per port) tables */ #define RIO_INVALID_ROUTE 0xff /* Indicates that a route table entry is invalid (no route exists for the device ID) */ #define RIO_MAX_ROUTE_ENTRIES(size) (size ? (1 << 16) : (1 << 8)) #define RIO_ANY_DESTID(size) (size ? 0xffff : 0xff) #define RIO_MAX_MBOX 4 #define RIO_MAX_MSG_SIZE 0x1000 /* * Error values that may be returned by RIO functions. */ #define RIO_SUCCESSFUL 0x00 #define RIO_BAD_SIZE 0x81 /* * For RIO devices, the region numbers are assigned this way: * * 0 RapidIO outbound doorbells * 1-15 RapidIO memory regions * * For RIO master ports, the region numbers are assigned this way: * * 0 RapidIO inbound doorbells * 1 RapidIO inbound mailboxes * 2 RapidIO outbound mailboxes */ #define RIO_DOORBELL_RESOURCE 0 #define RIO_INB_MBOX_RESOURCE 1 #define RIO_OUTB_MBOX_RESOURCE 2 #define RIO_PW_MSG_SIZE 64 /* * A component tag value (stored in the component tag CSR) is used as a device's * unique identifier assigned during enumeration. Besides being used for * identifying switches (which do not have a device ID register), it also is used * by error management notification and therefore has to be assigned * to endpoints as well. */ #define RIO_CTAG_RESRVD 0xfffe0000 /* Reserved */ #define RIO_CTAG_UDEVID 0x0001ffff /* Unique device identifier */ extern struct bus_type rio_bus_type; extern struct class rio_mport_class; struct rio_mport; struct rio_dev; union rio_pw_msg; /** * struct rio_switch - RIO switch info * @node: Node in global list of switches * @route_table: Copy of switch routing table * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0 * @ops: pointer to switch-specific operations * @lock: lock to serialize operations updates * @nextdev: Array of per-port pointers to the next attached device */ struct rio_switch { struct list_head node; u8 *route_table; u32 port_ok; struct rio_switch_ops *ops; spinlock_t lock; struct rio_dev *nextdev[0]; };
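/*
 * Usage sketch (illustrative, not part of the original header;
 * example_* is hypothetical and <linux/slab.h> is assumed available):
 * enumeration code sizes a switch's route_table from the transport
 * system size and marks every entry invalid until routes are learned:
 */
static inline u8 *example_alloc_route_table(unsigned int sys_size)
{
	size_t n = RIO_MAX_ROUTE_ENTRIES(sys_size);
	u8 *table = kmalloc(n, GFP_KERNEL);

	if (table)
		memset(table, RIO_INVALID_ROUTE, n);
	return table;
}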
/** * struct rio_switch_ops - Per-switch operations * @owner: The module owner of this structure * @add_entry: Callback for switch-specific route add function * @get_entry: Callback for switch-specific route get function * @clr_table: Callback for switch-specific clear route table function * @set_domain: Callback for switch-specific domain setting function * @get_domain: Callback for switch-specific domain get function * @em_init: Callback for switch-specific error management init function * @em_handle: Callback for switch-specific error management handler function * * Defines the operations that are necessary to initialize/control * a particular RIO switch device. */ struct rio_switch_ops { struct module *owner; int (*add_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port); int (*get_entry) (struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port); int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount, u16 table); int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, u8 sw_domain); int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, u8 *sw_domain); int (*em_init) (struct rio_dev *dev); int (*em_handle) (struct rio_dev *dev, u8 swport); }; enum rio_device_state { RIO_DEVICE_INITIALIZING, RIO_DEVICE_RUNNING, RIO_DEVICE_GONE, RIO_DEVICE_SHUTDOWN, }; /** * struct rio_dev - RIO device info * @global_list: Node in list of all RIO devices * @net_list: Node in list of RIO devices in a network * @net: Network this device is a part of * @do_enum: Enumeration flag * @did: Device ID * @vid: Vendor ID * @device_rev: Device revision * @asm_did: Assembly device ID * @asm_vid: Assembly vendor ID * @asm_rev: Assembly revision * @efptr: Extended feature pointer * @pef: Processing element features * @swpinfo: Switch port info * @src_ops: Source operation capabilities * @dst_ops: Destination operation capabilities * @comp_tag: RIO component tag * @phys_efptr: RIO device extended features pointer * @phys_rmap: LP-Serial Register Map Type (1 or 2) * @em_efptr: RIO Error Management features pointer * @dma_mask: Mask of bits of RIO address this device implements * @driver: Driver claiming this device * @dev: Device model device * @riores: RIO resources this device owns * @pwcback: port-write callback function for this device * @destid: Network destination ID (or associated destid for switch) * @hopcount: Hopcount to this device * @prev: Previous RIO device connected to the current one * @state: device state * @rswitch: struct rio_switch (if valid for this device) */ struct rio_dev { struct list_head global_list; /* node in list of all RIO devices */ struct list_head net_list; /* node in per net list */ struct rio_net *net; /* RIO net this device resides in */ bool do_enum; u16 did; u16 vid; u32 device_rev; u16 asm_did; u16 asm_vid; u16 asm_rev; u16 efptr; u32 pef; u32 swpinfo; u32 src_ops; u32 dst_ops; u32 comp_tag; u32 phys_efptr; u32 phys_rmap; u32 em_efptr; u64 dma_mask; struct rio_driver *driver; /* RIO driver claiming this device */ struct device dev; /* LDM device structure */ struct resource riores[RIO_MAX_DEV_RESOURCES]; int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step); u16 destid; u8 hopcount; struct rio_dev *prev; atomic_t state; struct rio_switch rswitch[0]; /* RIO switch info */ }; #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list) #define rio_dev_f(n) list_entry(n, struct rio_dev, net_list) #define to_rio_dev(n) container_of(n, struct rio_dev, dev) #define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0]) #define to_rio_mport(n) container_of(n, struct rio_mport, dev) #define to_rio_net(n) container_of(n, struct rio_net, dev) /** * struct rio_msg - RIO message event * @res: Mailbox resource * @mcback: Message event callback */ struct rio_msg { struct resource *res; void (*mcback) (struct rio_mport *mport, void *dev_id, int mbox, int slot); }; /** * struct rio_dbell - RIO doorbell event * @node: Node in list of doorbell events * @res: Doorbell resource * @dinb: Doorbell event callback * @dev_id: Device specific pointer to pass on event */ struct rio_dbell { struct list_head node;
struct resource *res; void (*dinb) (struct rio_mport *mport, void *dev_id, u16 src, u16 dst, u16 info); void *dev_id; }; /** * struct rio_mport - RIO master port info * @dbells: List of doorbell events * @pwrites: List of portwrite events * @node: Node in global list of master ports * @nnode: Node in network list of master ports * @net: RIO net this mport is attached to * @lock: lock to synchronize lists manipulations * @iores: I/O mem resource that this master port interface owns * @riores: RIO resources that this master port interfaces owns * @inb_msg: RIO inbound message event descriptors * @outb_msg: RIO outbound message event descriptors * @host_deviceid: Host device ID associated with this master port * @ops: configuration space functions * @id: Port ID, unique among all ports * @index: Port index, unique among all port interfaces of the same type * @sys_size: RapidIO common transport system size * @phys_efptr: RIO port extended features pointer * @phys_rmap: LP-Serial EFB Register Mapping type (1 or 2). * @name: Port name string * @dev: device structure associated with an mport * @priv: Master port private data * @dma: DMA device associated with mport * @nscan: RapidIO network enumeration/discovery operations * @state: mport device state * @pwe_refcnt: port-write enable ref counter to track enable/disable requests */ struct rio_mport { struct list_head dbells; /* list of doorbell events */ struct list_head pwrites; /* list of portwrite events */ struct list_head node; /* node in global list of ports */ struct list_head nnode; /* node in net list of ports */ struct rio_net *net; /* RIO net this mport is attached to */ struct mutex lock; struct resource iores; struct resource riores[RIO_MAX_MPORT_RESOURCES]; struct rio_msg inb_msg[RIO_MAX_MBOX]; struct rio_msg outb_msg[RIO_MAX_MBOX]; int host_deviceid; /* Host device ID */ struct rio_ops *ops; /* low-level architecture-dependent routines */ unsigned char id; /* port ID, unique among all ports */ unsigned char index; /* port index, unique among all port interfaces of the same type */ unsigned int sys_size; /* RapidIO common transport system size. * 0 - Small size. 256 devices. * 1 - Large size, 65536 devices. 
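 * (Illustrative consequence: RIO_MAX_ROUTE_ENTRIES(sys_size) near the
 * top of this header evaluates to 256 when sys_size == 0 and to 65536
 * when sys_size == 1, and RIO_ANY_DESTID() widens from 0xff to 0xffff
 * accordingly.)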
*/ u32 phys_efptr; u32 phys_rmap; unsigned char name[RIO_MAX_MPORT_NAME]; struct device dev; void *priv; /* Master port private data */ #ifdef CONFIG_RAPIDIO_DMA_ENGINE struct dma_device dma; #endif struct rio_scan *nscan; atomic_t state; unsigned int pwe_refcnt; }; static inline int rio_mport_is_running(struct rio_mport *mport) { return atomic_read(&mport->state) == RIO_DEVICE_RUNNING; } /* * Enumeration/discovery control flags */ #define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */ /** * struct rio_net - RIO network info * @node: Node in global list of RIO networks * @devices: List of devices in this network * @switches: List of switches in this network * @mports: List of master ports accessing this network * @hport: Default port for accessing this network * @id: RIO network ID * @dev: Device object * @enum_data: private data specific to a network enumerator * @release: enumerator-specific release callback */ struct rio_net { struct list_head node; /* node in list of networks */ struct list_head devices; /* list of devices in this net */ struct list_head switches; /* list of switches in this net */ struct list_head mports; /* list of ports accessing net */ struct rio_mport *hport; /* primary port for accessing net */ unsigned char id; /* RIO network ID */ struct device dev; void *enum_data; /* private data for enumerator of the network */ void (*release)(struct rio_net *net); }; enum rio_link_speed { RIO_LINK_DOWN = 0, /* SRIO Link not initialized */ RIO_LINK_125 = 1, /* 1.25 GBaud */ RIO_LINK_250 = 2, /* 2.5 GBaud */ RIO_LINK_312 = 3, /* 3.125 GBaud */ RIO_LINK_500 = 4, /* 5.0 GBaud */ RIO_LINK_625 = 5 /* 6.25 GBaud */ }; enum rio_link_width { RIO_LINK_1X = 0, RIO_LINK_1XR = 1, RIO_LINK_2X = 3, RIO_LINK_4X = 2, RIO_LINK_8X = 4, RIO_LINK_16X = 5 }; enum rio_mport_flags { RIO_MPORT_DMA = (1 << 0), /* supports DMA data transfers */ RIO_MPORT_DMA_SG = (1 << 1), /* DMA supports HW SG mode */ RIO_MPORT_IBSG = (1 << 2), /* inbound mapping supports SG */ }; /** * struct rio_mport_attr - RIO mport device attributes * @flags: mport device capability flags * @link_speed: SRIO link speed value (as defined by RapidIO specification) * @link_width: SRIO link width value (as defined by RapidIO specification) * @dma_max_sge: number of SG list entries that can be handled by DMA channel(s) * @dma_max_size: max number of bytes in single DMA transfer (SG entry) * @dma_align: alignment shift for DMA operations (as for other DMA operations) */ struct rio_mport_attr { int flags; int link_speed; int link_width; /* DMA capability info: valid only if RIO_MPORT_DMA flag is set */ int dma_max_sge; int dma_max_size; int dma_align; }; /* Low-level architecture-dependent routines */ /** * struct rio_ops - Low-level RIO configuration space operations * @lcread: Callback to perform local (master port) read of config space. * @lcwrite: Callback to perform local (master port) write of config space. * @cread: Callback to perform network read of config space. * @cwrite: Callback to perform network write of config space. * @dsend: Callback to send a doorbell message. * @pwenable: Callback to enable/disable port-write message handling. * @open_outb_mbox: Callback to initialize outbound mailbox. * @close_outb_mbox: Callback to shut down outbound mailbox. * @open_inb_mbox: Callback to initialize inbound mailbox. * @close_inb_mbox: Callback to shut down inbound mailbox. * @add_outb_message: Callback to add a message to an outbound mailbox queue. 
* @add_inb_buffer: Callback to add a buffer to an inbound mailbox queue. * @get_inb_message: Callback to get a message from an inbound mailbox queue. * @map_inb: Callback to map RapidIO address region into local memory space. * @unmap_inb: Callback to unmap RapidIO address region mapped with map_inb(). * @query_mport: Callback to query mport device attributes. * @map_outb: Callback to map outbound address region into local memory space. * @unmap_outb: Callback to unmap outbound RapidIO address region. */ struct rio_ops { int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len, u32 *data); int (*lcwrite) (struct rio_mport *mport, int index, u32 offset, int len, u32 data); int (*cread) (struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 *data); int (*cwrite) (struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 data); int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data); int (*pwenable) (struct rio_mport *mport, int enable); int (*open_outb_mbox)(struct rio_mport *mport, void *dev_id, int mbox, int entries); void (*close_outb_mbox)(struct rio_mport *mport, int mbox); int (*open_inb_mbox)(struct rio_mport *mport, void *dev_id, int mbox, int entries); void (*close_inb_mbox)(struct rio_mport *mport, int mbox); int (*add_outb_message)(struct rio_mport *mport, struct rio_dev *rdev, int mbox, void *buffer, size_t len); int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf); void *(*get_inb_message)(struct rio_mport *mport, int mbox); int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart, u64 rstart, u64 size, u32 flags); void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart); int (*query_mport)(struct rio_mport *mport, struct rio_mport_attr *attr); int (*map_outb)(struct rio_mport *mport, u16 destid, u64 rstart, u32 size, u32 flags, dma_addr_t *laddr); void (*unmap_outb)(struct rio_mport *mport, u16 destid, u64 rstart); }; #define RIO_RESOURCE_MEM 0x00000100 #define RIO_RESOURCE_DOORBELL 0x00000200 #define RIO_RESOURCE_MAILBOX 0x00000400 #define RIO_RESOURCE_CACHEABLE 0x00010000 #define RIO_RESOURCE_PCI 0x00020000 #define RIO_RESOURCE_BUSY 0x80000000 /** * struct rio_driver - RIO driver info * @node: Node in list of drivers * @name: RIO driver name * @id_table: RIO device ids to be associated with this driver * @probe: RIO device inserted * @remove: RIO device removed * @shutdown: shutdown notification callback * @suspend: RIO device suspended * @resume: RIO device awakened * @enable_wake: RIO device enable wake event * @driver: LDM driver struct * * Provides info on a RIO device driver for insertion/removal and * power management purposes. 
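 *
 * A minimal driver skeleton (hypothetical names and a made-up did/vid
 * pair; rio_register_driver() is provided by the RapidIO core and
 * declared in linux/rio_drv.h, RIO_DEVICE() in linux/rio_ids.h):
 *
 *   static const struct rio_device_id my_rio_id_table[] = {
 *           { RIO_DEVICE(0x1234, 0x5678) },
 *           { 0, }
 *   };
 *
 *   static struct rio_driver my_rio_driver = {
 *           .name     = "my_rio",
 *           .id_table = my_rio_id_table,
 *           .probe    = my_rio_probe,
 *           .remove   = my_rio_remove,
 *   };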
*/ struct rio_driver { struct list_head node; char *name; const struct rio_device_id *id_table; int (*probe) (struct rio_dev * dev, const struct rio_device_id * id); void (*remove) (struct rio_dev * dev); void (*shutdown)(struct rio_dev *dev); int (*suspend) (struct rio_dev * dev, u32 state); int (*resume) (struct rio_dev * dev); int (*enable_wake) (struct rio_dev * dev, u32 state, int enable); struct device_driver driver; }; #define to_rio_driver(drv) container_of(drv,struct rio_driver, driver) union rio_pw_msg { struct { u32 comptag; /* Component Tag CSR */ u32 errdetect; /* Port N Error Detect CSR */ u32 is_port; /* Implementation specific + PortID */ u32 ltlerrdet; /* LTL Error Detect CSR */ u32 padding[12]; } em; u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)]; }; #ifdef CONFIG_RAPIDIO_DMA_ENGINE /* * enum rio_write_type - RIO write transaction types used in DMA transfers * * Note: RapidIO specification defines write (NWRITE) and * write-with-response (NWRITE_R) data transfer operations. * Existing DMA controllers that service RapidIO may use one of these operations * for entire data transfer or their combination with only the last data packet * requires response. */ enum rio_write_type { RDW_DEFAULT, /* default method used by DMA driver */ RDW_ALL_NWRITE, /* all packets use NWRITE */ RDW_ALL_NWRITE_R, /* all packets use NWRITE_R */ RDW_LAST_NWRITE_R, /* last packet uses NWRITE_R, others - NWRITE */ }; struct rio_dma_ext { u16 destid; u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */ u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */ enum rio_write_type wr_type; /* preferred RIO write operation type */ }; struct rio_dma_data { /* Local data (as scatterlist) */ struct scatterlist *sg; /* I/O scatter list */ unsigned int sg_len; /* size of scatter list */ /* Remote device address (flat buffer) */ u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */ u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */ enum rio_write_type wr_type; /* preferred RIO write operation type */ }; static inline struct rio_mport *dma_to_mport(struct dma_device *ddev) { return container_of(ddev, struct rio_mport, dma); } #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ /** * struct rio_scan - RIO enumeration and discovery operations * @owner: The module owner of this structure * @enumerate: Callback to perform RapidIO fabric enumeration. * @discover: Callback to perform RapidIO fabric discovery. */ struct rio_scan { struct module *owner; int (*enumerate)(struct rio_mport *mport, u32 flags); int (*discover)(struct rio_mport *mport, u32 flags); }; /** * struct rio_scan_node - list node to register RapidIO enumeration and * discovery methods with RapidIO core. 
* @mport_id: ID of an mport (net) serviced by this enumerator * @node: node in global list of registered enumerators * @ops: RIO enumeration and discovery operations */ struct rio_scan_node { int mport_id; struct list_head node; struct rio_scan *ops; }; /* Architecture and hardware-specific functions */ extern int rio_mport_initialize(struct rio_mport *); extern int rio_register_mport(struct rio_mport *); extern int rio_unregister_mport(struct rio_mport *); extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); extern void rio_close_inb_mbox(struct rio_mport *, int); extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int); extern void rio_close_outb_mbox(struct rio_mport *, int); extern int rio_query_mport(struct rio_mport *port, struct rio_mport_attr *mport_attr); #endif /* LINUX_RIO_H */ brcmphy.h 0000644 00000025464 14722070374 0006402 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BRCMPHY_H #define _LINUX_BRCMPHY_H #include <linux/phy.h> /* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used * to configure the switch internal registers via MDIO accesses. */ #define BRCM_PSEUDO_PHY_ADDR 30 #define PHY_ID_BCM50610 0x0143bd60 #define PHY_ID_BCM50610M 0x0143bd70 #define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 #define PHY_ID_BCM5395 0x0143bcf0 #define PHY_ID_BCM54810 0x03625d00 #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 #define PHY_ID_BCM5421 0x002060e0 #define PHY_ID_BCM54210E 0x600d84a0 #define PHY_ID_BCM5464 0x002060b0 #define PHY_ID_BCM5461 0x002060c0 #define PHY_ID_BCM54612E 0x03625e60 #define PHY_ID_BCM54616S 0x03625d10 #define PHY_ID_BCM57780 0x03625d90 #define PHY_ID_BCM89610 0x03625cd0 #define PHY_ID_BCM7250 0xae025280 #define PHY_ID_BCM7255 0xae025120 #define PHY_ID_BCM7260 0xae025190 #define PHY_ID_BCM7268 0xae025090 #define PHY_ID_BCM7271 0xae0253b0 #define PHY_ID_BCM7278 0xae0251a0 #define PHY_ID_BCM7364 0xae025260 #define PHY_ID_BCM7366 0x600d8490 #define PHY_ID_BCM7346 0x600d8650 #define PHY_ID_BCM7362 0x600d84b0 #define PHY_ID_BCM7425 0x600d86b0 #define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7435 0x600d8750 #define PHY_ID_BCM74371 0xae0252e0 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 #define PHY_ID_BCM_CYGNUS 0xae025200 #define PHY_ID_BCM_OMEGA 0xae025100 #define PHY_BCM_OUI_MASK 0xfffffc00 #define PHY_BCM_OUI_1 0x00206000 #define PHY_BCM_OUI_2 0x0143bc00 #define PHY_BCM_OUI_3 0x03625c00 #define PHY_BCM_OUI_4 0x600d8400 #define PHY_BCM_OUI_5 0x03625e00 #define PHY_BCM_OUI_6 0xae025000 #define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 #define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 #define PHY_BCM_FLAGS_INTF_SGMII 0x00000010 #define PHY_BCM_FLAGS_INTF_XAUI 0x00000020 #define PHY_BRCM_WIRESPEED_ENABLE 0x00000100 #define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200 #define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400 #define PHY_BRCM_STD_IBND_DISABLE 0x00000800 #define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000 #define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 #define PHY_BRCM_EN_MASTER_MODE 0x00010000 /* Broadcom BCM7xxx specific workarounds */ #define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) #define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) #define PHY_BCM_FLAGS_VALID 0x80000000 /* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */ #define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control 
register */ #define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ #define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */ #define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */ #define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */ #define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */ #define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */ #define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */ #define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */ #define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */ #define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */ #define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */ #define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */ #define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */ #define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */ #define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */ #define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */ #define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */ #define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */ #define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */ #define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */ #define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */ #define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */ #define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */ #define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */ #define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */ #define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */ #define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */ #define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */ #define MII_BCM54XX_SHD_WRITE 0x8000 #define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10) #define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0) /* * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18) */ #define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00 #define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400 #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100 #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 #define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 #define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12 #define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007 /* * Broadcom LED source encodings. These are used in BCM5461, BCM5481, * BCM5482, and possibly some others. 
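 *
 * For example (an illustrative sketch, assuming the shadow-register
 * write helper from drivers/net/phy/bcm-phy-lib.h), routing the
 * activity indication to LED1 of a BCM5482 combines one of these
 * source encodings with the LED selector macros defined below:
 *
 *   bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1,
 *                        BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_ACTIVITYLED));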
*/ #define BCM_LED_SRC_LINKSPD1 0x0 #define BCM_LED_SRC_LINKSPD2 0x1 #define BCM_LED_SRC_XMITLED 0x2 #define BCM_LED_SRC_ACTIVITYLED 0x3 #define BCM_LED_SRC_FDXLED 0x4 #define BCM_LED_SRC_SLAVE 0x5 #define BCM_LED_SRC_INTR 0x6 #define BCM_LED_SRC_QUALITY 0x7 #define BCM_LED_SRC_RCVLED 0x8 #define BCM_LED_SRC_WIRESPEED 0x9 #define BCM_LED_SRC_MULTICOLOR1 0xa #define BCM_LED_SRC_OPENSHORT 0xb #define BCM_LED_SRC_OFF 0xe /* Tied high */ #define BCM_LED_SRC_ON 0xf /* Tied low */ /* * Broadcom Multicolor LED configurations (expansion register 4) */ #define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04) #define BCM_LED_MULTICOLOR_IN_PHASE BIT(8) #define BCM_LED_MULTICOLOR_LINK_ACT 0x0 #define BCM_LED_MULTICOLOR_SPEED 0x1 #define BCM_LED_MULTICOLOR_ACT_FLASH 0x2 #define BCM_LED_MULTICOLOR_FDX 0x3 #define BCM_LED_MULTICOLOR_OFF 0x4 #define BCM_LED_MULTICOLOR_ON 0x5 #define BCM_LED_MULTICOLOR_ALT 0x6 #define BCM_LED_MULTICOLOR_FLASH 0x7 #define BCM_LED_MULTICOLOR_LINK 0x8 #define BCM_LED_MULTICOLOR_ACT 0x9 #define BCM_LED_MULTICOLOR_PROGRAM 0xa /* * BCM5482: Shadow registers * Shadow values go into bits [14:10] of register 0x1c to select a shadow * register to access. */ /* 00100: Reserved control register 2 */ #define BCM54XX_SHD_SCR2 0x04 #define BCM54XX_SHD_SCR2_WSPD_RTRY_DIS 0x100 #define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT 2 #define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET 2 #define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK 0x7 /* 00101: Spare Control Register 3 */ #define BCM54XX_SHD_SCR3 0x05 #define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 #define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 #define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 /* 01010: Auto Power-Down */ #define BCM54XX_SHD_APD 0x0a #define BCM_APD_CLR_MASK 0xFE9F /* clear bits 5, 6 & 8 */ #define BCM54XX_SHD_APD_EN 0x0020 #define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */ #define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */ #define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ /* LED3 / ~LINKSPD[2] selector */ #define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) /* LED1 / ~LINKSPD[1] selector */ #define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) #define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */ #define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ #define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ #define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ #define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ #define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ /* * EXPANSION SHADOW ACCESS REGISTERS. 
(PHY REG 0x15, 0x16, and 0x17) */ #define MII_BCM54XX_EXP_AADJ1CH0 0x001f #define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200 #define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100 #define MII_BCM54XX_EXP_AADJ1CH3 0x601f #define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002 #define MII_BCM54XX_EXP_EXP08 0x0F08 #define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001 #define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200 #define MII_BCM54XX_EXP_EXP75 0x0f75 #define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c #define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001 #define MII_BCM54XX_EXP_EXP96 0x0f96 #define MII_BCM54XX_EXP_EXP96_MYST 0x0010 #define MII_BCM54XX_EXP_EXP97 0x0f97 #define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c /* * BCM5482: Secondary SerDes registers */ #define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */ #define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */ #define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */ #define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ #define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ /* BCM54810 Registers */ #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90) #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) #define BCM54810_SHD_CLK_CTL 0x3 #define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) /* BCM54612E Registers */ #define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34) #define BCM54612E_LED4_CLK125OUT_EN (1 << 1) /*****************************************************************************/ /* Fast Ethernet Transceiver definitions. */ /*****************************************************************************/ #define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */ #define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */ #define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */ #define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */ #define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */ #define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */ #define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */ #define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */ /*** Shadow register definitions ***/ #define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */ #define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */ #define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */ #define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003 #define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001 #define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ #define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ #define BRCM_CL45VEN_EEE_CONTROL 0x803d #define LPI_FEATURE_EN 0x8000 #define LPI_FEATURE_EN_DIG1000X 0x4000 /* Core register definitions*/ #define MII_BRCM_CORE_BASE12 0x12 #define MII_BRCM_CORE_BASE13 0x13 #define MII_BRCM_CORE_BASE14 0x14 #define MII_BRCM_CORE_BASE1E 0x1E #define MII_BRCM_CORE_EXPB0 0xB0 #define MII_BRCM_CORE_EXPB1 0xB1 #endif /* _LINUX_BRCMPHY_H */ sysrq.h 0000644 00000003446 14722070374 0006113 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* -*- linux-c -*- * * $Id: sysrq.h,v 1.3 1997/07/17 11:54:33 mj Exp $ * * Linux Magic System Request Key Hacks * * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> * * (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com> * overhauled to use key registration * based upon discusions in irc://irc.openprojects.net/#kernelnewbies */ #ifndef _LINUX_SYSRQ_H #define _LINUX_SYSRQ_H #include <linux/errno.h> #include <linux/types.h> /* Possible values of 
bitmask for enabling sysrq functions */ /* 0x0001 is reserved for enable everything */ #define SYSRQ_ENABLE_LOG 0x0002 #define SYSRQ_ENABLE_KEYBOARD 0x0004 #define SYSRQ_ENABLE_DUMP 0x0008 #define SYSRQ_ENABLE_SYNC 0x0010 #define SYSRQ_ENABLE_REMOUNT 0x0020 #define SYSRQ_ENABLE_SIGNAL 0x0040 #define SYSRQ_ENABLE_BOOT 0x0080 #define SYSRQ_ENABLE_RTNICE 0x0100 struct sysrq_key_op { void (*handler)(int); char *help_msg; char *action_msg; int enable_mask; }; #ifdef CONFIG_MAGIC_SYSRQ /* Generic SysRq interface -- you may call it from any device driver, supplying * ASCII code of the key, pointer to registers and kbd/tty structs (if they * are available -- else NULL's). */ void handle_sysrq(int key); void __handle_sysrq(int key, bool check_mask); int register_sysrq_key(int key, struct sysrq_key_op *op); int unregister_sysrq_key(int key, struct sysrq_key_op *op); struct sysrq_key_op *__sysrq_get_key_op(int key); int sysrq_toggle_support(int enable_mask); #else static inline void handle_sysrq(int key) { } static inline void __handle_sysrq(int key, bool check_mask) { } static inline int register_sysrq_key(int key, struct sysrq_key_op *op) { return -EINVAL; } static inline int unregister_sysrq_key(int key, struct sysrq_key_op *op) { return -EINVAL; } #endif #endif /* _LINUX_SYSRQ_H */ iomap.h 0000644 00000016265 14722070374 0006042 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_IOMAP_H #define LINUX_IOMAP_H 1 #include <linux/atomic.h> #include <linux/bitmap.h> #include <linux/mm.h> #include <linux/types.h> #include <linux/mm_types.h> #include <linux/blkdev.h> struct address_space; struct fiemap_extent_info; struct inode; struct iov_iter; struct kiocb; struct page; struct vm_area_struct; struct vm_fault; /* * Types of block ranges for iomap mappings: */ #define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */ #define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */ #define IOMAP_MAPPED 0x03 /* blocks allocated at @addr */ #define IOMAP_UNWRITTEN 0x04 /* blocks allocated at @addr in unwritten state */ #define IOMAP_INLINE 0x05 /* data inline in the inode */ /* * Flags for all iomap mappings: * * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access * written data and requires fdatasync to commit them to persistent storage. * This needs to take into account metadata changes that *may* be made at IO * completion, such as file size updates from direct IO. 
*/ #define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */ #define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */ #define IOMAP_F_BUFFER_HEAD 0x04 /* file system requires buffer heads */ #define IOMAP_F_SIZE_CHANGED 0x08 /* file size has changed */ /* * Flags that only need to be reported for IOMAP_REPORT requests: */ #define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */ #define IOMAP_F_SHARED 0x20 /* block shared with another file */ /* * Flags from 0x1000 up are for file system specific usage: */ #define IOMAP_F_PRIVATE 0x1000 /* * Magic value for addr: */ #define IOMAP_NULL_ADDR -1ULL /* addr is not valid */ struct iomap_page_ops; struct iomap { u64 addr; /* disk offset of mapping, bytes */ loff_t offset; /* file offset of mapping, bytes */ u64 length; /* length of mapping, bytes */ u16 type; /* type of mapping */ u16 flags; /* flags for mapping */ struct block_device *bdev; /* block device for I/O */ struct dax_device *dax_dev; /* dax_dev for dax operations */ void *inline_data; void *private; /* filesystem private */ const struct iomap_page_ops *page_ops; }; static inline sector_t iomap_sector(struct iomap *iomap, loff_t pos) { return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; } /* * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare * and page_done will be called for each page written to. This only applies to * buffered writes as unbuffered writes will not typically have pages * associated with them. * * When page_prepare succeeds, page_done will always be called to do any * cleanup work necessary. In that page_done call, @page will be NULL if the * associated page could not be obtained. */ struct iomap_page_ops { int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len, struct iomap *iomap); void (*page_done)(struct inode *inode, loff_t pos, unsigned copied, struct page *page, struct iomap *iomap); }; /* * Flags for iomap_begin / iomap_end. No flag implies a read. */ #define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */ #define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */ #define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */ #define IOMAP_FAULT (1 << 3) /* mapping for page fault */ #define IOMAP_DIRECT (1 << 4) /* direct I/O */ #define IOMAP_NOWAIT (1 << 5) /* do not block */ struct iomap_ops { /* * Return the existing mapping at pos, or reserve space starting at * pos for up to length, as long as we can do it as a single mapping. * The actual length is returned in iomap->length. */ int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length, unsigned flags, struct iomap *iomap); /* * Commit and/or unreserve space previous allocated using iomap_begin. * Written indicates the length of the successful write operation which * needs to be commited, while the rest needs to be unreserved. * Written might be zero if no data was written. */ int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length, ssize_t written, unsigned flags, struct iomap *iomap); }; /* * Main iomap iterator function. */ typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len, void *data, struct iomap *iomap); loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, const struct iomap_ops *ops, void *data, iomap_actor_t actor); /* * Structure allocate for each page when block size < PAGE_SIZE to track * sub-page uptodate status and I/O completions. 
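 * (Illustrative sizing: with 4096-byte pages and a 512-byte block
 * size, the uptodate bitmap below tracks 8 sub-page blocks per page.)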
*/ struct iomap_page { atomic_t read_count; atomic_t write_count; spinlock_t uptodate_lock; DECLARE_BITMAP(uptodate, PAGE_SIZE / 512); }; static inline struct iomap_page *to_iomap_page(struct page *page) { if (page_has_private(page)) return (struct iomap_page *)page_private(page); return NULL; } ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); int iomap_readpage(struct page *page, const struct iomap_ops *ops); int iomap_readpages(struct address_space *mapping, struct list_head *pages, unsigned nr_pages, const struct iomap_ops *ops); int iomap_set_page_dirty(struct page *page); int iomap_is_partially_uptodate(struct page *page, unsigned long from, unsigned long count); int iomap_releasepage(struct page *page, gfp_t gfp_mask); void iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len); #ifdef CONFIG_MIGRATION int iomap_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode); #else #define iomap_migrate_page NULL #endif int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops); int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops); vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, loff_t start, loff_t len, const struct iomap_ops *ops); loff_t iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops); loff_t iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops); sector_t iomap_bmap(struct address_space *mapping, sector_t bno, const struct iomap_ops *ops); /* * Flags for direct I/O ->end_io: */ #define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */ #define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */ struct iomap_dio_ops { int (*end_io)(struct kiocb *iocb, ssize_t size, int error, unsigned flags); }; ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops); int iomap_dio_iopoll(struct kiocb *kiocb, bool spin); #ifdef CONFIG_SWAP struct file; struct swap_info_struct; int iomap_swapfile_activate(struct swap_info_struct *sis, struct file *swap_file, sector_t *pagespan, const struct iomap_ops *ops); #else # define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO) #endif /* CONFIG_SWAP */ #endif /* LINUX_IOMAP_H */ srcutree.h 0000644 00000011577 14722070374 0006572 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Sleepable Read-Copy Update mechanism for mutual exclusion, * tree variant. * * Copyright (C) IBM Corporation, 2017 * * Author: Paul McKenney <paulmck@linux.ibm.com> */ #ifndef _LINUX_SRCU_TREE_H #define _LINUX_SRCU_TREE_H #include <linux/rcu_node_tree.h> #include <linux/completion.h> struct srcu_node; struct srcu_struct; /* * Per-CPU structure feeding into leaf srcu_node, similar in function * to rcu_node. */ struct srcu_data { /* Read-side state. */ unsigned long srcu_lock_count[2]; /* Locks per CPU. */ unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */ /* Update-side state. */ spinlock_t __private lock ____cacheline_internodealigned_in_smp; struct rcu_segcblist srcu_cblist; /* List of callbacks.*/ unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. 
*/ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ bool srcu_cblist_invoking; /* Invoking these CBs? */ struct timer_list delay_work; /* Delay for CB invoking */ struct work_struct work; /* Context for CB invoking. */ struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ struct srcu_node *mynode; /* Leaf srcu_node. */ unsigned long grpmask; /* Mask for leaf srcu_node */ /* ->srcu_data_have_cbs[]. */ int cpu; struct srcu_struct *ssp; }; /* * Node in SRCU combining tree, similar in function to rcu_data. */ struct srcu_node { spinlock_t __private lock; unsigned long srcu_have_cbs[4]; /* GP seq for children */ /* having CBs, but only */ /* is > ->srcu_gq_seq. */ unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */ /* have CBs for given GP? */ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ struct srcu_node *srcu_parent; /* Next up in tree. */ int grplo; /* Least CPU for node. */ int grphi; /* Biggest CPU for node. */ }; /* * Per-SRCU-domain structure, similar in function to rcu_state. */ struct srcu_struct { struct srcu_node node[NUM_RCU_NODES]; /* Combining tree. */ struct srcu_node *level[RCU_NUM_LVLS + 1]; /* First node at each level. */ struct mutex srcu_cb_mutex; /* Serialize CB preparation. */ spinlock_t __private lock; /* Protect counters */ struct mutex srcu_gp_mutex; /* Serialize GP work. */ unsigned int srcu_idx; /* Current rdr array element. */ unsigned long srcu_gp_seq; /* Grace-period seq #. */ unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ unsigned long srcu_last_gp_end; /* Last GP end timestamp (ns) */ struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */ unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */ struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */ struct completion srcu_barrier_completion; /* Awaken barrier rq at end. */ atomic_t srcu_barrier_cpu_cnt; /* # CPUs not yet posting a */ /* callback for the barrier */ /* operation. */ struct delayed_work work; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ }; /* Values for state variable (bottom bits of ->srcu_gp_seq). */ #define SRCU_STATE_IDLE 0 #define SRCU_STATE_SCAN1 1 #define SRCU_STATE_SCAN2 2 #define __SRCU_STRUCT_INIT(name, pcpu_name) \ { \ .sda = &pcpu_name, \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .srcu_gp_seq_needed = -1UL, \ .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \ __SRCU_DEP_MAP_INIT(name) \ } /* * Define and initialize a srcu struct at build time. * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it. * * Note that although DEFINE_STATIC_SRCU() hides the name from other * files, the per-CPU variable rules nevertheless require that the * chosen name be globally unique. These rules also prohibit use of * DEFINE_STATIC_SRCU() within a function. If these rules are too * restrictive, declare the srcu_struct manually. For example, in * each file: * * static struct srcu_struct my_srcu; * * Then, before the first use of each my_srcu, manually initialize it: * * init_srcu_struct(&my_srcu); * * See include/linux/percpu-defs.h for the rules on per-CPU variables. 
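 *
 * A minimal read-side sketch against such a domain (srcu_read_lock()
 * and srcu_read_unlock() are declared in linux/srcu.h):
 *
 *   int idx;
 *
 *   idx = srcu_read_lock(&my_srcu);
 *   ... dereference pointers protected by my_srcu ...
 *   srcu_read_unlock(&my_srcu, idx);
 *
 * Writers typically publish an update and then wait with
 * synchronize_srcu(&my_srcu) before freeing the old data.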
*/ #ifdef MODULE # define __DEFINE_SRCU(name, is_static) \ is_static struct srcu_struct name; \ struct srcu_struct * const __srcu_struct_##name \ __section("___srcu_struct_ptrs") = &name #else # define __DEFINE_SRCU(name, is_static) \ static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data); \ is_static struct srcu_struct name = \ __SRCU_STRUCT_INIT(name, name##_srcu_data) #endif #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) void synchronize_srcu_expedited(struct srcu_struct *ssp); void srcu_barrier(struct srcu_struct *ssp); void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf); #endif mailbox_client.h 0000644 00000003332 14722070374 0007715 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013-2014 Linaro Ltd. * Author: Jassi Brar <jassisinghbrar@gmail.com> */ #ifndef __MAILBOX_CLIENT_H #define __MAILBOX_CLIENT_H #include <linux/of.h> #include <linux/device.h> struct mbox_chan; /** * struct mbox_client - User of a mailbox * @dev: The client device * @tx_block: If the mbox_send_message should block until data is * transmitted. * @tx_tout: Max block period in ms before TX is assumed failure * @knows_txdone: If the client could run the TX state machine. Usually * if the client receives some ACK packet for transmission. * Unused if the controller already has TX_Done/RTR IRQ. * @rx_callback: Atomic callback to provide client the data received * @tx_prepare: Atomic callback to ask client to prepare the payload * before initiating the transmission if required. * @tx_done: Atomic callback to tell client of data transmission */ struct mbox_client { struct device *dev; bool tx_block; unsigned long tx_tout; bool knows_txdone; void (*rx_callback)(struct mbox_client *cl, void *mssg); void (*tx_prepare)(struct mbox_client *cl, void *mssg); void (*tx_done)(struct mbox_client *cl, void *mssg, int r); }; struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl, const char *name); struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index); int mbox_send_message(struct mbox_chan *chan, void *mssg); int mbox_flush(struct mbox_chan *chan, unsigned long timeout); void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */ void mbox_free_channel(struct mbox_chan *chan); /* may sleep */ #endif /* __MAILBOX_CLIENT_H */ sysctl.h 0000644 00000017434 14722070374 0006255 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * sysctl.h: General linux system control interface * * Begun 24 March 1995, Stephen Tweedie * **************************************************************** **************************************************************** ** ** WARNING: ** The values in this file are exported to user space via ** the sysctl() binary interface. Do *NOT* change the ** numbering of any existing values here, and do not change ** any numbers within any one set of values. If you have to ** redefine an existing interface, use a new number for it. ** The kernel will then return -ENOTDIR to any application using ** the old binary interface. 
** **************************************************************** **************************************************************** */ #ifndef _LINUX_SYSCTL_H #define _LINUX_SYSCTL_H #include <linux/list.h> #include <linux/rcupdate.h> #include <linux/wait.h> #include <linux/rbtree.h> #include <linux/uidgid.h> #include <uapi/linux/sysctl.h> /* For the /proc/sys support */ struct completion; struct ctl_table; struct nsproxy; struct ctl_table_root; struct ctl_table_header; struct ctl_dir; /* Keep the same order as in fs/proc/proc_sysctl.c */ #define SYSCTL_ZERO ((void *)&sysctl_vals[0]) #define SYSCTL_ONE ((void *)&sysctl_vals[1]) #define SYSCTL_INT_MAX ((void *)&sysctl_vals[2]) extern const int sysctl_vals[]; typedef int proc_handler (struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern int proc_dostring(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_dointvec(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_douintvec(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_dointvec_minmax(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_douintvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern int proc_dointvec_jiffies(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_doulongvec_minmax(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void __user *, size_t *, loff_t *); extern int proc_do_large_bitmap(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_do_static_key(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); /* * Register a set of sysctl names by calling register_sysctl_table * with an initialised array of struct ctl_table's. An entry with * NULL procname terminates the table. table->de will be * set up by the registration and need not be initialised in advance. * * sysctl names can be mirrored automatically under /proc/sys. The * procname supplied controls /proc naming. * * The table's mode will be honoured both for sys_sysctl(2) and * proc-fs access. * * Leaf nodes in the sysctl tree will be represented by a single file * under /proc; non-leaf nodes will be represented by directories. A * null procname disables /proc mirroring at this node. * * sysctl(2) can automatically manage read and write requests through * the sysctl table. The data and maxlen fields of the ctl_table * struct enable minimal validation of the values being written to be * performed, and the mode field allows minimal authentication. * * There must be a proc_handler routine for any terminal nodes * mirrored under /proc/sys (non-terminals are handled by a built-in * directory handler). Several default handlers are available to * cover common cases. 
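 *
 * A minimal illustrative example (hypothetical names) of a one-entry
 * table wired to the default integer handler and registered under
 * /proc/sys/kernel, terminated by an empty entry as described above:
 *
 *   static int my_value;
 *
 *   static struct ctl_table my_table[] = {
 *           {
 *                   .procname     = "my_value",
 *                   .data         = &my_value,
 *                   .maxlen       = sizeof(int),
 *                   .mode         = 0644,
 *                   .proc_handler = proc_dointvec,
 *           },
 *           { }
 *   };
 *
 *   ... register_sysctl("kernel", my_table); ...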
*/ /* Support for userspace poll() to watch for changes */ struct ctl_table_poll { atomic_t event; wait_queue_head_t wait; }; static inline void *proc_sys_poll_event(struct ctl_table_poll *poll) { return (void *)(unsigned long)atomic_read(&poll->event); } #define __CTL_TABLE_POLL_INITIALIZER(name) { \ .event = ATOMIC_INIT(0), \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait) } #define DEFINE_CTL_TABLE_POLL(name) \ struct ctl_table_poll name = __CTL_TABLE_POLL_INITIALIZER(name) /* A sysctl table is an array of struct ctl_table: */ struct ctl_table { const char *procname; /* Text ID for /proc/sys, or zero */ void *data; int maxlen; umode_t mode; struct ctl_table *child; /* Deprecated */ proc_handler *proc_handler; /* Callback for text formatting */ struct ctl_table_poll *poll; void *extra1; void *extra2; } __randomize_layout; struct ctl_node { struct rb_node node; struct ctl_table_header *header; }; /* struct ctl_table_header is used to maintain dynamic lists of struct ctl_table trees. */ struct ctl_table_header { union { struct { struct ctl_table *ctl_table; int used; int count; int nreg; }; struct rcu_head rcu; }; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */ }; struct ctl_dir { /* Header must be at the start of ctl_dir */ struct ctl_table_header header; struct rb_root root; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set *); struct ctl_dir dir; }; struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set *(*lookup)(struct ctl_table_root *root); void (*set_ownership)(struct ctl_table_header *head, struct ctl_table *table, kuid_t *uid, kgid_t *gid); int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); }; /* struct ctl_path describes where in the hierarchy a table is added */ struct ctl_path { const char *procname; }; #ifdef CONFIG_SYSCTL void proc_sys_poll_notify(struct ctl_table_poll *poll); extern void setup_sysctl_set(struct ctl_table_set *p, struct ctl_table_root *root, int (*is_seen)(struct ctl_table_set *)); extern void retire_sysctl_set(struct ctl_table_set *set); struct ctl_table_header *__register_sysctl_table( struct ctl_table_set *set, const char *path, struct ctl_table *table); struct ctl_table_header *__register_sysctl_paths( struct ctl_table_set *set, const struct ctl_path *path, struct ctl_table *table); struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table); struct ctl_table_header *register_sysctl_table(struct ctl_table * table); struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, struct ctl_table *table); void unregister_sysctl_table(struct ctl_table_header * table); extern int sysctl_init(void); extern void __register_sysctl_init(const char *path, struct ctl_table *table, const char *table_name); #define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table) extern struct ctl_table sysctl_mount_point[]; #else /* CONFIG_SYSCTL */ static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table) { return NULL; } static inline struct ctl_table_header *register_sysctl_paths( const struct ctl_path *path, struct ctl_table *table) { return NULL; } static inline struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table) { return NULL; } static inline void unregister_sysctl_table(struct ctl_table_header * table) { } 
static inline void setup_sysctl_set(struct ctl_table_set *p, struct ctl_table_root *root, int (*is_seen)(struct ctl_table_set *)) { } #endif /* CONFIG_SYSCTL */ int sysctl_max_threads(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif /* _LINUX_SYSCTL_H */ log2.h 0000644 00000014300 14722070374 0005564 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Integer base 2 logarithm calculation * * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_LOG2_H #define _LINUX_LOG2_H #include <linux/types.h> #include <linux/bitops.h> /* * non-constant log of base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented * more efficiently than using fls() and fls64() * - the arch is not required to handle n==0 if implementing the fallback */ #ifndef CONFIG_ARCH_HAS_ILOG2_U32 static inline __attribute__((const)) int __ilog2_u32(u32 n) { return fls(n) - 1; } #endif #ifndef CONFIG_ARCH_HAS_ILOG2_U64 static inline __attribute__((const)) int __ilog2_u64(u64 n) { return fls64(n) - 1; } #endif /** * is_power_of_2() - check if a value is a power of two * @n: the value to check * * Determine whether some value is a power of two, where zero is * *not* considered a power of two. * Return: true if @n is a power of 2, otherwise false. */ static inline __attribute__((const)) bool is_power_of_2(unsigned long n) { return (n != 0 && ((n & (n - 1)) == 0)); } /** * __roundup_pow_of_two() - round up to nearest power of two * @n: value to round up */ static inline __attribute__((const)) unsigned long __roundup_pow_of_two(unsigned long n) { return 1UL << fls_long(n - 1); } /** * __rounddown_pow_of_two() - round down to nearest power of two * @n: value to round down */ static inline __attribute__((const)) unsigned long __rounddown_pow_of_two(unsigned long n) { return 1UL << (fls_long(n) - 1); } /** * const_ilog2 - log base 2 of 32-bit or a 64-bit constant unsigned value * @n: parameter * * Use this where sparse expects a true constant expression, e.g. for array * indices. */ #define const_ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ (n) < 2 ? 0 : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ (n) & (1ULL << 60) ? 60 : \ (n) & (1ULL << 59) ? 59 : \ (n) & (1ULL << 58) ? 58 : \ (n) & (1ULL << 57) ? 57 : \ (n) & (1ULL << 56) ? 56 : \ (n) & (1ULL << 55) ? 55 : \ (n) & (1ULL << 54) ? 54 : \ (n) & (1ULL << 53) ? 53 : \ (n) & (1ULL << 52) ? 52 : \ (n) & (1ULL << 51) ? 51 : \ (n) & (1ULL << 50) ? 50 : \ (n) & (1ULL << 49) ? 49 : \ (n) & (1ULL << 48) ? 48 : \ (n) & (1ULL << 47) ? 47 : \ (n) & (1ULL << 46) ? 46 : \ (n) & (1ULL << 45) ? 45 : \ (n) & (1ULL << 44) ? 44 : \ (n) & (1ULL << 43) ? 43 : \ (n) & (1ULL << 42) ? 42 : \ (n) & (1ULL << 41) ? 41 : \ (n) & (1ULL << 40) ? 40 : \ (n) & (1ULL << 39) ? 39 : \ (n) & (1ULL << 38) ? 38 : \ (n) & (1ULL << 37) ? 37 : \ (n) & (1ULL << 36) ? 36 : \ (n) & (1ULL << 35) ? 35 : \ (n) & (1ULL << 34) ? 34 : \ (n) & (1ULL << 33) ? 33 : \ (n) & (1ULL << 32) ? 32 : \ (n) & (1ULL << 31) ? 31 : \ (n) & (1ULL << 30) ? 30 : \ (n) & (1ULL << 29) ? 29 : \ (n) & (1ULL << 28) ? 28 : \ (n) & (1ULL << 27) ? 27 : \ (n) & (1ULL << 26) ? 26 : \ (n) & (1ULL << 25) ? 25 : \ (n) & (1ULL << 24) ? 24 : \ (n) & (1ULL << 23) ? 23 : \ (n) & (1ULL << 22) ? 22 : \ (n) & (1ULL << 21) ? 21 : \ (n) & (1ULL << 20) ? 20 : \ (n) & (1ULL << 19) ? 19 : \ (n) & (1ULL << 18) ? 18 : \ (n) & (1ULL << 17) ? 17 : \ (n) & (1ULL << 16) ? 
16 : \ (n) & (1ULL << 15) ? 15 : \ (n) & (1ULL << 14) ? 14 : \ (n) & (1ULL << 13) ? 13 : \ (n) & (1ULL << 12) ? 12 : \ (n) & (1ULL << 11) ? 11 : \ (n) & (1ULL << 10) ? 10 : \ (n) & (1ULL << 9) ? 9 : \ (n) & (1ULL << 8) ? 8 : \ (n) & (1ULL << 7) ? 7 : \ (n) & (1ULL << 6) ? 6 : \ (n) & (1ULL << 5) ? 5 : \ (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ 1) : \ -1) /** * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value * @n: parameter * * constant-capable log of base 2 calculation * - this can be used to initialise global variables from constant data, hence * the massive ternary operator construction * * selects the appropriately-sized optimised version depending on sizeof(n) */ #define ilog2(n) \ ( \ __builtin_constant_p(n) ? \ const_ilog2(n) : \ (sizeof(n) <= 4) ? \ __ilog2_u32(n) : \ __ilog2_u64(n) \ ) /** * roundup_pow_of_two - round the given value up to nearest power of two * @n: parameter * * round the given value up to the nearest power of two * - the result is undefined when n == 0 * - this can be used to initialise global variables from constant data */ #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ ((n) == 1) ? 1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ ) /** * rounddown_pow_of_two - round the given value down to nearest power of two * @n: parameter * * round the given value down to the nearest power of two * - the result is undefined when n == 0 * - this can be used to initialise global variables from constant data */ #define rounddown_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ (1UL << ilog2(n))) : \ __rounddown_pow_of_two(n) \ ) static inline __attribute_const__ int __order_base_2(unsigned long n) { return n > 1 ? ilog2(n - 1) + 1 : 0; } /** * order_base_2 - calculate the (rounded up) base 2 order of the argument * @n: parameter * * The first few values calculated by this routine: * ob2(0) = 0 * ob2(1) = 0 * ob2(2) = 1 * ob2(3) = 2 * ob2(4) = 2 * ob2(5) = 3 * ... and so on. */ #define order_base_2(n) \ ( \ __builtin_constant_p(n) ? ( \ ((n) == 0 || (n) == 1) ? 0 : \ ilog2((n) - 1) + 1) : \ __order_base_2(n) \ ) static inline __attribute__((const)) int __bits_per(unsigned long n) { if (n < 2) return 1; if (is_power_of_2(n)) return order_base_2(n) + 1; return order_base_2(n); } /** * bits_per - calculate the number of bits required for the argument * @n: parameter * * This is constant-capable and can be used for compile time * initializations, e.g bitfields. * * The first few values calculated by this routine: * bf(0) = 1 * bf(1) = 1 * bf(2) = 2 * bf(3) = 2 * bf(4) = 3 * ... and so on. */ #define bits_per(n) \ ( \ __builtin_constant_p(n) ? ( \ ((n) == 0 || (n) == 1) \ ? 
1 : ilog2(n) + 1 \ ) : \ __bits_per(n) \ ) #endif /* _LINUX_LOG2_H */ timeriomem-rng.h 0000644 00000000660 14722070374 0007660 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/linux/timeriomem-rng.h * * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk> */ #ifndef _LINUX_TIMERIOMEM_RNG_H #define _LINUX_TIMERIOMEM_RNG_H struct timeriomem_rng_data { void __iomem *address; /* measures in usecs */ unsigned int period; /* bits of entropy per 1024 bits read */ unsigned int quality; }; #endif /* _LINUX_TIMERIOMEM_RNG_H */ leds-ti-lmu-common.h 0000644 00000002174 14722070374 0010351 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ // TI LMU Common Core // Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ #ifndef _TI_LMU_COMMON_H_ #define _TI_LMU_COMMON_H_ #include <linux/delay.h> #include <linux/device.h> #include <linux/init.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/regmap.h> #include <linux/slab.h> #include <uapi/linux/uleds.h> #define LMU_11BIT_LSB_MASK (BIT(0) | BIT(1) | BIT(2)) #define LMU_11BIT_MSB_SHIFT 3 #define MAX_BRIGHTNESS_8BIT 255 #define MAX_BRIGHTNESS_11BIT 2047 struct ti_lmu_bank { struct regmap *regmap; int max_brightness; u8 lsb_brightness_reg; u8 msb_brightness_reg; u8 runtime_ramp_reg; u32 ramp_up_usec; u32 ramp_down_usec; }; int ti_lmu_common_set_brightness(struct ti_lmu_bank *lmu_bank, int brightness); int ti_lmu_common_set_ramp(struct ti_lmu_bank *lmu_bank); int ti_lmu_common_get_ramp_params(struct device *dev, struct fwnode_handle *child, struct ti_lmu_bank *lmu_data); int ti_lmu_common_get_brt_res(struct device *dev, struct fwnode_handle *child, struct ti_lmu_bank *lmu_data); #endif /* _TI_LMU_COMMON_H_ */ olpc-ec.h 0000644 00000003720 14722070374 0006247 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_OLPC_EC_H #define _LINUX_OLPC_EC_H #include <linux/bits.h> /* XO-1 EC commands */ #define EC_FIRMWARE_REV 0x08 #define EC_WRITE_SCI_MASK 0x1b #define EC_WAKE_UP_WLAN 0x24 #define EC_WLAN_LEAVE_RESET 0x25 #define EC_DCON_POWER_MODE 0x26 #define EC_READ_EB_MODE 0x2a #define EC_SET_SCI_INHIBIT 0x32 #define EC_SET_SCI_INHIBIT_RELEASE 0x34 #define EC_WLAN_ENTER_RESET 0x35 #define EC_WRITE_EXT_SCI_MASK 0x38 #define EC_SCI_QUERY 0x84 #define EC_EXT_SCI_QUERY 0x85 /* SCI source values */ #define EC_SCI_SRC_GAME BIT(0) #define EC_SCI_SRC_BATTERY BIT(1) #define EC_SCI_SRC_BATSOC BIT(2) #define EC_SCI_SRC_BATERR BIT(3) #define EC_SCI_SRC_EBOOK BIT(4) /* XO-1 only */ #define EC_SCI_SRC_WLAN BIT(5) /* XO-1 only */ #define EC_SCI_SRC_ACPWR BIT(6) #define EC_SCI_SRC_BATCRIT BIT(7) #define EC_SCI_SRC_GPWAKE BIT(8) /* XO-1.5 only */ #define EC_SCI_SRC_ALL GENMASK(8, 0) struct platform_device; struct olpc_ec_driver { int (*suspend)(struct platform_device *); int (*resume)(struct platform_device *); int (*ec_cmd)(u8, u8 *, size_t, u8 *, size_t, void *); bool wakeup_available; }; #ifdef CONFIG_OLPC_EC extern void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg); extern int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen); extern void olpc_ec_wakeup_set(u16 value); extern void olpc_ec_wakeup_clear(u16 value); extern int olpc_ec_mask_write(u16 bits); extern int olpc_ec_sci_query(u16 *sci_value); extern bool olpc_ec_wakeup_available(void); #else static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen) { return -ENODEV; } static inline void olpc_ec_wakeup_set(u16 value) { } static inline void olpc_ec_wakeup_clear(u16 value) { } 
static inline bool olpc_ec_wakeup_available(void) { return false; } #endif /* CONFIG_OLPC_EC */ #endif /* _LINUX_OLPC_EC_H */ via.h 0000644 00000001644 14722070374 0005507 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* Miscellaneous definitions for VIA chipsets Currently used only by drivers/parport/parport_pc.c */ /* Values for SuperIO function select configuration register */ #define VIA_FUNCTION_PARPORT_SPP 0x00 #define VIA_FUNCTION_PARPORT_ECP 0x01 #define VIA_FUNCTION_PARPORT_EPP 0x02 #define VIA_FUNCTION_PARPORT_DISABLE 0x03 #define VIA_FUNCTION_PROBE 0xFF /* Special magic value to be used in code, not to be written into chip */ /* Bits for parallel port mode configuration register */ #define VIA_PARPORT_ECPEPP 0X20 #define VIA_PARPORT_BIDIR 0x80 /* VIA configuration registers */ #define VIA_CONFIG_INDEX 0x3F0 #define VIA_CONFIG_DATA 0x3F1 /* Mask for parallel port IRQ bits (in ISA PnP IRQ routing register 1) */ #define VIA_IRQCONTROL_PARALLEL 0xF0 /* Mask for parallel port DMA bits (in ISA PnP DMA routing register) */ #define VIA_DMACONTROL_PARALLEL 0x0C nvme-rdma.h 0000644 00000004045 14722070374 0006614 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2015 Mellanox Technologies. All rights reserved. */ #ifndef _LINUX_NVME_RDMA_H #define _LINUX_NVME_RDMA_H enum nvme_rdma_cm_fmt { NVME_RDMA_CM_FMT_1_0 = 0x0, }; enum nvme_rdma_cm_status { NVME_RDMA_CM_INVALID_LEN = 0x01, NVME_RDMA_CM_INVALID_RECFMT = 0x02, NVME_RDMA_CM_INVALID_QID = 0x03, NVME_RDMA_CM_INVALID_HSQSIZE = 0x04, NVME_RDMA_CM_INVALID_HRQSIZE = 0x05, NVME_RDMA_CM_NO_RSC = 0x06, NVME_RDMA_CM_INVALID_IRD = 0x07, NVME_RDMA_CM_INVALID_ORD = 0x08, }; static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status) { switch (status) { case NVME_RDMA_CM_INVALID_LEN: return "invalid length"; case NVME_RDMA_CM_INVALID_RECFMT: return "invalid record format"; case NVME_RDMA_CM_INVALID_QID: return "invalid queue ID"; case NVME_RDMA_CM_INVALID_HSQSIZE: return "invalid host SQ size"; case NVME_RDMA_CM_INVALID_HRQSIZE: return "invalid host RQ size"; case NVME_RDMA_CM_NO_RSC: return "resource not found"; case NVME_RDMA_CM_INVALID_IRD: return "invalid IRD"; case NVME_RDMA_CM_INVALID_ORD: return "Invalid ORD"; default: return "unrecognized reason"; } } /** * struct nvme_rdma_cm_req - rdma connect request * * @recfmt: format of the RDMA Private Data * @qid: queue Identifier for the Admin or I/O Queue * @hrqsize: host receive queue size to be created * @hsqsize: host send queue size to be created */ struct nvme_rdma_cm_req { __le16 recfmt; __le16 qid; __le16 hrqsize; __le16 hsqsize; u8 rsvd[24]; }; /** * struct nvme_rdma_cm_rep - rdma connect reply * * @recfmt: format of the RDMA Private Data * @crqsize: controller receive queue size */ struct nvme_rdma_cm_rep { __le16 recfmt; __le16 crqsize; u8 rsvd[28]; }; /** * struct nvme_rdma_cm_rej - rdma connect reject * * @recfmt: format of the RDMA Private Data * @sts: error status for the associated connect request */ struct nvme_rdma_cm_rej { __le16 recfmt; __le16 sts; }; #endif /* _LINUX_NVME_RDMA_H */ hil_mlc.h 0000644 00000012200 14722070374 0006325 0 ustar 00 /* * HP Human Interface Loop Master Link Controller driver. * * Copyright (c) 2001 Brian S. Julin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * References: * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A * */ #include <linux/hil.h> #include <linux/time.h> #include <linux/interrupt.h> #include <linux/semaphore.h> #include <linux/serio.h> #include <linux/list.h> typedef struct hil_mlc hil_mlc; /* The HIL has a complicated state engine. * We define the structure of nodes in the state engine here. */ enum hilse_act { /* HILSE_OUT prepares to receive input if the next node * is an IN or EXPECT, and then sends the given packet. */ HILSE_OUT = 0, /* HILSE_CTS checks if the loop is busy. */ HILSE_CTS, /* HILSE_OUT_LAST sends the given command packet to * the last configured/running device on the loop. */ HILSE_OUT_LAST, /* HILSE_OUT_DISC sends the given command packet to * the next device past the last configured/running one. */ HILSE_OUT_DISC, /* HILSE_FUNC runs a callback function with given arguments. * A positive return value causes the "ugly" branch to be taken. */ HILSE_FUNC, /* HILSE_IN simply expects any non-errored packet to arrive * within arg usecs. */ HILSE_IN = 0x100, /* HILSE_EXPECT expects a particular packet to arrive * within arg usecs; any other packet is considered an error. */ HILSE_EXPECT, /* HILSE_EXPECT_LAST as above, but the dev field should be the last * discovered/operational device. */ HILSE_EXPECT_LAST, /* HILSE_EXPECT_DISC as above, but the dev field should be the first * undiscovered/non-operational device. */ HILSE_EXPECT_DISC }; typedef int (hilse_func) (hil_mlc *mlc, int arg); struct hilse_node { enum hilse_act act; /* How to process this node */ union { hilse_func *func; /* Function to call if HILSE_FUNC */ hil_packet packet; /* Packet to send or to compare */ } object; int arg; /* Timeout in usec or parm for func */ int good; /* Node to jump to on success */ int bad; /* Node to jump to on error */ int ugly; /* Node to jump to on timeout */ }; /* Methods for back-end drivers, e.g.
hp_sdc_mlc */ typedef int (hil_mlc_cts) (hil_mlc *mlc); typedef int (hil_mlc_out) (hil_mlc *mlc); typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout); struct hil_mlc_devinfo { uint8_t idd[16]; /* Device ID Byte and Describe Record */ uint8_t rsc[16]; /* Security Code Header and Record */ uint8_t exd[16]; /* Extended Describe Record */ uint8_t rnm[16]; /* Device name as returned by RNM command */ }; struct hil_mlc_serio_map { hil_mlc *mlc; int di_revmap; int didx; }; /* How many (possibly old/detached) devices the we try to keep track of */ #define HIL_MLC_DEVMEM 16 struct hil_mlc { struct list_head list; /* hil_mlc is organized as linked list */ rwlock_t lock; void *priv; /* Data specific to a particular type of MLC */ int seidx; /* Current node in state engine */ int istarted, ostarted; hil_mlc_cts *cts; struct semaphore csem; /* Raised when loop idle */ hil_mlc_out *out; struct semaphore osem; /* Raised when outpacket dispatched */ hil_packet opacket; hil_mlc_in *in; struct semaphore isem; /* Raised when a packet arrives */ hil_packet ipacket[16]; hil_packet imatch; int icount; unsigned long instart; unsigned long intimeout; int ddi; /* Last operational device id */ int lcv; /* LCV to throttle loops */ time64_t lcv_time; /* Time loop was started */ int di_map[7]; /* Maps below items to live devs */ struct hil_mlc_devinfo di[HIL_MLC_DEVMEM]; struct serio *serio[HIL_MLC_DEVMEM]; struct hil_mlc_serio_map serio_map[HIL_MLC_DEVMEM]; hil_packet serio_opacket[HIL_MLC_DEVMEM]; int serio_oidx[HIL_MLC_DEVMEM]; struct hil_mlc_devinfo di_scratch; /* Temporary area */ int opercnt; struct tasklet_struct *tasklet; }; int hil_mlc_register(hil_mlc *mlc); int hil_mlc_unregister(hil_mlc *mlc); gfp.h 0000644 00000057706 14722070374 0005516 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GFP_H #define __LINUX_GFP_H #include <linux/mmdebug.h> #include <linux/mmzone.h> #include <linux/stddef.h> #include <linux/linkage.h> #include <linux/topology.h> struct vm_area_struct; /* * In case of changes, please don't forget to update * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c */ /* Plain integer GFP bitmasks. Do not use this directly. */ #define ___GFP_DMA 0x01u #define ___GFP_HIGHMEM 0x02u #define ___GFP_DMA32 0x04u #define ___GFP_MOVABLE 0x08u #define ___GFP_RECLAIMABLE 0x10u #define ___GFP_HIGH 0x20u #define ___GFP_IO 0x40u #define ___GFP_FS 0x80u #define ___GFP_ZERO 0x100u #define ___GFP_ATOMIC 0x200u #define ___GFP_DIRECT_RECLAIM 0x400u #define ___GFP_KSWAPD_RECLAIM 0x800u #define ___GFP_WRITE 0x1000u #define ___GFP_NOWARN 0x2000u #define ___GFP_RETRY_MAYFAIL 0x4000u #define ___GFP_NOFAIL 0x8000u #define ___GFP_NORETRY 0x10000u #define ___GFP_MEMALLOC 0x20000u #define ___GFP_COMP 0x40000u #define ___GFP_NOMEMALLOC 0x80000u #define ___GFP_HARDWALL 0x100000u #define ___GFP_THISNODE 0x200000u #define ___GFP_ACCOUNT 0x400000u #ifdef CONFIG_LOCKDEP #define ___GFP_NOLOCKDEP 0x800000u #else #define ___GFP_NOLOCKDEP 0 #endif /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* * Physical address zone modifiers (see linux/mmzone.h - low four bits) * * Do not put any conditional on these. If necessary modify the definitions * without the underscores and use them consistently. The definitions here may * be used in bit comparisons. 
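 *
 * A minimal illustration (not part of the original comment): because these
 * are plain bits, a zone request can be tested with an ordinary mask
 * compare, e.g.
 *
 *	bool wants_dma32 = (flags & GFP_ZONEMASK) == __GFP_DMA32;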
*/ #define __GFP_DMA ((__force gfp_t)___GFP_DMA) #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) /** * DOC: Page mobility and placement hints * * Page mobility and placement hints * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * These flags provide hints about how mobile the page is. Pages with similar * mobility are placed within the same pageblocks to minimise problems due * to external fragmentation. * * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be * moved by page migration during memory compaction or can be reclaimed. * * %__GFP_RECLAIMABLE is used for slab allocations that specify * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. * * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible, * these pages will be spread between local zones to avoid all the dirty * pages being in one zone (fair zone allocation policy). * * %__GFP_HARDWALL enforces the cpuset memory allocation policy. * * %__GFP_THISNODE forces the allocation to be satisfied from the requested * node with no fallbacks or placement policy enforcements. * * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. */ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) /** * DOC: Watermark modifiers * * Watermark modifiers -- controls access to emergency reserves * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * %__GFP_HIGH indicates that the caller is high-priority and that granting * the request is necessary before the system can make forward progress. * For example, creating an IO context to clean pages. * * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is * high priority. Users are typically interrupt handlers. This may be * used in conjunction with %__GFP_HIGH * * %__GFP_MEMALLOC allows access to all memory. This should only be used when * the caller guarantees the allocation will allow more memory to be freed * very shortly e.g. process exiting or swapping. Users either should * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). * * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. * This takes precedence over the %__GFP_MEMALLOC flag if both are set. */ #define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /** * DOC: Reclaim modifiers * * Reclaim modifiers * ~~~~~~~~~~~~~~~~~ * * %__GFP_IO can start physical IO. * * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the * allocator recursing into the filesystem which might already be holding * locks. * * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. * This flag can be cleared to avoid unnecessary delays when a fallback * option is available. * * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when * the low watermark is reached and have it reclaim pages until the high * watermark is reached. 
A caller may wish to clear this flag when fallback * options are available and the reclaim is likely to disrupt the system. The * canonical example is THP allocation, where a fallback is cheap but * reclaim/compaction may cause indirect stalls. * * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. * * The default allocator behavior depends on the request size. We have a concept * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER). * !costly allocations are too essential to fail, so they are implicitly * non-failing by default (with some exceptions: OOM victims, for example, * might fail, so the caller still has to check for failures), while costly * requests try not to be disruptive and back off even without invoking the * OOM killer. The following three modifiers might be used to override some * of these implicit rules: * * %__GFP_NORETRY: The VM implementation will try only very lightweight * memory direct reclaim to get some memory under memory pressure (thus * it can sleep). It will avoid disruptive actions like the OOM killer. The * caller must handle the failure, which is quite likely to happen under * heavy memory pressure. The flag is suitable when failure can easily be * handled at small cost, such as reduced throughput. * * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim * procedures that have previously failed if there is some indication * that progress has been made elsewhere. It can wait for other * tasks to attempt high-level approaches to freeing memory such as * compaction (which removes fragmentation) and page-out. * There is still a definite limit to the number of retries, but it is * a larger limit than with %__GFP_NORETRY. * Allocations with this flag may fail, but only when there is * genuinely little unused memory. While these allocations do not * directly trigger the OOM killer, their failure indicates that * the system is likely to need to use the OOM killer soon. The * caller must handle failure, but can reasonably do so by failing * a higher-level request, or completing it only in a much less * efficient manner. * If the allocation does fail, and the caller is in a position to * free some non-essential memory, doing so could benefit the system * as a whole. * * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller * cannot handle allocation failures. The allocation could block * indefinitely but will never return with failure. Testing for * failure is pointless. * New users should be evaluated carefully (and the flag should be * used only when there is no reasonable failure policy), but it is * definitely preferable to use the flag rather than open-code an endless * loop around the allocator. * Using this flag for costly allocations is _highly_ discouraged. */ #define __GFP_IO ((__force gfp_t)___GFP_IO) #define __GFP_FS ((__force gfp_t)___GFP_FS) #define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ #define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ #define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) #define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL) #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /** * DOC: Action modifiers * * Action modifiers * ~~~~~~~~~~~~~~~~ * * %__GFP_NOWARN suppresses allocation failure reports. * * %__GFP_COMP causes the allocation to be returned as a compound page. * * %__GFP_ZERO returns a zeroed page on success.
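 *
 * Combined use (an illustrative sketch, not from the original text):
 *
 *	page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
 *
 * requests a zeroed page and suppresses the failure report if none can
 * be found.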
*/ #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) #define __GFP_COMP ((__force gfp_t)___GFP_COMP) #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Disable lockdep for GFP context tracking */ #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ #define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP)) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** * DOC: Useful GFP flag combinations * * Useful GFP flag combinations * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * Useful GFP flag combinations that are commonly used. It is recommended * that subsystems start with one of these combinations and then set/clear * %__GFP_FOO flags as necessary. * * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower * watermark is applied to allow access to "atomic reserves" * * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim. * * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is * accounted to kmemcg. * * %GFP_NOWAIT is for kernel allocations that should not stall for direct * reclaim, start physical IO or use any filesystem callback. * * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages * that do not require the starting of any physical IO. * Please try to avoid using this flag directly and instead use * memalloc_noio_{save,restore} to mark the whole scope which cannot * perform any IO with a short explanation why. All allocation requests * will inherit GFP_NOIO implicitly. * * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. * Please try to avoid using this flag directly and instead use * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't * recurse into the FS layer with a short explanation why. All allocation * requests will inherit GFP_NOFS implicitly. * * %GFP_USER is for userspace allocations that also need to be directly * accessibly by the kernel or hardware. It is typically used by hardware * for buffers that are mapped to userspace (e.g. graphics) that hardware * still must DMA to. cpuset limits are enforced for these allocations. * * %GFP_DMA exists for historical reasons and should be avoided where possible. * The flags indicates that the caller requires that the lowest zone be * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but * it would require careful auditing as some users really require it and * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the * lowest zone as a type of emergency reserve. * * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit * address. * * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, * do not need to be directly accessible by the kernel but that cannot * move once in use. An example may be a hardware allocation that maps * data directly into userspace but has no addressing limitations. * * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not * need direct access to but can use kmap() when access is required. They * are expected to be movable via page reclaim or page migration. Typically, * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE. * * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They * are compound allocations that will generally fail quickly if memory is not * available and will not wake kswapd/kcompactd on failure. 
The _LIGHT * version does not attempt reclaim/compaction at all and is by default used * in page fault path, while the non-light is used by khugepaged. */ #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) #define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) #define GFP_NOIO (__GFP_RECLAIM) #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) #define GFP_DMA __GFP_DMA #define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) #define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) #define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) /* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) #define GFP_MOVABLE_SHIFT 3 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) { VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE); BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE); if (unlikely(page_group_by_mobility_disabled)) return MIGRATE_UNMOVABLE; /* Group based on mobility */ return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT; } #undef GFP_MOVABLE_MASK #undef GFP_MOVABLE_SHIFT static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) { return !!(gfp_flags & __GFP_DIRECT_RECLAIM); } /** * gfpflags_normal_context - is gfp_flags a normal sleepable context? * @gfp_flags: gfp_flags to test * * Test whether @gfp_flags indicates that the allocation is from the * %current context and allowed to sleep. * * An allocation being allowed to block doesn't mean it owns the %current * context. When direct reclaim path tries to allocate memory, the * allocation context is nested inside whatever %current was doing at the * time of the original allocation. The nested allocation may be allowed * to block but modifying anything %current owns can corrupt the outer * context's expectations. * * %true result from this function indicates that the allocation context * can sleep and use anything that's associated with %current. */ static inline bool gfpflags_normal_context(const gfp_t gfp_flags) { return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) == __GFP_DIRECT_RECLAIM; } #ifdef CONFIG_HIGHMEM #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM #else #define OPT_ZONE_HIGHMEM ZONE_NORMAL #endif #ifdef CONFIG_ZONE_DMA #define OPT_ZONE_DMA ZONE_DMA #else #define OPT_ZONE_DMA ZONE_NORMAL #endif #ifdef CONFIG_ZONE_DMA32 #define OPT_ZONE_DMA32 ZONE_DMA32 #else #define OPT_ZONE_DMA32 ZONE_NORMAL #endif /* * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT * bits long and there are 16 of them to cover all possible combinations of * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM. * * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. * But GFP_MOVABLE is not only a zone specifier but also an allocation * policy. Therefore __GFP_MOVABLE plus another zone selector is valid. * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1". 
* * bit result * ================= * 0x0 => NORMAL * 0x1 => DMA or NORMAL * 0x2 => HIGHMEM or NORMAL * 0x3 => BAD (DMA+HIGHMEM) * 0x4 => DMA32 or NORMAL * 0x5 => BAD (DMA+DMA32) * 0x6 => BAD (HIGHMEM+DMA32) * 0x7 => BAD (HIGHMEM+DMA32+DMA) * 0x8 => NORMAL (MOVABLE+0) * 0x9 => DMA or NORMAL (MOVABLE+DMA) * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) * 0xb => BAD (MOVABLE+HIGHMEM+DMA) * 0xc => DMA32 or NORMAL (MOVABLE+DMA32) * 0xd => BAD (MOVABLE+DMA32+DMA) * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) * * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms. */ #if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4 /* ZONE_DEVICE is not a valid GFP zone specifier */ #define GFP_ZONES_SHIFT 2 #else #define GFP_ZONES_SHIFT ZONES_SHIFT #endif #if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG #error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer #endif #define GFP_ZONE_TABLE ( \ (ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \ | (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \ | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \ | (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \ | (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \ | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \ | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\ | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\ ) /* * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per * entry starting with bit 0. Bit is set if the combination is not * allowed. */ #define GFP_ZONE_BAD ( \ 1 << (___GFP_DMA | ___GFP_HIGHMEM) \ | 1 << (___GFP_DMA | ___GFP_DMA32) \ | 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \ | 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \ | 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \ ) static inline enum zone_type gfp_zone(gfp_t flags) { enum zone_type z; int bit = (__force int) (flags & GFP_ZONEMASK); z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) & ((1 << GFP_ZONES_SHIFT) - 1); VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); return z; } /* * There is only one page-allocator function, and two main namespaces to * it. The alloc_page*() variants return 'struct page *' and as such * can allocate highmem pages, the *get*page*() variants return * virtual kernel addresses to the allocated page(s). */ static inline int gfp_zonelist(gfp_t flags) { #ifdef CONFIG_NUMA if (unlikely(flags & __GFP_THISNODE)) return ZONELIST_NOFALLBACK; #endif return ZONELIST_FALLBACK; } /* * We get the zone list from the current node and the gfp_mask. * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. * There are two zonelists per node, one for all zones with memory and * one containing just zones from the node the zonelist belongs to. * * For the case of non-NUMA systems the NODE_DATA() gets optimized to * &contig_page_data at compile-time. 
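 *
 * Illustrative call (a sketch, not part of the original comment): the
 * allocator picks its search list as
 *
 *	zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);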
*/ static inline struct zonelist *node_zonelist(int nid, gfp_t flags) { return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags); } #ifndef HAVE_ARCH_FREE_PAGE static inline void arch_free_page(struct page *page, int order) { } #endif #ifndef HAVE_ARCH_ALLOC_PAGE static inline void arch_alloc_page(struct page *page, int order) { } #endif #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE static inline int arch_make_page_accessible(struct page *page) { return 0; } #endif struct page * __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask); static inline struct page * __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid) { return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL); } /* * Allocate pages, preferring the node given as nid. The node must be valid and * online. For more general interface, see alloc_pages_node(). */ static inline struct page * __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); return __alloc_pages(gfp_mask, order, nid); } /* * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE, * prefer the current CPU's closest node. Otherwise node must be valid and * online. */ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { if (nid == NUMA_NO_NODE) nid = numa_mem_id(); return __alloc_pages_node(nid, gfp_mask, order); } #ifdef CONFIG_NUMA extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); static inline struct page * alloc_pages(gfp_t gfp_mask, unsigned int order) { return alloc_pages_current(gfp_mask, order); } extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, int node, bool hugepage); #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) #else #define alloc_pages(gfp_mask, order) \ alloc_pages_node(numa_node_id(), gfp_mask, order) #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ alloc_pages(gfp_mask, order) #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages(gfp_mask, order) #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) #define alloc_page_vma(gfp_mask, vma, addr) \ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); void *alloc_pages_exact(size_t size, gfp_t gfp_mask); void free_pages_exact(void *virt, size_t size); void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask), 0) #define __get_dma_pages(gfp_mask, order) \ __get_free_pages((gfp_mask) | GFP_DMA, (order)) extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); extern void free_unref_page(struct page *page); extern void free_unref_page_list(struct list_head *list); struct page_frag_cache; extern void __page_frag_cache_drain(struct page *page, unsigned int count); extern void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask); extern void page_frag_free(void *addr); #define __free_page(page) __free_pages((page), 0) #define free_page(addr) 
free_pages((addr), 0) void page_alloc_init(void); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(struct zone *zone); void drain_local_pages(struct zone *zone); void page_alloc_init_late(void); /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what * GFP flags are used before interrupts are enabled. Once interrupts are * enabled, it is set to __GFP_BITS_MASK while the system is running. During * hibernation, it is used by PM to avoid I/O during memory allocation while * devices are suspended. */ extern gfp_t gfp_allowed_mask; /* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); extern void pm_restrict_gfp_mask(void); extern void pm_restore_gfp_mask(void); #ifdef CONFIG_PM_SLEEP extern bool pm_suspended_storage(void); #else static inline bool pm_suspended_storage(void) { return false; } #endif /* CONFIG_PM_SLEEP */ /* * Check if the gfp flags allow compaction - GFP_NOIO is a really * tricky context because the migration might require IO. */ static inline bool gfp_compaction_allowed(gfp_t gfp_mask) { return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO); } #ifdef CONFIG_CONTIG_ALLOC /* The below functions must be run on a range from a single zone. */ extern int alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask); #endif void free_contig_range(unsigned long pfn, unsigned int nr_pages); #ifdef CONFIG_CMA /* CMA stuff */ extern void init_cma_reserved_pageblock(struct page *page); #endif #endif /* __LINUX_GFP_H */ kvm_host.h 0000644 00000122231 14722070374 0006556 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __KVM_HOST_H #define __KVM_HOST_H #include <linux/types.h> #include <linux/hardirq.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/bug.h> #include <linux/mm.h> #include <linux/mmu_notifier.h> #include <linux/preempt.h> #include <linux/msi.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/rcupdate.h> #include <linux/ratelimit.h> #include <linux/err.h> #include <linux/irqflags.h> #include <linux/context_tracking.h> #include <linux/irqbypass.h> #include <linux/swait.h> #include <linux/refcount.h> #include <linux/nospec.h> #include <asm/signal.h> #include <linux/kvm.h> #include <linux/kvm_para.h> #include <linux/kvm_types.h> #include <asm/kvm_host.h> #ifndef KVM_MAX_VCPU_ID #define KVM_MAX_VCPU_ID KVM_MAX_VCPUS #endif /* * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used * in kvm, other bits are visible for userspace which are defined in * include/linux/kvm_h. */ #define KVM_MEMSLOT_INVALID (1UL << 16) /* * Bit 63 of the memslot generation number is an "update in-progress flag", * e.g. is temporarily set for the duration of install_new_memslots(). * This flag effectively creates a unique generation number that is used to * mark cached memslot data, e.g. MMIO accesses, as potentially being stale, * i.e. may (or may not) have come from the previous memslots generation. * * This is necessary because the actual memslots update is not atomic with * respect to the generation number update. 
Updating the generation number * first would allow a vCPU to cache a spte from the old memslots using the * new generation number, and updating the generation number after switching * to the new memslots would allow cache hits using the old generation number * to reference the defunct memslots. * * This mechanism is used to prevent getting hits in KVM's caches while a * memslot update is in-progress, and to prevent cache hits *after* updating * the actual generation number against accesses that were inserted into the * cache *before* the memslots were updated. */ #define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63) /* Two fragments for cross MMIO pages. */ #define KVM_MAX_MMIO_FRAGMENTS 2 #ifndef KVM_ADDRESS_SPACE_NUM #define KVM_ADDRESS_SPACE_NUM 1 #endif /* * For a normal pfn, the highest 12 bits should be zero, * so we can mask bits 62 ~ 52 to indicate an error pfn and * bit 63 to indicate the noslot pfn. */ #define KVM_PFN_ERR_MASK (0x7ffULL << 52) #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52) #define KVM_PFN_NOSLOT (0x1ULL << 63) #define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK) #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) /* * Error pfns indicate that the gfn is in a slot but failed to * translate to a pfn on the host. */ static inline bool is_error_pfn(kvm_pfn_t pfn) { return !!(pfn & KVM_PFN_ERR_MASK); } /* * error_noslot pfns indicate that the gfn cannot be * translated to a pfn: either it is not in any slot, or the * translation failed. */ static inline bool is_error_noslot_pfn(kvm_pfn_t pfn) { return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK); } /* A noslot pfn indicates that the gfn is not in any slot. */ static inline bool is_noslot_pfn(kvm_pfn_t pfn) { return pfn == KVM_PFN_NOSLOT; } /* * Architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390) * provide their own defines and kvm_is_error_hva(). */ #ifndef KVM_HVA_ERR_BAD #define KVM_HVA_ERR_BAD (PAGE_OFFSET) #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE) static inline bool kvm_is_error_hva(unsigned long addr) { return addr >= PAGE_OFFSET; } #endif #define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT)) static inline bool is_error_page(struct page *page) { return IS_ERR(page); } #define KVM_REQUEST_MASK GENMASK(7,0) #define KVM_REQUEST_NO_WAKEUP BIT(8) #define KVM_REQUEST_WAIT BIT(9) /* * Architecture-independent vcpu->requests bit members. * Bits 4-7 are reserved for more arch-independent bits.
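 *
 * Hypothetical example (not from this header): an architecture layers its
 * own requests on top of the arch base, e.g.
 *
 *	#define KVM_REQ_ARCH_FOO	KVM_ARCH_REQ(0)
 *
 * which yields request number KVM_REQUEST_ARCH_BASE + 0 with neither the
 * WAIT nor the NO_WAKEUP flag set.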
*/ #define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_PENDING_TIMER 2 #define KVM_REQ_UNHALT 3 #define KVM_REQ_VM_BUGGED (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \ (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \ }) #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 extern struct kmem_cache *kvm_vcpu_cache; extern struct mutex kvm_lock; extern struct list_head vm_list; struct kvm_io_range { gpa_t addr; int len; struct kvm_io_device *dev; }; #define NR_IOBUS_DEVS 1000 struct kvm_io_bus { int dev_count; int ioeventfd_count; struct kvm_io_range range[]; }; enum kvm_bus { KVM_MMIO_BUS, KVM_PIO_BUS, KVM_VIRTIO_CCW_NOTIFY_BUS, KVM_FAST_MMIO_BUS, KVM_NR_BUSES }; int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, const void *val); int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, const void *val, long cookie); int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev); struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr); #ifdef CONFIG_KVM_ASYNC_PF struct kvm_async_pf { struct work_struct work; struct list_head link; struct list_head queue; struct kvm_vcpu *vcpu; struct mm_struct *mm; gpa_t cr2_or_gpa; unsigned long addr; struct kvm_arch_async_pf arch; bool wakeup_all; }; void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, unsigned long hva, struct kvm_arch_async_pf *arch); int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, EXITING_GUEST_MODE, READING_SHADOW_PAGE_TABLES, }; #define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA) struct kvm_host_map { /* * Only valid if the 'pfn' is managed by the host kernel (i.e. There is * a 'struct page' for it. When using mem= kernel parameter some memory * can be used as guest memory but they are not managed by host * kernel). * If 'pfn' is not managed by the host kernel, this field is * initialized to KVM_UNMAPPED_PAGE. */ struct page *page; void *hva; kvm_pfn_t pfn; kvm_pfn_t gfn; }; /* * Used to check if the mapping is valid or not. Never use 'kvm_host_map' * directly to check for that. */ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map) { return !!map->hva; } /* * Sometimes a large or cross-page mmio needs to be broken up into separate * exits for userspace servicing. 
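 *
 * Worked example (illustrative): a 4-byte write whose gpa sits one byte
 * before a page boundary is split into fragments {gpa, len = 1} and
 * {gpa + 1, len = 3}, one per page; hence two fragments
 * (KVM_MAX_MMIO_FRAGMENTS) always suffice.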
*/ struct kvm_mmio_fragment { gpa_t gpa; void *data; unsigned len; }; struct kvm_vcpu { struct kvm *kvm; #ifdef CONFIG_PREEMPT_NOTIFIERS struct preempt_notifier preempt_notifier; #endif int cpu; int vcpu_id; /* id given by userspace at creation */ int vcpu_idx; /* index in kvm->vcpus array */ int srcu_idx; int mode; u64 requests; unsigned long guest_debug; int pre_pcpu; struct list_head blocked_vcpu_list; struct mutex mutex; struct kvm_run *run; int guest_xcr0_loaded; struct swait_queue_head wq; struct pid __rcu *pid; int sigset_active; sigset_t sigset; struct kvm_vcpu_stat stat; unsigned int halt_poll_ns; bool valid_wakeup; #ifdef CONFIG_HAS_IOMEM int mmio_needed; int mmio_read_completed; int mmio_is_write; int mmio_cur_fragment; int mmio_nr_fragments; struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS]; #endif #ifdef CONFIG_KVM_ASYNC_PF struct { u32 queued; struct list_head queue; struct list_head done; spinlock_t lock; } async_pf; #endif #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT /* * Cpu relax intercept or pause loop exit optimization * in_spin_loop: set when a vcpu does a pause loop exit * or cpu relax intercepted. * dy_eligible: indicates whether vcpu is eligible for directed yield. */ struct { bool in_spin_loop; bool dy_eligible; } spin_loop; #endif bool preempted; bool ready; struct kvm_vcpu_arch arch; struct dentry *debugfs_dentry; }; static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) { /* * The memory barrier ensures a previous write to vcpu->requests cannot * be reordered with the read of vcpu->mode. It pairs with the general * memory barrier following the write of vcpu->mode in VCPU RUN. */ smp_mb__before_atomic(); return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); } /* * Some of the bitops functions do not support too long bitmaps. * This number must be determined not to exceed such limits. */ #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) struct kvm_memory_slot { gfn_t base_gfn; unsigned long npages; unsigned long *dirty_bitmap; struct kvm_arch_memory_slot arch; unsigned long userspace_addr; u32 flags; short id; }; static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) { return ALIGN(memslot->npages, BITS_PER_LONG) / 8; } static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot) { unsigned long len = kvm_dirty_bitmap_bytes(memslot); return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap); } struct kvm_s390_adapter_int { u64 ind_addr; u64 summary_addr; u64 ind_offset; u32 summary_offset; u32 adapter_id; }; struct kvm_hv_sint { u32 vcpu; u32 sint; }; struct kvm_kernel_irq_routing_entry { u32 gsi; u32 type; int (*set)(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status); union { struct { unsigned irqchip; unsigned pin; } irqchip; struct { u32 address_lo; u32 address_hi; u32 data; u32 flags; u32 devid; } msi; struct kvm_s390_adapter_int adapter; struct kvm_hv_sint hv_sint; }; struct hlist_node link; }; #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING struct kvm_irq_routing_table { int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; u32 nr_rt_entries; /* * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. 
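 *
 * Illustrative traversal (a sketch, not from the original comment):
 *
 *	hlist_for_each_entry(e, &rt->map[gsi], link)
 *		e->set(e, kvm, irq_source_id, level, line_status);
 *
 * where e is a struct kvm_kernel_irq_routing_entry and rt points at
 * this table.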
*/ struct hlist_head map[0]; }; #endif #ifndef KVM_PRIVATE_MEM_SLOTS #define KVM_PRIVATE_MEM_SLOTS 0 #endif #ifndef KVM_MEM_SLOTS_NUM #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) #endif #ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) { return 0; } #endif /* * Note: * memslots are not sorted by id anymore, please use id_to_memslot() * to get the memslot by its id. */ struct kvm_memslots { u64 generation; struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; /* The mapping table from slot id to the index in memslots[]. */ short id_to_index[KVM_MEM_SLOTS_NUM]; atomic_t lru_slot; int used_slots; }; struct kvm { spinlock_t mmu_lock; struct mutex slots_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; /* * created_vcpus is protected by kvm->lock, and is incremented * at the beginning of KVM_CREATE_VCPU. online_vcpus is only * incremented after storing the kvm_vcpu pointer in vcpus, * and is accessed atomically. */ atomic_t online_vcpus; int created_vcpus; int last_boosted_vcpu; struct list_head vm_list; struct mutex lock; struct kvm_io_bus __rcu *buses[KVM_NR_BUSES]; #ifdef CONFIG_HAVE_KVM_EVENTFD struct { spinlock_t lock; struct list_head items; struct list_head resampler_list; struct mutex resampler_lock; } irqfds; struct list_head ioeventfds; #endif struct kvm_vm_stat stat; struct kvm_arch arch; refcount_t users_count; #ifdef CONFIG_KVM_MMIO struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; spinlock_t ring_lock; struct list_head coalesced_zones; #endif struct mutex irq_lock; #ifdef CONFIG_HAVE_KVM_IRQCHIP /* * Update side is protected by irq_lock. */ struct kvm_irq_routing_table __rcu *irq_routing; #endif #ifdef CONFIG_HAVE_KVM_IRQFD struct hlist_head irq_ack_notifier_list; #endif #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) struct mmu_notifier mmu_notifier; unsigned long mmu_notifier_seq; long mmu_notifier_count; #endif long tlbs_dirty; struct list_head devices; bool manual_dirty_log_protect; struct dentry *debugfs_dentry; struct kvm_stat_data **debugfs_stat_data; struct srcu_struct srcu; struct srcu_struct irq_srcu; pid_t userspace_pid; bool vm_bugged; }; #define kvm_err(fmt, ...) \ pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) #define kvm_info(fmt, ...) \ pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) #define kvm_debug(fmt, ...) \ pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) #define kvm_debug_ratelimited(fmt, ...) \ pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \ ## __VA_ARGS__) #define kvm_pr_unimpl(fmt, ...) \ pr_err_ratelimited("kvm [%i]: " fmt, \ task_tgid_nr(current), ## __VA_ARGS__) /* The guest did something we don't support. */ #define vcpu_unimpl(vcpu, fmt, ...) \ kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \ (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__) #define vcpu_debug(vcpu, fmt, ...) \ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) #define vcpu_debug_ratelimited(vcpu, fmt, ...) \ kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \ ## __VA_ARGS__) #define vcpu_err(vcpu, fmt, ...) \ kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); static inline void kvm_vm_bugged(struct kvm *kvm) { kvm->vm_bugged = true; kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED); } #define KVM_BUG(cond, kvm, fmt...) 
\ ({ \ int __ret = (cond); \ \ if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \ kvm_vm_bugged(kvm); \ unlikely(__ret); \ }) #define KVM_BUG_ON(cond, kvm) \ ({ \ int __ret = (cond); \ \ if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ kvm_vm_bugged(kvm); \ unlikely(__ret); \ }) static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) { return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, lockdep_is_held(&kvm->slots_lock) || !refcount_read(&kvm->users_count)); } static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) { int num_vcpus = atomic_read(&kvm->online_vcpus); i = array_index_nospec(i, num_vcpus); /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */ smp_rmb(); return kvm->vcpus[i]; } #define kvm_for_each_vcpu(idx, vcpup, kvm) \ for (idx = 0; \ idx < atomic_read(&kvm->online_vcpus) && \ (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ idx++) static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) { struct kvm_vcpu *vcpu = NULL; int i; if (id < 0) return NULL; if (id < KVM_MAX_VCPUS) vcpu = kvm_get_vcpu(kvm, id); if (vcpu && vcpu->vcpu_id == id) return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) if (vcpu->vcpu_id == id) return vcpu; return NULL; } static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) { return vcpu->vcpu_idx; } #define kvm_for_each_memslot(memslot, slots) \ for (memslot = &slots->memslots[0]; \ memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ memslot++) int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); void vcpu_load(struct kvm_vcpu *vcpu); void vcpu_put(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_IOAPIC void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm); void kvm_arch_post_irq_routing_update(struct kvm *kvm); #else static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) { } static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) { } #endif #ifdef CONFIG_HAVE_KVM_IRQFD int kvm_irqfd_init(void); void kvm_irqfd_exit(void); #else static inline int kvm_irqfd_init(void) { return 0; } static inline void kvm_irqfd_exit(void) { } #endif int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, struct module *module); void kvm_exit(void); void kvm_get_kvm(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) { as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM); return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, lockdep_is_held(&kvm->slots_lock) || !refcount_read(&kvm->users_count)); } static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) { return __kvm_memslots(kvm, 0); } static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) { int as_id = kvm_arch_vcpu_memslots_id(vcpu); return __kvm_memslots(vcpu->kvm, as_id); } static inline struct kvm_memory_slot * id_to_memslot(struct kvm_memslots *slots, int id) { int index = slots->id_to_index[id]; struct kvm_memory_slot *slot; slot = &slots->memslots[index]; WARN_ON(slot->id != id); return slot; } /* * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: * - create a new memory slot * - delete an existing memory slot * - modify an existing memory slot * -- move it in the guest physical memory space * -- just change its flags * * Since flags can be changed by some of these operations, the following * differentiation is the best we can do for __kvm_set_memory_region(): */ enum kvm_mr_change { KVM_MR_CREATE, 
KVM_MR_DELETE, KVM_MR_MOVE, KVM_MR_FLAGS_ONLY, }; int kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem); int __kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem); void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont); int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages); void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, enum kvm_mr_change change); void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); bool kvm_largepages_enabled(void); void kvm_disable_largepages(void); /* flush all memory translations */ void kvm_arch_flush_shadow_all(struct kvm *kvm); /* flush memory translations pointing to 'slot' */ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, struct page **pages, int nr_pages); struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable); void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); void kvm_set_page_accessed(struct page *page); kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable); kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable); void kvm_release_pfn_clean(kvm_pfn_t pfn); void kvm_release_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_accessed(kvm_pfn_t pfn); void kvm_get_pfn(kvm_pfn_t pfn); void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache); int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned int offset, unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot 
*gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, struct gfn_to_pfn_cache *cache, bool atomic); struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, struct gfn_to_pfn_cache *cache, bool dirty, bool atomic); unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len); int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len); int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len); int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, int offset, int len); int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len); void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_sigset_activate(struct kvm_vcpu *vcpu); void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); void kvm_vcpu_block(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, unsigned long *vcpu_bitmap, cpumask_var_t tmp); long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty); int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log, bool *flush); int kvm_clear_dirty_log_protect(struct kvm *kvm, struct kvm_clear_dirty_log *log, bool *flush); void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask); int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log); int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status); int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); int 
kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr); int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); int kvm_arch_init(void *opaque); void kvm_arch_exit(void); int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); #endif int kvm_arch_hardware_enable(void); void kvm_arch_hardware_disable(void); int kvm_arch_hardware_setup(void); void kvm_arch_hardware_unsetup(void); int kvm_arch_check_processor_compat(void); int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); #ifndef __KVM_HAVE_ARCH_VM_ALLOC /* * All architectures that want to use vzalloc currently also * need their own kvm_arch_alloc_vm implementation. 
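 *
 * A hypothetical arch override (illustrative only) would define
 * __KVM_HAVE_ARCH_VM_ALLOC and pair the two sides:
 *
 *	static inline struct kvm *kvm_arch_alloc_vm(void)
 *	{
 *		return vzalloc(sizeof(struct kvm));
 *	}
 *
 *	static inline void kvm_arch_free_vm(struct kvm *kvm)
 *	{
 *		vfree(kvm);
 *	}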
*/ static inline struct kvm *kvm_arch_alloc_vm(void) { return kzalloc(sizeof(struct kvm), GFP_KERNEL); } static inline void kvm_arch_free_vm(struct kvm *kvm) { kfree(kvm); } #endif #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) { return -ENOTSUPP; } #endif #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA void kvm_arch_register_noncoherent_dma(struct kvm *kvm); void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm); bool kvm_arch_has_noncoherent_dma(struct kvm *kvm); #else static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) { } static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) { } static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) { return false; } #endif #ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE void kvm_arch_start_assignment(struct kvm *kvm); void kvm_arch_end_assignment(struct kvm *kvm); bool kvm_arch_has_assigned_device(struct kvm *kvm); #else static inline void kvm_arch_start_assignment(struct kvm *kvm) { } static inline void kvm_arch_end_assignment(struct kvm *kvm) { } static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) { return false; } #endif static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) { #ifdef __KVM_HAVE_ARCH_WQP return vcpu->arch.wqp; #else return &vcpu->wq; #endif } #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED /* * returns true if the virtual interrupt controller is initialized and * ready to accept virtual IRQ. On some architectures the virtual interrupt * controller is dynamically instantiated and this is not always true. */ bool kvm_arch_intc_initialized(struct kvm *kvm); #else static inline bool kvm_arch_intc_initialized(struct kvm *kvm) { return true; } #endif int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); void kvm_arch_destroy_vm(struct kvm *kvm); void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); bool kvm_is_reserved_pfn(kvm_pfn_t pfn); bool kvm_is_zone_device_pfn(kvm_pfn_t pfn); struct kvm_irq_ack_notifier { struct hlist_node link; unsigned gsi; void (*irq_acked)(struct kvm_irq_ack_notifier *kian); }; int kvm_irq_map_gsi(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *entries, int gsi); int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, int irq_source_id, int level, bool line_status); int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status); bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); /* * search_memslots() and __gfn_to_memslot() are here because they are * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. * gfn_to_memslot() itself isn't here as an inline because that would * bloat other code too much. 
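 *
 * Typical lookup (an illustrative sketch): a gfn is resolved against the
 * current memslots with
 *
 *	slot = __gfn_to_memslot(kvm_memslots(kvm), gfn);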
*/ static inline struct kvm_memory_slot * search_memslots(struct kvm_memslots *slots, gfn_t gfn) { int start = 0, end = slots->used_slots; int slot = atomic_read(&slots->lru_slot); struct kvm_memory_slot *memslots = slots->memslots; if (gfn >= memslots[slot].base_gfn && gfn < memslots[slot].base_gfn + memslots[slot].npages) return &memslots[slot]; while (start < end) { slot = start + (end - start) / 2; if (gfn >= memslots[slot].base_gfn) end = slot; else start = slot + 1; } if (start < slots->used_slots && gfn >= memslots[start].base_gfn && gfn < memslots[start].base_gfn + memslots[start].npages) { atomic_set(&slots->lru_slot, start); return &memslots[start]; } return NULL; } static inline struct kvm_memory_slot * __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) { return search_memslots(slots, gfn); } static inline unsigned long __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { /* * The index was checked originally in search_memslots. To avoid * that a malicious guest builds a Spectre gadget out of e.g. page * table walks, do not let the processor speculate loads outside * the guest's registered memslots. */ unsigned long offset = gfn - slot->base_gfn; offset = array_index_nospec(offset, slot->npages); return slot->userspace_addr + offset * PAGE_SIZE; } static inline int memslot_id(struct kvm *kvm, gfn_t gfn) { return gfn_to_memslot(kvm, gfn)->id; } static inline gfn_t hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) { gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT; return slot->base_gfn + gfn_offset; } static inline gpa_t gfn_to_gpa(gfn_t gfn) { return (gpa_t)gfn << PAGE_SHIFT; } static inline gfn_t gpa_to_gfn(gpa_t gpa) { return (gfn_t)(gpa >> PAGE_SHIFT); } static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn) { return (hpa_t)pfn << PAGE_SHIFT; } static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu, gpa_t gpa) { return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa)); } static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) { unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); return kvm_is_error_hva(hva); } enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU, }; struct kvm_stat_data { int offset; int mode; struct kvm *kvm; }; struct kvm_stats_debugfs_item { const char *name; int offset; enum kvm_stat_kind kind; int mode; }; extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) { if (unlikely(kvm->mmu_notifier_count)) return 1; /* * Ensure the read of mmu_notifier_count happens before the read * of mmu_notifier_seq. This interacts with the smp_wmb() in * mmu_notifier_invalidate_range_end to make sure that the caller * either sees the old (non-zero) value of mmu_notifier_count or * the new (incremented) value of mmu_notifier_seq. * PowerPC Book3s HV KVM calls this under a per-page lock * rather than under kvm->mmu_lock, for scalability, so * can't rely on kvm->mmu_lock to keep things ordered. 
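 *
 * A typical fault-path usage sketch (this shows the protocol, it is not
 * code lifted from a particular architecture):
 *
 *     mmu_seq = kvm->mmu_notifier_seq;
 *     smp_rmb();
 *     pfn = gfn_to_pfn(kvm, gfn);             (may sleep, no locks held)
 *     spin_lock(&kvm->mmu_lock);
 *     if (mmu_notifier_retry(kvm, mmu_seq))
 *             goto retry;                     (an invalidation ran concurrently)
 *     ... install the mapping ...
 *     spin_unlock(&kvm->mmu_lock);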
*/ smp_rmb(); if (kvm->mmu_notifier_seq != mmu_seq) return 1; return 0; } #endif #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */ bool kvm_arch_can_set_irq_routing(struct kvm *kvm); int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *entries, unsigned nr, unsigned flags); int kvm_set_routing_entry(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue); void kvm_free_irq_routing(struct kvm *kvm); #else static inline void kvm_free_irq_routing(struct kvm *kvm) {} #endif int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); #ifdef CONFIG_HAVE_KVM_EVENTFD void kvm_eventfd_init(struct kvm *kvm); int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); #ifdef CONFIG_HAVE_KVM_IRQFD int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args); void kvm_irqfd_release(struct kvm *kvm); void kvm_irq_routing_update(struct kvm *); #else static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) { return -EINVAL; } static inline void kvm_irqfd_release(struct kvm *kvm) {} #endif #else static inline void kvm_eventfd_init(struct kvm *kvm) {} static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) { return -EINVAL; } static inline void kvm_irqfd_release(struct kvm *kvm) {} #ifdef CONFIG_HAVE_KVM_IRQCHIP static inline void kvm_irq_routing_update(struct kvm *kvm) { } #endif static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { return -ENOSYS; } #endif /* CONFIG_HAVE_KVM_EVENTFD */ void kvm_arch_irq_routing_update(struct kvm *kvm); static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) { /* * Ensure the rest of the request is published to kvm_check_request's * caller. Paired with the smp_mb__after_atomic in kvm_check_request. */ smp_wmb(); set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); } static inline bool kvm_request_pending(struct kvm_vcpu *vcpu) { return READ_ONCE(vcpu->requests); } static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu) { return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); } static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu) { clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); } static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) { if (kvm_test_request(req, vcpu)) { kvm_clear_request(req, vcpu); /* * Ensure the rest of the request is visible to kvm_check_request's * caller. Paired with the smp_wmb in kvm_make_request. */ smp_mb__after_atomic(); return true; } else { return false; } } extern bool kvm_rebooting; extern unsigned int halt_poll_ns; extern unsigned int halt_poll_ns_grow; extern unsigned int halt_poll_ns_grow_start; extern unsigned int halt_poll_ns_shrink; struct kvm_device { struct kvm_device_ops *ops; struct kvm *kvm; void *private; struct list_head vm_node; }; /* create, destroy, and name are mandatory */ struct kvm_device_ops { const char *name; /* * create is called holding kvm->lock and any operations not suitable * to do while holding the lock should be deferred to init (see * below). */ int (*create)(struct kvm_device *dev, u32 type); /* * init is called after create if create is successful and is called * outside of holding kvm->lock. */ void (*init)(struct kvm_device *dev); /* * Destroy is responsible for freeing dev. 
* * Destroy may be called before or after destructors are called * on emulated I/O regions, depending on whether a reference is * held by a vcpu or other kvm component that gets destroyed * after the emulated I/O. */ void (*destroy)(struct kvm_device *dev); /* * Release is an alternative method to free the device. It is * called when the device file descriptor is closed. Once * release is called, the destroy method will not be called * anymore as the device is removed from the device list of * the VM. kvm->lock is held. */ void (*release)(struct kvm_device *dev); int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); long (*ioctl)(struct kvm_device *dev, unsigned int ioctl, unsigned long arg); int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma); }; void kvm_device_get(struct kvm_device *dev); void kvm_device_put(struct kvm_device *dev); struct kvm_device *kvm_device_from_filp(struct file *filp); int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); void kvm_unregister_device_ops(u32 type); extern struct kvm_device_ops kvm_mpic_ops; extern struct kvm_device_ops kvm_arm_vgic_v2_ops; extern struct kvm_device_ops kvm_arm_vgic_v3_ops; #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) { vcpu->spin_loop.in_spin_loop = val; } static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) { vcpu->spin_loop.dy_eligible = val; } #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) { } static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) { } #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS bool kvm_arch_has_irq_bypass(void); int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS /* If we wake up during the poll time, was it a successful poll?
*/ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) { return vcpu->valid_wakeup; } #else static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) { return true; } #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */ #ifdef CONFIG_HAVE_KVM_NO_POLL /* Callback that tells if we must not poll */ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu); #else static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) { return false; } #endif /* CONFIG_HAVE_KVM_NO_POLL */ #ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); #else static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { return -ENOIOCTLCMD; } #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, unsigned long start, unsigned long end); #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); #else static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) { return 0; } #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */ typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data); int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, uintptr_t data, const char *name, struct task_struct **thread_ptr); #endif zlib.h 0000644 00000070160 14722070374 0005667 0 ustar 00 /* zlib.h -- interface of the 'zlib' general purpose compression library Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. Jean-loup Gailly Mark Adler jloup@gzip.org madler@alumni.caltech.edu The data format used by the zlib library is described by RFCs (Request for Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). */ #ifndef _ZLIB_H #define _ZLIB_H #include <linux/zconf.h> /* zlib deflate based on ZLIB_VERSION "1.1.3" */ /* zlib inflate based on ZLIB_VERSION "1.2.3" */ /* This is a modified version of zlib for use inside the Linux kernel. The main changes are to perform all memory allocation in advance. Inflation Changes: * Z_PACKET_FLUSH is added and used by ppp_deflate. Before returning this checks that there is no more input data available and the next data is a STORED block. It also resets the mode to be read for the next data, all as per PPP requirements. * Addition of zlib_inflateIncomp which copies incompressible data into the history window and adjusts the accounting without calling zlib_inflate itself to inflate the data. */ /* The 'zlib' compression library provides in-memory compression and decompression functions, including integrity checks of the uncompressed data.
This version of the library supports only one compression method (deflation) but other algorithms will be added later and will have the same stream interface. Compression can be done in a single step if the buffers are large enough (for example if an input file is mmap'ed), or can be done by repeated calls of the compression function. In the latter case, the application must provide more input and/or consume the output (providing more output space) before each call. The compressed data format used by default by the in-memory functions is the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped around a deflate stream, which is itself documented in RFC 1951. The library also supports reading and writing files in gzip (.gz) format with an interface similar to that of stdio. The zlib format was designed to be compact and fast for use in memory and on communications channels. The gzip format was designed for single- file compression on file systems, has a larger header than zlib to maintain directory information, and uses a different, slower check method than zlib. The library does not install any signal handler. The decoder checks the consistency of the compressed data, so the library should never crash even in case of corrupted input. */ struct internal_state; typedef struct z_stream_s { const Byte *next_in; /* next input byte */ uLong avail_in; /* number of bytes available at next_in */ uLong total_in; /* total nb of input bytes read so far */ Byte *next_out; /* next output byte should be put there */ uLong avail_out; /* remaining free space at next_out */ uLong total_out; /* total nb of bytes output so far */ char *msg; /* last error message, NULL if no error */ struct internal_state *state; /* not visible by applications */ void *workspace; /* memory allocated for this stream */ int data_type; /* best guess about the data type: ascii or binary */ uLong adler; /* adler32 value of the uncompressed data */ uLong reserved; /* reserved for future use */ } z_stream; typedef z_stream *z_streamp; /* The application must update next_in and avail_in when avail_in has dropped to zero. It must update next_out and avail_out when avail_out has dropped to zero. The application must initialize zalloc, zfree and opaque before calling the init function. All other fields are set by the compression library and must not be updated by the application. The opaque value provided by the application will be passed as the first parameter for calls of zalloc and zfree. This can be useful for custom memory management. The compression library attaches no meaning to the opaque value. zalloc must return NULL if there is not enough memory for the object. If zlib is used in a multi-threaded application, zalloc and zfree must be thread safe. On 16-bit systems, the functions zalloc and zfree must be able to allocate exactly 65536 bytes, but will not be required to allocate more than this if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers returned by zalloc for objects of exactly 65536 bytes *must* have their offset normalized to zero. The default allocation function provided by this library ensures this (see zutil.c). To reduce memory requirements and avoid any allocation of 64K objects, at the expense of compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h). The fields total_in and total_out can be used for statistics or progress reports. 
After compression, total_in holds the total size of the uncompressed data and may be saved for use in the decompressor (particularly if the decompressor wants to decompress everything in a single step). */ /* constants */ #define Z_NO_FLUSH 0 #define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */ #define Z_PACKET_FLUSH 2 #define Z_SYNC_FLUSH 3 #define Z_FULL_FLUSH 4 #define Z_FINISH 5 #define Z_BLOCK 6 /* Only for inflate at present */ /* Allowed flush values; see deflate() and inflate() below for details */ #define Z_OK 0 #define Z_STREAM_END 1 #define Z_NEED_DICT 2 #define Z_ERRNO (-1) #define Z_STREAM_ERROR (-2) #define Z_DATA_ERROR (-3) #define Z_MEM_ERROR (-4) #define Z_BUF_ERROR (-5) #define Z_VERSION_ERROR (-6) /* Return codes for the compression/decompression functions. Negative * values are errors, positive values are used for special but normal events. */ #define Z_NO_COMPRESSION 0 #define Z_BEST_SPEED 1 #define Z_BEST_COMPRESSION 9 #define Z_DEFAULT_COMPRESSION (-1) /* compression levels */ #define Z_FILTERED 1 #define Z_HUFFMAN_ONLY 2 #define Z_DEFAULT_STRATEGY 0 /* compression strategy; see deflateInit2() below for details */ #define Z_BINARY 0 #define Z_ASCII 1 #define Z_UNKNOWN 2 /* Possible values of the data_type field */ #define Z_DEFLATED 8 /* The deflate compression method (the only one supported in this version) */ /* basic functions */ extern int zlib_deflate_workspacesize (int windowBits, int memLevel); /* Returns the number of bytes that needs to be allocated for a per- stream workspace with the specified parameters. A pointer to this number of bytes should be returned in stream->workspace before you call zlib_deflateInit() or zlib_deflateInit2(). If you call zlib_deflateInit(), specify windowBits = MAX_WBITS and memLevel = MAX_MEM_LEVEL here. If you call zlib_deflateInit2(), the windowBits and memLevel parameters passed to zlib_deflateInit2() must not exceed those passed here. */ extern int zlib_deflate_dfltcc_enabled (void); /* Returns 1 if Deflate-Conversion facility is installed and enabled, otherwise 0. */ /* extern int deflateInit (z_streamp strm, int level); Initializes the internal stream state for compression. The fields zalloc, zfree and opaque must be initialized before by the caller. If zalloc and zfree are set to NULL, deflateInit updates them to use default allocation functions. The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: 1 gives best speed, 9 gives best compression, 0 gives no compression at all (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION requests a default compromise between speed and compression (currently equivalent to level 6). deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if level is not a valid compression level, Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible with the version assumed by the caller (ZLIB_VERSION). msg is set to null if there is no error message. deflateInit does not perform any compression: this will be done by deflate(). */ extern int zlib_deflate (z_streamp strm, int flush); /* deflate compresses as much data as possible, and stops when the input buffer becomes empty or the output buffer becomes full. It may introduce some output latency (reading input without producing any output) except when forced to flush. The detailed semantics are as follows. 
deflate performs one or both of the following actions: - Compress more input starting at next_in and update next_in and avail_in accordingly. If not all input can be processed (because there is not enough room in the output buffer), next_in and avail_in are updated and processing will resume at this point for the next call of deflate(). - Provide more output starting at next_out and update next_out and avail_out accordingly. This action is forced if the parameter flush is non zero. Forcing flush frequently degrades the compression ratio, so this parameter should be set only when necessary (in interactive applications). Some output may be provided even if flush is not set. Before the call of deflate(), the application should ensure that at least one of the actions is possible, by providing more input and/or consuming more output, and updating avail_in or avail_out accordingly; avail_out should never be zero before the call. The application can consume the compressed output when it wants, for example when the output buffer is full (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK and with zero avail_out, it must be called again after making room in the output buffer because there might be more output pending. If the parameter flush is set to Z_SYNC_FLUSH, all pending output is flushed to the output buffer and the output is aligned on a byte boundary, so that the decompressor can get all input data available so far. (In particular avail_in is zero after the call if enough output space has been provided before the call.) Flushing may degrade compression for some compression algorithms and so it should be used only when necessary. If flush is set to Z_FULL_FLUSH, all output is flushed as with Z_SYNC_FLUSH, and the compression state is reset so that decompression can restart from this point if previous compressed data has been damaged or if random access is desired. Using Z_FULL_FLUSH too often can seriously degrade the compression. If deflate returns with avail_out == 0, this function must be called again with the same value of the flush parameter and more output space (updated avail_out), until the flush is complete (deflate returns with non-zero avail_out). If the parameter flush is set to Z_FINISH, pending input is processed, pending output is flushed and deflate returns with Z_STREAM_END if there was enough output space; if deflate returns with Z_OK, this function must be called again with Z_FINISH and more output space (updated avail_out) but no more input data, until it returns with Z_STREAM_END or an error. After deflate has returned Z_STREAM_END, the only possible operations on the stream are deflateReset or deflateEnd. Z_FINISH can be used immediately after deflateInit if all the compression is to be done in a single step. In this case, avail_out must be at least 0.1% larger than avail_in plus 12 bytes. If deflate does not return Z_STREAM_END, then it must be called again as described above. deflate() sets strm->adler to the adler32 checksum of all input read so far (that is, total_in bytes). deflate() may update data_type if it can make a good guess about the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered binary. This field is only for information purposes and does not affect the compression algorithm in any manner. 
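 As a concrete illustration, a single-shot kernel-mode compression could look like this (a sketch only: error handling is elided and src/dst and their lengths are placeholder names, not part of this API):

     z_stream strm;

     strm.workspace = kmalloc(zlib_deflate_workspacesize(MAX_WBITS,
                              MAX_MEM_LEVEL), GFP_KERNEL);
     zlib_deflateInit(&strm, Z_DEFAULT_COMPRESSION);
     strm.next_in = src;   strm.avail_in = src_len;
     strm.next_out = dst;  strm.avail_out = dst_len;   (at least deflateBound(src_len))
     zlib_deflate(&strm, Z_FINISH);                    (expect Z_STREAM_END)
     zlib_deflateEnd(&strm);
     kfree(strm.workspace);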
deflate() returns Z_OK if some progress has been made (more input processed or more output produced), Z_STREAM_END if all input has been consumed and all output has been produced (only when flush is set to Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible (for example avail_in or avail_out was zero). */ extern int zlib_deflateEnd (z_streamp strm); /* All dynamically allocated data structures for this stream are freed. This function discards any unprocessed input and does not flush any pending output. deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state was inconsistent, Z_DATA_ERROR if the stream was freed prematurely (some input or output was discarded). In the error case, msg may be set but then points to a static string (which must not be deallocated). */ extern int zlib_inflate_workspacesize (void); /* Returns the number of bytes that needs to be allocated for a per- stream workspace. A pointer to this number of bytes should be returned in stream->workspace before calling zlib_inflateInit(). */ /* extern int zlib_inflateInit (z_streamp strm); Initializes the internal stream state for decompression. The fields next_in, avail_in, and workspace must be initialized before by the caller. If next_in is not NULL and avail_in is large enough (the exact value depends on the compression method), inflateInit determines the compression method from the zlib header and allocates all data structures accordingly; otherwise the allocation will be deferred to the first call of inflate. If zalloc and zfree are set to NULL, inflateInit updates them to use default allocation functions. inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_VERSION_ERROR if the zlib library version is incompatible with the version assumed by the caller. msg is set to null if there is no error message. inflateInit does not perform any decompression apart from reading the zlib header if present: this will be done by inflate(). (So next_in and avail_in may be modified, but next_out and avail_out are unchanged.) */ extern int zlib_inflate (z_streamp strm, int flush); /* inflate decompresses as much data as possible, and stops when the input buffer becomes empty or the output buffer becomes full. It may introduce some output latency (reading input without producing any output) except when forced to flush. The detailed semantics are as follows. inflate performs one or both of the following actions: - Decompress more input starting at next_in and update next_in and avail_in accordingly. If not all input can be processed (because there is not enough room in the output buffer), next_in is updated and processing will resume at this point for the next call of inflate(). - Provide more output starting at next_out and update next_out and avail_out accordingly. inflate() provides as much output as possible, until there is no more input data or no more space in the output buffer (see below about the flush parameter). Before the call of inflate(), the application should ensure that at least one of the actions is possible, by providing more input and/or consuming more output, and updating the next_* and avail_* values accordingly. The application can consume the uncompressed output when it wants, for example when the output buffer is full (avail_out == 0), or after each call of inflate(). 
If inflate returns Z_OK and with zero avail_out, it must be called again after making room in the output buffer because there might be more output pending. The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, or Z_BLOCK. Z_SYNC_FLUSH requests that inflate() flush as much output as possible to the output buffer. Z_BLOCK requests that inflate() stop if and when it gets to the next deflate block boundary. When decoding the zlib or gzip format, this will cause inflate() to return immediately after the header and before the first block. When doing a raw inflate, inflate() will go ahead and process the first block, and will return when it gets to the end of that block, or when it runs out of data. The Z_BLOCK option assists in appending to or combining deflate streams. Also to assist in this, on return inflate() will set strm->data_type to the number of unused bits in the last byte taken from strm->next_in, plus 64 if inflate() is currently decoding the last block in the deflate stream, plus 128 if inflate() returned immediately after decoding an end-of-block code or decoding the complete header up to just before the first byte of the deflate stream. The end-of-block will not be indicated until all of the uncompressed data from that block has been written to strm->next_out. The number of unused bits may in general be greater than seven, except when bit 7 of data_type is set, in which case the number of unused bits will be less than eight. inflate() should normally be called until it returns Z_STREAM_END or an error. However if all decompression is to be performed in a single step (a single call of inflate), the parameter flush should be set to Z_FINISH. In this case all pending input is processed and all pending output is flushed; avail_out must be large enough to hold all the uncompressed data. (The size of the uncompressed data may have been saved by the compressor for this purpose.) The next operation on this stream must be inflateEnd to deallocate the decompression state. The use of Z_FINISH is never required, but can be used to inform inflate that a faster approach may be used for the single inflate() call. In this implementation, inflate() always flushes as much output as possible to the output buffer, and always uses the faster approach on the first call. So the only effect of the flush parameter in this implementation is on the return value of inflate(), as noted below, or when it returns early because Z_BLOCK is used. If a preset dictionary is needed after this call (see inflateSetDictionary below), inflate sets strm->adler to the adler32 checksum of the dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise it sets strm->adler to the adler32 checksum of all output produced so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described below. At the end of the stream, inflate() checks that its computed adler32 checksum is equal to that saved by the compressor and returns Z_STREAM_END only if the checksum is correct. inflate() will decompress and check either zlib-wrapped or gzip-wrapped deflate data. The header type is detected automatically. Any information contained in the gzip header is not retained, so applications that need that information should instead use raw inflate, see inflateInit2() below, or inflateBack() and perform their own processing of the gzip header and trailer. 
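 A matching single-shot decompression sketch (again illustrative only; comp and out are placeholder buffers, and the workspace is sized with zlib_inflate_workspacesize() as described above):

     strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
     zlib_inflateInit(&strm);
     strm.next_in = comp;  strm.avail_in = comp_len;
     strm.next_out = out;  strm.avail_out = out_len;   (room for all the output)
     ret = zlib_inflate(&strm, Z_FINISH);              (Z_STREAM_END on success)
     zlib_inflateEnd(&strm);
     kfree(strm.workspace);

 The zlib_inflate_blob() utility declared at the end of this header wraps exactly this init/inflate/cleanup sequence.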
inflate() returns Z_OK if some progress has been made (more input processed or more output produced), Z_STREAM_END if the end of the compressed data has been reached and all uncompressed output has been produced, Z_NEED_DICT if a preset dictionary is needed at this point, Z_DATA_ERROR if the input data was corrupted (input stream not conforming to the zlib format or incorrect check value), Z_STREAM_ERROR if the stream structure was inconsistent (for example if next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no progress is possible or if there was not enough room in the output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and inflate() can be called again with more input and more output space to continue decompressing. If Z_DATA_ERROR is returned, the application may then call inflateSync() to look for a good compression block if a partial recovery of the data is desired. */ extern int zlib_inflateEnd (z_streamp strm); /* All dynamically allocated data structures for this stream are freed. This function discards any unprocessed input and does not flush any pending output. inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state was inconsistent. In the error case, msg may be set but then points to a static string (which must not be deallocated). */ /* Advanced functions */ /* The following functions are needed only in some special applications. */ /* extern int deflateInit2 (z_streamp strm, int level, int method, int windowBits, int memLevel, int strategy); This is another version of deflateInit with more compression options. The fields next_in, zalloc, zfree and opaque must be initialized before by the caller. The method parameter is the compression method. It must be Z_DEFLATED in this version of the library. The windowBits parameter is the base two logarithm of the window size (the size of the history buffer). It should be in the range 8..15 for this version of the library. Larger values of this parameter result in better compression at the expense of memory usage. The default value is 15 if deflateInit is used instead. The memLevel parameter specifies how much memory should be allocated for the internal compression state. memLevel=1 uses minimum memory but is slow and reduces compression ratio; memLevel=9 uses maximum memory for optimal speed. The default value is 8. See zconf.h for total memory usage as a function of windowBits and memLevel. The strategy parameter is used to tune the compression algorithm. Use the value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no string match). Filtered data consists mostly of small values with a somewhat random distribution. In this case, the compression algorithm is tuned to compress them better. The effect of Z_FILTERED is to force more Huffman coding and less string matching; it is somewhat intermediate between Z_DEFAULT and Z_HUFFMAN_ONLY. The strategy parameter only affects the compression ratio but not the correctness of the compressed output even if it is not set appropriately. deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid method). msg is set to null if there is no error message. deflateInit2 does not perform any compression: this will be done by deflate(). 
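 As an illustrative sketch (the parameter values are arbitrary but inside the documented ranges): zlib_deflateInit2(&strm, 6, Z_DEFLATED, 12, 5, Z_DEFAULT_STRATEGY) selects a 4K window and a small internal state, trading some compression ratio for memory; the workspace must then be sized with zlib_deflate_workspacesize(12, 5) or larger parameters.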
*/ extern int zlib_deflateReset (z_streamp strm); /* This function is equivalent to deflateEnd followed by deflateInit, but does not free and reallocate all the internal compression state. The stream will keep the same compression level and any other attributes that may have been set by deflateInit2. deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc or state being NULL). */ static inline unsigned long deflateBound(unsigned long s) { return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11; } /* extern int inflateInit2 (z_streamp strm, int windowBits); This is another version of inflateInit with an extra parameter. The fields next_in, avail_in, zalloc, zfree and opaque must be initialized before by the caller. The windowBits parameter is the base two logarithm of the maximum window size (the size of the history buffer). It should be in the range 8..15 for this version of the library. The default value is 15 if inflateInit is used instead. windowBits must be greater than or equal to the windowBits value provided to deflateInit2() while compressing, or it must be equal to 15 if deflateInit2() was not used. If a compressed stream with a larger window size is given as input, inflate() will return with the error code Z_DATA_ERROR instead of trying to allocate a larger window. windowBits can also be -8..-15 for raw inflate. In this case, -windowBits determines the window size. inflate() will then process raw deflate data, not looking for a zlib or gzip header, not generating a check value, and not looking for any check values for comparison at the end of the stream. This is for use with other formats that use the deflate compressed data format such as zip. Those formats provide their own check values. If a custom format is developed using the raw deflate format for compressed data, it is recommended that a check value such as an adler32 or a crc32 be applied to the uncompressed data as is done in the zlib, gzip, and zip formats. For most applications, the zlib format should be used as is. Note that comments above on the use in deflateInit2() applies to the magnitude of windowBits. windowBits can also be greater than 15 for optional gzip decoding. Add 32 to windowBits to enable zlib and gzip decoding with automatic header detection, or add 16 to decode only the gzip format (the zlib format will return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a crc32 instead of an adler32. inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as a null strm). msg is set to null if there is no error message. inflateInit2 does not perform any decompression apart from reading the zlib header if present: this will be done by inflate(). (So next_in and avail_in may be modified, but next_out and avail_out are unchanged.) */ extern int zlib_inflateReset (z_streamp strm); /* This function is equivalent to inflateEnd followed by inflateInit, but does not free and reallocate all the internal decompression state. The stream will keep attributes that may have been set by inflateInit2. inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc or state being NULL). */ extern int zlib_inflateIncomp (z_stream *strm); /* This function adds the data at next_in (avail_in bytes) to the output history without performing any output. 
There must be no pending output, and the decompressor must be expecting to see the start of a block. Calling this function is equivalent to decompressing a stored block containing the data at next_in (except that the data is not output). */ #define zlib_deflateInit(strm, level) \ zlib_deflateInit2((strm), (level), Z_DEFLATED, MAX_WBITS, \ DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY) #define zlib_inflateInit(strm) \ zlib_inflateInit2((strm), DEF_WBITS) extern int zlib_deflateInit2(z_streamp strm, int level, int method, int windowBits, int memLevel, int strategy); extern int zlib_inflateInit2(z_streamp strm, int windowBits); #if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL) struct internal_state {int dummy;}; /* hack for buggy compilers */ #endif /* Utility function: initialize zlib, unpack binary blob, clean up zlib, * return len or negative error code. */ extern int zlib_inflate_blob(void *dst, unsigned dst_sz, const void *src, unsigned src_sz); #endif /* _ZLIB_H */ rio_drv.h 0000644 00000035000 14722070374 0006365 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * RapidIO driver services * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> */ #ifndef LINUX_RIO_DRV_H #define LINUX_RIO_DRV_H #include <linux/types.h> #include <linux/ioport.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/rio.h> extern int __rio_local_read_config_32(struct rio_mport *port, u32 offset, u32 * data); extern int __rio_local_write_config_32(struct rio_mport *port, u32 offset, u32 data); extern int __rio_local_read_config_16(struct rio_mport *port, u32 offset, u16 * data); extern int __rio_local_write_config_16(struct rio_mport *port, u32 offset, u16 data); extern int __rio_local_read_config_8(struct rio_mport *port, u32 offset, u8 * data); extern int __rio_local_write_config_8(struct rio_mport *port, u32 offset, u8 data); extern int rio_mport_read_config_32(struct rio_mport *port, u16 destid, u8 hopcount, u32 offset, u32 * data); extern int rio_mport_write_config_32(struct rio_mport *port, u16 destid, u8 hopcount, u32 offset, u32 data); extern int rio_mport_read_config_16(struct rio_mport *port, u16 destid, u8 hopcount, u32 offset, u16 * data); extern int rio_mport_write_config_16(struct rio_mport *port, u16 destid, u8 hopcount, u32 offset, u16 data); extern int rio_mport_read_config_8(struct rio_mport *port, u16 destid, u8 hopcount, u32 offset, u8 * data); extern int rio_mport_write_config_8(struct rio_mport *port, u16 destid, u8 hopcount, u32 offset, u8 data); /** * rio_local_read_config_32 - Read 32 bits from local configuration space * @port: Master port * @offset: Offset into local configuration space * @data: Pointer to read data into * * Reads 32 bits of data from the specified offset within the local * device's configuration space. */ static inline int rio_local_read_config_32(struct rio_mport *port, u32 offset, u32 * data) { return __rio_local_read_config_32(port, offset, data); } /** * rio_local_write_config_32 - Write 32 bits to local configuration space * @port: Master port * @offset: Offset into local configuration space * @data: Data to be written * * Writes 32 bits of data to the specified offset within the local * device's configuration space. 
*/ static inline int rio_local_write_config_32(struct rio_mport *port, u32 offset, u32 data) { return __rio_local_write_config_32(port, offset, data); } /** * rio_local_read_config_16 - Read 16 bits from local configuration space * @port: Master port * @offset: Offset into local configuration space * @data: Pointer to read data into * * Reads 16 bits of data from the specified offset within the local * device's configuration space. */ static inline int rio_local_read_config_16(struct rio_mport *port, u32 offset, u16 * data) { return __rio_local_read_config_16(port, offset, data); } /** * rio_local_write_config_16 - Write 16 bits to local configuration space * @port: Master port * @offset: Offset into local configuration space * @data: Data to be written * * Writes 16 bits of data to the specified offset within the local * device's configuration space. */ static inline int rio_local_write_config_16(struct rio_mport *port, u32 offset, u16 data) { return __rio_local_write_config_16(port, offset, data); } /** * rio_local_read_config_8 - Read 8 bits from local configuration space * @port: Master port * @offset: Offset into local configuration space * @data: Pointer to read data into * * Reads 8 bits of data from the specified offset within the local * device's configuration space. */ static inline int rio_local_read_config_8(struct rio_mport *port, u32 offset, u8 * data) { return __rio_local_read_config_8(port, offset, data); } /** * rio_local_write_config_8 - Write 8 bits to local configuration space * @port: Master port * @offset: Offset into local configuration space * @data: Data to be written * * Writes 8 bits of data to the specified offset within the local * device's configuration space. */ static inline int rio_local_write_config_8(struct rio_mport *port, u32 offset, u8 data) { return __rio_local_write_config_8(port, offset, data); } /** * rio_read_config_32 - Read 32 bits from configuration space * @rdev: RIO device * @offset: Offset into device configuration space * @data: Pointer to read data into * * Reads 32 bits of data from the specified offset within the * RIO device's configuration space. */ static inline int rio_read_config_32(struct rio_dev *rdev, u32 offset, u32 * data) { return rio_mport_read_config_32(rdev->net->hport, rdev->destid, rdev->hopcount, offset, data); }; /** * rio_write_config_32 - Write 32 bits to configuration space * @rdev: RIO device * @offset: Offset into device configuration space * @data: Data to be written * * Writes 32 bits of data to the specified offset within the * RIO device's configuration space. */ static inline int rio_write_config_32(struct rio_dev *rdev, u32 offset, u32 data) { return rio_mport_write_config_32(rdev->net->hport, rdev->destid, rdev->hopcount, offset, data); }; /** * rio_read_config_16 - Read 16 bits from configuration space * @rdev: RIO device * @offset: Offset into device configuration space * @data: Pointer to read data into * * Reads 16 bits of data from the specified offset within the * RIO device's configuration space. */ static inline int rio_read_config_16(struct rio_dev *rdev, u32 offset, u16 * data) { return rio_mport_read_config_16(rdev->net->hport, rdev->destid, rdev->hopcount, offset, data); }; /** * rio_write_config_16 - Write 16 bits to configuration space * @rdev: RIO device * @offset: Offset into device configuration space * @data: Data to be written * * Writes 16 bits of data to the specified offset within the * RIO device's configuration space. 
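 *
 * All of the rio_read/write_config_* helpers follow this same pattern.
 * An illustrative sketch (RIO_PEF_CAR is the standard Processing Element
 * Features CAR offset from <linux/rio_regs.h>):
 *
 *     u32 pef;
 *     if (!rio_read_config_32(rdev, RIO_PEF_CAR, &pef))
 *             pr_info("%s: PEF %08x\n", rio_name(rdev), pef);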
*/ static inline int rio_write_config_16(struct rio_dev *rdev, u32 offset, u16 data) { return rio_mport_write_config_16(rdev->net->hport, rdev->destid, rdev->hopcount, offset, data); }; /** * rio_read_config_8 - Read 8 bits from configuration space * @rdev: RIO device * @offset: Offset into device configuration space * @data: Pointer to read data into * * Reads 8 bits of data from the specified offset within the * RIO device's configuration space. */ static inline int rio_read_config_8(struct rio_dev *rdev, u32 offset, u8 * data) { return rio_mport_read_config_8(rdev->net->hport, rdev->destid, rdev->hopcount, offset, data); }; /** * rio_write_config_8 - Write 8 bits to configuration space * @rdev: RIO device * @offset: Offset into device configuration space * @data: Data to be written * * Writes 8 bits of data to the specified offset within the * RIO device's configuration space. */ static inline int rio_write_config_8(struct rio_dev *rdev, u32 offset, u8 data) { return rio_mport_write_config_8(rdev->net->hport, rdev->destid, rdev->hopcount, offset, data); }; extern int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data); /** * rio_send_doorbell - Send a doorbell message to a device * @rdev: RIO device * @data: Doorbell message data * * Send a doorbell message to a RIO device. The doorbell message * has a 16-bit info field provided by the @data argument. */ static inline int rio_send_doorbell(struct rio_dev *rdev, u16 data) { return rio_mport_send_doorbell(rdev->net->hport, rdev->destid, data); }; /** * rio_init_mbox_res - Initialize a RIO mailbox resource * @res: resource struct * @start: start of mailbox range * @end: end of mailbox range * * This function is used to initialize the fields of a resource * for use as a mailbox resource. It initializes a range of * mailboxes using the start and end arguments. */ static inline void rio_init_mbox_res(struct resource *res, int start, int end) { memset(res, 0, sizeof(struct resource)); res->start = start; res->end = end; res->flags = RIO_RESOURCE_MAILBOX; } /** * rio_init_dbell_res - Initialize a RIO doorbell resource * @res: resource struct * @start: start of doorbell range * @end: end of doorbell range * * This function is used to initialize the fields of a resource * for use as a doorbell resource. It initializes a range of * doorbell messages using the start and end arguments. */ static inline void rio_init_dbell_res(struct resource *res, u16 start, u16 end) { memset(res, 0, sizeof(struct resource)); res->start = start; res->end = end; res->flags = RIO_RESOURCE_DOORBELL; } /** * RIO_DEVICE - macro used to describe a specific RIO device * @dev: the 16 bit RIO device ID * @ven: the 16 bit RIO vendor ID * * This macro is used to create a struct rio_device_id that matches a * specific device. The assembly vendor and assembly device fields * will be set to %RIO_ANY_ID. 
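 *
 * Example (the IDs here are placeholders, not real assignments):
 *
 *     static const struct rio_device_id foo_id_table[] = {
 *             { RIO_DEVICE(0x1234, 0x5678) },
 *             { 0, }
 *     };
 *
 * where the all-zero entry terminates the match table.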
*/ #define RIO_DEVICE(dev,ven) \ .did = (dev), .vid = (ven), \ .asm_did = RIO_ANY_ID, .asm_vid = RIO_ANY_ID /* Mailbox management */ extern int rio_request_outb_mbox(struct rio_mport *, void *, int, int, void (*)(struct rio_mport *, void *, int, int)); extern int rio_release_outb_mbox(struct rio_mport *, int); /** * rio_add_outb_message - Add RIO message to an outbound mailbox queue * @mport: RIO master port containing the outbound queue * @rdev: RIO device the message is to be sent to * @mbox: The outbound mailbox queue * @buffer: Pointer to the message buffer * @len: Length of the message buffer * * Adds a RIO message buffer to an outbound mailbox queue for * transmission. Returns 0 on success. */ static inline int rio_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, void *buffer, size_t len) { return mport->ops->add_outb_message(mport, rdev, mbox, buffer, len); } extern int rio_request_inb_mbox(struct rio_mport *, void *, int, int, void (*)(struct rio_mport *, void *, int, int)); extern int rio_release_inb_mbox(struct rio_mport *, int); /** * rio_add_inb_buffer - Add buffer to an inbound mailbox queue * @mport: Master port containing the inbound mailbox * @mbox: The inbound mailbox number * @buffer: Pointer to the message buffer * * Adds a buffer to an inbound mailbox queue for reception. Returns * 0 on success. */ static inline int rio_add_inb_buffer(struct rio_mport *mport, int mbox, void *buffer) { return mport->ops->add_inb_buffer(mport, mbox, buffer); } /** * rio_get_inb_message - Get a RIO message from an inbound mailbox queue * @mport: Master port containing the inbound mailbox * @mbox: The inbound mailbox number * * Get a RIO message from an inbound mailbox queue. Returns a pointer * to the message on success, or NULL on failure. */ static inline void *rio_get_inb_message(struct rio_mport *mport, int mbox) { return mport->ops->get_inb_message(mport, mbox); } /* Doorbell management */ extern int rio_request_inb_dbell(struct rio_mport *, void *, u16, u16, void (*)(struct rio_mport *, void *, u16, u16, u16)); extern int rio_release_inb_dbell(struct rio_mport *, u16, u16); extern struct resource *rio_request_outb_dbell(struct rio_dev *, u16, u16); extern int rio_release_outb_dbell(struct rio_dev *, struct resource *); /* Memory region management */ int rio_claim_resource(struct rio_dev *, int); int rio_request_regions(struct rio_dev *, char *); void rio_release_regions(struct rio_dev *); int rio_request_region(struct rio_dev *, int, char *); void rio_release_region(struct rio_dev *, int); /* Memory mapping functions */ extern int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local, u64 rbase, u32 size, u32 rflags); extern void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart); extern int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase, u32 size, u32 rflags, dma_addr_t *local); extern void rio_unmap_outb_region(struct rio_mport *mport, u16 destid, u64 rstart); /* Port-Write management */ extern int rio_request_inb_pwrite(struct rio_dev *, int (*)(struct rio_dev *, union rio_pw_msg*, int)); extern int rio_release_inb_pwrite(struct rio_dev *); extern int rio_add_mport_pw_handler(struct rio_mport *mport, void *dev_id, int (*pwcback)(struct rio_mport *mport, void *dev_id, union rio_pw_msg *msg, int step)); extern int rio_del_mport_pw_handler(struct rio_mport *mport, void *dev_id, int (*pwcback)(struct rio_mport *mport, void *dev_id, union rio_pw_msg *msg, int step)); extern int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg); extern void
rio_pw_enable(struct rio_mport *mport, int enable); /* LDM support */ int rio_register_driver(struct rio_driver *); void rio_unregister_driver(struct rio_driver *); struct rio_dev *rio_dev_get(struct rio_dev *); void rio_dev_put(struct rio_dev *); #ifdef CONFIG_RAPIDIO_DMA_ENGINE extern struct dma_chan *rio_request_dma(struct rio_dev *rdev); extern struct dma_chan *rio_request_mport_dma(struct rio_mport *mport); extern void rio_release_dma(struct dma_chan *dchan); extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg( struct rio_dev *rdev, struct dma_chan *dchan, struct rio_dma_data *data, enum dma_transfer_direction direction, unsigned long flags); extern struct dma_async_tx_descriptor *rio_dma_prep_xfer( struct dma_chan *dchan, u16 destid, struct rio_dma_data *data, enum dma_transfer_direction direction, unsigned long flags); #endif /** * rio_name - Get the unique RIO device identifier * @rdev: RIO device * * Get the unique RIO device identifier. Returns the device * identifier string. */ static inline const char *rio_name(struct rio_dev *rdev) { return dev_name(&rdev->dev); } /** * rio_get_drvdata - Get RIO driver specific data * @rdev: RIO device * * Get RIO driver specific data. Returns a pointer to the * driver specific data. */ static inline void *rio_get_drvdata(struct rio_dev *rdev) { return dev_get_drvdata(&rdev->dev); } /** * rio_set_drvdata - Set RIO driver specific data * @rdev: RIO device * @data: Pointer to driver specific data * * Set RIO driver specific data. The device struct driver data pointer * is set to the @data argument. */ static inline void rio_set_drvdata(struct rio_dev *rdev, void *data) { dev_set_drvdata(&rdev->dev, data); } /* Misc driver helpers */ extern u16 rio_local_get_device_id(struct rio_mport *port); extern void rio_local_set_device_id(struct rio_mport *port, u16 did); extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from); extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did, struct rio_dev *from); extern int rio_init_mports(void); #endif /* LINUX_RIO_DRV_H */ iommu.h 0000644 00000076101 14722070374 0006056 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. * Author: Joerg Roedel <joerg.roedel@amd.com> */ #ifndef __LINUX_IOMMU_H #define __LINUX_IOMMU_H #include <linux/scatterlist.h> #include <linux/device.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/of.h> #include <uapi/linux/iommu.h> #define IOMMU_READ (1 << 0) #define IOMMU_WRITE (1 << 1) #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ #define IOMMU_NOEXEC (1 << 3) #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ /* * Where the bus hardware includes a privilege level as part of its access type * markings, and certain devices are capable of issuing transactions marked as * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other * given permission flags only apply to accesses at the higher privilege level, * and that unprivileged transactions should have as little access as possible. * This would usually imply the same permissions as kernel mappings on the CPU, * if the IOMMU page table format is equivalent. */ #define IOMMU_PRIV (1 << 5) /* * Non-coherent masters on a few Qualcomm SoCs can use this page protection flag * to set correct cacheability attributes to use an outer level of cache - * last level cache, aka system cache.
*/ #define IOMMU_QCOM_SYS_CACHE (1 << 6) struct iommu_ops; struct iommu_group; struct bus_type; struct device; struct iommu_domain; struct notifier_block; struct iommu_sva; struct iommu_fault_event; /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 #define IOMMU_FAULT_WRITE 0x1 typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *); typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *, void *); typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); struct iommu_domain_geometry { dma_addr_t aperture_start; /* First address that can be mapped */ dma_addr_t aperture_end; /* Last address that can be mapped */ bool force_aperture; /* DMA only allowed in mappable range? */ }; /* Domain feature flags */ #define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */ #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API implementation */ #define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */ /* * These are the possible domain-types * * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate * devices * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used * for VMs * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations. * This flag allows IOMMU drivers to implement * certain optimizations for these domains */ #define IOMMU_DOMAIN_BLOCKED (0U) #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) #define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING) #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ __IOMMU_DOMAIN_DMA_API) struct iommu_domain { unsigned type; const struct iommu_ops *ops; unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ iommu_fault_handler_t handler; void *handler_token; struct iommu_domain_geometry geometry; void *iova_cookie; }; enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA transactions */ IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ }; /* * The following constraints are specific to FSL_PAMUV1: * -aperture must be power of 2, and naturally aligned * -number of windows must be power of 2, and address space size * of each window is determined by aperture size / # of windows * -the actual size of the mapped region of a window must be power * of 2 starting with 4KB and physical address must be naturally * aligned. * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints. * The caller can invoke iommu_domain_get_attr to check if the underlying * iommu implementation supports these constraints.
*/ enum iommu_attr { DOMAIN_ATTR_GEOMETRY, DOMAIN_ATTR_PAGING, DOMAIN_ATTR_WINDOWS, DOMAIN_ATTR_FSL_PAMU_STASH, DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_FSL_PAMUV1, DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_MAX, }; /* These are the possible reserved region types */ enum iommu_resv_type { /* Memory regions which must be mapped 1:1 at all times */ IOMMU_RESV_DIRECT, /* * Memory regions which are advertised to be 1:1 but are * commonly considered relaxable in some conditions, * for instance in device assignment use case (USB, Graphics) */ IOMMU_RESV_DIRECT_RELAXABLE, /* Arbitrary "never map this or give it to a device" address ranges */ IOMMU_RESV_RESERVED, /* Hardware MSI region (untranslated) */ IOMMU_RESV_MSI, /* Software-managed MSI translation window */ IOMMU_RESV_SW_MSI, }; /** * struct iommu_resv_region - descriptor for a reserved memory region * @list: Linked list pointers * @start: System physical start address of the region * @length: Length of the region in bytes * @prot: IOMMU Protection flags (READ/WRITE/...) * @type: Type of the reserved region */ struct iommu_resv_region { struct list_head list; phys_addr_t start; size_t length; int prot; enum iommu_resv_type type; }; /* Per device IOMMU features */ enum iommu_dev_features { IOMMU_DEV_FEAT_AUX, /* Aux-domain feature */ IOMMU_DEV_FEAT_SVA, /* Shared Virtual Addresses */ }; #define IOMMU_PASID_INVALID (-1U) /** * struct iommu_sva_ops - device driver callbacks for an SVA context * * @mm_exit: called when the mm is about to be torn down by exit_mmap. After * @mm_exit returns, the device must not issue any more transaction * with the PASID given as argument. * * The @mm_exit handler is allowed to sleep. Be careful about the * locks taken in @mm_exit, because they might lead to deadlocks if * they are also held when dropping references to the mm. Consider the * following call chain: * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A) * Using mmput_async() prevents this scenario. * */ struct iommu_sva_ops { iommu_mm_exit_handler_t mm_exit; }; #ifdef CONFIG_IOMMU_API /** * struct iommu_iotlb_gather - Range information for a pending IOTLB flush * * @start: IOVA representing the start of the range to be flushed * @end: IOVA representing the end of the range to be flushed (exclusive) * @pgsize: The interval at which to perform the flush * * This structure is intended to be updated by multiple calls to the * ->unmap() function in struct iommu_ops before eventually being passed * into ->iotlb_sync(). 
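 *
 * Gathering the union of all pending ranges in one place lets a driver
 * batch many small unmaps and then issue a single range invalidation
 * from ->iotlb_sync() instead of flushing once per page.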
*/ struct iommu_iotlb_gather { unsigned long start; unsigned long end; size_t pgsize; }; /** * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @domain_alloc: allocate iommu domain * @domain_free: free iommu domain * @attach_dev: attach device to an iommu domain * @detach_dev: detach device from an iommu domain * @map: map a physically contiguous memory region to an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain * @iotlb_sync_map: Sync mappings created recently using @map to the hardware * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush * queue * @iova_to_phys: translate iova to physical address * @add_device: add device to iommu grouping * @remove_device: remove device from iommu grouping * @device_group: find iommu group for a particular device * @domain_get_attr: Query domain attributes * @domain_set_attr: Change domain attributes * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges * @domain_window_enable: Configure and enable a particular window for a domain * @domain_window_disable: Disable a particular window for a domain * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) * @dev_has/enable/disable_feat: per device entries to check/enable/disable * iommu specific features. * @dev_feat_enabled: check enabled feature * @aux_attach/detach_dev: aux-domain specific attach/detach entries. * @aux_get_pasid: get the pasid given an aux-domain * @sva_bind: Bind process address space to device * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle * @page_response: handle page request response * @pgsize_bitmap: bitmap of all possible supported page sizes */ struct iommu_ops { bool (*capable)(enum iommu_cap); /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); void (*domain_free)(struct iommu_domain *); int (*attach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev); int (*map)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); void (*iotlb_sync_map)(struct iommu_domain *domain); void (*iotlb_sync)(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); int (*domain_get_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); int (*domain_set_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); void (*put_resv_regions)(struct device *dev, struct list_head *list); void (*apply_resv_region)(struct device *dev, struct iommu_domain *domain, struct iommu_resv_region 
*region); /* Window handling functions */ int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot); void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); /* Per device IOMMU features */ bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f); bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f); int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f); int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f); /* Aux-domain specific attach/detach entries */ int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev); void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev); int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev); struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm, void *drvdata); void (*sva_unbind)(struct iommu_sva *handle); int (*sva_get_pasid)(struct iommu_sva *handle); int (*page_response)(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); unsigned long pgsize_bitmap; }; /** * struct iommu_device - IOMMU core representation of one IOMMU hardware * instance * @list: Used by the iommu-core to keep a list of registered iommus * @ops: iommu-ops for talking to this iommu * @fwnode: associated firmware node handle * @dev: struct device for sysfs handling */ struct iommu_device { struct list_head list; const struct iommu_ops *ops; struct fwnode_handle *fwnode; struct device *dev; }; /** * struct iommu_fault_event - Generic fault event * * Can represent recoverable faults such as page requests or * unrecoverable faults such as DMA or IRQ remapping faults. * * @fault: fault descriptor * @list: pending fault event list, used for tracking responses */ struct iommu_fault_event { struct iommu_fault fault; struct list_head list; }; /** * struct iommu_fault_param - per-device IOMMU fault data * @handler: Callback function to handle IOMMU faults at device level * @data: handler private data * @faults: holds the pending faults which need a response * @lock: protect pending faults list */ struct iommu_fault_param { iommu_dev_fault_handler_t handler; void *data; struct list_head faults; struct mutex lock; }; /** * struct iommu_param - collection of per-device IOMMU data * * @fault_param: IOMMU detected device fault reporting data * * TODO: migrate other per device data pointers under iommu_dev_data, e.g. * struct iommu_group *iommu_group; * struct iommu_fwspec *iommu_fwspec; */ struct iommu_param { struct mutex lock; struct iommu_fault_param *fault_param; }; int iommu_device_register(struct iommu_device *iommu); void iommu_device_unregister(struct iommu_device *iommu); int iommu_device_sysfs_add(struct iommu_device *iommu, struct device *parent, const struct attribute_group **groups, const char *fmt, ...)
__printf(4, 5); void iommu_device_sysfs_remove(struct iommu_device *iommu); int iommu_device_link(struct iommu_device *iommu, struct device *link); void iommu_device_unlink(struct iommu_device *iommu, struct device *link); static inline void iommu_device_set_ops(struct iommu_device *iommu, const struct iommu_ops *ops) { iommu->ops = ops; } static inline void iommu_device_set_fwnode(struct iommu_device *iommu, struct fwnode_handle *fwnode) { iommu->fwnode = fwnode; } static inline struct iommu_device *dev_to_iommu_device(struct device *dev) { return (struct iommu_device *)dev_get_drvdata(dev); } static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) { *gather = (struct iommu_iotlb_gather) { .start = ULONG_MAX, }; } #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ #define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */ #define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */ #define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */ extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); extern bool iommu_present(struct bus_type *bus); extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap); extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); extern struct iommu_group *iommu_group_get_by_id(int id); extern void iommu_domain_free(struct iommu_domain *domain); extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); extern struct iommu_domain *iommu_get_dma_domain(struct device *dev); extern int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size); extern size_t iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather); extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg,unsigned int nents, int prot); extern size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern int iommu_request_dm_for_dev(struct device *dev); extern int iommu_request_dma_domain_for_dev(struct device *dev); extern void iommu_set_default_passthrough(bool cmd_line); extern void iommu_set_default_translated(bool cmd_line); extern bool iommu_default_passthrough(void); extern struct iommu_resv_region * iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, enum iommu_resv_type type); extern int iommu_get_group_resv_regions(struct iommu_group *group, struct list_head *head); extern int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group); extern void iommu_detach_group(struct iommu_domain 
*domain, struct iommu_group *group); extern struct iommu_group *iommu_group_alloc(void); extern void *iommu_group_get_iommudata(struct iommu_group *group); extern void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, void (*release)(void *iommu_data)); extern int iommu_group_set_name(struct iommu_group *group, const char *name); extern int iommu_group_add_device(struct iommu_group *group, struct device *dev); extern void iommu_group_remove_device(struct device *dev); extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)); extern struct iommu_group *iommu_group_get(struct device *dev); extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group); extern void iommu_group_put(struct iommu_group *group); extern int iommu_group_register_notifier(struct iommu_group *group, struct notifier_block *nb); extern int iommu_group_unregister_notifier(struct iommu_group *group, struct notifier_block *nb); extern int iommu_register_device_fault_handler(struct device *dev, iommu_dev_fault_handler_t handler, void *data); extern int iommu_unregister_device_fault_handler(struct device *dev); extern int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt); extern int iommu_page_response(struct device *dev, struct iommu_page_response *msg); extern int iommu_group_id(struct iommu_group *group); extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, void *data); extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); /* Window handling function prototypes */ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t offset, u64 size, int prot); extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); static inline void iommu_flush_tlb_all(struct iommu_domain *domain) { if (domain->ops->flush_iotlb_all) domain->ops->flush_iotlb_all(domain); } static inline void iommu_tlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather) { if (domain->ops->iotlb_sync) domain->ops->iotlb_sync(domain, iotlb_gather); iommu_iotlb_gather_init(iotlb_gather); } static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, struct iommu_iotlb_gather *gather, unsigned long iova, size_t size) { unsigned long start = iova, end = start + size; /* * If the new page is disjoint from the current range or is mapped at * a different granularity, then sync the TLB so that the gather * structure can be rewritten. 
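 *
 * A worked example with hypothetical numbers: starting from an empty
 * gather, adding a 4KiB page at IOVA 0x1000 records [0x1000, 0x2000);
 * an adjacent 4KiB page at 0x2000 merely widens that to [0x1000, 0x3000).
 * A disjoint page at 0x10000, or a page of a different size, first forces
 * a sync so that the gather can be restarted for the new range.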
*/ if (gather->pgsize != size || end < gather->start || start > gather->end) { if (gather->pgsize) iommu_tlb_sync(domain, gather); gather->pgsize = size; } if (gather->end < end) gather->end = end; if (gather->start > start) gather->start = start; } /* PCI device grouping function */ extern struct iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ extern struct iommu_group *generic_device_group(struct device *dev); /* FSL-MC device grouping function */ struct iommu_group *fsl_mc_device_group(struct device *dev); /** * struct iommu_fwspec - per-device IOMMU instance data * @ops: ops for this device's IOMMU * @iommu_fwnode: firmware handle for this device's IOMMU * @iommu_priv: IOMMU driver private data for this device * @num_ids: number of associated device IDs * @ids: IDs which this device may present to the IOMMU */ struct iommu_fwspec { const struct iommu_ops *ops; struct fwnode_handle *iommu_fwnode; void *iommu_priv; u32 flags; unsigned int num_ids; u32 ids[1]; }; /* ATS is supported */ #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0) /** * struct iommu_sva - handle to a device-mm bond */ struct iommu_sva { struct device *dev; const struct iommu_sva_ops *ops; }; int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops); void iommu_fwspec_free(struct device *dev); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) { return dev->iommu_fwspec; } static inline void dev_iommu_fwspec_set(struct device *dev, struct iommu_fwspec *fwspec) { dev->iommu_fwspec = fwspec; } int iommu_probe_device(struct device *dev); void iommu_release_device(struct device *dev); bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f); int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f); bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f); int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev); void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev); int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev); struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata); void iommu_sva_unbind_device(struct iommu_sva *handle); int iommu_sva_set_ops(struct iommu_sva *handle, const struct iommu_sva_ops *ops); int iommu_sva_get_pasid(struct iommu_sva *handle); #else /* CONFIG_IOMMU_API */ struct iommu_ops {}; struct iommu_group {}; struct iommu_fwspec {}; struct iommu_device {}; struct iommu_fault_param {}; struct iommu_iotlb_gather {}; static inline bool iommu_present(struct bus_type *bus) { return false; } static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) { return false; } static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) { return NULL; } static inline struct iommu_group *iommu_group_get_by_id(int id) { return NULL; } static inline void iommu_domain_free(struct iommu_domain *domain) { } static inline int iommu_attach_device(struct iommu_domain *domain, struct device *dev) { return -ENODEV; } static inline void iommu_detach_device(struct iommu_domain *domain, struct device *dev) { } static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) { return NULL; } static 
inline int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { return -ENODEV; } static inline int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { return -ENODEV; } static inline size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) { return 0; } static inline size_t iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) { return 0; } static inline size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot) { return 0; } static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot) { return 0; } static inline void iommu_flush_tlb_all(struct iommu_domain *domain) { } static inline void iommu_tlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather) { } static inline int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot) { return -ENODEV; } static inline void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) { } static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { return 0; } static inline void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token) { } static inline void iommu_get_resv_regions(struct device *dev, struct list_head *list) { } static inline void iommu_put_resv_regions(struct device *dev, struct list_head *list) { } static inline int iommu_get_group_resv_regions(struct iommu_group *group, struct list_head *head) { return -ENODEV; } static inline int iommu_request_dm_for_dev(struct device *dev) { return -ENODEV; } static inline int iommu_request_dma_domain_for_dev(struct device *dev) { return -ENODEV; } static inline void iommu_set_default_passthrough(bool cmd_line) { } static inline void iommu_set_default_translated(bool cmd_line) { } static inline bool iommu_default_passthrough(void) { return true; } static inline int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { return -ENODEV; } static inline void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) { } static inline struct iommu_group *iommu_group_alloc(void) { return ERR_PTR(-ENODEV); } static inline void *iommu_group_get_iommudata(struct iommu_group *group) { return NULL; } static inline void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, void (*release)(void *iommu_data)) { } static inline int iommu_group_set_name(struct iommu_group *group, const char *name) { return -ENODEV; } static inline int iommu_group_add_device(struct iommu_group *group, struct device *dev) { return -ENODEV; } static inline void iommu_group_remove_device(struct device *dev) { } static inline int iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)) { return -ENODEV; } static inline struct iommu_group *iommu_group_get(struct device *dev) { return NULL; } static inline void iommu_group_put(struct iommu_group *group) { } static inline int iommu_group_register_notifier(struct iommu_group *group, struct notifier_block *nb) { return -ENODEV; } static inline int iommu_group_unregister_notifier(struct iommu_group *group, struct notifier_block *nb) { return 0; } static inline int
iommu_register_device_fault_handler(struct device *dev, iommu_dev_fault_handler_t handler, void *data) { return -ENODEV; } static inline int iommu_unregister_device_fault_handler(struct device *dev) { return 0; } static inline int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) { return -ENODEV; } static inline int iommu_page_response(struct device *dev, struct iommu_page_response *msg) { return -ENODEV; } static inline int iommu_group_id(struct iommu_group *group) { return -ENODEV; } static inline int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { return -EINVAL; } static inline int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { return -EINVAL; } static inline int iommu_device_register(struct iommu_device *iommu) { return -ENODEV; } static inline void iommu_device_set_ops(struct iommu_device *iommu, const struct iommu_ops *ops) { } static inline void iommu_device_set_fwnode(struct iommu_device *iommu, struct fwnode_handle *fwnode) { } static inline struct iommu_device *dev_to_iommu_device(struct device *dev) { return NULL; } static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) { } static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, struct iommu_iotlb_gather *gather, unsigned long iova, size_t size) { } static inline void iommu_device_unregister(struct iommu_device *iommu) { } static inline int iommu_device_sysfs_add(struct iommu_device *iommu, struct device *parent, const struct attribute_group **groups, const char *fmt, ...) { return -ENODEV; } static inline void iommu_device_sysfs_remove(struct iommu_device *iommu) { } static inline int iommu_device_link(struct iommu_device *iommu, struct device *link) { return -EINVAL; } static inline void iommu_device_unlink(struct iommu_device *iommu, struct device *link) { } static inline int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops) { return -ENODEV; } static inline void iommu_fwspec_free(struct device *dev) { } static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) { return -ENODEV; } static inline const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) { return NULL; } static inline bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat) { return false; } static inline bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) { return false; } static inline int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) { return -ENODEV; } static inline int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) { return -ENODEV; } static inline int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) { return -ENODEV; } static inline void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) { } static inline int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) { return -ENODEV; } static inline struct iommu_sva * iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) { return ERR_PTR(-ENODEV); } static inline void iommu_sva_unbind_device(struct iommu_sva *handle) { } static inline int iommu_sva_set_ops(struct iommu_sva *handle, const struct iommu_sva_ops *ops) { return -EINVAL; } static inline int iommu_sva_get_pasid(struct iommu_sva *handle) { return IOMMU_PASID_INVALID; } #endif /* CONFIG_IOMMU_API */ #ifdef CONFIG_IOMMU_DEBUGFS extern
struct dentry *iommu_debugfs_dir; void iommu_debugfs_setup(void); #else static inline void iommu_debugfs_setup(void) {} #endif #endif /* __LINUX_IOMMU_H */ gpio-pxa.h 0000644 00000001073 14722070374 0006450 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __GPIO_PXA_H #define __GPIO_PXA_H #define GPIO_bit(x) (1 << ((x) & 0x1f)) #define gpio_to_bank(gpio) ((gpio) >> 5) /* NOTE: some PXAs have fewer on-chip GPIOs (like PXA255, with 85). * Those cases currently cause holes in the GPIO number space, the * actual number of the last GPIO is recorded by 'pxa_last_gpio'. */ extern int pxa_last_gpio; extern int pxa_irq_to_gpio(int irq); struct pxa_gpio_platform_data { int irq_base; int (*gpio_set_wake)(unsigned int gpio, unsigned int on); }; #endif /* __GPIO_PXA_H */ if_team.h 0000644 00000017677 14722070374 0006351 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/if_team.h - Network team device driver header * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> */ #ifndef _LINUX_IF_TEAM_H_ #define _LINUX_IF_TEAM_H_ #include <linux/netpoll.h> #include <net/sch_generic.h> #include <linux/types.h> #include <uapi/linux/if_team.h> struct team_pcpu_stats { u64 rx_packets; u64 rx_bytes; u64 rx_multicast; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; u32 rx_dropped; u32 tx_dropped; u32 rx_nohandler; }; struct team; struct team_port { struct net_device *dev; struct hlist_node hlist; /* node in enabled ports hash list */ struct list_head list; /* node in ordinary list */ struct team *team; int index; /* index of enabled port. If disabled, it's set to -1 */ bool linkup; /* either state.linkup or user.linkup */ struct { bool linkup; u32 speed; u8 duplex; } state; /* Values set by userspace */ struct { bool linkup; bool linkup_enabled; } user; /* Custom gennetlink interface related flags */ bool changed; bool removed; /* * A place for storing original values of the device before it * become a port. */ struct { unsigned char dev_addr[MAX_ADDR_LEN]; unsigned int mtu; } orig; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *np; #endif s32 priority; /* lower number ~ higher priority */ u16 queue_id; struct list_head qom_list; /* node in queue override mapping list */ struct rcu_head rcu; long mode_priv[0]; }; static inline struct team_port *team_port_get_rcu(const struct net_device *dev) { return rcu_dereference(dev->rx_handler_data); } static inline bool team_port_enabled(struct team_port *port) { return port->index != -1; } static inline bool team_port_txable(struct team_port *port) { return port->linkup && team_port_enabled(port); } static inline bool team_port_dev_txable(const struct net_device *port_dev) { struct team_port *port; bool txable; rcu_read_lock(); port = team_port_get_rcu(port_dev); txable = port ? 
team_port_txable(port) : false; rcu_read_unlock(); return txable; } #ifdef CONFIG_NET_POLL_CONTROLLER static inline void team_netpoll_send_skb(struct team_port *port, struct sk_buff *skb) { struct netpoll *np = port->np; if (np) netpoll_send_skb(np, skb); } #else static inline void team_netpoll_send_skb(struct team_port *port, struct sk_buff *skb) { } #endif struct team_mode_ops { int (*init)(struct team *team); void (*exit)(struct team *team); rx_handler_result_t (*receive)(struct team *team, struct team_port *port, struct sk_buff *skb); bool (*transmit)(struct team *team, struct sk_buff *skb); int (*port_enter)(struct team *team, struct team_port *port); void (*port_leave)(struct team *team, struct team_port *port); void (*port_change_dev_addr)(struct team *team, struct team_port *port); void (*port_enabled)(struct team *team, struct team_port *port); void (*port_disabled)(struct team *team, struct team_port *port); }; extern int team_modeop_port_enter(struct team *team, struct team_port *port); extern void team_modeop_port_change_dev_addr(struct team *team, struct team_port *port); enum team_option_type { TEAM_OPTION_TYPE_U32, TEAM_OPTION_TYPE_STRING, TEAM_OPTION_TYPE_BINARY, TEAM_OPTION_TYPE_BOOL, TEAM_OPTION_TYPE_S32, }; struct team_option_inst_info { u32 array_index; struct team_port *port; /* != NULL if per-port */ }; struct team_gsetter_ctx { union { u32 u32_val; const char *str_val; struct { const void *ptr; u32 len; } bin_val; bool bool_val; s32 s32_val; } data; struct team_option_inst_info *info; }; struct team_option { struct list_head list; const char *name; bool per_port; unsigned int array_size; /* != 0 means the option is array */ enum team_option_type type; int (*init)(struct team *team, struct team_option_inst_info *info); int (*getter)(struct team *team, struct team_gsetter_ctx *ctx); int (*setter)(struct team *team, struct team_gsetter_ctx *ctx); }; extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info); extern void team_options_change_check(struct team *team); struct team_mode { const char *kind; struct module *owner; size_t priv_size; size_t port_priv_size; const struct team_mode_ops *ops; enum netdev_lag_tx_type lag_tx_type; }; #define TEAM_PORT_HASHBITS 4 #define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS) #define TEAM_MODE_PRIV_LONGS 4 #define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS) struct team { struct net_device *dev; /* associated netdevice */ struct team_pcpu_stats __percpu *pcpu_stats; const struct header_ops *header_ops_cache; struct mutex lock; /* used for overall locking, e.g. 
port lists write */ /* * List of enabled ports and their count */ int en_port_count; struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES]; struct list_head port_list; /* list of all ports */ struct list_head option_list; struct list_head option_inst_list; /* list of option instances */ const struct team_mode *mode; struct team_mode_ops ops; bool user_carrier_enabled; bool queue_override_enabled; struct list_head *qom_lists; /* array of queue override mapping lists */ bool port_mtu_change_allowed; bool notifier_ctx; struct { unsigned int count; unsigned int interval; /* in ms */ atomic_t count_pending; struct delayed_work dw; } notify_peers; struct { unsigned int count; unsigned int interval; /* in ms */ atomic_t count_pending; struct delayed_work dw; } mcast_rejoin; struct lock_class_key team_lock_key; long mode_priv[TEAM_MODE_PRIV_LONGS]; }; static inline int team_dev_queue_xmit(struct team *team, struct team_port *port, struct sk_buff *skb) { BUILD_BUG_ON(sizeof(skb->queue_mapping) != sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); skb->dev = port->dev; if (unlikely(netpoll_tx_running(team->dev))) { team_netpoll_send_skb(port, skb); return 0; } return dev_queue_xmit(skb); } static inline struct hlist_head *team_port_index_hash(struct team *team, int port_index) { return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)]; } static inline struct team_port *team_get_port_by_index(struct team *team, int port_index) { struct team_port *port; struct hlist_head *head = team_port_index_hash(team, port_index); hlist_for_each_entry(port, head, hlist) if (port->index == port_index) return port; return NULL; } static inline int team_num_to_port_index(struct team *team, unsigned int num) { int en_port_count = READ_ONCE(team->en_port_count); if (unlikely(!en_port_count)) return 0; return num % en_port_count; } static inline struct team_port *team_get_port_by_index_rcu(struct team *team, int port_index) { struct team_port *port; struct hlist_head *head = team_port_index_hash(team, port_index); hlist_for_each_entry_rcu(port, head, hlist) if (port->index == port_index) return port; return NULL; } static inline struct team_port * team_get_first_port_txable_rcu(struct team *team, struct team_port *port) { struct team_port *cur; if (likely(team_port_txable(port))) return port; cur = port; list_for_each_entry_continue_rcu(cur, &team->port_list, list) if (team_port_txable(cur)) return cur; list_for_each_entry_rcu(cur, &team->port_list, list) { if (cur == port) break; if (team_port_txable(cur)) return cur; } return NULL; } extern int team_options_register(struct team *team, const struct team_option *option, size_t option_count); extern void team_options_unregister(struct team *team, const struct team_option *option, size_t option_count); extern int team_mode_register(const struct team_mode *mode); extern void team_mode_unregister(const struct team_mode *mode); #define TEAM_DEFAULT_NUM_TX_QUEUES 16 #define TEAM_DEFAULT_NUM_RX_QUEUES 16 #define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind) #endif /* _LINUX_IF_TEAM_H_ */ mroute.h 0000644 00000003716 14722070374 0006245 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MROUTE_H #define __LINUX_MROUTE_H #include <linux/in.h> #include <linux/pim.h> #include <net/fib_rules.h> #include <net/fib_notifier.h> #include <uapi/linux/mroute.h> #include <linux/mroute_base.h> #ifdef CONFIG_IP_MROUTE static inline int ip_mroute_opt(int opt) { return opt >= 
MRT_BASE && opt <= MRT_MAX; } int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); int ip_mr_init(void); bool ipmr_rule_default(const struct fib_rule *rule); #else static inline int ip_mroute_setsockopt(struct sock *sock, int optname, char __user *optval, unsigned int optlen) { return -ENOPROTOOPT; } static inline int ip_mroute_getsockopt(struct sock *sock, int optname, char __user *optval, int __user *optlen) { return -ENOPROTOOPT; } static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) { return -ENOIOCTLCMD; } static inline int ip_mr_init(void) { return 0; } static inline int ip_mroute_opt(int opt) { return 0; } static inline bool ipmr_rule_default(const struct fib_rule *rule) { return true; } #endif #define VIFF_STATIC 0x8000 struct mfc_cache_cmp_arg { __be32 mfc_mcastgrp; __be32 mfc_origin; }; /** * struct mfc_cache - multicast routing entries * @_c: Common multicast routing information; has to be first [for casting] * @mfc_mcastgrp: destination multicast group address * @mfc_origin: source address * @cmparg: used for rhashtable comparisons */ struct mfc_cache { struct mr_mfc _c; union { struct { __be32 mfc_mcastgrp; __be32 mfc_origin; }; struct mfc_cache_cmp_arg cmparg; }; }; struct rtmsg; int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, struct rtmsg *rtm, u32 portid); #endif mdev.h 0000644 00000012323 14722070374 0005657 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Mediated device definition * * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. * Author: Neo Jia <cjia@nvidia.com> * Kirti Wankhede <kwankhede@nvidia.com> */ #ifndef MDEV_H #define MDEV_H struct mdev_device; /* * Called by the parent device driver to set the device which represents * this mdev in iommu protection scope. By default, the iommu device is * NULL, which indicates vendor-defined isolation. * * @dev: the mediated device that iommu will isolate. * @iommu_device: a pci device which represents the iommu for @dev. * * Return 0 for success, otherwise negative error value. */ int mdev_set_iommu_device(struct device *dev, struct device *iommu_device); struct device *mdev_get_iommu_device(struct device *dev); /** * struct mdev_parent_ops - Structure to be registered for each parent device to * register the device to the mdev module. * * @owner: The module owner. * @dev_attr_groups: Attributes of the parent device. * @mdev_attr_groups: Attributes of the mediated device. * @supported_type_groups: Attributes to define supported types. It is mandatory * to provide supported types. * @create: Called to allocate basic resources in parent device's * driver for a particular mediated device. It is * mandatory to provide create ops. * @kobj: kobject of type for which 'create' is called. * @mdev: mdev_device structure of the mediated device * that is being created * Returns integer: success (0) or error (< 0) * @remove: Called to free resources in parent device's driver for * a mediated device. It is mandatory to provide 'remove' * ops. * @mdev: mdev_device structure which is being * destroyed * Returns integer: success (0) or error (< 0) * @open: Open mediated device. * @mdev: mediated device. * Returns integer: success (0) or error (< 0) * @release: release mediated device * @mdev: mediated device.
* @read: Read emulation callback * @mdev: mediated device structure * @buf: read buffer * @count: number of bytes to read * @ppos: address. * Returns number of bytes read on success, or an error. * @write: Write emulation callback * @mdev: mediated device structure * @buf: write buffer * @count: number of bytes to be written * @ppos: address. * Returns number of bytes written on success, or an error. * @ioctl: IOCTL callback * @mdev: mediated device structure * @cmd: ioctl command * @arg: arguments to ioctl * @mmap: mmap callback * @mdev: mediated device structure * @vma: vma structure * Parent devices that support mediated devices should be registered with the * mdev module with an mdev_parent_ops structure. **/ struct mdev_parent_ops { struct module *owner; const struct attribute_group **dev_attr_groups; const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; int (*create)(struct kobject *kobj, struct mdev_device *mdev); int (*remove)(struct mdev_device *mdev); int (*open)(struct mdev_device *mdev); void (*release)(struct mdev_device *mdev); ssize_t (*read)(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos); ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, size_t count, loff_t *ppos); long (*ioctl)(struct mdev_device *mdev, unsigned int cmd, unsigned long arg); int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); }; /* interface for exporting mdev supported type attributes */ struct mdev_type_attribute { struct attribute attr; ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf); ssize_t (*store)(struct kobject *kobj, struct device *dev, const char *buf, size_t count); }; #define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \ struct mdev_type_attribute mdev_type_attr_##_name = \ __ATTR(_name, _mode, _show, _store) #define MDEV_TYPE_ATTR_RW(_name) \ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name) #define MDEV_TYPE_ATTR_RO(_name) \ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name) #define MDEV_TYPE_ATTR_WO(_name) \ struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name) /** * struct mdev_driver - Mediated device driver * @name: driver name * @probe: called when new device created * @remove: called when device removed * @driver: device driver structure * **/ struct mdev_driver { const char *name; int (*probe)(struct device *dev); void (*remove)(struct device *dev); struct device_driver driver; }; #define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver) void *mdev_get_drvdata(struct mdev_device *mdev); void mdev_set_drvdata(struct mdev_device *mdev, void *data); const guid_t *mdev_uuid(struct mdev_device *mdev); extern struct bus_type mdev_bus_type; int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops); void mdev_unregister_device(struct device *dev); int mdev_register_driver(struct mdev_driver *drv, struct module *owner); void mdev_unregister_driver(struct mdev_driver *drv); struct device *mdev_parent_dev(struct mdev_device *mdev); struct device *mdev_dev(struct mdev_device *mdev); struct mdev_device *mdev_from_dev(struct device *dev); #endif /* MDEV_H */ cmdline-parser.h 0000644 00000002326 14722070374 0007633 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Parse the command line to get the partitions information.
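 *
 * The accepted syntax is, roughly (see the structures below; the
 * authoritative grammar is in Documentation/block/cmdline-partition.rst):
 *
 *	blkdevparts=<blkdev-id>:<size>[@<offset>](<name>)[,...][;<blkdev-id>:...]
 *
 * e.g. "blkdevparts=mmcblk0:1M(boot),512K(env),-(rootfs)" describes three
 * partitions on mmcblk0, where "-" means "use the remaining space".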
* * Written by Cai Zhiyong <caizhiyong@huawei.com> * */ #ifndef CMDLINEPARSEH #define CMDLINEPARSEH #include <linux/blkdev.h> #include <linux/fs.h> #include <linux/slab.h> /* partition flags */ #define PF_RDONLY 0x01 /* Device is read only */ #define PF_POWERUP_LOCK 0x02 /* Always locked after reset */ struct cmdline_subpart { char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */ sector_t from; sector_t size; int flags; struct cmdline_subpart *next_subpart; }; struct cmdline_parts { char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */ unsigned int nr_subparts; struct cmdline_subpart *subpart; struct cmdline_parts *next_parts; }; void cmdline_parts_free(struct cmdline_parts **parts); int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline); struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, const char *bdev); int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, int slot, int (*add_part)(int, struct cmdline_subpart *, void *), void *param); #endif /* CMDLINEPARSEH */ numa.h 0000644 00000000444 14722070374 0005665 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_NUMA_H #define _LINUX_NUMA_H #ifdef CONFIG_NODES_SHIFT #define NODES_SHIFT CONFIG_NODES_SHIFT #else #define NODES_SHIFT 0 #endif #define MAX_NUMNODES (1 << NODES_SHIFT) #define NUMA_NO_NODE (-1) #endif /* _LINUX_NUMA_H */ delayed_call.h 0000644 00000001305 14722070374 0007324 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _DELAYED_CALL_H #define _DELAYED_CALL_H /* * Poor man's closures; I wish we could've done them sanely polymorphic, * but... */ struct delayed_call { void (*fn)(void *); void *arg; }; #define DEFINE_DELAYED_CALL(name) struct delayed_call name = {NULL, NULL} /* I really wish we had closures with sane typechecking... */ static inline void set_delayed_call(struct delayed_call *call, void (*fn)(void *), void *arg) { call->fn = fn; call->arg = arg; } static inline void do_delayed_call(struct delayed_call *call) { if (call->fn) call->fn(call->arg); } static inline void clear_delayed_call(struct delayed_call *call) { call->fn = NULL; } #endif pagewalk.h 0000644 00000004624 14722070374 0006524 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGEWALK_H #define _LINUX_PAGEWALK_H #include <linux/mm.h> struct mm_walk; /** * mm_walk_ops - callbacks for walk_page_range * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry * this handler should only handle pud_trans_huge() puds. * the pmd_entry or pte_entry callbacks will be used for * regular PUDs. * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry * this handler is required to be able to handle * pmd_trans_huge() pmds. They may simply choose to * split_huge_page() instead of handling it explicitly. * @pte_entry: if set, called for each non-empty PTE (4th-level) entry * @pte_hole: if set, called for each hole at all levels * @hugetlb_entry: if set, called for each hugetlb entry * @test_walk: caller specific callback function to determine whether * we walk over the current vma or not. 
Returning 0 means * "do page table walk over the current vma", returning * a negative value means "abort current page table walk * right now" and returning 1 means "skip the current vma" */ struct mm_walk_ops { int (*pud_entry)(pud_t *pud, unsigned long addr, unsigned long next, struct mm_walk *walk); int (*pmd_entry)(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk); int (*pte_entry)(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk); int (*pte_hole)(unsigned long addr, unsigned long next, struct mm_walk *walk); int (*hugetlb_entry)(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long next, struct mm_walk *walk); int (*test_walk)(unsigned long addr, unsigned long next, struct mm_walk *walk); }; /** * mm_walk - walk_page_range data * @ops: operation to call during the walk * @mm: mm_struct representing the target process of page table walk * @vma: vma currently walked (NULL if walking outside vmas) * @private: private data for callbacks' usage * * (see the comment on walk_page_range() for more details) */ struct mm_walk { const struct mm_walk_ops *ops; struct mm_struct *mm; struct vm_area_struct *vma; void *private; }; int walk_page_range(struct mm_struct *mm, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, void *private); int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops, void *private); #endif /* _LINUX_PAGEWALK_H */ iova.h 0000644 00000016137 14722070374 0005671 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2006, Intel Corporation. * * Copyright (C) 2006-2008 Intel Corporation * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> */ #ifndef _IOVA_H_ #define _IOVA_H_ #include <linux/types.h> #include <linux/kernel.h> #include <linux/rbtree.h> #include <linux/atomic.h> #include <linux/dma-mapping.h> /* iova structure */ struct iova { struct rb_node node; unsigned long pfn_hi; /* Highest allocated pfn */ unsigned long pfn_lo; /* Lowest allocated pfn */ }; struct iova_magazine; struct iova_cpu_rcache; #define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */ #define MAX_GLOBAL_MAGS 32 /* magazines per bin */ struct iova_rcache { spinlock_t lock; unsigned long depot_size; struct iova_magazine *depot[MAX_GLOBAL_MAGS]; struct iova_cpu_rcache __percpu *cpu_rcaches; }; struct iova_domain; /* Call-Back from IOVA code into IOMMU drivers */ typedef void (* iova_flush_cb)(struct iova_domain *domain); /* Destructor for per-entry data */ typedef void (* iova_entry_dtor)(unsigned long data); /* Number of entries per Flush Queue */ #define IOVA_FQ_SIZE 256 /* Timeout (in ms) after which entries are flushed from the Flush-Queue */ #define IOVA_FQ_TIMEOUT 10 /* Flush Queue entry for deferred flushing */ struct iova_fq_entry { unsigned long iova_pfn; unsigned long pages; unsigned long data; u64 counter; /* Flush counter when this entry was added */ }; /* Per-CPU Flush Queue structure */ struct iova_fq { struct iova_fq_entry entries[IOVA_FQ_SIZE]; unsigned head, tail; spinlock_t lock; }; /* holds all the iova translations for a domain */ struct iova_domain { spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ struct rb_root rbroot; /* iova domain rbtree root */ struct rb_node *cached_node; /* Save last alloced node */ struct rb_node *cached32_node; /* Save last 32-bit alloced node */ unsigned long granule; /* pfn granularity for this domain */ unsigned long start_pfn; /* Lower limit for this domain */ unsigned long
dma_32bit_pfn; unsigned long max32_alloc_size; /* Size of last failed allocation */ struct iova_fq __percpu *fq; /* Flush Queue */ atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that have been started */ atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that have been finished */ struct iova anchor; /* rbtree lookup anchor */ struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU TLBs */ iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for iova entry */ struct timer_list fq_timer; /* Timer to regularly empty the flush-queues */ atomic_t fq_timer_on; /* 1 when timer is active, 0 when not */ }; static inline unsigned long iova_size(struct iova *iova) { return iova->pfn_hi - iova->pfn_lo + 1; } static inline unsigned long iova_shift(struct iova_domain *iovad) { return __ffs(iovad->granule); } static inline unsigned long iova_mask(struct iova_domain *iovad) { return iovad->granule - 1; } static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) { return iova & iova_mask(iovad); } static inline size_t iova_align(struct iova_domain *iovad, size_t size) { return ALIGN(size, iovad->granule); } static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) { return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); } static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) { return iova >> iova_shift(iovad); } #if IS_REACHABLE(CONFIG_IOMMU_IOVA) int iova_cache_get(void); void iova_cache_put(void); struct iova *alloc_iova_mem(void); void free_iova_mem(struct iova *iova); void free_iova(struct iova_domain *iovad, unsigned long pfn); void __free_iova(struct iova_domain *iovad, struct iova *iova); struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool size_aligned); void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size); void queue_iova(struct iova_domain *iovad, unsigned long pfn, unsigned long pages, unsigned long data); unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool flush_rcache); struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); bool has_iova_flush_queue(struct iova_domain *iovad); int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); struct iova *split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); #else static inline int iova_cache_get(void) { return -ENOTSUPP; } static inline void iova_cache_put(void) { } static inline struct iova *alloc_iova_mem(void) { return NULL; } static inline void free_iova_mem(struct iova *iova) { } static inline void free_iova(struct iova_domain *iovad, unsigned long pfn) { } static inline void __free_iova(struct iova_domain *iovad, struct iova *iova) { } static inline struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool size_aligned) { return NULL; } static inline void free_iova_fast(struct
iova_domain *iovad, unsigned long pfn, unsigned long size) { } static inline void queue_iova(struct iova_domain *iovad, unsigned long pfn, unsigned long pages, unsigned long data) { } static inline unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool flush_rcache) { return 0; } static inline struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi) { return NULL; } static inline void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) { } static inline void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn) { } static inline bool has_iova_flush_queue(struct iova_domain *iovad) { return false; } static inline int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) { return -ENODEV; } static inline struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) { return NULL; } static inline void put_iova_domain(struct iova_domain *iovad) { } static inline struct iova *split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi) { return NULL; } static inline void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad) { } #endif #endif mmu_context.h 0000644 00000000572 14722070374 0007271 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMU_CONTEXT_H #define _LINUX_MMU_CONTEXT_H #include <asm/mmu_context.h> struct mm_struct; void use_mm(struct mm_struct *mm); void unuse_mm(struct mm_struct *mm); /* Architectures that care about IRQ state in switch_mm can override this. */ #ifndef switch_mm_irqs_off # define switch_mm_irqs_off switch_mm #endif #endif errseq.h 0000644 00000000576 14722070374 0006234 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * See Documentation/core-api/errseq.rst and lib/errseq.c */ #ifndef _LINUX_ERRSEQ_H #define _LINUX_ERRSEQ_H typedef u32 errseq_t; errseq_t errseq_set(errseq_t *eseq, int err); errseq_t errseq_sample(errseq_t *eseq); int errseq_check(errseq_t *eseq, errseq_t since); int errseq_check_and_advance(errseq_t *eseq, errseq_t *since); #endif libfdt_env.h 0000644 00000000753 14722070374 0007044 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LIBFDT_ENV_H #define LIBFDT_ENV_H #include <linux/kernel.h> /* For INT_MAX */ #include <linux/string.h> #include <asm/byteorder.h> #define INT32_MAX S32_MAX #define UINT32_MAX U32_MAX typedef __be16 fdt16_t; typedef __be32 fdt32_t; typedef __be64 fdt64_t; #define fdt32_to_cpu(x) be32_to_cpu(x) #define cpu_to_fdt32(x) cpu_to_be32(x) #define fdt64_to_cpu(x) be64_to_cpu(x) #define cpu_to_fdt64(x) cpu_to_be64(x) #endif /* LIBFDT_ENV_H */ shdma-base.h 0000644 00000010423 14722070374 0006727 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 * * Dmaengine driver base library for DMA controllers, found on SH-based SoCs * * extracted from shdma.c and headers * * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 
*/ #ifndef SHDMA_BASE_H #define SHDMA_BASE_H #include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/types.h> /** * shdma_pm_state - DMA channel PM state * SHDMA_PM_ESTABLISHED: either idle or during data transfer * SHDMA_PM_BUSY: during the transfer preparation, when we have to * drop the lock temporarily * SHDMA_PM_PENDING: transfers pending */ enum shdma_pm_state { SHDMA_PM_ESTABLISHED, SHDMA_PM_BUSY, SHDMA_PM_PENDING, }; struct device; /* * Drivers, using this library are expected to embed struct shdma_dev, * struct shdma_chan, struct shdma_desc, and struct shdma_slave * in their respective device, channel, descriptor and slave objects. */ struct shdma_slave { int slave_id; }; struct shdma_desc { struct list_head node; struct dma_async_tx_descriptor async_tx; enum dma_transfer_direction direction; size_t partial; dma_cookie_t cookie; int chunks; int mark; bool cyclic; /* used as cyclic transfer */ }; struct shdma_chan { spinlock_t chan_lock; /* Channel operation lock */ struct list_head ld_queue; /* Link descriptors queue */ struct list_head ld_free; /* Free link descriptors */ struct dma_chan dma_chan; /* DMA channel */ struct device *dev; /* Channel device */ void *desc; /* buffer for descriptor array */ int desc_num; /* desc count */ size_t max_xfer_len; /* max transfer length */ int id; /* Raw id of this channel */ int irq; /* Channel IRQ */ int slave_id; /* Client ID for slave DMA */ int real_slave_id; /* argument passed to filter function */ int hw_req; /* DMA request line for slave DMA - same * as MID/RID, used with DT */ enum shdma_pm_state pm_state; }; /** * struct shdma_ops - simple DMA driver operations * desc_completed: return true, if this is the descriptor, that just has * completed (atomic) * halt_channel: stop DMA channel operation (atomic) * channel_busy: return true, if the channel is busy (atomic) * slave_addr: return slave DMA address * desc_setup: set up the hardware specific descriptor portion (atomic) * set_slave: bind channel to a slave * setup_xfer: configure channel hardware for operation (atomic) * start_xfer: start the DMA transfer (atomic) * embedded_desc: return Nth struct shdma_desc pointer from the * descriptor array * chan_irq: process channel IRQ, return true if a transfer has * completed (atomic) */ struct shdma_ops { bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *); void (*halt_channel)(struct shdma_chan *); bool (*channel_busy)(struct shdma_chan *); dma_addr_t (*slave_addr)(struct shdma_chan *); int (*desc_setup)(struct shdma_chan *, struct shdma_desc *, dma_addr_t, dma_addr_t, size_t *); int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool); void (*setup_xfer)(struct shdma_chan *, int); void (*start_xfer)(struct shdma_chan *, struct shdma_desc *); struct shdma_desc *(*embedded_desc)(void *, int); bool (*chan_irq)(struct shdma_chan *, int); size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *); }; struct shdma_dev { struct dma_device dma_dev; struct shdma_chan **schan; const struct shdma_ops *ops; size_t desc_size; }; #define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \ i < (d)->dma_dev.chancnt; c = (d)->schan[++i]) int shdma_request_irq(struct shdma_chan *, int, unsigned long, const char *); bool shdma_reset(struct shdma_dev *sdev); void shdma_chan_probe(struct shdma_dev *sdev, struct shdma_chan *schan, int id); void shdma_chan_remove(struct shdma_chan *schan); int shdma_init(struct device *dev, struct shdma_dev *sdev, int chan_num); void 
shdma_cleanup(struct shdma_dev *sdev); #if IS_ENABLED(CONFIG_SH_DMAE_BASE) bool shdma_chan_filter(struct dma_chan *chan, void *arg); #else static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg) { return false; } #endif #endif vexpress.h 0000644 00000002075 14722070374 0006606 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright (C) 2012 ARM Limited */ #ifndef _LINUX_VEXPRESS_H #define _LINUX_VEXPRESS_H #include <linux/device.h> #include <linux/regmap.h> #define VEXPRESS_SITE_MB 0 #define VEXPRESS_SITE_DB1 1 #define VEXPRESS_SITE_DB2 2 #define VEXPRESS_SITE_MASTER 0xf /* Config infrastructure */ void vexpress_config_set_master(u32 site); u32 vexpress_config_get_master(void); void vexpress_config_lock(void *arg); void vexpress_config_unlock(void *arg); int vexpress_config_get_topo(struct device_node *node, u32 *site, u32 *position, u32 *dcc); /* Config bridge API */ struct vexpress_config_bridge_ops { struct regmap * (*regmap_init)(struct device *dev, void *context); void (*regmap_exit)(struct regmap *regmap, void *context); }; struct device *vexpress_config_bridge_register(struct device *parent, struct vexpress_config_bridge_ops *ops, void *context); /* Config regmap API */ struct regmap *devm_regmap_init_vexpress_config(struct device *dev); /* Platform control */ void vexpress_flags_set(u32 data); #endif kmemleak.h 0000644 00000006425 14722070374 0006520 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/kmemleak.h * * Copyright (C) 2008 ARM Limited * Written by Catalin Marinas <catalin.marinas@arm.com> */ #ifndef __KMEMLEAK_H #define __KMEMLEAK_H #include <linux/slab.h> #include <linux/vmalloc.h> #ifdef CONFIG_DEBUG_KMEMLEAK extern void kmemleak_init(void) __init; extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp) __ref; extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, gfp_t gfp) __ref; extern void kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp) __ref; extern void kmemleak_free(const void *ptr) __ref; extern void kmemleak_free_part(const void *ptr, size_t size) __ref; extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; extern void kmemleak_update_trace(const void *ptr) __ref; extern void kmemleak_not_leak(const void *ptr) __ref; extern void kmemleak_ignore(const void *ptr) __ref; extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; extern void kmemleak_no_scan(const void *ptr) __ref; extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, gfp_t gfp) __ref; extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref; extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref; extern void kmemleak_ignore_phys(phys_addr_t phys) __ref; static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, int min_count, slab_flags_t flags, gfp_t gfp) { if (!(flags & SLAB_NOLEAKTRACE)) kmemleak_alloc(ptr, size, min_count, gfp); } static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags) { if (!(flags & SLAB_NOLEAKTRACE)) kmemleak_free(ptr); } static inline void kmemleak_erase(void **ptr) { *ptr = NULL; } #else static inline void kmemleak_init(void) { } static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp) { } static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, int min_count, slab_flags_t flags, gfp_t gfp) { } static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, gfp_t gfp) { } 
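/*
 * Usage sketch (a hypothetical caller, not part of this header): an object
 * whose only live reference is kept somewhere the scanner cannot see it,
 * e.g. in a hardware register, can be exempted from reporting:
 *
 *	obj = kmalloc(size, GFP_KERNEL);
 *	kmemleak_ignore(obj);
 *
 * With CONFIG_DEBUG_KMEMLEAK disabled, the stubs in this branch make such
 * calls compile away to nothing.
 */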
static inline void kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp) { } static inline void kmemleak_free(const void *ptr) { } static inline void kmemleak_free_part(const void *ptr, size_t size) { } static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags) { } static inline void kmemleak_free_percpu(const void __percpu *ptr) { } static inline void kmemleak_update_trace(const void *ptr) { } static inline void kmemleak_not_leak(const void *ptr) { } static inline void kmemleak_ignore(const void *ptr) { } static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) { } static inline void kmemleak_erase(void **ptr) { } static inline void kmemleak_no_scan(const void *ptr) { } static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, gfp_t gfp) { } static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size) { } static inline void kmemleak_not_leak_phys(phys_addr_t phys) { } static inline void kmemleak_ignore_phys(phys_addr_t phys) { } #endif /* CONFIG_DEBUG_KMEMLEAK */ #endif /* __KMEMLEAK_H */ dma-buf.h 0000644 00000035113 14722070374 0006241 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Header file for dma buffer sharing framework. * * Copyright(C) 2011 Linaro Limited. All rights reserved. * Author: Sumit Semwal <sumit.semwal@ti.com> * * Many thanks to linaro-mm-sig list, and specially * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and * Daniel Vetter <daniel@ffwll.ch> for their support in creation and * refining of this idea. */ #ifndef __DMA_BUF_H__ #define __DMA_BUF_H__ #include <linux/file.h> #include <linux/err.h> #include <linux/scatterlist.h> #include <linux/list.h> #include <linux/dma-mapping.h> #include <linux/fs.h> #include <linux/dma-fence.h> #include <linux/wait.h> struct device; struct dma_buf; struct dma_buf_attachment; /** * struct dma_buf_ops - operations possible on struct dma_buf * @vmap: [optional] creates a virtual mapping for the buffer into kernel * address space. Same restrictions as for vmap and friends apply. * @vunmap: [optional] unmaps a vmap from the buffer */ struct dma_buf_ops { /** * @cache_sgt_mapping: * * If true the framework will cache the first mapping made for each * attachment. This avoids creating mappings for attachments multiple * times. */ bool cache_sgt_mapping; /** * @attach: * * This is called from dma_buf_attach() to make sure that a given * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters * which support buffer objects in special locations like VRAM or * device-specific carveout areas should check whether the buffer could * be move to system memory (or directly accessed by the provided * device), and otherwise need to fail the attach operation. * * The exporter should also in general check whether the current * allocation fullfills the DMA constraints of the new device. If this * is not the case, and the allocation cannot be moved, it should also * fail the attach operation. * * Any exporter-private housekeeping data can be stored in the * &dma_buf_attachment.priv pointer. * * This callback is optional. * * Returns: * * 0 on success, negative error code on failure. It might return -EBUSY * to signal that backing storage is already allocated and incompatible * with the requirements of requesting device. */ int (*attach)(struct dma_buf *, struct dma_buf_attachment *); /** * @detach: * * This is called by dma_buf_detach() to release a &dma_buf_attachment. 
* Provided so that exporters can clean up any housekeeping for an * &dma_buf_attachment. * * This callback is optional. */ void (*detach)(struct dma_buf *, struct dma_buf_attachment *); /** * @map_dma_buf: * * This is called by dma_buf_map_attachment() and is used to map a * shared &dma_buf into device address space, and it is mandatory. It * can only be called if @attach has been called successfully. This * essentially pins the DMA buffer into place, and it cannot be moved * any more * * This call may sleep, e.g. when the backing storage first needs to be * allocated, or moved to a location suitable for all currently attached * devices. * * Note that any specific buffer attributes required for this function * should get added to device_dma_parameters accessible via * &device.dma_params from the &dma_buf_attachment. The @attach callback * should also check these constraints. * * If this is being called for the first time, the exporter can now * choose to scan through the list of attachments for this buffer, * collate the requirements of the attached devices, and choose an * appropriate backing storage for the buffer. * * Based on enum dma_data_direction, it might be possible to have * multiple users accessing at the same time (for reading, maybe), or * any other kind of sharing that the exporter might wish to make * available to buffer-users. * * Returns: * * A &sg_table scatter list of or the backing storage of the DMA buffer, * already mapped into the device address space of the &device attached * with the provided &dma_buf_attachment. * * On failure, returns a negative error value wrapped into a pointer. * May also return -EINTR when a signal was received while being * blocked. */ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); /** * @unmap_dma_buf: * * This is called by dma_buf_unmap_attachment() and should unmap and * release the &sg_table allocated in @map_dma_buf, and it is mandatory. * It should also unpin the backing storage if this is the last mapping * of the DMA buffer, it the exporter supports backing storage * migration. */ void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY * if the call would block. */ /** * @release: * * Called after the last dma_buf_put to release the &dma_buf, and * mandatory. */ void (*release)(struct dma_buf *); /** * @begin_cpu_access: * * This is called from dma_buf_begin_cpu_access() and allows the * exporter to ensure that the memory is actually available for cpu * access - the exporter might need to allocate or swap-in and pin the * backing storage. The exporter also needs to ensure that cpu access is * coherent for the access direction. The direction can be used by the * exporter to optimize the cache flushing, i.e. access with a different * direction (read instead of write) might return stale or even bogus * data (e.g. when the exporter needs to copy the data to temporary * storage). * * This callback is optional. * * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command * from userspace (where storage shouldn't be pinned to avoid handing * de-factor mlock rights to userspace) and for the kernel-internal * users of the various kmap interfaces, where the backing storage must * be pinned to guarantee that the atomic kmap calls can succeed. Since * there's no in-kernel users of the kmap interfaces yet this isn't a * real problem. 
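 *
 * An importer-side sketch (editorial addition, not from the original
 * header; dmabuf is assumed to be a valid &struct dma_buf):
 *
 *	int ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *
 *	if (ret)
 *		return ret;
 *	... CPU reads of the backing storage go here ...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);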
* * Returns: * * 0 on success or a negative error code on failure. This can for * example fail when the backing storage can't be allocated. Can also * return -ERESTARTSYS or -EINTR when the call has been interrupted and * needs to be restarted. */ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); /** * @end_cpu_access: * * This is called from dma_buf_end_cpu_access() when the importer is * done accessing the CPU. The exporter can use this to flush caches and * unpin any resources pinned in @begin_cpu_access. * The result of any dma_buf kmap calls after end_cpu_access is * undefined. * * This callback is optional. * * Returns: * * 0 on success or a negative error code on failure. Can return * -ERESTARTSYS or -EINTR when the call has been interrupted and needs * to be restarted. */ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); /** * @mmap: * * This callback is used by the dma_buf_mmap() function * * Note that the mapping needs to be incoherent, userspace is expected * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface. * * Because dma-buf buffers have invariant size over their lifetime, the * dma-buf core checks whether a vma is too large and rejects such * mappings. The exporter hence does not need to duplicate this check. * Drivers do not need to check this themselves. * * If an exporter needs to manually flush caches and hence needs to fake * coherency for mmap support, it needs to be able to zap all the ptes * pointing at the backing storage. Now linux mm needs a struct * address_space associated with the struct file stored in vma->vm_file * to do that with the function unmap_mapping_range. But the dma_buf * framework only backs every dma_buf fd with the anon_file struct file, * i.e. all dma_bufs share the same file. * * Hence exporters need to setup their own file (and address_space) * association by setting vma->vm_file and adjusting vma->vm_pgoff in * the dma_buf mmap callback. In the specific case of a gem driver the * exporter could use the shmem file already provided by gem (and set * vm_pgoff = 0). Exporters can then zap ptes by unmapping the * corresponding range of the struct address_space associated with their * own file. * * This callback is optional. * * Returns: * * 0 on success or a negative error code on failure. */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); /** * @map: * * Maps a page from the buffer into kernel address space. The page is * specified by offset into the buffer in PAGE_SIZE units. * * This callback is optional. * * Returns: * * Virtual address pointer where requested page can be accessed. NULL * on error or when this function is unimplemented by the exporter. */ void *(*map)(struct dma_buf *, unsigned long); /** * @unmap: * * Unmaps a page from the buffer. Page offset and address pointer should * be the same as the one passed to and returned by matching call to map. * * This callback is optional. */ void (*unmap)(struct dma_buf *, unsigned long, void *); void *(*vmap)(struct dma_buf *); void (*vunmap)(struct dma_buf *, void *vaddr); }; /** * struct dma_buf - shared buffer object * @size: size of the buffer * @file: file pointer used for sharing buffers across, and for refcounting. * @attachments: list of dma_buf_attachment that denotes all devices attached. * @ops: dma_buf_ops associated with this buffer object. 
* @lock: used internally to serialize list manipulation, attach/detach and * vmap/unmap, and accesses to name * @vmapping_counter: used internally to refcnt the vmaps * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 * @exp_name: name of the exporter; useful for debugging. * @name: userspace-provided name; useful for accounting and debugging. * @owner: pointer to exporter module; used for refcounting when exporter is a * kernel module. * @list_node: node for dma_buf accounting and debugging. * @priv: exporter specific private data for this buffer object. * @resv: reservation object linked to this dma-buf * @poll: for userspace poll support * @cb_excl: for userspace poll support * @cb_shared: for userspace poll support * * This represents a shared buffer, created by calling dma_buf_export(). The * userspace representation is a normal file descriptor, which can be created by * calling dma_buf_fd(). * * Shared dma buffers are reference counted using dma_buf_put() and * get_dma_buf(). * * Device DMA access is handled by the separate &struct dma_buf_attachment. */ struct dma_buf { size_t size; struct file *file; struct list_head attachments; const struct dma_buf_ops *ops; struct mutex lock; unsigned vmapping_counter; void *vmap_ptr; const char *exp_name; const char *name; spinlock_t name_lock; /* spinlock to protect name access */ struct module *owner; struct list_head list_node; void *priv; struct dma_resv *resv; /* poll support */ wait_queue_head_t poll; struct dma_buf_poll_cb_t { struct dma_fence_cb cb; wait_queue_head_t *poll; __poll_t active; } cb_excl, cb_shared; }; /** * struct dma_buf_attachment - holds device-buffer attachment data * @dmabuf: buffer for this attachment. * @dev: device attached to the buffer. * @node: list of dma_buf_attachment. * @sgt: cached mapping. * @dir: direction of cached mapping. * @priv: exporter specific attachment data. * * This structure holds the attachment information between the dma_buf buffer * and its user device(s). The list contains one attachment struct per device * attached to the buffer. * * An attachment is created by calling dma_buf_attach(), and released again by * calling dma_buf_detach(). The DMA mapping itself needed to initiate a * transfer is created by dma_buf_map_attachment() and freed again by calling * dma_buf_unmap_attachment(). */ struct dma_buf_attachment { struct dma_buf *dmabuf; struct device *dev; struct list_head node; struct sg_table *sgt; enum dma_data_direction dir; void *priv; }; /** * struct dma_buf_export_info - holds information needed to export a dma_buf * @exp_name: name of the exporter - useful for debugging. * @owner: pointer to exporter module - used for refcounting kernel module * @ops: Attach allocator-defined dma buf ops to the new buffer * @size: Size of the buffer * @flags: mode flags for the file * @resv: reservation-object, NULL to allocate default one * @priv: Attach private data of allocator to this buffer * * This structure holds the information required to export the buffer. Used * with dma_buf_export() only. */ struct dma_buf_export_info { const char *exp_name; struct module *owner; const struct dma_buf_ops *ops; size_t size; int flags; struct dma_resv *resv; void *priv; }; /** * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters * @name: export-info name * * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info, * zeroes it out and pre-populates exp_name in it. 
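 *
 * Typical exporter-side use (editorial sketch, not from the original
 * header; my_dmabuf_ops, my_obj and size are placeholders):
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = my_obj;
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);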
*/ #define DEFINE_DMA_BUF_EXPORT_INFO(name) \ struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ .owner = THIS_MODULE } /** * get_dma_buf - convenience wrapper for get_file. * @dmabuf: [in] pointer to dma_buf * * Increments the reference count on the dma-buf, needed in case of drivers * that either need to create additional references to the dmabuf on the * kernel side. For example, an exporter that needs to keep a dmabuf ptr * so that subsequent exports don't create a new dmabuf. */ static inline void get_dma_buf(struct dma_buf *dmabuf) { get_file(dmabuf->file); } struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev); void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *dmabuf_attach); struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info); int dma_buf_fd(struct dma_buf *dmabuf, int flags); struct dma_buf *dma_buf_get(int fd); void dma_buf_put(struct dma_buf *dmabuf); struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, enum dma_data_direction); void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); int dma_buf_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); void *dma_buf_kmap(struct dma_buf *, unsigned long); void dma_buf_kunmap(struct dma_buf *, unsigned long, void *); int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); void *dma_buf_vmap(struct dma_buf *); void dma_buf_vunmap(struct dma_buf *, void *vaddr); #endif /* __DMA_BUF_H__ */ smpboot.h 0000644 00000003275 14722070374 0006415 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SMPBOOT_H #define _LINUX_SMPBOOT_H #include <linux/types.h> struct task_struct; /* Cookie handed to the thread_fn*/ struct smpboot_thread_data; /** * struct smp_hotplug_thread - CPU hotplug related thread descriptor * @store: Pointer to per cpu storage for the task pointers * @list: List head for core management * @thread_should_run: Check whether the thread should run or not. Called with * preemption disabled. * @thread_fn: The associated thread function * @create: Optional setup function, called when the thread gets * created (Not called from the thread context) * @setup: Optional setup function, called when the thread gets * operational the first time * @cleanup: Optional cleanup function, called when the thread * should stop (module exit) * @park: Optional park function, called when the thread is * parked (cpu offline) * @unpark: Optional unpark function, called when the thread is * unparked (cpu online) * @selfparking: Thread is not parked by the park function. 
* @thread_comm: The base name of the thread */ struct smp_hotplug_thread { struct task_struct * __percpu *store; struct list_head list; int (*thread_should_run)(unsigned int cpu); void (*thread_fn)(unsigned int cpu); void (*create)(unsigned int cpu); void (*setup)(unsigned int cpu); void (*cleanup)(unsigned int cpu, bool online); void (*park)(unsigned int cpu); void (*unpark)(unsigned int cpu); bool selfparking; const char *thread_comm; }; int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); #endif mdio-gpio.h 0000644 00000000261 14722070374 0006606 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MDIO_GPIO_H #define __LINUX_MDIO_GPIO_H #define MDIO_GPIO_MDC 0 #define MDIO_GPIO_MDIO 1 #define MDIO_GPIO_MDO 2 #endif n_r3964.h 0000644 00000010073 14722070374 0006030 0 ustar 00 /* r3964 linediscipline for linux * * ----------------------------------------------------------- * Copyright by * Philips Automation Projects * Kassel (Germany) * ----------------------------------------------------------- * This software may be used and distributed according to the terms of * the GNU General Public License, incorporated herein by reference. * * Author: * L. Haag * * $Log: r3964.h,v $ * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de> * Fixed HZ usage on 2.6 kernels * Removed unnecessary include * * Revision 1.3 2001/03/18 13:02:24 dwmw2 * Fix timer usage, use spinlocks properly. * * Revision 1.2 2001/03/18 12:53:15 dwmw2 * Merge changes in 2.4.2 * * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2 * This'll screw the version control * * Revision 1.6 1998/09/30 00:40:38 dwmw2 * Updated to use kernel's N_R3964 if available * * Revision 1.4 1998/04/02 20:29:44 lhaag * select, blocking, ... 
* * Revision 1.3 1998/02/12 18:58:43 root * fixed some memory leaks * calculation of checksum characters * * Revision 1.2 1998/02/07 13:03:17 root * ioctl read_telegram * * Revision 1.1 1998/02/06 19:19:43 root * Initial revision * * */ #ifndef __LINUX_N_R3964_H__ #define __LINUX_N_R3964_H__ #include <linux/param.h> #include <uapi/linux/n_r3964.h> /* * Common ascii handshake characters: */ #define STX 0x02 #define ETX 0x03 #define DLE 0x10 #define NAK 0x15 /* * Timeouts (from milliseconds to jiffies) */ #define R3964_TO_QVZ ((550)*HZ/1000) #define R3964_TO_ZVZ ((220)*HZ/1000) #define R3964_TO_NO_BUF ((400)*HZ/1000) #define R3964_NO_TX_ROOM ((100)*HZ/1000) #define R3964_TO_RX_PANIC ((4000)*HZ/1000) #define R3964_MAX_RETRIES 5 enum { R3964_IDLE, R3964_TX_REQUEST, R3964_TRANSMITTING, R3964_WAIT_ZVZ_BEFORE_TX_RETRY, R3964_WAIT_FOR_TX_ACK, R3964_WAIT_FOR_RX_BUF, R3964_RECEIVING, R3964_WAIT_FOR_BCC, R3964_WAIT_FOR_RX_REPEAT }; /* * All open file-handles are 'clients' and are stored in a linked list: */ struct r3964_message; struct r3964_client_info { spinlock_t lock; struct pid *pid; unsigned int sig_flags; struct r3964_client_info *next; struct r3964_message *first_msg; struct r3964_message *last_msg; struct r3964_block_header *next_block_to_read; int msg_count; }; struct r3964_block_header; /* internal version of client_message: */ struct r3964_message { int msg_id; int arg; int error_code; struct r3964_block_header *block; struct r3964_message *next; }; /* * Header of received block in rx_buf/tx_buf: */ struct r3964_block_header { unsigned int length; /* length in chars without header */ unsigned char *data; /* usually data is located immediately behind this struct */ unsigned int locks; /* only used in rx_buffer */ struct r3964_block_header *next; struct r3964_client_info *owner; /* =NULL in rx_buffer */ }; /* * If rx_buf hasn't enough space to store R3964_MTU chars, * we will reject all incoming STX-requests by sending NAK. 
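 *
 * (Editorial note, not from the original header: the R3964_TO_* macros
 * above convert milliseconds to jiffies, so with HZ=250 the acknowledgement
 * timeout R3964_TO_QVZ evaluates to (550*250)/1000 = 137 jiffies.)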
*/ #define RX_BUF_SIZE 4000 #define TX_BUF_SIZE 4000 #define R3964_MAX_BLOCKS_IN_RX_QUEUE 100 #define R3964_PARITY 0x0001 #define R3964_FRAME 0x0002 #define R3964_OVERRUN 0x0004 #define R3964_UNKNOWN 0x0008 #define R3964_BREAK 0x0010 #define R3964_CHECKSUM 0x0020 #define R3964_ERROR 0x003f #define R3964_BCC 0x4000 #define R3964_DEBUG 0x8000 struct r3964_info { spinlock_t lock; struct tty_struct *tty; unsigned char priority; unsigned char *rx_buf; /* ring buffer */ unsigned char *tx_buf; struct r3964_block_header *rx_first; struct r3964_block_header *rx_last; struct r3964_block_header *tx_first; struct r3964_block_header *tx_last; unsigned int tx_position; unsigned int rx_position; unsigned char last_rx; unsigned char bcc; unsigned int blocks_in_rx_queue; struct mutex read_lock; /* serialize r3964_read */ struct r3964_client_info *firstClient; unsigned int state; unsigned int flags; struct timer_list tmr; int nRetry; }; #endif memremap.h 0000644 00000012555 14722070374 0006536 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MEMREMAP_H_ #define _LINUX_MEMREMAP_H_ #include <linux/ioport.h> #include <linux/percpu-refcount.h> struct resource; struct device; /** * struct vmem_altmap - pre-allocated storage for vmemmap_populate * @base_pfn: base of the entire dev_pagemap mapping * @reserve: pages mapped, but reserved for driver use (relative to @base) * @free: free pages set aside in the mapping for memmap storage * @align: pages reserved to meet allocation alignments * @alloc: track pages consumed, private to vmemmap_populate() */ struct vmem_altmap { const unsigned long base_pfn; const unsigned long end_pfn; const unsigned long reserve; unsigned long free; unsigned long align; unsigned long alloc; }; /* * Specialize ZONE_DEVICE memory into multiple types each having differents * usage. * * MEMORY_DEVICE_PRIVATE: * Device memory that is not directly addressable by the CPU: CPU can neither * read nor write private memory. In this case, we do still have struct pages * backing the device memory. Doing so simplifies the implementation, but it is * important to remember that there are certain points at which the struct page * must be treated as an opaque object, rather than a "normal" struct page. * * A more complete discussion of unaddressable memory may be found in * include/linux/hmm.h and Documentation/vm/hmm.rst. * * MEMORY_DEVICE_FS_DAX: * Host memory that has similar access semantics as System RAM i.e. DMA * coherent and supports page pinning. In support of coordinating page * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a * wakeup event whenever a page is unpinned and becomes idle. This * wakeup is used to coordinate physical address space management (ex: * fs truncate/hole punch) vs pinned pages (ex: device dma). * * MEMORY_DEVICE_DEVDAX: * Host memory that has similar access semantics as System RAM i.e. DMA * coherent and supports page pinning. In contrast to * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax * character device. * * MEMORY_DEVICE_PCI_P2PDMA: * Device memory residing in a PCI BAR intended for use with Peer-to-Peer * transactions. */ enum memory_type { /* 0 is reserved to catch uninitialized type fields */ MEMORY_DEVICE_PRIVATE = 1, MEMORY_DEVICE_FS_DAX, MEMORY_DEVICE_DEVDAX, MEMORY_DEVICE_PCI_P2PDMA, }; struct dev_pagemap_ops { /* * Called once the page refcount reaches 1. (ZONE_DEVICE pages never * reach 0 refcount unless there is a refcount bug. This allows the * device driver to implement its own memory management.) 
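	 *
	 * A driver-side sketch of this callback (editorial addition, not
	 * from the original header; my_free_block() is a hypothetical hook
	 * that returns the backing block to device-private storage):
	 *
	 *	static void my_page_free(struct page *page)
	 *	{
	 *		my_free_block(page_to_pfn(page));
	 *	}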
*/ void (*page_free)(struct page *page); /* * Transition the refcount in struct dev_pagemap to the dead state. */ void (*kill)(struct dev_pagemap *pgmap); /* * Wait for refcount in struct dev_pagemap to be idle and reap it. */ void (*cleanup)(struct dev_pagemap *pgmap); /* * Used for private (un-addressable) device memory only. Must migrate * the page back to a CPU accessible page. */ vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf); }; #define PGMAP_ALTMAP_VALID (1 << 0) /** * struct dev_pagemap - metadata for ZONE_DEVICE mappings * @altmap: pre-allocated/reserved memory for vmemmap allocations * @res: physical address range covered by @ref * @ref: reference count that pins the devm_memremap_pages() mapping * @internal_ref: internal reference if @ref is not provided by the caller * @done: completion for @internal_ref * @dev: host device of the mapping for debug * @data: private data pointer for page_free() * @type: memory type: see MEMORY_* in memory_hotplug.h * @flags: PGMAP_* flags to specify defailed behavior * @ops: method table */ struct dev_pagemap { struct vmem_altmap altmap; struct resource res; struct percpu_ref *ref; struct percpu_ref internal_ref; struct completion done; enum memory_type type; unsigned int flags; const struct dev_pagemap_ops *ops; }; static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap) { if (pgmap->flags & PGMAP_ALTMAP_VALID) return &pgmap->altmap; return NULL; } #ifdef CONFIG_ZONE_DEVICE void *memremap_pages(struct dev_pagemap *pgmap, int nid); void memunmap_pages(struct dev_pagemap *pgmap); void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap); unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); #else static inline void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) { /* * Fail attempts to call devm_memremap_pages() without * ZONE_DEVICE support enabled, this requires callers to fall * back to plain devm_memremap() based on config */ WARN_ON_ONCE(1); return ERR_PTR(-ENXIO); } static inline void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap) { } static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap) { return NULL; } static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) { return 0; } static inline void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns) { } #endif /* CONFIG_ZONE_DEVICE */ static inline void put_dev_pagemap(struct dev_pagemap *pgmap) { if (pgmap) percpu_ref_put(pgmap->ref); } #endif /* _LINUX_MEMREMAP_H_ */ percpu-refcount.h 0000644 00000024727 14722070374 0010060 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Percpu refcounts: * (C) 2012 Google, Inc. * Author: Kent Overstreet <koverstreet@google.com> * * This implements a refcount with similar semantics to atomic_t - atomic_inc(), * atomic_dec_and_test() - but percpu. * * There's one important difference between percpu refs and normal atomic_t * refcounts; you have to keep track of your initial refcount, and then when you * start shutting down you call percpu_ref_kill() _before_ dropping the initial * refcount. * * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less * than an atomic_t - this is because of the way shutdown works, see * percpu_ref_kill()/PERCPU_COUNT_BIAS. 
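 *
 * A minimal lifecycle sketch (editorial addition, not from the original
 * comment; my_release() is a hypothetical release callback):
 *
 *	if (percpu_ref_init(&obj->ref, my_release, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	percpu_ref_get(&obj->ref);
 *	do_something(obj);
 *	percpu_ref_put(&obj->ref);
 *	...
 *	percpu_ref_kill(&obj->ref);	(drops the initial ref; my_release()
 *					 runs once all other refs are put)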
* * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill() * puts the ref back in single atomic_t mode, collecting the per cpu refs and * issuing the appropriate barriers, and then marks the ref as shutting down so * that percpu_ref_put() will check for the ref hitting 0. After it returns, * it's safe to drop the initial ref. * * USAGE: * * See fs/aio.c for some example usage; it's used there for struct kioctx, which * is created when userspaces calls io_setup(), and destroyed when userspace * calls io_destroy() or the process exits. * * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it * removes the kioctx from the proccess's table of kioctxs and kills percpu_ref. * After that, there can't be any new users of the kioctx (from lookup_ioctx()) * and it's then safe to drop the initial ref with percpu_ref_put(). * * Note that the free path, free_ioctx(), needs to go through explicit call_rcu() * to synchronize with RCU protected lookup_ioctx(). percpu_ref operations don't * imply RCU grace periods of any kind and if a user wants to combine percpu_ref * with RCU protection, it must be done explicitly. * * Code that does a two stage shutdown like this often needs some kind of * explicit synchronization to ensure the initial refcount can only be dropped * once - percpu_ref_kill() does this for you, it returns true once and false if * someone else already called it. The aio code uses it this way, but it's not * necessary if the code has some other mechanism to synchronize teardown. * around. */ #ifndef _LINUX_PERCPU_REFCOUNT_H #define _LINUX_PERCPU_REFCOUNT_H #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/percpu.h> #include <linux/rcupdate.h> #include <linux/gfp.h> struct percpu_ref; typedef void (percpu_ref_func_t)(struct percpu_ref *); /* flags set in the lower bits of percpu_ref->percpu_count_ptr */ enum { __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */ __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */ __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD, __PERCPU_REF_FLAG_BITS = 2, }; /* @flags for percpu_ref_init() */ enum { /* * Start w/ ref == 1 in atomic mode. Can be switched to percpu * operation using percpu_ref_switch_to_percpu(). If initialized * with this flag, the ref will stay in atomic mode until * percpu_ref_switch_to_percpu() is invoked on it. * Implies ALLOW_REINIT. */ PERCPU_REF_INIT_ATOMIC = 1 << 0, /* * Start dead w/ ref == 0 in atomic mode. Must be revived with * percpu_ref_reinit() before used. Implies INIT_ATOMIC and * ALLOW_REINIT. */ PERCPU_REF_INIT_DEAD = 1 << 1, /* * Allow switching from atomic mode to percpu mode. */ PERCPU_REF_ALLOW_REINIT = 1 << 2, }; struct percpu_ref { atomic_long_t count; /* * The low bit of the pointer indicates whether the ref is in percpu * mode; if set, then get/put will manipulate the atomic_t. 
*/ unsigned long percpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic:1; bool allow_reinit:1; struct rcu_head rcu; }; int __must_check percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release, unsigned int flags, gfp_t gfp); void percpu_ref_exit(struct percpu_ref *ref); void percpu_ref_switch_to_atomic(struct percpu_ref *ref, percpu_ref_func_t *confirm_switch); void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref); void percpu_ref_switch_to_percpu(struct percpu_ref *ref); void percpu_ref_kill_and_confirm(struct percpu_ref *ref, percpu_ref_func_t *confirm_kill); void percpu_ref_resurrect(struct percpu_ref *ref); void percpu_ref_reinit(struct percpu_ref *ref); /** * percpu_ref_kill - drop the initial ref * @ref: percpu_ref to kill * * Must be used to drop the initial ref on a percpu refcount; must be called * precisely once before shutdown. * * Switches @ref into atomic mode before gathering up the percpu counters * and dropping the initial ref. * * There are no implied RCU grace periods between kill and release. */ static inline void percpu_ref_kill(struct percpu_ref *ref) { percpu_ref_kill_and_confirm(ref, NULL); } /* * Internal helper. Don't use outside percpu-refcount proper. The * function doesn't return the pointer and let the caller test it for NULL * because doing so forces the compiler to generate two conditional * branches as it can't assume that @ref->percpu_count is not NULL. */ static inline bool __ref_is_percpu(struct percpu_ref *ref, unsigned long __percpu **percpu_countp) { unsigned long percpu_ptr; /* * The value of @ref->percpu_count_ptr is tested for * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then * used as a pointer. If the compiler generates a separate fetch * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in * between contaminating the pointer value, meaning that * READ_ONCE() is required when fetching it. * * The smp_read_barrier_depends() implied by READ_ONCE() pairs * with smp_store_release() in __percpu_ref_switch_to_percpu(). */ percpu_ptr = READ_ONCE(ref->percpu_count_ptr); /* * Theoretically, the following could test just ATOMIC; however, * then we'd have to mask off DEAD separately as DEAD may be * visible without ATOMIC if we race with percpu_ref_kill(). DEAD * implies ATOMIC anyway. Test them together. */ if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD)) return false; *percpu_countp = (unsigned long __percpu *)percpu_ptr; return true; } /** * percpu_ref_get_many - increment a percpu refcount * @ref: percpu_ref to get * @nr: number of references to get * * Analogous to atomic_long_add(). * * This function is safe to call as long as @ref is between init and exit. */ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) this_cpu_add(*percpu_count, nr); else atomic_long_add(nr, &ref->count); rcu_read_unlock_sched(); } /** * percpu_ref_get - increment a percpu refcount * @ref: percpu_ref to get * * Analagous to atomic_long_inc(). * * This function is safe to call as long as @ref is between init and exit. */ static inline void percpu_ref_get(struct percpu_ref *ref) { percpu_ref_get_many(ref, 1); } /** * percpu_ref_tryget - try to increment a percpu refcount * @ref: percpu_ref to try-get * * Increment a percpu refcount unless its count already reached zero. * Returns %true on success; %false on failure. 
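 *
 * Example (editorial sketch, not from the original comment; ctx and
 * do_work() are placeholders):
 *
 *	if (!percpu_ref_tryget(&ctx->refs))
 *		return -ENXIO;
 *	do_work(ctx);
 *	percpu_ref_put(&ctx->refs);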
* * This function is safe to call as long as @ref is between init and exit. */ static inline bool percpu_ref_tryget(struct percpu_ref *ref) { unsigned long __percpu *percpu_count; bool ret; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) { this_cpu_inc(*percpu_count); ret = true; } else { ret = atomic_long_inc_not_zero(&ref->count); } rcu_read_unlock_sched(); return ret; } /** * percpu_ref_tryget_live - try to increment a live percpu refcount * @ref: percpu_ref to try-get * * Increment a percpu refcount unless it has already been killed. Returns * %true on success; %false on failure. * * Completion of percpu_ref_kill() in itself doesn't guarantee that this * function will fail. For such guarantee, percpu_ref_kill_and_confirm() * should be used. After the confirm_kill callback is invoked, it's * guaranteed that no new reference will be given out by * percpu_ref_tryget_live(). * * This function is safe to call as long as @ref is between init and exit. */ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) { unsigned long __percpu *percpu_count; bool ret = false; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) { this_cpu_inc(*percpu_count); ret = true; } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) { ret = atomic_long_inc_not_zero(&ref->count); } rcu_read_unlock_sched(); return ret; } /** * percpu_ref_put_many - decrement a percpu refcount * @ref: percpu_ref to put * @nr: number of references to put * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) * * This function is safe to call as long as @ref is between init and exit. */ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) this_cpu_sub(*percpu_count, nr); else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) ref->release(ref); rcu_read_unlock_sched(); } /** * percpu_ref_put - decrement a percpu refcount * @ref: percpu_ref to put * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) * * This function is safe to call as long as @ref is between init and exit. */ static inline void percpu_ref_put(struct percpu_ref *ref) { percpu_ref_put_many(ref, 1); } /** * percpu_ref_is_dying - test whether a percpu refcount is dying or dead * @ref: percpu_ref to test * * Returns %true if @ref is dying or dead. * * This function is safe to call as long as @ref is between init and exit * and the caller is responsible for synchronizing against state changes. */ static inline bool percpu_ref_is_dying(struct percpu_ref *ref) { return ref->percpu_count_ptr & __PERCPU_REF_DEAD; } /** * percpu_ref_is_zero - test whether a percpu refcount reached zero * @ref: percpu_ref to test * * Returns %true if @ref reached zero. * * This function is safe to call as long as @ref is between init and exit. */ static inline bool percpu_ref_is_zero(struct percpu_ref *ref) { unsigned long __percpu *percpu_count; if (__ref_is_percpu(ref, &percpu_count)) return false; return !atomic_long_read(&ref->count); } #endif if_fddi.h 0000644 00000006363 14722070374 0006317 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Global definitions for the ANSI FDDI interface. 
* * Version: @(#)if_fddi.h 1.0.2 Sep 29 2004 * * Author: Lawrence V. Stefani, <stefani@lkg.dec.com> * * if_fddi.h is based on previous if_ether.h and if_tr.h work by * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Donald Becker, <becker@super.org> * Alan Cox, <alan@lxorguk.ukuu.org.uk> * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk> * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be> */ #ifndef _LINUX_IF_FDDI_H #define _LINUX_IF_FDDI_H #include <linux/netdevice.h> #include <uapi/linux/if_fddi.h> /* Define FDDI statistics structure */ struct fddi_statistics { /* Generic statistics. */ struct net_device_stats gen; /* Detailed FDDI statistics. Adopted from RFC 1512 */ __u8 smt_station_id[8]; __u32 smt_op_version_id; __u32 smt_hi_version_id; __u32 smt_lo_version_id; __u8 smt_user_data[32]; __u32 smt_mib_version_id; __u32 smt_mac_cts; __u32 smt_non_master_cts; __u32 smt_master_cts; __u32 smt_available_paths; __u32 smt_config_capabilities; __u32 smt_config_policy; __u32 smt_connection_policy; __u32 smt_t_notify; __u32 smt_stat_rpt_policy; __u32 smt_trace_max_expiration; __u32 smt_bypass_present; __u32 smt_ecm_state; __u32 smt_cf_state; __u32 smt_remote_disconnect_flag; __u32 smt_station_status; __u32 smt_peer_wrap_flag; __u32 smt_time_stamp; __u32 smt_transition_time_stamp; __u32 mac_frame_status_functions; __u32 mac_t_max_capability; __u32 mac_tvx_capability; __u32 mac_available_paths; __u32 mac_current_path; __u8 mac_upstream_nbr[FDDI_K_ALEN]; __u8 mac_downstream_nbr[FDDI_K_ALEN]; __u8 mac_old_upstream_nbr[FDDI_K_ALEN]; __u8 mac_old_downstream_nbr[FDDI_K_ALEN]; __u32 mac_dup_address_test; __u32 mac_requested_paths; __u32 mac_downstream_port_type; __u8 mac_smt_address[FDDI_K_ALEN]; __u32 mac_t_req; __u32 mac_t_neg; __u32 mac_t_max; __u32 mac_tvx_value; __u32 mac_frame_cts; __u32 mac_copied_cts; __u32 mac_transmit_cts; __u32 mac_error_cts; __u32 mac_lost_cts; __u32 mac_frame_error_threshold; __u32 mac_frame_error_ratio; __u32 mac_rmt_state; __u32 mac_da_flag; __u32 mac_una_da_flag; __u32 mac_frame_error_flag; __u32 mac_ma_unitdata_available; __u32 mac_hardware_present; __u32 mac_ma_unitdata_enable; __u32 path_tvx_lower_bound; __u32 path_t_max_lower_bound; __u32 path_max_t_req; __u32 path_configuration[8]; __u32 port_my_type[2]; __u32 port_neighbor_type[2]; __u32 port_connection_policies[2]; __u32 port_mac_indicated[2]; __u32 port_current_path[2]; __u8 port_requested_paths[3*2]; __u32 port_mac_placement[2]; __u32 port_available_paths[2]; __u32 port_pmd_class[2]; __u32 port_connection_capabilities[2]; __u32 port_bs_flag[2]; __u32 port_lct_fail_cts[2]; __u32 port_ler_estimate[2]; __u32 port_lem_reject_cts[2]; __u32 port_lem_cts[2]; __u32 port_ler_cutoff[2]; __u32 port_ler_alarm[2]; __u32 port_connect_state[2]; __u32 port_pcm_state[2]; __u32 port_pc_withhold[2]; __u32 port_ler_flag[2]; __u32 port_hardware_present[2]; }; #endif /* _LINUX_IF_FDDI_H */ t10-pi.h 0000644 00000003002 14722070374 0005730 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_T10_PI_H #define _LINUX_T10_PI_H #include <linux/types.h> #include <linux/blkdev.h> /* * A T10 PI-capable target device can be formatted with different * protection schemes. 
Currently 0 through 3 are defined: * * Type 0 is regular (unprotected) I/O * * Type 1 defines the contents of the guard and reference tags * * Type 2 defines the contents of the guard and reference tags and * uses 32-byte commands to seed the latter * * Type 3 defines the contents of the guard tag only */ enum t10_dif_type { T10_PI_TYPE0_PROTECTION = 0x0, T10_PI_TYPE1_PROTECTION = 0x1, T10_PI_TYPE2_PROTECTION = 0x2, T10_PI_TYPE3_PROTECTION = 0x3, }; /* * T10 Protection Information tuple. */ struct t10_pi_tuple { __be16 guard_tag; /* Checksum */ __be16 app_tag; /* Opaque storage */ __be32 ref_tag; /* Target LBA or indirect LBA */ }; #define T10_PI_APP_ESCAPE cpu_to_be16(0xffff) #define T10_PI_REF_ESCAPE cpu_to_be32(0xffffffff) static inline u32 t10_pi_ref_tag(struct request *rq) { unsigned int shift = ilog2(queue_logical_block_size(rq->q)); #ifdef CONFIG_BLK_DEV_INTEGRITY if (rq->q->integrity.interval_exp) shift = rq->q->integrity.interval_exp; #endif return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; } extern const struct blk_integrity_profile t10_pi_type1_crc; extern const struct blk_integrity_profile t10_pi_type1_ip; extern const struct blk_integrity_profile t10_pi_type3_crc; extern const struct blk_integrity_profile t10_pi_type3_ip; #endif intel-svm.h 0000644 00000011103 14722070374 0006635 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright © 2015 Intel Corporation. * * Authors: David Woodhouse <David.Woodhouse@intel.com> */ #ifndef __INTEL_SVM_H__ #define __INTEL_SVM_H__ struct device; struct svm_dev_ops { void (*fault_cb)(struct device *dev, int pasid, u64 address, void *private, int rwxp, int response); }; /* Values for rxwp in fault_cb callback */ #define SVM_REQ_READ (1<<3) #define SVM_REQ_WRITE (1<<2) #define SVM_REQ_EXEC (1<<1) #define SVM_REQ_PRIV (1<<0) /* * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" * PASID for the current process. Even if a PASID already exists, a new one * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID * will not be given to subsequent callers. This facility allows a driver to * disambiguate between multiple device contexts which access the same MM, * if there is no other way to do so. It should be used sparingly, if at all. */ #define SVM_FLAG_PRIVATE_PASID (1<<0) /* * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only * for access to kernel addresses. No IOTLB flushes are automatically done * for kernel mappings; it is valid only for access to the kernel's static * 1:1 mapping of physical memory — not to vmalloc or even module mappings. * A future API addition may permit the use of such ranges, by means of an * explicit IOTLB flush call (akin to the DMA API's unmap method). * * It is unlikely that we will ever hook into flush_tlb_kernel_range() to * do such IOTLB flushes automatically. */ #define SVM_FLAG_SUPERVISOR_MODE (1<<1) #ifdef CONFIG_INTEL_IOMMU_SVM /** * intel_svm_bind_mm() - Bind the current process to a PASID * @dev: Device to be granted access * @pasid: Address for allocated PASID * @flags: Flags. Later for requesting supervisor mode, etc. * @ops: Callbacks to device driver * * This function attempts to enable PASID support for the given device. * If the @pasid argument is non-%NULL, a PASID is allocated for access * to the MM of the current process. * * By using a %NULL value for the @pasid argument, this function can * be used to simply validate that PASID support is available for the * given device — i.e. 
that it is behind an IOMMU which has the * requisite support, and is enabled. * * Page faults are handled transparently by the IOMMU code, and there * should be no need for the device driver to be involved. If a page * fault cannot be handled (i.e. is an invalid address rather than * just needs paging in), then the page request will be completed by * the core IOMMU code with appropriate status, and the device itself * can then report the resulting fault to its driver via whatever * mechanism is appropriate. * * Multiple calls from the same process may result in the same PASID * being re-used. A reference count is kept. */ extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops); /** * intel_svm_unbind_mm() - Unbind a specified PASID * @dev: Device for which PASID was allocated * @pasid: PASID value to be unbound * * This function allows a PASID to be retired when the device no * longer requires access to the address space of a given process. * * If the use count for the PASID in question reaches zero, the * PASID is revoked and may no longer be used by hardware. * * Device drivers are required to ensure that no access (including * page requests) is currently outstanding for the PASID in question, * before calling this function. */ extern int intel_svm_unbind_mm(struct device *dev, int pasid); /** * intel_svm_is_pasid_valid() - check if pasid is valid * @dev: Device for which PASID was allocated * @pasid: PASID value to be checked * * This function checks if the specified pasid is still valid. A * valid pasid means the backing mm is still having a valid user. * For kernel callers init_mm is always valid. for other mm, if mm->mm_users * is non-zero, it is valid. * * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid * 1 if pasid is valid. 
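 *
 * Overall bind/unbind flow for reference (editorial sketch, not from the
 * original header):
 *
 *	int pasid;
 *
 *	if (intel_svm_bind_mm(dev, &pasid, 0, NULL))
 *		return -ENODEV;
 *	... program the PASID into the device and issue DMA ...
 *	intel_svm_unbind_mm(dev, pasid);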
*/ extern int intel_svm_is_pasid_valid(struct device *dev, int pasid); #else /* CONFIG_INTEL_IOMMU_SVM */ static inline int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops) { return -ENOSYS; } static inline int intel_svm_unbind_mm(struct device *dev, int pasid) { BUG(); } static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid) { return -EINVAL; } #endif /* CONFIG_INTEL_IOMMU_SVM */ #define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL)) #endif /* __INTEL_SVM_H__ */ range.h 0000644 00000001213 14722070374 0006014 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RANGE_H #define _LINUX_RANGE_H struct range { u64 start; u64 end; }; int add_range(struct range *range, int az, int nr_range, u64 start, u64 end); int add_range_with_merge(struct range *range, int az, int nr_range, u64 start, u64 end); void subtract_range(struct range *range, int az, u64 start, u64 end); int clean_sort_range(struct range *range, int az); void sort_range(struct range *range, int nr_range); #define MAX_RESOURCE ((resource_size_t)~0) static inline resource_size_t cap_resource(u64 val) { if (val > MAX_RESOURCE) return MAX_RESOURCE; return val; } #endif serial_bcm63xx.h 0000644 00000011353 14722070374 0007557 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SERIAL_BCM63XX_H #define _LINUX_SERIAL_BCM63XX_H /* UART Control Register */ #define UART_CTL_REG 0x0 #define UART_CTL_RXTMOUTCNT_SHIFT 0 #define UART_CTL_RXTMOUTCNT_MASK (0x1f << UART_CTL_RXTMOUTCNT_SHIFT) #define UART_CTL_RSTTXDN_SHIFT 5 #define UART_CTL_RSTTXDN_MASK (1 << UART_CTL_RSTTXDN_SHIFT) #define UART_CTL_RSTRXFIFO_SHIFT 6 #define UART_CTL_RSTRXFIFO_MASK (1 << UART_CTL_RSTRXFIFO_SHIFT) #define UART_CTL_RSTTXFIFO_SHIFT 7 #define UART_CTL_RSTTXFIFO_MASK (1 << UART_CTL_RSTTXFIFO_SHIFT) #define UART_CTL_STOPBITS_SHIFT 8 #define UART_CTL_STOPBITS_MASK (0xf << UART_CTL_STOPBITS_SHIFT) #define UART_CTL_STOPBITS_1 (0x7 << UART_CTL_STOPBITS_SHIFT) #define UART_CTL_STOPBITS_2 (0xf << UART_CTL_STOPBITS_SHIFT) #define UART_CTL_BITSPERSYM_SHIFT 12 #define UART_CTL_BITSPERSYM_MASK (0x3 << UART_CTL_BITSPERSYM_SHIFT) #define UART_CTL_XMITBRK_SHIFT 14 #define UART_CTL_XMITBRK_MASK (1 << UART_CTL_XMITBRK_SHIFT) #define UART_CTL_RSVD_SHIFT 15 #define UART_CTL_RSVD_MASK (1 << UART_CTL_RSVD_SHIFT) #define UART_CTL_RXPAREVEN_SHIFT 16 #define UART_CTL_RXPAREVEN_MASK (1 << UART_CTL_RXPAREVEN_SHIFT) #define UART_CTL_RXPAREN_SHIFT 17 #define UART_CTL_RXPAREN_MASK (1 << UART_CTL_RXPAREN_SHIFT) #define UART_CTL_TXPAREVEN_SHIFT 18 #define UART_CTL_TXPAREVEN_MASK (1 << UART_CTL_TXPAREVEN_SHIFT) #define UART_CTL_TXPAREN_SHIFT 18 #define UART_CTL_TXPAREN_MASK (1 << UART_CTL_TXPAREN_SHIFT) #define UART_CTL_LOOPBACK_SHIFT 20 #define UART_CTL_LOOPBACK_MASK (1 << UART_CTL_LOOPBACK_SHIFT) #define UART_CTL_RXEN_SHIFT 21 #define UART_CTL_RXEN_MASK (1 << UART_CTL_RXEN_SHIFT) #define UART_CTL_TXEN_SHIFT 22 #define UART_CTL_TXEN_MASK (1 << UART_CTL_TXEN_SHIFT) #define UART_CTL_BRGEN_SHIFT 23 #define UART_CTL_BRGEN_MASK (1 << UART_CTL_BRGEN_SHIFT) /* UART Baudword register */ #define UART_BAUD_REG 0x4 /* UART Misc Control register */ #define UART_MCTL_REG 0x8 #define UART_MCTL_DTR_SHIFT 0 #define UART_MCTL_DTR_MASK (1 << UART_MCTL_DTR_SHIFT) #define UART_MCTL_RTS_SHIFT 1 #define UART_MCTL_RTS_MASK (1 << UART_MCTL_RTS_SHIFT) #define UART_MCTL_RXFIFOTHRESH_SHIFT 8 #define UART_MCTL_RXFIFOTHRESH_MASK (0xf << UART_MCTL_RXFIFOTHRESH_SHIFT) #define UART_MCTL_TXFIFOTHRESH_SHIFT 12 #define 
UART_MCTL_TXFIFOTHRESH_MASK (0xf << UART_MCTL_TXFIFOTHRESH_SHIFT) #define UART_MCTL_RXFIFOFILL_SHIFT 16 #define UART_MCTL_RXFIFOFILL_MASK (0x1f << UART_MCTL_RXFIFOFILL_SHIFT) #define UART_MCTL_TXFIFOFILL_SHIFT 24 #define UART_MCTL_TXFIFOFILL_MASK (0x1f << UART_MCTL_TXFIFOFILL_SHIFT) /* UART External Input Configuration register */ #define UART_EXTINP_REG 0xc #define UART_EXTINP_RI_SHIFT 0 #define UART_EXTINP_RI_MASK (1 << UART_EXTINP_RI_SHIFT) #define UART_EXTINP_CTS_SHIFT 1 #define UART_EXTINP_CTS_MASK (1 << UART_EXTINP_CTS_SHIFT) #define UART_EXTINP_DCD_SHIFT 2 #define UART_EXTINP_DCD_MASK (1 << UART_EXTINP_DCD_SHIFT) #define UART_EXTINP_DSR_SHIFT 3 #define UART_EXTINP_DSR_MASK (1 << UART_EXTINP_DSR_SHIFT) #define UART_EXTINP_IRSTAT(x) (1 << (x + 4)) #define UART_EXTINP_IRMASK(x) (1 << (x + 8)) #define UART_EXTINP_IR_RI 0 #define UART_EXTINP_IR_CTS 1 #define UART_EXTINP_IR_DCD 2 #define UART_EXTINP_IR_DSR 3 #define UART_EXTINP_RI_NOSENSE_SHIFT 16 #define UART_EXTINP_RI_NOSENSE_MASK (1 << UART_EXTINP_RI_NOSENSE_SHIFT) #define UART_EXTINP_CTS_NOSENSE_SHIFT 17 #define UART_EXTINP_CTS_NOSENSE_MASK (1 << UART_EXTINP_CTS_NOSENSE_SHIFT) #define UART_EXTINP_DCD_NOSENSE_SHIFT 18 #define UART_EXTINP_DCD_NOSENSE_MASK (1 << UART_EXTINP_DCD_NOSENSE_SHIFT) #define UART_EXTINP_DSR_NOSENSE_SHIFT 19 #define UART_EXTINP_DSR_NOSENSE_MASK (1 << UART_EXTINP_DSR_NOSENSE_SHIFT) /* UART Interrupt register */ #define UART_IR_REG 0x10 #define UART_IR_MASK(x) (1 << (x + 16)) #define UART_IR_STAT(x) (1 << (x)) #define UART_IR_EXTIP 0 #define UART_IR_TXUNDER 1 #define UART_IR_TXOVER 2 #define UART_IR_TXTRESH 3 #define UART_IR_TXRDLATCH 4 #define UART_IR_TXEMPTY 5 #define UART_IR_RXUNDER 6 #define UART_IR_RXOVER 7 #define UART_IR_RXTIMEOUT 8 #define UART_IR_RXFULL 9 #define UART_IR_RXTHRESH 10 #define UART_IR_RXNOTEMPTY 11 #define UART_IR_RXFRAMEERR 12 #define UART_IR_RXPARERR 13 #define UART_IR_RXBRK 14 #define UART_IR_TXDONE 15 /* UART Fifo register */ #define UART_FIFO_REG 0x14 #define UART_FIFO_VALID_SHIFT 0 #define UART_FIFO_VALID_MASK 0xff #define UART_FIFO_FRAMEERR_SHIFT 8 #define UART_FIFO_FRAMEERR_MASK (1 << UART_FIFO_FRAMEERR_SHIFT) #define UART_FIFO_PARERR_SHIFT 9 #define UART_FIFO_PARERR_MASK (1 << UART_FIFO_PARERR_SHIFT) #define UART_FIFO_BRKDET_SHIFT 10 #define UART_FIFO_BRKDET_MASK (1 << UART_FIFO_BRKDET_SHIFT) #define UART_FIFO_ANYERR_MASK (UART_FIFO_FRAMEERR_MASK | \ UART_FIFO_PARERR_MASK | \ UART_FIFO_BRKDET_MASK) #endif /* _LINUX_SERIAL_BCM63XX_H */ byteorder/generic.h 0000644 00000014164 14722070374 0010344 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BYTEORDER_GENERIC_H #define _LINUX_BYTEORDER_GENERIC_H /* * linux/byteorder/generic.h * Generic Byte-reordering support * * The "... p" macros, like le64_to_cpup, can be used with pointers * to unaligned data, but there will be a performance penalty on * some architectures. Use get_unaligned for unaligned data. * * Francois-Rene Rideau <fare@tunes.org> 19970707 * gathered all the good ideas from all asm-foo/byteorder.h into one file, * cleaned them up. * I hope it is compliant with non-GCC compilers. * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h, * because I wasn't sure it would be ok to put it in types.h * Upgraded it to 2.1.43 * Francois-Rene Rideau <fare@tunes.org> 19971012 * Upgraded it to 2.1.57 * to please Linus T., replaced huge #ifdef's between little/big endian * by nestedly #include'd files. 
* Francois-Rene Rideau <fare@tunes.org> 19971205 * Made it to 2.1.71; now a facelift: * Put files under include/linux/byteorder/ * Split swab from generic support. * * TODO: * = Regular kernel maintainers could also replace all these manual * byteswap macros that remain, disseminated among drivers, * after some grep or the sources... * = Linus might want to rename all these macros and files to fit his taste, * to fit his personal naming scheme. * = it seems that a few drivers would also appreciate * nybble swapping support... * = every architecture could add their byteswap macro in asm/byteorder.h * see how some architectures already do (i386, alpha, ppc, etc) * = cpu_to_beXX and beXX_to_cpu might some day need to be well * distinguished throughout the kernel. This is not the case currently, * since little endian, big endian, and pdp endian machines needn't it. * But this might be the case for, say, a port of Linux to 20/21 bit * architectures (and F21 Linux addict around?). */ /* * The following macros are to be defined by <asm/byteorder.h>: * * Conversion of long and short int between network and host format * ntohl(__u32 x) * ntohs(__u16 x) * htonl(__u32 x) * htons(__u16 x) * It seems that some programs (which? where? or perhaps a standard? POSIX?) * might like the above to be functions, not macros (why?). * if that's true, then detect them, and take measures. * Anyway, the measure is: define only ___ntohl as a macro instead, * and in a separate file, have * unsigned long inline ntohl(x){return ___ntohl(x);} * * The same for constant arguments * __constant_ntohl(__u32 x) * __constant_ntohs(__u16 x) * __constant_htonl(__u32 x) * __constant_htons(__u16 x) * * Conversion of XX-bit integers (16- 32- or 64-) * between native CPU format and little/big endian format * 64-bit stuff only defined for proper architectures * cpu_to_[bl]eXX(__uXX x) * [bl]eXX_to_cpu(__uXX x) * * The same, but takes a pointer to the value to convert * cpu_to_[bl]eXXp(__uXX x) * [bl]eXX_to_cpup(__uXX x) * * The same, but change in situ * cpu_to_[bl]eXXs(__uXX x) * [bl]eXX_to_cpus(__uXX x) * * See asm-foo/byteorder.h for examples of how to provide * architecture-optimized versions * */ #define cpu_to_le64 __cpu_to_le64 #define le64_to_cpu __le64_to_cpu #define cpu_to_le32 __cpu_to_le32 #define le32_to_cpu __le32_to_cpu #define cpu_to_le16 __cpu_to_le16 #define le16_to_cpu __le16_to_cpu #define cpu_to_be64 __cpu_to_be64 #define be64_to_cpu __be64_to_cpu #define cpu_to_be32 __cpu_to_be32 #define be32_to_cpu __be32_to_cpu #define cpu_to_be16 __cpu_to_be16 #define be16_to_cpu __be16_to_cpu #define cpu_to_le64p __cpu_to_le64p #define le64_to_cpup __le64_to_cpup #define cpu_to_le32p __cpu_to_le32p #define le32_to_cpup __le32_to_cpup #define cpu_to_le16p __cpu_to_le16p #define le16_to_cpup __le16_to_cpup #define cpu_to_be64p __cpu_to_be64p #define be64_to_cpup __be64_to_cpup #define cpu_to_be32p __cpu_to_be32p #define be32_to_cpup __be32_to_cpup #define cpu_to_be16p __cpu_to_be16p #define be16_to_cpup __be16_to_cpup #define cpu_to_le64s __cpu_to_le64s #define le64_to_cpus __le64_to_cpus #define cpu_to_le32s __cpu_to_le32s #define le32_to_cpus __le32_to_cpus #define cpu_to_le16s __cpu_to_le16s #define le16_to_cpus __le16_to_cpus #define cpu_to_be64s __cpu_to_be64s #define be64_to_cpus __be64_to_cpus #define cpu_to_be32s __cpu_to_be32s #define be32_to_cpus __be32_to_cpus #define cpu_to_be16s __cpu_to_be16s #define be16_to_cpus __be16_to_cpus /* * They have to be macros in order to do the constant folding * correctly - if the 
argument passed into a inline function * it is no longer constant according to gcc.. */ #undef ntohl #undef ntohs #undef htonl #undef htons #define ___htonl(x) __cpu_to_be32(x) #define ___htons(x) __cpu_to_be16(x) #define ___ntohl(x) __be32_to_cpu(x) #define ___ntohs(x) __be16_to_cpu(x) #define htonl(x) ___htonl(x) #define ntohl(x) ___ntohl(x) #define htons(x) ___htons(x) #define ntohs(x) ___ntohs(x) static inline void le16_add_cpu(__le16 *var, u16 val) { *var = cpu_to_le16(le16_to_cpu(*var) + val); } static inline void le32_add_cpu(__le32 *var, u32 val) { *var = cpu_to_le32(le32_to_cpu(*var) + val); } static inline void le64_add_cpu(__le64 *var, u64 val) { *var = cpu_to_le64(le64_to_cpu(*var) + val); } /* XXX: this stuff can be optimized */ static inline void le32_to_cpu_array(u32 *buf, unsigned int words) { while (words--) { __le32_to_cpus(buf); buf++; } } static inline void cpu_to_le32_array(u32 *buf, unsigned int words) { while (words--) { __cpu_to_le32s(buf); buf++; } } static inline void be16_add_cpu(__be16 *var, u16 val) { *var = cpu_to_be16(be16_to_cpu(*var) + val); } static inline void be32_add_cpu(__be32 *var, u32 val) { *var = cpu_to_be32(be32_to_cpu(*var) + val); } static inline void be64_add_cpu(__be64 *var, u64 val) { *var = cpu_to_be64(be64_to_cpu(*var) + val); } static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len) { int i; for (i = 0; i < len; i++) dst[i] = cpu_to_be32(src[i]); } static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len) { int i; for (i = 0; i < len; i++) dst[i] = be32_to_cpu(src[i]); } #endif /* _LINUX_BYTEORDER_GENERIC_H */ byteorder/little_endian.h 0000644 00000000545 14722070374 0011541 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H #define _LINUX_BYTEORDER_LITTLE_ENDIAN_H #include <uapi/linux/byteorder/little_endian.h> #ifdef CONFIG_CPU_BIG_ENDIAN #warning inconsistent configuration, CONFIG_CPU_BIG_ENDIAN is set #endif #include <linux/byteorder/generic.h> #endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ byteorder/big_endian.h 0000644 00000000531 14722070374 0011000 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H #define _LINUX_BYTEORDER_BIG_ENDIAN_H #include <uapi/linux/byteorder/big_endian.h> #ifndef CONFIG_CPU_BIG_ENDIAN #warning inconsistent configuration, needs CONFIG_CPU_BIG_ENDIAN #endif #include <linux/byteorder/generic.h> #endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */ pci_hotplug.h 0000644 00000010403 14722070374 0007236 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * PCI HotPlug Core Functions * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * Send feedback to <kristen.c.accardi@intel.com> * */ #ifndef _PCI_HOTPLUG_H #define _PCI_HOTPLUG_H /** * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use * @enable_slot: Called when the user wants to enable a specific pci slot * @disable_slot: Called when the user wants to disable a specific pci slot * @set_attention_status: Called to set the specific slot's attention LED to * the specified value * @hardware_test: Called to run a specified hardware test on the specified * slot. * @get_power_status: Called to get the current power status of a slot. * @get_attention_status: Called to get the current attention status of a slot. * @get_latch_status: Called to get the current latch status of a slot. 
/**
 * struct hotplug_slot - used to register a physical slot with the hotplug pci core
 * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
 * @owner: The module owner of this structure
 * @mod_name: The module name (KBUILD_MODNAME) of this structure
 */
struct hotplug_slot {
	const struct hotplug_slot_ops	*ops;

	/* Variables below this are for use only by the hotplug pci core. */
	struct list_head		slot_list;
	struct pci_slot			*pci_slot;
	struct module			*owner;
	const char			*mod_name;
};

static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
{
	return pci_slot_name(slot->pci_slot);
}

int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus, int nr,
		      const char *name, struct module *owner,
		      const char *mod_name);
int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus, int nr,
			const char *name, struct module *owner,
			const char *mod_name);
int pci_hp_add(struct hotplug_slot *slot);

void pci_hp_del(struct hotplug_slot *slot);
void pci_hp_destroy(struct hotplug_slot *slot);
void pci_hp_deregister(struct hotplug_slot *slot);

/* use a define to avoid include chaining to get THIS_MODULE & friends */
#define pci_hp_register(slot, pbus, devnr, name) \
	__pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)
#define pci_hp_initialize(slot, bus, nr, name) \
	__pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME)

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
bool pciehp_is_native(struct pci_dev *bridge);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
bool shpchp_is_native(struct pci_dev *bridge);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(acpi_handle handle);
#else
static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge)
{
	return 0;
}
static inline bool pciehp_is_native(struct pci_dev *bridge) { return true; }
static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; }
#endif

static inline bool hotplug_is_native(struct pci_dev *bridge)
{
	return pciehp_is_native(bridge) || shpchp_is_native(bridge);
}
#endif
trace.h 0000644 00000001704 14722070374 0006023 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TRACE_H
#define _LINUX_TRACE_H

#ifdef CONFIG_TRACING
/*
 * The trace export - an export of Ftrace output. The trace_export
 * can process traces and export them to a registered destination, in
 * addition to Ftrace's only current output, the ring buffer.
 *
 * If you want traces to be sent somewhere other than the ring buffer
 * only, you just need to register a new trace_export and implement
 * its own .write() function for writing traces to the storage.
 *
 * next - pointer to the next trace_export
 * write - copy traces which have been dealt with by ->commit() to
 *	the destination
 */
struct trace_export {
	struct trace_export __rcu	*next;
	void (*write)(struct trace_export *, const void *, unsigned int);
};

int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
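/*
 * Usage example (illustrative sketch; the my_* names are hypothetical):
 * forwarding every committed trace entry to some external storage.
 */
static void my_trace_write(struct trace_export *export, const void *entry,
			   unsigned int size)
{
	/* copy the raw ring-buffer entry of @size bytes to the destination */
}

static struct trace_export my_trace_export = {
	.write	= my_trace_write,
};

/*
 * register_ftrace_export(&my_trace_export) starts delivery of entries;
 * unregister_ftrace_export(&my_trace_export) stops it again.
 */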
#endif	/* CONFIG_TRACING */
#endif	/* _LINUX_TRACE_H */
fscrypt.h 0000644 00000063340 14722070374 0006423 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * fscrypt.h: declarations for per-file encryption
 *
 * Filesystems that implement per-file encryption must include this header
 * file.
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * Written by Michael Halcrow, 2015.
 * Modified by Jaegeuk Kim, 2015.
 */
#ifndef _LINUX_FSCRYPT_H
#define _LINUX_FSCRYPT_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <uapi/linux/fscrypt.h>

#define FS_CRYPTO_BLOCK_SIZE		16

struct fscrypt_ctx;
struct fscrypt_info;

struct fscrypt_str {
	unsigned char *name;
	u32 len;
};

struct fscrypt_name {
	const struct qstr *usr_fname;
	struct fscrypt_str disk_name;
	u32 hash;
	u32 minor_hash;
	struct fscrypt_str crypto_buf;
	bool is_ciphertext_name;
};

#define FSTR_INIT(n, l)		{ .name = n, .len = l }
#define FSTR_TO_QSTR(f)		QSTR_INIT((f)->name, (f)->len)
#define fname_name(p)		((p)->disk_name.name)
#define fname_len(p)		((p)->disk_name.len)

/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
#define FSCRYPT_SET_CONTEXT_MAX_SIZE	40

#ifdef CONFIG_FS_ENCRYPTION
/*
 * fscrypt superblock flags
 */
#define FS_CFLG_OWN_PAGES (1U << 1)

/*
 * crypto operations for filesystems
 */
struct fscrypt_operations {
	unsigned int flags;
	const char *key_prefix;
	int (*get_context)(struct inode *, void *, size_t);
	int (*set_context)(struct inode *, const void *, size_t, void *);
	bool (*dummy_context)(struct inode *);
	bool (*empty_dir)(struct inode *);
	unsigned int max_namelen;
};

/* Decryption work */
struct fscrypt_ctx {
	union {
		struct {
			struct bio *bio;
			struct work_struct work;
		};
		struct list_head free_list;	/* Free list */
	};
	u8 flags;				/* Flags */
};

static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
	/* pairs with cmpxchg_release() in fscrypt_get_encryption_info() */
	return READ_ONCE(inode->i_crypt_info) != NULL;
}

static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
	return inode->i_sb->s_cop->dummy_context &&
		inode->i_sb->s_cop->dummy_context(inode);
}

/*
 * When d_splice_alias() moves a directory's encrypted alias to its decrypted
 * alias as a result of the encryption key being added, DCACHE_ENCRYPTED_NAME
 * must be cleared. Note that we don't have to support arbitrary moves of this
 * flag because fscrypt doesn't allow encrypted aliases to be the source or
 * target of a rename().
 */
static inline void fscrypt_handle_d_move(struct dentry *dentry)
{
	dentry->d_flags &= ~DCACHE_ENCRYPTED_NAME;
}

/**
 * fscrypt_is_nokey_name() - test whether a dentry is a no-key name
 * @dentry: the dentry to check
 *
 * This returns true if the dentry is a no-key dentry.
A no-key dentry is a * dentry that was created in an encrypted directory that hasn't had its * encryption key added yet. Such dentries may be either positive or negative. * * When a filesystem is asked to create a new filename in an encrypted directory * and the new filename's dentry is a no-key dentry, it must fail the operation * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(), * ->rename(), and ->link(). (However, ->rename() and ->link() are already * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().) * * This is necessary because creating a filename requires the directory's * encryption key, but just checking for the key on the directory inode during * the final filesystem operation doesn't guarantee that the key was available * during the preceding dentry lookup. And the key must have already been * available during the dentry lookup in order for it to have been checked * whether the filename already exists in the directory and for the new file's * dentry not to be invalidated due to it incorrectly having the no-key flag. * * Return: %true if the dentry is a no-key name */ static inline bool fscrypt_is_nokey_name(const struct dentry *dentry) { return dentry->d_flags & DCACHE_ENCRYPTED_NAME; } /* crypto.c */ extern void fscrypt_enqueue_decrypt_work(struct work_struct *); extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t); extern void fscrypt_release_ctx(struct fscrypt_ctx *); extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, unsigned int len, unsigned int offs, gfp_t gfp_flags); extern int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, unsigned int offs, u64 lblk_num, gfp_t gfp_flags); extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, unsigned int offs); extern int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, unsigned int offs, u64 lblk_num); static inline bool fscrypt_is_bounce_page(struct page *page) { return page->mapping == NULL; } static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) { return (struct page *)page_private(bounce_page); } extern void fscrypt_free_bounce_page(struct page *bounce_page); /* policy.c */ extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); extern int fscrypt_ioctl_get_policy(struct file *, void __user *); extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); /* keyring.c */ extern void fscrypt_sb_free(struct super_block *sb); extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); extern int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); extern int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg); extern int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); /* keysetup.c */ extern int fscrypt_get_encryption_info(struct inode *); extern void fscrypt_put_encryption_info(struct inode *); extern void fscrypt_free_inode(struct inode *); extern int fscrypt_drop_inode(struct inode *inode); /* fname.c */ extern int fscrypt_setup_filename(struct inode *, const struct qstr *, int lookup, struct fscrypt_name *); static inline void fscrypt_free_filename(struct fscrypt_name *fname) { kfree(fname->crypto_buf.name); } extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, struct fscrypt_str *); 
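/*
 * Usage example (illustrative sketch; my_lookup_example() and the directory
 * scan are hypothetical): the typical lookup flow in a possibly-encrypted
 * directory using the fname.c helpers declared above.
 */
static inline int my_lookup_example(struct inode *dir, const struct qstr *name)
{
	struct fscrypt_name fname;
	int err;

	err = fscrypt_setup_filename(dir, name, 1 /* lookup */, &fname);
	if (err)
		return err;

	/*
	 * ... scan the directory, testing each entry with
	 * fscrypt_match_name(&fname, de_name, de_name_len) ...
	 */

	fscrypt_free_filename(&fname);
	return 0;
}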
extern void fscrypt_fname_free_buffer(struct fscrypt_str *); extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, const struct fscrypt_str *, struct fscrypt_str *); #define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 /* Extracts the second-to-last ciphertext block; see explanation below */ #define FSCRYPT_FNAME_DIGEST(name, len) \ ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \ FS_CRYPTO_BLOCK_SIZE)) #define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE /** * fscrypt_digested_name - alternate identifier for an on-disk filename * * When userspace lists an encrypted directory without access to the key, * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE * bytes are shown in this abbreviated form (base64-encoded) rather than as the * full ciphertext (base64-encoded). This is necessary to allow supporting * filenames up to NAME_MAX bytes, since base64 encoding expands the length. * * To make it possible for filesystems to still find the correct directory entry * despite not knowing the full on-disk name, we encode any filesystem-specific * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups, * followed by the second-to-last ciphertext block of the filename. Due to the * use of the CBC-CTS encryption mode, the second-to-last ciphertext block * depends on the full plaintext. (Note that ciphertext stealing causes the * last two blocks to appear "flipped".) This makes accidental collisions very * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they * share the same filesystem-specific hashes. * * However, this scheme isn't immune to intentional collisions, which can be * created by anyone able to create arbitrary plaintext filenames and view them * without the key. Making the "digest" be a real cryptographic hash like * SHA-256 over the full ciphertext would prevent this, although it would be * less efficient and harder to implement, especially since the filesystem would * need to calculate it for each directory entry examined during a search. */ struct fscrypt_digested_name { u32 hash; u32 minor_hash; u8 digest[FSCRYPT_FNAME_DIGEST_SIZE]; }; /** * fscrypt_match_name() - test whether the given name matches a directory entry * @fname: the name being searched for * @de_name: the name from the directory entry * @de_name_len: the length of @de_name in bytes * * Normally @fname->disk_name will be set, and in that case we simply compare * that to the name stored in the directory entry. The only exception is that * if we don't have the key for an encrypted directory and a filename in it is * very long, then we won't have the full disk_name and we'll instead need to * match against the fscrypt_digested_name. * * Return: %true if the name matches, otherwise %false. 
*/ static inline bool fscrypt_match_name(const struct fscrypt_name *fname, const u8 *de_name, u32 de_name_len) { if (unlikely(!fname->disk_name.name)) { const struct fscrypt_digested_name *n = (const void *)fname->crypto_buf.name; if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_')) return false; if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) return false; return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len), n->digest, FSCRYPT_FNAME_DIGEST_SIZE); } if (de_name_len != fname->disk_name.len) return false; return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); } /* bio.c */ extern void fscrypt_decrypt_bio(struct bio *); extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio); extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, unsigned int); /* hooks.c */ extern int fscrypt_file_open(struct inode *inode, struct file *filp); extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, struct dentry *dentry); extern int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags); extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, struct fscrypt_name *fname); extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, unsigned int max_len, struct fscrypt_str *disk_link); extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, unsigned int len, struct fscrypt_str *disk_link); extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, unsigned int max_size, struct delayed_call *done); int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat); static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { sb->s_cop = s_cop; } #else /* !CONFIG_FS_ENCRYPTION */ static inline bool fscrypt_has_encryption_key(const struct inode *inode) { return false; } static inline bool fscrypt_dummy_context_enabled(struct inode *inode) { return false; } static inline void fscrypt_handle_d_move(struct dentry *dentry) { } static inline bool fscrypt_is_nokey_name(const struct dentry *dentry) { return false; } /* crypto.c */ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work) { } static inline struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags) { return ERR_PTR(-EOPNOTSUPP); } static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx) { return; } static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, unsigned int len, unsigned int offs, gfp_t gfp_flags) { return ERR_PTR(-EOPNOTSUPP); } static inline int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, unsigned int offs, u64 lblk_num, gfp_t gfp_flags) { return -EOPNOTSUPP; } static inline int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, unsigned int offs) { return -EOPNOTSUPP; } static inline int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, unsigned int offs, u64 lblk_num) { return -EOPNOTSUPP; } static inline bool fscrypt_is_bounce_page(struct page *page) { return false; } static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) { WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); } static inline void fscrypt_free_bounce_page(struct page *bounce_page) { } /* policy.c */ static inline int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) { return -EOPNOTSUPP; } static inline 
int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { return 0; } static inline int fscrypt_inherit_context(struct inode *parent, struct inode *child, void *fs_data, bool preload) { return -EOPNOTSUPP; } /* keyring.c */ static inline void fscrypt_sb_free(struct super_block *sb) { } static inline int fscrypt_ioctl_add_key(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } static inline int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg) { return -EOPNOTSUPP; } /* keysetup.c */ static inline int fscrypt_get_encryption_info(struct inode *inode) { return -EOPNOTSUPP; } static inline void fscrypt_put_encryption_info(struct inode *inode) { return; } static inline void fscrypt_free_inode(struct inode *inode) { } static inline int fscrypt_drop_inode(struct inode *inode) { return 0; } /* fname.c */ static inline int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, int lookup, struct fscrypt_name *fname) { if (IS_ENCRYPTED(dir)) return -EOPNOTSUPP; memset(fname, 0, sizeof(*fname)); fname->usr_fname = iname; fname->disk_name.name = (unsigned char *)iname->name; fname->disk_name.len = iname->len; return 0; } static inline void fscrypt_free_filename(struct fscrypt_name *fname) { return; } static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, u32 max_encrypted_len, struct fscrypt_str *crypto_str) { return -EOPNOTSUPP; } static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) { return; } static inline int fscrypt_fname_disk_to_usr(struct inode *inode, u32 hash, u32 minor_hash, const struct fscrypt_str *iname, struct fscrypt_str *oname) { return -EOPNOTSUPP; } static inline bool fscrypt_match_name(const struct fscrypt_name *fname, const u8 *de_name, u32 de_name_len) { /* Encryption support disabled; use standard comparison */ if (de_name_len != fname->disk_name.len) return false; return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); } /* bio.c */ static inline void fscrypt_decrypt_bio(struct bio *bio) { } static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio) { } static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, sector_t pblk, unsigned int len) { return -EOPNOTSUPP; } /* hooks.c */ static inline int fscrypt_file_open(struct inode *inode, struct file *filp) { if (IS_ENCRYPTED(inode)) return -EOPNOTSUPP; return 0; } static inline int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, struct dentry *dentry) { return -EOPNOTSUPP; } static inline int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { return -EOPNOTSUPP; } static inline int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, struct fscrypt_name *fname) { return -EOPNOTSUPP; } static inline int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, unsigned int max_len, struct fscrypt_str *disk_link) { return -EOPNOTSUPP; } static inline int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, 
unsigned int len, struct fscrypt_str *disk_link) { return -EOPNOTSUPP; } static inline const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, unsigned int max_size, struct delayed_call *done) { return ERR_PTR(-EOPNOTSUPP); } static inline int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat) { return -EOPNOTSUPP; } static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { } #endif /* !CONFIG_FS_ENCRYPTION */ /** * fscrypt_require_key - require an inode's encryption key * @inode: the inode we need the key for * * If the inode is encrypted, set up its encryption key if not already done. * Then require that the key be present and return -ENOKEY otherwise. * * No locks are needed, and the key will live as long as the struct inode --- so * it won't go away from under you. * * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code * if a problem occurred while setting up the encryption key. */ static inline int fscrypt_require_key(struct inode *inode) { if (IS_ENCRYPTED(inode)) { int err = fscrypt_get_encryption_info(inode); if (err) return err; if (!fscrypt_has_encryption_key(inode)) return -ENOKEY; } return 0; } /** * fscrypt_prepare_link - prepare to link an inode into a possibly-encrypted directory * @old_dentry: an existing dentry for the inode being linked * @dir: the target directory * @dentry: negative dentry for the target filename * * A new link can only be added to an encrypted directory if the directory's * encryption key is available --- since otherwise we'd have no way to encrypt * the filename. Therefore, we first set up the directory's encryption key (if * not already done) and return an error if it's unavailable. * * We also verify that the link will not violate the constraint that all files * in an encrypted directory tree use the same encryption policy. * * Return: 0 on success, -ENOKEY if the directory's encryption key is missing, * -EXDEV if the link would result in an inconsistent encryption policy, or * another -errno code. */ static inline int fscrypt_prepare_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { if (IS_ENCRYPTED(dir)) return __fscrypt_prepare_link(d_inode(old_dentry), dir, dentry); return 0; } /** * fscrypt_prepare_rename - prepare for a rename between possibly-encrypted directories * @old_dir: source directory * @old_dentry: dentry for source file * @new_dir: target directory * @new_dentry: dentry for target location (may be negative unless exchanging) * @flags: rename flags (we care at least about %RENAME_EXCHANGE) * * Prepare for ->rename() where the source and/or target directories may be * encrypted. A new link can only be added to an encrypted directory if the * directory's encryption key is available --- since otherwise we'd have no way * to encrypt the filename. A rename to an existing name, on the other hand, * *is* cryptographically possible without the key. However, we take the more * conservative approach and just forbid all no-key renames. * * We also verify that the rename will not violate the constraint that all files * in an encrypted directory tree use the same encryption policy. * * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the * rename would cause inconsistent encryption policies, or another -errno code. 
*/ static inline int fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { if (IS_ENCRYPTED(old_dir) || IS_ENCRYPTED(new_dir)) return __fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry, flags); return 0; } /** * fscrypt_prepare_lookup - prepare to lookup a name in a possibly-encrypted directory * @dir: directory being searched * @dentry: filename being looked up * @fname: (output) the name to use to search the on-disk directory * * Prepare for ->lookup() in a directory which may be encrypted by determining * the name that will actually be used to search the directory on-disk. Lookups * can be done with or without the directory's encryption key; without the key, * filenames are presented in encrypted form. Therefore, we'll try to set up * the directory's encryption key, but even without it the lookup can continue. * * This also installs a custom ->d_revalidate() method which will invalidate the * dentry if it was created without the key and the key is later added. * * Return: 0 on success; -ENOENT if key is unavailable but the filename isn't a * correctly formed encoded ciphertext name, so a negative dentry should be * created; or another -errno code. */ static inline int fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, struct fscrypt_name *fname) { if (IS_ENCRYPTED(dir)) return __fscrypt_prepare_lookup(dir, dentry, fname); memset(fname, 0, sizeof(*fname)); fname->usr_fname = &dentry->d_name; fname->disk_name.name = (unsigned char *)dentry->d_name.name; fname->disk_name.len = dentry->d_name.len; return 0; } /** * fscrypt_prepare_setattr - prepare to change a possibly-encrypted inode's attributes * @dentry: dentry through which the inode is being changed * @attr: attributes to change * * Prepare for ->setattr() on a possibly-encrypted inode. On an encrypted file, * most attribute changes are allowed even without the encryption key. However, * without the encryption key we do have to forbid truncates. This is needed * because the size being truncated to may not be a multiple of the filesystem * block size, and in that case we'd have to decrypt the final block, zero the * portion past i_size, and re-encrypt it. (We *could* allow truncating to a * filesystem block boundary, but it's simpler to just forbid all truncates --- * and we already forbid all other contents modifications without the key.) * * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code * if a problem occurred while setting up the encryption key. */ static inline int fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr) { if (attr->ia_valid & ATTR_SIZE) return fscrypt_require_key(d_inode(dentry)); return 0; } /** * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink * @dir: directory in which the symlink is being created * @target: plaintext symlink target * @len: length of @target excluding null terminator * @max_len: space the filesystem has available to store the symlink target * @disk_link: (out) the on-disk symlink target being prepared * * This function computes the size the symlink target will require on-disk, * stores it in @disk_link->len, and validates it against @max_len. An * encrypted symlink may be longer than the original. * * Additionally, @disk_link->name is set to @target if the symlink will be * unencrypted, but left NULL if the symlink will be encrypted. 
For encrypted * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the * on-disk target later. (The reason for the two-step process is that some * filesystems need to know the size of the symlink target before creating the * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.) * * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long, * -ENOKEY if the encryption key is missing, or another -errno code if a problem * occurred while setting up the encryption key. */ static inline int fscrypt_prepare_symlink(struct inode *dir, const char *target, unsigned int len, unsigned int max_len, struct fscrypt_str *disk_link) { if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir)) return __fscrypt_prepare_symlink(dir, len, max_len, disk_link); disk_link->name = (unsigned char *)target; disk_link->len = len + 1; if (disk_link->len > max_len) return -ENAMETOOLONG; return 0; } /** * fscrypt_encrypt_symlink - encrypt the symlink target if needed * @inode: symlink inode * @target: plaintext symlink target * @len: length of @target excluding null terminator * @disk_link: (in/out) the on-disk symlink target being prepared * * If the symlink target needs to be encrypted, then this function encrypts it * into @disk_link->name. fscrypt_prepare_symlink() must have been called * previously to compute @disk_link->len. If the filesystem did not allocate a * buffer for @disk_link->name after calling fscrypt_prepare_link(), then one * will be kmalloc()'ed and the filesystem will be responsible for freeing it. * * Return: 0 on success, -errno on failure */ static inline int fscrypt_encrypt_symlink(struct inode *inode, const char *target, unsigned int len, struct fscrypt_str *disk_link) { if (IS_ENCRYPTED(inode)) return __fscrypt_encrypt_symlink(inode, target, len, disk_link); return 0; } /* If *pagep is a bounce page, free it and set *pagep to the pagecache page */ static inline void fscrypt_finalize_bounce_page(struct page **pagep) { struct page *page = *pagep; if (fscrypt_is_bounce_page(page)) { *pagep = fscrypt_pagecache_page(page); fscrypt_free_bounce_page(page); } } #endif /* _LINUX_FSCRYPT_H */ tpm_eventlog.h 0000644 00000015042 14722070374 0007430 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_TPM_EVENTLOG_H__ #define __LINUX_TPM_EVENTLOG_H__ #include <linux/tpm.h> #define TCG_EVENT_NAME_LEN_MAX 255 #define MAX_TEXT_EVENT 1000 /* Max event string length */ #define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */ #define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1 #define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2 #ifdef CONFIG_PPC64 #define do_endian_conversion(x) be32_to_cpu(x) #else #define do_endian_conversion(x) x #endif enum bios_platform_class { BIOS_CLIENT = 0x00, BIOS_SERVER = 0x01, }; struct tcpa_event { u32 pcr_index; u32 event_type; u8 pcr_value[20]; /* SHA1 */ u32 event_size; u8 event_data[0]; }; enum tcpa_event_types { PREBOOT = 0, POST_CODE, UNUSED, NO_ACTION, SEPARATOR, ACTION, EVENT_TAG, SCRTM_CONTENTS, SCRTM_VERSION, CPU_MICROCODE, PLATFORM_CONFIG_FLAGS, TABLE_OF_DEVICES, COMPACT_HASH, IPL, IPL_PARTITION_DATA, NONHOST_CODE, NONHOST_CONFIG, NONHOST_INFO, }; struct tcpa_pc_event { u32 event_id; u32 event_size; u8 event_data[0]; }; enum tcpa_pc_event_ids { SMBIOS = 1, BIS_CERT, POST_BIOS_ROM, ESCD, CMOS, NVRAM, OPTION_ROM_EXEC, OPTION_ROM_CONFIG, OPTION_ROM_MICROCODE = 10, S_CRTM_VERSION, S_CRTM_CONTENTS, POST_CONTENTS, HOST_TABLE_OF_DEVICES, }; /* http://www.trustedcomputinggroup.org/tcg-efi-protocol-specification/ */ struct 
tcg_efi_specid_event_algs { u16 alg_id; u16 digest_size; } __packed; #define TCG_SPECID_SIG "Spec ID Event03" struct tcg_efi_specid_event_head { u8 signature[16]; u32 platform_class; u8 spec_version_minor; u8 spec_version_major; u8 spec_errata; u8 uintnsize; u32 num_algs; struct tcg_efi_specid_event_algs digest_sizes[]; } __packed; struct tcg_pcr_event { u32 pcr_idx; u32 event_type; u8 digest[20]; u32 event_size; u8 event[0]; } __packed; struct tcg_event_field { u32 event_size; u8 event[0]; } __packed; struct tcg_pcr_event2_head { u32 pcr_idx; u32 event_type; u32 count; struct tpm_digest digests[]; } __packed; struct tcg_algorithm_size { u16 algorithm_id; u16 algorithm_size; }; struct tcg_algorithm_info { u8 signature[16]; u32 platform_class; u8 spec_version_minor; u8 spec_version_major; u8 spec_errata; u8 uintn_size; u32 number_of_algorithms; struct tcg_algorithm_size digest_sizes[]; }; #ifndef TPM_MEMREMAP #define TPM_MEMREMAP(start, size) NULL #endif #ifndef TPM_MEMUNMAP #define TPM_MEMUNMAP(start, size) do{} while(0) #endif /** * __calc_tpm2_event_size - calculate the size of a TPM2 event log entry * @event: Pointer to the event whose size should be calculated * @event_header: Pointer to the initial event containing the digest lengths * @do_mapping: Whether or not the event needs to be mapped * * The TPM2 event log format can contain multiple digests corresponding to * separate PCR banks, and also contains a variable length of the data that * was measured. This requires knowledge of how long each digest type is, * and this information is contained within the first event in the log. * * We calculate the length by examining the number of events, and then looking * at each event in turn to determine how much space is used for events in * total. Once we've done this we know the offset of the data length field, * and can calculate the total size of the event. * * Return: size of the event on success, 0 on failure */ static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, struct tcg_pcr_event *event_header, bool do_mapping) { struct tcg_efi_specid_event_head *efispecid; struct tcg_event_field *event_field; void *mapping = NULL; int mapping_size; void *marker; void *marker_start; u32 halg_size; size_t size; u16 halg; int i; int j; u32 count, event_type; const u8 zero_digest[sizeof(event_header->digest)] = {0}; marker = event; marker_start = marker; marker = marker + sizeof(event->pcr_idx) + sizeof(event->event_type) + sizeof(event->count); /* Map the event header */ if (do_mapping) { mapping_size = marker - marker_start; mapping = TPM_MEMREMAP((unsigned long)marker_start, mapping_size); if (!mapping) { size = 0; goto out; } } else { mapping = marker_start; } event = (struct tcg_pcr_event2_head *)mapping; /* * The loop below will unmap these fields if the log is larger than * one page, so save them here for reference: */ count = event->count; event_type = event->event_type; /* Verify that it's the log header */ if (event_header->pcr_idx != 0 || event_header->event_type != NO_ACTION || memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) { size = 0; goto out; } efispecid = (struct tcg_efi_specid_event_head *)event_header->event; /* * Perform validation of the event in order to identify malformed * events. This function may be asked to parse arbitrary byte sequences * immediately following a valid event log. The caller expects this * function to recognize that the byte sequence is not a valid event * and to return an event size of 0. 
*/ if (memcmp(efispecid->signature, TCG_SPECID_SIG, sizeof(TCG_SPECID_SIG)) || !efispecid->num_algs || count != efispecid->num_algs) { size = 0; goto out; } for (i = 0; i < count; i++) { halg_size = sizeof(event->digests[i].alg_id); /* Map the digest's algorithm identifier */ if (do_mapping) { TPM_MEMUNMAP(mapping, mapping_size); mapping_size = halg_size; mapping = TPM_MEMREMAP((unsigned long)marker, mapping_size); if (!mapping) { size = 0; goto out; } } else { mapping = marker; } memcpy(&halg, mapping, halg_size); marker = marker + halg_size; for (j = 0; j < efispecid->num_algs; j++) { if (halg == efispecid->digest_sizes[j].alg_id) { marker += efispecid->digest_sizes[j].digest_size; break; } } /* Algorithm without known length. Such event is unparseable. */ if (j == efispecid->num_algs) { size = 0; goto out; } } /* * Map the event size - we don't read from the event itself, so * we don't need to map it */ if (do_mapping) { TPM_MEMUNMAP(mapping, mapping_size); mapping_size += sizeof(event_field->event_size); mapping = TPM_MEMREMAP((unsigned long)marker, mapping_size); if (!mapping) { size = 0; goto out; } } else { mapping = marker; } event_field = (struct tcg_event_field *)mapping; marker = marker + sizeof(event_field->event_size) + event_field->event_size; size = marker - marker_start; if (event_type == 0 && event_field->event_size == 0) size = 0; out: if (do_mapping) TPM_MEMUNMAP(mapping, mapping_size); return size; } #endif apm-emulation.h 0000644 00000003077 14722070374 0007502 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* -*- linux-c -*- * * (C) 2003 zecke@handhelds.org * * based on arch/arm/kernel/apm.c * factor out the information needed by architectures to provide * apm status */ #ifndef __LINUX_APM_EMULATION_H #define __LINUX_APM_EMULATION_H #include <linux/apm_bios.h> /* * This structure gets filled in by the machine specific 'get_power_status' * implementation. Any fields which are not set default to a safe value. */ struct apm_power_info { unsigned char ac_line_status; #define APM_AC_OFFLINE 0 #define APM_AC_ONLINE 1 #define APM_AC_BACKUP 2 #define APM_AC_UNKNOWN 0xff unsigned char battery_status; #define APM_BATTERY_STATUS_HIGH 0 #define APM_BATTERY_STATUS_LOW 1 #define APM_BATTERY_STATUS_CRITICAL 2 #define APM_BATTERY_STATUS_CHARGING 3 #define APM_BATTERY_STATUS_NOT_PRESENT 4 #define APM_BATTERY_STATUS_UNKNOWN 0xff unsigned char battery_flag; #define APM_BATTERY_FLAG_HIGH (1 << 0) #define APM_BATTERY_FLAG_LOW (1 << 1) #define APM_BATTERY_FLAG_CRITICAL (1 << 2) #define APM_BATTERY_FLAG_CHARGING (1 << 3) #define APM_BATTERY_FLAG_NOT_PRESENT (1 << 7) #define APM_BATTERY_FLAG_UNKNOWN 0xff int battery_life; int time; int units; #define APM_UNITS_MINS 0 #define APM_UNITS_SECS 1 #define APM_UNITS_UNKNOWN -1 }; /* * This allows machines to provide their own "apm get power status" function. */ extern void (*apm_get_power_status)(struct apm_power_info *); /* * Queue an event (APM_SYS_SUSPEND or APM_CRITICAL_SUSPEND) */ void apm_queue_event(apm_event_t event); #endif /* __LINUX_APM_EMULATION_H */ of_gpio.h 0000644 00000007725 14722070374 0006360 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * OF helpers for the GPIO API * * Copyright (c) 2007-2008 MontaVista Software, Inc. * * Author: Anton Vorontsov <avorontsov@ru.mvista.com> */ #ifndef __LINUX_OF_GPIO_H #define __LINUX_OF_GPIO_H #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/gpio.h> #include <linux/of.h> struct device_node; /* * This is Linux-specific flags. 
 * By default, controllers' and Linux' mappings match, but GPIO controllers
 * are free to translate their own flags to Linux-specific ones in their
 * .xlate callback. A 1:1 mapping is recommended, though.
 */
enum of_gpio_flags {
	OF_GPIO_ACTIVE_LOW = 0x1,
	OF_GPIO_SINGLE_ENDED = 0x2,
	OF_GPIO_OPEN_DRAIN = 0x4,
	OF_GPIO_TRANSITORY = 0x8,
	OF_GPIO_PULL_UP = 0x10,
	OF_GPIO_PULL_DOWN = 0x20,
};

#ifdef CONFIG_OF_GPIO
/*
 * OF GPIO chip for memory mapped banks
 */
struct of_mm_gpio_chip {
	struct gpio_chip gc;
	void (*save_regs)(struct of_mm_gpio_chip *mm_gc);
	void __iomem *regs;
};

static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
{
	return container_of(gc, struct of_mm_gpio_chip, gc);
}

extern int of_get_named_gpio_flags(struct device_node *np,
		const char *list_name, int index, enum of_gpio_flags *flags);

extern int of_mm_gpiochip_add_data(struct device_node *np,
				   struct of_mm_gpio_chip *mm_gc,
				   void *data);
static inline int of_mm_gpiochip_add(struct device_node *np,
				     struct of_mm_gpio_chip *mm_gc)
{
	return of_mm_gpiochip_add_data(np, mm_gc, NULL);
}
extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);

#else /* CONFIG_OF_GPIO */

/* Drivers may not strictly depend on the GPIO support, so let them link. */
static inline int of_get_named_gpio_flags(struct device_node *np,
		const char *list_name, int index, enum of_gpio_flags *flags)
{
	if (flags)
		*flags = 0;

	return -ENOSYS;
}

#endif /* CONFIG_OF_GPIO */

/**
 * of_gpio_named_count() - Count GPIOs for a device
 * @np: device node to count GPIOs for
 * @propname: property name containing gpio specifier(s)
 *
 * The function returns the count of GPIOs specified for a node.
 * Note that the empty GPIO specifiers count too. Returns either
 *   the number of gpios defined in the property,
 *   -EINVAL for an incorrectly formed gpios property, or
 *   -ENOENT for a missing gpios property.
 *
 * Example:
 * gpios = <0
 *          &gpio1 1 2
 *          0
 *          &gpio2 3 4>;
 *
 * The above example defines four GPIOs, two of which are not specified.
 * This function will return '4'.
 */
static inline int of_gpio_named_count(struct device_node *np,
				      const char *propname)
{
	return of_count_phandle_with_args(np, propname, "#gpio-cells");
}

/**
 * of_gpio_count() - Count GPIOs for a device
 * @np: device node to count GPIOs for
 *
 * Same as of_gpio_named_count(), but hard coded to use the 'gpios' property
 */
static inline int of_gpio_count(struct device_node *np)
{
	return of_gpio_named_count(np, "gpios");
}

static inline int of_get_gpio_flags(struct device_node *np, int index,
				    enum of_gpio_flags *flags)
{
	return of_get_named_gpio_flags(np, "gpios", index, flags);
}

/**
 * of_get_named_gpio() - Get a GPIO number to use with GPIO API
 * @np: device node to get GPIO from
 * @propname: Name of property containing gpio specifier(s)
 * @index: index of the GPIO
 *
 * Returns the GPIO number to use with the Linux generic GPIO API, or one of
 * the errno values on error.
 */
static inline int of_get_named_gpio(struct device_node *np,
				    const char *propname, int index)
{
	return of_get_named_gpio_flags(np, propname, index, NULL);
}
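/*
 * Usage example (illustrative sketch; the property name and the wrapper are
 * hypothetical): fetching a reset GPIO from a device-tree node.
 */
static inline int example_get_reset_gpio(struct device_node *np)
{
	int gpio = of_get_named_gpio(np, "reset-gpios", 0);

	if (gpio < 0)
		return gpio;	/* negative errno, e.g. -ENOENT */

	/* gpio is now usable with the legacy GPIO API (gpio_request() etc.) */
	return gpio;
}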
/**
 * of_get_gpio() - Get a GPIO number to use with GPIO API
 * @np: device node to get GPIO from
 * @index: index of the GPIO
 *
 * Returns the GPIO number to use with the Linux generic GPIO API, or one of
 * the errno values on error.
 */
static inline int of_get_gpio(struct device_node *np, int index)
{
	return of_get_gpio_flags(np, index, NULL);
}

#endif /* __LINUX_OF_GPIO_H */
attribute_container.h 0000644 00000004742 14722070374 0010777 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * attribute_container.h - a generic container for all classes
 *
 * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
 */
#ifndef _ATTRIBUTE_CONTAINER_H_
#define _ATTRIBUTE_CONTAINER_H_

#include <linux/list.h>
#include <linux/klist.h>

struct device;

struct attribute_container {
	struct list_head	node;
	struct klist		containers;
	struct class		*class;
	const struct attribute_group *grp;
	struct device_attribute **attrs;
	int (*match)(struct attribute_container *, struct device *);
#define	ATTRIBUTE_CONTAINER_NO_CLASSDEVS	0x01
	unsigned long		flags;
};

static inline int
attribute_container_no_classdevs(struct attribute_container *atc)
{
	return atc->flags & ATTRIBUTE_CONTAINER_NO_CLASSDEVS;
}

static inline void
attribute_container_set_no_classdevs(struct attribute_container *atc)
{
	atc->flags |= ATTRIBUTE_CONTAINER_NO_CLASSDEVS;
}

int attribute_container_register(struct attribute_container *cont);
int __must_check attribute_container_unregister(struct attribute_container *cont);
void attribute_container_create_device(struct device *dev,
				       int (*fn)(struct attribute_container *,
						 struct device *,
						 struct device *));
void attribute_container_add_device(struct device *dev,
				    int (*fn)(struct attribute_container *,
					      struct device *,
					      struct device *));
void attribute_container_remove_device(struct device *dev,
				       void (*fn)(struct attribute_container *,
						  struct device *,
						  struct device *));
void attribute_container_device_trigger(struct device *dev,
					int (*fn)(struct attribute_container *,
						  struct device *,
						  struct device *));
void attribute_container_trigger(struct device *dev,
				 int (*fn)(struct attribute_container *,
					   struct device *));
int attribute_container_add_attrs(struct device *classdev);
int attribute_container_add_class_device(struct device *classdev);
int attribute_container_add_class_device_adapter(struct attribute_container *cont,
						 struct device *dev,
						 struct device *classdev);
void attribute_container_remove_attrs(struct device *classdev);
void attribute_container_class_device_del(struct device *classdev);
struct attribute_container *attribute_container_classdev_to_container(struct device *);
struct device *attribute_container_find_class_device(struct attribute_container *, struct device *);
struct device_attribute **attribute_container_classdev_to_attrs(const struct device *classdev);

#endif
stringify.h 0000644 00000000525 14722070374 0006743 0 ustar 00 #ifndef __LINUX_STRINGIFY_H
#define __LINUX_STRINGIFY_H

/* Indirect stringification. Doing two levels allows the parameter to be a
 * macro itself. For example, compile with -DFOO=bar, __stringify(FOO)
 * converts to "bar".
 */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)
#endif /* !__LINUX_STRINGIFY_H */
bma150.h 0000644 00000002406 14722070374 0005712 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2011 Bosch Sensortec GmbH
 * Copyright (c) 2011 Unixphere
 */
#ifndef _BMA150_H_
#define _BMA150_H_

#define BMA150_DRIVER		"bma150"

#define BMA150_RANGE_2G		0
#define BMA150_RANGE_4G		1
#define BMA150_RANGE_8G		2

#define BMA150_BW_25HZ		0
#define BMA150_BW_50HZ		1
#define BMA150_BW_100HZ		2
#define BMA150_BW_190HZ		3
#define BMA150_BW_375HZ		4
#define BMA150_BW_750HZ		5
#define BMA150_BW_1500HZ	6

struct bma150_cfg {
	bool any_motion_int;		/* Set to enable any-motion interrupt */
	bool hg_int;			/* Set to enable high-G interrupt */
	bool lg_int;			/* Set to enable low-G interrupt */
	unsigned char any_motion_dur;	/* Any-motion duration */
	unsigned char any_motion_thres;	/* Any-motion threshold */
	unsigned char hg_hyst;		/* High-G hysteresis */
	unsigned char hg_dur;		/* High-G duration */
	unsigned char hg_thres;		/* High-G threshold */
	unsigned char lg_hyst;		/* Low-G hysteresis */
	unsigned char lg_dur;		/* Low-G duration */
	unsigned char lg_thres;		/* Low-G threshold */
	unsigned char range;		/* one of BMA150_RANGE_xxx */
	unsigned char bandwidth;	/* one of BMA150_BW_xxx */
};

struct bma150_platform_data {
	struct bma150_cfg cfg;
	int (*irq_gpio_cfg)(void);
};

#endif /* _BMA150_H_ */
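/*
 * Usage example (illustrative sketch; the values are arbitrary placeholders,
 * not recommended settings): board code describing a BMA150 configured for a
 * low-G interrupt.
 */
static struct bma150_platform_data example_bma150_pdata = {
	.cfg = {
		.lg_int		= true,
		.lg_dur		= 10,	/* placeholder duration */
		.lg_thres	= 20,	/* placeholder threshold */
		.range		= BMA150_RANGE_2G,
		.bandwidth	= BMA150_BW_25HZ,
	},
};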
rcutiny.h 0000644 00000005127 14722070374 0006425 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

/* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; }

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

extern void rcu_barrier(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}

static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_rcu(head, func);
}

void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current); \
	} while (0)

static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifndef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_is_watching(void) { return true; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
#define rcutree_prepare_cpu	NULL
#define rcutree_online_cpu	NULL
#define rcutree_offline_cpu	NULL
#define rcutree_dead_cpu	NULL
#define rcutree_dying_cpu	NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_RCUTINY_H */
if_tunnel.h 0000644 00000000631 14722070374 0006706 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IF_TUNNEL_H_
#define _IF_TUNNEL_H_

#include <linux/ip.h>
#include <linux/in6.h>
#include <uapi/linux/if_tunnel.h>
#include <linux/u64_stats_sync.h>

/*
 * Locking: hash tables are protected by RCU and RTNL
 */
#define for_each_ip_tunnel_rcu(pos, start) \
	for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next))

#endif /* _IF_TUNNEL_H_ */
list_bl.h 0000644 00000011451 14722070374 0006355 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_BL_H
#define _LINUX_LIST_BL_H

#include <linux/list.h>
#include <linux/bit_spinlock.h>

/*
 * Special version of lists, where head of the list has a lock in the lowest
 * bit. This is useful for scalable hash tables without increasing memory
 * footprint overhead.
 *
 * For modification operations, the 0 bit of hlist_bl_head->first
 * pointer must be set.
 *
 * With some small modifications, this can easily be adapted to store several
 * arbitrary bits (not just a single lock bit), if the need arises to store
 * some fast and compact auxiliary data.
*/ #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) #define LIST_BL_LOCKMASK 1UL #else #define LIST_BL_LOCKMASK 0UL #endif #ifdef CONFIG_DEBUG_LIST #define LIST_BL_BUG_ON(x) BUG_ON(x) #else #define LIST_BL_BUG_ON(x) #endif struct hlist_bl_head { struct hlist_bl_node *first; }; struct hlist_bl_node { struct hlist_bl_node *next, **pprev; }; #define INIT_HLIST_BL_HEAD(ptr) \ ((ptr)->first = NULL) static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) { h->next = NULL; h->pprev = NULL; } #define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member) static inline bool hlist_bl_unhashed(const struct hlist_bl_node *h) { return !h->pprev; } static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h) { return (struct hlist_bl_node *) ((unsigned long)h->first & ~LIST_BL_LOCKMASK); } static inline void hlist_bl_set_first(struct hlist_bl_head *h, struct hlist_bl_node *n) { LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != LIST_BL_LOCKMASK); h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); } static inline bool hlist_bl_empty(const struct hlist_bl_head *h) { return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK); } static inline void hlist_bl_add_head(struct hlist_bl_node *n, struct hlist_bl_head *h) { struct hlist_bl_node *first = hlist_bl_first(h); n->next = first; if (first) first->pprev = &n->next; n->pprev = &h->first; hlist_bl_set_first(h, n); } static inline void hlist_bl_add_before(struct hlist_bl_node *n, struct hlist_bl_node *next) { struct hlist_bl_node **pprev = next->pprev; n->pprev = pprev; n->next = next; next->pprev = &n->next; /* pprev may be `first`, so be careful not to lose the lock bit */ WRITE_ONCE(*pprev, (struct hlist_bl_node *) ((uintptr_t)n | ((uintptr_t)*pprev & LIST_BL_LOCKMASK))); } static inline void hlist_bl_add_behind(struct hlist_bl_node *n, struct hlist_bl_node *prev) { n->next = prev->next; n->pprev = &prev->next; prev->next = n; if (n->next) n->next->pprev = &n->next; } static inline void __hlist_bl_del(struct hlist_bl_node *n) { struct hlist_bl_node *next = n->next; struct hlist_bl_node **pprev = n->pprev; LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); /* pprev may be `first`, so be careful not to lose the lock bit */ WRITE_ONCE(*pprev, (struct hlist_bl_node *) ((unsigned long)next | ((unsigned long)*pprev & LIST_BL_LOCKMASK))); if (next) next->pprev = pprev; } static inline void hlist_bl_del(struct hlist_bl_node *n) { __hlist_bl_del(n); n->next = LIST_POISON1; n->pprev = LIST_POISON2; } static inline void hlist_bl_del_init(struct hlist_bl_node *n) { if (!hlist_bl_unhashed(n)) { __hlist_bl_del(n); INIT_HLIST_BL_NODE(n); } } static inline void hlist_bl_lock(struct hlist_bl_head *b) { bit_spin_lock(0, (unsigned long *)b); } static inline void hlist_bl_unlock(struct hlist_bl_head *b) { __bit_spin_unlock(0, (unsigned long *)b); } static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) { return bit_spin_is_locked(0, (unsigned long *)b); } /** * hlist_bl_for_each_entry - iterate over list of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_node to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. 
* */ #define hlist_bl_for_each_entry(tpos, pos, head, member) \ for (pos = hlist_bl_first(head); \ pos && \ ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) /** * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_node to use as a loop cursor. * @n: another &struct hlist_node to use as temporary storage * @head: the head for your list. * @member: the name of the hlist_node within the struct. */ #define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \ for (pos = hlist_bl_first(head); \ pos && ({ n = pos->next; 1; }) && \ ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \ pos = n) #endif davinci_emac.h 0000644 00000002070 14722070374 0007324 0 ustar 00 /* * TI DaVinci EMAC platform support * * Author: Kevin Hilman, Deep Root Systems, LLC * * 2007 (c) Deep Root Systems, LLC. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #ifndef _LINUX_DAVINCI_EMAC_H #define _LINUX_DAVINCI_EMAC_H #include <linux/if_ether.h> #include <linux/nvmem-consumer.h> struct mdio_platform_data { unsigned long bus_freq; }; struct emac_platform_data { char mac_addr[ETH_ALEN]; u32 ctrl_reg_offset; u32 ctrl_mod_reg_offset; u32 ctrl_ram_offset; u32 hw_ram_addr; u32 ctrl_ram_size; /* * phy_id can be one of the following: * - NULL : use the first phy on the bus, * - "" : force to 100/full, no mdio control * - "<bus>:<addr>" : use the specified bus and phy */ const char *phy_id; u8 rmii_en; u8 version; bool no_bd_ram; void (*interrupt_enable) (void); void (*interrupt_disable) (void); }; enum { EMAC_VERSION_1, /* DM644x */ EMAC_VERSION_2, /* DM646x */ }; #endif crc16.h 0000644 00000001034 14722070374 0005637 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * crc16.h - CRC-16 routine * * Implements the standard CRC-16: * Width 16 * Poly 0x8005 (x^16 + x^15 + x^2 + 1) * Init 0 * * Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com> */ #ifndef __CRC16_H #define __CRC16_H #include <linux/types.h> extern u16 const crc16_table[256]; extern u16 crc16(u16 crc, const u8 *buffer, size_t len); static inline u16 crc16_byte(u16 crc, const u8 data) { return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff]; } #endif /* __CRC16_H */ futex.h 0000644 00000004701 14722070374 0006060 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FUTEX_H #define _LINUX_FUTEX_H #include <linux/sched.h> #include <linux/ktime.h> #include <uapi/linux/futex.h> struct inode; struct mm_struct; struct task_struct; /* * Futexes are matched on equal values of this key. * The key type depends on whether it's a shared or private mapping. * Don't rearrange members without looking at hash_futex(). * * offset is aligned to a multiple of sizeof(u32) (== 4) by definition. 
* We use the two low order bits of offset to tell what is the kind of key : * 00 : Private process futex (PTHREAD_PROCESS_PRIVATE) * (no reference on an inode or mm) * 01 : Shared futex (PTHREAD_PROCESS_SHARED) * mapped on a file (reference on the underlying inode) * 10 : Shared futex (PTHREAD_PROCESS_SHARED) * (but private mapping on an mm, and reference taken on it) */ #define FUT_OFF_INODE 1 /* We set bit 0 if key has a reference on inode */ #define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */ union futex_key { struct { u64 i_seq; unsigned long pgoff; unsigned int offset; } shared; struct { union { struct mm_struct *mm; u64 __tmp; }; unsigned long address; unsigned int offset; } private; struct { u64 ptr; unsigned long word; unsigned int offset; } both; }; #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } } #ifdef CONFIG_FUTEX enum { FUTEX_STATE_OK, FUTEX_STATE_EXITING, FUTEX_STATE_DEAD, }; static inline void futex_init_task(struct task_struct *tsk) { tsk->robust_list = NULL; #ifdef CONFIG_COMPAT tsk->compat_robust_list = NULL; #endif INIT_LIST_HEAD(&tsk->pi_state_list); tsk->pi_state_cache = NULL; tsk->futex_state = FUTEX_STATE_OK; mutex_init(&tsk->futex_exit_mutex); } void futex_exit_recursive(struct task_struct *tsk); void futex_exit_release(struct task_struct *tsk); void futex_exec_release(struct task_struct *tsk); long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3); #else static inline void futex_init_task(struct task_struct *tsk) { } static inline void futex_exit_recursive(struct task_struct *tsk) { } static inline void futex_exit_release(struct task_struct *tsk) { } static inline void futex_exec_release(struct task_struct *tsk) { } static inline long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) { return -EINVAL; } #endif #endif irq_work.h 0000644 00000002535 14722070374 0006565 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQ_WORK_H #define _LINUX_IRQ_WORK_H #include <linux/llist.h> /* * An entry can be in one of four states: * * free NULL, 0 -> {claimed} : free to be used * claimed NULL, 3 -> {pending} : claimed to be enqueued * pending next, 3 -> {busy} : queued, pending callback * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed */ #define IRQ_WORK_PENDING BIT(0) #define IRQ_WORK_BUSY BIT(1) /* Doesn't want IPI, wait for tick: */ #define IRQ_WORK_LAZY BIT(2) #define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) struct irq_work { unsigned long flags; struct llist_node llnode; void (*func)(struct irq_work *); }; static inline void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) { work->flags = 0; work->func = func; } #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), } bool irq_work_queue(struct irq_work *work); bool irq_work_queue_on(struct irq_work *work, int cpu); void irq_work_tick(void); void irq_work_sync(struct irq_work *work); #ifdef CONFIG_IRQ_WORK #include <asm/irq_work.h> void irq_work_run(void); bool irq_work_needs_cpu(void); #else static inline bool irq_work_needs_cpu(void) { return false; } static inline void irq_work_run(void) { } #endif #endif /* _LINUX_IRQ_WORK_H */ restart_block.h 0000644 00000002130 14722070374 0007555 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Common syscall restarting data */ #ifndef __LINUX_RESTART_BLOCK_H #define __LINUX_RESTART_BLOCK_H #include <linux/compiler.h> #include 
<linux/types.h> #include <linux/time64.h> struct timespec; struct old_timespec32; struct pollfd; enum timespec_type { TT_NONE = 0, TT_NATIVE = 1, TT_COMPAT = 2, }; /* * System call restart block. */ struct restart_block { long (*fn)(struct restart_block *); union { /* For futex_wait and futex_wait_requeue_pi */ struct { u32 __user *uaddr; u32 val; u32 flags; u32 bitset; u64 time; u32 __user *uaddr2; } futex; /* For nanosleep */ struct { clockid_t clockid; enum timespec_type type; union { struct __kernel_timespec __user *rmtp; struct old_timespec32 __user *compat_rmtp; }; u64 expires; } nanosleep; /* For poll */ struct { struct pollfd __user *ufds; int nfds; int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } poll; }; }; extern long do_no_restart_syscall(struct restart_block *parm); #endif /* __LINUX_RESTART_BLOCK_H */ irqhandler.h 0000644 00000000552 14722070374 0007056 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQHANDLER_H #define _LINUX_IRQHANDLER_H /* * Interrupt flow handler typedefs are defined here to avoid circular * include dependencies. */ struct irq_desc; struct irq_data; typedef void (*irq_flow_handler_t)(struct irq_desc *desc); typedef void (*irq_preflow_handler_t)(struct irq_data *data); #endif in6.h 0000644 00000003207 14722070374 0005421 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Types and definitions for AF_INET6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Sources: * IPv6 Program Interfaces for BSD Systems * <draft-ietf-ipngwg-bsd-api-05.txt> * * Advanced Sockets API for IPv6 * <draft-stevens-advanced-api-00.txt> */ #ifndef _LINUX_IN6_H #define _LINUX_IN6_H #include <uapi/linux/in6.h> /* IPv6 Wildcard Address (::) and Loopback Address (::1) defined in RFC2553 * NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined * in network byte order, not in host byte order as are the IPv4 equivalents */ extern const struct in6_addr in6addr_any; #define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } } extern const struct in6_addr in6addr_loopback; #define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } extern const struct in6_addr in6addr_linklocal_allnodes; #define IN6ADDR_LINKLOCAL_ALLNODES_INIT \ { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } extern const struct in6_addr in6addr_linklocal_allrouters; #define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \ { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } } extern const struct in6_addr in6addr_interfacelocal_allnodes; #define IN6ADDR_INTERFACELOCAL_ALLNODES_INIT \ { { { 0xff,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } extern const struct in6_addr in6addr_interfacelocal_allrouters; #define IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT \ { { { 0xff,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } } extern const struct in6_addr in6addr_sitelocal_allrouters; #define IN6ADDR_SITELOCAL_ALLROUTERS_INIT \ { { { 0xff,5,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } } #endif enclosure.h 0000644 00000010021 14722070374 0006714 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Enclosure Services * * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> * **----------------------------------------------------------------------------- ** ** **----------------------------------------------------------------------------- */ #ifndef _LINUX_ENCLOSURE_H_ #define _LINUX_ENCLOSURE_H_ #include <linux/device.h> #include <linux/list.h> /* A few generic types ... 
taken from ses-2 */ enum enclosure_component_type { ENCLOSURE_COMPONENT_DEVICE = 0x01, ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07, ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14, ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15, ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18, }; /* ses-2 common element status */ enum enclosure_status { ENCLOSURE_STATUS_UNSUPPORTED = 0, ENCLOSURE_STATUS_OK, ENCLOSURE_STATUS_CRITICAL, ENCLOSURE_STATUS_NON_CRITICAL, ENCLOSURE_STATUS_UNRECOVERABLE, ENCLOSURE_STATUS_NOT_INSTALLED, ENCLOSURE_STATUS_UNKNOWN, ENCLOSURE_STATUS_UNAVAILABLE, /* last element for counting purposes */ ENCLOSURE_STATUS_MAX }; /* SFF-8485 activity light settings */ enum enclosure_component_setting { ENCLOSURE_SETTING_DISABLED = 0, ENCLOSURE_SETTING_ENABLED = 1, ENCLOSURE_SETTING_BLINK_A_ON_OFF = 2, ENCLOSURE_SETTING_BLINK_A_OFF_ON = 3, ENCLOSURE_SETTING_BLINK_B_ON_OFF = 6, ENCLOSURE_SETTING_BLINK_B_OFF_ON = 7, }; struct enclosure_device; struct enclosure_component; struct enclosure_component_callbacks { void (*get_status)(struct enclosure_device *, struct enclosure_component *); int (*set_status)(struct enclosure_device *, struct enclosure_component *, enum enclosure_status); void (*get_fault)(struct enclosure_device *, struct enclosure_component *); int (*set_fault)(struct enclosure_device *, struct enclosure_component *, enum enclosure_component_setting); void (*get_active)(struct enclosure_device *, struct enclosure_component *); int (*set_active)(struct enclosure_device *, struct enclosure_component *, enum enclosure_component_setting); void (*get_locate)(struct enclosure_device *, struct enclosure_component *); int (*set_locate)(struct enclosure_device *, struct enclosure_component *, enum enclosure_component_setting); void (*get_power_status)(struct enclosure_device *, struct enclosure_component *); int (*set_power_status)(struct enclosure_device *, struct enclosure_component *, int); int (*show_id)(struct enclosure_device *, char *buf); }; struct enclosure_component { void *scratch; struct device cdev; struct device *dev; enum enclosure_component_type type; int number; int fault; int active; int locate; int slot; enum enclosure_status status; int power_status; }; struct enclosure_device { void *scratch; struct list_head node; struct device edev; struct enclosure_component_callbacks *cb; int components; struct enclosure_component component[0]; }; static inline struct enclosure_device * to_enclosure_device(struct device *dev) { return container_of(dev, struct enclosure_device, edev); } static inline struct enclosure_component * to_enclosure_component(struct device *dev) { return container_of(dev, struct enclosure_component, cdev); } struct enclosure_device * enclosure_register(struct device *, const char *, int, struct enclosure_component_callbacks *); void enclosure_unregister(struct enclosure_device *); struct enclosure_component * enclosure_component_alloc(struct enclosure_device *, unsigned int, enum enclosure_component_type, const char *); int enclosure_component_register(struct enclosure_component *); int enclosure_add_device(struct enclosure_device *enclosure, int component, struct device *dev); int enclosure_remove_device(struct enclosure_device *, struct device *); struct enclosure_device *enclosure_find(struct device *dev, struct enclosure_device *start); int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *), void *data); #endif /* _LINUX_ENCLOSURE_H_ */ max17040_battery.h 0000644 00000000510 14722070374 0007632 0 
ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2009 Samsung Electronics * Minkyu Kang <mk7.kang@samsung.com> */ #ifndef __MAX17040_BATTERY_H_ #define __MAX17040_BATTERY_H_ struct max17040_platform_data { int (*battery_online)(void); int (*charger_online)(void); int (*charger_enable)(void); }; #endif slimbus.h 0000644 00000015670 14722070374 0006412 0 ustar 00 // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2011-2017, The Linux Foundation */ #ifndef _LINUX_SLIMBUS_H #define _LINUX_SLIMBUS_H #include <linux/device.h> #include <linux/module.h> #include <linux/completion.h> #include <linux/mod_devicetable.h> extern struct bus_type slimbus_bus; /** * struct slim_eaddr - Enumeration address for a SLIMbus device * @instance: Instance value * @dev_index: Device index * @prod_code: Product code * @manf_id: Manufacturer Id for the device */ struct slim_eaddr { u8 instance; u8 dev_index; u16 prod_code; u16 manf_id; } __packed; /** * enum slim_device_status - slim device status * @SLIM_DEVICE_STATUS_DOWN: Slim device is absent or not reported yet. * @SLIM_DEVICE_STATUS_UP: Slim device is announced on the bus. * @SLIM_DEVICE_STATUS_RESERVED: Reserved for future use. */ enum slim_device_status { SLIM_DEVICE_STATUS_DOWN = 0, SLIM_DEVICE_STATUS_UP, SLIM_DEVICE_STATUS_RESERVED, }; struct slim_controller; /** * struct slim_device - Slim device handle. * @dev: Driver model representation of the device. * @e_addr: Enumeration address of this device. * @status: slim device status * @ctrl: slim controller instance. * @laddr: 1-byte Logical address of this device. * @is_laddr_valid: indicates if the laddr is valid or not * @stream_list: List of streams on this device * @stream_list_lock: lock to protect the stream list * * This is the client/device handle returned when a SLIMbus * device is registered with a controller. * Pointer to this structure is used by client-driver as a handle. */ struct slim_device { struct device dev; struct slim_eaddr e_addr; struct slim_controller *ctrl; enum slim_device_status status; u8 laddr; bool is_laddr_valid; struct list_head stream_list; spinlock_t stream_list_lock; }; #define to_slim_device(d) container_of(d, struct slim_device, dev) /** * struct slim_driver - SLIMbus 'generic device' (slave) device driver * (similar to 'spi_device' on SPI) * @probe: Binds this driver to a SLIMbus device. * @remove: Unbinds this driver from the SLIMbus device. * @shutdown: Standard shutdown callback used during powerdown/halt. * @device_status: This callback is called when * - The device reports present and gets a laddr assigned * - The device reports absent, or the bus goes down. * @driver: SLIMbus device drivers should initialize name and owner field of * this structure * @id_table: List of SLIMbus devices supported by this driver */ struct slim_driver { int (*probe)(struct slim_device *sl); void (*remove)(struct slim_device *sl); void (*shutdown)(struct slim_device *sl); int (*device_status)(struct slim_device *sl, enum slim_device_status s); struct device_driver driver; const struct slim_device_id *id_table; }; #define to_slim_driver(d) container_of(d, struct slim_driver, driver) /** * struct slim_val_inf - Slimbus value or information element * @start_offset: Specifies starting offset in information/value element map * @rbuf: buffer to read the values * @wbuf: buffer to write * @num_bytes: up to 16.
This ensures that the message will fit the slice size * per SLIMbus spec * @comp: completion for asynchronous operations, valid only if TID is * required for transaction, like REQUEST operations. * Rest of the transactions are synchronous anyway. */ struct slim_val_inf { u16 start_offset; u8 num_bytes; u8 *rbuf; const u8 *wbuf; struct completion *comp; }; #define SLIM_DEVICE_MAX_CHANNELS 256 /* A SLIMBus Device may have from 0 to 31 Ports (inclusive) */ #define SLIM_DEVICE_MAX_PORTS 32 /** * struct slim_stream_config - SLIMbus stream configuration * Configuring a stream is done at hw_params or prepare call * from audio drivers where they have all the required information * regarding rate, number of channels and so on. * There is a 1:1 mapping of channels and ports. * * @rate: data rate * @bps: bits per data sample * @ch_count: number of channels * @chs: pointer to list of channel numbers * @port_mask: port mask of ports to use for this stream * @direction: direction of the stream, SNDRV_PCM_STREAM_PLAYBACK * or SNDRV_PCM_STREAM_CAPTURE. */ struct slim_stream_config { unsigned int rate; unsigned int bps; /* MAX 256 channels */ unsigned int ch_count; unsigned int *chs; /* Max 32 ports per device */ unsigned long port_mask; int direction; }; /* * use a macro to avoid include chaining to get THIS_MODULE */ #define slim_driver_register(drv) \ __slim_driver_register(drv, THIS_MODULE) int __slim_driver_register(struct slim_driver *drv, struct module *owner); void slim_driver_unregister(struct slim_driver *drv); /** * module_slim_driver() - Helper macro for registering a SLIMbus driver * @__slim_driver: slimbus_driver struct * * Helper macro for SLIMbus drivers which do not do anything special in module * init/exit. This eliminates a lot of boilerplate. Each module may only * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_slim_driver(__slim_driver) \ module_driver(__slim_driver, slim_driver_register, \ slim_driver_unregister) static inline void *slim_get_devicedata(const struct slim_device *dev) { return dev_get_drvdata(&dev->dev); } static inline void slim_set_devicedata(struct slim_device *dev, void *data) { dev_set_drvdata(&dev->dev, data); } struct slim_device *of_slim_get_device(struct slim_controller *ctrl, struct device_node *np); struct slim_device *slim_get_device(struct slim_controller *ctrl, struct slim_eaddr *e_addr); int slim_get_logical_addr(struct slim_device *sbdev); /* Information Element management messages */ #define SLIM_MSG_MC_REQUEST_INFORMATION 0x20 #define SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION 0x21 #define SLIM_MSG_MC_REPLY_INFORMATION 0x24 #define SLIM_MSG_MC_CLEAR_INFORMATION 0x28 #define SLIM_MSG_MC_REPORT_INFORMATION 0x29 /* Value Element management messages */ #define SLIM_MSG_MC_REQUEST_VALUE 0x60 #define SLIM_MSG_MC_REQUEST_CHANGE_VALUE 0x61 #define SLIM_MSG_MC_REPLY_VALUE 0x64 #define SLIM_MSG_MC_CHANGE_VALUE 0x68 int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg, u8 mc); int slim_readb(struct slim_device *sdev, u32 addr); int slim_writeb(struct slim_device *sdev, u32 addr, u8 value); int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val); int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val); /* SLIMbus Stream apis */ struct slim_stream_runtime; struct slim_stream_runtime *slim_stream_allocate(struct slim_device *dev, const char *sname); int slim_stream_prepare(struct slim_stream_runtime *stream, struct slim_stream_config *c); int slim_stream_enable(struct
slim_stream_runtime *stream); int slim_stream_disable(struct slim_stream_runtime *stream); int slim_stream_unprepare(struct slim_stream_runtime *stream); int slim_stream_free(struct slim_stream_runtime *stream); #endif /* _LINUX_SLIMBUS_H */ atmel-ssc.h 0000644 00000023363 14722070374 0006622 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __INCLUDE_ATMEL_SSC_H #define __INCLUDE_ATMEL_SSC_H #include <linux/platform_device.h> #include <linux/list.h> #include <linux/io.h> struct atmel_ssc_platform_data { int use_dma; int has_fslen_ext; }; struct ssc_device { struct list_head list; dma_addr_t phybase; void __iomem *regs; struct platform_device *pdev; struct atmel_ssc_platform_data *pdata; struct clk *clk; int user; int irq; bool clk_from_rk_pin; bool sound_dai; }; struct ssc_device * __must_check ssc_request(unsigned int ssc_num); void ssc_free(struct ssc_device *ssc); /* SSC register offsets */ /* SSC Control Register */ #define SSC_CR 0x00000000 #define SSC_CR_RXDIS_SIZE 1 #define SSC_CR_RXDIS_OFFSET 1 #define SSC_CR_RXEN_SIZE 1 #define SSC_CR_RXEN_OFFSET 0 #define SSC_CR_SWRST_SIZE 1 #define SSC_CR_SWRST_OFFSET 15 #define SSC_CR_TXDIS_SIZE 1 #define SSC_CR_TXDIS_OFFSET 9 #define SSC_CR_TXEN_SIZE 1 #define SSC_CR_TXEN_OFFSET 8 /* SSC Clock Mode Register */ #define SSC_CMR 0x00000004 #define SSC_CMR_DIV_SIZE 12 #define SSC_CMR_DIV_OFFSET 0 /* SSC Receive Clock Mode Register */ #define SSC_RCMR 0x00000010 #define SSC_RCMR_CKG_SIZE 2 #define SSC_RCMR_CKG_OFFSET 6 #define SSC_RCMR_CKI_SIZE 1 #define SSC_RCMR_CKI_OFFSET 5 #define SSC_RCMR_CKO_SIZE 3 #define SSC_RCMR_CKO_OFFSET 2 #define SSC_RCMR_CKS_SIZE 2 #define SSC_RCMR_CKS_OFFSET 0 #define SSC_RCMR_PERIOD_SIZE 8 #define SSC_RCMR_PERIOD_OFFSET 24 #define SSC_RCMR_START_SIZE 4 #define SSC_RCMR_START_OFFSET 8 #define SSC_RCMR_STOP_SIZE 1 #define SSC_RCMR_STOP_OFFSET 12 #define SSC_RCMR_STTDLY_SIZE 8 #define SSC_RCMR_STTDLY_OFFSET 16 /* SSC Receive Frame Mode Register */ #define SSC_RFMR 0x00000014 #define SSC_RFMR_DATLEN_SIZE 5 #define SSC_RFMR_DATLEN_OFFSET 0 #define SSC_RFMR_DATNB_SIZE 4 #define SSC_RFMR_DATNB_OFFSET 8 #define SSC_RFMR_FSEDGE_SIZE 1 #define SSC_RFMR_FSEDGE_OFFSET 24 /* * The FSLEN_EXT exists on at91sam9rl, at91sam9g10, * at91sam9g20, and at91sam9g45 and newer SoCs */ #define SSC_RFMR_FSLEN_EXT_SIZE 4 #define SSC_RFMR_FSLEN_EXT_OFFSET 28 #define SSC_RFMR_FSLEN_SIZE 4 #define SSC_RFMR_FSLEN_OFFSET 16 #define SSC_RFMR_FSOS_SIZE 4 #define SSC_RFMR_FSOS_OFFSET 20 #define SSC_RFMR_LOOP_SIZE 1 #define SSC_RFMR_LOOP_OFFSET 5 #define SSC_RFMR_MSBF_SIZE 1 #define SSC_RFMR_MSBF_OFFSET 7 /* SSC Transmit Clock Mode Register */ #define SSC_TCMR 0x00000018 #define SSC_TCMR_CKG_SIZE 2 #define SSC_TCMR_CKG_OFFSET 6 #define SSC_TCMR_CKI_SIZE 1 #define SSC_TCMR_CKI_OFFSET 5 #define SSC_TCMR_CKO_SIZE 3 #define SSC_TCMR_CKO_OFFSET 2 #define SSC_TCMR_CKS_SIZE 2 #define SSC_TCMR_CKS_OFFSET 0 #define SSC_TCMR_PERIOD_SIZE 8 #define SSC_TCMR_PERIOD_OFFSET 24 #define SSC_TCMR_START_SIZE 4 #define SSC_TCMR_START_OFFSET 8 #define SSC_TCMR_STTDLY_SIZE 8 #define SSC_TCMR_STTDLY_OFFSET 16 /* SSC Transmit Frame Mode Register */ #define SSC_TFMR 0x0000001c #define SSC_TFMR_DATDEF_SIZE 1 #define SSC_TFMR_DATDEF_OFFSET 5 #define SSC_TFMR_DATLEN_SIZE 5 #define SSC_TFMR_DATLEN_OFFSET 0 #define SSC_TFMR_DATNB_SIZE 4 #define SSC_TFMR_DATNB_OFFSET 8 #define SSC_TFMR_FSDEN_SIZE 1 #define SSC_TFMR_FSDEN_OFFSET 23 #define SSC_TFMR_FSEDGE_SIZE 1 #define SSC_TFMR_FSEDGE_OFFSET 24 /* * The FSLEN_EXT exists on at91sam9rl, at91sam9g10, * at91sam9g20, and at91sam9g45
and newer SoCs */ #define SSC_TFMR_FSLEN_EXT_SIZE 4 #define SSC_TFMR_FSLEN_EXT_OFFSET 28 #define SSC_TFMR_FSLEN_SIZE 4 #define SSC_TFMR_FSLEN_OFFSET 16 #define SSC_TFMR_FSOS_SIZE 3 #define SSC_TFMR_FSOS_OFFSET 20 #define SSC_TFMR_MSBF_SIZE 1 #define SSC_TFMR_MSBF_OFFSET 7 /* SSC Receive Hold Register */ #define SSC_RHR 0x00000020 #define SSC_RHR_RDAT_SIZE 32 #define SSC_RHR_RDAT_OFFSET 0 /* SSC Transmit Hold Register */ #define SSC_THR 0x00000024 #define SSC_THR_TDAT_SIZE 32 #define SSC_THR_TDAT_OFFSET 0 /* SSC Receive Sync. Holding Register */ #define SSC_RSHR 0x00000030 #define SSC_RSHR_RSDAT_SIZE 16 #define SSC_RSHR_RSDAT_OFFSET 0 /* SSC Transmit Sync. Holding Register */ #define SSC_TSHR 0x00000034 #define SSC_TSHR_TSDAT_SIZE 16 #define SSC_TSHR_RSDAT_OFFSET 0 /* SSC Receive Compare 0 Register */ #define SSC_RC0R 0x00000038 #define SSC_RC0R_CP0_SIZE 16 #define SSC_RC0R_CP0_OFFSET 0 /* SSC Receive Compare 1 Register */ #define SSC_RC1R 0x0000003c #define SSC_RC1R_CP1_SIZE 16 #define SSC_RC1R_CP1_OFFSET 0 /* SSC Status Register */ #define SSC_SR 0x00000040 #define SSC_SR_CP0_SIZE 1 #define SSC_SR_CP0_OFFSET 8 #define SSC_SR_CP1_SIZE 1 #define SSC_SR_CP1_OFFSET 9 #define SSC_SR_ENDRX_SIZE 1 #define SSC_SR_ENDRX_OFFSET 6 #define SSC_SR_ENDTX_SIZE 1 #define SSC_SR_ENDTX_OFFSET 2 #define SSC_SR_OVRUN_SIZE 1 #define SSC_SR_OVRUN_OFFSET 5 #define SSC_SR_RXBUFF_SIZE 1 #define SSC_SR_RXBUFF_OFFSET 7 #define SSC_SR_RXEN_SIZE 1 #define SSC_SR_RXEN_OFFSET 17 #define SSC_SR_RXRDY_SIZE 1 #define SSC_SR_RXRDY_OFFSET 4 #define SSC_SR_RXSYN_SIZE 1 #define SSC_SR_RXSYN_OFFSET 11 #define SSC_SR_TXBUFE_SIZE 1 #define SSC_SR_TXBUFE_OFFSET 3 #define SSC_SR_TXEMPTY_SIZE 1 #define SSC_SR_TXEMPTY_OFFSET 1 #define SSC_SR_TXEN_SIZE 1 #define SSC_SR_TXEN_OFFSET 16 #define SSC_SR_TXRDY_SIZE 1 #define SSC_SR_TXRDY_OFFSET 0 #define SSC_SR_TXSYN_SIZE 1 #define SSC_SR_TXSYN_OFFSET 10 /* SSC Interrupt Enable Register */ #define SSC_IER 0x00000044 #define SSC_IER_CP0_SIZE 1 #define SSC_IER_CP0_OFFSET 8 #define SSC_IER_CP1_SIZE 1 #define SSC_IER_CP1_OFFSET 9 #define SSC_IER_ENDRX_SIZE 1 #define SSC_IER_ENDRX_OFFSET 6 #define SSC_IER_ENDTX_SIZE 1 #define SSC_IER_ENDTX_OFFSET 2 #define SSC_IER_OVRUN_SIZE 1 #define SSC_IER_OVRUN_OFFSET 5 #define SSC_IER_RXBUFF_SIZE 1 #define SSC_IER_RXBUFF_OFFSET 7 #define SSC_IER_RXRDY_SIZE 1 #define SSC_IER_RXRDY_OFFSET 4 #define SSC_IER_RXSYN_SIZE 1 #define SSC_IER_RXSYN_OFFSET 11 #define SSC_IER_TXBUFE_SIZE 1 #define SSC_IER_TXBUFE_OFFSET 3 #define SSC_IER_TXEMPTY_SIZE 1 #define SSC_IER_TXEMPTY_OFFSET 1 #define SSC_IER_TXRDY_SIZE 1 #define SSC_IER_TXRDY_OFFSET 0 #define SSC_IER_TXSYN_SIZE 1 #define SSC_IER_TXSYN_OFFSET 10 /* SSC Interrupt Disable Register */ #define SSC_IDR 0x00000048 #define SSC_IDR_CP0_SIZE 1 #define SSC_IDR_CP0_OFFSET 8 #define SSC_IDR_CP1_SIZE 1 #define SSC_IDR_CP1_OFFSET 9 #define SSC_IDR_ENDRX_SIZE 1 #define SSC_IDR_ENDRX_OFFSET 6 #define SSC_IDR_ENDTX_SIZE 1 #define SSC_IDR_ENDTX_OFFSET 2 #define SSC_IDR_OVRUN_SIZE 1 #define SSC_IDR_OVRUN_OFFSET 5 #define SSC_IDR_RXBUFF_SIZE 1 #define SSC_IDR_RXBUFF_OFFSET 7 #define SSC_IDR_RXRDY_SIZE 1 #define SSC_IDR_RXRDY_OFFSET 4 #define SSC_IDR_RXSYN_SIZE 1 #define SSC_IDR_RXSYN_OFFSET 11 #define SSC_IDR_TXBUFE_SIZE 1 #define SSC_IDR_TXBUFE_OFFSET 3 #define SSC_IDR_TXEMPTY_SIZE 1 #define SSC_IDR_TXEMPTY_OFFSET 1 #define SSC_IDR_TXRDY_SIZE 1 #define SSC_IDR_TXRDY_OFFSET 0 #define SSC_IDR_TXSYN_SIZE 1 #define SSC_IDR_TXSYN_OFFSET 10 /* SSC Interrupt Mask Register */ #define SSC_IMR 0x0000004c #define SSC_IMR_CP0_SIZE 1 #define 
SSC_IMR_CP0_OFFSET 8 #define SSC_IMR_CP1_SIZE 1 #define SSC_IMR_CP1_OFFSET 9 #define SSC_IMR_ENDRX_SIZE 1 #define SSC_IMR_ENDRX_OFFSET 6 #define SSC_IMR_ENDTX_SIZE 1 #define SSC_IMR_ENDTX_OFFSET 2 #define SSC_IMR_OVRUN_SIZE 1 #define SSC_IMR_OVRUN_OFFSET 5 #define SSC_IMR_RXBUFF_SIZE 1 #define SSC_IMR_RXBUFF_OFFSET 7 #define SSC_IMR_RXRDY_SIZE 1 #define SSC_IMR_RXRDY_OFFSET 4 #define SSC_IMR_RXSYN_SIZE 1 #define SSC_IMR_RXSYN_OFFSET 11 #define SSC_IMR_TXBUFE_SIZE 1 #define SSC_IMR_TXBUFE_OFFSET 3 #define SSC_IMR_TXEMPTY_SIZE 1 #define SSC_IMR_TXEMPTY_OFFSET 1 #define SSC_IMR_TXRDY_SIZE 1 #define SSC_IMR_TXRDY_OFFSET 0 #define SSC_IMR_TXSYN_SIZE 1 #define SSC_IMR_TXSYN_OFFSET 10 /* SSC PDC Receive Pointer Register */ #define SSC_PDC_RPR 0x00000100 /* SSC PDC Receive Counter Register */ #define SSC_PDC_RCR 0x00000104 /* SSC PDC Transmit Pointer Register */ #define SSC_PDC_TPR 0x00000108 /* SSC PDC Receive Next Pointer Register */ #define SSC_PDC_RNPR 0x00000110 /* SSC PDC Receive Next Counter Register */ #define SSC_PDC_RNCR 0x00000114 /* SSC PDC Transmit Counter Register */ #define SSC_PDC_TCR 0x0000010c /* SSC PDC Transmit Next Pointer Register */ #define SSC_PDC_TNPR 0x00000118 /* SSC PDC Transmit Next Counter Register */ #define SSC_PDC_TNCR 0x0000011c /* SSC PDC Transfer Control Register */ #define SSC_PDC_PTCR 0x00000120 #define SSC_PDC_PTCR_RXTDIS_SIZE 1 #define SSC_PDC_PTCR_RXTDIS_OFFSET 1 #define SSC_PDC_PTCR_RXTEN_SIZE 1 #define SSC_PDC_PTCR_RXTEN_OFFSET 0 #define SSC_PDC_PTCR_TXTDIS_SIZE 1 #define SSC_PDC_PTCR_TXTDIS_OFFSET 9 #define SSC_PDC_PTCR_TXTEN_SIZE 1 #define SSC_PDC_PTCR_TXTEN_OFFSET 8 /* SSC PDC Transfer Status Register */ #define SSC_PDC_PTSR 0x00000124 #define SSC_PDC_PTSR_RXTEN_SIZE 1 #define SSC_PDC_PTSR_RXTEN_OFFSET 0 #define SSC_PDC_PTSR_TXTEN_SIZE 1 #define SSC_PDC_PTSR_TXTEN_OFFSET 8 /* Bit manipulation macros */ #define SSC_BIT(name) \ (1 << SSC_##name##_OFFSET) #define SSC_BF(name, value) \ (((value) & ((1 << SSC_##name##_SIZE) - 1)) \ << SSC_##name##_OFFSET) #define SSC_BFEXT(name, value) \ (((value) >> SSC_##name##_OFFSET) \ & ((1 << SSC_##name##_SIZE) - 1)) #define SSC_BFINS(name, value, old) \ (((old) & ~(((1 << SSC_##name##_SIZE) - 1) \ << SSC_##name##_OFFSET)) | SSC_BF(name, value)) /* Register access macros */ #define ssc_readl(base, reg) __raw_readl(base + SSC_##reg) #define ssc_writel(base, reg, value) __raw_writel((value), base + SSC_##reg) #endif /* __INCLUDE_ATMEL_SSC_H */ unaligned/be_memmove.h 0000644 00000001451 14722070374 0011005 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H #define _LINUX_UNALIGNED_BE_MEMMOVE_H #include <linux/unaligned/memmove.h> static inline u16 get_unaligned_be16(const void *p) { return __get_unaligned_memmove16((const u8 *)p); } static inline u32 get_unaligned_be32(const void *p) { return __get_unaligned_memmove32((const u8 *)p); } static inline u64 get_unaligned_be64(const void *p) { return __get_unaligned_memmove64((const u8 *)p); } static inline void put_unaligned_be16(u16 val, void *p) { __put_unaligned_memmove16(val, p); } static inline void put_unaligned_be32(u32 val, void *p) { __put_unaligned_memmove32(val, p); } static inline void put_unaligned_be64(u64 val, void *p) { __put_unaligned_memmove64(val, p); } #endif /* _LINUX_UNALIGNED_BE_MEMMOVE_H */ unaligned/be_byteshift.h 0000644 00000002666 14722070374 0011342 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H #define _LINUX_UNALIGNED_BE_BYTESHIFT_H #include <linux/types.h> static
inline u16 __get_unaligned_be16(const u8 *p) { return p[0] << 8 | p[1]; } static inline u32 __get_unaligned_be32(const u8 *p) { return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; } static inline u64 __get_unaligned_be64(const u8 *p) { return (u64)__get_unaligned_be32(p) << 32 | __get_unaligned_be32(p + 4); } static inline void __put_unaligned_be16(u16 val, u8 *p) { *p++ = val >> 8; *p++ = val; } static inline void __put_unaligned_be32(u32 val, u8 *p) { __put_unaligned_be16(val >> 16, p); __put_unaligned_be16(val, p + 2); } static inline void __put_unaligned_be64(u64 val, u8 *p) { __put_unaligned_be32(val >> 32, p); __put_unaligned_be32(val, p + 4); } static inline u16 get_unaligned_be16(const void *p) { return __get_unaligned_be16((const u8 *)p); } static inline u32 get_unaligned_be32(const void *p) { return __get_unaligned_be32((const u8 *)p); } static inline u64 get_unaligned_be64(const void *p) { return __get_unaligned_be64((const u8 *)p); } static inline void put_unaligned_be16(u16 val, void *p) { __put_unaligned_be16(val, p); } static inline void put_unaligned_be32(u32 val, void *p) { __put_unaligned_be32(val, p); } static inline void put_unaligned_be64(u64 val, void *p) { __put_unaligned_be64(val, p); } #endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */ unaligned/packed_struct.h 0000644 00000002044 14722070374 0011524 0 ustar 00 #ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H #define _LINUX_UNALIGNED_PACKED_STRUCT_H #include <linux/kernel.h> struct __una_u16 { u16 x; } __packed; struct __una_u32 { u32 x; } __packed; struct __una_u64 { u64 x; } __packed; static inline u16 __get_unaligned_cpu16(const void *p) { const struct __una_u16 *ptr = (const struct __una_u16 *)p; return ptr->x; } static inline u32 __get_unaligned_cpu32(const void *p) { const struct __una_u32 *ptr = (const struct __una_u32 *)p; return ptr->x; } static inline u64 __get_unaligned_cpu64(const void *p) { const struct __una_u64 *ptr = (const struct __una_u64 *)p; return ptr->x; } static inline void __put_unaligned_cpu16(u16 val, void *p) { struct __una_u16 *ptr = (struct __una_u16 *)p; ptr->x = val; } static inline void __put_unaligned_cpu32(u32 val, void *p) { struct __una_u32 *ptr = (struct __una_u32 *)p; ptr->x = val; } static inline void __put_unaligned_cpu64(u64 val, void *p) { struct __una_u64 *ptr = (struct __una_u64 *)p; ptr->x = val; } #endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */ unaligned/be_struct.h 0000644 00000001424 14722070374 0010664 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_BE_STRUCT_H #define _LINUX_UNALIGNED_BE_STRUCT_H #include <linux/unaligned/packed_struct.h> static inline u16 get_unaligned_be16(const void *p) { return __get_unaligned_cpu16((const u8 *)p); } static inline u32 get_unaligned_be32(const void *p) { return __get_unaligned_cpu32((const u8 *)p); } static inline u64 get_unaligned_be64(const void *p) { return __get_unaligned_cpu64((const u8 *)p); } static inline void put_unaligned_be16(u16 val, void *p) { __put_unaligned_cpu16(val, p); } static inline void put_unaligned_be32(u32 val, void *p) { __put_unaligned_cpu32(val, p); } static inline void put_unaligned_be64(u64 val, void *p) { __put_unaligned_cpu64(val, p); } #endif /* _LINUX_UNALIGNED_BE_STRUCT_H */ unaligned/generic.h 0000644 00000004170 14722070374 0010307 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_GENERIC_H #define _LINUX_UNALIGNED_GENERIC_H /* * Cause a link-time error if we try an unaligned access other than * 1,2,4 or 8 bytes long */ extern void 
__bad_unaligned_access_size(void); #define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ __bad_unaligned_access_size())))); \ })) #define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ __bad_unaligned_access_size())))); \ })) #define __put_unaligned_le(val, ptr) ({ \ void *__gu_p = (ptr); \ switch (sizeof(*(ptr))) { \ case 1: \ *(u8 *)__gu_p = (__force u8)(val); \ break; \ case 2: \ put_unaligned_le16((__force u16)(val), __gu_p); \ break; \ case 4: \ put_unaligned_le32((__force u32)(val), __gu_p); \ break; \ case 8: \ put_unaligned_le64((__force u64)(val), __gu_p); \ break; \ default: \ __bad_unaligned_access_size(); \ break; \ } \ (void)0; }) #define __put_unaligned_be(val, ptr) ({ \ void *__gu_p = (ptr); \ switch (sizeof(*(ptr))) { \ case 1: \ *(u8 *)__gu_p = (__force u8)(val); \ break; \ case 2: \ put_unaligned_be16((__force u16)(val), __gu_p); \ break; \ case 4: \ put_unaligned_be32((__force u32)(val), __gu_p); \ break; \ case 8: \ put_unaligned_be64((__force u64)(val), __gu_p); \ break; \ default: \ __bad_unaligned_access_size(); \ break; \ } \ (void)0; }) #endif /* _LINUX_UNALIGNED_GENERIC_H */ unaligned/memmove.h 0000644 00000001544 14722070374 0010342 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_MEMMOVE_H #define _LINUX_UNALIGNED_MEMMOVE_H #include <linux/kernel.h> #include <linux/string.h> /* Use memmove here, so gcc does not insert a __builtin_memcpy. 
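 *
 * As an illustrative sketch (not part of the original header), a caller
 * reading a 32-bit field at an odd offset might do:
 *
 *	u8 hdr[8];
 *	u32 v;
 *	...
 *	v = __get_unaligned_memmove32(hdr + 1);
 *
 * The byte copy through memmove() is safe at any alignment, whereas a
 * memcpy() that gcc replaces with __builtin_memcpy could be emitted as a
 * single load that assumes natural alignment.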
*/ static inline u16 __get_unaligned_memmove16(const void *p) { u16 tmp; memmove(&tmp, p, 2); return tmp; } static inline u32 __get_unaligned_memmove32(const void *p) { u32 tmp; memmove(&tmp, p, 4); return tmp; } static inline u64 __get_unaligned_memmove64(const void *p) { u64 tmp; memmove(&tmp, p, 8); return tmp; } static inline void __put_unaligned_memmove16(u16 val, void *p) { memmove(p, &val, 2); } static inline void __put_unaligned_memmove32(u32 val, void *p) { memmove(p, &val, 4); } static inline void __put_unaligned_memmove64(u64 val, void *p) { memmove(p, &val, 8); } #endif /* _LINUX_UNALIGNED_MEMMOVE_H */ unaligned/le_byteshift.h 0000644 00000002666 14722070374 0011364 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H #define _LINUX_UNALIGNED_LE_BYTESHIFT_H #include <linux/types.h> static inline u16 __get_unaligned_le16(const u8 *p) { return p[0] | p[1] << 8; } static inline u32 __get_unaligned_le32(const u8 *p) { return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; } static inline u64 __get_unaligned_le64(const u8 *p) { return (u64)__get_unaligned_le32(p + 4) << 32 | __get_unaligned_le32(p); } static inline void __put_unaligned_le16(u16 val, u8 *p) { *p++ = val; *p++ = val >> 8; } static inline void __put_unaligned_le32(u32 val, u8 *p) { __put_unaligned_le16(val >> 16, p + 2); __put_unaligned_le16(val, p); } static inline void __put_unaligned_le64(u64 val, u8 *p) { __put_unaligned_le32(val >> 32, p + 4); __put_unaligned_le32(val, p); } static inline u16 get_unaligned_le16(const void *p) { return __get_unaligned_le16((const u8 *)p); } static inline u32 get_unaligned_le32(const void *p) { return __get_unaligned_le32((const u8 *)p); } static inline u64 get_unaligned_le64(const void *p) { return __get_unaligned_le64((const u8 *)p); } static inline void put_unaligned_le16(u16 val, void *p) { __put_unaligned_le16(val, p); } static inline void put_unaligned_le32(u32 val, void *p) { __put_unaligned_le32(val, p); } static inline void put_unaligned_le64(u64 val, void *p) { __put_unaligned_le64(val, p); } #endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */ unaligned/le_memmove.h 0000644 00000001451 14722070374 0011017 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H #define _LINUX_UNALIGNED_LE_MEMMOVE_H #include <linux/unaligned/memmove.h> static inline u16 get_unaligned_le16(const void *p) { return __get_unaligned_memmove16((const u8 *)p); } static inline u32 get_unaligned_le32(const void *p) { return __get_unaligned_memmove32((const u8 *)p); } static inline u64 get_unaligned_le64(const void *p) { return __get_unaligned_memmove64((const u8 *)p); } static inline void put_unaligned_le16(u16 val, void *p) { __put_unaligned_memmove16(val, p); } static inline void put_unaligned_le32(u32 val, void *p) { __put_unaligned_memmove32(val, p); } static inline void put_unaligned_le64(u64 val, void *p) { __put_unaligned_memmove64(val, p); } #endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ unaligned/access_ok.h 0000644 00000002654 14722070374 0010632 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_ACCESS_OK_H #define _LINUX_UNALIGNED_ACCESS_OK_H #include <linux/kernel.h> #include <asm/byteorder.h> static __always_inline u16 get_unaligned_le16(const void *p) { return le16_to_cpup((__le16 *)p); } static __always_inline u32 get_unaligned_le32(const void *p) { return le32_to_cpup((__le32 *)p); } static __always_inline u64 get_unaligned_le64(const void *p) { return le64_to_cpup((__le64 *)p); } static __always_inline u16 
get_unaligned_be16(const void *p) { return be16_to_cpup((__be16 *)p); } static __always_inline u32 get_unaligned_be32(const void *p) { return be32_to_cpup((__be32 *)p); } static __always_inline u64 get_unaligned_be64(const void *p) { return be64_to_cpup((__be64 *)p); } static __always_inline void put_unaligned_le16(u16 val, void *p) { *((__le16 *)p) = cpu_to_le16(val); } static __always_inline void put_unaligned_le32(u32 val, void *p) { *((__le32 *)p) = cpu_to_le32(val); } static __always_inline void put_unaligned_le64(u64 val, void *p) { *((__le64 *)p) = cpu_to_le64(val); } static __always_inline void put_unaligned_be16(u16 val, void *p) { *((__be16 *)p) = cpu_to_be16(val); } static __always_inline void put_unaligned_be32(u32 val, void *p) { *((__be32 *)p) = cpu_to_be32(val); } static __always_inline void put_unaligned_be64(u64 val, void *p) { *((__be64 *)p) = cpu_to_be64(val); } #endif /* _LINUX_UNALIGNED_ACCESS_OK_H */ unaligned/le_struct.h 0000644 00000001424 14722070374 0010676 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_UNALIGNED_LE_STRUCT_H #define _LINUX_UNALIGNED_LE_STRUCT_H #include <linux/unaligned/packed_struct.h> static inline u16 get_unaligned_le16(const void *p) { return __get_unaligned_cpu16((const u8 *)p); } static inline u32 get_unaligned_le32(const void *p) { return __get_unaligned_cpu32((const u8 *)p); } static inline u64 get_unaligned_le64(const void *p) { return __get_unaligned_cpu64((const u8 *)p); } static inline void put_unaligned_le16(u16 val, void *p) { __put_unaligned_cpu16(val, p); } static inline void put_unaligned_le32(u32 val, void *p) { __put_unaligned_cpu32(val, p); } static inline void put_unaligned_le64(u64 val, void *p) { __put_unaligned_cpu64(val, p); } #endif /* _LINUX_UNALIGNED_LE_STRUCT_H */ if_tap.h 0000644 00000004332 14722070374 0006167 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IF_TAP_H_ #define _LINUX_IF_TAP_H_ #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); struct ptr_ring *tap_get_ptr_ring(struct file *file); #else #include <linux/err.h> #include <linux/errno.h> struct file; struct socket; static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) { return ERR_PTR(-EINVAL); } #endif /* CONFIG_TAP */ #include <net/sock.h> #include <linux/skb_array.h> /* * Maximum times a tap device can be opened. This can be used to * configure the number of receive queues, e.g. for multiqueue virtio. */ #define MAX_TAP_QUEUES 256 struct tap_queue; struct tap_dev { struct net_device *dev; u16 flags; /* This array tracks active taps. */ struct tap_queue __rcu *taps[MAX_TAP_QUEUES]; /* This list tracks all taps (both enabled and disabled) */ struct list_head queue_list; int numvtaps; int numqueues; netdev_features_t tap_features; int minor; void (*update_features)(struct tap_dev *tap, netdev_features_t features); void (*count_tx_dropped)(struct tap_dev *tap); void (*count_rx_dropped)(struct tap_dev *tap); }; /* * A tap queue is the central object of the tap module; it connects * an open character device to a virtual interface. There can be * multiple queues on one interface, which map back to queues * implemented in hardware on the underlying device. * * tap_proto is used to allocate queues through the sock allocation * mechanism.
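 *
 * A rough wiring sketch (illustrative only: 'netdev', 'tap' and
 * 'tap_major' are hypothetical driver state, and error handling is
 * omitted; see the macvtap/ipvtap drivers for the real usage):
 *
 *	tap->dev = netdev;
 *	INIT_LIST_HEAD(&tap->queue_list);
 *	err = tap_get_minor(tap_major, tap);
 *	err = netdev_rx_handler_register(netdev, tap_handle_frame, tap);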
* */ struct tap_queue { struct sock sk; struct socket sock; int vnet_hdr_sz; struct tap_dev __rcu *tap; struct file *file; unsigned int flags; u16 queue_index; bool enabled; struct list_head next; struct ptr_ring ring; }; rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); void tap_del_queues(struct tap_dev *tap); int tap_get_minor(dev_t major, struct tap_dev *tap); void tap_free_minor(dev_t major, struct tap_dev *tap); int tap_queue_resize(struct tap_dev *tap); int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major, const char *device_name, struct module *module); void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev); #endif /*_LINUX_IF_TAP_H_*/ crc-ccitt.h 0000644 00000001141 14722070374 0006573 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_CRC_CCITT_H #define _LINUX_CRC_CCITT_H #include <linux/types.h> extern u16 const crc_ccitt_table[256]; extern u16 const crc_ccitt_false_table[256]; extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len); extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len); static inline u16 crc_ccitt_byte(u16 crc, const u8 c) { return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; } static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c) { return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c]; } #endif /* _LINUX_CRC_CCITT_H */ watchdog.h 0000644 00000017350 14722070374 0006531 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Generic watchdog defines. Derived from.. * * Berkshire PC Watchdog Defines * by Ken Hollis <khollis@bitgate.com> * */ #ifndef _LINUX_WATCHDOG_H #define _LINUX_WATCHDOG_H #include <linux/bitops.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/notifier.h> #include <uapi/linux/watchdog.h> struct watchdog_ops; struct watchdog_device; struct watchdog_core_data; struct watchdog_governor; /** struct watchdog_ops - The watchdog-devices operations * * @owner: The module owner. * @start: The routine for starting the watchdog device. * @stop: The routine for stopping the watchdog device. * @ping: The routine that sends a keepalive ping to the watchdog device. * @status: The routine that shows the status of the watchdog device. * @set_timeout:The routine for setting the watchdog devices timeout value (in seconds). * @set_pretimeout:The routine for setting the watchdog devices pretimeout. * @get_timeleft:The routine that gets the time left before a reset (in seconds). * @restart: The routine for restarting the machine. * @ioctl: The routine that handles extra ioctl calls. * * The watchdog_ops structure contains a list of low-level operations * that control a watchdog device. It also contains the module that owns * these operations. The start and stop functions are mandatory, all other * functions are optional. */ struct watchdog_ops { struct module *owner; /* mandatory operations */ int (*start)(struct watchdog_device *); int (*stop)(struct watchdog_device *); /* optional operations */ int (*ping)(struct watchdog_device *); unsigned int (*status)(struct watchdog_device *); int (*set_timeout)(struct watchdog_device *, unsigned int); int (*set_pretimeout)(struct watchdog_device *, unsigned int); unsigned int (*get_timeleft)(struct watchdog_device *); int (*restart)(struct watchdog_device *, unsigned long, void *); long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long); }; /** struct watchdog_device - The structure that defines a watchdog device * * @id: The watchdog's ID.
(Allocated by watchdog_register_device) * @parent: The parent bus device * @groups: List of sysfs attribute groups to create when creating the * watchdog device. * @info: Pointer to a watchdog_info structure. * @ops: Pointer to the list of watchdog operations. * @gov: Pointer to watchdog pretimeout governor. * @bootstatus: Status of the watchdog device at boot. * @timeout: The watchdog devices timeout value (in seconds). * @pretimeout: The watchdog devices pre_timeout value. * @min_timeout:The watchdog devices minimum timeout value (in seconds). * @max_timeout:The watchdog devices maximum timeout value (in seconds) * as configurable from user space. Only relevant if * max_hw_heartbeat_ms is not provided. * @min_hw_heartbeat_ms: * Hardware limit for minimum time between heartbeats, * in milli-seconds. * @max_hw_heartbeat_ms: * Hardware limit for maximum timeout, in milli-seconds. * Replaces max_timeout if specified. * @reboot_nb: The notifier block to stop watchdog on reboot. * @restart_nb: The notifier block to register a restart function. * @driver_data:Pointer to the drivers private data. * @wd_data: Pointer to watchdog core internal data. * @status: Field that contains the devices internal status bits. * @deferred: Entry in wtd_deferred_reg_list which is used to * register early initialized watchdogs. * * The watchdog_device structure contains all information about a * watchdog timer device. * * The driver-data field may not be accessed directly. It must be accessed * via the watchdog_set_drvdata and watchdog_get_drvdata helpers. */ struct watchdog_device { int id; struct device *parent; const struct attribute_group **groups; const struct watchdog_info *info; const struct watchdog_ops *ops; const struct watchdog_governor *gov; unsigned int bootstatus; unsigned int timeout; unsigned int pretimeout; unsigned int min_timeout; unsigned int max_timeout; unsigned int min_hw_heartbeat_ms; unsigned int max_hw_heartbeat_ms; struct notifier_block reboot_nb; struct notifier_block restart_nb; void *driver_data; struct watchdog_core_data *wd_data; unsigned long status; /* Bit numbers for status flags */ #define WDOG_ACTIVE 0 /* Is the watchdog running/active */ #define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? 
*/ #define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */ #define WDOG_HW_RUNNING 3 /* True if HW watchdog running */ #define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */ struct list_head deferred; }; #define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT) #define WATCHDOG_NOWAYOUT_INIT_STATUS (WATCHDOG_NOWAYOUT << WDOG_NO_WAY_OUT) /* Use the following function to check whether or not the watchdog is active */ static inline bool watchdog_active(struct watchdog_device *wdd) { return test_bit(WDOG_ACTIVE, &wdd->status); } /* * Use the following function to check whether or not the hardware watchdog * is running */ static inline bool watchdog_hw_running(struct watchdog_device *wdd) { return test_bit(WDOG_HW_RUNNING, &wdd->status); } /* Use the following function to set the nowayout feature */ static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout) { if (nowayout) set_bit(WDOG_NO_WAY_OUT, &wdd->status); } /* Use the following function to stop the watchdog on reboot */ static inline void watchdog_stop_on_reboot(struct watchdog_device *wdd) { set_bit(WDOG_STOP_ON_REBOOT, &wdd->status); } /* Use the following function to stop the watchdog when unregistering it */ static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd) { set_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status); } /* Use the following function to check if a timeout value is invalid */ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) { /* * The timeout is invalid if * - the requested value is larger than UINT_MAX / 1000 * (since internal calculations are done in milli-seconds), * or * - the requested value is smaller than the configured minimum timeout, * or * - a maximum hardware timeout is not configured, a maximum timeout * is configured, and the requested value is larger than the * configured maximum timeout. 
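 *
 * A worked example (illustrative numbers, not from the original header):
 * with min_timeout = 1, max_timeout = 60 and max_hw_heartbeat_ms == 0,
 * t = 120 is rejected by the third clause, t = 0 by the second, and
 * t = 30 is accepted.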
*/ return t > UINT_MAX / 1000 || t < wdd->min_timeout || (!wdd->max_hw_heartbeat_ms && wdd->max_timeout && t > wdd->max_timeout); } /* Use the following function to check if a pretimeout value is invalid */ static inline bool watchdog_pretimeout_invalid(struct watchdog_device *wdd, unsigned int t) { return t && wdd->timeout && t >= wdd->timeout; } /* Use the following functions to manipulate watchdog driver specific data */ static inline void watchdog_set_drvdata(struct watchdog_device *wdd, void *data) { wdd->driver_data = data; } static inline void *watchdog_get_drvdata(struct watchdog_device *wdd) { return wdd->driver_data; } /* Use the following functions to report watchdog pretimeout event */ #if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV) void watchdog_notify_pretimeout(struct watchdog_device *wdd); #else static inline void watchdog_notify_pretimeout(struct watchdog_device *wdd) { pr_alert("watchdog%d: pretimeout event\n", wdd->id); } #endif /* drivers/watchdog/watchdog_core.c */ void watchdog_set_restart_priority(struct watchdog_device *wdd, int priority); extern int watchdog_init_timeout(struct watchdog_device *wdd, unsigned int timeout_parm, struct device *dev); extern int watchdog_register_device(struct watchdog_device *); extern void watchdog_unregister_device(struct watchdog_device *); /* devres register variant */ int devm_watchdog_register_device(struct device *dev, struct watchdog_device *); #endif /* ifndef _LINUX_WATCHDOG_H */ highuid.h 0000644 00000006172 14722070374 0006352 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HIGHUID_H #define _LINUX_HIGHUID_H #include <linux/types.h> /* * general notes: * * CONFIG_UID16 is defined if the given architecture needs to * support backwards compatibility for old system calls. * * kernel code should use uid_t and gid_t at all times when dealing with * kernel-private data. * * old_uid_t and old_gid_t should only be different if CONFIG_UID16 is * defined, else the platform should provide dummy typedefs for them * such that they are equivalent to __kernel_{u,g}id_t. * * uid16_t and gid16_t are used on all architectures. (when dealing * with structures hard coded to 16 bits, such as in filesystems) */ /* * This is the "overflow" UID and GID. They are used to signify uid/gid * overflow to old programs when they request uid/gid information but are * using the old 16 bit interfaces. * When you run a libc5 program, it will think that all highuid files or * processes are owned by this uid/gid. * The idea is that it's better to do so than possibly return 0 in lieu of * 65536, etc. */ extern int overflowuid; extern int overflowgid; extern void __bad_uid(void); extern void __bad_gid(void); #define DEFAULT_OVERFLOWUID 65534 #define DEFAULT_OVERFLOWGID 65534 #ifdef CONFIG_UID16 /* prevent uid mod 65536 effect by returning a default value for high UIDs */ #define high2lowuid(uid) ((uid) & ~0xFFFF ? (old_uid_t)overflowuid : (old_uid_t)(uid)) #define high2lowgid(gid) ((gid) & ~0xFFFF ? (old_gid_t)overflowgid : (old_gid_t)(gid)) /* * -1 is different in 16 bits than it is in 32 bits * these macros are used by chown(), setreuid(), ..., */ #define low2highuid(uid) ((uid) == (old_uid_t)-1 ? (uid_t)-1 : (uid_t)(uid)) #define low2highgid(gid) ((gid) == (old_gid_t)-1 ? (gid_t)-1 : (gid_t)(gid)) #define __convert_uid(size, uid) \ (size >= sizeof(uid) ? (uid) : high2lowuid(uid)) #define __convert_gid(size, gid) \ (size >= sizeof(gid) ? 
(gid) : high2lowgid(gid)) #else #define __convert_uid(size, uid) (uid) #define __convert_gid(size, gid) (gid) #endif /* !CONFIG_UID16 */ /* uid/gid input should be always 32bit uid_t */ #define SET_UID(var, uid) do { (var) = __convert_uid(sizeof(var), (uid)); } while (0) #define SET_GID(var, gid) do { (var) = __convert_gid(sizeof(var), (gid)); } while (0) /* * Everything below this line is needed on all architectures, to deal with * filesystems that only store 16 bits of the UID/GID, etc. */ /* * This is the UID and GID that will get written to disk if a filesystem * only supports 16-bit UIDs and the kernel has a high UID/GID to write */ extern int fs_overflowuid; extern int fs_overflowgid; #define DEFAULT_FS_OVERFLOWUID 65534 #define DEFAULT_FS_OVERFLOWGID 65534 /* * Since these macros are used in architectures that only need limited * 16-bit UID back compatibility, we won't use old_uid_t and old_gid_t */ #define fs_high2lowuid(uid) ((uid) & ~0xFFFF ? (uid16_t)fs_overflowuid : (uid16_t)(uid)) #define fs_high2lowgid(gid) ((gid) & ~0xFFFF ? (gid16_t)fs_overflowgid : (gid16_t)(gid)) #define low_16_bits(x) ((x) & 0xFFFF) #define high_16_bits(x) (((x) & 0xFFFF0000) >> 16) #endif /* _LINUX_HIGHUID_H */ mutex.h 0000644 00000015057 14722070374 0006075 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Mutexes: blocking mutual exclusion locks * * started by Ingo Molnar: * * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * * This file contains the main data structure and API definitions. */ #ifndef __LINUX_MUTEX_H #define __LINUX_MUTEX_H #include <asm/current.h> #include <linux/list.h> #include <linux/spinlock_types.h> #include <linux/lockdep.h> #include <linux/atomic.h> #include <asm/processor.h> #include <linux/osq_lock.h> #include <linux/debug_locks.h> struct ww_acquire_ctx; /* * Simple, straightforward mutexes with strict semantics: * * - only one task can hold the mutex at a time * - only the owner can unlock the mutex * - multiple unlocks are not permitted * - recursive locking is not permitted * - a mutex object must be initialized via the API * - a mutex object must not be initialized via memset or copying * - task may not exit with mutex held * - memory areas where held locks reside must not be freed * - held mutexes must not be reinitialized * - mutexes may not be used in hardware or software interrupt * contexts such as tasklets and timers * * These semantics are fully enforced when DEBUG_MUTEXES is * enabled. 
Furthermore, besides enforcing the above rules, the mutex * debugging code also implements a number of additional features * that make lock debugging easier and faster: * * - uses symbolic names of mutexes, whenever they are printed in debug output * - point-of-acquire tracking, symbolic lookup of function names * - list of all locks held in the system, printout of them * - owner tracking * - detects self-recursing locks and prints out all relevant info * - detects multi-task circular deadlocks and prints out all affected * locks and tasks (and only those tasks) */ struct mutex { atomic_long_t owner; spinlock_t wait_lock; #ifdef CONFIG_MUTEX_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* Spinner MCS lock */ #endif struct list_head wait_list; #ifdef CONFIG_DEBUG_MUTEXES void *magic; #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif }; /* * This is the control structure for tasks blocked on mutex, * which resides on the blocked task's kernel stack: */ struct mutex_waiter { struct list_head list; struct task_struct *task; struct ww_acquire_ctx *ww_ctx; #ifdef CONFIG_DEBUG_MUTEXES void *magic; #endif }; #ifdef CONFIG_DEBUG_MUTEXES #define __DEBUG_MUTEX_INITIALIZER(lockname) \ , .magic = &lockname extern void mutex_destroy(struct mutex *lock); #else # define __DEBUG_MUTEX_INITIALIZER(lockname) static inline void mutex_destroy(struct mutex *lock) {} #endif /** * mutex_init - initialize the mutex * @mutex: the mutex to be initialized * * Initialize the mutex to unlocked state. * * It is not allowed to initialize an already locked mutex. */ #define mutex_init(mutex) \ do { \ static struct lock_class_key __key; \ \ __mutex_init((mutex), #mutex, &__key); \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ , .dep_map = { .name = #lockname } #else # define __DEP_MAP_MUTEX_INITIALIZER(lockname) #endif #define __MUTEX_INITIALIZER(lockname) \ { .owner = ATOMIC_LONG_INIT(0) \ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ __DEBUG_MUTEX_INITIALIZER(lockname) \ __DEP_MAP_MUTEX_INITIALIZER(lockname) } #define DEFINE_MUTEX(mutexname) \ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) extern void __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key); /** * mutex_is_locked - is the mutex locked * @lock: the mutex to be queried * * Returns true if the mutex is locked, false if unlocked. */ extern bool mutex_is_locked(struct mutex *lock); /* * See kernel/locking/mutex.c for detailed documentation of these APIs. * Also see Documentation/locking/mutex-design.rst. 
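 *
 * A minimal usage sketch (illustrative; 'my_lock' and 'shared_count'
 * are hypothetical names):
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static int shared_count;
 *
 *	mutex_lock(&my_lock);
 *	shared_count++;
 *	mutex_unlock(&my_lock);
 *
 * or, where the sleep must be interruptible by signals:
 *
 *	if (mutex_lock_interruptible(&my_lock))
 *		return -EINTR;
 *	shared_count++;
 *	mutex_unlock(&my_lock);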
*/ #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass); extern int __must_check mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass); extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass); #define mutex_lock(lock) mutex_lock_nested(lock, 0) #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0) #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0) #define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0) #define mutex_lock_nest_lock(lock, nest_lock) \ do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ } while (0) #else extern void mutex_lock(struct mutex *lock); extern int __must_check mutex_lock_interruptible(struct mutex *lock); extern int __must_check mutex_lock_killable(struct mutex *lock); extern void mutex_lock_io(struct mutex *lock); # define mutex_lock_nested(lock, subclass) mutex_lock(lock) # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) # define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock) #endif /* * NOTE: mutex_trylock() follows the spin_trylock() convention, * not the down_trylock() convention! * * Returns 1 if the mutex has been acquired successfully, and 0 on contention. */ extern int mutex_trylock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); /* * These values are chosen such that FAIL and SUCCESS match the * values of the regular mutex_trylock(). */ enum mutex_trylock_recursive_enum { MUTEX_TRYLOCK_FAILED = 0, MUTEX_TRYLOCK_SUCCESS = 1, MUTEX_TRYLOCK_RECURSIVE, }; /** * mutex_trylock_recursive - trylock variant that allows recursive locking * @lock: mutex to be locked * * This function should not be used, _ever_. It is purely for hysterical GEM * raisins, and once those are gone this will be removed. * * Returns: * - MUTEX_TRYLOCK_FAILED - trylock failed, * - MUTEX_TRYLOCK_SUCCESS - lock acquired, * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. */ extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum mutex_trylock_recursive(struct mutex *lock); #endif /* __LINUX_MUTEX_H */ vmstat.h 0000644 00000025014 14722070374 0006243 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_VMSTAT_H #define _LINUX_VMSTAT_H #include <linux/types.h> #include <linux/percpu.h> #include <linux/mmzone.h> #include <linux/vm_event_item.h> #include <linux/atomic.h> #include <linux/static_key.h> extern int sysctl_stat_interval; #ifdef CONFIG_NUMA #define ENABLE_NUMA_STAT 1 #define DISABLE_NUMA_STAT 0 extern int sysctl_vm_numa_stat; DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key); extern int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); #endif struct reclaim_stat { unsigned nr_dirty; unsigned nr_unqueued_dirty; unsigned nr_congested; unsigned nr_writeback; unsigned nr_immediate; unsigned nr_activate[2]; unsigned nr_ref_keep; unsigned nr_unmap_fail; }; #ifdef CONFIG_VM_EVENT_COUNTERS /* * Light weight per cpu counter implementation. 
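 *
 * Typical use is a single increment at the point where the event occurs,
 * e.g. (an illustrative sketch; PGFAULT is one of the items from
 * vm_event_item.h):
 *
 *	count_vm_event(PGFAULT);	(preemption-safe form)
 *	__count_vm_event(PGFAULT);	(caller already disabled preemption)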
* * Counters should only be incremented and no critical kernel component * should rely on the counter values. * * Counters are handled completely inline. On many platforms the code * generated will simply be the increment of a global address. */ struct vm_event_state { unsigned long event[NR_VM_EVENT_ITEMS]; }; DECLARE_PER_CPU(struct vm_event_state, vm_event_states); /* * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the * local_irq_disable overhead. */ static inline void __count_vm_event(enum vm_event_item item) { raw_cpu_inc(vm_event_states.event[item]); } static inline void count_vm_event(enum vm_event_item item) { this_cpu_inc(vm_event_states.event[item]); } static inline void __count_vm_events(enum vm_event_item item, long delta) { raw_cpu_add(vm_event_states.event[item], delta); } static inline void count_vm_events(enum vm_event_item item, long delta) { this_cpu_add(vm_event_states.event[item], delta); } extern void all_vm_events(unsigned long *); extern void vm_events_fold_cpu(int cpu); #else /* Disable counters */ static inline void count_vm_event(enum vm_event_item item) { } static inline void count_vm_events(enum vm_event_item item, long delta) { } static inline void __count_vm_event(enum vm_event_item item) { } static inline void __count_vm_events(enum vm_event_item item, long delta) { } static inline void all_vm_events(unsigned long *ret) { } static inline void vm_events_fold_cpu(int cpu) { } #endif /* CONFIG_VM_EVENT_COUNTERS */ #ifdef CONFIG_NUMA_BALANCING #define count_vm_numa_event(x) count_vm_event(x) #define count_vm_numa_events(x, y) count_vm_events(x, y) #else #define count_vm_numa_event(x) do {} while (0) #define count_vm_numa_events(x, y) do { (void)(y); } while (0) #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_DEBUG_TLBFLUSH #define count_vm_tlb_event(x) count_vm_event(x) #define count_vm_tlb_events(x, y) count_vm_events(x, y) #else #define count_vm_tlb_event(x) do {} while (0) #define count_vm_tlb_events(x, y) do { (void)(y); } while (0) #endif #ifdef CONFIG_DEBUG_VM_VMACACHE #define count_vm_vmacache_event(x) count_vm_event(x) #else #define count_vm_vmacache_event(x) do {} while (0) #endif #define __count_zid_vm_events(item, zid, delta) \ __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) /* * Zone and node-based page accounting with per cpu differentials. 
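 *
 * (Illustrative note: the helpers below always update the local and the
 * global counter in tandem, e.g. zone_page_state_add(1, zone,
 * NR_FREE_PAGES) adds to both zone->vm_stat[NR_FREE_PAGES] and
 * vm_zone_stat[NR_FREE_PAGES].)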
*/ extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS]; extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS]; extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS]; #ifdef CONFIG_NUMA static inline void zone_numa_state_add(long x, struct zone *zone, enum numa_stat_item item) { atomic_long_add(x, &zone->vm_numa_stat[item]); atomic_long_add(x, &vm_numa_stat[item]); } static inline unsigned long global_numa_state(enum numa_stat_item item) { long x = atomic_long_read(&vm_numa_stat[item]); return x; } static inline unsigned long zone_numa_state_snapshot(struct zone *zone, enum numa_stat_item item) { long x = atomic_long_read(&zone->vm_numa_stat[item]); int cpu; for_each_online_cpu(cpu) x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]; return x; } #endif /* CONFIG_NUMA */ static inline void zone_page_state_add(long x, struct zone *zone, enum zone_stat_item item) { atomic_long_add(x, &zone->vm_stat[item]); atomic_long_add(x, &vm_zone_stat[item]); } static inline void node_page_state_add(long x, struct pglist_data *pgdat, enum node_stat_item item) { atomic_long_add(x, &pgdat->vm_stat[item]); atomic_long_add(x, &vm_node_stat[item]); } static inline unsigned long global_zone_page_state(enum zone_stat_item item) { long x = atomic_long_read(&vm_zone_stat[item]); #ifdef CONFIG_SMP if (x < 0) x = 0; #endif return x; } static inline unsigned long global_node_page_state(enum node_stat_item item) { long x = atomic_long_read(&vm_node_stat[item]); #ifdef CONFIG_SMP if (x < 0) x = 0; #endif return x; } static inline unsigned long zone_page_state(struct zone *zone, enum zone_stat_item item) { long x = atomic_long_read(&zone->vm_stat[item]); #ifdef CONFIG_SMP if (x < 0) x = 0; #endif return x; } /* * More accurate version that also considers the currently pending * deltas. For that we need to loop over all cpus to find the current * deltas. There is no synchronization so the result cannot be * exactly accurate either. 
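 *
 * Illustrative trade-off: a caller that wants the most accurate reading
 * available (say, a low-memory check) pays for the per-cpu walk:
 *
 *	nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * while the cheaper zone_page_state(zone, NR_FREE_PAGES) may lag the
 * pending deltas.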
*/ static inline unsigned long zone_page_state_snapshot(struct zone *zone, enum zone_stat_item item) { long x = atomic_long_read(&zone->vm_stat[item]); #ifdef CONFIG_SMP int cpu; for_each_online_cpu(cpu) x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; if (x < 0) x = 0; #endif return x; } #ifdef CONFIG_NUMA extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item); extern unsigned long sum_zone_node_page_state(int node, enum zone_stat_item item); extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item); extern unsigned long node_page_state(struct pglist_data *pgdat, enum node_stat_item item); #else #define sum_zone_node_page_state(node, item) global_zone_page_state(item) #define node_page_state(node, item) global_node_page_state(item) #endif /* CONFIG_NUMA */ #ifdef CONFIG_SMP void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); void __inc_zone_page_state(struct page *, enum zone_stat_item); void __dec_zone_page_state(struct page *, enum zone_stat_item); void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long); void __inc_node_page_state(struct page *, enum node_stat_item); void __dec_node_page_state(struct page *, enum node_stat_item); void mod_zone_page_state(struct zone *, enum zone_stat_item, long); void inc_zone_page_state(struct page *, enum zone_stat_item); void dec_zone_page_state(struct page *, enum zone_stat_item); void mod_node_page_state(struct pglist_data *, enum node_stat_item, long); void inc_node_page_state(struct page *, enum node_stat_item); void dec_node_page_state(struct page *, enum node_stat_item); extern void inc_node_state(struct pglist_data *, enum node_stat_item); extern void __inc_zone_state(struct zone *, enum zone_stat_item); extern void __inc_node_state(struct pglist_data *, enum node_stat_item); extern void dec_zone_state(struct zone *, enum zone_stat_item); extern void __dec_zone_state(struct zone *, enum zone_stat_item); extern void __dec_node_state(struct pglist_data *, enum node_stat_item); void quiet_vmstat(void); void cpu_vm_stats_fold(int cpu); void refresh_zone_stat_thresholds(void); struct ctl_table; int vmstat_refresh(struct ctl_table *, int write, void __user *buffer, size_t *lenp, loff_t *ppos); void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); int calculate_pressure_threshold(struct zone *zone); int calculate_normal_threshold(struct zone *zone); void set_pgdat_percpu_threshold(pg_data_t *pgdat, int (*calculate_pressure)(struct zone *)); #else /* CONFIG_SMP */ /* * We do not maintain differentials in a single processor configuration. * The functions directly modify the zone and global counters. 
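 *
 * E.g. on !SMP (illustrative),
 *
 *	mod_zone_page_state(zone, NR_FREE_PAGES, -1);
 *
 * resolves via the #defines below to __mod_zone_page_state() and hence
 * to two atomic_long_add() calls: one on the zone counter and one on
 * the global counter.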
*/ static inline void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, long delta) { zone_page_state_add(delta, zone, item); } static inline void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, int delta) { node_page_state_add(delta, pgdat, item); } static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) { atomic_long_inc(&zone->vm_stat[item]); atomic_long_inc(&vm_zone_stat[item]); } static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) { atomic_long_inc(&pgdat->vm_stat[item]); atomic_long_inc(&vm_node_stat[item]); } static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) { atomic_long_dec(&zone->vm_stat[item]); atomic_long_dec(&vm_zone_stat[item]); } static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) { atomic_long_dec(&pgdat->vm_stat[item]); atomic_long_dec(&vm_node_stat[item]); } static inline void __inc_zone_page_state(struct page *page, enum zone_stat_item item) { __inc_zone_state(page_zone(page), item); } static inline void __inc_node_page_state(struct page *page, enum node_stat_item item) { __inc_node_state(page_pgdat(page), item); } static inline void __dec_zone_page_state(struct page *page, enum zone_stat_item item) { __dec_zone_state(page_zone(page), item); } static inline void __dec_node_page_state(struct page *page, enum node_stat_item item) { __dec_node_state(page_pgdat(page), item); } /* * We only use atomic operations to update counters. So there is no need to * disable interrupts. */ #define inc_zone_page_state __inc_zone_page_state #define dec_zone_page_state __dec_zone_page_state #define mod_zone_page_state __mod_zone_page_state #define inc_node_page_state __inc_node_page_state #define dec_node_page_state __dec_node_page_state #define mod_node_page_state __mod_node_page_state #define inc_zone_state __inc_zone_state #define inc_node_state __inc_node_state #define dec_zone_state __dec_zone_state #define set_pgdat_percpu_threshold(pgdat, callback) { } static inline void refresh_zone_stat_thresholds(void) { } static inline void cpu_vm_stats_fold(int cpu) { } static inline void quiet_vmstat(void) { } static inline void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset) { } #endif /* CONFIG_SMP */ static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, int migratetype) { __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); if (is_migrate_cma(migratetype)) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); } extern const char * const vmstat_text[]; #endif /* _LINUX_VMSTAT_H */ ktime.h 0000644 00000015327 14722070374 0006044 0 ustar 00 /* * include/linux/ktime.h * * ktime_t - nanosecond-resolution time format. * * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar * * data type definitions, declarations, prototypes and macros. * * Started by: Thomas Gleixner and Ingo Molnar * * Credits: * * Roman Zippel provided the ideas and primary code snippets of * the ktime_t union and further simplifications of the original * code. 
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H

#include <linux/time.h>
#include <linux/jiffies.h>

/* Nanosecond scalar representation for kernel time values */
typedef s64	ktime_t;

/**
 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
 * @secs:	seconds to set
 * @nsecs:	nanoseconds to set
 *
 * Return: The ktime_t representation of the value.
 */
static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
	if (unlikely(secs >= KTIME_SEC_MAX))
		return KTIME_MAX;

	return secs * NSEC_PER_SEC + (s64)nsecs;
}

/* Subtract two ktime_t variables. rem = lhs - rhs: */
#define ktime_sub(lhs, rhs)	((lhs) - (rhs))

/* Add two ktime_t variables. res = lhs + rhs: */
#define ktime_add(lhs, rhs)	((lhs) + (rhs))

/*
 * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
 * this means that you must check the result for overflow yourself.
 */
#define ktime_add_unsafe(lhs, rhs)	((u64) (lhs) + (rhs))

/*
 * Add a ktime_t variable and a scalar nanosecond value.
 * res = kt + nsval:
 */
#define ktime_add_ns(kt, nsval)		((kt) + (nsval))

/*
 * Subtract a scalar nanosecond value from a ktime_t variable.
 * res = kt - nsval:
 */
#define ktime_sub_ns(kt, nsval)		((kt) - (nsval))

/* convert a timespec to ktime_t format: */
static inline ktime_t timespec_to_ktime(struct timespec ts)
{
	return ktime_set(ts.tv_sec, ts.tv_nsec);
}

/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
	return ktime_set(ts.tv_sec, ts.tv_nsec);
}

/* convert a timeval to ktime_t format: */
static inline ktime_t timeval_to_ktime(struct timeval tv)
{
	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}

/* Map the ktime_t to timespec conversion to ns_to_timespec function */
#define ktime_to_timespec(kt)		ns_to_timespec((kt))

/* Map the ktime_t to timespec64 conversion to ns_to_timespec64 function */
#define ktime_to_timespec64(kt)		ns_to_timespec64((kt))

/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt)		ns_to_timeval((kt))

/* Convert ktime_t to nanoseconds */
static inline s64 ktime_to_ns(const ktime_t kt)
{
	return kt;
}

/**
 * ktime_compare - Compares two ktime_t variables for less, greater or equal
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return:
 *   cmp1  < cmp2: return <0
 *   cmp1 == cmp2: return 0
 *   cmp1  > cmp2: return >0
 */
static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
	if (cmp1 < cmp2)
		return -1;
	if (cmp1 > cmp2)
		return 1;
	return 0;
}

/**
 * ktime_after - Compare if a ktime_t value is bigger than another one.
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: true if cmp1 happened after cmp2.
 */
static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) > 0;
}

/**
 * ktime_before - Compare if a ktime_t value is smaller than another one.
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: true if cmp1 happened before cmp2.
 */
static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) < 0;
}

#if BITS_PER_LONG < 64
extern s64 __ktime_divns(const ktime_t kt, s64 div);
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * Negative divisors could cause an infinite loop,
	 * so bug out here.
	 */
	BUG_ON(div < 0);
	if (__builtin_constant_p(div) && !(div >> 32)) {
		s64 ns = kt;
		u64 tmp = ns < 0 ? -ns : ns;

		do_div(tmp, div);
		return ns < 0 ?
-tmp : tmp; } else { return __ktime_divns(kt, div); } } #else /* BITS_PER_LONG < 64 */ static inline s64 ktime_divns(const ktime_t kt, s64 div) { /* * 32-bit implementation cannot handle negative divisors, * so catch them on 64bit as well. */ WARN_ON(div < 0); return kt / div; } #endif static inline s64 ktime_to_us(const ktime_t kt) { return ktime_divns(kt, NSEC_PER_USEC); } static inline s64 ktime_to_ms(const ktime_t kt) { return ktime_divns(kt, NSEC_PER_MSEC); } static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) { return ktime_to_us(ktime_sub(later, earlier)); } static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier) { return ktime_to_ms(ktime_sub(later, earlier)); } static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) { return ktime_add_ns(kt, usec * NSEC_PER_USEC); } static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec) { return ktime_add_ns(kt, msec * NSEC_PER_MSEC); } static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) { return ktime_sub_ns(kt, usec * NSEC_PER_USEC); } static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec) { return ktime_sub_ns(kt, msec * NSEC_PER_MSEC); } extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); /** * ktime_to_timespec_cond - convert a ktime_t variable to timespec * format only if the variable contains data * @kt: the ktime_t variable to convert * @ts: the timespec variable to store the result in * * Return: %true if there was a successful conversion, %false if kt was 0. */ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, struct timespec *ts) { if (kt) { *ts = ktime_to_timespec(kt); return true; } else { return false; } } /** * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64 * format only if the variable contains data * @kt: the ktime_t variable to convert * @ts: the timespec variable to store the result in * * Return: %true if there was a successful conversion, %false if kt was 0. */ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, struct timespec64 *ts) { if (kt) { *ts = ktime_to_timespec64(kt); return true; } else { return false; } } /* * The resolution of the clocks. The resolution value is returned in * the clock_getres() system call to give application programmers an * idea of the (in)accuracy of timers. Timer values are rounded up to * this resolution values. */ #define LOW_RES_NSEC TICK_NSEC #define KTIME_LOW_RES (LOW_RES_NSEC) static inline ktime_t ns_to_ktime(u64 ns) { return ns; } static inline ktime_t ms_to_ktime(u64 ms) { return ms * NSEC_PER_MSEC; } # include <linux/timekeeping.h> # include <linux/timekeeping32.h> #endif delayacct.h 0000644 00000012406 14722070374 0006657 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* delayacct.h - per-task delay accounting * * Copyright (C) Shailabh Nagar, IBM Corp. 
2006 */ #ifndef _LINUX_DELAYACCT_H #define _LINUX_DELAYACCT_H #include <uapi/linux/taskstats.h> /* * Per-task flags relevant to delay accounting * maintained privately to avoid exhausting similar flags in sched.h:PF_* * Used to set current->delays->flags */ #define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */ #define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */ #ifdef CONFIG_TASK_DELAY_ACCT struct task_delay_info { raw_spinlock_t lock; unsigned int flags; /* Private per-task flags */ /* For each stat XXX, add following, aligned appropriately * * struct timespec XXX_start, XXX_end; * u64 XXX_delay; * u32 XXX_count; * * Atomicity of updates to XXX_delay, XXX_count protected by * single lock above (split into XXX_lock if contention is an issue). */ /* * XXX_count is incremented on every XXX operation, the delay * associated with the operation is added to XXX_delay. * XXX_delay contains the accumulated delay time in nanoseconds. */ u64 blkio_start; /* Shared by blkio, swapin */ u64 blkio_delay; /* wait for sync block io completion */ u64 swapin_delay; /* wait for swapin block io completion */ u32 blkio_count; /* total count of the number of sync block */ /* io operations performed */ u32 swapin_count; /* total count of the number of swapin block */ /* io operations performed */ u64 freepages_start; u64 freepages_delay; /* wait for memory reclaim */ u64 thrashing_start; u64 thrashing_delay; /* wait for thrashing page */ u32 freepages_count; /* total count of memory reclaim */ u32 thrashing_count; /* total count of thrash waits */ }; #endif #include <linux/sched.h> #include <linux/slab.h> #ifdef CONFIG_TASK_DELAY_ACCT extern int delayacct_on; /* Delay accounting turned on/off */ extern struct kmem_cache *delayacct_cache; extern void delayacct_init(void); extern void __delayacct_tsk_init(struct task_struct *); extern void __delayacct_tsk_exit(struct task_struct *); extern void __delayacct_blkio_start(void); extern void __delayacct_blkio_end(struct task_struct *); extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); extern void __delayacct_freepages_start(void); extern void __delayacct_freepages_end(void); extern void __delayacct_thrashing_start(void); extern void __delayacct_thrashing_end(void); static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) { if (p->delays) return (p->delays->flags & DELAYACCT_PF_BLKIO); else return 0; } static inline void delayacct_set_flag(int flag) { if (current->delays) current->delays->flags |= flag; } static inline void delayacct_clear_flag(int flag) { if (current->delays) current->delays->flags &= ~flag; } static inline void delayacct_tsk_init(struct task_struct *tsk) { /* reinitialize in case parent's non-null pointer was dup'ed*/ tsk->delays = NULL; if (delayacct_on) __delayacct_tsk_init(tsk); } /* Free tsk->delays. 
Called from bad fork and __put_task_struct * where there's no risk of tsk->delays being accessed elsewhere */ static inline void delayacct_tsk_free(struct task_struct *tsk) { if (tsk->delays) kmem_cache_free(delayacct_cache, tsk->delays); tsk->delays = NULL; } static inline void delayacct_blkio_start(void) { delayacct_set_flag(DELAYACCT_PF_BLKIO); if (current->delays) __delayacct_blkio_start(); } static inline void delayacct_blkio_end(struct task_struct *p) { if (p->delays) __delayacct_blkio_end(p); delayacct_clear_flag(DELAYACCT_PF_BLKIO); } static inline int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) { if (!delayacct_on || !tsk->delays) return 0; return __delayacct_add_tsk(d, tsk); } static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) { if (tsk->delays) return __delayacct_blkio_ticks(tsk); return 0; } static inline void delayacct_freepages_start(void) { if (current->delays) __delayacct_freepages_start(); } static inline void delayacct_freepages_end(void) { if (current->delays) __delayacct_freepages_end(); } static inline void delayacct_thrashing_start(void) { if (current->delays) __delayacct_thrashing_start(); } static inline void delayacct_thrashing_end(void) { if (current->delays) __delayacct_thrashing_end(); } #else static inline void delayacct_set_flag(int flag) {} static inline void delayacct_clear_flag(int flag) {} static inline void delayacct_init(void) {} static inline void delayacct_tsk_init(struct task_struct *tsk) {} static inline void delayacct_tsk_free(struct task_struct *tsk) {} static inline void delayacct_blkio_start(void) {} static inline void delayacct_blkio_end(struct task_struct *p) {} static inline int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) { return 0; } static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) { return 0; } static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) { return 0; } static inline void delayacct_freepages_start(void) {} static inline void delayacct_freepages_end(void) {} static inline void delayacct_thrashing_start(void) {} static inline void delayacct_thrashing_end(void) {} #endif /* CONFIG_TASK_DELAY_ACCT */ #endif dio.h 0000644 00000025735 14722070374 0005512 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* header file for DIO boards for the HP300 architecture. * Maybe this should handle DIO-II later? * The general structure of this is vaguely based on how * the Amiga port handles Zorro boards. * Copyright (C) Peter Maydell 05/1998 <pmaydell@chiark.greenend.org.uk> * Converted to driver model Jochen Friedrich <jochen@scram.de> * * The board IDs are from the NetBSD kernel, which for once provided * helpful comments... * * This goes with drivers/dio/dio.c */ #ifndef _LINUX_DIO_H #define _LINUX_DIO_H /* The DIO boards in a system are distinguished by 'select codes' which * range from 0-63 (DIO) and 132-255 (DIO-II). * The DIO board with select code sc is located at physical address * 0x600000 + sc * 0x10000 * So DIO cards cover [0x600000-0x800000); the areas [0x200000-0x400000) and * [0x800000-0x1000000) are for additional space required by things * like framebuffers. [0x400000-0x600000) is for miscellaneous internal I/O. * On Linux, this is currently all mapped into the virtual address space * at 0xf0000000 on bootup. * DIO-II boards are at 0x1000000 + (sc - 132) * 0x400000 * which is address range [0x1000000-0x20000000) -- too big to map completely, * so currently we just don't handle DIO-II boards. It wouldn't be hard to * do with ioremap() though. 
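 *
 * Worked example (illustrative): the board at select code 5 sits at
 * physical 0x600000 + 5 * 0x10000 = 0x650000, which after the bootup
 * mapping described above is accessed at DIO_VIRADDRBASE + 0x650000.
 * dio_scodetophysaddr(), declared further down, does this arithmetic.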
*/ #include <linux/device.h> #ifdef __KERNEL__ #include <asm/hp300hw.h> typedef __u16 dio_id; /* * DIO devices */ struct dio_dev { struct dio_bus *bus; dio_id id; int scode; struct dio_driver *driver; /* which driver has allocated this device */ struct device dev; /* Generic device interface */ u8 ipl; char name[64]; struct resource resource; }; #define to_dio_dev(n) container_of(n, struct dio_dev, dev) /* * DIO bus */ struct dio_bus { struct list_head devices; /* list of devices on this bus */ unsigned int num_resources; /* number of resources */ struct resource resources[2]; /* address space routed to this bus */ struct device dev; char name[10]; }; extern struct dio_bus dio_bus; /* Single DIO bus */ extern struct bus_type dio_bus_type; /* * DIO device IDs */ struct dio_device_id { dio_id id; /* Device ID or DIO_WILDCARD */ unsigned long driver_data; /* Data private to the driver */ }; /* * DIO device drivers */ struct dio_driver { struct list_head node; char *name; const struct dio_device_id *id_table; /* NULL if wants all devices */ int (*probe)(struct dio_dev *z, const struct dio_device_id *id); /* New device inserted */ void (*remove)(struct dio_dev *z); /* Device removed (NULL if not a hot-plug capable driver) */ struct device_driver driver; }; #define to_dio_driver(drv) container_of(drv, struct dio_driver, driver) /* DIO/DIO-II boards all have the following 8bit registers. * These are offsets from the base of the device. */ #define DIO_IDOFF 0x01 /* primary device ID */ #define DIO_IPLOFF 0x03 /* interrupt priority level */ #define DIO_SECIDOFF 0x15 /* secondary device ID */ #define DIOII_SIZEOFF 0x101 /* device size, DIO-II only */ #define DIO_VIRADDRBASE 0xf0000000UL /* vir addr where IOspace is mapped */ #define DIO_BASE 0x600000 /* start of DIO space */ #define DIO_END 0x1000000 /* end of DIO space */ #define DIO_DEVSIZE 0x10000 /* size of a DIO device */ #define DIOII_BASE 0x01000000 /* start of DIO-II space */ #define DIOII_END 0x20000000 /* end of DIO-II space */ #define DIOII_DEVSIZE 0x00400000 /* size of a DIO-II device */ /* Highest valid select code. If we add DIO-II support this should become * 256 for everything except HP320, which only has DIO. */ #define DIO_SCMAX (hp300_model == HP_320 ? 32 : 256) #define DIOII_SCBASE 132 /* lowest DIO-II select code */ #define DIO_SCINHOLE(scode) (((scode) >= 32) && ((scode) < DIOII_SCBASE)) #define DIO_ISDIOII(scode) ((scode) >= 132 && (scode) < 256) /* macros to read device IDs, given base address */ #define DIO_ID(baseaddr) in_8((baseaddr) + DIO_IDOFF) #define DIO_SECID(baseaddr) in_8((baseaddr) + DIO_SECIDOFF) /* extract the interrupt level */ #define DIO_IPL(baseaddr) (((in_8((baseaddr) + DIO_IPLOFF) >> 4) & 0x03) + 3) /* find the size of a DIO-II board's address space. * DIO boards are all fixed length. */ #define DIOII_SIZE(baseaddr) ((in_8((baseaddr) + DIOII_SIZEOFF) + 1) * 0x100000) /* general purpose macro for both DIO and DIO-II */ #define DIO_SIZE(scode, base) (DIO_ISDIOII((scode)) ? DIOII_SIZE((base)) : DIO_DEVSIZE) /* The hardware has primary and secondary IDs; we encode these in a single * int as PRIMARY ID & (SECONDARY ID << 8). * In practice this is only important for framebuffers, * and everybody else just sets ID fields equal to the DIO_ID_FOO value. 
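 *
 * For example (a sketch), a driver matching a "topcat" framebuffer
 * would put
 *
 *	DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_TOPCAT)
 *
 * in its id_table, i.e. 0x39 | (0x02 << 8) == 0x0239, while the LANCE
 * ethernet driver would just use DIO_ID_LAN (all IDs defined below).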
 */
#define DIO_ENCODE_ID(pr,sec)	((((int)sec & 0xff) << 8) | ((int)pr & 0xff))
/* macro to determine whether a given primary ID requires a secondary ID byte */
#define DIO_NEEDSSECID(id)	((id) == DIO_ID_FBUFFER)
#define DIO_WILDCARD	0xff

/* Now a whole slew of macros giving device IDs and descriptive strings: */
#define DIO_ID_DCA0	0x02	/* 98644A serial */
#define DIO_DESC_DCA0	"98644A DCA0 serial"
#define DIO_ID_DCA0REM	0x82	/* 98644A serial */
#define DIO_DESC_DCA0REM "98644A DCA0REM serial"
#define DIO_ID_DCA1	0x42	/* 98644A serial */
#define DIO_DESC_DCA1	"98644A DCA1 serial"
#define DIO_ID_DCA1REM	0xc2	/* 98644A serial */
#define DIO_DESC_DCA1REM "98644A DCA1REM serial"
#define DIO_ID_DCM	0x05	/* 98642A serial MUX */
#define DIO_DESC_DCM	"98642A DCM serial MUX"
#define DIO_ID_DCMREM	0x85	/* 98642A serial MUX */
#define DIO_DESC_DCMREM	"98642A DCMREM serial MUX"
#define DIO_ID_LAN	0x15	/* 98643A LAN */
#define DIO_DESC_LAN	"98643A LANCE ethernet"
#define DIO_ID_FHPIB	0x08	/* 98625A/98625B fast HP-IB */
#define DIO_DESC_FHPIB	"98625A/98625B fast HPIB"
#define DIO_ID_NHPIB	0x01	/* 98624A HP-IB (normal ie slow) */
#define DIO_DESC_NHPIB	"98624A HPIB"
#define DIO_ID_SCSI0	0x07	/* 98265A SCSI */
#define DIO_DESC_SCSI0	"98265A SCSI0"
#define DIO_ID_SCSI1	0x27	/* ditto */
#define DIO_DESC_SCSI1	"98265A SCSI1"
#define DIO_ID_SCSI2	0x47	/* ditto */
#define DIO_DESC_SCSI2	"98265A SCSI2"
#define DIO_ID_SCSI3	0x67	/* ditto */
#define DIO_DESC_SCSI3	"98265A SCSI3"
#define DIO_ID_FBUFFER	0x39	/* framebuffer: flavour is distinguished by secondary ID */
#define DIO_DESC_FBUFFER "bitmapped display"
/* the NetBSD kernel source is a bit unsure as to what these next IDs actually do :-> */
#define DIO_ID_MISC0	0x03	/* 98622A */
#define DIO_DESC_MISC0	"98622A"
#define DIO_ID_MISC1	0x04	/* 98623A */
#define DIO_DESC_MISC1	"98623A"
#define DIO_ID_PARALLEL	0x06	/* internal parallel */
#define DIO_DESC_PARALLEL "internal parallel"
#define DIO_ID_MISC2	0x09	/* 98287A keyboard */
#define DIO_DESC_MISC2	"98287A keyboard"
#define DIO_ID_MISC3	0x0a	/* HP98635A FP accelerator */
#define DIO_DESC_MISC3	"HP98635A FP accelerator"
#define DIO_ID_MISC4	0x0b	/* timer */
#define DIO_DESC_MISC4	"timer"
#define DIO_ID_MISC5	0x12	/* 98640A */
#define DIO_DESC_MISC5	"98640A"
#define DIO_ID_MISC6	0x16	/* 98659A */
#define DIO_DESC_MISC6	"98659A"
#define DIO_ID_MISC7	0x19	/* 237 display */
#define DIO_DESC_MISC7	"237 display"
#define DIO_ID_MISC8	0x1a	/* quad-wide card */
#define DIO_DESC_MISC8	"quad-wide card"
#define DIO_ID_MISC9	0x1b	/* 98253A */
#define DIO_DESC_MISC9	"98253A"
#define DIO_ID_MISC10	0x1c	/* 98627A */
#define DIO_DESC_MISC10	"98627A"
#define DIO_ID_MISC11	0x1d	/* 98633A */
#define DIO_DESC_MISC11	"98633A"
#define DIO_ID_MISC12	0x1e	/* 98259A */
#define DIO_DESC_MISC12	"98259A"
#define DIO_ID_MISC13	0x1f	/* 8741 */
#define DIO_DESC_MISC13	"8741"
#define DIO_ID_VME	0x31	/* 98577A VME adapter */
#define DIO_DESC_VME	"98577A VME adapter"
#define DIO_ID_DCL	0x34	/* 98628A serial */
#define DIO_DESC_DCL	"98628A DCL serial"
#define DIO_ID_DCLREM	0xb4	/* 98628A serial */
#define DIO_DESC_DCLREM	"98628A DCLREM serial"
/* These are the secondary IDs for the framebuffers */
#define DIO_ID2_GATORBOX	0x01	/* 98700/98710 "gatorbox" */
#define DIO_DESC2_GATORBOX	"98700/98710 \"gatorbox\" display"
#define DIO_ID2_TOPCAT		0x02	/* 98544/98545/98547 "topcat" */
#define DIO_DESC2_TOPCAT	"98544/98545/98547 \"topcat\" display"
#define DIO_ID2_RENAISSANCE	0x04	/* 98720/98721 "renaissance" */
#define DIO_DESC2_RENAISSANCE	"98720/98721
\"renaissance\" display" #define DIO_ID2_LRCATSEYE 0x05 /* lowres "catseye" */ #define DIO_DESC2_LRCATSEYE "low-res catseye display" #define DIO_ID2_HRCCATSEYE 0x06 /* highres colour "catseye" */ #define DIO_DESC2_HRCCATSEYE "high-res color catseye display" #define DIO_ID2_HRMCATSEYE 0x07 /* highres mono "catseye" */ #define DIO_DESC2_HRMCATSEYE "high-res mono catseye display" #define DIO_ID2_DAVINCI 0x08 /* 98730/98731 "davinci" */ #define DIO_DESC2_DAVINCI "98730/98731 \"davinci\" display" #define DIO_ID2_XXXCATSEYE 0x09 /* "catseye" */ #define DIO_DESC2_XXXCATSEYE "catseye display" #define DIO_ID2_HYPERION 0x0e /* A1096A "hyperion" */ #define DIO_DESC2_HYPERION "A1096A \"hyperion\" display" #define DIO_ID2_XGENESIS 0x0b /* "x-genesis"; no NetBSD support */ #define DIO_DESC2_XGENESIS "\"x-genesis\" display" #define DIO_ID2_TIGER 0x0c /* "tiger"; no NetBSD support */ #define DIO_DESC2_TIGER "\"tiger\" display" #define DIO_ID2_YGENESIS 0x0d /* "y-genesis"; no NetBSD support */ #define DIO_DESC2_YGENESIS "\"y-genesis\" display" /* if you add new IDs then you should tell dio.c about them so it can * identify them... */ extern int dio_find(int deviceid); extern unsigned long dio_scodetophysaddr(int scode); extern int dio_create_sysfs_dev_files(struct dio_dev *); /* New-style probing */ extern int dio_register_driver(struct dio_driver *); extern void dio_unregister_driver(struct dio_driver *); extern const struct dio_device_id *dio_match_device(const struct dio_device_id *ids, const struct dio_dev *z); static inline struct dio_driver *dio_dev_driver(const struct dio_dev *d) { return d->driver; } #define dio_resource_start(d) ((d)->resource.start) #define dio_resource_end(d) ((d)->resource.end) #define dio_resource_len(d) (resource_size(&(d)->resource)) #define dio_resource_flags(d) ((d)->resource.flags) #define dio_request_device(d, name) \ request_mem_region(dio_resource_start(d), dio_resource_len(d), name) #define dio_release_device(d) \ release_mem_region(dio_resource_start(d), dio_resource_len(d)) /* Similar to the helpers above, these manipulate per-dio_dev * driver-specific data. They are really just a wrapper around * the generic device structure functions of these calls. */ static inline void *dio_get_drvdata (struct dio_dev *d) { return dev_get_drvdata(&d->dev); } static inline void dio_set_drvdata (struct dio_dev *d, void *data) { dev_set_drvdata(&d->dev, data); } #endif /* __KERNEL__ */ #endif /* ndef _LINUX_DIO_H */ mman.h 0000644 00000006435 14722070374 0005663 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMAN_H #define _LINUX_MMAN_H #include <linux/mm.h> #include <linux/percpu_counter.h> #include <linux/atomic.h> #include <uapi/linux/mman.h> /* * Arrange for legacy / undefined architecture specific flags to be * ignored by mmap handling code. */ #ifndef MAP_32BIT #define MAP_32BIT 0 #endif #ifndef MAP_HUGE_2MB #define MAP_HUGE_2MB 0 #endif #ifndef MAP_HUGE_1GB #define MAP_HUGE_1GB 0 #endif #ifndef MAP_UNINITIALIZED #define MAP_UNINITIALIZED 0 #endif #ifndef MAP_SYNC #define MAP_SYNC 0 #endif /* * The historical set of flags that all mmap implementations implicitly * support when a ->mmap_validate() op is not provided in file_operations. 
*/ #define LEGACY_MAP_MASK (MAP_SHARED \ | MAP_PRIVATE \ | MAP_FIXED \ | MAP_ANONYMOUS \ | MAP_DENYWRITE \ | MAP_EXECUTABLE \ | MAP_UNINITIALIZED \ | MAP_GROWSDOWN \ | MAP_LOCKED \ | MAP_NORESERVE \ | MAP_POPULATE \ | MAP_NONBLOCK \ | MAP_STACK \ | MAP_HUGETLB \ | MAP_32BIT \ | MAP_HUGE_2MB \ | MAP_HUGE_1GB) extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; extern unsigned long sysctl_overcommit_kbytes; extern struct percpu_counter vm_committed_as; #ifdef CONFIG_SMP extern s32 vm_committed_as_batch; #else #define vm_committed_as_batch 0 #endif unsigned long vm_memory_committed(void); static inline void vm_acct_memory(long pages) { percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch); } static inline void vm_unacct_memory(long pages) { vm_acct_memory(-pages); } /* * Allow architectures to handle additional protection bits */ #ifndef arch_calc_vm_prot_bits #define arch_calc_vm_prot_bits(prot, pkey) 0 #endif #ifndef arch_vm_get_page_prot #define arch_vm_get_page_prot(vm_flags) __pgprot(0) #endif #ifndef arch_validate_prot /* * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have * already been masked out. * * Returns true if the prot flags are valid */ static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) { return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; } #define arch_validate_prot arch_validate_prot #endif /* * Optimisation macro. It is equivalent to: * (x & bit1) ? bit2 : 0 * but this version is faster. * ("bit1" and "bit2" must be single bits) */ #define _calc_vm_trans(x, bit1, bit2) \ ((!(bit1) || !(bit2)) ? 0 : \ ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \ : ((x) & (bit1)) / ((bit1) / (bit2)))) /* * Combine the mmap "prot" argument into "vm_flags" used internally. */ static inline unsigned long calc_vm_prot_bits(unsigned long prot, unsigned long pkey) { return _calc_vm_trans(prot, PROT_READ, VM_READ ) | _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) | _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) | arch_calc_vm_prot_bits(prot, pkey); } /* * Combine the mmap "flags" argument into "vm_flags" used internally. */ static inline unsigned long calc_vm_flag_bits(unsigned long flags) { return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ); } unsigned long vm_commit_limit(void); #endif /* _LINUX_MMAN_H */ percpu.h 0000644 00000011232 14722070374 0006220 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_PERCPU_H #define __LINUX_PERCPU_H #include <linux/mmdebug.h> #include <linux/preempt.h> #include <linux/smp.h> #include <linux/cpumask.h> #include <linux/printk.h> #include <linux/pfn.h> #include <linux/init.h> #include <asm/percpu.h> /* enough to cover all DEFINE_PER_CPUs in modules */ #ifdef CONFIG_MODULES #define PERCPU_MODULE_RESERVE (8 << 10) #else #define PERCPU_MODULE_RESERVE 0 #endif /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) /* minimum allocation size and shift in bytes */ #define PCPU_MIN_ALLOC_SHIFT 2 #define PCPU_MIN_ALLOC_SIZE (1 << PCPU_MIN_ALLOC_SHIFT) /* * The PCPU_BITMAP_BLOCK_SIZE must be the same size as PAGE_SIZE as the * updating of hints is used to manage the nr_empty_pop_pages in both * the chunk and globally. 
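 *
 * E.g. with 4K pages (illustrative), PCPU_BITMAP_BLOCK_BITS below
 * works out to 4096 >> PCPU_MIN_ALLOC_SHIFT == 1024 allocation-map
 * bits per block.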
*/ #define PCPU_BITMAP_BLOCK_SIZE PAGE_SIZE #define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \ PCPU_MIN_ALLOC_SHIFT) /* * Percpu allocator can serve percpu allocations before slab is * initialized which allows slab to depend on the percpu allocator. * The following two parameters decide how much resource to * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or * larger than PERCPU_DYNAMIC_EARLY_SIZE. */ #define PERCPU_DYNAMIC_EARLY_SLOTS 128 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10) /* * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy * back on the first chunk for dynamic percpu allocation if arch is * manually allocating and mapping it for faster access (as a part of * large page mapping for example). * * The following values give between one and two pages of free space * after typical minimal boot (2-way SMP, single disk and NIC) with * both defconfig and a distro config on x86_64 and 32. More * intelligent way to determine this would be nice. */ #if BITS_PER_LONG > 32 #define PERCPU_DYNAMIC_RESERVE (28 << 10) #else #define PERCPU_DYNAMIC_RESERVE (20 << 10) #endif extern void *pcpu_base_addr; extern const unsigned long *pcpu_unit_offsets; struct pcpu_group_info { int nr_units; /* aligned # of units */ unsigned long base_offset; /* base address offset */ unsigned int *cpu_map; /* unit->cpu map, empty * entries contain NR_CPUS */ }; struct pcpu_alloc_info { size_t static_size; size_t reserved_size; size_t dyn_size; size_t unit_size; size_t atom_size; size_t alloc_size; size_t __ai_size; /* internal, don't use */ int nr_groups; /* 0 if grouping unnecessary */ struct pcpu_group_info groups[]; }; enum pcpu_fc { PCPU_FC_AUTO, PCPU_FC_EMBED, PCPU_FC_PAGE, PCPU_FC_NR, }; extern const char * const pcpu_fc_names[PCPU_FC_NR]; extern enum pcpu_fc pcpu_chosen_fc; typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size, size_t align); typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size); typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr); typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to); extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, int nr_units); extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai); extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr); #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn, pcpu_fc_alloc_fn_t alloc_fn, pcpu_fc_free_fn_t free_fn); #endif #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK extern int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_alloc_fn_t alloc_fn, pcpu_fc_free_fn_t free_fn, pcpu_fc_populate_pte_fn_t populate_pte_fn); #endif extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr); extern bool is_kernel_percpu_address(unsigned long addr); #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) extern void __init setup_per_cpu_areas(void); #endif extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp); extern void __percpu *__alloc_percpu(size_t size, size_t align); extern void free_percpu(void __percpu *__pdata); extern phys_addr_t per_cpu_ptr_to_phys(void *addr); #define alloc_percpu_gfp(type, gfp) \ (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \ __alignof__(type), gfp) #define 
alloc_percpu(type) \ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) extern unsigned long pcpu_nr_pages(void); #endif /* __LINUX_PERCPU_H */ crc32.h 0000644 00000005516 14722070374 0005646 0 ustar 00 /* * crc32.h * See linux/lib/crc32.c for license and changes */ #ifndef _LINUX_CRC32_H #define _LINUX_CRC32_H #include <linux/types.h> #include <linux/bitrev.h> u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len); u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len); /** * crc32_le_combine - Combine two crc32 check values into one. For two * sequences of bytes, seq1 and seq2 with lengths len1 * and len2, crc32_le() check values were calculated * for each, crc1 and crc2. * * @crc1: crc32 of the first block * @crc2: crc32 of the second block * @len2: length of the second block * * Return: The crc32_le() check value of seq1 and seq2 concatenated, * requiring only crc1, crc2, and len2. Note: If seq_full denotes * the concatenated memory area of seq1 with seq2, and crc_full * the crc32_le() value of seq_full, then crc_full == * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded * with the same initializer as crc1, and crc2 seed was 0. See * also crc32_combine_test(). */ u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len); static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2) { return crc32_le_shift(crc1, len2) ^ crc2; } u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len); /** * __crc32c_le_combine - Combine two crc32c check values into one. For two * sequences of bytes, seq1 and seq2 with lengths len1 * and len2, __crc32c_le() check values were calculated * for each, crc1 and crc2. * * @crc1: crc32c of the first block * @crc2: crc32c of the second block * @len2: length of the second block * * Return: The __crc32c_le() check value of seq1 and seq2 concatenated, * requiring only crc1, crc2, and len2. Note: If seq_full denotes * the concatenated memory area of seq1 with seq2, and crc_full * the __crc32c_le() value of seq_full, then crc_full == * __crc32c_le_combine(crc1, crc2, len2) when crc_full was * seeded with the same initializer as crc1, and crc2 seed * was 0. See also crc32c_combine_test(). */ u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len); static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2) { return __crc32c_le_shift(crc1, len2) ^ crc2; } #define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length) /* * Helpers for hash table generation of ethernet nics: * * Ethernet sends the least significant bit of a byte first, thus crc32_le * is used. The output of crc32_le is bit reversed [most significant bit * is in bit nr 0], thus it must be reversed before use. Except for * nics that bit swap the result internally... 
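 *
 * Typical driver usage is a sketch like (ETH_ALEN and the exact bit
 * selection vary by NIC, so this is illustrative only):
 *
 *	hash = ether_crc(ETH_ALEN, mc_addr) >> 26;
 *
 * which keeps the top 6 bits of the bit-reversed CRC to index a
 * 64-entry multicast filter.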
*/ #define ether_crc(length, data) bitrev32(crc32_le(~0, data, length)) #define ether_crc_le(length, data) crc32_le(~0, data, length) #endif /* _LINUX_CRC32_H */ atmel-mci.h 0000644 00000002625 14722070374 0006600 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_ATMEL_MCI_H #define __LINUX_ATMEL_MCI_H #include <linux/types.h> #include <linux/dmaengine.h> #define ATMCI_MAX_NR_SLOTS 2 /** * struct mci_slot_pdata - board-specific per-slot configuration * @bus_width: Number of data lines wired up the slot * @detect_pin: GPIO pin wired to the card detect switch * @wp_pin: GPIO pin wired to the write protect sensor * @detect_is_active_high: The state of the detect pin when it is active * @non_removable: The slot is not removable, only detect once * * If a given slot is not present on the board, @bus_width should be * set to 0. The other fields are ignored in this case. * * Any pins that aren't available should be set to a negative value. * * Note that support for multiple slots is experimental -- some cards * might get upset if we don't get the clock management exactly right. * But in most cases, it should work just fine. */ struct mci_slot_pdata { unsigned int bus_width; int detect_pin; int wp_pin; bool detect_is_active_high; bool non_removable; }; /** * struct mci_platform_data - board-specific MMC/SDcard configuration * @dma_slave: DMA slave interface to use in data transfers. * @slot: Per-slot configuration data. */ struct mci_platform_data { void *dma_slave; dma_filter_fn dma_filter; struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS]; }; #endif /* __LINUX_ATMEL_MCI_H */ ftrace.h 0000644 00000074212 14722070374 0006175 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Ftrace header. For implementation details beyond the random comments * scattered below, see: Documentation/trace/ftrace-design.rst */ #ifndef _LINUX_FTRACE_H #define _LINUX_FTRACE_H #include <linux/trace_clock.h> #include <linux/kallsyms.h> #include <linux/linkage.h> #include <linux/bitops.h> #include <linux/ptrace.h> #include <linux/ktime.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/init.h> #include <linux/fs.h> #include <asm/ftrace.h> /* * If the arch supports passing the variable contents of * function_trace_op as the third parameter back from the * mcount call, then the arch should define this as 1. */ #ifndef ARCH_SUPPORTS_FTRACE_OPS #define ARCH_SUPPORTS_FTRACE_OPS 0 #endif /* * If the arch's mcount caller does not support all of ftrace's * features, then it must call an indirect function that * does. Or at least does enough to prevent any unwelcomed side effects. 
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called on it, it will initialize
 *            the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
* The arch specific code sets this flag when it allocated a * trampoline. This lets the arch know that it can update the * trampoline in case the callback function changes. * The ftrace_ops trampoline can be set by the ftrace users, and * in such cases the arch must not modify it. Only the arch ftrace * core code should set this flag. * IPMODIFY - The ops can modify the IP register. This can only be set with * SAVE_REGS. If another ops with this flag set is already registered * for any of the functions that this ops will be registered for, then * this ops will fail to register or set_filter_ip. * PID - Is affected by set_ftrace_pid (allows filtering on those pids) * RCU - Set when the ops can only be called when RCU is watching. * TRACE_ARRAY - The ops->private points to a trace_array descriptor. */ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, FTRACE_OPS_FL_DYNAMIC = 1 << 1, FTRACE_OPS_FL_SAVE_REGS = 1 << 2, FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 3, FTRACE_OPS_FL_RECURSION_SAFE = 1 << 4, FTRACE_OPS_FL_STUB = 1 << 5, FTRACE_OPS_FL_INITIALIZED = 1 << 6, FTRACE_OPS_FL_DELETED = 1 << 7, FTRACE_OPS_FL_ADDING = 1 << 8, FTRACE_OPS_FL_REMOVING = 1 << 9, FTRACE_OPS_FL_MODIFYING = 1 << 10, FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 11, FTRACE_OPS_FL_IPMODIFY = 1 << 12, FTRACE_OPS_FL_PID = 1 << 13, FTRACE_OPS_FL_RCU = 1 << 14, FTRACE_OPS_FL_TRACE_ARRAY = 1 << 15, }; #ifdef CONFIG_DYNAMIC_FTRACE /* The hash used to know what functions callbacks trace */ struct ftrace_ops_hash { struct ftrace_hash __rcu *notrace_hash; struct ftrace_hash __rcu *filter_hash; struct mutex regex_lock; }; void ftrace_free_init_mem(void); void ftrace_free_mem(struct module *mod, void *start, void *end); #else static inline void ftrace_free_init_mem(void) { } static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } #endif /* * Note, ftrace_ops can be referenced outside of RCU protection, unless * the RCU flag is set. If ftrace_ops is allocated and not part of kernel * core data, the unregistering of it will perform a scheduling on all CPUs * to make sure that there are no more users. Depending on the load of the * system that may take a bit of time. * * Any private data added must also take care not to be freed and if private * data is added to a ftrace_ops that is in core code, the user of the * ftrace_ops must perform a schedule_on_each_cpu() before freeing it. */ struct ftrace_ops { ftrace_func_t func; struct ftrace_ops __rcu *next; unsigned long flags; void *private; ftrace_func_t saved_func; #ifdef CONFIG_DYNAMIC_FTRACE struct ftrace_ops_hash local_hash; struct ftrace_ops_hash *func_hash; struct ftrace_ops_hash old_hash; unsigned long trampoline; unsigned long trampoline_size; #endif }; /* * Type of the current tracing. */ enum ftrace_tracing_type_t { FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ FTRACE_TYPE_RETURN, /* Hook the return of the function */ }; /* Current tracing type, default is FTRACE_TYPE_ENTER */ extern enum ftrace_tracing_type_t ftrace_tracing_type; /* * The ftrace_ops must be a static and should also * be read_mostly. These functions do modify read_mostly variables * so use them sparely. Never free an ftrace_op or modify the * next pointer after it has been registered. Even after unregistering * it, the next pointer may still be used internally. 
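 *
 * Minimal registration sketch (illustrative; my_callback and my_ops
 * are hypothetical names, error handling omitted):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);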
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp,
		       loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct dyn_ftrace;

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 * ENABLED - the function is being traced
 * REGS    - the record wants the function to save regs
 * REGS_EN - the function is set up to save regs.
 * IPMODIFY - the record allows for the IP address to be changed.
 * DISABLED - the record is not ready to be touched yet
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
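 *
 * E.g. (illustrative) with FTRACE_REF_MAX_SHIFT == 25 below, a record
 * registered by two callbacks, one of which saves regs, would have
 * ftrace_rec_count(rec) == 2 in the low bits while FTRACE_FL_ENABLED,
 * FTRACE_FL_REGS and FTRACE_FL_REGS_EN sit among the top bits.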
*/ enum { FTRACE_FL_ENABLED = (1UL << 31), FTRACE_FL_REGS = (1UL << 30), FTRACE_FL_REGS_EN = (1UL << 29), FTRACE_FL_TRAMP = (1UL << 28), FTRACE_FL_TRAMP_EN = (1UL << 27), FTRACE_FL_IPMODIFY = (1UL << 26), FTRACE_FL_DISABLED = (1UL << 25), }; #define FTRACE_REF_MAX_SHIFT 25 #define FTRACE_FL_BITS 7 #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) #define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK) struct dyn_ftrace { unsigned long ip; /* address of mcount call-site */ unsigned long flags; struct dyn_arch_ftrace arch; }; int ftrace_force_update(void); int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset); int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); void ftrace_set_global_filter(unsigned char *buf, int len, int reset); void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); void ftrace_free_filter(struct ftrace_ops *ops); void ftrace_ops_set_global_filter(struct ftrace_ops *ops); enum { FTRACE_UPDATE_CALLS = (1 << 0), FTRACE_DISABLE_CALLS = (1 << 1), FTRACE_UPDATE_TRACE_FUNC = (1 << 2), FTRACE_START_FUNC_RET = (1 << 3), FTRACE_STOP_FUNC_RET = (1 << 4), FTRACE_MAY_SLEEP = (1 << 5), }; /* * The FTRACE_UPDATE_* enum is used to pass information back * from the ftrace_update_record() and ftrace_test_record() * functions. These are called by the code update routines * to find out what is to be done for a given function. * * IGNORE - The function is already what we want it to be * MAKE_CALL - Start tracing the function * MODIFY_CALL - Stop saving regs for the function * MAKE_NOP - Stop tracing the function */ enum { FTRACE_UPDATE_IGNORE, FTRACE_UPDATE_MAKE_CALL, FTRACE_UPDATE_MODIFY_CALL, FTRACE_UPDATE_MAKE_NOP, }; enum { FTRACE_ITER_FILTER = (1 << 0), FTRACE_ITER_NOTRACE = (1 << 1), FTRACE_ITER_PRINTALL = (1 << 2), FTRACE_ITER_DO_PROBES = (1 << 3), FTRACE_ITER_PROBE = (1 << 4), FTRACE_ITER_MOD = (1 << 5), FTRACE_ITER_ENABLED = (1 << 6), }; void arch_ftrace_update_code(int command); void arch_ftrace_update_trampoline(struct ftrace_ops *ops); void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec); void arch_ftrace_trampoline_free(struct ftrace_ops *ops); struct ftrace_rec_iter; struct ftrace_rec_iter *ftrace_rec_iter_start(void); struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter); struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); #define for_ftrace_rec_iter(iter) \ for (iter = ftrace_rec_iter_start(); \ iter; \ iter = ftrace_rec_iter_next(iter)) int ftrace_update_record(struct dyn_ftrace *rec, bool enable); int ftrace_test_record(struct dyn_ftrace *rec, bool enable); void ftrace_run_stop_machine(int command); unsigned long ftrace_location(unsigned long ip); unsigned long ftrace_location_range(unsigned long start, unsigned long end); unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec); unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec); extern ftrace_func_t ftrace_trace_function; int ftrace_regex_open(struct ftrace_ops *ops, int flag, struct inode *inode, struct file *file); ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos); ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos); int 
ftrace_regex_release(struct inode *inode, struct file *file); void __init ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable); /* defined in arch */ extern int ftrace_ip_converted(unsigned long ip); extern int ftrace_dyn_arch_init(void); extern void ftrace_replace_code(int enable); extern int ftrace_update_ftrace_func(ftrace_func_t func); extern void ftrace_caller(void); extern void ftrace_regs_caller(void); extern void ftrace_call(void); extern void ftrace_regs_call(void); extern void mcount_call(void); void ftrace_modify_all_code(int command); #ifndef FTRACE_ADDR #define FTRACE_ADDR ((unsigned long)ftrace_caller) #endif #ifndef FTRACE_GRAPH_ADDR #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller) #endif #ifndef FTRACE_REGS_ADDR #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) #else # define FTRACE_REGS_ADDR FTRACE_ADDR #endif #endif /* * If an arch would like functions that are only traced * by the function graph tracer to jump directly to its own * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR * to be that address to jump to. */ #ifndef FTRACE_GRAPH_TRAMP_ADDR #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0) #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER extern void ftrace_graph_caller(void); extern int ftrace_enable_ftrace_graph_caller(void); extern int ftrace_disable_ftrace_graph_caller(void); #else static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; } static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } #endif /** * ftrace_make_nop - convert code into nop * @mod: module structure if called by module load initialization * @rec: the call site record (e.g. mcount/fentry) * @addr: the address that the call site should be calling * * This is a very sensitive operation and great care needs * to be taken by the arch. The operation should carefully * read the location, check to see if what is read is indeed * what we expect it to be, and then on success of the compare, * it should write to the location. * * The code segment at @rec->ip should be a caller to @addr * * Return must be: * 0 on success * -EFAULT on error reading the location * -EINVAL on a failed compare of the contents * -EPERM on error writing to the location * Any other value will be considered a failure. */ extern int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr); /** * ftrace_init_nop - initialize a nop call site * @mod: module structure if called by module load initialization * @rec: the call site record (e.g. mcount/fentry) * * This is a very sensitive operation and great care needs * to be taken by the arch. The operation should carefully * read the location, check to see if what is read is indeed * what we expect it to be, and then on success of the compare, * it should write to the location. * * The code segment at @rec->ip should contain the contents created by * the compiler * * Return must be: * 0 on success * -EFAULT on error reading the location * -EINVAL on a failed compare of the contents * -EPERM on error writing to the location * Any other value will be considered a failure. */ #ifndef ftrace_init_nop static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { return ftrace_make_nop(mod, rec, MCOUNT_ADDR); } #endif /** * ftrace_make_call - convert a nop call site into a call to addr * @rec: the call site record (e.g. 
mcount/fentry) * @addr: the address that the call site should call * * This is a very sensitive operation and great care needs * to be taken by the arch. The operation should carefully * read the location, check to see if what is read is indeed * what we expect it to be, and then on success of the compare, * it should write to the location. * * The code segment at @rec->ip should be a nop * * Return must be: * 0 on success * -EFAULT on error reading the location * -EINVAL on a failed compare of the contents * -EPERM on error writing to the location * Any other value will be considered a failure. */ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS /** * ftrace_modify_call - convert from one addr to another (no nop) * @rec: the call site record (e.g. mcount/fentry) * @old_addr: the address expected to be currently called to * @addr: the address to change to * * This is a very sensitive operation and great care needs * to be taken by the arch. The operation should carefully * read the location, check to see if what is read is indeed * what we expect it to be, and then on success of the compare, * it should write to the location. * * The code segment at @rec->ip should be a caller to @old_addr * * Return must be: * 0 on success * -EFAULT on error reading the location * -EINVAL on a failed compare of the contents * -EPERM on error writing to the location * Any other value will be considered a failure. */ extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr); #else /* Should never be called */ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr) { return -EINVAL; } #endif /* May be defined in arch */ extern int ftrace_arch_read_dyn_info(char *buf, int size); extern int skip_trace(unsigned long ip); extern void ftrace_module_init(struct module *mod); extern void ftrace_module_enable(struct module *mod); extern void ftrace_release_mod(struct module *mod); extern void ftrace_disable_daemon(void); extern void ftrace_enable_daemon(void); #else /* CONFIG_DYNAMIC_FTRACE */ static inline int skip_trace(unsigned long ip) { return 0; } static inline int ftrace_force_update(void) { return 0; } static inline void ftrace_disable_daemon(void) { } static inline void ftrace_enable_daemon(void) { } static inline void ftrace_module_init(struct module *mod) { } static inline void ftrace_module_enable(struct module *mod) { } static inline void ftrace_release_mod(struct module *mod) { } static inline int ftrace_text_reserved(const void *start, const void *end) { return 0; } static inline unsigned long ftrace_location(unsigned long ip) { return 0; } /* * Again users of functions that have ftrace_ops may not * have them defined when ftrace is not enabled, but these * functions may still be called. Use a macro instead of inline. 
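 */

/*
 * Example (not part of the original header): the general shape of an arch
 * ftrace_make_nop() honouring the error contract documented earlier.  The
 * helpers arch_call_insn(), arch_nop_insn() and arch_write_insn() are
 * hypothetical stand-ins for an architecture's real primitives.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long expected = arch_call_insn(rec->ip, addr);	/* hypothetical */
	unsigned long old;

	if (probe_kernel_read(&old, (void *)rec->ip, sizeof(old)))
		return -EFAULT;	/* error reading the location */
	if (old != expected)
		return -EINVAL;	/* contents were not the expected call */
	if (arch_write_insn(rec->ip, arch_nop_insn()))	/* hypothetical */
		return -EPERM;	/* error writing to the location */
	return 0;
}

/*
 * When CONFIG_DYNAMIC_FTRACE is off, the ftrace_ops-taking helpers become
 * stubs, as explained above: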
*/ #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) #define ftrace_set_early_filter(ops, buf, enable) do { } while (0) #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; }) #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_free_filter(ops) do { } while (0) #define ftrace_ops_set_global_filter(ops) do { } while (0) static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { return -ENODEV; } static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { return -ENODEV; } static inline int ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } static inline bool is_ftrace_trampoline(unsigned long addr) { return false; } #endif /* CONFIG_DYNAMIC_FTRACE */ /* totally disable ftrace - cannot re-enable after this */ void ftrace_kill(void); static inline void tracer_disable(void) { #ifdef CONFIG_FUNCTION_TRACER ftrace_enabled = 0; #endif } /* * Ftrace disable/restore without lock. Some synchronization mechanism * must be used to prevent ftrace_enabled from being changed between * disable/restore. */ static inline int __ftrace_enabled_save(void) { #ifdef CONFIG_FUNCTION_TRACER int saved_ftrace_enabled = ftrace_enabled; ftrace_enabled = 0; return saved_ftrace_enabled; #else return 0; #endif } static inline void __ftrace_enabled_restore(int enabled) { #ifdef CONFIG_FUNCTION_TRACER ftrace_enabled = enabled; #endif } /* All archs should have this, but we define it for consistency */ #ifndef ftrace_return_address0 # define ftrace_return_address0 __builtin_return_address(0) #endif /* Archs may use other ways for ADDR1 and beyond */ #ifndef ftrace_return_address # ifdef CONFIG_FRAME_POINTER # define ftrace_return_address(n) __builtin_return_address(n) # else # define ftrace_return_address(n) 0UL # endif #endif #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0) #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1)) #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2)) #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3)) #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4)) #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5)) #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6)) static __always_inline unsigned long get_lock_parent_ip(void) { unsigned long addr = CALLER_ADDR0; if (!in_lock_functions(addr)) return addr; addr = CALLER_ADDR1; if (!in_lock_functions(addr)) return addr; return CALLER_ADDR2; } #ifdef CONFIG_TRACE_PREEMPT_TOGGLE extern void trace_preempt_on(unsigned long a0, unsigned long a1); extern void trace_preempt_off(unsigned long a0, unsigned long a1); #else /* * Use defines instead of static inlines because some arches will make code out * of the CALLER_ADDR, when we really want these to be a real nop. */ # define trace_preempt_on(a0, a1) do { } while (0) # define trace_preempt_off(a0, a1) do { } while (0) #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD extern void ftrace_init(void); #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY #define FTRACE_CALLSITE_SECTION "__patchable_function_entries" #else #define FTRACE_CALLSITE_SECTION "__mcount_loc" #endif #else static inline void ftrace_init(void) { } #endif /* * Structure that defines an entry function trace. * It's already packed but the attribute "packed" is needed * to remove extra padding at the end.
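 */

/*
 * Example (not part of the original header, and referring back to
 * __ftrace_enabled_save()/__ftrace_enabled_restore() above): the intended
 * pairing of the two calls.  The mutex my_tracing_lock and the helper are
 * hypothetical; some such serialization must keep ftrace_enabled stable
 * between the save and the restore.
 */
static DEFINE_MUTEX(my_tracing_lock);		/* hypothetical */

static void my_run_untraced(void (*fn)(void))
{
	int saved;

	mutex_lock(&my_tracing_lock);
	saved = __ftrace_enabled_save();	/* forces ftrace_enabled = 0 */
	fn();					/* runs with function tracing off */
	__ftrace_enabled_restore(saved);
	mutex_unlock(&my_tracing_lock);
}

/*
 * The entry record described above: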
*/ struct ftrace_graph_ent { unsigned long func; /* Current function */ int depth; } __packed; /* * Structure that defines a return function trace. * It's already packed but the attribute "packed" is needed * to remove extra padding at the end. */ struct ftrace_graph_ret { unsigned long func; /* Current function */ /* Number of functions that overran the depth limit for current task */ unsigned long overrun; unsigned long long calltime; unsigned long long rettime; int depth; } __packed; /* Type of the callback handlers for tracing function graph */ typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); #ifdef CONFIG_FUNCTION_GRAPH_TRACER struct fgraph_ops { trace_func_graph_ent_t entryfunc; trace_func_graph_ret_t retfunc; }; /* * Stack of return addresses for functions * of a thread. * Used in struct thread_info */ struct ftrace_ret_stack { unsigned long ret; unsigned long func; unsigned long long calltime; #ifdef CONFIG_FUNCTION_PROFILER unsigned long long subtime; #endif #ifdef HAVE_FUNCTION_GRAPH_FP_TEST unsigned long fp; #endif #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR unsigned long *retp; #endif }; /* * Primary handler of a function return. * It relies on ftrace_return_to_handler. * Defined in entry_32/64.S */ extern void return_to_handler(void); extern int function_graph_enter(unsigned long ret, unsigned long func, unsigned long frame_pointer, unsigned long *retp); struct ftrace_ret_stack * ftrace_graph_get_ret_stack(struct task_struct *task, int idx); unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, unsigned long *retp); /* * Sometimes we don't want to trace a function with the function * graph tracer but we still want it traced by the usual function * tracer if the function graph tracer is not configured.
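 */

/*
 * Example (not part of the original header): a minimal function graph
 * client built on the fgraph_ops callbacks above.  All my_-prefixed names
 * are hypothetical.
 */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* nonzero: record this function's entry */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime gives the time spent in the call */
}

static struct fgraph_ops my_graph_ops = {
	.entryfunc	= my_graph_entry,
	.retfunc	= my_graph_return,
};

/* hooked up with register_ftrace_graph(&my_graph_ops), declared below */

/*
 * With the graph tracer configured, functions can opt out of it (while
 * staying visible to the plain function tracer) via: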
*/ #define __notrace_funcgraph notrace #define FTRACE_RETFUNC_DEPTH 50 #define FTRACE_RETSTACK_ALLOC_SIZE 32 extern int register_ftrace_graph(struct fgraph_ops *ops); extern void unregister_ftrace_graph(struct fgraph_ops *ops); extern bool ftrace_graph_is_dead(void); extern void ftrace_graph_stop(void); /* The current handlers in use */ extern trace_func_graph_ret_t ftrace_graph_return; extern trace_func_graph_ent_t ftrace_graph_entry; extern void ftrace_graph_init_task(struct task_struct *t); extern void ftrace_graph_exit_task(struct task_struct *t); extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); static inline void pause_graph_tracing(void) { atomic_inc(&current->tracing_graph_pause); } static inline void unpause_graph_tracing(void) { atomic_dec(&current->tracing_graph_pause); } #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ #define __notrace_funcgraph static inline void ftrace_graph_init_task(struct task_struct *t) { } static inline void ftrace_graph_exit_task(struct task_struct *t) { } static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } /* Define as macros as fgraph_ops may not be defined */ #define register_ftrace_graph(ops) ({ -1; }) #define unregister_ftrace_graph(ops) do { } while (0) static inline unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, unsigned long *retp) { return ret; } static inline void pause_graph_tracing(void) { } static inline void unpause_graph_tracing(void) { } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_TRACING /* flags for current->trace */ enum { TSK_TRACE_FL_TRACE_BIT = 0, TSK_TRACE_FL_GRAPH_BIT = 1, }; enum { TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT, TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT, }; static inline void set_tsk_trace_trace(struct task_struct *tsk) { set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); } static inline void clear_tsk_trace_trace(struct task_struct *tsk) { clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); } static inline int test_tsk_trace_trace(struct task_struct *tsk) { return tsk->trace & TSK_TRACE_FL_TRACE; } static inline void set_tsk_trace_graph(struct task_struct *tsk) { set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); } static inline void clear_tsk_trace_graph(struct task_struct *tsk) { clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); } static inline int test_tsk_trace_graph(struct task_struct *tsk) { return tsk->trace & TSK_TRACE_FL_GRAPH; } enum ftrace_dump_mode; extern enum ftrace_dump_mode ftrace_dump_on_oops; extern int tracepoint_printk; extern void disable_trace_on_warning(void); extern int __disable_trace_on_warning; int tracepoint_printk_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #else /* CONFIG_TRACING */ static inline void disable_trace_on_warning(void) { } #endif /* CONFIG_TRACING */ #ifdef CONFIG_FTRACE_SYSCALLS unsigned long arch_syscall_addr(int nr); #endif /* CONFIG_FTRACE_SYSCALLS */ #endif /* _LINUX_FTRACE_H */ kbuild.h 0000644 00000000574 14722070374 0006203 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_KBUILD_H #define __LINUX_KBUILD_H #define DEFINE(sym, val) \ asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val)) #define BLANK() asm volatile("\n.ascii \"->\"" : : ) #define OFFSET(sym, str, mem) \ DEFINE(sym, offsetof(struct str, mem)) #define COMMENT(x) \ asm volatile("\n.ascii \"->#" x "\"") #endif devcoredump.h 0000644 00000004332 14722070374 0007242 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright(c) 2015
Intel Deutschland GmbH */ #ifndef __DEVCOREDUMP_H #define __DEVCOREDUMP_H #include <linux/device.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/scatterlist.h> #include <linux/slab.h> /* * _devcd_free_sgtable - free all the memory of the given scatterlist table * (i.e. both pages and scatterlist instances) * NOTE: if two tables allocated and chained using the sg_chain function then * this function should be called only once on the first table * @table: pointer to sg_table to free */ static inline void _devcd_free_sgtable(struct scatterlist *table) { int i; struct page *page; struct scatterlist *iter; struct scatterlist *delete_iter; /* free pages */ iter = table; for_each_sg(table, iter, sg_nents(table), i) { page = sg_page(iter); if (page) __free_page(page); } /* then free all chained tables */ iter = table; delete_iter = table; /* always points on a head of a table */ while (!sg_is_last(iter)) { iter++; if (sg_is_chain(iter)) { iter = sg_chain_ptr(iter); kfree(delete_iter); delete_iter = iter; } } /* free the last table */ kfree(delete_iter); } #ifdef CONFIG_DEV_COREDUMP void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp); void dev_coredumpm(struct device *dev, struct module *owner, void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, void *data, size_t datalen), void (*free)(void *data)); void dev_coredumpsg(struct device *dev, struct scatterlist *table, size_t datalen, gfp_t gfp); #else static inline void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp) { vfree(data); } static inline void dev_coredumpm(struct device *dev, struct module *owner, void *data, size_t datalen, gfp_t gfp, ssize_t (*read)(char *buffer, loff_t offset, size_t count, void *data, size_t datalen), void (*free)(void *data)) { free(data); } static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table, size_t datalen, gfp_t gfp) { _devcd_free_sgtable(table); } #endif /* CONFIG_DEV_COREDUMP */ #endif /* __DEVCOREDUMP_H */ mm_types_task.h 0000644 00000004773 14722070374 0007615 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MM_TYPES_TASK_H #define _LINUX_MM_TYPES_TASK_H /* * Here are the definitions of the MM data types that are embedded in 'struct task_struct'. * * (These are defined separately to decouple sched.h from mm_types.h as much as possible.) 
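 */

/*
 * Example (not part of the original header, and referring back to the
 * devcoredump interface declared in devcoredump.h above): a driver handing
 * a vmalloc'ed crash buffer to the framework.  The function and parameter
 * names are hypothetical.
 */
static void my_report_crash(struct device *dev, void *buf, size_t len)
{
	/*
	 * Ownership of buf passes to devcoredump; note that the
	 * !CONFIG_DEV_COREDUMP stub simply vfree()s it, so the buffer
	 * must come from vmalloc() either way.
	 */
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
}

/*
 * MM types embedded in 'struct task_struct' follow: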
*/ #include <linux/types.h> #include <linux/threads.h> #include <linux/atomic.h> #include <linux/cpumask.h> #include <asm/page.h> #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH #include <asm/tlbbatch.h> #endif #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) /* * The per task VMA cache array: */ #define VMACACHE_BITS 2 #define VMACACHE_SIZE (1U << VMACACHE_BITS) #define VMACACHE_MASK (VMACACHE_SIZE - 1) struct vmacache { u64 seqnum; struct vm_area_struct *vmas[VMACACHE_SIZE]; }; /* * When updating this, please also update struct resident_page_types[] in * kernel/fork.c */ enum { MM_FILEPAGES, /* Resident file mapping pages */ MM_ANONPAGES, /* Resident anonymous pages */ MM_SWAPENTS, /* Anonymous swap entries */ MM_SHMEMPAGES, /* Resident shared memory pages */ NR_MM_COUNTERS }; #if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) #define SPLIT_RSS_COUNTING /* per-thread cached information, */ struct task_rss_stat { int events; /* for synchronization threshold */ int count[NR_MM_COUNTERS]; }; #endif /* USE_SPLIT_PTE_PTLOCKS */ struct mm_rss_stat { atomic_long_t count[NR_MM_COUNTERS]; }; struct page_frag { struct page *page; #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) __u32 offset; __u32 size; #else __u16 offset; __u16 size; #endif }; /* Track pages that require TLB flushes */ struct tlbflush_unmap_batch { #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH /* * The arch code makes the following promise: generic code can modify a * PTE, then call arch_tlbbatch_add_mm() (which internally provides all * needed barriers), then call arch_tlbbatch_flush(), and the entries * will be flushed on all CPUs by the time that arch_tlbbatch_flush() * returns. */ struct arch_tlbflush_unmap_batch arch; /* True if a flush is needed. */ bool flush_required; /* * If true then the PTE was dirty when unmapped. The entry must be * flushed before IO is initiated or a stale TLB entry potentially * allows an update without redirtying the page. */ bool writable; #endif }; #endif /* _LINUX_MM_TYPES_TASK_H */ mxm-wmi.h 0000644 00000000617 14722070374 0006322 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * MXM WMI driver * * Copyright(C) 2010 Red Hat. */ #ifndef MXM_WMI_H #define MXM_WMI_H /* discrete adapters */ #define MXM_MXDS_ADAPTER_0 0x0 #define MXM_MXDS_ADAPTER_1 0x0 /* integrated adapter */ #define MXM_MXDS_ADAPTER_IGD 0x10 int mxm_wmi_call_mxds(int adapter); int mxm_wmi_call_mxmx(int adapter); bool mxm_wmi_supported(void); #endif mfd/asic3.h 0000644 00000030120 14722070374 0006467 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/mfd/asic3.h * * Compaq ASIC3 headers. * * Copyright 2001 Compaq Computer Corporation. * Copyright 2007-2008 OpenedHand Ltd. 
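 */

/*
 * Example (not part of the original header, and referring back to struct
 * vmacache in mm_types_task.h above): a sketch of the per-task VMA cache
 * lookup idiom -- hash the address into one of the VMACACHE_SIZE slots,
 * then check that the cached vma really covers the address.  The helper
 * name is hypothetical.
 */
static inline struct vm_area_struct *
my_vmacache_lookup(struct vmacache *cache, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = cache->vmas[(addr >> PAGE_SHIFT) & VMACACHE_MASK];
	if (vma && vma->vm_start <= addr && addr < vma->vm_end)
		return vma;		/* cache hit */
	return NULL;			/* fall back to the full lookup */
}

/*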
*/ #ifndef __ASIC3_H__ #define __ASIC3_H__ #include <linux/types.h> struct led_classdev; struct asic3_led { const char *name; const char *default_trigger; struct led_classdev *cdev; }; struct asic3_platform_data { u16 *gpio_config; unsigned int gpio_config_num; unsigned int irq_base; unsigned int gpio_base; unsigned int clock_rate; struct asic3_led *leds; }; #define ASIC3_NUM_GPIO_BANKS 4 #define ASIC3_GPIOS_PER_BANK 16 #define ASIC3_NUM_GPIOS 64 #define ASIC3_NR_IRQS (ASIC3_NUM_GPIOS + 6) #define ASIC3_IRQ_LED0 64 #define ASIC3_IRQ_LED1 65 #define ASIC3_IRQ_LED2 66 #define ASIC3_IRQ_SPI 67 #define ASIC3_IRQ_SMBUS 68 #define ASIC3_IRQ_OWM 69 #define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio)) #define ASIC3_GPIO_BANK_A 0 #define ASIC3_GPIO_BANK_B 1 #define ASIC3_GPIO_BANK_C 2 #define ASIC3_GPIO_BANK_D 3 #define ASIC3_GPIO(bank, gpio) \ ((ASIC3_GPIOS_PER_BANK * ASIC3_GPIO_BANK_##bank) + (gpio)) #define ASIC3_GPIO_bit(gpio) (1 << ((gpio) & 0xf)) /* All offsets below are specified with this address bus shift */ #define ASIC3_DEFAULT_ADDR_SHIFT 2 #define ASIC3_OFFSET(base, reg) (ASIC3_##base##_BASE + ASIC3_##base##_##reg) #define ASIC3_GPIO_OFFSET(base, reg) \ (ASIC3_GPIO_##base##_BASE + ASIC3_GPIO_##reg) #define ASIC3_GPIO_A_BASE 0x0000 #define ASIC3_GPIO_B_BASE 0x0100 #define ASIC3_GPIO_C_BASE 0x0200 #define ASIC3_GPIO_D_BASE 0x0300 #define ASIC3_GPIO_TO_BANK(gpio) ((gpio) >> 4) #define ASIC3_GPIO_TO_BIT(gpio) ((gpio) - \ (ASIC3_GPIOS_PER_BANK * ((gpio) >> 4))) #define ASIC3_GPIO_TO_MASK(gpio) (1 << ASIC3_GPIO_TO_BIT(gpio)) #define ASIC3_GPIO_TO_BASE(gpio) (ASIC3_GPIO_A_BASE + (((gpio) >> 4) * 0x0100)) #define ASIC3_BANK_TO_BASE(bank) (ASIC3_GPIO_A_BASE + ((bank) * 0x100)) #define ASIC3_GPIO_MASK 0x00 /* R/W 0:don't mask */ #define ASIC3_GPIO_DIRECTION 0x04 /* R/W 0:input */ #define ASIC3_GPIO_OUT 0x08 /* R/W 0:output low */ #define ASIC3_GPIO_TRIGGER_TYPE 0x0c /* R/W 0:level */ #define ASIC3_GPIO_EDGE_TRIGGER 0x10 /* R/W 0:falling */ #define ASIC3_GPIO_LEVEL_TRIGGER 0x14 /* R/W 0:low level detect */ #define ASIC3_GPIO_SLEEP_MASK 0x18 /* R/W 0:don't mask in sleep mode */ #define ASIC3_GPIO_SLEEP_OUT 0x1c /* R/W level 0:low in sleep mode */ #define ASIC3_GPIO_BAT_FAULT_OUT 0x20 /* R/W level 0:low in batt_fault */ #define ASIC3_GPIO_INT_STATUS 0x24 /* R/W 0:none, 1:detect */ #define ASIC3_GPIO_ALT_FUNCTION 0x28 /* R/W 1:LED register control */ #define ASIC3_GPIO_SLEEP_CONF 0x2c /* * R/W bit 1: autosleep * 0: disable gposlpout in normal mode, * enable gposlpout in sleep mode.
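 */

/*
 * Worked example (not part of the original header) of how the helper
 * macros above compose, for the arbitrary choice of bank C, pin 5:
 *
 *	ASIC3_GPIO(C, 5)       = 16 * 2 + 5          = 37
 *	ASIC3_GPIO_TO_BANK(37) = 37 >> 4             = 2 (bank C)
 *	ASIC3_GPIO_TO_BIT(37)  = 37 - 16 * 2         = 5
 *	ASIC3_GPIO_TO_MASK(37) = 1 << 5              = 0x20
 *	ASIC3_GPIO_TO_BASE(37) = 0x0000 + 2 * 0x0100 = 0x0200 (ASIC3_GPIO_C_BASE)
 */

/*
 * The remaining per-bank offset: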
*/ #define ASIC3_GPIO_STATUS 0x30 /* R Pin status */ /* * ASIC3 GPIO config * * Bits 0..6 gpio number * Bits 7..13 Alternate function * Bit 14 Direction * Bit 15 Initial value * */ #define ASIC3_CONFIG_GPIO_PIN(config) ((config) & 0x7f) #define ASIC3_CONFIG_GPIO_ALT(config) (((config) & (0x7f << 7)) >> 7) #define ASIC3_CONFIG_GPIO_DIR(config) ((config & (1 << 14)) >> 14) #define ASIC3_CONFIG_GPIO_INIT(config) ((config & (1 << 15)) >> 15) #define ASIC3_CONFIG_GPIO(gpio, alt, dir, init) (((gpio) & 0x7f) \ | (((alt) & 0x7f) << 7) | (((dir) & 0x1) << 14) \ | (((init) & 0x1) << 15)) #define ASIC3_CONFIG_GPIO_DEFAULT(gpio, dir, init) \ ASIC3_CONFIG_GPIO((gpio), 0, (dir), (init)) #define ASIC3_CONFIG_GPIO_DEFAULT_OUT(gpio, init) \ ASIC3_CONFIG_GPIO((gpio), 0, 1, (init)) /* * Alternate functions */ #define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0) #define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0) #define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0) #define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 0, 0) #define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 0, 0) #define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 0, 0) #define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0) #define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0) #define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0) #define ASIC3_GPIOC5_SPI_CLK ASIC3_CONFIG_GPIO(37, 1, 1, 0) #define ASIC3_GPIOC5_nCIOW ASIC3_CONFIG_GPIO(37, 1, 1, 0) #define ASIC3_GPIOC6_nCIOR ASIC3_CONFIG_GPIO(38, 1, 1, 0) #define ASIC3_GPIOC7_nPCE_1 ASIC3_CONFIG_GPIO(39, 1, 0, 0) #define ASIC3_GPIOC8_nPCE_2 ASIC3_CONFIG_GPIO(40, 1, 0, 0) #define ASIC3_GPIOC9_nPOE ASIC3_CONFIG_GPIO(41, 1, 0, 0) #define ASIC3_GPIOC10_nPWE ASIC3_CONFIG_GPIO(42, 1, 0, 0) #define ASIC3_GPIOC11_PSKTSEL ASIC3_CONFIG_GPIO(43, 1, 0, 0) #define ASIC3_GPIOC12_nPREG ASIC3_CONFIG_GPIO(44, 1, 0, 0) #define ASIC3_GPIOC13_nPWAIT ASIC3_CONFIG_GPIO(45, 1, 1, 0) #define ASIC3_GPIOC14_nPIOIS16 ASIC3_CONFIG_GPIO(46, 1, 1, 0) #define ASIC3_GPIOC15_nPIOR ASIC3_CONFIG_GPIO(47, 1, 0, 0) #define ASIC3_GPIOD4_CF_nCD ASIC3_CONFIG_GPIO(52, 1, 0, 0) #define ASIC3_GPIOD11_nCIOIS16 ASIC3_CONFIG_GPIO(59, 1, 0, 0) #define ASIC3_GPIOD12_nCWAIT ASIC3_CONFIG_GPIO(60, 1, 0, 0) #define ASIC3_GPIOD15_nPIOW ASIC3_CONFIG_GPIO(63, 1, 0, 0) #define ASIC3_SPI_Base 0x0400 #define ASIC3_SPI_Control 0x0000 #define ASIC3_SPI_TxData 0x0004 #define ASIC3_SPI_RxData 0x0008 #define ASIC3_SPI_Int 0x000c #define ASIC3_SPI_Status 0x0010 #define SPI_CONTROL_SPR(clk) ((clk) & 0x0f) /* Clock rate */ #define ASIC3_PWM_0_Base 0x0500 #define ASIC3_PWM_1_Base 0x0600 #define ASIC3_PWM_TimeBase 0x0000 #define ASIC3_PWM_PeriodTime 0x0004 #define ASIC3_PWM_DutyTime 0x0008 #define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */ #define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */ #define ASIC3_NUM_LEDS 3 #define ASIC3_LED_0_Base 0x0700 #define ASIC3_LED_1_Base 0x0800 #define ASIC3_LED_2_Base 0x0900 #define ASIC3_LED_TimeBase 0x0000 /* R/W 7 bits */ #define ASIC3_LED_PeriodTime 0x0004 /* R/W 12 bits */ #define ASIC3_LED_DutyTime 0x0008 /* R/W 12 bits */ #define ASIC3_LED_AutoStopCount 0x000c /* R/W 16 bits */ /* LED TimeBase bits - match ASIC2 */ #define LED_TBS 0x0f /* Low 4 bits sets time base, max = 13 */ /* Note: max = 5 on hx4700 */ /* 0: maximum time base */ /* 1: maximum time base / 2 */ /* n: maximum time base / 2^n */ #define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */ #define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */ #define LED_ALWAYS (1 << 6) /* LED 
Interrupt Mask 0:No mask, 1:mask */ #define ASIC3_CLOCK_BASE 0x0A00 #define ASIC3_CLOCK_CDEX 0x00 #define ASIC3_CLOCK_SEL 0x04 #define CLOCK_CDEX_SOURCE (1 << 0) /* 2 bits */ #define CLOCK_CDEX_SOURCE0 (1 << 0) #define CLOCK_CDEX_SOURCE1 (1 << 1) #define CLOCK_CDEX_SPI (1 << 2) #define CLOCK_CDEX_OWM (1 << 3) #define CLOCK_CDEX_PWM0 (1 << 4) #define CLOCK_CDEX_PWM1 (1 << 5) #define CLOCK_CDEX_LED0 (1 << 6) #define CLOCK_CDEX_LED1 (1 << 7) #define CLOCK_CDEX_LED2 (1 << 8) /* Clocks settings: 1 for 24.576 MHz, 0 for 12.288 MHz */ #define CLOCK_CDEX_SD_HOST (1 << 9) /* R/W: SD host clock source */ #define CLOCK_CDEX_SD_BUS (1 << 10) /* R/W: SD bus clock source ctrl */ #define CLOCK_CDEX_SMBUS (1 << 11) #define CLOCK_CDEX_CONTROL_CX (1 << 12) #define CLOCK_CDEX_EX0 (1 << 13) /* R/W: 32.768 kHz crystal */ #define CLOCK_CDEX_EX1 (1 << 14) /* R/W: 24.576 MHz crystal */ #define CLOCK_SEL_SD_HCLK_SEL (1 << 0) /* R/W: SDIO host clock select */ #define CLOCK_SEL_SD_BCLK_SEL (1 << 1) /* R/W: SDIO bus clock select */ /* R/W: INT clock source control (32.768 kHz) */ #define CLOCK_SEL_CX (1 << 2) #define ASIC3_INTR_BASE 0x0B00 #define ASIC3_INTR_INT_MASK 0x00 /* Interrupt mask control */ #define ASIC3_INTR_P_INT_STAT 0x04 /* Peripheral interrupt status */ #define ASIC3_INTR_INT_CPS 0x08 /* Interrupt timer clock pre-scale */ #define ASIC3_INTR_INT_TBS 0x0c /* Interrupt timer set */ #define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */ #define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */ #define ASIC3_INTMASK_MASK0 (1 << 2) #define ASIC3_INTMASK_MASK1 (1 << 3) #define ASIC3_INTMASK_MASK2 (1 << 4) #define ASIC3_INTMASK_MASK3 (1 << 5) #define ASIC3_INTMASK_MASK4 (1 << 6) #define ASIC3_INTMASK_MASK5 (1 << 7) #define ASIC3_INTR_PERIPHERAL_A (1 << 0) #define ASIC3_INTR_PERIPHERAL_B (1 << 1) #define ASIC3_INTR_PERIPHERAL_C (1 << 2) #define ASIC3_INTR_PERIPHERAL_D (1 << 3) #define ASIC3_INTR_LED0 (1 << 4) #define ASIC3_INTR_LED1 (1 << 5) #define ASIC3_INTR_LED2 (1 << 6) #define ASIC3_INTR_SPI (1 << 7) #define ASIC3_INTR_SMBUS (1 << 8) #define ASIC3_INTR_OWM (1 << 9) #define ASIC3_INTR_CPS(x) ((x)&0x0f) /* 4 bits, max 14 */ #define ASIC3_INTR_CPS_SET (1 << 4) /* Time base enable */ /* Basic control of the SD ASIC */ #define ASIC3_SDHWCTRL_BASE 0x0E00 #define ASIC3_SDHWCTRL_SDCONF 0x00 #define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */ #define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */ #define ASIC3_SDHWCTRL_PCLR (1 << 2) /* All registers of SDIO cleared */ #define ASIC3_SDHWCTRL_LEVCD (1 << 3) /* SD card detection: 0:low */ /* SD card write protection: 0=high */ #define ASIC3_SDHWCTRL_LEVWP (1 << 4) #define ASIC3_SDHWCTRL_SDLED (1 << 5) /* SD card LED signal 0=disable */ /* SD card power supply ctrl 1=enable */ #define ASIC3_SDHWCTRL_SDPWR (1 << 6) #define ASIC3_EXTCF_BASE 0x1100 #define ASIC3_EXTCF_SELECT 0x00 #define ASIC3_EXTCF_RESET 0x04 #define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */ #define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */ #define ASIC3_EXTCF_SMOD2 (1 << 2) /* slot number of mode 2 */ #define ASIC3_EXTCF_OWM_EN (1 << 4) /* enable onewire module */ #define ASIC3_EXTCF_OWM_SMB (1 << 5) /* OWM bus selection */ #define ASIC3_EXTCF_OWM_RESET (1 << 6) /* ??
used by OWM and CF */ #define ASIC3_EXTCF_CF0_SLEEP_MODE (1 << 7) /* CF0 sleep state */ #define ASIC3_EXTCF_CF1_SLEEP_MODE (1 << 8) /* CF1 sleep state */ #define ASIC3_EXTCF_CF0_PWAIT_EN (1 << 10) /* CF0 PWAIT_n control */ #define ASIC3_EXTCF_CF1_PWAIT_EN (1 << 11) /* CF1 PWAIT_n control */ #define ASIC3_EXTCF_CF0_BUF_EN (1 << 12) /* CF0 buffer control */ #define ASIC3_EXTCF_CF1_BUF_EN (1 << 13) /* CF1 buffer control */ #define ASIC3_EXTCF_SD_MEM_ENABLE (1 << 14) #define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */ /********************************************* * The Onewire interface (DS1WM) is handled * by the ds1wm driver. * *********************************************/ #define ASIC3_OWM_BASE 0xC00 /***************************************************************************** * The SD configuration registers are at a completely different location * in memory. They are divided into three sets of registers: * * SD_CONFIG Core configuration register * SD_CTRL Control registers for SD operations * SDIO_CTRL Control registers for SDIO operations * *****************************************************************************/ #define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */ #define ASIC3_SD_CONFIG_SIZE 0x0200 /* Assumes 32 bit addressing */ #define ASIC3_SD_CTRL_BASE 0x1000 #define ASIC3_SDIO_CTRL_BASE 0x1200 #define ASIC3_MAP_SIZE_32BIT 0x2000 #define ASIC3_MAP_SIZE_16BIT 0x1000 /* Functions needed by leds-asic3 */ struct asic3; extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val); extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg); #endif /* __ASIC3_H__ */ mfd/viperboard.h 0000644 00000005362 14722070374 0007634 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/viperboard.h * * Nano River Technologies viperboard definitions * * (C) 2012 by Lemonage GmbH * Author: Lars Poeschel <poeschel@lemonage.de> * All rights reserved. 
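 */

/*
 * Example (not part of the original header, and referring back to the
 * accessors declared at the end of mfd/asic3.h above): programming LED 0
 * blink timing through asic3_write_register().  The asic pointer and the
 * timing values are hypothetical.
 */
static void my_asic3_led0_blink(struct asic3 *asic)
{
	asic3_write_register(asic, ASIC3_LED_0_Base + ASIC3_LED_PeriodTime, 0x200);
	asic3_write_register(asic, ASIC3_LED_0_Base + ASIC3_LED_DutyTime, 0x100);
	/* time base 4, LED enabled */
	asic3_write_register(asic, ASIC3_LED_0_Base + ASIC3_LED_TimeBase, LED_EN | 4);
}

/*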
*/ #ifndef __MFD_VIPERBOARD_H__ #define __MFD_VIPERBOARD_H__ #include <linux/types.h> #include <linux/usb.h> #define VPRBRD_EP_OUT 0x02 #define VPRBRD_EP_IN 0x86 #define VPRBRD_I2C_MSG_LEN 512 /* max length of a msg on USB level */ #define VPRBRD_I2C_FREQ_6MHZ 1 /* 6 MBit/s */ #define VPRBRD_I2C_FREQ_3MHZ 2 /* 3 MBit/s */ #define VPRBRD_I2C_FREQ_1MHZ 3 /* 1 MBit/s */ #define VPRBRD_I2C_FREQ_FAST 4 /* 400 kbit/s */ #define VPRBRD_I2C_FREQ_400KHZ VPRBRD_I2C_FREQ_FAST #define VPRBRD_I2C_FREQ_200KHZ 5 /* 200 kbit/s */ #define VPRBRD_I2C_FREQ_STD 6 /* 100 kbit/s */ #define VPRBRD_I2C_FREQ_100KHZ VPRBRD_I2C_FREQ_STD #define VPRBRD_I2C_FREQ_10KHZ 7 /* 10 kbit/s */ #define VPRBRD_I2C_CMD_WRITE 0x00 #define VPRBRD_I2C_CMD_READ 0x01 #define VPRBRD_I2C_CMD_ADDR 0x02 #define VPRBRD_USB_TYPE_OUT 0x40 #define VPRBRD_USB_TYPE_IN 0xc0 #define VPRBRD_USB_TIMEOUT_MS 100 #define VPRBRD_USB_REQUEST_I2C_FREQ 0xe6 #define VPRBRD_USB_REQUEST_I2C 0xe9 #define VPRBRD_USB_REQUEST_MAJOR 0xea #define VPRBRD_USB_REQUEST_MINOR 0xeb #define VPRBRD_USB_REQUEST_ADC 0xec #define VPRBRD_USB_REQUEST_GPIOA 0xed #define VPRBRD_USB_REQUEST_GPIOB 0xdd struct vprbrd_i2c_write_hdr { u8 cmd; u16 addr; u8 len1; u8 len2; u8 last; u8 chan; u16 spi; } __packed; struct vprbrd_i2c_read_hdr { u8 cmd; u16 addr; u8 len0; u8 len1; u8 len2; u8 len3; u8 len4; u8 len5; u16 tf1; /* transfer 1 length */ u16 tf2; /* transfer 2 length */ } __packed; struct vprbrd_i2c_status { u8 unknown[11]; u8 status; } __packed; struct vprbrd_i2c_write_msg { struct vprbrd_i2c_write_hdr header; u8 data[VPRBRD_I2C_MSG_LEN - sizeof(struct vprbrd_i2c_write_hdr)]; } __packed; struct vprbrd_i2c_read_msg { struct vprbrd_i2c_read_hdr header; u8 data[VPRBRD_I2C_MSG_LEN - sizeof(struct vprbrd_i2c_read_hdr)]; } __packed; struct vprbrd_i2c_addr_msg { u8 cmd; u8 addr; u8 unknown1; u16 len; u8 unknown2; u8 unknown3; } __packed; /* Structure to hold all device specific stuff */ struct vprbrd { struct usb_device *usb_dev; /* the usb device for this device */ struct mutex lock; u8 buf[sizeof(struct vprbrd_i2c_write_msg)]; struct platform_device pdev; }; #endif /* __MFD_VIPERBOARD_H__ */ mfd/hi6421-pmic.h 0000644 00000002237 14722070374 0007340 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Header file for device driver Hi6421 PMIC * * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. * http://www.hisilicon.com * Copyright (c) <2013-2014> Linaro Ltd. * http://www.linaro.org * * Author: Guodong Xu <guodong.xu@linaro.org> */ #ifndef __HI6421_PMIC_H #define __HI6421_PMIC_H /* Hi6421 registers are mapped to memory bus in 4 bytes stride */ #define HI6421_REG_TO_BUS_ADDR(x) (x << 2) /* Hi6421 maximum register number */ #define HI6421_REG_MAX 0xFF /* Hi6421 OCP (over current protection) and DEB (debounce) control register */ #define HI6421_OCP_DEB_CTRL_REG HI6421_REG_TO_BUS_ADDR(0x51) #define HI6421_OCP_DEB_SEL_MASK 0x0C #define HI6421_OCP_DEB_SEL_8MS 0x00 #define HI6421_OCP_DEB_SEL_16MS 0x04 #define HI6421_OCP_DEB_SEL_32MS 0x08 #define HI6421_OCP_DEB_SEL_64MS 0x0C #define HI6421_OCP_EN_DEBOUNCE_MASK 0x02 #define HI6421_OCP_EN_DEBOUNCE_ENABLE 0x02 #define HI6421_OCP_AUTO_STOP_MASK 0x01 #define HI6421_OCP_AUTO_STOP_ENABLE 0x01 struct hi6421_pmic { struct regmap *regmap; }; enum hi6421_type { HI6421 = 0, HI6421_V530, }; #endif /* __HI6421_PMIC_H */ mfd/retu.h 0000644 00000001323 14722070374 0006447 0 ustar 00 /* * Retu/Tahvo MFD driver interface * * This file is subject to the terms and conditions of the GNU General * Public License. 
See the file "COPYING" in the main directory of this * archive for more details. */ #ifndef __LINUX_MFD_RETU_H #define __LINUX_MFD_RETU_H struct retu_dev; int retu_read(struct retu_dev *, u8); int retu_write(struct retu_dev *, u8, u16); /* Registers */ #define RETU_REG_WATCHDOG 0x17 /* Watchdog */ #define RETU_REG_CC1 0x0d /* Common control register 1 */ #define RETU_REG_STATUS 0x16 /* Status register */ /* Interrupt sources */ #define TAHVO_INT_VBUS 0 /* VBUS state */ /* Interrupt status */ #define TAHVO_STAT_VBUS (1 << TAHVO_INT_VBUS) #endif /* __LINUX_MFD_RETU_H */ mfd/madera/pdata.h 0000644 00000003076 14722070374 0010021 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for Cirrus Logic Madera codecs * * Copyright (C) 2015-2018 Cirrus Logic */ #ifndef MADERA_PDATA_H #define MADERA_PDATA_H #include <linux/kernel.h> #include <linux/regulator/arizona-ldo1.h> #include <linux/regulator/arizona-micsupp.h> #include <linux/regulator/machine.h> #include <sound/madera-pdata.h> #define MADERA_MAX_MICBIAS 4 #define MADERA_MAX_CHILD_MICBIAS 4 #define MADERA_MAX_GPSW 2 struct gpio_desc; struct pinctrl_map; struct madera_codec_pdata; /** * struct madera_pdata - Configuration data for Madera devices * * @reset: GPIO controlling /RESET (NULL = none) * @ldo1: Substruct of pdata for the LDO1 regulator * @micvdd: Substruct of pdata for the MICVDD regulator * @irq_flags: Mode for primary IRQ (defaults to active low) * @gpio_base: Base GPIO number * @gpio_configs: Array of GPIO configurations (See * Documentation/driver-api/pinctl.rst) * @n_gpio_configs: Number of entries in gpio_configs * @gpsw: General purpose switch mode setting. Depends on the external * hardware connected to the switch. (See the SW1_MODE field * in the datasheet for the available values for your codec) * @codec: Substruct of pdata for the ASoC codec driver */ struct madera_pdata { struct gpio_desc *reset; struct arizona_ldo1_pdata ldo1; struct arizona_micsupp_pdata micvdd; unsigned int irq_flags; int gpio_base; const struct pinctrl_map *gpio_configs; int n_gpio_configs; u32 gpsw[MADERA_MAX_GPSW]; struct madera_codec_pdata codec; }; #endif mfd/madera/registers.h 0000644 00000475156 14722070374 0010753 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Madera register definitions * * Copyright (C) 2015-2018 Cirrus Logic */ #ifndef MADERA_REGISTERS_H #define MADERA_REGISTERS_H /* * Register Addresses. 
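 */

/*
 * Example (not part of the original header, and referring back to struct
 * madera_pdata in mfd/madera/pdata.h above): a minimal board-file sketch.
 * The chosen values are hypothetical.
 */
static struct madera_pdata my_madera_pdata = {
	.irq_flags = IRQF_TRIGGER_LOW,	/* primary IRQ, active low */
	.gpio_base = -1,		/* let gpiolib assign a base */
	.gpsw = { 0x1, 0x0 },		/* SW1_MODE values from the datasheet */
};

/*
 * Register addresses: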
*/ #define MADERA_SOFTWARE_RESET 0x00 #define MADERA_HARDWARE_REVISION 0x01 #define MADERA_CTRL_IF_CFG_1 0x08 #define MADERA_CTRL_IF_CFG_2 0x09 #define MADERA_CTRL_IF_CFG_3 0x0A #define MADERA_WRITE_SEQUENCER_CTRL_0 0x16 #define MADERA_WRITE_SEQUENCER_CTRL_1 0x17 #define MADERA_WRITE_SEQUENCER_CTRL_2 0x18 #define MADERA_TONE_GENERATOR_1 0x20 #define MADERA_TONE_GENERATOR_2 0x21 #define MADERA_TONE_GENERATOR_3 0x22 #define MADERA_TONE_GENERATOR_4 0x23 #define MADERA_TONE_GENERATOR_5 0x24 #define MADERA_PWM_DRIVE_1 0x30 #define MADERA_PWM_DRIVE_2 0x31 #define MADERA_PWM_DRIVE_3 0x32 #define MADERA_SEQUENCE_CONTROL 0x41 #define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61 #define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62 #define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63 #define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_4 0x64 #define MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x66 #define MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x67 #define MADERA_HAPTICS_CONTROL_1 0x90 #define MADERA_HAPTICS_CONTROL_2 0x91 #define MADERA_HAPTICS_PHASE_1_INTENSITY 0x92 #define MADERA_HAPTICS_PHASE_1_DURATION 0x93 #define MADERA_HAPTICS_PHASE_2_INTENSITY 0x94 #define MADERA_HAPTICS_PHASE_2_DURATION 0x95 #define MADERA_HAPTICS_PHASE_3_INTENSITY 0x96 #define MADERA_HAPTICS_PHASE_3_DURATION 0x97 #define MADERA_HAPTICS_STATUS 0x98 #define MADERA_COMFORT_NOISE_GENERATOR 0xA0 #define MADERA_CLOCK_32K_1 0x100 #define MADERA_SYSTEM_CLOCK_1 0x101 #define MADERA_SAMPLE_RATE_1 0x102 #define MADERA_SAMPLE_RATE_2 0x103 #define MADERA_SAMPLE_RATE_3 0x104 #define MADERA_SAMPLE_RATE_1_STATUS 0x10A #define MADERA_SAMPLE_RATE_2_STATUS 0x10B #define MADERA_SAMPLE_RATE_3_STATUS 0x10C #define MADERA_ASYNC_CLOCK_1 0x112 #define MADERA_ASYNC_SAMPLE_RATE_1 0x113 #define MADERA_ASYNC_SAMPLE_RATE_2 0x114 #define MADERA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B #define MADERA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C #define MADERA_DSP_CLOCK_1 0x120 #define MADERA_DSP_CLOCK_2 0x122 #define MADERA_OUTPUT_SYSTEM_CLOCK 0x149 #define MADERA_OUTPUT_ASYNC_CLOCK 0x14A #define MADERA_RATE_ESTIMATOR_1 0x152 #define MADERA_RATE_ESTIMATOR_2 0x153 #define MADERA_RATE_ESTIMATOR_3 0x154 #define MADERA_RATE_ESTIMATOR_4 0x155 #define MADERA_RATE_ESTIMATOR_5 0x156 #define MADERA_FLL1_CONTROL_1 0x171 #define MADERA_FLL1_CONTROL_2 0x172 #define MADERA_FLL1_CONTROL_3 0x173 #define MADERA_FLL1_CONTROL_4 0x174 #define MADERA_FLL1_CONTROL_5 0x175 #define MADERA_FLL1_CONTROL_6 0x176 #define CS47L92_FLL1_CONTROL_7 0x177 #define CS47L92_FLL1_CONTROL_8 0x178 #define MADERA_FLL1_CONTROL_7 0x179 #define CS47L92_FLL1_CONTROL_9 0x179 #define MADERA_FLL1_EFS_2 0x17A #define CS47L92_FLL1_CONTROL_10 0x17A #define MADERA_FLL1_CONTROL_11 0x17B #define MADERA_FLL1_DIGITAL_TEST_1 0x17D #define CS47L35_FLL1_SYNCHRONISER_1 0x17F #define CS47L35_FLL1_SYNCHRONISER_2 0x180 #define CS47L35_FLL1_SYNCHRONISER_3 0x181 #define CS47L35_FLL1_SYNCHRONISER_4 0x182 #define CS47L35_FLL1_SYNCHRONISER_5 0x183 #define CS47L35_FLL1_SYNCHRONISER_6 0x184 #define CS47L35_FLL1_SYNCHRONISER_7 0x185 #define CS47L35_FLL1_SPREAD_SPECTRUM 0x187 #define CS47L35_FLL1_GPIO_CLOCK 0x188 #define MADERA_FLL1_SYNCHRONISER_1 0x181 #define MADERA_FLL1_SYNCHRONISER_2 0x182 #define MADERA_FLL1_SYNCHRONISER_3 0x183 #define MADERA_FLL1_SYNCHRONISER_4 0x184 #define MADERA_FLL1_SYNCHRONISER_5 0x185 #define MADERA_FLL1_SYNCHRONISER_6 0x186 #define MADERA_FLL1_SYNCHRONISER_7 0x187 #define MADERA_FLL1_SPREAD_SPECTRUM 0x189 #define MADERA_FLL1_GPIO_CLOCK 0x18A #define CS47L92_FLL1_GPIO_CLOCK 0x18E #define MADERA_FLL2_CONTROL_1 0x191 #define 
MADERA_FLL2_CONTROL_2 0x192 #define MADERA_FLL2_CONTROL_3 0x193 #define MADERA_FLL2_CONTROL_4 0x194 #define MADERA_FLL2_CONTROL_5 0x195 #define MADERA_FLL2_CONTROL_6 0x196 #define CS47L92_FLL2_CONTROL_7 0x197 #define CS47L92_FLL2_CONTROL_8 0x198 #define MADERA_FLL2_CONTROL_7 0x199 #define CS47L92_FLL2_CONTROL_9 0x199 #define MADERA_FLL2_EFS_2 0x19A #define CS47L92_FLL2_CONTROL_10 0x19A #define MADERA_FLL2_CONTROL_11 0x19B #define MADERA_FLL2_DIGITAL_TEST_1 0x19D #define MADERA_FLL2_SYNCHRONISER_1 0x1A1 #define MADERA_FLL2_SYNCHRONISER_2 0x1A2 #define MADERA_FLL2_SYNCHRONISER_3 0x1A3 #define MADERA_FLL2_SYNCHRONISER_4 0x1A4 #define MADERA_FLL2_SYNCHRONISER_5 0x1A5 #define MADERA_FLL2_SYNCHRONISER_6 0x1A6 #define MADERA_FLL2_SYNCHRONISER_7 0x1A7 #define MADERA_FLL2_SPREAD_SPECTRUM 0x1A9 #define MADERA_FLL2_GPIO_CLOCK 0x1AA #define CS47L92_FLL2_GPIO_CLOCK 0x1AE #define MADERA_FLL3_CONTROL_1 0x1B1 #define MADERA_FLL3_CONTROL_2 0x1B2 #define MADERA_FLL3_CONTROL_3 0x1B3 #define MADERA_FLL3_CONTROL_4 0x1B4 #define MADERA_FLL3_CONTROL_5 0x1B5 #define MADERA_FLL3_CONTROL_6 0x1B6 #define MADERA_FLL3_CONTROL_7 0x1B9 #define MADERA_FLL3_SYNCHRONISER_1 0x1C1 #define MADERA_FLL3_SYNCHRONISER_2 0x1C2 #define MADERA_FLL3_SYNCHRONISER_3 0x1C3 #define MADERA_FLL3_SYNCHRONISER_4 0x1C4 #define MADERA_FLL3_SYNCHRONISER_5 0x1C5 #define MADERA_FLL3_SYNCHRONISER_6 0x1C6 #define MADERA_FLL3_SYNCHRONISER_7 0x1C7 #define MADERA_FLL3_SPREAD_SPECTRUM 0x1C9 #define MADERA_FLL3_GPIO_CLOCK 0x1CA #define MADERA_FLLAO_CONTROL_1 0x1D1 #define MADERA_FLLAO_CONTROL_2 0x1D2 #define MADERA_FLLAO_CONTROL_3 0x1D3 #define MADERA_FLLAO_CONTROL_4 0x1D4 #define MADERA_FLLAO_CONTROL_5 0x1D5 #define MADERA_FLLAO_CONTROL_6 0x1D6 #define MADERA_FLLAO_CONTROL_7 0x1D8 #define MADERA_FLLAO_CONTROL_8 0x1DA #define MADERA_FLLAO_CONTROL_9 0x1DB #define MADERA_FLLAO_CONTROL_10 0x1DC #define MADERA_FLLAO_CONTROL_11 0x1DD #define MADERA_MIC_CHARGE_PUMP_1 0x200 #define MADERA_HP_CHARGE_PUMP_8 0x20B #define MADERA_LDO1_CONTROL_1 0x210 #define MADERA_LDO2_CONTROL_1 0x213 #define MADERA_MIC_BIAS_CTRL_1 0x218 #define MADERA_MIC_BIAS_CTRL_2 0x219 #define MADERA_MIC_BIAS_CTRL_3 0x21A #define MADERA_MIC_BIAS_CTRL_4 0x21B #define MADERA_MIC_BIAS_CTRL_5 0x21C #define MADERA_MIC_BIAS_CTRL_6 0x21E #define MADERA_HP_CTRL_1L 0x225 #define MADERA_HP_CTRL_1R 0x226 #define MADERA_HP_CTRL_2L 0x227 #define MADERA_HP_CTRL_2R 0x228 #define MADERA_HP_CTRL_3L 0x229 #define MADERA_HP_CTRL_3R 0x22A #define MADERA_DCS_HP1L_CONTROL 0x232 #define MADERA_DCS_HP1R_CONTROL 0x238 #define MADERA_EDRE_HP_STEREO_CONTROL 0x27E #define MADERA_ACCESSORY_DETECT_MODE_1 0x293 #define MADERA_HEADPHONE_DETECT_0 0x299 #define MADERA_HEADPHONE_DETECT_1 0x29B #define MADERA_HEADPHONE_DETECT_2 0x29C #define MADERA_HEADPHONE_DETECT_3 0x29D #define MADERA_HEADPHONE_DETECT_4 0x29E #define MADERA_HEADPHONE_DETECT_5 0x29F #define MADERA_MIC_DETECT_1_CONTROL_0 0x2A2 #define MADERA_MIC_DETECT_1_CONTROL_1 0x2A3 #define MADERA_MIC_DETECT_1_CONTROL_2 0x2A4 #define MADERA_MIC_DETECT_1_CONTROL_3 0x2A5 #define MADERA_MIC_DETECT_1_LEVEL_1 0x2A6 #define MADERA_MIC_DETECT_1_LEVEL_2 0x2A7 #define MADERA_MIC_DETECT_1_LEVEL_3 0x2A8 #define MADERA_MIC_DETECT_1_LEVEL_4 0x2A9 #define MADERA_MIC_DETECT_1_CONTROL_4 0x2AB #define MADERA_MIC_DETECT_2_CONTROL_0 0x2B2 #define MADERA_MIC_DETECT_2_CONTROL_1 0x2B3 #define MADERA_MIC_DETECT_2_CONTROL_2 0x2B4 #define MADERA_MIC_DETECT_2_CONTROL_3 0x2B5 #define MADERA_MIC_DETECT_2_LEVEL_1 0x2B6 #define MADERA_MIC_DETECT_2_LEVEL_2 0x2B7 #define MADERA_MIC_DETECT_2_LEVEL_3 
0x2B8 #define MADERA_MIC_DETECT_2_LEVEL_4 0x2B9 #define MADERA_MIC_DETECT_2_CONTROL_4 0x2BB #define MADERA_MICD_CLAMP_CONTROL 0x2C6 #define MADERA_GP_SWITCH_1 0x2C8 #define MADERA_JACK_DETECT_ANALOGUE 0x2D3 #define MADERA_INPUT_ENABLES 0x300 #define MADERA_INPUT_ENABLES_STATUS 0x301 #define MADERA_INPUT_RATE 0x308 #define MADERA_INPUT_VOLUME_RAMP 0x309 #define MADERA_HPF_CONTROL 0x30C #define MADERA_IN1L_CONTROL 0x310 #define MADERA_ADC_DIGITAL_VOLUME_1L 0x311 #define MADERA_DMIC1L_CONTROL 0x312 #define MADERA_IN1L_RATE_CONTROL 0x313 #define MADERA_IN1R_CONTROL 0x314 #define MADERA_ADC_DIGITAL_VOLUME_1R 0x315 #define MADERA_DMIC1R_CONTROL 0x316 #define MADERA_IN1R_RATE_CONTROL 0x317 #define MADERA_IN2L_CONTROL 0x318 #define MADERA_ADC_DIGITAL_VOLUME_2L 0x319 #define MADERA_DMIC2L_CONTROL 0x31A #define MADERA_IN2L_RATE_CONTROL 0x31B #define MADERA_IN2R_CONTROL 0x31C #define MADERA_ADC_DIGITAL_VOLUME_2R 0x31D #define MADERA_DMIC2R_CONTROL 0x31E #define MADERA_IN2R_RATE_CONTROL 0x31F #define MADERA_IN3L_CONTROL 0x320 #define MADERA_ADC_DIGITAL_VOLUME_3L 0x321 #define MADERA_DMIC3L_CONTROL 0x322 #define MADERA_IN3L_RATE_CONTROL 0x323 #define MADERA_IN3R_CONTROL 0x324 #define MADERA_ADC_DIGITAL_VOLUME_3R 0x325 #define MADERA_DMIC3R_CONTROL 0x326 #define MADERA_IN3R_RATE_CONTROL 0x327 #define MADERA_IN4L_CONTROL 0x328 #define MADERA_ADC_DIGITAL_VOLUME_4L 0x329 #define MADERA_DMIC4L_CONTROL 0x32A #define MADERA_IN4L_RATE_CONTROL 0x32B #define MADERA_IN4R_CONTROL 0x32C #define MADERA_ADC_DIGITAL_VOLUME_4R 0x32D #define MADERA_DMIC4R_CONTROL 0x32E #define MADERA_IN4R_RATE_CONTROL 0x32F #define MADERA_IN5L_CONTROL 0x330 #define MADERA_ADC_DIGITAL_VOLUME_5L 0x331 #define MADERA_DMIC5L_CONTROL 0x332 #define MADERA_IN5L_RATE_CONTROL 0x333 #define MADERA_IN5R_CONTROL 0x334 #define MADERA_ADC_DIGITAL_VOLUME_5R 0x335 #define MADERA_DMIC5R_CONTROL 0x336 #define MADERA_IN5R_RATE_CONTROL 0x337 #define MADERA_IN6L_CONTROL 0x338 #define MADERA_ADC_DIGITAL_VOLUME_6L 0x339 #define MADERA_DMIC6L_CONTROL 0x33A #define MADERA_IN6R_CONTROL 0x33C #define MADERA_ADC_DIGITAL_VOLUME_6R 0x33D #define MADERA_DMIC6R_CONTROL 0x33E #define CS47L15_ADC_INT_BIAS 0x3A8 #define CS47L15_PGA_BIAS_SEL 0x3C4 #define MADERA_OUTPUT_ENABLES_1 0x400 #define MADERA_OUTPUT_STATUS_1 0x401 #define MADERA_RAW_OUTPUT_STATUS_1 0x406 #define MADERA_OUTPUT_RATE_1 0x408 #define MADERA_OUTPUT_VOLUME_RAMP 0x409 #define MADERA_OUTPUT_PATH_CONFIG_1L 0x410 #define MADERA_DAC_DIGITAL_VOLUME_1L 0x411 #define MADERA_OUTPUT_PATH_CONFIG_1 0x412 #define MADERA_NOISE_GATE_SELECT_1L 0x413 #define MADERA_OUTPUT_PATH_CONFIG_1R 0x414 #define MADERA_DAC_DIGITAL_VOLUME_1R 0x415 #define MADERA_NOISE_GATE_SELECT_1R 0x417 #define MADERA_OUTPUT_PATH_CONFIG_2L 0x418 #define MADERA_DAC_DIGITAL_VOLUME_2L 0x419 #define MADERA_OUTPUT_PATH_CONFIG_2 0x41A #define MADERA_NOISE_GATE_SELECT_2L 0x41B #define MADERA_OUTPUT_PATH_CONFIG_2R 0x41C #define MADERA_DAC_DIGITAL_VOLUME_2R 0x41D #define MADERA_NOISE_GATE_SELECT_2R 0x41F #define MADERA_OUTPUT_PATH_CONFIG_3L 0x420 #define MADERA_DAC_DIGITAL_VOLUME_3L 0x421 #define MADERA_OUTPUT_PATH_CONFIG_3 0x422 #define MADERA_NOISE_GATE_SELECT_3L 0x423 #define MADERA_OUTPUT_PATH_CONFIG_3R 0x424 #define MADERA_DAC_DIGITAL_VOLUME_3R 0x425 #define MADERA_NOISE_GATE_SELECT_3R 0x427 #define MADERA_OUTPUT_PATH_CONFIG_4L 0x428 #define MADERA_DAC_DIGITAL_VOLUME_4L 0x429 #define MADERA_NOISE_GATE_SELECT_4L 0x42B #define MADERA_OUTPUT_PATH_CONFIG_4R 0x42C #define MADERA_DAC_DIGITAL_VOLUME_4R 0x42D #define MADERA_NOISE_GATE_SELECT_4R 0x42F #define 
MADERA_OUTPUT_PATH_CONFIG_5L 0x430 #define MADERA_DAC_DIGITAL_VOLUME_5L 0x431 #define MADERA_NOISE_GATE_SELECT_5L 0x433 #define MADERA_OUTPUT_PATH_CONFIG_5R 0x434 #define MADERA_DAC_DIGITAL_VOLUME_5R 0x435 #define MADERA_NOISE_GATE_SELECT_5R 0x437 #define MADERA_OUTPUT_PATH_CONFIG_6L 0x438 #define MADERA_DAC_DIGITAL_VOLUME_6L 0x439 #define MADERA_NOISE_GATE_SELECT_6L 0x43B #define MADERA_OUTPUT_PATH_CONFIG_6R 0x43C #define MADERA_DAC_DIGITAL_VOLUME_6R 0x43D #define MADERA_NOISE_GATE_SELECT_6R 0x43F #define MADERA_DAC_AEC_CONTROL_1 0x450 #define MADERA_DAC_AEC_CONTROL_2 0x451 #define MADERA_NOISE_GATE_CONTROL 0x458 #define MADERA_PDM_SPK1_CTRL_1 0x490 #define MADERA_PDM_SPK1_CTRL_2 0x491 #define MADERA_PDM_SPK2_CTRL_1 0x492 #define MADERA_PDM_SPK2_CTRL_2 0x493 #define MADERA_HP1_SHORT_CIRCUIT_CTRL 0x4A0 #define MADERA_HP2_SHORT_CIRCUIT_CTRL 0x4A1 #define MADERA_HP3_SHORT_CIRCUIT_CTRL 0x4A2 #define MADERA_HP_TEST_CTRL_1 0x4A4 #define MADERA_HP_TEST_CTRL_5 0x4A8 #define MADERA_HP_TEST_CTRL_6 0x4A9 #define MADERA_AIF1_BCLK_CTRL 0x500 #define MADERA_AIF1_TX_PIN_CTRL 0x501 #define MADERA_AIF1_RX_PIN_CTRL 0x502 #define MADERA_AIF1_RATE_CTRL 0x503 #define MADERA_AIF1_FORMAT 0x504 #define MADERA_AIF1_RX_BCLK_RATE 0x506 #define MADERA_AIF1_FRAME_CTRL_1 0x507 #define MADERA_AIF1_FRAME_CTRL_2 0x508 #define MADERA_AIF1_FRAME_CTRL_3 0x509 #define MADERA_AIF1_FRAME_CTRL_4 0x50A #define MADERA_AIF1_FRAME_CTRL_5 0x50B #define MADERA_AIF1_FRAME_CTRL_6 0x50C #define MADERA_AIF1_FRAME_CTRL_7 0x50D #define MADERA_AIF1_FRAME_CTRL_8 0x50E #define MADERA_AIF1_FRAME_CTRL_9 0x50F #define MADERA_AIF1_FRAME_CTRL_10 0x510 #define MADERA_AIF1_FRAME_CTRL_11 0x511 #define MADERA_AIF1_FRAME_CTRL_12 0x512 #define MADERA_AIF1_FRAME_CTRL_13 0x513 #define MADERA_AIF1_FRAME_CTRL_14 0x514 #define MADERA_AIF1_FRAME_CTRL_15 0x515 #define MADERA_AIF1_FRAME_CTRL_16 0x516 #define MADERA_AIF1_FRAME_CTRL_17 0x517 #define MADERA_AIF1_FRAME_CTRL_18 0x518 #define MADERA_AIF1_TX_ENABLES 0x519 #define MADERA_AIF1_RX_ENABLES 0x51A #define MADERA_AIF1_FORCE_WRITE 0x51B #define MADERA_AIF2_BCLK_CTRL 0x540 #define MADERA_AIF2_TX_PIN_CTRL 0x541 #define MADERA_AIF2_RX_PIN_CTRL 0x542 #define MADERA_AIF2_RATE_CTRL 0x543 #define MADERA_AIF2_FORMAT 0x544 #define MADERA_AIF2_RX_BCLK_RATE 0x546 #define MADERA_AIF2_FRAME_CTRL_1 0x547 #define MADERA_AIF2_FRAME_CTRL_2 0x548 #define MADERA_AIF2_FRAME_CTRL_3 0x549 #define MADERA_AIF2_FRAME_CTRL_4 0x54A #define MADERA_AIF2_FRAME_CTRL_5 0x54B #define MADERA_AIF2_FRAME_CTRL_6 0x54C #define MADERA_AIF2_FRAME_CTRL_7 0x54D #define MADERA_AIF2_FRAME_CTRL_8 0x54E #define MADERA_AIF2_FRAME_CTRL_9 0x54F #define MADERA_AIF2_FRAME_CTRL_10 0x550 #define MADERA_AIF2_FRAME_CTRL_11 0x551 #define MADERA_AIF2_FRAME_CTRL_12 0x552 #define MADERA_AIF2_FRAME_CTRL_13 0x553 #define MADERA_AIF2_FRAME_CTRL_14 0x554 #define MADERA_AIF2_FRAME_CTRL_15 0x555 #define MADERA_AIF2_FRAME_CTRL_16 0x556 #define MADERA_AIF2_FRAME_CTRL_17 0x557 #define MADERA_AIF2_FRAME_CTRL_18 0x558 #define MADERA_AIF2_TX_ENABLES 0x559 #define MADERA_AIF2_RX_ENABLES 0x55A #define MADERA_AIF2_FORCE_WRITE 0x55B #define MADERA_AIF3_BCLK_CTRL 0x580 #define MADERA_AIF3_TX_PIN_CTRL 0x581 #define MADERA_AIF3_RX_PIN_CTRL 0x582 #define MADERA_AIF3_RATE_CTRL 0x583 #define MADERA_AIF3_FORMAT 0x584 #define MADERA_AIF3_RX_BCLK_RATE 0x586 #define MADERA_AIF3_FRAME_CTRL_1 0x587 #define MADERA_AIF3_FRAME_CTRL_2 0x588 #define MADERA_AIF3_FRAME_CTRL_3 0x589 #define MADERA_AIF3_FRAME_CTRL_4 0x58A #define MADERA_AIF3_FRAME_CTRL_5 0x58B #define MADERA_AIF3_FRAME_CTRL_6 0x58C 
#define MADERA_AIF3_FRAME_CTRL_7 0x58D #define MADERA_AIF3_FRAME_CTRL_8 0x58E #define MADERA_AIF3_FRAME_CTRL_9 0x58F #define MADERA_AIF3_FRAME_CTRL_10 0x590 #define MADERA_AIF3_FRAME_CTRL_11 0x591 #define MADERA_AIF3_FRAME_CTRL_12 0x592 #define MADERA_AIF3_FRAME_CTRL_13 0x593 #define MADERA_AIF3_FRAME_CTRL_14 0x594 #define MADERA_AIF3_FRAME_CTRL_15 0x595 #define MADERA_AIF3_FRAME_CTRL_16 0x596 #define MADERA_AIF3_FRAME_CTRL_17 0x597 #define MADERA_AIF3_FRAME_CTRL_18 0x598 #define MADERA_AIF3_TX_ENABLES 0x599 #define MADERA_AIF3_RX_ENABLES 0x59A #define MADERA_AIF3_FORCE_WRITE 0x59B #define MADERA_AIF4_BCLK_CTRL 0x5A0 #define MADERA_AIF4_TX_PIN_CTRL 0x5A1 #define MADERA_AIF4_RX_PIN_CTRL 0x5A2 #define MADERA_AIF4_RATE_CTRL 0x5A3 #define MADERA_AIF4_FORMAT 0x5A4 #define MADERA_AIF4_RX_BCLK_RATE 0x5A6 #define MADERA_AIF4_FRAME_CTRL_1 0x5A7 #define MADERA_AIF4_FRAME_CTRL_2 0x5A8 #define MADERA_AIF4_FRAME_CTRL_3 0x5A9 #define MADERA_AIF4_FRAME_CTRL_4 0x5AA #define MADERA_AIF4_FRAME_CTRL_11 0x5B1 #define MADERA_AIF4_FRAME_CTRL_12 0x5B2 #define MADERA_AIF4_TX_ENABLES 0x5B9 #define MADERA_AIF4_RX_ENABLES 0x5BA #define MADERA_AIF4_FORCE_WRITE 0x5BB #define MADERA_SPD1_TX_CONTROL 0x5C2 #define MADERA_SPD1_TX_CHANNEL_STATUS_1 0x5C3 #define MADERA_SPD1_TX_CHANNEL_STATUS_2 0x5C4 #define MADERA_SPD1_TX_CHANNEL_STATUS_3 0x5C5 #define MADERA_SLIMBUS_FRAMER_REF_GEAR 0x5E3 #define MADERA_SLIMBUS_RATES_1 0x5E5 #define MADERA_SLIMBUS_RATES_2 0x5E6 #define MADERA_SLIMBUS_RATES_3 0x5E7 #define MADERA_SLIMBUS_RATES_4 0x5E8 #define MADERA_SLIMBUS_RATES_5 0x5E9 #define MADERA_SLIMBUS_RATES_6 0x5EA #define MADERA_SLIMBUS_RATES_7 0x5EB #define MADERA_SLIMBUS_RATES_8 0x5EC #define MADERA_SLIMBUS_RX_CHANNEL_ENABLE 0x5F5 #define MADERA_SLIMBUS_TX_CHANNEL_ENABLE 0x5F6 #define MADERA_SLIMBUS_RX_PORT_STATUS 0x5F7 #define MADERA_SLIMBUS_TX_PORT_STATUS 0x5F8 #define MADERA_PWM1MIX_INPUT_1_SOURCE 0x640 #define MADERA_PWM1MIX_INPUT_1_VOLUME 0x641 #define MADERA_PWM1MIX_INPUT_2_SOURCE 0x642 #define MADERA_PWM1MIX_INPUT_2_VOLUME 0x643 #define MADERA_PWM1MIX_INPUT_3_SOURCE 0x644 #define MADERA_PWM1MIX_INPUT_3_VOLUME 0x645 #define MADERA_PWM1MIX_INPUT_4_SOURCE 0x646 #define MADERA_PWM1MIX_INPUT_4_VOLUME 0x647 #define MADERA_PWM2MIX_INPUT_1_SOURCE 0x648 #define MADERA_PWM2MIX_INPUT_1_VOLUME 0x649 #define MADERA_PWM2MIX_INPUT_2_SOURCE 0x64A #define MADERA_PWM2MIX_INPUT_2_VOLUME 0x64B #define MADERA_PWM2MIX_INPUT_3_SOURCE 0x64C #define MADERA_PWM2MIX_INPUT_3_VOLUME 0x64D #define MADERA_PWM2MIX_INPUT_4_SOURCE 0x64E #define MADERA_PWM2MIX_INPUT_4_VOLUME 0x64F #define MADERA_OUT1LMIX_INPUT_1_SOURCE 0x680 #define MADERA_OUT1LMIX_INPUT_1_VOLUME 0x681 #define MADERA_OUT1LMIX_INPUT_2_SOURCE 0x682 #define MADERA_OUT1LMIX_INPUT_2_VOLUME 0x683 #define MADERA_OUT1LMIX_INPUT_3_SOURCE 0x684 #define MADERA_OUT1LMIX_INPUT_3_VOLUME 0x685 #define MADERA_OUT1LMIX_INPUT_4_SOURCE 0x686 #define MADERA_OUT1LMIX_INPUT_4_VOLUME 0x687 #define MADERA_OUT1RMIX_INPUT_1_SOURCE 0x688 #define MADERA_OUT1RMIX_INPUT_1_VOLUME 0x689 #define MADERA_OUT1RMIX_INPUT_2_SOURCE 0x68A #define MADERA_OUT1RMIX_INPUT_2_VOLUME 0x68B #define MADERA_OUT1RMIX_INPUT_3_SOURCE 0x68C #define MADERA_OUT1RMIX_INPUT_3_VOLUME 0x68D #define MADERA_OUT1RMIX_INPUT_4_SOURCE 0x68E #define MADERA_OUT1RMIX_INPUT_4_VOLUME 0x68F #define MADERA_OUT2LMIX_INPUT_1_SOURCE 0x690 #define MADERA_OUT2LMIX_INPUT_1_VOLUME 0x691 #define MADERA_OUT2LMIX_INPUT_2_SOURCE 0x692 #define MADERA_OUT2LMIX_INPUT_2_VOLUME 0x693 #define MADERA_OUT2LMIX_INPUT_3_SOURCE 0x694 #define MADERA_OUT2LMIX_INPUT_3_VOLUME 0x695 
#define MADERA_OUT2RMIX_INPUT_1_SOURCE 0x698
#define MADERA_OUT2RMIX_INPUT_1_VOLUME 0x699
#define MADERA_OUT2RMIX_INPUT_2_SOURCE 0x69A
#define MADERA_OUT2RMIX_INPUT_2_VOLUME 0x69B
#define MADERA_OUT2RMIX_INPUT_3_SOURCE 0x69C
#define MADERA_OUT2RMIX_INPUT_3_VOLUME 0x69D
#define MADERA_OUT2RMIX_INPUT_4_SOURCE 0x69E
#define MADERA_OUT2RMIX_INPUT_4_VOLUME 0x69F
#define MADERA_OUT3LMIX_INPUT_1_SOURCE 0x6A0
#define MADERA_OUT3LMIX_INPUT_1_VOLUME 0x6A1
#define MADERA_OUT3LMIX_INPUT_2_SOURCE 0x6A2
#define MADERA_OUT3LMIX_INPUT_2_VOLUME 0x6A3
#define MADERA_OUT3LMIX_INPUT_3_SOURCE 0x6A4
#define MADERA_OUT3LMIX_INPUT_3_VOLUME 0x6A5
#define MADERA_OUT3LMIX_INPUT_4_SOURCE 0x6A6
#define MADERA_OUT3LMIX_INPUT_4_VOLUME 0x6A7
#define MADERA_OUT3RMIX_INPUT_1_SOURCE 0x6A8
#define MADERA_OUT3RMIX_INPUT_1_VOLUME 0x6A9
#define MADERA_OUT3RMIX_INPUT_2_SOURCE 0x6AA
#define MADERA_OUT3RMIX_INPUT_2_VOLUME 0x6AB
#define MADERA_OUT3RMIX_INPUT_3_SOURCE 0x6AC
#define MADERA_OUT3RMIX_INPUT_3_VOLUME 0x6AD
#define MADERA_OUT3RMIX_INPUT_4_SOURCE 0x6AE
#define MADERA_OUT3RMIX_INPUT_4_VOLUME 0x6AF
#define MADERA_OUT4LMIX_INPUT_1_SOURCE 0x6B0
#define MADERA_OUT4LMIX_INPUT_1_VOLUME 0x6B1
#define MADERA_OUT4LMIX_INPUT_2_SOURCE 0x6B2
#define MADERA_OUT4LMIX_INPUT_2_VOLUME 0x6B3
#define MADERA_OUT4LMIX_INPUT_3_SOURCE 0x6B4
#define MADERA_OUT4LMIX_INPUT_3_VOLUME 0x6B5
#define MADERA_OUT4LMIX_INPUT_4_SOURCE 0x6B6
#define MADERA_OUT4LMIX_INPUT_4_VOLUME 0x6B7
#define MADERA_OUT4RMIX_INPUT_1_SOURCE 0x6B8
#define MADERA_OUT4RMIX_INPUT_1_VOLUME 0x6B9
#define MADERA_OUT4RMIX_INPUT_2_SOURCE 0x6BA
#define MADERA_OUT4RMIX_INPUT_2_VOLUME 0x6BB
#define MADERA_OUT4RMIX_INPUT_3_SOURCE 0x6BC
#define MADERA_OUT4RMIX_INPUT_3_VOLUME 0x6BD
#define MADERA_OUT4RMIX_INPUT_4_SOURCE 0x6BE
#define MADERA_OUT4RMIX_INPUT_4_VOLUME 0x6BF
#define MADERA_OUT5LMIX_INPUT_1_SOURCE 0x6C0
#define MADERA_OUT5LMIX_INPUT_1_VOLUME 0x6C1
#define MADERA_OUT5LMIX_INPUT_2_SOURCE 0x6C2
#define MADERA_OUT5LMIX_INPUT_2_VOLUME 0x6C3
#define MADERA_OUT5LMIX_INPUT_3_SOURCE 0x6C4
#define MADERA_OUT5LMIX_INPUT_3_VOLUME 0x6C5
#define MADERA_OUT5LMIX_INPUT_4_SOURCE 0x6C6
#define MADERA_OUT5LMIX_INPUT_4_VOLUME 0x6C7
#define MADERA_OUT5RMIX_INPUT_1_SOURCE 0x6C8
#define MADERA_OUT5RMIX_INPUT_1_VOLUME 0x6C9
#define MADERA_OUT5RMIX_INPUT_2_SOURCE 0x6CA
#define MADERA_OUT5RMIX_INPUT_2_VOLUME 0x6CB
#define MADERA_OUT5RMIX_INPUT_3_SOURCE 0x6CC
#define MADERA_OUT5RMIX_INPUT_3_VOLUME 0x6CD
#define MADERA_OUT5RMIX_INPUT_4_SOURCE 0x6CE
#define MADERA_OUT5RMIX_INPUT_4_VOLUME 0x6CF
#define MADERA_OUT6LMIX_INPUT_1_SOURCE 0x6D0
#define MADERA_OUT6LMIX_INPUT_1_VOLUME 0x6D1
#define MADERA_OUT6LMIX_INPUT_2_SOURCE 0x6D2
#define MADERA_OUT6LMIX_INPUT_2_VOLUME 0x6D3
#define MADERA_OUT6LMIX_INPUT_3_SOURCE 0x6D4
#define MADERA_OUT6LMIX_INPUT_3_VOLUME 0x6D5
#define MADERA_OUT6LMIX_INPUT_4_SOURCE 0x6D6
#define MADERA_OUT6LMIX_INPUT_4_VOLUME 0x6D7
#define MADERA_OUT6RMIX_INPUT_1_SOURCE 0x6D8
#define MADERA_OUT6RMIX_INPUT_1_VOLUME 0x6D9
#define MADERA_OUT6RMIX_INPUT_2_SOURCE 0x6DA
#define MADERA_OUT6RMIX_INPUT_2_VOLUME 0x6DB
#define MADERA_OUT6RMIX_INPUT_3_SOURCE 0x6DC
#define MADERA_OUT6RMIX_INPUT_3_VOLUME 0x6DD
#define MADERA_OUT6RMIX_INPUT_4_SOURCE 0x6DE
#define MADERA_OUT6RMIX_INPUT_4_VOLUME 0x6DF
#define MADERA_AIF1TX1MIX_INPUT_1_SOURCE 0x700
#define MADERA_AIF1TX1MIX_INPUT_1_VOLUME 0x701
#define MADERA_AIF1TX1MIX_INPUT_2_SOURCE 0x702
#define MADERA_AIF1TX1MIX_INPUT_2_VOLUME 0x703
#define MADERA_AIF1TX1MIX_INPUT_3_SOURCE 0x704
#define MADERA_AIF1TX1MIX_INPUT_3_VOLUME 0x705
#define MADERA_AIF1TX1MIX_INPUT_4_SOURCE 0x706
#define MADERA_AIF1TX1MIX_INPUT_4_VOLUME 0x707
#define MADERA_AIF1TX2MIX_INPUT_1_SOURCE 0x708
#define MADERA_AIF1TX2MIX_INPUT_1_VOLUME 0x709
#define MADERA_AIF1TX2MIX_INPUT_2_SOURCE 0x70A
#define MADERA_AIF1TX2MIX_INPUT_2_VOLUME 0x70B
#define MADERA_AIF1TX2MIX_INPUT_3_SOURCE 0x70C
#define MADERA_AIF1TX2MIX_INPUT_3_VOLUME 0x70D
#define MADERA_AIF1TX2MIX_INPUT_4_SOURCE 0x70E
#define MADERA_AIF1TX2MIX_INPUT_4_VOLUME 0x70F
#define MADERA_AIF1TX3MIX_INPUT_1_SOURCE 0x710
#define MADERA_AIF1TX3MIX_INPUT_1_VOLUME 0x711
#define MADERA_AIF1TX3MIX_INPUT_2_SOURCE 0x712
#define MADERA_AIF1TX3MIX_INPUT_2_VOLUME 0x713
#define MADERA_AIF1TX3MIX_INPUT_3_SOURCE 0x714
#define MADERA_AIF1TX3MIX_INPUT_3_VOLUME 0x715
#define MADERA_AIF1TX3MIX_INPUT_4_SOURCE 0x716
#define MADERA_AIF1TX3MIX_INPUT_4_VOLUME 0x717
#define MADERA_AIF1TX4MIX_INPUT_1_SOURCE 0x718
#define MADERA_AIF1TX4MIX_INPUT_1_VOLUME 0x719
#define MADERA_AIF1TX4MIX_INPUT_2_SOURCE 0x71A
#define MADERA_AIF1TX4MIX_INPUT_2_VOLUME 0x71B
#define MADERA_AIF1TX4MIX_INPUT_3_SOURCE 0x71C
#define MADERA_AIF1TX4MIX_INPUT_3_VOLUME 0x71D
#define MADERA_AIF1TX4MIX_INPUT_4_SOURCE 0x71E
#define MADERA_AIF1TX4MIX_INPUT_4_VOLUME 0x71F
#define MADERA_AIF1TX5MIX_INPUT_1_SOURCE 0x720
#define MADERA_AIF1TX5MIX_INPUT_1_VOLUME 0x721
#define MADERA_AIF1TX5MIX_INPUT_2_SOURCE 0x722
#define MADERA_AIF1TX5MIX_INPUT_2_VOLUME 0x723
#define MADERA_AIF1TX5MIX_INPUT_3_SOURCE 0x724
#define MADERA_AIF1TX5MIX_INPUT_3_VOLUME 0x725
#define MADERA_AIF1TX5MIX_INPUT_4_SOURCE 0x726
#define MADERA_AIF1TX5MIX_INPUT_4_VOLUME 0x727
#define MADERA_AIF1TX6MIX_INPUT_1_SOURCE 0x728
#define MADERA_AIF1TX6MIX_INPUT_1_VOLUME 0x729
#define MADERA_AIF1TX6MIX_INPUT_2_SOURCE 0x72A
#define MADERA_AIF1TX6MIX_INPUT_2_VOLUME 0x72B
#define MADERA_AIF1TX6MIX_INPUT_3_SOURCE 0x72C
#define MADERA_AIF1TX6MIX_INPUT_3_VOLUME 0x72D
#define MADERA_AIF1TX6MIX_INPUT_4_SOURCE 0x72E
#define MADERA_AIF1TX6MIX_INPUT_4_VOLUME 0x72F
#define MADERA_AIF1TX7MIX_INPUT_1_SOURCE 0x730
#define MADERA_AIF1TX7MIX_INPUT_1_VOLUME 0x731
#define MADERA_AIF1TX7MIX_INPUT_2_SOURCE 0x732
#define MADERA_AIF1TX7MIX_INPUT_2_VOLUME 0x733
#define MADERA_AIF1TX7MIX_INPUT_3_SOURCE 0x734
#define MADERA_AIF1TX7MIX_INPUT_3_VOLUME 0x735
#define MADERA_AIF1TX7MIX_INPUT_4_SOURCE 0x736
#define MADERA_AIF1TX7MIX_INPUT_4_VOLUME 0x737
#define MADERA_AIF1TX8MIX_INPUT_1_SOURCE 0x738
#define MADERA_AIF1TX8MIX_INPUT_1_VOLUME 0x739
#define MADERA_AIF1TX8MIX_INPUT_2_SOURCE 0x73A
#define MADERA_AIF1TX8MIX_INPUT_2_VOLUME 0x73B
#define MADERA_AIF1TX8MIX_INPUT_3_SOURCE 0x73C
#define MADERA_AIF1TX8MIX_INPUT_3_VOLUME 0x73D
#define MADERA_AIF1TX8MIX_INPUT_4_SOURCE 0x73E
#define MADERA_AIF1TX8MIX_INPUT_4_VOLUME 0x73F
#define MADERA_AIF2TX1MIX_INPUT_1_SOURCE 0x740
#define MADERA_AIF2TX1MIX_INPUT_1_VOLUME 0x741
#define MADERA_AIF2TX1MIX_INPUT_2_SOURCE 0x742
#define MADERA_AIF2TX1MIX_INPUT_2_VOLUME 0x743
#define MADERA_AIF2TX1MIX_INPUT_3_SOURCE 0x744
#define MADERA_AIF2TX1MIX_INPUT_3_VOLUME 0x745
#define MADERA_AIF2TX1MIX_INPUT_4_SOURCE 0x746
#define MADERA_AIF2TX1MIX_INPUT_4_VOLUME 0x747
#define MADERA_AIF2TX2MIX_INPUT_1_SOURCE 0x748
#define MADERA_AIF2TX2MIX_INPUT_1_VOLUME 0x749
#define MADERA_AIF2TX2MIX_INPUT_2_SOURCE 0x74A
#define MADERA_AIF2TX2MIX_INPUT_2_VOLUME 0x74B
#define MADERA_AIF2TX2MIX_INPUT_3_SOURCE 0x74C
#define MADERA_AIF2TX2MIX_INPUT_3_VOLUME 0x74D
#define MADERA_AIF2TX2MIX_INPUT_4_SOURCE 0x74E
#define MADERA_AIF2TX2MIX_INPUT_4_VOLUME 0x74F
#define MADERA_AIF2TX3MIX_INPUT_1_SOURCE 0x750
#define MADERA_AIF2TX3MIX_INPUT_1_VOLUME 0x751
#define MADERA_AIF2TX3MIX_INPUT_2_SOURCE 0x752
#define MADERA_AIF2TX3MIX_INPUT_2_VOLUME 0x753
#define MADERA_AIF2TX3MIX_INPUT_3_SOURCE 0x754
#define MADERA_AIF2TX3MIX_INPUT_3_VOLUME 0x755
#define MADERA_AIF2TX3MIX_INPUT_4_SOURCE 0x756
#define MADERA_AIF2TX3MIX_INPUT_4_VOLUME 0x757
#define MADERA_AIF2TX4MIX_INPUT_1_SOURCE 0x758
#define MADERA_AIF2TX4MIX_INPUT_1_VOLUME 0x759
#define MADERA_AIF2TX4MIX_INPUT_2_SOURCE 0x75A
#define MADERA_AIF2TX4MIX_INPUT_2_VOLUME 0x75B
#define MADERA_AIF2TX4MIX_INPUT_3_SOURCE 0x75C
#define MADERA_AIF2TX4MIX_INPUT_3_VOLUME 0x75D
#define MADERA_AIF2TX4MIX_INPUT_4_SOURCE 0x75E
#define MADERA_AIF2TX4MIX_INPUT_4_VOLUME 0x75F
#define MADERA_AIF2TX5MIX_INPUT_1_SOURCE 0x760
#define MADERA_AIF2TX5MIX_INPUT_1_VOLUME 0x761
#define MADERA_AIF2TX5MIX_INPUT_2_SOURCE 0x762
#define MADERA_AIF2TX5MIX_INPUT_2_VOLUME 0x763
#define MADERA_AIF2TX5MIX_INPUT_3_SOURCE 0x764
#define MADERA_AIF2TX5MIX_INPUT_3_VOLUME 0x765
#define MADERA_AIF2TX5MIX_INPUT_4_SOURCE 0x766
#define MADERA_AIF2TX5MIX_INPUT_4_VOLUME 0x767
#define MADERA_AIF2TX6MIX_INPUT_1_SOURCE 0x768
#define MADERA_AIF2TX6MIX_INPUT_1_VOLUME 0x769
#define MADERA_AIF2TX6MIX_INPUT_2_SOURCE 0x76A
#define MADERA_AIF2TX6MIX_INPUT_2_VOLUME 0x76B
#define MADERA_AIF2TX6MIX_INPUT_3_SOURCE 0x76C
#define MADERA_AIF2TX6MIX_INPUT_3_VOLUME 0x76D
#define MADERA_AIF2TX6MIX_INPUT_4_SOURCE 0x76E
#define MADERA_AIF2TX6MIX_INPUT_4_VOLUME 0x76F
#define MADERA_AIF2TX7MIX_INPUT_1_SOURCE 0x770
#define MADERA_AIF2TX7MIX_INPUT_1_VOLUME 0x771
#define MADERA_AIF2TX7MIX_INPUT_2_SOURCE 0x772
#define MADERA_AIF2TX7MIX_INPUT_2_VOLUME 0x773
#define MADERA_AIF2TX7MIX_INPUT_3_SOURCE 0x774
#define MADERA_AIF2TX7MIX_INPUT_3_VOLUME 0x775
#define MADERA_AIF2TX7MIX_INPUT_4_SOURCE 0x776
#define MADERA_AIF2TX7MIX_INPUT_4_VOLUME 0x777
#define MADERA_AIF2TX8MIX_INPUT_1_SOURCE 0x778
#define MADERA_AIF2TX8MIX_INPUT_1_VOLUME 0x779
#define MADERA_AIF2TX8MIX_INPUT_2_SOURCE 0x77A
#define MADERA_AIF2TX8MIX_INPUT_2_VOLUME 0x77B
#define MADERA_AIF2TX8MIX_INPUT_3_SOURCE 0x77C
#define MADERA_AIF2TX8MIX_INPUT_3_VOLUME 0x77D
#define MADERA_AIF2TX8MIX_INPUT_4_SOURCE 0x77E
#define MADERA_AIF2TX8MIX_INPUT_4_VOLUME 0x77F
#define MADERA_AIF3TX1MIX_INPUT_1_SOURCE 0x780
#define MADERA_AIF3TX1MIX_INPUT_1_VOLUME 0x781
#define MADERA_AIF3TX1MIX_INPUT_2_SOURCE 0x782
#define MADERA_AIF3TX1MIX_INPUT_2_VOLUME 0x783
#define MADERA_AIF3TX1MIX_INPUT_3_SOURCE 0x784
#define MADERA_AIF3TX1MIX_INPUT_3_VOLUME 0x785
#define MADERA_AIF3TX1MIX_INPUT_4_SOURCE 0x786
#define MADERA_AIF3TX1MIX_INPUT_4_VOLUME 0x787
#define MADERA_AIF3TX2MIX_INPUT_1_SOURCE 0x788
#define MADERA_AIF3TX2MIX_INPUT_1_VOLUME 0x789
#define MADERA_AIF3TX2MIX_INPUT_2_SOURCE 0x78A
#define MADERA_AIF3TX2MIX_INPUT_2_VOLUME 0x78B
#define MADERA_AIF3TX2MIX_INPUT_3_SOURCE 0x78C
#define MADERA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D
#define MADERA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E
#define MADERA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F
#define MADERA_AIF3TX3MIX_INPUT_1_SOURCE 0x790
#define MADERA_AIF3TX3MIX_INPUT_1_VOLUME 0x791
#define MADERA_AIF3TX3MIX_INPUT_2_SOURCE 0x792
#define MADERA_AIF3TX3MIX_INPUT_2_VOLUME 0x793
#define MADERA_AIF3TX3MIX_INPUT_3_SOURCE 0x794
#define MADERA_AIF3TX3MIX_INPUT_3_VOLUME 0x795
#define MADERA_AIF3TX3MIX_INPUT_4_SOURCE 0x796
#define MADERA_AIF3TX3MIX_INPUT_4_VOLUME 0x797
#define MADERA_AIF3TX4MIX_INPUT_1_SOURCE 0x798
#define MADERA_AIF3TX4MIX_INPUT_1_VOLUME 0x799
#define MADERA_AIF3TX4MIX_INPUT_2_SOURCE 0x79A
#define MADERA_AIF3TX4MIX_INPUT_2_VOLUME 0x79B
#define MADERA_AIF3TX4MIX_INPUT_3_SOURCE 0x79C
#define MADERA_AIF3TX4MIX_INPUT_3_VOLUME 0x79D
#define MADERA_AIF3TX4MIX_INPUT_4_SOURCE 0x79E
#define MADERA_AIF3TX4MIX_INPUT_4_VOLUME 0x79F
#define CS47L92_AIF3TX5MIX_INPUT_1_SOURCE 0x7A0
#define CS47L92_AIF3TX5MIX_INPUT_1_VOLUME 0x7A1
#define CS47L92_AIF3TX5MIX_INPUT_2_SOURCE 0x7A2
#define CS47L92_AIF3TX5MIX_INPUT_2_VOLUME 0x7A3
#define CS47L92_AIF3TX5MIX_INPUT_3_SOURCE 0x7A4
#define CS47L92_AIF3TX5MIX_INPUT_3_VOLUME 0x7A5
#define CS47L92_AIF3TX5MIX_INPUT_4_SOURCE 0x7A6
#define CS47L92_AIF3TX5MIX_INPUT_4_VOLUME 0x7A7
#define CS47L92_AIF3TX6MIX_INPUT_1_SOURCE 0x7A8
#define CS47L92_AIF3TX6MIX_INPUT_1_VOLUME 0x7A9
#define CS47L92_AIF3TX6MIX_INPUT_2_SOURCE 0x7AA
#define CS47L92_AIF3TX6MIX_INPUT_2_VOLUME 0x7AB
#define CS47L92_AIF3TX6MIX_INPUT_3_SOURCE 0x7AC
#define CS47L92_AIF3TX6MIX_INPUT_3_VOLUME 0x7AD
#define CS47L92_AIF3TX6MIX_INPUT_4_SOURCE 0x7AE
#define CS47L92_AIF3TX6MIX_INPUT_4_VOLUME 0x7AF
#define CS47L92_AIF3TX7MIX_INPUT_1_SOURCE 0x7B0
#define CS47L92_AIF3TX7MIX_INPUT_1_VOLUME 0x7B1
#define CS47L92_AIF3TX7MIX_INPUT_2_SOURCE 0x7B2
#define CS47L92_AIF3TX7MIX_INPUT_2_VOLUME 0x7B3
#define CS47L92_AIF3TX7MIX_INPUT_3_SOURCE 0x7B4
#define CS47L92_AIF3TX7MIX_INPUT_3_VOLUME 0x7B5
#define CS47L92_AIF3TX7MIX_INPUT_4_SOURCE 0x7B6
#define CS47L92_AIF3TX7MIX_INPUT_4_VOLUME 0x7B7
#define CS47L92_AIF3TX8MIX_INPUT_1_SOURCE 0x7B8
#define CS47L92_AIF3TX8MIX_INPUT_1_VOLUME 0x7B9
#define CS47L92_AIF3TX8MIX_INPUT_2_SOURCE 0x7BA
#define CS47L92_AIF3TX8MIX_INPUT_2_VOLUME 0x7BB
#define CS47L92_AIF3TX8MIX_INPUT_3_SOURCE 0x7BC
#define CS47L92_AIF3TX8MIX_INPUT_3_VOLUME 0x7BD
#define CS47L92_AIF3TX8MIX_INPUT_4_SOURCE 0x7BE
#define CS47L92_AIF3TX8MIX_INPUT_4_VOLUME 0x7BF
#define MADERA_AIF4TX1MIX_INPUT_1_SOURCE 0x7A0
#define MADERA_AIF4TX1MIX_INPUT_1_VOLUME 0x7A1
#define MADERA_AIF4TX1MIX_INPUT_2_SOURCE 0x7A2
#define MADERA_AIF4TX1MIX_INPUT_2_VOLUME 0x7A3
#define MADERA_AIF4TX1MIX_INPUT_3_SOURCE 0x7A4
#define MADERA_AIF4TX1MIX_INPUT_3_VOLUME 0x7A5
#define MADERA_AIF4TX1MIX_INPUT_4_SOURCE 0x7A6
#define MADERA_AIF4TX1MIX_INPUT_4_VOLUME 0x7A7
#define MADERA_AIF4TX2MIX_INPUT_1_SOURCE 0x7A8
#define MADERA_AIF4TX2MIX_INPUT_1_VOLUME 0x7A9
#define MADERA_AIF4TX2MIX_INPUT_2_SOURCE 0x7AA
#define MADERA_AIF4TX2MIX_INPUT_2_VOLUME 0x7AB
#define MADERA_AIF4TX2MIX_INPUT_3_SOURCE 0x7AC
#define MADERA_AIF4TX2MIX_INPUT_3_VOLUME 0x7AD
#define MADERA_AIF4TX2MIX_INPUT_4_SOURCE 0x7AE
#define MADERA_AIF4TX2MIX_INPUT_4_VOLUME 0x7AF
#define MADERA_SLIMTX1MIX_INPUT_1_SOURCE 0x7C0
#define MADERA_SLIMTX1MIX_INPUT_1_VOLUME 0x7C1
#define MADERA_SLIMTX1MIX_INPUT_2_SOURCE 0x7C2
#define MADERA_SLIMTX1MIX_INPUT_2_VOLUME 0x7C3
#define MADERA_SLIMTX1MIX_INPUT_3_SOURCE 0x7C4
#define MADERA_SLIMTX1MIX_INPUT_3_VOLUME 0x7C5
#define MADERA_SLIMTX1MIX_INPUT_4_SOURCE 0x7C6
#define MADERA_SLIMTX1MIX_INPUT_4_VOLUME 0x7C7
#define MADERA_SLIMTX2MIX_INPUT_1_SOURCE 0x7C8
#define MADERA_SLIMTX2MIX_INPUT_1_VOLUME 0x7C9
#define MADERA_SLIMTX2MIX_INPUT_2_SOURCE 0x7CA
#define MADERA_SLIMTX2MIX_INPUT_2_VOLUME 0x7CB
#define MADERA_SLIMTX2MIX_INPUT_3_SOURCE 0x7CC
#define MADERA_SLIMTX2MIX_INPUT_3_VOLUME 0x7CD
#define MADERA_SLIMTX2MIX_INPUT_4_SOURCE 0x7CE
#define MADERA_SLIMTX2MIX_INPUT_4_VOLUME 0x7CF
#define MADERA_SLIMTX3MIX_INPUT_1_SOURCE 0x7D0
#define MADERA_SLIMTX3MIX_INPUT_1_VOLUME 0x7D1
#define MADERA_SLIMTX3MIX_INPUT_2_SOURCE 0x7D2
#define MADERA_SLIMTX3MIX_INPUT_2_VOLUME 0x7D3
#define MADERA_SLIMTX3MIX_INPUT_3_SOURCE 0x7D4
#define MADERA_SLIMTX3MIX_INPUT_3_VOLUME 0x7D5
#define MADERA_SLIMTX3MIX_INPUT_4_SOURCE 0x7D6
#define MADERA_SLIMTX3MIX_INPUT_4_VOLUME 0x7D7
#define MADERA_SLIMTX4MIX_INPUT_1_SOURCE 0x7D8
#define MADERA_SLIMTX4MIX_INPUT_1_VOLUME 0x7D9
#define MADERA_SLIMTX4MIX_INPUT_2_SOURCE 0x7DA
#define MADERA_SLIMTX4MIX_INPUT_2_VOLUME 0x7DB
#define MADERA_SLIMTX4MIX_INPUT_3_SOURCE 0x7DC
#define MADERA_SLIMTX4MIX_INPUT_3_VOLUME 0x7DD
#define MADERA_SLIMTX4MIX_INPUT_4_SOURCE 0x7DE
#define MADERA_SLIMTX4MIX_INPUT_4_VOLUME 0x7DF
#define MADERA_SLIMTX5MIX_INPUT_1_SOURCE 0x7E0
#define MADERA_SLIMTX5MIX_INPUT_1_VOLUME 0x7E1
#define MADERA_SLIMTX5MIX_INPUT_2_SOURCE 0x7E2
#define MADERA_SLIMTX5MIX_INPUT_2_VOLUME 0x7E3
#define MADERA_SLIMTX5MIX_INPUT_3_SOURCE 0x7E4
#define MADERA_SLIMTX5MIX_INPUT_3_VOLUME 0x7E5
#define MADERA_SLIMTX5MIX_INPUT_4_SOURCE 0x7E6
#define MADERA_SLIMTX5MIX_INPUT_4_VOLUME 0x7E7
#define MADERA_SLIMTX6MIX_INPUT_1_SOURCE 0x7E8
#define MADERA_SLIMTX6MIX_INPUT_1_VOLUME 0x7E9
#define MADERA_SLIMTX6MIX_INPUT_2_SOURCE 0x7EA
#define MADERA_SLIMTX6MIX_INPUT_2_VOLUME 0x7EB
#define MADERA_SLIMTX6MIX_INPUT_3_SOURCE 0x7EC
#define MADERA_SLIMTX6MIX_INPUT_3_VOLUME 0x7ED
#define MADERA_SLIMTX6MIX_INPUT_4_SOURCE 0x7EE
#define MADERA_SLIMTX6MIX_INPUT_4_VOLUME 0x7EF
#define MADERA_SLIMTX7MIX_INPUT_1_SOURCE 0x7F0
#define MADERA_SLIMTX7MIX_INPUT_1_VOLUME 0x7F1
#define MADERA_SLIMTX7MIX_INPUT_2_SOURCE 0x7F2
#define MADERA_SLIMTX7MIX_INPUT_2_VOLUME 0x7F3
#define MADERA_SLIMTX7MIX_INPUT_3_SOURCE 0x7F4
#define MADERA_SLIMTX7MIX_INPUT_3_VOLUME 0x7F5
#define MADERA_SLIMTX7MIX_INPUT_4_SOURCE 0x7F6
#define MADERA_SLIMTX7MIX_INPUT_4_VOLUME 0x7F7
#define MADERA_SLIMTX8MIX_INPUT_1_SOURCE 0x7F8
#define MADERA_SLIMTX8MIX_INPUT_1_VOLUME 0x7F9
#define MADERA_SLIMTX8MIX_INPUT_2_SOURCE 0x7FA
#define MADERA_SLIMTX8MIX_INPUT_2_VOLUME 0x7FB
#define MADERA_SLIMTX8MIX_INPUT_3_SOURCE 0x7FC
#define MADERA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
#define MADERA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
#define MADERA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
#define MADERA_SPDIF1TX1MIX_INPUT_1_SOURCE 0x800
#define MADERA_SPDIF1TX1MIX_INPUT_1_VOLUME 0x801
#define MADERA_SPDIF1TX2MIX_INPUT_1_SOURCE 0x808
#define MADERA_SPDIF1TX2MIX_INPUT_1_VOLUME 0x809
#define MADERA_EQ1MIX_INPUT_1_SOURCE 0x880
#define MADERA_EQ1MIX_INPUT_1_VOLUME 0x881
#define MADERA_EQ1MIX_INPUT_2_SOURCE 0x882
#define MADERA_EQ1MIX_INPUT_2_VOLUME 0x883
#define MADERA_EQ1MIX_INPUT_3_SOURCE 0x884
#define MADERA_EQ1MIX_INPUT_3_VOLUME 0x885
#define MADERA_EQ1MIX_INPUT_4_SOURCE 0x886
#define MADERA_EQ1MIX_INPUT_4_VOLUME 0x887
#define MADERA_EQ2MIX_INPUT_1_SOURCE 0x888
#define MADERA_EQ2MIX_INPUT_1_VOLUME 0x889
#define MADERA_EQ2MIX_INPUT_2_SOURCE 0x88A
#define MADERA_EQ2MIX_INPUT_2_VOLUME 0x88B
#define MADERA_EQ2MIX_INPUT_3_SOURCE 0x88C
#define MADERA_EQ2MIX_INPUT_3_VOLUME 0x88D
#define MADERA_EQ2MIX_INPUT_4_SOURCE 0x88E
#define MADERA_EQ2MIX_INPUT_4_VOLUME 0x88F
#define MADERA_EQ3MIX_INPUT_1_SOURCE 0x890
#define MADERA_EQ3MIX_INPUT_1_VOLUME 0x891
#define MADERA_EQ3MIX_INPUT_2_SOURCE 0x892
#define MADERA_EQ3MIX_INPUT_2_VOLUME 0x893
#define MADERA_EQ3MIX_INPUT_3_SOURCE 0x894
#define MADERA_EQ3MIX_INPUT_3_VOLUME 0x895
#define MADERA_EQ3MIX_INPUT_4_SOURCE 0x896
#define MADERA_EQ3MIX_INPUT_4_VOLUME 0x897
#define MADERA_EQ4MIX_INPUT_1_SOURCE 0x898
#define MADERA_EQ4MIX_INPUT_1_VOLUME 0x899
#define MADERA_EQ4MIX_INPUT_2_SOURCE 0x89A
#define MADERA_EQ4MIX_INPUT_2_VOLUME 0x89B
#define MADERA_EQ4MIX_INPUT_3_SOURCE 0x89C
#define MADERA_EQ4MIX_INPUT_3_VOLUME 0x89D
#define MADERA_EQ4MIX_INPUT_4_SOURCE 0x89E
#define MADERA_EQ4MIX_INPUT_4_VOLUME 0x89F
#define MADERA_DRC1LMIX_INPUT_1_SOURCE 0x8C0
#define MADERA_DRC1LMIX_INPUT_1_VOLUME 0x8C1
#define MADERA_DRC1LMIX_INPUT_2_SOURCE 0x8C2
#define MADERA_DRC1LMIX_INPUT_2_VOLUME 0x8C3
#define MADERA_DRC1LMIX_INPUT_3_SOURCE 0x8C4
#define MADERA_DRC1LMIX_INPUT_3_VOLUME 0x8C5
#define MADERA_DRC1LMIX_INPUT_4_SOURCE 0x8C6
#define MADERA_DRC1LMIX_INPUT_4_VOLUME 0x8C7
#define MADERA_DRC1RMIX_INPUT_1_SOURCE 0x8C8
#define MADERA_DRC1RMIX_INPUT_1_VOLUME 0x8C9
#define MADERA_DRC1RMIX_INPUT_2_SOURCE 0x8CA
#define MADERA_DRC1RMIX_INPUT_2_VOLUME 0x8CB
#define MADERA_DRC1RMIX_INPUT_3_SOURCE 0x8CC
#define MADERA_DRC1RMIX_INPUT_3_VOLUME 0x8CD
#define MADERA_DRC1RMIX_INPUT_4_SOURCE 0x8CE
#define MADERA_DRC1RMIX_INPUT_4_VOLUME 0x8CF
#define MADERA_DRC2LMIX_INPUT_1_SOURCE 0x8D0
#define MADERA_DRC2LMIX_INPUT_1_VOLUME 0x8D1
#define MADERA_DRC2LMIX_INPUT_2_SOURCE 0x8D2
#define MADERA_DRC2LMIX_INPUT_2_VOLUME 0x8D3
#define MADERA_DRC2LMIX_INPUT_3_SOURCE 0x8D4
#define MADERA_DRC2LMIX_INPUT_3_VOLUME 0x8D5
#define MADERA_DRC2LMIX_INPUT_4_SOURCE 0x8D6
#define MADERA_DRC2LMIX_INPUT_4_VOLUME 0x8D7
#define MADERA_DRC2RMIX_INPUT_1_SOURCE 0x8D8
#define MADERA_DRC2RMIX_INPUT_1_VOLUME 0x8D9
#define MADERA_DRC2RMIX_INPUT_2_SOURCE 0x8DA
#define MADERA_DRC2RMIX_INPUT_2_VOLUME 0x8DB
#define MADERA_DRC2RMIX_INPUT_3_SOURCE 0x8DC
#define MADERA_DRC2RMIX_INPUT_3_VOLUME 0x8DD
#define MADERA_DRC2RMIX_INPUT_4_SOURCE 0x8DE
#define MADERA_DRC2RMIX_INPUT_4_VOLUME 0x8DF
#define MADERA_HPLP1MIX_INPUT_1_SOURCE 0x900
#define MADERA_HPLP1MIX_INPUT_1_VOLUME 0x901
#define MADERA_HPLP1MIX_INPUT_2_SOURCE 0x902
#define MADERA_HPLP1MIX_INPUT_2_VOLUME 0x903
#define MADERA_HPLP1MIX_INPUT_3_SOURCE 0x904
#define MADERA_HPLP1MIX_INPUT_3_VOLUME 0x905
#define MADERA_HPLP1MIX_INPUT_4_SOURCE 0x906
#define MADERA_HPLP1MIX_INPUT_4_VOLUME 0x907
#define MADERA_HPLP2MIX_INPUT_1_SOURCE 0x908
#define MADERA_HPLP2MIX_INPUT_1_VOLUME 0x909
#define MADERA_HPLP2MIX_INPUT_2_SOURCE 0x90A
#define MADERA_HPLP2MIX_INPUT_2_VOLUME 0x90B
#define MADERA_HPLP2MIX_INPUT_3_SOURCE 0x90C
#define MADERA_HPLP2MIX_INPUT_3_VOLUME 0x90D
#define MADERA_HPLP2MIX_INPUT_4_SOURCE 0x90E
#define MADERA_HPLP2MIX_INPUT_4_VOLUME 0x90F
#define MADERA_HPLP3MIX_INPUT_1_SOURCE 0x910
#define MADERA_HPLP3MIX_INPUT_1_VOLUME 0x911
#define MADERA_HPLP3MIX_INPUT_2_SOURCE 0x912
#define MADERA_HPLP3MIX_INPUT_2_VOLUME 0x913
#define MADERA_HPLP3MIX_INPUT_3_SOURCE 0x914
#define MADERA_HPLP3MIX_INPUT_3_VOLUME 0x915
#define MADERA_HPLP3MIX_INPUT_4_SOURCE 0x916
#define MADERA_HPLP3MIX_INPUT_4_VOLUME 0x917
#define MADERA_HPLP4MIX_INPUT_1_SOURCE 0x918
#define MADERA_HPLP4MIX_INPUT_1_VOLUME 0x919
#define MADERA_HPLP4MIX_INPUT_2_SOURCE 0x91A
#define MADERA_HPLP4MIX_INPUT_2_VOLUME 0x91B
#define MADERA_HPLP4MIX_INPUT_3_SOURCE 0x91C
#define MADERA_HPLP4MIX_INPUT_3_VOLUME 0x91D
#define MADERA_HPLP4MIX_INPUT_4_SOURCE 0x91E
#define MADERA_HPLP4MIX_INPUT_4_VOLUME 0x91F
#define MADERA_DSP1LMIX_INPUT_1_SOURCE 0x940
#define MADERA_DSP1LMIX_INPUT_1_VOLUME 0x941
#define MADERA_DSP1LMIX_INPUT_2_SOURCE 0x942
#define MADERA_DSP1LMIX_INPUT_2_VOLUME 0x943
#define MADERA_DSP1LMIX_INPUT_3_SOURCE 0x944
#define MADERA_DSP1LMIX_INPUT_3_VOLUME 0x945
#define MADERA_DSP1LMIX_INPUT_4_SOURCE 0x946
#define MADERA_DSP1LMIX_INPUT_4_VOLUME 0x947
#define MADERA_DSP1RMIX_INPUT_1_SOURCE 0x948
#define MADERA_DSP1RMIX_INPUT_1_VOLUME 0x949
#define MADERA_DSP1RMIX_INPUT_2_SOURCE 0x94A
#define MADERA_DSP1RMIX_INPUT_2_VOLUME 0x94B
#define MADERA_DSP1RMIX_INPUT_3_SOURCE 0x94C
#define MADERA_DSP1RMIX_INPUT_3_VOLUME 0x94D
#define MADERA_DSP1RMIX_INPUT_4_SOURCE 0x94E
#define MADERA_DSP1RMIX_INPUT_4_VOLUME 0x94F
#define MADERA_DSP1AUX1MIX_INPUT_1_SOURCE 0x950
#define MADERA_DSP1AUX2MIX_INPUT_1_SOURCE 0x958
#define MADERA_DSP1AUX3MIX_INPUT_1_SOURCE 0x960
#define MADERA_DSP1AUX4MIX_INPUT_1_SOURCE 0x968
#define MADERA_DSP1AUX5MIX_INPUT_1_SOURCE 0x970
#define MADERA_DSP1AUX6MIX_INPUT_1_SOURCE 0x978
#define MADERA_DSP2LMIX_INPUT_1_SOURCE 0x980
#define MADERA_DSP2LMIX_INPUT_1_VOLUME 0x981
#define MADERA_DSP2LMIX_INPUT_2_SOURCE 0x982
#define MADERA_DSP2LMIX_INPUT_2_VOLUME 0x983
#define MADERA_DSP2LMIX_INPUT_3_SOURCE 0x984
#define MADERA_DSP2LMIX_INPUT_3_VOLUME 0x985
#define MADERA_DSP2LMIX_INPUT_4_SOURCE 0x986
#define MADERA_DSP2LMIX_INPUT_4_VOLUME 0x987
#define MADERA_DSP2RMIX_INPUT_1_SOURCE 0x988
#define MADERA_DSP2RMIX_INPUT_1_VOLUME 0x989
#define MADERA_DSP2RMIX_INPUT_2_SOURCE 0x98A
#define MADERA_DSP2RMIX_INPUT_2_VOLUME 0x98B
#define MADERA_DSP2RMIX_INPUT_3_SOURCE 0x98C
#define MADERA_DSP2RMIX_INPUT_3_VOLUME 0x98D
#define MADERA_DSP2RMIX_INPUT_4_SOURCE 0x98E
#define MADERA_DSP2RMIX_INPUT_4_VOLUME 0x98F
#define MADERA_DSP2AUX1MIX_INPUT_1_SOURCE 0x990
#define MADERA_DSP2AUX2MIX_INPUT_1_SOURCE 0x998
#define MADERA_DSP2AUX3MIX_INPUT_1_SOURCE 0x9A0
#define MADERA_DSP2AUX4MIX_INPUT_1_SOURCE 0x9A8
#define MADERA_DSP2AUX5MIX_INPUT_1_SOURCE 0x9B0
#define MADERA_DSP2AUX6MIX_INPUT_1_SOURCE 0x9B8
#define MADERA_DSP3LMIX_INPUT_1_SOURCE 0x9C0
#define MADERA_DSP3LMIX_INPUT_1_VOLUME 0x9C1
#define MADERA_DSP3LMIX_INPUT_2_SOURCE 0x9C2
#define MADERA_DSP3LMIX_INPUT_2_VOLUME 0x9C3
#define MADERA_DSP3LMIX_INPUT_3_SOURCE 0x9C4
#define MADERA_DSP3LMIX_INPUT_3_VOLUME 0x9C5
#define MADERA_DSP3LMIX_INPUT_4_SOURCE 0x9C6
#define MADERA_DSP3LMIX_INPUT_4_VOLUME 0x9C7
#define MADERA_DSP3RMIX_INPUT_1_SOURCE 0x9C8
#define MADERA_DSP3RMIX_INPUT_1_VOLUME 0x9C9
#define MADERA_DSP3RMIX_INPUT_2_SOURCE 0x9CA
#define MADERA_DSP3RMIX_INPUT_2_VOLUME 0x9CB
#define MADERA_DSP3RMIX_INPUT_3_SOURCE 0x9CC
#define MADERA_DSP3RMIX_INPUT_3_VOLUME 0x9CD
#define MADERA_DSP3RMIX_INPUT_4_SOURCE 0x9CE
#define MADERA_DSP3RMIX_INPUT_4_VOLUME 0x9CF
#define MADERA_DSP3AUX1MIX_INPUT_1_SOURCE 0x9D0
#define MADERA_DSP3AUX2MIX_INPUT_1_SOURCE 0x9D8
#define MADERA_DSP3AUX3MIX_INPUT_1_SOURCE 0x9E0
#define MADERA_DSP3AUX4MIX_INPUT_1_SOURCE 0x9E8
#define MADERA_DSP3AUX5MIX_INPUT_1_SOURCE 0x9F0
#define MADERA_DSP3AUX6MIX_INPUT_1_SOURCE 0x9F8
#define MADERA_DSP4LMIX_INPUT_1_SOURCE 0xA00
#define MADERA_DSP4LMIX_INPUT_1_VOLUME 0xA01
#define MADERA_DSP4LMIX_INPUT_2_SOURCE 0xA02
#define MADERA_DSP4LMIX_INPUT_2_VOLUME 0xA03
#define MADERA_DSP4LMIX_INPUT_3_SOURCE 0xA04
#define MADERA_DSP4LMIX_INPUT_3_VOLUME 0xA05
#define MADERA_DSP4LMIX_INPUT_4_SOURCE 0xA06
#define MADERA_DSP4LMIX_INPUT_4_VOLUME 0xA07
#define MADERA_DSP4RMIX_INPUT_1_SOURCE 0xA08
#define MADERA_DSP4RMIX_INPUT_1_VOLUME 0xA09
#define MADERA_DSP4RMIX_INPUT_2_SOURCE 0xA0A
#define MADERA_DSP4RMIX_INPUT_2_VOLUME 0xA0B
#define MADERA_DSP4RMIX_INPUT_3_SOURCE 0xA0C
#define MADERA_DSP4RMIX_INPUT_3_VOLUME 0xA0D
#define MADERA_DSP4RMIX_INPUT_4_SOURCE 0xA0E
#define MADERA_DSP4RMIX_INPUT_4_VOLUME 0xA0F
#define MADERA_DSP4AUX1MIX_INPUT_1_SOURCE 0xA10
#define MADERA_DSP4AUX2MIX_INPUT_1_SOURCE 0xA18
#define MADERA_DSP4AUX3MIX_INPUT_1_SOURCE 0xA20
#define MADERA_DSP4AUX4MIX_INPUT_1_SOURCE 0xA28
#define MADERA_DSP4AUX5MIX_INPUT_1_SOURCE 0xA30
#define MADERA_DSP4AUX6MIX_INPUT_1_SOURCE 0xA38
#define MADERA_DSP5LMIX_INPUT_1_SOURCE 0xA40
#define MADERA_DSP5LMIX_INPUT_1_VOLUME 0xA41
#define MADERA_DSP5LMIX_INPUT_2_SOURCE 0xA42
#define MADERA_DSP5LMIX_INPUT_2_VOLUME 0xA43
#define MADERA_DSP5LMIX_INPUT_3_SOURCE 0xA44
#define MADERA_DSP5LMIX_INPUT_3_VOLUME 0xA45
#define MADERA_DSP5LMIX_INPUT_4_SOURCE 0xA46
#define MADERA_DSP5LMIX_INPUT_4_VOLUME 0xA47
#define MADERA_DSP5RMIX_INPUT_1_SOURCE 0xA48
#define MADERA_DSP5RMIX_INPUT_1_VOLUME 0xA49
#define MADERA_DSP5RMIX_INPUT_2_SOURCE 0xA4A
#define MADERA_DSP5RMIX_INPUT_2_VOLUME 0xA4B
#define MADERA_DSP5RMIX_INPUT_3_SOURCE 0xA4C
#define MADERA_DSP5RMIX_INPUT_3_VOLUME 0xA4D
#define MADERA_DSP5RMIX_INPUT_4_SOURCE 0xA4E
#define MADERA_DSP5RMIX_INPUT_4_VOLUME 0xA4F
#define MADERA_DSP5AUX1MIX_INPUT_1_SOURCE 0xA50
#define MADERA_DSP5AUX2MIX_INPUT_1_SOURCE 0xA58
#define MADERA_DSP5AUX3MIX_INPUT_1_SOURCE 0xA60
#define MADERA_DSP5AUX4MIX_INPUT_1_SOURCE 0xA68
#define MADERA_DSP5AUX5MIX_INPUT_1_SOURCE 0xA70
#define MADERA_DSP5AUX6MIX_INPUT_1_SOURCE 0xA78
#define MADERA_ASRC1_1LMIX_INPUT_1_SOURCE 0xA80
#define MADERA_ASRC1_1RMIX_INPUT_1_SOURCE 0xA88
#define MADERA_ASRC1_2LMIX_INPUT_1_SOURCE 0xA90
#define MADERA_ASRC1_2RMIX_INPUT_1_SOURCE 0xA98
#define MADERA_ASRC2_1LMIX_INPUT_1_SOURCE 0xAA0
#define MADERA_ASRC2_1RMIX_INPUT_1_SOURCE 0xAA8
#define MADERA_ASRC2_2LMIX_INPUT_1_SOURCE 0xAB0
#define MADERA_ASRC2_2RMIX_INPUT_1_SOURCE 0xAB8
#define MADERA_ISRC1DEC1MIX_INPUT_1_SOURCE 0xB00
#define MADERA_ISRC1DEC2MIX_INPUT_1_SOURCE 0xB08
#define MADERA_ISRC1DEC3MIX_INPUT_1_SOURCE 0xB10
#define MADERA_ISRC1DEC4MIX_INPUT_1_SOURCE 0xB18
#define MADERA_ISRC1INT1MIX_INPUT_1_SOURCE 0xB20
#define MADERA_ISRC1INT2MIX_INPUT_1_SOURCE 0xB28
#define MADERA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30
#define MADERA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38
#define MADERA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40
#define MADERA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48
#define MADERA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50
#define MADERA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58
#define MADERA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60
#define MADERA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68
#define MADERA_ISRC2INT3MIX_INPUT_1_SOURCE 0xB70
#define MADERA_ISRC2INT4MIX_INPUT_1_SOURCE 0xB78
#define MADERA_ISRC3DEC1MIX_INPUT_1_SOURCE 0xB80
#define MADERA_ISRC3DEC2MIX_INPUT_1_SOURCE 0xB88
#define MADERA_ISRC3DEC3MIX_INPUT_1_SOURCE 0xB90
#define MADERA_ISRC3DEC4MIX_INPUT_1_SOURCE 0xB98
#define MADERA_ISRC3INT1MIX_INPUT_1_SOURCE 0xBA0
#define MADERA_ISRC3INT2MIX_INPUT_1_SOURCE 0xBA8
#define MADERA_ISRC3INT3MIX_INPUT_1_SOURCE 0xBB0
#define MADERA_ISRC3INT4MIX_INPUT_1_SOURCE 0xBB8
#define MADERA_ISRC4DEC1MIX_INPUT_1_SOURCE 0xBC0
#define MADERA_ISRC4DEC2MIX_INPUT_1_SOURCE 0xBC8
#define MADERA_ISRC4INT1MIX_INPUT_1_SOURCE 0xBE0
#define MADERA_ISRC4INT2MIX_INPUT_1_SOURCE 0xBE8
#define MADERA_DSP6LMIX_INPUT_1_SOURCE 0xC00
#define MADERA_DSP6LMIX_INPUT_1_VOLUME 0xC01
#define MADERA_DSP6LMIX_INPUT_2_SOURCE 0xC02
#define MADERA_DSP6LMIX_INPUT_2_VOLUME 0xC03
#define MADERA_DSP6LMIX_INPUT_3_SOURCE 0xC04
#define MADERA_DSP6LMIX_INPUT_3_VOLUME 0xC05
#define MADERA_DSP6LMIX_INPUT_4_SOURCE 0xC06
#define MADERA_DSP6LMIX_INPUT_4_VOLUME 0xC07
#define MADERA_DSP6RMIX_INPUT_1_SOURCE 0xC08
#define MADERA_DSP6RMIX_INPUT_1_VOLUME 0xC09
#define MADERA_DSP6RMIX_INPUT_2_SOURCE 0xC0A
#define MADERA_DSP6RMIX_INPUT_2_VOLUME 0xC0B
#define MADERA_DSP6RMIX_INPUT_3_SOURCE 0xC0C
#define MADERA_DSP6RMIX_INPUT_3_VOLUME 0xC0D
#define MADERA_DSP6RMIX_INPUT_4_SOURCE 0xC0E
#define MADERA_DSP6RMIX_INPUT_4_VOLUME 0xC0F
#define MADERA_DSP6AUX1MIX_INPUT_1_SOURCE 0xC10
#define MADERA_DSP6AUX2MIX_INPUT_1_SOURCE 0xC18
#define MADERA_DSP6AUX3MIX_INPUT_1_SOURCE 0xC20
#define MADERA_DSP6AUX4MIX_INPUT_1_SOURCE 0xC28
#define MADERA_DSP6AUX5MIX_INPUT_1_SOURCE 0xC30
#define MADERA_DSP6AUX6MIX_INPUT_1_SOURCE 0xC38
#define MADERA_DSP7LMIX_INPUT_1_SOURCE 0xC40
#define MADERA_DSP7LMIX_INPUT_1_VOLUME 0xC41
#define MADERA_DSP7LMIX_INPUT_2_SOURCE 0xC42
#define MADERA_DSP7LMIX_INPUT_2_VOLUME 0xC43
#define MADERA_DSP7LMIX_INPUT_3_SOURCE 0xC44
#define MADERA_DSP7LMIX_INPUT_3_VOLUME 0xC45
#define MADERA_DSP7LMIX_INPUT_4_SOURCE 0xC46
#define MADERA_DSP7LMIX_INPUT_4_VOLUME 0xC47
#define MADERA_DSP7RMIX_INPUT_1_SOURCE 0xC48
#define MADERA_DSP7RMIX_INPUT_1_VOLUME 0xC49
#define MADERA_DSP7RMIX_INPUT_2_SOURCE 0xC4A
#define MADERA_DSP7RMIX_INPUT_2_VOLUME 0xC4B
#define MADERA_DSP7RMIX_INPUT_3_SOURCE 0xC4C
#define MADERA_DSP7RMIX_INPUT_3_VOLUME 0xC4D
#define MADERA_DSP7RMIX_INPUT_4_SOURCE 0xC4E
#define MADERA_DSP7RMIX_INPUT_4_VOLUME 0xC4F
#define MADERA_DSP7AUX1MIX_INPUT_1_SOURCE 0xC50
#define MADERA_DSP7AUX2MIX_INPUT_1_SOURCE 0xC58
#define MADERA_DSP7AUX3MIX_INPUT_1_SOURCE 0xC60
#define MADERA_DSP7AUX4MIX_INPUT_1_SOURCE 0xC68
#define MADERA_DSP7AUX5MIX_INPUT_1_SOURCE 0xC70
#define MADERA_DSP7AUX6MIX_INPUT_1_SOURCE 0xC78
#define MADERA_DFC1MIX_INPUT_1_SOURCE 0xDC0
#define MADERA_DFC2MIX_INPUT_1_SOURCE 0xDC8
#define MADERA_DFC3MIX_INPUT_1_SOURCE 0xDD0
#define MADERA_DFC4MIX_INPUT_1_SOURCE 0xDD8
#define MADERA_DFC5MIX_INPUT_1_SOURCE 0xDE0
#define MADERA_DFC6MIX_INPUT_1_SOURCE 0xDE8
#define MADERA_DFC7MIX_INPUT_1_SOURCE 0xDF0
#define MADERA_DFC8MIX_INPUT_1_SOURCE 0xDF8
#define MADERA_FX_CTRL1 0xE00
#define MADERA_FX_CTRL2 0xE01
#define MADERA_EQ1_1 0xE10
#define MADERA_EQ1_2 0xE11
#define MADERA_EQ1_21 0xE24
#define MADERA_EQ2_1 0xE26
#define MADERA_EQ2_2 0xE27
#define MADERA_EQ2_21 0xE3A
#define MADERA_EQ3_1 0xE3C
#define MADERA_EQ3_2 0xE3D
#define MADERA_EQ3_21 0xE50
#define MADERA_EQ4_1 0xE52
#define MADERA_EQ4_2 0xE53
#define MADERA_EQ4_21 0xE66
#define MADERA_DRC1_CTRL1 0xE80
#define MADERA_DRC1_CTRL2 0xE81
#define MADERA_DRC1_CTRL3 0xE82
#define MADERA_DRC1_CTRL4 0xE83
#define MADERA_DRC1_CTRL5 0xE84
#define MADERA_DRC2_CTRL1 0xE88
#define MADERA_DRC2_CTRL2 0xE89
#define MADERA_DRC2_CTRL3 0xE8A
#define MADERA_DRC2_CTRL4 0xE8B
#define MADERA_DRC2_CTRL5 0xE8C
#define MADERA_HPLPF1_1 0xEC0
#define MADERA_HPLPF1_2 0xEC1
#define MADERA_HPLPF2_1 0xEC4
#define MADERA_HPLPF2_2 0xEC5
#define MADERA_HPLPF3_1 0xEC8
#define MADERA_HPLPF3_2 0xEC9
#define MADERA_HPLPF4_1 0xECC
#define MADERA_HPLPF4_2 0xECD
#define MADERA_ASRC2_ENABLE 0xED0
#define MADERA_ASRC2_STATUS 0xED1
#define MADERA_ASRC2_RATE1 0xED2
#define MADERA_ASRC2_RATE2 0xED3
#define MADERA_ASRC1_ENABLE 0xEE0
#define MADERA_ASRC1_STATUS 0xEE1
#define MADERA_ASRC1_RATE1 0xEE2
#define MADERA_ASRC1_RATE2 0xEE3
#define MADERA_ISRC_1_CTRL_1 0xEF0
#define MADERA_ISRC_1_CTRL_2 0xEF1
#define MADERA_ISRC_1_CTRL_3 0xEF2
#define MADERA_ISRC_2_CTRL_1 0xEF3
#define MADERA_ISRC_2_CTRL_2 0xEF4
#define MADERA_ISRC_2_CTRL_3 0xEF5
#define MADERA_ISRC_3_CTRL_1 0xEF6
#define MADERA_ISRC_3_CTRL_2 0xEF7
#define MADERA_ISRC_3_CTRL_3 0xEF8
#define MADERA_ISRC_4_CTRL_1 0xEF9
#define MADERA_ISRC_4_CTRL_2 0xEFA
#define MADERA_ISRC_4_CTRL_3 0xEFB
#define MADERA_CLOCK_CONTROL 0xF00
#define MADERA_ANC_SRC 0xF01
#define MADERA_DSP_STATUS 0xF02
#define MADERA_ANC_COEFF_START 0xF08
#define MADERA_ANC_COEFF_END 0xF12
#define MADERA_FCL_FILTER_CONTROL 0xF15
#define MADERA_FCL_ADC_REFORMATTER_CONTROL 0xF17
#define MADERA_FCL_COEFF_START 0xF18
#define MADERA_FCL_COEFF_END 0xF69
#define MADERA_FCR_FILTER_CONTROL 0xF71
#define MADERA_FCR_ADC_REFORMATTER_CONTROL 0xF73
#define MADERA_FCR_COEFF_START 0xF74
#define MADERA_FCR_COEFF_END 0xFC5
#define MADERA_AUXPDM1_CTRL_0 0x10C0
#define MADERA_AUXPDM1_CTRL_1 0x10C1
#define MADERA_DFC1_CTRL 0x1480
#define MADERA_DFC1_RX 0x1482
#define MADERA_DFC1_TX 0x1484
#define MADERA_DFC2_CTRL 0x1486
#define MADERA_DFC2_RX 0x1488
#define MADERA_DFC2_TX 0x148A
#define MADERA_DFC3_CTRL 0x148C
#define MADERA_DFC3_RX 0x148E
#define MADERA_DFC3_TX 0x1490
#define MADERA_DFC4_CTRL 0x1492
#define MADERA_DFC4_RX 0x1494
#define MADERA_DFC4_TX 0x1496
#define MADERA_DFC5_CTRL 0x1498
#define MADERA_DFC5_RX 0x149A
#define MADERA_DFC5_TX 0x149C
#define MADERA_DFC6_CTRL 0x149E
#define MADERA_DFC6_RX 0x14A0
#define MADERA_DFC6_TX 0x14A2
#define MADERA_DFC7_CTRL 0x14A4
#define MADERA_DFC7_RX 0x14A6
#define MADERA_DFC7_TX 0x14A8
#define MADERA_DFC8_CTRL 0x14AA
#define MADERA_DFC8_RX 0x14AC
#define MADERA_DFC8_TX 0x14AE
#define MADERA_DFC_STATUS 0x14B6
#define MADERA_ADSP2_IRQ0 0x1600
#define MADERA_ADSP2_IRQ1 0x1601
#define MADERA_ADSP2_IRQ2 0x1602
#define MADERA_ADSP2_IRQ3 0x1603
#define MADERA_ADSP2_IRQ4 0x1604
#define MADERA_ADSP2_IRQ5 0x1605
#define MADERA_ADSP2_IRQ6 0x1606
#define MADERA_ADSP2_IRQ7 0x1607
#define MADERA_GPIO1_CTRL_1 0x1700
#define MADERA_GPIO1_CTRL_2 0x1701
#define MADERA_GPIO2_CTRL_1 0x1702
#define MADERA_GPIO2_CTRL_2 0x1703
#define MADERA_GPIO15_CTRL_1 0x171C
#define MADERA_GPIO15_CTRL_2 0x171D
#define MADERA_GPIO16_CTRL_1 0x171E
#define MADERA_GPIO16_CTRL_2 0x171F
#define MADERA_GPIO38_CTRL_1 0x174A
#define MADERA_GPIO38_CTRL_2 0x174B
#define MADERA_GPIO40_CTRL_1 0x174E
#define MADERA_GPIO40_CTRL_2 0x174F
#define MADERA_IRQ1_STATUS_1 0x1800
#define MADERA_IRQ1_STATUS_2 0x1801
#define MADERA_IRQ1_STATUS_6 0x1805
#define MADERA_IRQ1_STATUS_7 0x1806
#define MADERA_IRQ1_STATUS_9 0x1808
#define MADERA_IRQ1_STATUS_11 0x180A
#define MADERA_IRQ1_STATUS_12 0x180B
#define MADERA_IRQ1_STATUS_15 0x180E
#define MADERA_IRQ1_STATUS_33 0x1820
#define MADERA_IRQ1_MASK_1 0x1840
#define MADERA_IRQ1_MASK_2 0x1841
#define MADERA_IRQ1_MASK_6 0x1845
#define MADERA_IRQ1_MASK_33 0x1860
#define MADERA_IRQ1_RAW_STATUS_1 0x1880
#define MADERA_IRQ1_RAW_STATUS_2 0x1881
#define MADERA_IRQ1_RAW_STATUS_7 0x1886
#define MADERA_IRQ1_RAW_STATUS_15 0x188E
#define MADERA_IRQ1_RAW_STATUS_33 0x18A0
#define MADERA_INTERRUPT_DEBOUNCE_7 0x1A06
#define MADERA_INTERRUPT_DEBOUNCE_15 0x1A0E
#define MADERA_IRQ1_CTRL 0x1A80
#define MADERA_IRQ2_CTRL 0x1A82
#define MADERA_INTERRUPT_RAW_STATUS_1 0x1AA0
#define MADERA_WSEQ_SEQUENCE_1 0x3000
#define MADERA_WSEQ_SEQUENCE_225 0x31C0
#define MADERA_WSEQ_SEQUENCE_252 0x31F6
#define CS47L35_OTP_HPDET_CAL_1 0x31F8
#define CS47L35_OTP_HPDET_CAL_2 0x31FA
#define MADERA_WSEQ_SEQUENCE_508 0x33F6
#define CS47L85_OTP_HPDET_CAL_1 0x33F8
#define CS47L85_OTP_HPDET_CAL_2 0x33FA
#define MADERA_OTP_HPDET_CAL_1 0x20004
#define MADERA_OTP_HPDET_CAL_2 0x20006
#define MADERA_DSP1_CONFIG_1 0x0FFE00
#define MADERA_DSP1_CONFIG_2 0x0FFE02
#define MADERA_DSP1_SCRATCH_1 0x0FFE40
#define MADERA_DSP1_SCRATCH_2 0x0FFE42
#define MADERA_DSP1_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0xFFE7C
#define MADERA_DSP2_CONFIG_1 0x17FE00
#define MADERA_DSP2_CONFIG_2 0x17FE02
#define MADERA_DSP2_SCRATCH_1 0x17FE40
#define MADERA_DSP2_SCRATCH_2 0x17FE42
#define MADERA_DSP2_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x17FE7C
#define MADERA_DSP3_CONFIG_1 0x1FFE00
#define MADERA_DSP3_CONFIG_2 0x1FFE02
#define MADERA_DSP3_SCRATCH_1 0x1FFE40
#define MADERA_DSP3_SCRATCH_2 0x1FFE42
#define MADERA_DSP3_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x1FFE7C
#define MADERA_DSP4_CONFIG_1 0x27FE00
#define MADERA_DSP4_CONFIG_2 0x27FE02
#define MADERA_DSP4_SCRATCH_1 0x27FE40
#define MADERA_DSP4_SCRATCH_2 0x27FE42
#define MADERA_DSP4_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x27FE7C
#define MADERA_DSP5_CONFIG_1 0x2FFE00
#define MADERA_DSP5_CONFIG_2 0x2FFE02
#define MADERA_DSP5_SCRATCH_1 0x2FFE40
#define MADERA_DSP5_SCRATCH_2 0x2FFE42
#define MADERA_DSP5_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x2FFE7C
#define MADERA_DSP6_CONFIG_1 0x37FE00
#define MADERA_DSP6_CONFIG_2 0x37FE02
#define MADERA_DSP6_SCRATCH_1 0x37FE40
#define MADERA_DSP6_SCRATCH_2 0x37FE42
#define MADERA_DSP6_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x37FE7C
#define MADERA_DSP7_CONFIG_1 0x3FFE00
#define MADERA_DSP7_CONFIG_2 0x3FFE02
#define MADERA_DSP7_SCRATCH_1 0x3FFE40
#define MADERA_DSP7_SCRATCH_2 0x3FFE42
#define MADERA_DSP7_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x3FFE7C

/* (0x0000) Software_Reset */
#define MADERA_SW_RST_DEV_ID1_MASK 0xFFFF
#define MADERA_SW_RST_DEV_ID1_SHIFT 0
#define MADERA_SW_RST_DEV_ID1_WIDTH 16

/* (0x0001) Hardware_Revision */
#define MADERA_HW_REVISION_MASK 0x00FF
#define MADERA_HW_REVISION_SHIFT 0
#define MADERA_HW_REVISION_WIDTH 8

/* (0x0020) Tone_Generator_1 */
#define MADERA_TONE2_ENA 0x0002
#define MADERA_TONE2_ENA_MASK 0x0002
#define MADERA_TONE2_ENA_SHIFT 1
#define MADERA_TONE2_ENA_WIDTH 1
#define MADERA_TONE1_ENA 0x0001
#define MADERA_TONE1_ENA_MASK 0x0001
#define MADERA_TONE1_ENA_SHIFT 0
#define MADERA_TONE1_ENA_WIDTH 1

/* (0x0021) Tone_Generator_2 */
#define MADERA_TONE1_LVL_0_MASK 0xFFFF
#define MADERA_TONE1_LVL_0_SHIFT 0
#define MADERA_TONE1_LVL_0_WIDTH 16

/* (0x0022) Tone_Generator_3 */
#define MADERA_TONE1_LVL_MASK 0x00FF
#define MADERA_TONE1_LVL_SHIFT 0
#define MADERA_TONE1_LVL_WIDTH 8

/* (0x0023) Tone_Generator_4 */
#define MADERA_TONE2_LVL_0_MASK 0xFFFF
#define MADERA_TONE2_LVL_0_SHIFT 0
#define MADERA_TONE2_LVL_0_WIDTH 16

/* (0x0024) Tone_Generator_5 */
#define MADERA_TONE2_LVL_MASK 0x00FF
#define MADERA_TONE2_LVL_SHIFT 0
#define MADERA_TONE2_LVL_WIDTH 8

/* (0x0030) PWM_Drive_1 */
#define MADERA_PWM2_ENA 0x0002
#define MADERA_PWM2_ENA_MASK 0x0002
#define MADERA_PWM2_ENA_SHIFT 1
#define MADERA_PWM2_ENA_WIDTH 1
#define MADERA_PWM1_ENA 0x0001
#define MADERA_PWM1_ENA_MASK 0x0001
#define MADERA_PWM1_ENA_SHIFT 0
#define MADERA_PWM1_ENA_WIDTH 1

/* (0x00A0) Comfort_Noise_Generator */
#define MADERA_NOISE_GEN_ENA 0x0020
#define MADERA_NOISE_GEN_ENA_MASK 0x0020
#define MADERA_NOISE_GEN_ENA_SHIFT 5
#define MADERA_NOISE_GEN_ENA_WIDTH 1
#define MADERA_NOISE_GEN_GAIN_MASK 0x001F
#define MADERA_NOISE_GEN_GAIN_SHIFT 0
#define MADERA_NOISE_GEN_GAIN_WIDTH 5

/* (0x0100) Clock_32k_1 */
#define MADERA_CLK_32K_ENA 0x0040
#define MADERA_CLK_32K_ENA_MASK 0x0040
#define MADERA_CLK_32K_ENA_SHIFT 6
#define MADERA_CLK_32K_ENA_WIDTH 1
#define MADERA_CLK_32K_SRC_MASK 0x0003
#define MADERA_CLK_32K_SRC_SHIFT 0
#define MADERA_CLK_32K_SRC_WIDTH 2

/* (0x0101) System_Clock_1 */
#define MADERA_SYSCLK_FRAC 0x8000
#define MADERA_SYSCLK_FRAC_MASK 0x8000
#define MADERA_SYSCLK_FRAC_SHIFT 15
#define MADERA_SYSCLK_FRAC_WIDTH 1
#define MADERA_SYSCLK_FREQ_MASK 0x0700
#define MADERA_SYSCLK_FREQ_SHIFT 8
#define MADERA_SYSCLK_FREQ_WIDTH 3
#define MADERA_SYSCLK_ENA 0x0040
#define MADERA_SYSCLK_ENA_MASK 0x0040
#define MADERA_SYSCLK_ENA_SHIFT 6
#define MADERA_SYSCLK_ENA_WIDTH 1
#define MADERA_SYSCLK_SRC_MASK 0x000F
#define MADERA_SYSCLK_SRC_SHIFT 0
#define MADERA_SYSCLK_SRC_WIDTH 4

/* (0x0102) Sample_rate_1 */
#define MADERA_SAMPLE_RATE_1_MASK 0x001F
#define MADERA_SAMPLE_RATE_1_SHIFT 0
#define MADERA_SAMPLE_RATE_1_WIDTH 5

/* (0x0103) Sample_rate_2 */
#define MADERA_SAMPLE_RATE_2_MASK 0x001F
#define MADERA_SAMPLE_RATE_2_SHIFT 0
#define MADERA_SAMPLE_RATE_2_WIDTH 5
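/*
 * Editor's note (sketch, not part of the original header): the field defines
 * in this header follow a uniform _MASK/_SHIFT/_WIDTH convention, so any
 * field can be unpacked from a raw register value generically.  For example,
 * given a raw 16-bit value `val` read from Sample_rate_1:
 *
 *	unsigned int rate = (val & MADERA_SAMPLE_RATE_1_MASK) >>
 *			    MADERA_SAMPLE_RATE_1_SHIFT;
 *
 * The _WIDTH define gives the field size in bits, so the largest legal code
 * is (1 << MADERA_SAMPLE_RATE_1_WIDTH) - 1.
 */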
/* (0x0104) Sample_rate_3 */
#define MADERA_SAMPLE_RATE_3_MASK 0x001F
#define MADERA_SAMPLE_RATE_3_SHIFT 0
#define MADERA_SAMPLE_RATE_3_WIDTH 5

/* (0x0112) Async_clock_1 */
#define MADERA_ASYNC_CLK_FREQ_MASK 0x0700
#define MADERA_ASYNC_CLK_FREQ_SHIFT 8
#define MADERA_ASYNC_CLK_FREQ_WIDTH 3
#define MADERA_ASYNC_CLK_ENA 0x0040
#define MADERA_ASYNC_CLK_ENA_MASK 0x0040
#define MADERA_ASYNC_CLK_ENA_SHIFT 6
#define MADERA_ASYNC_CLK_ENA_WIDTH 1
#define MADERA_ASYNC_CLK_SRC_MASK 0x000F
#define MADERA_ASYNC_CLK_SRC_SHIFT 0
#define MADERA_ASYNC_CLK_SRC_WIDTH 4

/* (0x0113) Async_sample_rate_1 */
#define MADERA_ASYNC_SAMPLE_RATE_1_MASK 0x001F
#define MADERA_ASYNC_SAMPLE_RATE_1_SHIFT 0
#define MADERA_ASYNC_SAMPLE_RATE_1_WIDTH 5

/* (0x0114) Async_sample_rate_2 */
#define MADERA_ASYNC_SAMPLE_RATE_2_MASK 0x001F
#define MADERA_ASYNC_SAMPLE_RATE_2_SHIFT 0
#define MADERA_ASYNC_SAMPLE_RATE_2_WIDTH 5

/* (0x0120) DSP_Clock_1 */
#define MADERA_DSP_CLK_FREQ_LEGACY 0x0700
#define MADERA_DSP_CLK_FREQ_LEGACY_MASK 0x0700
#define MADERA_DSP_CLK_FREQ_LEGACY_SHIFT 8
#define MADERA_DSP_CLK_FREQ_LEGACY_WIDTH 3
#define MADERA_DSP_CLK_ENA 0x0040
#define MADERA_DSP_CLK_ENA_MASK 0x0040
#define MADERA_DSP_CLK_ENA_SHIFT 6
#define MADERA_DSP_CLK_ENA_WIDTH 1
#define MADERA_DSP_CLK_SRC 0x000F
#define MADERA_DSP_CLK_SRC_MASK 0x000F
#define MADERA_DSP_CLK_SRC_SHIFT 0
#define MADERA_DSP_CLK_SRC_WIDTH 4

/* (0x0122) DSP_Clock_2 */
#define MADERA_DSP_CLK_FREQ_MASK 0x03FF
#define MADERA_DSP_CLK_FREQ_SHIFT 0
#define MADERA_DSP_CLK_FREQ_WIDTH 10

/* (0x0149) Output_system_clock */
#define MADERA_OPCLK_ENA 0x8000
#define MADERA_OPCLK_ENA_MASK 0x8000
#define MADERA_OPCLK_ENA_SHIFT 15
#define MADERA_OPCLK_ENA_WIDTH 1
#define MADERA_OPCLK_DIV_MASK 0x00F8
#define MADERA_OPCLK_DIV_SHIFT 3
#define MADERA_OPCLK_DIV_WIDTH 5
#define MADERA_OPCLK_SEL_MASK 0x0007
#define MADERA_OPCLK_SEL_SHIFT 0
#define MADERA_OPCLK_SEL_WIDTH 3

/* (0x014A) Output_async_clock */
#define MADERA_OPCLK_ASYNC_ENA 0x8000
#define MADERA_OPCLK_ASYNC_ENA_MASK 0x8000
#define MADERA_OPCLK_ASYNC_ENA_SHIFT 15
#define MADERA_OPCLK_ASYNC_ENA_WIDTH 1
#define MADERA_OPCLK_ASYNC_DIV_MASK 0x00F8
#define MADERA_OPCLK_ASYNC_DIV_SHIFT 3
#define MADERA_OPCLK_ASYNC_DIV_WIDTH 5
#define MADERA_OPCLK_ASYNC_SEL_MASK 0x0007
#define MADERA_OPCLK_ASYNC_SEL_SHIFT 0
#define MADERA_OPCLK_ASYNC_SEL_WIDTH 3

/* (0x0171) FLL1_Control_1 */
#define CS47L92_FLL1_REFCLK_SRC_MASK 0xF000
#define CS47L92_FLL1_REFCLK_SRC_SHIFT 12
#define CS47L92_FLL1_REFCLK_SRC_WIDTH 4
#define MADERA_FLL1_HOLD_MASK 0x0004
#define MADERA_FLL1_HOLD_SHIFT 2
#define MADERA_FLL1_HOLD_WIDTH 1
#define MADERA_FLL1_FREERUN 0x0002
#define MADERA_FLL1_FREERUN_MASK 0x0002
#define MADERA_FLL1_FREERUN_SHIFT 1
#define MADERA_FLL1_FREERUN_WIDTH 1
#define MADERA_FLL1_ENA 0x0001
#define MADERA_FLL1_ENA_MASK 0x0001
#define MADERA_FLL1_ENA_SHIFT 0
#define MADERA_FLL1_ENA_WIDTH 1

/* (0x0172) FLL1_Control_2 */
#define MADERA_FLL1_CTRL_UPD 0x8000
#define MADERA_FLL1_CTRL_UPD_MASK 0x8000
#define MADERA_FLL1_CTRL_UPD_SHIFT 15
#define MADERA_FLL1_CTRL_UPD_WIDTH 1
#define MADERA_FLL1_N_MASK 0x03FF
#define MADERA_FLL1_N_SHIFT 0
#define MADERA_FLL1_N_WIDTH 10

/* (0x0173) FLL1_Control_3 */
#define MADERA_FLL1_THETA_MASK 0xFFFF
#define MADERA_FLL1_THETA_SHIFT 0
#define MADERA_FLL1_THETA_WIDTH 16

/* (0x0174) FLL1_Control_4 */
#define MADERA_FLL1_LAMBDA_MASK 0xFFFF
#define MADERA_FLL1_LAMBDA_SHIFT 0
#define MADERA_FLL1_LAMBDA_WIDTH 16

/* (0x0175) FLL1_Control_5 */
#define MADERA_FLL1_FRATIO_MASK 0x0F00
#define MADERA_FLL1_FRATIO_SHIFT 8
#define MADERA_FLL1_FRATIO_WIDTH 4
#define MADERA_FLL1_FB_DIV_MASK 0x03FF
#define MADERA_FLL1_FB_DIV_SHIFT 0
#define MADERA_FLL1_FB_DIV_WIDTH 10

/* (0x0176) FLL1_Control_6 */
#define MADERA_FLL1_REFCLK_DIV_MASK 0x00C0
#define MADERA_FLL1_REFCLK_DIV_SHIFT 6
#define MADERA_FLL1_REFCLK_DIV_WIDTH 2
#define MADERA_FLL1_REFCLK_SRC_MASK 0x000F
#define MADERA_FLL1_REFCLK_SRC_SHIFT 0
#define MADERA_FLL1_REFCLK_SRC_WIDTH 4

/* (0x0179) FLL1_Control_7 */
#define MADERA_FLL1_GAIN_MASK 0x003c
#define MADERA_FLL1_GAIN_SHIFT 2
#define MADERA_FLL1_GAIN_WIDTH 4

/* (0x017A) FLL1_EFS_2 */
#define MADERA_FLL1_PHASE_GAIN_MASK 0xF000
#define MADERA_FLL1_PHASE_GAIN_SHIFT 12
#define MADERA_FLL1_PHASE_GAIN_WIDTH 4
#define MADERA_FLL1_PHASE_ENA_MASK 0x0800
#define MADERA_FLL1_PHASE_ENA_SHIFT 11
#define MADERA_FLL1_PHASE_ENA_WIDTH 1

/* (0x017A) FLL1_Control_10 */
#define MADERA_FLL1_HP_MASK 0xC000
#define MADERA_FLL1_HP_SHIFT 14
#define MADERA_FLL1_HP_WIDTH 2
#define MADERA_FLL1_PHASEDET_ENA_MASK 0x1000
#define MADERA_FLL1_PHASEDET_ENA_SHIFT 12
#define MADERA_FLL1_PHASEDET_ENA_WIDTH 1

/* (0x017B) FLL1_Control_11 */
#define MADERA_FLL1_LOCKDET_THR_MASK 0x001E
#define MADERA_FLL1_LOCKDET_THR_SHIFT 1
#define MADERA_FLL1_LOCKDET_THR_WIDTH 4
#define MADERA_FLL1_LOCKDET_MASK 0x0001
#define MADERA_FLL1_LOCKDET_SHIFT 0
#define MADERA_FLL1_LOCKDET_WIDTH 1

/* (0x017D) FLL1_Digital_Test_1 */
#define MADERA_FLL1_SYNC_EFS_ENA_MASK 0x0100
#define MADERA_FLL1_SYNC_EFS_ENA_SHIFT 8
#define MADERA_FLL1_SYNC_EFS_ENA_WIDTH 1
#define MADERA_FLL1_CLK_VCO_FAST_SRC_MASK 0x0003
#define MADERA_FLL1_CLK_VCO_FAST_SRC_SHIFT 0
#define MADERA_FLL1_CLK_VCO_FAST_SRC_WIDTH 2

/* (0x0181) FLL1_Synchroniser_1 */
#define MADERA_FLL1_SYNC_ENA 0x0001
#define MADERA_FLL1_SYNC_ENA_MASK 0x0001
#define MADERA_FLL1_SYNC_ENA_SHIFT 0
#define MADERA_FLL1_SYNC_ENA_WIDTH 1

/* (0x0182) FLL1_Synchroniser_2 */
#define MADERA_FLL1_SYNC_N_MASK 0x03FF
#define MADERA_FLL1_SYNC_N_SHIFT 0
#define MADERA_FLL1_SYNC_N_WIDTH 10

/* (0x0183) FLL1_Synchroniser_3 */
#define MADERA_FLL1_SYNC_THETA_MASK 0xFFFF
#define MADERA_FLL1_SYNC_THETA_SHIFT 0
#define MADERA_FLL1_SYNC_THETA_WIDTH 16

/* (0x0184) FLL1_Synchroniser_4 */
#define MADERA_FLL1_SYNC_LAMBDA_MASK 0xFFFF
#define MADERA_FLL1_SYNC_LAMBDA_SHIFT 0
#define MADERA_FLL1_SYNC_LAMBDA_WIDTH 16

/* (0x0185) FLL1_Synchroniser_5 */
#define MADERA_FLL1_SYNC_FRATIO_MASK 0x0700
#define MADERA_FLL1_SYNC_FRATIO_SHIFT 8
#define MADERA_FLL1_SYNC_FRATIO_WIDTH 3

/* (0x0186) FLL1_Synchroniser_6 */
#define MADERA_FLL1_SYNCCLK_DIV_MASK 0x00C0
#define MADERA_FLL1_SYNCCLK_DIV_SHIFT 6
#define MADERA_FLL1_SYNCCLK_DIV_WIDTH 2
#define MADERA_FLL1_SYNCCLK_SRC_MASK 0x000F
#define MADERA_FLL1_SYNCCLK_SRC_SHIFT 0
#define MADERA_FLL1_SYNCCLK_SRC_WIDTH 4

/* (0x0187) FLL1_Synchroniser_7 */
#define MADERA_FLL1_SYNC_GAIN_MASK 0x003c
#define MADERA_FLL1_SYNC_GAIN_SHIFT 2
#define MADERA_FLL1_SYNC_GAIN_WIDTH 4
#define MADERA_FLL1_SYNC_DFSAT 0x0001
#define MADERA_FLL1_SYNC_DFSAT_MASK 0x0001
#define MADERA_FLL1_SYNC_DFSAT_SHIFT 0
#define MADERA_FLL1_SYNC_DFSAT_WIDTH 1

/* (0x01D1) FLL_AO_Control_1 */
#define MADERA_FLL_AO_HOLD 0x0004
#define MADERA_FLL_AO_HOLD_MASK 0x0004
#define MADERA_FLL_AO_HOLD_SHIFT 2
#define MADERA_FLL_AO_HOLD_WIDTH 1
#define MADERA_FLL_AO_FREERUN 0x0002
#define MADERA_FLL_AO_FREERUN_MASK 0x0002
#define MADERA_FLL_AO_FREERUN_SHIFT 1
#define MADERA_FLL_AO_FREERUN_WIDTH 1
#define MADERA_FLL_AO_ENA 0x0001
#define MADERA_FLL_AO_ENA_MASK 0x0001
#define MADERA_FLL_AO_ENA_SHIFT 0
#define MADERA_FLL_AO_ENA_WIDTH 1
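/*
 * Editor's sketch (not part of the original header): single-bit controls such
 * as MADERA_FLL1_ENA are defined both as the bit value and as a _MASK, which
 * lines up with the kernel regmap API.  Assuming `map` is the device regmap
 * and using the FLL1_Control_1 address (0x171) from the comment above:
 *
 *	regmap_update_bits(map, 0x171, MADERA_FLL1_ENA_MASK, MADERA_FLL1_ENA);
 *	regmap_update_bits(map, 0x171, MADERA_FLL1_ENA_MASK, 0);
 *
 * The first call sets the enable bit, the second clears it; other fields in
 * the register are left untouched.
 */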
/* (0x01D2) FLL_AO_Control_2 */
#define MADERA_FLL_AO_CTRL_UPD 0x8000
#define MADERA_FLL_AO_CTRL_UPD_MASK 0x8000
#define MADERA_FLL_AO_CTRL_UPD_SHIFT 15
#define MADERA_FLL_AO_CTRL_UPD_WIDTH 1

/* (0x01D6) FLL_AO_Control_6 */
#define MADERA_FLL_AO_REFCLK_SRC_MASK 0x000F
#define MADERA_FLL_AO_REFCLK_SRC_SHIFT 0
#define MADERA_FLL_AO_REFCLK_SRC_WIDTH 4

/* (0x0200) Mic_Charge_Pump_1 */
#define MADERA_CPMIC_BYPASS 0x0002
#define MADERA_CPMIC_BYPASS_MASK 0x0002
#define MADERA_CPMIC_BYPASS_SHIFT 1
#define MADERA_CPMIC_BYPASS_WIDTH 1
#define MADERA_CPMIC_ENA 0x0001
#define MADERA_CPMIC_ENA_MASK 0x0001
#define MADERA_CPMIC_ENA_SHIFT 0
#define MADERA_CPMIC_ENA_WIDTH 1

/* (0x0210) LDO1_Control_1 */
#define MADERA_LDO1_VSEL_MASK 0x07E0
#define MADERA_LDO1_VSEL_SHIFT 5
#define MADERA_LDO1_VSEL_WIDTH 6
#define MADERA_LDO1_FAST 0x0010
#define MADERA_LDO1_FAST_MASK 0x0010
#define MADERA_LDO1_FAST_SHIFT 4
#define MADERA_LDO1_FAST_WIDTH 1
#define MADERA_LDO1_DISCH 0x0004
#define MADERA_LDO1_DISCH_MASK 0x0004
#define MADERA_LDO1_DISCH_SHIFT 2
#define MADERA_LDO1_DISCH_WIDTH 1
#define MADERA_LDO1_BYPASS 0x0002
#define MADERA_LDO1_BYPASS_MASK 0x0002
#define MADERA_LDO1_BYPASS_SHIFT 1
#define MADERA_LDO1_BYPASS_WIDTH 1
#define MADERA_LDO1_ENA 0x0001
#define MADERA_LDO1_ENA_MASK 0x0001
#define MADERA_LDO1_ENA_SHIFT 0
#define MADERA_LDO1_ENA_WIDTH 1

/* (0x0213) LDO2_Control_1 */
#define MADERA_LDO2_VSEL_MASK 0x07E0
#define MADERA_LDO2_VSEL_SHIFT 5
#define MADERA_LDO2_VSEL_WIDTH 6
#define MADERA_LDO2_FAST 0x0010
#define MADERA_LDO2_FAST_MASK 0x0010
#define MADERA_LDO2_FAST_SHIFT 4
#define MADERA_LDO2_FAST_WIDTH 1
#define MADERA_LDO2_DISCH 0x0004
#define MADERA_LDO2_DISCH_MASK 0x0004
#define MADERA_LDO2_DISCH_SHIFT 2
#define MADERA_LDO2_DISCH_WIDTH 1
#define MADERA_LDO2_BYPASS 0x0002
#define MADERA_LDO2_BYPASS_MASK 0x0002
#define MADERA_LDO2_BYPASS_SHIFT 1
#define MADERA_LDO2_BYPASS_WIDTH 1
#define MADERA_LDO2_ENA 0x0001
#define MADERA_LDO2_ENA_MASK 0x0001
#define MADERA_LDO2_ENA_SHIFT 0
#define MADERA_LDO2_ENA_WIDTH 1

/* (0x0218) Mic_Bias_Ctrl_1 */
#define MADERA_MICB1_EXT_CAP 0x8000
#define MADERA_MICB1_EXT_CAP_MASK 0x8000
#define MADERA_MICB1_EXT_CAP_SHIFT 15
#define MADERA_MICB1_EXT_CAP_WIDTH 1
#define MADERA_MICB1_LVL_MASK 0x01E0
#define MADERA_MICB1_LVL_SHIFT 5
#define MADERA_MICB1_LVL_WIDTH 4
#define MADERA_MICB1_ENA 0x0001
#define MADERA_MICB1_ENA_MASK 0x0001
#define MADERA_MICB1_ENA_SHIFT 0
#define MADERA_MICB1_ENA_WIDTH 1

/* (0x021C) Mic_Bias_Ctrl_5 */
#define MADERA_MICB1D_ENA 0x1000
#define MADERA_MICB1D_ENA_MASK 0x1000
#define MADERA_MICB1D_ENA_SHIFT 12
#define MADERA_MICB1D_ENA_WIDTH 1
#define MADERA_MICB1C_ENA 0x0100
#define MADERA_MICB1C_ENA_MASK 0x0100
#define MADERA_MICB1C_ENA_SHIFT 8
#define MADERA_MICB1C_ENA_WIDTH 1
#define MADERA_MICB1B_ENA 0x0010
#define MADERA_MICB1B_ENA_MASK 0x0010
#define MADERA_MICB1B_ENA_SHIFT 4
#define MADERA_MICB1B_ENA_WIDTH 1
#define MADERA_MICB1A_ENA 0x0001
#define MADERA_MICB1A_ENA_MASK 0x0001
#define MADERA_MICB1A_ENA_SHIFT 0
#define MADERA_MICB1A_ENA_WIDTH 1

/* (0x021E) Mic_Bias_Ctrl_6 */
#define MADERA_MICB2D_ENA 0x1000
#define MADERA_MICB2D_ENA_MASK 0x1000
#define MADERA_MICB2D_ENA_SHIFT 12
#define MADERA_MICB2D_ENA_WIDTH 1
#define MADERA_MICB2C_ENA 0x0100
#define MADERA_MICB2C_ENA_MASK 0x0100
#define MADERA_MICB2C_ENA_SHIFT 8
#define MADERA_MICB2C_ENA_WIDTH 1
#define MADERA_MICB2B_ENA 0x0010
#define MADERA_MICB2B_ENA_MASK 0x0010
#define MADERA_MICB2B_ENA_SHIFT 4
#define MADERA_MICB2B_ENA_WIDTH 1
#define MADERA_MICB2A_ENA 0x0001
#define MADERA_MICB2A_ENA_MASK 0x0001
#define MADERA_MICB2A_ENA_SHIFT 0
#define MADERA_MICB2A_ENA_WIDTH 1

/* (0x0225) - HP Ctrl 1L */
#define MADERA_RMV_SHRT_HP1L 0x4000
#define MADERA_RMV_SHRT_HP1L_MASK 0x4000
#define MADERA_RMV_SHRT_HP1L_SHIFT 14
#define MADERA_RMV_SHRT_HP1L_WIDTH 1
#define MADERA_HP1L_FLWR 0x0004
#define MADERA_HP1L_FLWR_MASK 0x0004
#define MADERA_HP1L_FLWR_SHIFT 2
#define MADERA_HP1L_FLWR_WIDTH 1
#define MADERA_HP1L_SHRTI 0x0002
#define MADERA_HP1L_SHRTI_MASK 0x0002
#define MADERA_HP1L_SHRTI_SHIFT 1
#define MADERA_HP1L_SHRTI_WIDTH 1
#define MADERA_HP1L_SHRTO 0x0001
#define MADERA_HP1L_SHRTO_MASK 0x0001
#define MADERA_HP1L_SHRTO_SHIFT 0
#define MADERA_HP1L_SHRTO_WIDTH 1

/* (0x0226) - HP Ctrl 1R */
#define MADERA_RMV_SHRT_HP1R 0x4000
#define MADERA_RMV_SHRT_HP1R_MASK 0x4000
#define MADERA_RMV_SHRT_HP1R_SHIFT 14
#define MADERA_RMV_SHRT_HP1R_WIDTH 1
#define MADERA_HP1R_FLWR 0x0004
#define MADERA_HP1R_FLWR_MASK 0x0004
#define MADERA_HP1R_FLWR_SHIFT 2
#define MADERA_HP1R_FLWR_WIDTH 1
#define MADERA_HP1R_SHRTI 0x0002
#define MADERA_HP1R_SHRTI_MASK 0x0002
#define MADERA_HP1R_SHRTI_SHIFT 1
#define MADERA_HP1R_SHRTI_WIDTH 1
#define MADERA_HP1R_SHRTO 0x0001
#define MADERA_HP1R_SHRTO_MASK 0x0001
#define MADERA_HP1R_SHRTO_SHIFT 0
#define MADERA_HP1R_SHRTO_WIDTH 1

/* (0x0293) Accessory_Detect_Mode_1 */
#define MADERA_ACCDET_SRC 0x2000
#define MADERA_ACCDET_SRC_MASK 0x2000
#define MADERA_ACCDET_SRC_SHIFT 13
#define MADERA_ACCDET_SRC_WIDTH 1
#define MADERA_ACCDET_POLARITY_INV_ENA 0x0080
#define MADERA_ACCDET_POLARITY_INV_ENA_MASK 0x0080
#define MADERA_ACCDET_POLARITY_INV_ENA_SHIFT 7
#define MADERA_ACCDET_POLARITY_INV_ENA_WIDTH 1
#define MADERA_ACCDET_MODE_MASK 0x0007
#define MADERA_ACCDET_MODE_SHIFT 0
#define MADERA_ACCDET_MODE_WIDTH 3

/* (0x0299) Headphone_Detect_0 */
#define MADERA_HPD_GND_SEL 0x0007
#define MADERA_HPD_GND_SEL_MASK 0x0007
#define MADERA_HPD_GND_SEL_SHIFT 0
#define MADERA_HPD_GND_SEL_WIDTH 3
#define MADERA_HPD_SENSE_SEL 0x00F0
#define MADERA_HPD_SENSE_SEL_MASK 0x00F0
#define MADERA_HPD_SENSE_SEL_SHIFT 4
#define MADERA_HPD_SENSE_SEL_WIDTH 4
#define MADERA_HPD_FRC_SEL 0x0F00
#define MADERA_HPD_FRC_SEL_MASK 0x0F00
#define MADERA_HPD_FRC_SEL_SHIFT 8
#define MADERA_HPD_FRC_SEL_WIDTH 4
#define MADERA_HPD_OUT_SEL 0x7000
#define MADERA_HPD_OUT_SEL_MASK 0x7000
#define MADERA_HPD_OUT_SEL_SHIFT 12
#define MADERA_HPD_OUT_SEL_WIDTH 3
#define MADERA_HPD_OVD_ENA_SEL 0x8000
#define MADERA_HPD_OVD_ENA_SEL_MASK 0x8000
#define MADERA_HPD_OVD_ENA_SEL_SHIFT 15
#define MADERA_HPD_OVD_ENA_SEL_WIDTH 1

/* (0x029B) Headphone_Detect_1 */
#define MADERA_HP_IMPEDANCE_RANGE_MASK 0x0600
#define MADERA_HP_IMPEDANCE_RANGE_SHIFT 9
#define MADERA_HP_IMPEDANCE_RANGE_WIDTH 2
#define MADERA_HP_STEP_SIZE 0x0100
#define MADERA_HP_STEP_SIZE_MASK 0x0100
#define MADERA_HP_STEP_SIZE_SHIFT 8
#define MADERA_HP_STEP_SIZE_WIDTH 1
#define MADERA_HP_CLK_DIV_MASK 0x0018
#define MADERA_HP_CLK_DIV_SHIFT 3
#define MADERA_HP_CLK_DIV_WIDTH 2
#define MADERA_HP_RATE_MASK 0x0006
#define MADERA_HP_RATE_SHIFT 1
#define MADERA_HP_RATE_WIDTH 2
#define MADERA_HP_POLL 0x0001
#define MADERA_HP_POLL_MASK 0x0001
#define MADERA_HP_POLL_SHIFT 0
#define MADERA_HP_POLL_WIDTH 1

/* (0x029C) Headphone_Detect_2 */
#define MADERA_HP_DONE_MASK 0x8000
#define MADERA_HP_DONE_SHIFT 15
#define MADERA_HP_DONE_WIDTH 1
#define MADERA_HP_LVL_MASK 0x7FFF
#define MADERA_HP_LVL_SHIFT 0
#define MADERA_HP_LVL_WIDTH 15

/* (0x029D) Headphone_Detect_3 */
#define MADERA_HP_DACVAL_MASK 0x03FF
#define MADERA_HP_DACVAL_SHIFT 0
#define MADERA_HP_DACVAL_WIDTH 10

/* (0x029F) - Headphone Detect 5 */
#define MADERA_HP_DACVAL_DOWN_MASK 0x03FF
#define MADERA_HP_DACVAL_DOWN_SHIFT 0
#define MADERA_HP_DACVAL_DOWN_WIDTH 10

/* (0x02A2) Mic_Detect_1_Control_0 */
#define MADERA_MICD1_GND_MASK 0x0007
#define MADERA_MICD1_GND_SHIFT 0
#define MADERA_MICD1_GND_WIDTH 3
#define MADERA_MICD1_SENSE_MASK 0x00F0
#define MADERA_MICD1_SENSE_SHIFT 4
#define MADERA_MICD1_SENSE_WIDTH 4
#define MADERA_MICD1_ADC_MODE_MASK 0x8000
#define MADERA_MICD1_ADC_MODE_SHIFT 15
#define MADERA_MICD1_ADC_MODE_WIDTH 1

/* (0x02A3) Mic_Detect_1_Control_1 */
#define MADERA_MICD_BIAS_STARTTIME_MASK 0xF000
#define MADERA_MICD_BIAS_STARTTIME_SHIFT 12
#define MADERA_MICD_BIAS_STARTTIME_WIDTH 4
#define MADERA_MICD_RATE_MASK 0x0F00
#define MADERA_MICD_RATE_SHIFT 8
#define MADERA_MICD_RATE_WIDTH 4
#define MADERA_MICD_BIAS_SRC_MASK 0x00F0
#define MADERA_MICD_BIAS_SRC_SHIFT 4
#define MADERA_MICD_BIAS_SRC_WIDTH 4
#define MADERA_MICD_DBTIME 0x0002
#define MADERA_MICD_DBTIME_MASK 0x0002
#define MADERA_MICD_DBTIME_SHIFT 1
#define MADERA_MICD_DBTIME_WIDTH 1
#define MADERA_MICD_ENA 0x0001
#define MADERA_MICD_ENA_MASK 0x0001
#define MADERA_MICD_ENA_SHIFT 0
#define MADERA_MICD_ENA_WIDTH 1

/* (0x02A4) Mic_Detect_1_Control_2 */
#define MADERA_MICD_LVL_SEL_MASK 0x00FF
#define MADERA_MICD_LVL_SEL_SHIFT 0
#define MADERA_MICD_LVL_SEL_WIDTH 8

/* (0x02A5) Mic_Detect_1_Control_3 */
#define MADERA_MICD_LVL_0 0x0004
#define MADERA_MICD_LVL_1 0x0008
#define MADERA_MICD_LVL_2 0x0010
#define MADERA_MICD_LVL_3 0x0020
#define MADERA_MICD_LVL_4 0x0040
#define MADERA_MICD_LVL_5 0x0080
#define MADERA_MICD_LVL_6 0x0100
#define MADERA_MICD_LVL_7 0x0200
#define MADERA_MICD_LVL_8 0x0400
#define MADERA_MICD_LVL_MASK 0x07FC
#define MADERA_MICD_LVL_SHIFT 2
#define MADERA_MICD_LVL_WIDTH 9
#define MADERA_MICD_VALID 0x0002
#define MADERA_MICD_VALID_MASK 0x0002
#define MADERA_MICD_VALID_SHIFT 1
#define MADERA_MICD_VALID_WIDTH 1
#define MADERA_MICD_STS 0x0001
#define MADERA_MICD_STS_MASK 0x0001
#define MADERA_MICD_STS_SHIFT 0
#define MADERA_MICD_STS_WIDTH 1

/* (0x02AB) Mic_Detect_1_Control_4 */
#define MADERA_MICDET_ADCVAL_DIFF_MASK 0xFF00
#define MADERA_MICDET_ADCVAL_DIFF_SHIFT 8
#define MADERA_MICDET_ADCVAL_DIFF_WIDTH 8
#define MADERA_MICDET_ADCVAL_MASK 0x007F
#define MADERA_MICDET_ADCVAL_SHIFT 0
#define MADERA_MICDET_ADCVAL_WIDTH 7

/* (0x02C6) Micd_Clamp_control */
#define MADERA_MICD_CLAMP_OVD 0x0010
#define MADERA_MICD_CLAMP_OVD_MASK 0x0010
#define MADERA_MICD_CLAMP_OVD_SHIFT 4
#define MADERA_MICD_CLAMP_OVD_WIDTH 1
#define MADERA_MICD_CLAMP_MODE_MASK 0x000F
#define MADERA_MICD_CLAMP_MODE_SHIFT 0
#define MADERA_MICD_CLAMP_MODE_WIDTH 4

/* (0x02C8) GP_Switch_1 */
#define MADERA_SW2_MODE_MASK 0x000C
#define MADERA_SW2_MODE_SHIFT 2
#define MADERA_SW2_MODE_WIDTH 2
#define MADERA_SW1_MODE_MASK 0x0003
#define MADERA_SW1_MODE_SHIFT 0
#define MADERA_SW1_MODE_WIDTH 2

/* (0x02D3) Jack_detect_analogue */
#define MADERA_JD2_ENA 0x0002
#define MADERA_JD2_ENA_MASK 0x0002
#define MADERA_JD2_ENA_SHIFT 1
#define MADERA_JD2_ENA_WIDTH 1
#define MADERA_JD1_ENA 0x0001
#define MADERA_JD1_ENA_MASK 0x0001
#define MADERA_JD1_ENA_SHIFT 0
#define MADERA_JD1_ENA_WIDTH 1
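/*
 * Editor's sketch (not part of the original header): the Headphone_Detect_2
 * fields above (HP_DONE, HP_LVL) suggest a polled measurement read.  Assuming
 * `map` is the device regmap and using the Headphone_Detect_2 address (0x29C)
 * from the comment above:
 *
 *	unsigned int v, lvl;
 *
 *	regmap_read(map, 0x29C, &v);
 *	if (v & MADERA_HP_DONE_MASK)
 *		lvl = (v & MADERA_HP_LVL_MASK) >> MADERA_HP_LVL_SHIFT;
 *
 * How lvl maps to a headphone impedance is device-specific and not described
 * by this header.
 */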
MADERA_IN5R_ENA 0x0100
#define MADERA_IN5R_ENA_MASK 0x0100
#define MADERA_IN5R_ENA_SHIFT 8
#define MADERA_IN5R_ENA_WIDTH 1
#define MADERA_IN4L_ENA 0x0080
#define MADERA_IN4L_ENA_MASK 0x0080
#define MADERA_IN4L_ENA_SHIFT 7
#define MADERA_IN4L_ENA_WIDTH 1
#define MADERA_IN4R_ENA 0x0040
#define MADERA_IN4R_ENA_MASK 0x0040
#define MADERA_IN4R_ENA_SHIFT 6
#define MADERA_IN4R_ENA_WIDTH 1
#define MADERA_IN3L_ENA 0x0020
#define MADERA_IN3L_ENA_MASK 0x0020
#define MADERA_IN3L_ENA_SHIFT 5
#define MADERA_IN3L_ENA_WIDTH 1
#define MADERA_IN3R_ENA 0x0010
#define MADERA_IN3R_ENA_MASK 0x0010
#define MADERA_IN3R_ENA_SHIFT 4
#define MADERA_IN3R_ENA_WIDTH 1
#define MADERA_IN2L_ENA 0x0008
#define MADERA_IN2L_ENA_MASK 0x0008
#define MADERA_IN2L_ENA_SHIFT 3
#define MADERA_IN2L_ENA_WIDTH 1
#define MADERA_IN2R_ENA 0x0004
#define MADERA_IN2R_ENA_MASK 0x0004
#define MADERA_IN2R_ENA_SHIFT 2
#define MADERA_IN2R_ENA_WIDTH 1
#define MADERA_IN1L_ENA 0x0002
#define MADERA_IN1L_ENA_MASK 0x0002
#define MADERA_IN1L_ENA_SHIFT 1
#define MADERA_IN1L_ENA_WIDTH 1
#define MADERA_IN1R_ENA 0x0001
#define MADERA_IN1R_ENA_MASK 0x0001
#define MADERA_IN1R_ENA_SHIFT 0
#define MADERA_IN1R_ENA_WIDTH 1
/* (0x0308) Input_Rate */
#define MADERA_IN_RATE_MASK 0xF800
#define MADERA_IN_RATE_SHIFT 11
#define MADERA_IN_RATE_WIDTH 5
#define MADERA_IN_MODE_MASK 0x0400
#define MADERA_IN_MODE_SHIFT 10
#define MADERA_IN_MODE_WIDTH 1
/* (0x0309) Input_Volume_Ramp */
#define MADERA_IN_VD_RAMP_MASK 0x0070
#define MADERA_IN_VD_RAMP_SHIFT 4
#define MADERA_IN_VD_RAMP_WIDTH 3
#define MADERA_IN_VI_RAMP_MASK 0x0007
#define MADERA_IN_VI_RAMP_SHIFT 0
#define MADERA_IN_VI_RAMP_WIDTH 3
/* (0x030C) HPF_Control */
#define MADERA_IN_HPF_CUT_MASK 0x0007
#define MADERA_IN_HPF_CUT_SHIFT 0
#define MADERA_IN_HPF_CUT_WIDTH 3
/* (0x0310) IN1L_Control */
#define MADERA_IN1L_HPF_MASK 0x8000
#define MADERA_IN1L_HPF_SHIFT 15
#define MADERA_IN1L_HPF_WIDTH 1
#define MADERA_IN1_DMIC_SUP_MASK 0x1800
#define MADERA_IN1_DMIC_SUP_SHIFT 11
#define MADERA_IN1_DMIC_SUP_WIDTH 2
#define MADERA_IN1_MODE_MASK 0x0400
#define MADERA_IN1_MODE_SHIFT 10
#define MADERA_IN1_MODE_WIDTH 1
#define MADERA_IN1L_PGA_VOL_MASK 0x00FE
#define MADERA_IN1L_PGA_VOL_SHIFT 1
#define MADERA_IN1L_PGA_VOL_WIDTH 7
/* (0x0311) ADC_Digital_Volume_1L */
#define MADERA_IN1L_SRC_MASK 0x4000
#define MADERA_IN1L_SRC_SHIFT 14
#define MADERA_IN1L_SRC_WIDTH 1
#define MADERA_IN1L_SRC_SE_MASK 0x2000
#define MADERA_IN1L_SRC_SE_SHIFT 13
#define MADERA_IN1L_SRC_SE_WIDTH 1
#define MADERA_IN1L_LP_MODE 0x0800
#define MADERA_IN1L_LP_MODE_MASK 0x0800
#define MADERA_IN1L_LP_MODE_SHIFT 11
#define MADERA_IN1L_LP_MODE_WIDTH 1
#define MADERA_IN_VU 0x0200
#define MADERA_IN_VU_MASK 0x0200
#define MADERA_IN_VU_SHIFT 9
#define MADERA_IN_VU_WIDTH 1
#define MADERA_IN1L_MUTE 0x0100
#define MADERA_IN1L_MUTE_MASK 0x0100
#define MADERA_IN1L_MUTE_SHIFT 8
#define MADERA_IN1L_MUTE_WIDTH 1
#define MADERA_IN1L_DIG_VOL_MASK 0x00FF
#define MADERA_IN1L_DIG_VOL_SHIFT 0
#define MADERA_IN1L_DIG_VOL_WIDTH 8
/* (0x0312) DMIC1L_Control */
#define MADERA_IN1_OSR_MASK 0x0700
#define MADERA_IN1_OSR_SHIFT 8
#define MADERA_IN1_OSR_WIDTH 3
/* (0x0313) IN1L_Rate_Control */
#define MADERA_IN1L_RATE_MASK 0xF800
#define MADERA_IN1L_RATE_SHIFT 11
#define MADERA_IN1L_RATE_WIDTH 5
/* (0x0314) IN1R_Control */
#define MADERA_IN1R_HPF_MASK 0x8000
#define MADERA_IN1R_HPF_SHIFT 15
#define MADERA_IN1R_HPF_WIDTH 1
#define MADERA_IN1R_PGA_VOL_MASK 0x00FE
#define MADERA_IN1R_PGA_VOL_SHIFT 1
#define MADERA_IN1R_PGA_VOL_WIDTH 7
#define MADERA_IN1_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN1_DMICCLK_SRC_SHIFT 11
#define MADERA_IN1_DMICCLK_SRC_WIDTH 2
/* (0x0315) ADC_Digital_Volume_1R */
#define MADERA_IN1R_SRC_MASK 0x4000
#define MADERA_IN1R_SRC_SHIFT 14
#define MADERA_IN1R_SRC_WIDTH 1
#define MADERA_IN1R_SRC_SE_MASK 0x2000
#define MADERA_IN1R_SRC_SE_SHIFT 13
#define MADERA_IN1R_SRC_SE_WIDTH 1
#define MADERA_IN1R_LP_MODE 0x0800
#define MADERA_IN1R_LP_MODE_MASK 0x0800
#define MADERA_IN1R_LP_MODE_SHIFT 11
#define MADERA_IN1R_LP_MODE_WIDTH 1
#define MADERA_IN1R_MUTE 0x0100
#define MADERA_IN1R_MUTE_MASK 0x0100
#define MADERA_IN1R_MUTE_SHIFT 8
#define MADERA_IN1R_MUTE_WIDTH 1
#define MADERA_IN1R_DIG_VOL_MASK 0x00FF
#define MADERA_IN1R_DIG_VOL_SHIFT 0
#define MADERA_IN1R_DIG_VOL_WIDTH 8
/* (0x0317) IN1R_Rate_Control */
#define MADERA_IN1R_RATE_MASK 0xF800
#define MADERA_IN1R_RATE_SHIFT 11
#define MADERA_IN1R_RATE_WIDTH 5
/* (0x0318) IN2L_Control */
#define MADERA_IN2L_HPF_MASK 0x8000
#define MADERA_IN2L_HPF_SHIFT 15
#define MADERA_IN2L_HPF_WIDTH 1
#define MADERA_IN2_DMIC_SUP_MASK 0x1800
#define MADERA_IN2_DMIC_SUP_SHIFT 11
#define MADERA_IN2_DMIC_SUP_WIDTH 2
#define MADERA_IN2_MODE_MASK 0x0400
#define MADERA_IN2_MODE_SHIFT 10
#define MADERA_IN2_MODE_WIDTH 1
#define MADERA_IN2L_PGA_VOL_MASK 0x00FE
#define MADERA_IN2L_PGA_VOL_SHIFT 1
#define MADERA_IN2L_PGA_VOL_WIDTH 7
/* (0x0319) ADC_Digital_Volume_2L */
#define MADERA_IN2L_SRC_MASK 0x4000
#define MADERA_IN2L_SRC_SHIFT 14
#define MADERA_IN2L_SRC_WIDTH 1
#define MADERA_IN2L_SRC_SE_MASK 0x2000
#define MADERA_IN2L_SRC_SE_SHIFT 13
#define MADERA_IN2L_SRC_SE_WIDTH 1
#define MADERA_IN2L_LP_MODE 0x0800
#define MADERA_IN2L_LP_MODE_MASK 0x0800
#define MADERA_IN2L_LP_MODE_SHIFT 11
#define MADERA_IN2L_LP_MODE_WIDTH 1
#define MADERA_IN2L_MUTE 0x0100
#define MADERA_IN2L_MUTE_MASK 0x0100
#define MADERA_IN2L_MUTE_SHIFT 8
#define MADERA_IN2L_MUTE_WIDTH 1
#define MADERA_IN2L_DIG_VOL_MASK 0x00FF
#define MADERA_IN2L_DIG_VOL_SHIFT 0
#define MADERA_IN2L_DIG_VOL_WIDTH 8
/* (0x031A) DMIC2L_Control */
#define MADERA_IN2_OSR_MASK 0x0700
#define MADERA_IN2_OSR_SHIFT 8
#define MADERA_IN2_OSR_WIDTH 3
/* (0x031C) IN2R_Control */
#define MADERA_IN2R_HPF_MASK 0x8000
#define MADERA_IN2R_HPF_SHIFT 15
#define MADERA_IN2R_HPF_WIDTH 1
#define MADERA_IN2R_PGA_VOL_MASK 0x00FE
#define MADERA_IN2R_PGA_VOL_SHIFT 1
#define MADERA_IN2R_PGA_VOL_WIDTH 7
#define MADERA_IN2_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN2_DMICCLK_SRC_SHIFT 11
#define MADERA_IN2_DMICCLK_SRC_WIDTH 2
/* (0x031D) ADC_Digital_Volume_2R */
#define MADERA_IN2R_SRC_MASK 0x4000
#define MADERA_IN2R_SRC_SHIFT 14
#define MADERA_IN2R_SRC_WIDTH 1
#define MADERA_IN2R_SRC_SE_MASK 0x2000
#define MADERA_IN2R_SRC_SE_SHIFT 13
#define MADERA_IN2R_SRC_SE_WIDTH 1
#define MADERA_IN2R_LP_MODE 0x0800
#define MADERA_IN2R_LP_MODE_MASK 0x0800
#define MADERA_IN2R_LP_MODE_SHIFT 11
#define MADERA_IN2R_LP_MODE_WIDTH 1
#define MADERA_IN2R_MUTE 0x0100
#define MADERA_IN2R_MUTE_MASK 0x0100
#define MADERA_IN2R_MUTE_SHIFT 8
#define MADERA_IN2R_MUTE_WIDTH 1
#define MADERA_IN2R_DIG_VOL_MASK 0x00FF
#define MADERA_IN2R_DIG_VOL_SHIFT 0
#define MADERA_IN2R_DIG_VOL_WIDTH 8
/* (0x0320) IN3L_Control */
#define MADERA_IN3L_HPF_MASK 0x8000
#define MADERA_IN3L_HPF_SHIFT 15
#define MADERA_IN3L_HPF_WIDTH 1
#define MADERA_IN3_DMIC_SUP_MASK 0x1800
#define MADERA_IN3_DMIC_SUP_SHIFT 11
#define MADERA_IN3_DMIC_SUP_WIDTH 2
#define MADERA_IN3_MODE_MASK 0x0400
#define MADERA_IN3_MODE_SHIFT 10
#define MADERA_IN3_MODE_WIDTH 1
#define MADERA_IN3L_PGA_VOL_MASK 0x00FE
#define MADERA_IN3L_PGA_VOL_SHIFT 1
#define MADERA_IN3L_PGA_VOL_WIDTH 7
/* (0x0321) ADC_Digital_Volume_3L */
#define MADERA_IN3L_MUTE 0x0100
#define MADERA_IN3L_MUTE_MASK 0x0100
#define MADERA_IN3L_MUTE_SHIFT 8
#define MADERA_IN3L_MUTE_WIDTH 1
#define MADERA_IN3L_DIG_VOL_MASK 0x00FF
#define MADERA_IN3L_DIG_VOL_SHIFT 0
#define MADERA_IN3L_DIG_VOL_WIDTH 8
/* (0x0322) DMIC3L_Control */
#define MADERA_IN3_OSR_MASK 0x0700
#define MADERA_IN3_OSR_SHIFT 8
#define MADERA_IN3_OSR_WIDTH 3
/* (0x0324) IN3R_Control */
#define MADERA_IN3R_HPF_MASK 0x8000
#define MADERA_IN3R_HPF_SHIFT 15
#define MADERA_IN3R_HPF_WIDTH 1
#define MADERA_IN3R_PGA_VOL_MASK 0x00FE
#define MADERA_IN3R_PGA_VOL_SHIFT 1
#define MADERA_IN3R_PGA_VOL_WIDTH 7
#define MADERA_IN3_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN3_DMICCLK_SRC_SHIFT 11
#define MADERA_IN3_DMICCLK_SRC_WIDTH 2
/* (0x0325) ADC_Digital_Volume_3R */
#define MADERA_IN3R_MUTE 0x0100
#define MADERA_IN3R_MUTE_MASK 0x0100
#define MADERA_IN3R_MUTE_SHIFT 8
#define MADERA_IN3R_MUTE_WIDTH 1
#define MADERA_IN3R_DIG_VOL_MASK 0x00FF
#define MADERA_IN3R_DIG_VOL_SHIFT 0
#define MADERA_IN3R_DIG_VOL_WIDTH 8
/* (0x0328) IN4L_Control */
#define MADERA_IN4L_HPF_MASK 0x8000
#define MADERA_IN4L_HPF_SHIFT 15
#define MADERA_IN4L_HPF_WIDTH 1
#define MADERA_IN4_DMIC_SUP_MASK 0x1800
#define MADERA_IN4_DMIC_SUP_SHIFT 11
#define MADERA_IN4_DMIC_SUP_WIDTH 2
/* (0x0329) ADC_Digital_Volume_4L */
#define MADERA_IN4L_MUTE 0x0100
#define MADERA_IN4L_MUTE_MASK 0x0100
#define MADERA_IN4L_MUTE_SHIFT 8
#define MADERA_IN4L_MUTE_WIDTH 1
#define MADERA_IN4L_DIG_VOL_MASK 0x00FF
#define MADERA_IN4L_DIG_VOL_SHIFT 0
#define MADERA_IN4L_DIG_VOL_WIDTH 8
/* (0x032A) DMIC4L_Control */
#define MADERA_IN4_OSR_MASK 0x0700
#define MADERA_IN4_OSR_SHIFT 8
#define MADERA_IN4_OSR_WIDTH 3
/* (0x032C) IN4R_Control */
#define MADERA_IN4R_HPF_MASK 0x8000
#define MADERA_IN4R_HPF_SHIFT 15
#define MADERA_IN4R_HPF_WIDTH 1
#define MADERA_IN4_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN4_DMICCLK_SRC_SHIFT 11
#define MADERA_IN4_DMICCLK_SRC_WIDTH 2
/* (0x032D) ADC_Digital_Volume_4R */
#define MADERA_IN4R_MUTE 0x0100
#define MADERA_IN4R_MUTE_MASK 0x0100
#define MADERA_IN4R_MUTE_SHIFT 8
#define MADERA_IN4R_MUTE_WIDTH 1
#define MADERA_IN4R_DIG_VOL_MASK 0x00FF
#define MADERA_IN4R_DIG_VOL_SHIFT 0
#define MADERA_IN4R_DIG_VOL_WIDTH 8
/* (0x0330) IN5L_Control */
#define MADERA_IN5L_HPF_MASK 0x8000
#define MADERA_IN5L_HPF_SHIFT 15
#define MADERA_IN5L_HPF_WIDTH 1
#define MADERA_IN5_DMIC_SUP_MASK 0x1800
#define MADERA_IN5_DMIC_SUP_SHIFT 11
#define MADERA_IN5_DMIC_SUP_WIDTH 2
/* (0x0331) ADC_Digital_Volume_5L */
#define MADERA_IN5L_MUTE 0x0100
#define MADERA_IN5L_MUTE_MASK 0x0100
#define MADERA_IN5L_MUTE_SHIFT 8
#define MADERA_IN5L_MUTE_WIDTH 1
#define MADERA_IN5L_DIG_VOL_MASK 0x00FF
#define MADERA_IN5L_DIG_VOL_SHIFT 0
#define MADERA_IN5L_DIG_VOL_WIDTH 8
/* (0x0332) DMIC5L_Control */
#define MADERA_IN5_OSR_MASK 0x0700
#define MADERA_IN5_OSR_SHIFT 8
#define MADERA_IN5_OSR_WIDTH 3
/* (0x0334) IN5R_Control */
#define MADERA_IN5R_HPF_MASK 0x8000
#define MADERA_IN5R_HPF_SHIFT 15
#define MADERA_IN5R_HPF_WIDTH 1
#define MADERA_IN5_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN5_DMICCLK_SRC_SHIFT 11
#define MADERA_IN5_DMICCLK_SRC_WIDTH 2
/* (0x0335) ADC_Digital_Volume_5R */
#define MADERA_IN5R_MUTE 0x0100
#define MADERA_IN5R_MUTE_MASK 0x0100
#define MADERA_IN5R_MUTE_SHIFT 8
#define MADERA_IN5R_MUTE_WIDTH 1
#define MADERA_IN5R_DIG_VOL_MASK 0x00FF
#define MADERA_IN5R_DIG_VOL_SHIFT 0
#define MADERA_IN5R_DIG_VOL_WIDTH 8
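/*
 * Illustrative sketch only (not part of the original register map): the
 * _MASK/_SHIFT/_WIDTH triplets above are meant to be combined in a
 * read-modify-write sequence. madera_read16()/madera_write16() are
 * hypothetical caller-supplied 16-bit register accessors; 0x0311 is the
 * ADC_Digital_Volume_1L register documented above.
 */
static inline void madera_example_set_in1l_vol(
		unsigned int (*madera_read16)(unsigned int reg),
		void (*madera_write16)(unsigned int reg, unsigned int val),
		unsigned int vol)
{
	unsigned int val = madera_read16(0x0311);

	val &= ~MADERA_IN1L_DIG_VOL_MASK;
	val |= (vol << MADERA_IN1L_DIG_VOL_SHIFT) & MADERA_IN1L_DIG_VOL_MASK;
	val |= MADERA_IN_VU; /* IN_VU appears to latch input volume updates */
	madera_write16(0x0311, val);
}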
/* (0x0338) IN6L_Control */
#define MADERA_IN6L_HPF_MASK 0x8000
#define MADERA_IN6L_HPF_SHIFT 15
#define MADERA_IN6L_HPF_WIDTH 1
#define MADERA_IN6_DMIC_SUP_MASK 0x1800
#define MADERA_IN6_DMIC_SUP_SHIFT 11
#define MADERA_IN6_DMIC_SUP_WIDTH 2
/* (0x0339) ADC_Digital_Volume_6L */
#define MADERA_IN6L_MUTE 0x0100
#define MADERA_IN6L_MUTE_MASK 0x0100
#define MADERA_IN6L_MUTE_SHIFT 8
#define MADERA_IN6L_MUTE_WIDTH 1
#define MADERA_IN6L_DIG_VOL_MASK 0x00FF
#define MADERA_IN6L_DIG_VOL_SHIFT 0
#define MADERA_IN6L_DIG_VOL_WIDTH 8
/* (0x033A) DMIC6L_Control */
#define MADERA_IN6_OSR_MASK 0x0700
#define MADERA_IN6_OSR_SHIFT 8
#define MADERA_IN6_OSR_WIDTH 3
/* (0x033C) IN6R_Control */
#define MADERA_IN6R_HPF_MASK 0x8000
#define MADERA_IN6R_HPF_SHIFT 15
#define MADERA_IN6R_HPF_WIDTH 1
/* (0x033D) ADC_Digital_Volume_6R */
#define MADERA_IN6R_MUTE 0x0100
#define MADERA_IN6R_MUTE_MASK 0x0100
#define MADERA_IN6R_MUTE_SHIFT 8
#define MADERA_IN6R_MUTE_WIDTH 1
#define MADERA_IN6R_DIG_VOL_MASK 0x00FF
#define MADERA_IN6R_DIG_VOL_SHIFT 0
#define MADERA_IN6R_DIG_VOL_WIDTH 8
/* (0x033E) DMIC6R_Control */
#define MADERA_IN6_DMICCLK_SRC_MASK 0x1800
#define MADERA_IN6_DMICCLK_SRC_SHIFT 11
#define MADERA_IN6_DMICCLK_SRC_WIDTH 2
/* (0x0400) Output_Enables_1 */
#define MADERA_EP_SEL 0x8000
#define MADERA_EP_SEL_MASK 0x8000
#define MADERA_EP_SEL_SHIFT 15
#define MADERA_EP_SEL_WIDTH 1
#define MADERA_OUT6L_ENA 0x0800
#define MADERA_OUT6L_ENA_MASK 0x0800
#define MADERA_OUT6L_ENA_SHIFT 11
#define MADERA_OUT6L_ENA_WIDTH 1
#define MADERA_OUT6R_ENA 0x0400
#define MADERA_OUT6R_ENA_MASK 0x0400
#define MADERA_OUT6R_ENA_SHIFT 10
#define MADERA_OUT6R_ENA_WIDTH 1
#define MADERA_OUT5L_ENA 0x0200
#define MADERA_OUT5L_ENA_MASK 0x0200
#define MADERA_OUT5L_ENA_SHIFT 9
#define MADERA_OUT5L_ENA_WIDTH 1
#define MADERA_OUT5R_ENA 0x0100
#define MADERA_OUT5R_ENA_MASK 0x0100
#define MADERA_OUT5R_ENA_SHIFT 8
#define MADERA_OUT5R_ENA_WIDTH 1
#define MADERA_OUT4L_ENA 0x0080
#define MADERA_OUT4L_ENA_MASK 0x0080
#define MADERA_OUT4L_ENA_SHIFT 7
#define MADERA_OUT4L_ENA_WIDTH 1
#define MADERA_OUT4R_ENA 0x0040
#define MADERA_OUT4R_ENA_MASK 0x0040
#define MADERA_OUT4R_ENA_SHIFT 6
#define MADERA_OUT4R_ENA_WIDTH 1
#define MADERA_OUT3L_ENA 0x0020
#define MADERA_OUT3L_ENA_MASK 0x0020
#define MADERA_OUT3L_ENA_SHIFT 5
#define MADERA_OUT3L_ENA_WIDTH 1
#define MADERA_OUT3R_ENA 0x0010
#define MADERA_OUT3R_ENA_MASK 0x0010
#define MADERA_OUT3R_ENA_SHIFT 4
#define MADERA_OUT3R_ENA_WIDTH 1
#define MADERA_OUT2L_ENA 0x0008
#define MADERA_OUT2L_ENA_MASK 0x0008
#define MADERA_OUT2L_ENA_SHIFT 3
#define MADERA_OUT2L_ENA_WIDTH 1
#define MADERA_OUT2R_ENA 0x0004
#define MADERA_OUT2R_ENA_MASK 0x0004
#define MADERA_OUT2R_ENA_SHIFT 2
#define MADERA_OUT2R_ENA_WIDTH 1
#define MADERA_OUT1L_ENA 0x0002
#define MADERA_OUT1L_ENA_MASK 0x0002
#define MADERA_OUT1L_ENA_SHIFT 1
#define MADERA_OUT1L_ENA_WIDTH 1
#define MADERA_OUT1R_ENA 0x0001
#define MADERA_OUT1R_ENA_MASK 0x0001
#define MADERA_OUT1R_ENA_SHIFT 0
#define MADERA_OUT1R_ENA_WIDTH 1
/* (0x0408) Output_Rate_1 */
#define MADERA_CP_DAC_MODE_MASK 0x0040
#define MADERA_CP_DAC_MODE_SHIFT 6
#define MADERA_CP_DAC_MODE_WIDTH 1
#define MADERA_OUT_EXT_CLK_DIV_MASK 0x0030
#define MADERA_OUT_EXT_CLK_DIV_SHIFT 4
#define MADERA_OUT_EXT_CLK_DIV_WIDTH 2
#define MADERA_OUT_CLK_SRC_MASK 0x0007
#define MADERA_OUT_CLK_SRC_SHIFT 0
#define MADERA_OUT_CLK_SRC_WIDTH 3
/* (0x0409) Output_Volume_Ramp */
#define MADERA_OUT_VD_RAMP_MASK 0x0070
#define MADERA_OUT_VD_RAMP_SHIFT 4
#define MADERA_OUT_VD_RAMP_WIDTH 3
#define MADERA_OUT_VI_RAMP_MASK 0x0007
#define MADERA_OUT_VI_RAMP_SHIFT 0
#define MADERA_OUT_VI_RAMP_WIDTH 3
/* (0x0410) Output_Path_Config_1L */
#define MADERA_OUT1_MONO 0x1000
#define MADERA_OUT1_MONO_MASK 0x1000
#define MADERA_OUT1_MONO_SHIFT 12
#define MADERA_OUT1_MONO_WIDTH 1
#define MADERA_OUT1L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT1L_ANC_SRC_SHIFT 10
#define MADERA_OUT1L_ANC_SRC_WIDTH 2
/* (0x0411) DAC_Digital_Volume_1L */
#define MADERA_OUT1L_VU 0x0200
#define MADERA_OUT1L_VU_MASK 0x0200
#define MADERA_OUT1L_VU_SHIFT 9
#define MADERA_OUT1L_VU_WIDTH 1
#define MADERA_OUT1L_MUTE 0x0100
#define MADERA_OUT1L_MUTE_MASK 0x0100
#define MADERA_OUT1L_MUTE_SHIFT 8
#define MADERA_OUT1L_MUTE_WIDTH 1
#define MADERA_OUT1L_VOL_MASK 0x00FF
#define MADERA_OUT1L_VOL_SHIFT 0
#define MADERA_OUT1L_VOL_WIDTH 8
/* (0x0412) Output_Path_Config_1 */
#define MADERA_HP1_GND_SEL_MASK 0x0007
#define MADERA_HP1_GND_SEL_SHIFT 0
#define MADERA_HP1_GND_SEL_WIDTH 3
/* (0x0414) Output_Path_Config_1R */
#define MADERA_OUT1R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT1R_ANC_SRC_SHIFT 10
#define MADERA_OUT1R_ANC_SRC_WIDTH 2
/* (0x0415) DAC_Digital_Volume_1R */
#define MADERA_OUT1R_MUTE 0x0100
#define MADERA_OUT1R_MUTE_MASK 0x0100
#define MADERA_OUT1R_MUTE_SHIFT 8
#define MADERA_OUT1R_MUTE_WIDTH 1
#define MADERA_OUT1R_VOL_MASK 0x00FF
#define MADERA_OUT1R_VOL_SHIFT 0
#define MADERA_OUT1R_VOL_WIDTH 8
/* (0x0418) Output_Path_Config_2L */
#define MADERA_OUT2L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT2L_ANC_SRC_SHIFT 10
#define MADERA_OUT2L_ANC_SRC_WIDTH 2
/* (0x0419) DAC_Digital_Volume_2L */
#define MADERA_OUT2L_MUTE 0x0100
#define MADERA_OUT2L_MUTE_MASK 0x0100
#define MADERA_OUT2L_MUTE_SHIFT 8
#define MADERA_OUT2L_MUTE_WIDTH 1
#define MADERA_OUT2L_VOL_MASK 0x00FF
#define MADERA_OUT2L_VOL_SHIFT 0
#define MADERA_OUT2L_VOL_WIDTH 8
/* (0x041A) Output_Path_Config_2 */
#define MADERA_HP2_GND_SEL_MASK 0x0007
#define MADERA_HP2_GND_SEL_SHIFT 0
#define MADERA_HP2_GND_SEL_WIDTH 3
/* (0x041C) Output_Path_Config_2R */
#define MADERA_OUT2R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT2R_ANC_SRC_SHIFT 10
#define MADERA_OUT2R_ANC_SRC_WIDTH 2
/* (0x041D) DAC_Digital_Volume_2R */
#define MADERA_OUT2R_MUTE 0x0100
#define MADERA_OUT2R_MUTE_MASK 0x0100
#define MADERA_OUT2R_MUTE_SHIFT 8
#define MADERA_OUT2R_MUTE_WIDTH 1
#define MADERA_OUT2R_VOL_MASK 0x00FF
#define MADERA_OUT2R_VOL_SHIFT 0
#define MADERA_OUT2R_VOL_WIDTH 8
/* (0x0420) Output_Path_Config_3L */
#define MADERA_OUT3L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT3L_ANC_SRC_SHIFT 10
#define MADERA_OUT3L_ANC_SRC_WIDTH 2
/* (0x0421) DAC_Digital_Volume_3L */
#define MADERA_OUT3L_MUTE 0x0100
#define MADERA_OUT3L_MUTE_MASK 0x0100
#define MADERA_OUT3L_MUTE_SHIFT 8
#define MADERA_OUT3L_MUTE_WIDTH 1
#define MADERA_OUT3L_VOL_MASK 0x00FF
#define MADERA_OUT3L_VOL_SHIFT 0
#define MADERA_OUT3L_VOL_WIDTH 8
/* (0x0424) Output_Path_Config_3R */
#define MADERA_OUT3R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT3R_ANC_SRC_SHIFT 10
#define MADERA_OUT3R_ANC_SRC_WIDTH 2
/* (0x0425) DAC_Digital_Volume_3R */
#define MADERA_OUT3R_MUTE 0x0100
#define MADERA_OUT3R_MUTE_MASK 0x0100
#define MADERA_OUT3R_MUTE_SHIFT 8
#define MADERA_OUT3R_MUTE_WIDTH 1
#define MADERA_OUT3R_VOL_MASK 0x00FF
#define MADERA_OUT3R_VOL_SHIFT 0
#define MADERA_OUT3R_VOL_WIDTH 8
/* (0x0428) Output_Path_Config_4L */
#define MADERA_OUT4L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT4L_ANC_SRC_SHIFT 10
#define MADERA_OUT4L_ANC_SRC_WIDTH 2
/* (0x0429) DAC_Digital_Volume_4L */
#define MADERA_OUT4L_MUTE 0x0100
#define MADERA_OUT4L_MUTE_MASK 0x0100
#define MADERA_OUT4L_MUTE_SHIFT 8
#define MADERA_OUT4L_MUTE_WIDTH 1
#define MADERA_OUT4L_VOL_MASK 0x00FF
#define MADERA_OUT4L_VOL_SHIFT 0
#define MADERA_OUT4L_VOL_WIDTH 8
/* (0x042C) Output_Path_Config_4R */
#define MADERA_OUT4R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT4R_ANC_SRC_SHIFT 10
#define MADERA_OUT4R_ANC_SRC_WIDTH 2
/* (0x042D) DAC_Digital_Volume_4R */
#define MADERA_OUT4R_MUTE 0x0100
#define MADERA_OUT4R_MUTE_MASK 0x0100
#define MADERA_OUT4R_MUTE_SHIFT 8
#define MADERA_OUT4R_MUTE_WIDTH 1
#define MADERA_OUT4R_VOL_MASK 0x00FF
#define MADERA_OUT4R_VOL_SHIFT 0
#define MADERA_OUT4R_VOL_WIDTH 8
/* (0x0430) Output_Path_Config_5L */
#define MADERA_OUT5_OSR 0x2000
#define MADERA_OUT5_OSR_MASK 0x2000
#define MADERA_OUT5_OSR_SHIFT 13
#define MADERA_OUT5_OSR_WIDTH 1
#define MADERA_OUT5L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT5L_ANC_SRC_SHIFT 10
#define MADERA_OUT5L_ANC_SRC_WIDTH 2
/* (0x0431) DAC_Digital_Volume_5L */
#define MADERA_OUT5L_MUTE 0x0100
#define MADERA_OUT5L_MUTE_MASK 0x0100
#define MADERA_OUT5L_MUTE_SHIFT 8
#define MADERA_OUT5L_MUTE_WIDTH 1
#define MADERA_OUT5L_VOL_MASK 0x00FF
#define MADERA_OUT5L_VOL_SHIFT 0
#define MADERA_OUT5L_VOL_WIDTH 8
/* (0x0434) Output_Path_Config_5R */
#define MADERA_OUT5R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT5R_ANC_SRC_SHIFT 10
#define MADERA_OUT5R_ANC_SRC_WIDTH 2
/* (0x0435) DAC_Digital_Volume_5R */
#define MADERA_OUT5R_MUTE 0x0100
#define MADERA_OUT5R_MUTE_MASK 0x0100
#define MADERA_OUT5R_MUTE_SHIFT 8
#define MADERA_OUT5R_MUTE_WIDTH 1
#define MADERA_OUT5R_VOL_MASK 0x00FF
#define MADERA_OUT5R_VOL_SHIFT 0
#define MADERA_OUT5R_VOL_WIDTH 8
/* (0x0438) Output_Path_Config_6L */
#define MADERA_OUT6_OSR 0x2000
#define MADERA_OUT6_OSR_MASK 0x2000
#define MADERA_OUT6_OSR_SHIFT 13
#define MADERA_OUT6_OSR_WIDTH 1
#define MADERA_OUT6L_ANC_SRC_MASK 0x0C00
#define MADERA_OUT6L_ANC_SRC_SHIFT 10
#define MADERA_OUT6L_ANC_SRC_WIDTH 2
/* (0x0439) DAC_Digital_Volume_6L */
#define MADERA_OUT6L_MUTE 0x0100
#define MADERA_OUT6L_MUTE_MASK 0x0100
#define MADERA_OUT6L_MUTE_SHIFT 8
#define MADERA_OUT6L_MUTE_WIDTH 1
#define MADERA_OUT6L_VOL_MASK 0x00FF
#define MADERA_OUT6L_VOL_SHIFT 0
#define MADERA_OUT6L_VOL_WIDTH 8
/* (0x043C) Output_Path_Config_6R */
#define MADERA_OUT6R_ANC_SRC_MASK 0x0C00
#define MADERA_OUT6R_ANC_SRC_SHIFT 10
#define MADERA_OUT6R_ANC_SRC_WIDTH 2
/* (0x043D) DAC_Digital_Volume_6R */
#define MADERA_OUT6R_MUTE 0x0100
#define MADERA_OUT6R_MUTE_MASK 0x0100
#define MADERA_OUT6R_MUTE_SHIFT 8
#define MADERA_OUT6R_MUTE_WIDTH 1
#define MADERA_OUT6R_VOL_MASK 0x00FF
#define MADERA_OUT6R_VOL_SHIFT 0
#define MADERA_OUT6R_VOL_WIDTH 8
/* (0x0450) - DAC AEC Control 1 */
#define MADERA_AEC1_LOOPBACK_SRC_MASK 0x003C
#define MADERA_AEC1_LOOPBACK_SRC_SHIFT 2
#define MADERA_AEC1_LOOPBACK_SRC_WIDTH 4
#define MADERA_AEC1_ENA_STS 0x0002
#define MADERA_AEC1_ENA_STS_MASK 0x0002
#define MADERA_AEC1_ENA_STS_SHIFT 1
#define MADERA_AEC1_ENA_STS_WIDTH 1
#define MADERA_AEC1_LOOPBACK_ENA 0x0001
#define MADERA_AEC1_LOOPBACK_ENA_MASK 0x0001
#define MADERA_AEC1_LOOPBACK_ENA_SHIFT 0
#define MADERA_AEC1_LOOPBACK_ENA_WIDTH 1
/* (0x0451) DAC_AEC_Control_2 */
#define MADERA_AEC2_LOOPBACK_SRC_MASK 0x003C
#define MADERA_AEC2_LOOPBACK_SRC_SHIFT 2
#define MADERA_AEC2_LOOPBACK_SRC_WIDTH 4
#define MADERA_AEC2_ENA_STS 0x0002
#define MADERA_AEC2_ENA_STS_MASK 0x0002
#define MADERA_AEC2_ENA_STS_SHIFT 1
#define MADERA_AEC2_ENA_STS_WIDTH 1
#define MADERA_AEC2_LOOPBACK_ENA 0x0001
#define MADERA_AEC2_LOOPBACK_ENA_MASK 0x0001
#define MADERA_AEC2_LOOPBACK_ENA_SHIFT 0
#define MADERA_AEC2_LOOPBACK_ENA_WIDTH 1
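/*
 * Illustrative sketch only: muting/unmuting OUT1L with the DAC digital
 * volume fields above. OUT1L_VU is written alongside the change,
 * mirroring the input-side IN_VU pattern; the accessors are hypothetical
 * caller-supplied 16-bit register helpers, and 0x0411 is the
 * DAC_Digital_Volume_1L register documented above.
 */
static inline void madera_example_out1l_mute(
		unsigned int (*madera_read16)(unsigned int reg),
		void (*madera_write16)(unsigned int reg, unsigned int val),
		int mute)
{
	unsigned int val = madera_read16(0x0411);

	val &= ~MADERA_OUT1L_MUTE_MASK;
	if (mute)
		val |= MADERA_OUT1L_MUTE;
	madera_write16(0x0411, val | MADERA_OUT1L_VU);
}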
/* (0x0458) Noise_Gate_Control */
#define MADERA_NGATE_HOLD_MASK 0x0030
#define MADERA_NGATE_HOLD_SHIFT 4
#define MADERA_NGATE_HOLD_WIDTH 2
#define MADERA_NGATE_THR_MASK 0x000E
#define MADERA_NGATE_THR_SHIFT 1
#define MADERA_NGATE_THR_WIDTH 3
#define MADERA_NGATE_ENA 0x0001
#define MADERA_NGATE_ENA_MASK 0x0001
#define MADERA_NGATE_ENA_SHIFT 0
#define MADERA_NGATE_ENA_WIDTH 1
/* (0x0490) PDM_SPK1_CTRL_1 */
#define MADERA_SPK1R_MUTE 0x2000
#define MADERA_SPK1R_MUTE_MASK 0x2000
#define MADERA_SPK1R_MUTE_SHIFT 13
#define MADERA_SPK1R_MUTE_WIDTH 1
#define MADERA_SPK1L_MUTE 0x1000
#define MADERA_SPK1L_MUTE_MASK 0x1000
#define MADERA_SPK1L_MUTE_SHIFT 12
#define MADERA_SPK1L_MUTE_WIDTH 1
#define MADERA_SPK1_MUTE_ENDIAN 0x0100
#define MADERA_SPK1_MUTE_ENDIAN_MASK 0x0100
#define MADERA_SPK1_MUTE_ENDIAN_SHIFT 8
#define MADERA_SPK1_MUTE_ENDIAN_WIDTH 1
#define MADERA_SPK1_MUTE_SEQ1_MASK 0x00FF
#define MADERA_SPK1_MUTE_SEQ1_SHIFT 0
#define MADERA_SPK1_MUTE_SEQ1_WIDTH 8
/* (0x0491) PDM_SPK1_CTRL_2 */
#define MADERA_SPK1_FMT 0x0001
#define MADERA_SPK1_FMT_MASK 0x0001
#define MADERA_SPK1_FMT_SHIFT 0
#define MADERA_SPK1_FMT_WIDTH 1
/* (0x0492) PDM_SPK2_CTRL_1 */
#define MADERA_SPK2R_MUTE 0x2000
#define MADERA_SPK2R_MUTE_MASK 0x2000
#define MADERA_SPK2R_MUTE_SHIFT 13
#define MADERA_SPK2R_MUTE_WIDTH 1
#define MADERA_SPK2L_MUTE 0x1000
#define MADERA_SPK2L_MUTE_MASK 0x1000
#define MADERA_SPK2L_MUTE_SHIFT 12
#define MADERA_SPK2L_MUTE_WIDTH 1
/* (0x04A0) - HP1 Short Circuit Ctrl */
#define MADERA_HP1_SC_ENA 0x1000
#define MADERA_HP1_SC_ENA_MASK 0x1000
#define MADERA_HP1_SC_ENA_SHIFT 12
#define MADERA_HP1_SC_ENA_WIDTH 1
/* (0x04A1) - HP2 Short Circuit Ctrl */
#define MADERA_HP2_SC_ENA 0x1000
#define MADERA_HP2_SC_ENA_MASK 0x1000
#define MADERA_HP2_SC_ENA_SHIFT 12
#define MADERA_HP2_SC_ENA_WIDTH 1
/* (0x04A2) - HP3 Short Circuit Ctrl */
#define MADERA_HP3_SC_ENA 0x1000
#define MADERA_HP3_SC_ENA_MASK 0x1000
#define MADERA_HP3_SC_ENA_SHIFT 12
#define MADERA_HP3_SC_ENA_WIDTH 1
/* (0x04A8) - HP_Test_Ctrl_5 */
#define MADERA_HP1L_ONEFLT 0x0100
#define MADERA_HP1L_ONEFLT_MASK 0x0100
#define MADERA_HP1L_ONEFLT_SHIFT 8
#define MADERA_HP1L_ONEFLT_WIDTH 1
/* (0x04A9) - HP_Test_Ctrl_6 */
#define MADERA_HP1R_ONEFLT 0x0100
#define MADERA_HP1R_ONEFLT_MASK 0x0100
#define MADERA_HP1R_ONEFLT_SHIFT 8
#define MADERA_HP1R_ONEFLT_WIDTH 1
/* (0x0500) AIF1_BCLK_Ctrl */
#define MADERA_AIF1_BCLK_INV 0x0080
#define MADERA_AIF1_BCLK_INV_MASK 0x0080
#define MADERA_AIF1_BCLK_INV_SHIFT 7
#define MADERA_AIF1_BCLK_INV_WIDTH 1
#define MADERA_AIF1_BCLK_MSTR 0x0020
#define MADERA_AIF1_BCLK_MSTR_MASK 0x0020
#define MADERA_AIF1_BCLK_MSTR_SHIFT 5
#define MADERA_AIF1_BCLK_MSTR_WIDTH 1
#define MADERA_AIF1_BCLK_FREQ_MASK 0x001F
#define MADERA_AIF1_BCLK_FREQ_SHIFT 0
#define MADERA_AIF1_BCLK_FREQ_WIDTH 5
/* (0x0501) AIF1_Tx_Pin_Ctrl */
#define MADERA_AIF1TX_LRCLK_SRC 0x0008
#define MADERA_AIF1TX_LRCLK_SRC_MASK 0x0008
#define MADERA_AIF1TX_LRCLK_SRC_SHIFT 3
#define MADERA_AIF1TX_LRCLK_SRC_WIDTH 1
#define MADERA_AIF1TX_LRCLK_INV 0x0004
#define MADERA_AIF1TX_LRCLK_INV_MASK 0x0004
#define MADERA_AIF1TX_LRCLK_INV_SHIFT 2
#define MADERA_AIF1TX_LRCLK_INV_WIDTH 1
#define MADERA_AIF1TX_LRCLK_MSTR 0x0001
#define MADERA_AIF1TX_LRCLK_MSTR_MASK 0x0001
#define MADERA_AIF1TX_LRCLK_MSTR_SHIFT 0
#define MADERA_AIF1TX_LRCLK_MSTR_WIDTH 1
/* (0x0502) AIF1_Rx_Pin_Ctrl */
#define MADERA_AIF1RX_LRCLK_INV 0x0004
#define MADERA_AIF1RX_LRCLK_INV_MASK 0x0004
#define MADERA_AIF1RX_LRCLK_INV_SHIFT 2
#define MADERA_AIF1RX_LRCLK_INV_WIDTH 1
#define MADERA_AIF1RX_LRCLK_FRC 0x0002
#define MADERA_AIF1RX_LRCLK_FRC_MASK 0x0002
#define MADERA_AIF1RX_LRCLK_FRC_SHIFT 1
#define MADERA_AIF1RX_LRCLK_FRC_WIDTH 1
#define MADERA_AIF1RX_LRCLK_MSTR 0x0001
#define MADERA_AIF1RX_LRCLK_MSTR_MASK 0x0001
#define MADERA_AIF1RX_LRCLK_MSTR_SHIFT 0
#define MADERA_AIF1RX_LRCLK_MSTR_WIDTH 1
/* (0x0503) AIF1_Rate_Ctrl */
#define MADERA_AIF1_RATE_MASK 0xF800
#define MADERA_AIF1_RATE_SHIFT 11
#define MADERA_AIF1_RATE_WIDTH 5
#define MADERA_AIF1_TRI 0x0040
#define MADERA_AIF1_TRI_MASK 0x0040
#define MADERA_AIF1_TRI_SHIFT 6
#define MADERA_AIF1_TRI_WIDTH 1
/* (0x0504) AIF1_Format */
#define MADERA_AIF1_FMT_MASK 0x0007
#define MADERA_AIF1_FMT_SHIFT 0
#define MADERA_AIF1_FMT_WIDTH 3
/* (0x0506) AIF1_Rx_BCLK_Rate */
#define MADERA_AIF1RX_BCPF_MASK 0x1FFF
#define MADERA_AIF1RX_BCPF_SHIFT 0
#define MADERA_AIF1RX_BCPF_WIDTH 13
/* (0x0507) AIF1_Frame_Ctrl_1 */
#define MADERA_AIF1TX_WL_MASK 0x3F00
#define MADERA_AIF1TX_WL_SHIFT 8
#define MADERA_AIF1TX_WL_WIDTH 6
#define MADERA_AIF1TX_SLOT_LEN_MASK 0x00FF
#define MADERA_AIF1TX_SLOT_LEN_SHIFT 0
#define MADERA_AIF1TX_SLOT_LEN_WIDTH 8
/* (0x0508) AIF1_Frame_Ctrl_2 */
#define MADERA_AIF1RX_WL_MASK 0x3F00
#define MADERA_AIF1RX_WL_SHIFT 8
#define MADERA_AIF1RX_WL_WIDTH 6
#define MADERA_AIF1RX_SLOT_LEN_MASK 0x00FF
#define MADERA_AIF1RX_SLOT_LEN_SHIFT 0
#define MADERA_AIF1RX_SLOT_LEN_WIDTH 8
/* (0x0509) AIF1_Frame_Ctrl_3 */
#define MADERA_AIF1TX1_SLOT_MASK 0x003F
#define MADERA_AIF1TX1_SLOT_SHIFT 0
#define MADERA_AIF1TX1_SLOT_WIDTH 6
/* (0x0519) AIF1_Tx_Enables */
#define MADERA_AIF1TX8_ENA 0x0080
#define MADERA_AIF1TX8_ENA_MASK 0x0080
#define MADERA_AIF1TX8_ENA_SHIFT 7
#define MADERA_AIF1TX8_ENA_WIDTH 1
#define MADERA_AIF1TX7_ENA 0x0040
#define MADERA_AIF1TX7_ENA_MASK 0x0040
#define MADERA_AIF1TX7_ENA_SHIFT 6
#define MADERA_AIF1TX7_ENA_WIDTH 1
#define MADERA_AIF1TX6_ENA 0x0020
#define MADERA_AIF1TX6_ENA_MASK 0x0020
#define MADERA_AIF1TX6_ENA_SHIFT 5
#define MADERA_AIF1TX6_ENA_WIDTH 1
#define MADERA_AIF1TX5_ENA 0x0010
#define MADERA_AIF1TX5_ENA_MASK 0x0010
#define MADERA_AIF1TX5_ENA_SHIFT 4
#define MADERA_AIF1TX5_ENA_WIDTH 1
#define MADERA_AIF1TX4_ENA 0x0008
#define MADERA_AIF1TX4_ENA_MASK 0x0008
#define MADERA_AIF1TX4_ENA_SHIFT 3
#define MADERA_AIF1TX4_ENA_WIDTH 1
#define MADERA_AIF1TX3_ENA 0x0004
#define MADERA_AIF1TX3_ENA_MASK 0x0004
#define MADERA_AIF1TX3_ENA_SHIFT 2
#define MADERA_AIF1TX3_ENA_WIDTH 1
#define MADERA_AIF1TX2_ENA 0x0002
#define MADERA_AIF1TX2_ENA_MASK 0x0002
#define MADERA_AIF1TX2_ENA_SHIFT 1
#define MADERA_AIF1TX2_ENA_WIDTH 1
#define MADERA_AIF1TX1_ENA 0x0001
#define MADERA_AIF1TX1_ENA_MASK 0x0001
#define MADERA_AIF1TX1_ENA_SHIFT 0
#define MADERA_AIF1TX1_ENA_WIDTH 1
/* (0x051A) AIF1_Rx_Enables */
#define MADERA_AIF1RX8_ENA 0x0080
#define MADERA_AIF1RX8_ENA_MASK 0x0080
#define MADERA_AIF1RX8_ENA_SHIFT 7
#define MADERA_AIF1RX8_ENA_WIDTH 1
#define MADERA_AIF1RX7_ENA 0x0040
#define MADERA_AIF1RX7_ENA_MASK 0x0040
#define MADERA_AIF1RX7_ENA_SHIFT 6
#define MADERA_AIF1RX7_ENA_WIDTH 1
#define MADERA_AIF1RX6_ENA 0x0020
#define MADERA_AIF1RX6_ENA_MASK 0x0020
#define MADERA_AIF1RX6_ENA_SHIFT 5
#define MADERA_AIF1RX6_ENA_WIDTH 1
#define MADERA_AIF1RX5_ENA 0x0010
#define MADERA_AIF1RX5_ENA_MASK 0x0010
#define MADERA_AIF1RX5_ENA_SHIFT 4
#define MADERA_AIF1RX5_ENA_WIDTH 1
#define MADERA_AIF1RX4_ENA 0x0008
#define MADERA_AIF1RX4_ENA_MASK 0x0008
#define MADERA_AIF1RX4_ENA_SHIFT 3
#define MADERA_AIF1RX4_ENA_WIDTH 1
#define MADERA_AIF1RX3_ENA 0x0004
#define MADERA_AIF1RX3_ENA_MASK 0x0004
#define MADERA_AIF1RX3_ENA_SHIFT 2
#define MADERA_AIF1RX3_ENA_WIDTH 1
#define MADERA_AIF1RX2_ENA 0x0002
#define MADERA_AIF1RX2_ENA_MASK 0x0002
#define MADERA_AIF1RX2_ENA_SHIFT 1
#define MADERA_AIF1RX2_ENA_WIDTH 1
#define MADERA_AIF1RX1_ENA 0x0001
#define MADERA_AIF1RX1_ENA_MASK 0x0001
#define MADERA_AIF1RX1_ENA_SHIFT 0
#define MADERA_AIF1RX1_ENA_WIDTH 1
/* (0x0559) AIF2_Tx_Enables */
#define MADERA_AIF2TX8_ENA 0x0080
#define MADERA_AIF2TX8_ENA_MASK 0x0080
#define MADERA_AIF2TX8_ENA_SHIFT 7
#define MADERA_AIF2TX8_ENA_WIDTH 1
#define MADERA_AIF2TX7_ENA 0x0040
#define MADERA_AIF2TX7_ENA_MASK 0x0040
#define MADERA_AIF2TX7_ENA_SHIFT 6
#define MADERA_AIF2TX7_ENA_WIDTH 1
#define MADERA_AIF2TX6_ENA 0x0020
#define MADERA_AIF2TX6_ENA_MASK 0x0020
#define MADERA_AIF2TX6_ENA_SHIFT 5
#define MADERA_AIF2TX6_ENA_WIDTH 1
#define MADERA_AIF2TX5_ENA 0x0010
#define MADERA_AIF2TX5_ENA_MASK 0x0010
#define MADERA_AIF2TX5_ENA_SHIFT 4
#define MADERA_AIF2TX5_ENA_WIDTH 1
#define MADERA_AIF2TX4_ENA 0x0008
#define MADERA_AIF2TX4_ENA_MASK 0x0008
#define MADERA_AIF2TX4_ENA_SHIFT 3
#define MADERA_AIF2TX4_ENA_WIDTH 1
#define MADERA_AIF2TX3_ENA 0x0004
#define MADERA_AIF2TX3_ENA_MASK 0x0004
#define MADERA_AIF2TX3_ENA_SHIFT 2
#define MADERA_AIF2TX3_ENA_WIDTH 1
#define MADERA_AIF2TX2_ENA 0x0002
#define MADERA_AIF2TX2_ENA_MASK 0x0002
#define MADERA_AIF2TX2_ENA_SHIFT 1
#define MADERA_AIF2TX2_ENA_WIDTH 1
#define MADERA_AIF2TX1_ENA 0x0001
#define MADERA_AIF2TX1_ENA_MASK 0x0001
#define MADERA_AIF2TX1_ENA_SHIFT 0
#define MADERA_AIF2TX1_ENA_WIDTH 1
/* (0x055A) AIF2_Rx_Enables */
#define MADERA_AIF2RX8_ENA 0x0080
#define MADERA_AIF2RX8_ENA_MASK 0x0080
#define MADERA_AIF2RX8_ENA_SHIFT 7
#define MADERA_AIF2RX8_ENA_WIDTH 1
#define MADERA_AIF2RX7_ENA 0x0040
#define MADERA_AIF2RX7_ENA_MASK 0x0040
#define MADERA_AIF2RX7_ENA_SHIFT 6
#define MADERA_AIF2RX7_ENA_WIDTH 1
#define MADERA_AIF2RX6_ENA 0x0020
#define MADERA_AIF2RX6_ENA_MASK 0x0020
#define MADERA_AIF2RX6_ENA_SHIFT 5
#define MADERA_AIF2RX6_ENA_WIDTH 1
#define MADERA_AIF2RX5_ENA 0x0010
#define MADERA_AIF2RX5_ENA_MASK 0x0010
#define MADERA_AIF2RX5_ENA_SHIFT 4
#define MADERA_AIF2RX5_ENA_WIDTH 1
#define MADERA_AIF2RX4_ENA 0x0008
#define MADERA_AIF2RX4_ENA_MASK 0x0008
#define MADERA_AIF2RX4_ENA_SHIFT 3
#define MADERA_AIF2RX4_ENA_WIDTH 1
#define MADERA_AIF2RX3_ENA 0x0004
#define MADERA_AIF2RX3_ENA_MASK 0x0004
#define MADERA_AIF2RX3_ENA_SHIFT 2
#define MADERA_AIF2RX3_ENA_WIDTH 1
#define MADERA_AIF2RX2_ENA 0x0002
#define MADERA_AIF2RX2_ENA_MASK 0x0002
#define MADERA_AIF2RX2_ENA_SHIFT 1
#define MADERA_AIF2RX2_ENA_WIDTH 1
#define MADERA_AIF2RX1_ENA 0x0001
#define MADERA_AIF2RX1_ENA_MASK 0x0001
#define MADERA_AIF2RX1_ENA_SHIFT 0
#define MADERA_AIF2RX1_ENA_WIDTH 1
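/*
 * Illustrative sketch only: the per-channel _ENA bits above are plain
 * bit flags, so several channels can be enabled in one read-modify-write.
 * Register address 0x0559 (AIF2_Tx_Enables) is taken from the comment
 * above; the accessors are hypothetical.
 */
static inline void madera_example_enable_aif2tx12(
		unsigned int (*madera_read16)(unsigned int reg),
		void (*madera_write16)(unsigned int reg, unsigned int val))
{
	unsigned int val = madera_read16(0x0559);

	val |= MADERA_AIF2TX1_ENA | MADERA_AIF2TX2_ENA;
	madera_write16(0x0559, val);
}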
/* (0x0599) AIF3_Tx_Enables */
#define MADERA_AIF3TX8_ENA 0x0080
#define MADERA_AIF3TX8_ENA_MASK 0x0080
#define MADERA_AIF3TX8_ENA_SHIFT 7
#define MADERA_AIF3TX8_ENA_WIDTH 1
#define MADERA_AIF3TX7_ENA 0x0040
#define MADERA_AIF3TX7_ENA_MASK 0x0040
#define MADERA_AIF3TX7_ENA_SHIFT 6
#define MADERA_AIF3TX7_ENA_WIDTH 1
#define MADERA_AIF3TX6_ENA 0x0020
#define MADERA_AIF3TX6_ENA_MASK 0x0020
#define MADERA_AIF3TX6_ENA_SHIFT 5
#define MADERA_AIF3TX6_ENA_WIDTH 1
#define MADERA_AIF3TX5_ENA 0x0010
#define MADERA_AIF3TX5_ENA_MASK 0x0010
#define MADERA_AIF3TX5_ENA_SHIFT 4
#define MADERA_AIF3TX5_ENA_WIDTH 1
#define MADERA_AIF3TX4_ENA 0x0008
#define MADERA_AIF3TX4_ENA_MASK 0x0008
#define MADERA_AIF3TX4_ENA_SHIFT 3
#define MADERA_AIF3TX4_ENA_WIDTH 1
#define MADERA_AIF3TX3_ENA 0x0004
#define MADERA_AIF3TX3_ENA_MASK 0x0004
#define MADERA_AIF3TX3_ENA_SHIFT 2
#define MADERA_AIF3TX3_ENA_WIDTH 1
#define MADERA_AIF3TX2_ENA 0x0002
#define MADERA_AIF3TX2_ENA_MASK 0x0002
#define MADERA_AIF3TX2_ENA_SHIFT 1
#define MADERA_AIF3TX2_ENA_WIDTH 1
#define MADERA_AIF3TX1_ENA 0x0001
#define MADERA_AIF3TX1_ENA_MASK 0x0001
#define MADERA_AIF3TX1_ENA_SHIFT 0
#define MADERA_AIF3TX1_ENA_WIDTH 1
/* (0x059A) AIF3_Rx_Enables */
#define MADERA_AIF3RX8_ENA 0x0080
#define MADERA_AIF3RX8_ENA_MASK 0x0080
#define MADERA_AIF3RX8_ENA_SHIFT 7
#define MADERA_AIF3RX8_ENA_WIDTH 1
#define MADERA_AIF3RX7_ENA 0x0040
#define MADERA_AIF3RX7_ENA_MASK 0x0040
#define MADERA_AIF3RX7_ENA_SHIFT 6
#define MADERA_AIF3RX7_ENA_WIDTH 1
#define MADERA_AIF3RX6_ENA 0x0020
#define MADERA_AIF3RX6_ENA_MASK 0x0020
#define MADERA_AIF3RX6_ENA_SHIFT 5
#define MADERA_AIF3RX6_ENA_WIDTH 1
#define MADERA_AIF3RX5_ENA 0x0010
#define MADERA_AIF3RX5_ENA_MASK 0x0010
#define MADERA_AIF3RX5_ENA_SHIFT 4
#define MADERA_AIF3RX5_ENA_WIDTH 1
#define MADERA_AIF3RX4_ENA 0x0008
#define MADERA_AIF3RX4_ENA_MASK 0x0008
#define MADERA_AIF3RX4_ENA_SHIFT 3
#define MADERA_AIF3RX4_ENA_WIDTH 1
#define MADERA_AIF3RX3_ENA 0x0004
#define MADERA_AIF3RX3_ENA_MASK 0x0004
#define MADERA_AIF3RX3_ENA_SHIFT 2
#define MADERA_AIF3RX3_ENA_WIDTH 1
#define MADERA_AIF3RX2_ENA 0x0002
#define MADERA_AIF3RX2_ENA_MASK 0x0002
#define MADERA_AIF3RX2_ENA_SHIFT 1
#define MADERA_AIF3RX2_ENA_WIDTH 1
#define MADERA_AIF3RX1_ENA 0x0001
#define MADERA_AIF3RX1_ENA_MASK 0x0001
#define MADERA_AIF3RX1_ENA_SHIFT 0
#define MADERA_AIF3RX1_ENA_WIDTH 1
/* (0x05B9) AIF4_Tx_Enables */
#define MADERA_AIF4TX2_ENA 0x0002
#define MADERA_AIF4TX2_ENA_MASK 0x0002
#define MADERA_AIF4TX2_ENA_SHIFT 1
#define MADERA_AIF4TX2_ENA_WIDTH 1
#define MADERA_AIF4TX1_ENA 0x0001
#define MADERA_AIF4TX1_ENA_MASK 0x0001
#define MADERA_AIF4TX1_ENA_SHIFT 0
#define MADERA_AIF4TX1_ENA_WIDTH 1
/* (0x05BA) AIF4_Rx_Enables */
#define MADERA_AIF4RX2_ENA 0x0002
#define MADERA_AIF4RX2_ENA_MASK 0x0002
#define MADERA_AIF4RX2_ENA_SHIFT 1
#define MADERA_AIF4RX2_ENA_WIDTH 1
#define MADERA_AIF4RX1_ENA 0x0001
#define MADERA_AIF4RX1_ENA_MASK 0x0001
#define MADERA_AIF4RX1_ENA_SHIFT 0
#define MADERA_AIF4RX1_ENA_WIDTH 1
/* (0x05C2) SPD1_TX_Control */
#define MADERA_SPD1_VAL2 0x2000
#define MADERA_SPD1_VAL2_MASK 0x2000
#define MADERA_SPD1_VAL2_SHIFT 13
#define MADERA_SPD1_VAL2_WIDTH 1
#define MADERA_SPD1_VAL1 0x1000
#define MADERA_SPD1_VAL1_MASK 0x1000
#define MADERA_SPD1_VAL1_SHIFT 12
#define MADERA_SPD1_VAL1_WIDTH 1
#define MADERA_SPD1_RATE_MASK 0x00F0
#define MADERA_SPD1_RATE_SHIFT 4
#define MADERA_SPD1_RATE_WIDTH 4
#define MADERA_SPD1_ENA 0x0001
#define MADERA_SPD1_ENA_MASK 0x0001
#define MADERA_SPD1_ENA_SHIFT 0
#define MADERA_SPD1_ENA_WIDTH 1
/* (0x05F5) SLIMbus_RX_Channel_Enable */
#define MADERA_SLIMRX8_ENA 0x0080
#define MADERA_SLIMRX8_ENA_MASK 0x0080
#define MADERA_SLIMRX8_ENA_SHIFT 7
#define MADERA_SLIMRX8_ENA_WIDTH 1
#define MADERA_SLIMRX7_ENA 0x0040
#define MADERA_SLIMRX7_ENA_MASK 0x0040
#define MADERA_SLIMRX7_ENA_SHIFT 6
#define MADERA_SLIMRX7_ENA_WIDTH 1
#define MADERA_SLIMRX6_ENA 0x0020
#define MADERA_SLIMRX6_ENA_MASK 0x0020
#define MADERA_SLIMRX6_ENA_SHIFT 5
#define MADERA_SLIMRX6_ENA_WIDTH 1
#define MADERA_SLIMRX5_ENA 0x0010
#define MADERA_SLIMRX5_ENA_MASK 0x0010
#define MADERA_SLIMRX5_ENA_SHIFT 4
#define MADERA_SLIMRX5_ENA_WIDTH 1
#define MADERA_SLIMRX4_ENA 0x0008
#define MADERA_SLIMRX4_ENA_MASK 0x0008
#define MADERA_SLIMRX4_ENA_SHIFT 3
#define MADERA_SLIMRX4_ENA_WIDTH 1
#define MADERA_SLIMRX3_ENA 0x0004
#define MADERA_SLIMRX3_ENA_MASK 0x0004
#define MADERA_SLIMRX3_ENA_SHIFT 2
#define MADERA_SLIMRX3_ENA_WIDTH 1
#define MADERA_SLIMRX2_ENA 0x0002
#define MADERA_SLIMRX2_ENA_MASK 0x0002
#define MADERA_SLIMRX2_ENA_SHIFT 1
#define MADERA_SLIMRX2_ENA_WIDTH 1
#define MADERA_SLIMRX1_ENA 0x0001
#define MADERA_SLIMRX1_ENA_MASK 0x0001
#define MADERA_SLIMRX1_ENA_SHIFT 0
#define MADERA_SLIMRX1_ENA_WIDTH 1
/* (0x05F6) SLIMbus_TX_Channel_Enable */
#define MADERA_SLIMTX8_ENA 0x0080
#define MADERA_SLIMTX8_ENA_MASK 0x0080
#define MADERA_SLIMTX8_ENA_SHIFT 7
#define MADERA_SLIMTX8_ENA_WIDTH 1
#define MADERA_SLIMTX7_ENA 0x0040
#define MADERA_SLIMTX7_ENA_MASK 0x0040
#define MADERA_SLIMTX7_ENA_SHIFT 6
#define MADERA_SLIMTX7_ENA_WIDTH 1
#define MADERA_SLIMTX6_ENA 0x0020
#define MADERA_SLIMTX6_ENA_MASK 0x0020
#define MADERA_SLIMTX6_ENA_SHIFT 5
#define MADERA_SLIMTX6_ENA_WIDTH 1
#define MADERA_SLIMTX5_ENA 0x0010
#define MADERA_SLIMTX5_ENA_MASK 0x0010
#define MADERA_SLIMTX5_ENA_SHIFT 4
#define MADERA_SLIMTX5_ENA_WIDTH 1
#define MADERA_SLIMTX4_ENA 0x0008
#define MADERA_SLIMTX4_ENA_MASK 0x0008
#define MADERA_SLIMTX4_ENA_SHIFT 3
#define MADERA_SLIMTX4_ENA_WIDTH 1
#define MADERA_SLIMTX3_ENA 0x0004
#define MADERA_SLIMTX3_ENA_MASK 0x0004
#define MADERA_SLIMTX3_ENA_SHIFT 2
#define MADERA_SLIMTX3_ENA_WIDTH 1
#define MADERA_SLIMTX2_ENA 0x0002
#define MADERA_SLIMTX2_ENA_MASK 0x0002
#define MADERA_SLIMTX2_ENA_SHIFT 1
#define MADERA_SLIMTX2_ENA_WIDTH 1
#define MADERA_SLIMTX1_ENA 0x0001
#define MADERA_SLIMTX1_ENA_MASK 0x0001
#define MADERA_SLIMTX1_ENA_SHIFT 0
#define MADERA_SLIMTX1_ENA_WIDTH 1
/* (0x0E10) EQ1_1 */
#define MADERA_EQ1_B1_GAIN_MASK 0xF800
#define MADERA_EQ1_B1_GAIN_SHIFT 11
#define MADERA_EQ1_B1_GAIN_WIDTH 5
#define MADERA_EQ1_B2_GAIN_MASK 0x07C0
#define MADERA_EQ1_B2_GAIN_SHIFT 6
#define MADERA_EQ1_B2_GAIN_WIDTH 5
#define MADERA_EQ1_B3_GAIN_MASK 0x003E
#define MADERA_EQ1_B3_GAIN_SHIFT 1
#define MADERA_EQ1_B3_GAIN_WIDTH 5
#define MADERA_EQ1_ENA 0x0001
#define MADERA_EQ1_ENA_MASK 0x0001
#define MADERA_EQ1_ENA_SHIFT 0
#define MADERA_EQ1_ENA_WIDTH 1
/* (0x0E11) EQ1_2 */
#define MADERA_EQ1_B4_GAIN_MASK 0xF800
#define MADERA_EQ1_B4_GAIN_SHIFT 11
#define MADERA_EQ1_B4_GAIN_WIDTH 5
#define MADERA_EQ1_B5_GAIN_MASK 0x07C0
#define MADERA_EQ1_B5_GAIN_SHIFT 6
#define MADERA_EQ1_B5_GAIN_WIDTH 5
#define MADERA_EQ1_B1_MODE 0x0001
#define MADERA_EQ1_B1_MODE_MASK 0x0001
#define MADERA_EQ1_B1_MODE_SHIFT 0
#define MADERA_EQ1_B1_MODE_WIDTH 1
/* (0x0E26) EQ2_1 */
#define MADERA_EQ2_B1_GAIN_MASK 0xF800
#define MADERA_EQ2_B1_GAIN_SHIFT 11
#define MADERA_EQ2_B1_GAIN_WIDTH 5
#define MADERA_EQ2_B2_GAIN_MASK 0x07C0
#define MADERA_EQ2_B2_GAIN_SHIFT 6
#define MADERA_EQ2_B2_GAIN_WIDTH 5
#define MADERA_EQ2_B3_GAIN_MASK 0x003E
#define MADERA_EQ2_B3_GAIN_SHIFT 1
#define MADERA_EQ2_B3_GAIN_WIDTH 5
#define MADERA_EQ2_ENA 0x0001
#define MADERA_EQ2_ENA_MASK 0x0001
#define MADERA_EQ2_ENA_SHIFT 0
#define MADERA_EQ2_ENA_WIDTH 1
/* (0x0E27) EQ2_2 */
#define MADERA_EQ2_B4_GAIN_MASK 0xF800
#define MADERA_EQ2_B4_GAIN_SHIFT 11
#define MADERA_EQ2_B4_GAIN_WIDTH 5
#define MADERA_EQ2_B5_GAIN_MASK 0x07C0
#define MADERA_EQ2_B5_GAIN_SHIFT 6
#define MADERA_EQ2_B5_GAIN_WIDTH 5
#define MADERA_EQ2_B1_MODE 0x0001
#define MADERA_EQ2_B1_MODE_MASK 0x0001
#define MADERA_EQ2_B1_MODE_SHIFT 0
#define MADERA_EQ2_B1_MODE_WIDTH 1
/* (0x0E3C) EQ3_1 */
#define MADERA_EQ3_B1_GAIN_MASK 0xF800
#define MADERA_EQ3_B1_GAIN_SHIFT 11
#define MADERA_EQ3_B1_GAIN_WIDTH 5
#define MADERA_EQ3_B2_GAIN_MASK 0x07C0
#define MADERA_EQ3_B2_GAIN_SHIFT 6
#define MADERA_EQ3_B2_GAIN_WIDTH 5
#define MADERA_EQ3_B3_GAIN_MASK 0x003E
#define MADERA_EQ3_B3_GAIN_SHIFT 1
#define MADERA_EQ3_B3_GAIN_WIDTH 5
#define MADERA_EQ3_ENA 0x0001
#define MADERA_EQ3_ENA_MASK 0x0001
#define MADERA_EQ3_ENA_SHIFT 0
#define MADERA_EQ3_ENA_WIDTH 1
/* (0x0E3D) EQ3_2 */
#define MADERA_EQ3_B4_GAIN_MASK 0xF800
#define MADERA_EQ3_B4_GAIN_SHIFT 11
#define MADERA_EQ3_B4_GAIN_WIDTH 5
#define MADERA_EQ3_B5_GAIN_MASK 0x07C0
#define MADERA_EQ3_B5_GAIN_SHIFT 6
#define MADERA_EQ3_B5_GAIN_WIDTH 5
#define MADERA_EQ3_B1_MODE 0x0001
#define MADERA_EQ3_B1_MODE_MASK 0x0001
#define MADERA_EQ3_B1_MODE_SHIFT 0
#define MADERA_EQ3_B1_MODE_WIDTH 1
/* (0x0E52) EQ4_1 */
#define MADERA_EQ4_B1_GAIN_MASK 0xF800
#define MADERA_EQ4_B1_GAIN_SHIFT 11
#define MADERA_EQ4_B1_GAIN_WIDTH 5
#define MADERA_EQ4_B2_GAIN_MASK 0x07C0
#define MADERA_EQ4_B2_GAIN_SHIFT 6
#define MADERA_EQ4_B2_GAIN_WIDTH 5
#define MADERA_EQ4_B3_GAIN_MASK 0x003E
#define MADERA_EQ4_B3_GAIN_SHIFT 1
#define MADERA_EQ4_B3_GAIN_WIDTH 5
#define MADERA_EQ4_ENA 0x0001
#define MADERA_EQ4_ENA_MASK 0x0001
#define MADERA_EQ4_ENA_SHIFT 0
#define MADERA_EQ4_ENA_WIDTH 1
/* (0x0E53) EQ4_2 */
#define MADERA_EQ4_B4_GAIN_MASK 0xF800
#define MADERA_EQ4_B4_GAIN_SHIFT 11
#define MADERA_EQ4_B4_GAIN_WIDTH 5
#define MADERA_EQ4_B5_GAIN_MASK 0x07C0
#define MADERA_EQ4_B5_GAIN_SHIFT 6
#define MADERA_EQ4_B5_GAIN_WIDTH 5
#define MADERA_EQ4_B1_MODE 0x0001
#define MADERA_EQ4_B1_MODE_MASK 0x0001
#define MADERA_EQ4_B1_MODE_SHIFT 0
#define MADERA_EQ4_B1_MODE_WIDTH 1
/* (0x0E80) DRC1_ctrl1 */
#define MADERA_DRC1L_ENA 0x0002
#define MADERA_DRC1L_ENA_MASK 0x0002
#define MADERA_DRC1L_ENA_SHIFT 1
#define MADERA_DRC1L_ENA_WIDTH 1
#define MADERA_DRC1R_ENA 0x0001
#define MADERA_DRC1R_ENA_MASK 0x0001
#define MADERA_DRC1R_ENA_SHIFT 0
#define MADERA_DRC1R_ENA_WIDTH 1
/* (0x0E88) DRC2_ctrl1 */
#define MADERA_DRC2L_ENA 0x0002
#define MADERA_DRC2L_ENA_MASK 0x0002
#define MADERA_DRC2L_ENA_SHIFT 1
#define MADERA_DRC2L_ENA_WIDTH 1
#define MADERA_DRC2R_ENA 0x0001
#define MADERA_DRC2R_ENA_MASK 0x0001
#define MADERA_DRC2R_ENA_SHIFT 0
#define MADERA_DRC2R_ENA_WIDTH 1
/* (0x0EC0) HPLPF1_1 */
#define MADERA_LHPF1_MODE 0x0002
#define MADERA_LHPF1_MODE_MASK 0x0002
#define MADERA_LHPF1_MODE_SHIFT 1
#define MADERA_LHPF1_MODE_WIDTH 1
#define MADERA_LHPF1_ENA 0x0001
#define MADERA_LHPF1_ENA_MASK 0x0001
#define MADERA_LHPF1_ENA_SHIFT 0
#define MADERA_LHPF1_ENA_WIDTH 1
/* (0x0EC1) HPLPF1_2 */
#define MADERA_LHPF1_COEFF_MASK 0xFFFF
#define MADERA_LHPF1_COEFF_SHIFT 0
#define MADERA_LHPF1_COEFF_WIDTH 16
/* (0x0EC4) HPLPF2_1 */
#define MADERA_LHPF2_MODE 0x0002
#define MADERA_LHPF2_MODE_MASK 0x0002
#define MADERA_LHPF2_MODE_SHIFT 1
#define MADERA_LHPF2_MODE_WIDTH 1
#define MADERA_LHPF2_ENA 0x0001
#define MADERA_LHPF2_ENA_MASK 0x0001
#define MADERA_LHPF2_ENA_SHIFT 0
#define MADERA_LHPF2_ENA_WIDTH 1
/* (0x0EC5) HPLPF2_2 */
#define MADERA_LHPF2_COEFF_MASK 0xFFFF
#define MADERA_LHPF2_COEFF_SHIFT 0
#define MADERA_LHPF2_COEFF_WIDTH 16
/* (0x0EC8) HPLPF3_1 */
#define MADERA_LHPF3_MODE 0x0002
#define MADERA_LHPF3_MODE_MASK 0x0002
#define MADERA_LHPF3_MODE_SHIFT 1
#define MADERA_LHPF3_MODE_WIDTH 1
#define MADERA_LHPF3_ENA 0x0001
#define MADERA_LHPF3_ENA_MASK 0x0001
#define MADERA_LHPF3_ENA_SHIFT 0
#define MADERA_LHPF3_ENA_WIDTH 1
/* (0x0EC9) HPLPF3_2 */
#define MADERA_LHPF3_COEFF_MASK 0xFFFF
#define MADERA_LHPF3_COEFF_SHIFT 0
#define MADERA_LHPF3_COEFF_WIDTH 16
/* (0x0ECC) HPLPF4_1 */
#define MADERA_LHPF4_MODE 0x0002
#define MADERA_LHPF4_MODE_MASK 0x0002
#define MADERA_LHPF4_MODE_SHIFT 1
#define MADERA_LHPF4_MODE_WIDTH 1
#define MADERA_LHPF4_ENA 0x0001
#define MADERA_LHPF4_ENA_MASK 0x0001
#define MADERA_LHPF4_ENA_SHIFT 0
#define MADERA_LHPF4_ENA_WIDTH 1
/* (0x0ECD) HPLPF4_2 */
#define MADERA_LHPF4_COEFF_MASK 0xFFFF
#define MADERA_LHPF4_COEFF_SHIFT 0
#define MADERA_LHPF4_COEFF_WIDTH 16
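/*
 * Illustrative sketch only: EQ1_1 (0x0E10) packs three 5-bit band-gain
 * fields plus the enable bit into one 16-bit word, so a complete register
 * value can be composed from the field macros above. The raw gain codes
 * are caller-supplied; their encoding is not defined in this header.
 */
static inline unsigned int madera_example_eq1_1(unsigned int b1,
						unsigned int b2,
						unsigned int b3)
{
	return ((b1 << MADERA_EQ1_B1_GAIN_SHIFT) & MADERA_EQ1_B1_GAIN_MASK) |
	       ((b2 << MADERA_EQ1_B2_GAIN_SHIFT) & MADERA_EQ1_B2_GAIN_MASK) |
	       ((b3 << MADERA_EQ1_B3_GAIN_SHIFT) & MADERA_EQ1_B3_GAIN_MASK) |
	       MADERA_EQ1_ENA;
}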
/* (0x0ED0) ASRC2_ENABLE */
#define MADERA_ASRC2_IN2L_ENA 0x0008
#define MADERA_ASRC2_IN2L_ENA_MASK 0x0008
#define MADERA_ASRC2_IN2L_ENA_SHIFT 3
#define MADERA_ASRC2_IN2L_ENA_WIDTH 1
#define MADERA_ASRC2_IN2R_ENA 0x0004
#define MADERA_ASRC2_IN2R_ENA_MASK 0x0004
#define MADERA_ASRC2_IN2R_ENA_SHIFT 2
#define MADERA_ASRC2_IN2R_ENA_WIDTH 1
#define MADERA_ASRC2_IN1L_ENA 0x0002
#define MADERA_ASRC2_IN1L_ENA_MASK 0x0002
#define MADERA_ASRC2_IN1L_ENA_SHIFT 1
#define MADERA_ASRC2_IN1L_ENA_WIDTH 1
#define MADERA_ASRC2_IN1R_ENA 0x0001
#define MADERA_ASRC2_IN1R_ENA_MASK 0x0001
#define MADERA_ASRC2_IN1R_ENA_SHIFT 0
#define MADERA_ASRC2_IN1R_ENA_WIDTH 1
/* (0x0ED2) ASRC2_RATE1 */
#define MADERA_ASRC2_RATE1_MASK 0xF800
#define MADERA_ASRC2_RATE1_SHIFT 11
#define MADERA_ASRC2_RATE1_WIDTH 5
/* (0x0ED3) ASRC2_RATE2 */
#define MADERA_ASRC2_RATE2_MASK 0xF800
#define MADERA_ASRC2_RATE2_SHIFT 11
#define MADERA_ASRC2_RATE2_WIDTH 5
/* (0x0EE0) ASRC1_ENABLE */
#define MADERA_ASRC1_IN2L_ENA 0x0008
#define MADERA_ASRC1_IN2L_ENA_MASK 0x0008
#define MADERA_ASRC1_IN2L_ENA_SHIFT 3
#define MADERA_ASRC1_IN2L_ENA_WIDTH 1
#define MADERA_ASRC1_IN2R_ENA 0x0004
#define MADERA_ASRC1_IN2R_ENA_MASK 0x0004
#define MADERA_ASRC1_IN2R_ENA_SHIFT 2
#define MADERA_ASRC1_IN2R_ENA_WIDTH 1
#define MADERA_ASRC1_IN1L_ENA 0x0002
#define MADERA_ASRC1_IN1L_ENA_MASK 0x0002
#define MADERA_ASRC1_IN1L_ENA_SHIFT 1
#define MADERA_ASRC1_IN1L_ENA_WIDTH 1
#define MADERA_ASRC1_IN1R_ENA 0x0001
#define MADERA_ASRC1_IN1R_ENA_MASK 0x0001
#define MADERA_ASRC1_IN1R_ENA_SHIFT 0
#define MADERA_ASRC1_IN1R_ENA_WIDTH 1
/* (0x0EE2) ASRC1_RATE1 */
#define MADERA_ASRC1_RATE1_MASK 0xF800
#define MADERA_ASRC1_RATE1_SHIFT 11
#define MADERA_ASRC1_RATE1_WIDTH 5
/* (0x0EE3) ASRC1_RATE2 */
#define MADERA_ASRC1_RATE2_MASK 0xF800
#define MADERA_ASRC1_RATE2_SHIFT 11
#define MADERA_ASRC1_RATE2_WIDTH 5
/* (0x0EF0) - ISRC1 CTRL 1 */
#define MADERA_ISRC1_FSH_MASK 0xF800
#define MADERA_ISRC1_FSH_SHIFT 11
#define MADERA_ISRC1_FSH_WIDTH 5
#define MADERA_ISRC1_CLK_SEL_MASK 0x0700
#define MADERA_ISRC1_CLK_SEL_SHIFT 8
#define MADERA_ISRC1_CLK_SEL_WIDTH 3
/* (0x0EF1) ISRC1_CTRL_2 */
#define MADERA_ISRC1_FSL_MASK 0xF800
#define MADERA_ISRC1_FSL_SHIFT 11
#define MADERA_ISRC1_FSL_WIDTH 5
/* (0x0EF2) ISRC1_CTRL_3 */
#define MADERA_ISRC1_INT1_ENA 0x8000
#define MADERA_ISRC1_INT1_ENA_MASK 0x8000
#define MADERA_ISRC1_INT1_ENA_SHIFT 15
#define MADERA_ISRC1_INT1_ENA_WIDTH 1
#define MADERA_ISRC1_INT2_ENA 0x4000
#define MADERA_ISRC1_INT2_ENA_MASK 0x4000
#define MADERA_ISRC1_INT2_ENA_SHIFT 14
#define MADERA_ISRC1_INT2_ENA_WIDTH 1
#define MADERA_ISRC1_INT3_ENA 0x2000
#define MADERA_ISRC1_INT3_ENA_MASK 0x2000
#define MADERA_ISRC1_INT3_ENA_SHIFT 13
#define MADERA_ISRC1_INT3_ENA_WIDTH 1
#define MADERA_ISRC1_INT4_ENA 0x1000
#define MADERA_ISRC1_INT4_ENA_MASK 0x1000
#define MADERA_ISRC1_INT4_ENA_SHIFT 12
#define MADERA_ISRC1_INT4_ENA_WIDTH 1
#define MADERA_ISRC1_DEC1_ENA 0x0200
#define MADERA_ISRC1_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC1_DEC1_ENA_SHIFT 9
#define MADERA_ISRC1_DEC1_ENA_WIDTH 1
#define MADERA_ISRC1_DEC2_ENA 0x0100
#define MADERA_ISRC1_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC1_DEC2_ENA_SHIFT 8
#define MADERA_ISRC1_DEC2_ENA_WIDTH 1
#define MADERA_ISRC1_DEC3_ENA 0x0080
#define MADERA_ISRC1_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC1_DEC3_ENA_SHIFT 7
#define MADERA_ISRC1_DEC3_ENA_WIDTH 1
#define MADERA_ISRC1_DEC4_ENA 0x0040
#define MADERA_ISRC1_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC1_DEC4_ENA_SHIFT 6
#define MADERA_ISRC1_DEC4_ENA_WIDTH 1
#define MADERA_ISRC1_NOTCH_ENA 0x0001
#define MADERA_ISRC1_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC1_NOTCH_ENA_SHIFT 0
#define MADERA_ISRC1_NOTCH_ENA_WIDTH 1
/* (0x0EF3) ISRC2_CTRL_1 */
#define MADERA_ISRC2_FSH_MASK 0xF800
#define MADERA_ISRC2_FSH_SHIFT 11
#define MADERA_ISRC2_FSH_WIDTH 5
#define MADERA_ISRC2_CLK_SEL_MASK 0x0700
#define MADERA_ISRC2_CLK_SEL_SHIFT 8
#define MADERA_ISRC2_CLK_SEL_WIDTH 3
/* (0x0EF4) ISRC2_CTRL_2 */
#define MADERA_ISRC2_FSL_MASK 0xF800
#define MADERA_ISRC2_FSL_SHIFT 11
#define MADERA_ISRC2_FSL_WIDTH 5
/* (0x0EF5) ISRC2_CTRL_3 */
#define MADERA_ISRC2_INT1_ENA 0x8000
#define MADERA_ISRC2_INT1_ENA_MASK 0x8000
#define MADERA_ISRC2_INT1_ENA_SHIFT 15
#define MADERA_ISRC2_INT1_ENA_WIDTH 1
#define MADERA_ISRC2_INT2_ENA 0x4000
#define MADERA_ISRC2_INT2_ENA_MASK 0x4000
#define MADERA_ISRC2_INT2_ENA_SHIFT 14
#define MADERA_ISRC2_INT2_ENA_WIDTH 1
#define MADERA_ISRC2_INT3_ENA 0x2000
#define MADERA_ISRC2_INT3_ENA_MASK 0x2000
#define MADERA_ISRC2_INT3_ENA_SHIFT 13
#define MADERA_ISRC2_INT3_ENA_WIDTH 1
#define MADERA_ISRC2_INT4_ENA 0x1000
#define MADERA_ISRC2_INT4_ENA_MASK 0x1000
#define MADERA_ISRC2_INT4_ENA_SHIFT 12
#define MADERA_ISRC2_INT4_ENA_WIDTH 1
#define MADERA_ISRC2_DEC1_ENA 0x0200
#define MADERA_ISRC2_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC2_DEC1_ENA_SHIFT 9
#define MADERA_ISRC2_DEC1_ENA_WIDTH 1
#define MADERA_ISRC2_DEC2_ENA 0x0100
#define MADERA_ISRC2_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC2_DEC2_ENA_SHIFT 8
#define MADERA_ISRC2_DEC2_ENA_WIDTH 1
#define MADERA_ISRC2_DEC3_ENA 0x0080
#define MADERA_ISRC2_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC2_DEC3_ENA_SHIFT 7
#define MADERA_ISRC2_DEC3_ENA_WIDTH 1
#define MADERA_ISRC2_DEC4_ENA 0x0040
#define MADERA_ISRC2_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC2_DEC4_ENA_SHIFT 6
#define MADERA_ISRC2_DEC4_ENA_WIDTH 1
#define MADERA_ISRC2_NOTCH_ENA 0x0001
#define MADERA_ISRC2_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC2_NOTCH_ENA_SHIFT 0
#define MADERA_ISRC2_NOTCH_ENA_WIDTH 1
/* (0x0EF6) ISRC3_CTRL_1 */
#define MADERA_ISRC3_FSH_MASK 0xF800
#define MADERA_ISRC3_FSH_SHIFT 11
#define MADERA_ISRC3_FSH_WIDTH 5
#define MADERA_ISRC3_CLK_SEL_MASK 0x0700
#define MADERA_ISRC3_CLK_SEL_SHIFT 8
#define MADERA_ISRC3_CLK_SEL_WIDTH 3
/* (0x0EF7) ISRC3_CTRL_2 */
#define MADERA_ISRC3_FSL_MASK 0xF800
#define MADERA_ISRC3_FSL_SHIFT 11
#define MADERA_ISRC3_FSL_WIDTH 5
/* (0x0EF8) ISRC3_CTRL_3 */
#define MADERA_ISRC3_INT1_ENA 0x8000
#define MADERA_ISRC3_INT1_ENA_MASK 0x8000
#define MADERA_ISRC3_INT1_ENA_SHIFT 15
#define MADERA_ISRC3_INT1_ENA_WIDTH 1
#define MADERA_ISRC3_INT2_ENA 0x4000
#define MADERA_ISRC3_INT2_ENA_MASK 0x4000
#define MADERA_ISRC3_INT2_ENA_SHIFT 14
#define MADERA_ISRC3_INT2_ENA_WIDTH 1
#define MADERA_ISRC3_INT3_ENA 0x2000
#define MADERA_ISRC3_INT3_ENA_MASK 0x2000
#define MADERA_ISRC3_INT3_ENA_SHIFT 13
#define MADERA_ISRC3_INT3_ENA_WIDTH 1
#define MADERA_ISRC3_INT4_ENA 0x1000
#define MADERA_ISRC3_INT4_ENA_MASK 0x1000
#define MADERA_ISRC3_INT4_ENA_SHIFT 12
#define MADERA_ISRC3_INT4_ENA_WIDTH 1
#define MADERA_ISRC3_DEC1_ENA 0x0200
#define MADERA_ISRC3_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC3_DEC1_ENA_SHIFT 9
#define MADERA_ISRC3_DEC1_ENA_WIDTH 1
#define MADERA_ISRC3_DEC2_ENA 0x0100
#define MADERA_ISRC3_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC3_DEC2_ENA_SHIFT 8
#define MADERA_ISRC3_DEC2_ENA_WIDTH 1
#define MADERA_ISRC3_DEC3_ENA 0x0080
#define MADERA_ISRC3_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC3_DEC3_ENA_SHIFT 7
#define MADERA_ISRC3_DEC3_ENA_WIDTH 1
#define MADERA_ISRC3_DEC4_ENA 0x0040
#define MADERA_ISRC3_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC3_DEC4_ENA_SHIFT 6
#define MADERA_ISRC3_DEC4_ENA_WIDTH 1
#define MADERA_ISRC3_NOTCH_ENA 0x0001
#define MADERA_ISRC3_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC3_NOTCH_ENA_SHIFT 0
#define MADERA_ISRC3_NOTCH_ENA_WIDTH 1
/* (0x0EF9) ISRC4_CTRL_1 */
#define MADERA_ISRC4_FSH_MASK 0xF800
#define MADERA_ISRC4_FSH_SHIFT 11
#define MADERA_ISRC4_FSH_WIDTH 5
#define MADERA_ISRC4_CLK_SEL_MASK 0x0700
#define MADERA_ISRC4_CLK_SEL_SHIFT 8
#define MADERA_ISRC4_CLK_SEL_WIDTH 3
/* (0x0EFA) ISRC4_CTRL_2 */
#define MADERA_ISRC4_FSL_MASK 0xF800
#define MADERA_ISRC4_FSL_SHIFT 11
#define MADERA_ISRC4_FSL_WIDTH 5
/* (0x0EFB) ISRC4_CTRL_3 */
#define MADERA_ISRC4_INT1_ENA 0x8000
#define MADERA_ISRC4_INT1_ENA_MASK 0x8000
#define MADERA_ISRC4_INT1_ENA_SHIFT 15
#define MADERA_ISRC4_INT1_ENA_WIDTH 1
#define MADERA_ISRC4_INT2_ENA 0x4000
#define MADERA_ISRC4_INT2_ENA_MASK 0x4000
#define MADERA_ISRC4_INT2_ENA_SHIFT 14
#define MADERA_ISRC4_INT2_ENA_WIDTH 1
#define MADERA_ISRC4_INT3_ENA 0x2000
#define MADERA_ISRC4_INT3_ENA_MASK 0x2000
#define MADERA_ISRC4_INT3_ENA_SHIFT 13
#define MADERA_ISRC4_INT3_ENA_WIDTH 1
#define MADERA_ISRC4_INT4_ENA 0x1000
#define MADERA_ISRC4_INT4_ENA_MASK 0x1000
#define MADERA_ISRC4_INT4_ENA_SHIFT 12
#define MADERA_ISRC4_INT4_ENA_WIDTH 1
#define MADERA_ISRC4_DEC1_ENA 0x0200
#define MADERA_ISRC4_DEC1_ENA_MASK 0x0200
#define MADERA_ISRC4_DEC1_ENA_SHIFT 9
#define MADERA_ISRC4_DEC1_ENA_WIDTH 1
#define MADERA_ISRC4_DEC2_ENA 0x0100
#define MADERA_ISRC4_DEC2_ENA_MASK 0x0100
#define MADERA_ISRC4_DEC2_ENA_SHIFT 8
#define MADERA_ISRC4_DEC2_ENA_WIDTH 1
#define MADERA_ISRC4_DEC3_ENA 0x0080
#define MADERA_ISRC4_DEC3_ENA_MASK 0x0080
#define MADERA_ISRC4_DEC3_ENA_SHIFT 7
#define MADERA_ISRC4_DEC3_ENA_WIDTH 1
#define MADERA_ISRC4_DEC4_ENA 0x0040
#define MADERA_ISRC4_DEC4_ENA_MASK 0x0040
#define MADERA_ISRC4_DEC4_ENA_SHIFT 6
#define MADERA_ISRC4_DEC4_ENA_WIDTH 1
#define MADERA_ISRC4_NOTCH_ENA 0x0001
#define MADERA_ISRC4_NOTCH_ENA_MASK 0x0001
#define MADERA_ISRC4_NOTCH_ENA_SHIFT 0
#define MADERA_ISRC4_NOTCH_ENA_WIDTH 1
/* (0x0F00) Clock_Control */
#define MADERA_EXT_NG_SEL_CLR 0x0080
#define MADERA_EXT_NG_SEL_CLR_MASK 0x0080
#define MADERA_EXT_NG_SEL_CLR_SHIFT 7
#define MADERA_EXT_NG_SEL_CLR_WIDTH 1
#define MADERA_EXT_NG_SEL_SET 0x0040
#define MADERA_EXT_NG_SEL_SET_MASK 0x0040
#define MADERA_EXT_NG_SEL_SET_SHIFT 6
#define MADERA_EXT_NG_SEL_SET_WIDTH 1
#define MADERA_CLK_R_ENA_CLR 0x0020
#define MADERA_CLK_R_ENA_CLR_MASK 0x0020
#define MADERA_CLK_R_ENA_CLR_SHIFT 5
#define MADERA_CLK_R_ENA_CLR_WIDTH 1
#define MADERA_CLK_R_ENA_SET 0x0010
#define MADERA_CLK_R_ENA_SET_MASK 0x0010
#define MADERA_CLK_R_ENA_SET_SHIFT 4
#define MADERA_CLK_R_ENA_SET_WIDTH 1
#define MADERA_CLK_NG_ENA_CLR 0x0008
#define MADERA_CLK_NG_ENA_CLR_MASK 0x0008
#define MADERA_CLK_NG_ENA_CLR_SHIFT 3
#define MADERA_CLK_NG_ENA_CLR_WIDTH 1
#define MADERA_CLK_NG_ENA_SET 0x0004
#define MADERA_CLK_NG_ENA_SET_MASK 0x0004
#define MADERA_CLK_NG_ENA_SET_SHIFT 2
#define MADERA_CLK_NG_ENA_SET_WIDTH 1
#define MADERA_CLK_L_ENA_CLR 0x0002
#define MADERA_CLK_L_ENA_CLR_MASK 0x0002
#define MADERA_CLK_L_ENA_CLR_SHIFT 1
#define MADERA_CLK_L_ENA_CLR_WIDTH 1
#define MADERA_CLK_L_ENA_SET 0x0001
#define MADERA_CLK_L_ENA_SET_MASK 0x0001
#define MADERA_CLK_L_ENA_SET_SHIFT 0
#define MADERA_CLK_L_ENA_SET_WIDTH 1
/* (0x0F01) ANC_SRC */
#define MADERA_IN_RXANCR_SEL_MASK 0x0070
#define MADERA_IN_RXANCR_SEL_SHIFT 4
#define MADERA_IN_RXANCR_SEL_WIDTH 3
#define MADERA_IN_RXANCL_SEL_MASK 0x0007
#define MADERA_IN_RXANCL_SEL_SHIFT 0
#define MADERA_IN_RXANCL_SEL_WIDTH 3
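/*
 * Illustrative sketch only: the Clock_Control (0x0F00) fields come in
 * _SET/_CLR pairs, which suggests write-one-to-set / write-one-to-clear
 * strobes rather than ordinary read-modify-write state bits. That
 * semantic is an assumption here, as is the accessor.
 */
static inline void madera_example_clk_l_enable(
		void (*madera_write16)(unsigned int reg, unsigned int val),
		int enable)
{
	madera_write16(0x0F00, enable ? MADERA_CLK_L_ENA_SET
				      : MADERA_CLK_L_ENA_CLR);
}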
/* (0x0F17) FCL_ADC_reformatter_control */
#define MADERA_FCL_MIC_MODE_SEL 0x000C
#define MADERA_FCL_MIC_MODE_SEL_SHIFT 2
#define MADERA_FCL_MIC_MODE_SEL_WIDTH 2
/* (0x0F73) FCR_ADC_reformatter_control */
#define MADERA_FCR_MIC_MODE_SEL 0x000C
#define MADERA_FCR_MIC_MODE_SEL_SHIFT 2
#define MADERA_FCR_MIC_MODE_SEL_WIDTH 2
/* (0x10C0) AUXPDM1_CTRL_0 */
#define MADERA_AUXPDM1_SRC_MASK 0x0F00
#define MADERA_AUXPDM1_SRC_SHIFT 8
#define MADERA_AUXPDM1_SRC_WIDTH 4
#define MADERA_AUXPDM1_TXEDGE_MASK 0x0010
#define MADERA_AUXPDM1_TXEDGE_SHIFT 4
#define MADERA_AUXPDM1_TXEDGE_WIDTH 1
#define MADERA_AUXPDM1_MSTR_MASK 0x0008
#define MADERA_AUXPDM1_MSTR_SHIFT 3
#define MADERA_AUXPDM1_MSTR_WIDTH 1
#define MADERA_AUXPDM1_ENABLE_MASK 0x0001
#define MADERA_AUXPDM1_ENABLE_SHIFT 0
#define MADERA_AUXPDM1_ENABLE_WIDTH 1
/* (0x10C1) AUXPDM1_CTRL_1 */
#define MADERA_AUXPDM1_CLK_FREQ_MASK 0xC000
#define MADERA_AUXPDM1_CLK_FREQ_SHIFT 14
#define MADERA_AUXPDM1_CLK_FREQ_WIDTH 2
/* (0x1480) DFC1_CTRL_W0 */
#define MADERA_DFC1_RATE_MASK 0x007C
#define MADERA_DFC1_RATE_SHIFT 2
#define MADERA_DFC1_RATE_WIDTH 5
#define MADERA_DFC1_DITH_ENA 0x0002
#define MADERA_DFC1_DITH_ENA_MASK 0x0002
#define MADERA_DFC1_DITH_ENA_SHIFT 1
#define MADERA_DFC1_DITH_ENA_WIDTH 1
#define MADERA_DFC1_ENA 0x0001
#define MADERA_DFC1_ENA_MASK 0x0001
#define MADERA_DFC1_ENA_SHIFT 0
#define MADERA_DFC1_ENA_WIDTH 1
/* (0x1482) DFC1_RX_W0 */
#define MADERA_DFC1_RX_DATA_WIDTH_MASK 0x1F00
#define MADERA_DFC1_RX_DATA_WIDTH_SHIFT 8
#define MADERA_DFC1_RX_DATA_WIDTH_WIDTH 5
#define MADERA_DFC1_RX_DATA_TYPE_MASK 0x0007
#define MADERA_DFC1_RX_DATA_TYPE_SHIFT 0
#define MADERA_DFC1_RX_DATA_TYPE_WIDTH 3
/* (0x1484) DFC1_TX_W0 */
#define MADERA_DFC1_TX_DATA_WIDTH_MASK 0x1F00
#define MADERA_DFC1_TX_DATA_WIDTH_SHIFT 8
#define MADERA_DFC1_TX_DATA_WIDTH_WIDTH 5
#define MADERA_DFC1_TX_DATA_TYPE_MASK 0x0007
#define MADERA_DFC1_TX_DATA_TYPE_SHIFT 0
#define MADERA_DFC1_TX_DATA_TYPE_WIDTH 3
/* (0x1600) ADSP2_IRQ0 */
#define MADERA_DSP_IRQ2 0x0002
#define MADERA_DSP_IRQ1 0x0001
/* (0x1601) ADSP2_IRQ1 */
#define MADERA_DSP_IRQ4 0x0002
#define MADERA_DSP_IRQ3 0x0001
/* (0x1602) ADSP2_IRQ2 */
#define MADERA_DSP_IRQ6 0x0002
#define MADERA_DSP_IRQ5 0x0001
/* (0x1603) ADSP2_IRQ3 */
#define MADERA_DSP_IRQ8 0x0002
#define MADERA_DSP_IRQ7 0x0001
/* (0x1604) ADSP2_IRQ4 */
#define MADERA_DSP_IRQ10 0x0002
#define MADERA_DSP_IRQ9 0x0001
/* (0x1605) ADSP2_IRQ5 */
#define MADERA_DSP_IRQ12 0x0002
#define MADERA_DSP_IRQ11 0x0001
/* (0x1606) ADSP2_IRQ6 */
#define MADERA_DSP_IRQ14 0x0002
#define MADERA_DSP_IRQ13 0x0001
/* (0x1607) ADSP2_IRQ7 */
#define MADERA_DSP_IRQ16 0x0002
#define MADERA_DSP_IRQ15 0x0001
/* (0x1700) GPIO1_CTRL_1 */
#define MADERA_GP1_LVL 0x8000
#define MADERA_GP1_LVL_MASK 0x8000
#define MADERA_GP1_LVL_SHIFT 15
#define MADERA_GP1_LVL_WIDTH 1
#define MADERA_GP1_OP_CFG 0x4000
#define MADERA_GP1_OP_CFG_MASK 0x4000
#define MADERA_GP1_OP_CFG_SHIFT 14
#define MADERA_GP1_OP_CFG_WIDTH 1
#define MADERA_GP1_DB 0x2000
#define MADERA_GP1_DB_MASK 0x2000
#define MADERA_GP1_DB_SHIFT 13
#define MADERA_GP1_DB_WIDTH 1
#define MADERA_GP1_POL 0x1000
#define MADERA_GP1_POL_MASK 0x1000
#define MADERA_GP1_POL_SHIFT 12
#define MADERA_GP1_POL_WIDTH 1
#define MADERA_GP1_IP_CFG 0x0800
#define MADERA_GP1_IP_CFG_MASK 0x0800
#define MADERA_GP1_IP_CFG_SHIFT 11
#define MADERA_GP1_IP_CFG_WIDTH 1
#define MADERA_GP1_FN_MASK 0x03FF
#define MADERA_GP1_FN_SHIFT 0
#define MADERA_GP1_FN_WIDTH 10
/* (0x1701) GPIO1_CTRL_2 */
#define MADERA_GP1_DIR 0x8000
#define MADERA_GP1_DIR_MASK 0x8000
#define MADERA_GP1_DIR_SHIFT 15
#define MADERA_GP1_DIR_WIDTH 1
#define MADERA_GP1_PU 0x4000
#define MADERA_GP1_PU_MASK 0x4000
#define MADERA_GP1_PU_SHIFT 14
#define MADERA_GP1_PU_WIDTH 1
#define MADERA_GP1_PD 0x2000
#define MADERA_GP1_PD_MASK 0x2000
#define MADERA_GP1_PD_SHIFT 13
#define MADERA_GP1_PD_WIDTH 1
#define MADERA_GP1_DRV_STR_MASK 0x1800
#define MADERA_GP1_DRV_STR_SHIFT 11
#define MADERA_GP1_DRV_STR_WIDTH 2
/* (0x1800) IRQ1_Status_1 */
#define MADERA_CTRLIF_ERR_EINT1 0x1000
#define MADERA_CTRLIF_ERR_EINT1_MASK 0x1000
#define MADERA_CTRLIF_ERR_EINT1_SHIFT 12
#define MADERA_CTRLIF_ERR_EINT1_WIDTH 1
#define MADERA_SYSCLK_FAIL_EINT1 0x0200
#define MADERA_SYSCLK_FAIL_EINT1_MASK 0x0200
#define MADERA_SYSCLK_FAIL_EINT1_SHIFT 9
#define MADERA_SYSCLK_FAIL_EINT1_WIDTH 1
#define MADERA_CLOCK_DETECT_EINT1 0x0100
#define MADERA_CLOCK_DETECT_EINT1_MASK 0x0100
#define MADERA_CLOCK_DETECT_EINT1_SHIFT 8
#define MADERA_CLOCK_DETECT_EINT1_WIDTH 1
#define MADERA_BOOT_DONE_EINT1 0x0080
#define MADERA_BOOT_DONE_EINT1_MASK 0x0080
#define MADERA_BOOT_DONE_EINT1_SHIFT 7
#define MADERA_BOOT_DONE_EINT1_WIDTH 1
/* (0x1801) IRQ1_Status_2 */
#define MADERA_FLLAO_LOCK_EINT1 0x0800
#define MADERA_FLLAO_LOCK_EINT1_MASK 0x0800
#define MADERA_FLLAO_LOCK_EINT1_SHIFT 11
#define MADERA_FLLAO_LOCK_EINT1_WIDTH 1
#define MADERA_FLL3_LOCK_EINT1 0x0400
#define MADERA_FLL3_LOCK_EINT1_MASK 0x0400
#define MADERA_FLL3_LOCK_EINT1_SHIFT 10
#define MADERA_FLL3_LOCK_EINT1_WIDTH 1
#define MADERA_FLL2_LOCK_EINT1 0x0200
#define MADERA_FLL2_LOCK_EINT1_MASK 0x0200
#define MADERA_FLL2_LOCK_EINT1_SHIFT 9
#define MADERA_FLL2_LOCK_EINT1_WIDTH 1
#define MADERA_FLL1_LOCK_EINT1 0x0100
#define MADERA_FLL1_LOCK_EINT1_MASK 0x0100
#define MADERA_FLL1_LOCK_EINT1_SHIFT 8
#define MADERA_FLL1_LOCK_EINT1_WIDTH 1
/* (0x1805) IRQ1_Status_6 */
#define MADERA_MICDET2_EINT1 0x0200
#define MADERA_MICDET2_EINT1_MASK 0x0200
#define MADERA_MICDET2_EINT1_SHIFT 9
#define MADERA_MICDET2_EINT1_WIDTH 1
#define MADERA_MICDET1_EINT1 0x0100
#define MADERA_MICDET1_EINT1_MASK 0x0100
#define MADERA_MICDET1_EINT1_SHIFT 8
#define MADERA_MICDET1_EINT1_WIDTH 1
#define MADERA_HPDET_EINT1 0x0001
#define MADERA_HPDET_EINT1_MASK 0x0001
#define MADERA_HPDET_EINT1_SHIFT 0
#define MADERA_HPDET_EINT1_WIDTH 1
/* (0x1806) IRQ1_Status_7 */
#define MADERA_MICD_CLAMP_FALL_EINT1 0x0020
#define MADERA_MICD_CLAMP_FALL_EINT1_MASK 0x0020
#define MADERA_MICD_CLAMP_FALL_EINT1_SHIFT 5
#define MADERA_MICD_CLAMP_FALL_EINT1_WIDTH 1
#define MADERA_MICD_CLAMP_RISE_EINT1 0x0010
#define MADERA_MICD_CLAMP_RISE_EINT1_MASK 0x0010
#define MADERA_MICD_CLAMP_RISE_EINT1_SHIFT 4
#define MADERA_MICD_CLAMP_RISE_EINT1_WIDTH 1
#define MADERA_JD2_FALL_EINT1 0x0008
#define MADERA_JD2_FALL_EINT1_MASK 0x0008
#define MADERA_JD2_FALL_EINT1_SHIFT 3
#define MADERA_JD2_FALL_EINT1_WIDTH 1
#define MADERA_JD2_RISE_EINT1 0x0004
#define MADERA_JD2_RISE_EINT1_MASK 0x0004
#define MADERA_JD2_RISE_EINT1_SHIFT 2
#define MADERA_JD2_RISE_EINT1_WIDTH 1
#define MADERA_JD1_FALL_EINT1 0x0002
#define MADERA_JD1_FALL_EINT1_MASK 0x0002
#define MADERA_JD1_FALL_EINT1_SHIFT 1
#define MADERA_JD1_FALL_EINT1_WIDTH 1
#define MADERA_JD1_RISE_EINT1 0x0001
#define MADERA_JD1_RISE_EINT1_MASK 0x0001
#define MADERA_JD1_RISE_EINT1_SHIFT 0
#define MADERA_JD1_RISE_EINT1_WIDTH 1
/* (0x1808) IRQ1_Status_9 */
#define MADERA_ASRC2_IN2_LOCK_EINT1 0x0800
#define MADERA_ASRC2_IN2_LOCK_EINT1_MASK 0x0800
#define MADERA_ASRC2_IN2_LOCK_EINT1_SHIFT 11
#define MADERA_ASRC2_IN2_LOCK_EINT1_WIDTH 1
#define MADERA_ASRC2_IN1_LOCK_EINT1 0x0400
#define MADERA_ASRC2_IN1_LOCK_EINT1_MASK 0x0400
#define MADERA_ASRC2_IN1_LOCK_EINT1_SHIFT 10
#define MADERA_ASRC2_IN1_LOCK_EINT1_WIDTH 1
#define MADERA_ASRC1_IN2_LOCK_EINT1 0x0200
#define MADERA_ASRC1_IN2_LOCK_EINT1_MASK 0x0200
#define MADERA_ASRC1_IN2_LOCK_EINT1_SHIFT 9
#define MADERA_ASRC1_IN2_LOCK_EINT1_WIDTH 1
#define MADERA_ASRC1_IN1_LOCK_EINT1 0x0100
#define MADERA_ASRC1_IN1_LOCK_EINT1_MASK 0x0100
#define MADERA_ASRC1_IN1_LOCK_EINT1_SHIFT 8
#define MADERA_ASRC1_IN1_LOCK_EINT1_WIDTH 1
#define MADERA_DRC2_SIG_DET_EINT1 0x0002
#define MADERA_DRC2_SIG_DET_EINT1_MASK 0x0002
#define MADERA_DRC2_SIG_DET_EINT1_SHIFT 1
#define MADERA_DRC2_SIG_DET_EINT1_WIDTH 1
#define MADERA_DRC1_SIG_DET_EINT1 0x0001
#define MADERA_DRC1_SIG_DET_EINT1_MASK 0x0001
#define MADERA_DRC1_SIG_DET_EINT1_SHIFT 0
#define MADERA_DRC1_SIG_DET_EINT1_WIDTH 1
/* (0x180A) IRQ1_Status_11 */
#define MADERA_DSP_IRQ16_EINT1 0x8000
#define MADERA_DSP_IRQ16_EINT1_MASK 0x8000
#define MADERA_DSP_IRQ16_EINT1_SHIFT 15
#define MADERA_DSP_IRQ16_EINT1_WIDTH 1
#define MADERA_DSP_IRQ15_EINT1 0x4000
#define MADERA_DSP_IRQ15_EINT1_MASK 0x4000
#define MADERA_DSP_IRQ15_EINT1_SHIFT 14
#define MADERA_DSP_IRQ15_EINT1_WIDTH 1
#define MADERA_DSP_IRQ14_EINT1 0x2000
#define MADERA_DSP_IRQ14_EINT1_MASK 0x2000
#define MADERA_DSP_IRQ14_EINT1_SHIFT 13
#define MADERA_DSP_IRQ14_EINT1_WIDTH 1
#define MADERA_DSP_IRQ13_EINT1 0x1000
#define MADERA_DSP_IRQ13_EINT1_MASK 0x1000
#define MADERA_DSP_IRQ13_EINT1_SHIFT 12
#define MADERA_DSP_IRQ13_EINT1_WIDTH 1
#define MADERA_DSP_IRQ12_EINT1 0x0800
#define MADERA_DSP_IRQ12_EINT1_MASK 0x0800
#define MADERA_DSP_IRQ12_EINT1_SHIFT 11
#define MADERA_DSP_IRQ12_EINT1_WIDTH 1
#define MADERA_DSP_IRQ11_EINT1 0x0400
#define MADERA_DSP_IRQ11_EINT1_MASK 0x0400
#define MADERA_DSP_IRQ11_EINT1_SHIFT 10
#define MADERA_DSP_IRQ11_EINT1_WIDTH 1
#define MADERA_DSP_IRQ10_EINT1 0x0200
#define MADERA_DSP_IRQ10_EINT1_MASK 0x0200
#define MADERA_DSP_IRQ10_EINT1_SHIFT 9
#define MADERA_DSP_IRQ10_EINT1_WIDTH 1
#define MADERA_DSP_IRQ9_EINT1 0x0100
#define MADERA_DSP_IRQ9_EINT1_MASK 0x0100
#define MADERA_DSP_IRQ9_EINT1_SHIFT 8
#define MADERA_DSP_IRQ9_EINT1_WIDTH 1
#define MADERA_DSP_IRQ8_EINT1 0x0080
#define MADERA_DSP_IRQ8_EINT1_MASK 0x0080
#define MADERA_DSP_IRQ8_EINT1_SHIFT 7
#define MADERA_DSP_IRQ8_EINT1_WIDTH 1
#define MADERA_DSP_IRQ7_EINT1 0x0040
#define MADERA_DSP_IRQ7_EINT1_MASK 0x0040
#define MADERA_DSP_IRQ7_EINT1_SHIFT 6
#define MADERA_DSP_IRQ7_EINT1_WIDTH 1
#define MADERA_DSP_IRQ6_EINT1 0x0020
#define MADERA_DSP_IRQ6_EINT1_MASK 0x0020
#define MADERA_DSP_IRQ6_EINT1_SHIFT 5
#define MADERA_DSP_IRQ6_EINT1_WIDTH 1
#define MADERA_DSP_IRQ5_EINT1 0x0010
#define MADERA_DSP_IRQ5_EINT1_MASK 0x0010
#define MADERA_DSP_IRQ5_EINT1_SHIFT 4
#define MADERA_DSP_IRQ5_EINT1_WIDTH 1
#define MADERA_DSP_IRQ4_EINT1 0x0008
#define MADERA_DSP_IRQ4_EINT1_MASK 0x0008
#define MADERA_DSP_IRQ4_EINT1_SHIFT 3
#define MADERA_DSP_IRQ4_EINT1_WIDTH 1
#define MADERA_DSP_IRQ3_EINT1 0x0004
#define MADERA_DSP_IRQ3_EINT1_MASK 0x0004
#define MADERA_DSP_IRQ3_EINT1_SHIFT 2
#define MADERA_DSP_IRQ3_EINT1_WIDTH 1
#define MADERA_DSP_IRQ2_EINT1 0x0002
#define MADERA_DSP_IRQ2_EINT1_MASK 0x0002
#define MADERA_DSP_IRQ2_EINT1_SHIFT 1
#define MADERA_DSP_IRQ2_EINT1_WIDTH 1
#define MADERA_DSP_IRQ1_EINT1 0x0001
#define MADERA_DSP_IRQ1_EINT1_MASK 0x0001
#define MADERA_DSP_IRQ1_EINT1_SHIFT 0
#define MADERA_DSP_IRQ1_EINT1_WIDTH 1
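/*
 * Illustrative sketch only: decoding one of the DSP_IRQn_EINT1 status
 * bits above. Whether the status register is write-one-to-clear is not
 * stated in this header, so the sketch only tests the bit; 0x180A is the
 * IRQ1_Status_11 register documented above and the accessor is
 * hypothetical.
 */
static inline int madera_example_dsp_irq1_pending(
		unsigned int (*madera_read16)(unsigned int reg))
{
	return !!(madera_read16(0x180A) & MADERA_DSP_IRQ1_EINT1_MASK);
}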
/* (0x180B) IRQ1_Status_12 */
#define MADERA_SPKOUTR_SC_EINT1 0x0080
#define MADERA_SPKOUTR_SC_EINT1_MASK 0x0080
#define MADERA_SPKOUTR_SC_EINT1_SHIFT 7
#define MADERA_SPKOUTR_SC_EINT1_WIDTH 1
#define MADERA_SPKOUTL_SC_EINT1 0x0040
#define MADERA_SPKOUTL_SC_EINT1_MASK 0x0040
#define MADERA_SPKOUTL_SC_EINT1_SHIFT 6
#define MADERA_SPKOUTL_SC_EINT1_WIDTH 1
#define MADERA_HP3R_SC_EINT1 0x0020
#define MADERA_HP3R_SC_EINT1_MASK 0x0020
#define MADERA_HP3R_SC_EINT1_SHIFT 5
#define MADERA_HP3R_SC_EINT1_WIDTH 1
#define MADERA_HP3L_SC_EINT1 0x0010
#define MADERA_HP3L_SC_EINT1_MASK 0x0010
#define MADERA_HP3L_SC_EINT1_SHIFT 4
#define MADERA_HP3L_SC_EINT1_WIDTH 1
#define MADERA_HP2R_SC_EINT1 0x0008
#define MADERA_HP2R_SC_EINT1_MASK 0x0008
#define MADERA_HP2R_SC_EINT1_SHIFT 3
#define MADERA_HP2R_SC_EINT1_WIDTH 1
#define MADERA_HP2L_SC_EINT1 0x0004
#define MADERA_HP2L_SC_EINT1_MASK 0x0004
#define MADERA_HP2L_SC_EINT1_SHIFT 2
#define MADERA_HP2L_SC_EINT1_WIDTH 1
#define MADERA_HP1R_SC_EINT1 0x0002
#define MADERA_HP1R_SC_EINT1_MASK 0x0002
#define MADERA_HP1R_SC_EINT1_SHIFT 1
#define MADERA_HP1R_SC_EINT1_WIDTH 1
#define MADERA_HP1L_SC_EINT1 0x0001
#define MADERA_HP1L_SC_EINT1_MASK 0x0001
#define MADERA_HP1L_SC_EINT1_SHIFT 0
#define MADERA_HP1L_SC_EINT1_WIDTH 1
/* (0x180E) IRQ1_Status_15 */
#define MADERA_SPK_OVERHEAT_WARN_EINT1 0x0004
#define MADERA_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
#define MADERA_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
#define MADERA_SPK_OVERHEAT_WARN_EINT1_WIDTH 1
#define MADERA_SPK_OVERHEAT_EINT1 0x0002
#define MADERA_SPK_OVERHEAT_EINT1_MASK 0x0002
#define MADERA_SPK_OVERHEAT_EINT1_SHIFT 1
#define MADERA_SPK_OVERHEAT_EINT1_WIDTH 1
#define MADERA_SPK_SHUTDOWN_EINT1 0x0001
#define MADERA_SPK_SHUTDOWN_EINT1_MASK 0x0001
#define MADERA_SPK_SHUTDOWN_EINT1_SHIFT 0
#define MADERA_SPK_SHUTDOWN_EINT1_WIDTH 1
/* (0x1820) - IRQ1 Status 33 */
#define MADERA_DSP7_BUS_ERR_EINT1 0x0040
#define MADERA_DSP7_BUS_ERR_EINT1_MASK 0x0040
#define MADERA_DSP7_BUS_ERR_EINT1_SHIFT 6
#define MADERA_DSP7_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP6_BUS_ERR_EINT1 0x0020
#define MADERA_DSP6_BUS_ERR_EINT1_MASK 0x0020
#define MADERA_DSP6_BUS_ERR_EINT1_SHIFT 5
#define MADERA_DSP6_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP5_BUS_ERR_EINT1 0x0010
#define MADERA_DSP5_BUS_ERR_EINT1_MASK 0x0010
#define MADERA_DSP5_BUS_ERR_EINT1_SHIFT 4
#define MADERA_DSP5_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP4_BUS_ERR_EINT1 0x0008
#define MADERA_DSP4_BUS_ERR_EINT1_MASK 0x0008
#define MADERA_DSP4_BUS_ERR_EINT1_SHIFT 3
#define MADERA_DSP4_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP3_BUS_ERR_EINT1 0x0004
#define MADERA_DSP3_BUS_ERR_EINT1_MASK 0x0004
#define MADERA_DSP3_BUS_ERR_EINT1_SHIFT 2
#define MADERA_DSP3_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP2_BUS_ERR_EINT1 0x0002
#define MADERA_DSP2_BUS_ERR_EINT1_MASK 0x0002
#define MADERA_DSP2_BUS_ERR_EINT1_SHIFT 1
#define MADERA_DSP2_BUS_ERR_EINT1_WIDTH 1
#define MADERA_DSP1_BUS_ERR_EINT1 0x0001
#define MADERA_DSP1_BUS_ERR_EINT1_MASK 0x0001
#define MADERA_DSP1_BUS_ERR_EINT1_SHIFT 0
#define MADERA_DSP1_BUS_ERR_EINT1_WIDTH 1
/* (0x1845) IRQ1_Mask_6 */
#define MADERA_IM_MICDET2_EINT1 0x0200
#define MADERA_IM_MICDET2_EINT1_MASK 0x0200
#define MADERA_IM_MICDET2_EINT1_SHIFT 9
#define MADERA_IM_MICDET2_EINT1_WIDTH 1
#define MADERA_IM_MICDET1_EINT1 0x0100
#define MADERA_IM_MICDET1_EINT1_MASK 0x0100
#define MADERA_IM_MICDET1_EINT1_SHIFT 8
#define MADERA_IM_MICDET1_EINT1_WIDTH 1
#define MADERA_IM_HPDET_EINT1 0x0001
#define MADERA_IM_HPDET_EINT1_MASK 0x0001
#define MADERA_IM_HPDET_EINT1_SHIFT 0
#define MADERA_IM_HPDET_EINT1_WIDTH 1
/* (0x184E) IRQ1_Mask_15 */
#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1 0x0004
#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_WIDTH 1
#define MADERA_IM_SPK_OVERHEAT_EINT1 0x0002
#define MADERA_IM_SPK_OVERHEAT_EINT1_MASK 0x0002
#define MADERA_IM_SPK_OVERHEAT_EINT1_SHIFT 1
#define MADERA_IM_SPK_OVERHEAT_EINT1_WIDTH 1
#define MADERA_IM_SPK_SHUTDOWN_EINT1 0x0001
#define MADERA_IM_SPK_SHUTDOWN_EINT1_MASK 0x0001
#define MADERA_IM_SPK_SHUTDOWN_EINT1_SHIFT 0
#define MADERA_IM_SPK_SHUTDOWN_EINT1_WIDTH 1
/* (0x1880) - IRQ1 Raw Status 1 */
#define MADERA_CTRLIF_ERR_STS1 0x1000
#define MADERA_CTRLIF_ERR_STS1_MASK 0x1000
#define MADERA_CTRLIF_ERR_STS1_SHIFT 12
#define MADERA_CTRLIF_ERR_STS1_WIDTH 1
#define MADERA_SYSCLK_FAIL_STS1 0x0200
#define MADERA_SYSCLK_FAIL_STS1_MASK 0x0200
#define MADERA_SYSCLK_FAIL_STS1_SHIFT 9
#define MADERA_SYSCLK_FAIL_STS1_WIDTH 1
#define MADERA_CLOCK_DETECT_STS1 0x0100
#define MADERA_CLOCK_DETECT_STS1_MASK 0x0100
#define MADERA_CLOCK_DETECT_STS1_SHIFT 8
#define MADERA_CLOCK_DETECT_STS1_WIDTH 1
#define MADERA_BOOT_DONE_STS1 0x0080
#define MADERA_BOOT_DONE_STS1_MASK 0x0080
#define MADERA_BOOT_DONE_STS1_SHIFT 7
#define MADERA_BOOT_DONE_STS1_WIDTH 1
/* (0x1881) - IRQ1 Raw Status 2 */
#define MADERA_FLL3_LOCK_STS1 0x0400
#define MADERA_FLL3_LOCK_STS1_MASK 0x0400
#define MADERA_FLL3_LOCK_STS1_SHIFT 10
#define MADERA_FLL3_LOCK_STS1_WIDTH 1
#define MADERA_FLL2_LOCK_STS1 0x0200
#define MADERA_FLL2_LOCK_STS1_MASK 0x0200
#define MADERA_FLL2_LOCK_STS1_SHIFT 9
#define MADERA_FLL2_LOCK_STS1_WIDTH 1
#define MADERA_FLL1_LOCK_STS1 0x0100
#define MADERA_FLL1_LOCK_STS1_MASK 0x0100
#define MADERA_FLL1_LOCK_STS1_SHIFT 8
#define MADERA_FLL1_LOCK_STS1_WIDTH 1
/* (0x1886) - IRQ1 Raw Status 7 */
#define MADERA_MICD_CLAMP_FALL_STS1 0x0020
#define MADERA_MICD_CLAMP_FALL_STS1_MASK 0x0020
#define MADERA_MICD_CLAMP_FALL_STS1_SHIFT 5
#define MADERA_MICD_CLAMP_FALL_STS1_WIDTH 1
#define MADERA_MICD_CLAMP_RISE_STS1 0x0010
#define MADERA_MICD_CLAMP_RISE_STS1_MASK 0x0010
#define MADERA_MICD_CLAMP_RISE_STS1_SHIFT 4
#define MADERA_MICD_CLAMP_RISE_STS1_WIDTH 1
#define MADERA_JD2_FALL_STS1 0x0008
#define MADERA_JD2_FALL_STS1_MASK 0x0008
#define MADERA_JD2_FALL_STS1_SHIFT 3
#define MADERA_JD2_FALL_STS1_WIDTH 1
#define MADERA_JD2_RISE_STS1 0x0004
#define MADERA_JD2_RISE_STS1_MASK 0x0004
#define MADERA_JD2_RISE_STS1_SHIFT 2
#define MADERA_JD2_RISE_STS1_WIDTH 1
#define MADERA_JD1_FALL_STS1 0x0002
#define MADERA_JD1_FALL_STS1_MASK 0x0002
#define MADERA_JD1_FALL_STS1_SHIFT 1
#define MADERA_JD1_FALL_STS1_WIDTH 1
#define MADERA_JD1_RISE_STS1 0x0001
#define MADERA_JD1_RISE_STS1_MASK 0x0001
#define MADERA_JD1_RISE_STS1_SHIFT 0
#define MADERA_JD1_RISE_STS1_WIDTH 1
/* (0x188E) - IRQ1 Raw Status 15 */
#define MADERA_SPK_OVERHEAT_WARN_STS1 0x0004
#define MADERA_SPK_OVERHEAT_WARN_STS1_MASK 0x0004
#define MADERA_SPK_OVERHEAT_WARN_STS1_SHIFT 2
#define MADERA_SPK_OVERHEAT_WARN_STS1_WIDTH 1
#define MADERA_SPK_OVERHEAT_STS1 0x0002
#define MADERA_SPK_OVERHEAT_STS1_MASK 0x0002
#define MADERA_SPK_OVERHEAT_STS1_SHIFT 1
#define MADERA_SPK_OVERHEAT_STS1_WIDTH 1
#define MADERA_SPK_SHUTDOWN_STS1 0x0001
#define MADERA_SPK_SHUTDOWN_STS1_MASK 0x0001
#define MADERA_SPK_SHUTDOWN_STS1_SHIFT 0
#define MADERA_SPK_SHUTDOWN_STS1_WIDTH 1
/* (0x1A06) Interrupt_Debounce_7 */
#define MADERA_MICD_CLAMP_DB 0x0010
#define MADERA_MICD_CLAMP_DB_MASK 0x0010
#define MADERA_MICD_CLAMP_DB_SHIFT 4
#define MADERA_MICD_CLAMP_DB_WIDTH 1
#define MADERA_JD2_DB 0x0004
#define MADERA_JD2_DB_MASK 0x0004
#define MADERA_JD2_DB_SHIFT 2
#define MADERA_JD2_DB_WIDTH 1
#define MADERA_JD1_DB 0x0001
#define MADERA_JD1_DB_MASK 0x0001
#define MADERA_JD1_DB_SHIFT 0
#define MADERA_JD1_DB_WIDTH 1
/* (0x1A0E) Interrupt_Debounce_15 */
#define MADERA_SPK_OVERHEAT_WARN_DB
0x0004 #define MADERA_SPK_OVERHEAT_WARN_DB_MASK 0x0004 #define MADERA_SPK_OVERHEAT_WARN_DB_SHIFT 2 #define MADERA_SPK_OVERHEAT_WARN_DB_WIDTH 1 #define MADERA_SPK_OVERHEAT_DB 0x0002 #define MADERA_SPK_OVERHEAT_DB_MASK 0x0002 #define MADERA_SPK_OVERHEAT_DB_SHIFT 1 #define MADERA_SPK_OVERHEAT_DB_WIDTH 1 /* (0x1A80) IRQ1_CTRL */ #define MADERA_IM_IRQ1 0x0800 #define MADERA_IM_IRQ1_MASK 0x0800 #define MADERA_IM_IRQ1_SHIFT 11 #define MADERA_IM_IRQ1_WIDTH 1 #define MADERA_IRQ_POL 0x0400 #define MADERA_IRQ_POL_MASK 0x0400 #define MADERA_IRQ_POL_SHIFT 10 #define MADERA_IRQ_POL_WIDTH 1 /* (0x20004) OTP_HPDET_Cal_1 */ #define MADERA_OTP_HPDET_CALIB_OFFSET_11 0xFF000000 #define MADERA_OTP_HPDET_CALIB_OFFSET_11_MASK 0xFF000000 #define MADERA_OTP_HPDET_CALIB_OFFSET_11_SHIFT 24 #define MADERA_OTP_HPDET_CALIB_OFFSET_11_WIDTH 8 #define MADERA_OTP_HPDET_CALIB_OFFSET_10 0x00FF0000 #define MADERA_OTP_HPDET_CALIB_OFFSET_10_MASK 0x00FF0000 #define MADERA_OTP_HPDET_CALIB_OFFSET_10_SHIFT 16 #define MADERA_OTP_HPDET_CALIB_OFFSET_10_WIDTH 8 #define MADERA_OTP_HPDET_CALIB_OFFSET_01 0x0000FF00 #define MADERA_OTP_HPDET_CALIB_OFFSET_01_MASK 0x0000FF00 #define MADERA_OTP_HPDET_CALIB_OFFSET_01_SHIFT 8 #define MADERA_OTP_HPDET_CALIB_OFFSET_01_WIDTH 8 #define MADERA_OTP_HPDET_CALIB_OFFSET_00 0x000000FF #define MADERA_OTP_HPDET_CALIB_OFFSET_00_MASK 0x000000FF #define MADERA_OTP_HPDET_CALIB_OFFSET_00_SHIFT 0 #define MADERA_OTP_HPDET_CALIB_OFFSET_00_WIDTH 8 /* (0x20006) OTP_HPDET_Cal_2 */ #define MADERA_OTP_HPDET_GRADIENT_1X 0x0000FF00 #define MADERA_OTP_HPDET_GRADIENT_1X_MASK 0x0000FF00 #define MADERA_OTP_HPDET_GRADIENT_1X_SHIFT 8 #define MADERA_OTP_HPDET_GRADIENT_1X_WIDTH 8 #define MADERA_OTP_HPDET_GRADIENT_0X 0x000000FF #define MADERA_OTP_HPDET_GRADIENT_0X_MASK 0x000000FF #define MADERA_OTP_HPDET_GRADIENT_0X_SHIFT 0 #define MADERA_OTP_HPDET_GRADIENT_0X_WIDTH 8 #endif mfd/madera/core.h 0000644 00000014315 14722070374 0007656 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * MFD internals for Cirrus Logic Madera codecs * * Copyright (C) 2015-2018 Cirrus Logic */ #ifndef MADERA_CORE_H #define MADERA_CORE_H #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/mfd/madera/pdata.h> #include <linux/mutex.h> #include <linux/notifier.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> enum madera_type { /* 0 is reserved for indicating failure to identify */ CS47L35 = 1, CS47L85 = 2, CS47L90 = 3, CS47L91 = 4, CS47L92 = 5, CS47L93 = 6, WM1840 = 7, CS47L15 = 8, CS42L92 = 9, }; #define MADERA_MAX_CORE_SUPPLIES 2 #define MADERA_MAX_GPIOS 40 #define CS47L15_NUM_GPIOS 15 #define CS47L35_NUM_GPIOS 16 #define CS47L85_NUM_GPIOS 40 #define CS47L90_NUM_GPIOS 38 #define CS47L92_NUM_GPIOS 16 #define MADERA_MAX_MICBIAS 4 #define MADERA_MAX_HP_OUTPUT 3 /* Notifier events */ #define MADERA_NOTIFY_VOICE_TRIGGER 0x1 #define MADERA_NOTIFY_HPDET 0x2 #define MADERA_NOTIFY_MICDET 0x4 /* GPIO Function Definitions */ #define MADERA_GP_FN_ALTERNATE 0x00 #define MADERA_GP_FN_GPIO 0x01 #define MADERA_GP_FN_DSP_GPIO 0x02 #define MADERA_GP_FN_IRQ1 0x03 #define MADERA_GP_FN_IRQ2 0x04 #define MADERA_GP_FN_FLL1_CLOCK 0x10 #define MADERA_GP_FN_FLL2_CLOCK 0x11 #define MADERA_GP_FN_FLL3_CLOCK 0x12 #define MADERA_GP_FN_FLLAO_CLOCK 0x13 #define MADERA_GP_FN_FLL1_LOCK 0x18 #define MADERA_GP_FN_FLL2_LOCK 0x19 #define MADERA_GP_FN_FLL3_LOCK 0x1A #define MADERA_GP_FN_FLLAO_LOCK 0x1B #define MADERA_GP_FN_OPCLK_OUT 0x40 #define MADERA_GP_FN_OPCLK_ASYNC_OUT 0x41 #define MADERA_GP_FN_PWM1 0x48 #define MADERA_GP_FN_PWM2 0x49 
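/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): how the per-variant *_NUM_GPIOS counts above are typically
 * selected once the codec type has been identified. The helper name and
 * the grouping of variants are assumptions for illustration only.
 */
static inline int madera_example_num_gpios(enum madera_type type)
{
	switch (type) {
	case CS47L15:
		return CS47L15_NUM_GPIOS;
	case CS47L35:
		return CS47L35_NUM_GPIOS;
	case CS47L85:
	case WM1840:
		return CS47L85_NUM_GPIOS;
	case CS47L90:
	case CS47L91:
		return CS47L90_NUM_GPIOS;
	case CS47L92:
	case CS47L93:
	case CS42L92:
		return CS47L92_NUM_GPIOS;
	default:
		return 0;	/* 0 is reserved for "failed to identify" */
	}
}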
#define MADERA_GP_FN_SPDIF_OUT 0x4C #define MADERA_GP_FN_HEADPHONE_DET 0x50 #define MADERA_GP_FN_MIC_DET 0x58 #define MADERA_GP_FN_DRC1_SIGNAL_DETECT 0x80 #define MADERA_GP_FN_DRC2_SIGNAL_DETECT 0x81 #define MADERA_GP_FN_ASRC1_IN1_LOCK 0x88 #define MADERA_GP_FN_ASRC1_IN2_LOCK 0x89 #define MADERA_GP_FN_ASRC2_IN1_LOCK 0x8A #define MADERA_GP_FN_ASRC2_IN2_LOCK 0x8B #define MADERA_GP_FN_DSP_IRQ1 0xA0 #define MADERA_GP_FN_DSP_IRQ2 0xA1 #define MADERA_GP_FN_DSP_IRQ3 0xA2 #define MADERA_GP_FN_DSP_IRQ4 0xA3 #define MADERA_GP_FN_DSP_IRQ5 0xA4 #define MADERA_GP_FN_DSP_IRQ6 0xA5 #define MADERA_GP_FN_DSP_IRQ7 0xA6 #define MADERA_GP_FN_DSP_IRQ8 0xA7 #define MADERA_GP_FN_DSP_IRQ9 0xA8 #define MADERA_GP_FN_DSP_IRQ10 0xA9 #define MADERA_GP_FN_DSP_IRQ11 0xAA #define MADERA_GP_FN_DSP_IRQ12 0xAB #define MADERA_GP_FN_DSP_IRQ13 0xAC #define MADERA_GP_FN_DSP_IRQ14 0xAD #define MADERA_GP_FN_DSP_IRQ15 0xAE #define MADERA_GP_FN_DSP_IRQ16 0xAF #define MADERA_GP_FN_HPOUT1L_SC 0xB0 #define MADERA_GP_FN_HPOUT1R_SC 0xB1 #define MADERA_GP_FN_HPOUT2L_SC 0xB2 #define MADERA_GP_FN_HPOUT2R_SC 0xB3 #define MADERA_GP_FN_HPOUT3L_SC 0xB4 #define MADERA_GP_FN_HPOUT4R_SC 0xB5 #define MADERA_GP_FN_SPKOUTL_SC 0xB6 #define MADERA_GP_FN_SPKOUTR_SC 0xB7 #define MADERA_GP_FN_HPOUT1L_ENA 0xC0 #define MADERA_GP_FN_HPOUT1R_ENA 0xC1 #define MADERA_GP_FN_HPOUT2L_ENA 0xC2 #define MADERA_GP_FN_HPOUT2R_ENA 0xC3 #define MADERA_GP_FN_HPOUT3L_ENA 0xC4 #define MADERA_GP_FN_HPOUT4R_ENA 0xC5 #define MADERA_GP_FN_SPKOUTL_ENA 0xC6 #define MADERA_GP_FN_SPKOUTR_ENA 0xC7 #define MADERA_GP_FN_HPOUT1L_DIS 0xD0 #define MADERA_GP_FN_HPOUT1R_DIS 0xD1 #define MADERA_GP_FN_HPOUT2L_DIS 0xD2 #define MADERA_GP_FN_HPOUT2R_DIS 0xD3 #define MADERA_GP_FN_HPOUT3L_DIS 0xD4 #define MADERA_GP_FN_HPOUT4R_DIS 0xD5 #define MADERA_GP_FN_SPKOUTL_DIS 0xD6 #define MADERA_GP_FN_SPKOUTR_DIS 0xD7 #define MADERA_GP_FN_SPK_SHUTDOWN 0xE0 #define MADERA_GP_FN_SPK_OVH_SHUTDOWN 0xE1 #define MADERA_GP_FN_SPK_OVH_WARN 0xE2 #define MADERA_GP_FN_TIMER1_STATUS 0x140 #define MADERA_GP_FN_TIMER2_STATUS 0x141 #define MADERA_GP_FN_TIMER3_STATUS 0x142 #define MADERA_GP_FN_TIMER4_STATUS 0x143 #define MADERA_GP_FN_TIMER5_STATUS 0x144 #define MADERA_GP_FN_TIMER6_STATUS 0x145 #define MADERA_GP_FN_TIMER7_STATUS 0x146 #define MADERA_GP_FN_TIMER8_STATUS 0x147 #define MADERA_GP_FN_EVENTLOG1_FIFO_STS 0x150 #define MADERA_GP_FN_EVENTLOG2_FIFO_STS 0x151 #define MADERA_GP_FN_EVENTLOG3_FIFO_STS 0x152 #define MADERA_GP_FN_EVENTLOG4_FIFO_STS 0x153 #define MADERA_GP_FN_EVENTLOG5_FIFO_STS 0x154 #define MADERA_GP_FN_EVENTLOG6_FIFO_STS 0x155 #define MADERA_GP_FN_EVENTLOG7_FIFO_STS 0x156 #define MADERA_GP_FN_EVENTLOG8_FIFO_STS 0x157 struct snd_soc_dapm_context; /* * struct madera - internal data shared by the set of Madera drivers * * This should not be used by anything except child drivers of the Madera MFD * * @regmap: pointer to the regmap instance for 16-bit registers * @regmap_32bit: pointer to the regmap instance for 32-bit registers * @dev: pointer to the MFD device * @type: type of codec * @rev: silicon revision * @type_name: display name of this codec * @num_core_supplies: number of core supply regulators * @core_supplies: list of core supplies that are always required * @dcvdd: pointer to DCVDD regulator * @internal_dcvdd: true if DCVDD is supplied from the internal LDO1 * @pdata: our pdata * @irq_dev: the irqchip child driver device * @irq_data: pointer to irqchip data for the child irqchip driver * @irq: host irq number from SPI or I2C configuration * @out_clamp: indicates output clamp state for each 
analogue output * @out_shorted: indicates short circuit state for each analogue output * @hp_ena: bitflags of enable state for the headphone outputs * @num_micbias: number of MICBIAS outputs * @num_childbias: number of child biases for each MICBIAS * @dapm: pointer to codec driver DAPM context * @notifier: notifier for signalling events to ASoC machine driver */ struct madera { struct regmap *regmap; struct regmap *regmap_32bit; struct device *dev; enum madera_type type; unsigned int rev; const char *type_name; int num_core_supplies; struct regulator_bulk_data core_supplies[MADERA_MAX_CORE_SUPPLIES]; struct regulator *dcvdd; bool internal_dcvdd; struct madera_pdata pdata; struct device *irq_dev; struct regmap_irq_chip_data *irq_data; int irq; unsigned int num_micbias; unsigned int num_childbias[MADERA_MAX_MICBIAS]; struct snd_soc_dapm_context *dapm; struct mutex dapm_ptr_lock; unsigned int hp_ena; bool out_clamp[MADERA_MAX_HP_OUTPUT]; bool out_shorted[MADERA_MAX_HP_OUTPUT]; struct blocking_notifier_head notifier; }; #endif mfd/tmio.h 0000644 00000007250 14722070374 0006445 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef MFD_TMIO_H #define MFD_TMIO_H #include <linux/device.h> #include <linux/fb.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/mmc/card.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #define tmio_ioread8(addr) readb(addr) #define tmio_ioread16(addr) readw(addr) #define tmio_ioread16_rep(r, b, l) readsw(r, b, l) #define tmio_ioread32(addr) \ (((u32)readw((addr))) | (((u32)readw((addr) + 2)) << 16)) #define tmio_iowrite8(val, addr) writeb((val), (addr)) #define tmio_iowrite16(val, addr) writew((val), (addr)) #define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) #define tmio_iowrite32(val, addr) \ do { \ writew((val), (addr)); \ writew((val) >> 16, (addr) + 2); \ } while (0) #define sd_config_write8(base, shift, reg, val) \ tmio_iowrite8((val), (base) + ((reg) << (shift))) #define sd_config_write16(base, shift, reg, val) \ tmio_iowrite16((val), (base) + ((reg) << (shift))) #define sd_config_write32(base, shift, reg, val) \ do { \ tmio_iowrite16((val), (base) + ((reg) << (shift))); \ tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \ } while (0) /* tmio MMC platform flags */ /* * Some controllers can support a 2-byte block size when the bus width * is configured in 4-bit mode. */ #define TMIO_MMC_BLKSZ_2BYTES BIT(1) /* * Some controllers can support SDIO IRQ signalling. */ #define TMIO_MMC_SDIO_IRQ BIT(2) /* Some features are only available or tested on R-Car Gen2 or later */ #define TMIO_MMC_MIN_RCAR2 BIT(3) /* * Some controllers require waiting for the SD bus to become * idle before writing to some registers. */ #define TMIO_MMC_HAS_IDLE_WAIT BIT(4) /* BIT(5) is unused */ /* * Some controllers have CMD12 automatically * issue/non-issue register */ #define TMIO_MMC_HAVE_CMD12_CTRL BIT(7) /* Controller has some SDIO status bits which must be 1 */ #define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8) /* * Some controllers have a 32-bit wide data port register */ #define TMIO_MMC_32BIT_DATA_PORT BIT(9) /* * Some controllers allows to set SDx actual clock */ #define TMIO_MMC_CLK_ACTUAL BIT(10) /* Some controllers have a CBSY bit */ #define TMIO_MMC_HAVE_CBSY BIT(11) /* Some controllers that support HS400 use 4 taps while others use 8. 
*/ #define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state); void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state); struct dma_chan; /* * data for the MMC controller */ struct tmio_mmc_data { void *chan_priv_tx; void *chan_priv_rx; unsigned int hclk; unsigned long capabilities; unsigned long capabilities2; unsigned long flags; u32 ocr_mask; /* available voltages */ int alignment_shift; dma_addr_t dma_rx_offset; unsigned int max_blk_count; unsigned short max_segs; void (*set_pwr)(struct platform_device *host, int state); void (*set_clk_div)(struct platform_device *host, int state); }; /* * data for the NAND controller */ struct tmio_nand_data { struct nand_bbt_descr *badblock_pattern; struct mtd_partition *partition; unsigned int num_partitions; const char *const *part_parsers; }; #define FBIO_TMIO_ACC_WRITE 0x7C639300 #define FBIO_TMIO_ACC_SYNC 0x7C639301 struct tmio_fb_data { int (*lcd_set_power)(struct platform_device *fb_dev, bool on); int (*lcd_mode)(struct platform_device *fb_dev, const struct fb_videomode *mode); int num_modes; struct fb_videomode *modes; /* in mm: size of screen */ int height; int width; }; #endif mfd/rc5t583.h 0000644 00000022435 14722070374 0006614 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Core driver interface to access RICOH_RC5T583 power management chip. * * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved. * Author: Laxman dewangan <ldewangan@nvidia.com> * * Based on code * Copyright (C) 2011 RICOH COMPANY,LTD */ #ifndef __LINUX_MFD_RC5T583_H #define __LINUX_MFD_RC5T583_H #include <linux/mutex.h> #include <linux/types.h> #include <linux/regmap.h> /* Maximum number of main interrupts */ #define MAX_MAIN_INTERRUPT 5 #define RC5T583_MAX_GPEDGE_REG 2 #define RC5T583_MAX_INTERRUPT_EN_REGS 8 #define RC5T583_MAX_INTERRUPT_MASK_REGS 9 /* Interrupt enable register */ #define RC5T583_INT_EN_SYS1 0x19 #define RC5T583_INT_EN_SYS2 0x1D #define RC5T583_INT_EN_DCDC 0x41 #define RC5T583_INT_EN_RTC 0xED #define RC5T583_INT_EN_ADC1 0x90 #define RC5T583_INT_EN_ADC2 0x91 #define RC5T583_INT_EN_ADC3 0x92 /* Interrupt status registers (monitor regs in Ricoh)*/ #define RC5T583_INTC_INTPOL 0xAD #define RC5T583_INTC_INTEN 0xAE #define RC5T583_INTC_INTMON 0xAF #define RC5T583_INT_MON_GRP 0xAF #define RC5T583_INT_MON_SYS1 0x1B #define RC5T583_INT_MON_SYS2 0x1F #define RC5T583_INT_MON_DCDC 0x43 #define RC5T583_INT_MON_RTC 0xEE /* Interrupt clearing registers */ #define RC5T583_INT_IR_SYS1 0x1A #define RC5T583_INT_IR_SYS2 0x1E #define RC5T583_INT_IR_DCDC 0x42 #define RC5T583_INT_IR_RTC 0xEE #define RC5T583_INT_IR_ADCL 0x94 #define RC5T583_INT_IR_ADCH 0x95 #define RC5T583_INT_IR_ADCEND 0x96 #define RC5T583_INT_IR_GPIOR 0xA9 #define RC5T583_INT_IR_GPIOF 0xAA /* Sleep sequence registers */ #define RC5T583_SLPSEQ1 0x21 #define RC5T583_SLPSEQ2 0x22 #define RC5T583_SLPSEQ3 0x23 #define RC5T583_SLPSEQ4 0x24 #define RC5T583_SLPSEQ5 0x25 #define RC5T583_SLPSEQ6 0x26 #define RC5T583_SLPSEQ7 0x27 #define RC5T583_SLPSEQ8 0x28 #define RC5T583_SLPSEQ9 0x29 #define RC5T583_SLPSEQ10 0x2A #define RC5T583_SLPSEQ11 0x2B /* Regulator registers */ #define RC5T583_REG_DC0CTL 0x30 #define RC5T583_REG_DC0DAC 0x31 #define RC5T583_REG_DC0LATCTL 0x32 #define RC5T583_REG_SR0CTL 0x33 #define RC5T583_REG_DC1CTL 0x34 #define RC5T583_REG_DC1DAC 0x35 #define 
RC5T583_REG_DC1LATCTL 0x36 #define RC5T583_REG_SR1CTL 0x37 #define RC5T583_REG_DC2CTL 0x38 #define RC5T583_REG_DC2DAC 0x39 #define RC5T583_REG_DC2LATCTL 0x3A #define RC5T583_REG_SR2CTL 0x3B #define RC5T583_REG_DC3CTL 0x3C #define RC5T583_REG_DC3DAC 0x3D #define RC5T583_REG_DC3LATCTL 0x3E #define RC5T583_REG_SR3CTL 0x3F #define RC5T583_REG_LDOEN1 0x50 #define RC5T583_REG_LDOEN2 0x51 #define RC5T583_REG_LDODIS1 0x52 #define RC5T583_REG_LDODIS2 0x53 #define RC5T583_REG_LDO0DAC 0x54 #define RC5T583_REG_LDO1DAC 0x55 #define RC5T583_REG_LDO2DAC 0x56 #define RC5T583_REG_LDO3DAC 0x57 #define RC5T583_REG_LDO4DAC 0x58 #define RC5T583_REG_LDO5DAC 0x59 #define RC5T583_REG_LDO6DAC 0x5A #define RC5T583_REG_LDO7DAC 0x5B #define RC5T583_REG_LDO8DAC 0x5C #define RC5T583_REG_LDO9DAC 0x5D #define RC5T583_REG_DC0DAC_DS 0x60 #define RC5T583_REG_DC1DAC_DS 0x61 #define RC5T583_REG_DC2DAC_DS 0x62 #define RC5T583_REG_DC3DAC_DS 0x63 #define RC5T583_REG_LDO0DAC_DS 0x64 #define RC5T583_REG_LDO1DAC_DS 0x65 #define RC5T583_REG_LDO2DAC_DS 0x66 #define RC5T583_REG_LDO3DAC_DS 0x67 #define RC5T583_REG_LDO4DAC_DS 0x68 #define RC5T583_REG_LDO5DAC_DS 0x69 #define RC5T583_REG_LDO6DAC_DS 0x6A #define RC5T583_REG_LDO7DAC_DS 0x6B #define RC5T583_REG_LDO8DAC_DS 0x6C #define RC5T583_REG_LDO9DAC_DS 0x6D /* GPIO register base address */ #define RC5T583_GPIO_IOSEL 0xA0 #define RC5T583_GPIO_PDEN 0xA1 #define RC5T583_GPIO_IOOUT 0xA2 #define RC5T583_GPIO_PGSEL 0xA3 #define RC5T583_GPIO_GPINV 0xA4 #define RC5T583_GPIO_GPDEB 0xA5 #define RC5T583_GPIO_GPEDGE1 0xA6 #define RC5T583_GPIO_GPEDGE2 0xA7 #define RC5T583_GPIO_EN_INT 0xA8 #define RC5T583_GPIO_MON_IOIN 0xAB #define RC5T583_GPIO_GPOFUNC 0xAC /* RTC registers */ #define RC5T583_RTC_SEC 0xE0 #define RC5T583_RTC_MIN 0xE1 #define RC5T583_RTC_HOUR 0xE2 #define RC5T583_RTC_WDAY 0xE3 #define RC5T583_RTC_DAY 0xE4 #define RC5T583_RTC_MONTH 0xE5 #define RC5T583_RTC_YEAR 0xE6 #define RC5T583_RTC_ADJ 0xE7 #define RC5T583_RTC_AW_MIN 0xE8 #define RC5T583_RTC_AW_HOUR 0xE9 #define RC5T583_RTC_AW_WEEK 0xEA #define RC5T583_RTC_AD_MIN 0xEB #define RC5T583_RTC_AD_HOUR 0xEC #define RC5T583_RTC_CTL1 0xED #define RC5T583_RTC_CTL2 0xEE #define RC5T583_RTC_AY_MIN 0xF0 #define RC5T583_RTC_AY_HOUR 0xF1 #define RC5T583_RTC_AY_DAY 0xF2 #define RC5T583_RTC_AY_MONTH 0xF3 #define RC5T583_RTC_AY_YEAR 0xF4 #define RC5T583_MAX_REG 0xF7 #define RC5T583_NUM_REGS (RC5T583_MAX_REG + 1) /* RICOH_RC5T583 IRQ definitions */ enum { RC5T583_IRQ_ONKEY, RC5T583_IRQ_ACOK, RC5T583_IRQ_LIDOPEN, RC5T583_IRQ_PREOT, RC5T583_IRQ_CLKSTP, RC5T583_IRQ_ONKEY_OFF, RC5T583_IRQ_WD, RC5T583_IRQ_EN_PWRREQ1, RC5T583_IRQ_EN_PWRREQ2, RC5T583_IRQ_PRE_VINDET, RC5T583_IRQ_DC0LIM, RC5T583_IRQ_DC1LIM, RC5T583_IRQ_DC2LIM, RC5T583_IRQ_DC3LIM, RC5T583_IRQ_CTC, RC5T583_IRQ_YALE, RC5T583_IRQ_DALE, RC5T583_IRQ_WALE, RC5T583_IRQ_AIN1L, RC5T583_IRQ_AIN2L, RC5T583_IRQ_AIN3L, RC5T583_IRQ_VBATL, RC5T583_IRQ_VIN3L, RC5T583_IRQ_VIN8L, RC5T583_IRQ_AIN1H, RC5T583_IRQ_AIN2H, RC5T583_IRQ_AIN3H, RC5T583_IRQ_VBATH, RC5T583_IRQ_VIN3H, RC5T583_IRQ_VIN8H, RC5T583_IRQ_ADCEND, RC5T583_IRQ_GPIO0, RC5T583_IRQ_GPIO1, RC5T583_IRQ_GPIO2, RC5T583_IRQ_GPIO3, RC5T583_IRQ_GPIO4, RC5T583_IRQ_GPIO5, RC5T583_IRQ_GPIO6, RC5T583_IRQ_GPIO7, /* Should be last entry */ RC5T583_MAX_IRQS, }; /* Ricoh583 gpio definitions */ enum { RC5T583_GPIO0, RC5T583_GPIO1, RC5T583_GPIO2, RC5T583_GPIO3, RC5T583_GPIO4, RC5T583_GPIO5, RC5T583_GPIO6, RC5T583_GPIO7, /* Should be last entry */ RC5T583_MAX_GPIO, }; enum { RC5T583_DS_NONE, RC5T583_DS_DC0, RC5T583_DS_DC1, RC5T583_DS_DC2, RC5T583_DS_DC3, 
RC5T583_DS_LDO0, RC5T583_DS_LDO1, RC5T583_DS_LDO2, RC5T583_DS_LDO3, RC5T583_DS_LDO4, RC5T583_DS_LDO5, RC5T583_DS_LDO6, RC5T583_DS_LDO7, RC5T583_DS_LDO8, RC5T583_DS_LDO9, RC5T583_DS_PSO0, RC5T583_DS_PSO1, RC5T583_DS_PSO2, RC5T583_DS_PSO3, RC5T583_DS_PSO4, RC5T583_DS_PSO5, RC5T583_DS_PSO6, RC5T583_DS_PSO7, /* Should be last entry */ RC5T583_DS_MAX, };
/* * The Ricoh PMIC RC5T583 supports sleep through two external controls. * The outputs of the GPIOs and regulators can be enabled/disabled through * these external signals. */
enum { RC5T583_EXT_PWRREQ1_CONTROL = 0x1, RC5T583_EXT_PWRREQ2_CONTROL = 0x2, };
enum { RC5T583_REGULATOR_DC0, RC5T583_REGULATOR_DC1, RC5T583_REGULATOR_DC2, RC5T583_REGULATOR_DC3, RC5T583_REGULATOR_LDO0, RC5T583_REGULATOR_LDO1, RC5T583_REGULATOR_LDO2, RC5T583_REGULATOR_LDO3, RC5T583_REGULATOR_LDO4, RC5T583_REGULATOR_LDO5, RC5T583_REGULATOR_LDO6, RC5T583_REGULATOR_LDO7, RC5T583_REGULATOR_LDO8, RC5T583_REGULATOR_LDO9, /* Should be last entry */ RC5T583_REGULATOR_MAX, };
struct rc5t583 { struct device *dev; struct regmap *regmap; int chip_irq; int irq_base; struct mutex irq_lock; unsigned long group_irq_en[MAX_MAIN_INTERRUPT]; /* For main interrupt bits in INTC */ uint8_t intc_inten_reg; /* For group interrupt bits and address */ uint8_t irq_en_reg[RC5T583_MAX_INTERRUPT_EN_REGS]; /* For gpio edge */ uint8_t gpedge_reg[RC5T583_MAX_GPEDGE_REG]; };
/* * rc5t583_platform_data: Platform data for the Ricoh RC5T583 PMU. * The board-specific data is provided through this structure. * @irq_base: IRQ base number at which this device registers its interrupts. * @gpio_base: GPIO base from which the GPIOs of this device start. * @enable_shutdown: Enable shutdown through the input pin "shutdown". * @regulator_deepsleep_slot: The slot number at which the device goes to sleep * in device sleep mode. * @regulator_ext_pwr_control: External power request regulator control. The * regulator output enable/disable is controlled by the external * power request input state. * @reg_init_data: Regulator init data.
*/ struct rc5t583_platform_data { int irq_base; int gpio_base; bool enable_shutdown; int regulator_deepsleep_slot[RC5T583_REGULATOR_MAX]; unsigned long regulator_ext_pwr_control[RC5T583_REGULATOR_MAX]; struct regulator_init_data *reg_init_data[RC5T583_REGULATOR_MAX]; }; static inline int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val) { struct rc5t583 *rc5t583 = dev_get_drvdata(dev); return regmap_write(rc5t583->regmap, reg, val); } static inline int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val) { struct rc5t583 *rc5t583 = dev_get_drvdata(dev); unsigned int ival; int ret; ret = regmap_read(rc5t583->regmap, reg, &ival); if (!ret) *val = (uint8_t)ival; return ret; } static inline int rc5t583_set_bits(struct device *dev, unsigned int reg, unsigned int bit_mask) { struct rc5t583 *rc5t583 = dev_get_drvdata(dev); return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask); } static inline int rc5t583_clear_bits(struct device *dev, unsigned int reg, unsigned int bit_mask) { struct rc5t583 *rc5t583 = dev_get_drvdata(dev); return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0); } static inline int rc5t583_update(struct device *dev, unsigned int reg, unsigned int val, unsigned int mask) { struct rc5t583 *rc5t583 = dev_get_drvdata(dev); return regmap_update_bits(rc5t583->regmap, reg, mask, val); } int rc5t583_ext_power_req_config(struct device *dev, int deepsleep_id, int ext_pwr_req, int deepsleep_slot_nr); int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base); int rc5t583_irq_exit(struct rc5t583 *rc5t583); #endif mfd/ab3100.h 0000644 00000010222 14722070374 0006354 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2007-2009 ST-Ericsson AB * AB3100 core access functions * Author: Linus Walleij <linus.walleij@stericsson.com> */ #include <linux/regulator/machine.h> struct device; #ifndef MFD_AB3100_H #define MFD_AB3100_H #define AB3100_P1A 0xc0 #define AB3100_P1B 0xc1 #define AB3100_P1C 0xc2 #define AB3100_P1D 0xc3 #define AB3100_P1E 0xc4 #define AB3100_P1F 0xc5 #define AB3100_P1G 0xc6 #define AB3100_R2A 0xc7 #define AB3100_R2B 0xc8 /* * AB3100, EVENTA1, A2 and A3 event register flags * these are catenated into a single 32-bit flag in the code * for event notification broadcasts. 
*/ #define AB3100_EVENTA1_ONSWA (0x01<<16) #define AB3100_EVENTA1_ONSWB (0x02<<16) #define AB3100_EVENTA1_ONSWC (0x04<<16) #define AB3100_EVENTA1_DCIO (0x08<<16) #define AB3100_EVENTA1_OVER_TEMP (0x10<<16) #define AB3100_EVENTA1_SIM_OFF (0x20<<16) #define AB3100_EVENTA1_VBUS (0x40<<16) #define AB3100_EVENTA1_VSET_USB (0x80<<16) #define AB3100_EVENTA2_READY_TX (0x01<<8) #define AB3100_EVENTA2_READY_RX (0x02<<8) #define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8) #define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8) #define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8) #define AB3100_EVENTA2_MIDR (0x20<<8) #define AB3100_EVENTA2_BATTERY_REM (0x40<<8) #define AB3100_EVENTA2_ALARM (0x80<<8) #define AB3100_EVENTA3_ADC_TRIG5 (0x01) #define AB3100_EVENTA3_ADC_TRIG4 (0x02) #define AB3100_EVENTA3_ADC_TRIG3 (0x04) #define AB3100_EVENTA3_ADC_TRIG2 (0x08) #define AB3100_EVENTA3_ADC_TRIGVBAT (0x10) #define AB3100_EVENTA3_ADC_TRIGVTX (0x20) #define AB3100_EVENTA3_ADC_TRIG1 (0x40) #define AB3100_EVENTA3_ADC_TRIG0 (0x80) /* AB3100, STR register flags */ #define AB3100_STR_ONSWA (0x01) #define AB3100_STR_ONSWB (0x02) #define AB3100_STR_ONSWC (0x04) #define AB3100_STR_DCIO (0x08) #define AB3100_STR_BOOT_MODE (0x10) #define AB3100_STR_SIM_OFF (0x20) #define AB3100_STR_BATT_REMOVAL (0x40) #define AB3100_STR_VBUS (0x80) /* * AB3100 contains 8 regulators, one external regulator controller * and a buck converter, further the LDO E and buck converter can * have separate settings if they are in sleep mode, this is * modeled as a separate regulator. */ #define AB3100_NUM_REGULATORS 10 /** * struct ab3100 * @access_mutex: lock out concurrent accesses to the AB3100 registers * @dev: pointer to the containing device * @i2c_client: I2C client for this chip * @testreg_client: secondary client for test registers * @chip_name: name of this chip variant * @chip_id: 8 bit chip ID for this chip variant * @event_subscribers: event subscribers are listed here * @startup_events: a copy of the first reading of the event registers * @startup_events_read: whether the first events have been read * * This struct is PRIVATE and devices using it should NOT * access ANY fields. It is used as a token for calling the * AB3100 functions. */ struct ab3100 { struct mutex access_mutex; struct device *dev; struct i2c_client *i2c_client; struct i2c_client *testreg_client; char chip_name[32]; u8 chip_id; struct blocking_notifier_head event_subscribers; u8 startup_events[3]; bool startup_events_read; }; /** * struct ab3100_platform_data * Data supplied to initialize board connections to the AB3100 * @reg_constraints: regulator constraints for target board * the order of these constraints are: LDO A, C, D, E, * F, G, H, K, EXT and BUCK. * @reg_initvals: initial values for the regulator registers * plus two sleep settings for LDO E and the BUCK converter. * exactly AB3100_NUM_REGULATORS+2 values must be sent in. * Order: LDO A, C, E, E sleep, F, G, H, K, EXT, BUCK, * BUCK sleep, LDO D. (LDO D need to be initialized last.) * @external_voltage: voltage level of the external regulator. 
*/ struct ab3100_platform_data { struct regulator_init_data reg_constraints[AB3100_NUM_REGULATORS]; u8 reg_initvals[AB3100_NUM_REGULATORS+2]; int external_voltage; };
int ab3100_event_register(struct ab3100 *ab3100, struct notifier_block *nb);
int ab3100_event_unregister(struct ab3100 *ab3100, struct notifier_block *nb);
#endif /* MFD_AB3100_H */ mfd/lp8788.h 0000644 00000020350 14722070374 0006443 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */
/* * TI LP8788 MFD Device * * Copyright 2012 Texas Instruments * * Author: Milo (Woogyom) Kim <milo.kim@ti.com> */
#ifndef __MFD_LP8788_H__
#define __MFD_LP8788_H__
#include <linux/gpio.h>
#include <linux/irqdomain.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#define LP8788_DEV_BUCK "lp8788-buck"
#define LP8788_DEV_DLDO "lp8788-dldo"
#define LP8788_DEV_ALDO "lp8788-aldo"
#define LP8788_DEV_CHARGER "lp8788-charger"
#define LP8788_DEV_RTC "lp8788-rtc"
#define LP8788_DEV_BACKLIGHT "lp8788-backlight"
#define LP8788_DEV_VIBRATOR "lp8788-vibrator"
#define LP8788_DEV_KEYLED "lp8788-keyled"
#define LP8788_DEV_ADC "lp8788-adc"
#define LP8788_NUM_BUCKS 4
#define LP8788_NUM_DLDOS 12
#define LP8788_NUM_ALDOS 10
#define LP8788_NUM_BUCK2_DVS 2
#define LP8788_CHG_IRQ "CHG_IRQ"
#define LP8788_PRSW_IRQ "PRSW_IRQ"
#define LP8788_BATT_IRQ "BATT_IRQ"
#define LP8788_ALM_IRQ "ALARM_IRQ"
enum lp8788_int_id { /* interrupt register 1 : Addr 00h */ LP8788_INT_TSDL, LP8788_INT_TSDH, LP8788_INT_UVLO, LP8788_INT_FLAGMON, LP8788_INT_PWRON_TIME, LP8788_INT_PWRON, LP8788_INT_COMP1, LP8788_INT_COMP2, /* interrupt register 2 : Addr 01h */ LP8788_INT_CHG_INPUT_STATE, LP8788_INT_CHG_STATE, LP8788_INT_EOC, LP8788_INT_CHG_RESTART, LP8788_INT_RESTART_TIMEOUT, LP8788_INT_FULLCHG_TIMEOUT, LP8788_INT_PRECHG_TIMEOUT, /* interrupt register 3 : Addr 02h */ LP8788_INT_RTC_ALARM1 = 17, LP8788_INT_RTC_ALARM2, LP8788_INT_ENTER_SYS_SUPPORT, LP8788_INT_EXIT_SYS_SUPPORT, LP8788_INT_BATT_LOW, LP8788_INT_NO_BATT, LP8788_INT_MAX = 24, };
enum lp8788_dvs_sel { DVS_SEL_V0, DVS_SEL_V1, DVS_SEL_V2, DVS_SEL_V3, };
enum lp8788_ext_ldo_en_id { EN_ALDO1, EN_ALDO234, EN_ALDO5, EN_ALDO7, EN_DLDO7, EN_DLDO911, EN_LDOS_MAX, };
enum lp8788_charger_event { NO_CHARGER, CHARGER_DETECTED, };
enum lp8788_bl_ctrl_mode { LP8788_BL_REGISTER_ONLY, LP8788_BL_COMB_PWM_BASED, /* PWM + I2C, changed by PWM input */ LP8788_BL_COMB_REGISTER_BASED, /* PWM + I2C, changed by I2C */ };
enum lp8788_bl_dim_mode { LP8788_DIM_EXPONENTIAL, LP8788_DIM_LINEAR, };
enum lp8788_bl_full_scale_current { LP8788_FULLSCALE_5000uA, LP8788_FULLSCALE_8500uA, LP8788_FULLSCALE_1200uA, LP8788_FULLSCALE_1550uA, LP8788_FULLSCALE_1900uA, LP8788_FULLSCALE_2250uA, LP8788_FULLSCALE_2600uA, LP8788_FULLSCALE_2950uA, };
enum lp8788_bl_ramp_step { LP8788_RAMP_8us, LP8788_RAMP_1024us, LP8788_RAMP_2048us, LP8788_RAMP_4096us, LP8788_RAMP_8192us, LP8788_RAMP_16384us, LP8788_RAMP_32768us, LP8788_RAMP_65538us, };
enum lp8788_isink_scale { LP8788_ISINK_SCALE_100mA, LP8788_ISINK_SCALE_120mA, };
enum lp8788_isink_number { LP8788_ISINK_1, LP8788_ISINK_2, LP8788_ISINK_3, };
enum lp8788_alarm_sel { LP8788_ALARM_1, LP8788_ALARM_2, LP8788_ALARM_MAX, };
enum lp8788_adc_id { LPADC_VBATT_5P5, LPADC_VIN_CHG, LPADC_IBATT, LPADC_IC_TEMP, LPADC_VBATT_6P0, LPADC_VBATT_5P0, LPADC_ADC1, LPADC_ADC2, LPADC_VDD, LPADC_VCOIN, LPADC_VDD_LDO, LPADC_ADC3, LPADC_ADC4, LPADC_MAX, };
struct lp8788;
/* * lp8788_buck1_dvs * @gpio : gpio pin number for dvs control * @vsel : dvs selector for buck v1 register */
struct lp8788_buck1_dvs { int gpio; enum lp8788_dvs_sel vsel; }; /* *
lp8788_buck2_dvs * @gpio : two gpio pin numbers are used for dvs * @vsel : dvs selector for buck v2 register */ struct lp8788_buck2_dvs { int gpio[LP8788_NUM_BUCK2_DVS]; enum lp8788_dvs_sel vsel; }; /* * struct lp8788_chg_param * @addr : charging control register address (range : 0x11 ~ 0x1C) * @val : charging parameter value */ struct lp8788_chg_param { u8 addr; u8 val; }; /* * struct lp8788_charger_platform_data * @adc_vbatt : adc channel name for battery voltage * @adc_batt_temp : adc channel name for battery temperature * @max_vbatt_mv : used for calculating battery capacity * @chg_params : initial charging parameters * @num_chg_params : numbers of charging parameters * @charger_event : the charger event can be reported to the platform side */ struct lp8788_charger_platform_data { const char *adc_vbatt; const char *adc_batt_temp; unsigned int max_vbatt_mv; struct lp8788_chg_param *chg_params; int num_chg_params; void (*charger_event) (struct lp8788 *lp, enum lp8788_charger_event event); }; /* * struct lp8788_backlight_platform_data * @name : backlight driver name. (default: "lcd-backlight") * @initial_brightness : initial value of backlight brightness * @bl_mode : brightness control by pwm or lp8788 register * @dim_mode : dimming mode selection * @full_scale : full scale current setting * @rise_time : brightness ramp up step time * @fall_time : brightness ramp down step time * @pwm_pol : pwm polarity setting when bl_mode is pwm based * @period_ns : platform specific pwm period value. unit is nano. Only valid when bl_mode is LP8788_BL_COMB_PWM_BASED */ struct lp8788_backlight_platform_data { char *name; int initial_brightness; enum lp8788_bl_ctrl_mode bl_mode; enum lp8788_bl_dim_mode dim_mode; enum lp8788_bl_full_scale_current full_scale; enum lp8788_bl_ramp_step rise_time; enum lp8788_bl_ramp_step fall_time; enum pwm_polarity pwm_pol; unsigned int period_ns; }; /* * struct lp8788_led_platform_data * @name : led driver name. 
(default: "keyboard-backlight") * @scale : current scale * @num : current sink number * @iout_code : current output value (Addr 9Ah ~ 9Bh) */
struct lp8788_led_platform_data { char *name; enum lp8788_isink_scale scale; enum lp8788_isink_number num; int iout_code; };
/* * struct lp8788_vib_platform_data * @name : vibrator driver name * @scale : current scale * @num : current sink number * @iout_code : current output value (Addr 9Ah ~ 9Bh) * @pwm_code : PWM code value (Addr 9Ch ~ 9Eh) */
struct lp8788_vib_platform_data { char *name; enum lp8788_isink_scale scale; enum lp8788_isink_number num; int iout_code; int pwm_code; };
/* * struct lp8788_platform_data * @init_func : used for initializing registers * before mfd driver is registered * @buck_data : regulator initial data for buck * @dldo_data : regulator initial data for digital ldo * @aldo_data : regulator initial data for analog ldo * @buck1_dvs : gpio configurations for buck1 dvs * @buck2_dvs : gpio configurations for buck2 dvs * @chg_pdata : platform data for charger driver * @alarm_sel : rtc alarm selection (1 or 2) * @bl_pdata : configurable data for backlight driver * @led_pdata : configurable data for led driver * @vib_pdata : configurable data for vibrator driver * @adc_pdata : iio map data for adc driver */
struct lp8788_platform_data { /* general system information */ int (*init_func) (struct lp8788 *lp); /* regulators */ struct regulator_init_data *buck_data[LP8788_NUM_BUCKS]; struct regulator_init_data *dldo_data[LP8788_NUM_DLDOS]; struct regulator_init_data *aldo_data[LP8788_NUM_ALDOS]; struct lp8788_buck1_dvs *buck1_dvs; struct lp8788_buck2_dvs *buck2_dvs; /* charger */ struct lp8788_charger_platform_data *chg_pdata; /* rtc alarm */ enum lp8788_alarm_sel alarm_sel; /* backlight */ struct lp8788_backlight_platform_data *bl_pdata; /* current sinks */ struct lp8788_led_platform_data *led_pdata; struct lp8788_vib_platform_data *vib_pdata; /* adc iio map data */ struct iio_map *adc_pdata; };
/* * struct lp8788 * @dev : parent device pointer * @regmap : used for i2c communication when accessing registers * @irqdm : interrupt domain for handling nested interrupts * @irq : pin number of IRQ_N * @pdata : lp8788 platform specific data */
struct lp8788 { struct device *dev; struct regmap *regmap; struct irq_domain *irqdm; int irq; struct lp8788_platform_data *pdata; };
int lp8788_irq_init(struct lp8788 *lp, int chip_irq);
void lp8788_irq_exit(struct lp8788 *lp);
int lp8788_read_byte(struct lp8788 *lp, u8 reg, u8 *data);
int lp8788_read_multi_bytes(struct lp8788 *lp, u8 reg, u8 *data, size_t count);
int lp8788_write_byte(struct lp8788 *lp, u8 reg, u8 data);
int lp8788_update_bits(struct lp8788 *lp, u8 reg, u8 mask, u8 data);
#endif mfd/stm32-timers.h 0000644 00000013230 14722070374 0007741 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */
/* * Copyright (C) STMicroelectronics 2016 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> */
#ifndef _LINUX_STM32_GPTIMER_H_
#define _LINUX_STM32_GPTIMER_H_
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/regmap.h>
#define TIM_CR1 0x00 /* Control Register 1 */
#define TIM_CR2 0x04 /* Control Register 2 */
#define TIM_SMCR 0x08 /* Slave mode control reg */
#define TIM_DIER 0x0C /* DMA/interrupt register */
#define TIM_SR 0x10 /* Status register */
#define TIM_EGR 0x14 /* Event Generation Reg */
#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
#define
TIM_CNT 0x24 /* Counter */ #define TIM_PSC 0x28 /* Prescaler */ #define TIM_ARR 0x2c /* Auto-Reload Register */ #define TIM_CCR1 0x34 /* Capt/Comp Register 1 */ #define TIM_CCR2 0x38 /* Capt/Comp Register 2 */ #define TIM_CCR3 0x3C /* Capt/Comp Register 3 */ #define TIM_CCR4 0x40 /* Capt/Comp Register 4 */ #define TIM_BDTR 0x44 /* Break and Dead-Time Reg */ #define TIM_DCR 0x48 /* DMA control register */ #define TIM_DMAR 0x4C /* DMA register for transfer */ #define TIM_CR1_CEN BIT(0) /* Counter Enable */ #define TIM_CR1_DIR BIT(4) /* Counter Direction */ #define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */ #define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */ #define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */ #define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */ #define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */ #define TIM_DIER_UIE BIT(0) /* Update interrupt */ #define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */ #define TIM_DIER_CC1DE BIT(9) /* CC1 DMA request Enable */ #define TIM_DIER_CC2DE BIT(10) /* CC2 DMA request Enable */ #define TIM_DIER_CC3DE BIT(11) /* CC3 DMA request Enable */ #define TIM_DIER_CC4DE BIT(12) /* CC4 DMA request Enable */ #define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */ #define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */ #define TIM_SR_UIF BIT(0) /* Update interrupt flag */ #define TIM_EGR_UG BIT(0) /* Update Generation */ #define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */ #define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */ #define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */ #define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */ #define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */ #define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */ #define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */ #define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */ #define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */ #define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */ #define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */ #define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */ #define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */ #define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */ #define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */ #define TIM_CCER_CC2P BIT(5) /* Capt/Comp 2 Polarity */ #define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */ #define TIM_CCER_CC3P BIT(9) /* Capt/Comp 3 Polarity */ #define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */ #define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */ #define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12)) #define TIM_BDTR_BKE BIT(12) /* Break input enable */ #define TIM_BDTR_BKP BIT(13) /* Break input polarity */ #define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */ #define TIM_BDTR_MOE BIT(15) /* Main Output Enable */ #define TIM_BDTR_BKF (BIT(16) | BIT(17) | BIT(18) | BIT(19)) #define TIM_BDTR_BK2F (BIT(20) | BIT(21) | BIT(22) | BIT(23)) #define TIM_BDTR_BK2E BIT(24) /* Break 2 input enable */ #define TIM_BDTR_BK2P BIT(25) /* Break 2 input polarity */ #define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */ #define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */ #define MAX_TIM_PSC 0xFFFF #define MAX_TIM_ICPSC 0x3 #define TIM_CR2_MMS_SHIFT 4 #define TIM_CR2_MMS2_SHIFT 20 #define TIM_SMCR_TS_SHIFT 4 #define TIM_BDTR_BKF_MASK 0xF #define TIM_BDTR_BKF_SHIFT 16 #define TIM_BDTR_BK2F_SHIFT 20 
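/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): a minimal PWM-mode setup of capture/compare channel 1 using the
 * register offsets and bit definitions above. The regmap is assumed to come
 * from the MFD parent (see struct stm32_timers below); error handling is
 * omitted for brevity.
 */
static inline void stm32_timers_example_pwm_ch1(struct regmap *regmap)
{
	/* Select PWM mode 1 and enable the compare-register preload */
	regmap_update_bits(regmap, TIM_CCMR1, TIM_CCMR_M1 | TIM_CCMR_PE,
			   TIM_CCMR_M1 | TIM_CCMR_PE);
	/* Enable the channel 1 output */
	regmap_update_bits(regmap, TIM_CCER, TIM_CCER_CC1E, TIM_CCER_CC1E);
	/* Start the counter with auto-reload preload enabled */
	regmap_update_bits(regmap, TIM_CR1, TIM_CR1_ARPE | TIM_CR1_CEN,
			   TIM_CR1_ARPE | TIM_CR1_CEN);
}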
enum stm32_timers_dmas { STM32_TIMERS_DMA_CH1, STM32_TIMERS_DMA_CH2, STM32_TIMERS_DMA_CH3, STM32_TIMERS_DMA_CH4, STM32_TIMERS_DMA_UP, STM32_TIMERS_DMA_TRIG, STM32_TIMERS_DMA_COM, STM32_TIMERS_MAX_DMAS, };
/** * struct stm32_timers_dma - STM32 timer DMA handling. * @completion: end of DMA transfer completion * @phys_base: control registers physical base address * @lock: protect DMA access * @chan: DMA channel in use * @chans: DMA channels available for this timer instance */
struct stm32_timers_dma { struct completion completion; phys_addr_t phys_base; struct mutex lock; struct dma_chan *chan; struct dma_chan *chans[STM32_TIMERS_MAX_DMAS]; };
struct stm32_timers { struct clk *clk; struct regmap *regmap; u32 max_arr; struct stm32_timers_dma dma; /* Only to be used by the parent */ };
#if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS)
int stm32_timers_dma_burst_read(struct device *dev, u32 *buf, enum stm32_timers_dmas id, u32 reg, unsigned int num_reg, unsigned int bursts, unsigned long tmo_ms);
#else
static inline int stm32_timers_dma_burst_read(struct device *dev, u32 *buf, enum stm32_timers_dmas id, u32 reg, unsigned int num_reg, unsigned int bursts, unsigned long tmo_ms) { return -ENODEV; }
#endif
#endif mfd/max8998.h 0000644 00000005625 14722070374 0006620 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */
/* * max8998.h - Voltage regulator driver for the Maxim 8998 * * Copyright (C) 2009-2010 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> * Marek Szyprowski <m.szyprowski@samsung.com> */
#ifndef __LINUX_MFD_MAX8998_H
#define __LINUX_MFD_MAX8998_H
#include <linux/regulator/machine.h>
/* MAX 8998 regulator ids */
enum { MAX8998_LDO2 = 2, MAX8998_LDO3, MAX8998_LDO4, MAX8998_LDO5, MAX8998_LDO6, MAX8998_LDO7, MAX8998_LDO8, MAX8998_LDO9, MAX8998_LDO10, MAX8998_LDO11, MAX8998_LDO12, MAX8998_LDO13, MAX8998_LDO14, MAX8998_LDO15, MAX8998_LDO16, MAX8998_LDO17, MAX8998_BUCK1, MAX8998_BUCK2, MAX8998_BUCK3, MAX8998_BUCK4, MAX8998_EN32KHZ_AP, MAX8998_EN32KHZ_CP, MAX8998_ENVICHG, MAX8998_ESAFEOUT1, MAX8998_ESAFEOUT2, };
/** * max8998_regulator_data - regulator data * @id: regulator id * @initdata: regulator init data (constraints, supplies, ...) * @reg_node: DT node of regulator (unused on non-DT platforms) */
struct max8998_regulator_data { int id; struct regulator_init_data *initdata; struct device_node *reg_node; };
/** * struct max8998_platform_data - packages regulator init data * @regulators: array of defined regulators * @num_regulators: number of regulators used * @irq_base: base IRQ number for max8998, required for IRQs * @ono: power onoff IRQ number for max8998 * @buck_voltage_lock: Do NOT change the values of the following six * registers set by buck?_voltage?. The voltage of BUCK1/2 cannot * be other than the preset values. * @buck1_voltage: BUCK1 DVS mode 1 voltage registers * @buck2_voltage: BUCK2 DVS mode 2 voltage registers * @buck1_set1: BUCK1 gpio pin 1 to set output voltage * @buck1_set2: BUCK1 gpio pin 2 to set output voltage * @buck1_default_idx: Default for BUCK1 gpio pin 1, 2 * @buck2_set3: BUCK2 gpio pin to set output voltage * @buck2_default_idx: Default for BUCK2 gpio pin. * @wakeup: Allow waking up from suspend * @rtc_delay: LP3974 RTC chip bug that requires delay after a register * write before reading it. * @eoc: End of Charge Level in percent: 10% ~ 45% by 5% step * If it equals 0, leave it unchanged. * Otherwise, it is an invalid value. * @restart: Restart Level in mV: 100, 150, 200, and -1 for disable. * If it equals 0, leave it unchanged. * Otherwise, it is an invalid value. * @timeout: Full Timeout in hours: 5, 6, 7, and -1 for disable. * If it equals 0, leave it unchanged. * Otherwise, it is an invalid value. */
struct max8998_platform_data { struct max8998_regulator_data *regulators; int num_regulators; unsigned int irq_base; int ono; bool buck_voltage_lock; int buck1_voltage[4]; int buck2_voltage[2]; int buck1_set1; int buck1_set2; int buck1_default_idx; int buck2_set3; int buck2_default_idx; bool wakeup; bool rtc_delay; int eoc; int restart; int timeout; };
#endif /* __LINUX_MFD_MAX8998_H */ mfd/da9150/registers.h 0000644 00000110353 14722070374 0010406 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */
/* * DA9150 MFD Driver - Registers * * Copyright (c) 2014 Dialog Semiconductor * * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com> */
#ifndef __DA9150_REGISTERS_H
#define __DA9150_REGISTERS_H
#include <linux/bitops.h>
/* Registers */
#define DA9150_PAGE_CON 0x000 #define DA9150_STATUS_A 0x068 #define DA9150_STATUS_B 0x069 #define DA9150_STATUS_C 0x06A #define DA9150_STATUS_D 0x06B #define DA9150_STATUS_E 0x06C #define DA9150_STATUS_F 0x06D #define DA9150_STATUS_G 0x06E #define DA9150_STATUS_H 0x06F #define DA9150_STATUS_I 0x070 #define DA9150_STATUS_J 0x071 #define DA9150_STATUS_K 0x072 #define DA9150_STATUS_L 0x073 #define DA9150_STATUS_N 0x074
#define DA9150_FAULT_LOG_A 0x076 #define DA9150_FAULT_LOG_B 0x077 #define DA9150_EVENT_E 0x078 #define DA9150_EVENT_F 0x079 #define DA9150_EVENT_G 0x07A #define DA9150_EVENT_H 0x07B #define DA9150_IRQ_MASK_E 0x07C #define DA9150_IRQ_MASK_F 0x07D #define DA9150_IRQ_MASK_G 0x07E #define DA9150_IRQ_MASK_H 0x07F
#define DA9150_PAGE_CON_1 0x080 #define DA9150_CONFIG_A 0x0E0 #define DA9150_CONFIG_B 0x0E1 #define DA9150_CONFIG_C 0x0E2 #define DA9150_CONFIG_D 0x0E3 #define DA9150_CONFIG_E 0x0E4 #define DA9150_CONTROL_A 0x0E5 #define DA9150_CONTROL_B 0x0E6 #define DA9150_CONTROL_C 0x0E7
#define DA9150_GPIO_A_B 0x0E8 #define DA9150_GPIO_C_D 0x0E9 #define DA9150_GPIO_MODE_CONT 0x0EA #define DA9150_GPIO_CTRL_B 0x0EB #define DA9150_GPIO_CTRL_A 0x0EC #define DA9150_GPIO_CTRL_C 0x0ED #define DA9150_GPIO_CFG_A 0x0EE #define DA9150_GPIO_CFG_B 0x0EF #define DA9150_GPIO_CFG_C 0x0F0
#define DA9150_GPADC_MAN 0x0F2 #define DA9150_GPADC_RES_A 0x0F4 #define DA9150_GPADC_RES_B 0x0F5
#define DA9150_PAGE_CON_2 0x100 #define DA9150_OTP_CONT_SHARED 0x101 #define DA9150_INTERFACE_SHARED 0x105 #define DA9150_CONFIG_A_SHARED 0x106 #define DA9150_CONFIG_D_SHARED 0x109
#define DA9150_ADETVB_CFG_C 0x150 #define DA9150_ADETD_STAT 0x151 #define DA9150_ADET_CMPSTAT 0x152 #define DA9150_ADET_CTRL_A 0x153 #define DA9150_ADETVB_CFG_B 0x154 #define DA9150_ADETVB_CFG_A 0x155 #define DA9150_ADETAC_CFG_A 0x156 #define DA9150_ADDETAC_CFG_B 0x157 #define DA9150_ADETAC_CFG_C 0x158 #define DA9150_ADETAC_CFG_D 0x159 #define DA9150_ADETVB_CFG_D 0x15A #define DA9150_ADETID_CFG_A 0x15B #define DA9150_ADET_RID_PT_CHG_H 0x15C #define DA9150_ADET_RID_PT_CHG_L 0x15D
#define DA9150_PPR_TCTR_B 0x160 #define DA9150_PPR_BKCTRL_A 0x163 #define DA9150_PPR_BKCFG_A 0x164 #define DA9150_PPR_BKCFG_B 0x165 #define DA9150_PPR_CHGCTRL_A 0x166 #define DA9150_PPR_CHGCTRL_B 0x167 #define DA9150_PPR_CHGCTRL_C 0x168 #define DA9150_PPR_TCTR_A 0x169 #define DA9150_PPR_CHGCTRL_D 0x16A #define DA9150_PPR_CHGCTRL_E 0x16B #define DA9150_PPR_CHGCTRL_F 0x16C #define DA9150_PPR_CHGCTRL_G 0x16D #define DA9150_PPR_CHGCTRL_H 0x16E #define DA9150_PPR_CHGCTRL_I 0x16F #define DA9150_PPR_CHGCTRL_J 0x170 #define DA9150_PPR_CHGCTRL_K 0x171 #define
DA9150_PPR_CHGCTRL_L 0x172 #define DA9150_PPR_CHGCTRL_M 0x173 #define DA9150_PPR_THYST_A 0x174 #define DA9150_PPR_THYST_B 0x175 #define DA9150_PPR_THYST_C 0x176 #define DA9150_PPR_THYST_D 0x177 #define DA9150_PPR_THYST_E 0x178 #define DA9150_PPR_THYST_F 0x179 #define DA9150_PPR_THYST_G 0x17A #define DA9150_PAGE_CON_3 0x180 #define DA9150_PAGE_CON_4 0x200 #define DA9150_PAGE_CON_5 0x280 #define DA9150_PAGE_CON_6 0x300 #define DA9150_COREBTLD_STAT_A 0x302 #define DA9150_COREBTLD_CTRL_A 0x303 #define DA9150_CORE_CONFIG_A 0x304 #define DA9150_CORE_CONFIG_C 0x305 #define DA9150_CORE_CONFIG_B 0x306 #define DA9150_CORE_CFG_DATA_A 0x307 #define DA9150_CORE_CFG_DATA_B 0x308 #define DA9150_CORE_CMD_A 0x309 #define DA9150_CORE_DATA_A 0x30A #define DA9150_CORE_DATA_B 0x30B #define DA9150_CORE_DATA_C 0x30C #define DA9150_CORE_DATA_D 0x30D #define DA9150_CORE2WIRE_STAT_A 0x310 #define DA9150_CORE2WIRE_CTRL_A 0x311 #define DA9150_FW_CTRL_A 0x312 #define DA9150_FW_CTRL_C 0x313 #define DA9150_FW_CTRL_D 0x314 #define DA9150_FG_CTRL_A 0x315 #define DA9150_FG_CTRL_B 0x316 #define DA9150_FW_CTRL_E 0x317 #define DA9150_FW_CTRL_B 0x318 #define DA9150_GPADC_CMAN 0x320 #define DA9150_GPADC_CRES_A 0x322 #define DA9150_GPADC_CRES_B 0x323 #define DA9150_CC_CFG_A 0x328 #define DA9150_CC_CFG_B 0x329 #define DA9150_CC_ICHG_RES_A 0x32A #define DA9150_CC_ICHG_RES_B 0x32B #define DA9150_CC_IAVG_RES_A 0x32C #define DA9150_CC_IAVG_RES_B 0x32D #define DA9150_TAUX_CTRL_A 0x330 #define DA9150_TAUX_RELOAD_H 0x332 #define DA9150_TAUX_RELOAD_L 0x333 #define DA9150_TAUX_VALUE_H 0x334 #define DA9150_TAUX_VALUE_L 0x335 #define DA9150_AUX_DATA_0 0x338 #define DA9150_AUX_DATA_1 0x339 #define DA9150_AUX_DATA_2 0x33A #define DA9150_AUX_DATA_3 0x33B #define DA9150_BIF_CTRL 0x340 #define DA9150_TBAT_CTRL_A 0x342 #define DA9150_TBAT_CTRL_B 0x343 #define DA9150_TBAT_RES_A 0x344 #define DA9150_TBAT_RES_B 0x345 /* DA9150_PAGE_CON = 0x000 */ #define DA9150_PAGE_SHIFT 0 #define DA9150_PAGE_MASK (0x3f << 0) #define DA9150_I2C_PAGE_SHIFT 1 #define DA9150_I2C_PAGE_MASK (0x1f << 1) #define DA9150_WRITE_MODE_SHIFT 6 #define DA9150_WRITE_MODE_MASK BIT(6) #define DA9150_REVERT_SHIFT 7 #define DA9150_REVERT_MASK BIT(7) /* DA9150_STATUS_A = 0x068 */ #define DA9150_WKUP_STAT_SHIFT 2 #define DA9150_WKUP_STAT_MASK (0x0f << 2) #define DA9150_SLEEP_STAT_SHIFT 6 #define DA9150_SLEEP_STAT_MASK (0x03 << 6) /* DA9150_STATUS_B = 0x069 */ #define DA9150_VFAULT_STAT_SHIFT 0 #define DA9150_VFAULT_STAT_MASK BIT(0) #define DA9150_TFAULT_STAT_SHIFT 1 #define DA9150_TFAULT_STAT_MASK BIT(1) /* DA9150_STATUS_C = 0x06A */ #define DA9150_VDD33_STAT_SHIFT 0 #define DA9150_VDD33_STAT_MASK BIT(0) #define DA9150_VDD33_SLEEP_SHIFT 1 #define DA9150_VDD33_SLEEP_MASK BIT(1) #define DA9150_LFOSC_STAT_SHIFT 7 #define DA9150_LFOSC_STAT_MASK BIT(7) /* DA9150_STATUS_D = 0x06B */ #define DA9150_GPIOA_STAT_SHIFT 0 #define DA9150_GPIOA_STAT_MASK BIT(0) #define DA9150_GPIOB_STAT_SHIFT 1 #define DA9150_GPIOB_STAT_MASK BIT(1) #define DA9150_GPIOC_STAT_SHIFT 2 #define DA9150_GPIOC_STAT_MASK BIT(2) #define DA9150_GPIOD_STAT_SHIFT 3 #define DA9150_GPIOD_STAT_MASK BIT(3) /* DA9150_STATUS_E = 0x06C */ #define DA9150_DTYPE_SHIFT 0 #define DA9150_DTYPE_MASK (0x1f << 0) #define DA9150_DTYPE_DT_NIL (0x00 << 0) #define DA9150_DTYPE_DT_USB_OTG BIT(0) #define DA9150_DTYPE_DT_USB_STD (0x02 << 0) #define DA9150_DTYPE_DT_USB_CHG (0x03 << 0) #define DA9150_DTYPE_DT_ACA_CHG (0x04 << 0) #define DA9150_DTYPE_DT_ACA_OTG (0x05 << 0) #define DA9150_DTYPE_DT_ACA_DOC (0x06 << 0) #define DA9150_DTYPE_DT_DED_CHG (0x07 
<< 0) #define DA9150_DTYPE_DT_CR5_CHG (0x08 << 0) #define DA9150_DTYPE_DT_CR4_CHG (0x0c << 0) #define DA9150_DTYPE_DT_PT_CHG (0x11 << 0) #define DA9150_DTYPE_DT_NN_ACC (0x16 << 0) #define DA9150_DTYPE_DT_NN_CHG (0x17 << 0) /* DA9150_STATUS_F = 0x06D */ #define DA9150_SESS_VLD_SHIFT 0 #define DA9150_SESS_VLD_MASK BIT(0) #define DA9150_ID_ERR_SHIFT 1 #define DA9150_ID_ERR_MASK BIT(1) #define DA9150_PT_CHG_SHIFT 2 #define DA9150_PT_CHG_MASK BIT(2) /* DA9150_STATUS_G = 0x06E */ #define DA9150_RID_SHIFT 0 #define DA9150_RID_MASK (0xff << 0) /* DA9150_STATUS_H = 0x06F */ #define DA9150_VBUS_STAT_SHIFT 0 #define DA9150_VBUS_STAT_MASK (0x07 << 0) #define DA9150_VBUS_STAT_OFF (0x00 << 0) #define DA9150_VBUS_STAT_WAIT BIT(0) #define DA9150_VBUS_STAT_CHG (0x02 << 0) #define DA9150_VBUS_TRED_SHIFT 3 #define DA9150_VBUS_TRED_MASK BIT(3) #define DA9150_VBUS_DROP_STAT_SHIFT 4 #define DA9150_VBUS_DROP_STAT_MASK (0x0f << 4) /* DA9150_STATUS_I = 0x070 */ #define DA9150_VBUS_ISET_STAT_SHIFT 0 #define DA9150_VBUS_ISET_STAT_MASK (0x1f << 0) #define DA9150_VBUS_OT_SHIFT 7 #define DA9150_VBUS_OT_MASK BIT(7) /* DA9150_STATUS_J = 0x071 */ #define DA9150_CHG_STAT_SHIFT 0 #define DA9150_CHG_STAT_MASK (0x0f << 0) #define DA9150_CHG_STAT_OFF (0x00 << 0) #define DA9150_CHG_STAT_SUSP BIT(0) #define DA9150_CHG_STAT_ACT (0x02 << 0) #define DA9150_CHG_STAT_PRE (0x03 << 0) #define DA9150_CHG_STAT_CC (0x04 << 0) #define DA9150_CHG_STAT_CV (0x05 << 0) #define DA9150_CHG_STAT_FULL (0x06 << 0) #define DA9150_CHG_STAT_TEMP (0x07 << 0) #define DA9150_CHG_STAT_TIME (0x08 << 0) #define DA9150_CHG_STAT_BAT (0x09 << 0) #define DA9150_CHG_TEMP_SHIFT 4 #define DA9150_CHG_TEMP_MASK (0x07 << 4) #define DA9150_CHG_TEMP_UNDER (0x06 << 4) #define DA9150_CHG_TEMP_OVER (0x07 << 4) #define DA9150_CHG_IEND_STAT_SHIFT 7 #define DA9150_CHG_IEND_STAT_MASK BIT(7) /* DA9150_STATUS_K = 0x072 */ #define DA9150_CHG_IAV_H_SHIFT 0 #define DA9150_CHG_IAV_H_MASK (0xff << 0) /* DA9150_STATUS_L = 0x073 */ #define DA9150_CHG_IAV_L_SHIFT 5 #define DA9150_CHG_IAV_L_MASK (0x07 << 5) /* DA9150_STATUS_N = 0x074 */ #define DA9150_CHG_TIME_SHIFT 1 #define DA9150_CHG_TIME_MASK BIT(1) #define DA9150_CHG_TRED_SHIFT 2 #define DA9150_CHG_TRED_MASK BIT(2) #define DA9150_CHG_TJUNC_CLASS_SHIFT 3 #define DA9150_CHG_TJUNC_CLASS_MASK (0x07 << 3) #define DA9150_CHG_TJUNC_CLASS_6 (0x06 << 3) #define DA9150_EBS_STAT_SHIFT 6 #define DA9150_EBS_STAT_MASK BIT(6) #define DA9150_CHG_BAT_REMOVED_SHIFT 7 #define DA9150_CHG_BAT_REMOVED_MASK BIT(7) /* DA9150_FAULT_LOG_A = 0x076 */ #define DA9150_TEMP_FAULT_SHIFT 0 #define DA9150_TEMP_FAULT_MASK BIT(0) #define DA9150_VSYS_FAULT_SHIFT 1 #define DA9150_VSYS_FAULT_MASK BIT(1) #define DA9150_START_FAULT_SHIFT 2 #define DA9150_START_FAULT_MASK BIT(2) #define DA9150_EXT_FAULT_SHIFT 3 #define DA9150_EXT_FAULT_MASK BIT(3) #define DA9150_POR_FAULT_SHIFT 4 #define DA9150_POR_FAULT_MASK BIT(4) /* DA9150_FAULT_LOG_B = 0x077 */ #define DA9150_VBUS_FAULT_SHIFT 0 #define DA9150_VBUS_FAULT_MASK BIT(0) #define DA9150_OTG_FAULT_SHIFT 1 #define DA9150_OTG_FAULT_MASK BIT(1) /* DA9150_EVENT_E = 0x078 */ #define DA9150_E_VBUS_SHIFT 0 #define DA9150_E_VBUS_MASK BIT(0) #define DA9150_E_CHG_SHIFT 1 #define DA9150_E_CHG_MASK BIT(1) #define DA9150_E_TCLASS_SHIFT 2 #define DA9150_E_TCLASS_MASK BIT(2) #define DA9150_E_TJUNC_SHIFT 3 #define DA9150_E_TJUNC_MASK BIT(3) #define DA9150_E_VFAULT_SHIFT 4 #define DA9150_E_VFAULT_MASK BIT(4) #define DA9150_EVENTS_H_SHIFT 5 #define DA9150_EVENTS_H_MASK BIT(5) #define DA9150_EVENTS_G_SHIFT 6 #define DA9150_EVENTS_G_MASK BIT(6) 
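/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): decoding the CHG_STAT field of DA9150_STATUS_J with the mask
 * definitions above. The helper name is hypothetical; a real driver would
 * first read STATUS_J through its regmap.
 */
static inline int da9150_example_is_charging(unsigned int status_j)
{
	switch (status_j & DA9150_CHG_STAT_MASK) {
	case DA9150_CHG_STAT_ACT:
	case DA9150_CHG_STAT_PRE:
	case DA9150_CHG_STAT_CC:
	case DA9150_CHG_STAT_CV:
		return 1;	/* actively charging */
	default:
		return 0;	/* off, suspended, full, or faulted */
	}
}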
#define DA9150_EVENTS_F_SHIFT 7 #define DA9150_EVENTS_F_MASK BIT(7) /* DA9150_EVENT_F = 0x079 */ #define DA9150_E_CONF_SHIFT 0 #define DA9150_E_CONF_MASK BIT(0) #define DA9150_E_DAT_SHIFT 1 #define DA9150_E_DAT_MASK BIT(1) #define DA9150_E_DTYPE_SHIFT 3 #define DA9150_E_DTYPE_MASK BIT(3) #define DA9150_E_ID_SHIFT 4 #define DA9150_E_ID_MASK BIT(4) #define DA9150_E_ADP_SHIFT 5 #define DA9150_E_ADP_MASK BIT(5) #define DA9150_E_SESS_END_SHIFT 6 #define DA9150_E_SESS_END_MASK BIT(6) #define DA9150_E_SESS_VLD_SHIFT 7 #define DA9150_E_SESS_VLD_MASK BIT(7) /* DA9150_EVENT_G = 0x07A */ #define DA9150_E_FG_SHIFT 0 #define DA9150_E_FG_MASK BIT(0) #define DA9150_E_GP_SHIFT 1 #define DA9150_E_GP_MASK BIT(1) #define DA9150_E_TBAT_SHIFT 2 #define DA9150_E_TBAT_MASK BIT(2) #define DA9150_E_GPIOA_SHIFT 3 #define DA9150_E_GPIOA_MASK BIT(3) #define DA9150_E_GPIOB_SHIFT 4 #define DA9150_E_GPIOB_MASK BIT(4) #define DA9150_E_GPIOC_SHIFT 5 #define DA9150_E_GPIOC_MASK BIT(5) #define DA9150_E_GPIOD_SHIFT 6 #define DA9150_E_GPIOD_MASK BIT(6) #define DA9150_E_GPADC_SHIFT 7 #define DA9150_E_GPADC_MASK BIT(7) /* DA9150_EVENT_H = 0x07B */ #define DA9150_E_WKUP_SHIFT 0 #define DA9150_E_WKUP_MASK BIT(0) /* DA9150_IRQ_MASK_E = 0x07C */ #define DA9150_M_VBUS_SHIFT 0 #define DA9150_M_VBUS_MASK BIT(0) #define DA9150_M_CHG_SHIFT 1 #define DA9150_M_CHG_MASK BIT(1) #define DA9150_M_TJUNC_SHIFT 3 #define DA9150_M_TJUNC_MASK BIT(3) #define DA9150_M_VFAULT_SHIFT 4 #define DA9150_M_VFAULT_MASK BIT(4) /* DA9150_IRQ_MASK_F = 0x07D */ #define DA9150_M_CONF_SHIFT 0 #define DA9150_M_CONF_MASK BIT(0) #define DA9150_M_DAT_SHIFT 1 #define DA9150_M_DAT_MASK BIT(1) #define DA9150_M_DTYPE_SHIFT 3 #define DA9150_M_DTYPE_MASK BIT(3) #define DA9150_M_ID_SHIFT 4 #define DA9150_M_ID_MASK BIT(4) #define DA9150_M_ADP_SHIFT 5 #define DA9150_M_ADP_MASK BIT(5) #define DA9150_M_SESS_END_SHIFT 6 #define DA9150_M_SESS_END_MASK BIT(6) #define DA9150_M_SESS_VLD_SHIFT 7 #define DA9150_M_SESS_VLD_MASK BIT(7) /* DA9150_IRQ_MASK_G = 0x07E */ #define DA9150_M_FG_SHIFT 0 #define DA9150_M_FG_MASK BIT(0) #define DA9150_M_GP_SHIFT 1 #define DA9150_M_GP_MASK BIT(1) #define DA9150_M_TBAT_SHIFT 2 #define DA9150_M_TBAT_MASK BIT(2) #define DA9150_M_GPIOA_SHIFT 3 #define DA9150_M_GPIOA_MASK BIT(3) #define DA9150_M_GPIOB_SHIFT 4 #define DA9150_M_GPIOB_MASK BIT(4) #define DA9150_M_GPIOC_SHIFT 5 #define DA9150_M_GPIOC_MASK BIT(5) #define DA9150_M_GPIOD_SHIFT 6 #define DA9150_M_GPIOD_MASK BIT(6) #define DA9150_M_GPADC_SHIFT 7 #define DA9150_M_GPADC_MASK BIT(7) /* DA9150_IRQ_MASK_H = 0x07F */ #define DA9150_M_WKUP_SHIFT 0 #define DA9150_M_WKUP_MASK BIT(0) /* DA9150_PAGE_CON_1 = 0x080 */ #define DA9150_PAGE_SHIFT 0 #define DA9150_PAGE_MASK (0x3f << 0) #define DA9150_WRITE_MODE_SHIFT 6 #define DA9150_WRITE_MODE_MASK BIT(6) #define DA9150_REVERT_SHIFT 7 #define DA9150_REVERT_MASK BIT(7) /* DA9150_CONFIG_A = 0x0E0 */ #define DA9150_RESET_DUR_SHIFT 0 #define DA9150_RESET_DUR_MASK (0x03 << 0) #define DA9150_RESET_EXT_SHIFT 2 #define DA9150_RESET_EXT_MASK (0x03 << 2) #define DA9150_START_MAX_SHIFT 4 #define DA9150_START_MAX_MASK (0x03 << 4) #define DA9150_PS_WAIT_EN_SHIFT 6 #define DA9150_PS_WAIT_EN_MASK BIT(6) #define DA9150_PS_DISABLE_DIRECT_SHIFT 7 #define DA9150_PS_DISABLE_DIRECT_MASK BIT(7) /* DA9150_CONFIG_B = 0x0E1 */ #define DA9150_VFAULT_ADJ_SHIFT 0 #define DA9150_VFAULT_ADJ_MASK (0x0f << 0) #define DA9150_VFAULT_HYST_SHIFT 4 #define DA9150_VFAULT_HYST_MASK (0x07 << 4) #define DA9150_VFAULT_EN_SHIFT 7 #define DA9150_VFAULT_EN_MASK BIT(7) /* DA9150_CONFIG_C = 0x0E2 */ #define 
DA9150_VSYS_MIN_SHIFT 3 #define DA9150_VSYS_MIN_MASK (0x1f << 3) /* DA9150_CONFIG_D = 0x0E3 */ #define DA9150_LFOSC_EXT_SHIFT 0 #define DA9150_LFOSC_EXT_MASK BIT(0) #define DA9150_VDD33_DWN_SHIFT 1 #define DA9150_VDD33_DWN_MASK BIT(1) #define DA9150_WKUP_PM_EN_SHIFT 2 #define DA9150_WKUP_PM_EN_MASK BIT(2) #define DA9150_WKUP_CE_SEL_SHIFT 3 #define DA9150_WKUP_CE_SEL_MASK (0x03 << 3) #define DA9150_WKUP_CLK32K_EN_SHIFT 5 #define DA9150_WKUP_CLK32K_EN_MASK BIT(5) #define DA9150_DISABLE_DEL_SHIFT 7 #define DA9150_DISABLE_DEL_MASK BIT(7) /* DA9150_CONFIG_E = 0x0E4 */ #define DA9150_PM_SPKSUP_DIS_SHIFT 0 #define DA9150_PM_SPKSUP_DIS_MASK BIT(0) #define DA9150_PM_MERGE_SHIFT 1 #define DA9150_PM_MERGE_MASK BIT(1) #define DA9150_PM_SR_OFF_SHIFT 2 #define DA9150_PM_SR_OFF_MASK BIT(2) #define DA9150_PM_TIMEOUT_EN_SHIFT 3 #define DA9150_PM_TIMEOUT_EN_MASK BIT(3) #define DA9150_PM_DLY_SEL_SHIFT 4 #define DA9150_PM_DLY_SEL_MASK (0x07 << 4) #define DA9150_PM_OUT_DLY_SEL_SHIFT 7 #define DA9150_PM_OUT_DLY_SEL_MASK BIT(7) /* DA9150_CONTROL_A = 0x0E5 */ #define DA9150_VDD33_SL_SHIFT 0 #define DA9150_VDD33_SL_MASK BIT(0) #define DA9150_VDD33_LPM_SHIFT 1 #define DA9150_VDD33_LPM_MASK (0x03 << 1) #define DA9150_VDD33_EN_SHIFT 3 #define DA9150_VDD33_EN_MASK BIT(3) #define DA9150_GPI_LPM_SHIFT 6 #define DA9150_GPI_LPM_MASK BIT(6) #define DA9150_PM_IF_LPM_SHIFT 7 #define DA9150_PM_IF_LPM_MASK BIT(7) /* DA9150_CONTROL_B = 0x0E6 */ #define DA9150_LPM_SHIFT 0 #define DA9150_LPM_MASK BIT(0) #define DA9150_RESET_SHIFT 1 #define DA9150_RESET_MASK BIT(1) #define DA9150_RESET_USRCONF_EN_SHIFT 2 #define DA9150_RESET_USRCONF_EN_MASK BIT(2) /* DA9150_CONTROL_C = 0x0E7 */ #define DA9150_DISABLE_SHIFT 0 #define DA9150_DISABLE_MASK BIT(0) /* DA9150_GPIO_A_B = 0x0E8 */ #define DA9150_GPIOA_PIN_SHIFT 0 #define DA9150_GPIOA_PIN_MASK (0x07 << 0) #define DA9150_GPIOA_PIN_GPI (0x00 << 0) #define DA9150_GPIOA_PIN_GPO_OD BIT(0) #define DA9150_GPIOA_TYPE_SHIFT 3 #define DA9150_GPIOA_TYPE_MASK BIT(3) #define DA9150_GPIOB_PIN_SHIFT 4 #define DA9150_GPIOB_PIN_MASK (0x07 << 4) #define DA9150_GPIOB_PIN_GPI (0x00 << 4) #define DA9150_GPIOB_PIN_GPO_OD BIT(4) #define DA9150_GPIOB_TYPE_SHIFT 7 #define DA9150_GPIOB_TYPE_MASK BIT(7) /* DA9150_GPIO_C_D = 0x0E9 */ #define DA9150_GPIOC_PIN_SHIFT 0 #define DA9150_GPIOC_PIN_MASK (0x07 << 0) #define DA9150_GPIOC_PIN_GPI (0x00 << 0) #define DA9150_GPIOC_PIN_GPO_OD BIT(0) #define DA9150_GPIOC_TYPE_SHIFT 3 #define DA9150_GPIOC_TYPE_MASK BIT(3) #define DA9150_GPIOD_PIN_SHIFT 4 #define DA9150_GPIOD_PIN_MASK (0x07 << 4) #define DA9150_GPIOD_PIN_GPI (0x00 << 4) #define DA9150_GPIOD_PIN_GPO_OD BIT(4) #define DA9150_GPIOD_TYPE_SHIFT 7 #define DA9150_GPIOD_TYPE_MASK BIT(7) /* DA9150_GPIO_MODE_CONT = 0x0EA */ #define DA9150_GPIOA_MODE_SHIFT 0 #define DA9150_GPIOA_MODE_MASK BIT(0) #define DA9150_GPIOB_MODE_SHIFT 1 #define DA9150_GPIOB_MODE_MASK BIT(1) #define DA9150_GPIOC_MODE_SHIFT 2 #define DA9150_GPIOC_MODE_MASK BIT(2) #define DA9150_GPIOD_MODE_SHIFT 3 #define DA9150_GPIOD_MODE_MASK BIT(3) #define DA9150_GPIOA_CONT_SHIFT 4 #define DA9150_GPIOA_CONT_MASK BIT(4) #define DA9150_GPIOB_CONT_SHIFT 5 #define DA9150_GPIOB_CONT_MASK BIT(5) #define DA9150_GPIOC_CONT_SHIFT 6 #define DA9150_GPIOC_CONT_MASK BIT(6) #define DA9150_GPIOD_CONT_SHIFT 7 #define DA9150_GPIOD_CONT_MASK BIT(7) /* DA9150_GPIO_CTRL_B = 0x0EB */ #define DA9150_WAKE_PIN_SHIFT 0 #define DA9150_WAKE_PIN_MASK (0x03 << 0) #define DA9150_WAKE_MODE_SHIFT 2 #define DA9150_WAKE_MODE_MASK BIT(2) #define DA9150_WAKE_CONT_SHIFT 3 #define DA9150_WAKE_CONT_MASK 
BIT(3) #define DA9150_WAKE_DLY_SHIFT 4 #define DA9150_WAKE_DLY_MASK BIT(4) /* DA9150_GPIO_CTRL_A = 0x0EC */ #define DA9150_GPIOA_ANAEN_SHIFT 0 #define DA9150_GPIOA_ANAEN_MASK BIT(0) #define DA9150_GPIOB_ANAEN_SHIFT 1 #define DA9150_GPIOB_ANAEN_MASK BIT(1) #define DA9150_GPIOC_ANAEN_SHIFT 2 #define DA9150_GPIOC_ANAEN_MASK BIT(2) #define DA9150_GPIOD_ANAEN_SHIFT 3 #define DA9150_GPIOD_ANAEN_MASK BIT(3) #define DA9150_GPIO_ANAEN 0x01 #define DA9150_GPIO_ANAEN_MASK 0x0F #define DA9150_CHGLED_PIN_SHIFT 5 #define DA9150_CHGLED_PIN_MASK (0x07 << 5) /* DA9150_GPIO_CTRL_C = 0x0ED */ #define DA9150_CHGBL_DUR_SHIFT 0 #define DA9150_CHGBL_DUR_MASK (0x03 << 0) #define DA9150_CHGBL_DBL_SHIFT 2 #define DA9150_CHGBL_DBL_MASK BIT(2) #define DA9150_CHGBL_FRQ_SHIFT 3 #define DA9150_CHGBL_FRQ_MASK (0x03 << 3) #define DA9150_CHGBL_FLKR_SHIFT 5 #define DA9150_CHGBL_FLKR_MASK BIT(5) /* DA9150_GPIO_CFG_A = 0x0EE */ #define DA9150_CE_LPM_DEB_SHIFT 0 #define DA9150_CE_LPM_DEB_MASK (0x07 << 0) /* DA9150_GPIO_CFG_B = 0x0EF */ #define DA9150_GPIOA_PUPD_SHIFT 0 #define DA9150_GPIOA_PUPD_MASK BIT(0) #define DA9150_GPIOB_PUPD_SHIFT 1 #define DA9150_GPIOB_PUPD_MASK BIT(1) #define DA9150_GPIOC_PUPD_SHIFT 2 #define DA9150_GPIOC_PUPD_MASK BIT(2) #define DA9150_GPIOD_PUPD_SHIFT 3 #define DA9150_GPIOD_PUPD_MASK BIT(3) #define DA9150_GPIO_PUPD_MASK (0xF << 0) #define DA9150_GPI_DEB_SHIFT 4 #define DA9150_GPI_DEB_MASK (0x07 << 4) #define DA9150_LPM_EN_SHIFT 7 #define DA9150_LPM_EN_MASK BIT(7) /* DA9150_GPIO_CFG_C = 0x0F0 */ #define DA9150_GPI_V_SHIFT 0 #define DA9150_GPI_V_MASK BIT(0) #define DA9150_VDDIO_INT_SHIFT 1 #define DA9150_VDDIO_INT_MASK BIT(1) #define DA9150_FAULT_PIN_SHIFT 3 #define DA9150_FAULT_PIN_MASK (0x07 << 3) #define DA9150_FAULT_TYPE_SHIFT 6 #define DA9150_FAULT_TYPE_MASK BIT(6) #define DA9150_NIRQ_PUPD_SHIFT 7 #define DA9150_NIRQ_PUPD_MASK BIT(7) /* DA9150_GPADC_MAN = 0x0F2 */ #define DA9150_GPADC_EN_SHIFT 0 #define DA9150_GPADC_EN_MASK BIT(0) #define DA9150_GPADC_MUX_SHIFT 1 #define DA9150_GPADC_MUX_MASK (0x1f << 1) /* DA9150_GPADC_RES_A = 0x0F4 */ #define DA9150_GPADC_RES_H_SHIFT 0 #define DA9150_GPADC_RES_H_MASK (0xff << 0) /* DA9150_GPADC_RES_B = 0x0F5 */ #define DA9150_GPADC_RUN_SHIFT 0 #define DA9150_GPADC_RUN_MASK BIT(0) #define DA9150_GPADC_RES_L_SHIFT 6 #define DA9150_GPADC_RES_L_MASK (0x03 << 6) #define DA9150_GPADC_RES_L_BITS 2 /* DA9150_PAGE_CON_2 = 0x100 */ #define DA9150_PAGE_SHIFT 0 #define DA9150_PAGE_MASK (0x3f << 0) #define DA9150_WRITE_MODE_SHIFT 6 #define DA9150_WRITE_MODE_MASK BIT(6) #define DA9150_REVERT_SHIFT 7 #define DA9150_REVERT_MASK BIT(7) /* DA9150_OTP_CONT_SHARED = 0x101 */ #define DA9150_PC_DONE_SHIFT 3 #define DA9150_PC_DONE_MASK BIT(3) /* DA9150_INTERFACE_SHARED = 0x105 */ #define DA9150_IF_BASE_ADDR_SHIFT 4 #define DA9150_IF_BASE_ADDR_MASK (0x0f << 4) /* DA9150_CONFIG_A_SHARED = 0x106 */ #define DA9150_NIRQ_VDD_SHIFT 1 #define DA9150_NIRQ_VDD_MASK BIT(1) #define DA9150_NIRQ_PIN_SHIFT 2 #define DA9150_NIRQ_PIN_MASK BIT(2) #define DA9150_NIRQ_TYPE_SHIFT 3 #define DA9150_NIRQ_TYPE_MASK BIT(3) #define DA9150_PM_IF_V_SHIFT 4 #define DA9150_PM_IF_V_MASK BIT(4) #define DA9150_PM_IF_FMP_SHIFT 5 #define DA9150_PM_IF_FMP_MASK BIT(5) #define DA9150_PM_IF_HSM_SHIFT 6 #define DA9150_PM_IF_HSM_MASK BIT(6) /* DA9150_CONFIG_D_SHARED = 0x109 */ #define DA9150_NIRQ_MODE_SHIFT 1 #define DA9150_NIRQ_MODE_MASK BIT(1) /* DA9150_ADETVB_CFG_C = 0x150 */ #define DA9150_TADP_RISE_SHIFT 0 #define DA9150_TADP_RISE_MASK (0xff << 0) /* DA9150_ADETD_STAT = 0x151 */ #define DA9150_DCD_STAT_SHIFT 0 #define 
DA9150_DCD_STAT_MASK BIT(0) #define DA9150_PCD_STAT_SHIFT 1 #define DA9150_PCD_STAT_MASK (0x03 << 1) #define DA9150_SCD_STAT_SHIFT 3 #define DA9150_SCD_STAT_MASK (0x03 << 3) #define DA9150_DP_STAT_SHIFT 5 #define DA9150_DP_STAT_MASK BIT(5) #define DA9150_DM_STAT_SHIFT 6 #define DA9150_DM_STAT_MASK BIT(6) /* DA9150_ADET_CMPSTAT = 0x152 */ #define DA9150_DP_COMP_SHIFT 1 #define DA9150_DP_COMP_MASK BIT(1) #define DA9150_DM_COMP_SHIFT 2 #define DA9150_DM_COMP_MASK BIT(2) #define DA9150_ADP_SNS_COMP_SHIFT 3 #define DA9150_ADP_SNS_COMP_MASK BIT(3) #define DA9150_ADP_PRB_COMP_SHIFT 4 #define DA9150_ADP_PRB_COMP_MASK BIT(4) #define DA9150_ID_COMP_SHIFT 5 #define DA9150_ID_COMP_MASK BIT(5) /* DA9150_ADET_CTRL_A = 0x153 */ #define DA9150_AID_DAT_SHIFT 0 #define DA9150_AID_DAT_MASK BIT(0) #define DA9150_AID_ID_SHIFT 1 #define DA9150_AID_ID_MASK BIT(1) #define DA9150_AID_TRIG_SHIFT 2 #define DA9150_AID_TRIG_MASK BIT(2) /* DA9150_ADETVB_CFG_B = 0x154 */ #define DA9150_VB_MODE_SHIFT 0 #define DA9150_VB_MODE_MASK (0x03 << 0) #define DA9150_VB_MODE_VB_SESS BIT(0) #define DA9150_TADP_PRB_SHIFT 2 #define DA9150_TADP_PRB_MASK BIT(2) #define DA9150_DAT_RPD_EXT_SHIFT 5 #define DA9150_DAT_RPD_EXT_MASK BIT(5) #define DA9150_CONF_RPD_SHIFT 6 #define DA9150_CONF_RPD_MASK BIT(6) #define DA9150_CONF_SRP_SHIFT 7 #define DA9150_CONF_SRP_MASK BIT(7) /* DA9150_ADETVB_CFG_A = 0x155 */ #define DA9150_AID_MODE_SHIFT 0 #define DA9150_AID_MODE_MASK (0x03 << 0) #define DA9150_AID_EXT_POL_SHIFT 2 #define DA9150_AID_EXT_POL_MASK BIT(2) /* DA9150_ADETAC_CFG_A = 0x156 */ #define DA9150_ISET_CDP_SHIFT 0 #define DA9150_ISET_CDP_MASK (0x1f << 0) #define DA9150_CONF_DBP_SHIFT 5 #define DA9150_CONF_DBP_MASK BIT(5) /* DA9150_ADDETAC_CFG_B = 0x157 */ #define DA9150_ISET_DCHG_SHIFT 0 #define DA9150_ISET_DCHG_MASK (0x1f << 0) #define DA9150_CONF_GPIOA_SHIFT 5 #define DA9150_CONF_GPIOA_MASK BIT(5) #define DA9150_CONF_GPIOB_SHIFT 6 #define DA9150_CONF_GPIOB_MASK BIT(6) #define DA9150_AID_VB_SHIFT 7 #define DA9150_AID_VB_MASK BIT(7) /* DA9150_ADETAC_CFG_C = 0x158 */ #define DA9150_ISET_DEF_SHIFT 0 #define DA9150_ISET_DEF_MASK (0x1f << 0) #define DA9150_CONF_MODE_SHIFT 5 #define DA9150_CONF_MODE_MASK (0x03 << 5) #define DA9150_AID_CR_DIS_SHIFT 7 #define DA9150_AID_CR_DIS_MASK BIT(7) /* DA9150_ADETAC_CFG_D = 0x159 */ #define DA9150_ISET_UNIT_SHIFT 0 #define DA9150_ISET_UNIT_MASK (0x1f << 0) #define DA9150_AID_UNCLAMP_SHIFT 5 #define DA9150_AID_UNCLAMP_MASK BIT(5) /* DA9150_ADETVB_CFG_D = 0x15A */ #define DA9150_ID_MODE_SHIFT 0 #define DA9150_ID_MODE_MASK (0x03 << 0) #define DA9150_DAT_MODE_SHIFT 2 #define DA9150_DAT_MODE_MASK (0x0f << 2) #define DA9150_DAT_SWP_SHIFT 6 #define DA9150_DAT_SWP_MASK BIT(6) #define DA9150_DAT_CLAMP_EXT_SHIFT 7 #define DA9150_DAT_CLAMP_EXT_MASK BIT(7) /* DA9150_ADETID_CFG_A = 0x15B */ #define DA9150_TID_POLL_SHIFT 0 #define DA9150_TID_POLL_MASK (0x07 << 0) #define DA9150_RID_CONV_SHIFT 3 #define DA9150_RID_CONV_MASK BIT(3) /* DA9150_ADET_RID_PT_CHG_H = 0x15C */ #define DA9150_RID_PT_CHG_H_SHIFT 0 #define DA9150_RID_PT_CHG_H_MASK (0xff << 0) /* DA9150_ADET_RID_PT_CHG_L = 0x15D */ #define DA9150_RID_PT_CHG_L_SHIFT 6 #define DA9150_RID_PT_CHG_L_MASK (0x03 << 6) /* DA9150_PPR_TCTR_B = 0x160 */ #define DA9150_CHG_TCTR_VAL_SHIFT 0 #define DA9150_CHG_TCTR_VAL_MASK (0xff << 0) /* DA9150_PPR_BKCTRL_A = 0x163 */ #define DA9150_VBUS_MODE_SHIFT 0 #define DA9150_VBUS_MODE_MASK (0x03 << 0) #define DA9150_VBUS_MODE_CHG BIT(0) #define DA9150_VBUS_MODE_OTG (0x02 << 0) #define DA9150_VBUS_LPM_SHIFT 2 #define DA9150_VBUS_LPM_MASK 
(0x03 << 2) #define DA9150_VBUS_SUSP_SHIFT 4 #define DA9150_VBUS_SUSP_MASK BIT(4) #define DA9150_VBUS_PWM_SHIFT 5 #define DA9150_VBUS_PWM_MASK BIT(5) #define DA9150_VBUS_ISO_SHIFT 6 #define DA9150_VBUS_ISO_MASK BIT(6) #define DA9150_VBUS_LDO_SHIFT 7 #define DA9150_VBUS_LDO_MASK BIT(7) /* DA9150_PPR_BKCFG_A = 0x164 */ #define DA9150_VBUS_ISET_SHIFT 0 #define DA9150_VBUS_ISET_MASK (0x1f << 0) #define DA9150_VBUS_IMAX_SHIFT 5 #define DA9150_VBUS_IMAX_MASK BIT(5) #define DA9150_VBUS_IOTG_SHIFT 6 #define DA9150_VBUS_IOTG_MASK (0x03 << 6) /* DA9150_PPR_BKCFG_B = 0x165 */ #define DA9150_VBUS_DROP_SHIFT 0 #define DA9150_VBUS_DROP_MASK (0x0f << 0) #define DA9150_VBUS_FAULT_DIS_SHIFT 6 #define DA9150_VBUS_FAULT_DIS_MASK BIT(6) #define DA9150_OTG_FAULT_DIS_SHIFT 7 #define DA9150_OTG_FAULT_DIS_MASK BIT(7) /* DA9150_PPR_CHGCTRL_A = 0x166 */ #define DA9150_CHG_EN_SHIFT 0 #define DA9150_CHG_EN_MASK BIT(0) /* DA9150_PPR_CHGCTRL_B = 0x167 */ #define DA9150_CHG_VBAT_SHIFT 0 #define DA9150_CHG_VBAT_MASK (0x1f << 0) #define DA9150_CHG_VDROP_SHIFT 6 #define DA9150_CHG_VDROP_MASK (0x03 << 6) /* DA9150_PPR_CHGCTRL_C = 0x168 */ #define DA9150_CHG_VFAULT_SHIFT 0 #define DA9150_CHG_VFAULT_MASK (0x0f << 0) #define DA9150_CHG_IPRE_SHIFT 4 #define DA9150_CHG_IPRE_MASK (0x03 << 4) /* DA9150_PPR_TCTR_A = 0x169 */ #define DA9150_CHG_TCTR_SHIFT 0 #define DA9150_CHG_TCTR_MASK (0x07 << 0) #define DA9150_CHG_TCTR_MODE_SHIFT 4 #define DA9150_CHG_TCTR_MODE_MASK BIT(4) /* DA9150_PPR_CHGCTRL_D = 0x16A */ #define DA9150_CHG_IBAT_SHIFT 0 #define DA9150_CHG_IBAT_MASK (0xff << 0) /* DA9150_PPR_CHGCTRL_E = 0x16B */ #define DA9150_CHG_IEND_SHIFT 0 #define DA9150_CHG_IEND_MASK (0xff << 0) /* DA9150_PPR_CHGCTRL_F = 0x16C */ #define DA9150_CHG_VCOLD_SHIFT 0 #define DA9150_CHG_VCOLD_MASK (0x1f << 0) #define DA9150_TBAT_TQA_EN_SHIFT 6 #define DA9150_TBAT_TQA_EN_MASK BIT(6) #define DA9150_TBAT_TDP_EN_SHIFT 7 #define DA9150_TBAT_TDP_EN_MASK BIT(7) /* DA9150_PPR_CHGCTRL_G = 0x16D */ #define DA9150_CHG_VWARM_SHIFT 0 #define DA9150_CHG_VWARM_MASK (0x1f << 0) /* DA9150_PPR_CHGCTRL_H = 0x16E */ #define DA9150_CHG_VHOT_SHIFT 0 #define DA9150_CHG_VHOT_MASK (0x1f << 0) /* DA9150_PPR_CHGCTRL_I = 0x16F */ #define DA9150_CHG_ICOLD_SHIFT 0 #define DA9150_CHG_ICOLD_MASK (0xff << 0) /* DA9150_PPR_CHGCTRL_J = 0x170 */ #define DA9150_CHG_IWARM_SHIFT 0 #define DA9150_CHG_IWARM_MASK (0xff << 0) /* DA9150_PPR_CHGCTRL_K = 0x171 */ #define DA9150_CHG_IHOT_SHIFT 0 #define DA9150_CHG_IHOT_MASK (0xff << 0) /* DA9150_PPR_CHGCTRL_L = 0x172 */ #define DA9150_CHG_IBAT_TRED_SHIFT 0 #define DA9150_CHG_IBAT_TRED_MASK (0xff << 0) /* DA9150_PPR_CHGCTRL_M = 0x173 */ #define DA9150_CHG_VFLOAT_SHIFT 0 #define DA9150_CHG_VFLOAT_MASK (0x0f << 0) #define DA9150_CHG_LPM_SHIFT 5 #define DA9150_CHG_LPM_MASK BIT(5) #define DA9150_CHG_NBLO_SHIFT 6 #define DA9150_CHG_NBLO_MASK BIT(6) #define DA9150_EBS_EN_SHIFT 7 #define DA9150_EBS_EN_MASK BIT(7) /* DA9150_PPR_THYST_A = 0x174 */ #define DA9150_TBAT_T1_SHIFT 0 #define DA9150_TBAT_T1_MASK (0xff << 0) /* DA9150_PPR_THYST_B = 0x175 */ #define DA9150_TBAT_T2_SHIFT 0 #define DA9150_TBAT_T2_MASK (0xff << 0) /* DA9150_PPR_THYST_C = 0x176 */ #define DA9150_TBAT_T3_SHIFT 0 #define DA9150_TBAT_T3_MASK (0xff << 0) /* DA9150_PPR_THYST_D = 0x177 */ #define DA9150_TBAT_T4_SHIFT 0 #define DA9150_TBAT_T4_MASK (0xff << 0) /* DA9150_PPR_THYST_E = 0x178 */ #define DA9150_TBAT_T5_SHIFT 0 #define DA9150_TBAT_T5_MASK (0xff << 0) /* DA9150_PPR_THYST_F = 0x179 */ #define DA9150_TBAT_H1_SHIFT 0 #define DA9150_TBAT_H1_MASK (0xff << 0) /* DA9150_PPR_THYST_G = 
0x17A */ #define DA9150_TBAT_H5_SHIFT 0 #define DA9150_TBAT_H5_MASK (0xff << 0) /* DA9150_PAGE_CON_3 = 0x180 */ #define DA9150_PAGE_SHIFT 0 #define DA9150_PAGE_MASK (0x3f << 0) #define DA9150_WRITE_MODE_SHIFT 6 #define DA9150_WRITE_MODE_MASK BIT(6) #define DA9150_REVERT_SHIFT 7 #define DA9150_REVERT_MASK BIT(7) /* DA9150_PAGE_CON_4 = 0x200 */ #define DA9150_PAGE_SHIFT 0 #define DA9150_PAGE_MASK (0x3f << 0) #define DA9150_WRITE_MODE_SHIFT 6 #define DA9150_WRITE_MODE_MASK BIT(6) #define DA9150_REVERT_SHIFT 7 #define DA9150_REVERT_MASK BIT(7) /* DA9150_PAGE_CON_5 = 0x280 */ #define DA9150_PAGE_SHIFT 0 #define DA9150_PAGE_MASK (0x3f << 0) #define DA9150_WRITE_MODE_SHIFT 6 #define DA9150_WRITE_MODE_MASK BIT(6) #define DA9150_REVERT_SHIFT 7 #define DA9150_REVERT_MASK BIT(7) /* DA9150_PAGE_CON_6 = 0x300 */ #define DA9150_PAGE_SHIFT 0 #define DA9150_PAGE_MASK (0x3f << 0) #define DA9150_WRITE_MODE_SHIFT 6 #define DA9150_WRITE_MODE_MASK BIT(6) #define DA9150_REVERT_SHIFT 7 #define DA9150_REVERT_MASK BIT(7) /* DA9150_COREBTLD_STAT_A = 0x302 */ #define DA9150_BOOTLD_STAT_SHIFT 0 #define DA9150_BOOTLD_STAT_MASK (0x03 << 0) #define DA9150_CORE_LOCKUP_SHIFT 2 #define DA9150_CORE_LOCKUP_MASK BIT(2) /* DA9150_COREBTLD_CTRL_A = 0x303 */ #define DA9150_CORE_RESET_SHIFT 0 #define DA9150_CORE_RESET_MASK BIT(0) #define DA9150_CORE_STOP_SHIFT 1 #define DA9150_CORE_STOP_MASK BIT(1) /* DA9150_CORE_CONFIG_A = 0x304 */ #define DA9150_CORE_MEMMUX_SHIFT 0 #define DA9150_CORE_MEMMUX_MASK (0x03 << 0) #define DA9150_WDT_AUTO_START_SHIFT 2 #define DA9150_WDT_AUTO_START_MASK BIT(2) #define DA9150_WDT_AUTO_LOCK_SHIFT 3 #define DA9150_WDT_AUTO_LOCK_MASK BIT(3) #define DA9150_WDT_HLT_NO_CLK_SHIFT 4 #define DA9150_WDT_HLT_NO_CLK_MASK BIT(4) /* DA9150_CORE_CONFIG_C = 0x305 */ #define DA9150_CORE_SW_SIZE_SHIFT 0 #define DA9150_CORE_SW_SIZE_MASK (0xff << 0) /* DA9150_CORE_CONFIG_B = 0x306 */ #define DA9150_BOOTLD_EN_SHIFT 0 #define DA9150_BOOTLD_EN_MASK BIT(0) #define DA9150_CORE_EN_SHIFT 2 #define DA9150_CORE_EN_MASK BIT(2) #define DA9150_CORE_SW_SRC_SHIFT 3 #define DA9150_CORE_SW_SRC_MASK (0x07 << 3) #define DA9150_DEEP_SLEEP_EN_SHIFT 7 #define DA9150_DEEP_SLEEP_EN_MASK BIT(7) /* DA9150_CORE_CFG_DATA_A = 0x307 */ #define DA9150_CORE_CFG_DT_A_SHIFT 0 #define DA9150_CORE_CFG_DT_A_MASK (0xff << 0) /* DA9150_CORE_CFG_DATA_B = 0x308 */ #define DA9150_CORE_CFG_DT_B_SHIFT 0 #define DA9150_CORE_CFG_DT_B_MASK (0xff << 0) /* DA9150_CORE_CMD_A = 0x309 */ #define DA9150_CORE_CMD_SHIFT 0 #define DA9150_CORE_CMD_MASK (0xff << 0) /* DA9150_CORE_DATA_A = 0x30A */ #define DA9150_CORE_DATA_0_SHIFT 0 #define DA9150_CORE_DATA_0_MASK (0xff << 0) /* DA9150_CORE_DATA_B = 0x30B */ #define DA9150_CORE_DATA_1_SHIFT 0 #define DA9150_CORE_DATA_1_MASK (0xff << 0) /* DA9150_CORE_DATA_C = 0x30C */ #define DA9150_CORE_DATA_2_SHIFT 0 #define DA9150_CORE_DATA_2_MASK (0xff << 0) /* DA9150_CORE_DATA_D = 0x30D */ #define DA9150_CORE_DATA_3_SHIFT 0 #define DA9150_CORE_DATA_3_MASK (0xff << 0) /* DA9150_CORE2WIRE_STAT_A = 0x310 */ #define DA9150_FW_FWDL_ERR_SHIFT 7 #define DA9150_FW_FWDL_ERR_MASK BIT(7) /* DA9150_CORE2WIRE_CTRL_A = 0x311 */ #define DA9150_FW_FWDL_EN_SHIFT 0 #define DA9150_FW_FWDL_EN_MASK BIT(0) #define DA9150_FG_QIF_EN_SHIFT 1 #define DA9150_FG_QIF_EN_MASK BIT(1) #define DA9150_CORE_BASE_ADDR_SHIFT 4 #define DA9150_CORE_BASE_ADDR_MASK (0x0f << 4) /* DA9150_FW_CTRL_A = 0x312 */ #define DA9150_FW_SEAL_SHIFT 0 #define DA9150_FW_SEAL_MASK (0xff << 0) /* DA9150_FW_CTRL_C = 0x313 */ #define DA9150_FW_FWDL_CRC_SHIFT 0 #define DA9150_FW_FWDL_CRC_MASK (0xff 
<< 0) /* DA9150_FW_CTRL_D = 0x314 */ #define DA9150_FW_FWDL_BASE_SHIFT 0 #define DA9150_FW_FWDL_BASE_MASK (0x0f << 0) /* DA9150_FG_CTRL_A = 0x315 */ #define DA9150_FG_QIF_CODE_SHIFT 0 #define DA9150_FG_QIF_CODE_MASK (0xff << 0) /* DA9150_FG_CTRL_B = 0x316 */ #define DA9150_FG_QIF_VALUE_SHIFT 0 #define DA9150_FG_QIF_VALUE_MASK (0xff << 0) /* DA9150_FW_CTRL_E = 0x317 */ #define DA9150_FW_FWDL_SEG_SHIFT 0 #define DA9150_FW_FWDL_SEG_MASK (0xff << 0) /* DA9150_FW_CTRL_B = 0x318 */ #define DA9150_FW_FWDL_VALUE_SHIFT 0 #define DA9150_FW_FWDL_VALUE_MASK (0xff << 0) /* DA9150_GPADC_CMAN = 0x320 */ #define DA9150_GPADC_CEN_SHIFT 0 #define DA9150_GPADC_CEN_MASK BIT(0) #define DA9150_GPADC_CMUX_SHIFT 1 #define DA9150_GPADC_CMUX_MASK (0x1f << 1) /* DA9150_GPADC_CRES_A = 0x322 */ #define DA9150_GPADC_CRES_H_SHIFT 0 #define DA9150_GPADC_CRES_H_MASK (0xff << 0) /* DA9150_GPADC_CRES_B = 0x323 */ #define DA9150_GPADC_CRUN_SHIFT 0 #define DA9150_GPADC_CRUN_MASK BIT(0) #define DA9150_GPADC_CRES_L_SHIFT 6 #define DA9150_GPADC_CRES_L_MASK (0x03 << 6) /* DA9150_CC_CFG_A = 0x328 */ #define DA9150_CC_EN_SHIFT 0 #define DA9150_CC_EN_MASK BIT(0) #define DA9150_CC_TIMEBASE_SHIFT 1 #define DA9150_CC_TIMEBASE_MASK (0x03 << 1) #define DA9150_CC_CFG_SHIFT 5 #define DA9150_CC_CFG_MASK (0x03 << 5) #define DA9150_CC_ENDLESS_MODE_SHIFT 7 #define DA9150_CC_ENDLESS_MODE_MASK BIT(7) /* DA9150_CC_CFG_B = 0x329 */ #define DA9150_CC_OPT_SHIFT 0 #define DA9150_CC_OPT_MASK (0x03 << 0) #define DA9150_CC_PREAMP_SHIFT 2 #define DA9150_CC_PREAMP_MASK (0x03 << 2) /* DA9150_CC_ICHG_RES_A = 0x32A */ #define DA9150_CC_ICHG_RES_H_SHIFT 0 #define DA9150_CC_ICHG_RES_H_MASK (0xff << 0) /* DA9150_CC_ICHG_RES_B = 0x32B */ #define DA9150_CC_ICHG_RES_L_SHIFT 3 #define DA9150_CC_ICHG_RES_L_MASK (0x1f << 3) /* DA9150_CC_IAVG_RES_A = 0x32C */ #define DA9150_CC_IAVG_RES_H_SHIFT 0 #define DA9150_CC_IAVG_RES_H_MASK (0xff << 0) /* DA9150_CC_IAVG_RES_B = 0x32D */ #define DA9150_CC_IAVG_RES_L_SHIFT 0 #define DA9150_CC_IAVG_RES_L_MASK (0xff << 0) /* DA9150_TAUX_CTRL_A = 0x330 */ #define DA9150_TAUX_EN_SHIFT 0 #define DA9150_TAUX_EN_MASK BIT(0) #define DA9150_TAUX_MOD_SHIFT 1 #define DA9150_TAUX_MOD_MASK BIT(1) #define DA9150_TAUX_UPDATE_SHIFT 2 #define DA9150_TAUX_UPDATE_MASK BIT(2) /* DA9150_TAUX_RELOAD_H = 0x332 */ #define DA9150_TAUX_RLD_H_SHIFT 0 #define DA9150_TAUX_RLD_H_MASK (0xff << 0) /* DA9150_TAUX_RELOAD_L = 0x333 */ #define DA9150_TAUX_RLD_L_SHIFT 3 #define DA9150_TAUX_RLD_L_MASK (0x1f << 3) /* DA9150_TAUX_VALUE_H = 0x334 */ #define DA9150_TAUX_VAL_H_SHIFT 0 #define DA9150_TAUX_VAL_H_MASK (0xff << 0) /* DA9150_TAUX_VALUE_L = 0x335 */ #define DA9150_TAUX_VAL_L_SHIFT 3 #define DA9150_TAUX_VAL_L_MASK (0x1f << 3) /* DA9150_AUX_DATA_0 = 0x338 */ #define DA9150_AUX_DAT_0_SHIFT 0 #define DA9150_AUX_DAT_0_MASK (0xff << 0) /* DA9150_AUX_DATA_1 = 0x339 */ #define DA9150_AUX_DAT_1_SHIFT 0 #define DA9150_AUX_DAT_1_MASK (0xff << 0) /* DA9150_AUX_DATA_2 = 0x33A */ #define DA9150_AUX_DAT_2_SHIFT 0 #define DA9150_AUX_DAT_2_MASK (0xff << 0) /* DA9150_AUX_DATA_3 = 0x33B */ #define DA9150_AUX_DAT_3_SHIFT 0 #define DA9150_AUX_DAT_3_MASK (0xff << 0) /* DA9150_BIF_CTRL = 0x340 */ #define DA9150_BIF_ISRC_EN_SHIFT 0 #define DA9150_BIF_ISRC_EN_MASK BIT(0) /* DA9150_TBAT_CTRL_A = 0x342 */ #define DA9150_TBAT_EN_SHIFT 0 #define DA9150_TBAT_EN_MASK BIT(0) #define DA9150_TBAT_SW1_SHIFT 1 #define DA9150_TBAT_SW1_MASK BIT(1) #define DA9150_TBAT_SW2_SHIFT 2 #define DA9150_TBAT_SW2_MASK BIT(2) /* DA9150_TBAT_CTRL_B = 0x343 */ #define DA9150_TBAT_SW_FRC_SHIFT 0 #define 
DA9150_TBAT_SW_FRC_MASK		BIT(0)
#define DA9150_TBAT_STAT_SW1_SHIFT	1
#define DA9150_TBAT_STAT_SW1_MASK	BIT(1)
#define DA9150_TBAT_STAT_SW2_SHIFT	2
#define DA9150_TBAT_STAT_SW2_MASK	BIT(2)
#define DA9150_TBAT_HIGH_CURR_SHIFT	3
#define DA9150_TBAT_HIGH_CURR_MASK	BIT(3)

/* DA9150_TBAT_RES_A = 0x344 */
#define DA9150_TBAT_RES_H_SHIFT	0
#define DA9150_TBAT_RES_H_MASK	(0xff << 0)

/* DA9150_TBAT_RES_B = 0x345 */
#define DA9150_TBAT_RES_DIS_SHIFT	0
#define DA9150_TBAT_RES_DIS_MASK	BIT(0)
#define DA9150_TBAT_RES_L_SHIFT	6
#define DA9150_TBAT_RES_L_MASK	(0x03 << 6)

#endif /* __DA9150_REGISTERS_H */

mfd/da9150/core.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * DA9150 MFD Driver - Core Data
 *
 * Copyright (c) 2014 Dialog Semiconductor
 *
 * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
 */

#ifndef __DA9150_CORE_H
#define __DA9150_CORE_H

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

/* I2C address paging */
#define DA9150_REG_PAGE_SHIFT	8
#define DA9150_REG_PAGE_MASK	0xFF

/* IRQs */
#define DA9150_NUM_IRQ_REGS	4

#define DA9150_IRQ_VBUS		0
#define DA9150_IRQ_CHG		1
#define DA9150_IRQ_TCLASS	2
#define DA9150_IRQ_TJUNC	3
#define DA9150_IRQ_VFAULT	4
#define DA9150_IRQ_CONF		5
#define DA9150_IRQ_DAT		6
#define DA9150_IRQ_DTYPE	7
#define DA9150_IRQ_ID		8
#define DA9150_IRQ_ADP		9
#define DA9150_IRQ_SESS_END	10
#define DA9150_IRQ_SESS_VLD	11
#define DA9150_IRQ_FG		12
#define DA9150_IRQ_GP		13
#define DA9150_IRQ_TBAT		14
#define DA9150_IRQ_GPIOA	15
#define DA9150_IRQ_GPIOB	16
#define DA9150_IRQ_GPIOC	17
#define DA9150_IRQ_GPIOD	18
#define DA9150_IRQ_GPADC	19
#define DA9150_IRQ_WKUP		20

/* I2C sub-device address */
#define DA9150_QIF_I2C_ADDR_LSB	0x5

struct da9150_fg_pdata {
	u32 update_interval;	/* msecs */
	u8 warn_soc_lvl;	/* % value */
	u8 crit_soc_lvl;	/* % value */
};

struct da9150_pdata {
	int irq_base;
	struct da9150_fg_pdata *fg_pdata;
};

struct da9150 {
	struct device *dev;
	struct regmap *regmap;
	struct i2c_client *core_qif;
	struct regmap_irq_chip_data *regmap_irq_data;
	int irq;
	int irq_base;
};

/* Device I/O - Query Interface for FG and standard register access */
void da9150_read_qif(struct da9150 *da9150, u8 addr, int count, u8 *buf);
void da9150_write_qif(struct da9150 *da9150, u8 addr, int count,
		      const u8 *buf);
u8 da9150_reg_read(struct da9150 *da9150, u16 reg);
void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val);
void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val);
void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf);
void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count,
		       const u8 *buf);

#endif /* __DA9150_CORE_H */
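/*
 * Usage sketch for the register helpers above. This is an illustration,
 * not code from the original file; it assumes the DA9150_ADETD_STAT
 * register constant (0x151) named in the bit-field comments of
 * registers.h:
 *
 *	u8 stat = da9150_reg_read(da9150, DA9150_ADETD_STAT);
 *	u8 pcd = (stat & DA9150_PCD_STAT_MASK) >> DA9150_PCD_STAT_SHIFT;
 *
 * da9150_set_bits() takes a mask and value in the same layout, so a
 * single field can be rewritten without touching its neighbours.
 */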
mfd/tps65090.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Core driver interface for TI TPS65090 PMIC family
 *
 * Copyright (C) 2012 NVIDIA Corporation
 */

#ifndef __LINUX_MFD_TPS65090_H
#define __LINUX_MFD_TPS65090_H

#include <linux/irq.h>
#include <linux/regmap.h>

/* TPS65090 IRQs */
enum {
	TPS65090_IRQ_INTERRUPT,
	TPS65090_IRQ_VAC_STATUS_CHANGE,
	TPS65090_IRQ_VSYS_STATUS_CHANGE,
	TPS65090_IRQ_BAT_STATUS_CHANGE,
	TPS65090_IRQ_CHARGING_STATUS_CHANGE,
	TPS65090_IRQ_CHARGING_COMPLETE,
	TPS65090_IRQ_OVERLOAD_DCDC1,
	TPS65090_IRQ_OVERLOAD_DCDC2,
	TPS65090_IRQ_OVERLOAD_DCDC3,
	TPS65090_IRQ_OVERLOAD_FET1,
	TPS65090_IRQ_OVERLOAD_FET2,
	TPS65090_IRQ_OVERLOAD_FET3,
	TPS65090_IRQ_OVERLOAD_FET4,
	TPS65090_IRQ_OVERLOAD_FET5,
	TPS65090_IRQ_OVERLOAD_FET6,
	TPS65090_IRQ_OVERLOAD_FET7,
};

/* TPS65090 Regulator ID */
enum {
	TPS65090_REGULATOR_DCDC1,
	TPS65090_REGULATOR_DCDC2,
	TPS65090_REGULATOR_DCDC3,
	TPS65090_REGULATOR_FET1,
	TPS65090_REGULATOR_FET2,
	TPS65090_REGULATOR_FET3,
	TPS65090_REGULATOR_FET4,
	TPS65090_REGULATOR_FET5,
	TPS65090_REGULATOR_FET6,
	TPS65090_REGULATOR_FET7,
	TPS65090_REGULATOR_LDO1,
	TPS65090_REGULATOR_LDO2,

	/* Last entry for maximum ID */
	TPS65090_REGULATOR_MAX,
};

/* Register addresses */
#define TPS65090_REG_INTR_STS	0x00
#define TPS65090_REG_INTR_STS2	0x01
#define TPS65090_REG_INTR_MASK	0x02
#define TPS65090_REG_INTR_MASK2	0x03
#define TPS65090_REG_CG_CTRL0	0x04
#define TPS65090_REG_CG_CTRL1	0x05
#define TPS65090_REG_CG_CTRL2	0x06
#define TPS65090_REG_CG_CTRL3	0x07
#define TPS65090_REG_CG_CTRL4	0x08
#define TPS65090_REG_CG_CTRL5	0x09
#define TPS65090_REG_CG_STATUS1	0x0a
#define TPS65090_REG_CG_STATUS2	0x0b
#define TPS65090_REG_AD_OUT1	0x17
#define TPS65090_REG_AD_OUT2	0x18

#define TPS65090_MAX_REG	TPS65090_REG_AD_OUT2
#define TPS65090_NUM_REGS	(TPS65090_MAX_REG + 1)

struct gpio_desc;

struct tps65090 {
	struct device *dev;
	struct regmap *rmap;
	struct regmap_irq_chip_data *irq_data;
};

/*
 * struct tps65090_regulator_plat_data
 *
 * @reg_init_data: The regulator init data.
 * @enable_ext_control: Enable external control or not. Only available for
 *	DCDC1, DCDC2 and DCDC3.
 * @gpiod: GPIO descriptor if external control is enabled and controlled
 *	through a GPIO.
 * @overcurrent_wait_valid: True if the overcurrent_wait should be applied.
 * @overcurrent_wait: Value to set as the overcurrent wait time. This is the
 *	actual bitfield value, not a time in ms (valid values are 0 - 3).
 */
struct tps65090_regulator_plat_data {
	struct regulator_init_data *reg_init_data;
	bool enable_ext_control;
	struct gpio_desc *gpiod;
	bool overcurrent_wait_valid;
	int overcurrent_wait;
};

struct tps65090_platform_data {
	int irq_base;
	char **supplied_to;
	size_t num_supplicants;
	int enable_low_current_chrg;
	struct tps65090_regulator_plat_data *reg_pdata[TPS65090_REGULATOR_MAX];
};

/*
 * NOTE: the functions below are not intended for use outside
 * of the TPS65090 sub-device drivers
 */
static inline int tps65090_write(struct device *dev, int reg, uint8_t val)
{
	struct tps65090 *tps = dev_get_drvdata(dev);

	return regmap_write(tps->rmap, reg, val);
}

static inline int tps65090_read(struct device *dev, int reg, uint8_t *val)
{
	struct tps65090 *tps = dev_get_drvdata(dev);
	unsigned int temp_val;
	int ret;

	ret = regmap_read(tps->rmap, reg, &temp_val);
	if (!ret)
		*val = temp_val;
	return ret;
}

static inline int tps65090_set_bits(struct device *dev, int reg,
				    uint8_t bit_num)
{
	struct tps65090 *tps = dev_get_drvdata(dev);

	return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u);
}

static inline int tps65090_clr_bits(struct device *dev, int reg,
				    uint8_t bit_num)
{
	struct tps65090 *tps = dev_get_drvdata(dev);

	return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u);
}

#endif /*__LINUX_MFD_TPS65090_H */

mfd/rk808.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Register definitions for Rockchip's RK808/RK818 PMIC
 *
 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Chris Zhong <zyw@rock-chips.com>
 * Author: Zhang Qing <zhangqing@rock-chips.com>
 *
 * Copyright (C) 2016 PHYTEC Messtechnik GmbH
 *
 * Author: Wadim Egorov <w.egorov@phytec.de>
 */

#ifndef __LINUX_REGULATOR_RK808_H
#define __LINUX_REGULATOR_RK808_H

#include <linux/regulator/machine.h>
#include <linux/regmap.h>

/*
 * rk808 Global Register Map.
*/ #define RK808_DCDC1 0 /* (0+RK808_START) */ #define RK808_LDO1 4 /* (4+RK808_START) */ #define RK808_NUM_REGULATORS 14 enum rk808_reg { RK808_ID_DCDC1, RK808_ID_DCDC2, RK808_ID_DCDC3, RK808_ID_DCDC4, RK808_ID_LDO1, RK808_ID_LDO2, RK808_ID_LDO3, RK808_ID_LDO4, RK808_ID_LDO5, RK808_ID_LDO6, RK808_ID_LDO7, RK808_ID_LDO8, RK808_ID_SWITCH1, RK808_ID_SWITCH2, }; #define RK808_SECONDS_REG 0x00 #define RK808_MINUTES_REG 0x01 #define RK808_HOURS_REG 0x02 #define RK808_DAYS_REG 0x03 #define RK808_MONTHS_REG 0x04 #define RK808_YEARS_REG 0x05 #define RK808_WEEKS_REG 0x06 #define RK808_ALARM_SECONDS_REG 0x08 #define RK808_ALARM_MINUTES_REG 0x09 #define RK808_ALARM_HOURS_REG 0x0a #define RK808_ALARM_DAYS_REG 0x0b #define RK808_ALARM_MONTHS_REG 0x0c #define RK808_ALARM_YEARS_REG 0x0d #define RK808_RTC_CTRL_REG 0x10 #define RK808_RTC_STATUS_REG 0x11 #define RK808_RTC_INT_REG 0x12 #define RK808_RTC_COMP_LSB_REG 0x13 #define RK808_RTC_COMP_MSB_REG 0x14 #define RK808_ID_MSB 0x17 #define RK808_ID_LSB 0x18 #define RK808_CLK32OUT_REG 0x20 #define RK808_VB_MON_REG 0x21 #define RK808_THERMAL_REG 0x22 #define RK808_DCDC_EN_REG 0x23 #define RK808_LDO_EN_REG 0x24 #define RK808_SLEEP_SET_OFF_REG1 0x25 #define RK808_SLEEP_SET_OFF_REG2 0x26 #define RK808_DCDC_UV_STS_REG 0x27 #define RK808_DCDC_UV_ACT_REG 0x28 #define RK808_LDO_UV_STS_REG 0x29 #define RK808_LDO_UV_ACT_REG 0x2a #define RK808_DCDC_PG_REG 0x2b #define RK808_LDO_PG_REG 0x2c #define RK808_VOUT_MON_TDB_REG 0x2d #define RK808_BUCK1_CONFIG_REG 0x2e #define RK808_BUCK1_ON_VSEL_REG 0x2f #define RK808_BUCK1_SLP_VSEL_REG 0x30 #define RK808_BUCK1_DVS_VSEL_REG 0x31 #define RK808_BUCK2_CONFIG_REG 0x32 #define RK808_BUCK2_ON_VSEL_REG 0x33 #define RK808_BUCK2_SLP_VSEL_REG 0x34 #define RK808_BUCK2_DVS_VSEL_REG 0x35 #define RK808_BUCK3_CONFIG_REG 0x36 #define RK808_BUCK4_CONFIG_REG 0x37 #define RK808_BUCK4_ON_VSEL_REG 0x38 #define RK808_BUCK4_SLP_VSEL_REG 0x39 #define RK808_BOOST_CONFIG_REG 0x3a #define RK808_LDO1_ON_VSEL_REG 0x3b #define RK808_LDO1_SLP_VSEL_REG 0x3c #define RK808_LDO2_ON_VSEL_REG 0x3d #define RK808_LDO2_SLP_VSEL_REG 0x3e #define RK808_LDO3_ON_VSEL_REG 0x3f #define RK808_LDO3_SLP_VSEL_REG 0x40 #define RK808_LDO4_ON_VSEL_REG 0x41 #define RK808_LDO4_SLP_VSEL_REG 0x42 #define RK808_LDO5_ON_VSEL_REG 0x43 #define RK808_LDO5_SLP_VSEL_REG 0x44 #define RK808_LDO6_ON_VSEL_REG 0x45 #define RK808_LDO6_SLP_VSEL_REG 0x46 #define RK808_LDO7_ON_VSEL_REG 0x47 #define RK808_LDO7_SLP_VSEL_REG 0x48 #define RK808_LDO8_ON_VSEL_REG 0x49 #define RK808_LDO8_SLP_VSEL_REG 0x4a #define RK808_DEVCTRL_REG 0x4b #define RK808_INT_STS_REG1 0x4c #define RK808_INT_STS_MSK_REG1 0x4d #define RK808_INT_STS_REG2 0x4e #define RK808_INT_STS_MSK_REG2 0x4f #define RK808_IO_POL_REG 0x50 /* RK818 */ #define RK818_DCDC1 0 #define RK818_LDO1 4 #define RK818_NUM_REGULATORS 17 enum rk818_reg { RK818_ID_DCDC1, RK818_ID_DCDC2, RK818_ID_DCDC3, RK818_ID_DCDC4, RK818_ID_BOOST, RK818_ID_LDO1, RK818_ID_LDO2, RK818_ID_LDO3, RK818_ID_LDO4, RK818_ID_LDO5, RK818_ID_LDO6, RK818_ID_LDO7, RK818_ID_LDO8, RK818_ID_LDO9, RK818_ID_SWITCH, RK818_ID_HDMI_SWITCH, RK818_ID_OTG_SWITCH, }; #define RK818_DCDC_EN_REG 0x23 #define RK818_LDO_EN_REG 0x24 #define RK818_SLEEP_SET_OFF_REG1 0x25 #define RK818_SLEEP_SET_OFF_REG2 0x26 #define RK818_DCDC_UV_STS_REG 0x27 #define RK818_DCDC_UV_ACT_REG 0x28 #define RK818_LDO_UV_STS_REG 0x29 #define RK818_LDO_UV_ACT_REG 0x2a #define RK818_DCDC_PG_REG 0x2b #define RK818_LDO_PG_REG 0x2c #define RK818_VOUT_MON_TDB_REG 0x2d #define RK818_BUCK1_CONFIG_REG 0x2e #define RK818_BUCK1_ON_VSEL_REG 
0x2f #define RK818_BUCK1_SLP_VSEL_REG 0x30 #define RK818_BUCK2_CONFIG_REG 0x32 #define RK818_BUCK2_ON_VSEL_REG 0x33 #define RK818_BUCK2_SLP_VSEL_REG 0x34 #define RK818_BUCK3_CONFIG_REG 0x36 #define RK818_BUCK4_CONFIG_REG 0x37 #define RK818_BUCK4_ON_VSEL_REG 0x38 #define RK818_BUCK4_SLP_VSEL_REG 0x39 #define RK818_BOOST_CONFIG_REG 0x3a #define RK818_LDO1_ON_VSEL_REG 0x3b #define RK818_LDO1_SLP_VSEL_REG 0x3c #define RK818_LDO2_ON_VSEL_REG 0x3d #define RK818_LDO2_SLP_VSEL_REG 0x3e #define RK818_LDO3_ON_VSEL_REG 0x3f #define RK818_LDO3_SLP_VSEL_REG 0x40 #define RK818_LDO4_ON_VSEL_REG 0x41 #define RK818_LDO4_SLP_VSEL_REG 0x42 #define RK818_LDO5_ON_VSEL_REG 0x43 #define RK818_LDO5_SLP_VSEL_REG 0x44 #define RK818_LDO6_ON_VSEL_REG 0x45 #define RK818_LDO6_SLP_VSEL_REG 0x46 #define RK818_LDO7_ON_VSEL_REG 0x47 #define RK818_LDO7_SLP_VSEL_REG 0x48 #define RK818_LDO8_ON_VSEL_REG 0x49 #define RK818_LDO8_SLP_VSEL_REG 0x4a #define RK818_BOOST_LDO9_ON_VSEL_REG 0x54 #define RK818_BOOST_LDO9_SLP_VSEL_REG 0x55 #define RK818_DEVCTRL_REG 0x4b #define RK818_INT_STS_REG1 0X4c #define RK818_INT_STS_MSK_REG1 0x4d #define RK818_INT_STS_REG2 0x4e #define RK818_INT_STS_MSK_REG2 0x4f #define RK818_IO_POL_REG 0x50 #define RK818_H5V_EN_REG 0x52 #define RK818_SLEEP_SET_OFF_REG3 0x53 #define RK818_BOOST_LDO9_ON_VSEL_REG 0x54 #define RK818_BOOST_LDO9_SLP_VSEL_REG 0x55 #define RK818_BOOST_CTRL_REG 0x56 #define RK818_DCDC_ILMAX 0x90 #define RK818_USB_CTRL_REG 0xa1 #define RK818_H5V_EN BIT(0) #define RK818_REF_RDY_CTRL BIT(1) #define RK818_USB_ILIM_SEL_MASK 0xf #define RK818_USB_ILMIN_2000MA 0x7 #define RK818_USB_CHG_SD_VSEL_MASK 0x70 /* RK805 */ enum rk805_reg { RK805_ID_DCDC1, RK805_ID_DCDC2, RK805_ID_DCDC3, RK805_ID_DCDC4, RK805_ID_LDO1, RK805_ID_LDO2, RK805_ID_LDO3, }; /* CONFIG REGISTER */ #define RK805_VB_MON_REG 0x21 #define RK805_THERMAL_REG 0x22 /* POWER CHANNELS ENABLE REGISTER */ #define RK805_DCDC_EN_REG 0x23 #define RK805_SLP_DCDC_EN_REG 0x25 #define RK805_SLP_LDO_EN_REG 0x26 #define RK805_LDO_EN_REG 0x27 /* BUCK AND LDO CONFIG REGISTER */ #define RK805_BUCK_LDO_SLP_LP_EN_REG 0x2A #define RK805_BUCK1_CONFIG_REG 0x2E #define RK805_BUCK1_ON_VSEL_REG 0x2F #define RK805_BUCK1_SLP_VSEL_REG 0x30 #define RK805_BUCK2_CONFIG_REG 0x32 #define RK805_BUCK2_ON_VSEL_REG 0x33 #define RK805_BUCK2_SLP_VSEL_REG 0x34 #define RK805_BUCK3_CONFIG_REG 0x36 #define RK805_BUCK4_CONFIG_REG 0x37 #define RK805_BUCK4_ON_VSEL_REG 0x38 #define RK805_BUCK4_SLP_VSEL_REG 0x39 #define RK805_LDO1_ON_VSEL_REG 0x3B #define RK805_LDO1_SLP_VSEL_REG 0x3C #define RK805_LDO2_ON_VSEL_REG 0x3D #define RK805_LDO2_SLP_VSEL_REG 0x3E #define RK805_LDO3_ON_VSEL_REG 0x3F #define RK805_LDO3_SLP_VSEL_REG 0x40 /* INTERRUPT REGISTER */ #define RK805_PWRON_LP_INT_TIME_REG 0x47 #define RK805_PWRON_DB_REG 0x48 #define RK805_DEV_CTRL_REG 0x4B #define RK805_INT_STS_REG 0x4C #define RK805_INT_STS_MSK_REG 0x4D #define RK805_GPIO_IO_POL_REG 0x50 #define RK805_OUT_REG 0x52 #define RK805_ON_SOURCE_REG 0xAE #define RK805_OFF_SOURCE_REG 0xAF #define RK805_NUM_REGULATORS 7 #define RK805_PWRON_FALL_RISE_INT_EN 0x0 #define RK805_PWRON_FALL_RISE_INT_MSK 0x81 /* RK805 IRQ Definitions */ #define RK805_IRQ_PWRON_RISE 0 #define RK805_IRQ_VB_LOW 1 #define RK805_IRQ_PWRON 2 #define RK805_IRQ_PWRON_LP 3 #define RK805_IRQ_HOTDIE 4 #define RK805_IRQ_RTC_ALARM 5 #define RK805_IRQ_RTC_PERIOD 6 #define RK805_IRQ_PWRON_FALL 7 #define RK805_IRQ_PWRON_RISE_MSK BIT(0) #define RK805_IRQ_VB_LOW_MSK BIT(1) #define RK805_IRQ_PWRON_MSK BIT(2) #define RK805_IRQ_PWRON_LP_MSK BIT(3) #define 
RK805_IRQ_HOTDIE_MSK BIT(4) #define RK805_IRQ_RTC_ALARM_MSK BIT(5) #define RK805_IRQ_RTC_PERIOD_MSK BIT(6) #define RK805_IRQ_PWRON_FALL_MSK BIT(7) #define RK805_PWR_RISE_INT_STATUS BIT(0) #define RK805_VB_LOW_INT_STATUS BIT(1) #define RK805_PWRON_INT_STATUS BIT(2) #define RK805_PWRON_LP_INT_STATUS BIT(3) #define RK805_HOTDIE_INT_STATUS BIT(4) #define RK805_ALARM_INT_STATUS BIT(5) #define RK805_PERIOD_INT_STATUS BIT(6) #define RK805_PWR_FALL_INT_STATUS BIT(7) #define RK805_BUCK1_2_ILMAX_MASK (3 << 6) #define RK805_BUCK3_4_ILMAX_MASK (3 << 3) #define RK805_RTC_PERIOD_INT_MASK (1 << 6) #define RK805_RTC_ALARM_INT_MASK (1 << 5) #define RK805_INT_ALARM_EN (1 << 3) #define RK805_INT_TIMER_EN (1 << 2) /* RK808 IRQ Definitions */ #define RK808_IRQ_VOUT_LO 0 #define RK808_IRQ_VB_LO 1 #define RK808_IRQ_PWRON 2 #define RK808_IRQ_PWRON_LP 3 #define RK808_IRQ_HOTDIE 4 #define RK808_IRQ_RTC_ALARM 5 #define RK808_IRQ_RTC_PERIOD 6 #define RK808_IRQ_PLUG_IN_INT 7 #define RK808_IRQ_PLUG_OUT_INT 8 #define RK808_NUM_IRQ 9 #define RK808_IRQ_VOUT_LO_MSK BIT(0) #define RK808_IRQ_VB_LO_MSK BIT(1) #define RK808_IRQ_PWRON_MSK BIT(2) #define RK808_IRQ_PWRON_LP_MSK BIT(3) #define RK808_IRQ_HOTDIE_MSK BIT(4) #define RK808_IRQ_RTC_ALARM_MSK BIT(5) #define RK808_IRQ_RTC_PERIOD_MSK BIT(6) #define RK808_IRQ_PLUG_IN_INT_MSK BIT(0) #define RK808_IRQ_PLUG_OUT_INT_MSK BIT(1) /* RK818 IRQ Definitions */ #define RK818_IRQ_VOUT_LO 0 #define RK818_IRQ_VB_LO 1 #define RK818_IRQ_PWRON 2 #define RK818_IRQ_PWRON_LP 3 #define RK818_IRQ_HOTDIE 4 #define RK818_IRQ_RTC_ALARM 5 #define RK818_IRQ_RTC_PERIOD 6 #define RK818_IRQ_USB_OV 7 #define RK818_IRQ_PLUG_IN 8 #define RK818_IRQ_PLUG_OUT 9 #define RK818_IRQ_CHG_OK 10 #define RK818_IRQ_CHG_TE 11 #define RK818_IRQ_CHG_TS1 12 #define RK818_IRQ_TS2 13 #define RK818_IRQ_CHG_CVTLIM 14 #define RK818_IRQ_DISCHG_ILIM 15 #define RK818_IRQ_VOUT_LO_MSK BIT(0) #define RK818_IRQ_VB_LO_MSK BIT(1) #define RK818_IRQ_PWRON_MSK BIT(2) #define RK818_IRQ_PWRON_LP_MSK BIT(3) #define RK818_IRQ_HOTDIE_MSK BIT(4) #define RK818_IRQ_RTC_ALARM_MSK BIT(5) #define RK818_IRQ_RTC_PERIOD_MSK BIT(6) #define RK818_IRQ_USB_OV_MSK BIT(7) #define RK818_IRQ_PLUG_IN_MSK BIT(0) #define RK818_IRQ_PLUG_OUT_MSK BIT(1) #define RK818_IRQ_CHG_OK_MSK BIT(2) #define RK818_IRQ_CHG_TE_MSK BIT(3) #define RK818_IRQ_CHG_TS1_MSK BIT(4) #define RK818_IRQ_TS2_MSK BIT(5) #define RK818_IRQ_CHG_CVTLIM_MSK BIT(6) #define RK818_IRQ_DISCHG_ILIM_MSK BIT(7) #define RK818_NUM_IRQ 16 #define RK808_VBAT_LOW_2V8 0x00 #define RK808_VBAT_LOW_2V9 0x01 #define RK808_VBAT_LOW_3V0 0x02 #define RK808_VBAT_LOW_3V1 0x03 #define RK808_VBAT_LOW_3V2 0x04 #define RK808_VBAT_LOW_3V3 0x05 #define RK808_VBAT_LOW_3V4 0x06 #define RK808_VBAT_LOW_3V5 0x07 #define VBAT_LOW_VOL_MASK (0x07 << 0) #define EN_VABT_LOW_SHUT_DOWN (0x00 << 4) #define EN_VBAT_LOW_IRQ (0x1 << 4) #define VBAT_LOW_ACT_MASK (0x1 << 4) #define BUCK_ILMIN_MASK (7 << 0) #define BOOST_ILMIN_MASK (7 << 0) #define BUCK1_RATE_MASK (3 << 3) #define BUCK2_RATE_MASK (3 << 3) #define MASK_ALL 0xff #define BUCK_UV_ACT_MASK 0x0f #define BUCK_UV_ACT_DISABLE 0 #define SWITCH2_EN BIT(6) #define SWITCH1_EN BIT(5) #define DEV_OFF_RST BIT(3) #define DEV_OFF BIT(0) #define RTC_STOP BIT(0) #define VB_LO_ACT BIT(4) #define VB_LO_SEL_3500MV (7 << 0) #define VOUT_LO_INT BIT(0) #define CLK32KOUT2_EN BIT(0) #define TEMP115C 0x0c #define TEMP_HOTDIE_MSK 0x0c #define SLP_SD_MSK (0x3 << 2) #define SHUTDOWN_FUN (0x2 << 2) #define SLEEP_FUN (0x1 << 2) #define RK8XX_ID_MSK 0xfff0 #define PWM_MODE_MSK BIT(7) #define FPWM_MODE BIT(7) 
#define AUTO_PWM_MODE			0

enum rk817_reg_id {
	RK817_ID_DCDC1 = 0,
	RK817_ID_DCDC2,
	RK817_ID_DCDC3,
	RK817_ID_DCDC4,
	RK817_ID_LDO1,
	RK817_ID_LDO2,
	RK817_ID_LDO3,
	RK817_ID_LDO4,
	RK817_ID_LDO5,
	RK817_ID_LDO6,
	RK817_ID_LDO7,
	RK817_ID_LDO8,
	RK817_ID_LDO9,
	RK817_ID_BOOST,
	RK817_ID_BOOST_OTG_SW,
	RK817_NUM_REGULATORS
};

enum rk809_reg_id {
	RK809_ID_DCDC5 = RK817_ID_BOOST,
	RK809_ID_SW1,
	RK809_ID_SW2,
	RK809_NUM_REGULATORS
};

#define RK817_SECONDS_REG		0x00
#define RK817_MINUTES_REG		0x01
#define RK817_HOURS_REG			0x02
#define RK817_DAYS_REG			0x03
#define RK817_MONTHS_REG		0x04
#define RK817_YEARS_REG			0x05
#define RK817_WEEKS_REG			0x06
#define RK817_ALARM_SECONDS_REG		0x07
#define RK817_ALARM_MINUTES_REG		0x08
#define RK817_ALARM_HOURS_REG		0x09
#define RK817_ALARM_DAYS_REG		0x0a
#define RK817_ALARM_MONTHS_REG		0x0b
#define RK817_ALARM_YEARS_REG		0x0c
#define RK817_RTC_CTRL_REG		0xd
#define RK817_RTC_STATUS_REG		0xe
#define RK817_RTC_INT_REG		0xf
#define RK817_RTC_COMP_LSB_REG		0x10
#define RK817_RTC_COMP_MSB_REG		0x11

#define RK817_POWER_EN_REG(i)		(0xb1 + (i))
#define RK817_POWER_SLP_EN_REG(i)	(0xb5 + (i))

#define RK817_POWER_CONFIG		(0xb9)

#define RK817_BUCK_CONFIG_REG(i)	(0xba + (i) * 3)

#define RK817_BUCK1_ON_VSEL_REG		0xBB
#define RK817_BUCK1_SLP_VSEL_REG	0xBC

#define RK817_BUCK2_CONFIG_REG		0xBD
#define RK817_BUCK2_ON_VSEL_REG		0xBE
#define RK817_BUCK2_SLP_VSEL_REG	0xBF

#define RK817_BUCK3_CONFIG_REG		0xC0
#define RK817_BUCK3_ON_VSEL_REG		0xC1
#define RK817_BUCK3_SLP_VSEL_REG	0xC2

#define RK817_BUCK4_CONFIG_REG		0xC3
#define RK817_BUCK4_ON_VSEL_REG		0xC4
#define RK817_BUCK4_SLP_VSEL_REG	0xC5

#define RK817_LDO_ON_VSEL_REG(idx)	(0xcc + (idx) * 2)
#define RK817_BOOST_OTG_CFG		(0xde)

#define RK817_ID_MSB			0xed
#define RK817_ID_LSB			0xee

#define RK817_SYS_STS			0xf0
#define RK817_SYS_CFG(i)		(0xf1 + (i))

#define RK817_ON_SOURCE_REG		0xf5
#define RK817_OFF_SOURCE_REG		0xf6

/* INTERRUPT REGISTER */
#define RK817_INT_STS_REG0		0xf8
#define RK817_INT_STS_MSK_REG0		0xf9
#define RK817_INT_STS_REG1		0xfa
#define RK817_INT_STS_MSK_REG1		0xfb
#define RK817_INT_STS_REG2		0xfc
#define RK817_INT_STS_MSK_REG2		0xfd
#define RK817_GPIO_INT_CFG		0xfe

/* IRQ Definitions */
#define RK817_IRQ_PWRON_FALL		0
#define RK817_IRQ_PWRON_RISE		1
#define RK817_IRQ_PWRON			2
#define RK817_IRQ_PWMON_LP		3
#define RK817_IRQ_HOTDIE		4
#define RK817_IRQ_RTC_ALARM		5
#define RK817_IRQ_RTC_PERIOD		6
#define RK817_IRQ_VB_LO			7
#define RK817_IRQ_PLUG_IN		8
#define RK817_IRQ_PLUG_OUT		9
#define RK817_IRQ_CHRG_TERM		10
#define RK817_IRQ_CHRG_TIME		11
#define RK817_IRQ_CHRG_TS		12
#define RK817_IRQ_USB_OV		13
#define RK817_IRQ_CHRG_IN_CLMP		14
#define RK817_IRQ_BAT_DIS_ILIM		15
#define RK817_IRQ_GATE_GPIO		16
#define RK817_IRQ_TS_GPIO		17
#define RK817_IRQ_CODEC_PD		18
#define RK817_IRQ_CODEC_PO		19
#define RK817_IRQ_CLASSD_MUTE_DONE	20
#define RK817_IRQ_CLASSD_OCP		21
#define RK817_IRQ_BAT_OVP		22
#define RK817_IRQ_CHRG_BAT_HI		23
#define RK817_IRQ_END			(RK817_IRQ_CHRG_BAT_HI + 1)
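/*
 * Note on the parameterised helpers above (illustrative expansions,
 * derived from the macro definitions themselves, not present in the
 * original file):
 *
 *	RK817_POWER_EN_REG(2)      -> 0xb1 + 2     = 0xb3
 *	RK817_LDO_ON_VSEL_REG(3)   -> 0xcc + 3 * 2 = 0xd2
 *	RK817_SYS_CFG(1)           -> 0xf1 + 1     = 0xf2
 */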
/*
 * rtc_ctrl 0xd
 * same as 808, except bit4
 */
#define RK817_RTC_CTRL_RSV4		BIT(4)

/* power config 0xb9 */
#define RK817_BUCK3_FB_RES_MSK		BIT(6)
#define RK817_BUCK3_FB_RES_INTER	BIT(6)
#define RK817_BUCK3_FB_RES_EXT		0

/* buck config 0xba */
#define RK817_RAMP_RATE_OFFSET		6
#define RK817_RAMP_RATE_MASK		(0x3 << RK817_RAMP_RATE_OFFSET)
#define RK817_RAMP_RATE_3MV_PER_US	(0x0 << RK817_RAMP_RATE_OFFSET)
#define RK817_RAMP_RATE_6_3MV_PER_US	(0x1 << RK817_RAMP_RATE_OFFSET)
#define RK817_RAMP_RATE_12_5MV_PER_US	(0x2 << RK817_RAMP_RATE_OFFSET)
#define RK817_RAMP_RATE_25MV_PER_US	(0x3 << RK817_RAMP_RATE_OFFSET)

/* sys_cfg1 0xf2 */
#define RK817_HOTDIE_TEMP_MSK		(0x3 << 4)
#define RK817_HOTDIE_85			(0x0 << 4)
#define RK817_HOTDIE_95			(0x1 << 4)
#define RK817_HOTDIE_105		(0x2 << 4)
#define RK817_HOTDIE_115		(0x3 << 4)
#define RK817_TSD_TEMP_MSK		BIT(6)
#define RK817_TSD_140			0
#define RK817_TSD_160			BIT(6)
#define RK817_CLK32KOUT2_EN		BIT(7)

/* sys_cfg3 0xf4 */
#define RK817_SLPPIN_FUNC_MSK		(0x3 << 3)
#define SLPPIN_NULL_FUN			(0x0 << 3)
#define SLPPIN_SLP_FUN			(0x1 << 3)
#define SLPPIN_DN_FUN			(0x2 << 3)
#define SLPPIN_RST_FUN			(0x3 << 3)

#define RK817_RST_FUNC_MSK		(0x3 << 6)
#define RK817_RST_FUNC_SFT		(6)
#define RK817_RST_FUNC_CNT		(3)
#define RK817_RST_FUNC_DEV		(0) /* reset the dev */
#define RK817_RST_FUNC_REG		(0x1 << 6) /* reset the reg only */

#define RK817_SLPPOL_MSK		BIT(5)
#define RK817_SLPPOL_H			BIT(5)
#define RK817_SLPPOL_L			(0)

/* gpio&int 0xfe */
#define RK817_INT_POL_MSK		BIT(1)
#define RK817_INT_POL_H			BIT(1)
#define RK817_INT_POL_L			0

#define RK809_BUCK5_CONFIG(i)		(RK817_BOOST_OTG_CFG + (i) * 1)

enum {
	BUCK_ILMIN_50MA,
	BUCK_ILMIN_100MA,
	BUCK_ILMIN_150MA,
	BUCK_ILMIN_200MA,
	BUCK_ILMIN_250MA,
	BUCK_ILMIN_300MA,
	BUCK_ILMIN_350MA,
	BUCK_ILMIN_400MA,
};

enum {
	BOOST_ILMIN_75MA,
	BOOST_ILMIN_100MA,
	BOOST_ILMIN_125MA,
	BOOST_ILMIN_150MA,
	BOOST_ILMIN_175MA,
	BOOST_ILMIN_200MA,
	BOOST_ILMIN_225MA,
	BOOST_ILMIN_250MA,
};

enum {
	RK805_BUCK1_2_ILMAX_2500MA,
	RK805_BUCK1_2_ILMAX_3000MA,
	RK805_BUCK1_2_ILMAX_3500MA,
	RK805_BUCK1_2_ILMAX_4000MA,
};

enum {
	RK805_BUCK3_ILMAX_1500MA,
	RK805_BUCK3_ILMAX_2000MA,
	RK805_BUCK3_ILMAX_2500MA,
	RK805_BUCK3_ILMAX_3000MA,
};

enum {
	RK805_BUCK4_ILMAX_2000MA,
	RK805_BUCK4_ILMAX_2500MA,
	RK805_BUCK4_ILMAX_3000MA,
	RK805_BUCK4_ILMAX_3500MA,
};

enum {
	RK805_ID = 0x8050,
	RK808_ID = 0x0000,
	RK809_ID = 0x8090,
	RK817_ID = 0x8170,
	RK818_ID = 0x8180,
};

struct rk808 {
	struct i2c_client		*i2c;
	struct regmap_irq_chip_data	*irq_data;
	struct regmap			*regmap;
	long				variant;
	const struct regmap_config	*regmap_cfg;
	const struct regmap_irq_chip	*regmap_irq_chip;
	void				(*pm_pwroff_fn)(void);
	void				(*pm_pwroff_prep_fn)(void);
};

#endif /* __LINUX_REGULATOR_RK808_H */
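/*
 * Minimal usage sketch for the RK8xx definitions above (an illustration
 * with assumptions, not code from the original header): sub-device
 * drivers conventionally fetch the struct rk808 from their parent's
 * drvdata and drive the bit masks through regmap, e.g. to stop the RTC
 * counter:
 *
 *	struct rk808 *rk808 = dev_get_drvdata(dev->parent);
 *
 *	regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG,
 *			   RTC_STOP, RTC_STOP);
 */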
mfd/lochnagar.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Lochnagar internals
 *
 * Copyright (c) 2013-2018 Cirrus Logic, Inc. and
 *                         Cirrus Logic International Semiconductor Ltd.
 *
 * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
 */

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

#ifndef CIRRUS_LOCHNAGAR_H
#define CIRRUS_LOCHNAGAR_H

enum lochnagar_type {
	LOCHNAGAR1,
	LOCHNAGAR2,
};

/**
 * struct lochnagar - Core data for the Lochnagar audio board driver.
 *
 * @type: The type of Lochnagar device connected.
 * @dev: A pointer to the struct device for the main MFD.
 * @regmap: The device's main register map.
 * @analogue_config_lock: Lock used to protect updates in the analogue
 *	configuration, as these must not be changed whilst the hardware is
 *	processing the last update.
 */
struct lochnagar {
	enum lochnagar_type type;
	struct device *dev;
	struct regmap *regmap;

	/* Lock to protect updates to the analogue configuration */
	struct mutex analogue_config_lock;
};

/* Register Addresses */
#define LOCHNAGAR_SOFTWARE_RESET	0x00
#define LOCHNAGAR_FIRMWARE_ID1		0x01
#define LOCHNAGAR_FIRMWARE_ID2		0x02

/* (0x0000) Software Reset */
#define LOCHNAGAR_DEVICE_ID_MASK	0xFFFC
#define LOCHNAGAR_DEVICE_ID_SHIFT	2
#define LOCHNAGAR_REV_ID_MASK		0x0003
#define LOCHNAGAR_REV_ID_SHIFT		0

int lochnagar_update_config(struct lochnagar *lochnagar);

#endif

mfd/da9062/registers.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2015-2017 Dialog Semiconductor
 */

#ifndef __DA9062_H__
#define __DA9062_H__

#define DA9062_PMIC_DEVICE_ID		0x62
#define DA9062_PMIC_VARIANT_MRC_AA	0x01
#define DA9062_PMIC_VARIANT_VRC_DA9061	0x01
#define DA9062_PMIC_VARIANT_VRC_DA9062	0x02

#define DA9062_I2C_PAGE_SEL_SHIFT	1

/*
 * Registers
 */
#define DA9062AA_PAGE_CON		0x000
#define DA9062AA_STATUS_A		0x001
#define DA9062AA_STATUS_B		0x002
#define DA9062AA_STATUS_D		0x004
#define DA9062AA_FAULT_LOG		0x005
#define DA9062AA_EVENT_A		0x006
#define DA9062AA_EVENT_B		0x007
#define DA9062AA_EVENT_C		0x008
#define DA9062AA_IRQ_MASK_A		0x00A
#define DA9062AA_IRQ_MASK_B		0x00B
#define DA9062AA_IRQ_MASK_C		0x00C
#define DA9062AA_CONTROL_A		0x00E
#define DA9062AA_CONTROL_B		0x00F
#define DA9062AA_CONTROL_C		0x010
#define DA9062AA_CONTROL_D		0x011
#define DA9062AA_CONTROL_E		0x012
#define DA9062AA_CONTROL_F		0x013
#define DA9062AA_PD_DIS			0x014
#define DA9062AA_GPIO_0_1		0x015
#define DA9062AA_GPIO_2_3		0x016
#define DA9062AA_GPIO_4			0x017
#define DA9062AA_GPIO_WKUP_MODE		0x01C
#define DA9062AA_GPIO_MODE0_4		0x01D
#define DA9062AA_GPIO_OUT0_2		0x01E
#define DA9062AA_GPIO_OUT3_4		0x01F
#define DA9062AA_BUCK2_CONT		0x020
#define DA9062AA_BUCK1_CONT		0x021
#define DA9062AA_BUCK4_CONT		0x022
#define DA9062AA_BUCK3_CONT		0x024
#define DA9062AA_LDO1_CONT		0x026
#define DA9062AA_LDO2_CONT		0x027
#define DA9062AA_LDO3_CONT		0x028
#define DA9062AA_LDO4_CONT		0x029
#define DA9062AA_DVC_1			0x032
#define DA9062AA_COUNT_S		0x040
#define DA9062AA_COUNT_MI		0x041
#define DA9062AA_COUNT_H		0x042
#define DA9062AA_COUNT_D		0x043
#define DA9062AA_COUNT_MO		0x044
#define DA9062AA_COUNT_Y		0x045
#define DA9062AA_ALARM_S		0x046
#define DA9062AA_ALARM_MI		0x047
#define DA9062AA_ALARM_H		0x048
#define DA9062AA_ALARM_D		0x049
#define DA9062AA_ALARM_MO		0x04A
#define DA9062AA_ALARM_Y		0x04B
#define DA9062AA_SECOND_A		0x04C
#define DA9062AA_SECOND_B		0x04D
#define DA9062AA_SECOND_C		0x04E
#define DA9062AA_SECOND_D		0x04F
#define DA9062AA_SEQ			0x081
#define DA9062AA_SEQ_TIMER		0x082
#define DA9062AA_ID_2_1			0x083
#define DA9062AA_ID_4_3			0x084
#define DA9062AA_ID_12_11		0x088
#define DA9062AA_ID_14_13		0x089
#define DA9062AA_ID_16_15		0x08A
#define DA9062AA_ID_22_21		0x08D
#define DA9062AA_ID_24_23		0x08E
#define DA9062AA_ID_26_25		0x08F
#define DA9062AA_ID_28_27		0x090
#define DA9062AA_ID_30_29		0x091
#define DA9062AA_ID_32_31		0x092
#define DA9062AA_SEQ_A			0x095
#define DA9062AA_SEQ_B			0x096
#define DA9062AA_WAIT			0x097
#define DA9062AA_EN_32K			0x098
#define DA9062AA_RESET			0x099
#define DA9062AA_BUCK_ILIM_A		0x09A
#define DA9062AA_BUCK_ILIM_B		0x09B
#define DA9062AA_BUCK_ILIM_C		0x09C
#define DA9062AA_BUCK2_CFG		0x09D
#define DA9062AA_BUCK1_CFG		0x09E
#define DA9062AA_BUCK4_CFG		0x09F
#define DA9062AA_BUCK3_CFG		0x0A0
#define DA9062AA_VBUCK2_A		0x0A3
#define DA9062AA_VBUCK1_A		0x0A4
#define DA9062AA_VBUCK4_A		0x0A5
#define DA9062AA_VBUCK3_A		0x0A7
#define
DA9062AA_VLDO1_A 0x0A9 #define DA9062AA_VLDO2_A 0x0AA #define DA9062AA_VLDO3_A 0x0AB #define DA9062AA_VLDO4_A 0x0AC #define DA9062AA_VBUCK2_B 0x0B4 #define DA9062AA_VBUCK1_B 0x0B5 #define DA9062AA_VBUCK4_B 0x0B6 #define DA9062AA_VBUCK3_B 0x0B8 #define DA9062AA_VLDO1_B 0x0BA #define DA9062AA_VLDO2_B 0x0BB #define DA9062AA_VLDO3_B 0x0BC #define DA9062AA_VLDO4_B 0x0BD #define DA9062AA_BBAT_CONT 0x0C5 #define DA9062AA_INTERFACE 0x105 #define DA9062AA_CONFIG_A 0x106 #define DA9062AA_CONFIG_B 0x107 #define DA9062AA_CONFIG_C 0x108 #define DA9062AA_CONFIG_D 0x109 #define DA9062AA_CONFIG_E 0x10A #define DA9062AA_CONFIG_G 0x10C #define DA9062AA_CONFIG_H 0x10D #define DA9062AA_CONFIG_I 0x10E #define DA9062AA_CONFIG_J 0x10F #define DA9062AA_CONFIG_K 0x110 #define DA9062AA_CONFIG_M 0x112 #define DA9062AA_TRIM_CLDR 0x120 #define DA9062AA_GP_ID_0 0x121 #define DA9062AA_GP_ID_1 0x122 #define DA9062AA_GP_ID_2 0x123 #define DA9062AA_GP_ID_3 0x124 #define DA9062AA_GP_ID_4 0x125 #define DA9062AA_GP_ID_5 0x126 #define DA9062AA_GP_ID_6 0x127 #define DA9062AA_GP_ID_7 0x128 #define DA9062AA_GP_ID_8 0x129 #define DA9062AA_GP_ID_9 0x12A #define DA9062AA_GP_ID_10 0x12B #define DA9062AA_GP_ID_11 0x12C #define DA9062AA_GP_ID_12 0x12D #define DA9062AA_GP_ID_13 0x12E #define DA9062AA_GP_ID_14 0x12F #define DA9062AA_GP_ID_15 0x130 #define DA9062AA_GP_ID_16 0x131 #define DA9062AA_GP_ID_17 0x132 #define DA9062AA_GP_ID_18 0x133 #define DA9062AA_GP_ID_19 0x134 #define DA9062AA_DEVICE_ID 0x181 #define DA9062AA_VARIANT_ID 0x182 #define DA9062AA_CUSTOMER_ID 0x183 #define DA9062AA_CONFIG_ID 0x184 /* * Bit fields */ /* DA9062AA_PAGE_CON = 0x000 */ #define DA9062AA_PAGE_SHIFT 0 #define DA9062AA_PAGE_MASK 0x3f #define DA9062AA_WRITE_MODE_SHIFT 6 #define DA9062AA_WRITE_MODE_MASK BIT(6) #define DA9062AA_REVERT_SHIFT 7 #define DA9062AA_REVERT_MASK BIT(7) /* DA9062AA_STATUS_A = 0x001 */ #define DA9062AA_NONKEY_SHIFT 0 #define DA9062AA_NONKEY_MASK 0x01 #define DA9062AA_DVC_BUSY_SHIFT 2 #define DA9062AA_DVC_BUSY_MASK BIT(2) /* DA9062AA_STATUS_B = 0x002 */ #define DA9062AA_GPI0_SHIFT 0 #define DA9062AA_GPI0_MASK 0x01 #define DA9062AA_GPI1_SHIFT 1 #define DA9062AA_GPI1_MASK BIT(1) #define DA9062AA_GPI2_SHIFT 2 #define DA9062AA_GPI2_MASK BIT(2) #define DA9062AA_GPI3_SHIFT 3 #define DA9062AA_GPI3_MASK BIT(3) #define DA9062AA_GPI4_SHIFT 4 #define DA9062AA_GPI4_MASK BIT(4) /* DA9062AA_STATUS_D = 0x004 */ #define DA9062AA_LDO1_ILIM_SHIFT 0 #define DA9062AA_LDO1_ILIM_MASK 0x01 #define DA9062AA_LDO2_ILIM_SHIFT 1 #define DA9062AA_LDO2_ILIM_MASK BIT(1) #define DA9062AA_LDO3_ILIM_SHIFT 2 #define DA9062AA_LDO3_ILIM_MASK BIT(2) #define DA9062AA_LDO4_ILIM_SHIFT 3 #define DA9062AA_LDO4_ILIM_MASK BIT(3) /* DA9062AA_FAULT_LOG = 0x005 */ #define DA9062AA_TWD_ERROR_SHIFT 0 #define DA9062AA_TWD_ERROR_MASK 0x01 #define DA9062AA_POR_SHIFT 1 #define DA9062AA_POR_MASK BIT(1) #define DA9062AA_VDD_FAULT_SHIFT 2 #define DA9062AA_VDD_FAULT_MASK BIT(2) #define DA9062AA_VDD_START_SHIFT 3 #define DA9062AA_VDD_START_MASK BIT(3) #define DA9062AA_TEMP_CRIT_SHIFT 4 #define DA9062AA_TEMP_CRIT_MASK BIT(4) #define DA9062AA_KEY_RESET_SHIFT 5 #define DA9062AA_KEY_RESET_MASK BIT(5) #define DA9062AA_NSHUTDOWN_SHIFT 6 #define DA9062AA_NSHUTDOWN_MASK BIT(6) #define DA9062AA_WAIT_SHUT_SHIFT 7 #define DA9062AA_WAIT_SHUT_MASK BIT(7) /* DA9062AA_EVENT_A = 0x006 */ #define DA9062AA_E_NONKEY_SHIFT 0 #define DA9062AA_E_NONKEY_MASK 0x01 #define DA9062AA_E_ALARM_SHIFT 1 #define DA9062AA_E_ALARM_MASK BIT(1) #define DA9062AA_E_TICK_SHIFT 2 #define DA9062AA_E_TICK_MASK BIT(2) #define 
DA9062AA_E_WDG_WARN_SHIFT 3 #define DA9062AA_E_WDG_WARN_MASK BIT(3) #define DA9062AA_E_SEQ_RDY_SHIFT 4 #define DA9062AA_E_SEQ_RDY_MASK BIT(4) #define DA9062AA_EVENTS_B_SHIFT 5 #define DA9062AA_EVENTS_B_MASK BIT(5) #define DA9062AA_EVENTS_C_SHIFT 6 #define DA9062AA_EVENTS_C_MASK BIT(6) /* DA9062AA_EVENT_B = 0x007 */ #define DA9062AA_E_TEMP_SHIFT 1 #define DA9062AA_E_TEMP_MASK BIT(1) #define DA9062AA_E_LDO_LIM_SHIFT 3 #define DA9062AA_E_LDO_LIM_MASK BIT(3) #define DA9062AA_E_DVC_RDY_SHIFT 5 #define DA9062AA_E_DVC_RDY_MASK BIT(5) #define DA9062AA_E_VDD_WARN_SHIFT 7 #define DA9062AA_E_VDD_WARN_MASK BIT(7) /* DA9062AA_EVENT_C = 0x008 */ #define DA9062AA_E_GPI0_SHIFT 0 #define DA9062AA_E_GPI0_MASK 0x01 #define DA9062AA_E_GPI1_SHIFT 1 #define DA9062AA_E_GPI1_MASK BIT(1) #define DA9062AA_E_GPI2_SHIFT 2 #define DA9062AA_E_GPI2_MASK BIT(2) #define DA9062AA_E_GPI3_SHIFT 3 #define DA9062AA_E_GPI3_MASK BIT(3) #define DA9062AA_E_GPI4_SHIFT 4 #define DA9062AA_E_GPI4_MASK BIT(4) /* DA9062AA_IRQ_MASK_A = 0x00A */ #define DA9062AA_M_NONKEY_SHIFT 0 #define DA9062AA_M_NONKEY_MASK 0x01 #define DA9062AA_M_ALARM_SHIFT 1 #define DA9062AA_M_ALARM_MASK BIT(1) #define DA9062AA_M_TICK_SHIFT 2 #define DA9062AA_M_TICK_MASK BIT(2) #define DA9062AA_M_WDG_WARN_SHIFT 3 #define DA9062AA_M_WDG_WARN_MASK BIT(3) #define DA9062AA_M_SEQ_RDY_SHIFT 4 #define DA9062AA_M_SEQ_RDY_MASK BIT(4) /* DA9062AA_IRQ_MASK_B = 0x00B */ #define DA9062AA_M_TEMP_SHIFT 1 #define DA9062AA_M_TEMP_MASK BIT(1) #define DA9062AA_M_LDO_LIM_SHIFT 3 #define DA9062AA_M_LDO_LIM_MASK BIT(3) #define DA9062AA_M_DVC_RDY_SHIFT 5 #define DA9062AA_M_DVC_RDY_MASK BIT(5) #define DA9062AA_M_VDD_WARN_SHIFT 7 #define DA9062AA_M_VDD_WARN_MASK BIT(7) /* DA9062AA_IRQ_MASK_C = 0x00C */ #define DA9062AA_M_GPI0_SHIFT 0 #define DA9062AA_M_GPI0_MASK 0x01 #define DA9062AA_M_GPI1_SHIFT 1 #define DA9062AA_M_GPI1_MASK BIT(1) #define DA9062AA_M_GPI2_SHIFT 2 #define DA9062AA_M_GPI2_MASK BIT(2) #define DA9062AA_M_GPI3_SHIFT 3 #define DA9062AA_M_GPI3_MASK BIT(3) #define DA9062AA_M_GPI4_SHIFT 4 #define DA9062AA_M_GPI4_MASK BIT(4) /* DA9062AA_CONTROL_A = 0x00E */ #define DA9062AA_SYSTEM_EN_SHIFT 0 #define DA9062AA_SYSTEM_EN_MASK 0x01 #define DA9062AA_POWER_EN_SHIFT 1 #define DA9062AA_POWER_EN_MASK BIT(1) #define DA9062AA_POWER1_EN_SHIFT 2 #define DA9062AA_POWER1_EN_MASK BIT(2) #define DA9062AA_STANDBY_SHIFT 3 #define DA9062AA_STANDBY_MASK BIT(3) #define DA9062AA_M_SYSTEM_EN_SHIFT 4 #define DA9062AA_M_SYSTEM_EN_MASK BIT(4) #define DA9062AA_M_POWER_EN_SHIFT 5 #define DA9062AA_M_POWER_EN_MASK BIT(5) #define DA9062AA_M_POWER1_EN_SHIFT 6 #define DA9062AA_M_POWER1_EN_MASK BIT(6) /* DA9062AA_CONTROL_B = 0x00F */ #define DA9062AA_WATCHDOG_PD_SHIFT 1 #define DA9062AA_WATCHDOG_PD_MASK BIT(1) #define DA9062AA_FREEZE_EN_SHIFT 2 #define DA9062AA_FREEZE_EN_MASK BIT(2) #define DA9062AA_NRES_MODE_SHIFT 3 #define DA9062AA_NRES_MODE_MASK BIT(3) #define DA9062AA_NONKEY_LOCK_SHIFT 4 #define DA9062AA_NONKEY_LOCK_MASK BIT(4) #define DA9062AA_NFREEZE_SHIFT 5 #define DA9062AA_NFREEZE_MASK (0x03 << 5) #define DA9062AA_BUCK_SLOWSTART_SHIFT 7 #define DA9062AA_BUCK_SLOWSTART_MASK BIT(7) /* DA9062AA_CONTROL_C = 0x010 */ #define DA9062AA_DEBOUNCING_SHIFT 0 #define DA9062AA_DEBOUNCING_MASK 0x07 #define DA9062AA_AUTO_BOOT_SHIFT 3 #define DA9062AA_AUTO_BOOT_MASK BIT(3) #define DA9062AA_OTPREAD_EN_SHIFT 4 #define DA9062AA_OTPREAD_EN_MASK BIT(4) #define DA9062AA_SLEW_RATE_SHIFT 5 #define DA9062AA_SLEW_RATE_MASK (0x03 << 5) #define DA9062AA_DEF_SUPPLY_SHIFT 7 #define DA9062AA_DEF_SUPPLY_MASK BIT(7) /* DA9062AA_CONTROL_D = 
0x011 */ #define DA9062AA_TWDSCALE_SHIFT 0 #define DA9062AA_TWDSCALE_MASK 0x07 /* DA9062AA_CONTROL_E = 0x012 */ #define DA9062AA_RTC_MODE_PD_SHIFT 0 #define DA9062AA_RTC_MODE_PD_MASK 0x01 #define DA9062AA_RTC_MODE_SD_SHIFT 1 #define DA9062AA_RTC_MODE_SD_MASK BIT(1) #define DA9062AA_RTC_EN_SHIFT 2 #define DA9062AA_RTC_EN_MASK BIT(2) #define DA9062AA_V_LOCK_SHIFT 7 #define DA9062AA_V_LOCK_MASK BIT(7) /* DA9062AA_CONTROL_F = 0x013 */ #define DA9062AA_WATCHDOG_SHIFT 0 #define DA9062AA_WATCHDOG_MASK 0x01 #define DA9062AA_SHUTDOWN_SHIFT 1 #define DA9062AA_SHUTDOWN_MASK BIT(1) #define DA9062AA_WAKE_UP_SHIFT 2 #define DA9062AA_WAKE_UP_MASK BIT(2) /* DA9062AA_PD_DIS = 0x014 */ #define DA9062AA_GPI_DIS_SHIFT 0 #define DA9062AA_GPI_DIS_MASK 0x01 #define DA9062AA_PMIF_DIS_SHIFT 2 #define DA9062AA_PMIF_DIS_MASK BIT(2) #define DA9062AA_CLDR_PAUSE_SHIFT 4 #define DA9062AA_CLDR_PAUSE_MASK BIT(4) #define DA9062AA_BBAT_DIS_SHIFT 5 #define DA9062AA_BBAT_DIS_MASK BIT(5) #define DA9062AA_OUT32K_PAUSE_SHIFT 6 #define DA9062AA_OUT32K_PAUSE_MASK BIT(6) #define DA9062AA_PMCONT_DIS_SHIFT 7 #define DA9062AA_PMCONT_DIS_MASK BIT(7) /* DA9062AA_GPIO_0_1 = 0x015 */ #define DA9062AA_GPIO0_PIN_SHIFT 0 #define DA9062AA_GPIO0_PIN_MASK 0x03 #define DA9062AA_GPIO0_TYPE_SHIFT 2 #define DA9062AA_GPIO0_TYPE_MASK BIT(2) #define DA9062AA_GPIO0_WEN_SHIFT 3 #define DA9062AA_GPIO0_WEN_MASK BIT(3) #define DA9062AA_GPIO1_PIN_SHIFT 4 #define DA9062AA_GPIO1_PIN_MASK (0x03 << 4) #define DA9062AA_GPIO1_TYPE_SHIFT 6 #define DA9062AA_GPIO1_TYPE_MASK BIT(6) #define DA9062AA_GPIO1_WEN_SHIFT 7 #define DA9062AA_GPIO1_WEN_MASK BIT(7) /* DA9062AA_GPIO_2_3 = 0x016 */ #define DA9062AA_GPIO2_PIN_SHIFT 0 #define DA9062AA_GPIO2_PIN_MASK 0x03 #define DA9062AA_GPIO2_TYPE_SHIFT 2 #define DA9062AA_GPIO2_TYPE_MASK BIT(2) #define DA9062AA_GPIO2_WEN_SHIFT 3 #define DA9062AA_GPIO2_WEN_MASK BIT(3) #define DA9062AA_GPIO3_PIN_SHIFT 4 #define DA9062AA_GPIO3_PIN_MASK (0x03 << 4) #define DA9062AA_GPIO3_TYPE_SHIFT 6 #define DA9062AA_GPIO3_TYPE_MASK BIT(6) #define DA9062AA_GPIO3_WEN_SHIFT 7 #define DA9062AA_GPIO3_WEN_MASK BIT(7) /* DA9062AA_GPIO_4 = 0x017 */ #define DA9062AA_GPIO4_PIN_SHIFT 0 #define DA9062AA_GPIO4_PIN_MASK 0x03 #define DA9062AA_GPIO4_TYPE_SHIFT 2 #define DA9062AA_GPIO4_TYPE_MASK BIT(2) #define DA9062AA_GPIO4_WEN_SHIFT 3 #define DA9062AA_GPIO4_WEN_MASK BIT(3) /* DA9062AA_GPIO_WKUP_MODE = 0x01C */ #define DA9062AA_GPIO0_WKUP_MODE_SHIFT 0 #define DA9062AA_GPIO0_WKUP_MODE_MASK 0x01 #define DA9062AA_GPIO1_WKUP_MODE_SHIFT 1 #define DA9062AA_GPIO1_WKUP_MODE_MASK BIT(1) #define DA9062AA_GPIO2_WKUP_MODE_SHIFT 2 #define DA9062AA_GPIO2_WKUP_MODE_MASK BIT(2) #define DA9062AA_GPIO3_WKUP_MODE_SHIFT 3 #define DA9062AA_GPIO3_WKUP_MODE_MASK BIT(3) #define DA9062AA_GPIO4_WKUP_MODE_SHIFT 4 #define DA9062AA_GPIO4_WKUP_MODE_MASK BIT(4) /* DA9062AA_GPIO_MODE0_4 = 0x01D */ #define DA9062AA_GPIO0_MODE_SHIFT 0 #define DA9062AA_GPIO0_MODE_MASK 0x01 #define DA9062AA_GPIO1_MODE_SHIFT 1 #define DA9062AA_GPIO1_MODE_MASK BIT(1) #define DA9062AA_GPIO2_MODE_SHIFT 2 #define DA9062AA_GPIO2_MODE_MASK BIT(2) #define DA9062AA_GPIO3_MODE_SHIFT 3 #define DA9062AA_GPIO3_MODE_MASK BIT(3) #define DA9062AA_GPIO4_MODE_SHIFT 4 #define DA9062AA_GPIO4_MODE_MASK BIT(4) /* DA9062AA_GPIO_OUT0_2 = 0x01E */ #define DA9062AA_GPIO0_OUT_SHIFT 0 #define DA9062AA_GPIO0_OUT_MASK 0x07 #define DA9062AA_GPIO1_OUT_SHIFT 3 #define DA9062AA_GPIO1_OUT_MASK (0x07 << 3) #define DA9062AA_GPIO2_OUT_SHIFT 6 #define DA9062AA_GPIO2_OUT_MASK (0x03 << 6) /* DA9062AA_GPIO_OUT3_4 = 0x01F */ #define DA9062AA_GPIO3_OUT_SHIFT 0 
#define DA9062AA_GPIO3_OUT_MASK 0x07 #define DA9062AA_GPIO4_OUT_SHIFT 3 #define DA9062AA_GPIO4_OUT_MASK (0x03 << 3) /* DA9062AA_BUCK2_CONT = 0x020 */ #define DA9062AA_BUCK2_EN_SHIFT 0 #define DA9062AA_BUCK2_EN_MASK 0x01 #define DA9062AA_BUCK2_GPI_SHIFT 1 #define DA9062AA_BUCK2_GPI_MASK (0x03 << 1) #define DA9062AA_BUCK2_CONF_SHIFT 3 #define DA9062AA_BUCK2_CONF_MASK BIT(3) #define DA9062AA_VBUCK2_GPI_SHIFT 5 #define DA9062AA_VBUCK2_GPI_MASK (0x03 << 5) /* DA9062AA_BUCK1_CONT = 0x021 */ #define DA9062AA_BUCK1_EN_SHIFT 0 #define DA9062AA_BUCK1_EN_MASK 0x01 #define DA9062AA_BUCK1_GPI_SHIFT 1 #define DA9062AA_BUCK1_GPI_MASK (0x03 << 1) #define DA9062AA_BUCK1_CONF_SHIFT 3 #define DA9062AA_BUCK1_CONF_MASK BIT(3) #define DA9062AA_VBUCK1_GPI_SHIFT 5 #define DA9062AA_VBUCK1_GPI_MASK (0x03 << 5) /* DA9062AA_BUCK4_CONT = 0x022 */ #define DA9062AA_BUCK4_EN_SHIFT 0 #define DA9062AA_BUCK4_EN_MASK 0x01 #define DA9062AA_BUCK4_GPI_SHIFT 1 #define DA9062AA_BUCK4_GPI_MASK (0x03 << 1) #define DA9062AA_BUCK4_CONF_SHIFT 3 #define DA9062AA_BUCK4_CONF_MASK BIT(3) #define DA9062AA_VBUCK4_GPI_SHIFT 5 #define DA9062AA_VBUCK4_GPI_MASK (0x03 << 5) /* DA9062AA_BUCK3_CONT = 0x024 */ #define DA9062AA_BUCK3_EN_SHIFT 0 #define DA9062AA_BUCK3_EN_MASK 0x01 #define DA9062AA_BUCK3_GPI_SHIFT 1 #define DA9062AA_BUCK3_GPI_MASK (0x03 << 1) #define DA9062AA_BUCK3_CONF_SHIFT 3 #define DA9062AA_BUCK3_CONF_MASK BIT(3) #define DA9062AA_VBUCK3_GPI_SHIFT 5 #define DA9062AA_VBUCK3_GPI_MASK (0x03 << 5) /* DA9062AA_LDO1_CONT = 0x026 */ #define DA9062AA_LDO1_EN_SHIFT 0 #define DA9062AA_LDO1_EN_MASK 0x01 #define DA9062AA_LDO1_GPI_SHIFT 1 #define DA9062AA_LDO1_GPI_MASK (0x03 << 1) #define DA9062AA_LDO1_PD_DIS_SHIFT 3 #define DA9062AA_LDO1_PD_DIS_MASK BIT(3) #define DA9062AA_VLDO1_GPI_SHIFT 5 #define DA9062AA_VLDO1_GPI_MASK (0x03 << 5) #define DA9062AA_LDO1_CONF_SHIFT 7 #define DA9062AA_LDO1_CONF_MASK BIT(7) /* DA9062AA_LDO2_CONT = 0x027 */ #define DA9062AA_LDO2_EN_SHIFT 0 #define DA9062AA_LDO2_EN_MASK 0x01 #define DA9062AA_LDO2_GPI_SHIFT 1 #define DA9062AA_LDO2_GPI_MASK (0x03 << 1) #define DA9062AA_LDO2_PD_DIS_SHIFT 3 #define DA9062AA_LDO2_PD_DIS_MASK BIT(3) #define DA9062AA_VLDO2_GPI_SHIFT 5 #define DA9062AA_VLDO2_GPI_MASK (0x03 << 5) #define DA9062AA_LDO2_CONF_SHIFT 7 #define DA9062AA_LDO2_CONF_MASK BIT(7) /* DA9062AA_LDO3_CONT = 0x028 */ #define DA9062AA_LDO3_EN_SHIFT 0 #define DA9062AA_LDO3_EN_MASK 0x01 #define DA9062AA_LDO3_GPI_SHIFT 1 #define DA9062AA_LDO3_GPI_MASK (0x03 << 1) #define DA9062AA_LDO3_PD_DIS_SHIFT 3 #define DA9062AA_LDO3_PD_DIS_MASK BIT(3) #define DA9062AA_VLDO3_GPI_SHIFT 5 #define DA9062AA_VLDO3_GPI_MASK (0x03 << 5) #define DA9062AA_LDO3_CONF_SHIFT 7 #define DA9062AA_LDO3_CONF_MASK BIT(7) /* DA9062AA_LDO4_CONT = 0x029 */ #define DA9062AA_LDO4_EN_SHIFT 0 #define DA9062AA_LDO4_EN_MASK 0x01 #define DA9062AA_LDO4_GPI_SHIFT 1 #define DA9062AA_LDO4_GPI_MASK (0x03 << 1) #define DA9062AA_LDO4_PD_DIS_SHIFT 3 #define DA9062AA_LDO4_PD_DIS_MASK BIT(3) #define DA9062AA_VLDO4_GPI_SHIFT 5 #define DA9062AA_VLDO4_GPI_MASK (0x03 << 5) #define DA9062AA_LDO4_CONF_SHIFT 7 #define DA9062AA_LDO4_CONF_MASK BIT(7) /* DA9062AA_DVC_1 = 0x032 */ #define DA9062AA_VBUCK1_SEL_SHIFT 0 #define DA9062AA_VBUCK1_SEL_MASK 0x01 #define DA9062AA_VBUCK2_SEL_SHIFT 1 #define DA9062AA_VBUCK2_SEL_MASK BIT(1) #define DA9062AA_VBUCK4_SEL_SHIFT 2 #define DA9062AA_VBUCK4_SEL_MASK BIT(2) #define DA9062AA_VBUCK3_SEL_SHIFT 3 #define DA9062AA_VBUCK3_SEL_MASK BIT(3) #define DA9062AA_VLDO1_SEL_SHIFT 4 #define DA9062AA_VLDO1_SEL_MASK BIT(4) #define DA9062AA_VLDO2_SEL_SHIFT 5 
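/*
 * Illustrative sketch, not part of the original header: the DA9062 drivers
 * reach these registers through regmap (see struct da9062 in core.h below),
 * so enabling a supply is one masked update against its CONT register.
 * Assumes <linux/regmap.h>, the DA9062AA_BUCK2_CONT address macro defined
 * earlier in this header, and a valid `regmap` handle; the helper name is
 * hypothetical.
 */
static inline int da9062aa_buck2_enable(struct regmap *regmap)
{
	/* Set BUCK2_EN (bit 0) of BUCK2_CONT, leaving the other bits alone. */
	return regmap_update_bits(regmap, DA9062AA_BUCK2_CONT,
				  DA9062AA_BUCK2_EN_MASK,
				  DA9062AA_BUCK2_EN_MASK);
}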
#define DA9062AA_VLDO2_SEL_MASK BIT(5) #define DA9062AA_VLDO3_SEL_SHIFT 6 #define DA9062AA_VLDO3_SEL_MASK BIT(6) #define DA9062AA_VLDO4_SEL_SHIFT 7 #define DA9062AA_VLDO4_SEL_MASK BIT(7) /* DA9062AA_COUNT_S = 0x040 */ #define DA9062AA_COUNT_SEC_SHIFT 0 #define DA9062AA_COUNT_SEC_MASK 0x3f #define DA9062AA_RTC_READ_SHIFT 7 #define DA9062AA_RTC_READ_MASK BIT(7) /* DA9062AA_COUNT_MI = 0x041 */ #define DA9062AA_COUNT_MIN_SHIFT 0 #define DA9062AA_COUNT_MIN_MASK 0x3f /* DA9062AA_COUNT_H = 0x042 */ #define DA9062AA_COUNT_HOUR_SHIFT 0 #define DA9062AA_COUNT_HOUR_MASK 0x1f /* DA9062AA_COUNT_D = 0x043 */ #define DA9062AA_COUNT_DAY_SHIFT 0 #define DA9062AA_COUNT_DAY_MASK 0x1f /* DA9062AA_COUNT_MO = 0x044 */ #define DA9062AA_COUNT_MONTH_SHIFT 0 #define DA9062AA_COUNT_MONTH_MASK 0x0f /* DA9062AA_COUNT_Y = 0x045 */ #define DA9062AA_COUNT_YEAR_SHIFT 0 #define DA9062AA_COUNT_YEAR_MASK 0x3f #define DA9062AA_MONITOR_SHIFT 6 #define DA9062AA_MONITOR_MASK BIT(6) /* DA9062AA_ALARM_S = 0x046 */ #define DA9062AA_ALARM_SEC_SHIFT 0 #define DA9062AA_ALARM_SEC_MASK 0x3f #define DA9062AA_ALARM_STATUS_SHIFT 6 #define DA9062AA_ALARM_STATUS_MASK (0x03 << 6) /* DA9062AA_ALARM_MI = 0x047 */ #define DA9062AA_ALARM_MIN_SHIFT 0 #define DA9062AA_ALARM_MIN_MASK 0x3f /* DA9062AA_ALARM_H = 0x048 */ #define DA9062AA_ALARM_HOUR_SHIFT 0 #define DA9062AA_ALARM_HOUR_MASK 0x1f /* DA9062AA_ALARM_D = 0x049 */ #define DA9062AA_ALARM_DAY_SHIFT 0 #define DA9062AA_ALARM_DAY_MASK 0x1f /* DA9062AA_ALARM_MO = 0x04A */ #define DA9062AA_ALARM_MONTH_SHIFT 0 #define DA9062AA_ALARM_MONTH_MASK 0x0f #define DA9062AA_TICK_TYPE_SHIFT 4 #define DA9062AA_TICK_TYPE_MASK BIT(4) #define DA9062AA_TICK_WAKE_SHIFT 5 #define DA9062AA_TICK_WAKE_MASK BIT(5) /* DA9062AA_ALARM_Y = 0x04B */ #define DA9062AA_ALARM_YEAR_SHIFT 0 #define DA9062AA_ALARM_YEAR_MASK 0x3f #define DA9062AA_ALARM_ON_SHIFT 6 #define DA9062AA_ALARM_ON_MASK BIT(6) #define DA9062AA_TICK_ON_SHIFT 7 #define DA9062AA_TICK_ON_MASK BIT(7) /* DA9062AA_SECOND_A = 0x04C */ #define DA9062AA_SECONDS_A_SHIFT 0 #define DA9062AA_SECONDS_A_MASK 0xff /* DA9062AA_SECOND_B = 0x04D */ #define DA9062AA_SECONDS_B_SHIFT 0 #define DA9062AA_SECONDS_B_MASK 0xff /* DA9062AA_SECOND_C = 0x04E */ #define DA9062AA_SECONDS_C_SHIFT 0 #define DA9062AA_SECONDS_C_MASK 0xff /* DA9062AA_SECOND_D = 0x04F */ #define DA9062AA_SECONDS_D_SHIFT 0 #define DA9062AA_SECONDS_D_MASK 0xff /* DA9062AA_SEQ = 0x081 */ #define DA9062AA_SEQ_POINTER_SHIFT 0 #define DA9062AA_SEQ_POINTER_MASK 0x0f #define DA9062AA_NXT_SEQ_START_SHIFT 4 #define DA9062AA_NXT_SEQ_START_MASK (0x0f << 4) /* DA9062AA_SEQ_TIMER = 0x082 */ #define DA9062AA_SEQ_TIME_SHIFT 0 #define DA9062AA_SEQ_TIME_MASK 0x0f #define DA9062AA_SEQ_DUMMY_SHIFT 4 #define DA9062AA_SEQ_DUMMY_MASK (0x0f << 4) /* DA9062AA_ID_2_1 = 0x083 */ #define DA9062AA_LDO1_STEP_SHIFT 0 #define DA9062AA_LDO1_STEP_MASK 0x0f #define DA9062AA_LDO2_STEP_SHIFT 4 #define DA9062AA_LDO2_STEP_MASK (0x0f << 4) /* DA9062AA_ID_4_3 = 0x084 */ #define DA9062AA_LDO3_STEP_SHIFT 0 #define DA9062AA_LDO3_STEP_MASK 0x0f #define DA9062AA_LDO4_STEP_SHIFT 4 #define DA9062AA_LDO4_STEP_MASK (0x0f << 4) /* DA9062AA_ID_12_11 = 0x088 */ #define DA9062AA_PD_DIS_STEP_SHIFT 4 #define DA9062AA_PD_DIS_STEP_MASK (0x0f << 4) /* DA9062AA_ID_14_13 = 0x089 */ #define DA9062AA_BUCK1_STEP_SHIFT 0 #define DA9062AA_BUCK1_STEP_MASK 0x0f #define DA9062AA_BUCK2_STEP_SHIFT 4 #define DA9062AA_BUCK2_STEP_MASK (0x0f << 4) /* DA9062AA_ID_16_15 = 0x08A */ #define DA9062AA_BUCK4_STEP_SHIFT 0 #define DA9062AA_BUCK4_STEP_MASK 0x0f #define DA9062AA_BUCK3_STEP_SHIFT 4 
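/*
 * Illustrative sketch, not part of the original header: the COUNT_S..COUNT_Y
 * fields above hold the wall-clock time in plain binary.  Unpacking a 6-byte
 * block read of COUNT_S through COUNT_Y into struct rtc_time (needs
 * <linux/rtc.h>; the year-2000 epoch is an assumption, borrowed from the
 * upstream Dialog RTC driver):
 */
static inline void da9062aa_unpack_time(const u8 buf[6], struct rtc_time *tm)
{
	tm->tm_sec  = buf[0] & DA9062AA_COUNT_SEC_MASK;
	tm->tm_min  = buf[1] & DA9062AA_COUNT_MIN_MASK;
	tm->tm_hour = buf[2] & DA9062AA_COUNT_HOUR_MASK;
	tm->tm_mday = buf[3] & DA9062AA_COUNT_DAY_MASK;
	tm->tm_mon  = (buf[4] & DA9062AA_COUNT_MONTH_MASK) - 1;  /* 0-based */
	tm->tm_year = (buf[5] & DA9062AA_COUNT_YEAR_MASK) + 100; /* since 1900 */
}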
#define DA9062AA_BUCK3_STEP_MASK (0x0f << 4) /* DA9062AA_ID_22_21 = 0x08D */ #define DA9062AA_GP_RISE1_STEP_SHIFT 0 #define DA9062AA_GP_RISE1_STEP_MASK 0x0f #define DA9062AA_GP_FALL1_STEP_SHIFT 4 #define DA9062AA_GP_FALL1_STEP_MASK (0x0f << 4) /* DA9062AA_ID_24_23 = 0x08E */ #define DA9062AA_GP_RISE2_STEP_SHIFT 0 #define DA9062AA_GP_RISE2_STEP_MASK 0x0f #define DA9062AA_GP_FALL2_STEP_SHIFT 4 #define DA9062AA_GP_FALL2_STEP_MASK (0x0f << 4) /* DA9062AA_ID_26_25 = 0x08F */ #define DA9062AA_GP_RISE3_STEP_SHIFT 0 #define DA9062AA_GP_RISE3_STEP_MASK 0x0f #define DA9062AA_GP_FALL3_STEP_SHIFT 4 #define DA9062AA_GP_FALL3_STEP_MASK (0x0f << 4) /* DA9062AA_ID_28_27 = 0x090 */ #define DA9062AA_GP_RISE4_STEP_SHIFT 0 #define DA9062AA_GP_RISE4_STEP_MASK 0x0f #define DA9062AA_GP_FALL4_STEP_SHIFT 4 #define DA9062AA_GP_FALL4_STEP_MASK (0x0f << 4) /* DA9062AA_ID_30_29 = 0x091 */ #define DA9062AA_GP_RISE5_STEP_SHIFT 0 #define DA9062AA_GP_RISE5_STEP_MASK 0x0f #define DA9062AA_GP_FALL5_STEP_SHIFT 4 #define DA9062AA_GP_FALL5_STEP_MASK (0x0f << 4) /* DA9062AA_ID_32_31 = 0x092 */ #define DA9062AA_WAIT_STEP_SHIFT 0 #define DA9062AA_WAIT_STEP_MASK 0x0f #define DA9062AA_EN32K_STEP_SHIFT 4 #define DA9062AA_EN32K_STEP_MASK (0x0f << 4) /* DA9062AA_SEQ_A = 0x095 */ #define DA9062AA_SYSTEM_END_SHIFT 0 #define DA9062AA_SYSTEM_END_MASK 0x0f #define DA9062AA_POWER_END_SHIFT 4 #define DA9062AA_POWER_END_MASK (0x0f << 4) /* DA9062AA_SEQ_B = 0x096 */ #define DA9062AA_MAX_COUNT_SHIFT 0 #define DA9062AA_MAX_COUNT_MASK 0x0f #define DA9062AA_PART_DOWN_SHIFT 4 #define DA9062AA_PART_DOWN_MASK (0x0f << 4) /* DA9062AA_WAIT = 0x097 */ #define DA9062AA_WAIT_TIME_SHIFT 0 #define DA9062AA_WAIT_TIME_MASK 0x0f #define DA9062AA_WAIT_MODE_SHIFT 4 #define DA9062AA_WAIT_MODE_MASK BIT(4) #define DA9062AA_TIME_OUT_SHIFT 5 #define DA9062AA_TIME_OUT_MASK BIT(5) #define DA9062AA_WAIT_DIR_SHIFT 6 #define DA9062AA_WAIT_DIR_MASK (0x03 << 6) /* DA9062AA_EN_32K = 0x098 */ #define DA9062AA_STABILISATION_TIME_SHIFT 0 #define DA9062AA_STABILISATION_TIME_MASK 0x07 #define DA9062AA_CRYSTAL_SHIFT 3 #define DA9062AA_CRYSTAL_MASK BIT(3) #define DA9062AA_DELAY_MODE_SHIFT 4 #define DA9062AA_DELAY_MODE_MASK BIT(4) #define DA9062AA_OUT_CLOCK_SHIFT 5 #define DA9062AA_OUT_CLOCK_MASK BIT(5) #define DA9062AA_RTC_CLOCK_SHIFT 6 #define DA9062AA_RTC_CLOCK_MASK BIT(6) #define DA9062AA_EN_32KOUT_SHIFT 7 #define DA9062AA_EN_32KOUT_MASK BIT(7) /* DA9062AA_RESET = 0x099 */ #define DA9062AA_RESET_TIMER_SHIFT 0 #define DA9062AA_RESET_TIMER_MASK 0x3f #define DA9062AA_RESET_EVENT_SHIFT 6 #define DA9062AA_RESET_EVENT_MASK (0x03 << 6) /* DA9062AA_BUCK_ILIM_A = 0x09A */ #define DA9062AA_BUCK3_ILIM_SHIFT 0 #define DA9062AA_BUCK3_ILIM_MASK 0x0f /* DA9062AA_BUCK_ILIM_B = 0x09B */ #define DA9062AA_BUCK4_ILIM_SHIFT 0 #define DA9062AA_BUCK4_ILIM_MASK 0x0f /* DA9062AA_BUCK_ILIM_C = 0x09C */ #define DA9062AA_BUCK1_ILIM_SHIFT 0 #define DA9062AA_BUCK1_ILIM_MASK 0x0f #define DA9062AA_BUCK2_ILIM_SHIFT 4 #define DA9062AA_BUCK2_ILIM_MASK (0x0f << 4) /* DA9062AA_BUCK2_CFG = 0x09D */ #define DA9062AA_BUCK2_PD_DIS_SHIFT 5 #define DA9062AA_BUCK2_PD_DIS_MASK BIT(5) #define DA9062AA_BUCK2_MODE_SHIFT 6 #define DA9062AA_BUCK2_MODE_MASK (0x03 << 6) /* DA9062AA_BUCK1_CFG = 0x09E */ #define DA9062AA_BUCK1_PD_DIS_SHIFT 5 #define DA9062AA_BUCK1_PD_DIS_MASK BIT(5) #define DA9062AA_BUCK1_MODE_SHIFT 6 #define DA9062AA_BUCK1_MODE_MASK (0x03 << 6) /* DA9062AA_BUCK4_CFG = 0x09F */ #define DA9062AA_BUCK4_VTTR_EN_SHIFT 3 #define DA9062AA_BUCK4_VTTR_EN_MASK BIT(3) #define DA9062AA_BUCK4_VTT_EN_SHIFT 4 #define 
DA9062AA_BUCK4_VTT_EN_MASK BIT(4) #define DA9062AA_BUCK4_PD_DIS_SHIFT 5 #define DA9062AA_BUCK4_PD_DIS_MASK BIT(5) #define DA9062AA_BUCK4_MODE_SHIFT 6 #define DA9062AA_BUCK4_MODE_MASK (0x03 << 6) /* DA9062AA_BUCK3_CFG = 0x0A0 */ #define DA9062AA_BUCK3_PD_DIS_SHIFT 5 #define DA9062AA_BUCK3_PD_DIS_MASK BIT(5) #define DA9062AA_BUCK3_MODE_SHIFT 6 #define DA9062AA_BUCK3_MODE_MASK (0x03 << 6) /* DA9062AA_VBUCK2_A = 0x0A3 */ #define DA9062AA_VBUCK2_A_SHIFT 0 #define DA9062AA_VBUCK2_A_MASK 0x7f #define DA9062AA_BUCK2_SL_A_SHIFT 7 #define DA9062AA_BUCK2_SL_A_MASK BIT(7) /* DA9062AA_VBUCK1_A = 0x0A4 */ #define DA9062AA_VBUCK1_A_SHIFT 0 #define DA9062AA_VBUCK1_A_MASK 0x7f #define DA9062AA_BUCK1_SL_A_SHIFT 7 #define DA9062AA_BUCK1_SL_A_MASK BIT(7) /* DA9062AA_VBUCK4_A = 0x0A5 */ #define DA9062AA_VBUCK4_A_SHIFT 0 #define DA9062AA_VBUCK4_A_MASK 0x7f #define DA9062AA_BUCK4_SL_A_SHIFT 7 #define DA9062AA_BUCK4_SL_A_MASK BIT(7) /* DA9062AA_VBUCK3_A = 0x0A7 */ #define DA9062AA_VBUCK3_A_SHIFT 0 #define DA9062AA_VBUCK3_A_MASK 0x7f #define DA9062AA_BUCK3_SL_A_SHIFT 7 #define DA9062AA_BUCK3_SL_A_MASK BIT(7) /* DA9062AA_VLDO[1-4]_A common */ #define DA9062AA_VLDO_A_MIN_SEL 2 /* DA9062AA_VLDO1_A = 0x0A9 */ #define DA9062AA_VLDO1_A_SHIFT 0 #define DA9062AA_VLDO1_A_MASK 0x3f #define DA9062AA_LDO1_SL_A_SHIFT 7 #define DA9062AA_LDO1_SL_A_MASK BIT(7) /* DA9062AA_VLDO2_A = 0x0AA */ #define DA9062AA_VLDO2_A_SHIFT 0 #define DA9062AA_VLDO2_A_MASK 0x3f #define DA9062AA_LDO2_SL_A_SHIFT 7 #define DA9062AA_LDO2_SL_A_MASK BIT(7) /* DA9062AA_VLDO3_A = 0x0AB */ #define DA9062AA_VLDO3_A_SHIFT 0 #define DA9062AA_VLDO3_A_MASK 0x3f #define DA9062AA_LDO3_SL_A_SHIFT 7 #define DA9062AA_LDO3_SL_A_MASK BIT(7) /* DA9062AA_VLDO4_A = 0x0AC */ #define DA9062AA_VLDO4_A_SHIFT 0 #define DA9062AA_VLDO4_A_MASK 0x3f #define DA9062AA_LDO4_SL_A_SHIFT 7 #define DA9062AA_LDO4_SL_A_MASK BIT(7) /* DA9062AA_VBUCK2_B = 0x0B4 */ #define DA9062AA_VBUCK2_B_SHIFT 0 #define DA9062AA_VBUCK2_B_MASK 0x7f #define DA9062AA_BUCK2_SL_B_SHIFT 7 #define DA9062AA_BUCK2_SL_B_MASK BIT(7) /* DA9062AA_VBUCK1_B = 0x0B5 */ #define DA9062AA_VBUCK1_B_SHIFT 0 #define DA9062AA_VBUCK1_B_MASK 0x7f #define DA9062AA_BUCK1_SL_B_SHIFT 7 #define DA9062AA_BUCK1_SL_B_MASK BIT(7) /* DA9062AA_VBUCK4_B = 0x0B6 */ #define DA9062AA_VBUCK4_B_SHIFT 0 #define DA9062AA_VBUCK4_B_MASK 0x7f #define DA9062AA_BUCK4_SL_B_SHIFT 7 #define DA9062AA_BUCK4_SL_B_MASK BIT(7) /* DA9062AA_VBUCK3_B = 0x0B8 */ #define DA9062AA_VBUCK3_B_SHIFT 0 #define DA9062AA_VBUCK3_B_MASK 0x7f #define DA9062AA_BUCK3_SL_B_SHIFT 7 #define DA9062AA_BUCK3_SL_B_MASK BIT(7) /* DA9062AA_VLDO1_B = 0x0BA */ #define DA9062AA_VLDO1_B_SHIFT 0 #define DA9062AA_VLDO1_B_MASK 0x3f #define DA9062AA_LDO1_SL_B_SHIFT 7 #define DA9062AA_LDO1_SL_B_MASK BIT(7) /* DA9062AA_VLDO2_B = 0x0BB */ #define DA9062AA_VLDO2_B_SHIFT 0 #define DA9062AA_VLDO2_B_MASK 0x3f #define DA9062AA_LDO2_SL_B_SHIFT 7 #define DA9062AA_LDO2_SL_B_MASK BIT(7) /* DA9062AA_VLDO3_B = 0x0BC */ #define DA9062AA_VLDO3_B_SHIFT 0 #define DA9062AA_VLDO3_B_MASK 0x3f #define DA9062AA_LDO3_SL_B_SHIFT 7 #define DA9062AA_LDO3_SL_B_MASK BIT(7) /* DA9062AA_VLDO4_B = 0x0BD */ #define DA9062AA_VLDO4_B_SHIFT 0 #define DA9062AA_VLDO4_B_MASK 0x3f #define DA9062AA_LDO4_SL_B_SHIFT 7 #define DA9062AA_LDO4_SL_B_MASK BIT(7) /* DA9062AA_BBAT_CONT = 0x0C5 */ #define DA9062AA_BCHG_VSET_SHIFT 0 #define DA9062AA_BCHG_VSET_MASK 0x0f #define DA9062AA_BCHG_ISET_SHIFT 4 #define DA9062AA_BCHG_ISET_MASK (0x0f << 4) /* DA9062AA_INTERFACE = 0x105 */ #define DA9062AA_IF_BASE_ADDR_SHIFT 4 #define DA9062AA_IF_BASE_ADDR_MASK 
(0x0f << 4) /* DA9062AA_CONFIG_A = 0x106 */ #define DA9062AA_PM_I_V_SHIFT 0 #define DA9062AA_PM_I_V_MASK 0x01 #define DA9062AA_PM_O_TYPE_SHIFT 2 #define DA9062AA_PM_O_TYPE_MASK BIT(2) #define DA9062AA_IRQ_TYPE_SHIFT 3 #define DA9062AA_IRQ_TYPE_MASK BIT(3) #define DA9062AA_PM_IF_V_SHIFT 4 #define DA9062AA_PM_IF_V_MASK BIT(4) #define DA9062AA_PM_IF_FMP_SHIFT 5 #define DA9062AA_PM_IF_FMP_MASK BIT(5) #define DA9062AA_PM_IF_HSM_SHIFT 6 #define DA9062AA_PM_IF_HSM_MASK BIT(6) /* DA9062AA_CONFIG_B = 0x107 */ #define DA9062AA_VDD_FAULT_ADJ_SHIFT 0 #define DA9062AA_VDD_FAULT_ADJ_MASK 0x0f #define DA9062AA_VDD_HYST_ADJ_SHIFT 4 #define DA9062AA_VDD_HYST_ADJ_MASK (0x07 << 4) /* DA9062AA_CONFIG_C = 0x108 */ #define DA9062AA_BUCK_ACTV_DISCHRG_SHIFT 2 #define DA9062AA_BUCK_ACTV_DISCHRG_MASK BIT(2) #define DA9062AA_BUCK1_CLK_INV_SHIFT 3 #define DA9062AA_BUCK1_CLK_INV_MASK BIT(3) #define DA9062AA_BUCK4_CLK_INV_SHIFT 4 #define DA9062AA_BUCK4_CLK_INV_MASK BIT(4) #define DA9062AA_BUCK3_CLK_INV_SHIFT 6 #define DA9062AA_BUCK3_CLK_INV_MASK BIT(6) /* DA9062AA_CONFIG_D = 0x109 */ #define DA9062AA_GPI_V_SHIFT 0 #define DA9062AA_GPI_V_MASK 0x01 #define DA9062AA_NIRQ_MODE_SHIFT 1 #define DA9062AA_NIRQ_MODE_MASK BIT(1) #define DA9062AA_SYSTEM_EN_RD_SHIFT 2 #define DA9062AA_SYSTEM_EN_RD_MASK BIT(2) #define DA9062AA_FORCE_RESET_SHIFT 5 #define DA9062AA_FORCE_RESET_MASK BIT(5) /* DA9062AA_CONFIG_E = 0x10A */ #define DA9062AA_BUCK1_AUTO_SHIFT 0 #define DA9062AA_BUCK1_AUTO_MASK 0x01 #define DA9062AA_BUCK2_AUTO_SHIFT 1 #define DA9062AA_BUCK2_AUTO_MASK BIT(1) #define DA9062AA_BUCK4_AUTO_SHIFT 2 #define DA9062AA_BUCK4_AUTO_MASK BIT(2) #define DA9062AA_BUCK3_AUTO_SHIFT 4 #define DA9062AA_BUCK3_AUTO_MASK BIT(4) /* DA9062AA_CONFIG_G = 0x10C */ #define DA9062AA_LDO1_AUTO_SHIFT 0 #define DA9062AA_LDO1_AUTO_MASK 0x01 #define DA9062AA_LDO2_AUTO_SHIFT 1 #define DA9062AA_LDO2_AUTO_MASK BIT(1) #define DA9062AA_LDO3_AUTO_SHIFT 2 #define DA9062AA_LDO3_AUTO_MASK BIT(2) #define DA9062AA_LDO4_AUTO_SHIFT 3 #define DA9062AA_LDO4_AUTO_MASK BIT(3) /* DA9062AA_CONFIG_H = 0x10D */ #define DA9062AA_BUCK1_2_MERGE_SHIFT 3 #define DA9062AA_BUCK1_2_MERGE_MASK BIT(3) #define DA9062AA_BUCK2_OD_SHIFT 5 #define DA9062AA_BUCK2_OD_MASK BIT(5) #define DA9062AA_BUCK1_OD_SHIFT 6 #define DA9062AA_BUCK1_OD_MASK BIT(6) /* DA9062AA_CONFIG_I = 0x10E */ #define DA9062AA_NONKEY_PIN_SHIFT 0 #define DA9062AA_NONKEY_PIN_MASK 0x03 #define DA9062AA_nONKEY_SD_SHIFT 2 #define DA9062AA_nONKEY_SD_MASK BIT(2) #define DA9062AA_WATCHDOG_SD_SHIFT 3 #define DA9062AA_WATCHDOG_SD_MASK BIT(3) #define DA9062AA_KEY_SD_MODE_SHIFT 4 #define DA9062AA_KEY_SD_MODE_MASK BIT(4) #define DA9062AA_HOST_SD_MODE_SHIFT 5 #define DA9062AA_HOST_SD_MODE_MASK BIT(5) #define DA9062AA_INT_SD_MODE_SHIFT 6 #define DA9062AA_INT_SD_MODE_MASK BIT(6) #define DA9062AA_LDO_SD_SHIFT 7 #define DA9062AA_LDO_SD_MASK BIT(7) /* DA9062AA_CONFIG_J = 0x10F */ #define DA9062AA_KEY_DELAY_SHIFT 0 #define DA9062AA_KEY_DELAY_MASK 0x03 #define DA9062AA_SHUT_DELAY_SHIFT 2 #define DA9062AA_SHUT_DELAY_MASK (0x03 << 2) #define DA9062AA_RESET_DURATION_SHIFT 4 #define DA9062AA_RESET_DURATION_MASK (0x03 << 4) #define DA9062AA_TWOWIRE_TO_SHIFT 6 #define DA9062AA_TWOWIRE_TO_MASK BIT(6) #define DA9062AA_IF_RESET_SHIFT 7 #define DA9062AA_IF_RESET_MASK BIT(7) /* DA9062AA_CONFIG_K = 0x110 */ #define DA9062AA_GPIO0_PUPD_SHIFT 0 #define DA9062AA_GPIO0_PUPD_MASK 0x01 #define DA9062AA_GPIO1_PUPD_SHIFT 1 #define DA9062AA_GPIO1_PUPD_MASK BIT(1) #define DA9062AA_GPIO2_PUPD_SHIFT 2 #define DA9062AA_GPIO2_PUPD_MASK BIT(2) #define 
DA9062AA_GPIO3_PUPD_SHIFT 3 #define DA9062AA_GPIO3_PUPD_MASK BIT(3) #define DA9062AA_GPIO4_PUPD_SHIFT 4 #define DA9062AA_GPIO4_PUPD_MASK BIT(4) /* DA9062AA_CONFIG_M = 0x112 */ #define DA9062AA_NSHUTDOWN_PU_SHIFT 1 #define DA9062AA_NSHUTDOWN_PU_MASK BIT(1) #define DA9062AA_WDG_MODE_SHIFT 3 #define DA9062AA_WDG_MODE_MASK BIT(3) #define DA9062AA_OSC_FRQ_SHIFT 4 #define DA9062AA_OSC_FRQ_MASK (0x0f << 4) /* DA9062AA_TRIM_CLDR = 0x120 */ #define DA9062AA_TRIM_CLDR_SHIFT 0 #define DA9062AA_TRIM_CLDR_MASK 0xff /* DA9062AA_GP_ID_0 = 0x121 */ #define DA9062AA_GP_0_SHIFT 0 #define DA9062AA_GP_0_MASK 0xff /* DA9062AA_GP_ID_1 = 0x122 */ #define DA9062AA_GP_1_SHIFT 0 #define DA9062AA_GP_1_MASK 0xff /* DA9062AA_GP_ID_2 = 0x123 */ #define DA9062AA_GP_2_SHIFT 0 #define DA9062AA_GP_2_MASK 0xff /* DA9062AA_GP_ID_3 = 0x124 */ #define DA9062AA_GP_3_SHIFT 0 #define DA9062AA_GP_3_MASK 0xff /* DA9062AA_GP_ID_4 = 0x125 */ #define DA9062AA_GP_4_SHIFT 0 #define DA9062AA_GP_4_MASK 0xff /* DA9062AA_GP_ID_5 = 0x126 */ #define DA9062AA_GP_5_SHIFT 0 #define DA9062AA_GP_5_MASK 0xff /* DA9062AA_GP_ID_6 = 0x127 */ #define DA9062AA_GP_6_SHIFT 0 #define DA9062AA_GP_6_MASK 0xff /* DA9062AA_GP_ID_7 = 0x128 */ #define DA9062AA_GP_7_SHIFT 0 #define DA9062AA_GP_7_MASK 0xff /* DA9062AA_GP_ID_8 = 0x129 */ #define DA9062AA_GP_8_SHIFT 0 #define DA9062AA_GP_8_MASK 0xff /* DA9062AA_GP_ID_9 = 0x12A */ #define DA9062AA_GP_9_SHIFT 0 #define DA9062AA_GP_9_MASK 0xff /* DA9062AA_GP_ID_10 = 0x12B */ #define DA9062AA_GP_10_SHIFT 0 #define DA9062AA_GP_10_MASK 0xff /* DA9062AA_GP_ID_11 = 0x12C */ #define DA9062AA_GP_11_SHIFT 0 #define DA9062AA_GP_11_MASK 0xff /* DA9062AA_GP_ID_12 = 0x12D */ #define DA9062AA_GP_12_SHIFT 0 #define DA9062AA_GP_12_MASK 0xff /* DA9062AA_GP_ID_13 = 0x12E */ #define DA9062AA_GP_13_SHIFT 0 #define DA9062AA_GP_13_MASK 0xff /* DA9062AA_GP_ID_14 = 0x12F */ #define DA9062AA_GP_14_SHIFT 0 #define DA9062AA_GP_14_MASK 0xff /* DA9062AA_GP_ID_15 = 0x130 */ #define DA9062AA_GP_15_SHIFT 0 #define DA9062AA_GP_15_MASK 0xff /* DA9062AA_GP_ID_16 = 0x131 */ #define DA9062AA_GP_16_SHIFT 0 #define DA9062AA_GP_16_MASK 0xff /* DA9062AA_GP_ID_17 = 0x132 */ #define DA9062AA_GP_17_SHIFT 0 #define DA9062AA_GP_17_MASK 0xff /* DA9062AA_GP_ID_18 = 0x133 */ #define DA9062AA_GP_18_SHIFT 0 #define DA9062AA_GP_18_MASK 0xff /* DA9062AA_GP_ID_19 = 0x134 */ #define DA9062AA_GP_19_SHIFT 0 #define DA9062AA_GP_19_MASK 0xff /* DA9062AA_DEVICE_ID = 0x181 */ #define DA9062AA_DEV_ID_SHIFT 0 #define DA9062AA_DEV_ID_MASK 0xff /* DA9062AA_VARIANT_ID = 0x182 */ #define DA9062AA_VRC_SHIFT 0 #define DA9062AA_VRC_MASK 0x0f #define DA9062AA_MRC_SHIFT 4 #define DA9062AA_MRC_MASK (0x0f << 4) /* DA9062AA_CUSTOMER_ID = 0x183 */ #define DA9062AA_CUST_ID_SHIFT 0 #define DA9062AA_CUST_ID_MASK 0xff /* DA9062AA_CONFIG_ID = 0x184 */ #define DA9062AA_CONFIG_REV_SHIFT 0 #define DA9062AA_CONFIG_REV_MASK 0xff #endif /* __DA9062_H__ */ mfd/da9062/core.h 0000644 00000002213 14722070374 0007324 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2015-2017 Dialog Semiconductor */ #ifndef __MFD_DA9062_CORE_H__ #define __MFD_DA9062_CORE_H__ #include <linux/interrupt.h> #include <linux/mfd/da9062/registers.h> enum da9062_compatible_types { COMPAT_TYPE_DA9061 = 1, COMPAT_TYPE_DA9062, }; enum da9061_irqs { /* IRQ A */ DA9061_IRQ_ONKEY, DA9061_IRQ_WDG_WARN, DA9061_IRQ_SEQ_RDY, /* IRQ B*/ DA9061_IRQ_TEMP, DA9061_IRQ_LDO_LIM, DA9061_IRQ_DVC_RDY, DA9061_IRQ_VDD_WARN, /* IRQ C */ DA9061_IRQ_GPI0, DA9061_IRQ_GPI1, DA9061_IRQ_GPI2, DA9061_IRQ_GPI3, DA9061_IRQ_GPI4, 
DA9061_NUM_IRQ, }; enum da9062_irqs { /* IRQ A */ DA9062_IRQ_ONKEY, DA9062_IRQ_ALARM, DA9062_IRQ_TICK, DA9062_IRQ_WDG_WARN, DA9062_IRQ_SEQ_RDY, /* IRQ B*/ DA9062_IRQ_TEMP, DA9062_IRQ_LDO_LIM, DA9062_IRQ_DVC_RDY, DA9062_IRQ_VDD_WARN, /* IRQ C */ DA9062_IRQ_GPI0, DA9062_IRQ_GPI1, DA9062_IRQ_GPI2, DA9062_IRQ_GPI3, DA9062_IRQ_GPI4, DA9062_NUM_IRQ, }; struct da9062 { struct device *dev; struct regmap *regmap; struct regmap_irq_chip_data *regmap_irq; enum da9062_compatible_types chip_type; }; #endif /* __MFD_DA9062_CORE_H__ */ mfd/wm97xx.h 0000644 00000000561 14722070374 0006656 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * wm97xx client interface * * Copyright (C) 2017 Robert Jarzmik */ #ifndef __LINUX_MFD_WM97XX_H #define __LINUX_MFD_WM97XX_H struct regmap; struct wm97xx_batt_pdata; struct snd_ac97; struct wm97xx_platform_data { struct snd_ac97 *ac97; struct regmap *regmap; struct wm97xx_batt_pdata *batt_pdata; }; #endif mfd/wm8400-audio.h 0000644 00000212243 14722070374 0007533 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * wm8400 private definitions for audio * * Copyright 2008 Wolfson Microelectronics plc */ #ifndef __LINUX_MFD_WM8400_AUDIO_H #define __LINUX_MFD_WM8400_AUDIO_H #include <linux/mfd/wm8400-private.h> /* * R2 (0x02) - Power Management (1) */ #define WM8400_CODEC_ENA 0x8000 /* CODEC_ENA */ #define WM8400_CODEC_ENA_MASK 0x8000 /* CODEC_ENA */ #define WM8400_CODEC_ENA_SHIFT 15 /* CODEC_ENA */ #define WM8400_CODEC_ENA_WIDTH 1 /* CODEC_ENA */ #define WM8400_SYSCLK_ENA 0x4000 /* SYSCLK_ENA */ #define WM8400_SYSCLK_ENA_MASK 0x4000 /* SYSCLK_ENA */ #define WM8400_SYSCLK_ENA_SHIFT 14 /* SYSCLK_ENA */ #define WM8400_SYSCLK_ENA_WIDTH 1 /* SYSCLK_ENA */ #define WM8400_SPK_MIX_ENA 0x2000 /* SPK_MIX_ENA */ #define WM8400_SPK_MIX_ENA_MASK 0x2000 /* SPK_MIX_ENA */ #define WM8400_SPK_MIX_ENA_SHIFT 13 /* SPK_MIX_ENA */ #define WM8400_SPK_MIX_ENA_WIDTH 1 /* SPK_MIX_ENA */ #define WM8400_SPK_ENA 0x1000 /* SPK_ENA */ #define WM8400_SPK_ENA_MASK 0x1000 /* SPK_ENA */ #define WM8400_SPK_ENA_SHIFT 12 /* SPK_ENA */ #define WM8400_SPK_ENA_WIDTH 1 /* SPK_ENA */ #define WM8400_OUT3_ENA 0x0800 /* OUT3_ENA */ #define WM8400_OUT3_ENA_MASK 0x0800 /* OUT3_ENA */ #define WM8400_OUT3_ENA_SHIFT 11 /* OUT3_ENA */ #define WM8400_OUT3_ENA_WIDTH 1 /* OUT3_ENA */ #define WM8400_OUT4_ENA 0x0400 /* OUT4_ENA */ #define WM8400_OUT4_ENA_MASK 0x0400 /* OUT4_ENA */ #define WM8400_OUT4_ENA_SHIFT 10 /* OUT4_ENA */ #define WM8400_OUT4_ENA_WIDTH 1 /* OUT4_ENA */ #define WM8400_LOUT_ENA 0x0200 /* LOUT_ENA */ #define WM8400_LOUT_ENA_MASK 0x0200 /* LOUT_ENA */ #define WM8400_LOUT_ENA_SHIFT 9 /* LOUT_ENA */ #define WM8400_LOUT_ENA_WIDTH 1 /* LOUT_ENA */ #define WM8400_ROUT_ENA 0x0100 /* ROUT_ENA */ #define WM8400_ROUT_ENA_MASK 0x0100 /* ROUT_ENA */ #define WM8400_ROUT_ENA_SHIFT 8 /* ROUT_ENA */ #define WM8400_ROUT_ENA_WIDTH 1 /* ROUT_ENA */ #define WM8400_MIC1BIAS_ENA 0x0010 /* MIC1BIAS_ENA */ #define WM8400_MIC1BIAS_ENA_MASK 0x0010 /* MIC1BIAS_ENA */ #define WM8400_MIC1BIAS_ENA_SHIFT 4 /* MIC1BIAS_ENA */ #define WM8400_MIC1BIAS_ENA_WIDTH 1 /* MIC1BIAS_ENA */ #define WM8400_VMID_MODE_MASK 0x0006 /* VMID_MODE - [2:1] */ #define WM8400_VMID_MODE_SHIFT 1 /* VMID_MODE - [2:1] */ #define WM8400_VMID_MODE_WIDTH 2 /* VMID_MODE - [2:1] */ #define WM8400_VREF_ENA 0x0001 /* VREF_ENA */ #define WM8400_VREF_ENA_MASK 0x0001 /* VREF_ENA */ #define WM8400_VREF_ENA_SHIFT 0 /* VREF_ENA */ #define WM8400_VREF_ENA_WIDTH 1 /* VREF_ENA */ /* * R3 (0x03) - Power Management (2) */ #define WM8400_FLL_ENA 0x8000 /* FLL_ENA */
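/*
 * Illustrative sketch, not part of the original header: each WM8400 bitfield
 * comes as a <NAME>, <NAME>_MASK, <NAME>_SHIFT, <NAME>_WIDTH quartet, so a
 * single generic field-prepare helper covers them all.  The helper name and
 * the usage line below are hypothetical:
 */
static inline u16 wm8400_field_prep(u16 reg_val, u16 mask,
				    unsigned int shift, u16 field)
{
	/* Clear the field, then merge the shifted value under its mask. */
	return (reg_val & ~mask) | ((u16)(field << shift) & mask);
}

/* e.g. program VMID_MODE (R2 bits [2:1]) without disturbing the rest: */
/* r2 = wm8400_field_prep(r2, WM8400_VMID_MODE_MASK, WM8400_VMID_MODE_SHIFT, 2); */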
#define WM8400_FLL_ENA_MASK 0x8000 /* FLL_ENA */ #define WM8400_FLL_ENA_SHIFT 15 /* FLL_ENA */ #define WM8400_FLL_ENA_WIDTH 1 /* FLL_ENA */ #define WM8400_TSHUT_ENA 0x4000 /* TSHUT_ENA */ #define WM8400_TSHUT_ENA_MASK 0x4000 /* TSHUT_ENA */ #define WM8400_TSHUT_ENA_SHIFT 14 /* TSHUT_ENA */ #define WM8400_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */ #define WM8400_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */ #define WM8400_TSHUT_OPDIS_MASK 0x2000 /* TSHUT_OPDIS */ #define WM8400_TSHUT_OPDIS_SHIFT 13 /* TSHUT_OPDIS */ #define WM8400_TSHUT_OPDIS_WIDTH 1 /* TSHUT_OPDIS */ #define WM8400_OPCLK_ENA 0x0800 /* OPCLK_ENA */ #define WM8400_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */ #define WM8400_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */ #define WM8400_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */ #define WM8400_AINL_ENA 0x0200 /* AINL_ENA */ #define WM8400_AINL_ENA_MASK 0x0200 /* AINL_ENA */ #define WM8400_AINL_ENA_SHIFT 9 /* AINL_ENA */ #define WM8400_AINL_ENA_WIDTH 1 /* AINL_ENA */ #define WM8400_AINR_ENA 0x0100 /* AINR_ENA */ #define WM8400_AINR_ENA_MASK 0x0100 /* AINR_ENA */ #define WM8400_AINR_ENA_SHIFT 8 /* AINR_ENA */ #define WM8400_AINR_ENA_WIDTH 1 /* AINR_ENA */ #define WM8400_LIN34_ENA 0x0080 /* LIN34_ENA */ #define WM8400_LIN34_ENA_MASK 0x0080 /* LIN34_ENA */ #define WM8400_LIN34_ENA_SHIFT 7 /* LIN34_ENA */ #define WM8400_LIN34_ENA_WIDTH 1 /* LIN34_ENA */ #define WM8400_LIN12_ENA 0x0040 /* LIN12_ENA */ #define WM8400_LIN12_ENA_MASK 0x0040 /* LIN12_ENA */ #define WM8400_LIN12_ENA_SHIFT 6 /* LIN12_ENA */ #define WM8400_LIN12_ENA_WIDTH 1 /* LIN12_ENA */ #define WM8400_RIN34_ENA 0x0020 /* RIN34_ENA */ #define WM8400_RIN34_ENA_MASK 0x0020 /* RIN34_ENA */ #define WM8400_RIN34_ENA_SHIFT 5 /* RIN34_ENA */ #define WM8400_RIN34_ENA_WIDTH 1 /* RIN34_ENA */ #define WM8400_RIN12_ENA 0x0010 /* RIN12_ENA */ #define WM8400_RIN12_ENA_MASK 0x0010 /* RIN12_ENA */ #define WM8400_RIN12_ENA_SHIFT 4 /* RIN12_ENA */ #define WM8400_RIN12_ENA_WIDTH 1 /* RIN12_ENA */ #define WM8400_ADCL_ENA 0x0002 /* ADCL_ENA */ #define WM8400_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */ #define WM8400_ADCL_ENA_SHIFT 1 /* ADCL_ENA */ #define WM8400_ADCL_ENA_WIDTH 1 /* ADCL_ENA */ #define WM8400_ADCR_ENA 0x0001 /* ADCR_ENA */ #define WM8400_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */ #define WM8400_ADCR_ENA_SHIFT 0 /* ADCR_ENA */ #define WM8400_ADCR_ENA_WIDTH 1 /* ADCR_ENA */ /* * R4 (0x04) - Power Management (3) */ #define WM8400_LON_ENA 0x2000 /* LON_ENA */ #define WM8400_LON_ENA_MASK 0x2000 /* LON_ENA */ #define WM8400_LON_ENA_SHIFT 13 /* LON_ENA */ #define WM8400_LON_ENA_WIDTH 1 /* LON_ENA */ #define WM8400_LOP_ENA 0x1000 /* LOP_ENA */ #define WM8400_LOP_ENA_MASK 0x1000 /* LOP_ENA */ #define WM8400_LOP_ENA_SHIFT 12 /* LOP_ENA */ #define WM8400_LOP_ENA_WIDTH 1 /* LOP_ENA */ #define WM8400_RON_ENA 0x0800 /* RON_ENA */ #define WM8400_RON_ENA_MASK 0x0800 /* RON_ENA */ #define WM8400_RON_ENA_SHIFT 11 /* RON_ENA */ #define WM8400_RON_ENA_WIDTH 1 /* RON_ENA */ #define WM8400_ROP_ENA 0x0400 /* ROP_ENA */ #define WM8400_ROP_ENA_MASK 0x0400 /* ROP_ENA */ #define WM8400_ROP_ENA_SHIFT 10 /* ROP_ENA */ #define WM8400_ROP_ENA_WIDTH 1 /* ROP_ENA */ #define WM8400_LOPGA_ENA 0x0080 /* LOPGA_ENA */ #define WM8400_LOPGA_ENA_MASK 0x0080 /* LOPGA_ENA */ #define WM8400_LOPGA_ENA_SHIFT 7 /* LOPGA_ENA */ #define WM8400_LOPGA_ENA_WIDTH 1 /* LOPGA_ENA */ #define WM8400_ROPGA_ENA 0x0040 /* ROPGA_ENA */ #define WM8400_ROPGA_ENA_MASK 0x0040 /* ROPGA_ENA */ #define WM8400_ROPGA_ENA_SHIFT 6 /* ROPGA_ENA */ #define WM8400_ROPGA_ENA_WIDTH 1 /* ROPGA_ENA */ #define WM8400_LOMIX_ENA 0x0020 /* LOMIX_ENA */ #define 
WM8400_LOMIX_ENA_MASK 0x0020 /* LOMIX_ENA */ #define WM8400_LOMIX_ENA_SHIFT 5 /* LOMIX_ENA */ #define WM8400_LOMIX_ENA_WIDTH 1 /* LOMIX_ENA */ #define WM8400_ROMIX_ENA 0x0010 /* ROMIX_ENA */ #define WM8400_ROMIX_ENA_MASK 0x0010 /* ROMIX_ENA */ #define WM8400_ROMIX_ENA_SHIFT 4 /* ROMIX_ENA */ #define WM8400_ROMIX_ENA_WIDTH 1 /* ROMIX_ENA */ #define WM8400_DACL_ENA 0x0002 /* DACL_ENA */ #define WM8400_DACL_ENA_MASK 0x0002 /* DACL_ENA */ #define WM8400_DACL_ENA_SHIFT 1 /* DACL_ENA */ #define WM8400_DACL_ENA_WIDTH 1 /* DACL_ENA */ #define WM8400_DACR_ENA 0x0001 /* DACR_ENA */ #define WM8400_DACR_ENA_MASK 0x0001 /* DACR_ENA */ #define WM8400_DACR_ENA_SHIFT 0 /* DACR_ENA */ #define WM8400_DACR_ENA_WIDTH 1 /* DACR_ENA */ /* * R5 (0x05) - Audio Interface (1) */ #define WM8400_AIFADCL_SRC 0x8000 /* AIFADCL_SRC */ #define WM8400_AIFADCL_SRC_MASK 0x8000 /* AIFADCL_SRC */ #define WM8400_AIFADCL_SRC_SHIFT 15 /* AIFADCL_SRC */ #define WM8400_AIFADCL_SRC_WIDTH 1 /* AIFADCL_SRC */ #define WM8400_AIFADCR_SRC 0x4000 /* AIFADCR_SRC */ #define WM8400_AIFADCR_SRC_MASK 0x4000 /* AIFADCR_SRC */ #define WM8400_AIFADCR_SRC_SHIFT 14 /* AIFADCR_SRC */ #define WM8400_AIFADCR_SRC_WIDTH 1 /* AIFADCR_SRC */ #define WM8400_AIFADC_TDM 0x2000 /* AIFADC_TDM */ #define WM8400_AIFADC_TDM_MASK 0x2000 /* AIFADC_TDM */ #define WM8400_AIFADC_TDM_SHIFT 13 /* AIFADC_TDM */ #define WM8400_AIFADC_TDM_WIDTH 1 /* AIFADC_TDM */ #define WM8400_AIFADC_TDM_CHAN 0x1000 /* AIFADC_TDM_CHAN */ #define WM8400_AIFADC_TDM_CHAN_MASK 0x1000 /* AIFADC_TDM_CHAN */ #define WM8400_AIFADC_TDM_CHAN_SHIFT 12 /* AIFADC_TDM_CHAN */ #define WM8400_AIFADC_TDM_CHAN_WIDTH 1 /* AIFADC_TDM_CHAN */ #define WM8400_AIF_BCLK_INV 0x0100 /* AIF_BCLK_INV */ #define WM8400_AIF_BCLK_INV_MASK 0x0100 /* AIF_BCLK_INV */ #define WM8400_AIF_BCLK_INV_SHIFT 8 /* AIF_BCLK_INV */ #define WM8400_AIF_BCLK_INV_WIDTH 1 /* AIF_BCLK_INV */ #define WM8400_AIF_LRCLK_INV 0x0080 /* AIF_LRCLK_INV */ #define WM8400_AIF_LRCLK_INV_MASK 0x0080 /* AIF_LRCLK_INV */ #define WM8400_AIF_LRCLK_INV_SHIFT 7 /* AIF_LRCLK_INV */ #define WM8400_AIF_LRCLK_INV_WIDTH 1 /* AIF_LRCLK_INV */ #define WM8400_AIF_WL_MASK 0x0060 /* AIF_WL - [6:5] */ #define WM8400_AIF_WL_SHIFT 5 /* AIF_WL - [6:5] */ #define WM8400_AIF_WL_WIDTH 2 /* AIF_WL - [6:5] */ #define WM8400_AIF_WL_16BITS (0 << 5) #define WM8400_AIF_WL_20BITS (1 << 5) #define WM8400_AIF_WL_24BITS (2 << 5) #define WM8400_AIF_WL_32BITS (3 << 5) #define WM8400_AIF_FMT_MASK 0x0018 /* AIF_FMT - [4:3] */ #define WM8400_AIF_FMT_SHIFT 3 /* AIF_FMT - [4:3] */ #define WM8400_AIF_FMT_WIDTH 2 /* AIF_FMT - [4:3] */ #define WM8400_AIF_FMT_RIGHTJ (0 << 3) #define WM8400_AIF_FMT_LEFTJ (1 << 3) #define WM8400_AIF_FMT_I2S (2 << 3) #define WM8400_AIF_FMT_DSP (3 << 3) /* * R6 (0x06) - Audio Interface (2) */ #define WM8400_DACL_SRC 0x8000 /* DACL_SRC */ #define WM8400_DACL_SRC_MASK 0x8000 /* DACL_SRC */ #define WM8400_DACL_SRC_SHIFT 15 /* DACL_SRC */ #define WM8400_DACL_SRC_WIDTH 1 /* DACL_SRC */ #define WM8400_DACR_SRC 0x4000 /* DACR_SRC */ #define WM8400_DACR_SRC_MASK 0x4000 /* DACR_SRC */ #define WM8400_DACR_SRC_SHIFT 14 /* DACR_SRC */ #define WM8400_DACR_SRC_WIDTH 1 /* DACR_SRC */ #define WM8400_AIFDAC_TDM 0x2000 /* AIFDAC_TDM */ #define WM8400_AIFDAC_TDM_MASK 0x2000 /* AIFDAC_TDM */ #define WM8400_AIFDAC_TDM_SHIFT 13 /* AIFDAC_TDM */ #define WM8400_AIFDAC_TDM_WIDTH 1 /* AIFDAC_TDM */ #define WM8400_AIFDAC_TDM_CHAN 0x1000 /* AIFDAC_TDM_CHAN */ #define WM8400_AIFDAC_TDM_CHAN_MASK 0x1000 /* AIFDAC_TDM_CHAN */ #define WM8400_AIFDAC_TDM_CHAN_SHIFT 12 /* AIFDAC_TDM_CHAN */ #define 
WM8400_AIFDAC_TDM_CHAN_WIDTH 1 /* AIFDAC_TDM_CHAN */ #define WM8400_DAC_BOOST_MASK 0x0C00 /* DAC_BOOST - [11:10] */ #define WM8400_DAC_BOOST_SHIFT 10 /* DAC_BOOST - [11:10] */ #define WM8400_DAC_BOOST_WIDTH 2 /* DAC_BOOST - [11:10] */ #define WM8400_DAC_COMP 0x0010 /* DAC_COMP */ #define WM8400_DAC_COMP_MASK 0x0010 /* DAC_COMP */ #define WM8400_DAC_COMP_SHIFT 4 /* DAC_COMP */ #define WM8400_DAC_COMP_WIDTH 1 /* DAC_COMP */ #define WM8400_DAC_COMPMODE 0x0008 /* DAC_COMPMODE */ #define WM8400_DAC_COMPMODE_MASK 0x0008 /* DAC_COMPMODE */ #define WM8400_DAC_COMPMODE_SHIFT 3 /* DAC_COMPMODE */ #define WM8400_DAC_COMPMODE_WIDTH 1 /* DAC_COMPMODE */ #define WM8400_ADC_COMP 0x0004 /* ADC_COMP */ #define WM8400_ADC_COMP_MASK 0x0004 /* ADC_COMP */ #define WM8400_ADC_COMP_SHIFT 2 /* ADC_COMP */ #define WM8400_ADC_COMP_WIDTH 1 /* ADC_COMP */ #define WM8400_ADC_COMPMODE 0x0002 /* ADC_COMPMODE */ #define WM8400_ADC_COMPMODE_MASK 0x0002 /* ADC_COMPMODE */ #define WM8400_ADC_COMPMODE_SHIFT 1 /* ADC_COMPMODE */ #define WM8400_ADC_COMPMODE_WIDTH 1 /* ADC_COMPMODE */ #define WM8400_LOOPBACK 0x0001 /* LOOPBACK */ #define WM8400_LOOPBACK_MASK 0x0001 /* LOOPBACK */ #define WM8400_LOOPBACK_SHIFT 0 /* LOOPBACK */ #define WM8400_LOOPBACK_WIDTH 1 /* LOOPBACK */ /* * R7 (0x07) - Clocking (1) */ #define WM8400_TOCLK_RATE 0x8000 /* TOCLK_RATE */ #define WM8400_TOCLK_RATE_MASK 0x8000 /* TOCLK_RATE */ #define WM8400_TOCLK_RATE_SHIFT 15 /* TOCLK_RATE */ #define WM8400_TOCLK_RATE_WIDTH 1 /* TOCLK_RATE */ #define WM8400_TOCLK_ENA 0x4000 /* TOCLK_ENA */ #define WM8400_TOCLK_ENA_MASK 0x4000 /* TOCLK_ENA */ #define WM8400_TOCLK_ENA_SHIFT 14 /* TOCLK_ENA */ #define WM8400_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */ #define WM8400_OPCLKDIV_MASK 0x1E00 /* OPCLKDIV - [12:9] */ #define WM8400_OPCLKDIV_SHIFT 9 /* OPCLKDIV - [12:9] */ #define WM8400_OPCLKDIV_WIDTH 4 /* OPCLKDIV - [12:9] */ #define WM8400_DCLKDIV_MASK 0x01C0 /* DCLKDIV - [8:6] */ #define WM8400_DCLKDIV_SHIFT 6 /* DCLKDIV - [8:6] */ #define WM8400_DCLKDIV_WIDTH 3 /* DCLKDIV - [8:6] */ #define WM8400_BCLK_DIV_MASK 0x001E /* BCLK_DIV - [4:1] */ #define WM8400_BCLK_DIV_SHIFT 1 /* BCLK_DIV - [4:1] */ #define WM8400_BCLK_DIV_WIDTH 4 /* BCLK_DIV - [4:1] */ /* * R8 (0x08) - Clocking (2) */ #define WM8400_MCLK_SRC 0x8000 /* MCLK_SRC */ #define WM8400_MCLK_SRC_MASK 0x8000 /* MCLK_SRC */ #define WM8400_MCLK_SRC_SHIFT 15 /* MCLK_SRC */ #define WM8400_MCLK_SRC_WIDTH 1 /* MCLK_SRC */ #define WM8400_SYSCLK_SRC 0x4000 /* SYSCLK_SRC */ #define WM8400_SYSCLK_SRC_MASK 0x4000 /* SYSCLK_SRC */ #define WM8400_SYSCLK_SRC_SHIFT 14 /* SYSCLK_SRC */ #define WM8400_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */ #define WM8400_CLK_FORCE 0x2000 /* CLK_FORCE */ #define WM8400_CLK_FORCE_MASK 0x2000 /* CLK_FORCE */ #define WM8400_CLK_FORCE_SHIFT 13 /* CLK_FORCE */ #define WM8400_CLK_FORCE_WIDTH 1 /* CLK_FORCE */ #define WM8400_MCLK_DIV_MASK 0x1800 /* MCLK_DIV - [12:11] */ #define WM8400_MCLK_DIV_SHIFT 11 /* MCLK_DIV - [12:11] */ #define WM8400_MCLK_DIV_WIDTH 2 /* MCLK_DIV - [12:11] */ #define WM8400_MCLK_INV 0x0400 /* MCLK_INV */ #define WM8400_MCLK_INV_MASK 0x0400 /* MCLK_INV */ #define WM8400_MCLK_INV_SHIFT 10 /* MCLK_INV */ #define WM8400_MCLK_INV_WIDTH 1 /* MCLK_INV */ #define WM8400_ADC_CLKDIV_MASK 0x00E0 /* ADC_CLKDIV - [7:5] */ #define WM8400_ADC_CLKDIV_SHIFT 5 /* ADC_CLKDIV - [7:5] */ #define WM8400_ADC_CLKDIV_WIDTH 3 /* ADC_CLKDIV - [7:5] */ #define WM8400_DAC_CLKDIV_MASK 0x001C /* DAC_CLKDIV - [4:2] */ #define WM8400_DAC_CLKDIV_SHIFT 2 /* DAC_CLKDIV - [4:2] */ #define WM8400_DAC_CLKDIV_WIDTH 3 /* DAC_CLKDIV - 
[4:2] */ /* * R9 (0x09) - Audio Interface (3) */ #define WM8400_AIF_MSTR1 0x8000 /* AIF_MSTR1 */ #define WM8400_AIF_MSTR1_MASK 0x8000 /* AIF_MSTR1 */ #define WM8400_AIF_MSTR1_SHIFT 15 /* AIF_MSTR1 */ #define WM8400_AIF_MSTR1_WIDTH 1 /* AIF_MSTR1 */ #define WM8400_AIF_MSTR2 0x4000 /* AIF_MSTR2 */ #define WM8400_AIF_MSTR2_MASK 0x4000 /* AIF_MSTR2 */ #define WM8400_AIF_MSTR2_SHIFT 14 /* AIF_MSTR2 */ #define WM8400_AIF_MSTR2_WIDTH 1 /* AIF_MSTR2 */ #define WM8400_AIF_SEL 0x2000 /* AIF_SEL */ #define WM8400_AIF_SEL_MASK 0x2000 /* AIF_SEL */ #define WM8400_AIF_SEL_SHIFT 13 /* AIF_SEL */ #define WM8400_AIF_SEL_WIDTH 1 /* AIF_SEL */ #define WM8400_ADCLRC_DIR 0x0800 /* ADCLRC_DIR */ #define WM8400_ADCLRC_DIR_MASK 0x0800 /* ADCLRC_DIR */ #define WM8400_ADCLRC_DIR_SHIFT 11 /* ADCLRC_DIR */ #define WM8400_ADCLRC_DIR_WIDTH 1 /* ADCLRC_DIR */ #define WM8400_ADCLRC_RATE_MASK 0x07FF /* ADCLRC_RATE - [10:0] */ #define WM8400_ADCLRC_RATE_SHIFT 0 /* ADCLRC_RATE - [10:0] */ #define WM8400_ADCLRC_RATE_WIDTH 11 /* ADCLRC_RATE - [10:0] */ /* * R10 (0x0A) - Audio Interface (4) */ #define WM8400_ALRCGPIO1 0x8000 /* ALRCGPIO1 */ #define WM8400_ALRCGPIO1_MASK 0x8000 /* ALRCGPIO1 */ #define WM8400_ALRCGPIO1_SHIFT 15 /* ALRCGPIO1 */ #define WM8400_ALRCGPIO1_WIDTH 1 /* ALRCGPIO1 */ #define WM8400_ALRCBGPIO6 0x4000 /* ALRCBGPIO6 */ #define WM8400_ALRCBGPIO6_MASK 0x4000 /* ALRCBGPIO6 */ #define WM8400_ALRCBGPIO6_SHIFT 14 /* ALRCBGPIO6 */ #define WM8400_ALRCBGPIO6_WIDTH 1 /* ALRCBGPIO6 */ #define WM8400_AIF_TRIS 0x2000 /* AIF_TRIS */ #define WM8400_AIF_TRIS_MASK 0x2000 /* AIF_TRIS */ #define WM8400_AIF_TRIS_SHIFT 13 /* AIF_TRIS */ #define WM8400_AIF_TRIS_WIDTH 1 /* AIF_TRIS */ #define WM8400_DACLRC_DIR 0x0800 /* DACLRC_DIR */ #define WM8400_DACLRC_DIR_MASK 0x0800 /* DACLRC_DIR */ #define WM8400_DACLRC_DIR_SHIFT 11 /* DACLRC_DIR */ #define WM8400_DACLRC_DIR_WIDTH 1 /* DACLRC_DIR */ #define WM8400_DACLRC_RATE_MASK 0x07FF /* DACLRC_RATE - [10:0] */ #define WM8400_DACLRC_RATE_SHIFT 0 /* DACLRC_RATE - [10:0] */ #define WM8400_DACLRC_RATE_WIDTH 11 /* DACLRC_RATE - [10:0] */ /* * R11 (0x0B) - DAC CTRL */ #define WM8400_DAC_SDMCLK_RATE 0x2000 /* DAC_SDMCLK_RATE */ #define WM8400_DAC_SDMCLK_RATE_MASK 0x2000 /* DAC_SDMCLK_RATE */ #define WM8400_DAC_SDMCLK_RATE_SHIFT 13 /* DAC_SDMCLK_RATE */ #define WM8400_DAC_SDMCLK_RATE_WIDTH 1 /* DAC_SDMCLK_RATE */ #define WM8400_AIF_LRCLKRATE 0x0400 /* AIF_LRCLKRATE */ #define WM8400_AIF_LRCLKRATE_MASK 0x0400 /* AIF_LRCLKRATE */ #define WM8400_AIF_LRCLKRATE_SHIFT 10 /* AIF_LRCLKRATE */ #define WM8400_AIF_LRCLKRATE_WIDTH 1 /* AIF_LRCLKRATE */ #define WM8400_DAC_MONO 0x0200 /* DAC_MONO */ #define WM8400_DAC_MONO_MASK 0x0200 /* DAC_MONO */ #define WM8400_DAC_MONO_SHIFT 9 /* DAC_MONO */ #define WM8400_DAC_MONO_WIDTH 1 /* DAC_MONO */ #define WM8400_DAC_SB_FILT 0x0100 /* DAC_SB_FILT */ #define WM8400_DAC_SB_FILT_MASK 0x0100 /* DAC_SB_FILT */ #define WM8400_DAC_SB_FILT_SHIFT 8 /* DAC_SB_FILT */ #define WM8400_DAC_SB_FILT_WIDTH 1 /* DAC_SB_FILT */ #define WM8400_DAC_MUTERATE 0x0080 /* DAC_MUTERATE */ #define WM8400_DAC_MUTERATE_MASK 0x0080 /* DAC_MUTERATE */ #define WM8400_DAC_MUTERATE_SHIFT 7 /* DAC_MUTERATE */ #define WM8400_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */ #define WM8400_DAC_MUTEMODE 0x0040 /* DAC_MUTEMODE */ #define WM8400_DAC_MUTEMODE_MASK 0x0040 /* DAC_MUTEMODE */ #define WM8400_DAC_MUTEMODE_SHIFT 6 /* DAC_MUTEMODE */ #define WM8400_DAC_MUTEMODE_WIDTH 1 /* DAC_MUTEMODE */ #define WM8400_DEEMP_MASK 0x0030 /* DEEMP - [5:4] */ #define WM8400_DEEMP_SHIFT 4 /* DEEMP - [5:4] */ #define 
WM8400_DEEMP_WIDTH 2 /* DEEMP - [5:4] */ #define WM8400_DAC_MUTE 0x0004 /* DAC_MUTE */ #define WM8400_DAC_MUTE_MASK 0x0004 /* DAC_MUTE */ #define WM8400_DAC_MUTE_SHIFT 2 /* DAC_MUTE */ #define WM8400_DAC_MUTE_WIDTH 1 /* DAC_MUTE */ #define WM8400_DACL_DATINV 0x0002 /* DACL_DATINV */ #define WM8400_DACL_DATINV_MASK 0x0002 /* DACL_DATINV */ #define WM8400_DACL_DATINV_SHIFT 1 /* DACL_DATINV */ #define WM8400_DACL_DATINV_WIDTH 1 /* DACL_DATINV */ #define WM8400_DACR_DATINV 0x0001 /* DACR_DATINV */ #define WM8400_DACR_DATINV_MASK 0x0001 /* DACR_DATINV */ #define WM8400_DACR_DATINV_SHIFT 0 /* DACR_DATINV */ #define WM8400_DACR_DATINV_WIDTH 1 /* DACR_DATINV */ /* * R12 (0x0C) - Left DAC Digital Volume */ #define WM8400_DAC_VU 0x0100 /* DAC_VU */ #define WM8400_DAC_VU_MASK 0x0100 /* DAC_VU */ #define WM8400_DAC_VU_SHIFT 8 /* DAC_VU */ #define WM8400_DAC_VU_WIDTH 1 /* DAC_VU */ #define WM8400_DACL_VOL_MASK 0x00FF /* DACL_VOL - [7:0] */ #define WM8400_DACL_VOL_SHIFT 0 /* DACL_VOL - [7:0] */ #define WM8400_DACL_VOL_WIDTH 8 /* DACL_VOL - [7:0] */ /* * R13 (0x0D) - Right DAC Digital Volume */ #define WM8400_DAC_VU 0x0100 /* DAC_VU */ #define WM8400_DAC_VU_MASK 0x0100 /* DAC_VU */ #define WM8400_DAC_VU_SHIFT 8 /* DAC_VU */ #define WM8400_DAC_VU_WIDTH 1 /* DAC_VU */ #define WM8400_DACR_VOL_MASK 0x00FF /* DACR_VOL - [7:0] */ #define WM8400_DACR_VOL_SHIFT 0 /* DACR_VOL - [7:0] */ #define WM8400_DACR_VOL_WIDTH 8 /* DACR_VOL - [7:0] */ /* * R14 (0x0E) - Digital Side Tone */ #define WM8400_ADCL_DAC_SVOL_MASK 0x1E00 /* ADCL_DAC_SVOL - [12:9] */ #define WM8400_ADCL_DAC_SVOL_SHIFT 9 /* ADCL_DAC_SVOL - [12:9] */ #define WM8400_ADCL_DAC_SVOL_WIDTH 4 /* ADCL_DAC_SVOL - [12:9] */ #define WM8400_ADCR_DAC_SVOL_MASK 0x01E0 /* ADCR_DAC_SVOL - [8:5] */ #define WM8400_ADCR_DAC_SVOL_SHIFT 5 /* ADCR_DAC_SVOL - [8:5] */ #define WM8400_ADCR_DAC_SVOL_WIDTH 4 /* ADCR_DAC_SVOL - [8:5] */ #define WM8400_ADC_TO_DACL_MASK 0x000C /* ADC_TO_DACL - [3:2] */ #define WM8400_ADC_TO_DACL_SHIFT 2 /* ADC_TO_DACL - [3:2] */ #define WM8400_ADC_TO_DACL_WIDTH 2 /* ADC_TO_DACL - [3:2] */ #define WM8400_ADC_TO_DACR_MASK 0x0003 /* ADC_TO_DACR - [1:0] */ #define WM8400_ADC_TO_DACR_SHIFT 0 /* ADC_TO_DACR - [1:0] */ #define WM8400_ADC_TO_DACR_WIDTH 2 /* ADC_TO_DACR - [1:0] */ /* * R15 (0x0F) - ADC CTRL */ #define WM8400_ADC_HPF_ENA 0x0100 /* ADC_HPF_ENA */ #define WM8400_ADC_HPF_ENA_MASK 0x0100 /* ADC_HPF_ENA */ #define WM8400_ADC_HPF_ENA_SHIFT 8 /* ADC_HPF_ENA */ #define WM8400_ADC_HPF_ENA_WIDTH 1 /* ADC_HPF_ENA */ #define WM8400_ADC_HPF_CUT_MASK 0x0060 /* ADC_HPF_CUT - [6:5] */ #define WM8400_ADC_HPF_CUT_SHIFT 5 /* ADC_HPF_CUT - [6:5] */ #define WM8400_ADC_HPF_CUT_WIDTH 2 /* ADC_HPF_CUT - [6:5] */ #define WM8400_ADCL_DATINV 0x0002 /* ADCL_DATINV */ #define WM8400_ADCL_DATINV_MASK 0x0002 /* ADCL_DATINV */ #define WM8400_ADCL_DATINV_SHIFT 1 /* ADCL_DATINV */ #define WM8400_ADCL_DATINV_WIDTH 1 /* ADCL_DATINV */ #define WM8400_ADCR_DATINV 0x0001 /* ADCR_DATINV */ #define WM8400_ADCR_DATINV_MASK 0x0001 /* ADCR_DATINV */ #define WM8400_ADCR_DATINV_SHIFT 0 /* ADCR_DATINV */ #define WM8400_ADCR_DATINV_WIDTH 1 /* ADCR_DATINV */ /* * R16 (0x10) - Left ADC Digital Volume */ #define WM8400_ADC_VU 0x0100 /* ADC_VU */ #define WM8400_ADC_VU_MASK 0x0100 /* ADC_VU */ #define WM8400_ADC_VU_SHIFT 8 /* ADC_VU */ #define WM8400_ADC_VU_WIDTH 1 /* ADC_VU */ #define WM8400_ADCL_VOL_MASK 0x00FF /* ADCL_VOL - [7:0] */ #define WM8400_ADCL_VOL_SHIFT 0 /* ADCL_VOL - [7:0] */ #define WM8400_ADCL_VOL_WIDTH 8 /* ADCL_VOL - [7:0] */ /* * R17 (0x11) - Right ADC Digital Volume */ 
#define WM8400_ADC_VU 0x0100 /* ADC_VU */ #define WM8400_ADC_VU_MASK 0x0100 /* ADC_VU */ #define WM8400_ADC_VU_SHIFT 8 /* ADC_VU */ #define WM8400_ADC_VU_WIDTH 1 /* ADC_VU */ #define WM8400_ADCR_VOL_MASK 0x00FF /* ADCR_VOL - [7:0] */ #define WM8400_ADCR_VOL_SHIFT 0 /* ADCR_VOL - [7:0] */ #define WM8400_ADCR_VOL_WIDTH 8 /* ADCR_VOL - [7:0] */ /* * R24 (0x18) - Left Line Input 1&2 Volume */ #define WM8400_IPVU 0x0100 /* IPVU */ #define WM8400_IPVU_MASK 0x0100 /* IPVU */ #define WM8400_IPVU_SHIFT 8 /* IPVU */ #define WM8400_IPVU_WIDTH 1 /* IPVU */ #define WM8400_LI12MUTE 0x0080 /* LI12MUTE */ #define WM8400_LI12MUTE_MASK 0x0080 /* LI12MUTE */ #define WM8400_LI12MUTE_SHIFT 7 /* LI12MUTE */ #define WM8400_LI12MUTE_WIDTH 1 /* LI12MUTE */ #define WM8400_LI12ZC 0x0040 /* LI12ZC */ #define WM8400_LI12ZC_MASK 0x0040 /* LI12ZC */ #define WM8400_LI12ZC_SHIFT 6 /* LI12ZC */ #define WM8400_LI12ZC_WIDTH 1 /* LI12ZC */ #define WM8400_LIN12VOL_MASK 0x001F /* LIN12VOL - [4:0] */ #define WM8400_LIN12VOL_SHIFT 0 /* LIN12VOL - [4:0] */ #define WM8400_LIN12VOL_WIDTH 5 /* LIN12VOL - [4:0] */ /* * R25 (0x19) - Left Line Input 3&4 Volume */ #define WM8400_IPVU 0x0100 /* IPVU */ #define WM8400_IPVU_MASK 0x0100 /* IPVU */ #define WM8400_IPVU_SHIFT 8 /* IPVU */ #define WM8400_IPVU_WIDTH 1 /* IPVU */ #define WM8400_LI34MUTE 0x0080 /* LI34MUTE */ #define WM8400_LI34MUTE_MASK 0x0080 /* LI34MUTE */ #define WM8400_LI34MUTE_SHIFT 7 /* LI34MUTE */ #define WM8400_LI34MUTE_WIDTH 1 /* LI34MUTE */ #define WM8400_LI34ZC 0x0040 /* LI34ZC */ #define WM8400_LI34ZC_MASK 0x0040 /* LI34ZC */ #define WM8400_LI34ZC_SHIFT 6 /* LI34ZC */ #define WM8400_LI34ZC_WIDTH 1 /* LI34ZC */ #define WM8400_LIN34VOL_MASK 0x001F /* LIN34VOL - [4:0] */ #define WM8400_LIN34VOL_SHIFT 0 /* LIN34VOL - [4:0] */ #define WM8400_LIN34VOL_WIDTH 5 /* LIN34VOL - [4:0] */ /* * R26 (0x1A) - Right Line Input 1&2 Volume */ #define WM8400_IPVU 0x0100 /* IPVU */ #define WM8400_IPVU_MASK 0x0100 /* IPVU */ #define WM8400_IPVU_SHIFT 8 /* IPVU */ #define WM8400_IPVU_WIDTH 1 /* IPVU */ #define WM8400_RI12MUTE 0x0080 /* RI12MUTE */ #define WM8400_RI12MUTE_MASK 0x0080 /* RI12MUTE */ #define WM8400_RI12MUTE_SHIFT 7 /* RI12MUTE */ #define WM8400_RI12MUTE_WIDTH 1 /* RI12MUTE */ #define WM8400_RI12ZC 0x0040 /* RI12ZC */ #define WM8400_RI12ZC_MASK 0x0040 /* RI12ZC */ #define WM8400_RI12ZC_SHIFT 6 /* RI12ZC */ #define WM8400_RI12ZC_WIDTH 1 /* RI12ZC */ #define WM8400_RIN12VOL_MASK 0x001F /* RIN12VOL - [4:0] */ #define WM8400_RIN12VOL_SHIFT 0 /* RIN12VOL - [4:0] */ #define WM8400_RIN12VOL_WIDTH 5 /* RIN12VOL - [4:0] */ /* * R27 (0x1B) - Right Line Input 3&4 Volume */ #define WM8400_IPVU 0x0100 /* IPVU */ #define WM8400_IPVU_MASK 0x0100 /* IPVU */ #define WM8400_IPVU_SHIFT 8 /* IPVU */ #define WM8400_IPVU_WIDTH 1 /* IPVU */ #define WM8400_RI34MUTE 0x0080 /* RI34MUTE */ #define WM8400_RI34MUTE_MASK 0x0080 /* RI34MUTE */ #define WM8400_RI34MUTE_SHIFT 7 /* RI34MUTE */ #define WM8400_RI34MUTE_WIDTH 1 /* RI34MUTE */ #define WM8400_RI34ZC 0x0040 /* RI34ZC */ #define WM8400_RI34ZC_MASK 0x0040 /* RI34ZC */ #define WM8400_RI34ZC_SHIFT 6 /* RI34ZC */ #define WM8400_RI34ZC_WIDTH 1 /* RI34ZC */ #define WM8400_RIN34VOL_MASK 0x001F /* RIN34VOL - [4:0] */ #define WM8400_RIN34VOL_SHIFT 0 /* RIN34VOL - [4:0] */ #define WM8400_RIN34VOL_WIDTH 5 /* RIN34VOL - [4:0] */ /* * R28 (0x1C) - Left Output Volume */ #define WM8400_OPVU 0x0100 /* OPVU */ #define WM8400_OPVU_MASK 0x0100 /* OPVU */ #define WM8400_OPVU_SHIFT 8 /* OPVU */ #define WM8400_OPVU_WIDTH 1 /* OPVU */ #define WM8400_LOZC 0x0080 /* LOZC */ 
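/*
 * Illustrative sketch, not part of the original header: IPVU above (like the
 * OPVU/DAC_VU/ADC_VU bits of the neighbouring registers) acts as a
 * volume-update latch, so a stereo gain change typically writes the left
 * register without the latch bit and the right register with it set, letting
 * both channels take effect together.  `wm8400_write` is a hypothetical
 * register writer; 0x18/0x1A are R24/R26 as documented above.
 */
static inline void wm8400_set_lin12_vol(void (*wm8400_write)(u8 reg, u16 val),
					u16 left, u16 right)
{
	wm8400_write(0x18, left & WM8400_LIN12VOL_MASK);	/* R24: latched */
	wm8400_write(0x1A, WM8400_IPVU |
			   (right & WM8400_RIN12VOL_MASK));	/* R26: applies */
}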
#define WM8400_LOZC_MASK 0x0080 /* LOZC */ #define WM8400_LOZC_SHIFT 7 /* LOZC */ #define WM8400_LOZC_WIDTH 1 /* LOZC */ #define WM8400_LOUTVOL_MASK 0x007F /* LOUTVOL - [6:0] */ #define WM8400_LOUTVOL_SHIFT 0 /* LOUTVOL - [6:0] */ #define WM8400_LOUTVOL_WIDTH 7 /* LOUTVOL - [6:0] */ /* * R29 (0x1D) - Right Output Volume */ #define WM8400_OPVU 0x0100 /* OPVU */ #define WM8400_OPVU_MASK 0x0100 /* OPVU */ #define WM8400_OPVU_SHIFT 8 /* OPVU */ #define WM8400_OPVU_WIDTH 1 /* OPVU */ #define WM8400_ROZC 0x0080 /* ROZC */ #define WM8400_ROZC_MASK 0x0080 /* ROZC */ #define WM8400_ROZC_SHIFT 7 /* ROZC */ #define WM8400_ROZC_WIDTH 1 /* ROZC */ #define WM8400_ROUTVOL_MASK 0x007F /* ROUTVOL - [6:0] */ #define WM8400_ROUTVOL_SHIFT 0 /* ROUTVOL - [6:0] */ #define WM8400_ROUTVOL_WIDTH 7 /* ROUTVOL - [6:0] */ /* * R30 (0x1E) - Line Outputs Volume */ #define WM8400_LONMUTE 0x0040 /* LONMUTE */ #define WM8400_LONMUTE_MASK 0x0040 /* LONMUTE */ #define WM8400_LONMUTE_SHIFT 6 /* LONMUTE */ #define WM8400_LONMUTE_WIDTH 1 /* LONMUTE */ #define WM8400_LOPMUTE 0x0020 /* LOPMUTE */ #define WM8400_LOPMUTE_MASK 0x0020 /* LOPMUTE */ #define WM8400_LOPMUTE_SHIFT 5 /* LOPMUTE */ #define WM8400_LOPMUTE_WIDTH 1 /* LOPMUTE */ #define WM8400_LOATTN 0x0010 /* LOATTN */ #define WM8400_LOATTN_MASK 0x0010 /* LOATTN */ #define WM8400_LOATTN_SHIFT 4 /* LOATTN */ #define WM8400_LOATTN_WIDTH 1 /* LOATTN */ #define WM8400_RONMUTE 0x0004 /* RONMUTE */ #define WM8400_RONMUTE_MASK 0x0004 /* RONMUTE */ #define WM8400_RONMUTE_SHIFT 2 /* RONMUTE */ #define WM8400_RONMUTE_WIDTH 1 /* RONMUTE */ #define WM8400_ROPMUTE 0x0002 /* ROPMUTE */ #define WM8400_ROPMUTE_MASK 0x0002 /* ROPMUTE */ #define WM8400_ROPMUTE_SHIFT 1 /* ROPMUTE */ #define WM8400_ROPMUTE_WIDTH 1 /* ROPMUTE */ #define WM8400_ROATTN 0x0001 /* ROATTN */ #define WM8400_ROATTN_MASK 0x0001 /* ROATTN */ #define WM8400_ROATTN_SHIFT 0 /* ROATTN */ #define WM8400_ROATTN_WIDTH 1 /* ROATTN */ /* * R31 (0x1F) - Out3/4 Volume */ #define WM8400_OUT3MUTE 0x0020 /* OUT3MUTE */ #define WM8400_OUT3MUTE_MASK 0x0020 /* OUT3MUTE */ #define WM8400_OUT3MUTE_SHIFT 5 /* OUT3MUTE */ #define WM8400_OUT3MUTE_WIDTH 1 /* OUT3MUTE */ #define WM8400_OUT3ATTN 0x0010 /* OUT3ATTN */ #define WM8400_OUT3ATTN_MASK 0x0010 /* OUT3ATTN */ #define WM8400_OUT3ATTN_SHIFT 4 /* OUT3ATTN */ #define WM8400_OUT3ATTN_WIDTH 1 /* OUT3ATTN */ #define WM8400_OUT4MUTE 0x0002 /* OUT4MUTE */ #define WM8400_OUT4MUTE_MASK 0x0002 /* OUT4MUTE */ #define WM8400_OUT4MUTE_SHIFT 1 /* OUT4MUTE */ #define WM8400_OUT4MUTE_WIDTH 1 /* OUT4MUTE */ #define WM8400_OUT4ATTN 0x0001 /* OUT4ATTN */ #define WM8400_OUT4ATTN_MASK 0x0001 /* OUT4ATTN */ #define WM8400_OUT4ATTN_SHIFT 0 /* OUT4ATTN */ #define WM8400_OUT4ATTN_WIDTH 1 /* OUT4ATTN */ /* * R32 (0x20) - Left OPGA Volume */ #define WM8400_OPVU 0x0100 /* OPVU */ #define WM8400_OPVU_MASK 0x0100 /* OPVU */ #define WM8400_OPVU_SHIFT 8 /* OPVU */ #define WM8400_OPVU_WIDTH 1 /* OPVU */ #define WM8400_LOPGAZC 0x0080 /* LOPGAZC */ #define WM8400_LOPGAZC_MASK 0x0080 /* LOPGAZC */ #define WM8400_LOPGAZC_SHIFT 7 /* LOPGAZC */ #define WM8400_LOPGAZC_WIDTH 1 /* LOPGAZC */ #define WM8400_LOPGAVOL_MASK 0x007F /* LOPGAVOL - [6:0] */ #define WM8400_LOPGAVOL_SHIFT 0 /* LOPGAVOL - [6:0] */ #define WM8400_LOPGAVOL_WIDTH 7 /* LOPGAVOL - [6:0] */ /* * R33 (0x21) - Right OPGA Volume */ #define WM8400_OPVU 0x0100 /* OPVU */ #define WM8400_OPVU_MASK 0x0100 /* OPVU */ #define WM8400_OPVU_SHIFT 8 /* OPVU */ #define WM8400_OPVU_WIDTH 1 /* OPVU */ #define WM8400_ROPGAZC 0x0080 /* ROPGAZC */ #define WM8400_ROPGAZC_MASK 0x0080 
/* ROPGAZC */ #define WM8400_ROPGAZC_SHIFT 7 /* ROPGAZC */ #define WM8400_ROPGAZC_WIDTH 1 /* ROPGAZC */ #define WM8400_ROPGAVOL_MASK 0x007F /* ROPGAVOL - [6:0] */ #define WM8400_ROPGAVOL_SHIFT 0 /* ROPGAVOL - [6:0] */ #define WM8400_ROPGAVOL_WIDTH 7 /* ROPGAVOL - [6:0] */ /* * R34 (0x22) - Speaker Volume */ #define WM8400_SPKATTN_MASK 0x0003 /* SPKATTN - [1:0] */ #define WM8400_SPKATTN_SHIFT 0 /* SPKATTN - [1:0] */ #define WM8400_SPKATTN_WIDTH 2 /* SPKATTN - [1:0] */ /* * R35 (0x23) - ClassD1 */ #define WM8400_CDMODE 0x0100 /* CDMODE */ #define WM8400_CDMODE_MASK 0x0100 /* CDMODE */ #define WM8400_CDMODE_SHIFT 8 /* CDMODE */ #define WM8400_CDMODE_WIDTH 1 /* CDMODE */ #define WM8400_CLASSD_CLK_SEL 0x0080 /* CLASSD_CLK_SEL */ #define WM8400_CLASSD_CLK_SEL_MASK 0x0080 /* CLASSD_CLK_SEL */ #define WM8400_CLASSD_CLK_SEL_SHIFT 7 /* CLASSD_CLK_SEL */ #define WM8400_CLASSD_CLK_SEL_WIDTH 1 /* CLASSD_CLK_SEL */ #define WM8400_CD_SRCTRL 0x0040 /* CD_SRCTRL */ #define WM8400_CD_SRCTRL_MASK 0x0040 /* CD_SRCTRL */ #define WM8400_CD_SRCTRL_SHIFT 6 /* CD_SRCTRL */ #define WM8400_CD_SRCTRL_WIDTH 1 /* CD_SRCTRL */ #define WM8400_SPKNOPOP 0x0020 /* SPKNOPOP */ #define WM8400_SPKNOPOP_MASK 0x0020 /* SPKNOPOP */ #define WM8400_SPKNOPOP_SHIFT 5 /* SPKNOPOP */ #define WM8400_SPKNOPOP_WIDTH 1 /* SPKNOPOP */ #define WM8400_DBLERATE 0x0010 /* DBLERATE */ #define WM8400_DBLERATE_MASK 0x0010 /* DBLERATE */ #define WM8400_DBLERATE_SHIFT 4 /* DBLERATE */ #define WM8400_DBLERATE_WIDTH 1 /* DBLERATE */ #define WM8400_LOOPTEST 0x0008 /* LOOPTEST */ #define WM8400_LOOPTEST_MASK 0x0008 /* LOOPTEST */ #define WM8400_LOOPTEST_SHIFT 3 /* LOOPTEST */ #define WM8400_LOOPTEST_WIDTH 1 /* LOOPTEST */ #define WM8400_HALFABBIAS 0x0004 /* HALFABBIAS */ #define WM8400_HALFABBIAS_MASK 0x0004 /* HALFABBIAS */ #define WM8400_HALFABBIAS_SHIFT 2 /* HALFABBIAS */ #define WM8400_HALFABBIAS_WIDTH 1 /* HALFABBIAS */ #define WM8400_TRIDEL_MASK 0x0003 /* TRIDEL - [1:0] */ #define WM8400_TRIDEL_SHIFT 0 /* TRIDEL - [1:0] */ #define WM8400_TRIDEL_WIDTH 2 /* TRIDEL - [1:0] */ /* * R37 (0x25) - ClassD3 */ #define WM8400_DCGAIN_MASK 0x0038 /* DCGAIN - [5:3] */ #define WM8400_DCGAIN_SHIFT 3 /* DCGAIN - [5:3] */ #define WM8400_DCGAIN_WIDTH 3 /* DCGAIN - [5:3] */ #define WM8400_ACGAIN_MASK 0x0007 /* ACGAIN - [2:0] */ #define WM8400_ACGAIN_SHIFT 0 /* ACGAIN - [2:0] */ #define WM8400_ACGAIN_WIDTH 3 /* ACGAIN - [2:0] */ /* * R39 (0x27) - Input Mixer1 */ #define WM8400_AINLMODE_MASK 0x000C /* AINLMODE - [3:2] */ #define WM8400_AINLMODE_SHIFT 2 /* AINLMODE - [3:2] */ #define WM8400_AINLMODE_WIDTH 2 /* AINLMODE - [3:2] */ #define WM8400_AINRMODE_MASK 0x0003 /* AINRMODE - [1:0] */ #define WM8400_AINRMODE_SHIFT 0 /* AINRMODE - [1:0] */ #define WM8400_AINRMODE_WIDTH 2 /* AINRMODE - [1:0] */ /* * R40 (0x28) - Input Mixer2 */ #define WM8400_LMP4 0x0080 /* LMP4 */ #define WM8400_LMP4_MASK 0x0080 /* LMP4 */ #define WM8400_LMP4_SHIFT 7 /* LMP4 */ #define WM8400_LMP4_WIDTH 1 /* LMP4 */ #define WM8400_LMN3 0x0040 /* LMN3 */ #define WM8400_LMN3_MASK 0x0040 /* LMN3 */ #define WM8400_LMN3_SHIFT 6 /* LMN3 */ #define WM8400_LMN3_WIDTH 1 /* LMN3 */ #define WM8400_LMP2 0x0020 /* LMP2 */ #define WM8400_LMP2_MASK 0x0020 /* LMP2 */ #define WM8400_LMP2_SHIFT 5 /* LMP2 */ #define WM8400_LMP2_WIDTH 1 /* LMP2 */ #define WM8400_LMN1 0x0010 /* LMN1 */ #define WM8400_LMN1_MASK 0x0010 /* LMN1 */ #define WM8400_LMN1_SHIFT 4 /* LMN1 */ #define WM8400_LMN1_WIDTH 1 /* LMN1 */ #define WM8400_RMP4 0x0008 /* RMP4 */ #define WM8400_RMP4_MASK 0x0008 /* RMP4 */ #define WM8400_RMP4_SHIFT 3 /* RMP4 
*/ #define WM8400_RMP4_WIDTH 1 /* RMP4 */ #define WM8400_RMN3 0x0004 /* RMN3 */ #define WM8400_RMN3_MASK 0x0004 /* RMN3 */ #define WM8400_RMN3_SHIFT 2 /* RMN3 */ #define WM8400_RMN3_WIDTH 1 /* RMN3 */ #define WM8400_RMP2 0x0002 /* RMP2 */ #define WM8400_RMP2_MASK 0x0002 /* RMP2 */ #define WM8400_RMP2_SHIFT 1 /* RMP2 */ #define WM8400_RMP2_WIDTH 1 /* RMP2 */ #define WM8400_RMN1 0x0001 /* RMN1 */ #define WM8400_RMN1_MASK 0x0001 /* RMN1 */ #define WM8400_RMN1_SHIFT 0 /* RMN1 */ #define WM8400_RMN1_WIDTH 1 /* RMN1 */ /* * R41 (0x29) - Input Mixer3 */ #define WM8400_L34MNB 0x0100 /* L34MNB */ #define WM8400_L34MNB_MASK 0x0100 /* L34MNB */ #define WM8400_L34MNB_SHIFT 8 /* L34MNB */ #define WM8400_L34MNB_WIDTH 1 /* L34MNB */ #define WM8400_L34MNBST 0x0080 /* L34MNBST */ #define WM8400_L34MNBST_MASK 0x0080 /* L34MNBST */ #define WM8400_L34MNBST_SHIFT 7 /* L34MNBST */ #define WM8400_L34MNBST_WIDTH 1 /* L34MNBST */ #define WM8400_L12MNB 0x0020 /* L12MNB */ #define WM8400_L12MNB_MASK 0x0020 /* L12MNB */ #define WM8400_L12MNB_SHIFT 5 /* L12MNB */ #define WM8400_L12MNB_WIDTH 1 /* L12MNB */ #define WM8400_L12MNBST 0x0010 /* L12MNBST */ #define WM8400_L12MNBST_MASK 0x0010 /* L12MNBST */ #define WM8400_L12MNBST_SHIFT 4 /* L12MNBST */ #define WM8400_L12MNBST_WIDTH 1 /* L12MNBST */ #define WM8400_LDBVOL_MASK 0x0007 /* LDBVOL - [2:0] */ #define WM8400_LDBVOL_SHIFT 0 /* LDBVOL - [2:0] */ #define WM8400_LDBVOL_WIDTH 3 /* LDBVOL - [2:0] */ /* * R42 (0x2A) - Input Mixer4 */ #define WM8400_R34MNB 0x0100 /* R34MNB */ #define WM8400_R34MNB_MASK 0x0100 /* R34MNB */ #define WM8400_R34MNB_SHIFT 8 /* R34MNB */ #define WM8400_R34MNB_WIDTH 1 /* R34MNB */ #define WM8400_R34MNBST 0x0080 /* R34MNBST */ #define WM8400_R34MNBST_MASK 0x0080 /* R34MNBST */ #define WM8400_R34MNBST_SHIFT 7 /* R34MNBST */ #define WM8400_R34MNBST_WIDTH 1 /* R34MNBST */ #define WM8400_R12MNB 0x0020 /* R12MNB */ #define WM8400_R12MNB_MASK 0x0020 /* R12MNB */ #define WM8400_R12MNB_SHIFT 5 /* R12MNB */ #define WM8400_R12MNB_WIDTH 1 /* R12MNB */ #define WM8400_R12MNBST 0x0010 /* R12MNBST */ #define WM8400_R12MNBST_MASK 0x0010 /* R12MNBST */ #define WM8400_R12MNBST_SHIFT 4 /* R12MNBST */ #define WM8400_R12MNBST_WIDTH 1 /* R12MNBST */ #define WM8400_RDBVOL_MASK 0x0007 /* RDBVOL - [2:0] */ #define WM8400_RDBVOL_SHIFT 0 /* RDBVOL - [2:0] */ #define WM8400_RDBVOL_WIDTH 3 /* RDBVOL - [2:0] */ /* * R43 (0x2B) - Input Mixer5 */ #define WM8400_LI2BVOL_MASK 0x01C0 /* LI2BVOL - [8:6] */ #define WM8400_LI2BVOL_SHIFT 6 /* LI2BVOL - [8:6] */ #define WM8400_LI2BVOL_WIDTH 3 /* LI2BVOL - [8:6] */ #define WM8400_LR4BVOL_MASK 0x0038 /* LR4BVOL - [5:3] */ #define WM8400_LR4BVOL_SHIFT 3 /* LR4BVOL - [5:3] */ #define WM8400_LR4BVOL_WIDTH 3 /* LR4BVOL - [5:3] */ #define WM8400_LL4BVOL_MASK 0x0007 /* LL4BVOL - [2:0] */ #define WM8400_LL4BVOL_SHIFT 0 /* LL4BVOL - [2:0] */ #define WM8400_LL4BVOL_WIDTH 3 /* LL4BVOL - [2:0] */ /* * R44 (0x2C) - Input Mixer6 */ #define WM8400_RI2BVOL_MASK 0x01C0 /* RI2BVOL - [8:6] */ #define WM8400_RI2BVOL_SHIFT 6 /* RI2BVOL - [8:6] */ #define WM8400_RI2BVOL_WIDTH 3 /* RI2BVOL - [8:6] */ #define WM8400_RL4BVOL_MASK 0x0038 /* RL4BVOL - [5:3] */ #define WM8400_RL4BVOL_SHIFT 3 /* RL4BVOL - [5:3] */ #define WM8400_RL4BVOL_WIDTH 3 /* RL4BVOL - [5:3] */ #define WM8400_RR4BVOL_MASK 0x0007 /* RR4BVOL - [2:0] */ #define WM8400_RR4BVOL_SHIFT 0 /* RR4BVOL - [2:0] */ #define WM8400_RR4BVOL_WIDTH 3 /* RR4BVOL - [2:0] */ /* * R45 (0x2D) - Output Mixer1 */ #define WM8400_LRBLO 0x0080 /* LRBLO */ #define WM8400_LRBLO_MASK 0x0080 /* LRBLO */ #define 
WM8400_LRBLO_SHIFT 7 /* LRBLO */ #define WM8400_LRBLO_WIDTH 1 /* LRBLO */ #define WM8400_LLBLO 0x0040 /* LLBLO */ #define WM8400_LLBLO_MASK 0x0040 /* LLBLO */ #define WM8400_LLBLO_SHIFT 6 /* LLBLO */ #define WM8400_LLBLO_WIDTH 1 /* LLBLO */ #define WM8400_LRI3LO 0x0020 /* LRI3LO */ #define WM8400_LRI3LO_MASK 0x0020 /* LRI3LO */ #define WM8400_LRI3LO_SHIFT 5 /* LRI3LO */ #define WM8400_LRI3LO_WIDTH 1 /* LRI3LO */ #define WM8400_LLI3LO 0x0010 /* LLI3LO */ #define WM8400_LLI3LO_MASK 0x0010 /* LLI3LO */ #define WM8400_LLI3LO_SHIFT 4 /* LLI3LO */ #define WM8400_LLI3LO_WIDTH 1 /* LLI3LO */ #define WM8400_LR12LO 0x0008 /* LR12LO */ #define WM8400_LR12LO_MASK 0x0008 /* LR12LO */ #define WM8400_LR12LO_SHIFT 3 /* LR12LO */ #define WM8400_LR12LO_WIDTH 1 /* LR12LO */ #define WM8400_LL12LO 0x0004 /* LL12LO */ #define WM8400_LL12LO_MASK 0x0004 /* LL12LO */ #define WM8400_LL12LO_SHIFT 2 /* LL12LO */ #define WM8400_LL12LO_WIDTH 1 /* LL12LO */ #define WM8400_LDLO 0x0001 /* LDLO */ #define WM8400_LDLO_MASK 0x0001 /* LDLO */ #define WM8400_LDLO_SHIFT 0 /* LDLO */ #define WM8400_LDLO_WIDTH 1 /* LDLO */ /* * R46 (0x2E) - Output Mixer2 */ #define WM8400_RLBRO 0x0080 /* RLBRO */ #define WM8400_RLBRO_MASK 0x0080 /* RLBRO */ #define WM8400_RLBRO_SHIFT 7 /* RLBRO */ #define WM8400_RLBRO_WIDTH 1 /* RLBRO */ #define WM8400_RRBRO 0x0040 /* RRBRO */ #define WM8400_RRBRO_MASK 0x0040 /* RRBRO */ #define WM8400_RRBRO_SHIFT 6 /* RRBRO */ #define WM8400_RRBRO_WIDTH 1 /* RRBRO */ #define WM8400_RLI3RO 0x0020 /* RLI3RO */ #define WM8400_RLI3RO_MASK 0x0020 /* RLI3RO */ #define WM8400_RLI3RO_SHIFT 5 /* RLI3RO */ #define WM8400_RLI3RO_WIDTH 1 /* RLI3RO */ #define WM8400_RRI3RO 0x0010 /* RRI3RO */ #define WM8400_RRI3RO_MASK 0x0010 /* RRI3RO */ #define WM8400_RRI3RO_SHIFT 4 /* RRI3RO */ #define WM8400_RRI3RO_WIDTH 1 /* RRI3RO */ #define WM8400_RL12RO 0x0008 /* RL12RO */ #define WM8400_RL12RO_MASK 0x0008 /* RL12RO */ #define WM8400_RL12RO_SHIFT 3 /* RL12RO */ #define WM8400_RL12RO_WIDTH 1 /* RL12RO */ #define WM8400_RR12RO 0x0004 /* RR12RO */ #define WM8400_RR12RO_MASK 0x0004 /* RR12RO */ #define WM8400_RR12RO_SHIFT 2 /* RR12RO */ #define WM8400_RR12RO_WIDTH 1 /* RR12RO */ #define WM8400_RDRO 0x0001 /* RDRO */ #define WM8400_RDRO_MASK 0x0001 /* RDRO */ #define WM8400_RDRO_SHIFT 0 /* RDRO */ #define WM8400_RDRO_WIDTH 1 /* RDRO */ /* * R47 (0x2F) - Output Mixer3 */ #define WM8400_LLI3LOVOL_MASK 0x01C0 /* LLI3LOVOL - [8:6] */ #define WM8400_LLI3LOVOL_SHIFT 6 /* LLI3LOVOL - [8:6] */ #define WM8400_LLI3LOVOL_WIDTH 3 /* LLI3LOVOL - [8:6] */ #define WM8400_LR12LOVOL_MASK 0x0038 /* LR12LOVOL - [5:3] */ #define WM8400_LR12LOVOL_SHIFT 3 /* LR12LOVOL - [5:3] */ #define WM8400_LR12LOVOL_WIDTH 3 /* LR12LOVOL - [5:3] */ #define WM8400_LL12LOVOL_MASK 0x0007 /* LL12LOVOL - [2:0] */ #define WM8400_LL12LOVOL_SHIFT 0 /* LL12LOVOL - [2:0] */ #define WM8400_LL12LOVOL_WIDTH 3 /* LL12LOVOL - [2:0] */ /* * R48 (0x30) - Output Mixer4 */ #define WM8400_RRI3ROVOL_MASK 0x01C0 /* RRI3ROVOL - [8:6] */ #define WM8400_RRI3ROVOL_SHIFT 6 /* RRI3ROVOL - [8:6] */ #define WM8400_RRI3ROVOL_WIDTH 3 /* RRI3ROVOL - [8:6] */ #define WM8400_RL12ROVOL_MASK 0x0038 /* RL12ROVOL - [5:3] */ #define WM8400_RL12ROVOL_SHIFT 3 /* RL12ROVOL - [5:3] */ #define WM8400_RL12ROVOL_WIDTH 3 /* RL12ROVOL - [5:3] */ #define WM8400_RR12ROVOL_MASK 0x0007 /* RR12ROVOL - [2:0] */ #define WM8400_RR12ROVOL_SHIFT 0 /* RR12ROVOL - [2:0] */ #define WM8400_RR12ROVOL_WIDTH 3 /* RR12ROVOL - [2:0] */ /* * R49 (0x31) - Output Mixer5 */ #define WM8400_LRI3LOVOL_MASK 0x01C0 /* LRI3LOVOL - [8:6] */ #define 
WM8400_LRI3LOVOL_SHIFT 6 /* LRI3LOVOL - [8:6] */ #define WM8400_LRI3LOVOL_WIDTH 3 /* LRI3LOVOL - [8:6] */ #define WM8400_LRBLOVOL_MASK 0x0038 /* LRBLOVOL - [5:3] */ #define WM8400_LRBLOVOL_SHIFT 3 /* LRBLOVOL - [5:3] */ #define WM8400_LRBLOVOL_WIDTH 3 /* LRBLOVOL - [5:3] */ #define WM8400_LLBLOVOL_MASK 0x0007 /* LLBLOVOL - [2:0] */ #define WM8400_LLBLOVOL_SHIFT 0 /* LLBLOVOL - [2:0] */ #define WM8400_LLBLOVOL_WIDTH 3 /* LLBLOVOL - [2:0] */ /* * R50 (0x32) - Output Mixer6 */ #define WM8400_RLI3ROVOL_MASK 0x01C0 /* RLI3ROVOL - [8:6] */ #define WM8400_RLI3ROVOL_SHIFT 6 /* RLI3ROVOL - [8:6] */ #define WM8400_RLI3ROVOL_WIDTH 3 /* RLI3ROVOL - [8:6] */ #define WM8400_RLBROVOL_MASK 0x0038 /* RLBROVOL - [5:3] */ #define WM8400_RLBROVOL_SHIFT 3 /* RLBROVOL - [5:3] */ #define WM8400_RLBROVOL_WIDTH 3 /* RLBROVOL - [5:3] */ #define WM8400_RRBROVOL_MASK 0x0007 /* RRBROVOL - [2:0] */ #define WM8400_RRBROVOL_SHIFT 0 /* RRBROVOL - [2:0] */ #define WM8400_RRBROVOL_WIDTH 3 /* RRBROVOL - [2:0] */ /* * R51 (0x33) - Out3/4 Mixer */ #define WM8400_VSEL_MASK 0x0180 /* VSEL - [8:7] */ #define WM8400_VSEL_SHIFT 7 /* VSEL - [8:7] */ #define WM8400_VSEL_WIDTH 2 /* VSEL - [8:7] */ #define WM8400_LI4O3 0x0020 /* LI4O3 */ #define WM8400_LI4O3_MASK 0x0020 /* LI4O3 */ #define WM8400_LI4O3_SHIFT 5 /* LI4O3 */ #define WM8400_LI4O3_WIDTH 1 /* LI4O3 */ #define WM8400_LPGAO3 0x0010 /* LPGAO3 */ #define WM8400_LPGAO3_MASK 0x0010 /* LPGAO3 */ #define WM8400_LPGAO3_SHIFT 4 /* LPGAO3 */ #define WM8400_LPGAO3_WIDTH 1 /* LPGAO3 */ #define WM8400_RI4O4 0x0002 /* RI4O4 */ #define WM8400_RI4O4_MASK 0x0002 /* RI4O4 */ #define WM8400_RI4O4_SHIFT 1 /* RI4O4 */ #define WM8400_RI4O4_WIDTH 1 /* RI4O4 */ #define WM8400_RPGAO4 0x0001 /* RPGAO4 */ #define WM8400_RPGAO4_MASK 0x0001 /* RPGAO4 */ #define WM8400_RPGAO4_SHIFT 0 /* RPGAO4 */ #define WM8400_RPGAO4_WIDTH 1 /* RPGAO4 */ /* * R52 (0x34) - Line Mixer1 */ #define WM8400_LLOPGALON 0x0040 /* LLOPGALON */ #define WM8400_LLOPGALON_MASK 0x0040 /* LLOPGALON */ #define WM8400_LLOPGALON_SHIFT 6 /* LLOPGALON */ #define WM8400_LLOPGALON_WIDTH 1 /* LLOPGALON */ #define WM8400_LROPGALON 0x0020 /* LROPGALON */ #define WM8400_LROPGALON_MASK 0x0020 /* LROPGALON */ #define WM8400_LROPGALON_SHIFT 5 /* LROPGALON */ #define WM8400_LROPGALON_WIDTH 1 /* LROPGALON */ #define WM8400_LOPLON 0x0010 /* LOPLON */ #define WM8400_LOPLON_MASK 0x0010 /* LOPLON */ #define WM8400_LOPLON_SHIFT 4 /* LOPLON */ #define WM8400_LOPLON_WIDTH 1 /* LOPLON */ #define WM8400_LR12LOP 0x0004 /* LR12LOP */ #define WM8400_LR12LOP_MASK 0x0004 /* LR12LOP */ #define WM8400_LR12LOP_SHIFT 2 /* LR12LOP */ #define WM8400_LR12LOP_WIDTH 1 /* LR12LOP */ #define WM8400_LL12LOP 0x0002 /* LL12LOP */ #define WM8400_LL12LOP_MASK 0x0002 /* LL12LOP */ #define WM8400_LL12LOP_SHIFT 1 /* LL12LOP */ #define WM8400_LL12LOP_WIDTH 1 /* LL12LOP */ #define WM8400_LLOPGALOP 0x0001 /* LLOPGALOP */ #define WM8400_LLOPGALOP_MASK 0x0001 /* LLOPGALOP */ #define WM8400_LLOPGALOP_SHIFT 0 /* LLOPGALOP */ #define WM8400_LLOPGALOP_WIDTH 1 /* LLOPGALOP */ /* * R53 (0x35) - Line Mixer2 */ #define WM8400_RROPGARON 0x0040 /* RROPGARON */ #define WM8400_RROPGARON_MASK 0x0040 /* RROPGARON */ #define WM8400_RROPGARON_SHIFT 6 /* RROPGARON */ #define WM8400_RROPGARON_WIDTH 1 /* RROPGARON */ #define WM8400_RLOPGARON 0x0020 /* RLOPGARON */ #define WM8400_RLOPGARON_MASK 0x0020 /* RLOPGARON */ #define WM8400_RLOPGARON_SHIFT 5 /* RLOPGARON */ #define WM8400_RLOPGARON_WIDTH 1 /* RLOPGARON */ #define WM8400_ROPRON 0x0010 /* ROPRON */ #define WM8400_ROPRON_MASK 0x0010 /* ROPRON */ #define 
WM8400_ROPRON_SHIFT 4 /* ROPRON */ #define WM8400_ROPRON_WIDTH 1 /* ROPRON */ #define WM8400_RL12ROP 0x0004 /* RL12ROP */ #define WM8400_RL12ROP_MASK 0x0004 /* RL12ROP */ #define WM8400_RL12ROP_SHIFT 2 /* RL12ROP */ #define WM8400_RL12ROP_WIDTH 1 /* RL12ROP */ #define WM8400_RR12ROP 0x0002 /* RR12ROP */ #define WM8400_RR12ROP_MASK 0x0002 /* RR12ROP */ #define WM8400_RR12ROP_SHIFT 1 /* RR12ROP */ #define WM8400_RR12ROP_WIDTH 1 /* RR12ROP */ #define WM8400_RROPGAROP 0x0001 /* RROPGAROP */ #define WM8400_RROPGAROP_MASK 0x0001 /* RROPGAROP */ #define WM8400_RROPGAROP_SHIFT 0 /* RROPGAROP */ #define WM8400_RROPGAROP_WIDTH 1 /* RROPGAROP */ /* * R54 (0x36) - Speaker Mixer */ #define WM8400_LB2SPK 0x0080 /* LB2SPK */ #define WM8400_LB2SPK_MASK 0x0080 /* LB2SPK */ #define WM8400_LB2SPK_SHIFT 7 /* LB2SPK */ #define WM8400_LB2SPK_WIDTH 1 /* LB2SPK */ #define WM8400_RB2SPK 0x0040 /* RB2SPK */ #define WM8400_RB2SPK_MASK 0x0040 /* RB2SPK */ #define WM8400_RB2SPK_SHIFT 6 /* RB2SPK */ #define WM8400_RB2SPK_WIDTH 1 /* RB2SPK */ #define WM8400_LI2SPK 0x0020 /* LI2SPK */ #define WM8400_LI2SPK_MASK 0x0020 /* LI2SPK */ #define WM8400_LI2SPK_SHIFT 5 /* LI2SPK */ #define WM8400_LI2SPK_WIDTH 1 /* LI2SPK */ #define WM8400_RI2SPK 0x0010 /* RI2SPK */ #define WM8400_RI2SPK_MASK 0x0010 /* RI2SPK */ #define WM8400_RI2SPK_SHIFT 4 /* RI2SPK */ #define WM8400_RI2SPK_WIDTH 1 /* RI2SPK */ #define WM8400_LOPGASPK 0x0008 /* LOPGASPK */ #define WM8400_LOPGASPK_MASK 0x0008 /* LOPGASPK */ #define WM8400_LOPGASPK_SHIFT 3 /* LOPGASPK */ #define WM8400_LOPGASPK_WIDTH 1 /* LOPGASPK */ #define WM8400_ROPGASPK 0x0004 /* ROPGASPK */ #define WM8400_ROPGASPK_MASK 0x0004 /* ROPGASPK */ #define WM8400_ROPGASPK_SHIFT 2 /* ROPGASPK */ #define WM8400_ROPGASPK_WIDTH 1 /* ROPGASPK */ #define WM8400_LDSPK 0x0002 /* LDSPK */ #define WM8400_LDSPK_MASK 0x0002 /* LDSPK */ #define WM8400_LDSPK_SHIFT 1 /* LDSPK */ #define WM8400_LDSPK_WIDTH 1 /* LDSPK */ #define WM8400_RDSPK 0x0001 /* RDSPK */ #define WM8400_RDSPK_MASK 0x0001 /* RDSPK */ #define WM8400_RDSPK_SHIFT 0 /* RDSPK */ #define WM8400_RDSPK_WIDTH 1 /* RDSPK */ /* * R55 (0x37) - Additional Control */ #define WM8400_VROI 0x0001 /* VROI */ #define WM8400_VROI_MASK 0x0001 /* VROI */ #define WM8400_VROI_SHIFT 0 /* VROI */ #define WM8400_VROI_WIDTH 1 /* VROI */ /* * R56 (0x38) - AntiPOP1 */ #define WM8400_DIS_LLINE 0x0020 /* DIS_LLINE */ #define WM8400_DIS_LLINE_MASK 0x0020 /* DIS_LLINE */ #define WM8400_DIS_LLINE_SHIFT 5 /* DIS_LLINE */ #define WM8400_DIS_LLINE_WIDTH 1 /* DIS_LLINE */ #define WM8400_DIS_RLINE 0x0010 /* DIS_RLINE */ #define WM8400_DIS_RLINE_MASK 0x0010 /* DIS_RLINE */ #define WM8400_DIS_RLINE_SHIFT 4 /* DIS_RLINE */ #define WM8400_DIS_RLINE_WIDTH 1 /* DIS_RLINE */ #define WM8400_DIS_OUT3 0x0008 /* DIS_OUT3 */ #define WM8400_DIS_OUT3_MASK 0x0008 /* DIS_OUT3 */ #define WM8400_DIS_OUT3_SHIFT 3 /* DIS_OUT3 */ #define WM8400_DIS_OUT3_WIDTH 1 /* DIS_OUT3 */ #define WM8400_DIS_OUT4 0x0004 /* DIS_OUT4 */ #define WM8400_DIS_OUT4_MASK 0x0004 /* DIS_OUT4 */ #define WM8400_DIS_OUT4_SHIFT 2 /* DIS_OUT4 */ #define WM8400_DIS_OUT4_WIDTH 1 /* DIS_OUT4 */ #define WM8400_DIS_LOUT 0x0002 /* DIS_LOUT */ #define WM8400_DIS_LOUT_MASK 0x0002 /* DIS_LOUT */ #define WM8400_DIS_LOUT_SHIFT 1 /* DIS_LOUT */ #define WM8400_DIS_LOUT_WIDTH 1 /* DIS_LOUT */ #define WM8400_DIS_ROUT 0x0001 /* DIS_ROUT */ #define WM8400_DIS_ROUT_MASK 0x0001 /* DIS_ROUT */ #define WM8400_DIS_ROUT_SHIFT 0 /* DIS_ROUT */ #define WM8400_DIS_ROUT_WIDTH 1 /* DIS_ROUT */ /* * R57 (0x39) - AntiPOP2 */ #define WM8400_SOFTST 0x0040 /* SOFTST 
*/ #define WM8400_SOFTST_MASK 0x0040 /* SOFTST */ #define WM8400_SOFTST_SHIFT 6 /* SOFTST */ #define WM8400_SOFTST_WIDTH 1 /* SOFTST */ #define WM8400_BUFIOEN 0x0008 /* BUFIOEN */ #define WM8400_BUFIOEN_MASK 0x0008 /* BUFIOEN */ #define WM8400_BUFIOEN_SHIFT 3 /* BUFIOEN */ #define WM8400_BUFIOEN_WIDTH 1 /* BUFIOEN */ #define WM8400_BUFDCOPEN 0x0004 /* BUFDCOPEN */ #define WM8400_BUFDCOPEN_MASK 0x0004 /* BUFDCOPEN */ #define WM8400_BUFDCOPEN_SHIFT 2 /* BUFDCOPEN */ #define WM8400_BUFDCOPEN_WIDTH 1 /* BUFDCOPEN */ #define WM8400_POBCTRL 0x0002 /* POBCTRL */ #define WM8400_POBCTRL_MASK 0x0002 /* POBCTRL */ #define WM8400_POBCTRL_SHIFT 1 /* POBCTRL */ #define WM8400_POBCTRL_WIDTH 1 /* POBCTRL */ #define WM8400_VMIDTOG 0x0001 /* VMIDTOG */ #define WM8400_VMIDTOG_MASK 0x0001 /* VMIDTOG */ #define WM8400_VMIDTOG_SHIFT 0 /* VMIDTOG */ #define WM8400_VMIDTOG_WIDTH 1 /* VMIDTOG */ /* * R58 (0x3A) - MICBIAS */ #define WM8400_MCDSCTH_MASK 0x00C0 /* MCDSCTH - [7:6] */ #define WM8400_MCDSCTH_SHIFT 6 /* MCDSCTH - [7:6] */ #define WM8400_MCDSCTH_WIDTH 2 /* MCDSCTH - [7:6] */ #define WM8400_MCDTHR_MASK 0x0038 /* MCDTHR - [5:3] */ #define WM8400_MCDTHR_SHIFT 3 /* MCDTHR - [5:3] */ #define WM8400_MCDTHR_WIDTH 3 /* MCDTHR - [5:3] */ #define WM8400_MCD 0x0004 /* MCD */ #define WM8400_MCD_MASK 0x0004 /* MCD */ #define WM8400_MCD_SHIFT 2 /* MCD */ #define WM8400_MCD_WIDTH 1 /* MCD */ #define WM8400_MBSEL 0x0001 /* MBSEL */ #define WM8400_MBSEL_MASK 0x0001 /* MBSEL */ #define WM8400_MBSEL_SHIFT 0 /* MBSEL */ #define WM8400_MBSEL_WIDTH 1 /* MBSEL */ /* * R60 (0x3C) - FLL Control 1 */ #define WM8400_FLL_REF_FREQ 0x1000 /* FLL_REF_FREQ */ #define WM8400_FLL_REF_FREQ_MASK 0x1000 /* FLL_REF_FREQ */ #define WM8400_FLL_REF_FREQ_SHIFT 12 /* FLL_REF_FREQ */ #define WM8400_FLL_REF_FREQ_WIDTH 1 /* FLL_REF_FREQ */ #define WM8400_FLL_CLK_SRC_MASK 0x0C00 /* FLL_CLK_SRC - [11:10] */ #define WM8400_FLL_CLK_SRC_SHIFT 10 /* FLL_CLK_SRC - [11:10] */ #define WM8400_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [11:10] */ #define WM8400_FLL_FRAC 0x0200 /* FLL_FRAC */ #define WM8400_FLL_FRAC_MASK 0x0200 /* FLL_FRAC */ #define WM8400_FLL_FRAC_SHIFT 9 /* FLL_FRAC */ #define WM8400_FLL_FRAC_WIDTH 1 /* FLL_FRAC */ #define WM8400_FLL_OSC_ENA 0x0100 /* FLL_OSC_ENA */ #define WM8400_FLL_OSC_ENA_MASK 0x0100 /* FLL_OSC_ENA */ #define WM8400_FLL_OSC_ENA_SHIFT 8 /* FLL_OSC_ENA */ #define WM8400_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */ #define WM8400_FLL_CTRL_RATE_MASK 0x00E0 /* FLL_CTRL_RATE - [7:5] */ #define WM8400_FLL_CTRL_RATE_SHIFT 5 /* FLL_CTRL_RATE - [7:5] */ #define WM8400_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [7:5] */ #define WM8400_FLL_FRATIO_MASK 0x001F /* FLL_FRATIO - [4:0] */ #define WM8400_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [4:0] */ #define WM8400_FLL_FRATIO_WIDTH 5 /* FLL_FRATIO - [4:0] */ /* * R61 (0x3D) - FLL Control 2 */ #define WM8400_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */ #define WM8400_FLL_K_SHIFT 0 /* FLL_K - [15:0] */ #define WM8400_FLL_K_WIDTH 16 /* FLL_K - [15:0] */ /* * R62 (0x3E) - FLL Control 3 */ #define WM8400_FLL_N_MASK 0x03FF /* FLL_N - [9:0] */ #define WM8400_FLL_N_SHIFT 0 /* FLL_N - [9:0] */ #define WM8400_FLL_N_WIDTH 10 /* FLL_N - [9:0] */ /* * R63 (0x3F) - FLL Control 4 */ #define WM8400_FLL_TRK_GAIN_MASK 0x0078 /* FLL_TRK_GAIN - [6:3] */ #define WM8400_FLL_TRK_GAIN_SHIFT 3 /* FLL_TRK_GAIN - [6:3] */ #define WM8400_FLL_TRK_GAIN_WIDTH 4 /* FLL_TRK_GAIN - [6:3] */ #define WM8400_FLL_OUTDIV_MASK 0x0007 /* FLL_OUTDIV - [2:0] */ #define WM8400_FLL_OUTDIV_SHIFT 0 /* FLL_OUTDIV - [2:0] */ #define WM8400_FLL_OUTDIV_WIDTH 3 /* 
FLL_OUTDIV - [2:0] */ struct wm8400; void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400); #endif mfd/da9063/registers.h 0000644 00000103604 14722070374 0010412 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Registers definition for DA9063 modules * * Copyright 2012 Dialog Semiconductor Ltd. * * Author: Michal Hajduk, Dialog Semiconductor * Author: Krystian Garbaciak, Dialog Semiconductor */ #ifndef _DA9063_REG_H #define _DA9063_REG_H #define DA9063_I2C_PAGE_SEL_SHIFT 1 #define DA9063_EVENT_REG_NUM 4 /* Page selection I2C or SPI always in the begining of any page. */ /* Page 0 : I2C access 0x000 - 0x0FF SPI access 0x000 - 0x07F */ /* Page 1 : SPI access 0x080 - 0x0FF */ /* Page 2 : I2C access 0x100 - 0x1FF SPI access 0x100 - 0x17F */ /* Page 3 : SPI access 0x180 - 0x1FF */ #define DA9063_REG_PAGE_CON 0x00 /* System Control and Event Registers */ #define DA9063_REG_STATUS_A 0x01 #define DA9063_REG_STATUS_B 0x02 #define DA9063_REG_STATUS_C 0x03 #define DA9063_REG_STATUS_D 0x04 #define DA9063_REG_FAULT_LOG 0x05 #define DA9063_REG_EVENT_A 0x06 #define DA9063_REG_EVENT_B 0x07 #define DA9063_REG_EVENT_C 0x08 #define DA9063_REG_EVENT_D 0x09 #define DA9063_REG_IRQ_MASK_A 0x0A #define DA9063_REG_IRQ_MASK_B 0x0B #define DA9063_REG_IRQ_MASK_C 0x0C #define DA9063_REG_IRQ_MASK_D 0x0D #define DA9063_REG_CONTROL_A 0x0E #define DA9063_REG_CONTROL_B 0x0F #define DA9063_REG_CONTROL_C 0x10 #define DA9063_REG_CONTROL_D 0x11 #define DA9063_REG_CONTROL_E 0x12 #define DA9063_REG_CONTROL_F 0x13 #define DA9063_REG_PD_DIS 0x14 /* GPIO Control Registers */ #define DA9063_REG_GPIO_0_1 0x15 #define DA9063_REG_GPIO_2_3 0x16 #define DA9063_REG_GPIO_4_5 0x17 #define DA9063_REG_GPIO_6_7 0x18 #define DA9063_REG_GPIO_8_9 0x19 #define DA9063_REG_GPIO_10_11 0x1A #define DA9063_REG_GPIO_12_13 0x1B #define DA9063_REG_GPIO_14_15 0x1C #define DA9063_REG_GPIO_MODE0_7 0x1D #define DA9063_REG_GPIO_MODE8_15 0x1E #define DA9063_REG_SWITCH_CONT 0x1F /* Regulator Control Registers */ #define DA9063_REG_BCORE2_CONT 0x20 #define DA9063_REG_BCORE1_CONT 0x21 #define DA9063_REG_BPRO_CONT 0x22 #define DA9063_REG_BMEM_CONT 0x23 #define DA9063_REG_BIO_CONT 0x24 #define DA9063_REG_BPERI_CONT 0x25 #define DA9063_REG_LDO1_CONT 0x26 #define DA9063_REG_LDO2_CONT 0x27 #define DA9063_REG_LDO3_CONT 0x28 #define DA9063_REG_LDO4_CONT 0x29 #define DA9063_REG_LDO5_CONT 0x2A #define DA9063_REG_LDO6_CONT 0x2B #define DA9063_REG_LDO7_CONT 0x2C #define DA9063_REG_LDO8_CONT 0x2D #define DA9063_REG_LDO9_CONT 0x2E #define DA9063_REG_LDO10_CONT 0x2F #define DA9063_REG_LDO11_CONT 0x30 #define DA9063_REG_SUPPLIES 0x31 #define DA9063_REG_DVC_1 0x32 #define DA9063_REG_DVC_2 0x33 /* GP-ADC Control Registers */ #define DA9063_REG_ADC_MAN 0x34 #define DA9063_REG_ADC_CONT 0x35 #define DA9063_REG_VSYS_MON 0x36 #define DA9063_REG_ADC_RES_L 0x37 #define DA9063_REG_ADC_RES_H 0x38 #define DA9063_REG_VSYS_RES 0x39 #define DA9063_REG_ADCIN1_RES 0x3A #define DA9063_REG_ADCIN2_RES 0x3B #define DA9063_REG_ADCIN3_RES 0x3C #define DA9063_REG_MON_A8_RES 0x3D #define DA9063_REG_MON_A9_RES 0x3E #define DA9063_REG_MON_A10_RES 0x3F /* RTC Calendar and Alarm Registers */ #define DA9063_REG_COUNT_S 0x40 #define DA9063_REG_COUNT_MI 0x41 #define DA9063_REG_COUNT_H 0x42 #define DA9063_REG_COUNT_D 0x43 #define DA9063_REG_COUNT_MO 0x44 #define DA9063_REG_COUNT_Y 0x45 #define DA9063_AD_REG_ALARM_MI 0x46 #define DA9063_AD_REG_ALARM_H 0x47 #define DA9063_AD_REG_ALARM_D 0x48 #define DA9063_AD_REG_ALARM_MO 0x49 #define DA9063_AD_REG_ALARM_Y 0x4A #define DA9063_AD_REG_SECOND_A 0x4B 
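/*
 * Editor's note: the paged register map described above means that any
 * access to an address at or above 0x100 must first switch pages through
 * DA9063_REG_PAGE_CON. A minimal sketch of an explicit page select,
 * assuming a regmap handle as used by the da9063 core driver; the helper
 * name da9063_select_page() is illustrative only (the in-tree I2C driver
 * lets regmap_range_cfg do this automatically), and the page-field macros
 * DA9063_REG_PAGE_MASK / DA9063_REG_PAGE0 / DA9063_REG_PAGE2 are defined
 * further below in this header.
 */
#include <linux/regmap.h>

static inline int da9063_select_page(struct regmap *regmap, bool high_page)
{
	/*
	 * Write the page-select field of PAGE_CON: DA9063_REG_PAGE2 reaches
	 * the 0x100-0x1FF range over I2C, DA9063_REG_PAGE0 returns to the
	 * base page.
	 */
	return regmap_update_bits(regmap, DA9063_REG_PAGE_CON,
				  DA9063_REG_PAGE_MASK,
				  high_page ? DA9063_REG_PAGE2
					    : DA9063_REG_PAGE0);
}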
#define DA9063_AD_REG_SECOND_B 0x4C #define DA9063_AD_REG_SECOND_C 0x4D #define DA9063_AD_REG_SECOND_D 0x4E #define DA9063_BB_REG_ALARM_S 0x46 #define DA9063_BB_REG_ALARM_MI 0x47 #define DA9063_BB_REG_ALARM_H 0x48 #define DA9063_BB_REG_ALARM_D 0x49 #define DA9063_BB_REG_ALARM_MO 0x4A #define DA9063_BB_REG_ALARM_Y 0x4B #define DA9063_BB_REG_SECOND_A 0x4C #define DA9063_BB_REG_SECOND_B 0x4D #define DA9063_BB_REG_SECOND_C 0x4E #define DA9063_BB_REG_SECOND_D 0x4F /* Sequencer Control Registers */ #define DA9063_REG_SEQ 0x81 #define DA9063_REG_SEQ_TIMER 0x82 #define DA9063_REG_ID_2_1 0x83 #define DA9063_REG_ID_4_3 0x84 #define DA9063_REG_ID_6_5 0x85 #define DA9063_REG_ID_8_7 0x86 #define DA9063_REG_ID_10_9 0x87 #define DA9063_REG_ID_12_11 0x88 #define DA9063_REG_ID_14_13 0x89 #define DA9063_REG_ID_16_15 0x8A #define DA9063_REG_ID_18_17 0x8B #define DA9063_REG_ID_20_19 0x8C #define DA9063_REG_ID_22_21 0x8D #define DA9063_REG_ID_24_23 0x8E #define DA9063_REG_ID_26_25 0x8F #define DA9063_REG_ID_28_27 0x90 #define DA9063_REG_ID_30_29 0x91 #define DA9063_REG_ID_32_31 0x92 #define DA9063_REG_SEQ_A 0x95 #define DA9063_REG_SEQ_B 0x96 #define DA9063_REG_WAIT 0x97 #define DA9063_REG_EN_32K 0x98 #define DA9063_REG_RESET 0x99 /* Regulator Setting Registers */ #define DA9063_REG_BUCK_ILIM_A 0x9A #define DA9063_REG_BUCK_ILIM_B 0x9B #define DA9063_REG_BUCK_ILIM_C 0x9C #define DA9063_REG_BCORE2_CFG 0x9D #define DA9063_REG_BCORE1_CFG 0x9E #define DA9063_REG_BPRO_CFG 0x9F #define DA9063_REG_BIO_CFG 0xA0 #define DA9063_REG_BMEM_CFG 0xA1 #define DA9063_REG_BPERI_CFG 0xA2 #define DA9063_REG_VBCORE2_A 0xA3 #define DA9063_REG_VBCORE1_A 0xA4 #define DA9063_REG_VBPRO_A 0xA5 #define DA9063_REG_VBMEM_A 0xA6 #define DA9063_REG_VBIO_A 0xA7 #define DA9063_REG_VBPERI_A 0xA8 #define DA9063_REG_VLDO1_A 0xA9 #define DA9063_REG_VLDO2_A 0xAA #define DA9063_REG_VLDO3_A 0xAB #define DA9063_REG_VLDO4_A 0xAC #define DA9063_REG_VLDO5_A 0xAD #define DA9063_REG_VLDO6_A 0xAE #define DA9063_REG_VLDO7_A 0xAF #define DA9063_REG_VLDO8_A 0xB0 #define DA9063_REG_VLDO9_A 0xB1 #define DA9063_REG_VLDO10_A 0xB2 #define DA9063_REG_VLDO11_A 0xB3 #define DA9063_REG_VBCORE2_B 0xB4 #define DA9063_REG_VBCORE1_B 0xB5 #define DA9063_REG_VBPRO_B 0xB6 #define DA9063_REG_VBMEM_B 0xB7 #define DA9063_REG_VBIO_B 0xB8 #define DA9063_REG_VBPERI_B 0xB9 #define DA9063_REG_VLDO1_B 0xBA #define DA9063_REG_VLDO2_B 0xBB #define DA9063_REG_VLDO3_B 0xBC #define DA9063_REG_VLDO4_B 0xBD #define DA9063_REG_VLDO5_B 0xBE #define DA9063_REG_VLDO6_B 0xBF #define DA9063_REG_VLDO7_B 0xC0 #define DA9063_REG_VLDO8_B 0xC1 #define DA9063_REG_VLDO9_B 0xC2 #define DA9063_REG_VLDO10_B 0xC3 #define DA9063_REG_VLDO11_B 0xC4 /* Backup Battery Charger Control Register */ #define DA9063_REG_BBAT_CONT 0xC5 /* GPIO PWM (LED) */ #define DA9063_REG_GPO11_LED 0xC6 #define DA9063_REG_GPO14_LED 0xC7 #define DA9063_REG_GPO15_LED 0xC8 /* GP-ADC Threshold Registers */ #define DA9063_REG_ADC_CFG 0xC9 #define DA9063_REG_AUTO1_HIGH 0xCA #define DA9063_REG_AUTO1_LOW 0xCB #define DA9063_REG_AUTO2_HIGH 0xCC #define DA9063_REG_AUTO2_LOW 0xCD #define DA9063_REG_AUTO3_HIGH 0xCE #define DA9063_REG_AUTO3_LOW 0xCF /* DA9063 Configuration registers */ /* OTP */ #define DA9063_REG_OTP_CONT 0x101 #define DA9063_REG_OTP_ADDR 0x102 #define DA9063_REG_OTP_DATA 0x103 /* Customer Trim and Configuration */ #define DA9063_REG_T_OFFSET 0x104 #define DA9063_REG_INTERFACE 0x105 #define DA9063_REG_CONFIG_A 0x106 #define DA9063_REG_CONFIG_B 0x107 #define DA9063_REG_CONFIG_C 0x108 #define DA9063_REG_CONFIG_D 0x109 #define 
DA9063_REG_CONFIG_E 0x10A #define DA9063_REG_CONFIG_F 0x10B #define DA9063_REG_CONFIG_G 0x10C #define DA9063_REG_CONFIG_H 0x10D #define DA9063_REG_CONFIG_I 0x10E #define DA9063_REG_CONFIG_J 0x10F #define DA9063_REG_CONFIG_K 0x110 #define DA9063_REG_CONFIG_L 0x111 #define DA9063_AD_REG_MON_REG_1 0x112 #define DA9063_AD_REG_MON_REG_2 0x113 #define DA9063_AD_REG_MON_REG_3 0x114 #define DA9063_AD_REG_MON_REG_4 0x115 #define DA9063_AD_REG_MON_REG_5 0x116 #define DA9063_AD_REG_MON_REG_6 0x117 #define DA9063_AD_REG_TRIM_CLDR 0x118 #define DA9063_AD_REG_GP_ID_0 0x119 #define DA9063_AD_REG_GP_ID_1 0x11A #define DA9063_AD_REG_GP_ID_2 0x11B #define DA9063_AD_REG_GP_ID_3 0x11C #define DA9063_AD_REG_GP_ID_4 0x11D #define DA9063_AD_REG_GP_ID_5 0x11E #define DA9063_AD_REG_GP_ID_6 0x11F #define DA9063_AD_REG_GP_ID_7 0x120 #define DA9063_AD_REG_GP_ID_8 0x121 #define DA9063_AD_REG_GP_ID_9 0x122 #define DA9063_AD_REG_GP_ID_10 0x123 #define DA9063_AD_REG_GP_ID_11 0x124 #define DA9063_AD_REG_GP_ID_12 0x125 #define DA9063_AD_REG_GP_ID_13 0x126 #define DA9063_AD_REG_GP_ID_14 0x127 #define DA9063_AD_REG_GP_ID_15 0x128 #define DA9063_AD_REG_GP_ID_16 0x129 #define DA9063_AD_REG_GP_ID_17 0x12A #define DA9063_AD_REG_GP_ID_18 0x12B #define DA9063_AD_REG_GP_ID_19 0x12C #define DA9063_BB_REG_CONFIG_M 0x112 #define DA9063_BB_REG_CONFIG_N 0x113 #define DA9063_BB_REG_MON_REG_1 0x114 #define DA9063_BB_REG_MON_REG_2 0x115 #define DA9063_BB_REG_MON_REG_3 0x116 #define DA9063_BB_REG_MON_REG_4 0x117 #define DA9063_BB_REG_MON_REG_5 0x11E #define DA9063_BB_REG_MON_REG_6 0x11F #define DA9063_BB_REG_TRIM_CLDR 0x120 /* General Purpose Registers */ #define DA9063_BB_REG_GP_ID_0 0x121 #define DA9063_BB_REG_GP_ID_1 0x122 #define DA9063_BB_REG_GP_ID_2 0x123 #define DA9063_BB_REG_GP_ID_3 0x124 #define DA9063_BB_REG_GP_ID_4 0x125 #define DA9063_BB_REG_GP_ID_5 0x126 #define DA9063_BB_REG_GP_ID_6 0x127 #define DA9063_BB_REG_GP_ID_7 0x128 #define DA9063_BB_REG_GP_ID_8 0x129 #define DA9063_BB_REG_GP_ID_9 0x12A #define DA9063_BB_REG_GP_ID_10 0x12B #define DA9063_BB_REG_GP_ID_11 0x12C #define DA9063_BB_REG_GP_ID_12 0x12D #define DA9063_BB_REG_GP_ID_13 0x12E #define DA9063_BB_REG_GP_ID_14 0x12F #define DA9063_BB_REG_GP_ID_15 0x130 #define DA9063_BB_REG_GP_ID_16 0x131 #define DA9063_BB_REG_GP_ID_17 0x132 #define DA9063_BB_REG_GP_ID_18 0x133 #define DA9063_BB_REG_GP_ID_19 0x134 /* Chip ID and variant */ #define DA9063_REG_CHIP_ID 0x181 #define DA9063_REG_CHIP_VARIANT 0x182 /* * PMIC registers bits */ /* DA9063_REG_PAGE_CON (addr=0x00) */ #define DA9063_PEG_PAGE_SHIFT 0 #define DA9063_REG_PAGE_MASK 0x07 #define DA9063_REG_PAGE0 0x00 #define DA9063_REG_PAGE2 0x02 #define DA9063_PAGE_WRITE_MODE 0x00 #define DA9063_REPEAT_WRITE_MODE 0x40 #define DA9063_PAGE_REVERT 0x80 /* DA9063_REG_STATUS_A (addr=0x01) */ #define DA9063_NONKEY 0x01 #define DA9063_WAKE 0x02 #define DA9063_DVC_BUSY 0x04 #define DA9063_COMP_1V2 0x08 /* DA9063_REG_STATUS_B (addr=0x02) */ #define DA9063_GPI0 0x01 #define DA9063_GPI1 0x02 #define DA9063_GPI2 0x04 #define DA9063_GPI3 0x08 #define DA9063_GPI4 0x10 #define DA9063_GPI5 0x20 #define DA9063_GPI6 0x40 #define DA9063_GPI7 0x80 /* DA9063_REG_STATUS_C (addr=0x03) */ #define DA9063_GPI8 0x01 #define DA9063_GPI9 0x02 #define DA9063_GPI10 0x04 #define DA9063_GPI11 0x08 #define DA9063_GPI12 0x10 #define DA9063_GPI13 0x20 #define DA9063_GPI14 0x40 #define DA9063_GPI15 0x80 /* DA9063_REG_STATUS_D (addr=0x04) */ #define DA9063_LDO3_LIM 0x08 #define DA9063_LDO4_LIM 0x10 #define DA9063_LDO7_LIM 0x20 #define DA9063_LDO8_LIM 0x40 #define 
DA9063_LDO11_LIM 0x80 /* DA9063_REG_FAULT_LOG (addr=0x05) */ #define DA9063_TWD_ERROR 0x01 #define DA9063_POR 0x02 #define DA9063_VDD_FAULT 0x04 #define DA9063_VDD_START 0x08 #define DA9063_TEMP_CRIT 0x10 #define DA9063_KEY_RESET 0x20 #define DA9063_NSHUTDOWN 0x40 #define DA9063_WAIT_SHUT 0x80 /* DA9063_REG_EVENT_A (addr=0x06) */ #define DA9063_E_NONKEY 0x01 #define DA9063_E_ALARM 0x02 #define DA9063_E_TICK 0x04 #define DA9063_E_ADC_RDY 0x08 #define DA9063_E_SEQ_RDY 0x10 #define DA9063_EVENTS_B 0x20 #define DA9063_EVENTS_C 0x40 #define DA9063_EVENTS_D 0x80 /* DA9063_REG_EVENT_B (addr=0x07) */ #define DA9063_E_WAKE 0x01 #define DA9063_E_TEMP 0x02 #define DA9063_E_COMP_1V2 0x04 #define DA9063_E_LDO_LIM 0x08 #define DA9063_E_REG_UVOV 0x10 #define DA9063_E_DVC_RDY 0x20 #define DA9063_E_VDD_MON 0x40 #define DA9063_E_VDD_WARN 0x80 /* DA9063_REG_EVENT_C (addr=0x08) */ #define DA9063_E_GPI0 0x01 #define DA9063_E_GPI1 0x02 #define DA9063_E_GPI2 0x04 #define DA9063_E_GPI3 0x08 #define DA9063_E_GPI4 0x10 #define DA9063_E_GPI5 0x20 #define DA9063_E_GPI6 0x40 #define DA9063_E_GPI7 0x80 /* DA9063_REG_EVENT_D (addr=0x09) */ #define DA9063_E_GPI8 0x01 #define DA9063_E_GPI9 0x02 #define DA9063_E_GPI10 0x04 #define DA9063_E_GPI11 0x08 #define DA9063_E_GPI12 0x10 #define DA9063_E_GPI13 0x20 #define DA9063_E_GPI14 0x40 #define DA9063_E_GPI15 0x80 /* DA9063_REG_IRQ_MASK_A (addr=0x0A) */ #define DA9063_M_ONKEY 0x01 #define DA9063_M_ALARM 0x02 #define DA9063_M_TICK 0x04 #define DA9063_M_ADC_RDY 0x08 #define DA9063_M_SEQ_RDY 0x10 /* DA9063_REG_IRQ_MASK_B (addr=0x0B) */ #define DA9063_M_WAKE 0x01 #define DA9063_M_TEMP 0x02 #define DA9063_M_COMP_1V2 0x04 #define DA9063_M_LDO_LIM 0x08 #define DA9063_M_UVOV 0x10 #define DA9063_M_DVC_RDY 0x20 #define DA9063_M_VDD_MON 0x40 #define DA9063_M_VDD_WARN 0x80 /* DA9063_REG_IRQ_MASK_C (addr=0x0C) */ #define DA9063_M_GPI0 0x01 #define DA9063_M_GPI1 0x02 #define DA9063_M_GPI2 0x04 #define DA9063_M_GPI3 0x08 #define DA9063_M_GPI4 0x10 #define DA9063_M_GPI5 0x20 #define DA9063_M_GPI6 0x40 #define DA9063_M_GPI7 0x80 /* DA9063_REG_IRQ_MASK_D (addr=0x0D) */ #define DA9063_M_GPI8 0x01 #define DA9063_M_GPI9 0x02 #define DA9063_M_GPI10 0x04 #define DA9063_M_GPI11 0x08 #define DA9063_M_GPI12 0x10 #define DA9063_M_GPI13 0x20 #define DA9063_M_GPI14 0x40 #define DA9063_M_GPI15 0x80 /* DA9063_REG_CONTROL_A (addr=0x0E) */ #define DA9063_SYSTEM_EN 0x01 #define DA9063_POWER_EN 0x02 #define DA9063_POWER1_EN 0x04 #define DA9063_STANDBY 0x08 #define DA9063_M_SYSTEM_EN 0x10 #define DA9063_M_POWER_EN 0x20 #define DA9063_M_POWER1_EN 0x40 #define DA9063_CP_EN 0x80 /* DA9063_REG_CONTROL_B (addr=0x0F) */ #define DA9063_CHG_SEL 0x01 #define DA9063_WATCHDOG_PD 0x02 #define DA9063_BB_RESET_BLINKING 0x04 #define DA9063_NRES_MODE 0x08 #define DA9063_NONKEY_LOCK 0x10 #define DA9063_BB_BUCK_SLOWSTART 0x80 /* DA9063_REG_CONTROL_C (addr=0x10) */ #define DA9063_DEBOUNCING_MASK 0x07 #define DA9063_DEBOUNCING_OFF 0x0 #define DA9063_DEBOUNCING_0MS1 0x1 #define DA9063_DEBOUNCING_1MS 0x2 #define DA9063_DEBOUNCING_10MS24 0x3 #define DA9063_DEBOUNCING_51MS2 0x4 #define DA9063_DEBOUNCING_256MS 0x5 #define DA9063_DEBOUNCING_512MS 0x6 #define DA9063_DEBOUNCING_1024MS 0x7 #define DA9063_AUTO_BOOT 0x08 #define DA9063_OTPREAD_EN 0x10 #define DA9063_SLEW_RATE_MASK 0x60 #define DA9063_SLEW_RATE_4US 0x00 #define DA9063_SLEW_RATE_3US 0x20 #define DA9063_SLEW_RATE_1US 0x40 #define DA9063_SLEW_RATE_0US5 0x60 #define DA9063_DEF_SUPPLY 0x80 /* DA9063_REG_CONTROL_D (addr=0x11) */ #define DA9063_TWDSCALE_MASK 0x07 #define 
DA9063_BLINK_FRQ_MASK 0x38 #define DA9063_BLINK_FRQ_OFF 0x00 #define DA9063_BLINK_FRQ_1S0 0x08 #define DA9063_BLINK_FRQ_2S0 0x10 #define DA9063_BLINK_FRQ_4S0 0x18 #define DA9063_BLINK_FRQ_0S18 0x20 #define DA9063_BLINK_FRQ_2S0_VDD 0x28 #define DA9063_BLINK_FRQ_4S0_VDD 0x30 #define DA9063_BLINK_FRQ_0S18_VDD 0x38 #define DA9063_BLINK_DUR_MASK 0xC0 #define DA9063_BLINK_DUR_10MS 0x00 #define DA9063_BLINK_DUR_20MS 0x40 #define DA9063_BLINK_DUR_40MS 0x80 #define DA9063_BLINK_DUR_20MSDBL 0xC0 /* DA9063_REG_CONTROL_E (addr=0x12) */ #define DA9063_RTC_MODE_PD 0x01 #define DA9063_RTC_MODE_SD 0x02 #define DA9063_RTC_EN 0x04 #define DA9063_ECO_MODE 0x08 #define DA9063_PM_FB1_PIN 0x10 #define DA9063_PM_FB2_PIN 0x20 #define DA9063_PM_FB3_PIN 0x40 #define DA9063_V_LOCK 0x80 /* DA9063_REG_CONTROL_F (addr=0x13) */ #define DA9063_WATCHDOG 0x01 #define DA9063_SHUTDOWN 0x02 #define DA9063_WAKE_UP 0x04 /* DA9063_REG_PD_DIS (addr=0x14) */ #define DA9063_GPI_DIS 0x01 #define DA9063_GPADC_PAUSE 0x02 #define DA9063_PMIF_DIS 0x04 #define DA9063_HS2WIRE_DIS 0x08 #define DA9063_BB_CLDR_PAUSE 0x10 #define DA9063_BBAT_DIS 0x20 #define DA9063_OUT_32K_PAUSE 0x40 #define DA9063_PMCONT_DIS 0x80 /* DA9063_REG_GPIO_0_1 (addr=0x15) */ #define DA9063_GPIO0_PIN_MASK 0x03 #define DA9063_GPIO0_PIN_ADCIN1 0x00 #define DA9063_GPIO0_PIN_GPI 0x01 #define DA9063_GPIO0_PIN_GPO_OD 0x02 #define DA9063_GPIO0_PIN_GPO 0x03 #define DA9063_GPIO0_TYPE 0x04 #define DA9063_GPIO0_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO0_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO0_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO0_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO0_NO_WAKEUP 0x08 #define DA9063_GPIO1_PIN_MASK 0x30 #define DA9063_GPIO1_PIN_ADCIN2_COMP 0x00 #define DA9063_GPIO1_PIN_GPI 0x10 #define DA9063_GPIO1_PIN_GPO_OD 0x20 #define DA9063_GPIO1_PIN_GPO 0x30 #define DA9063_GPIO1_TYPE 0x40 #define DA9063_GPIO1_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO1_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO1_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO1_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO1_NO_WAKEUP 0x80 /* DA9063_REG_GPIO_2_3 (addr=0x16) */ #define DA9063_GPIO2_PIN_MASK 0x03 #define DA9063_GPIO2_PIN_ADCIN3 0x00 #define DA9063_GPIO2_PIN_GPI 0x01 #define DA9063_GPIO2_PIN_GPO_PSS 0x02 #define DA9063_GPIO2_PIN_GPO 0x03 #define DA9063_GPIO2_TYPE 0x04 #define DA9063_GPIO2_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO2_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO2_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO2_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO2_NO_WAKEUP 0x08 #define DA9063_GPIO3_PIN_MASK 0x30 #define DA9063_GPIO3_PIN_CORE_SW_G 0x00 #define DA9063_GPIO3_PIN_GPI 0x10 #define DA9063_GPIO3_PIN_GPO_OD 0x20 #define DA9063_GPIO3_PIN_GPO 0x30 #define DA9063_GPIO3_TYPE 0x40 #define DA9063_GPIO3_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO3_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO3_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO3_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO3_NO_WAKEUP 0x80 /* DA9063_REG_GPIO_4_5 (addr=0x17) */ #define DA9063_GPIO4_PIN_MASK 0x03 #define DA9063_GPIO4_PIN_CORE_SW_S 0x00 #define DA9063_GPIO4_PIN_GPI 0x01 #define DA9063_GPIO4_PIN_GPO_OD 0x02 #define DA9063_GPIO4_PIN_GPO 0x03 #define DA9063_GPIO4_TYPE 0x04 #define DA9063_GPIO4_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO4_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO4_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO4_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO4_NO_WAKEUP 0x08 #define DA9063_GPIO5_PIN_MASK 0x30 #define DA9063_GPIO5_PIN_PERI_SW_G 0x00 #define DA9063_GPIO5_PIN_GPI 0x10 #define DA9063_GPIO5_PIN_GPO_OD 0x20 #define 
DA9063_GPIO5_PIN_GPO 0x30 #define DA9063_GPIO5_TYPE 0x40 #define DA9063_GPIO5_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO5_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO5_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO5_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO5_NO_WAKEUP 0x80 /* DA9063_REG_GPIO_6_7 (addr=0x18) */ #define DA9063_GPIO6_PIN_MASK 0x03 #define DA9063_GPIO6_PIN_PERI_SW_S 0x00 #define DA9063_GPIO6_PIN_GPI 0x01 #define DA9063_GPIO6_PIN_GPO_OD 0x02 #define DA9063_GPIO6_PIN_GPO 0x03 #define DA9063_GPIO6_TYPE 0x04 #define DA9063_GPIO6_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO6_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO6_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO6_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO6_NO_WAKEUP 0x08 #define DA9063_GPIO7_PIN_MASK 0x30 #define DA9063_GPIO7_PIN_GPI 0x10 #define DA9063_GPIO7_PIN_GPO_PSS 0x20 #define DA9063_GPIO7_PIN_GPO 0x30 #define DA9063_GPIO7_TYPE 0x40 #define DA9063_GPIO7_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO7_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO7_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO7_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO7_NO_WAKEUP 0x80 /* DA9063_REG_GPIO_8_9 (addr=0x19) */ #define DA9063_GPIO8_PIN_MASK 0x03 #define DA9063_GPIO8_PIN_GPI_SYS_EN 0x00 #define DA9063_GPIO8_PIN_GPI 0x01 #define DA9063_GPIO8_PIN_GPO_PSS 0x02 #define DA9063_GPIO8_PIN_GPO 0x03 #define DA9063_GPIO8_TYPE 0x04 #define DA9063_GPIO8_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO8_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO8_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO8_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO8_NO_WAKEUP 0x08 #define DA9063_GPIO9_PIN_MASK 0x30 #define DA9063_GPIO9_PIN_GPI_PWR_EN 0x00 #define DA9063_GPIO9_PIN_GPI 0x10 #define DA9063_GPIO9_PIN_GPO_PSS 0x20 #define DA9063_GPIO9_PIN_GPO 0x30 #define DA9063_GPIO9_TYPE 0x40 #define DA9063_GPIO9_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO9_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO9_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO9_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO9_NO_WAKEUP 0x80 /* DA9063_REG_GPIO_10_11 (addr=0x1A) */ #define DA9063_GPIO10_PIN_MASK 0x03 #define DA9063_GPIO10_PIN_GPI_PWR1_EN 0x00 #define DA9063_GPIO10_PIN_GPI 0x01 #define DA9063_GPIO10_PIN_GPO_OD 0x02 #define DA9063_GPIO10_PIN_GPO 0x03 #define DA9063_GPIO10_TYPE 0x04 #define DA9063_GPIO10_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO10_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO10_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO10_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO10_NO_WAKEUP 0x08 #define DA9063_GPIO11_PIN_MASK 0x30 #define DA9063_GPIO11_PIN_GPO_OD 0x00 #define DA9063_GPIO11_PIN_GPI 0x10 #define DA9063_GPIO11_PIN_GPO_PSS 0x20 #define DA9063_GPIO11_PIN_GPO 0x30 #define DA9063_GPIO11_TYPE 0x40 #define DA9063_GPIO11_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO11_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO11_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO11_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO11_NO_WAKEUP 0x80 /* DA9063_REG_GPIO_12_13 (addr=0x1B) */ #define DA9063_GPIO12_PIN_MASK 0x03 #define DA9063_GPIO12_PIN_NVDDFLT_OUT 0x00 #define DA9063_GPIO12_PIN_GPI 0x01 #define DA9063_GPIO12_PIN_VSYSMON_OUT 0x02 #define DA9063_GPIO12_PIN_GPO 0x03 #define DA9063_GPIO12_TYPE 0x04 #define DA9063_GPIO12_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO12_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO12_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO12_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO12_NO_WAKEUP 0x08 #define DA9063_GPIO13_PIN_MASK 0x30 #define DA9063_GPIO13_PIN_GPFB1_OUT 0x00 #define DA9063_GPIO13_PIN_GPI 0x10 #define DA9063_GPIO13_PIN_GPFB1_OUTOD 0x20 #define DA9063_GPIO13_PIN_GPO 
0x30 #define DA9063_GPIO13_TYPE 0x40 #define DA9063_GPIO13_TYPE_GPFB1_OUT 0x00 #define DA9063_GPIO13_TYPE_GPI 0x00 #define DA9063_GPIO13_TYPE_GPFB1_OUTOD 0x04 #define DA9063_GPIO13_TYPE_GPO 0x04 #define DA9063_GPIO13_NO_WAKEUP 0x80 /* DA9063_REG_GPIO_14_15 (addr=0x1C) */ #define DA9063_GPIO14_PIN_MASK 0x03 #define DA9063_GPIO14_PIN_GPO_OD 0x00 #define DA9063_GPIO14_PIN_GPI 0x01 #define DA9063_GPIO14_PIN_HS2DATA 0x02 #define DA9063_GPIO14_PIN_GPO 0x03 #define DA9063_GPIO14_TYPE 0x04 #define DA9063_GPIO14_TYPE_GPI_ACT_LOW 0x00 #define DA9063_GPIO14_TYPE_GPO_VDD_IO1 0x00 #define DA9063_GPIO14_TYPE_GPI_ACT_HIGH 0x04 #define DA9063_GPIO14_TYPE_GPO_VDD_IO2 0x04 #define DA9063_GPIO14_NO_WAKEUP 0x08 #define DA9063_GPIO15_PIN_MASK 0x30 #define DA9063_GPIO15_PIN_GPO_OD 0x00 #define DA9063_GPIO15_PIN_GPI 0x10 #define DA9063_GPIO15_PIN_GPO 0x30 #define DA9063_GPIO15_TYPE 0x40 #define DA9063_GPIO15_TYPE_GPFB1_OUT 0x00 #define DA9063_GPIO15_TYPE_GPI 0x00 #define DA9063_GPIO15_TYPE_GPFB1_OUTOD 0x04 #define DA9063_GPIO15_TYPE_GPO 0x04 #define DA9063_GPIO15_NO_WAKEUP 0x80 /* DA9063_REG_GPIO_MODE0_7 (addr=0x1D) */ #define DA9063_GPIO0_MODE 0x01 #define DA9063_GPIO1_MODE 0x02 #define DA9063_GPIO2_MODE 0x04 #define DA9063_GPIO3_MODE 0x08 #define DA9063_GPIO4_MODE 0x10 #define DA9063_GPIO5_MODE 0x20 #define DA9063_GPIO6_MODE 0x40 #define DA9063_GPIO7_MODE 0x80 /* DA9063_REG_GPIO_MODE8_15 (addr=0x1E) */ #define DA9063_GPIO8_MODE 0x01 #define DA9063_GPIO9_MODE 0x02 #define DA9063_GPIO10_MODE 0x04 #define DA9063_GPIO11_MODE 0x08 #define DA9063_GPIO11_MODE_LED_ACT_HIGH 0x00 #define DA9063_GPIO11_MODE_LED_ACT_LOW 0x08 #define DA9063_GPIO12_MODE 0x10 #define DA9063_GPIO13_MODE 0x20 #define DA9063_GPIO14_MODE 0x40 #define DA9063_GPIO14_MODE_LED_ACT_HIGH 0x00 #define DA9063_GPIO14_MODE_LED_ACT_LOW 0x40 #define DA9063_GPIO15_MODE 0x80 #define DA9063_GPIO15_MODE_LED_ACT_HIGH 0x00 #define DA9063_GPIO15_MODE_LED_ACT_LOW 0x80 /* DA9063_REG_SWITCH_CONT (addr=0x1F) */ #define DA9063_CORE_SW_GPI_MASK 0x03 #define DA9063_CORE_SW_GPI_OFF 0x00 #define DA9063_CORE_SW_GPI_GPIO1 0x01 #define DA9063_CORE_SW_GPI_GPIO2 0x02 #define DA9063_CORE_SW_GPI_GPIO13 0x03 #define DA9063_PERI_SW_GPI_MASK 0x0C #define DA9063_PERI_SW_GPI_OFF 0x00 #define DA9063_PERI_SW_GPI_GPIO1 0x04 #define DA9063_PERI_SW_GPI_GPIO2 0x08 #define DA9063_PERI_SW_GPI_GPIO13 0x0C #define DA9063_SWITCH_SR_MASK 0x30 #define DA9063_SWITCH_SR_1MV 0x00 #define DA9063_SWITCH_SR_5MV 0x10 #define DA9063_SWITCH_SR_10MV 0x20 #define DA9063_SWITCH_SR_50MV 0x30 #define DA9063_CORE_SW_INTERNAL 0x40 #define DA9063_CP_EN_MODE 0x80 /* DA9063_REGL_Bxxxx_CONT common bits (addr=0x20-0x25) */ #define DA9063_BUCK_EN 0x01 #define DA9063_BUCK_GPI_MASK 0x06 #define DA9063_BUCK_GPI_OFF 0x00 #define DA9063_BUCK_GPI_GPIO1 0x02 #define DA9063_BUCK_GPI_GPIO2 0x04 #define DA9063_BUCK_GPI_GPIO13 0x06 #define DA9063_BUCK_CONF 0x08 #define DA9063_VBUCK_GPI_MASK 0x60 #define DA9063_VBUCK_GPI_OFF 0x00 #define DA9063_VBUCK_GPI_GPIO1 0x20 #define DA9063_VBUCK_GPI_GPIO2 0x40 #define DA9063_VBUCK_GPI_GPIO13 0x60 /* DA9063_REG_BCORE1_CONT specific bits (addr=0x21) */ #define DA9063_CORE_SW_EN 0x10 #define DA9063_CORE_SW_CONF 0x80 /* DA9063_REG_BPERI_CONT specific bits (addr=0x25) */ #define DA9063_PERI_SW_EN 0x10 #define DA9063_PERI_SW_CONF 0x80 /* DA9063_REG_LDOx_CONT common bits (addr=0x26-0x30) */ #define DA9063_LDO_EN 0x01 #define DA9063_LDO_GPI_MASK 0x06 #define DA9063_LDO_GPI_OFF 0x00 #define DA9063_LDO_GPI_GPIO1 0x02 #define DA9063_LDO_GPI_GPIO2 0x04 #define DA9063_LDO_GPI_GPIO13 0x06 #define 
DA9063_LDO_PD_DIS 0x08 #define DA9063_VLDO_GPI_MASK 0x60 #define DA9063_VLDO_GPI_OFF 0x00 #define DA9063_VLDO_GPI_GPIO1 0x20 #define DA9063_VLDO_GPI_GPIO2 0x40 #define DA9063_VLDO_GPI_GPIO13 0x60 #define DA9063_LDO_CONF 0x80 /* DA9063_REG_LDO5_CONT specific bits (addr=0x2A) */ #define DA9063_VLDO5_SEL 0x10 /* DA9063_REG_LDO6_CONT specific bits (addr=0x2B) */ #define DA9063_VLDO6_SEL 0x10 /* DA9063_REG_LDO7_CONT specific bits (addr=0x2C) */ #define DA9063_VLDO7_SEL 0x10 /* DA9063_REG_LDO8_CONT specific bits (addr=0x2D) */ #define DA9063_VLDO8_SEL 0x10 /* DA9063_REG_LDO9_CONT specific bits (addr=0x2E) */ #define DA9063_VLDO9_SEL 0x10 /* DA9063_REG_LDO10_CONT specific bits (addr=0x2F) */ #define DA9063_VLDO10_SEL 0x10 /* DA9063_REG_LDO11_CONT specific bits (addr=0x30) */ #define DA9063_VLDO11_SEL 0x10 /* DA9063_REG_VIB (addr=0x31) */ #define DA9063_VIB_SET_MASK 0x3F #define DA9063_VIB_SET_OFF 0 #define DA9063_VIB_SET_MAX 0x3F /* DA9063_REG_DVC_1 (addr=0x32) */ #define DA9063_VBCORE1_SEL 0x01 #define DA9063_VBCORE2_SEL 0x02 #define DA9063_VBPRO_SEL 0x04 #define DA9063_VBMEM_SEL 0x08 #define DA9063_VBPERI_SEL 0x10 #define DA9063_VLDO1_SEL 0x20 #define DA9063_VLDO2_SEL 0x40 #define DA9063_VLDO3_SEL 0x80 /* DA9063_REG_DVC_2 (addr=0x33) */ #define DA9063_VBIO_SEL 0x01 #define DA9063_VLDO4_SEL 0x80 /* DA9063_REG_ADC_MAN (addr=0x34) */ #define DA9063_ADC_MUX_MASK 0x0F #define DA9063_ADC_MUX_VSYS 0x00 #define DA9063_ADC_MUX_ADCIN1 0x01 #define DA9063_ADC_MUX_ADCIN2 0x02 #define DA9063_ADC_MUX_ADCIN3 0x03 #define DA9063_ADC_MUX_T_SENSE 0x04 #define DA9063_ADC_MUX_VBBAT 0x05 #define DA9063_ADC_MUX_LDO_G1 0x08 #define DA9063_ADC_MUX_LDO_G2 0x09 #define DA9063_ADC_MUX_LDO_G3 0x0A #define DA9063_ADC_MAN 0x10 #define DA9063_ADC_MODE 0x20 /* DA9063_REG_ADC_CONT (addr=0x35) */ #define DA9063_ADC_AUTO_VSYS_EN 0x01 #define DA9063_ADC_AUTO_AD1_EN 0x02 #define DA9063_ADC_AUTO_AD2_EN 0x04 #define DA9063_ADC_AUTO_AD3_EN 0x08 #define DA9063_ADC_AD1_ISRC_EN 0x10 #define DA9063_ADC_AD2_ISRC_EN 0x20 #define DA9063_ADC_AD3_ISRC_EN 0x40 #define DA9063_COMP1V2_EN 0x80 /* DA9063_REG_VSYS_MON (addr=0x36) */ #define DA9063_VSYS_VAL_MASK 0xFF #define DA9063_VSYS_VAL_BASE 0x00 /* DA9063_REG_ADC_RES_L (addr=0x37) */ #define DA9063_ADC_RES_L_BITS 2 #define DA9063_ADC_RES_L_MASK 0xC0 /* DA9063_REG_ADC_RES_H (addr=0x38) */ #define DA9063_ADC_RES_M_BITS 8 #define DA9063_ADC_RES_M_MASK 0xFF /* DA9063_REG_(xxx_RES/ADC_RES_H) (addr=0x39-0x3F) */ #define DA9063_ADC_VAL_MASK 0xFF /* DA9063_REG_COUNT_S (addr=0x40) */ #define DA9063_RTC_READ 0x80 #define DA9063_COUNT_SEC_MASK 0x3F /* DA9063_REG_COUNT_MI (addr=0x41) */ #define DA9063_COUNT_MIN_MASK 0x3F /* DA9063_REG_COUNT_H (addr=0x42) */ #define DA9063_COUNT_HOUR_MASK 0x1F /* DA9063_REG_COUNT_D (addr=0x43) */ #define DA9063_COUNT_DAY_MASK 0x1F /* DA9063_REG_COUNT_MO (addr=0x44) */ #define DA9063_COUNT_MONTH_MASK 0x0F /* DA9063_REG_COUNT_Y (addr=0x45) */ #define DA9063_COUNT_YEAR_MASK 0x3F #define DA9063_MONITOR 0x40 /* DA9063_REG_ALARM_S (addr=0x46) */ #define DA9063_BB_ALARM_S_MASK 0x3F #define DA9063_ALARM_STATUS_ALARM 0x80 #define DA9063_ALARM_STATUS_TICK 0x40 /* DA9063_REG_ALARM_MI (addr=0x47) */ #define DA9063_ALARM_MIN_MASK 0x3F /* DA9063_REG_ALARM_H (addr=0x48) */ #define DA9063_ALARM_HOUR_MASK 0x1F /* DA9063_REG_ALARM_D (addr=0x49) */ #define DA9063_ALARM_DAY_MASK 0x1F /* DA9063_REG_ALARM_MO (addr=0x4A) */ #define DA9063_TICK_WAKE 0x20 #define DA9063_TICK_TYPE 0x10 #define DA9063_TICK_TYPE_SEC 0x00 #define DA9063_TICK_TYPE_MIN 0x10 #define DA9063_ALARM_MONTH_MASK 0x0F /* 
DA9063_REG_ALARM_Y (addr=0x4B) */ #define DA9063_TICK_ON 0x80 #define DA9063_ALARM_ON 0x40 #define DA9063_ALARM_YEAR_MASK 0x3F /* DA9063_REG_WAIT (addr=0x97)*/ #define DA9063_REG_WAIT_TIME_MASK 0xF #define DA9063_WAIT_TIME_0_US 0x0 #define DA9063_WAIT_TIME_512_US 0x1 #define DA9063_WAIT_TIME_1_MS 0x2 #define DA9063_WAIT_TIME_2_MS 0x3 #define DA9063_WAIT_TIME_4_1_MS 0x4 #define DA9063_WAIT_TIME_8_2_MS 0x5 #define DA9063_WAIT_TIME_16_4_MS 0x6 #define DA9063_WAIT_TIME_32_8_MS 0x7 #define DA9063_WAIT_TIME_65_5_MS 0x8 #define DA9063_WAIT_TIME_128_MS 0x9 #define DA9063_WAIT_TIME_256_MS 0xA #define DA9063_WAIT_TIME_512_MS 0xB #define DA9063_WAIT_TIME_1_S 0xC #define DA9063_WAIT_TIME_2_1_S 0xD /* DA9063_REG_EN_32K (addr=0x98)*/ #define DA9063_STABILIZ_TIME_MASK 0x7 #define DA9063_CRYSTAL 0x08 #define DA9063_DELAY_MODE 0x10 #define DA9063_OUT_CLOCK 0x20 #define DA9063_RTC_CLOCK 0x40 #define DA9063_OUT_32K_EN 0x80 /* DA9063_REG_CHIP_VARIANT */ #define DA9063_CHIP_VARIANT_SHIFT 4 /* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */ #define DA9063_BIO_ILIM_MASK 0x0F #define DA9063_BMEM_ILIM_MASK 0xF0 /* DA9063_REG_BUCK_ILIM_B (addr=0x9B) */ #define DA9063_BPRO_ILIM_MASK 0x0F #define DA9063_BPERI_ILIM_MASK 0xF0 /* DA9063_REG_BUCK_ILIM_C (addr=0x9C) */ #define DA9063_BCORE1_ILIM_MASK 0x0F #define DA9063_BCORE2_ILIM_MASK 0xF0 /* DA9063_REG_Bxxxx_CFG common bits (addr=0x9D-0xA2) */ #define DA9063_BUCK_FB_MASK 0x07 #define DA9063_BUCK_PD_DIS_MASK 0x20 #define DA9063_BUCK_MODE_MASK 0xC0 #define DA9063_BUCK_MODE_MANUAL 0x00 #define DA9063_BUCK_MODE_SLEEP 0x40 #define DA9063_BUCK_MODE_SYNC 0x80 #define DA9063_BUCK_MODE_AUTO 0xC0 /* DA9063_REG_BPRO_CFG (addr=0x9F) */ #define DA9063_BPRO_VTTR_EN 0x08 #define DA9063_BPRO_VTT_EN 0x10 /* DA9063_REG_VBxxxx_A/B (addr=0xA3-0xA8, 0xB4-0xB9) */ #define DA9063_VBUCK_MASK 0x7F #define DA9063_VBUCK_BIAS 0 #define DA9063_BUCK_SL 0x80 /* DA9063_REG_VLDOx_A/B (addr=0xA9-0x3, 0xBA-0xC4) */ #define DA9063_LDO_SL 0x80 /* DA9063_REG_VLDO1_A/B (addr=0xA9, 0xBA) */ #define DA9063_VLDO1_MASK 0x3F #define DA9063_VLDO1_BIAS 0 /* DA9063_REG_VLDO2_A/B (addr=0xAA, 0xBB) */ #define DA9063_VLDO2_MASK 0x3F #define DA9063_VLDO2_BIAS 0 /* DA9063_REG_VLDO3_A/B (addr=0xAB, 0xBC) */ #define DA9063_VLDO3_MASK 0x7F #define DA9063_VLDO3_BIAS 0 /* DA9063_REG_VLDO4_A/B (addr=0xAC, 0xBD) */ #define DA9063_VLDO4_MASK 0x7F #define DA9063_VLDO4_BIAS 0 /* DA9063_REG_VLDO5_A/B (addr=0xAD, 0xBE) */ #define DA9063_VLDO5_MASK 0x3F #define DA9063_VLDO5_BIAS 2 /* DA9063_REG_VLDO6_A/B (addr=0xAE, 0xBF) */ #define DA9063_VLDO6_MASK 0x3F #define DA9063_VLDO6_BIAS 2 /* DA9063_REG_VLDO7_A/B (addr=0xAF, 0xC0) */ #define DA9063_VLDO7_MASK 0x3F #define DA9063_VLDO7_BIAS 2 /* DA9063_REG_VLDO8_A/B (addr=0xB0, 0xC1) */ #define DA9063_VLDO8_MASK 0x3F #define DA9063_VLDO8_BIAS 2 /* DA9063_REG_VLDO9_A/B (addr=0xB1, 0xC2) */ #define DA9063_VLDO9_MASK 0x3F #define DA9063_VLDO9_BIAS 3 /* DA9063_REG_VLDO10_A/B (addr=0xB2, 0xC3) */ #define DA9063_VLDO10_MASK 0x3F #define DA9063_VLDO10_BIAS 2 /* DA9063_REG_VLDO11_A/B (addr=0xB3, 0xC4) */ #define DA9063_VLDO11_MASK 0x3F #define DA9063_VLDO11_BIAS 2 /* DA9063_REG_GPO11_LED (addr=0xC6) */ /* DA9063_REG_GPO14_LED (addr=0xC7) */ /* DA9063_REG_GPO15_LED (addr=0xC8) */ #define DA9063_GPIO_DIM 0x80 #define DA9063_GPIO_PWM_MASK 0x7F /* DA9063_REG_CONFIG_H (addr=0x10D) */ #define DA9063_PWM_CLK_MASK 0x01 #define DA9063_PWM_CLK_PWM2MHZ 0x00 #define DA9063_PWM_CLK_PWM1MHZ 0x01 #define DA9063_LDO8_MODE_MASK 0x02 #define DA9063_LDO8_MODE_LDO 0 #define DA9063_LDO8_MODE_VIBR 0x02 #define 
DA9063_MERGE_SENSE_MASK 0x04 #define DA9063_MERGE_SENSE_GP_FB2 0x00 #define DA9063_MERGE_SENSE_GPIO4 0x04 #define DA9063_BCORE_MERGE 0x08 #define DA9063_BPRO_OD 0x10 #define DA9063_BCORE2_OD 0x20 #define DA9063_BCORE1_OD 0x40 #define DA9063_BUCK_MERGE 0x80 /* DA9063_REG_CONFIG_I (addr=0x10E) */ #define DA9063_NONKEY_PIN_MASK 0x03 #define DA9063_NONKEY_PIN_PORT 0x00 #define DA9063_NONKEY_PIN_SWDOWN 0x01 #define DA9063_NONKEY_PIN_AUTODOWN 0x02 #define DA9063_NONKEY_PIN_AUTOFLPRT 0x03 /* DA9063_REG_MON_REG_5 (addr=0x116) */ #define DA9063_MON_A8_IDX_MASK 0x07 #define DA9063_MON_A8_IDX_NONE 0x00 #define DA9063_MON_A8_IDX_BCORE1 0x01 #define DA9063_MON_A8_IDX_BCORE2 0x02 #define DA9063_MON_A8_IDX_BPRO 0x03 #define DA9063_MON_A8_IDX_LDO3 0x04 #define DA9063_MON_A8_IDX_LDO4 0x05 #define DA9063_MON_A8_IDX_LDO11 0x06 #define DA9063_MON_A9_IDX_MASK 0x70 #define DA9063_MON_A9_IDX_NONE 0x00 #define DA9063_MON_A9_IDX_BIO 0x01 #define DA9063_MON_A9_IDX_BMEM 0x02 #define DA9063_MON_A9_IDX_BPERI 0x03 #define DA9063_MON_A9_IDX_LDO1 0x04 #define DA9063_MON_A9_IDX_LDO2 0x05 #define DA9063_MON_A9_IDX_LDO5 0x06 /* DA9063_REG_MON_REG_6 (addr=0x117) */ #define DA9063_MON_A10_IDX_MASK 0x07 #define DA9063_MON_A10_IDX_NONE 0x00 #define DA9063_MON_A10_IDX_LDO6 0x01 #define DA9063_MON_A10_IDX_LDO7 0x02 #define DA9063_MON_A10_IDX_LDO8 0x03 #define DA9063_MON_A10_IDX_LDO9 0x04 #define DA9063_MON_A10_IDX_LDO10 0x05 #endif /* _DA9063_REG_H */ mfd/da9063/core.h 0000644 00000003701 14722070374 0007330 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Definitions for DA9063 MFD driver * * Copyright 2012 Dialog Semiconductor Ltd. * * Author: Michal Hajduk, Dialog Semiconductor * Author: Krystian Garbaciak, Dialog Semiconductor */ #ifndef __MFD_DA9063_CORE_H__ #define __MFD_DA9063_CORE_H__ #include <linux/interrupt.h> #include <linux/mfd/da9063/registers.h> /* DA9063 modules */ #define DA9063_DRVNAME_CORE "da9063-core" #define DA9063_DRVNAME_REGULATORS "da9063-regulators" #define DA9063_DRVNAME_LEDS "da9063-leds" #define DA9063_DRVNAME_WATCHDOG "da9063-watchdog" #define DA9063_DRVNAME_HWMON "da9063-hwmon" #define DA9063_DRVNAME_ONKEY "da9063-onkey" #define DA9063_DRVNAME_RTC "da9063-rtc" #define DA9063_DRVNAME_VIBRATION "da9063-vibration" #define PMIC_CHIP_ID_DA9063 0x61 enum da9063_type { PMIC_TYPE_DA9063 = 0, PMIC_TYPE_DA9063L, }; enum da9063_variant_codes { PMIC_DA9063_AD = 0x3, PMIC_DA9063_BB = 0x5, PMIC_DA9063_CA = 0x6, }; /* Interrupts */ enum da9063_irqs { DA9063_IRQ_ONKEY = 0, DA9063_IRQ_ALARM, DA9063_IRQ_TICK, DA9063_IRQ_ADC_RDY, DA9063_IRQ_SEQ_RDY, DA9063_IRQ_WAKE, DA9063_IRQ_TEMP, DA9063_IRQ_COMP_1V2, DA9063_IRQ_LDO_LIM, DA9063_IRQ_REG_UVOV, DA9063_IRQ_DVC_RDY, DA9063_IRQ_VDD_MON, DA9063_IRQ_WARN, DA9063_IRQ_GPI0, DA9063_IRQ_GPI1, DA9063_IRQ_GPI2, DA9063_IRQ_GPI3, DA9063_IRQ_GPI4, DA9063_IRQ_GPI5, DA9063_IRQ_GPI6, DA9063_IRQ_GPI7, DA9063_IRQ_GPI8, DA9063_IRQ_GPI9, DA9063_IRQ_GPI10, DA9063_IRQ_GPI11, DA9063_IRQ_GPI12, DA9063_IRQ_GPI13, DA9063_IRQ_GPI14, DA9063_IRQ_GPI15, }; struct da9063 { /* Device */ struct device *dev; enum da9063_type type; unsigned char variant_code; unsigned int flags; /* Control interface */ struct regmap *regmap; /* Interrupts */ int chip_irq; unsigned int irq_base; struct regmap_irq_chip_data *regmap_irq; }; int da9063_device_init(struct da9063 *da9063, unsigned int irq); int da9063_irq_init(struct da9063 *da9063); #endif /* __MFD_DA9063_CORE_H__ */ mfd/wm831x/irq.h 0000644 00000142775 14722070374 0007353 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * 
include/linux/mfd/wm831x/irq.h -- Interrupt controller for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> */ #ifndef __MFD_WM831X_IRQ_H__ #define __MFD_WM831X_IRQ_H__ /* Interrupt number assignments within Linux */ #define WM831X_IRQ_TEMP_THW 0 #define WM831X_IRQ_GPIO_1 1 #define WM831X_IRQ_GPIO_2 2 #define WM831X_IRQ_GPIO_3 3 #define WM831X_IRQ_GPIO_4 4 #define WM831X_IRQ_GPIO_5 5 #define WM831X_IRQ_GPIO_6 6 #define WM831X_IRQ_GPIO_7 7 #define WM831X_IRQ_GPIO_8 8 #define WM831X_IRQ_GPIO_9 9 #define WM831X_IRQ_GPIO_10 10 #define WM831X_IRQ_GPIO_11 11 #define WM831X_IRQ_GPIO_12 12 #define WM831X_IRQ_GPIO_13 13 #define WM831X_IRQ_GPIO_14 14 #define WM831X_IRQ_GPIO_15 15 #define WM831X_IRQ_GPIO_16 16 #define WM831X_IRQ_ON 17 #define WM831X_IRQ_PPM_SYSLO 18 #define WM831X_IRQ_PPM_PWR_SRC 19 #define WM831X_IRQ_PPM_USB_CURR 20 #define WM831X_IRQ_WDOG_TO 21 #define WM831X_IRQ_RTC_PER 22 #define WM831X_IRQ_RTC_ALM 23 #define WM831X_IRQ_CHG_BATT_HOT 24 #define WM831X_IRQ_CHG_BATT_COLD 25 #define WM831X_IRQ_CHG_BATT_FAIL 26 #define WM831X_IRQ_CHG_OV 27 #define WM831X_IRQ_CHG_END 29 #define WM831X_IRQ_CHG_TO 30 #define WM831X_IRQ_CHG_MODE 31 #define WM831X_IRQ_CHG_START 32 #define WM831X_IRQ_TCHDATA 33 #define WM831X_IRQ_TCHPD 34 #define WM831X_IRQ_AUXADC_DATA 35 #define WM831X_IRQ_AUXADC_DCOMP1 36 #define WM831X_IRQ_AUXADC_DCOMP2 37 #define WM831X_IRQ_AUXADC_DCOMP3 38 #define WM831X_IRQ_AUXADC_DCOMP4 39 #define WM831X_IRQ_CS1 40 #define WM831X_IRQ_CS2 41 #define WM831X_IRQ_HC_DC1 42 #define WM831X_IRQ_HC_DC2 43 #define WM831X_IRQ_UV_LDO1 44 #define WM831X_IRQ_UV_LDO2 45 #define WM831X_IRQ_UV_LDO3 46 #define WM831X_IRQ_UV_LDO4 47 #define WM831X_IRQ_UV_LDO5 48 #define WM831X_IRQ_UV_LDO6 49 #define WM831X_IRQ_UV_LDO7 50 #define WM831X_IRQ_UV_LDO8 51 #define WM831X_IRQ_UV_LDO9 52 #define WM831X_IRQ_UV_LDO10 53 #define WM831X_IRQ_UV_DC1 54 #define WM831X_IRQ_UV_DC2 55 #define WM831X_IRQ_UV_DC3 56 #define WM831X_IRQ_UV_DC4 57 #define WM831X_NUM_IRQS 58 /* * R16400 (0x4010) - System Interrupts */ #define WM831X_PS_INT 0x8000 /* PS_INT */ #define WM831X_PS_INT_MASK 0x8000 /* PS_INT */ #define WM831X_PS_INT_SHIFT 15 /* PS_INT */ #define WM831X_PS_INT_WIDTH 1 /* PS_INT */ #define WM831X_TEMP_INT 0x4000 /* TEMP_INT */ #define WM831X_TEMP_INT_MASK 0x4000 /* TEMP_INT */ #define WM831X_TEMP_INT_SHIFT 14 /* TEMP_INT */ #define WM831X_TEMP_INT_WIDTH 1 /* TEMP_INT */ #define WM831X_GP_INT 0x2000 /* GP_INT */ #define WM831X_GP_INT_MASK 0x2000 /* GP_INT */ #define WM831X_GP_INT_SHIFT 13 /* GP_INT */ #define WM831X_GP_INT_WIDTH 1 /* GP_INT */ #define WM831X_ON_PIN_INT 0x1000 /* ON_PIN_INT */ #define WM831X_ON_PIN_INT_MASK 0x1000 /* ON_PIN_INT */ #define WM831X_ON_PIN_INT_SHIFT 12 /* ON_PIN_INT */ #define WM831X_ON_PIN_INT_WIDTH 1 /* ON_PIN_INT */ #define WM831X_WDOG_INT 0x0800 /* WDOG_INT */ #define WM831X_WDOG_INT_MASK 0x0800 /* WDOG_INT */ #define WM831X_WDOG_INT_SHIFT 11 /* WDOG_INT */ #define WM831X_WDOG_INT_WIDTH 1 /* WDOG_INT */ #define WM831X_TCHDATA_INT 0x0400 /* TCHDATA_INT */ #define WM831X_TCHDATA_INT_MASK 0x0400 /* TCHDATA_INT */ #define WM831X_TCHDATA_INT_SHIFT 10 /* TCHDATA_INT */ #define WM831X_TCHDATA_INT_WIDTH 1 /* TCHDATA_INT */ #define WM831X_TCHPD_INT 0x0200 /* TCHPD_INT */ #define WM831X_TCHPD_INT_MASK 0x0200 /* TCHPD_INT */ #define WM831X_TCHPD_INT_SHIFT 9 /* TCHPD_INT */ #define WM831X_TCHPD_INT_WIDTH 1 /* TCHPD_INT */ #define WM831X_AUXADC_INT 0x0100 /* AUXADC_INT */ #define WM831X_AUXADC_INT_MASK 0x0100 /* AUXADC_INT */ 
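/*
 * Editor's note: WM831x interrupts form a two-level tree; each bit in
 * R16400 (System Interrupts) summarises one of the sub-status registers
 * decoded later in this header. A minimal sketch of walking the first
 * level, assuming the wm831x_reg_read() accessor and the
 * WM831X_SYSTEM_INTERRUPTS address macro from the companion
 * wm831x/core.h header.
 */
#include <linux/device.h>
#include <linux/mfd/wm831x/core.h>

static void wm831x_show_pending_groups(struct wm831x *wm831x)
{
	int status = wm831x_reg_read(wm831x, WM831X_SYSTEM_INTERRUPTS);

	if (status < 0)
		return;	/* register read failed */

	if (status & WM831X_GP_INT)
		dev_info(wm831x->dev, "GPIO events pending (Interrupt Status 5)\n");
	if (status & WM831X_RTC_INT)
		dev_info(wm831x->dev, "RTC events pending (Interrupt Status 1)\n");
}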
#define WM831X_AUXADC_INT_SHIFT 8 /* AUXADC_INT */ #define WM831X_AUXADC_INT_WIDTH 1 /* AUXADC_INT */ #define WM831X_PPM_INT 0x0080 /* PPM_INT */ #define WM831X_PPM_INT_MASK 0x0080 /* PPM_INT */ #define WM831X_PPM_INT_SHIFT 7 /* PPM_INT */ #define WM831X_PPM_INT_WIDTH 1 /* PPM_INT */ #define WM831X_CS_INT 0x0040 /* CS_INT */ #define WM831X_CS_INT_MASK 0x0040 /* CS_INT */ #define WM831X_CS_INT_SHIFT 6 /* CS_INT */ #define WM831X_CS_INT_WIDTH 1 /* CS_INT */ #define WM831X_RTC_INT 0x0020 /* RTC_INT */ #define WM831X_RTC_INT_MASK 0x0020 /* RTC_INT */ #define WM831X_RTC_INT_SHIFT 5 /* RTC_INT */ #define WM831X_RTC_INT_WIDTH 1 /* RTC_INT */ #define WM831X_OTP_INT 0x0010 /* OTP_INT */ #define WM831X_OTP_INT_MASK 0x0010 /* OTP_INT */ #define WM831X_OTP_INT_SHIFT 4 /* OTP_INT */ #define WM831X_OTP_INT_WIDTH 1 /* OTP_INT */ #define WM831X_CHILD_INT 0x0008 /* CHILD_INT */ #define WM831X_CHILD_INT_MASK 0x0008 /* CHILD_INT */ #define WM831X_CHILD_INT_SHIFT 3 /* CHILD_INT */ #define WM831X_CHILD_INT_WIDTH 1 /* CHILD_INT */ #define WM831X_CHG_INT 0x0004 /* CHG_INT */ #define WM831X_CHG_INT_MASK 0x0004 /* CHG_INT */ #define WM831X_CHG_INT_SHIFT 2 /* CHG_INT */ #define WM831X_CHG_INT_WIDTH 1 /* CHG_INT */ #define WM831X_HC_INT 0x0002 /* HC_INT */ #define WM831X_HC_INT_MASK 0x0002 /* HC_INT */ #define WM831X_HC_INT_SHIFT 1 /* HC_INT */ #define WM831X_HC_INT_WIDTH 1 /* HC_INT */ #define WM831X_UV_INT 0x0001 /* UV_INT */ #define WM831X_UV_INT_MASK 0x0001 /* UV_INT */ #define WM831X_UV_INT_SHIFT 0 /* UV_INT */ #define WM831X_UV_INT_WIDTH 1 /* UV_INT */ /* * R16401 (0x4011) - Interrupt Status 1 */ #define WM831X_PPM_SYSLO_EINT 0x8000 /* PPM_SYSLO_EINT */ #define WM831X_PPM_SYSLO_EINT_MASK 0x8000 /* PPM_SYSLO_EINT */ #define WM831X_PPM_SYSLO_EINT_SHIFT 15 /* PPM_SYSLO_EINT */ #define WM831X_PPM_SYSLO_EINT_WIDTH 1 /* PPM_SYSLO_EINT */ #define WM831X_PPM_PWR_SRC_EINT 0x4000 /* PPM_PWR_SRC_EINT */ #define WM831X_PPM_PWR_SRC_EINT_MASK 0x4000 /* PPM_PWR_SRC_EINT */ #define WM831X_PPM_PWR_SRC_EINT_SHIFT 14 /* PPM_PWR_SRC_EINT */ #define WM831X_PPM_PWR_SRC_EINT_WIDTH 1 /* PPM_PWR_SRC_EINT */ #define WM831X_PPM_USB_CURR_EINT 0x2000 /* PPM_USB_CURR_EINT */ #define WM831X_PPM_USB_CURR_EINT_MASK 0x2000 /* PPM_USB_CURR_EINT */ #define WM831X_PPM_USB_CURR_EINT_SHIFT 13 /* PPM_USB_CURR_EINT */ #define WM831X_PPM_USB_CURR_EINT_WIDTH 1 /* PPM_USB_CURR_EINT */ #define WM831X_ON_PIN_EINT 0x1000 /* ON_PIN_EINT */ #define WM831X_ON_PIN_EINT_MASK 0x1000 /* ON_PIN_EINT */ #define WM831X_ON_PIN_EINT_SHIFT 12 /* ON_PIN_EINT */ #define WM831X_ON_PIN_EINT_WIDTH 1 /* ON_PIN_EINT */ #define WM831X_WDOG_TO_EINT 0x0800 /* WDOG_TO_EINT */ #define WM831X_WDOG_TO_EINT_MASK 0x0800 /* WDOG_TO_EINT */ #define WM831X_WDOG_TO_EINT_SHIFT 11 /* WDOG_TO_EINT */ #define WM831X_WDOG_TO_EINT_WIDTH 1 /* WDOG_TO_EINT */ #define WM831X_TCHDATA_EINT 0x0400 /* TCHDATA_EINT */ #define WM831X_TCHDATA_EINT_MASK 0x0400 /* TCHDATA_EINT */ #define WM831X_TCHDATA_EINT_SHIFT 10 /* TCHDATA_EINT */ #define WM831X_TCHDATA_EINT_WIDTH 1 /* TCHDATA_EINT */ #define WM831X_TCHPD_EINT 0x0200 /* TCHPD_EINT */ #define WM831X_TCHPD_EINT_MASK 0x0200 /* TCHPD_EINT */ #define WM831X_TCHPD_EINT_SHIFT 9 /* TCHPD_EINT */ #define WM831X_TCHPD_EINT_WIDTH 1 /* TCHPD_EINT */ #define WM831X_AUXADC_DATA_EINT 0x0100 /* AUXADC_DATA_EINT */ #define WM831X_AUXADC_DATA_EINT_MASK 0x0100 /* AUXADC_DATA_EINT */ #define WM831X_AUXADC_DATA_EINT_SHIFT 8 /* AUXADC_DATA_EINT */ #define WM831X_AUXADC_DATA_EINT_WIDTH 1 /* AUXADC_DATA_EINT */ #define WM831X_AUXADC_DCOMP4_EINT 0x0080 /* AUXADC_DCOMP4_EINT */ 
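/*
 * Editor's note: the _EINT bits in the Interrupt Status registers are
 * assumed here to be write-one-to-clear, which is how the in-tree irq
 * driver acknowledges them. A minimal sketch; WM831X_INTERRUPT_STATUS_1
 * is the register address macro expected from wm831x/core.h, and the
 * helper name is illustrative.
 */
static inline void wm831x_ack_syslo(struct wm831x *wm831x)
{
	/* Clear only the SYSLO latch, leaving other pending bits intact. */
	wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1,
			 WM831X_PPM_SYSLO_EINT);
}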
#define WM831X_AUXADC_DCOMP4_EINT_MASK 0x0080 /* AUXADC_DCOMP4_EINT */ #define WM831X_AUXADC_DCOMP4_EINT_SHIFT 7 /* AUXADC_DCOMP4_EINT */ #define WM831X_AUXADC_DCOMP4_EINT_WIDTH 1 /* AUXADC_DCOMP4_EINT */ #define WM831X_AUXADC_DCOMP3_EINT 0x0040 /* AUXADC_DCOMP3_EINT */ #define WM831X_AUXADC_DCOMP3_EINT_MASK 0x0040 /* AUXADC_DCOMP3_EINT */ #define WM831X_AUXADC_DCOMP3_EINT_SHIFT 6 /* AUXADC_DCOMP3_EINT */ #define WM831X_AUXADC_DCOMP3_EINT_WIDTH 1 /* AUXADC_DCOMP3_EINT */ #define WM831X_AUXADC_DCOMP2_EINT 0x0020 /* AUXADC_DCOMP2_EINT */ #define WM831X_AUXADC_DCOMP2_EINT_MASK 0x0020 /* AUXADC_DCOMP2_EINT */ #define WM831X_AUXADC_DCOMP2_EINT_SHIFT 5 /* AUXADC_DCOMP2_EINT */ #define WM831X_AUXADC_DCOMP2_EINT_WIDTH 1 /* AUXADC_DCOMP2_EINT */ #define WM831X_AUXADC_DCOMP1_EINT 0x0010 /* AUXADC_DCOMP1_EINT */ #define WM831X_AUXADC_DCOMP1_EINT_MASK 0x0010 /* AUXADC_DCOMP1_EINT */ #define WM831X_AUXADC_DCOMP1_EINT_SHIFT 4 /* AUXADC_DCOMP1_EINT */ #define WM831X_AUXADC_DCOMP1_EINT_WIDTH 1 /* AUXADC_DCOMP1_EINT */ #define WM831X_RTC_PER_EINT 0x0008 /* RTC_PER_EINT */ #define WM831X_RTC_PER_EINT_MASK 0x0008 /* RTC_PER_EINT */ #define WM831X_RTC_PER_EINT_SHIFT 3 /* RTC_PER_EINT */ #define WM831X_RTC_PER_EINT_WIDTH 1 /* RTC_PER_EINT */ #define WM831X_RTC_ALM_EINT 0x0004 /* RTC_ALM_EINT */ #define WM831X_RTC_ALM_EINT_MASK 0x0004 /* RTC_ALM_EINT */ #define WM831X_RTC_ALM_EINT_SHIFT 2 /* RTC_ALM_EINT */ #define WM831X_RTC_ALM_EINT_WIDTH 1 /* RTC_ALM_EINT */ #define WM831X_TEMP_THW_EINT 0x0002 /* TEMP_THW_EINT */ #define WM831X_TEMP_THW_EINT_MASK 0x0002 /* TEMP_THW_EINT */ #define WM831X_TEMP_THW_EINT_SHIFT 1 /* TEMP_THW_EINT */ #define WM831X_TEMP_THW_EINT_WIDTH 1 /* TEMP_THW_EINT */ /* * R16402 (0x4012) - Interrupt Status 2 */ #define WM831X_CHG_BATT_HOT_EINT 0x8000 /* CHG_BATT_HOT_EINT */ #define WM831X_CHG_BATT_HOT_EINT_MASK 0x8000 /* CHG_BATT_HOT_EINT */ #define WM831X_CHG_BATT_HOT_EINT_SHIFT 15 /* CHG_BATT_HOT_EINT */ #define WM831X_CHG_BATT_HOT_EINT_WIDTH 1 /* CHG_BATT_HOT_EINT */ #define WM831X_CHG_BATT_COLD_EINT 0x4000 /* CHG_BATT_COLD_EINT */ #define WM831X_CHG_BATT_COLD_EINT_MASK 0x4000 /* CHG_BATT_COLD_EINT */ #define WM831X_CHG_BATT_COLD_EINT_SHIFT 14 /* CHG_BATT_COLD_EINT */ #define WM831X_CHG_BATT_COLD_EINT_WIDTH 1 /* CHG_BATT_COLD_EINT */ #define WM831X_CHG_BATT_FAIL_EINT 0x2000 /* CHG_BATT_FAIL_EINT */ #define WM831X_CHG_BATT_FAIL_EINT_MASK 0x2000 /* CHG_BATT_FAIL_EINT */ #define WM831X_CHG_BATT_FAIL_EINT_SHIFT 13 /* CHG_BATT_FAIL_EINT */ #define WM831X_CHG_BATT_FAIL_EINT_WIDTH 1 /* CHG_BATT_FAIL_EINT */ #define WM831X_CHG_OV_EINT 0x1000 /* CHG_OV_EINT */ #define WM831X_CHG_OV_EINT_MASK 0x1000 /* CHG_OV_EINT */ #define WM831X_CHG_OV_EINT_SHIFT 12 /* CHG_OV_EINT */ #define WM831X_CHG_OV_EINT_WIDTH 1 /* CHG_OV_EINT */ #define WM831X_CHG_END_EINT 0x0800 /* CHG_END_EINT */ #define WM831X_CHG_END_EINT_MASK 0x0800 /* CHG_END_EINT */ #define WM831X_CHG_END_EINT_SHIFT 11 /* CHG_END_EINT */ #define WM831X_CHG_END_EINT_WIDTH 1 /* CHG_END_EINT */ #define WM831X_CHG_TO_EINT 0x0400 /* CHG_TO_EINT */ #define WM831X_CHG_TO_EINT_MASK 0x0400 /* CHG_TO_EINT */ #define WM831X_CHG_TO_EINT_SHIFT 10 /* CHG_TO_EINT */ #define WM831X_CHG_TO_EINT_WIDTH 1 /* CHG_TO_EINT */ #define WM831X_CHG_MODE_EINT 0x0200 /* CHG_MODE_EINT */ #define WM831X_CHG_MODE_EINT_MASK 0x0200 /* CHG_MODE_EINT */ #define WM831X_CHG_MODE_EINT_SHIFT 9 /* CHG_MODE_EINT */ #define WM831X_CHG_MODE_EINT_WIDTH 1 /* CHG_MODE_EINT */ #define WM831X_CHG_START_EINT 0x0100 /* CHG_START_EINT */ #define WM831X_CHG_START_EINT_MASK 0x0100 /* CHG_START_EINT 
*/ #define WM831X_CHG_START_EINT_SHIFT 8 /* CHG_START_EINT */ #define WM831X_CHG_START_EINT_WIDTH 1 /* CHG_START_EINT */ #define WM831X_CS2_EINT 0x0080 /* CS2_EINT */ #define WM831X_CS2_EINT_MASK 0x0080 /* CS2_EINT */ #define WM831X_CS2_EINT_SHIFT 7 /* CS2_EINT */ #define WM831X_CS2_EINT_WIDTH 1 /* CS2_EINT */ #define WM831X_CS1_EINT 0x0040 /* CS1_EINT */ #define WM831X_CS1_EINT_MASK 0x0040 /* CS1_EINT */ #define WM831X_CS1_EINT_SHIFT 6 /* CS1_EINT */ #define WM831X_CS1_EINT_WIDTH 1 /* CS1_EINT */ #define WM831X_OTP_CMD_END_EINT 0x0020 /* OTP_CMD_END_EINT */ #define WM831X_OTP_CMD_END_EINT_MASK 0x0020 /* OTP_CMD_END_EINT */ #define WM831X_OTP_CMD_END_EINT_SHIFT 5 /* OTP_CMD_END_EINT */ #define WM831X_OTP_CMD_END_EINT_WIDTH 1 /* OTP_CMD_END_EINT */ #define WM831X_OTP_ERR_EINT 0x0010 /* OTP_ERR_EINT */ #define WM831X_OTP_ERR_EINT_MASK 0x0010 /* OTP_ERR_EINT */ #define WM831X_OTP_ERR_EINT_SHIFT 4 /* OTP_ERR_EINT */ #define WM831X_OTP_ERR_EINT_WIDTH 1 /* OTP_ERR_EINT */ #define WM831X_PS_POR_EINT 0x0004 /* PS_POR_EINT */ #define WM831X_PS_POR_EINT_MASK 0x0004 /* PS_POR_EINT */ #define WM831X_PS_POR_EINT_SHIFT 2 /* PS_POR_EINT */ #define WM831X_PS_POR_EINT_WIDTH 1 /* PS_POR_EINT */ #define WM831X_PS_SLEEP_OFF_EINT 0x0002 /* PS_SLEEP_OFF_EINT */ #define WM831X_PS_SLEEP_OFF_EINT_MASK 0x0002 /* PS_SLEEP_OFF_EINT */ #define WM831X_PS_SLEEP_OFF_EINT_SHIFT 1 /* PS_SLEEP_OFF_EINT */ #define WM831X_PS_SLEEP_OFF_EINT_WIDTH 1 /* PS_SLEEP_OFF_EINT */ #define WM831X_PS_ON_WAKE_EINT 0x0001 /* PS_ON_WAKE_EINT */ #define WM831X_PS_ON_WAKE_EINT_MASK 0x0001 /* PS_ON_WAKE_EINT */ #define WM831X_PS_ON_WAKE_EINT_SHIFT 0 /* PS_ON_WAKE_EINT */ #define WM831X_PS_ON_WAKE_EINT_WIDTH 1 /* PS_ON_WAKE_EINT */ /* * R16403 (0x4013) - Interrupt Status 3 */ #define WM831X_UV_LDO10_EINT 0x0200 /* UV_LDO10_EINT */ #define WM831X_UV_LDO10_EINT_MASK 0x0200 /* UV_LDO10_EINT */ #define WM831X_UV_LDO10_EINT_SHIFT 9 /* UV_LDO10_EINT */ #define WM831X_UV_LDO10_EINT_WIDTH 1 /* UV_LDO10_EINT */ #define WM831X_UV_LDO9_EINT 0x0100 /* UV_LDO9_EINT */ #define WM831X_UV_LDO9_EINT_MASK 0x0100 /* UV_LDO9_EINT */ #define WM831X_UV_LDO9_EINT_SHIFT 8 /* UV_LDO9_EINT */ #define WM831X_UV_LDO9_EINT_WIDTH 1 /* UV_LDO9_EINT */ #define WM831X_UV_LDO8_EINT 0x0080 /* UV_LDO8_EINT */ #define WM831X_UV_LDO8_EINT_MASK 0x0080 /* UV_LDO8_EINT */ #define WM831X_UV_LDO8_EINT_SHIFT 7 /* UV_LDO8_EINT */ #define WM831X_UV_LDO8_EINT_WIDTH 1 /* UV_LDO8_EINT */ #define WM831X_UV_LDO7_EINT 0x0040 /* UV_LDO7_EINT */ #define WM831X_UV_LDO7_EINT_MASK 0x0040 /* UV_LDO7_EINT */ #define WM831X_UV_LDO7_EINT_SHIFT 6 /* UV_LDO7_EINT */ #define WM831X_UV_LDO7_EINT_WIDTH 1 /* UV_LDO7_EINT */ #define WM831X_UV_LDO6_EINT 0x0020 /* UV_LDO6_EINT */ #define WM831X_UV_LDO6_EINT_MASK 0x0020 /* UV_LDO6_EINT */ #define WM831X_UV_LDO6_EINT_SHIFT 5 /* UV_LDO6_EINT */ #define WM831X_UV_LDO6_EINT_WIDTH 1 /* UV_LDO6_EINT */ #define WM831X_UV_LDO5_EINT 0x0010 /* UV_LDO5_EINT */ #define WM831X_UV_LDO5_EINT_MASK 0x0010 /* UV_LDO5_EINT */ #define WM831X_UV_LDO5_EINT_SHIFT 4 /* UV_LDO5_EINT */ #define WM831X_UV_LDO5_EINT_WIDTH 1 /* UV_LDO5_EINT */ #define WM831X_UV_LDO4_EINT 0x0008 /* UV_LDO4_EINT */ #define WM831X_UV_LDO4_EINT_MASK 0x0008 /* UV_LDO4_EINT */ #define WM831X_UV_LDO4_EINT_SHIFT 3 /* UV_LDO4_EINT */ #define WM831X_UV_LDO4_EINT_WIDTH 1 /* UV_LDO4_EINT */ #define WM831X_UV_LDO3_EINT 0x0004 /* UV_LDO3_EINT */ #define WM831X_UV_LDO3_EINT_MASK 0x0004 /* UV_LDO3_EINT */ #define WM831X_UV_LDO3_EINT_SHIFT 2 /* UV_LDO3_EINT */ #define WM831X_UV_LDO3_EINT_WIDTH 1 /* UV_LDO3_EINT */ #define 
WM831X_UV_LDO2_EINT 0x0002 /* UV_LDO2_EINT */ #define WM831X_UV_LDO2_EINT_MASK 0x0002 /* UV_LDO2_EINT */ #define WM831X_UV_LDO2_EINT_SHIFT 1 /* UV_LDO2_EINT */ #define WM831X_UV_LDO2_EINT_WIDTH 1 /* UV_LDO2_EINT */ #define WM831X_UV_LDO1_EINT 0x0001 /* UV_LDO1_EINT */ #define WM831X_UV_LDO1_EINT_MASK 0x0001 /* UV_LDO1_EINT */ #define WM831X_UV_LDO1_EINT_SHIFT 0 /* UV_LDO1_EINT */ #define WM831X_UV_LDO1_EINT_WIDTH 1 /* UV_LDO1_EINT */ /* * R16404 (0x4014) - Interrupt Status 4 */ #define WM831X_HC_DC2_EINT 0x0200 /* HC_DC2_EINT */ #define WM831X_HC_DC2_EINT_MASK 0x0200 /* HC_DC2_EINT */ #define WM831X_HC_DC2_EINT_SHIFT 9 /* HC_DC2_EINT */ #define WM831X_HC_DC2_EINT_WIDTH 1 /* HC_DC2_EINT */ #define WM831X_HC_DC1_EINT 0x0100 /* HC_DC1_EINT */ #define WM831X_HC_DC1_EINT_MASK 0x0100 /* HC_DC1_EINT */ #define WM831X_HC_DC1_EINT_SHIFT 8 /* HC_DC1_EINT */ #define WM831X_HC_DC1_EINT_WIDTH 1 /* HC_DC1_EINT */ #define WM831X_UV_DC4_EINT 0x0008 /* UV_DC4_EINT */ #define WM831X_UV_DC4_EINT_MASK 0x0008 /* UV_DC4_EINT */ #define WM831X_UV_DC4_EINT_SHIFT 3 /* UV_DC4_EINT */ #define WM831X_UV_DC4_EINT_WIDTH 1 /* UV_DC4_EINT */ #define WM831X_UV_DC3_EINT 0x0004 /* UV_DC3_EINT */ #define WM831X_UV_DC3_EINT_MASK 0x0004 /* UV_DC3_EINT */ #define WM831X_UV_DC3_EINT_SHIFT 2 /* UV_DC3_EINT */ #define WM831X_UV_DC3_EINT_WIDTH 1 /* UV_DC3_EINT */ #define WM831X_UV_DC2_EINT 0x0002 /* UV_DC2_EINT */ #define WM831X_UV_DC2_EINT_MASK 0x0002 /* UV_DC2_EINT */ #define WM831X_UV_DC2_EINT_SHIFT 1 /* UV_DC2_EINT */ #define WM831X_UV_DC2_EINT_WIDTH 1 /* UV_DC2_EINT */ #define WM831X_UV_DC1_EINT 0x0001 /* UV_DC1_EINT */ #define WM831X_UV_DC1_EINT_MASK 0x0001 /* UV_DC1_EINT */ #define WM831X_UV_DC1_EINT_SHIFT 0 /* UV_DC1_EINT */ #define WM831X_UV_DC1_EINT_WIDTH 1 /* UV_DC1_EINT */ /* * R16405 (0x4015) - Interrupt Status 5 */ #define WM831X_GP16_EINT 0x8000 /* GP16_EINT */ #define WM831X_GP16_EINT_MASK 0x8000 /* GP16_EINT */ #define WM831X_GP16_EINT_SHIFT 15 /* GP16_EINT */ #define WM831X_GP16_EINT_WIDTH 1 /* GP16_EINT */ #define WM831X_GP15_EINT 0x4000 /* GP15_EINT */ #define WM831X_GP15_EINT_MASK 0x4000 /* GP15_EINT */ #define WM831X_GP15_EINT_SHIFT 14 /* GP15_EINT */ #define WM831X_GP15_EINT_WIDTH 1 /* GP15_EINT */ #define WM831X_GP14_EINT 0x2000 /* GP14_EINT */ #define WM831X_GP14_EINT_MASK 0x2000 /* GP14_EINT */ #define WM831X_GP14_EINT_SHIFT 13 /* GP14_EINT */ #define WM831X_GP14_EINT_WIDTH 1 /* GP14_EINT */ #define WM831X_GP13_EINT 0x1000 /* GP13_EINT */ #define WM831X_GP13_EINT_MASK 0x1000 /* GP13_EINT */ #define WM831X_GP13_EINT_SHIFT 12 /* GP13_EINT */ #define WM831X_GP13_EINT_WIDTH 1 /* GP13_EINT */ #define WM831X_GP12_EINT 0x0800 /* GP12_EINT */ #define WM831X_GP12_EINT_MASK 0x0800 /* GP12_EINT */ #define WM831X_GP12_EINT_SHIFT 11 /* GP12_EINT */ #define WM831X_GP12_EINT_WIDTH 1 /* GP12_EINT */ #define WM831X_GP11_EINT 0x0400 /* GP11_EINT */ #define WM831X_GP11_EINT_MASK 0x0400 /* GP11_EINT */ #define WM831X_GP11_EINT_SHIFT 10 /* GP11_EINT */ #define WM831X_GP11_EINT_WIDTH 1 /* GP11_EINT */ #define WM831X_GP10_EINT 0x0200 /* GP10_EINT */ #define WM831X_GP10_EINT_MASK 0x0200 /* GP10_EINT */ #define WM831X_GP10_EINT_SHIFT 9 /* GP10_EINT */ #define WM831X_GP10_EINT_WIDTH 1 /* GP10_EINT */ #define WM831X_GP9_EINT 0x0100 /* GP9_EINT */ #define WM831X_GP9_EINT_MASK 0x0100 /* GP9_EINT */ #define WM831X_GP9_EINT_SHIFT 8 /* GP9_EINT */ #define WM831X_GP9_EINT_WIDTH 1 /* GP9_EINT */ #define WM831X_GP8_EINT 0x0080 /* GP8_EINT */ #define WM831X_GP8_EINT_MASK 0x0080 /* GP8_EINT */ #define WM831X_GP8_EINT_SHIFT 7 /* GP8_EINT */ 
#define WM831X_GP8_EINT_WIDTH 1 /* GP8_EINT */ #define WM831X_GP7_EINT 0x0040 /* GP7_EINT */ #define WM831X_GP7_EINT_MASK 0x0040 /* GP7_EINT */ #define WM831X_GP7_EINT_SHIFT 6 /* GP7_EINT */ #define WM831X_GP7_EINT_WIDTH 1 /* GP7_EINT */ #define WM831X_GP6_EINT 0x0020 /* GP6_EINT */ #define WM831X_GP6_EINT_MASK 0x0020 /* GP6_EINT */ #define WM831X_GP6_EINT_SHIFT 5 /* GP6_EINT */ #define WM831X_GP6_EINT_WIDTH 1 /* GP6_EINT */ #define WM831X_GP5_EINT 0x0010 /* GP5_EINT */ #define WM831X_GP5_EINT_MASK 0x0010 /* GP5_EINT */ #define WM831X_GP5_EINT_SHIFT 4 /* GP5_EINT */ #define WM831X_GP5_EINT_WIDTH 1 /* GP5_EINT */ #define WM831X_GP4_EINT 0x0008 /* GP4_EINT */ #define WM831X_GP4_EINT_MASK 0x0008 /* GP4_EINT */ #define WM831X_GP4_EINT_SHIFT 3 /* GP4_EINT */ #define WM831X_GP4_EINT_WIDTH 1 /* GP4_EINT */ #define WM831X_GP3_EINT 0x0004 /* GP3_EINT */ #define WM831X_GP3_EINT_MASK 0x0004 /* GP3_EINT */ #define WM831X_GP3_EINT_SHIFT 2 /* GP3_EINT */ #define WM831X_GP3_EINT_WIDTH 1 /* GP3_EINT */ #define WM831X_GP2_EINT 0x0002 /* GP2_EINT */ #define WM831X_GP2_EINT_MASK 0x0002 /* GP2_EINT */ #define WM831X_GP2_EINT_SHIFT 1 /* GP2_EINT */ #define WM831X_GP2_EINT_WIDTH 1 /* GP2_EINT */ #define WM831X_GP1_EINT 0x0001 /* GP1_EINT */ #define WM831X_GP1_EINT_MASK 0x0001 /* GP1_EINT */ #define WM831X_GP1_EINT_SHIFT 0 /* GP1_EINT */ #define WM831X_GP1_EINT_WIDTH 1 /* GP1_EINT */ /* * R16407 (0x4017) - IRQ Config */ #define WM831X_IRQ_OD 0x0002 /* IRQ_OD */ #define WM831X_IRQ_OD_MASK 0x0002 /* IRQ_OD */ #define WM831X_IRQ_OD_SHIFT 1 /* IRQ_OD */ #define WM831X_IRQ_OD_WIDTH 1 /* IRQ_OD */ #define WM831X_IM_IRQ 0x0001 /* IM_IRQ */ #define WM831X_IM_IRQ_MASK 0x0001 /* IM_IRQ */ #define WM831X_IM_IRQ_SHIFT 0 /* IM_IRQ */ #define WM831X_IM_IRQ_WIDTH 1 /* IM_IRQ */ /* * R16408 (0x4018) - System Interrupts Mask */ #define WM831X_IM_PS_INT 0x8000 /* IM_PS_INT */ #define WM831X_IM_PS_INT_MASK 0x8000 /* IM_PS_INT */ #define WM831X_IM_PS_INT_SHIFT 15 /* IM_PS_INT */ #define WM831X_IM_PS_INT_WIDTH 1 /* IM_PS_INT */ #define WM831X_IM_TEMP_INT 0x4000 /* IM_TEMP_INT */ #define WM831X_IM_TEMP_INT_MASK 0x4000 /* IM_TEMP_INT */ #define WM831X_IM_TEMP_INT_SHIFT 14 /* IM_TEMP_INT */ #define WM831X_IM_TEMP_INT_WIDTH 1 /* IM_TEMP_INT */ #define WM831X_IM_GP_INT 0x2000 /* IM_GP_INT */ #define WM831X_IM_GP_INT_MASK 0x2000 /* IM_GP_INT */ #define WM831X_IM_GP_INT_SHIFT 13 /* IM_GP_INT */ #define WM831X_IM_GP_INT_WIDTH 1 /* IM_GP_INT */ #define WM831X_IM_ON_PIN_INT 0x1000 /* IM_ON_PIN_INT */ #define WM831X_IM_ON_PIN_INT_MASK 0x1000 /* IM_ON_PIN_INT */ #define WM831X_IM_ON_PIN_INT_SHIFT 12 /* IM_ON_PIN_INT */ #define WM831X_IM_ON_PIN_INT_WIDTH 1 /* IM_ON_PIN_INT */ #define WM831X_IM_WDOG_INT 0x0800 /* IM_WDOG_INT */ #define WM831X_IM_WDOG_INT_MASK 0x0800 /* IM_WDOG_INT */ #define WM831X_IM_WDOG_INT_SHIFT 11 /* IM_WDOG_INT */ #define WM831X_IM_WDOG_INT_WIDTH 1 /* IM_WDOG_INT */ #define WM831X_IM_TCHDATA_INT 0x0400 /* IM_TCHDATA_INT */ #define WM831X_IM_TCHDATA_INT_MASK 0x0400 /* IM_TCHDATA_INT */ #define WM831X_IM_TCHDATA_INT_SHIFT 10 /* IM_TCHDATA_INT */ #define WM831X_IM_TCHDATA_INT_WIDTH 1 /* IM_TCHDATA_INT */ #define WM831X_IM_TCHPD_INT 0x0200 /* IM_TCHPD_INT */ #define WM831X_IM_TCHPD_INT_MASK 0x0200 /* IM_TCHPD_INT */ #define WM831X_IM_TCHPD_INT_SHIFT 9 /* IM_TCHPD_INT */ #define WM831X_IM_TCHPD_INT_WIDTH 1 /* IM_TCHPD_INT */ #define WM831X_IM_AUXADC_INT 0x0100 /* IM_AUXADC_INT */ #define WM831X_IM_AUXADC_INT_MASK 0x0100 /* IM_AUXADC_INT */ #define WM831X_IM_AUXADC_INT_SHIFT 8 /* IM_AUXADC_INT */ #define 
WM831X_IM_AUXADC_INT_WIDTH 1 /* IM_AUXADC_INT */ #define WM831X_IM_PPM_INT 0x0080 /* IM_PPM_INT */ #define WM831X_IM_PPM_INT_MASK 0x0080 /* IM_PPM_INT */ #define WM831X_IM_PPM_INT_SHIFT 7 /* IM_PPM_INT */ #define WM831X_IM_PPM_INT_WIDTH 1 /* IM_PPM_INT */ #define WM831X_IM_CS_INT 0x0040 /* IM_CS_INT */ #define WM831X_IM_CS_INT_MASK 0x0040 /* IM_CS_INT */ #define WM831X_IM_CS_INT_SHIFT 6 /* IM_CS_INT */ #define WM831X_IM_CS_INT_WIDTH 1 /* IM_CS_INT */ #define WM831X_IM_RTC_INT 0x0020 /* IM_RTC_INT */ #define WM831X_IM_RTC_INT_MASK 0x0020 /* IM_RTC_INT */ #define WM831X_IM_RTC_INT_SHIFT 5 /* IM_RTC_INT */ #define WM831X_IM_RTC_INT_WIDTH 1 /* IM_RTC_INT */ #define WM831X_IM_OTP_INT 0x0010 /* IM_OTP_INT */ #define WM831X_IM_OTP_INT_MASK 0x0010 /* IM_OTP_INT */ #define WM831X_IM_OTP_INT_SHIFT 4 /* IM_OTP_INT */ #define WM831X_IM_OTP_INT_WIDTH 1 /* IM_OTP_INT */ #define WM831X_IM_CHILD_INT 0x0008 /* IM_CHILD_INT */ #define WM831X_IM_CHILD_INT_MASK 0x0008 /* IM_CHILD_INT */ #define WM831X_IM_CHILD_INT_SHIFT 3 /* IM_CHILD_INT */ #define WM831X_IM_CHILD_INT_WIDTH 1 /* IM_CHILD_INT */ #define WM831X_IM_CHG_INT 0x0004 /* IM_CHG_INT */ #define WM831X_IM_CHG_INT_MASK 0x0004 /* IM_CHG_INT */ #define WM831X_IM_CHG_INT_SHIFT 2 /* IM_CHG_INT */ #define WM831X_IM_CHG_INT_WIDTH 1 /* IM_CHG_INT */ #define WM831X_IM_HC_INT 0x0002 /* IM_HC_INT */ #define WM831X_IM_HC_INT_MASK 0x0002 /* IM_HC_INT */ #define WM831X_IM_HC_INT_SHIFT 1 /* IM_HC_INT */ #define WM831X_IM_HC_INT_WIDTH 1 /* IM_HC_INT */ #define WM831X_IM_UV_INT 0x0001 /* IM_UV_INT */ #define WM831X_IM_UV_INT_MASK 0x0001 /* IM_UV_INT */ #define WM831X_IM_UV_INT_SHIFT 0 /* IM_UV_INT */ #define WM831X_IM_UV_INT_WIDTH 1 /* IM_UV_INT */ /* * R16409 (0x4019) - Interrupt Status 1 Mask */ #define WM831X_IM_PPM_SYSLO_EINT 0x8000 /* IM_PPM_SYSLO_EINT */ #define WM831X_IM_PPM_SYSLO_EINT_MASK 0x8000 /* IM_PPM_SYSLO_EINT */ #define WM831X_IM_PPM_SYSLO_EINT_SHIFT 15 /* IM_PPM_SYSLO_EINT */ #define WM831X_IM_PPM_SYSLO_EINT_WIDTH 1 /* IM_PPM_SYSLO_EINT */ #define WM831X_IM_PPM_PWR_SRC_EINT 0x4000 /* IM_PPM_PWR_SRC_EINT */ #define WM831X_IM_PPM_PWR_SRC_EINT_MASK 0x4000 /* IM_PPM_PWR_SRC_EINT */ #define WM831X_IM_PPM_PWR_SRC_EINT_SHIFT 14 /* IM_PPM_PWR_SRC_EINT */ #define WM831X_IM_PPM_PWR_SRC_EINT_WIDTH 1 /* IM_PPM_PWR_SRC_EINT */ #define WM831X_IM_PPM_USB_CURR_EINT 0x2000 /* IM_PPM_USB_CURR_EINT */ #define WM831X_IM_PPM_USB_CURR_EINT_MASK 0x2000 /* IM_PPM_USB_CURR_EINT */ #define WM831X_IM_PPM_USB_CURR_EINT_SHIFT 13 /* IM_PPM_USB_CURR_EINT */ #define WM831X_IM_PPM_USB_CURR_EINT_WIDTH 1 /* IM_PPM_USB_CURR_EINT */ #define WM831X_IM_ON_PIN_EINT 0x1000 /* IM_ON_PIN_EINT */ #define WM831X_IM_ON_PIN_EINT_MASK 0x1000 /* IM_ON_PIN_EINT */ #define WM831X_IM_ON_PIN_EINT_SHIFT 12 /* IM_ON_PIN_EINT */ #define WM831X_IM_ON_PIN_EINT_WIDTH 1 /* IM_ON_PIN_EINT */ #define WM831X_IM_WDOG_TO_EINT 0x0800 /* IM_WDOG_TO_EINT */ #define WM831X_IM_WDOG_TO_EINT_MASK 0x0800 /* IM_WDOG_TO_EINT */ #define WM831X_IM_WDOG_TO_EINT_SHIFT 11 /* IM_WDOG_TO_EINT */ #define WM831X_IM_WDOG_TO_EINT_WIDTH 1 /* IM_WDOG_TO_EINT */ #define WM831X_IM_TCHDATA_EINT 0x0400 /* IM_TCHDATA_EINT */ #define WM831X_IM_TCHDATA_EINT_MASK 0x0400 /* IM_TCHDATA_EINT */ #define WM831X_IM_TCHDATA_EINT_SHIFT 10 /* IM_TCHDATA_EINT */ #define WM831X_IM_TCHDATA_EINT_WIDTH 1 /* IM_TCHDATA_EINT */ #define WM831X_IM_TCHPD_EINT 0x0200 /* IM_TCHPD_EINT */ #define WM831X_IM_TCHPD_EINT_MASK 0x0200 /* IM_TCHPD_EINT */ #define WM831X_IM_TCHPD_EINT_SHIFT 9 /* IM_TCHPD_EINT */ #define WM831X_IM_TCHPD_EINT_WIDTH 1 /* IM_TCHPD_EINT */ #define 
WM831X_IM_AUXADC_DATA_EINT 0x0100 /* IM_AUXADC_DATA_EINT */ #define WM831X_IM_AUXADC_DATA_EINT_MASK 0x0100 /* IM_AUXADC_DATA_EINT */ #define WM831X_IM_AUXADC_DATA_EINT_SHIFT 8 /* IM_AUXADC_DATA_EINT */ #define WM831X_IM_AUXADC_DATA_EINT_WIDTH 1 /* IM_AUXADC_DATA_EINT */ #define WM831X_IM_AUXADC_DCOMP4_EINT 0x0080 /* IM_AUXADC_DCOMP4_EINT */ #define WM831X_IM_AUXADC_DCOMP4_EINT_MASK 0x0080 /* IM_AUXADC_DCOMP4_EINT */ #define WM831X_IM_AUXADC_DCOMP4_EINT_SHIFT 7 /* IM_AUXADC_DCOMP4_EINT */ #define WM831X_IM_AUXADC_DCOMP4_EINT_WIDTH 1 /* IM_AUXADC_DCOMP4_EINT */ #define WM831X_IM_AUXADC_DCOMP3_EINT 0x0040 /* IM_AUXADC_DCOMP3_EINT */ #define WM831X_IM_AUXADC_DCOMP3_EINT_MASK 0x0040 /* IM_AUXADC_DCOMP3_EINT */ #define WM831X_IM_AUXADC_DCOMP3_EINT_SHIFT 6 /* IM_AUXADC_DCOMP3_EINT */ #define WM831X_IM_AUXADC_DCOMP3_EINT_WIDTH 1 /* IM_AUXADC_DCOMP3_EINT */ #define WM831X_IM_AUXADC_DCOMP2_EINT 0x0020 /* IM_AUXADC_DCOMP2_EINT */ #define WM831X_IM_AUXADC_DCOMP2_EINT_MASK 0x0020 /* IM_AUXADC_DCOMP2_EINT */ #define WM831X_IM_AUXADC_DCOMP2_EINT_SHIFT 5 /* IM_AUXADC_DCOMP2_EINT */ #define WM831X_IM_AUXADC_DCOMP2_EINT_WIDTH 1 /* IM_AUXADC_DCOMP2_EINT */ #define WM831X_IM_AUXADC_DCOMP1_EINT 0x0010 /* IM_AUXADC_DCOMP1_EINT */ #define WM831X_IM_AUXADC_DCOMP1_EINT_MASK 0x0010 /* IM_AUXADC_DCOMP1_EINT */ #define WM831X_IM_AUXADC_DCOMP1_EINT_SHIFT 4 /* IM_AUXADC_DCOMP1_EINT */ #define WM831X_IM_AUXADC_DCOMP1_EINT_WIDTH 1 /* IM_AUXADC_DCOMP1_EINT */ #define WM831X_IM_RTC_PER_EINT 0x0008 /* IM_RTC_PER_EINT */ #define WM831X_IM_RTC_PER_EINT_MASK 0x0008 /* IM_RTC_PER_EINT */ #define WM831X_IM_RTC_PER_EINT_SHIFT 3 /* IM_RTC_PER_EINT */ #define WM831X_IM_RTC_PER_EINT_WIDTH 1 /* IM_RTC_PER_EINT */ #define WM831X_IM_RTC_ALM_EINT 0x0004 /* IM_RTC_ALM_EINT */ #define WM831X_IM_RTC_ALM_EINT_MASK 0x0004 /* IM_RTC_ALM_EINT */ #define WM831X_IM_RTC_ALM_EINT_SHIFT 2 /* IM_RTC_ALM_EINT */ #define WM831X_IM_RTC_ALM_EINT_WIDTH 1 /* IM_RTC_ALM_EINT */ #define WM831X_IM_TEMP_THW_EINT 0x0002 /* IM_TEMP_THW_EINT */ #define WM831X_IM_TEMP_THW_EINT_MASK 0x0002 /* IM_TEMP_THW_EINT */ #define WM831X_IM_TEMP_THW_EINT_SHIFT 1 /* IM_TEMP_THW_EINT */ #define WM831X_IM_TEMP_THW_EINT_WIDTH 1 /* IM_TEMP_THW_EINT */ /* * R16410 (0x401A) - Interrupt Status 2 Mask */ #define WM831X_IM_CHG_BATT_HOT_EINT 0x8000 /* IM_CHG_BATT_HOT_EINT */ #define WM831X_IM_CHG_BATT_HOT_EINT_MASK 0x8000 /* IM_CHG_BATT_HOT_EINT */ #define WM831X_IM_CHG_BATT_HOT_EINT_SHIFT 15 /* IM_CHG_BATT_HOT_EINT */ #define WM831X_IM_CHG_BATT_HOT_EINT_WIDTH 1 /* IM_CHG_BATT_HOT_EINT */ #define WM831X_IM_CHG_BATT_COLD_EINT 0x4000 /* IM_CHG_BATT_COLD_EINT */ #define WM831X_IM_CHG_BATT_COLD_EINT_MASK 0x4000 /* IM_CHG_BATT_COLD_EINT */ #define WM831X_IM_CHG_BATT_COLD_EINT_SHIFT 14 /* IM_CHG_BATT_COLD_EINT */ #define WM831X_IM_CHG_BATT_COLD_EINT_WIDTH 1 /* IM_CHG_BATT_COLD_EINT */ #define WM831X_IM_CHG_BATT_FAIL_EINT 0x2000 /* IM_CHG_BATT_FAIL_EINT */ #define WM831X_IM_CHG_BATT_FAIL_EINT_MASK 0x2000 /* IM_CHG_BATT_FAIL_EINT */ #define WM831X_IM_CHG_BATT_FAIL_EINT_SHIFT 13 /* IM_CHG_BATT_FAIL_EINT */ #define WM831X_IM_CHG_BATT_FAIL_EINT_WIDTH 1 /* IM_CHG_BATT_FAIL_EINT */ #define WM831X_IM_CHG_OV_EINT 0x1000 /* IM_CHG_OV_EINT */ #define WM831X_IM_CHG_OV_EINT_MASK 0x1000 /* IM_CHG_OV_EINT */ #define WM831X_IM_CHG_OV_EINT_SHIFT 12 /* IM_CHG_OV_EINT */ #define WM831X_IM_CHG_OV_EINT_WIDTH 1 /* IM_CHG_OV_EINT */ #define WM831X_IM_CHG_END_EINT 0x0800 /* IM_CHG_END_EINT */ #define WM831X_IM_CHG_END_EINT_MASK 0x0800 /* IM_CHG_END_EINT */ #define WM831X_IM_CHG_END_EINT_SHIFT 11 /* IM_CHG_END_EINT 
*/ #define WM831X_IM_CHG_END_EINT_WIDTH 1 /* IM_CHG_END_EINT */ #define WM831X_IM_CHG_TO_EINT 0x0400 /* IM_CHG_TO_EINT */ #define WM831X_IM_CHG_TO_EINT_MASK 0x0400 /* IM_CHG_TO_EINT */ #define WM831X_IM_CHG_TO_EINT_SHIFT 10 /* IM_CHG_TO_EINT */ #define WM831X_IM_CHG_TO_EINT_WIDTH 1 /* IM_CHG_TO_EINT */ #define WM831X_IM_CHG_MODE_EINT 0x0200 /* IM_CHG_MODE_EINT */ #define WM831X_IM_CHG_MODE_EINT_MASK 0x0200 /* IM_CHG_MODE_EINT */ #define WM831X_IM_CHG_MODE_EINT_SHIFT 9 /* IM_CHG_MODE_EINT */ #define WM831X_IM_CHG_MODE_EINT_WIDTH 1 /* IM_CHG_MODE_EINT */ #define WM831X_IM_CHG_START_EINT 0x0100 /* IM_CHG_START_EINT */ #define WM831X_IM_CHG_START_EINT_MASK 0x0100 /* IM_CHG_START_EINT */ #define WM831X_IM_CHG_START_EINT_SHIFT 8 /* IM_CHG_START_EINT */ #define WM831X_IM_CHG_START_EINT_WIDTH 1 /* IM_CHG_START_EINT */ #define WM831X_IM_CS2_EINT 0x0080 /* IM_CS2_EINT */ #define WM831X_IM_CS2_EINT_MASK 0x0080 /* IM_CS2_EINT */ #define WM831X_IM_CS2_EINT_SHIFT 7 /* IM_CS2_EINT */ #define WM831X_IM_CS2_EINT_WIDTH 1 /* IM_CS2_EINT */ #define WM831X_IM_CS1_EINT 0x0040 /* IM_CS1_EINT */ #define WM831X_IM_CS1_EINT_MASK 0x0040 /* IM_CS1_EINT */ #define WM831X_IM_CS1_EINT_SHIFT 6 /* IM_CS1_EINT */ #define WM831X_IM_CS1_EINT_WIDTH 1 /* IM_CS1_EINT */ #define WM831X_IM_OTP_CMD_END_EINT 0x0020 /* IM_OTP_CMD_END_EINT */ #define WM831X_IM_OTP_CMD_END_EINT_MASK 0x0020 /* IM_OTP_CMD_END_EINT */ #define WM831X_IM_OTP_CMD_END_EINT_SHIFT 5 /* IM_OTP_CMD_END_EINT */ #define WM831X_IM_OTP_CMD_END_EINT_WIDTH 1 /* IM_OTP_CMD_END_EINT */ #define WM831X_IM_OTP_ERR_EINT 0x0010 /* IM_OTP_ERR_EINT */ #define WM831X_IM_OTP_ERR_EINT_MASK 0x0010 /* IM_OTP_ERR_EINT */ #define WM831X_IM_OTP_ERR_EINT_SHIFT 4 /* IM_OTP_ERR_EINT */ #define WM831X_IM_OTP_ERR_EINT_WIDTH 1 /* IM_OTP_ERR_EINT */ #define WM831X_IM_PS_POR_EINT 0x0004 /* IM_PS_POR_EINT */ #define WM831X_IM_PS_POR_EINT_MASK 0x0004 /* IM_PS_POR_EINT */ #define WM831X_IM_PS_POR_EINT_SHIFT 2 /* IM_PS_POR_EINT */ #define WM831X_IM_PS_POR_EINT_WIDTH 1 /* IM_PS_POR_EINT */ #define WM831X_IM_PS_SLEEP_OFF_EINT 0x0002 /* IM_PS_SLEEP_OFF_EINT */ #define WM831X_IM_PS_SLEEP_OFF_EINT_MASK 0x0002 /* IM_PS_SLEEP_OFF_EINT */ #define WM831X_IM_PS_SLEEP_OFF_EINT_SHIFT 1 /* IM_PS_SLEEP_OFF_EINT */ #define WM831X_IM_PS_SLEEP_OFF_EINT_WIDTH 1 /* IM_PS_SLEEP_OFF_EINT */ #define WM831X_IM_PS_ON_WAKE_EINT 0x0001 /* IM_PS_ON_WAKE_EINT */ #define WM831X_IM_PS_ON_WAKE_EINT_MASK 0x0001 /* IM_PS_ON_WAKE_EINT */ #define WM831X_IM_PS_ON_WAKE_EINT_SHIFT 0 /* IM_PS_ON_WAKE_EINT */ #define WM831X_IM_PS_ON_WAKE_EINT_WIDTH 1 /* IM_PS_ON_WAKE_EINT */ /* * R16411 (0x401B) - Interrupt Status 3 Mask */ #define WM831X_IM_UV_LDO10_EINT 0x0200 /* IM_UV_LDO10_EINT */ #define WM831X_IM_UV_LDO10_EINT_MASK 0x0200 /* IM_UV_LDO10_EINT */ #define WM831X_IM_UV_LDO10_EINT_SHIFT 9 /* IM_UV_LDO10_EINT */ #define WM831X_IM_UV_LDO10_EINT_WIDTH 1 /* IM_UV_LDO10_EINT */ #define WM831X_IM_UV_LDO9_EINT 0x0100 /* IM_UV_LDO9_EINT */ #define WM831X_IM_UV_LDO9_EINT_MASK 0x0100 /* IM_UV_LDO9_EINT */ #define WM831X_IM_UV_LDO9_EINT_SHIFT 8 /* IM_UV_LDO9_EINT */ #define WM831X_IM_UV_LDO9_EINT_WIDTH 1 /* IM_UV_LDO9_EINT */ #define WM831X_IM_UV_LDO8_EINT 0x0080 /* IM_UV_LDO8_EINT */ #define WM831X_IM_UV_LDO8_EINT_MASK 0x0080 /* IM_UV_LDO8_EINT */ #define WM831X_IM_UV_LDO8_EINT_SHIFT 7 /* IM_UV_LDO8_EINT */ #define WM831X_IM_UV_LDO8_EINT_WIDTH 1 /* IM_UV_LDO8_EINT */ #define WM831X_IM_UV_LDO7_EINT 0x0040 /* IM_UV_LDO7_EINT */ #define WM831X_IM_UV_LDO7_EINT_MASK 0x0040 /* IM_UV_LDO7_EINT */ #define WM831X_IM_UV_LDO7_EINT_SHIFT 6 /* 
IM_UV_LDO7_EINT */ #define WM831X_IM_UV_LDO7_EINT_WIDTH 1 /* IM_UV_LDO7_EINT */ #define WM831X_IM_UV_LDO6_EINT 0x0020 /* IM_UV_LDO6_EINT */ #define WM831X_IM_UV_LDO6_EINT_MASK 0x0020 /* IM_UV_LDO6_EINT */ #define WM831X_IM_UV_LDO6_EINT_SHIFT 5 /* IM_UV_LDO6_EINT */ #define WM831X_IM_UV_LDO6_EINT_WIDTH 1 /* IM_UV_LDO6_EINT */ #define WM831X_IM_UV_LDO5_EINT 0x0010 /* IM_UV_LDO5_EINT */ #define WM831X_IM_UV_LDO5_EINT_MASK 0x0010 /* IM_UV_LDO5_EINT */ #define WM831X_IM_UV_LDO5_EINT_SHIFT 4 /* IM_UV_LDO5_EINT */ #define WM831X_IM_UV_LDO5_EINT_WIDTH 1 /* IM_UV_LDO5_EINT */ #define WM831X_IM_UV_LDO4_EINT 0x0008 /* IM_UV_LDO4_EINT */ #define WM831X_IM_UV_LDO4_EINT_MASK 0x0008 /* IM_UV_LDO4_EINT */ #define WM831X_IM_UV_LDO4_EINT_SHIFT 3 /* IM_UV_LDO4_EINT */ #define WM831X_IM_UV_LDO4_EINT_WIDTH 1 /* IM_UV_LDO4_EINT */ #define WM831X_IM_UV_LDO3_EINT 0x0004 /* IM_UV_LDO3_EINT */ #define WM831X_IM_UV_LDO3_EINT_MASK 0x0004 /* IM_UV_LDO3_EINT */ #define WM831X_IM_UV_LDO3_EINT_SHIFT 2 /* IM_UV_LDO3_EINT */ #define WM831X_IM_UV_LDO3_EINT_WIDTH 1 /* IM_UV_LDO3_EINT */ #define WM831X_IM_UV_LDO2_EINT 0x0002 /* IM_UV_LDO2_EINT */ #define WM831X_IM_UV_LDO2_EINT_MASK 0x0002 /* IM_UV_LDO2_EINT */ #define WM831X_IM_UV_LDO2_EINT_SHIFT 1 /* IM_UV_LDO2_EINT */ #define WM831X_IM_UV_LDO2_EINT_WIDTH 1 /* IM_UV_LDO2_EINT */ #define WM831X_IM_UV_LDO1_EINT 0x0001 /* IM_UV_LDO1_EINT */ #define WM831X_IM_UV_LDO1_EINT_MASK 0x0001 /* IM_UV_LDO1_EINT */ #define WM831X_IM_UV_LDO1_EINT_SHIFT 0 /* IM_UV_LDO1_EINT */ #define WM831X_IM_UV_LDO1_EINT_WIDTH 1 /* IM_UV_LDO1_EINT */ /* * R16412 (0x401C) - Interrupt Status 4 Mask */ #define WM831X_IM_HC_DC2_EINT 0x0200 /* IM_HC_DC2_EINT */ #define WM831X_IM_HC_DC2_EINT_MASK 0x0200 /* IM_HC_DC2_EINT */ #define WM831X_IM_HC_DC2_EINT_SHIFT 9 /* IM_HC_DC2_EINT */ #define WM831X_IM_HC_DC2_EINT_WIDTH 1 /* IM_HC_DC2_EINT */ #define WM831X_IM_HC_DC1_EINT 0x0100 /* IM_HC_DC1_EINT */ #define WM831X_IM_HC_DC1_EINT_MASK 0x0100 /* IM_HC_DC1_EINT */ #define WM831X_IM_HC_DC1_EINT_SHIFT 8 /* IM_HC_DC1_EINT */ #define WM831X_IM_HC_DC1_EINT_WIDTH 1 /* IM_HC_DC1_EINT */ #define WM831X_IM_UV_DC4_EINT 0x0008 /* IM_UV_DC4_EINT */ #define WM831X_IM_UV_DC4_EINT_MASK 0x0008 /* IM_UV_DC4_EINT */ #define WM831X_IM_UV_DC4_EINT_SHIFT 3 /* IM_UV_DC4_EINT */ #define WM831X_IM_UV_DC4_EINT_WIDTH 1 /* IM_UV_DC4_EINT */ #define WM831X_IM_UV_DC3_EINT 0x0004 /* IM_UV_DC3_EINT */ #define WM831X_IM_UV_DC3_EINT_MASK 0x0004 /* IM_UV_DC3_EINT */ #define WM831X_IM_UV_DC3_EINT_SHIFT 2 /* IM_UV_DC3_EINT */ #define WM831X_IM_UV_DC3_EINT_WIDTH 1 /* IM_UV_DC3_EINT */ #define WM831X_IM_UV_DC2_EINT 0x0002 /* IM_UV_DC2_EINT */ #define WM831X_IM_UV_DC2_EINT_MASK 0x0002 /* IM_UV_DC2_EINT */ #define WM831X_IM_UV_DC2_EINT_SHIFT 1 /* IM_UV_DC2_EINT */ #define WM831X_IM_UV_DC2_EINT_WIDTH 1 /* IM_UV_DC2_EINT */ #define WM831X_IM_UV_DC1_EINT 0x0001 /* IM_UV_DC1_EINT */ #define WM831X_IM_UV_DC1_EINT_MASK 0x0001 /* IM_UV_DC1_EINT */ #define WM831X_IM_UV_DC1_EINT_SHIFT 0 /* IM_UV_DC1_EINT */ #define WM831X_IM_UV_DC1_EINT_WIDTH 1 /* IM_UV_DC1_EINT */ /* * R16413 (0x401D) - Interrupt Status 5 Mask */ #define WM831X_IM_GP16_EINT 0x8000 /* IM_GP16_EINT */ #define WM831X_IM_GP16_EINT_MASK 0x8000 /* IM_GP16_EINT */ #define WM831X_IM_GP16_EINT_SHIFT 15 /* IM_GP16_EINT */ #define WM831X_IM_GP16_EINT_WIDTH 1 /* IM_GP16_EINT */ #define WM831X_IM_GP15_EINT 0x4000 /* IM_GP15_EINT */ #define WM831X_IM_GP15_EINT_MASK 0x4000 /* IM_GP15_EINT */ #define WM831X_IM_GP15_EINT_SHIFT 14 /* IM_GP15_EINT */ #define WM831X_IM_GP15_EINT_WIDTH 1 /* IM_GP15_EINT */ #define 
WM831X_IM_GP14_EINT 0x2000 /* IM_GP14_EINT */ #define WM831X_IM_GP14_EINT_MASK 0x2000 /* IM_GP14_EINT */ #define WM831X_IM_GP14_EINT_SHIFT 13 /* IM_GP14_EINT */ #define WM831X_IM_GP14_EINT_WIDTH 1 /* IM_GP14_EINT */ #define WM831X_IM_GP13_EINT 0x1000 /* IM_GP13_EINT */ #define WM831X_IM_GP13_EINT_MASK 0x1000 /* IM_GP13_EINT */ #define WM831X_IM_GP13_EINT_SHIFT 12 /* IM_GP13_EINT */ #define WM831X_IM_GP13_EINT_WIDTH 1 /* IM_GP13_EINT */ #define WM831X_IM_GP12_EINT 0x0800 /* IM_GP12_EINT */ #define WM831X_IM_GP12_EINT_MASK 0x0800 /* IM_GP12_EINT */ #define WM831X_IM_GP12_EINT_SHIFT 11 /* IM_GP12_EINT */ #define WM831X_IM_GP12_EINT_WIDTH 1 /* IM_GP12_EINT */ #define WM831X_IM_GP11_EINT 0x0400 /* IM_GP11_EINT */ #define WM831X_IM_GP11_EINT_MASK 0x0400 /* IM_GP11_EINT */ #define WM831X_IM_GP11_EINT_SHIFT 10 /* IM_GP11_EINT */ #define WM831X_IM_GP11_EINT_WIDTH 1 /* IM_GP11_EINT */ #define WM831X_IM_GP10_EINT 0x0200 /* IM_GP10_EINT */ #define WM831X_IM_GP10_EINT_MASK 0x0200 /* IM_GP10_EINT */ #define WM831X_IM_GP10_EINT_SHIFT 9 /* IM_GP10_EINT */ #define WM831X_IM_GP10_EINT_WIDTH 1 /* IM_GP10_EINT */ #define WM831X_IM_GP9_EINT 0x0100 /* IM_GP9_EINT */ #define WM831X_IM_GP9_EINT_MASK 0x0100 /* IM_GP9_EINT */ #define WM831X_IM_GP9_EINT_SHIFT 8 /* IM_GP9_EINT */ #define WM831X_IM_GP9_EINT_WIDTH 1 /* IM_GP9_EINT */ #define WM831X_IM_GP8_EINT 0x0080 /* IM_GP8_EINT */ #define WM831X_IM_GP8_EINT_MASK 0x0080 /* IM_GP8_EINT */ #define WM831X_IM_GP8_EINT_SHIFT 7 /* IM_GP8_EINT */ #define WM831X_IM_GP8_EINT_WIDTH 1 /* IM_GP8_EINT */ #define WM831X_IM_GP7_EINT 0x0040 /* IM_GP7_EINT */ #define WM831X_IM_GP7_EINT_MASK 0x0040 /* IM_GP7_EINT */ #define WM831X_IM_GP7_EINT_SHIFT 6 /* IM_GP7_EINT */ #define WM831X_IM_GP7_EINT_WIDTH 1 /* IM_GP7_EINT */ #define WM831X_IM_GP6_EINT 0x0020 /* IM_GP6_EINT */ #define WM831X_IM_GP6_EINT_MASK 0x0020 /* IM_GP6_EINT */ #define WM831X_IM_GP6_EINT_SHIFT 5 /* IM_GP6_EINT */ #define WM831X_IM_GP6_EINT_WIDTH 1 /* IM_GP6_EINT */ #define WM831X_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */ #define WM831X_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */ #define WM831X_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */ #define WM831X_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */ #define WM831X_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */ #define WM831X_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */ #define WM831X_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */ #define WM831X_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */ #define WM831X_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */ #define WM831X_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */ #define WM831X_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */ #define WM831X_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */ #define WM831X_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */ #define WM831X_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */ #define WM831X_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */ #define WM831X_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */ #define WM831X_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */ #define WM831X_IM_GP1_EINT_MASK 0x0001 /* IM_GP1_EINT */ #define WM831X_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */ #define WM831X_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */ #endif
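Every interrupt source above follows the same four-macro convention: a bit value, a _MASK, a _SHIFT and a _WIDTH. A minimal usage sketch, not part of the header: it assumes the wm831x core helpers wm831x_reg_read()/wm831x_reg_write() declared in linux/mfd/wm831x/core.h (not shown in this dump), and assumes the latched status bits are acknowledged by writing them back, as the wm831x IRQ driver does. The raw address 0x4012 is R16402 (Interrupt Status 2) documented above.

static int wm831x_charge_ended(struct wm831x *wm831x)
{
	/* R16402 (0x4012) - Interrupt Status 2 */
	int status = wm831x_reg_read(wm831x, 0x4012);

	if (status < 0)
		return status;		/* register read failed */

	if (!(status & WM831X_CHG_END_EINT))
		return 0;		/* no end-of-charge event latched */

	/* Assumption: status bits clear when the set bit is written back */
	wm831x_reg_write(wm831x, 0x4012, WM831X_CHG_END_EINT);
	return 1;
}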
mfd/wm831x/auxadc.h
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/auxadc.h -- Auxiliary ADC interface for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> */ #ifndef __MFD_WM831X_AUXADC_H__ #define __MFD_WM831X_AUXADC_H__ struct wm831x; /* * R16429 (0x402D) - AuxADC Data */ #define WM831X_AUX_DATA_SRC_MASK 0xF000 /* AUX_DATA_SRC - [15:12] */ #define WM831X_AUX_DATA_SRC_SHIFT 12 /* AUX_DATA_SRC - [15:12] */ #define WM831X_AUX_DATA_SRC_WIDTH 4 /* AUX_DATA_SRC - [15:12] */ #define WM831X_AUX_DATA_MASK 0x0FFF /* AUX_DATA - [11:0] */ #define WM831X_AUX_DATA_SHIFT 0 /* AUX_DATA - [11:0] */ #define WM831X_AUX_DATA_WIDTH 12 /* AUX_DATA - [11:0] */ /* * R16430 (0x402E) - AuxADC Control */ #define WM831X_AUX_ENA 0x8000 /* AUX_ENA */ #define WM831X_AUX_ENA_MASK 0x8000 /* AUX_ENA */ #define WM831X_AUX_ENA_SHIFT 15 /* AUX_ENA */ #define WM831X_AUX_ENA_WIDTH 1 /* AUX_ENA */ #define WM831X_AUX_CVT_ENA 0x4000 /* AUX_CVT_ENA */ #define WM831X_AUX_CVT_ENA_MASK 0x4000 /* AUX_CVT_ENA */ #define WM831X_AUX_CVT_ENA_SHIFT 14 /* AUX_CVT_ENA */ #define WM831X_AUX_CVT_ENA_WIDTH 1 /* AUX_CVT_ENA */ #define WM831X_AUX_SLPENA 0x1000 /* AUX_SLPENA */ #define WM831X_AUX_SLPENA_MASK 0x1000 /* AUX_SLPENA */ #define WM831X_AUX_SLPENA_SHIFT 12 /* AUX_SLPENA */ #define WM831X_AUX_SLPENA_WIDTH 1 /* AUX_SLPENA */ #define WM831X_AUX_FRC_ENA 0x0800 /* AUX_FRC_ENA */ #define WM831X_AUX_FRC_ENA_MASK 0x0800 /* AUX_FRC_ENA */ #define WM831X_AUX_FRC_ENA_SHIFT 11 /* AUX_FRC_ENA */ #define WM831X_AUX_FRC_ENA_WIDTH 1 /* AUX_FRC_ENA */ #define WM831X_AUX_RATE_MASK 0x003F /* AUX_RATE - [5:0] */ #define WM831X_AUX_RATE_SHIFT 0 /* AUX_RATE - [5:0] */ #define WM831X_AUX_RATE_WIDTH 6 /* AUX_RATE - [5:0] */ /* * R16431 (0x402F) - AuxADC Source */ #define WM831X_AUX_CAL_SEL 0x8000 /* AUX_CAL_SEL */ #define WM831X_AUX_CAL_SEL_MASK 0x8000 /* AUX_CAL_SEL */ #define WM831X_AUX_CAL_SEL_SHIFT 15 /* AUX_CAL_SEL */ #define WM831X_AUX_CAL_SEL_WIDTH 1 /* AUX_CAL_SEL */ #define WM831X_AUX_BKUP_BATT_SEL 0x0400 /* AUX_BKUP_BATT_SEL */ #define WM831X_AUX_BKUP_BATT_SEL_MASK 0x0400 /* AUX_BKUP_BATT_SEL */ #define WM831X_AUX_BKUP_BATT_SEL_SHIFT 10 /* AUX_BKUP_BATT_SEL */ #define WM831X_AUX_BKUP_BATT_SEL_WIDTH 1 /* AUX_BKUP_BATT_SEL */ #define WM831X_AUX_WALL_SEL 0x0200 /* AUX_WALL_SEL */ #define WM831X_AUX_WALL_SEL_MASK 0x0200 /* AUX_WALL_SEL */ #define WM831X_AUX_WALL_SEL_SHIFT 9 /* AUX_WALL_SEL */ #define WM831X_AUX_WALL_SEL_WIDTH 1 /* AUX_WALL_SEL */ #define WM831X_AUX_BATT_SEL 0x0100 /* AUX_BATT_SEL */ #define WM831X_AUX_BATT_SEL_MASK 0x0100 /* AUX_BATT_SEL */ #define WM831X_AUX_BATT_SEL_SHIFT 8 /* AUX_BATT_SEL */ #define WM831X_AUX_BATT_SEL_WIDTH 1 /* AUX_BATT_SEL */ #define WM831X_AUX_USB_SEL 0x0080 /* AUX_USB_SEL */ #define WM831X_AUX_USB_SEL_MASK 0x0080 /* AUX_USB_SEL */ #define WM831X_AUX_USB_SEL_SHIFT 7 /* AUX_USB_SEL */ #define WM831X_AUX_USB_SEL_WIDTH 1 /* AUX_USB_SEL */ #define WM831X_AUX_SYSVDD_SEL 0x0040 /* AUX_SYSVDD_SEL */ #define WM831X_AUX_SYSVDD_SEL_MASK 0x0040 /* AUX_SYSVDD_SEL */ #define WM831X_AUX_SYSVDD_SEL_SHIFT 6 /* AUX_SYSVDD_SEL */ #define WM831X_AUX_SYSVDD_SEL_WIDTH 1 /* AUX_SYSVDD_SEL */ #define WM831X_AUX_BATT_TEMP_SEL 0x0020 /* AUX_BATT_TEMP_SEL */ #define WM831X_AUX_BATT_TEMP_SEL_MASK 0x0020 /* AUX_BATT_TEMP_SEL */ #define WM831X_AUX_BATT_TEMP_SEL_SHIFT 5 /* AUX_BATT_TEMP_SEL */ #define WM831X_AUX_BATT_TEMP_SEL_WIDTH 1 /* AUX_BATT_TEMP_SEL */ #define WM831X_AUX_CHIP_TEMP_SEL 0x0010 /* AUX_CHIP_TEMP_SEL */ #define WM831X_AUX_CHIP_TEMP_SEL_MASK 0x0010 /* AUX_CHIP_TEMP_SEL */ #define WM831X_AUX_CHIP_TEMP_SEL_SHIFT 4 /* AUX_CHIP_TEMP_SEL */ #define WM831X_AUX_CHIP_TEMP_SEL_WIDTH 1 /* AUX_CHIP_TEMP_SEL */ #define
WM831X_AUX_AUX4_SEL 0x0008 /* AUX_AUX4_SEL */ #define WM831X_AUX_AUX4_SEL_MASK 0x0008 /* AUX_AUX4_SEL */ #define WM831X_AUX_AUX4_SEL_SHIFT 3 /* AUX_AUX4_SEL */ #define WM831X_AUX_AUX4_SEL_WIDTH 1 /* AUX_AUX4_SEL */ #define WM831X_AUX_AUX3_SEL 0x0004 /* AUX_AUX3_SEL */ #define WM831X_AUX_AUX3_SEL_MASK 0x0004 /* AUX_AUX3_SEL */ #define WM831X_AUX_AUX3_SEL_SHIFT 2 /* AUX_AUX3_SEL */ #define WM831X_AUX_AUX3_SEL_WIDTH 1 /* AUX_AUX3_SEL */ #define WM831X_AUX_AUX2_SEL 0x0002 /* AUX_AUX2_SEL */ #define WM831X_AUX_AUX2_SEL_MASK 0x0002 /* AUX_AUX2_SEL */ #define WM831X_AUX_AUX2_SEL_SHIFT 1 /* AUX_AUX2_SEL */ #define WM831X_AUX_AUX2_SEL_WIDTH 1 /* AUX_AUX2_SEL */ #define WM831X_AUX_AUX1_SEL 0x0001 /* AUX_AUX1_SEL */ #define WM831X_AUX_AUX1_SEL_MASK 0x0001 /* AUX_AUX1_SEL */ #define WM831X_AUX_AUX1_SEL_SHIFT 0 /* AUX_AUX1_SEL */ #define WM831X_AUX_AUX1_SEL_WIDTH 1 /* AUX_AUX1_SEL */ /* * R16432 (0x4030) - Comparator Control */ #define WM831X_DCOMP4_STS 0x0800 /* DCOMP4_STS */ #define WM831X_DCOMP4_STS_MASK 0x0800 /* DCOMP4_STS */ #define WM831X_DCOMP4_STS_SHIFT 11 /* DCOMP4_STS */ #define WM831X_DCOMP4_STS_WIDTH 1 /* DCOMP4_STS */ #define WM831X_DCOMP3_STS 0x0400 /* DCOMP3_STS */ #define WM831X_DCOMP3_STS_MASK 0x0400 /* DCOMP3_STS */ #define WM831X_DCOMP3_STS_SHIFT 10 /* DCOMP3_STS */ #define WM831X_DCOMP3_STS_WIDTH 1 /* DCOMP3_STS */ #define WM831X_DCOMP2_STS 0x0200 /* DCOMP2_STS */ #define WM831X_DCOMP2_STS_MASK 0x0200 /* DCOMP2_STS */ #define WM831X_DCOMP2_STS_SHIFT 9 /* DCOMP2_STS */ #define WM831X_DCOMP2_STS_WIDTH 1 /* DCOMP2_STS */ #define WM831X_DCOMP1_STS 0x0100 /* DCOMP1_STS */ #define WM831X_DCOMP1_STS_MASK 0x0100 /* DCOMP1_STS */ #define WM831X_DCOMP1_STS_SHIFT 8 /* DCOMP1_STS */ #define WM831X_DCOMP1_STS_WIDTH 1 /* DCOMP1_STS */ #define WM831X_DCMP4_ENA 0x0008 /* DCMP4_ENA */ #define WM831X_DCMP4_ENA_MASK 0x0008 /* DCMP4_ENA */ #define WM831X_DCMP4_ENA_SHIFT 3 /* DCMP4_ENA */ #define WM831X_DCMP4_ENA_WIDTH 1 /* DCMP4_ENA */ #define WM831X_DCMP3_ENA 0x0004 /* DCMP3_ENA */ #define WM831X_DCMP3_ENA_MASK 0x0004 /* DCMP3_ENA */ #define WM831X_DCMP3_ENA_SHIFT 2 /* DCMP3_ENA */ #define WM831X_DCMP3_ENA_WIDTH 1 /* DCMP3_ENA */ #define WM831X_DCMP2_ENA 0x0002 /* DCMP2_ENA */ #define WM831X_DCMP2_ENA_MASK 0x0002 /* DCMP2_ENA */ #define WM831X_DCMP2_ENA_SHIFT 1 /* DCMP2_ENA */ #define WM831X_DCMP2_ENA_WIDTH 1 /* DCMP2_ENA */ #define WM831X_DCMP1_ENA 0x0001 /* DCMP1_ENA */ #define WM831X_DCMP1_ENA_MASK 0x0001 /* DCMP1_ENA */ #define WM831X_DCMP1_ENA_SHIFT 0 /* DCMP1_ENA */ #define WM831X_DCMP1_ENA_WIDTH 1 /* DCMP1_ENA */ /* * R16433 (0x4031) - Comparator 1 */ #define WM831X_DCMP1_SRC_MASK 0xE000 /* DCMP1_SRC - [15:13] */ #define WM831X_DCMP1_SRC_SHIFT 13 /* DCMP1_SRC - [15:13] */ #define WM831X_DCMP1_SRC_WIDTH 3 /* DCMP1_SRC - [15:13] */ #define WM831X_DCMP1_GT 0x1000 /* DCMP1_GT */ #define WM831X_DCMP1_GT_MASK 0x1000 /* DCMP1_GT */ #define WM831X_DCMP1_GT_SHIFT 12 /* DCMP1_GT */ #define WM831X_DCMP1_GT_WIDTH 1 /* DCMP1_GT */ #define WM831X_DCMP1_THR_MASK 0x0FFF /* DCMP1_THR - [11:0] */ #define WM831X_DCMP1_THR_SHIFT 0 /* DCMP1_THR - [11:0] */ #define WM831X_DCMP1_THR_WIDTH 12 /* DCMP1_THR - [11:0] */ /* * R16434 (0x4032) - Comparator 2 */ #define WM831X_DCMP2_SRC_MASK 0xE000 /* DCMP2_SRC - [15:13] */ #define WM831X_DCMP2_SRC_SHIFT 13 /* DCMP2_SRC - [15:13] */ #define WM831X_DCMP2_SRC_WIDTH 3 /* DCMP2_SRC - [15:13] */ #define WM831X_DCMP2_GT 0x1000 /* DCMP2_GT */ #define WM831X_DCMP2_GT_MASK 0x1000 /* DCMP2_GT */ #define WM831X_DCMP2_GT_SHIFT 12 /* DCMP2_GT */ #define WM831X_DCMP2_GT_WIDTH 1 /* 
DCMP2_GT */ #define WM831X_DCMP2_THR_MASK 0x0FFF /* DCMP2_THR - [11:0] */ #define WM831X_DCMP2_THR_SHIFT 0 /* DCMP2_THR - [11:0] */ #define WM831X_DCMP2_THR_WIDTH 12 /* DCMP2_THR - [11:0] */ /* * R16435 (0x4033) - Comparator 3 */ #define WM831X_DCMP3_SRC_MASK 0xE000 /* DCMP3_SRC - [15:13] */ #define WM831X_DCMP3_SRC_SHIFT 13 /* DCMP3_SRC - [15:13] */ #define WM831X_DCMP3_SRC_WIDTH 3 /* DCMP3_SRC - [15:13] */ #define WM831X_DCMP3_GT 0x1000 /* DCMP3_GT */ #define WM831X_DCMP3_GT_MASK 0x1000 /* DCMP3_GT */ #define WM831X_DCMP3_GT_SHIFT 12 /* DCMP3_GT */ #define WM831X_DCMP3_GT_WIDTH 1 /* DCMP3_GT */ #define WM831X_DCMP3_THR_MASK 0x0FFF /* DCMP3_THR - [11:0] */ #define WM831X_DCMP3_THR_SHIFT 0 /* DCMP3_THR - [11:0] */ #define WM831X_DCMP3_THR_WIDTH 12 /* DCMP3_THR - [11:0] */ /* * R16436 (0x4034) - Comparator 4 */ #define WM831X_DCMP4_SRC_MASK 0xE000 /* DCMP4_SRC - [15:13] */ #define WM831X_DCMP4_SRC_SHIFT 13 /* DCMP4_SRC - [15:13] */ #define WM831X_DCMP4_SRC_WIDTH 3 /* DCMP4_SRC - [15:13] */ #define WM831X_DCMP4_GT 0x1000 /* DCMP4_GT */ #define WM831X_DCMP4_GT_MASK 0x1000 /* DCMP4_GT */ #define WM831X_DCMP4_GT_SHIFT 12 /* DCMP4_GT */ #define WM831X_DCMP4_GT_WIDTH 1 /* DCMP4_GT */ #define WM831X_DCMP4_THR_MASK 0x0FFF /* DCMP4_THR - [11:0] */ #define WM831X_DCMP4_THR_SHIFT 0 /* DCMP4_THR - [11:0] */ #define WM831X_DCMP4_THR_WIDTH 12 /* DCMP4_THR - [11:0] */ #define WM831X_AUX_CAL_FACTOR 0xfff #define WM831X_AUX_CAL_NOMINAL 0x222 enum wm831x_auxadc { WM831X_AUX_CAL = 15, WM831X_AUX_BKUP_BATT = 10, WM831X_AUX_WALL = 9, WM831X_AUX_BATT = 8, WM831X_AUX_USB = 7, WM831X_AUX_SYSVDD = 6, WM831X_AUX_BATT_TEMP = 5, WM831X_AUX_CHIP_TEMP = 4, WM831X_AUX_AUX4 = 3, WM831X_AUX_AUX3 = 2, WM831X_AUX_AUX2 = 1, WM831X_AUX_AUX1 = 0, }; int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input); int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input); #endif
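The enum and the two prototypes above are the whole public AuxADC interface: wm831x_auxadc_read() returns the raw conversion result for the selected channel and wm831x_auxadc_read_uv() scales it to microvolts. A hedged usage sketch, not part of the header: it assumes the wm831x_reg_read() helper and a struct wm831x with a dev member as declared in linux/mfd/wm831x/core.h (not shown in this dump). It also decodes R16429 (0x402D) by hand with the AUX_DATA field macros, which is where the _MASK/_SHIFT pairs earn their keep.

static int wm831x_sample_battery(struct wm831x *wm831x)
{
	int uv = wm831x_auxadc_read_uv(wm831x, WM831X_AUX_BATT);
	int reg, src, data;

	if (uv < 0)
		return uv;		/* conversion failed */

	/* R16429 (0x402D) - AuxADC Data: source in [15:12], result in [11:0] */
	reg = wm831x_reg_read(wm831x, 0x402D);
	if (reg < 0)
		return reg;

	src = (reg & WM831X_AUX_DATA_SRC_MASK) >> WM831X_AUX_DATA_SRC_SHIFT;
	data = reg & WM831X_AUX_DATA_MASK;
	dev_dbg(wm831x->dev, "AUXADC src %d raw %d -> %d uV\n", src, data, uv);

	return 0;
}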
mfd/wm831x/regulator.h
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/mfd/wm831x/regulator.h -- Regulator definitions for wm831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> */ #ifndef __MFD_WM831X_REGULATOR_H__ #define __MFD_WM831X_REGULATOR_H__ /* * R16462 (0x404E) - Current Sink 1 */ #define WM831X_CS1_ENA 0x8000 /* CS1_ENA */ #define WM831X_CS1_ENA_MASK 0x8000 /* CS1_ENA */ #define WM831X_CS1_ENA_SHIFT 15 /* CS1_ENA */ #define WM831X_CS1_ENA_WIDTH 1 /* CS1_ENA */ #define WM831X_CS1_DRIVE 0x4000 /* CS1_DRIVE */ #define WM831X_CS1_DRIVE_MASK 0x4000 /* CS1_DRIVE */ #define WM831X_CS1_DRIVE_SHIFT 14 /* CS1_DRIVE */ #define WM831X_CS1_DRIVE_WIDTH 1 /* CS1_DRIVE */ #define WM831X_CS1_SLPENA 0x1000 /* CS1_SLPENA */ #define WM831X_CS1_SLPENA_MASK 0x1000 /* CS1_SLPENA */ #define WM831X_CS1_SLPENA_SHIFT 12 /* CS1_SLPENA */ #define WM831X_CS1_SLPENA_WIDTH 1 /* CS1_SLPENA */ #define WM831X_CS1_OFF_RAMP_MASK 0x0C00 /* CS1_OFF_RAMP - [11:10] */ #define WM831X_CS1_OFF_RAMP_SHIFT 10 /* CS1_OFF_RAMP - [11:10] */ #define WM831X_CS1_OFF_RAMP_WIDTH 2 /* CS1_OFF_RAMP - [11:10] */ #define WM831X_CS1_ON_RAMP_MASK 0x0300 /* CS1_ON_RAMP - [9:8] */ #define WM831X_CS1_ON_RAMP_SHIFT 8 /* CS1_ON_RAMP - [9:8] */ #define WM831X_CS1_ON_RAMP_WIDTH 2 /* CS1_ON_RAMP - [9:8] */ #define WM831X_CS1_ISEL_MASK 0x003F /* CS1_ISEL - [5:0] */ #define WM831X_CS1_ISEL_SHIFT 0 /* CS1_ISEL - [5:0] */ #define WM831X_CS1_ISEL_WIDTH 6 /* CS1_ISEL - [5:0] */ /* * R16463 (0x404F) - Current Sink 2 */ #define WM831X_CS2_ENA 0x8000 /* CS2_ENA */ #define WM831X_CS2_ENA_MASK 0x8000 /* CS2_ENA */ #define WM831X_CS2_ENA_SHIFT 15 /* CS2_ENA */ #define WM831X_CS2_ENA_WIDTH 1 /* CS2_ENA */ #define WM831X_CS2_DRIVE 0x4000 /* CS2_DRIVE */ #define WM831X_CS2_DRIVE_MASK 0x4000 /* CS2_DRIVE */ #define WM831X_CS2_DRIVE_SHIFT 14 /* CS2_DRIVE */ #define WM831X_CS2_DRIVE_WIDTH 1 /* CS2_DRIVE */ #define WM831X_CS2_SLPENA 0x1000 /* CS2_SLPENA */ #define WM831X_CS2_SLPENA_MASK 0x1000 /* CS2_SLPENA */ #define WM831X_CS2_SLPENA_SHIFT 12 /* CS2_SLPENA */ #define WM831X_CS2_SLPENA_WIDTH 1 /* CS2_SLPENA */ #define WM831X_CS2_OFF_RAMP_MASK 0x0C00 /* CS2_OFF_RAMP - [11:10] */ #define WM831X_CS2_OFF_RAMP_SHIFT 10 /* CS2_OFF_RAMP - [11:10] */ #define WM831X_CS2_OFF_RAMP_WIDTH 2 /* CS2_OFF_RAMP - [11:10] */ #define WM831X_CS2_ON_RAMP_MASK 0x0300 /* CS2_ON_RAMP - [9:8] */ #define WM831X_CS2_ON_RAMP_SHIFT 8 /* CS2_ON_RAMP - [9:8] */ #define WM831X_CS2_ON_RAMP_WIDTH 2 /* CS2_ON_RAMP - [9:8] */ #define WM831X_CS2_ISEL_MASK 0x003F /* CS2_ISEL - [5:0] */ #define WM831X_CS2_ISEL_SHIFT 0 /* CS2_ISEL - [5:0] */ #define WM831X_CS2_ISEL_WIDTH 6 /* CS2_ISEL - [5:0] */ /* * R16464 (0x4050) - DCDC Enable */ #define WM831X_EPE2_ENA 0x0080 /* EPE2_ENA */ #define WM831X_EPE2_ENA_MASK 0x0080 /* EPE2_ENA */ #define WM831X_EPE2_ENA_SHIFT 7 /* EPE2_ENA */ #define WM831X_EPE2_ENA_WIDTH 1 /* EPE2_ENA */ #define WM831X_EPE1_ENA 0x0040 /* EPE1_ENA */ #define WM831X_EPE1_ENA_MASK 0x0040 /* EPE1_ENA */ #define WM831X_EPE1_ENA_SHIFT 6 /* EPE1_ENA */ #define WM831X_EPE1_ENA_WIDTH 1 /* EPE1_ENA */ #define WM831X_DC4_ENA 0x0008 /* DC4_ENA */ #define WM831X_DC4_ENA_MASK 0x0008 /* DC4_ENA */ #define WM831X_DC4_ENA_SHIFT 3 /* DC4_ENA */ #define WM831X_DC4_ENA_WIDTH 1 /* DC4_ENA */ #define WM831X_DC3_ENA 0x0004 /* DC3_ENA */ #define WM831X_DC3_ENA_MASK 0x0004 /* DC3_ENA */ #define WM831X_DC3_ENA_SHIFT 2 /* DC3_ENA */ #define WM831X_DC3_ENA_WIDTH 1 /* DC3_ENA */ #define WM831X_DC2_ENA 0x0002 /* DC2_ENA */ #define WM831X_DC2_ENA_MASK 0x0002 /* DC2_ENA */ #define WM831X_DC2_ENA_SHIFT 1 /* DC2_ENA */ #define WM831X_DC2_ENA_WIDTH 1 /* DC2_ENA */ #define WM831X_DC1_ENA 0x0001 /* DC1_ENA */ #define WM831X_DC1_ENA_MASK
0x0001 /* DC1_ENA */ #define WM831X_DC1_ENA_SHIFT 0 /* DC1_ENA */ #define WM831X_DC1_ENA_WIDTH 1 /* DC1_ENA */ /* * R16465 (0x4051) - LDO Enable */ #define WM831X_LDO11_ENA 0x0400 /* LDO11_ENA */ #define WM831X_LDO11_ENA_MASK 0x0400 /* LDO11_ENA */ #define WM831X_LDO11_ENA_SHIFT 10 /* LDO11_ENA */ #define WM831X_LDO11_ENA_WIDTH 1 /* LDO11_ENA */ #define WM831X_LDO10_ENA 0x0200 /* LDO10_ENA */ #define WM831X_LDO10_ENA_MASK 0x0200 /* LDO10_ENA */ #define WM831X_LDO10_ENA_SHIFT 9 /* LDO10_ENA */ #define WM831X_LDO10_ENA_WIDTH 1 /* LDO10_ENA */ #define WM831X_LDO9_ENA 0x0100 /* LDO9_ENA */ #define WM831X_LDO9_ENA_MASK 0x0100 /* LDO9_ENA */ #define WM831X_LDO9_ENA_SHIFT 8 /* LDO9_ENA */ #define WM831X_LDO9_ENA_WIDTH 1 /* LDO9_ENA */ #define WM831X_LDO8_ENA 0x0080 /* LDO8_ENA */ #define WM831X_LDO8_ENA_MASK 0x0080 /* LDO8_ENA */ #define WM831X_LDO8_ENA_SHIFT 7 /* LDO8_ENA */ #define WM831X_LDO8_ENA_WIDTH 1 /* LDO8_ENA */ #define WM831X_LDO7_ENA 0x0040 /* LDO7_ENA */ #define WM831X_LDO7_ENA_MASK 0x0040 /* LDO7_ENA */ #define WM831X_LDO7_ENA_SHIFT 6 /* LDO7_ENA */ #define WM831X_LDO7_ENA_WIDTH 1 /* LDO7_ENA */ #define WM831X_LDO6_ENA 0x0020 /* LDO6_ENA */ #define WM831X_LDO6_ENA_MASK 0x0020 /* LDO6_ENA */ #define WM831X_LDO6_ENA_SHIFT 5 /* LDO6_ENA */ #define WM831X_LDO6_ENA_WIDTH 1 /* LDO6_ENA */ #define WM831X_LDO5_ENA 0x0010 /* LDO5_ENA */ #define WM831X_LDO5_ENA_MASK 0x0010 /* LDO5_ENA */ #define WM831X_LDO5_ENA_SHIFT 4 /* LDO5_ENA */ #define WM831X_LDO5_ENA_WIDTH 1 /* LDO5_ENA */ #define WM831X_LDO4_ENA 0x0008 /* LDO4_ENA */ #define WM831X_LDO4_ENA_MASK 0x0008 /* LDO4_ENA */ #define WM831X_LDO4_ENA_SHIFT 3 /* LDO4_ENA */ #define WM831X_LDO4_ENA_WIDTH 1 /* LDO4_ENA */ #define WM831X_LDO3_ENA 0x0004 /* LDO3_ENA */ #define WM831X_LDO3_ENA_MASK 0x0004 /* LDO3_ENA */ #define WM831X_LDO3_ENA_SHIFT 2 /* LDO3_ENA */ #define WM831X_LDO3_ENA_WIDTH 1 /* LDO3_ENA */ #define WM831X_LDO2_ENA 0x0002 /* LDO2_ENA */ #define WM831X_LDO2_ENA_MASK 0x0002 /* LDO2_ENA */ #define WM831X_LDO2_ENA_SHIFT 1 /* LDO2_ENA */ #define WM831X_LDO2_ENA_WIDTH 1 /* LDO2_ENA */ #define WM831X_LDO1_ENA 0x0001 /* LDO1_ENA */ #define WM831X_LDO1_ENA_MASK 0x0001 /* LDO1_ENA */ #define WM831X_LDO1_ENA_SHIFT 0 /* LDO1_ENA */ #define WM831X_LDO1_ENA_WIDTH 1 /* LDO1_ENA */ /* * R16466 (0x4052) - DCDC Status */ #define WM831X_EPE2_STS 0x0080 /* EPE2_STS */ #define WM831X_EPE2_STS_MASK 0x0080 /* EPE2_STS */ #define WM831X_EPE2_STS_SHIFT 7 /* EPE2_STS */ #define WM831X_EPE2_STS_WIDTH 1 /* EPE2_STS */ #define WM831X_EPE1_STS 0x0040 /* EPE1_STS */ #define WM831X_EPE1_STS_MASK 0x0040 /* EPE1_STS */ #define WM831X_EPE1_STS_SHIFT 6 /* EPE1_STS */ #define WM831X_EPE1_STS_WIDTH 1 /* EPE1_STS */ #define WM831X_DC4_STS 0x0008 /* DC4_STS */ #define WM831X_DC4_STS_MASK 0x0008 /* DC4_STS */ #define WM831X_DC4_STS_SHIFT 3 /* DC4_STS */ #define WM831X_DC4_STS_WIDTH 1 /* DC4_STS */ #define WM831X_DC3_STS 0x0004 /* DC3_STS */ #define WM831X_DC3_STS_MASK 0x0004 /* DC3_STS */ #define WM831X_DC3_STS_SHIFT 2 /* DC3_STS */ #define WM831X_DC3_STS_WIDTH 1 /* DC3_STS */ #define WM831X_DC2_STS 0x0002 /* DC2_STS */ #define WM831X_DC2_STS_MASK 0x0002 /* DC2_STS */ #define WM831X_DC2_STS_SHIFT 1 /* DC2_STS */ #define WM831X_DC2_STS_WIDTH 1 /* DC2_STS */ #define WM831X_DC1_STS 0x0001 /* DC1_STS */ #define WM831X_DC1_STS_MASK 0x0001 /* DC1_STS */ #define WM831X_DC1_STS_SHIFT 0 /* DC1_STS */ #define WM831X_DC1_STS_WIDTH 1 /* DC1_STS */ /* * R16467 (0x4053) - LDO Status */ #define WM831X_LDO11_STS 0x0400 /* LDO11_STS */ #define WM831X_LDO11_STS_MASK 0x0400 /* LDO11_STS 
*/ #define WM831X_LDO11_STS_SHIFT 10 /* LDO11_STS */ #define WM831X_LDO11_STS_WIDTH 1 /* LDO11_STS */ #define WM831X_LDO10_STS 0x0200 /* LDO10_STS */ #define WM831X_LDO10_STS_MASK 0x0200 /* LDO10_STS */ #define WM831X_LDO10_STS_SHIFT 9 /* LDO10_STS */ #define WM831X_LDO10_STS_WIDTH 1 /* LDO10_STS */ #define WM831X_LDO9_STS 0x0100 /* LDO9_STS */ #define WM831X_LDO9_STS_MASK 0x0100 /* LDO9_STS */ #define WM831X_LDO9_STS_SHIFT 8 /* LDO9_STS */ #define WM831X_LDO9_STS_WIDTH 1 /* LDO9_STS */ #define WM831X_LDO8_STS 0x0080 /* LDO8_STS */ #define WM831X_LDO8_STS_MASK 0x0080 /* LDO8_STS */ #define WM831X_LDO8_STS_SHIFT 7 /* LDO8_STS */ #define WM831X_LDO8_STS_WIDTH 1 /* LDO8_STS */ #define WM831X_LDO7_STS 0x0040 /* LDO7_STS */ #define WM831X_LDO7_STS_MASK 0x0040 /* LDO7_STS */ #define WM831X_LDO7_STS_SHIFT 6 /* LDO7_STS */ #define WM831X_LDO7_STS_WIDTH 1 /* LDO7_STS */ #define WM831X_LDO6_STS 0x0020 /* LDO6_STS */ #define WM831X_LDO6_STS_MASK 0x0020 /* LDO6_STS */ #define WM831X_LDO6_STS_SHIFT 5 /* LDO6_STS */ #define WM831X_LDO6_STS_WIDTH 1 /* LDO6_STS */ #define WM831X_LDO5_STS 0x0010 /* LDO5_STS */ #define WM831X_LDO5_STS_MASK 0x0010 /* LDO5_STS */ #define WM831X_LDO5_STS_SHIFT 4 /* LDO5_STS */ #define WM831X_LDO5_STS_WIDTH 1 /* LDO5_STS */ #define WM831X_LDO4_STS 0x0008 /* LDO4_STS */ #define WM831X_LDO4_STS_MASK 0x0008 /* LDO4_STS */ #define WM831X_LDO4_STS_SHIFT 3 /* LDO4_STS */ #define WM831X_LDO4_STS_WIDTH 1 /* LDO4_STS */ #define WM831X_LDO3_STS 0x0004 /* LDO3_STS */ #define WM831X_LDO3_STS_MASK 0x0004 /* LDO3_STS */ #define WM831X_LDO3_STS_SHIFT 2 /* LDO3_STS */ #define WM831X_LDO3_STS_WIDTH 1 /* LDO3_STS */ #define WM831X_LDO2_STS 0x0002 /* LDO2_STS */ #define WM831X_LDO2_STS_MASK 0x0002 /* LDO2_STS */ #define WM831X_LDO2_STS_SHIFT 1 /* LDO2_STS */ #define WM831X_LDO2_STS_WIDTH 1 /* LDO2_STS */ #define WM831X_LDO1_STS 0x0001 /* LDO1_STS */ #define WM831X_LDO1_STS_MASK 0x0001 /* LDO1_STS */ #define WM831X_LDO1_STS_SHIFT 0 /* LDO1_STS */ #define WM831X_LDO1_STS_WIDTH 1 /* LDO1_STS */ /* * R16468 (0x4054) - DCDC UV Status */ #define WM831X_DC2_OV_STS 0x2000 /* DC2_OV_STS */ #define WM831X_DC2_OV_STS_MASK 0x2000 /* DC2_OV_STS */ #define WM831X_DC2_OV_STS_SHIFT 13 /* DC2_OV_STS */ #define WM831X_DC2_OV_STS_WIDTH 1 /* DC2_OV_STS */ #define WM831X_DC1_OV_STS 0x1000 /* DC1_OV_STS */ #define WM831X_DC1_OV_STS_MASK 0x1000 /* DC1_OV_STS */ #define WM831X_DC1_OV_STS_SHIFT 12 /* DC1_OV_STS */ #define WM831X_DC1_OV_STS_WIDTH 1 /* DC1_OV_STS */ #define WM831X_DC2_HC_STS 0x0200 /* DC2_HC_STS */ #define WM831X_DC2_HC_STS_MASK 0x0200 /* DC2_HC_STS */ #define WM831X_DC2_HC_STS_SHIFT 9 /* DC2_HC_STS */ #define WM831X_DC2_HC_STS_WIDTH 1 /* DC2_HC_STS */ #define WM831X_DC1_HC_STS 0x0100 /* DC1_HC_STS */ #define WM831X_DC1_HC_STS_MASK 0x0100 /* DC1_HC_STS */ #define WM831X_DC1_HC_STS_SHIFT 8 /* DC1_HC_STS */ #define WM831X_DC1_HC_STS_WIDTH 1 /* DC1_HC_STS */ #define WM831X_DC4_UV_STS 0x0008 /* DC4_UV_STS */ #define WM831X_DC4_UV_STS_MASK 0x0008 /* DC4_UV_STS */ #define WM831X_DC4_UV_STS_SHIFT 3 /* DC4_UV_STS */ #define WM831X_DC4_UV_STS_WIDTH 1 /* DC4_UV_STS */ #define WM831X_DC3_UV_STS 0x0004 /* DC3_UV_STS */ #define WM831X_DC3_UV_STS_MASK 0x0004 /* DC3_UV_STS */ #define WM831X_DC3_UV_STS_SHIFT 2 /* DC3_UV_STS */ #define WM831X_DC3_UV_STS_WIDTH 1 /* DC3_UV_STS */ #define WM831X_DC2_UV_STS 0x0002 /* DC2_UV_STS */ #define WM831X_DC2_UV_STS_MASK 0x0002 /* DC2_UV_STS */ #define WM831X_DC2_UV_STS_SHIFT 1 /* DC2_UV_STS */ #define WM831X_DC2_UV_STS_WIDTH 1 /* DC2_UV_STS */ #define WM831X_DC1_UV_STS 0x0001 /* 
DC1_UV_STS */ #define WM831X_DC1_UV_STS_MASK 0x0001 /* DC1_UV_STS */ #define WM831X_DC1_UV_STS_SHIFT 0 /* DC1_UV_STS */ #define WM831X_DC1_UV_STS_WIDTH 1 /* DC1_UV_STS */ /* * R16469 (0x4055) - LDO UV Status */ #define WM831X_INTLDO_UV_STS 0x8000 /* INTLDO_UV_STS */ #define WM831X_INTLDO_UV_STS_MASK 0x8000 /* INTLDO_UV_STS */ #define WM831X_INTLDO_UV_STS_SHIFT 15 /* INTLDO_UV_STS */ #define WM831X_INTLDO_UV_STS_WIDTH 1 /* INTLDO_UV_STS */ #define WM831X_LDO10_UV_STS 0x0200 /* LDO10_UV_STS */ #define WM831X_LDO10_UV_STS_MASK 0x0200 /* LDO10_UV_STS */ #define WM831X_LDO10_UV_STS_SHIFT 9 /* LDO10_UV_STS */ #define WM831X_LDO10_UV_STS_WIDTH 1 /* LDO10_UV_STS */ #define WM831X_LDO9_UV_STS 0x0100 /* LDO9_UV_STS */ #define WM831X_LDO9_UV_STS_MASK 0x0100 /* LDO9_UV_STS */ #define WM831X_LDO9_UV_STS_SHIFT 8 /* LDO9_UV_STS */ #define WM831X_LDO9_UV_STS_WIDTH 1 /* LDO9_UV_STS */ #define WM831X_LDO8_UV_STS 0x0080 /* LDO8_UV_STS */ #define WM831X_LDO8_UV_STS_MASK 0x0080 /* LDO8_UV_STS */ #define WM831X_LDO8_UV_STS_SHIFT 7 /* LDO8_UV_STS */ #define WM831X_LDO8_UV_STS_WIDTH 1 /* LDO8_UV_STS */ #define WM831X_LDO7_UV_STS 0x0040 /* LDO7_UV_STS */ #define WM831X_LDO7_UV_STS_MASK 0x0040 /* LDO7_UV_STS */ #define WM831X_LDO7_UV_STS_SHIFT 6 /* LDO7_UV_STS */ #define WM831X_LDO7_UV_STS_WIDTH 1 /* LDO7_UV_STS */ #define WM831X_LDO6_UV_STS 0x0020 /* LDO6_UV_STS */ #define WM831X_LDO6_UV_STS_MASK 0x0020 /* LDO6_UV_STS */ #define WM831X_LDO6_UV_STS_SHIFT 5 /* LDO6_UV_STS */ #define WM831X_LDO6_UV_STS_WIDTH 1 /* LDO6_UV_STS */ #define WM831X_LDO5_UV_STS 0x0010 /* LDO5_UV_STS */ #define WM831X_LDO5_UV_STS_MASK 0x0010 /* LDO5_UV_STS */ #define WM831X_LDO5_UV_STS_SHIFT 4 /* LDO5_UV_STS */ #define WM831X_LDO5_UV_STS_WIDTH 1 /* LDO5_UV_STS */ #define WM831X_LDO4_UV_STS 0x0008 /* LDO4_UV_STS */ #define WM831X_LDO4_UV_STS_MASK 0x0008 /* LDO4_UV_STS */ #define WM831X_LDO4_UV_STS_SHIFT 3 /* LDO4_UV_STS */ #define WM831X_LDO4_UV_STS_WIDTH 1 /* LDO4_UV_STS */ #define WM831X_LDO3_UV_STS 0x0004 /* LDO3_UV_STS */ #define WM831X_LDO3_UV_STS_MASK 0x0004 /* LDO3_UV_STS */ #define WM831X_LDO3_UV_STS_SHIFT 2 /* LDO3_UV_STS */ #define WM831X_LDO3_UV_STS_WIDTH 1 /* LDO3_UV_STS */ #define WM831X_LDO2_UV_STS 0x0002 /* LDO2_UV_STS */ #define WM831X_LDO2_UV_STS_MASK 0x0002 /* LDO2_UV_STS */ #define WM831X_LDO2_UV_STS_SHIFT 1 /* LDO2_UV_STS */ #define WM831X_LDO2_UV_STS_WIDTH 1 /* LDO2_UV_STS */ #define WM831X_LDO1_UV_STS 0x0001 /* LDO1_UV_STS */ #define WM831X_LDO1_UV_STS_MASK 0x0001 /* LDO1_UV_STS */ #define WM831X_LDO1_UV_STS_SHIFT 0 /* LDO1_UV_STS */ #define WM831X_LDO1_UV_STS_WIDTH 1 /* LDO1_UV_STS */ /* * R16470 (0x4056) - DC1 Control 1 */ #define WM831X_DC1_RATE_MASK 0xC000 /* DC1_RATE - [15:14] */ #define WM831X_DC1_RATE_SHIFT 14 /* DC1_RATE - [15:14] */ #define WM831X_DC1_RATE_WIDTH 2 /* DC1_RATE - [15:14] */ #define WM831X_DC1_PHASE 0x1000 /* DC1_PHASE */ #define WM831X_DC1_PHASE_MASK 0x1000 /* DC1_PHASE */ #define WM831X_DC1_PHASE_SHIFT 12 /* DC1_PHASE */ #define WM831X_DC1_PHASE_WIDTH 1 /* DC1_PHASE */ #define WM831X_DC1_FREQ_MASK 0x0300 /* DC1_FREQ - [9:8] */ #define WM831X_DC1_FREQ_SHIFT 8 /* DC1_FREQ - [9:8] */ #define WM831X_DC1_FREQ_WIDTH 2 /* DC1_FREQ - [9:8] */ #define WM831X_DC1_FLT 0x0080 /* DC1_FLT */ #define WM831X_DC1_FLT_MASK 0x0080 /* DC1_FLT */ #define WM831X_DC1_FLT_SHIFT 7 /* DC1_FLT */ #define WM831X_DC1_FLT_WIDTH 1 /* DC1_FLT */ #define WM831X_DC1_SOFT_START_MASK 0x0030 /* DC1_SOFT_START - [5:4] */ #define WM831X_DC1_SOFT_START_SHIFT 4 /* DC1_SOFT_START - [5:4] */ #define WM831X_DC1_SOFT_START_WIDTH 2 /* 
DC1_SOFT_START - [5:4] */ #define WM831X_DC1_CAP_MASK 0x0003 /* DC1_CAP - [1:0] */ #define WM831X_DC1_CAP_SHIFT 0 /* DC1_CAP - [1:0] */ #define WM831X_DC1_CAP_WIDTH 2 /* DC1_CAP - [1:0] */ /* * R16471 (0x4057) - DC1 Control 2 */ #define WM831X_DC1_ERR_ACT_MASK 0xC000 /* DC1_ERR_ACT - [15:14] */ #define WM831X_DC1_ERR_ACT_SHIFT 14 /* DC1_ERR_ACT - [15:14] */ #define WM831X_DC1_ERR_ACT_WIDTH 2 /* DC1_ERR_ACT - [15:14] */ #define WM831X_DC1_HWC_SRC_MASK 0x1800 /* DC1_HWC_SRC - [12:11] */ #define WM831X_DC1_HWC_SRC_SHIFT 11 /* DC1_HWC_SRC - [12:11] */ #define WM831X_DC1_HWC_SRC_WIDTH 2 /* DC1_HWC_SRC - [12:11] */ #define WM831X_DC1_HWC_VSEL 0x0400 /* DC1_HWC_VSEL */ #define WM831X_DC1_HWC_VSEL_MASK 0x0400 /* DC1_HWC_VSEL */ #define WM831X_DC1_HWC_VSEL_SHIFT 10 /* DC1_HWC_VSEL */ #define WM831X_DC1_HWC_VSEL_WIDTH 1 /* DC1_HWC_VSEL */ #define WM831X_DC1_HWC_MODE_MASK 0x0300 /* DC1_HWC_MODE - [9:8] */ #define WM831X_DC1_HWC_MODE_SHIFT 8 /* DC1_HWC_MODE - [9:8] */ #define WM831X_DC1_HWC_MODE_WIDTH 2 /* DC1_HWC_MODE - [9:8] */ #define WM831X_DC1_HC_THR_MASK 0x0070 /* DC1_HC_THR - [6:4] */ #define WM831X_DC1_HC_THR_SHIFT 4 /* DC1_HC_THR - [6:4] */ #define WM831X_DC1_HC_THR_WIDTH 3 /* DC1_HC_THR - [6:4] */ #define WM831X_DC1_HC_IND_ENA 0x0001 /* DC1_HC_IND_ENA */ #define WM831X_DC1_HC_IND_ENA_MASK 0x0001 /* DC1_HC_IND_ENA */ #define WM831X_DC1_HC_IND_ENA_SHIFT 0 /* DC1_HC_IND_ENA */ #define WM831X_DC1_HC_IND_ENA_WIDTH 1 /* DC1_HC_IND_ENA */ /* * R16472 (0x4058) - DC1 ON Config */ #define WM831X_DC1_ON_SLOT_MASK 0xE000 /* DC1_ON_SLOT - [15:13] */ #define WM831X_DC1_ON_SLOT_SHIFT 13 /* DC1_ON_SLOT - [15:13] */ #define WM831X_DC1_ON_SLOT_WIDTH 3 /* DC1_ON_SLOT - [15:13] */ #define WM831X_DC1_ON_MODE_MASK 0x0300 /* DC1_ON_MODE - [9:8] */ #define WM831X_DC1_ON_MODE_SHIFT 8 /* DC1_ON_MODE - [9:8] */ #define WM831X_DC1_ON_MODE_WIDTH 2 /* DC1_ON_MODE - [9:8] */ #define WM831X_DC1_ON_VSEL_MASK 0x007F /* DC1_ON_VSEL - [6:0] */ #define WM831X_DC1_ON_VSEL_SHIFT 0 /* DC1_ON_VSEL - [6:0] */ #define WM831X_DC1_ON_VSEL_WIDTH 7 /* DC1_ON_VSEL - [6:0] */ /* * R16473 (0x4059) - DC1 SLEEP Control */ #define WM831X_DC1_SLP_SLOT_MASK 0xE000 /* DC1_SLP_SLOT - [15:13] */ #define WM831X_DC1_SLP_SLOT_SHIFT 13 /* DC1_SLP_SLOT - [15:13] */ #define WM831X_DC1_SLP_SLOT_WIDTH 3 /* DC1_SLP_SLOT - [15:13] */ #define WM831X_DC1_SLP_MODE_MASK 0x0300 /* DC1_SLP_MODE - [9:8] */ #define WM831X_DC1_SLP_MODE_SHIFT 8 /* DC1_SLP_MODE - [9:8] */ #define WM831X_DC1_SLP_MODE_WIDTH 2 /* DC1_SLP_MODE - [9:8] */ #define WM831X_DC1_SLP_VSEL_MASK 0x007F /* DC1_SLP_VSEL - [6:0] */ #define WM831X_DC1_SLP_VSEL_SHIFT 0 /* DC1_SLP_VSEL - [6:0] */ #define WM831X_DC1_SLP_VSEL_WIDTH 7 /* DC1_SLP_VSEL - [6:0] */ /* * R16474 (0x405A) - DC1 DVS Control */ #define WM831X_DC1_DVS_SRC_MASK 0x1800 /* DC1_DVS_SRC - [12:11] */ #define WM831X_DC1_DVS_SRC_SHIFT 11 /* DC1_DVS_SRC - [12:11] */ #define WM831X_DC1_DVS_SRC_WIDTH 2 /* DC1_DVS_SRC - [12:11] */ #define WM831X_DC1_DVS_VSEL_MASK 0x007F /* DC1_DVS_VSEL - [6:0] */ #define WM831X_DC1_DVS_VSEL_SHIFT 0 /* DC1_DVS_VSEL - [6:0] */ #define WM831X_DC1_DVS_VSEL_WIDTH 7 /* DC1_DVS_VSEL - [6:0] */ /* * R16475 (0x405B) - DC2 Control 1 */ #define WM831X_DC2_RATE_MASK 0xC000 /* DC2_RATE - [15:14] */ #define WM831X_DC2_RATE_SHIFT 14 /* DC2_RATE - [15:14] */ #define WM831X_DC2_RATE_WIDTH 2 /* DC2_RATE - [15:14] */ #define WM831X_DC2_PHASE 0x1000 /* DC2_PHASE */ #define WM831X_DC2_PHASE_MASK 0x1000 /* DC2_PHASE */ #define WM831X_DC2_PHASE_SHIFT 12 /* DC2_PHASE */ #define WM831X_DC2_PHASE_WIDTH 1 /* DC2_PHASE */ #define 
WM831X_DC2_FREQ_MASK 0x0300 /* DC2_FREQ - [9:8] */ #define WM831X_DC2_FREQ_SHIFT 8 /* DC2_FREQ - [9:8] */ #define WM831X_DC2_FREQ_WIDTH 2 /* DC2_FREQ - [9:8] */ #define WM831X_DC2_FLT 0x0080 /* DC2_FLT */ #define WM831X_DC2_FLT_MASK 0x0080 /* DC2_FLT */ #define WM831X_DC2_FLT_SHIFT 7 /* DC2_FLT */ #define WM831X_DC2_FLT_WIDTH 1 /* DC2_FLT */ #define WM831X_DC2_SOFT_START_MASK 0x0030 /* DC2_SOFT_START - [5:4] */ #define WM831X_DC2_SOFT_START_SHIFT 4 /* DC2_SOFT_START - [5:4] */ #define WM831X_DC2_SOFT_START_WIDTH 2 /* DC2_SOFT_START - [5:4] */ #define WM831X_DC2_CAP_MASK 0x0003 /* DC2_CAP - [1:0] */ #define WM831X_DC2_CAP_SHIFT 0 /* DC2_CAP - [1:0] */ #define WM831X_DC2_CAP_WIDTH 2 /* DC2_CAP - [1:0] */ /* * R16476 (0x405C) - DC2 Control 2 */ #define WM831X_DC2_ERR_ACT_MASK 0xC000 /* DC2_ERR_ACT - [15:14] */ #define WM831X_DC2_ERR_ACT_SHIFT 14 /* DC2_ERR_ACT - [15:14] */ #define WM831X_DC2_ERR_ACT_WIDTH 2 /* DC2_ERR_ACT - [15:14] */ #define WM831X_DC2_HWC_SRC_MASK 0x1800 /* DC2_HWC_SRC - [12:11] */ #define WM831X_DC2_HWC_SRC_SHIFT 11 /* DC2_HWC_SRC - [12:11] */ #define WM831X_DC2_HWC_SRC_WIDTH 2 /* DC2_HWC_SRC - [12:11] */ #define WM831X_DC2_HWC_VSEL 0x0400 /* DC2_HWC_VSEL */ #define WM831X_DC2_HWC_VSEL_MASK 0x0400 /* DC2_HWC_VSEL */ #define WM831X_DC2_HWC_VSEL_SHIFT 10 /* DC2_HWC_VSEL */ #define WM831X_DC2_HWC_VSEL_WIDTH 1 /* DC2_HWC_VSEL */ #define WM831X_DC2_HWC_MODE_MASK 0x0300 /* DC2_HWC_MODE - [9:8] */ #define WM831X_DC2_HWC_MODE_SHIFT 8 /* DC2_HWC_MODE - [9:8] */ #define WM831X_DC2_HWC_MODE_WIDTH 2 /* DC2_HWC_MODE - [9:8] */ #define WM831X_DC2_HC_THR_MASK 0x0070 /* DC2_HC_THR - [6:4] */ #define WM831X_DC2_HC_THR_SHIFT 4 /* DC2_HC_THR - [6:4] */ #define WM831X_DC2_HC_THR_WIDTH 3 /* DC2_HC_THR - [6:4] */ #define WM831X_DC2_HC_IND_ENA 0x0001 /* DC2_HC_IND_ENA */ #define WM831X_DC2_HC_IND_ENA_MASK 0x0001 /* DC2_HC_IND_ENA */ #define WM831X_DC2_HC_IND_ENA_SHIFT 0 /* DC2_HC_IND_ENA */ #define WM831X_DC2_HC_IND_ENA_WIDTH 1 /* DC2_HC_IND_ENA */ /* * R16477 (0x405D) - DC2 ON Config */ #define WM831X_DC2_ON_SLOT_MASK 0xE000 /* DC2_ON_SLOT - [15:13] */ #define WM831X_DC2_ON_SLOT_SHIFT 13 /* DC2_ON_SLOT - [15:13] */ #define WM831X_DC2_ON_SLOT_WIDTH 3 /* DC2_ON_SLOT - [15:13] */ #define WM831X_DC2_ON_MODE_MASK 0x0300 /* DC2_ON_MODE - [9:8] */ #define WM831X_DC2_ON_MODE_SHIFT 8 /* DC2_ON_MODE - [9:8] */ #define WM831X_DC2_ON_MODE_WIDTH 2 /* DC2_ON_MODE - [9:8] */ #define WM831X_DC2_ON_VSEL_MASK 0x007F /* DC2_ON_VSEL - [6:0] */ #define WM831X_DC2_ON_VSEL_SHIFT 0 /* DC2_ON_VSEL - [6:0] */ #define WM831X_DC2_ON_VSEL_WIDTH 7 /* DC2_ON_VSEL - [6:0] */ /* * R16478 (0x405E) - DC2 SLEEP Control */ #define WM831X_DC2_SLP_SLOT_MASK 0xE000 /* DC2_SLP_SLOT - [15:13] */ #define WM831X_DC2_SLP_SLOT_SHIFT 13 /* DC2_SLP_SLOT - [15:13] */ #define WM831X_DC2_SLP_SLOT_WIDTH 3 /* DC2_SLP_SLOT - [15:13] */ #define WM831X_DC2_SLP_MODE_MASK 0x0300 /* DC2_SLP_MODE - [9:8] */ #define WM831X_DC2_SLP_MODE_SHIFT 8 /* DC2_SLP_MODE - [9:8] */ #define WM831X_DC2_SLP_MODE_WIDTH 2 /* DC2_SLP_MODE - [9:8] */ #define WM831X_DC2_SLP_VSEL_MASK 0x007F /* DC2_SLP_VSEL - [6:0] */ #define WM831X_DC2_SLP_VSEL_SHIFT 0 /* DC2_SLP_VSEL - [6:0] */ #define WM831X_DC2_SLP_VSEL_WIDTH 7 /* DC2_SLP_VSEL - [6:0] */ /* * R16479 (0x405F) - DC2 DVS Control */ #define WM831X_DC2_DVS_SRC_MASK 0x1800 /* DC2_DVS_SRC - [12:11] */ #define WM831X_DC2_DVS_SRC_SHIFT 11 /* DC2_DVS_SRC - [12:11] */ #define WM831X_DC2_DVS_SRC_WIDTH 2 /* DC2_DVS_SRC - [12:11] */ #define WM831X_DC2_DVS_VSEL_MASK 0x007F /* DC2_DVS_VSEL - [6:0] */ #define WM831X_DC2_DVS_VSEL_SHIFT 0 
/* DC2_DVS_VSEL - [6:0] */ #define WM831X_DC2_DVS_VSEL_WIDTH 7 /* DC2_DVS_VSEL - [6:0] */ /* * R16480 (0x4060) - DC3 Control 1 */ #define WM831X_DC3_PHASE 0x1000 /* DC3_PHASE */ #define WM831X_DC3_PHASE_MASK 0x1000 /* DC3_PHASE */ #define WM831X_DC3_PHASE_SHIFT 12 /* DC3_PHASE */ #define WM831X_DC3_PHASE_WIDTH 1 /* DC3_PHASE */ #define WM831X_DC3_FLT 0x0080 /* DC3_FLT */ #define WM831X_DC3_FLT_MASK 0x0080 /* DC3_FLT */ #define WM831X_DC3_FLT_SHIFT 7 /* DC3_FLT */ #define WM831X_DC3_FLT_WIDTH 1 /* DC3_FLT */ #define WM831X_DC3_SOFT_START_MASK 0x0030 /* DC3_SOFT_START - [5:4] */ #define WM831X_DC3_SOFT_START_SHIFT 4 /* DC3_SOFT_START - [5:4] */ #define WM831X_DC3_SOFT_START_WIDTH 2 /* DC3_SOFT_START - [5:4] */ #define WM831X_DC3_STNBY_LIM_MASK 0x000C /* DC3_STNBY_LIM - [3:2] */ #define WM831X_DC3_STNBY_LIM_SHIFT 2 /* DC3_STNBY_LIM - [3:2] */ #define WM831X_DC3_STNBY_LIM_WIDTH 2 /* DC3_STNBY_LIM - [3:2] */ #define WM831X_DC3_CAP_MASK 0x0003 /* DC3_CAP - [1:0] */ #define WM831X_DC3_CAP_SHIFT 0 /* DC3_CAP - [1:0] */ #define WM831X_DC3_CAP_WIDTH 2 /* DC3_CAP - [1:0] */ /* * R16481 (0x4061) - DC3 Control 2 */ #define WM831X_DC3_ERR_ACT_MASK 0xC000 /* DC3_ERR_ACT - [15:14] */ #define WM831X_DC3_ERR_ACT_SHIFT 14 /* DC3_ERR_ACT - [15:14] */ #define WM831X_DC3_ERR_ACT_WIDTH 2 /* DC3_ERR_ACT - [15:14] */ #define WM831X_DC3_HWC_SRC_MASK 0x1800 /* DC3_HWC_SRC - [12:11] */ #define WM831X_DC3_HWC_SRC_SHIFT 11 /* DC3_HWC_SRC - [12:11] */ #define WM831X_DC3_HWC_SRC_WIDTH 2 /* DC3_HWC_SRC - [12:11] */ #define WM831X_DC3_HWC_VSEL 0x0400 /* DC3_HWC_VSEL */ #define WM831X_DC3_HWC_VSEL_MASK 0x0400 /* DC3_HWC_VSEL */ #define WM831X_DC3_HWC_VSEL_SHIFT 10 /* DC3_HWC_VSEL */ #define WM831X_DC3_HWC_VSEL_WIDTH 1 /* DC3_HWC_VSEL */ #define WM831X_DC3_HWC_MODE_MASK 0x0300 /* DC3_HWC_MODE - [9:8] */ #define WM831X_DC3_HWC_MODE_SHIFT 8 /* DC3_HWC_MODE - [9:8] */ #define WM831X_DC3_HWC_MODE_WIDTH 2 /* DC3_HWC_MODE - [9:8] */ #define WM831X_DC3_OVP 0x0080 /* DC3_OVP */ #define WM831X_DC3_OVP_MASK 0x0080 /* DC3_OVP */ #define WM831X_DC3_OVP_SHIFT 7 /* DC3_OVP */ #define WM831X_DC3_OVP_WIDTH 1 /* DC3_OVP */ /* * R16482 (0x4062) - DC3 ON Config */ #define WM831X_DC3_ON_SLOT_MASK 0xE000 /* DC3_ON_SLOT - [15:13] */ #define WM831X_DC3_ON_SLOT_SHIFT 13 /* DC3_ON_SLOT - [15:13] */ #define WM831X_DC3_ON_SLOT_WIDTH 3 /* DC3_ON_SLOT - [15:13] */ #define WM831X_DC3_ON_MODE_MASK 0x0300 /* DC3_ON_MODE - [9:8] */ #define WM831X_DC3_ON_MODE_SHIFT 8 /* DC3_ON_MODE - [9:8] */ #define WM831X_DC3_ON_MODE_WIDTH 2 /* DC3_ON_MODE - [9:8] */ #define WM831X_DC3_ON_VSEL_MASK 0x007F /* DC3_ON_VSEL - [6:0] */ #define WM831X_DC3_ON_VSEL_SHIFT 0 /* DC3_ON_VSEL - [6:0] */ #define WM831X_DC3_ON_VSEL_WIDTH 7 /* DC3_ON_VSEL - [6:0] */ /* * R16483 (0x4063) - DC3 SLEEP Control */ #define WM831X_DC3_SLP_SLOT_MASK 0xE000 /* DC3_SLP_SLOT - [15:13] */ #define WM831X_DC3_SLP_SLOT_SHIFT 13 /* DC3_SLP_SLOT - [15:13] */ #define WM831X_DC3_SLP_SLOT_WIDTH 3 /* DC3_SLP_SLOT - [15:13] */ #define WM831X_DC3_SLP_MODE_MASK 0x0300 /* DC3_SLP_MODE - [9:8] */ #define WM831X_DC3_SLP_MODE_SHIFT 8 /* DC3_SLP_MODE - [9:8] */ #define WM831X_DC3_SLP_MODE_WIDTH 2 /* DC3_SLP_MODE - [9:8] */ #define WM831X_DC3_SLP_VSEL_MASK 0x007F /* DC3_SLP_VSEL - [6:0] */ #define WM831X_DC3_SLP_VSEL_SHIFT 0 /* DC3_SLP_VSEL - [6:0] */ #define WM831X_DC3_SLP_VSEL_WIDTH 7 /* DC3_SLP_VSEL - [6:0] */ /* * R16484 (0x4064) - DC4 Control */ #define WM831X_DC4_ERR_ACT_MASK 0xC000 /* DC4_ERR_ACT - [15:14] */ #define WM831X_DC4_ERR_ACT_SHIFT 14 /* DC4_ERR_ACT - [15:14] */ #define WM831X_DC4_ERR_ACT_WIDTH 2 /* 
DC4_ERR_ACT - [15:14] */ #define WM831X_DC4_HWC_SRC_MASK 0x1800 /* DC4_HWC_SRC - [12:11] */ #define WM831X_DC4_HWC_SRC_SHIFT 11 /* DC4_HWC_SRC - [12:11] */ #define WM831X_DC4_HWC_SRC_WIDTH 2 /* DC4_HWC_SRC - [12:11] */ #define WM831X_DC4_HWC_MODE 0x0100 /* DC4_HWC_MODE */ #define WM831X_DC4_HWC_MODE_MASK 0x0100 /* DC4_HWC_MODE */ #define WM831X_DC4_HWC_MODE_SHIFT 8 /* DC4_HWC_MODE */ #define WM831X_DC4_HWC_MODE_WIDTH 1 /* DC4_HWC_MODE */ #define WM831X_DC4_RANGE_MASK 0x000C /* DC4_RANGE - [3:2] */ #define WM831X_DC4_RANGE_SHIFT 2 /* DC4_RANGE - [3:2] */ #define WM831X_DC4_RANGE_WIDTH 2 /* DC4_RANGE - [3:2] */ #define WM831X_DC4_FBSRC 0x0001 /* DC4_FBSRC */ #define WM831X_DC4_FBSRC_MASK 0x0001 /* DC4_FBSRC */ #define WM831X_DC4_FBSRC_SHIFT 0 /* DC4_FBSRC */ #define WM831X_DC4_FBSRC_WIDTH 1 /* DC4_FBSRC */ /* * R16485 (0x4065) - DC4 SLEEP Control */ #define WM831X_DC4_SLPENA 0x0100 /* DC4_SLPENA */ #define WM831X_DC4_SLPENA_MASK 0x0100 /* DC4_SLPENA */ #define WM831X_DC4_SLPENA_SHIFT 8 /* DC4_SLPENA */ #define WM831X_DC4_SLPENA_WIDTH 1 /* DC4_SLPENA */ /* * R16488 (0x4068) - LDO1 Control */ #define WM831X_LDO1_ERR_ACT_MASK 0xC000 /* LDO1_ERR_ACT - [15:14] */ #define WM831X_LDO1_ERR_ACT_SHIFT 14 /* LDO1_ERR_ACT - [15:14] */ #define WM831X_LDO1_ERR_ACT_WIDTH 2 /* LDO1_ERR_ACT - [15:14] */ #define WM831X_LDO1_HWC_SRC_MASK 0x1800 /* LDO1_HWC_SRC - [12:11] */ #define WM831X_LDO1_HWC_SRC_SHIFT 11 /* LDO1_HWC_SRC - [12:11] */ #define WM831X_LDO1_HWC_SRC_WIDTH 2 /* LDO1_HWC_SRC - [12:11] */ #define WM831X_LDO1_HWC_VSEL 0x0400 /* LDO1_HWC_VSEL */ #define WM831X_LDO1_HWC_VSEL_MASK 0x0400 /* LDO1_HWC_VSEL */ #define WM831X_LDO1_HWC_VSEL_SHIFT 10 /* LDO1_HWC_VSEL */ #define WM831X_LDO1_HWC_VSEL_WIDTH 1 /* LDO1_HWC_VSEL */ #define WM831X_LDO1_HWC_MODE_MASK 0x0300 /* LDO1_HWC_MODE - [9:8] */ #define WM831X_LDO1_HWC_MODE_SHIFT 8 /* LDO1_HWC_MODE - [9:8] */ #define WM831X_LDO1_HWC_MODE_WIDTH 2 /* LDO1_HWC_MODE - [9:8] */ #define WM831X_LDO1_FLT 0x0080 /* LDO1_FLT */ #define WM831X_LDO1_FLT_MASK 0x0080 /* LDO1_FLT */ #define WM831X_LDO1_FLT_SHIFT 7 /* LDO1_FLT */ #define WM831X_LDO1_FLT_WIDTH 1 /* LDO1_FLT */ #define WM831X_LDO1_SWI 0x0040 /* LDO1_SWI */ #define WM831X_LDO1_SWI_MASK 0x0040 /* LDO1_SWI */ #define WM831X_LDO1_SWI_SHIFT 6 /* LDO1_SWI */ #define WM831X_LDO1_SWI_WIDTH 1 /* LDO1_SWI */ #define WM831X_LDO1_LP_MODE 0x0001 /* LDO1_LP_MODE */ #define WM831X_LDO1_LP_MODE_MASK 0x0001 /* LDO1_LP_MODE */ #define WM831X_LDO1_LP_MODE_SHIFT 0 /* LDO1_LP_MODE */ #define WM831X_LDO1_LP_MODE_WIDTH 1 /* LDO1_LP_MODE */ /* * R16489 (0x4069) - LDO1 ON Control */ #define WM831X_LDO1_ON_SLOT_MASK 0xE000 /* LDO1_ON_SLOT - [15:13] */ #define WM831X_LDO1_ON_SLOT_SHIFT 13 /* LDO1_ON_SLOT - [15:13] */ #define WM831X_LDO1_ON_SLOT_WIDTH 3 /* LDO1_ON_SLOT - [15:13] */ #define WM831X_LDO1_ON_MODE 0x0100 /* LDO1_ON_MODE */ #define WM831X_LDO1_ON_MODE_MASK 0x0100 /* LDO1_ON_MODE */ #define WM831X_LDO1_ON_MODE_SHIFT 8 /* LDO1_ON_MODE */ #define WM831X_LDO1_ON_MODE_WIDTH 1 /* LDO1_ON_MODE */ #define WM831X_LDO1_ON_VSEL_MASK 0x001F /* LDO1_ON_VSEL - [4:0] */ #define WM831X_LDO1_ON_VSEL_SHIFT 0 /* LDO1_ON_VSEL - [4:0] */ #define WM831X_LDO1_ON_VSEL_WIDTH 5 /* LDO1_ON_VSEL - [4:0] */ /* * R16490 (0x406A) - LDO1 SLEEP Control */ #define WM831X_LDO1_SLP_SLOT_MASK 0xE000 /* LDO1_SLP_SLOT - [15:13] */ #define WM831X_LDO1_SLP_SLOT_SHIFT 13 /* LDO1_SLP_SLOT - [15:13] */ #define WM831X_LDO1_SLP_SLOT_WIDTH 3 /* LDO1_SLP_SLOT - [15:13] */ #define WM831X_LDO1_SLP_MODE 0x0100 /* LDO1_SLP_MODE */ #define WM831X_LDO1_SLP_MODE_MASK 0x0100 /* 
LDO1_SLP_MODE */ #define WM831X_LDO1_SLP_MODE_SHIFT 8 /* LDO1_SLP_MODE */ #define WM831X_LDO1_SLP_MODE_WIDTH 1 /* LDO1_SLP_MODE */ #define WM831X_LDO1_SLP_VSEL_MASK 0x001F /* LDO1_SLP_VSEL - [4:0] */ #define WM831X_LDO1_SLP_VSEL_SHIFT 0 /* LDO1_SLP_VSEL - [4:0] */ #define WM831X_LDO1_SLP_VSEL_WIDTH 5 /* LDO1_SLP_VSEL - [4:0] */ /* * R16491 (0x406B) - LDO2 Control */ #define WM831X_LDO2_ERR_ACT_MASK 0xC000 /* LDO2_ERR_ACT - [15:14] */ #define WM831X_LDO2_ERR_ACT_SHIFT 14 /* LDO2_ERR_ACT - [15:14] */ #define WM831X_LDO2_ERR_ACT_WIDTH 2 /* LDO2_ERR_ACT - [15:14] */ #define WM831X_LDO2_HWC_SRC_MASK 0x1800 /* LDO2_HWC_SRC - [12:11] */ #define WM831X_LDO2_HWC_SRC_SHIFT 11 /* LDO2_HWC_SRC - [12:11] */ #define WM831X_LDO2_HWC_SRC_WIDTH 2 /* LDO2_HWC_SRC - [12:11] */ #define WM831X_LDO2_HWC_VSEL 0x0400 /* LDO2_HWC_VSEL */ #define WM831X_LDO2_HWC_VSEL_MASK 0x0400 /* LDO2_HWC_VSEL */ #define WM831X_LDO2_HWC_VSEL_SHIFT 10 /* LDO2_HWC_VSEL */ #define WM831X_LDO2_HWC_VSEL_WIDTH 1 /* LDO2_HWC_VSEL */ #define WM831X_LDO2_HWC_MODE_MASK 0x0300 /* LDO2_HWC_MODE - [9:8] */ #define WM831X_LDO2_HWC_MODE_SHIFT 8 /* LDO2_HWC_MODE - [9:8] */ #define WM831X_LDO2_HWC_MODE_WIDTH 2 /* LDO2_HWC_MODE - [9:8] */ #define WM831X_LDO2_FLT 0x0080 /* LDO2_FLT */ #define WM831X_LDO2_FLT_MASK 0x0080 /* LDO2_FLT */ #define WM831X_LDO2_FLT_SHIFT 7 /* LDO2_FLT */ #define WM831X_LDO2_FLT_WIDTH 1 /* LDO2_FLT */ #define WM831X_LDO2_SWI 0x0040 /* LDO2_SWI */ #define WM831X_LDO2_SWI_MASK 0x0040 /* LDO2_SWI */ #define WM831X_LDO2_SWI_SHIFT 6 /* LDO2_SWI */ #define WM831X_LDO2_SWI_WIDTH 1 /* LDO2_SWI */ #define WM831X_LDO2_LP_MODE 0x0001 /* LDO2_LP_MODE */ #define WM831X_LDO2_LP_MODE_MASK 0x0001 /* LDO2_LP_MODE */ #define WM831X_LDO2_LP_MODE_SHIFT 0 /* LDO2_LP_MODE */ #define WM831X_LDO2_LP_MODE_WIDTH 1 /* LDO2_LP_MODE */ /* * R16492 (0x406C) - LDO2 ON Control */ #define WM831X_LDO2_ON_SLOT_MASK 0xE000 /* LDO2_ON_SLOT - [15:13] */ #define WM831X_LDO2_ON_SLOT_SHIFT 13 /* LDO2_ON_SLOT - [15:13] */ #define WM831X_LDO2_ON_SLOT_WIDTH 3 /* LDO2_ON_SLOT - [15:13] */ #define WM831X_LDO2_ON_MODE 0x0100 /* LDO2_ON_MODE */ #define WM831X_LDO2_ON_MODE_MASK 0x0100 /* LDO2_ON_MODE */ #define WM831X_LDO2_ON_MODE_SHIFT 8 /* LDO2_ON_MODE */ #define WM831X_LDO2_ON_MODE_WIDTH 1 /* LDO2_ON_MODE */ #define WM831X_LDO2_ON_VSEL_MASK 0x001F /* LDO2_ON_VSEL - [4:0] */ #define WM831X_LDO2_ON_VSEL_SHIFT 0 /* LDO2_ON_VSEL - [4:0] */ #define WM831X_LDO2_ON_VSEL_WIDTH 5 /* LDO2_ON_VSEL - [4:0] */ /* * R16493 (0x406D) - LDO2 SLEEP Control */ #define WM831X_LDO2_SLP_SLOT_MASK 0xE000 /* LDO2_SLP_SLOT - [15:13] */ #define WM831X_LDO2_SLP_SLOT_SHIFT 13 /* LDO2_SLP_SLOT - [15:13] */ #define WM831X_LDO2_SLP_SLOT_WIDTH 3 /* LDO2_SLP_SLOT - [15:13] */ #define WM831X_LDO2_SLP_MODE 0x0100 /* LDO2_SLP_MODE */ #define WM831X_LDO2_SLP_MODE_MASK 0x0100 /* LDO2_SLP_MODE */ #define WM831X_LDO2_SLP_MODE_SHIFT 8 /* LDO2_SLP_MODE */ #define WM831X_LDO2_SLP_MODE_WIDTH 1 /* LDO2_SLP_MODE */ #define WM831X_LDO2_SLP_VSEL_MASK 0x001F /* LDO2_SLP_VSEL - [4:0] */ #define WM831X_LDO2_SLP_VSEL_SHIFT 0 /* LDO2_SLP_VSEL - [4:0] */ #define WM831X_LDO2_SLP_VSEL_WIDTH 5 /* LDO2_SLP_VSEL - [4:0] */ /* * R16494 (0x406E) - LDO3 Control */ #define WM831X_LDO3_ERR_ACT_MASK 0xC000 /* LDO3_ERR_ACT - [15:14] */ #define WM831X_LDO3_ERR_ACT_SHIFT 14 /* LDO3_ERR_ACT - [15:14] */ #define WM831X_LDO3_ERR_ACT_WIDTH 2 /* LDO3_ERR_ACT - [15:14] */ #define WM831X_LDO3_HWC_SRC_MASK 0x1800 /* LDO3_HWC_SRC - [12:11] */ #define WM831X_LDO3_HWC_SRC_SHIFT 11 /* LDO3_HWC_SRC - [12:11] */ #define WM831X_LDO3_HWC_SRC_WIDTH 2 
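/*
 * Editorial example: decoding a field goes the other way -- mask first,
 * then shift down.  A sketch assuming the wm831x_reg_read() helper and the
 * WM831X_LDO2_ON_CONTROL register address from mfd/wm831x/core.h;
 * wm831x_reg_read() returns the register value or a negative errno:
 */
static int example_get_ldo2_on_vsel(struct wm831x *wm831x)
{
	int val = wm831x_reg_read(wm831x, WM831X_LDO2_ON_CONTROL);

	if (val < 0)
		return val;

	/* Isolate bits [4:0] and normalise them to a 0-based selector. */
	return (val & WM831X_LDO2_ON_VSEL_MASK) >> WM831X_LDO2_ON_VSEL_SHIFT;
}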
/* LDO3_HWC_SRC - [12:11] */ #define WM831X_LDO3_HWC_VSEL 0x0400 /* LDO3_HWC_VSEL */ #define WM831X_LDO3_HWC_VSEL_MASK 0x0400 /* LDO3_HWC_VSEL */ #define WM831X_LDO3_HWC_VSEL_SHIFT 10 /* LDO3_HWC_VSEL */ #define WM831X_LDO3_HWC_VSEL_WIDTH 1 /* LDO3_HWC_VSEL */ #define WM831X_LDO3_HWC_MODE_MASK 0x0300 /* LDO3_HWC_MODE - [9:8] */ #define WM831X_LDO3_HWC_MODE_SHIFT 8 /* LDO3_HWC_MODE - [9:8] */ #define WM831X_LDO3_HWC_MODE_WIDTH 2 /* LDO3_HWC_MODE - [9:8] */ #define WM831X_LDO3_FLT 0x0080 /* LDO3_FLT */ #define WM831X_LDO3_FLT_MASK 0x0080 /* LDO3_FLT */ #define WM831X_LDO3_FLT_SHIFT 7 /* LDO3_FLT */ #define WM831X_LDO3_FLT_WIDTH 1 /* LDO3_FLT */ #define WM831X_LDO3_SWI 0x0040 /* LDO3_SWI */ #define WM831X_LDO3_SWI_MASK 0x0040 /* LDO3_SWI */ #define WM831X_LDO3_SWI_SHIFT 6 /* LDO3_SWI */ #define WM831X_LDO3_SWI_WIDTH 1 /* LDO3_SWI */ #define WM831X_LDO3_LP_MODE 0x0001 /* LDO3_LP_MODE */ #define WM831X_LDO3_LP_MODE_MASK 0x0001 /* LDO3_LP_MODE */ #define WM831X_LDO3_LP_MODE_SHIFT 0 /* LDO3_LP_MODE */ #define WM831X_LDO3_LP_MODE_WIDTH 1 /* LDO3_LP_MODE */ /* * R16495 (0x406F) - LDO3 ON Control */ #define WM831X_LDO3_ON_SLOT_MASK 0xE000 /* LDO3_ON_SLOT - [15:13] */ #define WM831X_LDO3_ON_SLOT_SHIFT 13 /* LDO3_ON_SLOT - [15:13] */ #define WM831X_LDO3_ON_SLOT_WIDTH 3 /* LDO3_ON_SLOT - [15:13] */ #define WM831X_LDO3_ON_MODE 0x0100 /* LDO3_ON_MODE */ #define WM831X_LDO3_ON_MODE_MASK 0x0100 /* LDO3_ON_MODE */ #define WM831X_LDO3_ON_MODE_SHIFT 8 /* LDO3_ON_MODE */ #define WM831X_LDO3_ON_MODE_WIDTH 1 /* LDO3_ON_MODE */ #define WM831X_LDO3_ON_VSEL_MASK 0x001F /* LDO3_ON_VSEL - [4:0] */ #define WM831X_LDO3_ON_VSEL_SHIFT 0 /* LDO3_ON_VSEL - [4:0] */ #define WM831X_LDO3_ON_VSEL_WIDTH 5 /* LDO3_ON_VSEL - [4:0] */ /* * R16496 (0x4070) - LDO3 SLEEP Control */ #define WM831X_LDO3_SLP_SLOT_MASK 0xE000 /* LDO3_SLP_SLOT - [15:13] */ #define WM831X_LDO3_SLP_SLOT_SHIFT 13 /* LDO3_SLP_SLOT - [15:13] */ #define WM831X_LDO3_SLP_SLOT_WIDTH 3 /* LDO3_SLP_SLOT - [15:13] */ #define WM831X_LDO3_SLP_MODE 0x0100 /* LDO3_SLP_MODE */ #define WM831X_LDO3_SLP_MODE_MASK 0x0100 /* LDO3_SLP_MODE */ #define WM831X_LDO3_SLP_MODE_SHIFT 8 /* LDO3_SLP_MODE */ #define WM831X_LDO3_SLP_MODE_WIDTH 1 /* LDO3_SLP_MODE */ #define WM831X_LDO3_SLP_VSEL_MASK 0x001F /* LDO3_SLP_VSEL - [4:0] */ #define WM831X_LDO3_SLP_VSEL_SHIFT 0 /* LDO3_SLP_VSEL - [4:0] */ #define WM831X_LDO3_SLP_VSEL_WIDTH 5 /* LDO3_SLP_VSEL - [4:0] */ /* * R16497 (0x4071) - LDO4 Control */ #define WM831X_LDO4_ERR_ACT_MASK 0xC000 /* LDO4_ERR_ACT - [15:14] */ #define WM831X_LDO4_ERR_ACT_SHIFT 14 /* LDO4_ERR_ACT - [15:14] */ #define WM831X_LDO4_ERR_ACT_WIDTH 2 /* LDO4_ERR_ACT - [15:14] */ #define WM831X_LDO4_HWC_SRC_MASK 0x1800 /* LDO4_HWC_SRC - [12:11] */ #define WM831X_LDO4_HWC_SRC_SHIFT 11 /* LDO4_HWC_SRC - [12:11] */ #define WM831X_LDO4_HWC_SRC_WIDTH 2 /* LDO4_HWC_SRC - [12:11] */ #define WM831X_LDO4_HWC_VSEL 0x0400 /* LDO4_HWC_VSEL */ #define WM831X_LDO4_HWC_VSEL_MASK 0x0400 /* LDO4_HWC_VSEL */ #define WM831X_LDO4_HWC_VSEL_SHIFT 10 /* LDO4_HWC_VSEL */ #define WM831X_LDO4_HWC_VSEL_WIDTH 1 /* LDO4_HWC_VSEL */ #define WM831X_LDO4_HWC_MODE_MASK 0x0300 /* LDO4_HWC_MODE - [9:8] */ #define WM831X_LDO4_HWC_MODE_SHIFT 8 /* LDO4_HWC_MODE - [9:8] */ #define WM831X_LDO4_HWC_MODE_WIDTH 2 /* LDO4_HWC_MODE - [9:8] */ #define WM831X_LDO4_FLT 0x0080 /* LDO4_FLT */ #define WM831X_LDO4_FLT_MASK 0x0080 /* LDO4_FLT */ #define WM831X_LDO4_FLT_SHIFT 7 /* LDO4_FLT */ #define WM831X_LDO4_FLT_WIDTH 1 /* LDO4_FLT */ #define WM831X_LDO4_SWI 0x0040 /* LDO4_SWI */ #define WM831X_LDO4_SWI_MASK 0x0040 /* 
LDO4_SWI */ #define WM831X_LDO4_SWI_SHIFT 6 /* LDO4_SWI */ #define WM831X_LDO4_SWI_WIDTH 1 /* LDO4_SWI */ #define WM831X_LDO4_LP_MODE 0x0001 /* LDO4_LP_MODE */ #define WM831X_LDO4_LP_MODE_MASK 0x0001 /* LDO4_LP_MODE */ #define WM831X_LDO4_LP_MODE_SHIFT 0 /* LDO4_LP_MODE */ #define WM831X_LDO4_LP_MODE_WIDTH 1 /* LDO4_LP_MODE */ /* * R16498 (0x4072) - LDO4 ON Control */ #define WM831X_LDO4_ON_SLOT_MASK 0xE000 /* LDO4_ON_SLOT - [15:13] */ #define WM831X_LDO4_ON_SLOT_SHIFT 13 /* LDO4_ON_SLOT - [15:13] */ #define WM831X_LDO4_ON_SLOT_WIDTH 3 /* LDO4_ON_SLOT - [15:13] */ #define WM831X_LDO4_ON_MODE 0x0100 /* LDO4_ON_MODE */ #define WM831X_LDO4_ON_MODE_MASK 0x0100 /* LDO4_ON_MODE */ #define WM831X_LDO4_ON_MODE_SHIFT 8 /* LDO4_ON_MODE */ #define WM831X_LDO4_ON_MODE_WIDTH 1 /* LDO4_ON_MODE */ #define WM831X_LDO4_ON_VSEL_MASK 0x001F /* LDO4_ON_VSEL - [4:0] */ #define WM831X_LDO4_ON_VSEL_SHIFT 0 /* LDO4_ON_VSEL - [4:0] */ #define WM831X_LDO4_ON_VSEL_WIDTH 5 /* LDO4_ON_VSEL - [4:0] */ /* * R16499 (0x4073) - LDO4 SLEEP Control */ #define WM831X_LDO4_SLP_SLOT_MASK 0xE000 /* LDO4_SLP_SLOT - [15:13] */ #define WM831X_LDO4_SLP_SLOT_SHIFT 13 /* LDO4_SLP_SLOT - [15:13] */ #define WM831X_LDO4_SLP_SLOT_WIDTH 3 /* LDO4_SLP_SLOT - [15:13] */ #define WM831X_LDO4_SLP_MODE 0x0100 /* LDO4_SLP_MODE */ #define WM831X_LDO4_SLP_MODE_MASK 0x0100 /* LDO4_SLP_MODE */ #define WM831X_LDO4_SLP_MODE_SHIFT 8 /* LDO4_SLP_MODE */ #define WM831X_LDO4_SLP_MODE_WIDTH 1 /* LDO4_SLP_MODE */ #define WM831X_LDO4_SLP_VSEL_MASK 0x001F /* LDO4_SLP_VSEL - [4:0] */ #define WM831X_LDO4_SLP_VSEL_SHIFT 0 /* LDO4_SLP_VSEL - [4:0] */ #define WM831X_LDO4_SLP_VSEL_WIDTH 5 /* LDO4_SLP_VSEL - [4:0] */ /* * R16500 (0x4074) - LDO5 Control */ #define WM831X_LDO5_ERR_ACT_MASK 0xC000 /* LDO5_ERR_ACT - [15:14] */ #define WM831X_LDO5_ERR_ACT_SHIFT 14 /* LDO5_ERR_ACT - [15:14] */ #define WM831X_LDO5_ERR_ACT_WIDTH 2 /* LDO5_ERR_ACT - [15:14] */ #define WM831X_LDO5_HWC_SRC_MASK 0x1800 /* LDO5_HWC_SRC - [12:11] */ #define WM831X_LDO5_HWC_SRC_SHIFT 11 /* LDO5_HWC_SRC - [12:11] */ #define WM831X_LDO5_HWC_SRC_WIDTH 2 /* LDO5_HWC_SRC - [12:11] */ #define WM831X_LDO5_HWC_VSEL 0x0400 /* LDO5_HWC_VSEL */ #define WM831X_LDO5_HWC_VSEL_MASK 0x0400 /* LDO5_HWC_VSEL */ #define WM831X_LDO5_HWC_VSEL_SHIFT 10 /* LDO5_HWC_VSEL */ #define WM831X_LDO5_HWC_VSEL_WIDTH 1 /* LDO5_HWC_VSEL */ #define WM831X_LDO5_HWC_MODE_MASK 0x0300 /* LDO5_HWC_MODE - [9:8] */ #define WM831X_LDO5_HWC_MODE_SHIFT 8 /* LDO5_HWC_MODE - [9:8] */ #define WM831X_LDO5_HWC_MODE_WIDTH 2 /* LDO5_HWC_MODE - [9:8] */ #define WM831X_LDO5_FLT 0x0080 /* LDO5_FLT */ #define WM831X_LDO5_FLT_MASK 0x0080 /* LDO5_FLT */ #define WM831X_LDO5_FLT_SHIFT 7 /* LDO5_FLT */ #define WM831X_LDO5_FLT_WIDTH 1 /* LDO5_FLT */ #define WM831X_LDO5_SWI 0x0040 /* LDO5_SWI */ #define WM831X_LDO5_SWI_MASK 0x0040 /* LDO5_SWI */ #define WM831X_LDO5_SWI_SHIFT 6 /* LDO5_SWI */ #define WM831X_LDO5_SWI_WIDTH 1 /* LDO5_SWI */ #define WM831X_LDO5_LP_MODE 0x0001 /* LDO5_LP_MODE */ #define WM831X_LDO5_LP_MODE_MASK 0x0001 /* LDO5_LP_MODE */ #define WM831X_LDO5_LP_MODE_SHIFT 0 /* LDO5_LP_MODE */ #define WM831X_LDO5_LP_MODE_WIDTH 1 /* LDO5_LP_MODE */ /* * R16501 (0x4075) - LDO5 ON Control */ #define WM831X_LDO5_ON_SLOT_MASK 0xE000 /* LDO5_ON_SLOT - [15:13] */ #define WM831X_LDO5_ON_SLOT_SHIFT 13 /* LDO5_ON_SLOT - [15:13] */ #define WM831X_LDO5_ON_SLOT_WIDTH 3 /* LDO5_ON_SLOT - [15:13] */ #define WM831X_LDO5_ON_MODE 0x0100 /* LDO5_ON_MODE */ #define WM831X_LDO5_ON_MODE_MASK 0x0100 /* LDO5_ON_MODE */ #define WM831X_LDO5_ON_MODE_SHIFT 8 /* LDO5_ON_MODE */ 
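/*
 * Editorial example: several fields of one control register can be updated
 * in a single read-modify-write by OR-ing their masks.  A sketch, assuming
 * wm831x_set_bits() and WM831X_LDO5_CONTROL from mfd/wm831x/core.h; the
 * src and mode arguments are raw field values, not recommendations:
 */
static int example_config_ldo5_hwc(struct wm831x *wm831x,
				   unsigned int src, unsigned int mode)
{
	/* Select the hardware-control source and mode for LDO5 together. */
	return wm831x_set_bits(wm831x, WM831X_LDO5_CONTROL,
			       WM831X_LDO5_HWC_SRC_MASK |
			       WM831X_LDO5_HWC_MODE_MASK,
			       (src << WM831X_LDO5_HWC_SRC_SHIFT) |
			       (mode << WM831X_LDO5_HWC_MODE_SHIFT));
}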
#define WM831X_LDO5_ON_MODE_WIDTH 1 /* LDO5_ON_MODE */ #define WM831X_LDO5_ON_VSEL_MASK 0x001F /* LDO5_ON_VSEL - [4:0] */ #define WM831X_LDO5_ON_VSEL_SHIFT 0 /* LDO5_ON_VSEL - [4:0] */ #define WM831X_LDO5_ON_VSEL_WIDTH 5 /* LDO5_ON_VSEL - [4:0] */ /* * R16502 (0x4076) - LDO5 SLEEP Control */ #define WM831X_LDO5_SLP_SLOT_MASK 0xE000 /* LDO5_SLP_SLOT - [15:13] */ #define WM831X_LDO5_SLP_SLOT_SHIFT 13 /* LDO5_SLP_SLOT - [15:13] */ #define WM831X_LDO5_SLP_SLOT_WIDTH 3 /* LDO5_SLP_SLOT - [15:13] */ #define WM831X_LDO5_SLP_MODE 0x0100 /* LDO5_SLP_MODE */ #define WM831X_LDO5_SLP_MODE_MASK 0x0100 /* LDO5_SLP_MODE */ #define WM831X_LDO5_SLP_MODE_SHIFT 8 /* LDO5_SLP_MODE */ #define WM831X_LDO5_SLP_MODE_WIDTH 1 /* LDO5_SLP_MODE */ #define WM831X_LDO5_SLP_VSEL_MASK 0x001F /* LDO5_SLP_VSEL - [4:0] */ #define WM831X_LDO5_SLP_VSEL_SHIFT 0 /* LDO5_SLP_VSEL - [4:0] */ #define WM831X_LDO5_SLP_VSEL_WIDTH 5 /* LDO5_SLP_VSEL - [4:0] */ /* * R16503 (0x4077) - LDO6 Control */ #define WM831X_LDO6_ERR_ACT_MASK 0xC000 /* LDO6_ERR_ACT - [15:14] */ #define WM831X_LDO6_ERR_ACT_SHIFT 14 /* LDO6_ERR_ACT - [15:14] */ #define WM831X_LDO6_ERR_ACT_WIDTH 2 /* LDO6_ERR_ACT - [15:14] */ #define WM831X_LDO6_HWC_SRC_MASK 0x1800 /* LDO6_HWC_SRC - [12:11] */ #define WM831X_LDO6_HWC_SRC_SHIFT 11 /* LDO6_HWC_SRC - [12:11] */ #define WM831X_LDO6_HWC_SRC_WIDTH 2 /* LDO6_HWC_SRC - [12:11] */ #define WM831X_LDO6_HWC_VSEL 0x0400 /* LDO6_HWC_VSEL */ #define WM831X_LDO6_HWC_VSEL_MASK 0x0400 /* LDO6_HWC_VSEL */ #define WM831X_LDO6_HWC_VSEL_SHIFT 10 /* LDO6_HWC_VSEL */ #define WM831X_LDO6_HWC_VSEL_WIDTH 1 /* LDO6_HWC_VSEL */ #define WM831X_LDO6_HWC_MODE_MASK 0x0300 /* LDO6_HWC_MODE - [9:8] */ #define WM831X_LDO6_HWC_MODE_SHIFT 8 /* LDO6_HWC_MODE - [9:8] */ #define WM831X_LDO6_HWC_MODE_WIDTH 2 /* LDO6_HWC_MODE - [9:8] */ #define WM831X_LDO6_FLT 0x0080 /* LDO6_FLT */ #define WM831X_LDO6_FLT_MASK 0x0080 /* LDO6_FLT */ #define WM831X_LDO6_FLT_SHIFT 7 /* LDO6_FLT */ #define WM831X_LDO6_FLT_WIDTH 1 /* LDO6_FLT */ #define WM831X_LDO6_SWI 0x0040 /* LDO6_SWI */ #define WM831X_LDO6_SWI_MASK 0x0040 /* LDO6_SWI */ #define WM831X_LDO6_SWI_SHIFT 6 /* LDO6_SWI */ #define WM831X_LDO6_SWI_WIDTH 1 /* LDO6_SWI */ #define WM831X_LDO6_LP_MODE 0x0001 /* LDO6_LP_MODE */ #define WM831X_LDO6_LP_MODE_MASK 0x0001 /* LDO6_LP_MODE */ #define WM831X_LDO6_LP_MODE_SHIFT 0 /* LDO6_LP_MODE */ #define WM831X_LDO6_LP_MODE_WIDTH 1 /* LDO6_LP_MODE */ /* * R16504 (0x4078) - LDO6 ON Control */ #define WM831X_LDO6_ON_SLOT_MASK 0xE000 /* LDO6_ON_SLOT - [15:13] */ #define WM831X_LDO6_ON_SLOT_SHIFT 13 /* LDO6_ON_SLOT - [15:13] */ #define WM831X_LDO6_ON_SLOT_WIDTH 3 /* LDO6_ON_SLOT - [15:13] */ #define WM831X_LDO6_ON_MODE 0x0100 /* LDO6_ON_MODE */ #define WM831X_LDO6_ON_MODE_MASK 0x0100 /* LDO6_ON_MODE */ #define WM831X_LDO6_ON_MODE_SHIFT 8 /* LDO6_ON_MODE */ #define WM831X_LDO6_ON_MODE_WIDTH 1 /* LDO6_ON_MODE */ #define WM831X_LDO6_ON_VSEL_MASK 0x001F /* LDO6_ON_VSEL - [4:0] */ #define WM831X_LDO6_ON_VSEL_SHIFT 0 /* LDO6_ON_VSEL - [4:0] */ #define WM831X_LDO6_ON_VSEL_WIDTH 5 /* LDO6_ON_VSEL - [4:0] */ /* * R16505 (0x4079) - LDO6 SLEEP Control */ #define WM831X_LDO6_SLP_SLOT_MASK 0xE000 /* LDO6_SLP_SLOT - [15:13] */ #define WM831X_LDO6_SLP_SLOT_SHIFT 13 /* LDO6_SLP_SLOT - [15:13] */ #define WM831X_LDO6_SLP_SLOT_WIDTH 3 /* LDO6_SLP_SLOT - [15:13] */ #define WM831X_LDO6_SLP_MODE 0x0100 /* LDO6_SLP_MODE */ #define WM831X_LDO6_SLP_MODE_MASK 0x0100 /* LDO6_SLP_MODE */ #define WM831X_LDO6_SLP_MODE_SHIFT 8 /* LDO6_SLP_MODE */ #define WM831X_LDO6_SLP_MODE_WIDTH 1 /* LDO6_SLP_MODE */ #define 
WM831X_LDO6_SLP_VSEL_MASK 0x001F /* LDO6_SLP_VSEL - [4:0] */ #define WM831X_LDO6_SLP_VSEL_SHIFT 0 /* LDO6_SLP_VSEL - [4:0] */ #define WM831X_LDO6_SLP_VSEL_WIDTH 5 /* LDO6_SLP_VSEL - [4:0] */ /* * R16506 (0x407A) - LDO7 Control */ #define WM831X_LDO7_ERR_ACT_MASK 0xC000 /* LDO7_ERR_ACT - [15:14] */ #define WM831X_LDO7_ERR_ACT_SHIFT 14 /* LDO7_ERR_ACT - [15:14] */ #define WM831X_LDO7_ERR_ACT_WIDTH 2 /* LDO7_ERR_ACT - [15:14] */ #define WM831X_LDO7_HWC_SRC_MASK 0x1800 /* LDO7_HWC_SRC - [12:11] */ #define WM831X_LDO7_HWC_SRC_SHIFT 11 /* LDO7_HWC_SRC - [12:11] */ #define WM831X_LDO7_HWC_SRC_WIDTH 2 /* LDO7_HWC_SRC - [12:11] */ #define WM831X_LDO7_HWC_VSEL 0x0400 /* LDO7_HWC_VSEL */ #define WM831X_LDO7_HWC_VSEL_MASK 0x0400 /* LDO7_HWC_VSEL */ #define WM831X_LDO7_HWC_VSEL_SHIFT 10 /* LDO7_HWC_VSEL */ #define WM831X_LDO7_HWC_VSEL_WIDTH 1 /* LDO7_HWC_VSEL */ #define WM831X_LDO7_HWC_MODE_MASK 0x0300 /* LDO7_HWC_MODE - [9:8] */ #define WM831X_LDO7_HWC_MODE_SHIFT 8 /* LDO7_HWC_MODE - [9:8] */ #define WM831X_LDO7_HWC_MODE_WIDTH 2 /* LDO7_HWC_MODE - [9:8] */ #define WM831X_LDO7_FLT 0x0080 /* LDO7_FLT */ #define WM831X_LDO7_FLT_MASK 0x0080 /* LDO7_FLT */ #define WM831X_LDO7_FLT_SHIFT 7 /* LDO7_FLT */ #define WM831X_LDO7_FLT_WIDTH 1 /* LDO7_FLT */ #define WM831X_LDO7_SWI 0x0040 /* LDO7_SWI */ #define WM831X_LDO7_SWI_MASK 0x0040 /* LDO7_SWI */ #define WM831X_LDO7_SWI_SHIFT 6 /* LDO7_SWI */ #define WM831X_LDO7_SWI_WIDTH 1 /* LDO7_SWI */ /* * R16507 (0x407B) - LDO7 ON Control */ #define WM831X_LDO7_ON_SLOT_MASK 0xE000 /* LDO7_ON_SLOT - [15:13] */ #define WM831X_LDO7_ON_SLOT_SHIFT 13 /* LDO7_ON_SLOT - [15:13] */ #define WM831X_LDO7_ON_SLOT_WIDTH 3 /* LDO7_ON_SLOT - [15:13] */ #define WM831X_LDO7_ON_MODE 0x0100 /* LDO7_ON_MODE */ #define WM831X_LDO7_ON_MODE_MASK 0x0100 /* LDO7_ON_MODE */ #define WM831X_LDO7_ON_MODE_SHIFT 8 /* LDO7_ON_MODE */ #define WM831X_LDO7_ON_MODE_WIDTH 1 /* LDO7_ON_MODE */ #define WM831X_LDO7_ON_VSEL_MASK 0x001F /* LDO7_ON_VSEL - [4:0] */ #define WM831X_LDO7_ON_VSEL_SHIFT 0 /* LDO7_ON_VSEL - [4:0] */ #define WM831X_LDO7_ON_VSEL_WIDTH 5 /* LDO7_ON_VSEL - [4:0] */ /* * R16508 (0x407C) - LDO7 SLEEP Control */ #define WM831X_LDO7_SLP_SLOT_MASK 0xE000 /* LDO7_SLP_SLOT - [15:13] */ #define WM831X_LDO7_SLP_SLOT_SHIFT 13 /* LDO7_SLP_SLOT - [15:13] */ #define WM831X_LDO7_SLP_SLOT_WIDTH 3 /* LDO7_SLP_SLOT - [15:13] */ #define WM831X_LDO7_SLP_MODE 0x0100 /* LDO7_SLP_MODE */ #define WM831X_LDO7_SLP_MODE_MASK 0x0100 /* LDO7_SLP_MODE */ #define WM831X_LDO7_SLP_MODE_SHIFT 8 /* LDO7_SLP_MODE */ #define WM831X_LDO7_SLP_MODE_WIDTH 1 /* LDO7_SLP_MODE */ #define WM831X_LDO7_SLP_VSEL_MASK 0x001F /* LDO7_SLP_VSEL - [4:0] */ #define WM831X_LDO7_SLP_VSEL_SHIFT 0 /* LDO7_SLP_VSEL - [4:0] */ #define WM831X_LDO7_SLP_VSEL_WIDTH 5 /* LDO7_SLP_VSEL - [4:0] */ /* * R16509 (0x407D) - LDO8 Control */ #define WM831X_LDO8_ERR_ACT_MASK 0xC000 /* LDO8_ERR_ACT - [15:14] */ #define WM831X_LDO8_ERR_ACT_SHIFT 14 /* LDO8_ERR_ACT - [15:14] */ #define WM831X_LDO8_ERR_ACT_WIDTH 2 /* LDO8_ERR_ACT - [15:14] */ #define WM831X_LDO8_HWC_SRC_MASK 0x1800 /* LDO8_HWC_SRC - [12:11] */ #define WM831X_LDO8_HWC_SRC_SHIFT 11 /* LDO8_HWC_SRC - [12:11] */ #define WM831X_LDO8_HWC_SRC_WIDTH 2 /* LDO8_HWC_SRC - [12:11] */ #define WM831X_LDO8_HWC_VSEL 0x0400 /* LDO8_HWC_VSEL */ #define WM831X_LDO8_HWC_VSEL_MASK 0x0400 /* LDO8_HWC_VSEL */ #define WM831X_LDO8_HWC_VSEL_SHIFT 10 /* LDO8_HWC_VSEL */ #define WM831X_LDO8_HWC_VSEL_WIDTH 1 /* LDO8_HWC_VSEL */ #define WM831X_LDO8_HWC_MODE_MASK 0x0300 /* LDO8_HWC_MODE - [9:8] */ #define 
WM831X_LDO8_HWC_MODE_SHIFT 8 /* LDO8_HWC_MODE - [9:8] */ #define WM831X_LDO8_HWC_MODE_WIDTH 2 /* LDO8_HWC_MODE - [9:8] */ #define WM831X_LDO8_FLT 0x0080 /* LDO8_FLT */ #define WM831X_LDO8_FLT_MASK 0x0080 /* LDO8_FLT */ #define WM831X_LDO8_FLT_SHIFT 7 /* LDO8_FLT */ #define WM831X_LDO8_FLT_WIDTH 1 /* LDO8_FLT */ #define WM831X_LDO8_SWI 0x0040 /* LDO8_SWI */ #define WM831X_LDO8_SWI_MASK 0x0040 /* LDO8_SWI */ #define WM831X_LDO8_SWI_SHIFT 6 /* LDO8_SWI */ #define WM831X_LDO8_SWI_WIDTH 1 /* LDO8_SWI */ /* * R16510 (0x407E) - LDO8 ON Control */ #define WM831X_LDO8_ON_SLOT_MASK 0xE000 /* LDO8_ON_SLOT - [15:13] */ #define WM831X_LDO8_ON_SLOT_SHIFT 13 /* LDO8_ON_SLOT - [15:13] */ #define WM831X_LDO8_ON_SLOT_WIDTH 3 /* LDO8_ON_SLOT - [15:13] */ #define WM831X_LDO8_ON_MODE 0x0100 /* LDO8_ON_MODE */ #define WM831X_LDO8_ON_MODE_MASK 0x0100 /* LDO8_ON_MODE */ #define WM831X_LDO8_ON_MODE_SHIFT 8 /* LDO8_ON_MODE */ #define WM831X_LDO8_ON_MODE_WIDTH 1 /* LDO8_ON_MODE */ #define WM831X_LDO8_ON_VSEL_MASK 0x001F /* LDO8_ON_VSEL - [4:0] */ #define WM831X_LDO8_ON_VSEL_SHIFT 0 /* LDO8_ON_VSEL - [4:0] */ #define WM831X_LDO8_ON_VSEL_WIDTH 5 /* LDO8_ON_VSEL - [4:0] */ /* * R16511 (0x407F) - LDO8 SLEEP Control */ #define WM831X_LDO8_SLP_SLOT_MASK 0xE000 /* LDO8_SLP_SLOT - [15:13] */ #define WM831X_LDO8_SLP_SLOT_SHIFT 13 /* LDO8_SLP_SLOT - [15:13] */ #define WM831X_LDO8_SLP_SLOT_WIDTH 3 /* LDO8_SLP_SLOT - [15:13] */ #define WM831X_LDO8_SLP_MODE 0x0100 /* LDO8_SLP_MODE */ #define WM831X_LDO8_SLP_MODE_MASK 0x0100 /* LDO8_SLP_MODE */ #define WM831X_LDO8_SLP_MODE_SHIFT 8 /* LDO8_SLP_MODE */ #define WM831X_LDO8_SLP_MODE_WIDTH 1 /* LDO8_SLP_MODE */ #define WM831X_LDO8_SLP_VSEL_MASK 0x001F /* LDO8_SLP_VSEL - [4:0] */ #define WM831X_LDO8_SLP_VSEL_SHIFT 0 /* LDO8_SLP_VSEL - [4:0] */ #define WM831X_LDO8_SLP_VSEL_WIDTH 5 /* LDO8_SLP_VSEL - [4:0] */ /* * R16512 (0x4080) - LDO9 Control */ #define WM831X_LDO9_ERR_ACT_MASK 0xC000 /* LDO9_ERR_ACT - [15:14] */ #define WM831X_LDO9_ERR_ACT_SHIFT 14 /* LDO9_ERR_ACT - [15:14] */ #define WM831X_LDO9_ERR_ACT_WIDTH 2 /* LDO9_ERR_ACT - [15:14] */ #define WM831X_LDO9_HWC_SRC_MASK 0x1800 /* LDO9_HWC_SRC - [12:11] */ #define WM831X_LDO9_HWC_SRC_SHIFT 11 /* LDO9_HWC_SRC - [12:11] */ #define WM831X_LDO9_HWC_SRC_WIDTH 2 /* LDO9_HWC_SRC - [12:11] */ #define WM831X_LDO9_HWC_VSEL 0x0400 /* LDO9_HWC_VSEL */ #define WM831X_LDO9_HWC_VSEL_MASK 0x0400 /* LDO9_HWC_VSEL */ #define WM831X_LDO9_HWC_VSEL_SHIFT 10 /* LDO9_HWC_VSEL */ #define WM831X_LDO9_HWC_VSEL_WIDTH 1 /* LDO9_HWC_VSEL */ #define WM831X_LDO9_HWC_MODE_MASK 0x0300 /* LDO9_HWC_MODE - [9:8] */ #define WM831X_LDO9_HWC_MODE_SHIFT 8 /* LDO9_HWC_MODE - [9:8] */ #define WM831X_LDO9_HWC_MODE_WIDTH 2 /* LDO9_HWC_MODE - [9:8] */ #define WM831X_LDO9_FLT 0x0080 /* LDO9_FLT */ #define WM831X_LDO9_FLT_MASK 0x0080 /* LDO9_FLT */ #define WM831X_LDO9_FLT_SHIFT 7 /* LDO9_FLT */ #define WM831X_LDO9_FLT_WIDTH 1 /* LDO9_FLT */ #define WM831X_LDO9_SWI 0x0040 /* LDO9_SWI */ #define WM831X_LDO9_SWI_MASK 0x0040 /* LDO9_SWI */ #define WM831X_LDO9_SWI_SHIFT 6 /* LDO9_SWI */ #define WM831X_LDO9_SWI_WIDTH 1 /* LDO9_SWI */ /* * R16513 (0x4081) - LDO9 ON Control */ #define WM831X_LDO9_ON_SLOT_MASK 0xE000 /* LDO9_ON_SLOT - [15:13] */ #define WM831X_LDO9_ON_SLOT_SHIFT 13 /* LDO9_ON_SLOT - [15:13] */ #define WM831X_LDO9_ON_SLOT_WIDTH 3 /* LDO9_ON_SLOT - [15:13] */ #define WM831X_LDO9_ON_MODE 0x0100 /* LDO9_ON_MODE */ #define WM831X_LDO9_ON_MODE_MASK 0x0100 /* LDO9_ON_MODE */ #define WM831X_LDO9_ON_MODE_SHIFT 8 /* LDO9_ON_MODE */ #define WM831X_LDO9_ON_MODE_WIDTH 1 /* 
LDO9_ON_MODE */ #define WM831X_LDO9_ON_VSEL_MASK 0x001F /* LDO9_ON_VSEL - [4:0] */ #define WM831X_LDO9_ON_VSEL_SHIFT 0 /* LDO9_ON_VSEL - [4:0] */ #define WM831X_LDO9_ON_VSEL_WIDTH 5 /* LDO9_ON_VSEL - [4:0] */ /* * R16514 (0x4082) - LDO9 SLEEP Control */ #define WM831X_LDO9_SLP_SLOT_MASK 0xE000 /* LDO9_SLP_SLOT - [15:13] */ #define WM831X_LDO9_SLP_SLOT_SHIFT 13 /* LDO9_SLP_SLOT - [15:13] */ #define WM831X_LDO9_SLP_SLOT_WIDTH 3 /* LDO9_SLP_SLOT - [15:13] */ #define WM831X_LDO9_SLP_MODE 0x0100 /* LDO9_SLP_MODE */ #define WM831X_LDO9_SLP_MODE_MASK 0x0100 /* LDO9_SLP_MODE */ #define WM831X_LDO9_SLP_MODE_SHIFT 8 /* LDO9_SLP_MODE */ #define WM831X_LDO9_SLP_MODE_WIDTH 1 /* LDO9_SLP_MODE */ #define WM831X_LDO9_SLP_VSEL_MASK 0x001F /* LDO9_SLP_VSEL - [4:0] */ #define WM831X_LDO9_SLP_VSEL_SHIFT 0 /* LDO9_SLP_VSEL - [4:0] */ #define WM831X_LDO9_SLP_VSEL_WIDTH 5 /* LDO9_SLP_VSEL - [4:0] */ /* * R16515 (0x4083) - LDO10 Control */ #define WM831X_LDO10_ERR_ACT_MASK 0xC000 /* LDO10_ERR_ACT - [15:14] */ #define WM831X_LDO10_ERR_ACT_SHIFT 14 /* LDO10_ERR_ACT - [15:14] */ #define WM831X_LDO10_ERR_ACT_WIDTH 2 /* LDO10_ERR_ACT - [15:14] */ #define WM831X_LDO10_HWC_SRC_MASK 0x1800 /* LDO10_HWC_SRC - [12:11] */ #define WM831X_LDO10_HWC_SRC_SHIFT 11 /* LDO10_HWC_SRC - [12:11] */ #define WM831X_LDO10_HWC_SRC_WIDTH 2 /* LDO10_HWC_SRC - [12:11] */ #define WM831X_LDO10_HWC_VSEL 0x0400 /* LDO10_HWC_VSEL */ #define WM831X_LDO10_HWC_VSEL_MASK 0x0400 /* LDO10_HWC_VSEL */ #define WM831X_LDO10_HWC_VSEL_SHIFT 10 /* LDO10_HWC_VSEL */ #define WM831X_LDO10_HWC_VSEL_WIDTH 1 /* LDO10_HWC_VSEL */ #define WM831X_LDO10_HWC_MODE_MASK 0x0300 /* LDO10_HWC_MODE - [9:8] */ #define WM831X_LDO10_HWC_MODE_SHIFT 8 /* LDO10_HWC_MODE - [9:8] */ #define WM831X_LDO10_HWC_MODE_WIDTH 2 /* LDO10_HWC_MODE - [9:8] */ #define WM831X_LDO10_FLT 0x0080 /* LDO10_FLT */ #define WM831X_LDO10_FLT_MASK 0x0080 /* LDO10_FLT */ #define WM831X_LDO10_FLT_SHIFT 7 /* LDO10_FLT */ #define WM831X_LDO10_FLT_WIDTH 1 /* LDO10_FLT */ #define WM831X_LDO10_SWI 0x0040 /* LDO10_SWI */ #define WM831X_LDO10_SWI_MASK 0x0040 /* LDO10_SWI */ #define WM831X_LDO10_SWI_SHIFT 6 /* LDO10_SWI */ #define WM831X_LDO10_SWI_WIDTH 1 /* LDO10_SWI */ /* * R16516 (0x4084) - LDO10 ON Control */ #define WM831X_LDO10_ON_SLOT_MASK 0xE000 /* LDO10_ON_SLOT - [15:13] */ #define WM831X_LDO10_ON_SLOT_SHIFT 13 /* LDO10_ON_SLOT - [15:13] */ #define WM831X_LDO10_ON_SLOT_WIDTH 3 /* LDO10_ON_SLOT - [15:13] */ #define WM831X_LDO10_ON_MODE 0x0100 /* LDO10_ON_MODE */ #define WM831X_LDO10_ON_MODE_MASK 0x0100 /* LDO10_ON_MODE */ #define WM831X_LDO10_ON_MODE_SHIFT 8 /* LDO10_ON_MODE */ #define WM831X_LDO10_ON_MODE_WIDTH 1 /* LDO10_ON_MODE */ #define WM831X_LDO10_ON_VSEL_MASK 0x001F /* LDO10_ON_VSEL - [4:0] */ #define WM831X_LDO10_ON_VSEL_SHIFT 0 /* LDO10_ON_VSEL - [4:0] */ #define WM831X_LDO10_ON_VSEL_WIDTH 5 /* LDO10_ON_VSEL - [4:0] */ /* * R16517 (0x4085) - LDO10 SLEEP Control */ #define WM831X_LDO10_SLP_SLOT_MASK 0xE000 /* LDO10_SLP_SLOT - [15:13] */ #define WM831X_LDO10_SLP_SLOT_SHIFT 13 /* LDO10_SLP_SLOT - [15:13] */ #define WM831X_LDO10_SLP_SLOT_WIDTH 3 /* LDO10_SLP_SLOT - [15:13] */ #define WM831X_LDO10_SLP_MODE 0x0100 /* LDO10_SLP_MODE */ #define WM831X_LDO10_SLP_MODE_MASK 0x0100 /* LDO10_SLP_MODE */ #define WM831X_LDO10_SLP_MODE_SHIFT 8 /* LDO10_SLP_MODE */ #define WM831X_LDO10_SLP_MODE_WIDTH 1 /* LDO10_SLP_MODE */ #define WM831X_LDO10_SLP_VSEL_MASK 0x001F /* LDO10_SLP_VSEL - [4:0] */ #define WM831X_LDO10_SLP_VSEL_SHIFT 0 /* LDO10_SLP_VSEL - [4:0] */ #define WM831X_LDO10_SLP_VSEL_WIDTH 5 /* LDO10_SLP_VSEL 
- [4:0] */ /* * R16519 (0x4087) - LDO11 ON Control */ #define WM831X_LDO11_ON_SLOT_MASK 0xE000 /* LDO11_ON_SLOT - [15:13] */ #define WM831X_LDO11_ON_SLOT_SHIFT 13 /* LDO11_ON_SLOT - [15:13] */ #define WM831X_LDO11_ON_SLOT_WIDTH 3 /* LDO11_ON_SLOT - [15:13] */ #define WM831X_LDO11_OFFENA 0x1000 /* LDO11_OFFENA */ #define WM831X_LDO11_OFFENA_MASK 0x1000 /* LDO11_OFFENA */ #define WM831X_LDO11_OFFENA_SHIFT 12 /* LDO11_OFFENA */ #define WM831X_LDO11_OFFENA_WIDTH 1 /* LDO11_OFFENA */ #define WM831X_LDO11_VSEL_SRC 0x0080 /* LDO11_VSEL_SRC */ #define WM831X_LDO11_VSEL_SRC_MASK 0x0080 /* LDO11_VSEL_SRC */ #define WM831X_LDO11_VSEL_SRC_SHIFT 7 /* LDO11_VSEL_SRC */ #define WM831X_LDO11_VSEL_SRC_WIDTH 1 /* LDO11_VSEL_SRC */ #define WM831X_LDO11_ON_VSEL_MASK 0x000F /* LDO11_ON_VSEL - [3:0] */ #define WM831X_LDO11_ON_VSEL_SHIFT 0 /* LDO11_ON_VSEL - [3:0] */ #define WM831X_LDO11_ON_VSEL_WIDTH 4 /* LDO11_ON_VSEL - [3:0] */ /* * R16520 (0x4088) - LDO11 SLEEP Control */ #define WM831X_LDO11_SLP_SLOT_MASK 0xE000 /* LDO11_SLP_SLOT - [15:13] */ #define WM831X_LDO11_SLP_SLOT_SHIFT 13 /* LDO11_SLP_SLOT - [15:13] */ #define WM831X_LDO11_SLP_SLOT_WIDTH 3 /* LDO11_SLP_SLOT - [15:13] */ #define WM831X_LDO11_SLP_VSEL_MASK 0x000F /* LDO11_SLP_VSEL - [3:0] */ #define WM831X_LDO11_SLP_VSEL_SHIFT 0 /* LDO11_SLP_VSEL - [3:0] */ #define WM831X_LDO11_SLP_VSEL_WIDTH 4 /* LDO11_SLP_VSEL - [3:0] */ /* * R16526 (0x408E) - Power Good Source 1 */ #define WM831X_DC4_OK 0x0008 /* DC4_OK */ #define WM831X_DC4_OK_MASK 0x0008 /* DC4_OK */ #define WM831X_DC4_OK_SHIFT 3 /* DC4_OK */ #define WM831X_DC4_OK_WIDTH 1 /* DC4_OK */ #define WM831X_DC3_OK 0x0004 /* DC3_OK */ #define WM831X_DC3_OK_MASK 0x0004 /* DC3_OK */ #define WM831X_DC3_OK_SHIFT 2 /* DC3_OK */ #define WM831X_DC3_OK_WIDTH 1 /* DC3_OK */ #define WM831X_DC2_OK 0x0002 /* DC2_OK */ #define WM831X_DC2_OK_MASK 0x0002 /* DC2_OK */ #define WM831X_DC2_OK_SHIFT 1 /* DC2_OK */ #define WM831X_DC2_OK_WIDTH 1 /* DC2_OK */ #define WM831X_DC1_OK 0x0001 /* DC1_OK */ #define WM831X_DC1_OK_MASK 0x0001 /* DC1_OK */ #define WM831X_DC1_OK_SHIFT 0 /* DC1_OK */ #define WM831X_DC1_OK_WIDTH 1 /* DC1_OK */ /* * R16527 (0x408F) - Power Good Source 2 */ #define WM831X_LDO10_OK 0x0200 /* LDO10_OK */ #define WM831X_LDO10_OK_MASK 0x0200 /* LDO10_OK */ #define WM831X_LDO10_OK_SHIFT 9 /* LDO10_OK */ #define WM831X_LDO10_OK_WIDTH 1 /* LDO10_OK */ #define WM831X_LDO9_OK 0x0100 /* LDO9_OK */ #define WM831X_LDO9_OK_MASK 0x0100 /* LDO9_OK */ #define WM831X_LDO9_OK_SHIFT 8 /* LDO9_OK */ #define WM831X_LDO9_OK_WIDTH 1 /* LDO9_OK */ #define WM831X_LDO8_OK 0x0080 /* LDO8_OK */ #define WM831X_LDO8_OK_MASK 0x0080 /* LDO8_OK */ #define WM831X_LDO8_OK_SHIFT 7 /* LDO8_OK */ #define WM831X_LDO8_OK_WIDTH 1 /* LDO8_OK */ #define WM831X_LDO7_OK 0x0040 /* LDO7_OK */ #define WM831X_LDO7_OK_MASK 0x0040 /* LDO7_OK */ #define WM831X_LDO7_OK_SHIFT 6 /* LDO7_OK */ #define WM831X_LDO7_OK_WIDTH 1 /* LDO7_OK */ #define WM831X_LDO6_OK 0x0020 /* LDO6_OK */ #define WM831X_LDO6_OK_MASK 0x0020 /* LDO6_OK */ #define WM831X_LDO6_OK_SHIFT 5 /* LDO6_OK */ #define WM831X_LDO6_OK_WIDTH 1 /* LDO6_OK */ #define WM831X_LDO5_OK 0x0010 /* LDO5_OK */ #define WM831X_LDO5_OK_MASK 0x0010 /* LDO5_OK */ #define WM831X_LDO5_OK_SHIFT 4 /* LDO5_OK */ #define WM831X_LDO5_OK_WIDTH 1 /* LDO5_OK */ #define WM831X_LDO4_OK 0x0008 /* LDO4_OK */ #define WM831X_LDO4_OK_MASK 0x0008 /* LDO4_OK */ #define WM831X_LDO4_OK_SHIFT 3 /* LDO4_OK */ #define WM831X_LDO4_OK_WIDTH 1 /* LDO4_OK */ #define WM831X_LDO3_OK 0x0004 /* LDO3_OK */ #define WM831X_LDO3_OK_MASK 0x0004 /* 
LDO3_OK */ #define WM831X_LDO3_OK_SHIFT 2 /* LDO3_OK */ #define WM831X_LDO3_OK_WIDTH 1 /* LDO3_OK */ #define WM831X_LDO2_OK 0x0002 /* LDO2_OK */ #define WM831X_LDO2_OK_MASK 0x0002 /* LDO2_OK */ #define WM831X_LDO2_OK_SHIFT 1 /* LDO2_OK */ #define WM831X_LDO2_OK_WIDTH 1 /* LDO2_OK */ #define WM831X_LDO1_OK 0x0001 /* LDO1_OK */ #define WM831X_LDO1_OK_MASK 0x0001 /* LDO1_OK */ #define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */ #define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ #define WM831X_ISINK_MAX_ISEL 55 extern const unsigned int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1]; #endif mfd/wm831x/status.h 0000644 00000002731 14722070374 0010066 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> */ #ifndef __MFD_WM831X_STATUS_H__ #define __MFD_WM831X_STATUS_H__ #define WM831X_LED_SRC_MASK 0xC000 /* LED_SRC - [15:14] */ #define WM831X_LED_SRC_SHIFT 14 /* LED_SRC - [15:14] */ #define WM831X_LED_SRC_WIDTH 2 /* LED_SRC - [15:14] */ #define WM831X_LED_MODE_MASK 0x0300 /* LED_MODE - [9:8] */ #define WM831X_LED_MODE_SHIFT 8 /* LED_MODE - [9:8] */ #define WM831X_LED_MODE_WIDTH 2 /* LED_MODE - [9:8] */ #define WM831X_LED_SEQ_LEN_MASK 0x0030 /* LED_SEQ_LEN - [5:4] */ #define WM831X_LED_SEQ_LEN_SHIFT 4 /* LED_SEQ_LEN - [5:4] */ #define WM831X_LED_SEQ_LEN_WIDTH 2 /* LED_SEQ_LEN - [5:4] */ #define WM831X_LED_DUR_MASK 0x000C /* LED_DUR - [3:2] */ #define WM831X_LED_DUR_SHIFT 2 /* LED_DUR - [3:2] */ #define WM831X_LED_DUR_WIDTH 2 /* LED_DUR - [3:2] */ #define WM831X_LED_DUTY_CYC_MASK 0x0003 /* LED_DUTY_CYC - [1:0] */ #define WM831X_LED_DUTY_CYC_SHIFT 0 /* LED_DUTY_CYC - [1:0] */ #define WM831X_LED_DUTY_CYC_WIDTH 2 /* LED_DUTY_CYC - [1:0] */ #endif mfd/wm831x/gpio.h 0000644 00000005657 14722070374 0007513 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/gpio.h -- GPIO for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. 
* * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> */ #ifndef __MFD_WM831X_GPIO_H__ #define __MFD_WM831X_GPIO_H__ /* * R16440-16455 (0x4038-0x4047) - GPIOx Control */ #define WM831X_GPN_DIR 0x8000 /* GPN_DIR */ #define WM831X_GPN_DIR_MASK 0x8000 /* GPN_DIR */ #define WM831X_GPN_DIR_SHIFT 15 /* GPN_DIR */ #define WM831X_GPN_DIR_WIDTH 1 /* GPN_DIR */ #define WM831X_GPN_PULL_MASK 0x6000 /* GPN_PULL - [14:13] */ #define WM831X_GPN_PULL_SHIFT 13 /* GPN_PULL - [14:13] */ #define WM831X_GPN_PULL_WIDTH 2 /* GPN_PULL - [14:13] */ #define WM831X_GPN_INT_MODE 0x1000 /* GPN_INT_MODE */ #define WM831X_GPN_INT_MODE_MASK 0x1000 /* GPN_INT_MODE */ #define WM831X_GPN_INT_MODE_SHIFT 12 /* GPN_INT_MODE */ #define WM831X_GPN_INT_MODE_WIDTH 1 /* GPN_INT_MODE */ #define WM831X_GPN_PWR_DOM 0x0800 /* GPN_PWR_DOM */ #define WM831X_GPN_PWR_DOM_MASK 0x0800 /* GPN_PWR_DOM */ #define WM831X_GPN_PWR_DOM_SHIFT 11 /* GPN_PWR_DOM */ #define WM831X_GPN_PWR_DOM_WIDTH 1 /* GPN_PWR_DOM */ #define WM831X_GPN_POL 0x0400 /* GPN_POL */ #define WM831X_GPN_POL_MASK 0x0400 /* GPN_POL */ #define WM831X_GPN_POL_SHIFT 10 /* GPN_POL */ #define WM831X_GPN_POL_WIDTH 1 /* GPN_POL */ #define WM831X_GPN_OD 0x0200 /* GPN_OD */ #define WM831X_GPN_OD_MASK 0x0200 /* GPN_OD */ #define WM831X_GPN_OD_SHIFT 9 /* GPN_OD */ #define WM831X_GPN_OD_WIDTH 1 /* GPN_OD */ #define WM831X_GPN_ENA 0x0080 /* GPN_ENA */ #define WM831X_GPN_ENA_MASK 0x0080 /* GPN_ENA */ #define WM831X_GPN_ENA_SHIFT 7 /* GPN_ENA */ #define WM831X_GPN_ENA_WIDTH 1 /* GPN_ENA */ #define WM831X_GPN_TRI 0x0080 /* GPN_TRI */ #define WM831X_GPN_TRI_MASK 0x0080 /* GPN_TRI */ #define WM831X_GPN_TRI_SHIFT 7 /* GPN_TRI */ #define WM831X_GPN_TRI_WIDTH 1 /* GPN_TRI */ #define WM831X_GPN_FN_MASK 0x000F /* GPN_FN - [3:0] */ #define WM831X_GPN_FN_SHIFT 0 /* GPN_FN - [3:0] */ #define WM831X_GPN_FN_WIDTH 4 /* GPN_FN - [3:0] */ #define WM831X_GPIO_PULL_NONE (0 << WM831X_GPN_PULL_SHIFT) #define WM831X_GPIO_PULL_DOWN (1 << WM831X_GPN_PULL_SHIFT) #define WM831X_GPIO_PULL_UP (2 << WM831X_GPN_PULL_SHIFT) #endif mfd/wm831x/pmu.h 0000644 00000026513 14722070374 0007350 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/pmu.h -- PMU for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. 
* * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> */ #ifndef __MFD_WM831X_PMU_H__ #define __MFD_WM831X_PMU_H__ /* * R16387 (0x4003) - Power State */ #define WM831X_CHIP_ON 0x8000 /* CHIP_ON */ #define WM831X_CHIP_ON_MASK 0x8000 /* CHIP_ON */ #define WM831X_CHIP_ON_SHIFT 15 /* CHIP_ON */ #define WM831X_CHIP_ON_WIDTH 1 /* CHIP_ON */ #define WM831X_CHIP_SLP 0x4000 /* CHIP_SLP */ #define WM831X_CHIP_SLP_MASK 0x4000 /* CHIP_SLP */ #define WM831X_CHIP_SLP_SHIFT 14 /* CHIP_SLP */ #define WM831X_CHIP_SLP_WIDTH 1 /* CHIP_SLP */ #define WM831X_REF_LP 0x1000 /* REF_LP */ #define WM831X_REF_LP_MASK 0x1000 /* REF_LP */ #define WM831X_REF_LP_SHIFT 12 /* REF_LP */ #define WM831X_REF_LP_WIDTH 1 /* REF_LP */ #define WM831X_PWRSTATE_DLY_MASK 0x0C00 /* PWRSTATE_DLY - [11:10] */ #define WM831X_PWRSTATE_DLY_SHIFT 10 /* PWRSTATE_DLY - [11:10] */ #define WM831X_PWRSTATE_DLY_WIDTH 2 /* PWRSTATE_DLY - [11:10] */ #define WM831X_SWRST_DLY 0x0200 /* SWRST_DLY */ #define WM831X_SWRST_DLY_MASK 0x0200 /* SWRST_DLY */ #define WM831X_SWRST_DLY_SHIFT 9 /* SWRST_DLY */ #define WM831X_SWRST_DLY_WIDTH 1 /* SWRST_DLY */ #define WM831X_USB100MA_STARTUP_MASK 0x0030 /* USB100MA_STARTUP - [5:4] */ #define WM831X_USB100MA_STARTUP_SHIFT 4 /* USB100MA_STARTUP - [5:4] */ #define WM831X_USB100MA_STARTUP_WIDTH 2 /* USB100MA_STARTUP - [5:4] */ #define WM831X_USB_CURR_STS 0x0008 /* USB_CURR_STS */ #define WM831X_USB_CURR_STS_MASK 0x0008 /* USB_CURR_STS */ #define WM831X_USB_CURR_STS_SHIFT 3 /* USB_CURR_STS */ #define WM831X_USB_CURR_STS_WIDTH 1 /* USB_CURR_STS */ #define WM831X_USB_ILIM_MASK 0x0007 /* USB_ILIM - [2:0] */ #define WM831X_USB_ILIM_SHIFT 0 /* USB_ILIM - [2:0] */ #define WM831X_USB_ILIM_WIDTH 3 /* USB_ILIM - [2:0] */ /* * R16397 (0x400D) - System Status */ #define WM831X_THW_STS 0x8000 /* THW_STS */ #define WM831X_THW_STS_MASK 0x8000 /* THW_STS */ #define WM831X_THW_STS_SHIFT 15 /* THW_STS */ #define WM831X_THW_STS_WIDTH 1 /* THW_STS */ #define WM831X_PWR_SRC_BATT 0x0400 /* PWR_SRC_BATT */ #define WM831X_PWR_SRC_BATT_MASK 0x0400 /* PWR_SRC_BATT */ #define WM831X_PWR_SRC_BATT_SHIFT 10 /* PWR_SRC_BATT */ #define WM831X_PWR_SRC_BATT_WIDTH 1 /* PWR_SRC_BATT */ #define WM831X_PWR_WALL 0x0200 /* PWR_WALL */ #define WM831X_PWR_WALL_MASK 0x0200 /* PWR_WALL */ #define WM831X_PWR_WALL_SHIFT 9 /* PWR_WALL */ #define WM831X_PWR_WALL_WIDTH 1 /* PWR_WALL */ #define WM831X_PWR_USB 0x0100 /* PWR_USB */ #define WM831X_PWR_USB_MASK 0x0100 /* PWR_USB */ #define WM831X_PWR_USB_SHIFT 8 /* PWR_USB */ #define WM831X_PWR_USB_WIDTH 1 /* PWR_USB */ #define WM831X_MAIN_STATE_MASK 0x001F /* MAIN_STATE - [4:0] */ #define WM831X_MAIN_STATE_SHIFT 0 /* MAIN_STATE - [4:0] */ #define WM831X_MAIN_STATE_WIDTH 5 /* MAIN_STATE - [4:0] */ /* * R16456 (0x4048) - Charger Control 1 */ #define WM831X_CHG_ENA 0x8000 /* CHG_ENA */ #define WM831X_CHG_ENA_MASK 0x8000 /* CHG_ENA */ #define WM831X_CHG_ENA_SHIFT 15 /* CHG_ENA */ #define WM831X_CHG_ENA_WIDTH 1 /* CHG_ENA */ #define WM831X_CHG_FRC 0x4000 /* CHG_FRC */ #define WM831X_CHG_FRC_MASK 0x4000 /* CHG_FRC */ #define WM831X_CHG_FRC_SHIFT 14 /* CHG_FRC */ #define WM831X_CHG_FRC_WIDTH 1 /* CHG_FRC */ #define WM831X_CHG_ITERM_MASK 0x1C00 /* CHG_ITERM - [12:10] */ #define WM831X_CHG_ITERM_SHIFT 10 /* CHG_ITERM - [12:10] */ #define WM831X_CHG_ITERM_WIDTH 3 /* CHG_ITERM - [12:10] */ #define WM831X_CHG_FAST 0x0020 /* CHG_FAST */ #define WM831X_CHG_FAST_MASK 0x0020 /* CHG_FAST */ #define WM831X_CHG_FAST_SHIFT 5 /* CHG_FAST */ #define WM831X_CHG_FAST_WIDTH 1 /* CHG_FAST */ #define WM831X_CHG_IMON_ENA 0x0002 /* 
CHG_IMON_ENA */
#define WM831X_CHG_IMON_ENA_MASK       0x0002  /* CHG_IMON_ENA */
#define WM831X_CHG_IMON_ENA_SHIFT           1  /* CHG_IMON_ENA */
#define WM831X_CHG_IMON_ENA_WIDTH           1  /* CHG_IMON_ENA */
#define WM831X_CHG_CHIP_TEMP_MON       0x0001  /* CHG_CHIP_TEMP_MON */
#define WM831X_CHG_CHIP_TEMP_MON_MASK  0x0001  /* CHG_CHIP_TEMP_MON */
#define WM831X_CHG_CHIP_TEMP_MON_SHIFT      0  /* CHG_CHIP_TEMP_MON */
#define WM831X_CHG_CHIP_TEMP_MON_WIDTH      1  /* CHG_CHIP_TEMP_MON */

/*
 * R16457 (0x4049) - Charger Control 2
 */
#define WM831X_CHG_OFF_MSK             0x4000  /* CHG_OFF_MSK */
#define WM831X_CHG_OFF_MSK_MASK        0x4000  /* CHG_OFF_MSK */
#define WM831X_CHG_OFF_MSK_SHIFT           14  /* CHG_OFF_MSK */
#define WM831X_CHG_OFF_MSK_WIDTH            1  /* CHG_OFF_MSK */
#define WM831X_CHG_TIME_MASK           0x0F00  /* CHG_TIME - [11:8] */
#define WM831X_CHG_TIME_SHIFT               8  /* CHG_TIME - [11:8] */
#define WM831X_CHG_TIME_WIDTH               4  /* CHG_TIME - [11:8] */
#define WM831X_CHG_TRKL_ILIM_MASK      0x00C0  /* CHG_TRKL_ILIM - [7:6] */
#define WM831X_CHG_TRKL_ILIM_SHIFT          6  /* CHG_TRKL_ILIM - [7:6] */
#define WM831X_CHG_TRKL_ILIM_WIDTH          2  /* CHG_TRKL_ILIM - [7:6] */
#define WM831X_CHG_VSEL_MASK           0x0030  /* CHG_VSEL - [5:4] */
#define WM831X_CHG_VSEL_SHIFT               4  /* CHG_VSEL - [5:4] */
#define WM831X_CHG_VSEL_WIDTH               2  /* CHG_VSEL - [5:4] */
#define WM831X_CHG_FAST_ILIM_MASK      0x000F  /* CHG_FAST_ILIM - [3:0] */
#define WM831X_CHG_FAST_ILIM_SHIFT          0  /* CHG_FAST_ILIM - [3:0] */
#define WM831X_CHG_FAST_ILIM_WIDTH          4  /* CHG_FAST_ILIM - [3:0] */

/*
 * R16458 (0x404A) - Charger Status
 */
#define WM831X_BATT_OV_STS             0x8000  /* BATT_OV_STS */
#define WM831X_BATT_OV_STS_MASK        0x8000  /* BATT_OV_STS */
#define WM831X_BATT_OV_STS_SHIFT           15  /* BATT_OV_STS */
#define WM831X_BATT_OV_STS_WIDTH            1  /* BATT_OV_STS */
#define WM831X_CHG_STATE_MASK          0x7000  /* CHG_STATE - [14:12] */
#define WM831X_CHG_STATE_SHIFT             12  /* CHG_STATE - [14:12] */
#define WM831X_CHG_STATE_WIDTH              3  /* CHG_STATE - [14:12] */
#define WM831X_BATT_HOT_STS            0x0800  /* BATT_HOT_STS */
#define WM831X_BATT_HOT_STS_MASK       0x0800  /* BATT_HOT_STS */
#define WM831X_BATT_HOT_STS_SHIFT          11  /* BATT_HOT_STS */
#define WM831X_BATT_HOT_STS_WIDTH           1  /* BATT_HOT_STS */
#define WM831X_BATT_COLD_STS           0x0400  /* BATT_COLD_STS */
#define WM831X_BATT_COLD_STS_MASK      0x0400  /* BATT_COLD_STS */
#define WM831X_BATT_COLD_STS_SHIFT         10  /* BATT_COLD_STS */
#define WM831X_BATT_COLD_STS_WIDTH          1  /* BATT_COLD_STS */
#define WM831X_CHG_TOPOFF              0x0200  /* CHG_TOPOFF */
#define WM831X_CHG_TOPOFF_MASK         0x0200  /* CHG_TOPOFF */
#define WM831X_CHG_TOPOFF_SHIFT             9  /* CHG_TOPOFF */
#define WM831X_CHG_TOPOFF_WIDTH             1  /* CHG_TOPOFF */
#define WM831X_CHG_ACTIVE              0x0100  /* CHG_ACTIVE */
#define WM831X_CHG_ACTIVE_MASK         0x0100  /* CHG_ACTIVE */
#define WM831X_CHG_ACTIVE_SHIFT             8  /* CHG_ACTIVE */
#define WM831X_CHG_ACTIVE_WIDTH             1  /* CHG_ACTIVE */
#define WM831X_CHG_TIME_ELAPSED_MASK   0x00FF  /* CHG_TIME_ELAPSED - [7:0] */
#define WM831X_CHG_TIME_ELAPSED_SHIFT       0  /* CHG_TIME_ELAPSED - [7:0] */
#define WM831X_CHG_TIME_ELAPSED_WIDTH       8  /* CHG_TIME_ELAPSED - [7:0] */

#define WM831X_CHG_STATE_OFF         (0 << WM831X_CHG_STATE_SHIFT)
#define WM831X_CHG_STATE_TRICKLE     (1 << WM831X_CHG_STATE_SHIFT)
#define WM831X_CHG_STATE_FAST        (2 << WM831X_CHG_STATE_SHIFT)
#define WM831X_CHG_STATE_TRICKLE_OT  (3 << WM831X_CHG_STATE_SHIFT)
#define WM831X_CHG_STATE_FAST_OT     (4 << WM831X_CHG_STATE_SHIFT)
#define WM831X_CHG_STATE_DEFECTIVE   (5 << WM831X_CHG_STATE_SHIFT)
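/*
 * Editorial example: the WM831X_CHG_STATE_* values above are pre-shifted,
 * so a masked status read can be compared against them directly.  A sketch
 * assuming the wm831x_reg_read() helper and the WM831X_CHARGER_STATUS
 * register address from mfd/wm831x/core.h:
 */
static bool example_charger_active(struct wm831x *wm831x)
{
	int status = wm831x_reg_read(wm831x, WM831X_CHARGER_STATUS);

	if (status < 0)
		return false;  /* read failed; report not charging */

	switch (status & WM831X_CHG_STATE_MASK) {
	case WM831X_CHG_STATE_TRICKLE:
	case WM831X_CHG_STATE_FAST:
	case WM831X_CHG_STATE_TRICKLE_OT:
	case WM831X_CHG_STATE_FAST_OT:
		return true;
	default:
		return false;
	}
}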
/*
 * R16459 (0x404B) - Backup Charger Control
 */
#define WM831X_BKUP_CHG_ENA            0x8000  /* BKUP_CHG_ENA */
#define WM831X_BKUP_CHG_ENA_MASK       0x8000  /* BKUP_CHG_ENA */
#define WM831X_BKUP_CHG_ENA_SHIFT          15  /* BKUP_CHG_ENA */
#define WM831X_BKUP_CHG_ENA_WIDTH           1  /* BKUP_CHG_ENA */
#define WM831X_BKUP_CHG_STS            0x4000  /* BKUP_CHG_STS */
#define WM831X_BKUP_CHG_STS_MASK       0x4000  /* BKUP_CHG_STS */
#define WM831X_BKUP_CHG_STS_SHIFT          14  /* BKUP_CHG_STS */
#define WM831X_BKUP_CHG_STS_WIDTH           1  /* BKUP_CHG_STS */
#define WM831X_BKUP_CHG_MODE           0x1000  /* BKUP_CHG_MODE */
#define WM831X_BKUP_CHG_MODE_MASK      0x1000  /* BKUP_CHG_MODE */
#define WM831X_BKUP_CHG_MODE_SHIFT         12  /* BKUP_CHG_MODE */
#define WM831X_BKUP_CHG_MODE_WIDTH          1  /* BKUP_CHG_MODE */
#define WM831X_BKUP_BATT_DET_ENA       0x0800  /* BKUP_BATT_DET_ENA */
#define WM831X_BKUP_BATT_DET_ENA_MASK  0x0800  /* BKUP_BATT_DET_ENA */
#define WM831X_BKUP_BATT_DET_ENA_SHIFT     11  /* BKUP_BATT_DET_ENA */
#define WM831X_BKUP_BATT_DET_ENA_WIDTH      1  /* BKUP_BATT_DET_ENA */
#define WM831X_BKUP_BATT_STS           0x0400  /* BKUP_BATT_STS */
#define WM831X_BKUP_BATT_STS_MASK      0x0400  /* BKUP_BATT_STS */
#define WM831X_BKUP_BATT_STS_SHIFT         10  /* BKUP_BATT_STS */
#define WM831X_BKUP_BATT_STS_WIDTH          1  /* BKUP_BATT_STS */
#define WM831X_BKUP_CHG_VLIM           0x0010  /* BKUP_CHG_VLIM */
#define WM831X_BKUP_CHG_VLIM_MASK      0x0010  /* BKUP_CHG_VLIM */
#define WM831X_BKUP_CHG_VLIM_SHIFT          4  /* BKUP_CHG_VLIM */
#define WM831X_BKUP_CHG_VLIM_WIDTH          1  /* BKUP_CHG_VLIM */
#define WM831X_BKUP_CHG_ILIM_MASK      0x0003  /* BKUP_CHG_ILIM - [1:0] */
#define WM831X_BKUP_CHG_ILIM_SHIFT          0  /* BKUP_CHG_ILIM - [1:0] */
#define WM831X_BKUP_CHG_ILIM_WIDTH          2  /* BKUP_CHG_ILIM - [1:0] */

#endif

mfd/wm831x/pdata.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/linux/mfd/wm831x/pdata.h -- Platform data for WM831x
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 */

#ifndef __MFD_WM831X_PDATA_H__
#define __MFD_WM831X_PDATA_H__

struct wm831x;
struct regulator_init_data;

struct wm831x_backlight_pdata {
	int isink;      /** ISINK to use, 1 or 2 */
	int max_uA;     /** Maximum current to allow */
};

struct wm831x_backup_pdata {
	int charger_enable;
	int no_constant_voltage;  /** Disable constant voltage charging */
	int vlim;   /** Voltage limit in millivolts */
	int ilim;   /** Current limit in microamps */
};

struct wm831x_battery_pdata {
	int enable;         /** Enable charging */
	int fast_enable;    /** Enable fast charging */
	int off_mask;       /** Mask OFF while charging */
	int trickle_ilim;   /** Trickle charge current limit, in mA */
	int vsel;           /** Target voltage, in mV */
	int eoc_iterm;      /** End of trickle charge current, in mA */
	int fast_ilim;      /** Fast charge current limit, in mA */
	int timeout;        /** Charge cycle timeout, in minutes */
};

/**
 * Configuration for the WM831x DC-DC BuckWise convertors. This
 * should be passed as driver_data in the regulator_init_data.
 *
 * Currently all the configuration is for the fast DVS switching
 * support of the devices. This allows MFPs on the device to be
 * configured as an input to switch between two output voltages,
 * allowing voltage transitions without the expense of an access over
 * I2C or SPI buses.
 */
struct wm831x_buckv_pdata {
	int dvs_control_src;   /** Hardware DVS source to use (1 or 2) */
	int dvs_init_state;    /** DVS state to expect on startup */
	int dvs_state_gpio;    /** CPU GPIO to use for monitoring status */
};
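/*
 * Editorial example (not part of the original header): a sketch of how a
 * board file might wire up the DVS configuration described above.  The
 * wm831x_buckv_pdata blob hangs off regulator_init_data.driver_data; the
 * GPIO number and variable names are hypothetical, and the regulator
 * constraints are omitted (see linux/regulator/machine.h).
 */
static struct wm831x_buckv_pdata board_dc1_buckv = {
	.dvs_control_src = 1,   /* use hardware DVS control source 1 */
	.dvs_init_state  = 0,   /* expect the ON voltage at startup */
	.dvs_state_gpio  = 42,  /* hypothetical CPU GPIO monitoring DVS state */
};

static struct regulator_init_data board_dc1_init_data = {
	.driver_data = &board_dc1_buckv,  /* consumed by the DC-DC driver */
};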
/* Sources for status LED configuration.  Values are register values
 * plus 1 to allow for a zero default for preserve.
 */
enum wm831x_status_src {
	WM831X_STATUS_PRESERVE = 0,  /* Keep the current hardware setting */
	WM831X_STATUS_OTP = 1,
	WM831X_STATUS_POWER = 2,
	WM831X_STATUS_CHARGER = 3,
	WM831X_STATUS_MANUAL = 4,
};

struct wm831x_status_pdata {
	enum wm831x_status_src default_src;
	const char *name;
	const char *default_trigger;
};

struct wm831x_touch_pdata {
	int fivewire;           /** 1 for five wire mode, 0 for 4 wire */
	int isel;               /** Current for pen down (uA) */
	int rpu;                /** Pen down sensitivity resistor divider */
	int pressure;           /** Report pressure (boolean) */
	unsigned int data_irq;  /** Touch data ready IRQ */
	int data_irqf;          /** IRQ flags for data ready IRQ */
	unsigned int pd_irq;    /** Touch pendown detect IRQ */
	int pd_irqf;            /** IRQ flags for pen down IRQ */
};

enum wm831x_watchdog_action {
	WM831X_WDOG_NONE = 0,
	WM831X_WDOG_INTERRUPT = 1,
	WM831X_WDOG_RESET = 2,
	WM831X_WDOG_WAKE = 3,
};

struct wm831x_watchdog_pdata {
	enum wm831x_watchdog_action primary, secondary;
	int update_gpio;
	unsigned int software:1;
};

#define WM831X_MAX_STATUS  2
#define WM831X_MAX_DCDC    4
#define WM831X_MAX_EPE     2
#define WM831X_MAX_LDO     11
#define WM831X_MAX_ISINK   2

#define WM831X_GPIO_CONFIGURE 0x10000
#define WM831X_GPIO_NUM 16

struct wm831x_pdata {
	/** Used to distinguish multiple WM831x chips */
	int wm831x_num;

	/** Called before subdevices are set up */
	int (*pre_init)(struct wm831x *wm831x);
	/** Called after subdevices are set up */
	int (*post_init)(struct wm831x *wm831x);

	/** Put the /IRQ line into CMOS mode */
	bool irq_cmos;

	/** Disable the touchscreen */
	bool disable_touch;

	/** The driver should initiate a power off sequence during shutdown */
	bool soft_shutdown;

	int irq_base;
	int gpio_base;
	int gpio_defaults[WM831X_GPIO_NUM];
	struct wm831x_backlight_pdata *backlight;
	struct wm831x_backup_pdata *backup;
	struct wm831x_battery_pdata *battery;
	struct wm831x_touch_pdata *touch;
	struct wm831x_watchdog_pdata *watchdog;

	/** LED1 = 0 and so on */
	struct wm831x_status_pdata *status[WM831X_MAX_STATUS];
	/** DCDC1 = 0 and so on */
	struct regulator_init_data *dcdc[WM831X_MAX_DCDC];
	/** EPE1 = 0 and so on */
	struct regulator_init_data *epe[WM831X_MAX_EPE];
	/** LDO1 = 0 and so on */
	struct regulator_init_data *ldo[WM831X_MAX_LDO];
	/** ISINK1 = 0 and so on */
	struct regulator_init_data *isink[WM831X_MAX_ISINK];
};

#endif
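/*
 * Editorial example: a minimal board-level wm831x_pdata sketch tying the
 * pieces above together.  All numbers and names are hypothetical; fields
 * left unset stay zero, which generally means the OTP/hardware defaults
 * are preserved.
 */
static struct wm831x_status_pdata board_led1_pdata = {
	.default_src     = WM831X_STATUS_CHARGER,
	.name            = "board:charger",          /* hypothetical LED name */
	.default_trigger = "wm831x-power-charging",  /* hypothetical trigger */
};

static struct wm831x_pdata board_wm831x_pdata = {
	.wm831x_num = 0,
	.irq_base   = 192,   /* hypothetical virtual IRQ base */
	.gpio_base  = 160,   /* hypothetical GPIO base */
	.status     = { &board_led1_pdata },  /* LED1 */
};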
mfd/wm831x/watchdog.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/linux/mfd/wm831x/watchdog.h -- Watchdog for WM831x
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 */

#ifndef __MFD_WM831X_WATCHDOG_H__
#define __MFD_WM831X_WATCHDOG_H__

/*
 * R16388 (0x4004) - Watchdog
 */
#define WM831X_WDOG_ENA           0x8000  /* WDOG_ENA */
#define WM831X_WDOG_ENA_MASK      0x8000  /* WDOG_ENA */
#define WM831X_WDOG_ENA_SHIFT         15  /* WDOG_ENA */
#define WM831X_WDOG_ENA_WIDTH          1  /* WDOG_ENA */
#define WM831X_WDOG_DEBUG         0x4000  /* WDOG_DEBUG */
#define WM831X_WDOG_DEBUG_MASK    0x4000  /* WDOG_DEBUG */
#define WM831X_WDOG_DEBUG_SHIFT       14  /* WDOG_DEBUG */
#define WM831X_WDOG_DEBUG_WIDTH        1  /* WDOG_DEBUG */
#define WM831X_WDOG_RST_SRC       0x2000  /* WDOG_RST_SRC */
#define WM831X_WDOG_RST_SRC_MASK  0x2000  /* WDOG_RST_SRC */
#define WM831X_WDOG_RST_SRC_SHIFT     13  /* WDOG_RST_SRC */
#define WM831X_WDOG_RST_SRC_WIDTH      1  /* WDOG_RST_SRC */
#define WM831X_WDOG_SLPENA        0x1000  /* WDOG_SLPENA */
#define WM831X_WDOG_SLPENA_MASK   0x1000  /* WDOG_SLPENA */
#define WM831X_WDOG_SLPENA_SHIFT      12  /* WDOG_SLPENA */
#define WM831X_WDOG_SLPENA_WIDTH       1  /* WDOG_SLPENA */
#define WM831X_WDOG_RESET         0x0800  /* WDOG_RESET */
#define WM831X_WDOG_RESET_MASK    0x0800  /* WDOG_RESET */
#define WM831X_WDOG_RESET_SHIFT       11  /* WDOG_RESET */
#define WM831X_WDOG_RESET_WIDTH        1  /* WDOG_RESET */
#define WM831X_WDOG_SECACT_MASK   0x0300  /* WDOG_SECACT - [9:8] */
#define WM831X_WDOG_SECACT_SHIFT       8  /* WDOG_SECACT - [9:8] */
#define WM831X_WDOG_SECACT_WIDTH       2  /* WDOG_SECACT - [9:8] */
#define WM831X_WDOG_PRIMACT_MASK  0x0030  /* WDOG_PRIMACT - [5:4] */
#define WM831X_WDOG_PRIMACT_SHIFT      4  /* WDOG_PRIMACT - [5:4] */
#define WM831X_WDOG_PRIMACT_WIDTH      2  /* WDOG_PRIMACT - [5:4] */
#define WM831X_WDOG_TO_MASK       0x0007  /* WDOG_TO - [2:0] */
#define WM831X_WDOG_TO_SHIFT           0  /* WDOG_TO - [2:0] */
#define WM831X_WDOG_TO_WIDTH           3  /* WDOG_TO - [2:0] */

#endif

mfd/wm831x/otp.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/linux/mfd/wm831x/otp.h -- OTP interface for WM831x
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
* * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> */ #ifndef __MFD_WM831X_OTP_H__ #define __MFD_WM831X_OTP_H__ int wm831x_otp_init(struct wm831x *wm831x); void wm831x_otp_exit(struct wm831x *wm831x); /* * R30720 (0x7800) - Unique ID 1 */ #define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ /* * R30721 (0x7801) - Unique ID 2 */ #define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ /* * R30722 (0x7802) - Unique ID 3 */ #define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ /* * R30723 (0x7803) - Unique ID 4 */ #define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ /* * R30724 (0x7804) - Unique ID 5 */ #define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ /* * R30725 (0x7805) - Unique ID 6 */ #define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ /* * R30726 (0x7806) - Unique ID 7 */ #define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ /* * R30727 (0x7807) - Unique ID 8 */ #define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */ #define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */ /* * R30728 (0x7808) - Factory OTP ID */ #define WM831X_OTP_FACT_ID_MASK 0xFFFE /* OTP_FACT_ID - [15:1] */ #define WM831X_OTP_FACT_ID_SHIFT 1 /* OTP_FACT_ID - [15:1] */ #define WM831X_OTP_FACT_ID_WIDTH 15 /* OTP_FACT_ID - [15:1] */ #define WM831X_OTP_FACT_FINAL 0x0001 /* OTP_FACT_FINAL */ #define WM831X_OTP_FACT_FINAL_MASK 0x0001 /* OTP_FACT_FINAL */ #define WM831X_OTP_FACT_FINAL_SHIFT 0 /* OTP_FACT_FINAL */ #define WM831X_OTP_FACT_FINAL_WIDTH 1 /* OTP_FACT_FINAL */ /* * R30729 (0x7809) - Factory OTP 1 */ #define WM831X_DC3_TRIM_MASK 0xF000 /* DC3_TRIM - [15:12] */ #define WM831X_DC3_TRIM_SHIFT 12 /* DC3_TRIM - [15:12] */ #define WM831X_DC3_TRIM_WIDTH 4 /* DC3_TRIM - [15:12] */ #define WM831X_DC2_TRIM_MASK 0x0FC0 /* DC2_TRIM - [11:6] */ #define WM831X_DC2_TRIM_SHIFT 6 /* DC2_TRIM - [11:6] */ #define WM831X_DC2_TRIM_WIDTH 6 /* DC2_TRIM - [11:6] */ #define WM831X_DC1_TRIM_MASK 0x003F /* DC1_TRIM - [5:0] */ #define WM831X_DC1_TRIM_SHIFT 0 /* DC1_TRIM - [5:0] */ #define WM831X_DC1_TRIM_WIDTH 6 /* DC1_TRIM - [5:0] */ /* * R30730 (0x780A) - Factory OTP 2 */ #define WM831X_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */ #define WM831X_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */ #define WM831X_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */ /* * R30731 (0x780B) - Factory OTP 3 */ #define WM831X_OSC_TRIM_MASK 0x0780 /* OSC_TRIM - [10:7] */ #define WM831X_OSC_TRIM_SHIFT 7 /* OSC_TRIM - [10:7] */ #define WM831X_OSC_TRIM_WIDTH 4 /* OSC_TRIM - [10:7] */ #define WM831X_BG_TRIM_MASK 0x0078 /* BG_TRIM - [6:3] */ #define WM831X_BG_TRIM_SHIFT 3 /* BG_TRIM - [6:3] */ #define WM831X_BG_TRIM_WIDTH 4 /* BG_TRIM - [6:3] 
*/ #define WM831X_LPBG_TRIM_MASK 0x0007 /* LPBG_TRIM - [2:0] */ #define WM831X_LPBG_TRIM_SHIFT 0 /* LPBG_TRIM - [2:0] */ #define WM831X_LPBG_TRIM_WIDTH 3 /* LPBG_TRIM - [2:0] */ /* * R30732 (0x780C) - Factory OTP 4 */ #define WM831X_CHILD_I2C_ADDR_MASK 0x00FE /* CHILD_I2C_ADDR - [7:1] */ #define WM831X_CHILD_I2C_ADDR_SHIFT 1 /* CHILD_I2C_ADDR - [7:1] */ #define WM831X_CHILD_I2C_ADDR_WIDTH 7 /* CHILD_I2C_ADDR - [7:1] */ #define WM831X_CH_AW 0x0001 /* CH_AW */ #define WM831X_CH_AW_MASK 0x0001 /* CH_AW */ #define WM831X_CH_AW_SHIFT 0 /* CH_AW */ #define WM831X_CH_AW_WIDTH 1 /* CH_AW */ /* * R30733 (0x780D) - Factory OTP 5 */ #define WM831X_CHARGE_TRIM_MASK 0x003F /* CHARGE_TRIM - [5:0] */ #define WM831X_CHARGE_TRIM_SHIFT 0 /* CHARGE_TRIM - [5:0] */ #define WM831X_CHARGE_TRIM_WIDTH 6 /* CHARGE_TRIM - [5:0] */ /* * R30736 (0x7810) - Customer OTP ID */ #define WM831X_OTP_AUTO_PROG 0x8000 /* OTP_AUTO_PROG */ #define WM831X_OTP_AUTO_PROG_MASK 0x8000 /* OTP_AUTO_PROG */ #define WM831X_OTP_AUTO_PROG_SHIFT 15 /* OTP_AUTO_PROG */ #define WM831X_OTP_AUTO_PROG_WIDTH 1 /* OTP_AUTO_PROG */ #define WM831X_OTP_CUST_ID_MASK 0x7FFE /* OTP_CUST_ID - [14:1] */ #define WM831X_OTP_CUST_ID_SHIFT 1 /* OTP_CUST_ID - [14:1] */ #define WM831X_OTP_CUST_ID_WIDTH 14 /* OTP_CUST_ID - [14:1] */ #define WM831X_OTP_CUST_FINAL 0x0001 /* OTP_CUST_FINAL */ #define WM831X_OTP_CUST_FINAL_MASK 0x0001 /* OTP_CUST_FINAL */ #define WM831X_OTP_CUST_FINAL_SHIFT 0 /* OTP_CUST_FINAL */ #define WM831X_OTP_CUST_FINAL_WIDTH 1 /* OTP_CUST_FINAL */ /* * R30759 (0x7827) - DBE CHECK DATA */ #define WM831X_DBE_VALID_DATA_MASK 0xFFFF /* DBE_VALID_DATA - [15:0] */ #define WM831X_DBE_VALID_DATA_SHIFT 0 /* DBE_VALID_DATA - [15:0] */ #define WM831X_DBE_VALID_DATA_WIDTH 16 /* DBE_VALID_DATA - [15:0] */ #endif mfd/wm831x/core.h 0000644 00000050165 14722070374 0007477 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/mfd/wm831x/core.h -- Core interface for WM831x * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> */ #ifndef __MFD_WM831X_CORE_H__ #define __MFD_WM831X_CORE_H__ #include <linux/completion.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/list.h> #include <linux/regmap.h> #include <linux/mfd/wm831x/auxadc.h> #include <linux/mfd/wm831x/pdata.h> #include <linux/of.h> /* * Register values. 
*/ #define WM831X_RESET_ID 0x00 #define WM831X_REVISION 0x01 #define WM831X_PARENT_ID 0x4000 #define WM831X_SYSVDD_CONTROL 0x4001 #define WM831X_THERMAL_MONITORING 0x4002 #define WM831X_POWER_STATE 0x4003 #define WM831X_WATCHDOG 0x4004 #define WM831X_ON_PIN_CONTROL 0x4005 #define WM831X_RESET_CONTROL 0x4006 #define WM831X_CONTROL_INTERFACE 0x4007 #define WM831X_SECURITY_KEY 0x4008 #define WM831X_SOFTWARE_SCRATCH 0x4009 #define WM831X_OTP_CONTROL 0x400A #define WM831X_GPIO_LEVEL 0x400C #define WM831X_SYSTEM_STATUS 0x400D #define WM831X_ON_SOURCE 0x400E #define WM831X_OFF_SOURCE 0x400F #define WM831X_SYSTEM_INTERRUPTS 0x4010 #define WM831X_INTERRUPT_STATUS_1 0x4011 #define WM831X_INTERRUPT_STATUS_2 0x4012 #define WM831X_INTERRUPT_STATUS_3 0x4013 #define WM831X_INTERRUPT_STATUS_4 0x4014 #define WM831X_INTERRUPT_STATUS_5 0x4015 #define WM831X_IRQ_CONFIG 0x4017 #define WM831X_SYSTEM_INTERRUPTS_MASK 0x4018 #define WM831X_INTERRUPT_STATUS_1_MASK 0x4019 #define WM831X_INTERRUPT_STATUS_2_MASK 0x401A #define WM831X_INTERRUPT_STATUS_3_MASK 0x401B #define WM831X_INTERRUPT_STATUS_4_MASK 0x401C #define WM831X_INTERRUPT_STATUS_5_MASK 0x401D #define WM831X_RTC_WRITE_COUNTER 0x4020 #define WM831X_RTC_TIME_1 0x4021 #define WM831X_RTC_TIME_2 0x4022 #define WM831X_RTC_ALARM_1 0x4023 #define WM831X_RTC_ALARM_2 0x4024 #define WM831X_RTC_CONTROL 0x4025 #define WM831X_RTC_TRIM 0x4026 #define WM831X_TOUCH_CONTROL_1 0x4028 #define WM831X_TOUCH_CONTROL_2 0x4029 #define WM831X_TOUCH_DATA_X 0x402A #define WM831X_TOUCH_DATA_Y 0x402B #define WM831X_TOUCH_DATA_Z 0x402C #define WM831X_AUXADC_DATA 0x402D #define WM831X_AUXADC_CONTROL 0x402E #define WM831X_AUXADC_SOURCE 0x402F #define WM831X_COMPARATOR_CONTROL 0x4030 #define WM831X_COMPARATOR_1 0x4031 #define WM831X_COMPARATOR_2 0x4032 #define WM831X_COMPARATOR_3 0x4033 #define WM831X_COMPARATOR_4 0x4034 #define WM831X_GPIO1_CONTROL 0x4038 #define WM831X_GPIO2_CONTROL 0x4039 #define WM831X_GPIO3_CONTROL 0x403A #define WM831X_GPIO4_CONTROL 0x403B #define WM831X_GPIO5_CONTROL 0x403C #define WM831X_GPIO6_CONTROL 0x403D #define WM831X_GPIO7_CONTROL 0x403E #define WM831X_GPIO8_CONTROL 0x403F #define WM831X_GPIO9_CONTROL 0x4040 #define WM831X_GPIO10_CONTROL 0x4041 #define WM831X_GPIO11_CONTROL 0x4042 #define WM831X_GPIO12_CONTROL 0x4043 #define WM831X_GPIO13_CONTROL 0x4044 #define WM831X_GPIO14_CONTROL 0x4045 #define WM831X_GPIO15_CONTROL 0x4046 #define WM831X_GPIO16_CONTROL 0x4047 #define WM831X_CHARGER_CONTROL_1 0x4048 #define WM831X_CHARGER_CONTROL_2 0x4049 #define WM831X_CHARGER_STATUS 0x404A #define WM831X_BACKUP_CHARGER_CONTROL 0x404B #define WM831X_STATUS_LED_1 0x404C #define WM831X_STATUS_LED_2 0x404D #define WM831X_CURRENT_SINK_1 0x404E #define WM831X_CURRENT_SINK_2 0x404F #define WM831X_DCDC_ENABLE 0x4050 #define WM831X_LDO_ENABLE 0x4051 #define WM831X_DCDC_STATUS 0x4052 #define WM831X_LDO_STATUS 0x4053 #define WM831X_DCDC_UV_STATUS 0x4054 #define WM831X_LDO_UV_STATUS 0x4055 #define WM831X_DC1_CONTROL_1 0x4056 #define WM831X_DC1_CONTROL_2 0x4057 #define WM831X_DC1_ON_CONFIG 0x4058 #define WM831X_DC1_SLEEP_CONTROL 0x4059 #define WM831X_DC1_DVS_CONTROL 0x405A #define WM831X_DC2_CONTROL_1 0x405B #define WM831X_DC2_CONTROL_2 0x405C #define WM831X_DC2_ON_CONFIG 0x405D #define WM831X_DC2_SLEEP_CONTROL 0x405E #define WM831X_DC2_DVS_CONTROL 0x405F #define WM831X_DC3_CONTROL_1 0x4060 #define WM831X_DC3_CONTROL_2 0x4061 #define WM831X_DC3_ON_CONFIG 0x4062 #define WM831X_DC3_SLEEP_CONTROL 0x4063 #define WM831X_DC4_CONTROL 0x4064 #define WM831X_DC4_SLEEP_CONTROL 0x4065 #define 
WM832X_DC4_SLEEP_CONTROL 0x4067 #define WM831X_EPE1_CONTROL 0x4066 #define WM831X_EPE2_CONTROL 0x4067 #define WM831X_LDO1_CONTROL 0x4068 #define WM831X_LDO1_ON_CONTROL 0x4069 #define WM831X_LDO1_SLEEP_CONTROL 0x406A #define WM831X_LDO2_CONTROL 0x406B #define WM831X_LDO2_ON_CONTROL 0x406C #define WM831X_LDO2_SLEEP_CONTROL 0x406D #define WM831X_LDO3_CONTROL 0x406E #define WM831X_LDO3_ON_CONTROL 0x406F #define WM831X_LDO3_SLEEP_CONTROL 0x4070 #define WM831X_LDO4_CONTROL 0x4071 #define WM831X_LDO4_ON_CONTROL 0x4072 #define WM831X_LDO4_SLEEP_CONTROL 0x4073 #define WM831X_LDO5_CONTROL 0x4074 #define WM831X_LDO5_ON_CONTROL 0x4075 #define WM831X_LDO5_SLEEP_CONTROL 0x4076 #define WM831X_LDO6_CONTROL 0x4077 #define WM831X_LDO6_ON_CONTROL 0x4078 #define WM831X_LDO6_SLEEP_CONTROL 0x4079 #define WM831X_LDO7_CONTROL 0x407A #define WM831X_LDO7_ON_CONTROL 0x407B #define WM831X_LDO7_SLEEP_CONTROL 0x407C #define WM831X_LDO8_CONTROL 0x407D #define WM831X_LDO8_ON_CONTROL 0x407E #define WM831X_LDO8_SLEEP_CONTROL 0x407F #define WM831X_LDO9_CONTROL 0x4080 #define WM831X_LDO9_ON_CONTROL 0x4081 #define WM831X_LDO9_SLEEP_CONTROL 0x4082 #define WM831X_LDO10_CONTROL 0x4083 #define WM831X_LDO10_ON_CONTROL 0x4084 #define WM831X_LDO10_SLEEP_CONTROL 0x4085 #define WM831X_LDO11_ON_CONTROL 0x4087 #define WM831X_LDO11_SLEEP_CONTROL 0x4088 #define WM831X_POWER_GOOD_SOURCE_1 0x408E #define WM831X_POWER_GOOD_SOURCE_2 0x408F #define WM831X_CLOCK_CONTROL_1 0x4090 #define WM831X_CLOCK_CONTROL_2 0x4091 #define WM831X_FLL_CONTROL_1 0x4092 #define WM831X_FLL_CONTROL_2 0x4093 #define WM831X_FLL_CONTROL_3 0x4094 #define WM831X_FLL_CONTROL_4 0x4095 #define WM831X_FLL_CONTROL_5 0x4096 #define WM831X_UNIQUE_ID_1 0x7800 #define WM831X_UNIQUE_ID_2 0x7801 #define WM831X_UNIQUE_ID_3 0x7802 #define WM831X_UNIQUE_ID_4 0x7803 #define WM831X_UNIQUE_ID_5 0x7804 #define WM831X_UNIQUE_ID_6 0x7805 #define WM831X_UNIQUE_ID_7 0x7806 #define WM831X_UNIQUE_ID_8 0x7807 #define WM831X_FACTORY_OTP_ID 0x7808 #define WM831X_FACTORY_OTP_1 0x7809 #define WM831X_FACTORY_OTP_2 0x780A #define WM831X_FACTORY_OTP_3 0x780B #define WM831X_FACTORY_OTP_4 0x780C #define WM831X_FACTORY_OTP_5 0x780D #define WM831X_CUSTOMER_OTP_ID 0x7810 #define WM831X_DC1_OTP_CONTROL 0x7811 #define WM831X_DC2_OTP_CONTROL 0x7812 #define WM831X_DC3_OTP_CONTROL 0x7813 #define WM831X_LDO1_2_OTP_CONTROL 0x7814 #define WM831X_LDO3_4_OTP_CONTROL 0x7815 #define WM831X_LDO5_6_OTP_CONTROL 0x7816 #define WM831X_LDO7_8_OTP_CONTROL 0x7817 #define WM831X_LDO9_10_OTP_CONTROL 0x7818 #define WM831X_LDO11_EPE_CONTROL 0x7819 #define WM831X_GPIO1_OTP_CONTROL 0x781A #define WM831X_GPIO2_OTP_CONTROL 0x781B #define WM831X_GPIO3_OTP_CONTROL 0x781C #define WM831X_GPIO4_OTP_CONTROL 0x781D #define WM831X_GPIO5_OTP_CONTROL 0x781E #define WM831X_GPIO6_OTP_CONTROL 0x781F #define WM831X_DBE_CHECK_DATA 0x7827 /* * R0 (0x00) - Reset ID */ #define WM831X_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */ #define WM831X_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */ #define WM831X_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */ /* * R1 (0x01) - Revision */ #define WM831X_PARENT_REV_MASK 0xFF00 /* PARENT_REV - [15:8] */ #define WM831X_PARENT_REV_SHIFT 8 /* PARENT_REV - [15:8] */ #define WM831X_PARENT_REV_WIDTH 8 /* PARENT_REV - [15:8] */ #define WM831X_CHILD_REV_MASK 0x00FF /* CHILD_REV - [7:0] */ #define WM831X_CHILD_REV_SHIFT 0 /* CHILD_REV - [7:0] */ #define WM831X_CHILD_REV_WIDTH 8 /* CHILD_REV - [7:0] */ /* * R16384 (0x4000) - Parent ID */ #define WM831X_PARENT_ID_MASK 0xFFFF /* PARENT_ID - [15:0] */ #define WM831X_PARENT_ID_SHIFT 0 /* 
PARENT_ID - [15:0] */ #define WM831X_PARENT_ID_WIDTH 16 /* PARENT_ID - [15:0] */ /* * R16389 (0x4005) - ON Pin Control */ #define WM831X_ON_PIN_SECACT_MASK 0x0300 /* ON_PIN_SECACT - [9:8] */ #define WM831X_ON_PIN_SECACT_SHIFT 8 /* ON_PIN_SECACT - [9:8] */ #define WM831X_ON_PIN_SECACT_WIDTH 2 /* ON_PIN_SECACT - [9:8] */ #define WM831X_ON_PIN_PRIMACT_MASK 0x0030 /* ON_PIN_PRIMACT - [5:4] */ #define WM831X_ON_PIN_PRIMACT_SHIFT 4 /* ON_PIN_PRIMACT - [5:4] */ #define WM831X_ON_PIN_PRIMACT_WIDTH 2 /* ON_PIN_PRIMACT - [5:4] */ #define WM831X_ON_PIN_STS 0x0008 /* ON_PIN_STS */ #define WM831X_ON_PIN_STS_MASK 0x0008 /* ON_PIN_STS */ #define WM831X_ON_PIN_STS_SHIFT 3 /* ON_PIN_STS */ #define WM831X_ON_PIN_STS_WIDTH 1 /* ON_PIN_STS */ #define WM831X_ON_PIN_TO_MASK 0x0003 /* ON_PIN_TO - [1:0] */ #define WM831X_ON_PIN_TO_SHIFT 0 /* ON_PIN_TO - [1:0] */ #define WM831X_ON_PIN_TO_WIDTH 2 /* ON_PIN_TO - [1:0] */ /* * R16528 (0x4090) - Clock Control 1 */ #define WM831X_CLKOUT_ENA 0x8000 /* CLKOUT_ENA */ #define WM831X_CLKOUT_ENA_MASK 0x8000 /* CLKOUT_ENA */ #define WM831X_CLKOUT_ENA_SHIFT 15 /* CLKOUT_ENA */ #define WM831X_CLKOUT_ENA_WIDTH 1 /* CLKOUT_ENA */ #define WM831X_CLKOUT_OD 0x2000 /* CLKOUT_OD */ #define WM831X_CLKOUT_OD_MASK 0x2000 /* CLKOUT_OD */ #define WM831X_CLKOUT_OD_SHIFT 13 /* CLKOUT_OD */ #define WM831X_CLKOUT_OD_WIDTH 1 /* CLKOUT_OD */ #define WM831X_CLKOUT_SLOT_MASK 0x0700 /* CLKOUT_SLOT - [10:8] */ #define WM831X_CLKOUT_SLOT_SHIFT 8 /* CLKOUT_SLOT - [10:8] */ #define WM831X_CLKOUT_SLOT_WIDTH 3 /* CLKOUT_SLOT - [10:8] */ #define WM831X_CLKOUT_SLPSLOT_MASK 0x0070 /* CLKOUT_SLPSLOT - [6:4] */ #define WM831X_CLKOUT_SLPSLOT_SHIFT 4 /* CLKOUT_SLPSLOT - [6:4] */ #define WM831X_CLKOUT_SLPSLOT_WIDTH 3 /* CLKOUT_SLPSLOT - [6:4] */ #define WM831X_CLKOUT_SRC 0x0001 /* CLKOUT_SRC */ #define WM831X_CLKOUT_SRC_MASK 0x0001 /* CLKOUT_SRC */ #define WM831X_CLKOUT_SRC_SHIFT 0 /* CLKOUT_SRC */ #define WM831X_CLKOUT_SRC_WIDTH 1 /* CLKOUT_SRC */ /* * R16529 (0x4091) - Clock Control 2 */ #define WM831X_XTAL_INH 0x8000 /* XTAL_INH */ #define WM831X_XTAL_INH_MASK 0x8000 /* XTAL_INH */ #define WM831X_XTAL_INH_SHIFT 15 /* XTAL_INH */ #define WM831X_XTAL_INH_WIDTH 1 /* XTAL_INH */ #define WM831X_XTAL_ENA 0x2000 /* XTAL_ENA */ #define WM831X_XTAL_ENA_MASK 0x2000 /* XTAL_ENA */ #define WM831X_XTAL_ENA_SHIFT 13 /* XTAL_ENA */ #define WM831X_XTAL_ENA_WIDTH 1 /* XTAL_ENA */ #define WM831X_XTAL_BKUPENA 0x1000 /* XTAL_BKUPENA */ #define WM831X_XTAL_BKUPENA_MASK 0x1000 /* XTAL_BKUPENA */ #define WM831X_XTAL_BKUPENA_SHIFT 12 /* XTAL_BKUPENA */ #define WM831X_XTAL_BKUPENA_WIDTH 1 /* XTAL_BKUPENA */ #define WM831X_FLL_AUTO 0x0080 /* FLL_AUTO */ #define WM831X_FLL_AUTO_MASK 0x0080 /* FLL_AUTO */ #define WM831X_FLL_AUTO_SHIFT 7 /* FLL_AUTO */ #define WM831X_FLL_AUTO_WIDTH 1 /* FLL_AUTO */ #define WM831X_FLL_AUTO_FREQ_MASK 0x0007 /* FLL_AUTO_FREQ - [2:0] */ #define WM831X_FLL_AUTO_FREQ_SHIFT 0 /* FLL_AUTO_FREQ - [2:0] */ #define WM831X_FLL_AUTO_FREQ_WIDTH 3 /* FLL_AUTO_FREQ - [2:0] */ /* * R16530 (0x4092) - FLL Control 1 */ #define WM831X_FLL_FRAC 0x0004 /* FLL_FRAC */ #define WM831X_FLL_FRAC_MASK 0x0004 /* FLL_FRAC */ #define WM831X_FLL_FRAC_SHIFT 2 /* FLL_FRAC */ #define WM831X_FLL_FRAC_WIDTH 1 /* FLL_FRAC */ #define WM831X_FLL_OSC_ENA 0x0002 /* FLL_OSC_ENA */ #define WM831X_FLL_OSC_ENA_MASK 0x0002 /* FLL_OSC_ENA */ #define WM831X_FLL_OSC_ENA_SHIFT 1 /* FLL_OSC_ENA */ #define WM831X_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */ #define WM831X_FLL_ENA 0x0001 /* FLL_ENA */ #define WM831X_FLL_ENA_MASK 0x0001 /* FLL_ENA */ #define 
WM831X_FLL_ENA_SHIFT 0 /* FLL_ENA */ #define WM831X_FLL_ENA_WIDTH 1 /* FLL_ENA */ /* * R16531 (0x4093) - FLL Control 2 */ #define WM831X_FLL_OUTDIV_MASK 0x3F00 /* FLL_OUTDIV - [13:8] */ #define WM831X_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [13:8] */ #define WM831X_FLL_OUTDIV_WIDTH 6 /* FLL_OUTDIV - [13:8] */ #define WM831X_FLL_CTRL_RATE_MASK 0x0070 /* FLL_CTRL_RATE - [6:4] */ #define WM831X_FLL_CTRL_RATE_SHIFT 4 /* FLL_CTRL_RATE - [6:4] */ #define WM831X_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [6:4] */ #define WM831X_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */ #define WM831X_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */ #define WM831X_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */ /* * R16532 (0x4094) - FLL Control 3 */ #define WM831X_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */ #define WM831X_FLL_K_SHIFT 0 /* FLL_K - [15:0] */ #define WM831X_FLL_K_WIDTH 16 /* FLL_K - [15:0] */ /* * R16533 (0x4095) - FLL Control 4 */ #define WM831X_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */ #define WM831X_FLL_N_SHIFT 5 /* FLL_N - [14:5] */ #define WM831X_FLL_N_WIDTH 10 /* FLL_N - [14:5] */ #define WM831X_FLL_GAIN_MASK 0x000F /* FLL_GAIN - [3:0] */ #define WM831X_FLL_GAIN_SHIFT 0 /* FLL_GAIN - [3:0] */ #define WM831X_FLL_GAIN_WIDTH 4 /* FLL_GAIN - [3:0] */ /* * R16534 (0x4096) - FLL Control 5 */ #define WM831X_FLL_CLK_REF_DIV_MASK 0x0018 /* FLL_CLK_REF_DIV - [4:3] */ #define WM831X_FLL_CLK_REF_DIV_SHIFT 3 /* FLL_CLK_REF_DIV - [4:3] */ #define WM831X_FLL_CLK_REF_DIV_WIDTH 2 /* FLL_CLK_REF_DIV - [4:3] */ #define WM831X_FLL_CLK_SRC_MASK 0x0003 /* FLL_CLK_SRC - [1:0] */ #define WM831X_FLL_CLK_SRC_SHIFT 0 /* FLL_CLK_SRC - [1:0] */ #define WM831X_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */ struct regulator_dev; struct irq_domain; #define WM831X_NUM_IRQ_REGS 5 #define WM831X_NUM_GPIO_REGS 16 enum wm831x_parent { WM8310 = 0x8310, WM8311 = 0x8311, WM8312 = 0x8312, WM8320 = 0x8320, WM8321 = 0x8321, WM8325 = 0x8325, WM8326 = 0x8326, }; struct wm831x; typedef int (*wm831x_auxadc_read_fn)(struct wm831x *wm831x, enum wm831x_auxadc input); struct wm831x { struct mutex io_lock; struct device *dev; struct regmap *regmap; struct wm831x_pdata pdata; enum wm831x_parent type; int irq; /* Our chip IRQ */ struct mutex irq_lock; struct irq_domain *irq_domain; int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */ int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */ bool soft_shutdown; /* Chip revision based flags */ unsigned has_gpio_ena:1; /* Has GPIO enable bit */ unsigned has_cs_sts:1; /* Has current sink status bit */ unsigned charger_irq_wake:1; /* Are charger IRQs a wake source? */ int num_gpio; /* Used by the interrupt controller code to post writes */ int gpio_update[WM831X_NUM_GPIO_REGS]; bool gpio_level_high[WM831X_NUM_GPIO_REGS]; bool gpio_level_low[WM831X_NUM_GPIO_REGS]; struct mutex auxadc_lock; struct list_head auxadc_pending; u16 auxadc_active; wm831x_auxadc_read_fn auxadc_read; /* The WM831x has a security key blocking access to certain * registers. The mutex is taken by the accessors for locking * and unlocking the security key, locked is used to fail * writes if the lock is held. 
*/ struct mutex key_lock; unsigned int locked:1; }; /* Device I/O API */ int wm831x_reg_read(struct wm831x *wm831x, unsigned short reg); int wm831x_reg_write(struct wm831x *wm831x, unsigned short reg, unsigned short val); void wm831x_reg_lock(struct wm831x *wm831x); int wm831x_reg_unlock(struct wm831x *wm831x); int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg, unsigned short mask, unsigned short val); int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg, int count, u16 *buf); int wm831x_device_init(struct wm831x *wm831x, int irq); int wm831x_device_suspend(struct wm831x *wm831x); void wm831x_device_shutdown(struct wm831x *wm831x); int wm831x_irq_init(struct wm831x *wm831x, int irq); void wm831x_irq_exit(struct wm831x *wm831x); void wm831x_auxadc_init(struct wm831x *wm831x); static inline int wm831x_irq(struct wm831x *wm831x, int irq) { return irq_create_mapping(wm831x->irq_domain, irq); } extern struct regmap_config wm831x_regmap_config; extern const struct of_device_id wm831x_of_match[]; #endif mfd/tc3589x.h 0000644 00000007620 14722070374 0006625 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2010 */ #ifndef __LINUX_MFD_TC3589x_H #define __LINUX_MFD_TC3589x_H struct device; enum tx3589x_block { TC3589x_BLOCK_GPIO = 1 << 0, TC3589x_BLOCK_KEYPAD = 1 << 1, }; #define TC3589x_RSTCTRL_IRQRST (1 << 4) #define TC3589x_RSTCTRL_TIMRST (1 << 3) #define TC3589x_RSTCTRL_ROTRST (1 << 2) #define TC3589x_RSTCTRL_KBDRST (1 << 1) #define TC3589x_RSTCTRL_GPIRST (1 << 0) /* Keyboard Configuration Registers */ #define TC3589x_KBDSETTLE_REG 0x01 #define TC3589x_KBDBOUNCE 0x02 #define TC3589x_KBDSIZE 0x03 #define TC3589x_KBCFG_LSB 0x04 #define TC3589x_KBCFG_MSB 0x05 #define TC3589x_KBDIC 0x08 #define TC3589x_KBDMSK 0x09 #define TC3589x_EVTCODE_FIFO 0x10 #define TC3589x_KBDMFS 0x8F #define TC3589x_IRQST 0x91 #define TC3589x_MANFCODE_MAGIC 0x03 #define TC3589x_MANFCODE 0x80 #define TC3589x_VERSION 0x81 #define TC3589x_IOCFG 0xA7 #define TC3589x_CLKMODE 0x88 #define TC3589x_CLKCFG 0x89 #define TC3589x_CLKEN 0x8A #define TC3589x_RSTCTRL 0x82 #define TC3589x_EXTRSTN 0x83 #define TC3589x_RSTINTCLR 0x84 /* Pull up/down configuration registers */ #define TC3589x_IOCFG 0xA7 #define TC3589x_IOPULLCFG0_LSB 0xAA #define TC3589x_IOPULLCFG0_MSB 0xAB #define TC3589x_IOPULLCFG1_LSB 0xAC #define TC3589x_IOPULLCFG1_MSB 0xAD #define TC3589x_IOPULLCFG2_LSB 0xAE #define TC3589x_GPIOIS0 0xC9 #define TC3589x_GPIOIS1 0xCA #define TC3589x_GPIOIS2 0xCB #define TC3589x_GPIOIBE0 0xCC #define TC3589x_GPIOIBE1 0xCD #define TC3589x_GPIOIBE2 0xCE #define TC3589x_GPIOIEV0 0xCF #define TC3589x_GPIOIEV1 0xD0 #define TC3589x_GPIOIEV2 0xD1 #define TC3589x_GPIOIE0 0xD2 #define TC3589x_GPIOIE1 0xD3 #define TC3589x_GPIOIE2 0xD4 #define TC3589x_GPIORIS0 0xD6 #define TC3589x_GPIORIS1 0xD7 #define TC3589x_GPIORIS2 0xD8 #define TC3589x_GPIOMIS0 0xD9 #define TC3589x_GPIOMIS1 0xDA #define TC3589x_GPIOMIS2 0xDB #define TC3589x_GPIOIC0 0xDC #define TC3589x_GPIOIC1 0xDD #define TC3589x_GPIOIC2 0xDE #define TC3589x_GPIODATA0 0xC0 #define TC3589x_GPIOMASK0 0xc1 #define TC3589x_GPIODATA1 0xC2 #define TC3589x_GPIOMASK1 0xc3 #define TC3589x_GPIODATA2 0xC4 #define TC3589x_GPIOMASK2 0xC5 #define TC3589x_GPIODIR0 0xC6 #define TC3589x_GPIODIR1 0xC7 #define TC3589x_GPIODIR2 0xC8 #define TC3589x_GPIOSYNC0 0xE6 #define TC3589x_GPIOSYNC1 0xE7 #define TC3589x_GPIOSYNC2 0xE8 #define TC3589x_GPIOWAKE0 0xE9 #define TC3589x_GPIOWAKE1 0xEA #define TC3589x_GPIOWAKE2 0xEB #define TC3589x_GPIOODM0 0xE0 #define 
TC3589x_GPIOODE0 0xE1 #define TC3589x_GPIOODM1 0xE2 #define TC3589x_GPIOODE1 0xE3 #define TC3589x_GPIOODM2 0xE4 #define TC3589x_GPIOODE2 0xE5 #define TC3589x_INT_GPIIRQ 0 #define TC3589x_INT_TI0IRQ 1 #define TC3589x_INT_TI1IRQ 2 #define TC3589x_INT_TI2IRQ 3 #define TC3589x_INT_ROTIRQ 5 #define TC3589x_INT_KBDIRQ 6 #define TC3589x_INT_PORIRQ 7 #define TC3589x_NR_INTERNAL_IRQS 8 struct tc3589x { struct mutex lock; struct device *dev; struct i2c_client *i2c; struct irq_domain *domain; int irq_base; int num_gpio; struct tc3589x_platform_data *pdata; }; extern int tc3589x_reg_write(struct tc3589x *tc3589x, u8 reg, u8 data); extern int tc3589x_reg_read(struct tc3589x *tc3589x, u8 reg); extern int tc3589x_block_read(struct tc3589x *tc3589x, u8 reg, u8 length, u8 *values); extern int tc3589x_block_write(struct tc3589x *tc3589x, u8 reg, u8 length, const u8 *values); extern int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val); /* * Keypad related platform specific constants * These values may be modified for fine tuning */ #define TC_KPD_ROWS 0x8 #define TC_KPD_COLUMNS 0x8 #define TC_KPD_DEBOUNCE_PERIOD 0xA3 #define TC_KPD_SETTLE_TIME 0xA3 /** * struct tc3589x_platform_data - TC3589x platform data * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*) */ struct tc3589x_platform_data { unsigned int block; }; #endif mfd/bcm590xx.h 0000644 00000001152 14722070374 0007047 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Broadcom BCM590xx PMU * * Copyright 2014 Linaro Limited * Author: Matt Porter <mporter@linaro.org> */ #ifndef __LINUX_MFD_BCM590XX_H #define __LINUX_MFD_BCM590XX_H #include <linux/device.h> #include <linux/i2c.h> #include <linux/regmap.h> /* max register address */ #define BCM590XX_MAX_REGISTER_PRI 0xe7 #define BCM590XX_MAX_REGISTER_SEC 0xf0 struct bcm590xx { struct device *dev; struct i2c_client *i2c_pri; struct i2c_client *i2c_sec; struct regmap *regmap_pri; struct regmap *regmap_sec; unsigned int id; }; #endif /* __LINUX_MFD_BCM590XX_H */ mfd/mt6323/registers.h 0000644 00000040016 14722070374 0010437 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com> */ #ifndef __MFD_MT6323_REGISTERS_H__ #define __MFD_MT6323_REGISTERS_H__ /* PMIC Registers */ #define MT6323_CHR_CON0 0x0000 #define MT6323_CHR_CON1 0x0002 #define MT6323_CHR_CON2 0x0004 #define MT6323_CHR_CON3 0x0006 #define MT6323_CHR_CON4 0x0008 #define MT6323_CHR_CON5 0x000A #define MT6323_CHR_CON6 0x000C #define MT6323_CHR_CON7 0x000E #define MT6323_CHR_CON8 0x0010 #define MT6323_CHR_CON9 0x0012 #define MT6323_CHR_CON10 0x0014 #define MT6323_CHR_CON11 0x0016 #define MT6323_CHR_CON12 0x0018 #define MT6323_CHR_CON13 0x001A #define MT6323_CHR_CON14 0x001C #define MT6323_CHR_CON15 0x001E #define MT6323_CHR_CON16 0x0020 #define MT6323_CHR_CON17 0x0022 #define MT6323_CHR_CON18 0x0024 #define MT6323_CHR_CON19 0x0026 #define MT6323_CHR_CON20 0x0028 #define MT6323_CHR_CON21 0x002A #define MT6323_CHR_CON22 0x002C #define MT6323_CHR_CON23 0x002E #define MT6323_CHR_CON24 0x0030 #define MT6323_CHR_CON25 0x0032 #define MT6323_CHR_CON26 0x0034 #define MT6323_CHR_CON27 0x0036 #define MT6323_CHR_CON28 0x0038 #define MT6323_CHR_CON29 0x003A #define MT6323_STRUP_CON0 0x003C #define MT6323_STRUP_CON2 0x003E #define MT6323_STRUP_CON3 0x0040 #define MT6323_STRUP_CON4 0x0042 #define MT6323_STRUP_CON5 0x0044 #define MT6323_STRUP_CON6 0x0046 #define MT6323_STRUP_CON7 0x0048 #define MT6323_STRUP_CON8 0x004A #define MT6323_STRUP_CON9 0x004C #define 
MT6323_STRUP_CON10 0x004E #define MT6323_STRUP_CON11 0x0050 #define MT6323_SPK_CON0 0x0052 #define MT6323_SPK_CON1 0x0054 #define MT6323_SPK_CON2 0x0056 #define MT6323_SPK_CON6 0x005E #define MT6323_SPK_CON7 0x0060 #define MT6323_SPK_CON8 0x0062 #define MT6323_SPK_CON9 0x0064 #define MT6323_SPK_CON10 0x0066 #define MT6323_SPK_CON11 0x0068 #define MT6323_SPK_CON12 0x006A #define MT6323_CID 0x0100 #define MT6323_TOP_CKPDN0 0x0102 #define MT6323_TOP_CKPDN0_SET 0x0104 #define MT6323_TOP_CKPDN0_CLR 0x0106 #define MT6323_TOP_CKPDN1 0x0108 #define MT6323_TOP_CKPDN1_SET 0x010A #define MT6323_TOP_CKPDN1_CLR 0x010C #define MT6323_TOP_CKPDN2 0x010E #define MT6323_TOP_CKPDN2_SET 0x0110 #define MT6323_TOP_CKPDN2_CLR 0x0112 #define MT6323_TOP_RST_CON 0x0114 #define MT6323_TOP_RST_CON_SET 0x0116 #define MT6323_TOP_RST_CON_CLR 0x0118 #define MT6323_TOP_RST_MISC 0x011A #define MT6323_TOP_RST_MISC_SET 0x011C #define MT6323_TOP_RST_MISC_CLR 0x011E #define MT6323_TOP_CKCON0 0x0120 #define MT6323_TOP_CKCON0_SET 0x0122 #define MT6323_TOP_CKCON0_CLR 0x0124 #define MT6323_TOP_CKCON1 0x0126 #define MT6323_TOP_CKCON1_SET 0x0128 #define MT6323_TOP_CKCON1_CLR 0x012A #define MT6323_TOP_CKTST0 0x012C #define MT6323_TOP_CKTST1 0x012E #define MT6323_TOP_CKTST2 0x0130 #define MT6323_TEST_OUT 0x0132 #define MT6323_TEST_CON0 0x0134 #define MT6323_TEST_CON1 0x0136 #define MT6323_EN_STATUS0 0x0138 #define MT6323_EN_STATUS1 0x013A #define MT6323_OCSTATUS0 0x013C #define MT6323_OCSTATUS1 0x013E #define MT6323_PGSTATUS 0x0140 #define MT6323_CHRSTATUS 0x0142 #define MT6323_TDSEL_CON 0x0144 #define MT6323_RDSEL_CON 0x0146 #define MT6323_SMT_CON0 0x0148 #define MT6323_SMT_CON1 0x014A #define MT6323_SMT_CON2 0x014C #define MT6323_SMT_CON3 0x014E #define MT6323_SMT_CON4 0x0150 #define MT6323_DRV_CON0 0x0152 #define MT6323_DRV_CON1 0x0154 #define MT6323_DRV_CON2 0x0156 #define MT6323_DRV_CON3 0x0158 #define MT6323_DRV_CON4 0x015A #define MT6323_SIMLS1_CON 0x015C #define MT6323_SIMLS2_CON 0x015E #define MT6323_INT_CON0 0x0160 #define MT6323_INT_CON0_SET 0x0162 #define MT6323_INT_CON0_CLR 0x0164 #define MT6323_INT_CON1 0x0166 #define MT6323_INT_CON1_SET 0x0168 #define MT6323_INT_CON1_CLR 0x016A #define MT6323_INT_MISC_CON 0x016C #define MT6323_INT_MISC_CON_SET 0x016E #define MT6323_INT_MISC_CON_CLR 0x0170 #define MT6323_INT_STATUS0 0x0172 #define MT6323_INT_STATUS1 0x0174 #define MT6323_OC_GEAR_0 0x0176 #define MT6323_OC_GEAR_1 0x0178 #define MT6323_OC_GEAR_2 0x017A #define MT6323_OC_CTL_VPROC 0x017C #define MT6323_OC_CTL_VSYS 0x017E #define MT6323_OC_CTL_VPA 0x0180 #define MT6323_FQMTR_CON0 0x0182 #define MT6323_FQMTR_CON1 0x0184 #define MT6323_FQMTR_CON2 0x0186 #define MT6323_RG_SPI_CON 0x0188 #define MT6323_DEW_DIO_EN 0x018A #define MT6323_DEW_READ_TEST 0x018C #define MT6323_DEW_WRITE_TEST 0x018E #define MT6323_DEW_CRC_SWRST 0x0190 #define MT6323_DEW_CRC_EN 0x0192 #define MT6323_DEW_CRC_VAL 0x0194 #define MT6323_DEW_DBG_MON_SEL 0x0196 #define MT6323_DEW_CIPHER_KEY_SEL 0x0198 #define MT6323_DEW_CIPHER_IV_SEL 0x019A #define MT6323_DEW_CIPHER_EN 0x019C #define MT6323_DEW_CIPHER_RDY 0x019E #define MT6323_DEW_CIPHER_MODE 0x01A0 #define MT6323_DEW_CIPHER_SWRST 0x01A2 #define MT6323_DEW_RDDMY_NO 0x01A4 #define MT6323_DEW_RDATA_DLY_SEL 0x01A6 #define MT6323_BUCK_CON0 0x0200 #define MT6323_BUCK_CON1 0x0202 #define MT6323_BUCK_CON2 0x0204 #define MT6323_BUCK_CON3 0x0206 #define MT6323_BUCK_CON4 0x0208 #define MT6323_BUCK_CON5 0x020A #define MT6323_VPROC_CON0 0x020C #define MT6323_VPROC_CON1 0x020E #define MT6323_VPROC_CON2 0x0210 #define 
MT6323_VPROC_CON3 0x0212 #define MT6323_VPROC_CON4 0x0214 #define MT6323_VPROC_CON5 0x0216 #define MT6323_VPROC_CON7 0x021A #define MT6323_VPROC_CON8 0x021C #define MT6323_VPROC_CON9 0x021E #define MT6323_VPROC_CON10 0x0220 #define MT6323_VPROC_CON11 0x0222 #define MT6323_VPROC_CON12 0x0224 #define MT6323_VPROC_CON13 0x0226 #define MT6323_VPROC_CON14 0x0228 #define MT6323_VPROC_CON15 0x022A #define MT6323_VPROC_CON18 0x0230 #define MT6323_VSYS_CON0 0x0232 #define MT6323_VSYS_CON1 0x0234 #define MT6323_VSYS_CON2 0x0236 #define MT6323_VSYS_CON3 0x0238 #define MT6323_VSYS_CON4 0x023A #define MT6323_VSYS_CON5 0x023C #define MT6323_VSYS_CON7 0x0240 #define MT6323_VSYS_CON8 0x0242 #define MT6323_VSYS_CON9 0x0244 #define MT6323_VSYS_CON10 0x0246 #define MT6323_VSYS_CON11 0x0248 #define MT6323_VSYS_CON12 0x024A #define MT6323_VSYS_CON13 0x024C #define MT6323_VSYS_CON14 0x024E #define MT6323_VSYS_CON15 0x0250 #define MT6323_VSYS_CON18 0x0256 #define MT6323_VPA_CON0 0x0300 #define MT6323_VPA_CON1 0x0302 #define MT6323_VPA_CON2 0x0304 #define MT6323_VPA_CON3 0x0306 #define MT6323_VPA_CON4 0x0308 #define MT6323_VPA_CON5 0x030A #define MT6323_VPA_CON7 0x030E #define MT6323_VPA_CON8 0x0310 #define MT6323_VPA_CON9 0x0312 #define MT6323_VPA_CON10 0x0314 #define MT6323_VPA_CON11 0x0316 #define MT6323_VPA_CON12 0x0318 #define MT6323_VPA_CON14 0x031C #define MT6323_VPA_CON16 0x0320 #define MT6323_VPA_CON17 0x0322 #define MT6323_VPA_CON18 0x0324 #define MT6323_VPA_CON19 0x0326 #define MT6323_VPA_CON20 0x0328 #define MT6323_BUCK_K_CON0 0x032A #define MT6323_BUCK_K_CON1 0x032C #define MT6323_BUCK_K_CON2 0x032E #define MT6323_ISINK0_CON0 0x0330 #define MT6323_ISINK0_CON1 0x0332 #define MT6323_ISINK0_CON2 0x0334 #define MT6323_ISINK0_CON3 0x0336 #define MT6323_ISINK1_CON0 0x0338 #define MT6323_ISINK1_CON1 0x033A #define MT6323_ISINK1_CON2 0x033C #define MT6323_ISINK1_CON3 0x033E #define MT6323_ISINK2_CON0 0x0340 #define MT6323_ISINK2_CON1 0x0342 #define MT6323_ISINK2_CON2 0x0344 #define MT6323_ISINK2_CON3 0x0346 #define MT6323_ISINK3_CON0 0x0348 #define MT6323_ISINK3_CON1 0x034A #define MT6323_ISINK3_CON2 0x034C #define MT6323_ISINK3_CON3 0x034E #define MT6323_ISINK_ANA0 0x0350 #define MT6323_ISINK_ANA1 0x0352 #define MT6323_ISINK_PHASE_DLY 0x0354 #define MT6323_ISINK_EN_CTRL 0x0356 #define MT6323_ANALDO_CON0 0x0400 #define MT6323_ANALDO_CON1 0x0402 #define MT6323_ANALDO_CON2 0x0404 #define MT6323_ANALDO_CON3 0x0406 #define MT6323_ANALDO_CON4 0x0408 #define MT6323_ANALDO_CON5 0x040A #define MT6323_ANALDO_CON6 0x040C #define MT6323_ANALDO_CON7 0x040E #define MT6323_ANALDO_CON8 0x0410 #define MT6323_ANALDO_CON10 0x0412 #define MT6323_ANALDO_CON15 0x0414 #define MT6323_ANALDO_CON16 0x0416 #define MT6323_ANALDO_CON17 0x0418 #define MT6323_ANALDO_CON18 0x041A #define MT6323_ANALDO_CON19 0x041C #define MT6323_ANALDO_CON20 0x041E #define MT6323_ANALDO_CON21 0x0420 #define MT6323_DIGLDO_CON0 0x0500 #define MT6323_DIGLDO_CON2 0x0502 #define MT6323_DIGLDO_CON3 0x0504 #define MT6323_DIGLDO_CON5 0x0506 #define MT6323_DIGLDO_CON6 0x0508 #define MT6323_DIGLDO_CON7 0x050A #define MT6323_DIGLDO_CON8 0x050C #define MT6323_DIGLDO_CON9 0x050E #define MT6323_DIGLDO_CON10 0x0510 #define MT6323_DIGLDO_CON11 0x0512 #define MT6323_DIGLDO_CON12 0x0514 #define MT6323_DIGLDO_CON13 0x0516 #define MT6323_DIGLDO_CON14 0x0518 #define MT6323_DIGLDO_CON15 0x051A #define MT6323_DIGLDO_CON16 0x051C #define MT6323_DIGLDO_CON17 0x051E #define MT6323_DIGLDO_CON18 0x0520 #define MT6323_DIGLDO_CON19 0x0522 #define MT6323_DIGLDO_CON20 0x0524 #define 
MT6323_DIGLDO_CON21 0x0526 #define MT6323_DIGLDO_CON23 0x0528 #define MT6323_DIGLDO_CON24 0x052A #define MT6323_DIGLDO_CON26 0x052C #define MT6323_DIGLDO_CON27 0x052E #define MT6323_DIGLDO_CON28 0x0530 #define MT6323_DIGLDO_CON29 0x0532 #define MT6323_DIGLDO_CON30 0x0534 #define MT6323_DIGLDO_CON31 0x0536 #define MT6323_DIGLDO_CON32 0x0538 #define MT6323_DIGLDO_CON33 0x053A #define MT6323_DIGLDO_CON34 0x053C #define MT6323_DIGLDO_CON35 0x053E #define MT6323_DIGLDO_CON36 0x0540 #define MT6323_DIGLDO_CON39 0x0542 #define MT6323_DIGLDO_CON40 0x0544 #define MT6323_DIGLDO_CON41 0x0546 #define MT6323_DIGLDO_CON42 0x0548 #define MT6323_DIGLDO_CON43 0x054A #define MT6323_DIGLDO_CON44 0x054C #define MT6323_DIGLDO_CON45 0x054E #define MT6323_DIGLDO_CON46 0x0550 #define MT6323_DIGLDO_CON47 0x0552 #define MT6323_DIGLDO_CON48 0x0554 #define MT6323_DIGLDO_CON49 0x0556 #define MT6323_DIGLDO_CON50 0x0558 #define MT6323_DIGLDO_CON51 0x055A #define MT6323_DIGLDO_CON52 0x055C #define MT6323_DIGLDO_CON53 0x055E #define MT6323_DIGLDO_CON54 0x0560 #define MT6323_EFUSE_CON0 0x0600 #define MT6323_EFUSE_CON1 0x0602 #define MT6323_EFUSE_CON2 0x0604 #define MT6323_EFUSE_CON3 0x0606 #define MT6323_EFUSE_CON4 0x0608 #define MT6323_EFUSE_CON5 0x060A #define MT6323_EFUSE_CON6 0x060C #define MT6323_EFUSE_VAL_0_15 0x060E #define MT6323_EFUSE_VAL_16_31 0x0610 #define MT6323_EFUSE_VAL_32_47 0x0612 #define MT6323_EFUSE_VAL_48_63 0x0614 #define MT6323_EFUSE_VAL_64_79 0x0616 #define MT6323_EFUSE_VAL_80_95 0x0618 #define MT6323_EFUSE_VAL_96_111 0x061A #define MT6323_EFUSE_VAL_112_127 0x061C #define MT6323_EFUSE_VAL_128_143 0x061E #define MT6323_EFUSE_VAL_144_159 0x0620 #define MT6323_EFUSE_VAL_160_175 0x0622 #define MT6323_EFUSE_VAL_176_191 0x0624 #define MT6323_EFUSE_DOUT_0_15 0x0626 #define MT6323_EFUSE_DOUT_16_31 0x0628 #define MT6323_EFUSE_DOUT_32_47 0x062A #define MT6323_EFUSE_DOUT_48_63 0x062C #define MT6323_EFUSE_DOUT_64_79 0x062E #define MT6323_EFUSE_DOUT_80_95 0x0630 #define MT6323_EFUSE_DOUT_96_111 0x0632 #define MT6323_EFUSE_DOUT_112_127 0x0634 #define MT6323_EFUSE_DOUT_128_143 0x0636 #define MT6323_EFUSE_DOUT_144_159 0x0638 #define MT6323_EFUSE_DOUT_160_175 0x063A #define MT6323_EFUSE_DOUT_176_191 0x063C #define MT6323_EFUSE_CON7 0x063E #define MT6323_EFUSE_CON8 0x0640 #define MT6323_EFUSE_CON9 0x0642 #define MT6323_RTC_MIX_CON0 0x0644 #define MT6323_RTC_MIX_CON1 0x0646 #define MT6323_AUDTOP_CON0 0x0700 #define MT6323_AUDTOP_CON1 0x0702 #define MT6323_AUDTOP_CON2 0x0704 #define MT6323_AUDTOP_CON3 0x0706 #define MT6323_AUDTOP_CON4 0x0708 #define MT6323_AUDTOP_CON5 0x070A #define MT6323_AUDTOP_CON6 0x070C #define MT6323_AUDTOP_CON7 0x070E #define MT6323_AUDTOP_CON8 0x0710 #define MT6323_AUDTOP_CON9 0x0712 #define MT6323_AUXADC_ADC0 0x0714 #define MT6323_AUXADC_ADC1 0x0716 #define MT6323_AUXADC_ADC2 0x0718 #define MT6323_AUXADC_ADC3 0x071A #define MT6323_AUXADC_ADC4 0x071C #define MT6323_AUXADC_ADC5 0x071E #define MT6323_AUXADC_ADC6 0x0720 #define MT6323_AUXADC_ADC7 0x0722 #define MT6323_AUXADC_ADC8 0x0724 #define MT6323_AUXADC_ADC9 0x0726 #define MT6323_AUXADC_ADC10 0x0728 #define MT6323_AUXADC_ADC11 0x072A #define MT6323_AUXADC_ADC12 0x072C #define MT6323_AUXADC_ADC13 0x072E #define MT6323_AUXADC_ADC14 0x0730 #define MT6323_AUXADC_ADC15 0x0732 #define MT6323_AUXADC_ADC16 0x0734 #define MT6323_AUXADC_ADC17 0x0736 #define MT6323_AUXADC_ADC18 0x0738 #define MT6323_AUXADC_ADC19 0x073A #define MT6323_AUXADC_ADC20 0x073C #define MT6323_AUXADC_RSV1 0x073E #define MT6323_AUXADC_RSV2 0x0740 #define MT6323_AUXADC_CON0 0x0742 
#define MT6323_AUXADC_CON1 0x0744 #define MT6323_AUXADC_CON2 0x0746 #define MT6323_AUXADC_CON3 0x0748 #define MT6323_AUXADC_CON4 0x074A #define MT6323_AUXADC_CON5 0x074C #define MT6323_AUXADC_CON6 0x074E #define MT6323_AUXADC_CON7 0x0750 #define MT6323_AUXADC_CON8 0x0752 #define MT6323_AUXADC_CON9 0x0754 #define MT6323_AUXADC_CON10 0x0756 #define MT6323_AUXADC_CON11 0x0758 #define MT6323_AUXADC_CON12 0x075A #define MT6323_AUXADC_CON13 0x075C #define MT6323_AUXADC_CON14 0x075E #define MT6323_AUXADC_CON15 0x0760 #define MT6323_AUXADC_CON16 0x0762 #define MT6323_AUXADC_CON17 0x0764 #define MT6323_AUXADC_CON18 0x0766 #define MT6323_AUXADC_CON19 0x0768 #define MT6323_AUXADC_CON20 0x076A #define MT6323_AUXADC_CON21 0x076C #define MT6323_AUXADC_CON22 0x076E #define MT6323_AUXADC_CON23 0x0770 #define MT6323_AUXADC_CON24 0x0772 #define MT6323_AUXADC_CON25 0x0774 #define MT6323_AUXADC_CON26 0x0776 #define MT6323_AUXADC_CON27 0x0778 #define MT6323_ACCDET_CON0 0x077A #define MT6323_ACCDET_CON1 0x077C #define MT6323_ACCDET_CON2 0x077E #define MT6323_ACCDET_CON3 0x0780 #define MT6323_ACCDET_CON4 0x0782 #define MT6323_ACCDET_CON5 0x0784 #define MT6323_ACCDET_CON6 0x0786 #define MT6323_ACCDET_CON7 0x0788 #define MT6323_ACCDET_CON8 0x078A #define MT6323_ACCDET_CON9 0x078C #define MT6323_ACCDET_CON10 0x078E #define MT6323_ACCDET_CON11 0x0790 #define MT6323_ACCDET_CON12 0x0792 #define MT6323_ACCDET_CON13 0x0794 #define MT6323_ACCDET_CON14 0x0796 #define MT6323_ACCDET_CON15 0x0798 #define MT6323_ACCDET_CON16 0x079A #endif /* __MFD_MT6323_REGISTERS_H__ */ mfd/mt6323/core.h 0000644 00000001447 14722070374 0007365 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com> */ #ifndef __MFD_MT6323_CORE_H__ #define __MFD_MT6323_CORE_H__ enum MT6323_IRQ_STATUS_numbers { MT6323_IRQ_STATUS_SPKL_AB = 0, MT6323_IRQ_STATUS_SPKL, MT6323_IRQ_STATUS_BAT_L, MT6323_IRQ_STATUS_BAT_H, MT6323_IRQ_STATUS_WATCHDOG, MT6323_IRQ_STATUS_PWRKEY, MT6323_IRQ_STATUS_THR_L, MT6323_IRQ_STATUS_THR_H, MT6323_IRQ_STATUS_VBATON_UNDET, MT6323_IRQ_STATUS_BVALID_DET, MT6323_IRQ_STATUS_CHRDET, MT6323_IRQ_STATUS_OV, MT6323_IRQ_STATUS_LDO = 16, MT6323_IRQ_STATUS_FCHRKEY, MT6323_IRQ_STATUS_ACCDET, MT6323_IRQ_STATUS_AUDIO, MT6323_IRQ_STATUS_RTC, MT6323_IRQ_STATUS_VPROC, MT6323_IRQ_STATUS_VSYS, MT6323_IRQ_STATUS_VPA, MT6323_IRQ_STATUS_NR, }; #endif /* __MFD_MT6323_CORE_H__ */ mfd/max8907.h 0000644 00000016603 14722070374 0006614 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Functions to access MAX8907 power management chip. * * Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com> * Copyright (C) 2012, NVIDIA CORPORATION. All rights reserved. 
*/ #ifndef __LINUX_MFD_MAX8907_H #define __LINUX_MFD_MAX8907_H #include <linux/mutex.h> #include <linux/pm.h> #define MAX8907_GEN_I2C_ADDR (0x78 >> 1) #define MAX8907_ADC_I2C_ADDR (0x8e >> 1) #define MAX8907_RTC_I2C_ADDR (0xd0 >> 1) /* MAX8907 register map */ #define MAX8907_REG_SYSENSEL 0x00 #define MAX8907_REG_ON_OFF_IRQ1 0x01 #define MAX8907_REG_ON_OFF_IRQ1_MASK 0x02 #define MAX8907_REG_ON_OFF_STAT 0x03 #define MAX8907_REG_SDCTL1 0x04 #define MAX8907_REG_SDSEQCNT1 0x05 #define MAX8907_REG_SDV1 0x06 #define MAX8907_REG_SDCTL2 0x07 #define MAX8907_REG_SDSEQCNT2 0x08 #define MAX8907_REG_SDV2 0x09 #define MAX8907_REG_SDCTL3 0x0A #define MAX8907_REG_SDSEQCNT3 0x0B #define MAX8907_REG_SDV3 0x0C #define MAX8907_REG_ON_OFF_IRQ2 0x0D #define MAX8907_REG_ON_OFF_IRQ2_MASK 0x0E #define MAX8907_REG_RESET_CNFG 0x0F #define MAX8907_REG_LDOCTL16 0x10 #define MAX8907_REG_LDOSEQCNT16 0x11 #define MAX8907_REG_LDO16VOUT 0x12 #define MAX8907_REG_SDBYSEQCNT 0x13 #define MAX8907_REG_LDOCTL17 0x14 #define MAX8907_REG_LDOSEQCNT17 0x15 #define MAX8907_REG_LDO17VOUT 0x16 #define MAX8907_REG_LDOCTL1 0x18 #define MAX8907_REG_LDOSEQCNT1 0x19 #define MAX8907_REG_LDO1VOUT 0x1A #define MAX8907_REG_LDOCTL2 0x1C #define MAX8907_REG_LDOSEQCNT2 0x1D #define MAX8907_REG_LDO2VOUT 0x1E #define MAX8907_REG_LDOCTL3 0x20 #define MAX8907_REG_LDOSEQCNT3 0x21 #define MAX8907_REG_LDO3VOUT 0x22 #define MAX8907_REG_LDOCTL4 0x24 #define MAX8907_REG_LDOSEQCNT4 0x25 #define MAX8907_REG_LDO4VOUT 0x26 #define MAX8907_REG_LDOCTL5 0x28 #define MAX8907_REG_LDOSEQCNT5 0x29 #define MAX8907_REG_LDO5VOUT 0x2A #define MAX8907_REG_LDOCTL6 0x2C #define MAX8907_REG_LDOSEQCNT6 0x2D #define MAX8907_REG_LDO6VOUT 0x2E #define MAX8907_REG_LDOCTL7 0x30 #define MAX8907_REG_LDOSEQCNT7 0x31 #define MAX8907_REG_LDO7VOUT 0x32 #define MAX8907_REG_LDOCTL8 0x34 #define MAX8907_REG_LDOSEQCNT8 0x35 #define MAX8907_REG_LDO8VOUT 0x36 #define MAX8907_REG_LDOCTL9 0x38 #define MAX8907_REG_LDOSEQCNT9 0x39 #define MAX8907_REG_LDO9VOUT 0x3A #define MAX8907_REG_LDOCTL10 0x3C #define MAX8907_REG_LDOSEQCNT10 0x3D #define MAX8907_REG_LDO10VOUT 0x3E #define MAX8907_REG_LDOCTL11 0x40 #define MAX8907_REG_LDOSEQCNT11 0x41 #define MAX8907_REG_LDO11VOUT 0x42 #define MAX8907_REG_LDOCTL12 0x44 #define MAX8907_REG_LDOSEQCNT12 0x45 #define MAX8907_REG_LDO12VOUT 0x46 #define MAX8907_REG_LDOCTL13 0x48 #define MAX8907_REG_LDOSEQCNT13 0x49 #define MAX8907_REG_LDO13VOUT 0x4A #define MAX8907_REG_LDOCTL14 0x4C #define MAX8907_REG_LDOSEQCNT14 0x4D #define MAX8907_REG_LDO14VOUT 0x4E #define MAX8907_REG_LDOCTL15 0x50 #define MAX8907_REG_LDOSEQCNT15 0x51 #define MAX8907_REG_LDO15VOUT 0x52 #define MAX8907_REG_OUT5VEN 0x54 #define MAX8907_REG_OUT5VSEQ 0x55 #define MAX8907_REG_OUT33VEN 0x58 #define MAX8907_REG_OUT33VSEQ 0x59 #define MAX8907_REG_LDOCTL19 0x5C #define MAX8907_REG_LDOSEQCNT19 0x5D #define MAX8907_REG_LDO19VOUT 0x5E #define MAX8907_REG_LBCNFG 0x60 #define MAX8907_REG_SEQ1CNFG 0x64 #define MAX8907_REG_SEQ2CNFG 0x65 #define MAX8907_REG_SEQ3CNFG 0x66 #define MAX8907_REG_SEQ4CNFG 0x67 #define MAX8907_REG_SEQ5CNFG 0x68 #define MAX8907_REG_SEQ6CNFG 0x69 #define MAX8907_REG_SEQ7CNFG 0x6A #define MAX8907_REG_LDOCTL18 0x72 #define MAX8907_REG_LDOSEQCNT18 0x73 #define MAX8907_REG_LDO18VOUT 0x74 #define MAX8907_REG_BBAT_CNFG 0x78 #define MAX8907_REG_CHG_CNTL1 0x7C #define MAX8907_REG_CHG_CNTL2 0x7D #define MAX8907_REG_CHG_IRQ1 0x7E #define MAX8907_REG_CHG_IRQ2 0x7F #define MAX8907_REG_CHG_IRQ1_MASK 0x80 #define MAX8907_REG_CHG_IRQ2_MASK 0x81 #define MAX8907_REG_CHG_STAT 0x82 #define 
MAX8907_REG_WLED_MODE_CNTL 0x84 #define MAX8907_REG_ILED_CNTL 0x84 #define MAX8907_REG_II1RR 0x8E #define MAX8907_REG_II2RR 0x8F #define MAX8907_REG_LDOCTL20 0x9C #define MAX8907_REG_LDOSEQCNT20 0x9D #define MAX8907_REG_LDO20VOUT 0x9E /* RTC register map */ #define MAX8907_REG_RTC_SEC 0x00 #define MAX8907_REG_RTC_MIN 0x01 #define MAX8907_REG_RTC_HOURS 0x02 #define MAX8907_REG_RTC_WEEKDAY 0x03 #define MAX8907_REG_RTC_DATE 0x04 #define MAX8907_REG_RTC_MONTH 0x05 #define MAX8907_REG_RTC_YEAR1 0x06 #define MAX8907_REG_RTC_YEAR2 0x07 #define MAX8907_REG_ALARM0_SEC 0x08 #define MAX8907_REG_ALARM0_MIN 0x09 #define MAX8907_REG_ALARM0_HOURS 0x0A #define MAX8907_REG_ALARM0_WEEKDAY 0x0B #define MAX8907_REG_ALARM0_DATE 0x0C #define MAX8907_REG_ALARM0_MONTH 0x0D #define MAX8907_REG_ALARM0_YEAR1 0x0E #define MAX8907_REG_ALARM0_YEAR2 0x0F #define MAX8907_REG_ALARM1_SEC 0x10 #define MAX8907_REG_ALARM1_MIN 0x11 #define MAX8907_REG_ALARM1_HOURS 0x12 #define MAX8907_REG_ALARM1_WEEKDAY 0x13 #define MAX8907_REG_ALARM1_DATE 0x14 #define MAX8907_REG_ALARM1_MONTH 0x15 #define MAX8907_REG_ALARM1_YEAR1 0x16 #define MAX8907_REG_ALARM1_YEAR2 0x17 #define MAX8907_REG_ALARM0_CNTL 0x18 #define MAX8907_REG_ALARM1_CNTL 0x19 #define MAX8907_REG_RTC_STATUS 0x1A #define MAX8907_REG_RTC_CNTL 0x1B #define MAX8907_REG_RTC_IRQ 0x1C #define MAX8907_REG_RTC_IRQ_MASK 0x1D #define MAX8907_REG_MPL_CNTL 0x1E /* ADC and Touch Screen Controller register map */ #define MAX8907_CTL 0 #define MAX8907_SEQCNT 1 #define MAX8907_VOUT 2 /* mask bit fields */ #define MAX8907_MASK_LDO_SEQ 0x1C #define MAX8907_MASK_LDO_EN 0x01 #define MAX8907_MASK_VBBATTCV 0x03 #define MAX8907_MASK_OUT5V_VINEN 0x10 #define MAX8907_MASK_OUT5V_ENSRC 0x0E #define MAX8907_MASK_OUT5V_EN 0x01 #define MAX8907_MASK_POWER_OFF 0x40 /* Regulator IDs */ #define MAX8907_MBATT 0 #define MAX8907_SD1 1 #define MAX8907_SD2 2 #define MAX8907_SD3 3 #define MAX8907_LDO1 4 #define MAX8907_LDO2 5 #define MAX8907_LDO3 6 #define MAX8907_LDO4 7 #define MAX8907_LDO5 8 #define MAX8907_LDO6 9 #define MAX8907_LDO7 10 #define MAX8907_LDO8 11 #define MAX8907_LDO9 12 #define MAX8907_LDO10 13 #define MAX8907_LDO11 14 #define MAX8907_LDO12 15 #define MAX8907_LDO13 16 #define MAX8907_LDO14 17 #define MAX8907_LDO15 18 #define MAX8907_LDO16 19 #define MAX8907_LDO17 20 #define MAX8907_LDO18 21 #define MAX8907_LDO19 22 #define MAX8907_LDO20 23 #define MAX8907_OUT5V 24 #define MAX8907_OUT33V 25 #define MAX8907_BBAT 26 #define MAX8907_SDBY 27 #define MAX8907_VRTC 28 #define MAX8907_NUM_REGULATORS (MAX8907_VRTC + 1) /* IRQ definitions */ enum { MAX8907_IRQ_VCHG_DC_OVP = 0, MAX8907_IRQ_VCHG_DC_F, MAX8907_IRQ_VCHG_DC_R, MAX8907_IRQ_VCHG_THM_OK_R, MAX8907_IRQ_VCHG_THM_OK_F, MAX8907_IRQ_VCHG_MBATTLOW_F, MAX8907_IRQ_VCHG_MBATTLOW_R, MAX8907_IRQ_VCHG_RST, MAX8907_IRQ_VCHG_DONE, MAX8907_IRQ_VCHG_TOPOFF, MAX8907_IRQ_VCHG_TMR_FAULT, MAX8907_IRQ_GPM_RSTIN = 0, MAX8907_IRQ_GPM_MPL, MAX8907_IRQ_GPM_SW_3SEC, MAX8907_IRQ_GPM_EXTON_F, MAX8907_IRQ_GPM_EXTON_R, MAX8907_IRQ_GPM_SW_1SEC, MAX8907_IRQ_GPM_SW_F, MAX8907_IRQ_GPM_SW_R, MAX8907_IRQ_GPM_SYSCKEN_F, MAX8907_IRQ_GPM_SYSCKEN_R, MAX8907_IRQ_RTC_ALARM1 = 0, MAX8907_IRQ_RTC_ALARM0, }; struct max8907_platform_data { struct regulator_init_data *init_data[MAX8907_NUM_REGULATORS]; bool pm_off; }; struct regmap_irq_chips_data; struct max8907 { struct device *dev; struct mutex irq_lock; struct i2c_client *i2c_gen; struct i2c_client *i2c_rtc; struct regmap *regmap_gen; struct regmap *regmap_rtc; struct regmap_irq_chip_data *irqc_chg; struct regmap_irq_chip_data *irqc_on_off; 
struct regmap_irq_chip_data *irqc_rtc; }; #endif mfd/htc-pasic3.h 0000644 00000002311 14722070374 0007424 0 ustar 00 /* * HTC PASIC3 driver - LEDs and DS1WM * * Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * */ #ifndef __PASIC3_H #define __PASIC3_H #include <linux/platform_device.h> #include <linux/leds.h> extern void pasic3_write_register(struct device *dev, u32 reg, u8 val); extern u8 pasic3_read_register(struct device *dev, u32 reg); /* * mask for registers 0x20,0x21,0x22 */ #define PASIC3_MASK_LED0 0x04 #define PASIC3_MASK_LED1 0x08 #define PASIC3_MASK_LED2 0x40 /* * bits in register 0x06 */ #define PASIC3_BIT2_LED0 0x08 #define PASIC3_BIT2_LED1 0x10 #define PASIC3_BIT2_LED2 0x20 struct pasic3_led { struct led_classdev led; unsigned int hw_num; unsigned int bit2; unsigned int mask; struct pasic3_leds_machinfo *pdata; }; struct pasic3_leds_machinfo { unsigned int num_leds; unsigned int power_gpio; struct pasic3_led *leds; }; struct pasic3_platform_data { struct pasic3_leds_machinfo *led_pdata; unsigned int clock_rate; }; #endif mfd/rohm-bd70528.h 0000644 00000026423 14722070374 0007436 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Copyright (C) 2018 ROHM Semiconductors */ #ifndef __LINUX_MFD_BD70528_H__ #define __LINUX_MFD_BD70528_H__ #include <linux/bits.h> #include <linux/device.h> #include <linux/mfd/rohm-generic.h> #include <linux/regmap.h> enum { BD70528_BUCK1, BD70528_BUCK2, BD70528_BUCK3, BD70528_LDO1, BD70528_LDO2, BD70528_LDO3, BD70528_LED1, BD70528_LED2, }; struct bd70528_data { struct rohm_regmap_dev chip; struct mutex rtc_timer_lock; }; #define BD70528_BUCK_VOLTS 0x10 #define BD70528_LDO_VOLTS 0x20 #define BD70528_REG_BUCK1_EN 0x0F #define BD70528_REG_BUCK1_VOLT 0x15 #define BD70528_REG_BUCK2_EN 0x10 #define BD70528_REG_BUCK2_VOLT 0x16 #define BD70528_REG_BUCK3_EN 0x11 #define BD70528_REG_BUCK3_VOLT 0x17 #define BD70528_REG_LDO1_EN 0x1b #define BD70528_REG_LDO1_VOLT 0x1e #define BD70528_REG_LDO2_EN 0x1c #define BD70528_REG_LDO2_VOLT 0x1f #define BD70528_REG_LDO3_EN 0x1d #define BD70528_REG_LDO3_VOLT 0x20 #define BD70528_REG_LED_CTRL 0x2b #define BD70528_REG_LED_VOLT 0x29 #define BD70528_REG_LED_EN 0x2a /* main irq registers */ #define BD70528_REG_INT_MAIN 0x7E #define BD70528_REG_INT_MAIN_MASK 0x74 /* 'sub irq' registers */ #define BD70528_REG_INT_SHDN 0x7F #define BD70528_REG_INT_PWR_FLT 0x80 #define BD70528_REG_INT_VR_FLT 0x81 #define BD70528_REG_INT_MISC 0x82 #define BD70528_REG_INT_BAT1 0x83 #define BD70528_REG_INT_BAT2 0x84 #define BD70528_REG_INT_RTC 0x85 #define BD70528_REG_INT_GPIO 0x86 #define BD70528_REG_INT_OP_FAIL 0x87 #define BD70528_REG_INT_SHDN_MASK 0x75 #define BD70528_REG_INT_PWR_FLT_MASK 0x76 #define BD70528_REG_INT_VR_FLT_MASK 0x77 #define BD70528_REG_INT_MISC_MASK 0x78 #define BD70528_REG_INT_BAT1_MASK 0x79 #define BD70528_REG_INT_BAT2_MASK 0x7a #define BD70528_REG_INT_RTC_MASK 0x7b #define BD70528_REG_INT_GPIO_MASK 0x7c #define BD70528_REG_INT_OP_FAIL_MASK 0x7d /* Reset related 'magic' registers */ #define BD70528_REG_SHIPMODE 0x03 #define BD70528_REG_HWRESET 0x04 #define BD70528_REG_WARMRESET 0x05 #define BD70528_REG_STANDBY 0x06 /* GPIO registers */ #define BD70528_REG_GPIO_STATE 0x8F #define BD70528_REG_GPIO1_IN 0x4d #define BD70528_REG_GPIO2_IN 0x4f #define BD70528_REG_GPIO3_IN 0x51 #define BD70528_REG_GPIO4_IN 0x53 #define BD70528_REG_GPIO1_OUT 0x4e 
#define BD70528_REG_GPIO2_OUT 0x50 #define BD70528_REG_GPIO3_OUT 0x52 #define BD70528_REG_GPIO4_OUT 0x54 /* clk control */ #define BD70528_REG_CLK_OUT 0x2c /* RTC */ #define BD70528_REG_RTC_COUNT_H 0x2d #define BD70528_REG_RTC_COUNT_L 0x2e #define BD70528_REG_RTC_SEC 0x2f #define BD70528_REG_RTC_MINUTE 0x30 #define BD70528_REG_RTC_HOUR 0x31 #define BD70528_REG_RTC_WEEK 0x32 #define BD70528_REG_RTC_DAY 0x33 #define BD70528_REG_RTC_MONTH 0x34 #define BD70528_REG_RTC_YEAR 0x35 #define BD70528_REG_RTC_ALM_SEC 0x36 #define BD70528_REG_RTC_ALM_START BD70528_REG_RTC_ALM_SEC #define BD70528_REG_RTC_ALM_MINUTE 0x37 #define BD70528_REG_RTC_ALM_HOUR 0x38 #define BD70528_REG_RTC_ALM_WEEK 0x39 #define BD70528_REG_RTC_ALM_DAY 0x3a #define BD70528_REG_RTC_ALM_MONTH 0x3b #define BD70528_REG_RTC_ALM_YEAR 0x3c #define BD70528_REG_RTC_ALM_MASK 0x3d #define BD70528_REG_RTC_ALM_REPEAT 0x3e #define BD70528_REG_RTC_START BD70528_REG_RTC_SEC #define BD70528_REG_RTC_WAKE_SEC 0x43 #define BD70528_REG_RTC_WAKE_START BD70528_REG_RTC_WAKE_SEC #define BD70528_REG_RTC_WAKE_MIN 0x44 #define BD70528_REG_RTC_WAKE_HOUR 0x45 #define BD70528_REG_RTC_WAKE_CTRL 0x46 #define BD70528_REG_ELAPSED_TIMER_EN 0x42 #define BD70528_REG_WAKE_EN 0x46 /* WDT registers */ #define BD70528_REG_WDT_CTRL 0x4A #define BD70528_REG_WDT_HOUR 0x49 #define BD70528_REG_WDT_MINUTE 0x48 #define BD70528_REG_WDT_SEC 0x47 /* Charger / Battery */ #define BD70528_REG_CHG_CURR_STAT 0x59 #define BD70528_REG_CHG_BAT_STAT 0x57 #define BD70528_REG_CHG_BAT_TEMP 0x58 #define BD70528_REG_CHG_IN_STAT 0x56 #define BD70528_REG_CHG_DCIN_ILIM 0x5d #define BD70528_REG_CHG_CHG_CURR_WARM 0x61 #define BD70528_REG_CHG_CHG_CURR_COLD 0x62 /* Masks for main IRQ register bits */ enum { BD70528_INT_SHDN, #define BD70528_INT_SHDN_MASK BIT(BD70528_INT_SHDN) BD70528_INT_PWR_FLT, #define BD70528_INT_PWR_FLT_MASK BIT(BD70528_INT_PWR_FLT) BD70528_INT_VR_FLT, #define BD70528_INT_VR_FLT_MASK BIT(BD70528_INT_VR_FLT) BD70528_INT_MISC, #define BD70528_INT_MISC_MASK BIT(BD70528_INT_MISC) BD70528_INT_BAT1, #define BD70528_INT_BAT1_MASK BIT(BD70528_INT_BAT1) BD70528_INT_RTC, #define BD70528_INT_RTC_MASK BIT(BD70528_INT_RTC) BD70528_INT_GPIO, #define BD70528_INT_GPIO_MASK BIT(BD70528_INT_GPIO) BD70528_INT_OP_FAIL, #define BD70528_INT_OP_FAIL_MASK BIT(BD70528_INT_OP_FAIL) }; /* IRQs */ enum { /* Shutdown register IRQs */ BD70528_INT_LONGPUSH, BD70528_INT_WDT, BD70528_INT_HWRESET, BD70528_INT_RSTB_FAULT, BD70528_INT_VBAT_UVLO, BD70528_INT_TSD, BD70528_INT_RSTIN, /* Power failure register IRQs */ BD70528_INT_BUCK1_FAULT, BD70528_INT_BUCK2_FAULT, BD70528_INT_BUCK3_FAULT, BD70528_INT_LDO1_FAULT, BD70528_INT_LDO2_FAULT, BD70528_INT_LDO3_FAULT, BD70528_INT_LED1_FAULT, BD70528_INT_LED2_FAULT, /* VR FAULT register IRQs */ BD70528_INT_BUCK1_OCP, BD70528_INT_BUCK2_OCP, BD70528_INT_BUCK3_OCP, BD70528_INT_LED1_OCP, BD70528_INT_LED2_OCP, BD70528_INT_BUCK1_FULLON, BD70528_INT_BUCK2_FULLON, /* PMU register interrupts */ BD70528_INT_SHORTPUSH, BD70528_INT_AUTO_WAKEUP, BD70528_INT_STATE_CHANGE, /* Charger 1 register IRQs */ BD70528_INT_BAT_OV_RES, BD70528_INT_BAT_OV_DET, BD70528_INT_DBAT_DET, BD70528_INT_BATTSD_COLD_RES, BD70528_INT_BATTSD_COLD_DET, BD70528_INT_BATTSD_HOT_RES, BD70528_INT_BATTSD_HOT_DET, BD70528_INT_CHG_TSD, /* Charger 2 register IRQs */ BD70528_INT_BAT_RMV, BD70528_INT_BAT_DET, BD70528_INT_DCIN2_OV_RES, BD70528_INT_DCIN2_OV_DET, BD70528_INT_DCIN2_RMV, BD70528_INT_DCIN2_DET, BD70528_INT_DCIN1_RMV, BD70528_INT_DCIN1_DET, /* RTC register IRQs */ BD70528_INT_RTC_ALARM, BD70528_INT_ELPS_TIM, /* GPIO 
register IRQs */ BD70528_INT_GPIO0, BD70528_INT_GPIO1, BD70528_INT_GPIO2, BD70528_INT_GPIO3, /* Invalid operation register IRQs */ BD70528_INT_BUCK1_DVS_OPFAIL, BD70528_INT_BUCK2_DVS_OPFAIL, BD70528_INT_BUCK3_DVS_OPFAIL, BD70528_INT_LED1_VOLT_OPFAIL, BD70528_INT_LED2_VOLT_OPFAIL, }; /* Masks */ #define BD70528_INT_LONGPUSH_MASK 0x1 #define BD70528_INT_WDT_MASK 0x2 #define BD70528_INT_HWRESET_MASK 0x4 #define BD70528_INT_RSTB_FAULT_MASK 0x8 #define BD70528_INT_VBAT_UVLO_MASK 0x10 #define BD70528_INT_TSD_MASK 0x20 #define BD70528_INT_RSTIN_MASK 0x40 #define BD70528_INT_BUCK1_FAULT_MASK 0x1 #define BD70528_INT_BUCK2_FAULT_MASK 0x2 #define BD70528_INT_BUCK3_FAULT_MASK 0x4 #define BD70528_INT_LDO1_FAULT_MASK 0x8 #define BD70528_INT_LDO2_FAULT_MASK 0x10 #define BD70528_INT_LDO3_FAULT_MASK 0x20 #define BD70528_INT_LED1_FAULT_MASK 0x40 #define BD70528_INT_LED2_FAULT_MASK 0x80 #define BD70528_INT_BUCK1_OCP_MASK 0x1 #define BD70528_INT_BUCK2_OCP_MASK 0x2 #define BD70528_INT_BUCK3_OCP_MASK 0x4 #define BD70528_INT_LED1_OCP_MASK 0x8 #define BD70528_INT_LED2_OCP_MASK 0x10 #define BD70528_INT_BUCK1_FULLON_MASK 0x20 #define BD70528_INT_BUCK2_FULLON_MASK 0x40 #define BD70528_INT_SHORTPUSH_MASK 0x1 #define BD70528_INT_AUTO_WAKEUP_MASK 0x2 #define BD70528_INT_STATE_CHANGE_MASK 0x10 #define BD70528_INT_BAT_OV_RES_MASK 0x1 #define BD70528_INT_BAT_OV_DET_MASK 0x2 #define BD70528_INT_DBAT_DET_MASK 0x4 #define BD70528_INT_BATTSD_COLD_RES_MASK 0x8 #define BD70528_INT_BATTSD_COLD_DET_MASK 0x10 #define BD70528_INT_BATTSD_HOT_RES_MASK 0x20 #define BD70528_INT_BATTSD_HOT_DET_MASK 0x40 #define BD70528_INT_CHG_TSD_MASK 0x80 #define BD70528_INT_BAT_RMV_MASK 0x1 #define BD70528_INT_BAT_DET_MASK 0x2 #define BD70528_INT_DCIN2_OV_RES_MASK 0x4 #define BD70528_INT_DCIN2_OV_DET_MASK 0x8 #define BD70528_INT_DCIN2_RMV_MASK 0x10 #define BD70528_INT_DCIN2_DET_MASK 0x20 #define BD70528_INT_DCIN1_RMV_MASK 0x40 #define BD70528_INT_DCIN1_DET_MASK 0x80 #define BD70528_INT_RTC_ALARM_MASK 0x1 #define BD70528_INT_ELPS_TIM_MASK 0x2 #define BD70528_INT_GPIO0_MASK 0x1 #define BD70528_INT_GPIO1_MASK 0x2 #define BD70528_INT_GPIO2_MASK 0x4 #define BD70528_INT_GPIO3_MASK 0x8 #define BD70528_INT_BUCK1_DVS_OPFAIL_MASK 0x1 #define BD70528_INT_BUCK2_DVS_OPFAIL_MASK 0x2 #define BD70528_INT_BUCK3_DVS_OPFAIL_MASK 0x4 #define BD70528_INT_LED1_VOLT_OPFAIL_MASK 0x10 #define BD70528_INT_LED2_VOLT_OPFAIL_MASK 0x20 #define BD70528_DEBOUNCE_MASK 0x3 #define BD70528_DEBOUNCE_DISABLE 0 #define BD70528_DEBOUNCE_15MS 1 #define BD70528_DEBOUNCE_30MS 2 #define BD70528_DEBOUNCE_50MS 3 #define BD70528_GPIO_DRIVE_MASK 0x2 #define BD70528_GPIO_PUSH_PULL 0x0 #define BD70528_GPIO_OPEN_DRAIN 0x2 #define BD70528_GPIO_OUT_EN_MASK 0x80 #define BD70528_GPIO_OUT_ENABLE 0x80 #define BD70528_GPIO_OUT_DISABLE 0x0 #define BD70528_GPIO_OUT_HI 0x1 #define BD70528_GPIO_OUT_LO 0x0 #define BD70528_GPIO_OUT_MASK 0x1 #define BD70528_GPIO_IN_STATE_BASE 1 #define BD70528_CLK_OUT_EN_MASK 0x1 /* RTC masks to mask out reserved bits */ #define BD70528_MASK_RTC_SEC 0x7f #define BD70528_MASK_RTC_MINUTE 0x7f #define BD70528_MASK_RTC_HOUR_24H 0x80 #define BD70528_MASK_RTC_HOUR_PM 0x20 #define BD70528_MASK_RTC_HOUR 0x3f #define BD70528_MASK_RTC_DAY 0x3f #define BD70528_MASK_RTC_WEEK 0x07 #define BD70528_MASK_RTC_MONTH 0x1f #define BD70528_MASK_RTC_YEAR 0xff #define BD70528_MASK_RTC_COUNT_L 0x7f #define BD70528_MASK_ELAPSED_TIMER_EN 0x1 /* Mask second, min and hour fields * HW would support ALM irq for over 24h * (by setting day, month and year too) * but as we wish to keep this same as for * wake-up 
we limit ALM to 24H and only * unmask sec, min and hour */ #define BD70528_MASK_ALM_EN 0x7 #define BD70528_MASK_WAKE_EN 0x1 /* WDT masks */ #define BD70528_MASK_WDT_EN 0x1 #define BD70528_MASK_WDT_HOUR 0x1 #define BD70528_MASK_WDT_MINUTE 0x7f #define BD70528_MASK_WDT_SEC 0x7f #define BD70528_WDT_STATE_BIT 0x1 #define BD70528_ELAPSED_STATE_BIT 0x2 #define BD70528_WAKE_STATE_BIT 0x4 /* Charger masks */ #define BD70528_MASK_CHG_STAT 0x7f #define BD70528_MASK_CHG_BAT_TIMER 0x20 #define BD70528_MASK_CHG_BAT_OVERVOLT 0x10 #define BD70528_MASK_CHG_BAT_DETECT 0x1 #define BD70528_MASK_CHG_DCIN1_UVLO 0x1 #define BD70528_MASK_CHG_DCIN_ILIM 0x3f #define BD70528_MASK_CHG_CHG_CURR 0x1f #define BD70528_MASK_CHG_TRICKLE_CURR 0x10 /* * Note, external battery register is the lonely rider at * address 0xc5. See how to stuff that in the regmap */ #define BD70528_MAX_REGISTER 0x94 /* Buck control masks */ #define BD70528_MASK_RUN_EN 0x4 #define BD70528_MASK_STBY_EN 0x2 #define BD70528_MASK_IDLE_EN 0x1 #define BD70528_MASK_LED1_EN 0x1 #define BD70528_MASK_LED2_EN 0x10 #define BD70528_MASK_BUCK_VOLT 0xf #define BD70528_MASK_LDO_VOLT 0x1f #define BD70528_MASK_LED1_VOLT 0x1 #define BD70528_MASK_LED2_VOLT 0x10 /* Misc irq masks */ #define BD70528_INT_MASK_SHORT_PUSH 1 #define BD70528_INT_MASK_AUTO_WAKE 2 #define BD70528_INT_MASK_POWER_STATE 4 #define BD70528_MASK_BUCK_RAMP 0x10 #define BD70528_SIFT_BUCK_RAMP 4 #if IS_ENABLED(CONFIG_BD70528_WATCHDOG) int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state); void bd70528_wdt_lock(struct rohm_regmap_dev *data); void bd70528_wdt_unlock(struct rohm_regmap_dev *data); #else /* CONFIG_BD70528_WATCHDOG */ static inline int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state) { return 0; } static inline void bd70528_wdt_lock(struct rohm_regmap_dev *data) { } static inline void bd70528_wdt_unlock(struct rohm_regmap_dev *data) { } #endif /* CONFIG_BD70528_WATCHDOG */ #endif /* __LINUX_MFD_BD70528_H__ */ mfd/as3711.h 0000644 00000005370 14722070374 0006415 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AS3711 PMIC MFC driver header * * Copyright (C) 2012 Renesas Electronics Corporation * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de> */ #ifndef MFD_AS3711_H #define MFD_AS3711_H /* * Client data */ /* Register addresses */ #define AS3711_SD_1_VOLTAGE 0 /* Digital Step-Down */ #define AS3711_SD_2_VOLTAGE 1 #define AS3711_SD_3_VOLTAGE 2 #define AS3711_SD_4_VOLTAGE 3 #define AS3711_LDO_1_VOLTAGE 4 /* Analog LDO */ #define AS3711_LDO_2_VOLTAGE 5 #define AS3711_LDO_3_VOLTAGE 6 /* Digital LDO */ #define AS3711_LDO_4_VOLTAGE 7 #define AS3711_LDO_5_VOLTAGE 8 #define AS3711_LDO_6_VOLTAGE 9 #define AS3711_LDO_7_VOLTAGE 0xa #define AS3711_LDO_8_VOLTAGE 0xb #define AS3711_SD_CONTROL 0x10 #define AS3711_GPIO_SIGNAL_OUT 0x20 #define AS3711_GPIO_SIGNAL_IN 0x21 #define AS3711_SD_CONTROL_1 0x30 #define AS3711_SD_CONTROL_2 0x31 #define AS3711_CURR_CONTROL 0x40 #define AS3711_CURR1_VALUE 0x43 #define AS3711_CURR2_VALUE 0x44 #define AS3711_CURR3_VALUE 0x45 #define AS3711_STEPUP_CONTROL_1 0x50 #define AS3711_STEPUP_CONTROL_2 0x51 #define AS3711_STEPUP_CONTROL_4 0x53 #define AS3711_STEPUP_CONTROL_5 0x54 #define AS3711_REG_STATUS 0x73 #define AS3711_INTERRUPT_STATUS_1 0x77 #define AS3711_INTERRUPT_STATUS_2 0x78 #define AS3711_INTERRUPT_STATUS_3 0x79 #define AS3711_CHARGER_STATUS_1 0x86 #define AS3711_CHARGER_STATUS_2 0x87 #define AS3711_ASIC_ID_1 0x90 #define AS3711_ASIC_ID_2 0x91 #define AS3711_MAX_REG AS3711_ASIC_ID_2 #define 
AS3711_NUM_REGS (AS3711_MAX_REG + 1) /* Regulators */ enum { AS3711_REGULATOR_SD_1, AS3711_REGULATOR_SD_2, AS3711_REGULATOR_SD_3, AS3711_REGULATOR_SD_4, AS3711_REGULATOR_LDO_1, AS3711_REGULATOR_LDO_2, AS3711_REGULATOR_LDO_3, AS3711_REGULATOR_LDO_4, AS3711_REGULATOR_LDO_5, AS3711_REGULATOR_LDO_6, AS3711_REGULATOR_LDO_7, AS3711_REGULATOR_LDO_8, AS3711_REGULATOR_MAX, }; struct device; struct regmap; struct as3711 { struct device *dev; struct regmap *regmap; }; #define AS3711_MAX_STEPDOWN 4 #define AS3711_MAX_STEPUP 2 #define AS3711_MAX_LDO 8 enum as3711_su2_feedback { AS3711_SU2_VOLTAGE, AS3711_SU2_CURR1, AS3711_SU2_CURR2, AS3711_SU2_CURR3, AS3711_SU2_CURR_AUTO, }; enum as3711_su2_fbprot { AS3711_SU2_LX_SD4, AS3711_SU2_GPIO2, AS3711_SU2_GPIO3, AS3711_SU2_GPIO4, }; /* * Platform data */ struct as3711_regulator_pdata { struct regulator_init_data *init_data[AS3711_REGULATOR_MAX]; }; struct as3711_bl_pdata { bool su1_fb; int su1_max_uA; bool su2_fb; int su2_max_uA; enum as3711_su2_feedback su2_feedback; enum as3711_su2_fbprot su2_fbprot; bool su2_auto_curr1; bool su2_auto_curr2; bool su2_auto_curr3; }; struct as3711_platform_data { struct as3711_regulator_pdata regulator; struct as3711_bl_pdata backlight; }; #endif mfd/motorola-cpcap.h 0000644 00000030553 14722070374 0010417 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * The register defines are based on earlier cpcap.h in Motorola Linux kernel * tree. * * Copyright (C) 2007-2009 Motorola, Inc. * * Rewritten for the real register offsets instead of enumeration * to make the defines usable with Linux kernel regmap support * * Copyright (C) 2016 Tony Lindgren <tony@atomide.com> */ #include <linux/device.h> #include <linux/regmap.h> #define CPCAP_VENDOR_ST 0 #define CPCAP_VENDOR_TI 1 #define CPCAP_REVISION_MAJOR(r) (((r) >> 4) + 1) #define CPCAP_REVISION_MINOR(r) ((r) & 0xf) #define CPCAP_REVISION_1_0 0x08 #define CPCAP_REVISION_1_1 0x09 #define CPCAP_REVISION_2_0 0x10 #define CPCAP_REVISION_2_1 0x11 /* CPCAP registers */ #define CPCAP_REG_INT1 0x0000 /* Interrupt 1 */ #define CPCAP_REG_INT2 0x0004 /* Interrupt 2 */ #define CPCAP_REG_INT3 0x0008 /* Interrupt 3 */ #define CPCAP_REG_INT4 0x000c /* Interrupt 4 */ #define CPCAP_REG_INTM1 0x0010 /* Interrupt Mask 1 */ #define CPCAP_REG_INTM2 0x0014 /* Interrupt Mask 2 */ #define CPCAP_REG_INTM3 0x0018 /* Interrupt Mask 3 */ #define CPCAP_REG_INTM4 0x001c /* Interrupt Mask 4 */ #define CPCAP_REG_INTS1 0x0020 /* Interrupt Sense 1 */ #define CPCAP_REG_INTS2 0x0024 /* Interrupt Sense 2 */ #define CPCAP_REG_INTS3 0x0028 /* Interrupt Sense 3 */ #define CPCAP_REG_INTS4 0x002c /* Interrupt Sense 4 */ #define CPCAP_REG_ASSIGN1 0x0030 /* Resource Assignment 1 */ #define CPCAP_REG_ASSIGN2 0x0034 /* Resource Assignment 2 */ #define CPCAP_REG_ASSIGN3 0x0038 /* Resource Assignment 3 */ #define CPCAP_REG_ASSIGN4 0x003c /* Resource Assignment 4 */ #define CPCAP_REG_ASSIGN5 0x0040 /* Resource Assignment 5 */ #define CPCAP_REG_ASSIGN6 0x0044 /* Resource Assignment 6 */ #define CPCAP_REG_VERSC1 0x0048 /* Version Control 1 */ #define CPCAP_REG_VERSC2 0x004c /* Version Control 2 */ #define CPCAP_REG_MI1 0x0200 /* Macro Interrupt 1 */ #define CPCAP_REG_MIM1 0x0204 /* Macro Interrupt Mask 1 */ #define CPCAP_REG_MI2 0x0208 /* Macro Interrupt 2 */ #define CPCAP_REG_MIM2 0x020c /* Macro Interrupt Mask 2 */ #define CPCAP_REG_UCC1 0x0210 /* UC Control 1 */ #define CPCAP_REG_UCC2 0x0214 /* UC Control 2 */ #define CPCAP_REG_PC1 0x021c /* Power Cut 1 */ #define CPCAP_REG_PC2 0x0220 /* Power Cut 2 */ #define 
CPCAP_REG_BPEOL 0x0224 /* BP and EOL */ #define CPCAP_REG_PGC 0x0228 /* Power Gate and Control */ #define CPCAP_REG_MT1 0x022c /* Memory Transfer 1 */ #define CPCAP_REG_MT2 0x0230 /* Memory Transfer 2 */ #define CPCAP_REG_MT3 0x0234 /* Memory Transfer 3 */ #define CPCAP_REG_PF 0x0238 /* Print Format */ #define CPCAP_REG_SCC 0x0400 /* System Clock Control */ #define CPCAP_REG_SW1 0x0404 /* Stop Watch 1 */ #define CPCAP_REG_SW2 0x0408 /* Stop Watch 2 */ #define CPCAP_REG_UCTM 0x040c /* UC Turbo Mode */ #define CPCAP_REG_TOD1 0x0410 /* Time of Day 1 */ #define CPCAP_REG_TOD2 0x0414 /* Time of Day 2 */ #define CPCAP_REG_TODA1 0x0418 /* Time of Day Alarm 1 */ #define CPCAP_REG_TODA2 0x041c /* Time of Day Alarm 2 */ #define CPCAP_REG_DAY 0x0420 /* Day */ #define CPCAP_REG_DAYA 0x0424 /* Day Alarm */ #define CPCAP_REG_VAL1 0x0428 /* Validity 1 */ #define CPCAP_REG_VAL2 0x042c /* Validity 2 */ #define CPCAP_REG_SDVSPLL 0x0600 /* Switcher DVS and PLL */ #define CPCAP_REG_SI2CC1 0x0604 /* Switcher I2C Control 1 */ #define CPCAP_REG_Si2CC2 0x0608 /* Switcher I2C Control 2 */ #define CPCAP_REG_S1C1 0x060c /* Switcher 1 Control 1 */ #define CPCAP_REG_S1C2 0x0610 /* Switcher 1 Control 2 */ #define CPCAP_REG_S2C1 0x0614 /* Switcher 2 Control 1 */ #define CPCAP_REG_S2C2 0x0618 /* Switcher 2 Control 2 */ #define CPCAP_REG_S3C 0x061c /* Switcher 3 Control */ #define CPCAP_REG_S4C1 0x0620 /* Switcher 4 Control 1 */ #define CPCAP_REG_S4C2 0x0624 /* Switcher 4 Control 2 */ #define CPCAP_REG_S5C 0x0628 /* Switcher 5 Control */ #define CPCAP_REG_S6C 0x062c /* Switcher 6 Control */ #define CPCAP_REG_VCAMC 0x0630 /* VCAM Control */ #define CPCAP_REG_VCSIC 0x0634 /* VCSI Control */ #define CPCAP_REG_VDACC 0x0638 /* VDAC Control */ #define CPCAP_REG_VDIGC 0x063c /* VDIG Control */ #define CPCAP_REG_VFUSEC 0x0640 /* VFUSE Control */ #define CPCAP_REG_VHVIOC 0x0644 /* VHVIO Control */ #define CPCAP_REG_VSDIOC 0x0648 /* VSDIO Control */ #define CPCAP_REG_VPLLC 0x064c /* VPLL Control */ #define CPCAP_REG_VRF1C 0x0650 /* VRF1 Control */ #define CPCAP_REG_VRF2C 0x0654 /* VRF2 Control */ #define CPCAP_REG_VRFREFC 0x0658 /* VRFREF Control */ #define CPCAP_REG_VWLAN1C 0x065c /* VWLAN1 Control */ #define CPCAP_REG_VWLAN2C 0x0660 /* VWLAN2 Control */ #define CPCAP_REG_VSIMC 0x0664 /* VSIM Control */ #define CPCAP_REG_VVIBC 0x0668 /* VVIB Control */ #define CPCAP_REG_VUSBC 0x066c /* VUSB Control */ #define CPCAP_REG_VUSBINT1C 0x0670 /* VUSBINT1 Control */ #define CPCAP_REG_VUSBINT2C 0x0674 /* VUSBINT2 Control */ #define CPCAP_REG_URT 0x0678 /* Useroff Regulator Trigger */ #define CPCAP_REG_URM1 0x067c /* Useroff Regulator Mask 1 */ #define CPCAP_REG_URM2 0x0680 /* Useroff Regulator Mask 2 */ #define CPCAP_REG_VAUDIOC 0x0800 /* VAUDIO Control */ #define CPCAP_REG_CC 0x0804 /* Codec Control */ #define CPCAP_REG_CDI 0x0808 /* Codec Digital Interface */ #define CPCAP_REG_SDAC 0x080c /* Stereo DAC */ #define CPCAP_REG_SDACDI 0x0810 /* Stereo DAC Digital Interface */ #define CPCAP_REG_TXI 0x0814 /* TX Inputs */ #define CPCAP_REG_TXMP 0x0818 /* TX MIC PGA's */ #define CPCAP_REG_RXOA 0x081c /* RX Output Amplifiers */ #define CPCAP_REG_RXVC 0x0820 /* RX Volume Control */ #define CPCAP_REG_RXCOA 0x0824 /* RX Codec to Output Amps */ #define CPCAP_REG_RXSDOA 0x0828 /* RX Stereo DAC to Output Amps */ #define CPCAP_REG_RXEPOA 0x082c /* RX External PGA to Output Amps */ #define CPCAP_REG_RXLL 0x0830 /* RX Low Latency */ #define CPCAP_REG_A2LA 0x0834 /* A2 Loudspeaker Amplifier */ #define CPCAP_REG_MIPIS1 0x0838 /* MIPI Slimbus 1 */ #define 
CPCAP_REG_MIPIS2 0x083c /* MIPI Slimbus 2 */ #define CPCAP_REG_MIPIS3 0x0840 /* MIPI Slimbus 3 */ #define CPCAP_REG_LVAB 0x0844 /* LMR Volume and A4 Balanced */ #define CPCAP_REG_CCC1 0x0a00 /* Coulomb Counter Control 1 */ #define CPCAP_REG_CRM 0x0a04 /* Charger and Reverse Mode */ #define CPCAP_REG_CCCC2 0x0a08 /* Coincell and Coulomb Ctr Ctrl 2 */ #define CPCAP_REG_CCS1 0x0a0c /* Coulomb Counter Sample 1 */ #define CPCAP_REG_CCS2 0x0a10 /* Coulomb Counter Sample 2 */ #define CPCAP_REG_CCA1 0x0a14 /* Coulomb Counter Accumulator 1 */ #define CPCAP_REG_CCA2 0x0a18 /* Coulomb Counter Accumulator 2 */ #define CPCAP_REG_CCM 0x0a1c /* Coulomb Counter Mode */ #define CPCAP_REG_CCO 0x0a20 /* Coulomb Counter Offset */ #define CPCAP_REG_CCI 0x0a24 /* Coulomb Counter Integrator */ #define CPCAP_REG_ADCC1 0x0c00 /* A/D Converter Configuration 1 */ #define CPCAP_REG_ADCC2 0x0c04 /* A/D Converter Configuration 2 */ #define CPCAP_REG_ADCD0 0x0c08 /* A/D Converter Data 0 */ #define CPCAP_REG_ADCD1 0x0c0c /* A/D Converter Data 1 */ #define CPCAP_REG_ADCD2 0x0c10 /* A/D Converter Data 2 */ #define CPCAP_REG_ADCD3 0x0c14 /* A/D Converter Data 3 */ #define CPCAP_REG_ADCD4 0x0c18 /* A/D Converter Data 4 */ #define CPCAP_REG_ADCD5 0x0c1c /* A/D Converter Data 5 */ #define CPCAP_REG_ADCD6 0x0c20 /* A/D Converter Data 6 */ #define CPCAP_REG_ADCD7 0x0c24 /* A/D Converter Data 7 */ #define CPCAP_REG_ADCAL1 0x0c28 /* A/D Converter Calibration 1 */ #define CPCAP_REG_ADCAL2 0x0c2c /* A/D Converter Calibration 2 */ #define CPCAP_REG_USBC1 0x0e00 /* USB Control 1 */ #define CPCAP_REG_USBC2 0x0e04 /* USB Control 2 */ #define CPCAP_REG_USBC3 0x0e08 /* USB Control 3 */ #define CPCAP_REG_UVIDL 0x0e0c /* ULPI Vendor ID Low */ #define CPCAP_REG_UVIDH 0x0e10 /* ULPI Vendor ID High */ #define CPCAP_REG_UPIDL 0x0e14 /* ULPI Product ID Low */ #define CPCAP_REG_UPIDH 0x0e18 /* ULPI Product ID High */ #define CPCAP_REG_UFC1 0x0e1c /* ULPI Function Control 1 */ #define CPCAP_REG_UFC2 0x0e20 /* ULPI Function Control 2 */ #define CPCAP_REG_UFC3 0x0e24 /* ULPI Function Control 3 */ #define CPCAP_REG_UIC1 0x0e28 /* ULPI Interface Control 1 */ #define CPCAP_REG_UIC2 0x0e2c /* ULPI Interface Control 2 */ #define CPCAP_REG_UIC3 0x0e30 /* ULPI Interface Control 3 */ #define CPCAP_REG_USBOTG1 0x0e34 /* USB OTG Control 1 */ #define CPCAP_REG_USBOTG2 0x0e38 /* USB OTG Control 2 */ #define CPCAP_REG_USBOTG3 0x0e3c /* USB OTG Control 3 */ #define CPCAP_REG_UIER1 0x0e40 /* USB Interrupt Enable Rising 1 */ #define CPCAP_REG_UIER2 0x0e44 /* USB Interrupt Enable Rising 2 */ #define CPCAP_REG_UIER3 0x0e48 /* USB Interrupt Enable Rising 3 */ #define CPCAP_REG_UIEF1 0x0e4c /* USB Interrupt Enable Falling 1 */ #define CPCAP_REG_UIEF2 0x0e50 /* USB Interrupt Enable Falling 2 */ #define CPCAP_REG_UIEF3 0x0e54 /* USB Interrupt Enable Falling 3 */ #define CPCAP_REG_UIS 0x0e58 /* USB Interrupt Status */ #define CPCAP_REG_UIL 0x0e5c /* USB Interrupt Latch */ #define CPCAP_REG_USBD 0x0e60 /* USB Debug */ #define CPCAP_REG_SCR1 0x0e64 /* Scratch 1 */ #define CPCAP_REG_SCR2 0x0e68 /* Scratch 2 */ #define CPCAP_REG_SCR3 0x0e6c /* Scratch 3 */ #define CPCAP_REG_VMC 0x0eac /* Video Mux Control */ #define CPCAP_REG_OWDC 0x0eb0 /* One Wire Device Control */ #define CPCAP_REG_GPIO0 0x0eb4 /* GPIO 0 Control */ #define CPCAP_REG_GPIO1 0x0ebc /* GPIO 1 Control */ #define CPCAP_REG_GPIO2 0x0ec4 /* GPIO 2 Control */ #define CPCAP_REG_GPIO3 0x0ecc /* GPIO 3 Control */ #define CPCAP_REG_GPIO4 0x0ed4 /* GPIO 4 Control */ #define CPCAP_REG_GPIO5 0x0edc /* GPIO 5 Control */ 
#define CPCAP_REG_GPIO6 0x0ee4 /* GPIO 6 Control */ #define CPCAP_REG_MDLC 0x1000 /* Main Display Lighting Control */ #define CPCAP_REG_KLC 0x1004 /* Keypad Lighting Control */ #define CPCAP_REG_ADLC 0x1008 /* Aux Display Lighting Control */ #define CPCAP_REG_REDC 0x100c /* Red Triode Control */ #define CPCAP_REG_GREENC 0x1010 /* Green Triode Control */ #define CPCAP_REG_BLUEC 0x1014 /* Blue Triode Control */ #define CPCAP_REG_CFC 0x1018 /* Camera Flash Control */ #define CPCAP_REG_ABC 0x101c /* Adaptive Boost Control */ #define CPCAP_REG_BLEDC 0x1020 /* Bluetooth LED Control */ #define CPCAP_REG_CLEDC 0x1024 /* Camera Privacy LED Control */ #define CPCAP_REG_OW1C 0x1200 /* One Wire 1 Command */ #define CPCAP_REG_OW1D 0x1204 /* One Wire 1 Data */ #define CPCAP_REG_OW1I 0x1208 /* One Wire 1 Interrupt */ #define CPCAP_REG_OW1IE 0x120c /* One Wire 1 Interrupt Enable */ #define CPCAP_REG_OW1 0x1214 /* One Wire 1 Control */ #define CPCAP_REG_OW2C 0x1220 /* One Wire 2 Command */ #define CPCAP_REG_OW2D 0x1224 /* One Wire 2 Data */ #define CPCAP_REG_OW2I 0x1228 /* One Wire 2 Interrupt */ #define CPCAP_REG_OW2IE 0x122c /* One Wire 2 Interrupt Enable */ #define CPCAP_REG_OW2 0x1234 /* One Wire 2 Control */ #define CPCAP_REG_OW3C 0x1240 /* One Wire 3 Command */ #define CPCAP_REG_OW3D 0x1244 /* One Wire 3 Data */ #define CPCAP_REG_OW3I 0x1248 /* One Wire 3 Interrupt */ #define CPCAP_REG_OW3IE 0x124c /* One Wire 3 Interrupt Enable */ #define CPCAP_REG_OW3 0x1254 /* One Wire 3 Control */ #define CPCAP_REG_GCAIC 0x1258 /* GCAI Clock Control */ #define CPCAP_REG_GCAIM 0x125c /* GCAI GPIO Mode */ #define CPCAP_REG_LGDIR 0x1260 /* LMR GCAI GPIO Direction */ #define CPCAP_REG_LGPU 0x1264 /* LMR GCAI GPIO Pull-up */ #define CPCAP_REG_LGPIN 0x1268 /* LMR GCAI GPIO Pin */ #define CPCAP_REG_LGMASK 0x126c /* LMR GCAI GPIO Mask */ #define CPCAP_REG_LDEB 0x1270 /* LMR Debounce Settings */ #define CPCAP_REG_LGDET 0x1274 /* LMR GCAI Detach Detect */ #define CPCAP_REG_LMISC 0x1278 /* LMR Misc Bits */ #define CPCAP_REG_LMACE 0x127c /* LMR Mace IC Support */ #define CPCAP_REG_TEST 0x7c00 /* Test */ #define CPCAP_REG_ST_TEST1 0x7d08 /* ST Test1 */ #define CPCAP_REG_ST_TEST2 0x7d18 /* ST Test2 */ /* * Helpers for child devices to check the revision and vendor. * * REVISIT: No documentation for the bits below, please update * to use proper names for defines when available. */ static inline int cpcap_get_revision(struct device *dev, struct regmap *regmap, u16 *revision) { unsigned int val; int ret; ret = regmap_read(regmap, CPCAP_REG_VERSC1, &val); if (ret) { dev_err(dev, "Could not read revision\n"); return ret; } *revision = ((val >> 3) & 0x7) | ((val << 3) & 0x38); return 0; } static inline int cpcap_get_vendor(struct device *dev, struct regmap *regmap, u16 *vendor) { unsigned int val; int ret; ret = regmap_read(regmap, CPCAP_REG_VERSC1, &val); if (ret) { dev_err(dev, "Could not read vendor\n"); return ret; } *vendor = (val >> 6) & 0x7; return 0; } extern int cpcap_sense_virq(struct regmap *regmap, int virq);
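/*
 * Illustrative sketch (not an API provided by this header): a CPCAP
 * child driver could identify the chip with the two helpers above. The
 * function name is an example; the regmap is assumed to come from the
 * parent MFD device.
 */
static inline int cpcap_check_chip(struct device *dev, struct regmap *regmap)
{
	u16 vendor, revision;
	int ret;

	ret = cpcap_get_vendor(dev, regmap, &vendor);
	if (ret)
		return ret;

	ret = cpcap_get_revision(dev, regmap, &revision);
	if (ret)
		return ret;

	/* Vendor is either CPCAP_VENDOR_ST or CPCAP_VENDOR_TI */
	dev_info(dev, "CPCAP vendor %s rev %i.%i\n",
		 vendor == CPCAP_VENDOR_ST ? "ST" : "TI",
		 CPCAP_REVISION_MAJOR(revision),
		 CPCAP_REVISION_MINOR(revision));

	return 0;
}
mfd/max77686.h 0000644 00000003761 14722070374 0006707 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * max77686.h - Driver for the Maxim 77686/802 * * Copyright (C) 2012 Samsung Electronics * Chiwoong Byun <woong.byun@samsung.com> * * This driver is based on max8997.h * * MAX77686 has PMIC, RTC devices. * The devices share the same I2C bus and are included in * this MFD driver. 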
*/ #ifndef __LINUX_MFD_MAX77686_H #define __LINUX_MFD_MAX77686_H #include <linux/regulator/consumer.h> /* MAX77686 regulator IDs */ enum max77686_regulators { MAX77686_LDO1 = 0, MAX77686_LDO2, MAX77686_LDO3, MAX77686_LDO4, MAX77686_LDO5, MAX77686_LDO6, MAX77686_LDO7, MAX77686_LDO8, MAX77686_LDO9, MAX77686_LDO10, MAX77686_LDO11, MAX77686_LDO12, MAX77686_LDO13, MAX77686_LDO14, MAX77686_LDO15, MAX77686_LDO16, MAX77686_LDO17, MAX77686_LDO18, MAX77686_LDO19, MAX77686_LDO20, MAX77686_LDO21, MAX77686_LDO22, MAX77686_LDO23, MAX77686_LDO24, MAX77686_LDO25, MAX77686_LDO26, MAX77686_BUCK1, MAX77686_BUCK2, MAX77686_BUCK3, MAX77686_BUCK4, MAX77686_BUCK5, MAX77686_BUCK6, MAX77686_BUCK7, MAX77686_BUCK8, MAX77686_BUCK9, MAX77686_REG_MAX, }; /* MAX77802 regulator IDs */ enum max77802_regulators { MAX77802_BUCK1 = 0, MAX77802_BUCK2, MAX77802_BUCK3, MAX77802_BUCK4, MAX77802_BUCK5, MAX77802_BUCK6, MAX77802_BUCK7, MAX77802_BUCK8, MAX77802_BUCK9, MAX77802_BUCK10, MAX77802_LDO1, MAX77802_LDO2, MAX77802_LDO3, MAX77802_LDO4, MAX77802_LDO5, MAX77802_LDO6, MAX77802_LDO7, MAX77802_LDO8, MAX77802_LDO9, MAX77802_LDO10, MAX77802_LDO11, MAX77802_LDO12, MAX77802_LDO13, MAX77802_LDO14, MAX77802_LDO15, MAX77802_LDO17, MAX77802_LDO18, MAX77802_LDO19, MAX77802_LDO20, MAX77802_LDO21, MAX77802_LDO23, MAX77802_LDO24, MAX77802_LDO25, MAX77802_LDO26, MAX77802_LDO27, MAX77802_LDO28, MAX77802_LDO29, MAX77802_LDO30, MAX77802_LDO32, MAX77802_LDO33, MAX77802_LDO34, MAX77802_LDO35, MAX77802_REG_MAX, }; enum max77686_opmode { MAX77686_OPMODE_NORMAL, MAX77686_OPMODE_LP, MAX77686_OPMODE_STANDBY, }; #endif /* __LINUX_MFD_MAX77686_H */ mfd/menelaus.h 0000644 00000002404 14722070374 0007302 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Functions to access Menelaus power management chip */ #ifndef __ASM_ARCH_MENELAUS_H #define __ASM_ARCH_MENELAUS_H struct device; struct menelaus_platform_data { int (* late_init)(struct device *dev); }; extern int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask), void *data); extern void menelaus_unregister_mmc_callback(void); extern int menelaus_set_mmc_opendrain(int slot, int enable); extern int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_on); extern int menelaus_set_vmem(unsigned int mV); extern int menelaus_set_vio(unsigned int mV); extern int menelaus_set_vmmc(unsigned int mV); extern int menelaus_set_vaux(unsigned int mV); extern int menelaus_set_vdcdc(int dcdc, unsigned int mV); extern int menelaus_set_slot_sel(int enable); extern int menelaus_get_slot_pin_states(void); extern int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV); #define EN_VPLL_SLEEP (1 << 7) #define EN_VMMC_SLEEP (1 << 6) #define EN_VAUX_SLEEP (1 << 5) #define EN_VIO_SLEEP (1 << 4) #define EN_VMEM_SLEEP (1 << 3) #define EN_DC3_SLEEP (1 << 2) #define EN_DC2_SLEEP (1 << 1) #define EN_VC_SLEEP (1 << 0) extern int menelaus_set_regulator_sleep(int enable, u32 val); #endif mfd/intel_soc_pmic_mrfld.h 0000644 00000004360 14722070374 0011647 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Header file for Intel Merrifield Basin Cove PMIC * * Copyright (C) 2019 Intel Corporation. All rights reserved. 
*/ #ifndef __INTEL_SOC_PMIC_MRFLD_H__ #define __INTEL_SOC_PMIC_MRFLD_H__ #include <linux/bits.h> #define BCOVE_ID 0x00 #define BCOVE_ID_MINREV0 GENMASK(2, 0) #define BCOVE_ID_MAJREV0 GENMASK(5, 3) #define BCOVE_ID_VENDID0 GENMASK(7, 6) #define BCOVE_MINOR(x) (unsigned int)(((x) & BCOVE_ID_MINREV0) >> 0) #define BCOVE_MAJOR(x) (unsigned int)(((x) & BCOVE_ID_MAJREV0) >> 3) #define BCOVE_VENDOR(x) (unsigned int)(((x) & BCOVE_ID_VENDID0) >> 6) #define BCOVE_IRQLVL1 0x01 #define BCOVE_PBIRQ 0x02 #define BCOVE_TMUIRQ 0x03 #define BCOVE_THRMIRQ 0x04 #define BCOVE_BCUIRQ 0x05 #define BCOVE_ADCIRQ 0x06 #define BCOVE_CHGRIRQ0 0x07 #define BCOVE_CHGRIRQ1 0x08 #define BCOVE_GPIOIRQ 0x09 #define BCOVE_CRITIRQ 0x0B #define BCOVE_MIRQLVL1 0x0C #define BCOVE_MPBIRQ 0x0D #define BCOVE_MTMUIRQ 0x0E #define BCOVE_MTHRMIRQ 0x0F #define BCOVE_MBCUIRQ 0x10 #define BCOVE_MADCIRQ 0x11 #define BCOVE_MCHGRIRQ0 0x12 #define BCOVE_MCHGRIRQ1 0x13 #define BCOVE_MGPIOIRQ 0x14 #define BCOVE_MCRITIRQ 0x16 #define BCOVE_SCHGRIRQ0 0x4E #define BCOVE_SCHGRIRQ1 0x4F /* Level 1 IRQs */ #define BCOVE_LVL1_PWRBTN BIT(0) /* power button */ #define BCOVE_LVL1_TMU BIT(1) /* time management unit */ #define BCOVE_LVL1_THRM BIT(2) /* thermal */ #define BCOVE_LVL1_BCU BIT(3) /* burst control unit */ #define BCOVE_LVL1_ADC BIT(4) /* ADC */ #define BCOVE_LVL1_CHGR BIT(5) /* charger */ #define BCOVE_LVL1_GPIO BIT(6) /* GPIO */ #define BCOVE_LVL1_CRIT BIT(7) /* critical event */ /* Level 2 IRQs: power button */ #define BCOVE_PBIRQ_PBTN BIT(0) #define BCOVE_PBIRQ_UBTN BIT(1) /* Level 2 IRQs: ADC */ #define BCOVE_ADCIRQ_BATTEMP BIT(2) #define BCOVE_ADCIRQ_SYSTEMP BIT(3) #define BCOVE_ADCIRQ_BATTID BIT(4) #define BCOVE_ADCIRQ_VIBATT BIT(5) #define BCOVE_ADCIRQ_CCTICK BIT(7) /* Level 2 IRQs: charger */ #define BCOVE_CHGRIRQ_BAT0ALRT BIT(4) #define BCOVE_CHGRIRQ_BAT1ALRT BIT(5) #define BCOVE_CHGRIRQ_BATCRIT BIT(6) #define BCOVE_CHGRIRQ_VBUSDET BIT(0) #define BCOVE_CHGRIRQ_DCDET BIT(1) #define BCOVE_CHGRIRQ_BATTDET BIT(2) #define BCOVE_CHGRIRQ_USBIDDET BIT(3) #endif /* __INTEL_SOC_PMIC_MRFLD_H__ */ mfd/max14577-private.h 0000644 00000036640 14722070374 0010347 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0+ */ /* * max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip * * Copyright (C) 2014 Samsung Electronics * Chanwoo Choi <cw00.choi@samsung.com> * Krzysztof Kozlowski <krzk@kernel.org> */ #ifndef __MAX14577_PRIVATE_H__ #define __MAX14577_PRIVATE_H__ #include <linux/i2c.h> #include <linux/regmap.h> #define I2C_ADDR_PMIC (0x46 >> 1) #define I2C_ADDR_MUIC (0x4A >> 1) #define I2C_ADDR_FG (0x6C >> 1) enum maxim_device_type { MAXIM_DEVICE_TYPE_UNKNOWN = 0, MAXIM_DEVICE_TYPE_MAX14577, MAXIM_DEVICE_TYPE_MAX77836, MAXIM_DEVICE_TYPE_NUM, }; /* Slave addr = 0x4A: MUIC and Charger */ enum max14577_reg { MAX14577_REG_DEVICEID = 0x00, MAX14577_REG_INT1 = 0x01, MAX14577_REG_INT2 = 0x02, MAX14577_REG_INT3 = 0x03, MAX14577_REG_STATUS1 = 0x04, MAX14577_REG_STATUS2 = 0x05, MAX14577_REG_STATUS3 = 0x06, MAX14577_REG_INTMASK1 = 0x07, MAX14577_REG_INTMASK2 = 0x08, MAX14577_REG_INTMASK3 = 0x09, MAX14577_REG_CDETCTRL1 = 0x0A, MAX14577_REG_RFU = 0x0B, MAX14577_REG_CONTROL1 = 0x0C, MAX14577_REG_CONTROL2 = 0x0D, MAX14577_REG_CONTROL3 = 0x0E, MAX14577_REG_CHGCTRL1 = 0x0F, MAX14577_REG_CHGCTRL2 = 0x10, MAX14577_REG_CHGCTRL3 = 0x11, MAX14577_REG_CHGCTRL4 = 0x12, MAX14577_REG_CHGCTRL5 = 0x13, MAX14577_REG_CHGCTRL6 = 0x14, MAX14577_REG_CHGCTRL7 = 0x15, MAX14577_REG_END, }; /* Slave addr = 0x4A: MUIC */ enum max14577_muic_reg { MAX14577_MUIC_REG_STATUS1 = 
0x04, MAX14577_MUIC_REG_STATUS2 = 0x05, MAX14577_MUIC_REG_CONTROL1 = 0x0C, MAX14577_MUIC_REG_CONTROL3 = 0x0E, MAX14577_MUIC_REG_END, }; /* * Combined charger types for max14577 and max77836. * * On max14577, the three lower bits map to the STATUS2/CHGTYP field. * However, the max77836 differs in the last two values of STATUS2/CHGTYP. * To indicate the difference, the enum has two additional values for * max77836. These values are simply the register value bitwise ORed * with 0x8. */ enum max14577_muic_charger_type { MAX14577_CHARGER_TYPE_NONE = 0x0, MAX14577_CHARGER_TYPE_USB = 0x1, MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT = 0x2, MAX14577_CHARGER_TYPE_DEDICATED_CHG = 0x3, MAX14577_CHARGER_TYPE_SPECIAL_500MA = 0x4, /* Special 1A or 2A charger */ MAX14577_CHARGER_TYPE_SPECIAL_1A = 0x5, /* max14577: reserved, used on max77836 */ MAX14577_CHARGER_TYPE_RESERVED = 0x6, /* max14577: dead-battery charging with maximum current 100mA */ MAX14577_CHARGER_TYPE_DEAD_BATTERY = 0x7, /* * max77836: special charger (bias on D+/D-), * matches register value of 0x6 */ MAX77836_CHARGER_TYPE_SPECIAL_BIAS = 0xe, /* max77836: reserved, register value 0x7 */ MAX77836_CHARGER_TYPE_RESERVED = 0xf, }; /* MAX14577 interrupts */ #define MAX14577_INT1_ADC_MASK BIT(0) #define MAX14577_INT1_ADCLOW_MASK BIT(1) #define MAX14577_INT1_ADCERR_MASK BIT(2) #define MAX77836_INT1_ADC1K_MASK BIT(3) #define MAX14577_INT2_CHGTYP_MASK BIT(0) #define MAX14577_INT2_CHGDETRUN_MASK BIT(1) #define MAX14577_INT2_DCDTMR_MASK BIT(2) #define MAX14577_INT2_DBCHG_MASK BIT(3) #define MAX14577_INT2_VBVOLT_MASK BIT(4) #define MAX77836_INT2_VIDRM_MASK BIT(5) #define MAX14577_INT3_EOC_MASK BIT(0) #define MAX14577_INT3_CGMBC_MASK BIT(1) #define MAX14577_INT3_OVP_MASK BIT(2) #define MAX14577_INT3_MBCCHGERR_MASK BIT(3) /* MAX14577 DEVICE ID register */ #define DEVID_VENDORID_SHIFT 0 #define DEVID_DEVICEID_SHIFT 3 #define DEVID_VENDORID_MASK (0x07 << DEVID_VENDORID_SHIFT) #define DEVID_DEVICEID_MASK (0x1f << DEVID_DEVICEID_SHIFT) /* MAX14577 STATUS1 register */ #define STATUS1_ADC_SHIFT 0 #define STATUS1_ADCLOW_SHIFT 5 #define STATUS1_ADCERR_SHIFT 6 #define MAX77836_STATUS1_ADC1K_SHIFT 7 #define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) #define STATUS1_ADCLOW_MASK BIT(STATUS1_ADCLOW_SHIFT) #define STATUS1_ADCERR_MASK BIT(STATUS1_ADCERR_SHIFT) #define MAX77836_STATUS1_ADC1K_MASK BIT(MAX77836_STATUS1_ADC1K_SHIFT) /* MAX14577 STATUS2 register */ #define STATUS2_CHGTYP_SHIFT 0 #define STATUS2_CHGDETRUN_SHIFT 3 #define STATUS2_DCDTMR_SHIFT 4 #define MAX14577_STATUS2_DBCHG_SHIFT 5 #define MAX77836_STATUS2_DXOVP_SHIFT 5 #define STATUS2_VBVOLT_SHIFT 6 #define MAX77836_STATUS2_VIDRM_SHIFT 7 #define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) #define STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT) #define STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT) #define MAX14577_STATUS2_DBCHG_MASK BIT(MAX14577_STATUS2_DBCHG_SHIFT) #define MAX77836_STATUS2_DXOVP_MASK BIT(MAX77836_STATUS2_DXOVP_SHIFT) #define STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT) #define MAX77836_STATUS2_VIDRM_MASK BIT(MAX77836_STATUS2_VIDRM_SHIFT) /* MAX14577 CONTROL1 register */ #define COMN1SW_SHIFT 0 #define COMP2SW_SHIFT 3 #define MICEN_SHIFT 6 #define IDBEN_SHIFT 7 #define COMN1SW_MASK (0x7 << COMN1SW_SHIFT) #define COMP2SW_MASK (0x7 << COMP2SW_SHIFT) #define MICEN_MASK BIT(MICEN_SHIFT) #define IDBEN_MASK BIT(IDBEN_SHIFT) #define CLEAR_IDBEN_MICEN_MASK (COMN1SW_MASK | COMP2SW_MASK) #define CTRL1_SW_USB ((1 << COMP2SW_SHIFT) \ | (1 << COMN1SW_SHIFT)) #define CTRL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \ | (2 << 
COMN1SW_SHIFT)) #define CTRL1_SW_UART ((3 << COMP2SW_SHIFT) \ | (3 << COMN1SW_SHIFT)) #define CTRL1_SW_OPEN ((0 << COMP2SW_SHIFT) \ | (0 << COMN1SW_SHIFT)) /* MAX14577 CONTROL2 register */ #define CTRL2_LOWPWR_SHIFT (0) #define CTRL2_ADCEN_SHIFT (1) #define CTRL2_CPEN_SHIFT (2) #define CTRL2_SFOUTASRT_SHIFT (3) #define CTRL2_SFOUTORD_SHIFT (4) #define CTRL2_ACCDET_SHIFT (5) #define CTRL2_USBCPINT_SHIFT (6) #define CTRL2_RCPS_SHIFT (7) #define CTRL2_LOWPWR_MASK BIT(CTRL2_LOWPWR_SHIFT) #define CTRL2_ADCEN_MASK BIT(CTRL2_ADCEN_SHIFT) #define CTRL2_CPEN_MASK BIT(CTRL2_CPEN_SHIFT) #define CTRL2_SFOUTASRT_MASK BIT(CTRL2_SFOUTASRT_SHIFT) #define CTRL2_SFOUTORD_MASK BIT(CTRL2_SFOUTORD_SHIFT) #define CTRL2_ACCDET_MASK BIT(CTRL2_ACCDET_SHIFT) #define CTRL2_USBCPINT_MASK BIT(CTRL2_USBCPINT_SHIFT) #define CTRL2_RCPS_MASK BIT(CTRL2_RCPS_SHIFT) #define CTRL2_CPEN1_LOWPWR0 ((1 << CTRL2_CPEN_SHIFT) | \ (0 << CTRL2_LOWPWR_SHIFT)) #define CTRL2_CPEN0_LOWPWR1 ((0 << CTRL2_CPEN_SHIFT) | \ (1 << CTRL2_LOWPWR_SHIFT)) /* MAX14577 CONTROL3 register */ #define CTRL3_JIGSET_SHIFT 0 #define CTRL3_BOOTSET_SHIFT 2 #define CTRL3_ADCDBSET_SHIFT 4 #define CTRL3_WBTH_SHIFT 6 #define CTRL3_JIGSET_MASK (0x3 << CTRL3_JIGSET_SHIFT) #define CTRL3_BOOTSET_MASK (0x3 << CTRL3_BOOTSET_SHIFT) #define CTRL3_ADCDBSET_MASK (0x3 << CTRL3_ADCDBSET_SHIFT) #define CTRL3_WBTH_MASK (0x3 << CTRL3_WBTH_SHIFT) /* Slave addr = 0x4A: Charger */ enum max14577_charger_reg { MAX14577_CHG_REG_STATUS3 = 0x06, MAX14577_CHG_REG_CHG_CTRL1 = 0x0F, MAX14577_CHG_REG_CHG_CTRL2 = 0x10, MAX14577_CHG_REG_CHG_CTRL3 = 0x11, MAX14577_CHG_REG_CHG_CTRL4 = 0x12, MAX14577_CHG_REG_CHG_CTRL5 = 0x13, MAX14577_CHG_REG_CHG_CTRL6 = 0x14, MAX14577_CHG_REG_CHG_CTRL7 = 0x15, MAX14577_CHG_REG_END, }; /* MAX14577 STATUS3 register */ #define STATUS3_EOC_SHIFT 0 #define STATUS3_CGMBC_SHIFT 1 #define STATUS3_OVP_SHIFT 2 #define STATUS3_MBCCHGERR_SHIFT 3 #define STATUS3_EOC_MASK (0x1 << STATUS3_EOC_SHIFT) #define STATUS3_CGMBC_MASK (0x1 << STATUS3_CGMBC_SHIFT) #define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT) #define STATUS3_MBCCHGERR_MASK (0x1 << STATUS3_MBCCHGERR_SHIFT) /* MAX14577 CDETCTRL1 register */ #define CDETCTRL1_CHGDETEN_SHIFT 0 #define CDETCTRL1_CHGTYPMAN_SHIFT 1 #define CDETCTRL1_DCDEN_SHIFT 2 #define CDETCTRL1_DCD2SCT_SHIFT 3 #define MAX14577_CDETCTRL1_DCHKTM_SHIFT 4 #define MAX77836_CDETCTRL1_CDDLY_SHIFT 4 #define MAX14577_CDETCTRL1_DBEXIT_SHIFT 5 #define MAX77836_CDETCTRL1_DCDCPL_SHIFT 5 #define CDETCTRL1_DBIDLE_SHIFT 6 #define CDETCTRL1_CDPDET_SHIFT 7 #define CDETCTRL1_CHGDETEN_MASK BIT(CDETCTRL1_CHGDETEN_SHIFT) #define CDETCTRL1_CHGTYPMAN_MASK BIT(CDETCTRL1_CHGTYPMAN_SHIFT) #define CDETCTRL1_DCDEN_MASK BIT(CDETCTRL1_DCDEN_SHIFT) #define CDETCTRL1_DCD2SCT_MASK BIT(CDETCTRL1_DCD2SCT_SHIFT) #define MAX14577_CDETCTRL1_DCHKTM_MASK BIT(MAX14577_CDETCTRL1_DCHKTM_SHIFT) #define MAX77836_CDETCTRL1_CDDLY_MASK BIT(MAX77836_CDETCTRL1_CDDLY_SHIFT) #define MAX14577_CDETCTRL1_DBEXIT_MASK BIT(MAX14577_CDETCTRL1_DBEXIT_SHIFT) #define MAX77836_CDETCTRL1_DCDCPL_MASK BIT(MAX77836_CDETCTRL1_DCDCPL_SHIFT) #define CDETCTRL1_DBIDLE_MASK BIT(CDETCTRL1_DBIDLE_SHIFT) #define CDETCTRL1_CDPDET_MASK BIT(CDETCTRL1_CDPDET_SHIFT) /* MAX14577 CHGCTRL1 register */ #define CHGCTRL1_TCHW_SHIFT 4 #define CHGCTRL1_TCHW_MASK (0x7 << CHGCTRL1_TCHW_SHIFT) /* MAX14577 CHGCTRL2 register */ #define CHGCTRL2_MBCHOSTEN_SHIFT 6 #define CHGCTRL2_MBCHOSTEN_MASK BIT(CHGCTRL2_MBCHOSTEN_SHIFT) #define CHGCTRL2_VCHGR_RC_SHIFT 7 #define CHGCTRL2_VCHGR_RC_MASK BIT(CHGCTRL2_VCHGR_RC_SHIFT)
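/*
 * Illustrative sketch (not an API provided by this header): switching
 * the battery charger on or off by toggling MBCHOSTEN and VCHGR_RC
 * together, using the CHGCTRL2 definitions above. The function name is
 * an example.
 */
static inline int max14577_charger_set_enabled(struct regmap *map, bool enable)
{
	u8 mask = CHGCTRL2_MBCHOSTEN_MASK | CHGCTRL2_VCHGR_RC_MASK;

	return regmap_update_bits(map, MAX14577_CHG_REG_CHG_CTRL2, mask,
				  enable ? mask : 0);
}
/* MAX14577 CHGCTRL3 register 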
*/ #define CHGCTRL3_MBCCVWRC_SHIFT 0 #define CHGCTRL3_MBCCVWRC_MASK (0xf << CHGCTRL3_MBCCVWRC_SHIFT) /* MAX14577 CHGCTRL4 register */ #define CHGCTRL4_MBCICHWRCH_SHIFT 0 #define CHGCTRL4_MBCICHWRCH_MASK (0xf << CHGCTRL4_MBCICHWRCH_SHIFT) #define CHGCTRL4_MBCICHWRCL_SHIFT 4 #define CHGCTRL4_MBCICHWRCL_MASK BIT(CHGCTRL4_MBCICHWRCL_SHIFT) /* MAX14577 CHGCTRL5 register */ #define CHGCTRL5_EOCS_SHIFT 0 #define CHGCTRL5_EOCS_MASK (0xf << CHGCTRL5_EOCS_SHIFT) /* MAX14577 CHGCTRL6 register */ #define CHGCTRL6_AUTOSTOP_SHIFT 5 #define CHGCTRL6_AUTOSTOP_MASK BIT(CHGCTRL6_AUTOSTOP_SHIFT) /* MAX14577 CHGCTRL7 register */ #define CHGCTRL7_OTPCGHCVS_SHIFT 0 #define CHGCTRL7_OTPCGHCVS_MASK (0x3 << CHGCTRL7_OTPCGHCVS_SHIFT) /* MAX14577 charger current limits (as in CHGCTRL4 register), uA */ #define MAX14577_CHARGER_CURRENT_LIMIT_MIN 90000U #define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_START 200000U #define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_STEP 50000U #define MAX14577_CHARGER_CURRENT_LIMIT_MAX 950000U /* MAX77836 charger current limits (as in CHGCTRL4 register), uA */ #define MAX77836_CHARGER_CURRENT_LIMIT_MIN 45000U #define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_START 100000U #define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_STEP 25000U #define MAX77836_CHARGER_CURRENT_LIMIT_MAX 475000U /* * MAX14577 charger End-Of-Charge current limits * (as in CHGCTRL5 register), uA */ #define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MIN 50000U #define MAX14577_CHARGER_EOC_CURRENT_LIMIT_STEP 10000U #define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MAX 200000U /* * MAX14577/MAX77836 Battery Constant Voltage * (as in CHGCTRL3 register), uV */ #define MAXIM_CHARGER_CONSTANT_VOLTAGE_MIN 4000000U #define MAXIM_CHARGER_CONSTANT_VOLTAGE_STEP 20000U #define MAXIM_CHARGER_CONSTANT_VOLTAGE_MAX 4350000U /* Default value for fast charge timer, in hours */ #define MAXIM_CHARGER_FAST_CHARGE_TIMER_DEFAULT 5 /* MAX14577 regulator SFOUT LDO voltage, fixed, uV */ #define MAX14577_REGULATOR_SAFEOUT_VOLTAGE 4900000 /* MAX77836 regulator LDOx voltage, uV */ #define MAX77836_REGULATOR_LDO_VOLTAGE_MIN 800000 #define MAX77836_REGULATOR_LDO_VOLTAGE_MAX 3950000 #define MAX77836_REGULATOR_LDO_VOLTAGE_STEP 50000 #define MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM 64 /* Slave addr = 0x46: PMIC */ enum max77836_pmic_reg { MAX77836_PMIC_REG_PMIC_ID = 0x20, MAX77836_PMIC_REG_PMIC_REV = 0x21, MAX77836_PMIC_REG_INTSRC = 0x22, MAX77836_PMIC_REG_INTSRC_MASK = 0x23, MAX77836_PMIC_REG_TOPSYS_INT = 0x24, MAX77836_PMIC_REG_TOPSYS_INT_MASK = 0x26, MAX77836_PMIC_REG_TOPSYS_STAT = 0x28, MAX77836_PMIC_REG_MRSTB_CNTL = 0x2A, MAX77836_PMIC_REG_LSCNFG = 0x2B, MAX77836_LDO_REG_CNFG1_LDO1 = 0x51, MAX77836_LDO_REG_CNFG2_LDO1 = 0x52, MAX77836_LDO_REG_CNFG1_LDO2 = 0x53, MAX77836_LDO_REG_CNFG2_LDO2 = 0x54, MAX77836_LDO_REG_CNFG_LDO_BIAS = 0x55, MAX77836_COMP_REG_COMP1 = 0x60, MAX77836_PMIC_REG_END, }; #define MAX77836_INTSRC_MASK_TOP_INT_SHIFT 1 #define MAX77836_INTSRC_MASK_MUIC_CHG_INT_SHIFT 3 #define MAX77836_INTSRC_MASK_TOP_INT_MASK BIT(MAX77836_INTSRC_MASK_TOP_INT_SHIFT) #define MAX77836_INTSRC_MASK_MUIC_CHG_INT_MASK BIT(MAX77836_INTSRC_MASK_MUIC_CHG_INT_SHIFT) /* MAX77836 PMIC interrupts */ #define MAX77836_TOPSYS_INT_T120C_SHIFT 0 #define MAX77836_TOPSYS_INT_T140C_SHIFT 1 #define MAX77836_TOPSYS_INT_T120C_MASK BIT(MAX77836_TOPSYS_INT_T120C_SHIFT) #define MAX77836_TOPSYS_INT_T140C_MASK BIT(MAX77836_TOPSYS_INT_T140C_SHIFT) /* LDO1/LDO2 CONFIG1 register */ #define MAX77836_CNFG1_LDO_PWRMD_SHIFT 6 #define MAX77836_CNFG1_LDO_TV_SHIFT 0 #define MAX77836_CNFG1_LDO_PWRMD_MASK (0x3 << 
MAX77836_CNFG1_LDO_PWRMD_SHIFT) #define MAX77836_CNFG1_LDO_TV_MASK (0x3f << MAX77836_CNFG1_LDO_TV_SHIFT) /* LDO1/LDO2 CONFIG2 register */ #define MAX77836_CNFG2_LDO_OVCLMPEN_SHIFT 7 #define MAX77836_CNFG2_LDO_ALPMEN_SHIFT 6 #define MAX77836_CNFG2_LDO_COMP_SHIFT 4 #define MAX77836_CNFG2_LDO_POK_SHIFT 3 #define MAX77836_CNFG2_LDO_ADE_SHIFT 1 #define MAX77836_CNFG2_LDO_SS_SHIFT 0 #define MAX77836_CNFG2_LDO_OVCLMPEN_MASK BIT(MAX77836_CNFG2_LDO_OVCLMPEN_SHIFT) #define MAX77836_CNFG2_LDO_ALPMEN_MASK BIT(MAX77836_CNFG2_LDO_ALPMEN_SHIFT) #define MAX77836_CNFG2_LDO_COMP_MASK (0x3 << MAX77836_CNFG2_LDO_COMP_SHIFT) #define MAX77836_CNFG2_LDO_POK_MASK BIT(MAX77836_CNFG2_LDO_POK_SHIFT) #define MAX77836_CNFG2_LDO_ADE_MASK BIT(MAX77836_CNFG2_LDO_ADE_SHIFT) #define MAX77836_CNFG2_LDO_SS_MASK BIT(MAX77836_CNFG2_LDO_SS_SHIFT) /* Slave addr = 0x6C: Fuel-Gauge/Battery */ enum max77836_fg_reg { MAX77836_FG_REG_VCELL_MSB = 0x02, MAX77836_FG_REG_VCELL_LSB = 0x03, MAX77836_FG_REG_SOC_MSB = 0x04, MAX77836_FG_REG_SOC_LSB = 0x05, MAX77836_FG_REG_MODE_H = 0x06, MAX77836_FG_REG_MODE_L = 0x07, MAX77836_FG_REG_VERSION_MSB = 0x08, MAX77836_FG_REG_VERSION_LSB = 0x09, MAX77836_FG_REG_HIBRT_H = 0x0A, MAX77836_FG_REG_HIBRT_L = 0x0B, MAX77836_FG_REG_CONFIG_H = 0x0C, MAX77836_FG_REG_CONFIG_L = 0x0D, MAX77836_FG_REG_VALRT_MIN = 0x14, MAX77836_FG_REG_VALRT_MAX = 0x15, MAX77836_FG_REG_CRATE_MSB = 0x16, MAX77836_FG_REG_CRATE_LSB = 0x17, MAX77836_FG_REG_VRESET = 0x18, MAX77836_FG_REG_FGID = 0x19, MAX77836_FG_REG_STATUS_H = 0x1A, MAX77836_FG_REG_STATUS_L = 0x1B, /* * TODO: TABLE registers * TODO: CMD register */ MAX77836_FG_REG_END, }; enum max14577_irq { /* INT1 */ MAX14577_IRQ_INT1_ADC, MAX14577_IRQ_INT1_ADCLOW, MAX14577_IRQ_INT1_ADCERR, MAX77836_IRQ_INT1_ADC1K, /* INT2 */ MAX14577_IRQ_INT2_CHGTYP, MAX14577_IRQ_INT2_CHGDETRUN, MAX14577_IRQ_INT2_DCDTMR, MAX14577_IRQ_INT2_DBCHG, MAX14577_IRQ_INT2_VBVOLT, MAX77836_IRQ_INT2_VIDRM, /* INT3 */ MAX14577_IRQ_INT3_EOC, MAX14577_IRQ_INT3_CGMBC, MAX14577_IRQ_INT3_OVP, MAX14577_IRQ_INT3_MBCCHGERR, /* TOPSYS_INT, only MAX77836 */ MAX77836_IRQ_TOPSYS_T140C, MAX77836_IRQ_TOPSYS_T120C, MAX14577_IRQ_NUM, }; struct max14577 { struct device *dev; struct i2c_client *i2c; /* Slave addr = 0x4A */ struct i2c_client *i2c_pmic; /* Slave addr = 0x46 */ enum maxim_device_type dev_type; struct regmap *regmap; /* For MUIC and Charger */ struct regmap *regmap_pmic; struct regmap_irq_chip_data *irq_data; /* For MUIC and Charger */ struct regmap_irq_chip_data *irq_data_pmic; int irq; }; /* MAX14577 shared regmap API function */ static inline int max14577_read_reg(struct regmap *map, u8 reg, u8 *dest) { unsigned int val; int ret; ret = regmap_read(map, reg, &val); if (!ret) /* leave *dest untouched on a failed read */ *dest = val; return ret; } static inline int max14577_bulk_read(struct regmap *map, u8 reg, u8 *buf, int count) { return regmap_bulk_read(map, reg, buf, count); } static inline int max14577_write_reg(struct regmap *map, u8 reg, u8 value) { return regmap_write(map, reg, value); } static inline int max14577_bulk_write(struct regmap *map, u8 reg, u8 *buf, int count) { return regmap_bulk_write(map, reg, buf, count); } static inline int max14577_update_reg(struct regmap *map, u8 reg, u8 mask, u8 val) { return regmap_update_bits(map, reg, mask, val); }
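/*
 * Illustrative sketch (not an API provided by this header): reading the
 * device ID field through the shared regmap helpers above.
 */
static inline int max14577_get_device_id(struct max14577 *max14577, u8 *id)
{
	u8 reg;
	int ret;

	ret = max14577_read_reg(max14577->regmap, MAX14577_REG_DEVICEID, &reg);
	if (ret)
		return ret;

	*id = (reg & DEVID_DEVICEID_MASK) >> DEVID_DEVICEID_SHIFT;
	return 0;
}
#endif /* __MAX14577_PRIVATE_H__ */ mfd/ahc1ec0.h 0000644 00000023166 14722070374 0006705 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __LINUX_MFD_AHC1EC0_H #define __LINUX_MFD_AHC1EC0_H #include <linux/device.h> #define EC_COMMAND_PORT 0x29A /* EC I/O command port */ #define EC_STATUS_PORT 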
0x299 /* EC I/O data port */ #define EC_RETRY_UDELAY 200 /* EC command retry delay in microseconds */ #define EC_MAX_TIMEOUT_COUNT 5000 /* EC command max retry count */ #define EC_COMMAND_BIT_OBF 0x01 /* Bit 0 is for OBF ready (Output buffer full) */ #define EC_COMMAND_BIT_IBF 0x02 /* Bit 1 is for IBF ready (Input buffer full) */ /* Analog to digital converter command */ #define EC_AD_INDEX_WRITE 0x15 /* Write ADC port number into index */ #define EC_AD_LSB_READ 0x16 /* Read ADC LSB value from ADC port */ #define EC_AD_MSB_READ 0x1F /* Read ADC MSB value from ADC port */ /* Voltage device ID */ #define EC_DID_SMBOEM0 0x28 /* SMBUS/I2C. Smbus channel 0 */ #define EC_DID_CMOSBAT 0x50 /* CMOS coin battery voltage */ #define EC_DID_CMOSBAT_X2 0x51 /* CMOS coin battery voltage*2 */ #define EC_DID_CMOSBAT_X10 0x52 /* CMOS coin battery voltage*10 */ #define EC_DID_5VS0 0x56 /* 5VS0 voltage */ #define EC_DID_5VS0_X2 0x57 /* 5VS0 voltage*2 */ #define EC_DID_5VS0_X10 0x58 /* 5VS0 voltage*10 */ #define EC_DID_5VS5 0x59 /* 5VS5 voltage */ #define EC_DID_5VS5_X2 0x5A /* 5VS5 voltage*2 */ #define EC_DID_5VS5_X10 0x5B /* 5VS5 voltage*10 */ #define EC_DID_12VS0 0x62 /* 12VS0 voltage */ #define EC_DID_12VS0_X2 0x63 /* 12VS0 voltage*2 */ #define EC_DID_12VS0_X10 0x64 /* 12VS0 voltage*10 */ #define EC_DID_VCOREA 0x65 /* CPU A core voltage */ #define EC_DID_VCOREA_X2 0x66 /* CPU A core voltage*2 */ #define EC_DID_VCOREA_X10 0x67 /* CPU A core voltage*10 */ #define EC_DID_VCOREB 0x68 /* CPU B core voltage */ #define EC_DID_VCOREB_X2 0x69 /* CPU B core voltage*2 */ #define EC_DID_VCOREB_X10 0x6A /* CPU B core voltage*10 */ #define EC_DID_DC 0x6B /* ADC. onboard voltage */ #define EC_DID_DC_X2 0x6C /* ADC. onboard voltage*2 */ #define EC_DID_DC_X10 0x6D /* ADC. onboard voltage*10 */ /* Current device ID */ #define EC_DID_CURRENT 0x74 /* ACPI commands */ #define EC_ACPI_RAM_READ 0x80 #define EC_ACPI_RAM_WRITE 0x81 /* * Dynamic control table commands * The table includes HW pin number, Device ID, and Pin polarity */ #define EC_TBL_WRITE_ITEM 0x20 #define EC_TBL_GET_PIN 0x21 #define EC_TBL_GET_DEVID 0x22 #define EC_MAX_TBL_NUM 32 /* LED Device ID table */ #define EC_DID_LED_RUN 0xE1 #define EC_DID_LED_ERR 0xE2 #define EC_DID_LED_SYS_RECOVERY 0xE3 #define EC_DID_LED_D105_G 0xE4 #define EC_DID_LED_D106_G 0xE5 #define EC_DID_LED_D107_G 0xE6 /* LED control HW RAM address 0xA0-0xAF */ #define EC_HWRAM_LED_BASE_ADDR 0xA0 #define EC_HWRAM_LED_PIN(N) (EC_HWRAM_LED_BASE_ADDR + (4 * (N))) /* N: 0-3 */ #define EC_HWRAM_LED_CTRL_HIBYTE(N) (EC_HWRAM_LED_BASE_ADDR + (4 * (N)) + 1) #define EC_HWRAM_LED_CTRL_LOBYTE(N) (EC_HWRAM_LED_BASE_ADDR + (4 * (N)) + 2) #define EC_HWRAM_LED_DEVICE_ID(N) (EC_HWRAM_LED_BASE_ADDR + (4 * (N)) + 3) /* LED control bit */ #define LED_CTRL_ENABLE_BIT() BIT(4) #define LED_CTRL_INTCTL_BIT() BIT(5) #define LED_CTRL_LEDBIT_MASK (0x03FF << 6) #define LED_CTRL_POLARITY_MASK (0x000F << 0) #define LED_CTRL_INTCTL_EXTERNAL 0 #define LED_CTRL_INTCTL_INTERNAL 1 #define LED_DISABLE 0x0 #define LED_ON 0x1 #define LED_FAST 0x3 #define LED_NORMAL 0x5 #define LED_SLOW 0x7 #define LED_MANUAL 0xF #define LED_CTRL_LEDBIT_DISABLE 0x0000 #define LED_CTRL_LEDBIT_ON 0x03FF #define LED_CTRL_LEDBIT_FAST 0x02AA #define LED_CTRL_LEDBIT_NORMAL 0x0333 #define LED_CTRL_LEDBIT_SLOW 0x03E0 /* Get the device name */ #define AMI_ADVANTECH_BOARD_ID_LENGTH 32
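/*
 * Illustrative sketch (not an API provided by this header): the usual
 * pattern for issuing an EC command is to poll the status port until the
 * input buffer is free, then write the command byte. Assumes x86 port
 * I/O (inb/outb from <asm/io.h>), <linux/delay.h> and <linux/errno.h>.
 */
static inline int ahc1ec0_wait_and_send_cmd(u8 cmd)
{
	int i;

	for (i = 0; i < EC_MAX_TIMEOUT_COUNT; i++) {
		/* IBF clear means the EC has consumed the previous byte */
		if (!(inb(EC_STATUS_PORT) & EC_COMMAND_BIT_IBF)) {
			outb(cmd, EC_COMMAND_PORT);
			return 0;
		}
		udelay(EC_RETRY_UDELAY);
	}

	return -ETIMEDOUT;
}
/* * Advantech Embedded Controller watchdog commands. * The EC can send multi-stage watchdog events; the system can set up each * event independently to build an event sequence. 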
*/ #define EC_COMMANS_PORT_IBF_MASK 0x02 #define EC_RESET_EVENT 0x04 #define EC_WDT_START 0x28 #define EC_WDT_STOP 0x29 #define EC_WDT_RESET 0x2A #define EC_WDT_BOOTTMEWDT_STOP 0x2B #define EC_HW_RAM 0x89 #define EC_EVENT_FLAG 0x57 #define EC_ENABLE_DELAY_H 0x58 #define EC_ENABLE_DELAY_L 0x59 #define EC_POWER_BTN_TIME_H 0x5A #define EC_POWER_BTN_TIME_L 0x5B #define EC_RESET_DELAY_TIME_H 0x5E #define EC_RESET_DELAY_TIME_L 0x5F #define EC_PIN_DELAY_TIME_H 0x60 #define EC_PIN_DELAY_TIME_L 0x61 #define EC_SCI_DELAY_TIME_H 0x62 #define EC_SCI_DELAY_TIME_L 0x63 /* EC ACPI commands */ #define EC_ACPI_DATA_READ 0x80 #define EC_ACPI_DATA_WRITE 0x81 /* Brightness ACPI Addr */ #define BRIGHTNESS_ACPI_ADDR 0x50 /* EC HW RAM commands */ #define EC_HW_EXTEND_RAM_READ 0x86 #define EC_HW_EXTEND_RAM_WRITE 0x87 #define EC_HW_RAM_READ 0x88 #define EC_HW_RAM_WRITE 0x89 /* EC Smbus commands */ #define EC_SMBUS_CHANNEL_SET 0x8A /* Set selector number (SMBUS channel) */ #define EC_SMBUS_ENABLE_I2C 0x8C /* Enable channel I2C */ #define EC_SMBUS_DISABLE_I2C 0x8D /* Disable channel I2C */ /* Smbus transmit protocol */ #define EC_SMBUS_PROTOCOL 0x00 /* SMBUS status */ #define EC_SMBUS_STATUS 0x01 /* SMBUS device slave address (bit0 must be 0) */ #define EC_SMBUS_SLV_ADDR 0x02 /* SMBUS device command */ #define EC_SMBUS_CMD 0x03 /* 0x04-0x24 Data In read process, return data are stored in this address */ #define EC_SMBUS_DATA 0x04 #define EC_SMBUS_DAT_OFFSET(n) (EC_SMBUS_DATA + (n)) /* SMBUS channel selector (0-4) */ #define EC_SMBUS_CHANNEL 0x2B /* EC SMBUS transmit Protocol code */ #define SMBUS_QUICK_WRITE 0x02 /* Write Quick Command */ #define SMBUS_QUICK_READ 0x03 /* Read Quick Command */ #define SMBUS_BYTE_SEND 0x04 /* Send Byte */ #define SMBUS_BYTE_RECEIVE 0x05 /* Receive Byte */ #define SMBUS_BYTE_WRITE 0x06 /* Write Byte */ #define SMBUS_BYTE_READ 0x07 /* Read Byte */ #define SMBUS_WORD_WRITE 0x08 /* Write Word */ #define SMBUS_WORD_READ 0x09 /* Read Word */ #define SMBUS_BLOCK_WRITE 0x0A /* Write Block */ #define SMBUS_BLOCK_READ 0x0B /* Read Block */ #define SMBUS_PROC_CALL 0x0C /* Process Call */ #define SMBUS_BLOCK_PROC_CALL 0x0D /* Write Block-Read Block Process Call */ #define SMBUS_I2C_READ_WRITE 0x0E /* I2C block Read-Write */ #define SMBUS_I2C_WRITE_READ 0x0F /* I2C block Write-Read */ /* GPIO control commands */ #define EC_GPIO_INDEX_WRITE 0x10 #define EC_GPIO_STATUS_READ 0x11 #define EC_GPIO_STATUS_WRITE 0x12 #define EC_GPIO_DIR_READ 0x1D #define EC_GPIO_DIR_WRITE 0x1E /* One Key Recovery commands */ #define EC_ONE_KEY_FLAG 0x9C /* ASG OEM commands */ #define EC_ASG_OEM 0xEA #define EC_ASG_OEM_READ 0x00 #define EC_ASG_OEM_WRITE 0x01 #define EC_OEM_POWER_STATUS_VIN1 0X10 #define EC_OEM_POWER_STATUS_VIN2 0X11 #define EC_OEM_POWER_STATUS_BAT1 0X12 #define EC_OEM_POWER_STATUS_BAT2 0X13 /* GPIO DEVICE ID */ #define EC_DID_ALTGPIO_0 0x10 /* 0x10 AltGpio0 User define gpio */ #define EC_DID_ALTGPIO_1 0x11 /* 0x11 AltGpio1 User define gpio */ #define EC_DID_ALTGPIO_2 0x12 /* 0x12 AltGpio2 User define gpio */ #define EC_DID_ALTGPIO_3 0x13 /* 0x13 AltGpio3 User define gpio */ #define EC_DID_ALTGPIO_4 0x14 /* 0x14 AltGpio4 User define gpio */ #define EC_DID_ALTGPIO_5 0x15 /* 0x15 AltGpio5 User define gpio */ #define EC_DID_ALTGPIO_6 0x16 /* 0x16 AltGpio6 User define gpio */ #define EC_DID_ALTGPIO_7 0x17 /* 0x17 AltGpio7 User define gpio */ /* Lmsensor Chip Register */ #define NSLM96163_CHANNEL 0x02 /* NS_LM96163 address 0x98 */ #define NSLM96163_ADDR 0x98 /* LM96163 index(0x00) Local Temperature (Signed MSB) 
*/ #define NSLM96163_LOC_TEMP 0x00 /* HWMON registers */ #define INA266_REG_VOLTAGE 0x02 /* 1.25mV */ #define INA266_REG_POWER 0x03 /* 25mW */ #define INA266_REG_CURRENT 0x04 /* 1mA */ struct ec_hw_pin_table { unsigned int vbat[2]; unsigned int v5[2]; unsigned int v12[2]; unsigned int vcore[2]; unsigned int vdc[2]; unsigned int ec_current[2]; unsigned int power[2]; }; struct ec_dynamic_table { unsigned char device_id; unsigned char hw_pin_num; }; struct ec_smbuso_em0 { unsigned char hw_pin_num; }; struct pled_hw_pin_tbl { unsigned int pled[6]; }; struct adv_ec_platform_data { char *bios_product_name; int sub_dev_nb; u32 sub_dev_mask; struct mutex lock; struct device *dev; struct class *adv_ec_class; struct ec_dynamic_table *dym_tbl; }; int read_ad_value(struct adv_ec_platform_data *adv_ec_data, unsigned char hwpin, unsigned char multi); int read_acpi_value(struct adv_ec_platform_data *adv_ec_data, unsigned char addr, unsigned char *pvalue); int write_acpi_value(struct adv_ec_platform_data *adv_ec_data, unsigned char addr, unsigned char value); int read_hw_ram(struct adv_ec_platform_data *adv_ec_data, unsigned char addr, unsigned char *data); int write_hw_ram(struct adv_ec_platform_data *adv_ec_data, unsigned char addr, unsigned char data); int write_hwram_command(struct adv_ec_platform_data *adv_ec_data, unsigned char data); int read_gpio_status(struct adv_ec_platform_data *adv_ec_data, unsigned char PinNumber, unsigned char *pvalue); int write_gpio_status(struct adv_ec_platform_data *adv_ec_data, unsigned char PinNumber, unsigned char value); int read_gpio_dir(struct adv_ec_platform_data *adv_ec_data, unsigned char PinNumber, unsigned char *pvalue); int write_gpio_dir(struct adv_ec_platform_data *adv_ec_data, unsigned char PinNumber, unsigned char value); #endif /* __LINUX_MFD_AHC1EC0_H */ mfd/ucb1x00.h 0000644 00000014637 14722070374 0006666 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/include/mfd/ucb1x00.h * * Copyright (C) 2001 Russell King, All Rights Reserved. 
*/ #ifndef UCB1200_H #define UCB1200_H #include <linux/device.h> #include <linux/mfd/mcp.h> #include <linux/gpio.h> #include <linux/mutex.h> #define UCB_IO_DATA 0x00 #define UCB_IO_DIR 0x01 #define UCB_IO_0 (1 << 0) #define UCB_IO_1 (1 << 1) #define UCB_IO_2 (1 << 2) #define UCB_IO_3 (1 << 3) #define UCB_IO_4 (1 << 4) #define UCB_IO_5 (1 << 5) #define UCB_IO_6 (1 << 6) #define UCB_IO_7 (1 << 7) #define UCB_IO_8 (1 << 8) #define UCB_IO_9 (1 << 9) #define UCB_IE_RIS 0x02 #define UCB_IE_FAL 0x03 #define UCB_IE_STATUS 0x04 #define UCB_IE_CLEAR 0x04 #define UCB_IE_ADC (1 << 11) #define UCB_IE_TSPX (1 << 12) #define UCB_IE_TSMX (1 << 13) #define UCB_IE_TCLIP (1 << 14) #define UCB_IE_ACLIP (1 << 15) #define UCB_IRQ_TSPX 12 #define UCB_TC_A 0x05 #define UCB_TC_A_LOOP (1 << 7) /* UCB1200 */ #define UCB_TC_A_AMPL (1 << 7) /* UCB1300 */ #define UCB_TC_B 0x06 #define UCB_TC_B_VOICE_ENA (1 << 3) #define UCB_TC_B_CLIP (1 << 4) #define UCB_TC_B_ATT (1 << 6) #define UCB_TC_B_SIDE_ENA (1 << 11) #define UCB_TC_B_MUTE (1 << 13) #define UCB_TC_B_IN_ENA (1 << 14) #define UCB_TC_B_OUT_ENA (1 << 15) #define UCB_AC_A 0x07 #define UCB_AC_B 0x08 #define UCB_AC_B_LOOP (1 << 8) #define UCB_AC_B_MUTE (1 << 13) #define UCB_AC_B_IN_ENA (1 << 14) #define UCB_AC_B_OUT_ENA (1 << 15) #define UCB_TS_CR 0x09 #define UCB_TS_CR_TSMX_POW (1 << 0) #define UCB_TS_CR_TSPX_POW (1 << 1) #define UCB_TS_CR_TSMY_POW (1 << 2) #define UCB_TS_CR_TSPY_POW (1 << 3) #define UCB_TS_CR_TSMX_GND (1 << 4) #define UCB_TS_CR_TSPX_GND (1 << 5) #define UCB_TS_CR_TSMY_GND (1 << 6) #define UCB_TS_CR_TSPY_GND (1 << 7) #define UCB_TS_CR_MODE_INT (0 << 8) #define UCB_TS_CR_MODE_PRES (1 << 8) #define UCB_TS_CR_MODE_POS (2 << 8) #define UCB_TS_CR_BIAS_ENA (1 << 11) #define UCB_TS_CR_TSPX_LOW (1 << 12) #define UCB_TS_CR_TSMX_LOW (1 << 13) #define UCB_ADC_CR 0x0a #define UCB_ADC_SYNC_ENA (1 << 0) #define UCB_ADC_VREFBYP_CON (1 << 1) #define UCB_ADC_INP_TSPX (0 << 2) #define UCB_ADC_INP_TSMX (1 << 2) #define UCB_ADC_INP_TSPY (2 << 2) #define UCB_ADC_INP_TSMY (3 << 2) #define UCB_ADC_INP_AD0 (4 << 2) #define UCB_ADC_INP_AD1 (5 << 2) #define UCB_ADC_INP_AD2 (6 << 2) #define UCB_ADC_INP_AD3 (7 << 2) #define UCB_ADC_EXT_REF (1 << 5) #define UCB_ADC_START (1 << 7) #define UCB_ADC_ENA (1 << 15) #define UCB_ADC_DATA 0x0b #define UCB_ADC_DAT_VAL (1 << 15) #define UCB_ADC_DAT(x) (((x) & 0x7fe0) >> 5) #define UCB_ID 0x0c #define UCB_ID_1200 0x1004 #define UCB_ID_1300 0x1005 #define UCB_ID_TC35143 0x9712 #define UCB_MODE 0x0d #define UCB_MODE_DYN_VFLAG_ENA (1 << 12) #define UCB_MODE_AUD_OFF_CAN (1 << 13) enum ucb1x00_reset { UCB_RST_PROBE, UCB_RST_RESUME, UCB_RST_SUSPEND, UCB_RST_REMOVE, UCB_RST_PROBE_FAIL, }; struct ucb1x00_plat_data { void (*reset)(enum ucb1x00_reset); unsigned irq_base; int gpio_base; unsigned can_wakeup; }; struct ucb1x00 { raw_spinlock_t irq_lock; struct mcp *mcp; unsigned int irq; int irq_base; struct mutex adc_mutex; spinlock_t io_lock; u16 id; u16 io_dir; u16 io_out; u16 adc_cr; u16 irq_fal_enbl; u16 irq_ris_enbl; u16 irq_mask; u16 irq_wake; struct device dev; struct list_head node; struct list_head devs; struct gpio_chip gpio; }; struct ucb1x00_driver; struct ucb1x00_dev { struct list_head dev_node; struct list_head drv_node; struct ucb1x00 *ucb; struct ucb1x00_driver *drv; void *priv; }; struct ucb1x00_driver { struct list_head node; struct list_head devs; int (*add)(struct ucb1x00_dev *dev); void (*remove)(struct ucb1x00_dev *dev); int (*suspend)(struct ucb1x00_dev *dev); int (*resume)(struct ucb1x00_dev *dev); }; #define 
classdev_to_ucb1x00(cd) container_of(cd, struct ucb1x00, dev) int ucb1x00_register_driver(struct ucb1x00_driver *); void ucb1x00_unregister_driver(struct ucb1x00_driver *); /** * ucb1x00_clkrate - return the UCB1x00 SIB clock rate * @ucb: UCB1x00 structure describing chip * * Return the SIB clock rate in Hz. */ static inline unsigned int ucb1x00_clkrate(struct ucb1x00 *ucb) { return mcp_get_sclk_rate(ucb->mcp); } /** * ucb1x00_enable - enable the UCB1x00 SIB clock * @ucb: UCB1x00 structure describing chip * * Enable the SIB clock. This can be called multiple times. */ static inline void ucb1x00_enable(struct ucb1x00 *ucb) { mcp_enable(ucb->mcp); } /** * ucb1x00_disable - disable the UCB1x00 SIB clock * @ucb: UCB1x00 structure describing chip * * Disable the SIB clock. The SIB clock will only be disabled * when the number of ucb1x00_enable calls match the number of * ucb1x00_disable calls. */ static inline void ucb1x00_disable(struct ucb1x00 *ucb) { mcp_disable(ucb->mcp); } /** * ucb1x00_reg_write - write a UCB1x00 register * @ucb: UCB1x00 structure describing chip * @reg: UCB1x00 4-bit register index to write * @val: UCB1x00 16-bit value to write * * Write the UCB1x00 register @reg with value @val. The SIB * clock must be running for this function to return. */ static inline void ucb1x00_reg_write(struct ucb1x00 *ucb, unsigned int reg, unsigned int val) { mcp_reg_write(ucb->mcp, reg, val); } /** * ucb1x00_reg_read - read a UCB1x00 register * @ucb: UCB1x00 structure describing chip * @reg: UCB1x00 4-bit register index to read * * Read the UCB1x00 register @reg and return its value. The SIB * clock must be running for this function to return. */ static inline unsigned int ucb1x00_reg_read(struct ucb1x00 *ucb, unsigned int reg) { return mcp_reg_read(ucb->mcp, reg); } /** * ucb1x00_set_audio_divisor - set the SIB audio divisor * @ucb: UCB1x00 structure describing chip * @div: SIB clock divisor */ static inline void ucb1x00_set_audio_divisor(struct ucb1x00 *ucb, unsigned int div) { mcp_set_audio_divisor(ucb->mcp, div); } /** * ucb1x00_set_telecom_divisor - set the SIB telecom divisor * @ucb: UCB1x00 structure describing chip * @div: SIB clock divisor */ static inline void ucb1x00_set_telecom_divisor(struct ucb1x00 *ucb, unsigned int div) { mcp_set_telecom_divisor(ucb->mcp, div); } void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int, unsigned int); void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int, unsigned int); unsigned int ucb1x00_io_read(struct ucb1x00 *ucb); #define UCB_NOSYNC (0) #define UCB_SYNC (1) unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync); void ucb1x00_adc_enable(struct ucb1x00 *ucb); void ucb1x00_adc_disable(struct ucb1x00 *ucb);
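/*
 * Illustrative sketch (not an API provided by this header): a one-shot,
 * unsynchronized ADC conversion on AD0, assuming the SIB clock has
 * already been enabled with ucb1x00_enable().
 */
static inline unsigned int ucb1x00_read_ad0(struct ucb1x00 *ucb)
{
	unsigned int val;

	ucb1x00_adc_enable(ucb);
	val = ucb1x00_adc_read(ucb, UCB_ADC_INP_AD0, UCB_NOSYNC);
	ucb1x00_adc_disable(ucb);

	return val;
}
#endif mfd/max77650.h 0000644 00000003537 14722070374 0006677 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018 BayLibre SAS * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com> * * Common definitions for MAXIM 77650/77651 charger/power-supply. 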
*/ #ifndef MAX77650_H #define MAX77650_H #include <linux/bits.h> #define MAX77650_REG_INT_GLBL 0x00 #define MAX77650_REG_INT_CHG 0x01 #define MAX77650_REG_STAT_CHG_A 0x02 #define MAX77650_REG_STAT_CHG_B 0x03 #define MAX77650_REG_ERCFLAG 0x04 #define MAX77650_REG_STAT_GLBL 0x05 #define MAX77650_REG_INTM_GLBL 0x06 #define MAX77650_REG_INTM_CHG 0x07 #define MAX77650_REG_CNFG_GLBL 0x10 #define MAX77650_REG_CID 0x11 #define MAX77650_REG_CNFG_GPIO 0x12 #define MAX77650_REG_CNFG_CHG_A 0x18 #define MAX77650_REG_CNFG_CHG_B 0x19 #define MAX77650_REG_CNFG_CHG_C 0x1a #define MAX77650_REG_CNFG_CHG_D 0x1b #define MAX77650_REG_CNFG_CHG_E 0x1c #define MAX77650_REG_CNFG_CHG_F 0x1d #define MAX77650_REG_CNFG_CHG_G 0x1e #define MAX77650_REG_CNFG_CHG_H 0x1f #define MAX77650_REG_CNFG_CHG_I 0x20 #define MAX77650_REG_CNFG_SBB_TOP 0x28 #define MAX77650_REG_CNFG_SBB0_A 0x29 #define MAX77650_REG_CNFG_SBB0_B 0x2a #define MAX77650_REG_CNFG_SBB1_A 0x2b #define MAX77650_REG_CNFG_SBB1_B 0x2c #define MAX77650_REG_CNFG_SBB2_A 0x2d #define MAX77650_REG_CNFG_SBB2_B 0x2e #define MAX77650_REG_CNFG_LDO_A 0x38 #define MAX77650_REG_CNFG_LDO_B 0x39 #define MAX77650_REG_CNFG_LED0_A 0x40 #define MAX77650_REG_CNFG_LED1_A 0x41 #define MAX77650_REG_CNFG_LED2_A 0x42 #define MAX77650_REG_CNFG_LED0_B 0x43 #define MAX77650_REG_CNFG_LED1_B 0x44 #define MAX77650_REG_CNFG_LED2_B 0x45 #define MAX77650_REG_CNFG_LED_TOP 0x46 #define MAX77650_CID_MASK GENMASK(3, 0) #define MAX77650_CID_BITS(_reg) ((_reg) & MAX77650_CID_MASK) #define MAX77650_CID_77650A 0x03 #define MAX77650_CID_77650C 0x0a #define MAX77650_CID_77651A 0x06 #define MAX77650_CID_77651B 0x08
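/*
 * Illustrative sketch (not an API provided by this header): matching a
 * CID register value, as read by the caller from MAX77650_REG_CID,
 * against the known chip IDs above. Assumes <linux/types.h> for bool.
 */
static inline bool max77650_cid_is_known(unsigned int cid)
{
	switch (MAX77650_CID_BITS(cid)) {
	case MAX77650_CID_77650A:
	case MAX77650_CID_77650C:
	case MAX77650_CID_77651A:
	case MAX77650_CID_77651B:
		return true;
	default:
		return false;
	}
}
#endif /* MAX77650_H */ mfd/arizona/pdata.h 0000644 00000013223 14722070374 0010226 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for Arizona devices * * Copyright 2012 Wolfson Microelectronics plc 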
*/ #ifndef _ARIZONA_PDATA_H #define _ARIZONA_PDATA_H #include <dt-bindings/mfd/arizona.h> #include <linux/regulator/arizona-ldo1.h> #include <linux/regulator/arizona-micsupp.h> #define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */ #define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */ #define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */ #define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */ #define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */ #define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */ #define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */ #define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */ #define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */ #define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */ #define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */ #define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */ #define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */ #define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */ #define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */ #define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */ #define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */ #define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */ #define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */ #define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */ #define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */ #define ARIZONA_GPN_FN_MASK 0x007F /* GPN_FN - [6:0] */ #define ARIZONA_GPN_FN_SHIFT 0 /* GPN_FN - [6:0] */ #define ARIZONA_GPN_FN_WIDTH 7 /* GPN_FN - [6:0] */ #define ARIZONA_MAX_GPIO 5 #define ARIZONA_MAX_INPUT 4 #define ARIZONA_MAX_MICBIAS 3 #define ARIZONA_MAX_OUTPUT 6 #define ARIZONA_MAX_AIF 3 #define ARIZONA_HAP_ACT_ERM 0 #define ARIZONA_HAP_ACT_LRA 2 #define ARIZONA_MAX_PDM_SPK 2 struct regulator_init_data; struct gpio_desc; struct arizona_micbias { int mV; /** Regulated voltage */ unsigned int ext_cap:1; /** External capacitor fitted */ unsigned int discharge:1; /** Actively discharge */ unsigned int soft_start:1; /** Disable aggressive startup ramp rate */ unsigned int bypass:1; /** Use bypass mode */ }; struct arizona_micd_config { unsigned int src; unsigned int bias; bool gpio; }; struct arizona_micd_range { int max; /** Ohms */ int key; /** Key to report to input layer */ }; struct arizona_pdata { struct gpio_desc *reset; /** GPIO controlling /RESET, if any */ /** Regulator configuration for MICVDD */ struct arizona_micsupp_pdata micvdd; /** Regulator configuration for LDO1 */ struct arizona_ldo1_pdata ldo1; /** If a direct 32kHz clock is provided on an MCLK specify it here */ int clk32k_src; /** Mode for primary IRQ (defaults to active low) */ unsigned int irq_flags; /* Base GPIO */ int gpio_base; /** Pin state for GPIO pins */ unsigned int gpio_defaults[ARIZONA_MAX_GPIO]; /** * Maximum number of channels that clocks will be generated for; * useful for systems where an I2S bus with multiple data * lines is mastered. 
*/ unsigned int max_channels_clocked[ARIZONA_MAX_AIF]; /** GPIO5 is used for jack detection */ bool jd_gpio5; /** Internal pull on GPIO5 is disabled when used for jack detection */ bool jd_gpio5_nopull; /** Set to true if jackdet contact opens on insert */ bool jd_invert; /** Use the headphone detect circuit to identify the accessory */ bool hpdet_acc_id; /** Check for line output with HPDET method */ bool hpdet_acc_id_line; /** GPIO used for mic isolation with HPDET */ int hpdet_id_gpio; /** Channel to use for headphone detection */ unsigned int hpdet_channel; /** Use software comparison to determine mic presence */ bool micd_software_compare; /** Extra debounce timeout used during initial mic detection (ms) */ unsigned int micd_detect_debounce; /** GPIO for mic detection polarity */ int micd_pol_gpio; /** Mic detect ramp rate */ unsigned int micd_bias_start_time; /** Mic detect sample rate */ unsigned int micd_rate; /** Mic detect debounce level */ unsigned int micd_dbtime; /** Mic detect timeout (ms) */ unsigned int micd_timeout; /** Force MICBIAS on for mic detect */ bool micd_force_micbias; /** Mic detect level parameters */ const struct arizona_micd_range *micd_ranges; int num_micd_ranges; /** Headset polarity configurations */ struct arizona_micd_config *micd_configs; int num_micd_configs; /** Reference voltage for DMIC inputs */ int dmic_ref[ARIZONA_MAX_INPUT]; /** MICBIAS configurations */ struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS]; /** * Mode of input structures * One of the ARIZONA_INMODE_xxx values * wm5102/wm5110/wm8280/wm8997: [0]=IN1 [1]=IN2 [2]=IN3 [3]=IN4 * wm8998: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B */ int inmode[ARIZONA_MAX_INPUT]; /** Mode for outputs */ int out_mono[ARIZONA_MAX_OUTPUT]; /** Limit output volumes */ unsigned int out_vol_limit[2 * ARIZONA_MAX_OUTPUT]; /** PDM speaker mute setting */ unsigned int spk_mute[ARIZONA_MAX_PDM_SPK]; /** PDM speaker format */ unsigned int spk_fmt[ARIZONA_MAX_PDM_SPK]; /** Haptic actuator type */ unsigned int hap_act; /** GPIO for primary IRQ (used for edge triggered emulation) */ int irq_gpio; /** General purpose switch control */ unsigned int gpsw; };
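/*
 * Illustrative sketch (not an API provided by this header): decoding the
 * function field of one gpio_defaults[] word with the GPN_* masks above.
 */
static inline unsigned int arizona_pdata_gpio_fn(unsigned int gpio_default)
{
	return (gpio_default & ARIZONA_GPN_FN_MASK) >> ARIZONA_GPN_FN_SHIFT;
}
#endif mfd/arizona/registers.h 0000644 00001672136 14722070374 0011163 0 ustar 00 /* SPDX-License-Identifier: GPL-2.0-only */ /* * ARIZONA register definitions * * Copyright 2012 Wolfson Microelectronics plc * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> */ #ifndef _ARIZONA_REGISTERS_H #define _ARIZONA_REGISTERS_H /* * Register values. 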
*/ #define ARIZONA_SOFTWARE_RESET 0x00 #define ARIZONA_DEVICE_REVISION 0x01 #define ARIZONA_CTRL_IF_SPI_CFG_1 0x08 #define ARIZONA_CTRL_IF_I2C1_CFG_1 0x09 #define ARIZONA_CTRL_IF_I2C2_CFG_1 0x0A #define ARIZONA_CTRL_IF_I2C1_CFG_2 0x0B #define ARIZONA_CTRL_IF_I2C2_CFG_2 0x0C #define ARIZONA_CTRL_IF_STATUS_1 0x0D #define ARIZONA_WRITE_SEQUENCER_CTRL_0 0x16 #define ARIZONA_WRITE_SEQUENCER_CTRL_1 0x17 #define ARIZONA_WRITE_SEQUENCER_CTRL_2 0x18 #define ARIZONA_WRITE_SEQUENCER_CTRL_3 0x19 #define ARIZONA_WRITE_SEQUENCER_PROM 0x1A #define ARIZONA_TONE_GENERATOR_1 0x20 #define ARIZONA_TONE_GENERATOR_2 0x21 #define ARIZONA_TONE_GENERATOR_3 0x22 #define ARIZONA_TONE_GENERATOR_4 0x23 #define ARIZONA_TONE_GENERATOR_5 0x24 #define ARIZONA_PWM_DRIVE_1 0x30 #define ARIZONA_PWM_DRIVE_2 0x31 #define ARIZONA_PWM_DRIVE_3 0x32 #define ARIZONA_WAKE_CONTROL 0x40 #define ARIZONA_SEQUENCE_CONTROL 0x41 #define ARIZONA_SPARE_TRIGGERS 0x42 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4 0x64 #define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x66 #define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x67 #define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3 0x68 #define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4 0x69 #define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5 0x6A #define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6 0x6B #define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_7 0x6C #define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_8 0x6D #define ARIZONA_COMFORT_NOISE_GENERATOR 0x70 #define ARIZONA_HAPTICS_CONTROL_1 0x90 #define ARIZONA_HAPTICS_CONTROL_2 0x91 #define ARIZONA_HAPTICS_PHASE_1_INTENSITY 0x92 #define ARIZONA_HAPTICS_PHASE_1_DURATION 0x93 #define ARIZONA_HAPTICS_PHASE_2_INTENSITY 0x94 #define ARIZONA_HAPTICS_PHASE_2_DURATION 0x95 #define ARIZONA_HAPTICS_PHASE_3_INTENSITY 0x96 #define ARIZONA_HAPTICS_PHASE_3_DURATION 0x97 #define ARIZONA_HAPTICS_STATUS 0x98 #define ARIZONA_CLOCK_32K_1 0x100 #define ARIZONA_SYSTEM_CLOCK_1 0x101 #define ARIZONA_SAMPLE_RATE_1 0x102 #define ARIZONA_SAMPLE_RATE_2 0x103 #define ARIZONA_SAMPLE_RATE_3 0x104 #define ARIZONA_SAMPLE_RATE_1_STATUS 0x10A #define ARIZONA_SAMPLE_RATE_2_STATUS 0x10B #define ARIZONA_SAMPLE_RATE_3_STATUS 0x10C #define ARIZONA_ASYNC_CLOCK_1 0x112 #define ARIZONA_ASYNC_SAMPLE_RATE_1 0x113 #define ARIZONA_ASYNC_SAMPLE_RATE_2 0x114 #define ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B #define ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C #define ARIZONA_OUTPUT_SYSTEM_CLOCK 0x149 #define ARIZONA_OUTPUT_ASYNC_CLOCK 0x14A #define ARIZONA_RATE_ESTIMATOR_1 0x152 #define ARIZONA_RATE_ESTIMATOR_2 0x153 #define ARIZONA_RATE_ESTIMATOR_3 0x154 #define ARIZONA_RATE_ESTIMATOR_4 0x155 #define ARIZONA_RATE_ESTIMATOR_5 0x156 #define ARIZONA_DYNAMIC_FREQUENCY_SCALING_1 0x161 #define ARIZONA_FLL1_CONTROL_1 0x171 #define ARIZONA_FLL1_CONTROL_2 0x172 #define ARIZONA_FLL1_CONTROL_3 0x173 #define ARIZONA_FLL1_CONTROL_4 0x174 #define ARIZONA_FLL1_CONTROL_5 0x175 #define ARIZONA_FLL1_CONTROL_6 0x176 #define ARIZONA_FLL1_LOOP_FILTER_TEST_1 0x177 #define ARIZONA_FLL1_NCO_TEST_0 0x178 #define ARIZONA_FLL1_CONTROL_7 0x179 #define ARIZONA_FLL1_SYNCHRONISER_1 0x181 #define ARIZONA_FLL1_SYNCHRONISER_2 0x182 #define ARIZONA_FLL1_SYNCHRONISER_3 0x183 #define ARIZONA_FLL1_SYNCHRONISER_4 0x184 #define ARIZONA_FLL1_SYNCHRONISER_5 0x185 #define ARIZONA_FLL1_SYNCHRONISER_6 0x186 #define ARIZONA_FLL1_SYNCHRONISER_7 0x187 #define 
ARIZONA_FLL1_SPREAD_SPECTRUM 0x189 #define ARIZONA_FLL1_GPIO_CLOCK 0x18A #define ARIZONA_FLL2_CONTROL_1 0x191 #define ARIZONA_FLL2_CONTROL_2 0x192 #define ARIZONA_FLL2_CONTROL_3 0x193 #define ARIZONA_FLL2_CONTROL_4 0x194 #define ARIZONA_FLL2_CONTRO