diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ac413543..f9d79388 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -5,6 +5,7 @@ config ARM select HAVE_DMA_API_DEBUG select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) + select HAVE_DMA_ATTRS select HAVE_MEMBLOCK select RTC_LIB select SYS_SUPPORTS_APM_EMULATION @@ -46,6 +47,14 @@ config ARM config ARM_HAS_SG_CHAIN bool +config NEED_SG_DMA_LENGTH + bool + +config ARM_DMA_USE_IOMMU + select NEED_SG_DMA_LENGTH + select ARM_HAS_SG_CHAIN + bool + config HAVE_PWM bool @@ -1869,9 +1878,6 @@ config ARCH_MEMORY_PROBE config ARCH_MEMORY_REMOVE def_bool n -config ARCH_POPULATES_NODE_MAP - def_bool n - config ENABLE_DMM def_bool n diff --git a/arch/arm/configs/dlxp_ul_defconfig b/arch/arm/configs/dlxp_ul_defconfig index 67b2e374..c5525d58 100644 --- a/arch/arm/configs/dlxp_ul_defconfig +++ b/arch/arm/configs/dlxp_ul_defconfig @@ -113,7 +113,7 @@ CONFIG_RD_GZIP=y # CONFIG_RD_LZMA is not set # CONFIG_RD_XZ is not set # CONFIG_RD_LZO is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=n CONFIG_SYSCTL=y CONFIG_ANON_INODES=y CONFIG_PANIC_TIMEOUT=5 @@ -163,6 +163,7 @@ CONFIG_HAVE_OPROFILE=y # CONFIG_JUMP_LABEL is not set CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_DMA_ATTRS=y CONFIG_HAVE_DMA_CONTIGUOUS=y CONFIG_USE_GENERIC_SMP_HELPERS=y CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y @@ -569,6 +570,7 @@ CONFIG_MACH_DLXP_U=y CONFIG_SUPPORT_USB_SPEAKER=y CONFIG_SENSE_4_PLUS=y CONFIG_MSM_NONSMD_PACKET_FILTER=y +# CONFIG_MONITOR_STREAMING_PORT_SOCKET is not set # CONFIG_IO_FOOTPRINT is not set CONFIG_BT_WBS_BRCM=y CONFIG_HTC_POWEROFF_MODEM_IN_OFFMODE_CHARGING=y @@ -703,7 +705,6 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_CLEANCACHE=y # CONFIG_ARCH_MEMORY_PROBE is not set # CONFIG_ARCH_MEMORY_REMOVE is not set -# CONFIG_ARCH_POPULATES_NODE_MAP is not set # CONFIG_ENABLE_DMM is not set # CONFIG_FIX_MOVABLE_ZONE is not set CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0=y @@ -1568,6 +1569,7 @@ CONFIG_USB_NET_ZAURUS=y # CONFIG_USB_SIERRA_NET is not set # CONFIG_USB_VL600 is not set CONFIG_MSM_RMNET_USB=y +# CONFIG_RIL_PCN001_HTC_QUEUE_URB_TO_DEFERRED_ANCHOR is not set CONFIG_WLAN=y # CONFIG_USB_ZD1201 is not set # CONFIG_USB_NET_RNDIS_WLAN is not set @@ -1724,6 +1726,7 @@ CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_MK712 is not set # CONFIG_TOUCHSCREEN_PENMOUNT is not set # CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set +CONFIG_TOUCHSCREEN_SYNAPTICS_SWEEP2WAKE=y # CONFIG_TOUCHSCREEN_SYNAPTICS_RMI4_I2C is not set # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set # CONFIG_TOUCHSCREEN_TOUCHWIN is not set @@ -1800,6 +1803,7 @@ CONFIG_UNIX98_PTYS=y # CONFIG_TRACE_SINK is not set # CONFIG_DEVMEM is not set # CONFIG_DEVKMEM is not set +CONFIG_FRANDOM=y # # Serial drivers @@ -2529,7 +2533,7 @@ CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 CONFIG_MSM_KGSL_PAGE_TABLE_COUNT=24 CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y # CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set -CONFIG_MSM_KGSL_KILL_HANG_PROCESS=y +# CONFIG_MSM_KGSL_KILL_HANG_PROCESS is not set # CONFIG_MSM_KGSL_GPU_USAGE is not set CONFIG_MSM_KGSL_SIMPLE_GOV=y # CONFIG_VGASTATE is not set @@ -2727,7 +2731,7 @@ CONFIG_FB_MSM_DEFAULT_DEPTH_RGBA8888=y # CONFIG_MSM_ALT_DSI_ESCAPE_CLOCK is not set # CONFIG_FB_MSM_ESD_WORKAROUND is not set CONFIG_FB_MSM_CABC_LEVEL_CONTROL=y -CONFIG_FB_MSM_UNDERFLOW_WORKAROUND=y +# CONFIG_FB_MSM_UNDERFLOW_WORKAROUND is not set # CONFIG_EXYNOS_VIDEO is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=m @@ -3347,6 
+3351,7 @@ CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y # CONFIG_ANDROID_INTF_ALARM_DEV is not set # CONFIG_PHONE is not set # CONFIG_USB_WPAN_HCD is not set +# CONFIG_CPUPOWER is not set # # Qualcomm MSM specific device drivers @@ -3440,6 +3445,8 @@ CONFIG_VFAT_FS=y CONFIG_FAT_DEFAULT_CODEPAGE=437 CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" # CONFIG_NTFS_FS is not set +CONFIG_EXFAT_DEFAULT_CODEPAGE=437 +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8" # # Pseudo filesystems @@ -3453,6 +3460,7 @@ CONFIG_TMPFS=y # CONFIG_TMPFS_XATTR is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set +# CONFIG_RESTRICT_ROOTFS_SLAVE is not set CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set @@ -3525,7 +3533,7 @@ CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y CONFIG_EXFAT_FS=m CONFIG_EXFAT_PATH="texfat_dlxpul" -CONFIG_EXFAT_VERSION="target/htc.d/htc-3013.6.27" +CONFIG_EXFAT_VERSION="target/htc.d/htc-3014.1.24" # # Kernel hacking diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h index 7aa36800..b69c0d32 100644 --- a/arch/arm/include/asm/device.h +++ b/arch/arm/include/asm/device.h @@ -7,12 +7,16 @@ #define ASMARM_DEVICE_H struct dev_archdata { + struct dma_map_ops *dma_ops; #ifdef CONFIG_DMABOUNCE struct dmabounce_device_info *dmabounce; #endif #ifdef CONFIG_IOMMU_API void *iommu; /* private IOMMU data */ #endif +#ifdef CONFIG_ARM_DMA_USE_IOMMU + struct dma_iommu_mapping *mapping; +#endif }; struct omap_device; diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h new file mode 100644 index 00000000..dcb65c81 --- /dev/null +++ b/arch/arm/include/asm/dma-iommu.h @@ -0,0 +1,34 @@ +#ifndef ASMARM_DMA_IOMMU_H +#define ASMARM_DMA_IOMMU_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +struct dma_iommu_mapping { + + struct iommu_domain *domain; + + void *bitmap; + size_t bits; + unsigned int order; + dma_addr_t base; + + spinlock_t lock; + struct kref kref; +}; + +struct dma_iommu_mapping * +arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, + int order); + +void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); + +int arm_iommu_attach_device(struct device *dev, + struct dma_iommu_mapping *mapping); + +#endif +#endif diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index dc988ff0..7ca28f45 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -5,20 +5,39 @@ #include #include +#include #include #include #include +#define DMA_ERROR_CODE (~0) +extern struct dma_map_ops arm_dma_ops; + +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (dev && dev->archdata.dma_ops) + return dev->archdata.dma_ops; + return &arm_dma_ops; +} + +static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +{ + BUG_ON(!dev); + dev->archdata.dma_ops = ops; +} + +#include + +static inline int dma_set_mask(struct device *dev, u64 mask) +{ + return get_dma_ops(dev)->set_dma_mask(dev, mask); +} + #ifdef __arch_page_to_dma #error Please update to __arch_pfn_to_dma #endif -/* - * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private - * functions used internally by the DMA-mapping API to provide DMA - * addresses. They must not be used by drivers. 
- */ #ifndef __arch_pfn_to_dma static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) { @@ -61,75 +80,11 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) } #endif -/* - * The DMA API is built upon the notion of "buffer ownership". A buffer - * is either exclusively owned by the CPU (and therefore may be accessed - * by it) or exclusively owned by the DMA device. These helper functions - * represent the transitions between these two ownership states. - * - * Note, however, that on later ARMs, this notion does not work due to - * speculative prefetches. We model our approach on the assumption that - * the CPU does do speculative prefetches, which means we clean caches - * before transfers and delay cache invalidation until transfer completion. - * - * Private support functions: these are not part of the API and are - * liable to change. Drivers must not use these. - */ -static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, - enum dma_data_direction dir) -{ - extern void ___dma_single_cpu_to_dev(const void *, size_t, - enum dma_data_direction); - - if (!arch_is_coherent()) - ___dma_single_cpu_to_dev(kaddr, size, dir); -} - -static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, - enum dma_data_direction dir) -{ - extern void ___dma_single_dev_to_cpu(const void *, size_t, - enum dma_data_direction); - - if (!arch_is_coherent()) - ___dma_single_dev_to_cpu(kaddr, size, dir); -} - -static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, - size_t size, enum dma_data_direction dir) -{ - extern void ___dma_page_cpu_to_dev(struct page *, unsigned long, - size_t, enum dma_data_direction); - - if (!arch_is_coherent()) - ___dma_page_cpu_to_dev(page, off, size, dir); -} - -static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, - size_t size, enum dma_data_direction dir) -{ - extern void ___dma_page_dev_to_cpu(struct page *, unsigned long, - size_t, enum dma_data_direction); - - if (!arch_is_coherent()) - ___dma_page_dev_to_cpu(page, off, size, dir); -} - -extern int dma_supported(struct device *, u64); -extern int dma_set_mask(struct device *, u64); - -/* - * DMA errors are defined by all-bits-set in the DMA address. - */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - return dma_addr == ~0; + return dma_addr == DMA_ERROR_CODE; } -/* - * Dummy noncoherent implementation. We don't provide a dma_cache_sync - * function so drivers using this API are highlighted with build warnings. - */ static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) { @@ -142,16 +97,6 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size, } -/* - * dma_coherent_pre_ops - barrier functions for coherent memory before DMA. - * A barrier is required to ensure memory operations are complete before the - * initiation of a DMA xfer. - * If the coherent memory is Strongly Ordered - * - pre ARMv7 and 8x50 guarantees ordering wrt other mem accesses - * - ARMv7 guarantees ordering only within a 1KB block, so we need a barrier - * If coherent memory is normal then we need a barrier to prevent - * reordering - */ static inline void dma_coherent_pre_ops(void) { #if COHERENT_IS_NORMAL == 1 @@ -163,12 +108,6 @@ static inline void dma_coherent_pre_ops(void) barrier(); #endif } -/* - * dma_post_coherent_ops - barrier functions for coherent memory after DMA. 
- * If the coherent memory is Strongly Ordered we dont need a barrier since - * there are no speculative fetches to Strongly Ordered memory. - * If coherent memory is normal then we need a barrier to prevent reordering - */ static inline void dma_coherent_post_ops(void) { #if COHERENT_IS_NORMAL == 1 @@ -181,195 +120,143 @@ static inline void dma_coherent_post_ops(void) #endif } -/** - * dma_alloc_coherent - allocate consistent memory for DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @size: required memory size - * @handle: bus-specific DMA address - * - * Allocate some uncached, unbuffered memory for a device for - * performing DMA. This function allocates pages, and will - * return the CPU-viewed address, and sets @handle to be the - * device-viewed address. - */ -extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); - -/** - * dma_free_coherent - free memory allocated by dma_alloc_coherent - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @size: size of memory originally requested in dma_alloc_coherent - * @cpu_addr: CPU-view address returned from dma_alloc_coherent - * @handle: device-view address returned from dma_alloc_coherent - * - * Free (and unmap) a DMA buffer previously allocated by - * dma_alloc_coherent(). - * - * References to memory and mappings associated with cpu_addr/handle - * during and after this call executing are illegal. - */ -extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); +extern int dma_supported(struct device *dev, u64 mask); -/** - * dma_mmap_coherent - map a coherent DMA allocation into user space - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @vma: vm_area_struct describing requested user mapping - * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent - * @handle: device-view address returned from dma_alloc_coherent - * @size: size of memory originally requested in dma_alloc_coherent - * - * Map a coherent DMA buffer previously allocated by dma_alloc_coherent - * into user space. The coherent DMA buffer must not be freed by the - * driver until the user space mapping has been released. - */ -int dma_mmap_coherent(struct device *, struct vm_area_struct *, - void *, dma_addr_t, size_t); +extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp, struct dma_attrs *attrs); +#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -/** - * dma_alloc_writecombine - allocate writecombining memory for DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @size: required memory size - * @handle: bus-specific DMA address - * - * Allocate some uncached, buffered memory for a device for - * performing DMA. This function allocates pages, and will - * return the CPU-viewed address, and sets @handle to be the - * device-viewed address. 
- */ -extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *, - gfp_t); +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + void *cpu_addr; + BUG_ON(!ops); -#define dma_free_writecombine(dev,size,cpu_addr,handle) \ - dma_free_coherent(dev,size,cpu_addr,handle) + cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); + debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); + return cpu_addr; +} -int dma_mmap_writecombine(struct device *, struct vm_area_struct *, - void *, dma_addr_t, size_t); +extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, struct dma_attrs *attrs); -/* - * This can be called during boot to increase the size of the consistent - * DMA region above it's default value of 2MB. It must be called before the - * memory allocator is initialised, i.e. before any core_initcall. - */ -extern void __init init_consistent_dma_size(unsigned long size); +#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); -#ifdef CONFIG_DMABOUNCE -/* - * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" - * and utilize bounce buffers as needed to work around limited DMA windows. - * - * On the SA-1111, a bug limits DMA to only certain regions of RAM. - * On the IXP425, the PCI inbound window is 64MB (256MB total RAM) - * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM) - * - * The following are helper functions used by the dmabounce subystem - * - */ + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + ops->free(dev, size, cpu_addr, dma_handle, attrs); +} -/** - * dmabounce_register_dev - * - * @dev: valid struct device pointer - * @small_buf_size: size of buffers to use with small buffer pool - * @large_buf_size: size of buffers to use with large buffer pool (can be 0) - * @needs_bounce_fn: called to determine whether buffer needs bouncing - * - * This function should be called by low-level platform code to register - * a device as requireing DMA buffer bouncing. The function will allocate - * appropriate DMA pools for the device. - */ -extern int dmabounce_register_dev(struct device *, unsigned long, - unsigned long, int (*)(struct device *, dma_addr_t, size_t)); +extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + struct dma_attrs *attrs); -/** - * dmabounce_unregister_dev - * - * @dev: valid struct device pointer - * - * This function should be called by low-level platform code when device - * that was previously registered with dmabounce_register_dev is removed - * from the system. - * - */ -extern void dmabounce_unregister_dev(struct device *); +#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) -/* - * The DMA API, implemented by dmabounce.c. See below for descriptions. 
- */ -extern dma_addr_t __dma_map_page(struct device *, struct page *, - unsigned long, size_t, enum dma_data_direction); -extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, - enum dma_data_direction); +static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, + size_t size, struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); +} -/* - * Private functions - */ -int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, - size_t, enum dma_data_direction); -int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, - size_t, enum dma_data_direction); -#else -static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr, - unsigned long offset, size_t size, enum dma_data_direction dir) +static inline void *dma_alloc_writecombine(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) { - return 1; + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs); } -static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, - unsigned long offset, size_t size, enum dma_data_direction dir) +static inline void dma_free_writecombine(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) { - return 1; + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); } +static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); +} -static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir) +static inline void *dma_alloc_stronglyordered(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) { - __dma_page_cpu_to_dev(page, offset, size, dir); - return pfn_to_dma(dev, page_to_pfn(page)) + offset; + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs); + return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs); } -static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) +static inline void dma_free_stronglyordered(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) { - __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), - handle & ~PAGE_MASK, size, dir); + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs); + return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); } -#endif /* CONFIG_DMABOUNCE */ -/** - * dma_map_single - map a single buffer for streaming DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @cpu_addr: CPU direct mapped address of buffer - * @size: size of buffer to map - * @dir: DMA transfer direction - * - * Ensure that any data held in the cache is appropriately discarded - * or written back. - * - * The device owns this memory once this call has completed. The CPU - * can regain ownership by calling dma_unmap_single() or - * dma_sync_single_for_cpu(). 
- */ -static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, - size_t size, enum dma_data_direction dir) +static inline int dma_mmap_stronglyordered(struct device *dev, + struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size) { - unsigned long offset; - struct page *page; - dma_addr_t addr; + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs); + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); +} - BUG_ON(!virt_addr_valid(cpu_addr)); - BUG_ON(!virt_addr_valid(cpu_addr + size - 1)); - BUG_ON(!valid_dma_direction(dir)); +static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs); +} - page = virt_to_page(cpu_addr); - offset = (unsigned long)cpu_addr & ~PAGE_MASK; - addr = __dma_map_page(dev, page, offset, size, dir); - debug_dma_map_page(dev, page, offset, size, dir, addr, true); +static inline void dma_free_nonconsistent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); +} - return addr; +static inline int dma_mmap_nonconsistent(struct device *dev, + struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); } + + +extern void __init init_consistent_dma_size(unsigned long size); + + +extern int dmabounce_register_dev(struct device *, unsigned long, + unsigned long, int (*)(struct device *, dma_addr_t, size_t)); + +extern void dmabounce_unregister_dev(struct device *); + + + /** * dma_cache_pre_ops - clean or invalidate cache before dma transfer is * initiated and perform a barrier operation. @@ -414,153 +301,17 @@ static inline void dma_cache_post_ops(void *virtual_addr, if (arch_has_speculative_dfetch() && !arch_is_coherent() && dir != DMA_TO_DEVICE) - /* - * Treat DMA_BIDIRECTIONAL and DMA_FROM_DEVICE - * identically: invalidate - */ ___dma_single_cpu_to_dev(virtual_addr, size, DMA_FROM_DEVICE); } - -/** - * dma_map_page - map a portion of a page for streaming DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @page: page that buffer resides in - * @offset: offset into page for start of buffer - * @size: size of buffer to map - * @dir: DMA transfer direction - * - * Ensure that any data held in the cache is appropriately discarded - * or written back. - * - * The device owns this memory once this call has completed. The CPU - * can regain ownership by calling dma_unmap_page(). 
- */ -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir) -{ - dma_addr_t addr; - - BUG_ON(!valid_dma_direction(dir)); - - addr = __dma_map_page(dev, page, offset, size, dir); - debug_dma_map_page(dev, page, offset, size, dir, addr, false); - - return addr; -} - -/** - * dma_unmap_single - unmap a single buffer previously mapped - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @handle: DMA address of buffer - * @size: size of buffer (same as passed to dma_map_single) - * @dir: DMA transfer direction (same as passed to dma_map_single) - * - * Unmap a single streaming mode DMA translation. The handle and size - * must match what was provided in the previous dma_map_single() call. - * All other usages are undefined. - * - * After this call, reads by the CPU to the buffer are guaranteed to see - * whatever the device wrote there. - */ -static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - debug_dma_unmap_page(dev, handle, size, dir, true); - __dma_unmap_page(dev, handle, size, dir); -} - -/** - * dma_unmap_page - unmap a buffer previously mapped through dma_map_page() - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @handle: DMA address of buffer - * @size: size of buffer (same as passed to dma_map_page) - * @dir: DMA transfer direction (same as passed to dma_map_page) - * - * Unmap a page streaming mode DMA translation. The handle and size - * must match what was provided in the previous dma_map_page() call. - * All other usages are undefined. - * - * After this call, reads by the CPU to the buffer are guaranteed to see - * whatever the device wrote there. - */ -static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - debug_dma_unmap_page(dev, handle, size, dir, false); - __dma_unmap_page(dev, handle, size, dir); -} - -/** - * dma_sync_single_range_for_cpu - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @handle: DMA address of buffer - * @offset: offset of region to start sync - * @size: size of region to sync - * @dir: DMA transfer direction (same as passed to dma_map_single) - * - * Make physical memory consistent for a single streaming mode DMA - * translation after a transfer. - * - * If you perform a dma_map_single() but wish to interrogate the - * buffer using the cpu, yet do not wish to teardown the PCI dma - * mapping, you must call this function before doing so. At the - * next point you give the PCI dma address back to the card, you - * must first the perform a dma_sync_for_device, and then the - * device again owns the buffer. 
- */ -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t handle, unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - BUG_ON(!valid_dma_direction(dir)); - - debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir); - - if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) - return; - - __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir); -} - -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t handle, unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - BUG_ON(!valid_dma_direction(dir)); - - debug_dma_sync_single_for_device(dev, handle + offset, size, dir); - - if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) - return; - - __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir); -} - -static inline void dma_sync_single_for_cpu(struct device *dev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - dma_sync_single_range_for_cpu(dev, handle, 0, size, dir); -} - -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - dma_sync_single_range_for_device(dev, handle, 0, size, dir); -} - -/* - * The scatter list versions of the above methods. - */ -extern int dma_map_sg(struct device *, struct scatterlist *, int, - enum dma_data_direction); -extern void dma_unmap_sg(struct device *, struct scatterlist *, int, +extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, + enum dma_data_direction, struct dma_attrs *attrs); +extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, + enum dma_data_direction, struct dma_attrs *attrs); +extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction); -extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, +extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction); -extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, - enum dma_data_direction); - -#endif /* __KERNEL__ */ +#endif #endif diff --git a/arch/arm/mach-msm/board-dlxp_ul-display.c b/arch/arm/mach-msm/board-dlxp_ul-display.c index 6243d4ae..ad37e470 100644 --- a/arch/arm/mach-msm/board-dlxp_ul-display.c +++ b/arch/arm/mach-msm/board-dlxp_ul-display.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -1510,7 +1510,7 @@ static const struct i2c_device_id pwm_i2c_id[] = { static int pwm_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { - int rc; + int rc = 0; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C)) diff --git a/arch/arm/mach-msm/board-dlxp_ul-pmic.c b/arch/arm/mach-msm/board-dlxp_ul-pmic.c index 00b7ac3e..f1ddb46c 100644 --- a/arch/arm/mach-msm/board-dlxp_ul-pmic.c +++ b/arch/arm/mach-msm/board-dlxp_ul-pmic.c @@ -468,8 +468,10 @@ pm8921_bms_pdata __devinitdata = { .v_failure = 3000, .max_voltage_uv = MAX_VOLTAGE_MV * 1000, .rconn_mohm = 0, + .store_batt_data_soc_thre = 100, .criteria_sw_est_ocv = 86400000, .rconn_mohm_sw_est_ocv = 10, + .qb_mode_cc_criteria_uAh = 10000, }; static int __init check_dq_setup(char *str) diff --git a/arch/arm/mach-msm/board-dlxp_ul.c b/arch/arm/mach-msm/board-dlxp_ul.c index 1d84a4cc..46d20014 100644 --- a/arch/arm/mach-msm/board-dlxp_ul.c +++ b/arch/arm/mach-msm/board-dlxp_ul.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include 
#include #include @@ -419,6 +419,7 @@ static struct ion_cp_heap_pdata cp_mm_dlxp_ul_ion_pdata = { .reusable = FMEM_ENABLED, .mem_is_fmem = FMEM_ENABLED, .fixed_position = FIXED_MIDDLE, + .no_nonsecure_alloc = 0, }; static struct ion_cp_heap_pdata cp_mfc_dlxp_ul_ion_pdata = { @@ -427,6 +428,7 @@ static struct ion_cp_heap_pdata cp_mfc_dlxp_ul_ion_pdata = { .reusable = 0, .mem_is_fmem = FMEM_ENABLED, .fixed_position = FIXED_HIGH, + .no_nonsecure_alloc = 0, }; static struct ion_co_heap_pdata co_dlxp_ul_ion_pdata = { @@ -902,6 +904,9 @@ static struct htc_battery_platform_data htc_battery_pdev_data = { .igauge.get_battery_temperature = pm8921_get_batt_temperature, .igauge.get_battery_id = pm8921_get_batt_id, .igauge.get_battery_soc = pm8921_bms_get_batt_soc, + .igauge.enter_qb_mode = pm8921_bms_enter_qb_mode, + .igauge.exit_qb_mode = pm8921_bms_exit_qb_mode, + .igauge.qb_mode_pwr_consumption_check = pm8921_qb_mode_pwr_consumption_check, .igauge.get_battery_cc = pm8921_bms_get_batt_cc, .igauge.is_battery_temp_fault = pm8921_is_batt_temperature_fault, .igauge.is_battery_full = pm8921_is_batt_full, diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c index eef6602f..fea46b0c 100644 --- a/arch/arm/mach-msm/devices-8064.c +++ b/arch/arm/mach-msm/devices-8064.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include "clock.h" #include "devices.h" #include "footswitch.h" diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c index fc7afaa5..2f289154 100644 --- a/arch/arm/mach-msm/devices-8960.c +++ b/arch/arm/mach-msm/devices-8960.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-msm/devices_htc.c b/arch/arm/mach-msm/devices_htc.c index 4961b041..07cd2423 100644 --- a/arch/arm/mach-msm/devices_htc.c +++ b/arch/arm/mach-msm/devices_htc.c @@ -748,7 +748,7 @@ char *board_get_mid(void) } static int __init board_set_mid(char *mid) { - strncpy(model_id, mid, sizeof(model_id)); + strncpy(model_id, mid, sizeof(model_id)-1); return 1; } __setup("androidboot.mid=", board_set_mid); diff --git a/arch/arm/mach-msm/htc_battery_8960.c b/arch/arm/mach-msm/htc_battery_8960.c index 5707b765..5f933f81 100644 --- a/arch/arm/mach-msm/htc_battery_8960.c +++ b/arch/arm/mach-msm/htc_battery_8960.c @@ -41,10 +41,6 @@ #include #define MSPERIOD(end, start) ktime_to_ms(ktime_sub(end, start)) -#ifdef CONFIG_FORCE_FAST_CHARGE -#include -#endif - #define HTC_BATT_CHG_DIS_BIT_EOC (1) #define HTC_BATT_CHG_DIS_BIT_ID (1<<1) #define HTC_BATT_CHG_DIS_BIT_TMP (1<<2) @@ -68,6 +64,9 @@ static int chg_dis_control_mask = HTC_BATT_CHG_DIS_BIT_ID static int chg_dis_pj_mask = HTC_BATT_CHG_DIS_BIT_ID | HTC_BATT_CHG_DIS_BIT_TMR; +#ifdef CONFIG_FORCE_FAST_CHARGE +#include +#endif #define HTC_BATT_PWRSRC_DIS_BIT_MFG (1) #define HTC_BATT_PWRSRC_DIS_BIT_API (1<<1) @@ -125,8 +124,10 @@ static struct kset *htc_batt_kset; #define BATT_REMOVED_SHUTDOWN_DELAY_MS (50) #define BATT_CRITICAL_VOL_SHUTDOWN_DELAY_MS (1000) +#define BATT_QB_MODE_REAL_POWEROFF_DELAY_MS (5000) static void shutdown_worker(struct work_struct *work); struct delayed_work shutdown_work; +static void batt_qb_mode_pwr_consumption_check(unsigned long cur_jiffies); #define BATT_CRITICAL_LOW_VOLTAGE (3000) #define VOL_ALARM_RESUME_AFTER_LEVEL (5) @@ -144,6 +145,7 @@ static int ac_suspend_flag; #endif static int htc_ext_5v_output_now; static int htc_ext_5v_output_old; +static bool qb_mode_enter = false; static int latest_chg_src = CHARGER_BATTERY; @@ 
-166,6 +168,7 @@ struct htc_battery_info { int overload_vol_thr_mv; int overload_curr_thr_ma; int smooth_chg_full_delay_min; + int decreased_batt_level_check; struct kobject batt_timer_kobj; struct kobject batt_cable_kobj; @@ -236,6 +239,18 @@ struct max_level_by_current_ma { int level_boundary; }; +struct dec_level_by_current_ua { + int threshold_ua; + int dec_level; +}; +static struct dec_level_by_current_ua dec_level_curr_table[] = { + {900000, 2}, + {600000, 4}, + {0, 6}, +}; + +static const int DEC_LEVEL_CURR_TABLE_SIZE = sizeof(dec_level_curr_table) / sizeof (dec_level_curr_table[0]); + #ifdef CONFIG_DUTY_CYCLE_LIMIT enum { LIMIT_CHG_TIMER_STATE_NONE = 0, @@ -390,6 +405,14 @@ int htc_gauge_event_notify(enum htc_gauge_event event) sw_stimer_counter = 0; htc_batt_schedule_batt_info_update(); break; + case HTC_GAUGE_EVENT_QB_MODE_ENTER: + htc_batt_schedule_batt_info_update(); + break; + case HTC_GAUGE_EVENT_QB_MODE_DO_REAL_POWEROFF: + wake_lock(&batt_shutdown_wake_lock); + schedule_delayed_work(&shutdown_work, + msecs_to_jiffies(BATT_QB_MODE_REAL_POWEROFF_DELAY_MS)); + break; default: pr_info("[BATT] unsupported gauge event(%d)\n", event); break; @@ -450,16 +473,10 @@ int htc_charger_event_notify(enum htc_charger_event event) UNKNOWN_USB_DETECT_DELAY_MS))); break; case HTC_CHARGER_EVENT_SRC_UNKNOWN_USB: - if (get_kernel_flag() & KERNEL_FLAG_ENABLE_FAST_CHARGE) + if ((get_kernel_flag() & KERNEL_FLAG_ENABLE_FAST_CHARGE) || force_fast_charge == 1) latest_chg_src = CHARGER_AC; else - if (force_fast_charge == 1) { - printk("[FASTCHARGE] Forcing CHARGER_AC"); - latest_chg_src = CHARGER_AC; - } else { - printk("[FASTCHARGE] NOT set, using normal CHARGER_USB"); - latest_chg_src = CHARGER_USB; - } + latest_chg_src = CHARGER_UNKNOWN_USB; htc_batt_schedule_batt_info_update(); break; case HTC_CHARGER_EVENT_OVP: @@ -980,6 +997,82 @@ static void htc_batt_set_full_level_dis_batt_chg(int percent) return; } +static void htc_batt_trigger_store_battery_data(int triggle_flag) +{ + if (triggle_flag == 1) + { + if (htc_batt_info.igauge && + htc_batt_info.igauge->store_battery_data) { + htc_batt_info.igauge->store_battery_data(); + } + } + return; +} + +static void htc_batt_qb_mode_shutdown_status(int triggle_flag) +{ + if (triggle_flag == 1) + { + if (htc_batt_info.igauge && + htc_batt_info.igauge->enter_qb_mode) { + qb_mode_enter = true; + htc_batt_info.igauge->enter_qb_mode(); + } + } + + if (triggle_flag == 0) + { + if (htc_batt_info.igauge && + htc_batt_info.igauge->exit_qb_mode) { + qb_mode_enter = false; + htc_batt_info.igauge->exit_qb_mode(); + } + } + return; +} + +static void batt_qb_mode_pwr_consumption_check(unsigned long time_since_last_update_ms) +{ + if (htc_batt_info.igauge && + htc_batt_info.igauge->qb_mode_pwr_consumption_check) { + htc_batt_info.igauge->qb_mode_pwr_consumption_check(time_since_last_update_ms); + } + return; +} + +static void htc_batt_store_battery_ui_soc(int soc_ui) +{ + + if (soc_ui <= 0 || soc_ui > 100) + return; + + if (htc_batt_info.igauge && + htc_batt_info.igauge->store_battery_ui_soc) { + htc_batt_info.igauge->store_battery_ui_soc(soc_ui); + } + return; +} + +static void htc_batt_get_battery_ui_soc(int *soc_ui) +{ + int temp_soc; + int orig_soc = *soc_ui; + + if (htc_batt_info.igauge && + htc_batt_info.igauge->get_battery_ui_soc) { + temp_soc = htc_batt_info.igauge->get_battery_ui_soc(); + + + if (temp_soc > 0 && temp_soc <= 100) + *soc_ui = temp_soc; + } + + BATT_LOG("%s: original soc: %d, changed soc: %d.", __func__, + orig_soc, *soc_ui); + + return; +} + static 
int htc_battery_get_rt_attr(enum htc_batt_rt_attr attr, int *val) { int ret = 0; @@ -1004,6 +1097,12 @@ static int htc_battery_get_rt_attr(enum htc_batt_rt_attr attr, int *val) if (htc_batt_info.igauge->get_pj_voltage) htc_batt_info.igauge->get_pj_voltage(val); break; + case HTC_BATT_RT_VOLTAGE_UV: + if (htc_batt_info.igauge->get_battery_voltage) { + ret = htc_batt_info.igauge->get_battery_voltage(val); + *val *= 1000; + } + break; default: ret = -EINVAL; break; @@ -1434,12 +1533,18 @@ inline static int is_voltage_critical_low(int voltage_mv) static void batt_check_overload(void) { static unsigned int overload_count; + int is_full = 0; + + if(htc_batt_info.igauge && htc_batt_info.igauge->is_battery_full) + htc_batt_info.igauge->is_battery_full(&is_full); - pr_debug("[BATT] Chk overload by CS=%d V=%d I=%d count=%d overload=%d\n", + pr_debug("[BATT] Chk overload by CS=%d V=%d I=%d count=%d overload=%d" + "is_full=%d\n", htc_batt_info.rep.charging_source, htc_batt_info.rep.batt_vol, - htc_batt_info.rep.batt_current, overload_count, htc_batt_info.rep.overload); + htc_batt_info.rep.batt_current, overload_count, htc_batt_info.rep.overload, + is_full); if ((htc_batt_info.rep.charging_source > 0) && - (htc_batt_info.rep.batt_vol < htc_batt_info.overload_vol_thr_mv) && + (!is_full) && ((htc_batt_info.rep.batt_current / 1000) > htc_batt_info.overload_curr_thr_ma)) { if (overload_count++ < 3) { @@ -1452,137 +1557,236 @@ static void batt_check_overload(void) } } -#define CHG_ONE_PERCENT_LIMIT_PERIOD_MS (1000 * 60) +static void batt_check_critical_low_level(int *dec_level, int batt_current) +{ + int i; + + for(i = 0; i < DEC_LEVEL_CURR_TABLE_SIZE; i++) { + if (batt_current > dec_level_curr_table[i].threshold_ua) { + *dec_level = dec_level_curr_table[i].dec_level; + + pr_debug("%s: i=%d, dec_level=%d, threshold_ua=%d\n", + __func__, i, *dec_level, dec_level_curr_table[i].threshold_ua); + break; + } + } +} + +static void adjust_store_level(int *store_level, int drop_raw, int drop_ui, int prev) { + int store = *store_level; + + store += drop_raw - drop_ui; + if (store >= 0) + htc_batt_info.rep.level = prev - drop_ui; + else { + htc_batt_info.rep.level = prev; + store += drop_ui; + } + *store_level = store; +} + +#define CHG_ONE_PERCENT_LIMIT_PERIOD_MS (1000 * 60) #define DISCHG_UPDATE_PERIOD_MS (1000 * 60) #define ONE_PERCENT_LIMIT_PERIOD_MS (1000 * (60 + 10)) #define FIVE_PERCENT_LIMIT_PERIOD_MS (1000 * (300 + 10)) +#define ONE_MINUTES_MS (1000 * (60 + 10)) +#define FOURTY_MINUTES_MS (1000 * (2400 + 10)) +#define SIXTY_MINUTES_MS (1000 * (3600 + 10)) static void batt_level_adjust(unsigned long time_since_last_update_ms) { static int first = 1; static int critical_low_enter = 0; - int prev_level, drop_level; - int is_full = 0; - int prev_current; - unsigned long level_since_last_update_ms; - unsigned long cur_jiffies = 0; - static unsigned long pre_jiffies; + static int store_level = 0; + static int pre_ten_digit, ten_digit; + static bool stored_level_flag = false; + static bool allow_drop_one_percent_flag = false; + int prev_raw_level, drop_raw_level; + int prev_level; + int is_full = 0, dec_level = 0; + int dropping_level; + static unsigned long time_accumulated_level_change = 0; const struct battery_info_reply *prev_batt_info_rep = htc_battery_core_get_batt_info_rep(); if (!first) { prev_level = prev_batt_info_rep->level; - prev_current = prev_batt_info_rep->batt_current; + prev_raw_level = prev_batt_info_rep->level_raw; } else { prev_level = htc_batt_info.rep.level; - prev_current = 
htc_batt_info.rep.batt_current; - pre_jiffies = 0; - level_since_last_update_ms = DISCHG_UPDATE_PERIOD_MS; + prev_raw_level = htc_batt_info.rep.level_raw; + pre_ten_digit = htc_batt_info.rep.level / 10; } - drop_level = prev_level - htc_batt_info.rep.level; + drop_raw_level = prev_raw_level - htc_batt_info.rep.level_raw; + time_accumulated_level_change += time_since_last_update_ms; if ((prev_batt_info_rep->charging_source > 0) && htc_batt_info.rep.charging_source == 0 && prev_level == 100) { BATT_LOG("%s: Cable plug out when level 100, reset timer.",__func__); - pre_jiffies = jiffies; + time_accumulated_level_change = 0; htc_batt_info.rep.level = prev_level; return; } + if ((htc_batt_info.rep.charging_source == 0) + && (stored_level_flag == false)) { + store_level = prev_level - prev_raw_level; + BATT_LOG("%s: Cable plug out, to store difference between" + " UI & SOC. store_level:%d, prev_level:%d, prev_raw_level:%d" + ,__func__, store_level, prev_level, prev_raw_level); + stored_level_flag = true; + } else if (htc_batt_info.rep.charging_source > 0) + stored_level_flag = false; + if (!prev_batt_info_rep->charging_enabled && !((prev_batt_info_rep->charging_source == 0) && htc_batt_info.rep.charging_source > 0)) { - if (drop_level > 0) { - cur_jiffies = jiffies; - level_since_last_update_ms = - (cur_jiffies - pre_jiffies) * MSEC_PER_SEC / HZ; + if (time_accumulated_level_change < DISCHG_UPDATE_PERIOD_MS) { + BATT_LOG("%s: total_time since last batt level update = %lu ms.", - __func__, level_since_last_update_ms); - if (time_since_last_update_ms < DISCHG_UPDATE_PERIOD_MS && - level_since_last_update_ms < DISCHG_UPDATE_PERIOD_MS) { - htc_batt_info.rep.level = prev_level; - return; - } + __func__, time_accumulated_level_change); + htc_batt_info.rep.level = prev_level; + store_level += drop_raw_level; + return; } if (is_voltage_critical_low(htc_batt_info.rep.batt_vol)) { critical_low_enter = 1; - pr_info("[BATT] battery level force decreses 6%% from %d%%" - " (soc=%d)on critical low (%d mV)\n", prev_level, - htc_batt_info.rep.level, - htc_batt_info.critical_low_voltage_mv); + if (htc_batt_info.decreased_batt_level_check) + batt_check_critical_low_level(&dec_level, + htc_batt_info.rep.batt_current); + else + dec_level = 6; + htc_batt_info.rep.level = - (prev_level - 6 > 0) ? (prev_level - 6) : 0; + (prev_level - dec_level > 0) ? 
(prev_level - dec_level) : 0; + + pr_info("[BATT] battery level force decreses %d%% from %d%%" + " (soc=%d)on critical low (%d mV)(%d mA)\n", dec_level, prev_level, + htc_batt_info.rep.level, htc_batt_info.critical_low_voltage_mv, + htc_batt_info.rep.batt_current); } else { + + + if ((htc_batt_info.rep.level_raw < 20) || + (prev_level - prev_raw_level > 10)) + allow_drop_one_percent_flag = true; + + + htc_batt_info.rep.level = prev_level; + if (time_since_last_update_ms <= ONE_PERCENT_LIMIT_PERIOD_MS) { - if (2 < drop_level) { - pr_info("[BATT] soc drop = %d%% > 2%% in %lu ms.\n", - drop_level,time_since_last_update_ms); - htc_batt_info.rep.level = prev_level - 2; - } else if (drop_level < 0) { - if (htc_batt_info.rep.pj_src > 0 - && htc_batt_info.rep.pj_chg_status == PJ_CHG_STATUS_DCHG) { - - pr_info("[BATT] level increase due to PJ charge battery" - " in %lu ms.\n",time_since_last_update_ms); - } else { - if (critical_low_enter) { - pr_warn("[BATT] level increase because of" - " exit critical_low!\n"); - } - htc_batt_info.rep.level = prev_level; - } - } else { - + if (1 <= drop_raw_level) { + adjust_store_level(&store_level, drop_raw_level, 1, prev_level); + pr_info("[BATT] remap: normal soc drop = %d%% in %lu ms." + " UI only allow -1%%, store_level:%d, ui:%d%%\n", + drop_raw_level, time_since_last_update_ms, + store_level, htc_batt_info.rep.level); } } else if ((chg_limit_reason & HTC_BATT_CHG_LIMIT_BIT_TALK) && (time_since_last_update_ms <= FIVE_PERCENT_LIMIT_PERIOD_MS)) { - if (5 < drop_level) { - pr_info("[BATT] soc drop = %d%% > 5%% in %lu ms.\n", - drop_level,time_since_last_update_ms); - htc_batt_info.rep.level = prev_level - 5; - } else if (drop_level < 0) { - if (htc_batt_info.rep.pj_src > 0 - && htc_batt_info.rep.pj_chg_status == PJ_CHG_STATUS_DCHG) { - - pr_info("[BATT] level increase due to PJ charge battery" - " in phone call."); - } else { - if (critical_low_enter) { - pr_warn("[BATT] level increase because of" - " exit critical_low!\n"); + if (5 < drop_raw_level) { + adjust_store_level(&store_level, drop_raw_level, 5, prev_level); + } else if (1 <= drop_raw_level && drop_raw_level <= 5) { + adjust_store_level(&store_level, drop_raw_level, 1, prev_level); + } + pr_info("[BATT] remap: phone soc drop = %d%% in %lu ms." + " UI only allow -1%% or -5%%, store_level:%d, ui:%d%%\n", + drop_raw_level, time_since_last_update_ms, + store_level, htc_batt_info.rep.level); + } else { + if (1 <= drop_raw_level) { + if ((ONE_MINUTES_MS < time_since_last_update_ms) && + (time_since_last_update_ms <= FOURTY_MINUTES_MS)) { + adjust_store_level(&store_level, drop_raw_level, 1, prev_level); + } else if ((FOURTY_MINUTES_MS < time_since_last_update_ms) && + (time_since_last_update_ms <= SIXTY_MINUTES_MS)) { + if (2 <= drop_raw_level) { + adjust_store_level(&store_level, drop_raw_level, 2, prev_level); + } else { + adjust_store_level(&store_level, drop_raw_level, 1, prev_level); + } + } else if (SIXTY_MINUTES_MS < time_since_last_update_ms) { + if (3 <= drop_raw_level) { + adjust_store_level(&store_level, drop_raw_level, 3, prev_level); + } else if (drop_raw_level == 2) { + adjust_store_level(&store_level, drop_raw_level, 2, prev_level); + } else { + adjust_store_level(&store_level, drop_raw_level, 1, prev_level); } - htc_batt_info.rep.level = prev_level; } - } else { - + pr_info("[BATT] remap: suspend soc drop: %d%% in %lu ms." 
+ " UI only allow -1%% to -3%%, store_level:%d, ui:%d%%\n", + drop_raw_level, time_since_last_update_ms, + store_level, htc_batt_info.rep.level); } - } else { - if (3 < drop_level) { - pr_info("[BATT] soc drop = %d%% > 3%% in %lu ms.\n", - drop_level,time_since_last_update_ms); - htc_batt_info.rep.level = prev_level - 3; - } else if (drop_level < 0) { - if (htc_batt_info.rep.pj_src > 0 + } + + if ((allow_drop_one_percent_flag == false) + && (drop_raw_level == 0)) { + htc_batt_info.rep.level = prev_level; + pr_info("[BATT] remap: no soc drop and no additional 1%%," + " ui:%d%%\n", htc_batt_info.rep.level); + } else if ((allow_drop_one_percent_flag == true) + && (drop_raw_level == 0) + && (store_level > 0)) { + store_level--; + htc_batt_info.rep.level = prev_level - 1; + allow_drop_one_percent_flag = false; + pr_info("[BATT] remap: drop additional 1%%. store_level:%d," + " ui:%d%%\n", store_level + , htc_batt_info.rep.level); + } else if (drop_raw_level < 0) { + if (htc_batt_info.rep.pj_src > 0 && htc_batt_info.rep.pj_chg_status == PJ_CHG_STATUS_DCHG) { - pr_info("[BATT] level increase due to PJ charge battery."); - } else { - if (critical_low_enter) { - pr_warn("[BATT] level increase because of" - " exit critical_low!\n"); - } - htc_batt_info.rep.level = prev_level; - } + pr_info("[BATT] level increase due to PJ charge battery" + " in %lu ms.\n",time_since_last_update_ms); + htc_batt_info.rep.level = prev_level - drop_raw_level; } else { - + if (critical_low_enter) { + pr_warn("[BATT] remap: level increase because of" + " exit critical_low!\n"); + } + store_level += drop_raw_level; + htc_batt_info.rep.level = prev_level; + pr_info("[BATT] remap: soc increased. store_level:%d," + " ui:%d%%\n", store_level, htc_batt_info.rep.level); + } + } + + + ten_digit = htc_batt_info.rep.level / 10; + if (htc_batt_info.rep.level != 100) { + + if ((pre_ten_digit != 10) && (pre_ten_digit > ten_digit)) { + allow_drop_one_percent_flag = true; + pr_info("[BATT] remap: allow to drop additional 1%% at next" + " level:%d%%.\n", htc_batt_info.rep.level - 1); } } + pre_ten_digit = ten_digit; if (critical_low_enter) { critical_low_enter = 0; pr_warn("[BATT] exit critical_low without charge!\n"); } + + if (htc_batt_info.rep.batt_temp < 0 && + drop_raw_level == 0 && + store_level >= 2) { + dropping_level = prev_level - htc_batt_info.rep.level; + if((dropping_level == 1) || (dropping_level == 0)) { + store_level = store_level - (2 - dropping_level); + htc_batt_info.rep.level = htc_batt_info.rep.level - + (2 - dropping_level); + } + pr_info("[BATT] remap: enter low temperature section, " + "store_level:%d%%, dropping_level:%d%%, " + "prev_level:%d%%, level:%d%%.\n" + , store_level, prev_level, dropping_level + , htc_batt_info.rep.level); + } } if (htc_batt_info.rep.level == 0) { if (prev_level > 1) { @@ -1604,28 +1808,59 @@ static void batt_level_adjust(unsigned long time_since_last_update_ms) htc_batt_info.igauge->is_battery_full(&is_full); if (is_full != 0) { if (htc_batt_info.smooth_chg_full_delay_min - && prev_level < 100) { - htc_batt_info.rep.level = prev_level + 1; + && prev_level < 100) { + + if (time_accumulated_level_change < + (htc_batt_info.smooth_chg_full_delay_min + * CHG_ONE_PERCENT_LIMIT_PERIOD_MS)) { + htc_batt_info.rep.level = prev_level; + } else { + htc_batt_info.rep.level = prev_level + 1; + } } else { htc_batt_info.rep.level = 100; } } else { - if (99 < htc_batt_info.rep.level) + if (prev_level > htc_batt_info.rep.level) { + + if (!htc_batt_info.rep.overload) { + pr_info("[BATT] pre_level=%d, 
new_level=%d, " + "level drop but overloading doesn't happen!\n", + prev_level, htc_batt_info.rep.level); + htc_batt_info.rep.level = prev_level; + } + } + else if (99 < htc_batt_info.rep.level && prev_level < 100) htc_batt_info.rep.level = 99; else if (prev_level < htc_batt_info.rep.level) { - if(time_since_last_update_ms > - CHG_ONE_PERCENT_LIMIT_PERIOD_MS) - htc_batt_info.rep.level = prev_level + 1; - else - htc_batt_info.rep.level = prev_level; + if(time_since_last_update_ms > + CHG_ONE_PERCENT_LIMIT_PERIOD_MS) + htc_batt_info.rep.level = prev_level + 1; + else + htc_batt_info.rep.level = prev_level; + + if (htc_batt_info.rep.level > 100) + htc_batt_info.rep.level = 100; + } + else { + pr_info("[BATT] pre_level=%d, new_level=%d, " + "level would use raw level!\n", + prev_level, htc_batt_info.rep.level); } } } critical_low_enter = 0; + allow_drop_one_percent_flag = false; } + htc_batt_store_battery_ui_soc(htc_batt_info.rep.level); + + + if (first) + htc_batt_get_battery_ui_soc(&htc_batt_info.rep.level); + if (htc_batt_info.rep.level != prev_level) - pre_jiffies = cur_jiffies; + time_accumulated_level_change = 0; first = 0; } @@ -1799,7 +2034,8 @@ static void power_jacket_level_update(int first) __func__, htc_batt_info.rep.pj_vol, htc_batt_info.rep.pj_level, htc_batt_info.rep.pj_level_pre, is_chg); - if (htc_batt_info.rep.pj_full == PJ_FULL) { + if (htc_batt_info.rep.pj_full) { + htc_batt_info.rep.pj_level = 100; } else if (htc_batt_info.rep.pj_vol < 2400) { @@ -1823,9 +2059,6 @@ static void power_jacket_level_update(int first) if (htc_batt_info.rep.pj_level - htc_batt_info.rep.pj_level_pre > 19) htc_batt_info.rep.pj_level = htc_batt_info.rep.pj_level_pre + 19; - - if (htc_batt_info.rep.pj_full == PJ_FULL) - htc_batt_info.rep.pj_level = 100; } } @@ -1840,7 +2073,9 @@ void power_jacket_info_update(void) { int pj_chg_status = htc_batt_info.rep.pj_chg_status; int err = 0, prev_pj_src = 0; + int pj_full_status; static int first = 1; + static int chg_src_pre; static ktime_t start_ktime; static ktime_t end_ktime; @@ -1848,14 +2083,16 @@ void power_jacket_info_update(void) prev_pj_src = htc_batt_info.rep.pj_src; htc_batt_info.rep.pj_src = latest_pj_src; - htc_batt_info.igauge->is_pj_full(&htc_batt_info.rep.pj_full); + htc_batt_info.igauge->is_pj_full(&pj_full_status); if (htc_batt_info.rep.pj_src) { + end_ktime = ktime_get_real(); if (!prev_pj_src || first) { power_jacket_level_update(1); + htc_batt_info.rep.pj_level_pre = htc_batt_info.rep.pj_level; start_ktime = end_ktime; } @@ -1881,9 +2118,19 @@ void power_jacket_info_update(void) if ((chg_dis_reason & chg_dis_pj_mask) || charger_dis_temp_fault) { pj_chg_status = PJ_CHG_STATUS_OFF; } else { - if (htc_batt_info.rep.pj_full == PJ_FULL) { - pj_chg_status = PJ_CHG_STATUS_OFF; + if (pj_full_status == PJ_FULL) { + htc_batt_info.rep.pj_full = 1; htc_batt_info.rep.pj_level = 100; + } else { + if (!first && htc_batt_info.rep.level_raw == 100) { + if ((chg_src_pre == 0 && htc_batt_info.rep.charging_source > 0) || !prev_pj_src) { + if (htc_batt_info.rep.pj_level > 95) + htc_batt_info.rep.pj_full = 1; + } + } + } + if (pj_full_status == PJ_FULL) { + pj_chg_status = PJ_CHG_STATUS_OFF; } else pj_chg_status = PJ_CHG_STATUS_CHG; } @@ -1892,14 +2139,16 @@ void power_jacket_info_update(void) pj_chg_status = PJ_CHG_STATUS_OFF; else pj_chg_status = PJ_CHG_STATUS_DCHG; + + htc_batt_info.rep.pj_full = 0; } - pr_info("[BATT]%s: pj_vol:%d, pj_full_detect:%d, pj_level:%d\n", - __func__, htc_batt_info.rep.pj_vol, - htc_batt_info.rep.pj_full, 
htc_batt_info.rep.pj_level); + pr_info("[BATT]%s: pj_vol:%d, pj_full:%d, pj_status:%d, pj_level:%d\n", + __func__, htc_batt_info.rep.pj_vol, htc_batt_info.rep.pj_full, pj_full_status, htc_batt_info.rep.pj_level); } else { htc_batt_info.rep.pj_vol = 0; htc_batt_info.rep.pj_level = 0; + htc_batt_info.rep.pj_full = 0; pj_chg_status = PJ_CHG_STATUS_OFF; if (delayed_work_pending(&htc_batt_timer.calculate_pj_level_work)) { wake_unlock(&htc_batt_timer.calculate_pj_level_lock); @@ -1936,6 +2185,7 @@ void power_jacket_info_update(void) } first = 0; + chg_src_pre = htc_batt_info.rep.charging_source; } static void calculate_pj_level_worker(struct work_struct *work) @@ -2197,6 +2447,9 @@ static void batt_worker(struct work_struct *work) && htc_batt_info.igauge->calculate_pj_level) power_jacket_info_update(); + + if(qb_mode_enter) + batt_qb_mode_pwr_consumption_check(time_since_last_update_ms); if (htc_batt_info.icharger) { @@ -2602,6 +2855,10 @@ static int htc_battery_probe(struct platform_device *pdev) htc_batt_context_event_handler; htc_battery_core_ptr->func_notify_pnpmgr_charging_enabled = pdata->notify_pnpmgr_charging_enabled; + htc_battery_core_ptr->func_trigger_store_battery_data = + htc_batt_trigger_store_battery_data; + htc_battery_core_ptr->func_qb_mode_shutdown_status = + htc_batt_qb_mode_shutdown_status; htc_battery_core_register(&pdev->dev, htc_battery_core_ptr); @@ -2627,6 +2884,7 @@ static int htc_battery_probe(struct platform_device *pdev) htc_batt_info.overload_vol_thr_mv = pdata->overload_vol_thr_mv; htc_batt_info.overload_curr_thr_ma = pdata->overload_curr_thr_ma; htc_batt_info.smooth_chg_full_delay_min = pdata->smooth_chg_full_delay_min; + htc_batt_info.decreased_batt_level_check = pdata->decreased_batt_level_check; chg_limit_active_mask = pdata->chg_limit_active_mask; #ifdef CONFIG_DUTY_CYCLE_LIMIT chg_limit_timer_sub_mask = pdata->chg_limit_timer_sub_mask; diff --git a/arch/arm/mach-msm/htc_battery_core.c b/arch/arm/mach-msm/htc_battery_core.c index fde11f1c..a08696ab 100644 --- a/arch/arm/mach-msm/htc_battery_core.c +++ b/arch/arm/mach-msm/htc_battery_core.c @@ -547,6 +547,58 @@ static ssize_t htc_battery_set_disable_limit_chg(struct device *dev, return count; } +static ssize_t htc_battery_trigger_store_battery_data(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc = 0; + unsigned long trigger_flag = 0; + + rc = strict_strtoul(buf, 10, &trigger_flag); + if (rc) + return rc; + + BATT_LOG("Set context trigger_flag = %lu", trigger_flag); + + if((trigger_flag != 0) && (trigger_flag != 1)) + return -EINVAL; + + if (!battery_core_info.func.func_trigger_store_battery_data) { + BATT_ERR("No set trigger store battery data function!"); + return -ENOENT; + } + + battery_core_info.func.func_trigger_store_battery_data(trigger_flag); + + return count; +} + +static ssize_t htc_battery_qb_mode_shutdown_status(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc = 0; + unsigned long trigger_flag = 0; + + rc = strict_strtoul(buf, 10, &trigger_flag); + if (rc) + return rc; + + BATT_LOG("Set context trigger_flag = %lu", trigger_flag); + + if((trigger_flag != 0) && (trigger_flag != 1)) + return -EINVAL; + + if (!battery_core_info.func.func_qb_mode_shutdown_status) { + BATT_ERR("No set trigger qb mode shutdown status function!"); + return -ENOENT; + } + + battery_core_info.func.func_qb_mode_shutdown_status(trigger_flag); + + return count; +} + static struct device_attribute htc_battery_attrs[] = { 
HTC_BATTERY_ATTR(batt_id), HTC_BATTERY_ATTR(batt_vol), @@ -590,6 +642,10 @@ static struct device_attribute htc_set_delta_attrs[] = { htc_battery_set_context_event), __ATTR(disable_limit_chg, S_IWUSR | S_IWGRP, NULL, htc_battery_set_disable_limit_chg), + __ATTR(store_battery_data, S_IWUSR | S_IWGRP, NULL, + htc_battery_trigger_store_battery_data), + __ATTR(qb_mode_shutdown, S_IWUSR | S_IWGRP, NULL, + htc_battery_qb_mode_shutdown_status), }; static struct device_attribute htc_battery_rt_attrs[] = { @@ -598,6 +654,7 @@ static struct device_attribute htc_battery_rt_attrs[] = { __ATTR(batt_temp_now, S_IRUGO, htc_battery_rt_attr_show, NULL), __ATTR(pj_exist_now, S_IRUGO, htc_battery_rt_attr_show, NULL), __ATTR(pj_vol_now, S_IRUGO, htc_battery_rt_attr_show, NULL), + __ATTR(voltage_now, S_IRUGO, htc_battery_rt_attr_show, NULL), }; @@ -791,7 +848,7 @@ static ssize_t htc_battery_show_property(struct device *dev, case PJ_STATUS: if (battery_core_info.rep.pj_src) { - if (battery_core_info.rep.pj_full == 3) { + if (battery_core_info.rep.pj_full) { if ((battery_core_info.rep.pj_level - battery_core_info.rep.pj_level_pre) >= 19) BATT_LOG("level diff over 19, level:%d, pre_level:%d\n", battery_core_info.rep.pj_level, battery_core_info.rep.pj_level_pre); @@ -1157,6 +1214,12 @@ int htc_battery_core_register(struct device *dev, if (htc_battery->func_notify_pnpmgr_charging_enabled) battery_core_info.func.func_notify_pnpmgr_charging_enabled = htc_battery->func_notify_pnpmgr_charging_enabled; + if (htc_battery->func_trigger_store_battery_data) + battery_core_info.func.func_trigger_store_battery_data = + htc_battery->func_trigger_store_battery_data; + if (htc_battery->func_qb_mode_shutdown_status) + battery_core_info.func.func_qb_mode_shutdown_status = + htc_battery->func_qb_mode_shutdown_status; for (i = 0; i < ARRAY_SIZE(htc_power_supplies); i++) { diff --git a/arch/arm/mach-msm/htc_restart_handler.c b/arch/arm/mach-msm/htc_restart_handler.c index 7227623b..9d8c977b 100644 --- a/arch/arm/mach-msm/htc_restart_handler.c +++ b/arch/arm/mach-msm/htc_restart_handler.c @@ -54,9 +54,9 @@ int read_backup_cc_uv(void) { pr_info("%s: cc_backup_uv=%d, magic=%x\n", __func__, reboot_params->cc_backup_uv, reboot_params->batt_magic); - if((reboot_params->batt_magic & 0xFFFFFF00) + if((reboot_params->batt_magic & MAGIC_NUM_FOR_BATT_SAVE) == MAGIC_NUM_FOR_BATT_SAVE) { - if ((reboot_params->batt_magic & 0xFF) <= BATT_SAVE_MASK) { + if ((reboot_params->batt_magic & BATT_SAVE_MASK) <= BATT_SAVE_MASK) { if ((reboot_params->batt_magic & HTC_BATT_SAVE_CC) == HTC_BATT_SAVE_CC) return reboot_params->cc_backup_uv; @@ -68,10 +68,9 @@ EXPORT_SYMBOL(read_backup_cc_uv); void write_backup_cc_uv(int cc_reading) { - pr_info("%s: ori cc_backup_uv= %d, cc_reading=%d, magic_num=%x\n", - __func__, reboot_params->cc_backup_uv, cc_reading, - reboot_params->batt_magic); - if ((reboot_params->batt_magic & ~BATT_SAVE_MASK) + pr_info("%s: ori cc_backup_uv= %d, cc_reading=%d\n", __func__, + reboot_params->cc_backup_uv, cc_reading); + if ((reboot_params->batt_magic & MAGIC_NUM_FOR_BATT_SAVE) != MAGIC_NUM_FOR_BATT_SAVE) reboot_params->batt_magic = MAGIC_NUM_FOR_BATT_SAVE; reboot_params->batt_magic |= HTC_BATT_SAVE_CC; @@ -85,9 +84,9 @@ uint16_t read_backup_ocv_at_100(void) { pr_info("%s: ocv_at_100=%x, magic=%x\n", __func__, reboot_params->ocv_reading_at_100, reboot_params->batt_magic); - if((reboot_params->batt_magic & 0xFFFFFF00) + if((reboot_params->batt_magic & MAGIC_NUM_FOR_BATT_SAVE) == MAGIC_NUM_FOR_BATT_SAVE) { - if ((reboot_params->batt_magic & 
0xFF) <= BATT_SAVE_MASK) { + if ((reboot_params->batt_magic & BATT_SAVE_MASK) <= BATT_SAVE_MASK) { if ((reboot_params->batt_magic & HTC_BATT_SAVE_OCV_RAW) == HTC_BATT_SAVE_OCV_RAW) return reboot_params->ocv_reading_at_100; @@ -99,10 +98,9 @@ EXPORT_SYMBOL(read_backup_ocv_at_100); void write_backup_ocv_at_100(uint16_t ocv_reading) { - pr_info("%s: ori ocv_at_100=%x, ocv_reading=%x, magic_num=%x\n", - __func__, reboot_params->ocv_reading_at_100, ocv_reading, - reboot_params->batt_magic); - if((reboot_params->batt_magic & ~BATT_SAVE_MASK) + pr_info("%s: ori ocv_at_100=%x, ocv_reading=%x\n", __func__, + reboot_params->ocv_reading_at_100, ocv_reading); + if((reboot_params->batt_magic & MAGIC_NUM_FOR_BATT_SAVE) != MAGIC_NUM_FOR_BATT_SAVE) reboot_params->batt_magic = MAGIC_NUM_FOR_BATT_SAVE; reboot_params->batt_magic |= HTC_BATT_SAVE_OCV_RAW; @@ -116,9 +114,9 @@ int read_backup_ocv_uv(void) { pr_info("%s: ocv_backup_uv=%d, magic=%x\n", __func__, reboot_params->ocv_backup_uv, reboot_params->batt_magic); - if((reboot_params->batt_magic & 0xFFFFFF00) + if((reboot_params->batt_magic & MAGIC_NUM_FOR_BATT_SAVE) == MAGIC_NUM_FOR_BATT_SAVE) { - if ((reboot_params->batt_magic & 0xFF) <= BATT_SAVE_MASK) { + if ((reboot_params->batt_magic & BATT_SAVE_MASK) <= BATT_SAVE_MASK) { if ((reboot_params->batt_magic & HTC_BATT_SAVE_OCV_UV) == HTC_BATT_SAVE_OCV_UV) return reboot_params->ocv_backup_uv; @@ -130,10 +128,9 @@ EXPORT_SYMBOL(read_backup_ocv_uv); void write_backup_ocv_uv(int ocv_backup) { - pr_info("%s: ori ocv_backup_uv=%d, ocv_backup=%d, magic_num=%x\n", - __func__, reboot_params->ocv_backup_uv, ocv_backup, - reboot_params->batt_magic); - if((reboot_params->batt_magic & ~BATT_SAVE_MASK) + pr_info("%s: ori ocv_backup_uv=%d, ocv_backup=%d\n", __func__, + reboot_params->ocv_backup_uv, ocv_backup); + if((reboot_params->batt_magic & MAGIC_NUM_FOR_BATT_SAVE) != MAGIC_NUM_FOR_BATT_SAVE) reboot_params->batt_magic = MAGIC_NUM_FOR_BATT_SAVE; reboot_params->batt_magic |= HTC_BATT_SAVE_OCV_UV; diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h index 16834ff1..45ab40a0 100644 --- a/arch/arm/mach-msm/include/mach/camera.h +++ b/arch/arm/mach-msm/include/mach/camera.h @@ -25,7 +25,7 @@ #include #include #include -#include +#include #define CONFIG_MSM_CAMERA_DEBUG #ifdef CONFIG_MSM_CAMERA_DEBUG diff --git a/arch/arm/mach-msm/include/mach/htc_battery_8960.h b/arch/arm/mach-msm/include/mach/htc_battery_8960.h index 6e4a8681..7e3d9dad 100644 --- a/arch/arm/mach-msm/include/mach/htc_battery_8960.h +++ b/arch/arm/mach-msm/include/mach/htc_battery_8960.h @@ -121,6 +121,7 @@ struct htc_battery_platform_data { int overload_vol_thr_mv; int overload_curr_thr_ma; int smooth_chg_full_delay_min; + int decreased_batt_level_check; struct htc_gauge igauge; struct htc_charger icharger; int (*get_thermal_sensor_temp)(int sensor_num, unsigned long *temp); diff --git a/arch/arm/mach-msm/include/mach/htc_battery_core.h b/arch/arm/mach-msm/include/mach/htc_battery_core.h index 5f630b8a..0a422d36 100644 --- a/arch/arm/mach-msm/include/mach/htc_battery_core.h +++ b/arch/arm/mach-msm/include/mach/htc_battery_core.h @@ -63,6 +63,7 @@ enum htc_batt_rt_attr { HTC_BATT_RT_TEMPERATURE, HTC_PJ_RT_EXISTS, HTC_PJ_RT_VOLTAGE, + HTC_BATT_RT_VOLTAGE_UV, }; struct battery_info_reply { @@ -102,6 +103,8 @@ struct htc_battery_core { void (*func_set_full_level_dis_batt_chg)(int full_level_dis_batt_chg); int (*func_set_max_input_current)(int target_ma); int (*func_notify_pnpmgr_charging_enabled)(int charging_enabled); 
+ void (*func_trigger_store_battery_data)(int trigger_flag); + void (*func_qb_mode_shutdown_status)(int trigger_flag); }; #ifdef CONFIG_HTC_BATT_CORE extern int htc_battery_core_update_changed(void); diff --git a/arch/arm/mach-msm/include/mach/htc_gauge.h b/arch/arm/mach-msm/include/mach/htc_gauge.h index 9bf546b7..b13032da 100644 --- a/arch/arm/mach-msm/include/mach/htc_gauge.h +++ b/arch/arm/mach-msm/include/mach/htc_gauge.h @@ -23,6 +23,8 @@ enum htc_gauge_event { HTC_GAUGE_EVENT_OVERLOAD, HTC_GAUGE_EVENT_EOC_STOP_CHG, HTC_GAUGE_EVENT_PJ_FULL, + HTC_GAUGE_EVENT_QB_MODE_ENTER, + HTC_GAUGE_EVENT_QB_MODE_DO_REAL_POWEROFF, }; struct htc_gauge { @@ -34,6 +36,9 @@ struct htc_gauge { int (*get_battery_id)(int *result); int (*get_battery_soc)(int *result); int (*get_battery_cc)(int *result); + int (*store_battery_data)(void); + int (*store_battery_ui_soc)(int soc_ui); + int (*get_battery_ui_soc)(void); int (*is_battery_temp_fault)(int *result); int (*is_battery_full)(int *result); int (*is_pj_full)(int *result); @@ -48,9 +53,12 @@ struct htc_gauge { int (*enable_lower_voltage_alarm)(int enable); int (*set_lower_voltage_alarm_threshold)(int thres_mV); int (*set_chg_ovp)(int is_ovp); + int (*enter_qb_mode)(void); + int (*exit_qb_mode)(void); + int (*qb_mode_pwr_consumption_check)(unsigned long time_stamp); }; int htc_gauge_event_notify(enum htc_gauge_event); int htc_gauge_get_battery_voltage(int *result); int htc_gauge_set_chg_ovp(int is_ovp); -#endif +#endif \ No newline at end of file diff --git a/arch/arm/mach-msm/include/mach/htc_usb.h b/arch/arm/mach-msm/include/mach/htc_usb.h index 21e4f23d..e31b3878 100644 --- a/arch/arm/mach-msm/include/mach/htc_usb.h +++ b/arch/arm/mach-msm/include/mach/htc_usb.h @@ -1113,7 +1113,7 @@ static struct android_usb_product usb_products[] = { .functions = usb_functions_adb, }, { - .product_id = 0x0f91, + .product_id = 0x0f25, .num_functions = ARRAY_SIZE(usb_functions_mtp_ums), .functions = usb_functions_mtp_ums, }, diff --git a/arch/arm/mach-msm/include/mach/tfa9887.h b/arch/arm/mach-msm/include/mach/tfa9887.h index 0d3952a4..5ec9da4f 100644 --- a/arch/arm/mach-msm/include/mach/tfa9887.h +++ b/arch/arm/mach-msm/include/mach/tfa9887.h @@ -13,6 +13,9 @@ struct tfa9887_platform_data { }; +extern int tfa9887l_step; +extern int tfa9887l_step_en; + void set_tfa9887_spkamp(int en, int dsp_mode); void set_tfa9887l_spkamp(int en, int dsp_mode); int tfa9887_l_write(char *txData, int length); diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c index 2f5b25a3..35cac2d1 100644 --- a/arch/arm/mach-msm/mdm_common.c +++ b/arch/arm/mach-msm/mdm_common.c @@ -173,7 +173,7 @@ static ssize_t modem_silent_reset_info_store(struct device *dev, snprintf(msr_info_list[mdm_msr_index].modem_errmsg, RD_BUF_SIZE, "%s", buf); len = strlen(msr_info_list[mdm_msr_index].modem_errmsg); - if ( len == 0) + if ( len == 0 ) { spin_unlock_irqrestore(&msr_info_lock, flags); return count; diff --git a/arch/arm/mach-msm/pil-riva.c b/arch/arm/mach-msm/pil-riva.c index 77716663..69b9b90f 100644 --- a/arch/arm/mach-msm/pil-riva.c +++ b/arch/arm/mach-msm/pil-riva.c @@ -378,8 +378,6 @@ static int pil_riva_reset(struct pil_desc *pil) static int pil_riva_shutdown(struct pil_desc *pil) { - printk("[WLAN] pil_riva_shutdown"); - writel_relaxed(1, RIVA_RESET); mb(); diff --git a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c index 54865883..b27364ae 100644 --- a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c +++ b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c @@ -668,7 
+668,7 @@ static int register_memory(void) } kvptr = ion_map_kernel(acdb_data.ion_client, - acdb_data.ion_handle, 0); + acdb_data.ion_handle); if (IS_ERR_OR_NULL(kvptr)) { pr_err("%s: Could not get kernel virt addr!!!\n", __func__); result = PTR_ERR(kvptr); diff --git a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c index d3a5b7a6..0c933236 100644 --- a/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c +++ b/arch/arm/mach-msm/qdsp6v2/audio_utils_aio.c @@ -768,7 +768,7 @@ static int audio_aio_ion_add(struct q6audio_aio *audio, goto flag_error; } - temp_ptr = ion_map_kernel(audio->client, handle, ionflag); + temp_ptr = ion_map_kernel(audio->client, handle); if (IS_ERR_OR_NULL(temp_ptr)) { pr_err("%s: could not get virtual address\n", __func__); goto map_error; diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 68c02296..3c61c72e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include @@ -31,11 +33,85 @@ #include #include #include +#include #include #include "mm.h" +static void __dma_page_cpu_to_dev(struct page *, unsigned long, + size_t, enum dma_data_direction); +static void __dma_page_dev_to_cpu(struct page *, unsigned long, + size_t, enum dma_data_direction); + +/** + * arm_dma_map_page - map a portion of a page for streaming DMA + * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices + * @page: page that buffer resides in + * @offset: offset into page for start of buffer + * @size: size of buffer to map + * @dir: DMA transfer direction + * + * Ensure that any data held in the cache is appropriately discarded + * or written back. + * + * The device owns this memory once this call has completed. The CPU + * can regain ownership by calling dma_unmap_page(). 
+ */ +static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + if (!arch_is_coherent()) + __dma_page_cpu_to_dev(page, offset, size, dir); + return pfn_to_dma(dev, page_to_pfn(page)) + offset; +} + +static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + if (!arch_is_coherent()) + __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), + handle & ~PAGE_MASK, size, dir); +} + +static void arm_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + unsigned int offset = handle & (PAGE_SIZE - 1); + struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); + if (!arch_is_coherent()) + __dma_page_dev_to_cpu(page, offset, size, dir); +} + +static void arm_dma_sync_single_for_device(struct device *dev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + unsigned int offset = handle & (PAGE_SIZE - 1); + struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); + if (!arch_is_coherent()) + __dma_page_cpu_to_dev(page, offset, size, dir); +} + +static int arm_dma_set_mask(struct device *dev, u64 dma_mask); + +struct dma_map_ops arm_dma_ops = { + .alloc = arm_dma_alloc, + .free = arm_dma_free, + .mmap = arm_dma_mmap, + .map_page = arm_dma_map_page, + .unmap_page = arm_dma_unmap_page, + .map_sg = arm_dma_map_sg, + .unmap_sg = arm_dma_unmap_sg, + .sync_single_for_cpu = arm_dma_sync_single_for_cpu, + .sync_single_for_device = arm_dma_sync_single_for_device, + .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, + .sync_sg_for_device = arm_dma_sync_sg_for_device, + .set_dma_mask = arm_dma_set_mask, +}; +EXPORT_SYMBOL(arm_dma_ops); + static u64 get_coherent_dma_mask(struct device *dev) { u64 mask = (u64)arm_dma_limit; @@ -61,11 +137,26 @@ static u64 get_coherent_dma_mask(struct device *dev) static void __dma_clear_buffer(struct page *page, size_t size) { - void *ptr; - ptr = page_address(page); - memset(ptr, 0, size); - dmac_flush_range(ptr, ptr + size); - outer_flush_range(__pa(ptr), __pa(ptr) + size); + if (!PageHighMem(page)) { + void *ptr = page_address(page); + if (ptr) { + memset(ptr, 0, size); + dmac_flush_range(ptr, ptr + size); + outer_flush_range(__pa(ptr), __pa(ptr) + size); + } + } else { + phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); + phys_addr_t end = base + size; + while (size > 0) { + void *ptr = kmap_atomic(page); + memset(ptr, 0, PAGE_SIZE); + dmac_flush_range(ptr, ptr + PAGE_SIZE); + kunmap_atomic(ptr); + page++; + size -= PAGE_SIZE; + } + outer_flush_range(base, end); + } } static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) @@ -106,7 +197,7 @@ static pte_t **consistent_pte; #define DEFAULT_CONSISTENT_DMA_SIZE (7*SZ_2M) -unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; +static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; void __init init_consistent_dma_size(unsigned long size) { @@ -160,14 +251,14 @@ static int __init consistent_init(void) pud = pud_alloc(&init_mm, pgd, base); if (!pud) { - printk(KERN_ERR "%s: no pud tables\n", __func__); + pr_err("%s: no pud tables\n", __func__); ret = -ENOMEM; break; } pmd = pmd_alloc(&init_mm, pud, base); if (!pmd) { - printk(KERN_ERR "%s: no pmd tables\n", __func__); + pr_err("%s: no pmd tables\n", __func__); ret = -ENOMEM; break; } @@ -175,7 +266,7 @@ static int 
__init consistent_init(void) pte = pte_alloc_kernel(pmd, base); if (!pte) { - printk(KERN_ERR "%s: no pte tables\n", __func__); + pr_err("%s: no pte tables\n", __func__); ret = -ENOMEM; break; } @@ -189,14 +280,15 @@ static int __init consistent_init(void) core_initcall(consistent_init); static void *__alloc_from_contiguous(struct device *dev, size_t size, - pgprot_t prot, struct page **ret_page); + pgprot_t prot, struct page **ret_page, + bool no_kernel_mapping, const void *caller); static struct arm_vmregion_head coherent_head = { .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock), .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), }; -size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; +static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; static int __init early_coherent_pool(char *p) { @@ -215,7 +307,8 @@ static int __init coherent_init(void) if (!IS_ENABLED(CONFIG_CMA)) return 0; - ptr = __alloc_from_contiguous(NULL, size, prot, &page); + ptr = __alloc_from_contiguous(NULL, size, prot, &page, false, + coherent_init); if (ptr) { coherent_head.vm_start = (unsigned long) ptr; coherent_head.vm_end = (unsigned long) ptr + size; @@ -257,7 +350,7 @@ void __init dma_contiguous_remap(void) if (end > arm_lowmem_limit) end = arm_lowmem_limit; if (start >= end) - return; + continue; map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); @@ -265,7 +358,7 @@ void __init dma_contiguous_remap(void) map.type = MT_MEMORY_DMA_READY; for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); - addr += PGDIR_SIZE) + addr += PMD_SIZE) pmd_clear(pmd_off_k(addr)); iotable_init(&map, 1); @@ -281,7 +374,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, int bit; if (!consistent_pte) { - printk(KERN_ERR "%s: not initialised\n", __func__); + pr_err("%s: not initialised\n", __func__); dump_stack(); return NULL; } @@ -299,7 +392,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); pte = consistent_pte[idx] + off; - c->vm_pages = page; + c->priv = page; do { BUG_ON(!pte_none(*pte)); @@ -321,7 +414,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, return NULL; } -static void __dma_free_remap(void *cpu_addr, size_t size) +static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn) { struct arm_vmregion *c; unsigned long addr; @@ -331,14 +424,16 @@ static void __dma_free_remap(void *cpu_addr, size_t size) c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr); if (!c) { - printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", - __func__, cpu_addr); - dump_stack(); + if (!no_warn) { + pr_err("%s: trying to free invalid coherent area: %p\n", + __func__, cpu_addr); + dump_stack(); + } return; } if ((c->vm_end - c->vm_start) != size) { - printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", + pr_err("%s: freeing wrong coherent size (%ld != %d)\n", __func__, c->vm_end - c->vm_start, size); dump_stack(); size = c->vm_end - c->vm_start; @@ -360,8 +455,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size) } if (pte_none(pte) || !pte_present(pte)) - printk(KERN_CRIT "%s: bad page in kernel page table\n", - __func__); + pr_crit("%s: bad page in kernel page table\n", + __func__); } while (size -= PAGE_SIZE); flush_tlb_kernel_range(c->vm_start, c->vm_end); @@ -379,12 +474,27 @@ static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr, return 0; } -static void 
__dma_remap(struct page *page, size_t size, pgprot_t prot) +static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr, + void *data) +{ + pte_clear(&init_mm, addr, pte); + return 0; +} + +static void __dma_remap(struct page *page, size_t size, pgprot_t prot, + bool no_kernel_map) { unsigned long start = (unsigned long) page_address(page); unsigned end = start + size; + int (*func)(pte_t *pte, pgtable_t token, unsigned long addr, + void *data); - apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); + if (no_kernel_map) + func = __dma_clear_pte; + else + func = __dma_update_pte; + + apply_to_page_range(&init_mm, start, size, func, &prot); dsb(); flush_tlb_kernel_range(start, end); } @@ -455,31 +565,65 @@ static int __free_from_pool(void *cpu_addr, size_t size) return 1; } +#define NO_KERNEL_MAPPING_DUMMY 0x2222 static void *__alloc_from_contiguous(struct device *dev, size_t size, - pgprot_t prot, struct page **ret_page) + pgprot_t prot, struct page **ret_page, + bool no_kernel_mapping, + const void *caller) { unsigned long order = get_order(size); size_t count = size >> PAGE_SHIFT; struct page *page; + void *ptr; page = dma_alloc_from_contiguous(dev, count, order); if (!page) return NULL; __dma_clear_buffer(page, size); - __dma_remap(page, size, prot); + if (!PageHighMem(page)) { + __dma_remap(page, size, prot, no_kernel_mapping); + ptr = page_address(page); + } else { + if (no_kernel_mapping) { + ptr = (void *)NO_KERNEL_MAPPING_DUMMY; + } else { + ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, + caller); + if (!ptr) { + dma_release_from_contiguous(dev, page, count); + return NULL; + } + } + } *ret_page = page; - return page_address(page); + return ptr; } static void __free_from_contiguous(struct device *dev, struct page *page, - size_t size) + void *cpu_addr, size_t size) { - __dma_remap(page, size, pgprot_kernel); + if (!PageHighMem(page)) + __dma_remap(page, size, pgprot_kernel, false); + else + __dma_free_remap(cpu_addr, size, true); dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); } +static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) +{ + if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) + prot = pgprot_writecombine(prot); + else if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs)) + prot = pgprot_stronglyordered(prot); + + else if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) + prot = pgprot_dmacoherent(prot); + + return prot; +} + #define nommu() 0 #else @@ -488,10 +632,11 @@ static void __free_from_contiguous(struct device *dev, struct page *page, #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL #define __alloc_from_pool(dev, size, ret_page, c) NULL -#define __alloc_from_contiguous(dev, size, prot, ret) NULL +#define __alloc_from_contiguous(dev, size, prot, ret, w) NULL #define __free_from_pool(cpu_addr, size) 0 #define __free_from_contiguous(dev, page, size) do { } while (0) #define __dma_free_remap(cpu_addr, size) do { } while (0) +#define __get_dma_pgprot(attrs, prot) __pgprot(0) #endif @@ -510,7 +655,8 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t gfp, pgprot_t prot, const void *caller) + gfp_t gfp, pgprot_t prot, const void *caller, + bool no_kernel_mapping) { u64 mask = get_coherent_dma_mask(dev); struct page *page; @@ -533,7 +679,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp &= ~(__GFP_COMP); - *handle = ~0; + *handle = 
DMA_ERROR_CODE; size = PAGE_ALIGN(size); if (arch_is_coherent() || nommu()) @@ -543,7 +689,8 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, else if (gfp & GFP_ATOMIC) addr = __alloc_from_pool(dev, size, &page, caller); else - addr = __alloc_from_contiguous(dev, size, prot, &page); + addr = __alloc_from_contiguous(dev, size, prot, &page, + no_kernel_mapping, caller); if (addr) { *handle = pfn_to_dma(dev, page_to_pfn(page)); @@ -553,35 +700,33 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, return addr; } -void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t gfp) +void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp, struct dma_attrs *attrs) { + pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); void *memory; + bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, + attrs); if (dma_alloc_from_coherent(dev, size, handle, &memory)) return memory; - return __dma_alloc(dev, size, handle, gfp, - pgprot_dmacoherent(pgprot_kernel), - __builtin_return_address(0)); -} -EXPORT_SYMBOL(dma_alloc_coherent); - -void * -dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) -{ - return __dma_alloc(dev, size, handle, gfp, - pgprot_writecombine(pgprot_kernel), - __builtin_return_address(0)); + return __dma_alloc(dev, size, handle, gfp, prot, + __builtin_return_address(0), no_kernel_mapping); } -EXPORT_SYMBOL(dma_alloc_writecombine); -static int dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size) +int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + struct dma_attrs *attrs) { int ret = -ENXIO; #ifdef CONFIG_MMU unsigned long pfn = dma_to_pfn(dev, dma_addr); + vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); + + if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + return ret; + ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, vma->vm_end - vma->vm_start, @@ -591,24 +736,8 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma, return ret; } -int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size) -{ - vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot); - return dma_mmap(dev, vma, cpu_addr, dma_addr, size); -} -EXPORT_SYMBOL(dma_mmap_coherent); - -int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size) -{ - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - return dma_mmap(dev, vma, cpu_addr, dma_addr, size); -} -EXPORT_SYMBOL(dma_mmap_writecombine); - - -void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) +void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, struct dma_attrs *attrs) { struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); @@ -620,56 +749,15 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr if (arch_is_coherent() || nommu()) { __dma_free_buffer(page, size); } else if (!IS_ENABLED(CONFIG_CMA)) { - __dma_free_remap(cpu_addr, size); + __dma_free_remap(cpu_addr, size, false); __dma_free_buffer(page, size); } else { if (__free_from_pool(cpu_addr, size)) return; WARN_ON(irqs_disabled()); - __free_from_contiguous(dev, page, size); + __free_from_contiguous(dev, page, cpu_addr, size); } } 
-EXPORT_SYMBOL(dma_free_coherent); - -void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, - enum dma_data_direction dir) -{ -#ifdef CONFIG_OUTER_CACHE - unsigned long paddr; - - BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); -#endif - - dmac_map_area(kaddr, size, dir); - -#ifdef CONFIG_OUTER_CACHE - paddr = __pa(kaddr); - if (dir == DMA_FROM_DEVICE) { - outer_inv_range(paddr, paddr + size); - } else { - outer_clean_range(paddr, paddr + size); - } -#endif - -} -EXPORT_SYMBOL(___dma_single_cpu_to_dev); - -void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, - enum dma_data_direction dir) -{ -#ifdef CONFIG_OUTER_CACHE - BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); - - - - if (dir != DMA_TO_DEVICE) { - unsigned long paddr = __pa(kaddr); - outer_inv_range(paddr, paddr + size); - } -#endif - dmac_unmap_area(kaddr, size, dir); -} -EXPORT_SYMBOL(___dma_single_dev_to_cpu); static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, @@ -709,7 +797,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, } while (left); } -void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, +static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { unsigned long paddr; @@ -724,9 +812,8 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, } } -EXPORT_SYMBOL(___dma_page_cpu_to_dev); -void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, +static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { unsigned long paddr = page_to_phys(page) + off; @@ -741,82 +828,66 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) set_bit(PG_dcache_clean, &page->flags); } -EXPORT_SYMBOL(___dma_page_dev_to_cpu); -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) +int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, struct dma_attrs *attrs) { + struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i, j; - BUG_ON(!valid_dma_direction(dir)); - for_each_sg(sg, s, nents, i) { - s->dma_address = __dma_map_page(dev, sg_page(s), s->offset, - s->length, dir); +#ifdef CONFIG_NEED_SG_DMA_LENGTH + s->dma_length = s->length; +#endif + s->dma_address = ops->map_page(dev, sg_page(s), s->offset, + s->length, dir, attrs); if (dma_mapping_error(dev, s->dma_address)) goto bad_mapping; } - debug_dma_map_sg(dev, sg, nents, nents, dir); return nents; bad_mapping: for_each_sg(sg, s, i, j) - __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); + ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); return 0; } -EXPORT_SYMBOL(dma_map_sg); -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) +void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, struct dma_attrs *attrs) { + struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; - int i; - debug_dma_unmap_sg(dev, sg, nents, dir); + int i; for_each_sg(sg, s, nents, i) - __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); + ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); } -EXPORT_SYMBOL(dma_unmap_sg); -void dma_sync_sg_for_cpu(struct device *dev, struct 
scatterlist *sg, +void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { + struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i; - for_each_sg(sg, s, nents, i) { - if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, - sg_dma_len(s), dir)) - continue; - - __dma_page_dev_to_cpu(sg_page(s), s->offset, - s->length, dir); - } - - debug_dma_sync_sg_for_cpu(dev, sg, nents, dir); + for_each_sg(sg, s, nents, i) + ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, + dir); } -EXPORT_SYMBOL(dma_sync_sg_for_cpu); -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, +void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { + struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i; - for_each_sg(sg, s, nents, i) { - if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0, - sg_dma_len(s), dir)) - continue; - - __dma_page_cpu_to_dev(sg_page(s), s->offset, - s->length, dir); - } - - debug_dma_sync_sg_for_device(dev, sg, nents, dir); + for_each_sg(sg, s, nents, i) + ops->sync_single_for_device(dev, sg_dma_address(s), s->length, + dir); } -EXPORT_SYMBOL(dma_sync_sg_for_device); int dma_supported(struct device *dev, u64 mask) { @@ -826,18 +897,15 @@ int dma_supported(struct device *dev, u64 mask) } EXPORT_SYMBOL(dma_supported); -int dma_set_mask(struct device *dev, u64 dma_mask) +static int arm_dma_set_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; -#ifndef CONFIG_DMABOUNCE *dev->dma_mask = dma_mask; -#endif return 0; } -EXPORT_SYMBOL(dma_set_mask); #define PREALLOC_DMA_DEBUG_ENTRIES 4096 @@ -850,3 +918,572 @@ static int __init dma_debug_do_init(void) return 0; } fs_initcall(dma_debug_do_init); + +#ifdef CONFIG_ARM_DMA_USE_IOMMU + + +static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, + size_t size) +{ + unsigned int order = get_order(size); + unsigned int align = 0; + unsigned int count, start; + unsigned long flags; + + count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) + + (1 << mapping->order) - 1) >> mapping->order; + + if (order > mapping->order) + align = (1 << (order - mapping->order)) - 1; + + spin_lock_irqsave(&mapping->lock, flags); + start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0, + count, align); + if (start > mapping->bits) { + spin_unlock_irqrestore(&mapping->lock, flags); + return DMA_ERROR_CODE; + } + + bitmap_set(mapping->bitmap, start, count); + spin_unlock_irqrestore(&mapping->lock, flags); + + return mapping->base + (start << (mapping->order + PAGE_SHIFT)); +} + +static inline void __free_iova(struct dma_iommu_mapping *mapping, + dma_addr_t addr, size_t size) +{ + unsigned int start = (addr - mapping->base) >> + (mapping->order + PAGE_SHIFT); + unsigned int count = ((size >> PAGE_SHIFT) + + (1 << mapping->order) - 1) >> mapping->order; + unsigned long flags; + + spin_lock_irqsave(&mapping->lock, flags); + bitmap_clear(mapping->bitmap, start, count); + spin_unlock_irqrestore(&mapping->lock, flags); +} + +static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) +{ + struct page **pages; + int count = size >> PAGE_SHIFT; + int array_size = count * sizeof(struct page *); + int i = 0; + + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, gfp); + else + pages = vzalloc(array_size); + if (!pages) + return NULL; + + while (count) { + int j, order = __fls(count); + + pages[i] = 
alloc_pages(gfp | __GFP_NOWARN, order); + while (!pages[i] && order) + pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order); + if (!pages[i]) + goto error; + + if (order) + split_page(pages[i], order); + j = 1 << order; + while (--j) + pages[i + j] = pages[i] + j; + + __dma_clear_buffer(pages[i], PAGE_SIZE << order); + i += 1 << order; + count -= 1 << order; + } + + return pages; +error: + while (--i) + if (pages[i]) + __free_pages(pages[i], 0); + if (array_size < PAGE_SIZE) + kfree(pages); + else + vfree(pages); + return NULL; +} + +static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size) +{ + int count = size >> PAGE_SHIFT; + int array_size = count * sizeof(struct page *); + int i; + for (i = 0; i < count; i++) + if (pages[i]) + __free_pages(pages[i], 0); + if (array_size < PAGE_SIZE) + kfree(pages); + else + vfree(pages); + return 0; +} + +static void * +__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot) +{ + struct arm_vmregion *c; + size_t align; + size_t count = size >> PAGE_SHIFT; + int bit; + + if (!consistent_pte[0]) { + pr_err("%s: not initialised\n", __func__); + dump_stack(); + return NULL; + } + + bit = fls(size - 1); + if (bit > SECTION_SHIFT) + bit = SECTION_SHIFT; + align = 1 << bit; + + c = arm_vmregion_alloc(&consistent_head, align, size, + gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL); + if (c) { + pte_t *pte; + int idx = CONSISTENT_PTE_INDEX(c->vm_start); + int i = 0; + u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); + + pte = consistent_pte[idx] + off; + c->priv = pages; + + do { + BUG_ON(!pte_none(*pte)); + + set_pte_ext(pte, mk_pte(pages[i], prot), 0); + pte++; + off++; + i++; + if (off >= PTRS_PER_PTE) { + off = 0; + pte = consistent_pte[++idx]; + } + } while (i < count); + + dsb(); + + return (void *)c->vm_start; + } + return NULL; +} + +static dma_addr_t +__iommu_create_mapping(struct device *dev, struct page **pages, size_t size) +{ + struct dma_iommu_mapping *mapping = dev->archdata.mapping; + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + dma_addr_t dma_addr, iova; + int i, ret = DMA_ERROR_CODE; + + dma_addr = __alloc_iova(mapping, size); + if (dma_addr == DMA_ERROR_CODE) + return dma_addr; + + iova = dma_addr; + for (i = 0; i < count; ) { + unsigned int next_pfn = page_to_pfn(pages[i]) + 1; + phys_addr_t phys = page_to_phys(pages[i]); + unsigned int len, j; + + for (j = i + 1; j < count; j++, next_pfn++) + if (page_to_pfn(pages[j]) != next_pfn) + break; + + len = (j - i) << PAGE_SHIFT; + ret = iommu_map(mapping->domain, iova, phys, len, 0); + if (ret < 0) + goto fail; + iova += len; + i = j; + } + return dma_addr; +fail: + iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); + __free_iova(mapping, dma_addr, size); + return DMA_ERROR_CODE; +} + +static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) +{ + struct dma_iommu_mapping *mapping = dev->archdata.mapping; + + size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); + iova &= PAGE_MASK; + + iommu_unmap(mapping->domain, iova, size); + __free_iova(mapping, iova, size); + return 0; +} + +static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) +{ + pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); + struct page **pages; + void *addr = NULL; + + *handle = DMA_ERROR_CODE; + size = PAGE_ALIGN(size); + + pages = __iommu_alloc_buffer(dev, size, gfp); + if (!pages) + return NULL; + + *handle = __iommu_create_mapping(dev, pages, size); + if (*handle 
== DMA_ERROR_CODE) + goto err_buffer; + + addr = __iommu_alloc_remap(pages, size, gfp, prot); + if (!addr) + goto err_mapping; + + return addr; + +err_mapping: + __iommu_remove_mapping(dev, *handle, size); +err_buffer: + __iommu_free_buffer(dev, pages, size); + return NULL; +} + +static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + struct dma_attrs *attrs) +{ + struct arm_vmregion *c; + + vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); + c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr); + + if (c) { + struct page **pages = c->priv; + + unsigned long uaddr = vma->vm_start; + unsigned long usize = vma->vm_end - vma->vm_start; + int i = 0; + + do { + int ret; + + ret = vm_insert_page(vma, uaddr, pages[i++]); + if (ret) { + pr_err("Remapping memory, error: %d\n", ret); + return ret; + } + + uaddr += PAGE_SIZE; + usize -= PAGE_SIZE; + } while (usize > 0); + } + return 0; +} + +void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, struct dma_attrs *attrs) +{ + struct arm_vmregion *c; + size = PAGE_ALIGN(size); + + c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr); + if (c) { + struct page **pages = c->priv; + __dma_free_remap(cpu_addr, size, false); + __iommu_remove_mapping(dev, handle, size); + __iommu_free_buffer(dev, pages, size); + } +} + +static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, + size_t size, dma_addr_t *handle, + enum dma_data_direction dir) +{ + struct dma_iommu_mapping *mapping = dev->archdata.mapping; + dma_addr_t iova, iova_base; + int ret = 0; + unsigned int count; + struct scatterlist *s; + + size = PAGE_ALIGN(size); + *handle = DMA_ERROR_CODE; + + iova_base = iova = __alloc_iova(mapping, size); + if (iova == DMA_ERROR_CODE) + return -ENOMEM; + + for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { + phys_addr_t phys = page_to_phys(sg_page(s)); + unsigned int len = PAGE_ALIGN(s->offset + s->length); + + if (!arch_is_coherent()) + __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); + + ret = iommu_map(mapping->domain, iova, phys, len, 0); + if (ret < 0) + goto fail; + count += len >> PAGE_SHIFT; + iova += len; + } + *handle = iova_base; + + return 0; +fail: + iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); + __free_iova(mapping, iova_base, size); + return ret; +} + +int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, struct dma_attrs *attrs) +{ + struct scatterlist *s = sg, *dma = sg, *start = sg; + int i, count = 0; + unsigned int offset = s->offset; + unsigned int size = s->offset + s->length; + unsigned int max = dma_get_max_seg_size(dev); + + for (i = 1; i < nents; i++) { + s = sg_next(s); + + s->dma_address = DMA_ERROR_CODE; + s->dma_length = 0; + + if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { + if (__map_sg_chunk(dev, start, size, &dma->dma_address, + dir) < 0) + goto bad_mapping; + + dma->dma_address += offset; + dma->dma_length = size - offset; + + size = offset = s->offset; + start = s; + dma = sg_next(dma); + count += 1; + } + size += s->length; + } + if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0) + goto bad_mapping; + + dma->dma_address += offset; + dma->dma_length = size - offset; + + return count+1; + +bad_mapping: + for_each_sg(sg, s, count, i) + __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); + return 0; +} + +void 
arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, struct dma_attrs *attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + if (sg_dma_len(s)) + __iommu_remove_mapping(dev, sg_dma_address(s), + sg_dma_len(s)); + if (!arch_is_coherent()) + __dma_page_dev_to_cpu(sg_page(s), s->offset, + s->length, dir); + } +} + +void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) + if (!arch_is_coherent()) + __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); + +} + +void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) + if (!arch_is_coherent()) + __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); +} + + +static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_iommu_mapping *mapping = dev->archdata.mapping; + dma_addr_t dma_addr; + int ret, len = PAGE_ALIGN(size + offset); + + if (!arch_is_coherent()) + __dma_page_cpu_to_dev(page, offset, size, dir); + + dma_addr = __alloc_iova(mapping, len); + if (dma_addr == DMA_ERROR_CODE) + return dma_addr; + + ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0); + if (ret < 0) + goto fail; + + return dma_addr + offset; +fail: + __free_iova(mapping, dma_addr, len); + return DMA_ERROR_CODE; +} + +static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_iommu_mapping *mapping = dev->archdata.mapping; + dma_addr_t iova = handle & PAGE_MASK; + struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); + int offset = handle & ~PAGE_MASK; + int len = PAGE_ALIGN(size + offset); + + if (!iova) + return; + + if (!arch_is_coherent()) + __dma_page_dev_to_cpu(page, offset, size, dir); + + iommu_unmap(mapping->domain, iova, len); + __free_iova(mapping, iova, len); +} + +static void arm_iommu_sync_single_for_cpu(struct device *dev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + struct dma_iommu_mapping *mapping = dev->archdata.mapping; + dma_addr_t iova = handle & PAGE_MASK; + struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); + unsigned int offset = handle & ~PAGE_MASK; + + if (!iova) + return; + + if (!arch_is_coherent()) + __dma_page_dev_to_cpu(page, offset, size, dir); +} + +static void arm_iommu_sync_single_for_device(struct device *dev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + struct dma_iommu_mapping *mapping = dev->archdata.mapping; + dma_addr_t iova = handle & PAGE_MASK; + struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); + unsigned int offset = handle & ~PAGE_MASK; + + if (!iova) + return; + + __dma_page_cpu_to_dev(page, offset, size, dir); +} + +struct dma_map_ops iommu_ops = { + .alloc = arm_iommu_alloc_attrs, + .free = arm_iommu_free_attrs, + .mmap = arm_iommu_mmap_attrs, + + .map_page = arm_iommu_map_page, + .unmap_page = arm_iommu_unmap_page, + .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, + .sync_single_for_device = arm_iommu_sync_single_for_device, + + .map_sg = arm_iommu_map_sg, + .unmap_sg = arm_iommu_unmap_sg, + .sync_sg_for_cpu 
= arm_iommu_sync_sg_for_cpu, + .sync_sg_for_device = arm_iommu_sync_sg_for_device, +}; + +struct dma_iommu_mapping * +arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, + int order) +{ + unsigned int count = size >> (PAGE_SHIFT + order); + unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); + struct dma_iommu_mapping *mapping; + int err = -ENOMEM; + + if (!count) + return ERR_PTR(-EINVAL); + + mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); + if (!mapping) + goto err; + + mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + if (!mapping->bitmap) + goto err2; + + mapping->base = base; + mapping->bits = BITS_PER_BYTE * bitmap_size; + mapping->order = order; + spin_lock_init(&mapping->lock); + + mapping->domain = iommu_domain_alloc(bus); + if (!mapping->domain) + goto err3; + + kref_init(&mapping->kref); + return mapping; +err3: + kfree(mapping->bitmap); +err2: + kfree(mapping); +err: + return ERR_PTR(err); +} + +static void release_iommu_mapping(struct kref *kref) +{ + struct dma_iommu_mapping *mapping = + container_of(kref, struct dma_iommu_mapping, kref); + + iommu_domain_free(mapping->domain); + kfree(mapping->bitmap); + kfree(mapping); +} + +void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) +{ + if (mapping) + kref_put(&mapping->kref, release_iommu_mapping); +} + +int arm_iommu_attach_device(struct device *dev, + struct dma_iommu_mapping *mapping) +{ + int err; + + err = iommu_attach_device(mapping->domain, dev); + if (err) + return err; + + kref_get(&mapping->kref); + dev->archdata.mapping = mapping; + set_dma_ops(dev, &iommu_ops); + + pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev)); + return 0; +} + +#endif diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 4384b087..26eae7f3 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -256,8 +256,8 @@ void __init setup_dma_zone(struct machine_desc *mdesc) #endif } -#ifdef CONFIG_ARCH_POPULATES_NODE_MAP -static void __init arm_bootmem_free_apnm(unsigned long max_low, +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +static void __init arm_bootmem_free_hmnm(unsigned long max_low, unsigned long max_high) { unsigned long max_zone_pfns[MAX_NR_ZONES]; @@ -273,7 +273,7 @@ static void __init arm_bootmem_free_apnm(unsigned long max_low, unsigned long start = memblock_region_memory_base_pfn(reg); unsigned long end = memblock_region_memory_end_pfn(reg); - add_active_range(0, start, end); + memblock_set_node(PFN_PHYS(start), PFN_PHYS(end - start), 0); } free_area_init_nodes(max_zone_pfns); } @@ -497,8 +497,8 @@ void __init bootmem_init(void) */ sparse_init(); -#ifdef CONFIG_ARCH_POPULATES_NODE_MAP - arm_bootmem_free_apnm(max_low, max_high); +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP + arm_bootmem_free_hmnm(max_low, max_high); #else /* * Now free the memory - free_area_init_node needs diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h index 162be662..bf312c35 100644 --- a/arch/arm/mm/vmregion.h +++ b/arch/arm/mm/vmregion.h @@ -17,7 +17,7 @@ struct arm_vmregion { struct list_head vm_list; unsigned long vm_start; unsigned long vm_end; - struct page *vm_pages; + void *priv; int vm_active; const void *caller; }; diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index e62af21a..5ad35d79 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -84,6 +84,7 @@ static void vfp_force_reload(unsigned int cpu, struct thread_info *thread) } #ifdef CONFIG_SMP thread->vfpstate.hard.cpu = NR_CPUS; + vfp_current_hw_state[cpu] = NULL; #endif } 
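The dma-mapping.c hunks above add the IOMMU-backed dma_map_ops (iommu_ops) plus the arm_iommu_create_mapping()/arm_iommu_attach_device()/arm_iommu_release_mapping() helpers. The following is an illustrative sketch only, not part of this patch: it shows how a hypothetical platform driver might route a device through the new IOMMU mapping path. The example_setup_iommu() name, the 0x80000000 IOVA base, the SZ_128M window size and the order-0 granularity are all made-up example values, and the choice of platform_bus_type is an assumption.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>

static struct dma_iommu_mapping *example_mapping;

static int example_setup_iommu(struct device *dev)
{
	int ret;

	/* Reserve one IOVA window; base/size are arbitrary example values,
	 * order 0 means the bitmap allocates in single-page units. */
	example_mapping = arm_iommu_create_mapping(&platform_bus_type,
						   0x80000000, SZ_128M, 0);
	if (IS_ERR(example_mapping))
		return PTR_ERR(example_mapping);

	/* After a successful attach, set_dma_ops(dev, &iommu_ops) has been
	 * called for us, so dma_alloc_coherent()/dma_map_sg() on this device
	 * now return IOVAs carved out of the window above. */
	ret = arm_iommu_attach_device(dev, example_mapping);
	if (ret) {
		arm_iommu_release_mapping(example_mapping);
		return ret;
	}
	return 0;
}

Whether the mapping is shared per bus or created per device is a design choice left to the caller; the patch itself only reference-counts the mapping via kref, as seen in arm_iommu_attach_device() and arm_iommu_release_mapping() above.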
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index bb0025c5..1833aa8d 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -10,6 +10,7 @@ struct dma_coherent_mem { void *virt_base; dma_addr_t device_base; + phys_addr_t pfn_base; int size; int flags; unsigned long *bitmap; @@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dev->dma_mem->virt_base = mem_base; dev->dma_mem->device_base = device_addr; + dev->dma_mem->pfn_base = PFN_DOWN(bus_addr); dev->dma_mem->size = pages; dev->dma_mem->flags = flags; @@ -176,3 +178,28 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) return 0; } EXPORT_SYMBOL(dma_release_from_coherent); + +int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, + void *vaddr, size_t size, int *ret) +{ + struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; + + if (mem && vaddr >= mem->virt_base && vaddr + size <= + (mem->virt_base + (mem->size << PAGE_SHIFT))) { + unsigned long off = vma->vm_pgoff; + int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; + int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + int count = size >> PAGE_SHIFT; + + *ret = -ENXIO; + if (off < count && user_count <= count - off) { + unsigned pfn = mem->pfn_base + start + off; + *ret = remap_pfn_range(vma, vma->vm_start, pfn, + user_count << PAGE_SHIFT, + vma->vm_page_prot); + } + return 1; + } + return 0; +} +EXPORT_SYMBOL(dma_mmap_from_coherent); diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c index 48c2c973..f829203e 100644 --- a/drivers/char/msm_rotator.c +++ b/drivers/char/msm_rotator.c @@ -29,6 +29,9 @@ #include #include #include +#include +#include + #ifdef CONFIG_MSM_BUS_SCALING #include #include @@ -89,11 +92,20 @@ #define INVALID_SESSION -1 #define VERSION_KEY_MASK 0xFFFFFF00 #define MAX_DOWNSCALE_RATIO 3 +#define MAX_COMMIT_QUEUE 4 +#define WAIT_ROT_TIMEOUT 1000 + +#define MAX_TIMELINE_NAME_LEN 16 +#define WAIT_FENCE_FIRST_TIMEOUT MSEC_PER_SEC +#define WAIT_FENCE_FINAL_TIMEOUT (10 * MSEC_PER_SEC) #define ROTATOR_REVISION_V0 0 #define ROTATOR_REVISION_V1 1 #define ROTATOR_REVISION_V2 2 #define ROTATOR_REVISION_NONE 0xffffffff +#define BASE_ADDR(height, y_stride) ((height % 64) * y_stride) +#define HW_BASE_ADDR(height, y_stride) (((dstp0_ystride >> 5) << 11) - \ + ((dst_height & 0x3f) * dstp0_ystride)) uint32_t rotator_hw_revision; static char rot_iommu_split_domain; @@ -120,12 +132,52 @@ struct msm_rotator_fd_info { struct list_head list; }; +struct rot_sync_info { + u32 initialized; + struct sync_fence *acq_fen; + struct sync_fence *rel_fen; + int rel_fen_fd; + struct sw_sync_timeline *timeline; + int timeline_value; + struct mutex sync_mutex; + atomic_t queue_buf_cnt; +}; + +struct msm_rotator_session { + struct msm_rotator_img_info img_info; + struct msm_rotator_fd_info fd_info; + int fast_yuv_enable; +}; + +struct msm_rotator_commit_info { + struct msm_rotator_data_info data_info; + struct msm_rotator_img_info img_info; + unsigned int format; + unsigned int in_paddr; + unsigned int out_paddr; + unsigned int in_chroma_paddr; + unsigned int out_chroma_paddr; + unsigned int in_chroma2_paddr; + unsigned int out_chroma2_paddr; + struct file *srcp0_file; + struct file *srcp1_file; + struct file *dstp0_file; + struct file *dstp1_file; + struct ion_handle *srcp0_ihdl; + struct ion_handle *srcp1_ihdl; + struct ion_handle *dstp0_ihdl; + struct ion_handle *dstp1_ihdl; + int ps0_need; + int session_index; + struct sync_fence 
*acq_fen; + int fast_yuv_en; +}; + struct msm_rotator_dev { void __iomem *io_base; int irq; - struct msm_rotator_img_info *img_info[MAX_SESSIONS]; struct clk *core_clk; - struct msm_rotator_fd_info *fd_info[MAX_SESSIONS]; + struct msm_rotator_session *rot_session[MAX_SESSIONS]; struct list_head fd_list; struct clk *pclk; int rot_clk_state; @@ -149,14 +201,26 @@ struct msm_rotator_dev { #ifdef CONFIG_MSM_BUS_SCALING uint32_t bus_client_handle; #endif + struct rot_sync_info sync_info[MAX_SESSIONS]; + + struct mutex commit_mutex; + struct mutex commit_wq_mutex; + struct completion commit_comp; + u32 commit_running; + struct work_struct commit_work; + struct msm_rotator_commit_info commit_info[MAX_COMMIT_QUEUE]; + atomic_t commit_q_r; + atomic_t commit_q_w; + atomic_t commit_q_cnt; }; #define COMPONENT_5BITS 1 #define COMPONENT_6BITS 2 #define COMPONENT_8BITS 3 - static struct msm_rotator_dev *msm_rotator_dev; +#define mrd msm_rotator_dev +static void rot_wait_for_commit_queue(u32 is_all); enum { CLK_EN, @@ -235,7 +299,7 @@ int msm_rotator_imem_allocate(int requestor) rc = 1; #endif if (rc == 1) { - cancel_delayed_work(&msm_rotator_dev->imem_clk_work); + cancel_delayed_work_sync(&msm_rotator_dev->imem_clk_work); if (msm_rotator_dev->imem_clk_state != CLK_EN && msm_rotator_dev->imem_clk) { clk_prepare_enable(msm_rotator_dev->imem_clk); @@ -319,6 +383,175 @@ static irqreturn_t msm_rotator_isr(int irq, void *dev_id) return IRQ_HANDLED; } +static void msm_rotator_signal_timeline(u32 session_index) +{ + struct rot_sync_info *sync_info; + sync_info = &msm_rotator_dev->sync_info[session_index]; + + if ((!sync_info->timeline) || (!sync_info->initialized)) + return; + + mutex_lock(&sync_info->sync_mutex); + sw_sync_timeline_inc(sync_info->timeline, 1); + sync_info->timeline_value++; + mutex_unlock(&sync_info->sync_mutex); +} + +static void msm_rotator_signal_timeline_done(u32 session_index) +{ + struct rot_sync_info *sync_info; + sync_info = &msm_rotator_dev->sync_info[session_index]; + + if ((sync_info->timeline == NULL) || + (sync_info->initialized == false)) + return; + mutex_lock(&sync_info->sync_mutex); + sw_sync_timeline_inc(sync_info->timeline, 1); + sync_info->timeline_value++; + if (atomic_read(&sync_info->queue_buf_cnt) <= 0) + pr_err("%s queue_buf_cnt=%d", __func__, + atomic_read(&sync_info->queue_buf_cnt)); + else + atomic_dec(&sync_info->queue_buf_cnt); + mutex_unlock(&sync_info->sync_mutex); +} + +static void msm_rotator_release_acq_fence(u32 session_index) +{ + struct rot_sync_info *sync_info; + sync_info = &msm_rotator_dev->sync_info[session_index]; + + if ((!sync_info->timeline) || (!sync_info->initialized)) + return; + mutex_lock(&sync_info->sync_mutex); + sync_info->acq_fen = NULL; + mutex_unlock(&sync_info->sync_mutex); +} + +static void msm_rotator_release_all_timeline(void) +{ + int i; + struct rot_sync_info *sync_info; + for (i = 0; i < MAX_SESSIONS; i++) { + sync_info = &msm_rotator_dev->sync_info[i]; + if (sync_info->initialized) { + msm_rotator_signal_timeline(i); + msm_rotator_release_acq_fence(i); + } + } +} + +static void msm_rotator_wait_for_fence(struct sync_fence *acq_fen) +{ + int ret; + if (acq_fen) { + ret = sync_fence_wait(acq_fen, + WAIT_FENCE_FIRST_TIMEOUT); + if (ret == -ETIME) { + pr_warn("%s: timeout, wait %ld more ms\n", + __func__, WAIT_FENCE_FINAL_TIMEOUT); + ret = sync_fence_wait(acq_fen, + WAIT_FENCE_FINAL_TIMEOUT); + } + if (ret < 0) { + pr_err("%s: sync_fence_wait failed! 
ret = %x\n", + __func__, ret); + } + sync_fence_put(acq_fen); + } +} + +static int msm_rotator_buf_sync(unsigned long arg) +{ + struct msm_rotator_buf_sync buf_sync; + int ret = 0; + struct sync_fence *fence = NULL; + struct rot_sync_info *sync_info; + struct sync_pt *rel_sync_pt; + struct sync_fence *rel_fence; + int rel_fen_fd; + u32 s; + + if (copy_from_user(&buf_sync, (void __user *)arg, sizeof(buf_sync))) + return -EFAULT; + + rot_wait_for_commit_queue(false); + for (s = 0; s < MAX_SESSIONS; s++) + if ((msm_rotator_dev->rot_session[s] != NULL) && + (buf_sync.session_id == + (unsigned int)msm_rotator_dev->rot_session[s] + )) + break; + + if (s == MAX_SESSIONS) { + pr_err("%s invalid session id %d", __func__, + buf_sync.session_id); + return -EINVAL; + } + + sync_info = &msm_rotator_dev->sync_info[s]; + + if (sync_info->acq_fen) + pr_err("%s previous acq_fen will be overwritten", __func__); + + if ((sync_info->timeline == NULL) || + (sync_info->initialized == false)) + return -EINVAL; + + mutex_lock(&sync_info->sync_mutex); + if (buf_sync.acq_fen_fd >= 0) + fence = sync_fence_fdget(buf_sync.acq_fen_fd); + + sync_info->acq_fen = fence; + + if (sync_info->acq_fen && + (buf_sync.flags & MDP_BUF_SYNC_FLAG_WAIT)) { + msm_rotator_wait_for_fence(sync_info->acq_fen); + sync_info->acq_fen = NULL; + } + + rel_sync_pt = sw_sync_pt_create(sync_info->timeline, + sync_info->timeline_value + + atomic_read(&sync_info->queue_buf_cnt) + 1); + if (rel_sync_pt == NULL) { + pr_err("%s: cannot create sync point", __func__); + ret = -ENOMEM; + goto buf_sync_err_1; + } + + rel_fence = sync_fence_create("msm_rotator-fence", + rel_sync_pt); + if (rel_fence == NULL) { + sync_pt_free(rel_sync_pt); + pr_err("%s: cannot create fence", __func__); + ret = -ENOMEM; + goto buf_sync_err_1; + } + + rel_fen_fd = get_unused_fd_flags(0); + if (rel_fen_fd < 0) { + pr_err("%s: get_unused_fd_flags failed", __func__); + ret = -EIO; + goto buf_sync_err_2; + } + sync_fence_install(rel_fence, rel_fen_fd); + buf_sync.rel_fen_fd = rel_fen_fd; + sync_info->rel_fen = rel_fence; + sync_info->rel_fen_fd = rel_fen_fd; + + ret = copy_to_user((void __user *)arg, &buf_sync, sizeof(buf_sync)); + mutex_unlock(&sync_info->sync_mutex); + return ret; +buf_sync_err_2: + sync_fence_put(rel_fence); +buf_sync_err_1: + if (sync_info->acq_fen) + sync_fence_put(sync_info->acq_fen); + sync_info->acq_fen = NULL; + mutex_unlock(&sync_info->sync_mutex); + return ret; +} + static unsigned int tile_size(unsigned int src_width, unsigned int src_height, const struct tile_parm *tp) @@ -447,6 +680,61 @@ static int msm_rotator_get_plane_sizes(uint32_t format, uint32_t w, uint32_t h, return 0; } + +uint32_t fast_yuv_invalid_size_checker(unsigned char rot_mode, + uint32_t src_width, + uint32_t dst_width, + uint32_t dst_height, + uint32_t dstp0_ystride, + uint32_t is_planar420) +{ + uint32_t hw_limit; + + hw_limit = is_planar420 ? 
512 : 256; + + + if ((src_width > hw_limit) && ((src_width % (hw_limit / 2)) == 8)) + return -EINVAL; + + if (rot_mode & MDP_ROT_90) { + + if (((dst_width % 8) != 0) || ((dst_height % 8) != 0)) + return -EINVAL; + + if ((rot_mode & MDP_FLIP_UD) || + (rot_mode & (MDP_FLIP_UD | MDP_FLIP_LR))) { + + if (((dst_height % 64) != 0) && + ((dst_height / 64) >= 4)) { + + if (BASE_ADDR(dst_height, dstp0_ystride) != + HW_BASE_ADDR(dst_height, dstp0_ystride)) + return -EINVAL; + } + + if (is_planar420) { + dst_width = dst_width / 2; + dstp0_ystride = dstp0_ystride / 2; + } + + dst_height = dst_height / 2; + + if (((dst_height % 64) != 0) && ((dst_height / 64) >= + (hw_limit / 128))) { + + if (BASE_ADDR(dst_height, dstp0_ystride) != + HW_BASE_ADDR(dst_height, dstp0_ystride)) + return -EINVAL; + } + } + } else { + if (((dst_width % 8) != 0) || ((dst_height % 2) != 0)) + return -EINVAL; + } + + return 0; +} + static int msm_rotator_ycxcx_h2v1(struct msm_rotator_img_info *info, unsigned int in_paddr, unsigned int out_paddr, @@ -545,7 +833,9 @@ static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info, int new_session, unsigned int in_chroma_paddr, unsigned int out_chroma_paddr, - unsigned int in_chroma2_paddr) + unsigned int in_chroma2_paddr, + unsigned int out_chroma2_paddr, + int fast_yuv_en) { uint32_t dst_format; int is_tile = 0; @@ -553,14 +843,26 @@ static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info, switch (info->src.format) { case MDP_Y_CRCB_H2V2_TILE: is_tile = 1; + dst_format = MDP_Y_CRCB_H2V2; + break; case MDP_Y_CR_CB_H2V2: case MDP_Y_CR_CB_GH2V2: + if (fast_yuv_en) { + dst_format = info->src.format; + break; + } case MDP_Y_CRCB_H2V2: dst_format = MDP_Y_CRCB_H2V2; break; + case MDP_Y_CB_CR_H2V2: + if (fast_yuv_en) { + dst_format = info->src.format; + break; + } + dst_format = MDP_Y_CBCR_H2V2; + break; case MDP_Y_CBCR_H2V2_TILE: is_tile = 1; - case MDP_Y_CB_CR_H2V2: case MDP_Y_CBCR_H2V2: dst_format = MDP_Y_CBCR_H2V2; break; @@ -584,8 +886,12 @@ static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info, ((info->dst_y * info->dst.width) + info->dst_x), MSM_ROTATOR_OUTP0_ADDR); iowrite32(out_chroma_paddr + - ((info->dst_y * info->dst.width)/2 + info->dst_x), + (((info->dst_y * info->dst.width)/2) + info->dst_x), MSM_ROTATOR_OUTP1_ADDR); + if (out_chroma2_paddr) + iowrite32(out_chroma2_paddr + + (((info->dst_y * info->dst.width)/2) + info->dst_x), + MSM_ROTATOR_OUTP2_ADDR); if (new_session) { if (in_chroma2_paddr) { @@ -607,11 +913,28 @@ static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info, info->src.width << 16, MSM_ROTATOR_SRC_YSTRIDE1); } - iowrite32(info->dst.width | - info->dst.width << 16, - MSM_ROTATOR_OUT_YSTRIDE1); + if (out_chroma2_paddr) { + if (info->dst.format == MDP_Y_CR_CB_GH2V2) { + iowrite32(ALIGN(info->dst.width, 16) | + ALIGN((info->dst.width / 2), 16) << 16, + MSM_ROTATOR_OUT_YSTRIDE1); + iowrite32(ALIGN((info->dst.width / 2), 16), + MSM_ROTATOR_OUT_YSTRIDE2); + } else { + iowrite32(info->dst.width | + info->dst.width/2 << 16, + MSM_ROTATOR_OUT_YSTRIDE1); + iowrite32(info->dst.width/2, + MSM_ROTATOR_OUT_YSTRIDE2); + } + } else { + iowrite32(info->dst.width | + info->dst.width << 16, + MSM_ROTATOR_OUT_YSTRIDE1); + } - if (dst_format == MDP_Y_CBCR_H2V2) { + if (dst_format == MDP_Y_CBCR_H2V2 || + dst_format == MDP_Y_CB_CR_H2V2) { iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8), MSM_ROTATOR_SRC_UNPACK_PATTERN1); iowrite32(GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8), @@ -622,9 +945,11 @@ static int msm_rotator_ycxcx_h2v2(struct 
msm_rotator_img_info *info, iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8), MSM_ROTATOR_OUT_PACK_PATTERN1); } + iowrite32((3 << 18) | (ROTATIONS_TO_BITMASK(info->rotations) << 9) | 1 << 8 | + fast_yuv_en << 4 | info->downscale_ratio << 2 | info->downscale_ratio, MSM_ROTATOR_SUB_BLOCK_CFG); @@ -931,31 +1256,33 @@ static void put_img(struct file *p_file, struct ion_handle *p_ihdl, } #endif } -static int msm_rotator_do_rotate(unsigned long arg) + +static int msm_rotator_rotate_prepare( + struct msm_rotator_data_info *data_info, + struct msm_rotator_commit_info *commit_info) { - unsigned int status, format; + unsigned int format; struct msm_rotator_data_info info; unsigned int in_paddr, out_paddr; unsigned long src_len, dst_len; - int use_imem = 0, rc = 0, s, secure_flag = 0; + int rc = 0, s; struct file *srcp0_file = NULL, *dstp0_file = NULL; struct file *srcp1_file = NULL, *dstp1_file = NULL; struct ion_handle *srcp0_ihdl = NULL, *dstp0_ihdl = NULL; struct ion_handle *srcp1_ihdl = NULL, *dstp1_ihdl = NULL; int ps0_need = 0, p_need; unsigned int in_chroma_paddr = 0, out_chroma_paddr = 0; - unsigned int in_chroma2_paddr = 0; + unsigned int in_chroma2_paddr = 0, out_chroma2_paddr = 0; struct msm_rotator_img_info *img_info; struct msm_rotator_mem_planes src_planes, dst_planes; - if (copy_from_user(&info, (void __user *)arg, sizeof(info))) - return -EFAULT; - mutex_lock(&msm_rotator_dev->rotator_lock); + info = *data_info; + for (s = 0; s < MAX_SESSIONS; s++) - if ((msm_rotator_dev->img_info[s] != NULL) && + if ((msm_rotator_dev->rot_session[s] != NULL) && (info.session_id == - (unsigned int)msm_rotator_dev->img_info[s] + (unsigned int)msm_rotator_dev->rot_session[s] )) break; @@ -963,25 +1290,27 @@ static int msm_rotator_do_rotate(unsigned long arg) pr_err("%s() : Attempt to use invalid session_id %d\n", __func__, s); rc = -EINVAL; - goto do_rotate_unlock_mutex; + mutex_unlock(&msm_rotator_dev->rotator_lock); + return rc; } - if (msm_rotator_dev->img_info[s]->enable == 0) { + img_info = &(msm_rotator_dev->rot_session[s]->img_info); + if (img_info->enable == 0) { dev_dbg(msm_rotator_dev->device, - "%s() : Session_id %d not enabled \n", - __func__, s); + "%s() : Session_id %d not enabled\n", __func__, s); rc = -EINVAL; - goto do_rotate_unlock_mutex; + mutex_unlock(&msm_rotator_dev->rotator_lock); + return rc; } - img_info = msm_rotator_dev->img_info[s]; if (msm_rotator_get_plane_sizes(img_info->src.format, img_info->src.width, img_info->src.height, &src_planes)) { pr_err("%s: invalid src format\n", __func__); rc = -EINVAL; - goto do_rotate_unlock_mutex; + mutex_unlock(&msm_rotator_dev->rotator_lock); + return rc; } if (msm_rotator_get_plane_sizes(img_info->dst.format, img_info->dst.width, @@ -989,7 +1318,8 @@ static int msm_rotator_do_rotate(unsigned long arg) &dst_planes)) { pr_err("%s: invalid dst format\n", __func__); rc = -EINVAL; - goto do_rotate_unlock_mutex; + mutex_unlock(&msm_rotator_dev->rotator_lock); + return rc; } rc = get_img(&info.src, ROTATOR_SRC_DOMAIN, (unsigned long *)&in_paddr, @@ -998,7 +1328,7 @@ static int msm_rotator_do_rotate(unsigned long arg) if (rc) { pr_err("%s: in get_img() failed id=0x%08x\n", DRIVER_NAME, info.src.memory_id); - goto do_rotate_unlock_mutex; + goto rotate_prepare_error; } rc = get_img(&info.dst, ROTATOR_DST_DOMAIN, (unsigned long *)&out_paddr, @@ -1007,10 +1337,10 @@ static int msm_rotator_do_rotate(unsigned long arg) if (rc) { pr_err("%s: out get_img() failed id=0x%08x\n", DRIVER_NAME, info.dst.memory_id); - goto do_rotate_unlock_mutex; + goto 
rotate_prepare_error; } - format = msm_rotator_dev->img_info[s]->src.format; + format = img_info->src.format; if (((info.version_key & VERSION_KEY_MASK) == 0xA5B4C300) && ((info.version_key & ~VERSION_KEY_MASK) > 0) && (src_planes.num_planes == 2)) { @@ -1020,7 +1350,7 @@ static int msm_rotator_do_rotate(unsigned long arg) pr_err("%s: invalid src buffer (len=%lu offset=%x)\n", __func__, src_len, info.src.offset); rc = -ERANGE; - goto do_rotate_unlock_mutex; + goto rotate_prepare_error; } if (checkoffset(info.dst.offset, dst_planes.plane_size[0], @@ -1028,7 +1358,7 @@ static int msm_rotator_do_rotate(unsigned long arg) pr_err("%s: invalid dst buffer (len=%lu offset=%x)\n", __func__, dst_len, info.dst.offset); rc = -ERANGE; - goto do_rotate_unlock_mutex; + goto rotate_prepare_error; } rc = get_img(&info.src_chroma, ROTATOR_SRC_DOMAIN, @@ -1038,7 +1368,7 @@ static int msm_rotator_do_rotate(unsigned long arg) if (rc) { pr_err("%s: in chroma get_img() failed id=0x%08x\n", DRIVER_NAME, info.src_chroma.memory_id); - goto do_rotate_unlock_mutex; + goto rotate_prepare_error; } rc = get_img(&info.dst_chroma, ROTATOR_DST_DOMAIN, @@ -1048,7 +1378,7 @@ static int msm_rotator_do_rotate(unsigned long arg) if (rc) { pr_err("%s: out chroma get_img() failed id=0x%08x\n", DRIVER_NAME, info.dst_chroma.memory_id); - goto do_rotate_unlock_mutex; + goto rotate_prepare_error; } if (checkoffset(info.src_chroma.offset, @@ -1057,7 +1387,7 @@ static int msm_rotator_do_rotate(unsigned long arg) pr_err("%s: invalid chr src buf len=%lu offset=%x\n", __func__, src_len, info.src_chroma.offset); rc = -ERANGE; - goto do_rotate_unlock_mutex; + goto rotate_prepare_error; } if (checkoffset(info.dst_chroma.offset, @@ -1066,7 +1396,7 @@ static int msm_rotator_do_rotate(unsigned long arg) pr_err("%s: invalid chr dst buf len=%lu offset=%x\n", __func__, dst_len, info.dst_chroma.offset); rc = -ERANGE; - goto do_rotate_unlock_mutex; + goto rotate_prepare_error; } in_chroma_paddr += info.src_chroma.offset; @@ -1078,7 +1408,7 @@ static int msm_rotator_do_rotate(unsigned long arg) pr_err("%s: invalid src buffer (len=%lu offset=%x)\n", __func__, src_len, info.src.offset); rc = -ERANGE; - goto do_rotate_unlock_mutex; + goto rotate_prepare_error; } if (checkoffset(info.dst.offset, dst_planes.total_size, @@ -1086,7 +1416,7 @@ static int msm_rotator_do_rotate(unsigned long arg) pr_err("%s: invalid dst buffer (len=%lu offset=%x)\n", __func__, dst_len, info.dst.offset); rc = -ERANGE; - goto do_rotate_unlock_mutex; + goto rotate_prepare_error; } } @@ -1099,8 +1429,93 @@ static int msm_rotator_do_rotate(unsigned long arg) out_chroma_paddr = out_paddr + dst_planes.plane_size[0]; if (src_planes.num_planes >= 3) in_chroma2_paddr = in_chroma_paddr + src_planes.plane_size[1]; + if (dst_planes.num_planes >= 3) + out_chroma2_paddr = out_chroma_paddr + dst_planes.plane_size[1]; + + commit_info->data_info = info; + commit_info->img_info = *img_info; + commit_info->format = format; + commit_info->in_paddr = in_paddr; + commit_info->out_paddr = out_paddr; + commit_info->in_chroma_paddr = in_chroma_paddr; + commit_info->out_chroma_paddr = out_chroma_paddr; + commit_info->in_chroma2_paddr = in_chroma2_paddr; + commit_info->out_chroma2_paddr = out_chroma2_paddr; + commit_info->srcp0_file = srcp0_file; + commit_info->srcp1_file = srcp1_file; + commit_info->srcp0_ihdl = srcp0_ihdl; + commit_info->srcp1_ihdl = srcp1_ihdl; + commit_info->dstp0_file = dstp0_file; + commit_info->dstp0_ihdl = dstp0_ihdl; + commit_info->dstp1_file = dstp1_file; + 
commit_info->dstp1_ihdl = dstp1_ihdl; + commit_info->ps0_need = ps0_need; + commit_info->session_index = s; + commit_info->acq_fen = msm_rotator_dev->sync_info[s].acq_fen; + commit_info->fast_yuv_en = mrd->rot_session[s]->fast_yuv_enable; + mutex_unlock(&msm_rotator_dev->rotator_lock); + return 0; + +rotate_prepare_error: + put_img(dstp1_file, dstp1_ihdl, ROTATOR_DST_DOMAIN, + msm_rotator_dev->rot_session[s]->img_info.secure); + put_img(srcp1_file, srcp1_ihdl, ROTATOR_SRC_DOMAIN, 0); + put_img(dstp0_file, dstp0_ihdl, ROTATOR_DST_DOMAIN, + msm_rotator_dev->rot_session[s]->img_info.secure); + + + if (info.src.flags & MDP_MEMORY_ID_TYPE_FB) + fput_light(srcp0_file, ps0_need); + else + put_img(srcp0_file, srcp0_ihdl, ROTATOR_SRC_DOMAIN, 0); + dev_dbg(msm_rotator_dev->device, "%s() returning rc = %d\n", + __func__, rc); + mutex_unlock(&msm_rotator_dev->rotator_lock); + return rc; +} - cancel_delayed_work(&msm_rotator_dev->rot_clk_work); +static int msm_rotator_do_rotate_sub( + struct msm_rotator_commit_info *commit_info) +{ + unsigned int status, format; + struct msm_rotator_data_info info; + unsigned int in_paddr, out_paddr; + int use_imem = 0, rc = 0; + struct file *srcp0_file, *dstp0_file; + struct file *srcp1_file, *dstp1_file; + struct ion_handle *srcp0_ihdl, *dstp0_ihdl; + struct ion_handle *srcp1_ihdl, *dstp1_ihdl; + int s, ps0_need; + unsigned int in_chroma_paddr, out_chroma_paddr; + unsigned int in_chroma2_paddr, out_chroma2_paddr; + struct msm_rotator_img_info *img_info; + + mutex_lock(&msm_rotator_dev->rotator_lock); + + info = commit_info->data_info; + img_info = &commit_info->img_info; + format = commit_info->format; + in_paddr = commit_info->in_paddr; + out_paddr = commit_info->out_paddr; + in_chroma_paddr = commit_info->in_chroma_paddr; + out_chroma_paddr = commit_info->out_chroma_paddr; + in_chroma2_paddr = commit_info->in_chroma2_paddr; + out_chroma2_paddr = commit_info->out_chroma2_paddr; + srcp0_file = commit_info->srcp0_file; + srcp1_file = commit_info->srcp1_file; + srcp0_ihdl = commit_info->srcp0_ihdl; + srcp1_ihdl = commit_info->srcp1_ihdl; + dstp0_file = commit_info->dstp0_file; + dstp0_ihdl = commit_info->dstp0_ihdl; + dstp1_file = commit_info->dstp1_file; + dstp1_ihdl = commit_info->dstp1_ihdl; + ps0_need = commit_info->ps0_need; + s = commit_info->session_index; + + msm_rotator_wait_for_fence(commit_info->acq_fen); + commit_info->acq_fen = NULL; + + cancel_delayed_work_sync(&msm_rotator_dev->rot_clk_work); if (msm_rotator_dev->rot_clk_state != CLK_EN) { enable_rot_clks(); msm_rotator_dev->rot_clk_state = CLK_EN; @@ -1115,17 +1530,17 @@ static int msm_rotator_do_rotate(unsigned long arg) if (use_imem) iowrite32(0x42, MSM_ROTATOR_MAX_BURST_SIZE); - iowrite32(((msm_rotator_dev->img_info[s]->src_rect.h & 0x1fff) + iowrite32(((img_info->src_rect.h & 0x1fff) << 16) | - (msm_rotator_dev->img_info[s]->src_rect.w & 0x1fff), + (img_info->src_rect.w & 0x1fff), MSM_ROTATOR_SRC_SIZE); - iowrite32(((msm_rotator_dev->img_info[s]->src_rect.y & 0x1fff) + iowrite32(((img_info->src_rect.y & 0x1fff) << 16) | - (msm_rotator_dev->img_info[s]->src_rect.x & 0x1fff), + (img_info->src_rect.x & 0x1fff), MSM_ROTATOR_SRC_XY); - iowrite32(((msm_rotator_dev->img_info[s]->src.height & 0x1fff) + iowrite32(((img_info->src.height & 0x1fff) << 16) | - (msm_rotator_dev->img_info[s]->src.width & 0x1fff), + (img_info->src.width & 0x1fff), MSM_ROTATOR_SRC_IMAGE_SIZE); switch (format) { @@ -1139,7 +1554,7 @@ static int msm_rotator_do_rotate(unsigned long arg) case MDP_RGBX_8888: case MDP_YCBCR_H1V1: case 
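The field-by-field copies above (into commit_info in the prepare step, back out of it in the commit step) imply a per-commit snapshot structure roughly like the sketch below. It is reconstructed purely from those assignments for illustration; the real definition lives in the driver's private header and may differ in naming and layout.

struct example_rot_commit_info {
	struct msm_rotator_data_info data_info;	/* user request snapshot   */
	struct msm_rotator_img_info img_info;	/* session image settings  */
	unsigned int format;
	unsigned int in_paddr, out_paddr;
	unsigned int in_chroma_paddr, out_chroma_paddr;
	unsigned int in_chroma2_paddr, out_chroma2_paddr;
	struct file *srcp0_file, *srcp1_file;
	struct file *dstp0_file, *dstp1_file;
	struct ion_handle *srcp0_ihdl, *srcp1_ihdl;
	struct ion_handle *dstp0_ihdl, *dstp1_ihdl;
	int ps0_need;
	int session_index;
	struct sync_fence *acq_fen;		/* acquire fence to wait on */
	int fast_yuv_en;
};
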
MDP_YCRCB_H1V1: - rc = msm_rotator_rgb_types(msm_rotator_dev->img_info[s], + rc = msm_rotator_rgb_types(img_info, in_paddr, out_paddr, use_imem, msm_rotator_dev->last_session_idx @@ -1152,17 +1567,19 @@ static int msm_rotator_do_rotate(unsigned long arg) case MDP_Y_CR_CB_GH2V2: case MDP_Y_CRCB_H2V2_TILE: case MDP_Y_CBCR_H2V2_TILE: - rc = msm_rotator_ycxcx_h2v2(msm_rotator_dev->img_info[s], + rc = msm_rotator_ycxcx_h2v2(img_info, in_paddr, out_paddr, use_imem, msm_rotator_dev->last_session_idx != s, in_chroma_paddr, out_chroma_paddr, - in_chroma2_paddr); + in_chroma2_paddr, + out_chroma2_paddr, + commit_info->fast_yuv_en); break; case MDP_Y_CBCR_H2V1: case MDP_Y_CRCB_H2V1: - rc = msm_rotator_ycxcx_h2v1(msm_rotator_dev->img_info[s], + rc = msm_rotator_ycxcx_h2v1(img_info, in_paddr, out_paddr, use_imem, msm_rotator_dev->last_session_idx != s, @@ -1171,7 +1588,7 @@ static int msm_rotator_do_rotate(unsigned long arg) break; case MDP_YCBYCR_H2V1: case MDP_YCRYCB_H2V1: - rc = msm_rotator_ycxycx(msm_rotator_dev->img_info[s], + rc = msm_rotator_ycxycx(img_info, in_paddr, out_paddr, use_imem, msm_rotator_dev->last_session_idx != s, out_chroma_paddr); @@ -1192,10 +1609,12 @@ static int msm_rotator_do_rotate(unsigned long arg) msm_rotator_dev->processing = 1; iowrite32(0x1, MSM_ROTATOR_START); - - wait_event(msm_rotator_dev->wq, - (msm_rotator_dev->processing == 0)); - + rc = wait_event_timeout(msm_rotator_dev->wq, + (msm_rotator_dev->processing == 0), WAIT_FENCE_FIRST_TIMEOUT); + if(!rc) { + pr_err("%s(): Wait event timeout!\n", __func__); + msm_rotator_dev->processing = 0; + } status = (unsigned char)ioread32(MSM_ROTATOR_INTR_STATUS); if ((status & 0x03) != 0x01) { pr_err("%s(): AXI Bus Error, issuing SW_RESET\n", __func__); @@ -1211,25 +1630,132 @@ static int msm_rotator_do_rotate(unsigned long arg) msm_rotator_imem_free(ROTATOR_REQUEST); #endif schedule_delayed_work(&msm_rotator_dev->rot_clk_work, HZ); -do_rotate_unlock_mutex: - if (s < MAX_SESSIONS) - secure_flag = msm_rotator_dev->img_info[s]->secure; - - put_img(dstp1_file, dstp1_ihdl, ROTATOR_DST_DOMAIN, secure_flag); + put_img(dstp1_file, dstp1_ihdl, ROTATOR_DST_DOMAIN, + img_info->secure); put_img(srcp1_file, srcp1_ihdl, ROTATOR_SRC_DOMAIN, 0); - put_img(dstp0_file, dstp0_ihdl, ROTATOR_DST_DOMAIN, secure_flag); + put_img(dstp0_file, dstp0_ihdl, ROTATOR_DST_DOMAIN, + img_info->secure); if ((info.src.flags & MDP_MEMORY_ID_TYPE_FB) && srcp0_file) fput_light(srcp0_file, ps0_need); else put_img(srcp0_file, srcp0_ihdl, ROTATOR_SRC_DOMAIN, 0); + msm_rotator_signal_timeline_done(s); mutex_unlock(&msm_rotator_dev->rotator_lock); dev_dbg(msm_rotator_dev->device, "%s() returning rc = %d\n", __func__, rc); + return rc; } +static void rot_wait_for_commit_queue(u32 is_all) +{ + int ret = 0; + u32 loop_cnt = 0; + + while (1) { + mutex_lock(&mrd->commit_mutex); + if (is_all && (atomic_read(&mrd->commit_q_cnt) == 0)) + break; + if ((!is_all) && + (atomic_read(&mrd->commit_q_cnt) < MAX_COMMIT_QUEUE)) + break; + INIT_COMPLETION(mrd->commit_comp); + mutex_unlock(&mrd->commit_mutex); + ret = wait_for_completion_interruptible_timeout( + &mrd->commit_comp, + msecs_to_jiffies(WAIT_ROT_TIMEOUT)); + if ((ret <= 0) || + (atomic_read(&mrd->commit_q_cnt) >= MAX_COMMIT_QUEUE) || + (loop_cnt > MAX_COMMIT_QUEUE)) { + pr_err("%s wait for commit queue failed ret=%d pointers:%d %d", + __func__, ret, atomic_read(&mrd->commit_q_r), + atomic_read(&mrd->commit_q_w)); + mutex_lock(&mrd->commit_mutex); + ret = -ETIME; + break; + } else { + ret = 0; + } + loop_cnt++; + }; + if 
(is_all || ret) { + atomic_set(&mrd->commit_q_r, 0); + atomic_set(&mrd->commit_q_cnt, 0); + atomic_set(&mrd->commit_q_w, 0); + } + mutex_unlock(&mrd->commit_mutex); +} + +static int msm_rotator_do_rotate(unsigned long arg) +{ + struct msm_rotator_data_info info; + struct rot_sync_info *sync_info; + int session_index, ret; + int commit_q_w; + + if (copy_from_user(&info, (void __user *)arg, sizeof(info))) + return -EFAULT; + + rot_wait_for_commit_queue(false); + mutex_lock(&mrd->commit_mutex); + commit_q_w = atomic_read(&mrd->commit_q_w); + ret = msm_rotator_rotate_prepare(&info, + &mrd->commit_info[commit_q_w]); + if (ret) { + mutex_unlock(&mrd->commit_mutex); + return ret; + } + + session_index = mrd->commit_info[commit_q_w].session_index; + sync_info = &msm_rotator_dev->sync_info[session_index]; + mutex_lock(&sync_info->sync_mutex); + atomic_inc(&sync_info->queue_buf_cnt); + sync_info->acq_fen = NULL; + mutex_unlock(&sync_info->sync_mutex); + + if (atomic_inc_return(&mrd->commit_q_w) >= MAX_COMMIT_QUEUE) + atomic_set(&mrd->commit_q_w, 0); + atomic_inc(&mrd->commit_q_cnt); + + schedule_work(&mrd->commit_work); + mutex_unlock(&mrd->commit_mutex); + + if (info.wait_for_finish) + rot_wait_for_commit_queue(true); + + return 0; +} + +static void rot_commit_wq_handler(struct work_struct *work) +{ + mutex_lock(&mrd->commit_wq_mutex); + mutex_lock(&mrd->commit_mutex); + while (atomic_read(&mrd->commit_q_cnt) > 0) { + mrd->commit_running = true; + mutex_unlock(&mrd->commit_mutex); + msm_rotator_do_rotate_sub( + &mrd->commit_info[atomic_read(&mrd->commit_q_r)]); + mutex_lock(&mrd->commit_mutex); + if (atomic_read(&mrd->commit_q_cnt) > 0) { + atomic_dec(&mrd->commit_q_cnt); + if (atomic_inc_return(&mrd->commit_q_r) >= + MAX_COMMIT_QUEUE) + atomic_set(&mrd->commit_q_r, 0); + } + complete_all(&mrd->commit_comp); + } + mrd->commit_running = false; + if (atomic_read(&mrd->commit_q_r) != atomic_read(&mrd->commit_q_w)) + pr_err("%s invalid state: r=%d w=%d cnt=%d", __func__, + atomic_read(&mrd->commit_q_r), + atomic_read(&mrd->commit_q_w), + atomic_read(&mrd->commit_q_cnt)); + mutex_unlock(&mrd->commit_mutex); + mutex_unlock(&mrd->commit_wq_mutex); +} + static void msm_rotator_set_perf_level(u32 wh, u32 is_rgb) { u32 perf_level; @@ -1254,10 +1780,14 @@ static int msm_rotator_start(unsigned long arg, struct msm_rotator_fd_info *fd_info) { struct msm_rotator_img_info info; + struct msm_rotator_session *rot_session = NULL; int rc = 0; int s, is_rgb = 0; - int first_free_index = INVALID_SESSION; + int first_free_idx = INVALID_SESSION; unsigned int dst_w, dst_h; + unsigned int is_planar420 = 0; + int fast_yuv_en = 0; + struct rot_sync_info *sync_info; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) return -EFAULT; @@ -1288,6 +1818,30 @@ static int msm_rotator_start(unsigned long arg, return -ERANGE; } + switch (info.src.format) { + case MDP_Y_CB_CR_H2V2: + case MDP_Y_CR_CB_H2V2: + case MDP_Y_CR_CB_GH2V2: + is_planar420 = 1; + case MDP_Y_CBCR_H2V2: + case MDP_Y_CRCB_H2V2: + case MDP_Y_CRCB_H2V2_TILE: + case MDP_Y_CBCR_H2V2_TILE: + if (rotator_hw_revision >= ROTATOR_REVISION_V2 && + !(info.downscale_ratio && + (info.rotations & MDP_ROT_90))) + fast_yuv_en = !fast_yuv_invalid_size_checker( + info.rotations, + info.src.width, + dst_w, + dst_h, + dst_w, + is_planar420); + break; + default: + fast_yuv_en = 0; + } + switch (info.src.format) { case MDP_RGB_565: case MDP_BGR_565: @@ -1329,11 +1883,19 @@ static int msm_rotator_start(unsigned long arg, info.dst.format = MDP_Y_CRCB_H2V1; break; case 
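msm_rotator_do_rotate() and rot_commit_wq_handler() above form a bounded producer/consumer pair: the ioctl snapshots a request into a small ring of commit slots and kicks a work item, and the worker drains the ring, with three atomics (write index, read index, count) tracking occupancy and a completion providing back-pressure. Below is a stripped-down sketch of the same ring discipline, using invented names and omitting the fence and completion plumbing.

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

#define EX_QUEUE_DEPTH 4

struct ex_slot { int payload; };
static struct ex_slot ex_queue[EX_QUEUE_DEPTH];
static atomic_t ex_q_w = ATOMIC_INIT(0);	/* producer index */
static atomic_t ex_q_r = ATOMIC_INIT(0);	/* consumer index */
static atomic_t ex_q_cnt = ATOMIC_INIT(0);	/* occupied slots */
static DEFINE_MUTEX(ex_q_lock);
static struct work_struct ex_work;		/* INIT_WORK()ed at probe time */

/* ioctl side: publish one request and schedule the worker. */
static void ex_queue_submit(int payload)
{
	mutex_lock(&ex_q_lock);
	ex_queue[atomic_read(&ex_q_w)].payload = payload;
	if (atomic_inc_return(&ex_q_w) >= EX_QUEUE_DEPTH)
		atomic_set(&ex_q_w, 0);
	atomic_inc(&ex_q_cnt);
	schedule_work(&ex_work);
	mutex_unlock(&ex_q_lock);
}

/* work handler: drain everything queued so far, processing outside the lock. */
static void ex_work_handler(struct work_struct *work)
{
	mutex_lock(&ex_q_lock);
	while (atomic_read(&ex_q_cnt) > 0) {
		struct ex_slot *slot = &ex_queue[atomic_read(&ex_q_r)];

		mutex_unlock(&ex_q_lock);
		pr_info("processing payload %d\n", slot->payload); /* may sleep */
		mutex_lock(&ex_q_lock);

		atomic_dec(&ex_q_cnt);
		if (atomic_inc_return(&ex_q_r) >= EX_QUEUE_DEPTH)
			atomic_set(&ex_q_r, 0);
	}
	mutex_unlock(&ex_q_lock);
}
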
MDP_Y_CB_CR_H2V2: + if (fast_yuv_en) { + info.dst.format = info.src.format; + break; + } case MDP_Y_CBCR_H2V2_TILE: info.dst.format = MDP_Y_CBCR_H2V2; break; case MDP_Y_CR_CB_H2V2: case MDP_Y_CR_CB_GH2V2: + if (fast_yuv_en) { + info.dst.format = info.src.format; + break; + } case MDP_Y_CRCB_H2V2_TILE: info.dst.format = MDP_Y_CRCB_H2V2; break; @@ -1346,12 +1908,14 @@ static int msm_rotator_start(unsigned long arg, msm_rotator_set_perf_level((info.src.width*info.src.height), is_rgb); for (s = 0; s < MAX_SESSIONS; s++) { - if ((msm_rotator_dev->img_info[s] != NULL) && + if ((msm_rotator_dev->rot_session[s] != NULL) && (info.session_id == - (unsigned int)msm_rotator_dev->img_info[s] + (unsigned int)msm_rotator_dev->rot_session[s] )) { - *(msm_rotator_dev->img_info[s]) = info; - msm_rotator_dev->fd_info[s] = fd_info; + rot_session = msm_rotator_dev->rot_session[s]; + rot_session->img_info = info; + rot_session->fd_info = *fd_info; + rot_session->fast_yuv_enable = fast_yuv_en; if (msm_rotator_dev->last_session_idx == s) msm_rotator_dev->last_session_idx = @@ -1359,36 +1923,60 @@ static int msm_rotator_start(unsigned long arg, break; } - if ((msm_rotator_dev->img_info[s] == NULL) && - (first_free_index == - INVALID_SESSION)) - first_free_index = s; + if ((msm_rotator_dev->rot_session[s] == NULL) && + (first_free_idx == INVALID_SESSION)) + first_free_idx = s; } - if ((s == MAX_SESSIONS) && (first_free_index != INVALID_SESSION)) { + if ((s == MAX_SESSIONS) && (first_free_idx != INVALID_SESSION)) { - msm_rotator_dev->img_info[first_free_index] = - kzalloc(sizeof(struct msm_rotator_img_info), + msm_rotator_dev->rot_session[first_free_idx] = + kzalloc(sizeof(struct msm_rotator_session), GFP_KERNEL); - if (!msm_rotator_dev->img_info[first_free_index]) { + if (!msm_rotator_dev->rot_session[first_free_idx]) { printk(KERN_ERR "%s : unable to alloc mem\n", __func__); rc = -ENOMEM; goto rotator_start_exit; } info.session_id = (unsigned int) - msm_rotator_dev->img_info[first_free_index]; - *(msm_rotator_dev->img_info[first_free_index]) = info; - msm_rotator_dev->fd_info[first_free_index] = fd_info; + msm_rotator_dev->rot_session[first_free_idx]; + rot_session = msm_rotator_dev->rot_session[first_free_idx]; + + rot_session->img_info = info; + rot_session->fd_info = *fd_info; + rot_session->fast_yuv_enable = fast_yuv_en; + s = first_free_idx; } else if (s == MAX_SESSIONS) { dev_dbg(msm_rotator_dev->device, "%s: all sessions in use\n", __func__); rc = -EBUSY; + goto rotator_start_exit; } - if (rc == 0 && copy_to_user((void __user *)arg, &info, sizeof(info))) + if (rc == 0 && copy_to_user((void __user *)arg, &info, sizeof(info))) { rc = -EFAULT; + goto rotator_start_exit; + } + sync_info = &msm_rotator_dev->sync_info[s]; + if ((rc == 0) && (sync_info->initialized == false)) { + char timeline_name[MAX_TIMELINE_NAME_LEN]; + if (sync_info->timeline == NULL) { + snprintf(timeline_name, sizeof(timeline_name), + "msm_rot_%d", first_free_idx); + sync_info->timeline = + sw_sync_timeline_create(timeline_name); + if (sync_info->timeline == NULL) + pr_err("%s: cannot create %s time line", + __func__, timeline_name); + sync_info->timeline_value = 0; + } + mutex_init(&sync_info->sync_mutex); + sync_info->initialized = true; + } + sync_info->acq_fen = NULL; + atomic_set(&sync_info->queue_buf_cnt, 0); rotator_start_exit: mutex_unlock(&msm_rotator_dev->rotator_lock); @@ -1406,15 +1994,16 @@ static int msm_rotator_finish(unsigned long arg) mutex_lock(&msm_rotator_dev->rotator_lock); for (s = 0; s < MAX_SESSIONS; s++) { - if 
((msm_rotator_dev->img_info[s] != NULL) && + if ((msm_rotator_dev->rot_session[s] != NULL) && (session_id == - (unsigned int)msm_rotator_dev->img_info[s])) { + (unsigned int)msm_rotator_dev->rot_session[s])) { if (msm_rotator_dev->last_session_idx == s) msm_rotator_dev->last_session_idx = INVALID_SESSION; - kfree(msm_rotator_dev->img_info[s]); - msm_rotator_dev->img_info[s] = NULL; - msm_rotator_dev->fd_info[s] = NULL; + msm_rotator_signal_timeline(s); + msm_rotator_release_acq_fence(s); + kfree(msm_rotator_dev->rot_session[s]); + msm_rotator_dev->rot_session[s] = NULL; break; } } @@ -1440,7 +2029,7 @@ msm_rotator_open(struct inode *inode, struct file *filp) mutex_lock(&msm_rotator_dev->rotator_lock); for (i = 0; i < MAX_SESSIONS; i++) { - if (msm_rotator_dev->fd_info[i] == NULL) + if (msm_rotator_dev->rot_session[i] == NULL) break; } @@ -1490,14 +2079,15 @@ msm_rotator_close(struct inode *inode, struct file *filp) } for (s = 0; s < MAX_SESSIONS; s++) { - if (msm_rotator_dev->img_info[s] != NULL && - msm_rotator_dev->fd_info[s] == fd_info) { + if (msm_rotator_dev->rot_session[s] != NULL && + &(msm_rotator_dev->rot_session[s]->fd_info) == fd_info) { pr_debug("%s: freeing rotator session %p (pid %d)\n", - __func__, msm_rotator_dev->img_info[s], + __func__, msm_rotator_dev->rot_session[s], fd_info->pid); - kfree(msm_rotator_dev->img_info[s]); - msm_rotator_dev->img_info[s] = NULL; - msm_rotator_dev->fd_info[s] = NULL; + rot_wait_for_commit_queue(true); + msm_rotator_signal_timeline(s); + kfree(msm_rotator_dev->rot_session[s]); + msm_rotator_dev->rot_session[s] = NULL; if (msm_rotator_dev->last_session_idx == s) msm_rotator_dev->last_session_idx = INVALID_SESSION; @@ -1527,6 +2117,8 @@ static long msm_rotator_ioctl(struct file *file, unsigned cmd, return msm_rotator_do_rotate(arg); case MSM_ROTATOR_IOCTL_FINISH: return msm_rotator_finish(arg); + case MSM_ROTATOR_IOCTL_BUFFER_SYNC: + return msm_rotator_buf_sync(arg); default: dev_dbg(msm_rotator_dev->device, @@ -1557,7 +2149,7 @@ static int __devinit msm_rotator_probe(struct platform_device *pdev) return -ENOMEM; } for (i = 0; i < MAX_SESSIONS; i++) - msm_rotator_dev->img_info[i] = NULL; + msm_rotator_dev->rot_session[i] = NULL; msm_rotator_dev->last_session_idx = INVALID_SESSION; pdata = pdev->dev.platform_data; @@ -1751,6 +2343,13 @@ static int __devinit msm_rotator_probe(struct platform_device *pdev) } init_waitqueue_head(&msm_rotator_dev->wq); + INIT_WORK(&msm_rotator_dev->commit_work, rot_commit_wq_handler); + init_completion(&msm_rotator_dev->commit_comp); + mutex_init(&msm_rotator_dev->commit_mutex); + mutex_init(&msm_rotator_dev->commit_wq_mutex); + atomic_set(&msm_rotator_dev->commit_q_w, 0); + atomic_set(&msm_rotator_dev->commit_q_r, 0); + atomic_set(&msm_rotator_dev->commit_q_cnt, 0); dev_dbg(msm_rotator_dev->device, "probe successful\n"); return rc; @@ -1784,6 +2383,7 @@ static int __devexit msm_rotator_remove(struct platform_device *plat_dev) { int i; + rot_wait_for_commit_queue(true); #ifdef CONFIG_MSM_BUS_SCALING if (msm_rotator_dev->bus_client_handle) { msm_bus_scale_unregister_client @@ -1814,8 +2414,8 @@ static int __devexit msm_rotator_remove(struct platform_device *plat_dev) msm_rotator_dev->pclk = NULL; mutex_destroy(&msm_rotator_dev->imem_lock); for (i = 0; i < MAX_SESSIONS; i++) - if (msm_rotator_dev->img_info[i] != NULL) - kfree(msm_rotator_dev->img_info[i]); + if (msm_rotator_dev->rot_session[i] != NULL) + kfree(msm_rotator_dev->rot_session[i]); kfree(msm_rotator_dev); return 0; } @@ -1823,6 +2423,7 @@ static int 
__devexit msm_rotator_remove(struct platform_device *plat_dev) #ifdef CONFIG_PM static int msm_rotator_suspend(struct platform_device *dev, pm_message_t state) { + rot_wait_for_commit_queue(true); mutex_lock(&msm_rotator_dev->imem_lock); if (msm_rotator_dev->imem_clk_state == CLK_EN && msm_rotator_dev->imem_clk) { @@ -1835,6 +2436,7 @@ static int msm_rotator_suspend(struct platform_device *dev, pm_message_t state) disable_rot_clks(); msm_rotator_dev->rot_clk_state = CLK_SUSPEND; } + msm_rotator_release_all_timeline(); mutex_unlock(&msm_rotator_dev->rotator_lock); return 0; } diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile index e6989ab3..e3a60121 100644 --- a/drivers/gpu/ion/Makefile +++ b/drivers/gpu/ion/Makefile @@ -1,4 +1,5 @@ ccflags-y := -O3 -ffast-math -fgcse-lm -fgcse-sm -fsched-spec-load -fforce-addr -fsingle-precision-constant -mcpu=cortex-a15 -mtune=cortex-a15 -marm -mfpu=neon-vfpv4 obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o ion_cp_heap.o +obj-$(CONFIG_CMA) += ion_cma_heap.o obj-$(CONFIG_ION_TEGRA) += tegra/ obj-$(CONFIG_ION_MSM) += msm/ diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c index b8ac0bc8..0fcc50af 100644 --- a/drivers/gpu/ion/ion.c +++ b/drivers/gpu/ion/ion.c @@ -2,7 +2,7 @@ * drivers/gpu/ion/ion.c * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -73,24 +73,6 @@ struct ion_handle { static void ion_iommu_release(struct kref *kref); -static int ion_validate_buffer_flags(struct ion_buffer *buffer, - unsigned long flags) -{ - if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt || - buffer->iommu_map_cnt) { - if (buffer->flags != flags) { - pr_err("%s: buffer was already mapped with flags %lx," - " cannot map with flags %lx\n", __func__, - buffer->flags, flags); - return 1; - } - - } else { - buffer->flags = flags; - } - return 0; -} - static void ion_buffer_add(struct ion_device *dev, struct ion_buffer *buffer) { @@ -196,6 +178,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, buffer->dev = dev; buffer->size = len; + buffer->flags = flags; table = buffer->heap->ops->map_dma(buffer->heap, buffer); if (IS_ERR_OR_NULL(table)) { @@ -372,7 +355,8 @@ static void ion_handle_add(struct ion_client *client, struct ion_handle *handle) } struct ion_handle *ion_alloc(struct ion_client *client, size_t len, - size_t align, unsigned int flags) + size_t align, unsigned int heap_mask, + unsigned int flags) { struct rb_node *n; struct ion_handle *handle; @@ -397,10 +381,11 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len, if (!((1 << heap->type) & client->heap_mask)) continue; - if (!((1 << heap->id) & flags)) + if (!((1 << heap->id) & heap_mask)) continue; - if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP)) + if (secure_allocation && + (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)) continue; buffer = ion_buffer_create(heap, dev, len, align, flags); if (!IS_ERR_OR_NULL(buffer)) @@ -718,8 +703,7 @@ void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle, } EXPORT_SYMBOL(ion_unmap_iommu); -void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle, - unsigned long flags) +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) { 
struct ion_buffer *buffer; void *vaddr; @@ -741,11 +725,6 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle, return ERR_PTR(-ENODEV); } - if (ion_validate_buffer_flags(buffer, flags)) { - mutex_unlock(&client->lock); - return ERR_PTR(-EEXIST); - } - mutex_lock(&buffer->lock); vaddr = ion_handle_kmap_get(handle); mutex_unlock(&buffer->lock); @@ -832,7 +811,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused) if (type == ION_HEAP_TYPE_SYSTEM_CONTIG || type == ION_HEAP_TYPE_CARVEOUT || - type == ION_HEAP_TYPE_CP) + type == (enum ion_heap_type) ION_HEAP_TYPE_CP) seq_printf(s, " : %12lx", handle->buffer->priv_phys); else seq_printf(s, " : %12s", "N/A"); @@ -951,9 +930,7 @@ void ion_client_destroy(struct ion_client *client) while ((n = rb_first(&client->handles))) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); - mutex_lock(&client->lock); ion_handle_destroy(&handle->ref); - mutex_unlock(&client->lock); } mutex_lock(&dev->lock); if (client->task) @@ -1176,9 +1153,12 @@ static int ion_share_set_flags(struct ion_client *client, { struct ion_buffer *buffer; bool valid_handle; - unsigned long ion_flags = ION_SET_CACHE(CACHED); + unsigned long ion_flags = 0; if (flags & O_DSYNC) - ion_flags = ION_SET_CACHE(UNCACHED); + ion_flags = ION_SET_UNCACHED(ion_flags); + else + ion_flags = ION_SET_CACHED(ion_flags); + mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, handle); @@ -1190,12 +1170,6 @@ static int ion_share_set_flags(struct ion_client *client, buffer = handle->buffer; - mutex_lock(&buffer->lock); - if (ion_validate_buffer_flags(buffer, ion_flags)) { - mutex_unlock(&buffer->lock); - return -EEXIST; - } - mutex_unlock(&buffer->lock); return 0; } @@ -1272,14 +1246,14 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) struct ion_client *client = filp->private_data; switch (cmd) { - case ION_IOC_ALLOC_NEW: + case ION_IOC_ALLOC: { - struct ion_allocation_data_new data; + struct ion_allocation_data data; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; data.handle = ion_alloc(client, data.len, data.align, - data.flags | data.heap_mask); + data.heap_mask, data.flags); if (IS_ERR(data.handle)) return PTR_ERR(data.handle); @@ -1289,16 +1263,21 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EFAULT; } break; - } - case ION_IOC_ALLOC: + case ION_IOC_ALLOC_OLD: { - struct ion_allocation_data data; + struct ion_allocation_data_old data; + unsigned int heap_mask = 0; + unsigned int flags = 0; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; + + heap_mask = data.flags & ~(ION_SECURE | ION_FLAG_CACHED); + flags = data.flags & (ION_SECURE | ION_FLAG_CACHED); + data.handle = ion_alloc(client, data.len, data.align, - data.flags); + heap_mask, flags); if (IS_ERR(data.handle)) return PTR_ERR(data.handle); @@ -1352,8 +1331,10 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ion_fd_data))) return -EFAULT; data.handle = ion_import_dma_buf(client, data.fd); - if (IS_ERR(data.handle)) + if (IS_ERR(data.handle)) { + ret = PTR_ERR(data.handle); data.handle = NULL; + } if (copy_to_user((void __user *)arg, &data, sizeof(struct ion_fd_data))) return -EFAULT; @@ -1373,19 +1354,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EFAULT; return dev->custom_ioctl(client, data.cmd, data.arg); } - case ION_IOC_CLEAN_CACHES_OLD: case 
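The ioctl rework above splits the legacy combined flags word into an explicit heap_mask plus flags (ION_IOC_ALLOC_OLD keeps compatibility by deriving heap_mask = flags & ~(ION_SECURE | ION_FLAG_CACHED)), and ion_map_kernel() no longer takes per-mapping flags. Here is a hedged sketch of an in-kernel client using the new calling convention; the heap id, sizes, and header paths are placeholders that vary by tree.

#include <linux/err.h>
#include <linux/ion.h>		/* header path varies between trees */
#include <linux/sizes.h>

/* Illustrative only: allocate 1 MiB, cached, from one specific heap. */
static void *ex_ion_alloc_and_map(struct ion_client *client,
				  struct ion_handle **out_handle)
{
	unsigned int heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID);	/* placeholder id */
	unsigned int flags = ION_FLAG_CACHED;
	struct ion_handle *handle;
	void *vaddr;

	handle = ion_alloc(client, SZ_1M, SZ_4K, heap_mask, flags);
	if (IS_ERR_OR_NULL(handle))
		return NULL;

	vaddr = ion_map_kernel(client, handle);	/* flags argument is gone */
	if (IS_ERR_OR_NULL(vaddr)) {
		ion_free(client, handle);
		return NULL;
	}

	*out_handle = handle;
	return vaddr;
}
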
ION_IOC_CLEAN_CACHES: return client->dev->custom_ioctl(client, ION_IOC_CLEAN_CACHES, arg); - case ION_IOC_INV_CACHES_OLD: case ION_IOC_INV_CACHES: return client->dev->custom_ioctl(client, ION_IOC_INV_CACHES, arg); - case ION_IOC_CLEAN_INV_CACHES_OLD: case ION_IOC_CLEAN_INV_CACHES: return client->dev->custom_ioctl(client, ION_IOC_CLEAN_INV_CACHES, arg); - case ION_IOC_GET_FLAGS_OLD: case ION_IOC_GET_FLAGS: return client->dev->custom_ioctl(client, ION_IOC_GET_FLAGS, arg); @@ -1671,7 +1648,7 @@ int ion_secure_heap(struct ion_device *dev, int heap_id, int version, mutex_lock(&dev->lock); for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { struct ion_heap *heap = rb_entry(n, struct ion_heap, node); - if (heap->type != ION_HEAP_TYPE_CP) + if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP) continue; if (ION_HEAP(heap->id) != heap_id) continue; @@ -1695,7 +1672,7 @@ int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version, mutex_lock(&dev->lock); for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { struct ion_heap *heap = rb_entry(n, struct ion_heap, node); - if (heap->type != ION_HEAP_TYPE_CP) + if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP) continue; if (ION_HEAP(heap->id) != heap_id) continue; diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c index 5f7fe37d..ad0e4cca 100644 --- a/drivers/gpu/ion/ion_carveout_heap.c +++ b/drivers/gpu/ion/ion_carveout_heap.c @@ -2,7 +2,7 @@ * drivers/gpu/ion/ion_carveout_heap.c * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -31,6 +31,7 @@ #include #include #include +#include struct ion_carveout_heap { struct ion_heap heap; @@ -240,30 +241,80 @@ int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, void *vaddr, unsigned int offset, unsigned int length, unsigned int cmd) { - void (*outer_cache_op)(phys_addr_t, phys_addr_t); + void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL; struct ion_carveout_heap *carveout_heap = container_of(heap, struct ion_carveout_heap, heap); - - switch (cmd) { - case ION_IOC_CLEAN_CACHES: - dmac_clean_range(vaddr, vaddr + length); - outer_cache_op = outer_clean_range; - break; - case ION_IOC_INV_CACHES: - dmac_inv_range(vaddr, vaddr + length); - outer_cache_op = outer_inv_range; - break; - case ION_IOC_CLEAN_INV_CACHES: - dmac_flush_range(vaddr, vaddr + length); - outer_cache_op = outer_flush_range; - break; - default: - return -EINVAL; + unsigned int size_to_vmap, total_size; + int i, j; + void *ptr = NULL; + ion_phys_addr_t buff_phys = buffer->priv_phys; + + if (!vaddr) { + size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8); + total_size = buffer->size; + + for (i = 0; i < total_size; i += size_to_vmap) { + size_to_vmap = min(size_to_vmap, total_size - i); + for (j = 0; j < 10 && size_to_vmap; ++j) { + ptr = ioremap(buff_phys, size_to_vmap); + if (ptr) { + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(ptr, + ptr + size_to_vmap); + outer_cache_op = + outer_clean_range; + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(ptr, + ptr + size_to_vmap); + outer_cache_op = + outer_inv_range; + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(ptr, + ptr + size_to_vmap); + outer_cache_op = + outer_flush_range; + break; + default: + return 
-EINVAL; + } + buff_phys += size_to_vmap; + break; + } else { + size_to_vmap >>= 1; + } + } + if (!ptr) { + pr_err("Couldn't io-remap the memory\n"); + return -EINVAL; + } + iounmap(ptr); + } + } else { + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(vaddr, vaddr + length); + outer_cache_op = outer_clean_range; + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(vaddr, vaddr + length); + outer_cache_op = outer_inv_range; + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(vaddr, vaddr + length); + outer_cache_op = outer_flush_range; + break; + default: + return -EINVAL; + } } if (carveout_heap->has_outer_cache) { unsigned long pstart = buffer->priv_phys + offset; - outer_cache_op(pstart, pstart + length); + if (outer_cache_op) + outer_cache_op(pstart, pstart + length); } return 0; } @@ -361,7 +412,7 @@ int ion_carveout_heap_map_iommu(struct ion_buffer *buffer, goto out1; } - sglist = vmalloc(sizeof(*sglist)); + sglist = kmalloc(sizeof(*sglist), GFP_KERNEL); if (!sglist) goto out1; @@ -385,13 +436,13 @@ int ion_carveout_heap_map_iommu(struct ion_buffer *buffer, if (ret) goto out2; } - vfree(sglist); + kfree(sglist); return ret; out2: iommu_unmap_range(domain, data->iova_addr, buffer->size); out1: - vfree(sglist); + kfree(sglist); msm_free_iova_address(data->iova_addr, domain_num, partition_num, data->mapped_size); diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c index 2557e919..bd2ad3ca 100644 --- a/drivers/gpu/ion/ion_cp_heap.c +++ b/drivers/gpu/ion/ion_cp_heap.c @@ -2,7 +2,7 @@ * drivers/gpu/ion/ion_cp_heap.c * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. + * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -15,19 +15,19 @@ * */ #include - +#include #include #include #include -#include +#include #include #include #include -#include #include #include #include #include +#include #include @@ -41,39 +41,6 @@ #include #include "msm/ion_cp_common.h" -/** - * struct ion_cp_heap - container for the heap and shared heap data - - * @heap: the heap information structure - * @pool: memory pool to allocate from. - * @base: the base address of the memory pool. - * @permission_type: Identifier for the memory used by SCM for protecting - * and unprotecting memory. - * @secure_base: Base address used when securing a heap that is shared. - * @secure_size: Size used when securing a heap that is shared. - * @lock: mutex to protect shared access. - * @heap_protected: Indicates whether heap has been protected or not. - * @allocated_bytes: the total number of allocated bytes from the pool. - * @total_size: the total size of the memory pool. - * @request_region: function pointer to call when first mapping of memory - * occurs. - * @release_region: function pointer to call when last mapping of memory - * unmapped. - * @bus_id: token used with request/release region. - * @kmap_cached_count: the total number of times this heap has been mapped in - * kernel space (cached). - * @kmap_uncached_count:the total number of times this heap has been mapped in - * kernel space (un-cached). - * @umap_count: the total number of times this heap has been mapped in - * user space. - * @iommu_iova: saved iova when mapping full heap at once. - * @iommu_partition: partition used to map full heap. - * @reusable: indicates if the memory should be reused via fmem. 
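When the buffer has no kernel mapping (vaddr == NULL), the carveout cache-maintenance path above walks the physically contiguous region in chunks, temporarily ioremap()ing each one and halving the attempted window whenever the remap fails, so very large buffers can be cleaned or invalidated without a permanent mapping. A condensed sketch of that loop for the clean case only, with invented names and the same ARM helpers the heap code uses:

#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

/* Clean a contiguous, unmapped region by ioremapping it piecewise. */
static int ex_clean_unmapped(phys_addr_t phys, unsigned int size)
{
	unsigned int chunk = (VMALLOC_END - VMALLOC_START) / 8;
	unsigned int done;
	void *ptr;
	int retry;

	for (done = 0; done < size; done += chunk) {
		chunk = min(chunk, size - done);
		ptr = NULL;
		for (retry = 0; retry < 10 && chunk; retry++) {
			ptr = ioremap(phys + done, chunk);
			if (ptr)
				break;
			chunk >>= 1;		/* retry with a smaller window */
		}
		if (!ptr)
			return -EINVAL;
		dmac_clean_range(ptr, ptr + chunk);
		outer_clean_range(phys + done, phys + done + chunk);
		iounmap(ptr);
	}
	return 0;
}
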
- * @reserved_vrange: reserved virtual address range for use with fmem - * @iommu_map_all: Indicates whether we should map whole heap into IOMMU. - * @iommu_2x_map_domain: Indicates the domain to use for overmapping. - * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise. -*/ struct ion_cp_heap { struct ion_heap heap; struct gen_pool *pool; @@ -85,8 +52,8 @@ struct ion_cp_heap { unsigned int heap_protected; unsigned long allocated_bytes; unsigned long total_size; - int (*request_region)(void *); - int (*release_region)(void *); + int (*heap_request_region)(void *); + int (*heap_release_region)(void *); void *bus_id; unsigned long kmap_cached_count; unsigned long kmap_uncached_count; @@ -99,6 +66,11 @@ struct ion_cp_heap { int iommu_2x_map_domain; unsigned int has_outer_cache; atomic_t protect_cnt; + void *cpu_addr; + size_t heap_size; + dma_addr_t handle; + int cma; + int disallow_non_secure_allocation; }; enum { @@ -106,6 +78,8 @@ enum { HEAP_PROTECTED = 1, }; +#define DMA_ALLOC_RETRIES 5 + static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size, unsigned int permission_type, int version, void *data); @@ -114,21 +88,111 @@ static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size, unsigned int permission_type, int version, void *data); -/** - * Get the total number of kernel mappings. - * Must be called with heap->lock locked. - */ +static int allocate_heap_memory(struct ion_heap *heap) +{ + struct device *dev = heap->priv; + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + int ret; + int tries = 0; + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); + + + if (cp_heap->cpu_addr) + return 0; + + while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_RETRIES)) { + cp_heap->cpu_addr = dma_alloc_attrs(dev, + cp_heap->heap_size, + &(cp_heap->handle), + 0, + &attrs); + if (!cp_heap->cpu_addr) + msleep(20); + } + + if (!cp_heap->cpu_addr) + goto out; + + cp_heap->base = cp_heap->handle; + + cp_heap->pool = gen_pool_create(12, -1); + if (!cp_heap->pool) + goto out_free; + + ret = gen_pool_add(cp_heap->pool, cp_heap->base, + cp_heap->heap_size, -1); + if (ret < 0) + goto out_pool; + + return 0; + +out_pool: + gen_pool_destroy(cp_heap->pool); +out_free: + dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr, + cp_heap->handle); +out: + return ION_CP_ALLOCATE_FAIL; +} + +static void free_heap_memory(struct ion_heap *heap) +{ + struct device *dev = heap->priv; + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + + dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr, + cp_heap->handle); + gen_pool_destroy(cp_heap->pool); + cp_heap->pool = NULL; + cp_heap->cpu_addr = 0; +} + + + static unsigned long ion_cp_get_total_kmap_count( const struct ion_cp_heap *cp_heap) { return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count; } -/** - * Protects memory if heap is unsecured heap. Also ensures that we are in - * the correct FMEM state if this heap is a reusable heap. - * Must be called with heap->lock locked. 
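allocate_heap_memory() above defers the CMA backing of the content-protection heap until first use: dma_alloc_attrs() with DMA_ATTR_NO_KERNEL_MAPPING grabs the contiguous region without creating a kernel mapping, a few retries absorb transient CMA failures, and the result seeds a gen_pool allocator. A minimal free-standing sketch of that sequence follows; the function name, retry count, and GFP flags are illustrative.

#include <linux/delay.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>

static int ex_backing_alloc(struct device *dev, size_t size,
			    void **cpu_addr, dma_addr_t *handle,
			    struct gen_pool **pool)
{
	DEFINE_DMA_ATTRS(attrs);
	int tries = 0;

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	do {	/* CMA can fail transiently under fragmentation */
		*cpu_addr = dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
					    &attrs);
		if (*cpu_addr)
			break;
		msleep(20);
	} while (++tries < 5);

	if (!*cpu_addr)
		return -ENOMEM;

	*pool = gen_pool_create(12, -1);	/* 4 KiB minimum chunk (order 12) */
	if (!*pool || gen_pool_add(*pool, *handle, size, -1) < 0) {
		if (*pool)
			gen_pool_destroy(*pool);
		dma_free_attrs(dev, size, *cpu_addr, *handle, &attrs);
		return -ENOMEM;
	}
	return 0;
}
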
- */ +static int ion_on_first_alloc(struct ion_heap *heap) +{ + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + int ret_value; + + if (cp_heap->reusable) { + ret_value = fmem_set_state(FMEM_C_STATE); + if (ret_value) + return 1; + } + + if (cp_heap->cma) { + ret_value = allocate_heap_memory(heap); + if (ret_value) + return 1; + } + return 0; +} + +static void ion_on_last_free(struct ion_heap *heap) +{ + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + if (cp_heap->reusable) + if (fmem_set_state(FMEM_T_STATE) != 0) + pr_err("%s: unable to transition heap to T-state\n", + __func__); + + if (cp_heap->cma) + free_heap_memory(heap); +} + static int ion_cp_protect(struct ion_heap *heap, int version, void *data) { struct ion_cp_heap *cp_heap = @@ -136,11 +200,13 @@ static int ion_cp_protect(struct ion_heap *heap, int version, void *data) int ret_value = 0; if (atomic_inc_return(&cp_heap->protect_cnt) == 1) { - /* Make sure we are in C state when the heap is protected. */ - if (cp_heap->reusable && !cp_heap->allocated_bytes) { - ret_value = fmem_set_state(FMEM_C_STATE); - if (ret_value) + + if (!cp_heap->allocated_bytes) { + ret_value = ion_on_first_alloc(heap); + if (ret_value) { + atomic_dec(&cp_heap->protect_cnt); goto out; + } } ret_value = ion_cp_protect_mem(cp_heap->secure_base, @@ -150,11 +216,9 @@ static int ion_cp_protect(struct ion_heap *heap, int version, void *data) pr_err("Failed to protect memory for heap %s - " "error code: %d\n", heap->name, ret_value); - if (cp_heap->reusable && !cp_heap->allocated_bytes) { - if (fmem_set_state(FMEM_T_STATE) != 0) - pr_err("%s: unable to transition heap to T-state\n", - __func__); - } + if (!cp_heap->allocated_bytes) + ion_on_last_free(heap); + atomic_dec(&cp_heap->protect_cnt); } else { cp_heap->heap_protected = HEAP_PROTECTED; @@ -169,11 +233,6 @@ static int ion_cp_protect(struct ion_heap *heap, int version, void *data) return ret_value; } -/** - * Unprotects memory if heap is secure heap. Also ensures that we are in - * the correct FMEM state if this heap is a reusable heap. - * Must be called with heap->lock locked. 
- */ static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data) { struct ion_cp_heap *cp_heap = @@ -191,11 +250,8 @@ static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data) pr_debug("Un-protected heap %s @ 0x%x\n", heap->name, (unsigned int) cp_heap->base); - if (cp_heap->reusable && !cp_heap->allocated_bytes) { - if (fmem_set_state(FMEM_T_STATE) != 0) - pr_err("%s: unable to transition heap to T-state", - __func__); - } + if (!cp_heap->allocated_bytes) + ion_on_last_free(heap); } } pr_debug("%s: protect count is %d\n", __func__, @@ -210,6 +266,7 @@ ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap, { unsigned long offset; unsigned long secure_allocation = flags & ION_SECURE; + unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS; struct ion_cp_heap *cp_heap = container_of(heap, struct ion_cp_heap, heap); @@ -222,6 +279,14 @@ ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap, return ION_CP_ALLOCATE_FAIL; } + if (!force_contig && !secure_allocation && + cp_heap->disallow_non_secure_allocation) { + mutex_unlock(&cp_heap->lock); + pr_debug("%s: non-secure allocation disallowed from this heap\n", + __func__); + return ION_CP_ALLOCATE_FAIL; + } + if (secure_allocation && (cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) { mutex_unlock(&cp_heap->lock); @@ -232,16 +297,11 @@ ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap, return ION_CP_ALLOCATE_FAIL; } - /* - * if this is the first reusable allocation, transition - * the heap - */ - if (cp_heap->reusable && !cp_heap->allocated_bytes) { - if (fmem_set_state(FMEM_C_STATE) != 0) { + if (!cp_heap->allocated_bytes) + if (ion_on_first_alloc(heap)) { mutex_unlock(&cp_heap->lock); return ION_RESERVED_ALLOCATE_FAIL; } - } cp_heap->allocated_bytes += size; mutex_unlock(&cp_heap->lock); @@ -260,13 +320,9 @@ ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap, __func__, heap->name, cp_heap->total_size - cp_heap->allocated_bytes, size); - - if (cp_heap->reusable && !cp_heap->allocated_bytes && - cp_heap->heap_protected == HEAP_NOT_PROTECTED) { - if (fmem_set_state(FMEM_T_STATE) != 0) - pr_err("%s: unable to transition heap to T-state\n", - __func__); - } + if (!cp_heap->allocated_bytes && + cp_heap->heap_protected == HEAP_NOT_PROTECTED) + ion_on_last_free(heap); mutex_unlock(&cp_heap->lock); return ION_CP_ALLOCATE_FAIL; @@ -311,14 +367,11 @@ void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr, mutex_lock(&cp_heap->lock); cp_heap->allocated_bytes -= size; - if (cp_heap->reusable && !cp_heap->allocated_bytes && - cp_heap->heap_protected == HEAP_NOT_PROTECTED) { - if (fmem_set_state(FMEM_T_STATE) != 0) - pr_err("%s: unable to transition heap to T-state\n", - __func__); - } + if (!cp_heap->allocated_bytes && + cp_heap->heap_protected == HEAP_NOT_PROTECTED) + ion_on_last_free(heap); - /* Unmap everything if we previously mapped the whole heap at once. */ + if (!cp_heap->allocated_bytes) { unsigned int i; for (i = 0; i < MAX_DOMAINS; ++i) { @@ -404,27 +457,23 @@ void ion_cp_heap_unmap_dma(struct ion_heap *heap, buffer->sg_table = 0; } -/** - * Call request region for SMI memory of this is the first mapping. 
- */ static int ion_cp_request_region(struct ion_cp_heap *cp_heap) { int ret_value = 0; if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0) - if (cp_heap->request_region) - ret_value = cp_heap->request_region(cp_heap->bus_id); + if (cp_heap->heap_request_region) + ret_value = cp_heap->heap_request_region( + cp_heap->bus_id); return ret_value; } -/** - * Call release region for SMI memory of this is the last un-mapping. - */ static int ion_cp_release_region(struct ion_cp_heap *cp_heap) { int ret_value = 0; if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0) - if (cp_heap->release_region) - ret_value = cp_heap->release_region(cp_heap->bus_id); + if (cp_heap->heap_release_region) + ret_value = cp_heap->heap_release_region( + cp_heap->bus_id); return ret_value; } @@ -469,7 +518,29 @@ void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer) if (cp_heap->reusable) { ret_value = ion_map_fmem_buffer(buffer, cp_heap->base, cp_heap->reserved_vrange, buffer->flags); + } else if (cp_heap->cma) { + int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + struct page **pages = vmalloc( + sizeof(struct page *) * npages); + int i; + pgprot_t pgprot; + + if (!pages) { + mutex_unlock(&cp_heap->lock); + return ERR_PTR(-ENOMEM); + } + if (ION_IS_CACHED(buffer->flags)) + pgprot = PAGE_KERNEL; + else + pgprot = pgprot_writecombine(PAGE_KERNEL); + + for (i = 0; i < npages; i++) { + pages[i] = phys_to_page(buffer->priv_phys + + i * PAGE_SIZE); + } + ret_value = vmap(pages, npages, VM_IOREMAP, pgprot); + vfree(pages); } else { if (ION_IS_CACHED(buffer->flags)) ret_value = ioremap_cached(buffer->priv_phys, @@ -500,6 +571,8 @@ void ion_cp_heap_unmap_kernel(struct ion_heap *heap, if (cp_heap->reusable) unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size); + else if (cp_heap->cma) + vunmap(buffer->vaddr); else __arm_iounmap(buffer->vaddr); @@ -564,30 +637,79 @@ int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, void *vaddr, unsigned int offset, unsigned int length, unsigned int cmd) { - void (*outer_cache_op)(phys_addr_t, phys_addr_t); + void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL; struct ion_cp_heap *cp_heap = - container_of(heap, struct ion_cp_heap, heap); - - switch (cmd) { - case ION_IOC_CLEAN_CACHES: - dmac_clean_range(vaddr, vaddr + length); - outer_cache_op = outer_clean_range; - break; - case ION_IOC_INV_CACHES: - dmac_inv_range(vaddr, vaddr + length); - outer_cache_op = outer_inv_range; - break; - case ION_IOC_CLEAN_INV_CACHES: - dmac_flush_range(vaddr, vaddr + length); - outer_cache_op = outer_flush_range; - break; - default: - return -EINVAL; + container_of(heap, struct ion_cp_heap, heap); + unsigned int size_to_vmap, total_size; + int i, j; + void *ptr = NULL; + ion_phys_addr_t buff_phys = buffer->priv_phys; + + if (!vaddr) { + size_to_vmap = (VMALLOC_END - VMALLOC_START)/8; + total_size = buffer->size; + for (i = 0; i < total_size; i += size_to_vmap) { + size_to_vmap = min(size_to_vmap, total_size - i); + for (j = 0; j < 10 && size_to_vmap; ++j) { + ptr = ioremap(buff_phys, size_to_vmap); + if (ptr) { + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(ptr, + ptr + size_to_vmap); + outer_cache_op = + outer_clean_range; + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(ptr, + ptr + size_to_vmap); + outer_cache_op = + outer_inv_range; + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(ptr, + ptr + size_to_vmap); + outer_cache_op = + outer_flush_range; + break; + default: + return 
-EINVAL; + } + buff_phys += size_to_vmap; + break; + } else { + size_to_vmap >>= 1; + } + } + if (!ptr) { + pr_err("Couldn't io-remap the memory\n"); + return -EINVAL; + } + iounmap(ptr); + } + } else { + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(vaddr, vaddr + length); + outer_cache_op = outer_clean_range; + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(vaddr, vaddr + length); + outer_cache_op = outer_inv_range; + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(vaddr, vaddr + length); + outer_cache_op = outer_flush_range; + break; + default: + return -EINVAL; + } } if (cp_heap->has_outer_cache) { unsigned long pstart = buffer->priv_phys + offset; - outer_cache_op(pstart, pstart + length); + if (outer_cache_op) + outer_cache_op(pstart, pstart + length); } return 0; } @@ -703,9 +825,6 @@ static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap, unsigned long virt_addr_len = cp_heap->total_size; struct iommu_domain *domain = msm_get_iommu_domain(domain_num); - /* If we are mapping into the video domain we need to map twice the - * size of the heap to account for prefetch issue in video core. - */ if (domain_num == cp_heap->iommu_2x_map_domain) virt_addr_len <<= 1; @@ -788,7 +907,7 @@ static int ion_cp_heap_map_iommu(struct ion_buffer *buffer, } if (cp_heap->iommu_iova[domain_num]) { - /* Already mapped. */ + unsigned long offset = buffer->priv_phys - cp_heap->base; data->iova_addr = cp_heap->iommu_iova[domain_num] + offset; return 0; @@ -800,10 +919,6 @@ static int ion_cp_heap_map_iommu(struct ion_buffer *buffer, data->iova_addr = cp_heap->iommu_iova[domain_num] + offset; cp_heap->iommu_partition[domain_num] = partition_num; - /* - clear delayed map flag so that we don't interfere - with this feature (we are already delaying). - */ data->flags &= ~ION_IOMMU_UNMAP_DELAYED; return 0; } else { @@ -869,8 +984,6 @@ static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data) domain_num = iommu_map_domain(data); - /* If we are mapping everything we'll wait to unmap until everything - is freed. 
*/ if (cp_heap->iommu_iova[domain_num]) return; @@ -919,14 +1032,6 @@ struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data) mutex_init(&cp_heap->lock); - cp_heap->pool = gen_pool_create(12, -1); - if (!cp_heap->pool) - goto free_heap; - - cp_heap->base = heap_data->base; - ret = gen_pool_add(cp_heap->pool, cp_heap->base, heap_data->size, -1); - if (ret < 0) - goto destroy_pool; cp_heap->allocated_bytes = 0; cp_heap->umap_count = 0; @@ -934,11 +1039,13 @@ struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data) cp_heap->kmap_uncached_count = 0; cp_heap->total_size = heap_data->size; cp_heap->heap.ops = &cp_heap_ops; - cp_heap->heap.type = ION_HEAP_TYPE_CP; + cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP; cp_heap->heap_protected = HEAP_NOT_PROTECTED; - cp_heap->secure_base = cp_heap->base; + cp_heap->secure_base = heap_data->base; cp_heap->secure_size = heap_data->size; cp_heap->has_outer_cache = heap_data->has_outer_cache; + cp_heap->heap_size = heap_data->size; + atomic_set(&cp_heap->protect_cnt, 0); if (heap_data->extra_data) { struct ion_cp_heap_pdata *extra_data = @@ -953,16 +1060,37 @@ struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data) if (extra_data->setup_region) cp_heap->bus_id = extra_data->setup_region(); if (extra_data->request_region) - cp_heap->request_region = extra_data->request_region; + cp_heap->heap_request_region = + extra_data->request_region; if (extra_data->release_region) - cp_heap->release_region = extra_data->release_region; + cp_heap->heap_release_region = + extra_data->release_region; cp_heap->iommu_map_all = extra_data->iommu_map_all; cp_heap->iommu_2x_map_domain = extra_data->iommu_2x_map_domain; + cp_heap->cma = extra_data->is_cma; + cp_heap->disallow_non_secure_allocation = + extra_data->no_nonsecure_alloc; } + if (cp_heap->cma) { + cp_heap->pool = NULL; + cp_heap->cpu_addr = 0; + cp_heap->heap.priv = heap_data->priv; + } else { + cp_heap->pool = gen_pool_create(12, -1); + if (!cp_heap->pool) + goto free_heap; + + cp_heap->base = heap_data->base; + ret = gen_pool_add(cp_heap->pool, cp_heap->base, + heap_data->size, -1); + if (ret < 0) + goto destroy_pool; + + } return &cp_heap->heap; destroy_pool: @@ -993,7 +1121,6 @@ void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base, *size = cp_heap->total_size; } -/* SCM related code for locking down memory for content protection */ #define SCM_CP_LOCK_CMD_ID 0x1 #define SCM_CP_PROTECT 0x1 diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c index 6ea49dbd..98c1a8c3 100644 --- a/drivers/gpu/ion/ion_heap.c +++ b/drivers/gpu/ion/ion_heap.c @@ -2,7 +2,7 @@ * drivers/gpu/ion/ion_heap.c * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -18,12 +18,13 @@ #include #include #include "ion_priv.h" +#include struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) { struct ion_heap *heap = NULL; - switch (heap_data->type) { + switch ((int) heap_data->type) { case ION_HEAP_TYPE_SYSTEM_CONTIG: heap = ion_system_contig_heap_create(heap_data); break; @@ -39,6 +40,11 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) case ION_HEAP_TYPE_CP: heap = ion_cp_heap_create(heap_data); break; +#ifdef CONFIG_CMA + case ION_HEAP_TYPE_DMA: + heap = ion_cma_heap_create(heap_data); + break; +#endif default: pr_err("%s: Invalid heap type %d\n", __func__, heap_data->type); @@ -54,6 +60,7 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) heap->name = heap_data->name; heap->id = heap_data->id; + heap->priv = heap_data->priv; return heap; } @@ -62,7 +69,7 @@ void ion_heap_destroy(struct ion_heap *heap) if (!heap) return; - switch (heap->type) { + switch ((int) heap->type) { case ION_HEAP_TYPE_SYSTEM_CONTIG: ion_system_contig_heap_destroy(heap); break; @@ -78,6 +85,11 @@ void ion_heap_destroy(struct ion_heap *heap) case ION_HEAP_TYPE_CP: ion_cp_heap_destroy(heap); break; +#ifdef CONFIG_CMA + case ION_HEAP_TYPE_DMA: + ion_cma_heap_destroy(heap); + break; +#endif default: pr_err("%s: Invalid heap type %d\n", __func__, heap->type); diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h index de1fa7c1..1a3781b0 100644 --- a/drivers/gpu/ion/ion_priv.h +++ b/drivers/gpu/ion/ion_priv.h @@ -2,7 +2,7 @@ * drivers/gpu/ion/ion_priv.h * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -173,6 +173,7 @@ struct ion_heap { struct ion_heap_ops *ops; int id; const char *name; + void *priv; }; /** @@ -256,6 +257,10 @@ ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, unsigned long size); +#ifdef CONFIG_CMA +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *); +void ion_cma_heap_destroy(struct ion_heap *); +#endif struct ion_heap *msm_get_contiguous_heap(void); /** diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c index c79c1843..3e9c1765 100644 --- a/drivers/gpu/ion/ion_system_heap.c +++ b/drivers/gpu/ion/ion_system_heap.c @@ -2,7 +2,7 @@ * drivers/gpu/ion/ion_system_heap.c * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -27,6 +27,8 @@ #include "ion_priv.h" #include #include +#include +#include static atomic_t system_heap_allocated; static atomic_t system_contig_heap_allocated; @@ -183,15 +185,30 @@ int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, switch (cmd) { case ION_IOC_CLEAN_CACHES: - dmac_clean_range(vaddr, vaddr + length); + if (!vaddr) + dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, + buffer->sg_table->nents, DMA_TO_DEVICE); + else + dmac_clean_range(vaddr, vaddr + length); outer_cache_op = outer_clean_range; break; case ION_IOC_INV_CACHES: - dmac_inv_range(vaddr, vaddr + length); + if (!vaddr) + dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl, + buffer->sg_table->nents, DMA_FROM_DEVICE); + else + dmac_inv_range(vaddr, vaddr + length); outer_cache_op = outer_inv_range; break; case ION_IOC_CLEAN_INV_CACHES: - dmac_flush_range(vaddr, vaddr + length); + if (!vaddr) { + dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, + buffer->sg_table->nents, DMA_TO_DEVICE); + dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl, + buffer->sg_table->nents, DMA_FROM_DEVICE); + } else { + dmac_flush_range(vaddr, vaddr + length); + } outer_cache_op = outer_flush_range; break; default: @@ -254,6 +271,9 @@ int ion_system_heap_map_iommu(struct ion_buffer *buffer, data->mapped_size = iova_length; extra = iova_length - buffer->size; + if (table->sgl->length > align) + align = table->sgl->length; + ret = msm_allocate_iova_address(domain_num, partition_num, data->mapped_size, align, &data->iova_addr); @@ -482,7 +502,7 @@ int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer, } page = virt_to_page(buffer->vaddr); - sglist = vmalloc(sizeof(*sglist)); + sglist = kmalloc(sizeof(*sglist), GFP_KERNEL); if (!sglist) goto out1; @@ -504,13 +524,13 @@ int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer, if (ret) goto out2; } - vfree(sglist); + kfree(sglist); return ret; out2: iommu_unmap_range(domain, data->iova_addr, buffer->size); out1: - vfree(sglist); + kfree(sglist); msm_free_iova_address(data->iova_addr, domain_num, partition_num, data->mapped_size); out: diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c index b274ba25..41e0a04e 100644 --- a/drivers/gpu/ion/msm/ion_cp_common.c +++ b/drivers/gpu/ion/msm/ion_cp_common.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2011 Google, Inc - * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and diff --git a/drivers/gpu/ion/msm/ion_cp_common.h b/drivers/gpu/ion/msm/ion_cp_common.h index 69dd19e6..eec66e6f 100644 --- a/drivers/gpu/ion/msm/ion_cp_common.h +++ b/drivers/gpu/ion/msm/ion_cp_common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Code Aurora Forum. All rights reserved. + * Copyright (c) 2012, The Linux Foundation. All rights reserved. 
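Note: the ion_system_heap cache-ops change above falls back to scatterlist-based maintenance when the buffer has no kernel mapping (vaddr == NULL), instead of always running dmac_*_range() on a virtual range. In isolation the fallback looks like the sketch below; the NULL device argument mirrors what the patch passes on this kernel, and would need a real device on newer kernels:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: clean (write back) or invalidate a buffer described only by
 * an sg_table, without requiring a kernel virtual mapping.
 */
static void sg_cache_clean(struct sg_table *table)
{
        dma_sync_sg_for_device(NULL, table->sgl, table->nents, DMA_TO_DEVICE);
}

static void sg_cache_invalidate(struct sg_table *table)
{
        dma_sync_sg_for_cpu(NULL, table->sgl, table->nents, DMA_FROM_DEVICE);
}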
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,7 +15,7 @@ #define ION_CP_COMMON_H #include -#include +#include #define ION_CP_V1 1 #define ION_CP_V2 2 diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c index 1541162f..3cb926bf 100644 --- a/drivers/gpu/ion/msm/msm_ion.c +++ b/drivers/gpu/ion/msm/msm_ion.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -197,7 +197,7 @@ static void msm_ion_allocate(struct ion_platform_heap *heap) if (!heap->base && heap->extra_data) { unsigned int align = 0; - switch (heap->type) { + switch ((int) heap->type) { case ION_HEAP_TYPE_CARVEOUT: align = ((struct ion_co_heap_pdata *) heap->extra_data)->align; @@ -320,7 +320,7 @@ static long msm_ion_custom_ioctl(struct ion_client *client, start = (unsigned long) data.vaddr; end = (unsigned long) data.vaddr + data.length; - if (check_vaddr_bounds(start, end)) { + if (start && check_vaddr_bounds(start, end)) { pr_err("%s: virtual address %p is out of bounds\n", __func__, data.vaddr); return -EINVAL; diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig index fcfb9f02..5d56ed3b 100644 --- a/drivers/gpu/msm/Kconfig +++ b/drivers/gpu/msm/Kconfig @@ -99,7 +99,7 @@ config MSM_KGSL_DISABLE_SHADOW_WRITES config MSM_KGSL_KILL_HANG_PROCESS bool "Enable killing recoverable gpu hang process routine" - default y + default n ---help--- We only enable this config in CRC branch. diff --git a/drivers/gpu/msm/a2xx_reg.h b/drivers/gpu/msm/a2xx_reg.h index 19fc60b0..ea443a1c 100644 --- a/drivers/gpu/msm/a2xx_reg.h +++ b/drivers/gpu/msm/a2xx_reg.h @@ -183,6 +183,7 @@ union reg_cp_rb_cntl { #define SQ_INT_CNTL__VS_WATCHDOG_MASK 0x00000002L #define RBBM_INT_CNTL__RDERR_INT_MASK 0x00000001L +#define RBBM_INT_CNTL__PROTECT_INT_MASK 0x00100000L #define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK 0x00000002L #define RBBM_INT_CNTL__GUI_IDLE_INT_MASK 0x00080000L @@ -336,6 +337,23 @@ union reg_cp_rb_cntl { #define REG_RBBM_SOFT_RESET 0x003C #define REG_RBBM_STATUS 0x05D0 +#define REG_RBBM_PROTECT_0 0x0140 +#define REG_RBBM_PROTECT_1 0x0141 +#define REG_RBBM_PROTECT_2 0x0142 +#define REG_RBBM_PROTECT_3 0x0143 +#define REG_RBBM_PROTECT_4 0x0144 +#define REG_RBBM_PROTECT_5 0x0145 +#define REG_RBBM_PROTECT_6 0x0146 +#define REG_RBBM_PROTECT_7 0x0147 +#define REG_RBBM_PROTECT_8 0x0148 +#define REG_RBBM_PROTECT_9 0x0149 +#define REG_RBBM_PROTECT_A 0x014A +#define REG_RBBM_PROTECT_B 0x014B +#define REG_RBBM_PROTECT_C 0x014C +#define REG_RBBM_PROTECT_D 0x014D +#define REG_RBBM_PROTECT_E 0x014E +#define REG_RBBM_PROTECT_F 0x014F + #define REG_RB_COLORCONTROL 0x2202 #define REG_RB_COLOR_DEST_MASK 0x2326 #define REG_RB_COLOR_MASK 0x2104 diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h index d9a2a8a6..aefdfffe 100644 --- a/drivers/gpu/msm/a3xx_reg.h +++ b/drivers/gpu/msm/a3xx_reg.h @@ -227,7 +227,6 @@ #define A3XX_PC_PERFCOUNTER1_SELECT 0xC49 #define A3XX_PC_PERFCOUNTER2_SELECT 0xC4A #define A3XX_PC_PERFCOUNTER3_SELECT 0xC4B -#define A3XX_GRAS_TSE_DEBUG_ECO 0xC81 #define A3XX_GRAS_PERFCOUNTER0_SELECT 0xC88 #define A3XX_GRAS_PERFCOUNTER1_SELECT 0xC89 #define A3XX_GRAS_PERFCOUNTER2_SELECT 0xC8A @@ -265,10 +264,6 @@ #define A3XX_HLSQ_PERFCOUNTER3_SELECT 0xE03 
#define A3XX_HLSQ_PERFCOUNTER4_SELECT 0xE04 #define A3XX_HLSQ_PERFCOUNTER5_SELECT 0xE05 -#define A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0xCC1 -#define A3XX_RB_PERFCOUNTER0_SELECT 0xCC6 -#define A3XX_RB_PERFCOUNTER1_SELECT 0xCC7 -#define A3XX_RB_FRAME_BUFFER_DIMENSION 0xCE0 #define A3XX_VFD_PERFCOUNTER0_SELECT 0xE44 #define A3XX_VFD_PERFCOUNTER1_SELECT 0xE45 #define A3XX_VPC_VPC_DEBUG_RAM_SEL 0xE61 @@ -300,9 +295,6 @@ #define A3XX_GRAS_CL_CLIP_CNTL 0x2040 #define A3XX_GRAS_CL_GB_CLIP_ADJ 0x2044 #define A3XX_GRAS_CL_VPORT_XOFFSET 0x2048 -#define A3XX_GRAS_CL_VPORT_XSCALE 0x2049 -#define A3XX_GRAS_CL_VPORT_YOFFSET 0x204A -#define A3XX_GRAS_CL_VPORT_YSCALE 0x204B #define A3XX_GRAS_CL_VPORT_ZOFFSET 0x204C #define A3XX_GRAS_CL_VPORT_ZSCALE 0x204D #define A3XX_GRAS_SU_POINT_MINMAX 0x2068 @@ -318,75 +310,30 @@ #define A3XX_RB_MODE_CONTROL 0x20C0 #define A3XX_RB_RENDER_CONTROL 0x20C1 #define A3XX_RB_MSAA_CONTROL 0x20C2 -#define A3XX_RB_ALPHA_REFERENCE 0x20C3 #define A3XX_RB_MRT_CONTROL0 0x20C4 #define A3XX_RB_MRT_BUF_INFO0 0x20C5 -#define A3XX_RB_MRT_BUF_BASE0 0x20C6 #define A3XX_RB_MRT_BLEND_CONTROL0 0x20C7 -#define A3XX_RB_MRT_CONTROL1 0x20C8 -#define A3XX_RB_MRT_BUF_INFO1 0x20C9 -#define A3XX_RB_MRT_BUF_BASE1 0x20CA #define A3XX_RB_MRT_BLEND_CONTROL1 0x20CB -#define A3XX_RB_MRT_CONTROL2 0x20CC -#define A3XX_RB_MRT_BUF_INFO2 0x20CD -#define A3XX_RB_MRT_BUF_BASE2 0x20CE #define A3XX_RB_MRT_BLEND_CONTROL2 0x20CF -#define A3XX_RB_MRT_CONTROL3 0x20D0 -#define A3XX_RB_MRT_BUF_INFO3 0x20D1 -#define A3XX_RB_MRT_BUF_BASE3 0x20D2 #define A3XX_RB_MRT_BLEND_CONTROL3 0x20D3 #define A3XX_RB_BLEND_RED 0x20E4 -#define A3XX_RB_BLEND_GREEN 0x20E5 -#define A3XX_RB_BLEND_BLUE 0x20E6 -#define A3XX_RB_BLEND_ALPHA 0x20E7 -#define A3XX_RB_CLEAR_COLOR_DW0 0x20E8 -#define A3XX_RB_CLEAR_COLOR_DW1 0x20E9 -#define A3XX_RB_CLEAR_COLOR_DW2 0x20EA -#define A3XX_RB_CLEAR_COLOR_DW3 0x20EB #define A3XX_RB_COPY_CONTROL 0x20EC -#define A3XX_RB_COPY_DEST_BASE 0x20ED -#define A3XX_RB_COPY_DEST_PITCH 0x20EE #define A3XX_RB_COPY_DEST_INFO 0x20EF #define A3XX_RB_DEPTH_CONTROL 0x2100 -#define A3XX_RB_DEPTH_CLEAR 0x2101 -#define A3XX_RB_DEPTH_BUF_INFO 0x2102 -#define A3XX_RB_DEPTH_BUF_PITCH 0x2103 #define A3XX_RB_STENCIL_CONTROL 0x2104 -#define A3XX_RB_STENCIL_CLEAR 0x2105 -#define A3XX_RB_STENCIL_BUF_INFO 0x2106 -#define A3XX_RB_STENCIL_BUF_PITCH 0x2107 -#define A3XX_RB_STENCIL_REF_MASK 0x2108 -#define A3XX_RB_STENCIL_REF_MASK_BF 0x2109 -#define A3XX_RB_LRZ_VSC_CONTROL 0x210C -#define A3XX_RB_WINDOW_OFFSET 0x210E -#define A3XX_RB_SAMPLE_COUNT_CONTROL 0x2110 -#define A3XX_RB_SAMPLE_COUNT_ADDR 0x2111 -#define A3XX_RB_Z_CLAMP_MIN 0x2114 -#define A3XX_RB_Z_CLAMP_MAX 0x2115 #define A3XX_PC_VSTREAM_CONTROL 0x21E4 #define A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x21EA #define A3XX_PC_PRIM_VTX_CNTL 0x21EC #define A3XX_PC_RESTART_INDEX 0x21ED #define A3XX_HLSQ_CONTROL_0_REG 0x2200 -#define A3XX_HLSQ_CONTROL_1_REG 0x2201 -#define A3XX_HLSQ_CONTROL_2_REG 0x2202 -#define A3XX_HLSQ_CONTROL_3_REG 0x2203 #define A3XX_HLSQ_VS_CONTROL_REG 0x2204 -#define A3XX_HLSQ_FS_CONTROL_REG 0x2205 -#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x2206 #define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x2207 #define A3XX_HLSQ_CL_NDRANGE_0_REG 0x220A -#define A3XX_HLSQ_CL_NDRANGE_1_REG 0x220B #define A3XX_HLSQ_CL_NDRANGE_2_REG 0x220C -#define A3XX_HLSQ_CL_NDRANGE_3_REG 0x220D -#define A3XX_HLSQ_CL_NDRANGE_4_REG 0x220E -#define A3XX_HLSQ_CL_NDRANGE_5_REG 0x220F -#define A3XX_HLSQ_CL_NDRANGE_6_REG 0x2210 #define A3XX_HLSQ_CL_CONTROL_0_REG 0x2211 #define A3XX_HLSQ_CL_CONTROL_1_REG 0x2212 #define 
A3XX_HLSQ_CL_KERNEL_CONST_REG 0x2214 #define A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x2215 -#define A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x2216 #define A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x2217 #define A3XX_HLSQ_CL_WG_OFFSET_REG 0x221A #define A3XX_VFD_CONTROL_0 0x2240 @@ -403,21 +350,10 @@ #define A3XX_SP_VS_CTRL_REG0 0x22C4 #define A3XX_SP_VS_CTRL_REG1 0x22C5 #define A3XX_SP_VS_PARAM_REG 0x22C6 -#define A3XX_SP_VS_OUT_REG_0 0x22C7 -#define A3XX_SP_VS_OUT_REG_1 0x22C8 -#define A3XX_SP_VS_OUT_REG_2 0x22C9 -#define A3XX_SP_VS_OUT_REG_3 0x22CA -#define A3XX_SP_VS_OUT_REG_4 0x22CB -#define A3XX_SP_VS_OUT_REG_5 0x22CC -#define A3XX_SP_VS_OUT_REG_6 0x22CD #define A3XX_SP_VS_OUT_REG_7 0x22CE #define A3XX_SP_VS_VPC_DST_REG_0 0x22D0 -#define A3XX_SP_VS_VPC_DST_REG_1 0x22D1 -#define A3XX_SP_VS_VPC_DST_REG_2 0x22D2 -#define A3XX_SP_VS_VPC_DST_REG_3 0x22D3 #define A3XX_SP_VS_OBJ_OFFSET_REG 0x22D4 #define A3XX_SP_VS_OBJ_START_REG 0x22D5 -#define A3XX_SP_VS_PVT_MEM_PARAM_REG 0x22D6 #define A3XX_SP_VS_PVT_MEM_ADDR_REG 0x22D7 #define A3XX_SP_VS_PVT_MEM_SIZE_REG 0x22D8 #define A3XX_SP_VS_LENGTH_REG 0x22DF @@ -425,19 +361,13 @@ #define A3XX_SP_FS_CTRL_REG1 0x22E1 #define A3XX_SP_FS_OBJ_OFFSET_REG 0x22E2 #define A3XX_SP_FS_OBJ_START_REG 0x22E3 -#define A3XX_SP_FS_PVT_MEM_PARAM_REG 0x22E4 #define A3XX_SP_FS_PVT_MEM_ADDR_REG 0x22E5 #define A3XX_SP_FS_PVT_MEM_SIZE_REG 0x22E6 #define A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x22E8 #define A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x22E9 #define A3XX_SP_FS_OUTPUT_REG 0x22EC #define A3XX_SP_FS_MRT_REG_0 0x22F0 -#define A3XX_SP_FS_MRT_REG_1 0x22F1 -#define A3XX_SP_FS_MRT_REG_2 0x22F2 -#define A3XX_SP_FS_MRT_REG_3 0x22F3 #define A3XX_SP_FS_IMAGE_OUTPUT_REG_0 0x22F4 -#define A3XX_SP_FS_IMAGE_OUTPUT_REG_1 0x22F5 -#define A3XX_SP_FS_IMAGE_OUTPUT_REG_2 0x22F6 #define A3XX_SP_FS_IMAGE_OUTPUT_REG_3 0x22F7 #define A3XX_SP_FS_LENGTH_REG 0x22FF #define A3XX_TPL1_TP_VS_TEX_OFFSET 0x2340 diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index c1b853e3..87c67eaa 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -481,15 +481,13 @@ static void adreno_cleanup_pt(struct kgsl_device *device, kgsl_mmu_unmap(pagetable, &device->memstore); - kgsl_mmu_unmap(pagetable, &adreno_dev->pwron_fixup); - kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory); } static int adreno_setup_pt(struct kgsl_device *device, struct kgsl_pagetable *pagetable) { - int result; + int result = 0; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; @@ -505,21 +503,14 @@ static int adreno_setup_pt(struct kgsl_device *device, if (result) goto unmap_memptrs_desc; - result = kgsl_mmu_map_global(pagetable, &adreno_dev->pwron_fixup); - if (result) - goto unmap_memstore_desc; - result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory); if (result) - goto unmap_pwron_fixup_desc; + goto unmap_memstore_desc; device->mh.mpu_range = device->mmu.setstate_memory.gpuaddr + device->mmu.setstate_memory.size; return result; -unmap_pwron_fixup_desc: - kgsl_mmu_unmap(pagetable, &adreno_dev->pwron_fixup); - unmap_memstore_desc: kgsl_mmu_unmap(pagetable, &device->memstore); @@ -546,9 +537,7 @@ static void adreno_iommu_setstate(struct kgsl_device *device, uint32_t flags) { unsigned int pt_val, reg_pt_val; - unsigned int link[250]; - unsigned int *cmds = &link[0]; - int sizedwords = 0; + unsigned int *link = NULL, *cmds; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int num_iommu_units, i; struct kgsl_context *context; @@ -565,6 +554,12 @@ static void adreno_iommu_setstate(struct kgsl_device *device, return; adreno_ctx = context->devctxt; + link = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (link == NULL) + goto done; + + cmds = link; + if (kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER)) goto done; @@ -651,24 +646,23 @@ static void adreno_iommu_setstate(struct kgsl_device *device, cmds += adreno_add_idle_cmds(adreno_dev, cmds); - sizedwords += (cmds - &link[0]); - if (sizedwords) { + if ((unsigned int) (cmds - link)) { *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1); *cmds++ = 0x7fff; - sizedwords += 2; adreno_ringbuffer_issuecmds(device, adreno_ctx, KGSL_CMD_FLAGS_PMODE, - &link[0], sizedwords); + link, (unsigned int)(cmds - link)); kgsl_mmu_disable_clk_on_ts(&device->mmu, adreno_dev->ringbuffer.global_ts, true); } - if (sizedwords > (sizeof(link)/sizeof(unsigned int))) { + if ((unsigned int) (cmds - link) > (PAGE_SIZE / sizeof(unsigned int))) { KGSL_DRV_ERR(device, "Temp command buffer overflow\n"); BUG(); } done: + kfree(link); kgsl_context_put(context); } @@ -1480,11 +1474,6 @@ static int adreno_start(struct kgsl_device *device) kgsl_pwrctrl_enable(device); - - - set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv); - - if (adreno_is_a2xx(adreno_dev)) { if (adreno_is_a20x(adreno_dev)) { device->mh.mh_intf_cfg1 = 0; @@ -2918,9 +2907,6 @@ struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device, if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr, size)) return &device->memstore; - if (kgsl_gpuaddr_in_memdesc(&adreno_dev->pwron_fixup, gpuaddr, size)) - return &adreno_dev->pwron_fixup; - if (kgsl_gpuaddr_in_memdesc(&device->mmu.setstate_memory, gpuaddr, size)) return &device->mmu.setstate_memory; @@ -3185,6 +3171,9 @@ unsigned int adreno_ft_detect(struct kgsl_device *device, (kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED) + 1), curr_global_ts + 1); + kgsl_context_put(context); + context = NULL; + 
curr_context = NULL; return 1; } @@ -3220,6 +3209,8 @@ unsigned int adreno_ft_detect(struct kgsl_device *device, curr_context->ib_gpu_time_used = 0; kgsl_context_put(context); + context = NULL; + curr_context = NULL; return 1; } } diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 924da068..e92e3758 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -19,6 +19,8 @@ #include "kgsl_iommu.h" #include +#include "a3xx_reg.h" + #define DEVICE_3D_NAME "kgsl-3d" #define DEVICE_3D0_NAME "kgsl-3d0" @@ -34,7 +36,6 @@ #define KGSL_CMD_FLAGS_PMODE 0x00000001 #define KGSL_CMD_FLAGS_INTERNAL_ISSUE 0x00000002 #define KGSL_CMD_FLAGS_GET_INT 0x00000004 -#define KGSL_CMD_FLAGS_PWRON_FIXUP 0x00000008 #define KGSL_CMD_FLAGS_EOF 0x00000100 #define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF @@ -44,7 +45,6 @@ #define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD #define KGSL_END_OF_FRAME_IDENTIFIER 0x2E0F2E0F #define KGSL_NOP_IB_IDENTIFIER 0x20F20F20 -#define KGSL_PWRON_FIXUP_IDENTIFIER 0x2AFAFAFA #ifdef CONFIG_MSM_SCM #define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz) @@ -77,7 +77,6 @@ struct adreno_gpudev; struct adreno_device { struct kgsl_device dev; - unsigned long priv; unsigned int chip_id; enum adreno_gpurev gpurev; unsigned long gmem_base; @@ -110,8 +109,6 @@ struct adreno_device { struct ocmem_buf *ocmem_hdl; unsigned int ocmem_base; unsigned int gpu_cycles; - struct kgsl_memdesc pwron_fixup; - unsigned int pwron_fixup_dwords; }; #define PERFCOUNTER_FLAG_NONE 0x0 @@ -135,11 +132,6 @@ struct adreno_perfcounters { unsigned int group_count; }; -enum adreno_device_flags { - ADRENO_DEVICE_PWRON = 0, - ADRENO_DEVICE_PWRON_FIXUP = 1, -}; - struct adreno_gpudev { unsigned int reg_rbbm_status; unsigned int reg_cp_pfp_ucode_data; @@ -268,8 +260,6 @@ int adreno_perfcounter_put(struct adreno_device *adreno_dev, int adreno_ft_init_sysfs(struct kgsl_device *device); void adreno_ft_uninit_sysfs(struct kgsl_device *device); -int adreno_a3xx_pwron_fixup_init(struct adreno_device *adreno_dev); - static inline int adreno_is_a200(struct adreno_device *adreno_dev) { return (adreno_dev->gpurev == ADRENO_REV_A200); @@ -416,6 +406,10 @@ static inline int adreno_add_read_cmds(struct kgsl_device *device, *cmds++ = val; *cmds++ = 0xFFFFFFFF; *cmds++ = 0xFFFFFFFF; + + *cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1); + *cmds++ = 0; + cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr); return cmds - start; } @@ -442,5 +436,32 @@ void adreno_debugfs_init(struct kgsl_device *device); #else static inline void adreno_debugfs_init(struct kgsl_device *device) { } #endif +static inline void adreno_set_protected_registers(struct kgsl_device *device, + unsigned int *index, unsigned int reg, int mask) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + unsigned int val; + + unsigned int protect_reg_offset; + + + BUG_ON(*index >= 16); + + if (adreno_is_a3xx(adreno_dev)) { + val = 0x60000000 | ((mask & 0x1F) << 24) | + ((reg << 2) & 0x1FFFF); + protect_reg_offset = A3XX_CP_PROTECT_REG_0; + } else if (adreno_is_a2xx(adreno_dev)) { + val = 0xc0000000 | ((reg << 2) << 16) | (mask & 0xffff); + protect_reg_offset = REG_RBBM_PROTECT_0; + } else { + return; + } + + + kgsl_regwrite(device, protect_reg_offset + *index, val); + *index = *index + 1; +} + #endif diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c index ac4af9d4..a8fcface 100644 --- a/drivers/gpu/msm/adreno_a2xx.c +++ b/drivers/gpu/msm/adreno_a2xx.c @@ -1439,7 +1439,9 @@ static void 
a2xx_drawctxt_restore(struct adreno_device *adreno_dev, } -#define RBBM_INT_MASK RBBM_INT_CNTL__RDERR_INT_MASK +#define RBBM_INT_MASK (RBBM_INT_CNTL__RDERR_INT_MASK | \ + RBBM_INT_CNTL__PROTECT_INT_MASK) + #define CP_INT_MASK \ (CP_INT_CNTL__T0_PACKET_IN_IB_MASK | \ @@ -1545,6 +1547,16 @@ static void a2xx_rbbm_intrcallback(struct kgsl_device *device) KGSL_DRV_CRIT(device, "rbbm read error interrupt: %s reg: %04X\n", source, addr); + } else if (status & RBBM_INT_CNTL__PROTECT_INT_MASK) { + adreno_regread(device, REG_RBBM_READ_ERROR, &rderr); + source = (rderr & RBBM_READ_ERROR_REQUESTER) + ? "host" : "cp"; + + addr = (rderr & RBBM_READ_ERROR_ADDRESS_MASK) >> 2; + KGSL_DRV_CRIT(device, + "RBBM | Protected mode error |%s|%s| addr=%x\n", + rderr & (1 << 31) ? "WRITE" : "READ", source, + addr); } status &= RBBM_INT_MASK; @@ -1656,10 +1668,8 @@ static int a2xx_rb_init(struct adreno_device *adreno_dev, GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); - if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) - GSL_RB_WRITE(cmds, cmds_gpu, 0); - else - GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL); + + GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL); GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); @@ -1718,6 +1728,31 @@ static void a2xx_gmeminit(struct adreno_device *adreno_dev) adreno_regwrite(device, REG_RB_EDRAM_INFO, rb_edram_info.val); } +static void a2xx_protect_init(struct kgsl_device *device) +{ + int index = 0; + + + kgsl_regwrite(device, REG_RBBM_INT_CNTL, + RBBM_INT_CNTL__PROTECT_INT_MASK); + + + adreno_set_protected_registers(device, &index, 0x03C, 0x0); + + adreno_set_protected_registers(device, &index, 0x3B4, 0x1); + + adreno_set_protected_registers(device, &index, 0x140, 0xF); + + + adreno_set_protected_registers(device, &index, 0x1C0, 0x20); + + adreno_set_protected_registers(device, &index, 0x1EC, 0x1); + + adreno_set_protected_registers(device, &index, 0x1F6, 0x7); + + + adreno_set_protected_registers(device, &index, 0x042, 0x0); +} static void a2xx_start(struct adreno_device *adreno_dev) { @@ -1766,6 +1801,9 @@ static void a2xx_start(struct adreno_device *adreno_dev) adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000); + a2xx_protect_init(device); + + adreno_regwrite(device, REG_RBBM_INT_CNTL, 0); adreno_regwrite(device, REG_CP_INT_CNTL, 0); adreno_regwrite(device, REG_SQ_INT_CNTL, 0); diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c index 69ebdeb9..761eca47 100644 --- a/drivers/gpu/msm/adreno_a3xx.c +++ b/drivers/gpu/msm/adreno_a3xx.c @@ -2268,256 +2268,6 @@ static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev, } } -static const unsigned int _a3xx_pwron_fixup_fs_instructions[] = { - 0x00000000, 0x10000400, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x03000000, -}; - -int adreno_a3xx_pwron_fixup_init(struct adreno_device *adreno_dev) -{ - unsigned int *cmds; - int count = sizeof(_a3xx_pwron_fixup_fs_instructions) >> 2; - int ret; - - if (test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv)) - return 0; - - ret = kgsl_allocate_contiguous(&adreno_dev->pwron_fixup, PAGE_SIZE); - - if (ret) - return ret; - adreno_dev->pwron_fixup.flags |= KGSL_MEMFLAGS_GPUREADONLY; - cmds = adreno_dev->pwron_fixup.hostptr; - - *cmds++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2); - *cmds++ = 0x00000000; - *cmds++ = 0x90000000; - *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type3_packet(CP_REG_RMW, 3); - *cmds++ = A3XX_RBBM_CLOCK_CTL; - *cmds++ = 0xFFFCFFFF; - *cmds++ = 0x00010000; - 
*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_0_REG, 1); - *cmds++ = 0x1E000150; - *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); - *cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG) | (0x1 << 30); - *cmds++ = 0x1E000150; - *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_0_REG, 1); - *cmds++ = 0x1E000150; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_1_REG, 1); - *cmds++ = 0x00000040; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_2_REG, 1); - *cmds++ = 0x80000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_3_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_VS_CONTROL_REG, 1); - *cmds++ = 0x00000001; - *cmds++ = cp_type0_packet(A3XX_HLSQ_FS_CONTROL_REG, 1); - *cmds++ = 0x00001002 | (count >> 3) << 24; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CONST_VSPRESV_RANGE_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CONST_FSPRESV_RANGE_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_0_REG, 1); - *cmds++ = 0x00401101; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_1_REG, 1); - *cmds++ = 0x00000400; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_2_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_3_REG, 1); - *cmds++ = 0x00000001; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_4_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_5_REG, 1); - *cmds++ = 0x00000001; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_6_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_CONTROL_0_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_CONTROL_1_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_CONST_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_GROUP_X_REG, 1); - *cmds++ = 0x00000010; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG, 1); - *cmds++ = 0x00000001; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG, 1); - *cmds++ = 0x00000001; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_WG_OFFSET_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_SP_CTRL_REG, 1); - *cmds++ = 0x00040000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1); - *cmds++ = 0x0000000A; - *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG1, 1); - *cmds++ = 0x00000001; - *cmds++ = cp_type0_packet(A3XX_SP_VS_PARAM_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_0, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_1, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_2, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_3, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_4, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_5, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_6, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_7, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_0, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_1, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_2, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_3, 
1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_OBJ_OFFSET_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_OBJ_START_REG, 1); - *cmds++ = 0x00000004; - *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_PARAM_REG, 1); - *cmds++ = 0x04008001; - *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_ADDR_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_VS_LENGTH_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1); - *cmds++ = 0x00B0400A | (count >> 3) << 24; - *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG1, 1); - *cmds++ = 0x00300402; - *cmds++ = cp_type0_packet(A3XX_SP_FS_OBJ_OFFSET_REG, 1); - *cmds++ = 0x00010000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_OBJ_START_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_PARAM_REG, 1); - *cmds++ = 0x04008001; - *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_ADDR_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_FLAT_SHAD_MODE_REG_1, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_OUTPUT_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_0, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_1, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_2, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_3, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_0, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_1, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_2, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_3, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_SP_FS_LENGTH_REG, 1); - *cmds++ = count >> 3; - *cmds++ = cp_type0_packet(A3XX_RB_MODE_CONTROL, 1); - *cmds++ = 0x00008000; - *cmds++ = cp_type0_packet(A3XX_RB_RENDER_CONTROL, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MSAA_CONTROL, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_ALPHA_REFERENCE, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL0, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL1, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL2, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL3, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO0, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO1, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO2, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO3, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE0, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE1, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE2, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE3, 1); - *cmds++ = 0x00000000; - - *cmds++ = cp_type0_packet(A3XX_RB_PERFCOUNTER0_SELECT, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_RB_PERFCOUNTER1_SELECT, 1); - *cmds++ = 0x00000000; 
- *cmds++ = cp_type0_packet(A3XX_RB_FRAME_BUFFER_DIMENSION, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); - *cmds++ = 0x00000000; - - *cmds++ = cp_type3_packet(CP_LOAD_STATE, 2 + count); - *cmds++ = (6 << CP_LOADSTATE_STATEBLOCKID_SHIFT) | - ((count >> 3) << CP_LOADSTATE_NUMOFUNITS_SHIFT); - *cmds++ = 0x00000000; - memcpy(cmds, _a3xx_pwron_fixup_fs_instructions, count << 2); - cmds += count; - - *cmds++ = cp_type3_packet(CP_EXEC_CL, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_nop_packet(1); - *cmds++ = 0x00000000; - *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_CONTROL_0_REG, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_0_REG, 1); - *cmds++ = 0x1E000150; - *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); - *cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG); - *cmds++ = 0x1E000050; - *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); - *cmds++ = 0x00000000; - *cmds++ = cp_type3_packet(CP_REG_RMW, 3); - *cmds++ = A3XX_RBBM_CLOCK_CTL; - *cmds++ = 0xFFFCFFFF; - *cmds++ = 0x00000000; - *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); - *cmds++ = 0x00000000; - - adreno_dev->pwron_fixup_dwords = - (cmds - (unsigned int *)adreno_dev->pwron_fixup.hostptr); - - - set_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv); - return 0; -} - static int a3xx_rb_init(struct adreno_device *adreno_dev, struct adreno_ringbuffer *rb) { @@ -2544,7 +2294,7 @@ static int a3xx_rb_init(struct adreno_device *adreno_dev, GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); - GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); + GSL_RB_WRITE(cmds, cmds_gpu, 0x20000000); GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); @@ -2602,9 +2352,16 @@ static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit) case A3XX_INT_CP_HW_FAULT: err = "ringbuffer hardware fault"; break; - case A3XX_INT_CP_REG_PROTECT_FAULT: - err = "ringbuffer protected mode error interrupt"; - break; + case A3XX_INT_CP_REG_PROTECT_FAULT: { + unsigned int reg; + kgsl_regread(device, A3XX_CP_PROTECT_STATUS, ®); + + KGSL_DRV_CRIT(device, + "CP | Protected mode error| %s | addr=%x\n", + reg & (1 << 24) ? 
"WRITE" : "READ", + (reg & 0x1FFFF) >> 2); + return; + } case A3XX_INT_CP_AHB_ERROR_HALT: err = "ringbuffer AHB error interrupt"; break; @@ -3120,6 +2877,40 @@ static void a3xx_perfcounter_init(struct adreno_device *adreno_dev) NULL, PERFCOUNTER_FLAG_KERNEL); } +static void a3xx_protect_init(struct kgsl_device *device) +{ + int index = 0; + + + kgsl_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007); + + + adreno_set_protected_registers(device, &index, 0x18, 0); + adreno_set_protected_registers(device, &index, 0x20, 2); + adreno_set_protected_registers(device, &index, 0x33, 0); + adreno_set_protected_registers(device, &index, 0x42, 0); + adreno_set_protected_registers(device, &index, 0x50, 4); + adreno_set_protected_registers(device, &index, 0x63, 0); + adreno_set_protected_registers(device, &index, 0x100, 4); + + + adreno_set_protected_registers(device, &index, 0x1C0, 5); + adreno_set_protected_registers(device, &index, 0x1EC, 1); + adreno_set_protected_registers(device, &index, 0x1F6, 1); + adreno_set_protected_registers(device, &index, 0x1F8, 2); + adreno_set_protected_registers(device, &index, 0x45E, 2); + adreno_set_protected_registers(device, &index, 0x460, 4); + + + adreno_set_protected_registers(device, &index, 0xCC0, 0); + + + adreno_set_protected_registers(device, &index, 0x3000, 6); + + + adreno_set_protected_registers(device, &index, 0x4000, 14); + +} static void a3xx_start(struct adreno_device *adreno_dev) { struct kgsl_device *device = &adreno_dev->dev; @@ -3180,6 +2971,10 @@ static void a3xx_start(struct adreno_device *adreno_dev) (unsigned int)(adreno_dev->ocmem_base >> 14)); } + + + a3xx_protect_init(device); + adreno_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01); diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h index 964174eb..29c9972a 100644 --- a/drivers/gpu/msm/adreno_pm4types.h +++ b/drivers/gpu/msm/adreno_pm4types.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -121,8 +121,6 @@ #define CP_INDIRECT_BUFFER_PFE 0x3F -#define CP_EXEC_CL 0x31 - #define CP_LOADSTATE_DSTOFFSET_SHIFT 0x00000000 #define CP_LOADSTATE_STATESRC_SHIFT 0x00000010 #define CP_LOADSTATE_STATEBLOCKID_SHIFT 0x00000013 diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index c63f54e7..0efbe919 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -355,32 +355,6 @@ int adreno_ringbuffer_start(struct adreno_ringbuffer *rb) rb->memptrs_desc.gpuaddr + GSL_RB_MEMPTRS_RPTR_OFFSET); - if (adreno_is_a3xx(adreno_dev)) { - - adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007); - - - adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040); - adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080); - adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC); - adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108); - adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140); - adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400); - - - adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700); - adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8); - adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0); - adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178); - adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180); - - - adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300); - - - adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000); - } - if (adreno_is_a2xx(adreno_dev)) { adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF); @@ -542,10 +516,6 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, if (flags & KGSL_CMD_FLAGS_EOF) total_sizedwords += 2; - - if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) - total_sizedwords += 5; - ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords); if (!ringcmds) return -ENOSPC; @@ -553,18 +523,6 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-total_sizedwords); - if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) { - GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1)); - GSL_RB_WRITE(ringcmds, rcmd_gpu, - KGSL_PWRON_FIXUP_IDENTIFIER); - GSL_RB_WRITE(ringcmds, rcmd_gpu, - CP_HDR_INDIRECT_BUFFER_PFD); - GSL_RB_WRITE(ringcmds, rcmd_gpu, - adreno_dev->pwron_fixup.gpuaddr); - GSL_RB_WRITE(ringcmds, rcmd_gpu, - adreno_dev->pwron_fixup_dwords); - } - GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1)); GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER); @@ -1022,18 +980,9 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, } else drawctxt->timestamp++; - flags &= KGSL_CMD_FLAGS_EOF; - - - if (test_and_clear_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv) && - test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv)) - { - flags |= KGSL_CMD_FLAGS_PWRON_FIXUP; - } - ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer, drawctxt, - flags, + (flags & KGSL_CMD_FLAGS_EOF), &link[0], (cmds - link)); if (ret) goto done; diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index cf729905..58b8dba1 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2012,2014 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -95,6 +95,9 @@ unsigned int kgsl_get_alloc_size(int detailed) return ret; } +static void kgsl_put_process_private(struct kgsl_device *device, + struct kgsl_process_private *private); + static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry); void kgsl_hang_check(struct work_struct *work) @@ -122,9 +125,13 @@ void hang_timer(unsigned long data) { struct kgsl_device *device = (struct kgsl_device *) data; - if (device->state == KGSL_STATE_ACTIVE) { - - queue_work(device->work_queue, &device->hang_check_ws); + + if (device->id == KGSL_DEVICE_3D0) { + if (device->state == KGSL_STATE_ACTIVE) { + + + queue_work(device->work_queue, &device->hang_check_ws); + } } } @@ -332,14 +339,19 @@ kgsl_mem_entry_untrack_gpuaddr(struct kgsl_process_private *process, static int kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry, - struct kgsl_process_private *process) + struct kgsl_device_private *dev_priv) { int ret; + struct kgsl_process_private *process = dev_priv->process_priv; + + ret = kref_get_unless_zero(&process->refcount); + if (!ret) + return -EBADF; while (1) { if (idr_pre_get(&process->mem_idr, GFP_KERNEL) == 0) { ret = -ENOMEM; - goto err; + goto err_put_proc_priv; } spin_lock(&process->mem_lock); @@ -350,9 +362,10 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry, if (ret == 0) break; else if (ret != -EAGAIN) - goto err; + goto err_put_proc_priv; } entry->priv = process; + entry->dev_priv = dev_priv; entry->memdesc.private = process; spin_lock(&process->mem_lock); @@ -361,14 +374,17 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry, idr_remove(&process->mem_idr, entry->id); spin_unlock(&process->mem_lock); if (ret) - goto err; + goto err_put_proc_priv; if (entry->memdesc.gpuaddr) { ret = kgsl_mmu_map(process->pagetable, &entry->memdesc); if (ret) kgsl_mem_entry_detach_process(entry); } -err: + return ret; + +err_put_proc_priv: + kgsl_put_process_private(dev_priv->device, process); return ret; } @@ -390,6 +406,7 @@ static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry) entry->priv->stats[entry->memtype].cur -= entry->memdesc.size; spin_unlock(&entry->priv->mem_lock); + kgsl_put_process_private(entry->dev_priv->device, entry->priv); entry->priv = NULL; } @@ -727,11 +744,6 @@ EXPORT_SYMBOL(kgsl_late_resume_driver); static void kgsl_destroy_process_private(struct kref *kref) { - - struct kgsl_mem_entry *entry = NULL; - int next = 0; - - struct kgsl_process_private *private = container_of(kref, struct kgsl_process_private, refcount); @@ -746,23 +758,14 @@ static void kgsl_destroy_process_private(struct kref *kref) if (private->debug_root) debugfs_remove_recursive(private->debug_root); - while (1) { - rcu_read_lock(); - entry = idr_get_next(&private->mem_idr, &next); - rcu_read_unlock(); - if (entry == NULL) - break; - kgsl_mem_entry_put(entry); - next = 0; - } - kgsl_mmu_putpagetable(private->pagetable); - idr_destroy(&private->mem_idr); - spin_lock(&kgsl_driver.process_dump_lock); list_del(&private->list); spin_unlock(&kgsl_driver.process_dump_lock); mutex_unlock(&kgsl_driver.process_mutex); + kgsl_mmu_putpagetable(private->pagetable); + idr_destroy(&private->mem_idr); + kfree(private); return; } @@ -875,6 +878,7 @@ static int kgsl_release(struct inode *inodep, struct file *filep) struct kgsl_process_private *private = dev_priv->process_priv; struct kgsl_device *device = dev_priv->device; 
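Note: kgsl_mem_entry_attach_process() now pins the owning process with kref_get_unless_zero() rather than kref_get(), so an entry can no longer attach to a process whose last reference is already being dropped, and the error path releases that reference via the new err_put_proc_priv label. The general pattern, sketched with hypothetical names:

#include <linux/kref.h>

struct tracked_obj {
        struct kref refcount;
        /* ... */
};

/* Sketch: take a reference only if the object is not already on its
 * way to destruction. Returns the object on success, NULL otherwise.
 */
static struct tracked_obj *tracked_obj_get(struct tracked_obj *obj)
{
        if (obj && kref_get_unless_zero(&obj->refcount))
                return obj;
        return NULL;
}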
struct kgsl_context *context; + struct kgsl_mem_entry *entry; int next = 0; filep->private_data = NULL; @@ -895,6 +899,19 @@ static int kgsl_release(struct inode *inodep, struct file *filep) next = next + 1; } + next = 0; + while (1) { + spin_lock(&private->mem_lock); + entry = idr_get_next(&private->mem_idr, &next); + spin_unlock(&private->mem_lock); + if (entry == NULL) + break; + if (entry->dev_priv == dev_priv && !entry->pending_free) { + entry->pending_free = 1; + kgsl_mem_entry_put(entry); + } + next = next + 1; + } kgsl_cancel_events(device, dev_priv); device->open_count--; @@ -1022,7 +1039,8 @@ kgsl_sharedmem_find_region(struct kgsl_process_private *private, entry = rb_entry(node, struct kgsl_mem_entry, node); if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size)) { - kgsl_mem_entry_get(entry); + if (!kgsl_mem_entry_get(entry)) + break; spin_unlock(&private->mem_lock); return entry; } @@ -1092,14 +1110,17 @@ kgsl_sharedmem_region_empty(struct kgsl_process_private *private, static inline struct kgsl_mem_entry * __must_check kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id) { + int result = 0; struct kgsl_mem_entry *entry; rcu_read_lock(); entry = idr_find(&process->mem_idr, id); if (entry) - kgsl_mem_entry_get(entry); + result = kgsl_mem_entry_get(entry); rcu_read_unlock(); + if (!result) + return NULL; return entry; } @@ -2002,7 +2023,7 @@ static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv, param->flags = entry->memdesc.flags; - result = kgsl_mem_entry_attach_process(entry, private); + result = kgsl_mem_entry_attach_process(entry, dev_priv); if (result) goto error_attach; @@ -2170,7 +2191,7 @@ kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv, if (result) return result; - result = kgsl_mem_entry_attach_process(entry, private); + result = kgsl_mem_entry_attach_process(entry, dev_priv); if (result != 0) goto err; @@ -2203,7 +2224,7 @@ kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv, if (result != 0) goto err; - result = kgsl_mem_entry_attach_process(entry, private); + result = kgsl_mem_entry_attach_process(entry, dev_priv); if (result != 0) goto err; @@ -2593,7 +2614,8 @@ kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma) static void kgsl_gpumem_vm_open(struct vm_area_struct *vma) { struct kgsl_mem_entry *entry = vma->vm_private_data; - kgsl_mem_entry_get(entry); + if (!kgsl_mem_entry_get(entry)) + vma->vm_private_data = NULL; } static int @@ -2601,6 +2623,8 @@ kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct kgsl_mem_entry *entry = vma->vm_private_data; + if (!entry) + return VM_FAULT_SIGBUS; if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault) return VM_FAULT_SIGBUS; @@ -2612,6 +2636,9 @@ kgsl_gpumem_vm_close(struct vm_area_struct *vma) { struct kgsl_mem_entry *entry = vma->vm_private_data; + if (!entry) + return; + entry->memdesc.useraddr = 0; kgsl_mem_entry_put(entry); } diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 22e17508..330d7b94 100644 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved. 
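Note: the lookup helpers above (kgsl_sharedmem_find_region(), kgsl_sharedmem_find_id()) follow the same idea: find the entry under the lock or RCU, and only return it if a reference could actually be taken. A compact, self-contained sketch of that lookup-then-pin sequence (struct mapping is a placeholder type, not part of the driver):

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>

struct mapping {
        struct kref refcount;
        /* ... */
};

/* Sketch: look up an object by id and pin it, returning NULL both when
 * the id is unknown and when the object is already being torn down.
 */
static struct mapping *mapping_find_get(struct idr *idr, int id)
{
        struct mapping *m;

        rcu_read_lock();
        m = idr_find(idr, id);
        if (m && !kref_get_unless_zero(&m->refcount))
                m = NULL;
        rcu_read_unlock();

        return m;
}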
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include @@ -196,6 +196,7 @@ struct kgsl_mem_entry { struct kgsl_process_private *priv; int pending_free; + struct kgsl_device_private *dev_priv; }; #ifdef CONFIG_MSM_KGSL_MMU_PAGE_FAULT @@ -311,10 +312,10 @@ static inline int timestamp_cmp(unsigned int a, unsigned int b) return ((a > b) && (a - b <= KGSL_TIMESTAMP_WINDOW)) ? 1 : -1; } -static inline void +static inline int kgsl_mem_entry_get(struct kgsl_mem_entry *entry) { - kref_get(&entry->refcount); + return kref_get_unless_zero(&entry->refcount); } static inline void diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c index cf4b6daa..ee048e80 100644 --- a/drivers/gpu/msm/kgsl_debugfs.c +++ b/drivers/gpu/msm/kgsl_debugfs.c @@ -325,16 +325,37 @@ static const struct file_operations process_mem_fops = { .release = single_release, }; -void +int kgsl_process_init_debugfs(struct kgsl_process_private *private) { unsigned char name[16]; + int ret = 0; + struct dentry *dentry; snprintf(name, sizeof(name), "%d", private->pid); private->debug_root = debugfs_create_dir(name, proc_d_debugfs); - debugfs_create_file("mem", 0400, private->debug_root, private, + + if (!private->debug_root) + return -EINVAL; + + private->debug_root->d_inode->i_uid = proc_d_debugfs->d_inode->i_uid; + private->debug_root->d_inode->i_gid = proc_d_debugfs->d_inode->i_gid; + + dentry = debugfs_create_file("mem", 0400, private->debug_root, private, &process_mem_fops); + + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); + + if (ret == -ENODEV) + ret = 0; + } else if (dentry) { + dentry->d_inode->i_uid = proc_d_debugfs->d_inode->i_uid; + dentry->d_inode->i_gid = proc_d_debugfs->d_inode->i_gid; + } + + return ret; } void kgsl_core_debugfs_init(void) diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index ec58db93..cf9f27d7 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2012,2014 The Linux Foundation. All rights reserved. 
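Note: kgsl_process_init_debugfs() now returns an error instead of silently ignoring failed dentry creation, and treats a kernel built without debugfs (-ENODEV) as non-fatal. A hedged sketch of that error-handling shape, independent of the kgsl-specific uid/gid fixups ('fops' is assumed to be defined elsewhere):

#include <linux/debugfs.h>
#include <linux/err.h>

/* Sketch: create a debugfs file, treating a kernel without debugfs as
 * a non-fatal condition and anything else as a real error.
 */
static int example_init_debugfs(struct dentry *parent, void *data,
                                const struct file_operations *fops)
{
        struct dentry *d;

        d = debugfs_create_file("mem", 0400, parent, data, fops);
        if (IS_ERR(d)) {
                int ret = PTR_ERR(d);

                return ret == -ENODEV ? 0 : ret;        /* debugfs disabled */
        }
        return d ? 0 : -EINVAL;
}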
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -422,25 +422,30 @@ kgsl_context_put(struct kgsl_context *context) kref_put(&context->refcount, kgsl_context_destroy); } -static inline void _kgsl_context_get(struct kgsl_context *context) +static inline int _kgsl_context_get(struct kgsl_context *context) { + int ret = 0; if (context) - kref_get(&context->refcount); + ret = kref_get_unless_zero(&context->refcount); + return ret; } static inline struct kgsl_context *kgsl_context_get(struct kgsl_device *device, uint32_t id) { + int result = 0; struct kgsl_context *context = NULL; read_lock(&device->context_lock); context = idr_find(&device->context_idr, id); - _kgsl_context_get(context); + result = _kgsl_context_get(context); read_unlock(&device->context_lock); + if (!result) + return NULL; return context; } @@ -472,4 +477,18 @@ static inline void kgsl_cancel_events_timestamp(struct kgsl_device *device, kgsl_signal_event(device, context, timestamp, KGSL_EVENT_CANCELLED); } +static inline int kgsl_sysfs_store(const char *buf, unsigned int *ptr) +{ + unsigned int val; + int rc; + + rc = kstrtou32(buf, 0, &val); + if (rc) + return rc; + + if (ptr) + *ptr = val; + + return 0; +} #endif diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index a9c600e2..b388ac56 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -266,7 +266,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, unsigned int no_page_fault_log = 0; unsigned int curr_context_id = 0; unsigned int curr_global_ts = 0; - static struct kgsl_context *context; + struct kgsl_context *context; unsigned int pid; unsigned int fsynr0, fsynr1; int write; @@ -348,6 +348,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, if (ret < 0) { KGSL_CORE_ERR("Invalid curr_global_ts = %d\n", curr_global_ts); + kgsl_context_put(context); goto done; } @@ -771,6 +772,10 @@ inline unsigned int kgsl_iommu_sync_lock(struct kgsl_mmu *mmu, *cmds++ = 0x1; *cmds++ = 0x1; + + *cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1); + *cmds++ = 0; + *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2); *cmds++ = lock_vars->turn; *cmds++ = 0; @@ -785,11 +790,19 @@ inline unsigned int kgsl_iommu_sync_lock(struct kgsl_mmu *mmu, *cmds++ = 0x1; *cmds++ = 0x1; + + *cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1); + *cmds++ = 0; + *cmds++ = cp_type3_packet(CP_TEST_TWO_MEMS, 3); *cmds++ = lock_vars->flag[PROC_APPS]; *cmds++ = lock_vars->turn; *cmds++ = 0; + + *cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1); + *cmds++ = 0; + cmds += adreno_add_idle_cmds(adreno_dev, cmds); return cmds - start; @@ -820,6 +833,10 @@ inline unsigned int kgsl_iommu_sync_unlock(struct kgsl_mmu *mmu, *cmds++ = 0x1; *cmds++ = 0x1; + + *cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1); + *cmds++ = 0; + cmds += adreno_add_idle_cmds(adreno_dev, cmds); return cmds - start; diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c index 99572979..c935dc8e 100644 --- a/drivers/gpu/msm/kgsl_mmu.c +++ b/drivers/gpu/msm/kgsl_mmu.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved. 
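Note: the new kgsl_sysfs_store() helper above replaces the hand-rolled sscanf()/strict_strtoul() parsing in the sysfs store handlers: it converts the buffer with kstrtou32() and returns the parser's error code, which the callers propagate instead of silently accepting a malformed write. A minimal standalone sketch of a store handler built on that idea (names are illustrative):

#include <linux/kernel.h>
#include <linux/device.h>

/* Sketch: parse an unsigned value from a sysfs write and reject bad
 * input instead of pretending the write succeeded.
 */
static ssize_t example_level_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        unsigned int val;
        int rc;

        rc = kstrtou32(buf, 0, &val);
        if (rc)
                return rc;      /* e.g. -EINVAL or -ERANGE */

        /* ... apply 'val' under the appropriate lock ... */

        return count;
}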
+/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -136,12 +136,12 @@ kgsl_get_pagetable(unsigned long name) static struct kgsl_pagetable * _get_pt_from_kobj(struct kobject *kobj) { - unsigned long ptname; + unsigned int ptname; if (!kobj) return NULL; - if (sscanf(kobj->name, "%ld", &ptname) != 1) + if (kstrtou32(kobj->name, 0, &ptname)) return NULL; return kgsl_get_pagetable(ptname); @@ -371,6 +371,10 @@ int kgsl_mmu_init(struct kgsl_device *device) status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE); if (status) return status; + + + mmu->setstate_memory.flags |= KGSL_MEMFLAGS_GPUREADONLY; + kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0, mmu->setstate_memory.size); diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 5d06477c..2eea1c0f 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -190,19 +190,18 @@ static int kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev, { struct kgsl_device *device = kgsl_device_from_dev(dev); struct kgsl_pwrctrl *pwr; - int ret, level; + int ret; + unsigned int level = 0; if (device == NULL) return 0; pwr = &device->pwrctrl; - ret = sscanf(buf, "%d", &level); - if (ret != 1) - return count; + ret = kgsl_sysfs_store(buf, &level); - if (level < 0) - return count; + if (ret) + return ret; mutex_lock(&device->mutex); @@ -240,20 +239,17 @@ static int kgsl_pwrctrl_max_pwrlevel_store(struct device *dev, { struct kgsl_device *device = kgsl_device_from_dev(dev); struct kgsl_pwrctrl *pwr; - int ret, level, max_level; + int ret, max_level; + unsigned int level = 0; if (device == NULL) return 0; pwr = &device->pwrctrl; - ret = sscanf(buf, "%d", &level); - if (ret != 1) - return count; - - - if (level < 0) - return count; + ret = kgsl_sysfs_store(buf, &level); + if (ret) + return ret; mutex_lock(&device->mutex); @@ -294,20 +290,17 @@ static int kgsl_pwrctrl_min_pwrlevel_store(struct device *dev, const char *buf, size_t count) { struct kgsl_device *device = kgsl_device_from_dev(dev); struct kgsl_pwrctrl *pwr; - int ret, level, min_level; + int ret, min_level; + unsigned int level = 0; if (device == NULL) return 0; pwr = &device->pwrctrl; - ret = sscanf(buf, "%d", &level); - if (ret != 1) - return count; - - - if (level < 0) - return count; + ret = kgsl_sysfs_store(buf, &level); + if (ret) + return ret; mutex_lock(&device->mutex); if (level > pwr->num_pwrlevels - 2) @@ -374,7 +367,7 @@ static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev, { struct kgsl_device *device = kgsl_device_from_dev(dev); struct kgsl_pwrctrl *pwr; - unsigned long val; + unsigned int val = 0; int ret, level; if (device == NULL) @@ -382,9 +375,10 @@ static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev, pwr = &device->pwrctrl; - ret = sscanf(buf, "%ld", &val); - if (ret != 1) - return count; + ret = kgsl_sysfs_store(buf, &val); + if (ret) + return ret; + mutex_lock(&device->mutex); level = _get_nearest_pwrlevel(pwr, val); @@ -422,7 +416,7 @@ static int kgsl_pwrctrl_gpuclk_store(struct device *dev, { struct kgsl_device *device = kgsl_device_from_dev(dev); struct kgsl_pwrctrl *pwr; - unsigned long val; + unsigned int val = 0; int ret, level; if (device == NULL) @@ -430,9 +424,9 @@ static int kgsl_pwrctrl_gpuclk_store(struct device *dev, pwr = &device->pwrctrl; - ret = sscanf(buf, "%ld", &val); - if (ret != 1) - return count; + ret = 
kgsl_sysfs_store(buf, &val); + if (ret) + return ret; mutex_lock(&device->mutex); level = _get_nearest_pwrlevel(pwr, val); @@ -460,21 +454,18 @@ static int kgsl_pwrctrl_pwrnap_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - char temp[20]; - unsigned long val; + unsigned int val = 0; struct kgsl_device *device = kgsl_device_from_dev(dev); struct kgsl_pwrctrl *pwr; - int rc; + int ret; if (device == NULL) return 0; pwr = &device->pwrctrl; - snprintf(temp, sizeof(temp), "%.*s", - (int)min(count, sizeof(temp) - 1), buf); - rc = strict_strtoul(temp, 0, &val); - if (rc) - return rc; + ret = kgsl_sysfs_store(buf, &val); + if (ret) + return ret; mutex_lock(&device->mutex); @@ -503,23 +494,20 @@ static int kgsl_pwrctrl_idle_timer_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - char temp[20]; - unsigned long val; + unsigned int val = 0; struct kgsl_device *device = kgsl_device_from_dev(dev); struct kgsl_pwrctrl *pwr; const long div = 1000/HZ; static unsigned int org_interval_timeout = 1; - int rc; + int ret; if (device == NULL) return 0; pwr = &device->pwrctrl; - snprintf(temp, sizeof(temp), "%.*s", - (int)min(count, sizeof(temp) - 1), buf); - rc = strict_strtoul(temp, 0, &val); - if (rc) - return rc; + ret = kgsl_sysfs_store(buf, &val); + if (ret) + return ret; if (org_interval_timeout == 1) org_interval_timeout = pwr->interval_timeout; diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c index d79d9811..7746bb1a 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.c +++ b/drivers/gpu/msm/kgsl_sharedmem.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2012,2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -83,12 +83,12 @@ static struct kgsl_process_private * _get_priv_from_kobj(struct kobject *kobj) { struct kgsl_process_private *private; - unsigned long name; + unsigned int name; if (!kobj) return NULL; - if (sscanf(kobj->name, "%ld", &name) != 1) + if (kstrtou32(kobj->name, 0, &name)) return NULL; list_for_each_entry(private, &kgsl_driver.process_list, list) { @@ -780,7 +780,7 @@ _kgsl_sharedmem_ion_alloc(struct kgsl_memdesc *memdesc, goto done; } - handle = ion_alloc(kgsl_client, size, SZ_4K, 0x1 << ION_SF_HEAP_ID); + handle = ion_alloc(kgsl_client, size, SZ_4K, 0x1 << ION_SF_HEAP_ID,0); if (IS_ERR_OR_NULL(handle)) { ret = -ENOMEM; goto done; diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h index b763bb33..003b1997 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.h +++ b/drivers/gpu/msm/kgsl_sharedmem.h @@ -122,6 +122,9 @@ kgsl_sharedmem_map_vma(struct vm_area_struct *vma, static inline void *kgsl_sg_alloc(unsigned int sglen) { + if ((sglen == 0) || (sglen >= ULONG_MAX / sizeof(struct scatterlist))) + return NULL; + if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE) return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL); else { diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c index 7fcdba11..d10a78f3 100644 --- a/drivers/gpu/msm/z180.c +++ b/drivers/gpu/msm/z180.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved. 
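Note: kgsl_sg_alloc() above gains an explicit bounds check before computing sglen * sizeof(struct scatterlist), so a zero or huge caller-supplied entry count can no longer wrap the allocation size. The guard pattern in isolation, as a sketch (sg_table_alloc is a placeholder name):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

/* Sketch: bounds-check a caller-supplied element count before using it
 * to size an allocation, falling back to vmalloc for large tables.
 */
static struct scatterlist *sg_table_alloc(unsigned int nents)
{
        size_t bytes;

        if (nents == 0 || nents >= ULONG_MAX / sizeof(struct scatterlist))
                return NULL;

        bytes = nents * sizeof(struct scatterlist);
        if (bytes < PAGE_SIZE)
                return kzalloc(bytes, GFP_KERNEL);
        return vzalloc(bytes);
}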
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -419,7 +419,7 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv, "Cannot make kernel mapping for gpuaddr 0x%x\n", cmd); result = -EINVAL; - goto error; + goto error_put; } KGSL_CMD_INFO(device, "ctxt %d ibaddr 0x%08x sizedwords %d\n", @@ -445,7 +445,7 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv, if (result < 0) { KGSL_CMD_ERR(device, "wait_event_interruptible_timeout " "failed: %ld\n", result); - goto error; + goto error_put; } result = 0; @@ -477,6 +477,8 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv, z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd); z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0); +error_put: + kgsl_mem_entry_put(entry); error: kgsl_trace_issueibcmds(device, context->id, ibdesc, numibs, diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile index d85d44d8..8e7e1cb2 100644 --- a/drivers/i2c/chips/Makefile +++ b/drivers/i2c/chips/Makefile @@ -9,7 +9,7 @@ # * RTC chip drivers go to drivers/rtc # * I/O expander drivers go to drivers/gpio # -ccflags-y := -Wno-maybe-uninitialized +ccflags-y := -Wno-maybe-uninitialized -Wframe-larger-than=1040 proximity-objs := cm3629.o obj-$(CONFIG_SENSORS_NFC_PN544) += pn544.o diff --git a/drivers/i2c/chips/akm8963.c b/drivers/i2c/chips/akm8963.c index b8fc618a..6b23952c 100644 --- a/drivers/i2c/chips/akm8963.c +++ b/drivers/i2c/chips/akm8963.c @@ -336,6 +336,8 @@ static void AKECS_SetYPR( rbuf[5], rbuf[6], rbuf[7], rbuf[8]); AKM_DATA(&akm->input->dev, " Orientation[YPR] : %6d,%6d,%6d", rbuf[9], rbuf[10], rbuf[11]); + AKM_DATA(&akm->input->dev, " Rotation V : %6d,%6d,%6d,%6d", + rbuf[18], rbuf[19], rbuf[20], rbuf[21]); if (!rbuf[0]) { @@ -363,10 +365,17 @@ static void AKECS_SetYPR( } if (ready & ORI_DATA_READY) { + input_report_abs(akm->input, ABS_HAT0X, rbuf[9]); input_report_abs(akm->input, ABS_HAT0Y, rbuf[10]); input_report_abs(akm->input, ABS_HAT1X, rbuf[11]); input_report_abs(akm->input, ABS_HAT1Y, rbuf[4]); + + + input_report_abs(akm->input, ABS_TILT_X, rbuf[18]); + input_report_abs(akm->input, ABS_TILT_Y, rbuf[19]); + input_report_abs(akm->input, ABS_TOOL_WIDTH, rbuf[20]); + input_report_abs(akm->input, ABS_VOLUME, rbuf[21]); } input_sync(akm->input); @@ -418,15 +427,15 @@ AKECS_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct akm8963_data *akm = file->private_data; - char i2c_buf[RWBUF_SIZE] = {0}; - int8_t sensor_buf[SENSOR_DATA_SIZE] = {0}; - int32_t ypr_buf[YPR_DATA_SIZE] = {0}; - int16_t acc_buf[3] = {0}; - int64_t delay[AKM_NUM_SENSORS] = {0}; - char mode = 0; - char layout = 0; - char outbit = 0; - int status = 0; + char i2c_buf[RWBUF_SIZE] = {0}; + int8_t sensor_buf[SENSOR_DATA_SIZE] = {0}; + int32_t ypr_buf[YPR_DATA_SIZE] = {0}; + int16_t acc_buf[3] = {0}; + int64_t delay[AKM_NUM_SENSORS] = {0}; + char mode = 0; + char layout = 0; + char outbit = 0; + int status = 0; int ret = -1; switch (cmd) { @@ -1189,6 +1198,15 @@ static int akm8963_input_init( 0, 3, 0, 0); + input_set_abs_params(*input, ABS_TILT_X, + -16384, 16384, 0, 0); + input_set_abs_params(*input, ABS_TILT_Y, + -16384, 16384, 0, 0); + input_set_abs_params(*input, ABS_TOOL_WIDTH, + -16384, 16384, 0, 0); + input_set_abs_params(*input, ABS_VOLUME, + -16384, 16384, 0, 0); + (*input)->name = "compass"; diff --git a/drivers/i2c/chips/cm3629.c b/drivers/i2c/chips/cm3629.c index 40fec81d..41e492ee 100644 --- 
a/drivers/i2c/chips/cm3629.c +++ b/drivers/i2c/chips/cm3629.c @@ -799,7 +799,7 @@ static void sensor_irq_do_work(struct work_struct *work) } if (!(add & 0x3F)) { - if (inter_error < 30) { + if (inter_error < 10) { D("[PS][cm3629 warning]%s unkown interrupt: 0x%x!\n", __func__, add); inter_error++ ; diff --git a/drivers/i2c/chips/r3gd20.c b/drivers/i2c/chips/r3gd20.c index ca1a61a3..6950c537 100644 --- a/drivers/i2c/chips/r3gd20.c +++ b/drivers/i2c/chips/r3gd20.c @@ -1863,8 +1863,13 @@ static int r3gd20_suspend(struct i2c_client *client, pm_message_t mesg) u8 buf[2]; int err = -1; - data->is_suspended = 1; - I("%s++: data->is_suspended = %d\n", __func__, data->is_suspended); + if (data) { + data->is_suspended = 1; + I("%s++: data->is_suspended = %d\n", __func__, data->is_suspended); + } else { + E("%s: data = NULL\n", __func__); + return -EINVAL; + } #if DEBUG I("r3gd20_suspend\n"); @@ -1893,7 +1898,7 @@ static int r3gd20_suspend(struct i2c_client *client, pm_message_t mesg) #endif - if (data && (data->pdata->power_LPM)) + if (data->pdata->power_LPM) data->pdata->power_LPM(1); I("%s:--\n", __func__); diff --git a/drivers/i2c/chips/tfa9887.c b/drivers/i2c/chips/tfa9887.c index c6444051..5dcce6b1 100644 --- a/drivers/i2c/chips/tfa9887.c +++ b/drivers/i2c/chips/tfa9887.c @@ -52,6 +52,10 @@ struct mutex spk_amp_lock; static int tfa9887_opened; static int last_spkamp_state; static int dsp_enabled; + +static int tfa9887_step = -1; +static int tfa9887_step_en = 0; + static int tfa9887_i2c_write(char *txData, int length); static int tfa9887_i2c_read(char *rxData, int length); #ifdef CONFIG_DEBUG_FS @@ -180,6 +184,12 @@ static int tfa9887_i2c_write(char *txData, int length) .buf = txData, }, }; + + if (tfa9887_step_en) + tfa9887_step ++; +#if DEBUG + pr_err("%s: tfa9887_step %d\n", __func__, tfa9887_step); +#endif rc = i2c_transfer(this_client->adapter, msg, 1); if (rc < 0) { @@ -187,7 +197,11 @@ static int tfa9887_i2c_write(char *txData, int length) return rc; } + + if (tfa9887_step_en) + tfa9887_step ++; #if DEBUG + pr_err("%s: tfa9887_step %d\n", __func__, tfa9887_step); { int i = 0; for (i = 0; i < length; i++) @@ -211,13 +225,25 @@ static int tfa9887_i2c_read(char *rxData, int length) }, }; + + if (tfa9887_step_en) + tfa9887_step ++; +#if DEBUG + pr_err("%s: tfa9887_step %d\n", __func__, tfa9887_step); +#endif + rc = i2c_transfer(this_client->adapter, msgs, 1); if (rc < 0) { pr_err("%s: transfer error %d\n", __func__, rc); return rc; } + + if (tfa9887_step_en) + tfa9887_step ++; + #if DEBUG + pr_err("%s: tfa9887_step %d\n", __func__, tfa9887_step); { int i = 0; for (i = 0; i < length; i++) @@ -261,6 +287,9 @@ void set_tfa9887_spkamp(int en, int dsp_mode) pr_info("%s: en = %d dsp_enabled = %d\n", __func__, en, dsp_enabled); mutex_lock(&spk_amp_lock); + + tfa9887_step = 0; + tfa9887_step_en = 1; if (en && !last_spkamp_state) { last_spkamp_state = 1; @@ -316,6 +345,10 @@ void set_tfa9887_spkamp(int en, int dsp_mode) tfa9887_i2c_write(power_data, 3); } } + + tfa9887_step_en = 0; + tfa9887_step = -1; + mutex_unlock(&spk_amp_lock); } @@ -340,7 +373,15 @@ static long tfa9887_ioctl(struct file *file, unsigned int cmd, len = reg_value[0]; addr = (char *)reg_value[1]; + + if (tfa9887_step_en) + pr_info("%s TPA9887_WRITE_CONFIG tfa9887_step_en = %d, tfa9887_step = %d\n", + __func__, tfa9887_step_en, tfa9887_step); + tfa9887_step = 0; + tfa9887_step_en = 2; tfa9887_i2c_write(addr+1, len -1); + tfa9887_step_en = 0; + tfa9887_step = -1; break; case TPA9887_READ_CONFIG: @@ -353,7 +394,16 @@ static long 
tfa9887_ioctl(struct file *file, unsigned int cmd, len = reg_value[0]; addr = (char *)reg_value[1]; + + + if (tfa9887_step_en) + pr_info("%s TPA9887_READ_CONFIG tfa9887_step_en = %d, tfa9887_step = %d\n", + __func__, tfa9887_step_en, tfa9887_step); + tfa9887_step = 0; + tfa9887_step_en = 2; tfa9887_i2c_read(addr, len); + tfa9887_step_en = 0; + tfa9887_step = -1; rc = copy_to_user(argp, reg_value, sizeof(reg_value)); if (rc) { @@ -372,7 +422,15 @@ static long tfa9887_ioctl(struct file *file, unsigned int cmd, len = reg_value[0]; addr = (char *)reg_value[1]; + + if (tfa9887l_step_en) + pr_info("%s TPA9887_WRITE_L_CONFIG tfa9887l_step_en = %d, tfa9887l_step = %d\n", + __func__, tfa9887l_step_en, tfa9887l_step); + tfa9887l_step = 0; + tfa9887l_step_en = 2; tfa9887_l_write(addr+1, len -1); + tfa9887l_step_en = 0; + tfa9887l_step = -1; break; case TPA9887_READ_L_CONFIG: pr_debug("%s: TPA9887_READ_CONFIG_L\n", __func__); @@ -384,7 +442,15 @@ static long tfa9887_ioctl(struct file *file, unsigned int cmd, len = reg_value[0]; addr = (char *)reg_value[1]; + + if (tfa9887l_step_en) + pr_info("%s TPA9887_READ_L_CONFIG tfa9887l_step_en = %d, tfa9887l_step = %d\n", + __func__, tfa9887l_step_en, tfa9887l_step); + tfa9887l_step = 0; + tfa9887l_step_en = 2; tfa9887_l_read(addr, len); + tfa9887l_step_en = 0; + tfa9887l_step = -1; rc = copy_to_user(argp, reg_value, sizeof(reg_value)); if (rc) { @@ -394,7 +460,6 @@ static long tfa9887_ioctl(struct file *file, unsigned int cmd, break; #endif case TPA9887_ENABLE_DSP: - pr_info("%s: TPA9887_ENABLE_DSP\n", __func__); rc = copy_from_user(reg_value, argp, sizeof(reg_value));; if (rc) { pr_err("%s: copy from user failed.\n", __func__); @@ -403,6 +468,7 @@ static long tfa9887_ioctl(struct file *file, unsigned int cmd, len = reg_value[0]; dsp_enabled = reg_value[1]; + pr_info("%s: TPA9887_ENABLE_DSP (%d)\n", __func__, dsp_enabled); break; case TPA9887_KERNEL_LOCK: rc = copy_from_user(reg_value, argp, sizeof(reg_value));; @@ -413,11 +479,13 @@ static long tfa9887_ioctl(struct file *file, unsigned int cmd, len = reg_value[0]; - pr_debug("TPA9887_KLOCK1 %d\n", reg_value[1]); - if (reg_value[1]) + if (reg_value[1]) { mutex_lock(&spk_amp_lock); - else + pr_info("TPA9887_KLOCK1 ++\n"); + } else { + pr_info("TPA9887_KLOCK1 --\n"); mutex_unlock(&spk_amp_lock); + } break; } err: diff --git a/drivers/i2c/chips/tfa9887l.c b/drivers/i2c/chips/tfa9887l.c index 207bf0e5..70c7652f 100644 --- a/drivers/i2c/chips/tfa9887l.c +++ b/drivers/i2c/chips/tfa9887l.c @@ -50,8 +50,11 @@ struct mutex spk_ampl_lock; static int tfa9887l_opened; static int last_spkampl_state; static int dspl_enabled; -static int tfa9887_step; -static int tfa9887_step_en; +int tfa9887l_step = -1; +EXPORT_SYMBOL(tfa9887l_step); +int tfa9887l_step_en = 0; +EXPORT_SYMBOL(tfa9887l_step_en); + static int tfa9887_i2c_write(char *txData, int length); static int tfa9887_i2c_read(char *rxData, int length); #ifdef CONFIG_DEBUG_FS @@ -175,25 +178,24 @@ static int tfa9887_i2c_write(char *txData, int length) }, }; - if (tfa9887_step_en) - tfa9887_step ++; + if (tfa9887l_step_en) + tfa9887l_step ++; #if DEBUG - pr_err("%s: tfa9887_step %d\n", __func__, tfa9887_step); + pr_err("%s: tfa9887l_step %d\n", __func__, tfa9887l_step); #endif rc = i2c_transfer(this_client->adapter, msg, 1); - if (rc < 0) { pr_err("%s: transfer error %d\n", __func__, rc); return rc; } - if (tfa9887_step_en) - tfa9887_step ++; + if (tfa9887l_step_en) + tfa9887l_step ++; #if DEBUG - pr_err("%s: tfa9887_step %d\n", __func__, tfa9887_step); + pr_err("%s: 
tfa9887l_step %d\n", __func__, tfa9887l_step); { int i = 0; for (i = 0; i < length; i++) @@ -218,10 +220,10 @@ static int tfa9887_i2c_read(char *rxData, int length) }; - if (tfa9887_step_en) - tfa9887_step ++; + if (tfa9887l_step_en) + tfa9887l_step ++; #if DEBUG - pr_err("%s: tfa9887_step %d\n", __func__, tfa9887_step); + pr_err("%s: tfa9887l_step %d\n", __func__, tfa9887l_step); #endif rc = i2c_transfer(this_client->adapter, msgs, 1); @@ -231,11 +233,11 @@ static int tfa9887_i2c_read(char *rxData, int length) } - if (tfa9887_step_en) - tfa9887_step ++; + if (tfa9887l_step_en) + tfa9887l_step ++; #if DEBUG - pr_err("%s: tfa9887_step %d\n", __func__, tfa9887_step); + pr_err("%s: tfa9887l_step %d\n", __func__, tfa9887l_step); { int i = 0; for (i = 0; i < length; i++) @@ -255,7 +257,7 @@ static int tfa9887l_open(struct inode *inode, struct file *file) pr_info("%s: busy\n", __func__); } tfa9887l_opened = 1; - tfa9887_step_en = 0; + return rc; } @@ -286,11 +288,11 @@ void set_tfa9887l_spkamp(int en, int dsp_mode) unsigned char power_data[3] = {0, 0, 0}; unsigned char SPK_CR[3] = {0x8, 0x8, 0}; - - tfa9887_step = 0; - tfa9887_step_en = 1; pr_info("%s: en = %d dsp_enabled = %d\n", __func__, en, dspl_enabled); mutex_lock(&spk_ampl_lock); + + tfa9887l_step = 0; + tfa9887l_step_en = 1; if (en && !last_spkampl_state) { last_spkampl_state = 1; @@ -346,10 +348,11 @@ void set_tfa9887l_spkamp(int en, int dsp_mode) tfa9887_i2c_write(power_data, 3); } } - mutex_unlock(&spk_ampl_lock); - - tfa9887_step_en = 0; + tfa9887l_step_en = 0; + tfa9887l_step = -1; + + mutex_unlock(&spk_ampl_lock); } static long tfa9887l_ioctl(struct file *file, unsigned int cmd, @@ -373,7 +376,12 @@ static long tfa9887l_ioctl(struct file *file, unsigned int cmd, len = reg_value[0]; addr = (char *)reg_value[1]; + + tfa9887l_step = 0; + tfa9887l_step_en = 2; tfa9887_i2c_write(addr+1, len -1); + tfa9887l_step_en = 0; + tfa9887l_step = -1; break; case TPA9887_READ_CONFIG: @@ -386,7 +394,13 @@ static long tfa9887l_ioctl(struct file *file, unsigned int cmd, len = reg_value[0]; addr = (char *)reg_value[1]; + + + tfa9887l_step = 0; + tfa9887l_step_en = 2; tfa9887_i2c_read(addr, len); + tfa9887l_step_en = 0; + tfa9887l_step = -1; rc = copy_to_user(argp, reg_value, sizeof(reg_value)); if (rc) { @@ -395,7 +409,6 @@ static long tfa9887l_ioctl(struct file *file, unsigned int cmd, } break; case TPA9887_ENABLE_DSP: - pr_info("%s: TPA9887_ENABLE_DSP\n", __func__); rc = copy_from_user(reg_value, argp, sizeof(reg_value));; if (rc) { pr_err("%s: copy from user failed.\n", __func__); @@ -404,6 +417,7 @@ static long tfa9887l_ioctl(struct file *file, unsigned int cmd, len = reg_value[0]; dspl_enabled = reg_value[1]; + pr_info("%s: TPA9887_ENABLE_DSP(%d)\n", __func__, dspl_enabled); break; case TPA9887_KERNEL_LOCK: rc = copy_from_user(reg_value, argp, sizeof(reg_value));; @@ -414,11 +428,13 @@ static long tfa9887l_ioctl(struct file *file, unsigned int cmd, len = reg_value[0]; - pr_debug("TPA9887_KLOCK2 %d\n", reg_value[1]); - if (reg_value[1]) + if (reg_value[1]) { mutex_lock(&spk_ampl_lock); - else + pr_info("TPA9887_KLOCK2 ++\n"); + } else { + pr_info("TPA9887_KLOCK2 --\n"); mutex_unlock(&spk_ampl_lock); + } break; } err: diff --git a/drivers/input/touchscreen/synaptics_3200.c b/drivers/input/touchscreen/synaptics_3200.c index 9e7cb92f..87c64d22 100644 --- a/drivers/input/touchscreen/synaptics_3200.c +++ b/drivers/input/touchscreen/synaptics_3200.c @@ -1011,10 +1011,13 @@ static int syn_config_update(struct synaptics_ts_data *ts, int attr) uint32_t 
crc_checksum; int ret; - crc_checksum = - syn_crc((uint16_t *)ts->config, SYN_CONFIG_SIZE / 2 - 2); - memcpy(&ts->config[SYN_CONFIG_SIZE - 4], &crc_checksum, 4); - printk(KERN_INFO "[TP] CRC = %X\n" , syn_crc((uint16_t *)ts->config, SYN_CONFIG_SIZE / 2 - 2)); + if (ts->config != NULL) + { + crc_checksum = + syn_crc((uint16_t *)ts->config, SYN_CONFIG_SIZE / 2 - 2); + memcpy(&ts->config[SYN_CONFIG_SIZE - 4], &crc_checksum, 4); + printk(KERN_INFO "[TP] CRC = %X\n" , syn_crc((uint16_t *)ts->config, SYN_CONFIG_SIZE / 2 - 2)); + } if (ts->tw_pin_mask == 0) { ret = enable_flash_programming(ts, attr); @@ -2053,6 +2056,24 @@ static int register_sr_touch_device(void) return SUCCESS; } +static ssize_t get_en_sr(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct synaptics_ts_data *ts = gl_ts; + size_t count = 0; + + if (ts->sr_input_dev) + { + count += sprintf(buf + count, "%s ", ts->sr_input_dev->name); + count += sprintf(buf + count, "\n"); + } + else + count += sprintf(buf + count, "0\n"); + + + return count; +} + static ssize_t set_en_sr(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -2066,7 +2087,7 @@ static ssize_t set_en_sr(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR(sr_en, S_IWUSR, 0, set_en_sr); +static DEVICE_ATTR(sr_en, (S_IWUSR|S_IRUGO), get_en_sr, set_en_sr); static struct kobject *android_touch_kobj; diff --git a/drivers/leds/tps61310_flashlight.c b/drivers/leds/tps61310_flashlight.c index 982bac47..24b8d551 100644 --- a/drivers/leds/tps61310_flashlight.c +++ b/drivers/leds/tps61310_flashlight.c @@ -732,12 +732,14 @@ static void fl_lcdev_brightness_set(struct led_classdev *led_cdev, static void flashlight_early_suspend(struct early_suspend *handler) { FLT_INFO_LOG("%s\n", __func__); - if (this_tps61310 != NULL && this_tps61310->mode_status) - flashlight_turn_off(); - if (this_tps61310->power_save) - gpio_set_value_cansleep(this_tps61310->power_save, 0); - if (this_tps61310->power_save_2) - gpio_set_value_cansleep(this_tps61310->power_save_2, 0); + if (this_tps61310 != NULL){ + if(this_tps61310->mode_status) + flashlight_turn_off(); + if (this_tps61310->power_save) + gpio_set_value_cansleep(this_tps61310->power_save, 0); + if (this_tps61310->power_save_2) + gpio_set_value_cansleep(this_tps61310->power_save_2, 0); + } } static void flashlight_late_resume(struct early_suspend *handler) diff --git a/drivers/media/video/msm/gemini/msm_gemini_hw.h b/drivers/media/video/msm/gemini/msm_gemini_hw.h index e1702a53..f282d95b 100644 --- a/drivers/media/video/msm/gemini/msm_gemini_hw.h +++ b/drivers/media/video/msm/gemini/msm_gemini_hw.h @@ -16,7 +16,7 @@ #include #include "msm_gemini_hw_reg.h" #include -#include +#include struct msm_gemini_hw_buf { struct msm_gemini_buf vbuf; diff --git a/drivers/media/video/msm/gemini/msm_gemini_platform.c b/drivers/media/video/msm/gemini/msm_gemini_platform.c index 225613f9..c2600f9b 100644 --- a/drivers/media/video/msm/gemini/msm_gemini_platform.c +++ b/drivers/media/video/msm/gemini/msm_gemini_platform.c @@ -52,7 +52,7 @@ uint32_t msm_gemini_platform_v2p(int fd, uint32_t len, struct file **file_p, return 0; rc = ion_map_iommu(gemini_client, *ionhandle, CAMERA_DOMAIN, GEN_POOL, - SZ_4K, 0, &paddr, (unsigned long *)&size, UNCACHED, 0); + SZ_4K, 0, &paddr, (unsigned long *)&size, 0, 0); #elif CONFIG_ANDROID_PMEM unsigned long kvstart; rc = get_pmem_file(fd, &paddr, &kvstart, &size, file_p); diff --git a/drivers/media/video/msm/gemini/msm_gemini_platform.h 
b/drivers/media/video/msm/gemini/msm_gemini_platform.h index 19779c0a..2fdc47e0 100644 --- a/drivers/media/video/msm/gemini/msm_gemini_platform.h +++ b/drivers/media/video/msm/gemini/msm_gemini_platform.h @@ -15,7 +15,7 @@ #include #include -#include +#include #include void msm_gemini_platform_p2v(struct file *file, diff --git a/drivers/media/video/msm/msm.h b/drivers/media/video/msm/msm.h index ce28a021..0cff34fb 100644 --- a/drivers/media/video/msm/msm.h +++ b/drivers/media/video/msm/msm.h @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c index de849033..79cf6b80 100644 --- a/drivers/media/video/msm/msm_mctl.c +++ b/drivers/media/video/msm/msm_mctl.c @@ -753,6 +753,18 @@ static int msm_mctl_open(struct msm_cam_media_controller *p_mctl, goto sensor_sdev_failed; } + if (s_ctrl->func_tbl->sensor_mode_init == NULL) { + rc = -EFAULT; + goto sensor_sdev_failed; + } + + s_ctrl->first_init=0; + rc = s_ctrl->func_tbl->sensor_mode_init (s_ctrl, MSM_SENSOR_REG_INIT, 0); + if (rc < 0) { + pr_err("%s: sensor_mode_init failed: %d\n", __func__, rc); + goto sensor_sdev_failed; + } + if (p_mctl->actctrl->a_init_table) rc = p_mctl->actctrl->a_init_table(); @@ -846,27 +858,31 @@ static int msm_mctl_open(struct msm_cam_media_controller *p_mctl, static int msm_mctl_release(struct msm_cam_media_controller *p_mctl) { int rc = 0; - struct msm_sensor_ctrl_t *s_ctrl=NULL; - struct msm_camera_sensor_info *sinfo=NULL; - struct msm_camera_device_platform_data *camdev=NULL; + struct msm_sensor_ctrl_t *s_ctrl = 0; + struct msm_camera_sensor_info *sinfo = 0; + struct msm_camera_device_platform_data *camdev = 0; + if (!p_mctl) { pr_err("%s p_mctl is null\n", __func__); - return -EINVAL; + return -EFAULT; } + s_ctrl = get_sctrl(p_mctl->sensor_sdev); if (!s_ctrl) { pr_err("%s s_ctrl is null\n", __func__); - return -EINVAL; + return -EFAULT; } - sinfo = (struct msm_camera_sensor_info *) s_ctrl->sensordata; + + sinfo = (struct msm_camera_sensor_info *) s_ctrl->sensordata; if (!sinfo) { pr_err("%s sinfo is null\n", __func__); - return -EINVAL; + return -EFAULT; } + camdev = sinfo->pdata; if (!camdev) { pr_err("%s camdev is null\n", __func__); - return -EINVAL; + return -EFAULT; } #if 0 @@ -909,8 +925,8 @@ static int msm_mctl_release(struct msm_cam_media_controller *p_mctl) if (p_mctl) { if (!p_mctl->axi_sdev) { - pr_err("%s axi_sdev is null\n", __func__); - return -EINVAL; + pr_err("%s p_mctl->axi_sdev is null\n", __func__); + return -EFAULT; } if (p_mctl == (struct msm_cam_media_controller *) v4l2_get_subdev_hostdata(p_mctl->axi_sdev)) { diff --git a/drivers/media/video/msm/msm_mctl_buf.c b/drivers/media/video/msm/msm_mctl_buf.c index de833958..f08f3569 100644 --- a/drivers/media/video/msm/msm_mctl_buf.c +++ b/drivers/media/video/msm/msm_mctl_buf.c @@ -271,12 +271,10 @@ static void msm_vb2_ops_buf_cleanup(struct vb2_buffer *vb) mem = vb2_plane_cookie(vb, i); if (!mem) { pr_err("%s: null pointer check, line(%d)", __func__, __LINE__); - buf->state = MSM_BUFFER_STATE_UNUSED; return; } if (!pmctl->client) { pr_err("%s: null pointer check, line(%d)", __func__, __LINE__); - buf->state = MSM_BUFFER_STATE_UNUSED; return; } videobuf2_pmem_contig_user_put(mem, pmctl->client); diff --git a/drivers/media/video/msm/msm_mctl_pp.c b/drivers/media/video/msm/msm_mctl_pp.c index dac6f6a3..902d849a 100644 --- a/drivers/media/video/msm/msm_mctl_pp.c +++ b/drivers/media/video/msm/msm_mctl_pp.c @@ -266,10 +266,10 @@ int 
msm_mctl_do_pp_divert( div.frame.num_planes = pcam_inst->plane_info.num_planes; for (i = 0; i < div.frame.num_planes; i++) { mem = vb2_plane_cookie(&vb->vidbuf, i); - if (!mem) { - pr_info("%s mem is null\n", __func__); - return -EINVAL; - } + if (!mem) { + pr_info("%s mem is null\n", __func__); + return -EINVAL; + } div.frame.mp[i].phy_addr = videobuf2_to_pmem_contig(&vb->vidbuf, i); if (!pcam_inst->buf_offset) @@ -326,10 +326,10 @@ static int msm_mctl_pp_get_phy_addr( pp_frame->num_planes = pcam_inst->plane_info.num_planes; for (i = 0; i < pp_frame->num_planes; i++) { mem = vb2_plane_cookie(&vb->vidbuf, i); - if (!mem) { - pr_info("%s mem is null\n", __func__); - return -EINVAL; - } + if (!mem) { + pr_info("%s mem is null\n", __func__); + return -EINVAL; + } pp_frame->mp[i].addr_offset = mem->addr_offset; pp_frame->mp[i].phy_addr = videobuf2_to_pmem_contig(&vb->vidbuf, i); diff --git a/drivers/media/video/msm/msm_mem.c b/drivers/media/video/msm/msm_mem.c index b6cd58ff..00febf31 100644 --- a/drivers/media/video/msm/msm_mem.c +++ b/drivers/media/video/msm/msm_mem.c @@ -139,7 +139,7 @@ static int msm_pmem_table_add(struct hlist_head *ptype, if (IS_ERR_OR_NULL(region->handle)) goto out1; if (ion_map_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL, - SZ_4K, 0, &paddr, &len, UNCACHED, 0) < 0) + SZ_4K, 0, &paddr, &len, 0 , 0) < 0) goto out2; rc = ion_handle_get_flags(client, region->handle, &ionflag); @@ -148,7 +148,7 @@ static int msm_pmem_table_add(struct hlist_head *ptype, return 0; } D("ionflag=%ld\n", ionflag); - vaddr = ion_map_kernel(client, region->handle, ionflag); + vaddr = ion_map_kernel(client, region->handle); if (IS_ERR_OR_NULL(vaddr)) { pr_err("%s: could not get virtual address\n", __func__); return 0; diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c index 20a80de7..5ab02794 100644 --- a/drivers/media/video/msm/msm_vfe32.c +++ b/drivers/media/video/msm/msm_vfe32.c @@ -3080,6 +3080,10 @@ static void vfe32_process_reg_update_irq(void) vfe32_ctrl->liveshot_state = VFE_STATE_STARTED; } + else { + pr_info("%s output_mode 0x%x\n", __func__, + vfe32_ctrl->outpath.output_mode); + } break; case VFE_STATE_STARTED: @@ -3097,6 +3101,12 @@ static void vfe32_process_reg_update_irq(void) msm_io_w_mb(1, vfe32_ctrl->vfebase + VFE_REG_UPDATE_CMD); } + else { + pr_info("%s output_mode 0x%x, vfe_capture_count=%d, recording_state=%d\n", __func__, + vfe32_ctrl->outpath.output_mode, + vfe32_ctrl->vfe_capture_count, + vfe32_ctrl->recording_state); + } break; case VFE_STATE_HW_STOP_REQUESTED: vfe32_ctrl->liveshot_state = VFE_STATE_HW_STOPPED; diff --git a/drivers/media/video/msm/sensors/ar0260_v4l2.c b/drivers/media/video/msm/sensors/ar0260_v4l2.c index ba746d47..7c62cd9a 100644 --- a/drivers/media/video/msm/sensors/ar0260_v4l2.c +++ b/drivers/media/video/msm/sensors/ar0260_v4l2.c @@ -1442,6 +1442,7 @@ static struct msm_camera_i2c_client ar0260_sensor_i2c_client = { int32_t ar0260_power_up(struct msm_sensor_ctrl_t *s_ctrl) { int rc; + struct sensor_cfg_data cdata; struct msm_camera_sensor_info *sdata = NULL; pr_info("%s\n", __func__); @@ -1520,6 +1521,14 @@ int32_t ar0260_power_up(struct msm_sensor_ctrl_t *s_ctrl) #endif ar0260_sensor_open_init(sdata); + if (s_ctrl->func_tbl->sensor_i2c_read_fuseid == NULL) { + rc = -EFAULT; + return rc; + } + rc = s_ctrl->func_tbl->sensor_i2c_read_fuseid(&cdata, s_ctrl); + if (rc < 0) { + return rc; + } pr_info("%s end\n", __func__); return 0; diff --git a/drivers/media/video/msm/sensors/msm_sensor.c 
b/drivers/media/video/msm/sensors/msm_sensor.c index 9984da99..c0fb509c 100644 --- a/drivers/media/video/msm/sensors/msm_sensor.c +++ b/drivers/media/video/msm/sensors/msm_sensor.c @@ -78,8 +78,13 @@ static int oem_sensor_init(void *arg) #ifdef CONFIG_RAWCHIP if (s_ctrl->sensordata->use_rawchip) { rawchip_data.sensor_name = s_ctrl->sensordata->sensor_name; - rawchip_data.datatype = s_ctrl->curr_csi_params->csid_params.lut_params.vc_cfg->dt; - rawchip_data.lane_cnt = s_ctrl->curr_csi_params->csid_params.lane_cnt; + if (s_ctrl->curr_csi_params) { + rawchip_data.datatype = s_ctrl->curr_csi_params->csid_params.lut_params.vc_cfg->dt; + rawchip_data.lane_cnt = s_ctrl->curr_csi_params->csid_params.lane_cnt; + } + else { + pr_info("%s: s_ctrl->curr_csi_params is null\n", __func__); + } rawchip_data.pixel_clk = s_ctrl->msm_sensor_reg->output_settings[res].op_pixel_clk; rawchip_data.mirror_flip = s_ctrl->mirror_flip; @@ -691,6 +696,10 @@ int32_t msm_sensor_setting_parallel(struct msm_sensor_ctrl_t *s_ctrl, pr_info("%s: update_type=%d, res=%d\n", __func__, update_type, res); if (update_type == MSM_SENSOR_REG_INIT) { + if (s_ctrl->first_init) { + pr_info("%s: MSM_SENSOR_REG_INIT already inited\n", __func__); + return rc; + } mutex_lock(s_ctrl->sensor_first_mutex); #ifdef CONFIG_RAWCHIPII @@ -1152,8 +1161,13 @@ static int oem_sensor_init_ov(void *arg) #ifdef CONFIG_RAWCHIP if (s_ctrl->sensordata->use_rawchip) { rawchip_data.sensor_name = s_ctrl->sensordata->sensor_name; - rawchip_data.datatype = s_ctrl->curr_csi_params->csid_params.lut_params.vc_cfg->dt; - rawchip_data.lane_cnt = s_ctrl->curr_csi_params->csid_params.lane_cnt; + if (s_ctrl->curr_csi_params) { + rawchip_data.datatype = s_ctrl->curr_csi_params->csid_params.lut_params.vc_cfg->dt; + rawchip_data.lane_cnt = s_ctrl->curr_csi_params->csid_params.lane_cnt; + } + else { + pr_info("%s: s_ctrl->curr_csi_params is null\n", __func__); + } rawchip_data.pixel_clk = s_ctrl->msm_sensor_reg->output_settings[res].op_pixel_clk; rawchip_data.mirror_flip = s_ctrl->mirror_flip; @@ -1207,6 +1221,10 @@ int32_t msm_sensor_setting_parallel_ov(struct msm_sensor_ctrl_t *s_ctrl, pr_info("%s: update_type=%d, res=%d\n", __func__, update_type, res); if (update_type == MSM_SENSOR_REG_INIT) { + if (s_ctrl->first_init) { + pr_info("%s: MSM_SENSOR_REG_INIT already inited\n", __func__); + return rc; + } mutex_lock(s_ctrl->sensor_first_mutex); #ifdef CONFIG_RAWCHIPII diff --git a/drivers/media/video/msm/sensors/ov2722_v4l2.c b/drivers/media/video/msm/sensors/ov2722_v4l2.c index 60df25cb..c674b806 100644 --- a/drivers/media/video/msm/sensors/ov2722_v4l2.c +++ b/drivers/media/video/msm/sensors/ov2722_v4l2.c @@ -926,6 +926,7 @@ static struct msm_camera_i2c_client ov2722_sensor_i2c_client = { int32_t ov2722_power_up(struct msm_sensor_ctrl_t *s_ctrl) { int rc; + struct sensor_cfg_data cdata; struct msm_camera_sensor_info *sdata = NULL; pr_info("%s\n", __func__); @@ -960,6 +961,15 @@ int32_t ov2722_power_up(struct msm_sensor_ctrl_t *s_ctrl) #endif ov2722_sensor_open_init(sdata); + if (s_ctrl->func_tbl->sensor_i2c_read_fuseid == NULL) { + rc = -EFAULT; + return rc; + } + rc = s_ctrl->func_tbl->sensor_i2c_read_fuseid(&cdata, s_ctrl); + if (rc < 0) { + return rc; + } + pr_info("%s end\n", __func__); return 0; diff --git a/drivers/media/video/msm/wfd/enc-subdev.c b/drivers/media/video/msm/wfd/enc-subdev.c index 2e6f8d56..49330111 100644 --- a/drivers/media/video/msm/wfd/enc-subdev.c +++ b/drivers/media/video/msm/wfd/enc-subdev.c @@ -2010,7 +2010,8 @@ static long 
venc_alloc_recon_buffers(struct v4l2_subdev *sd, void *arg) struct vcd_property_enc_recon_buffer *ctrl = NULL; unsigned long phy_addr; int i = 0; - int flags = 0; + int heap_mask = 0; + int flags = 0; u32 len; control.width = inst->width; control.height = inst->height; @@ -2023,8 +2024,13 @@ static long venc_alloc_recon_buffers(struct v4l2_subdev *sd, void *arg) WFD_MSG_ERR("Failed to get recon buf size\n"); goto err; } - flags = ION_HEAP(ION_CP_MM_HEAP_ID); - flags |= inst->secure ? ION_SECURE : ION_HEAP(ION_IOMMU_HEAP_ID); + + if (inst->secure) { + heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID); + flags |= ION_SECURE; + } else { + heap_mask = (ION_HEAP(ION_CP_MM_HEAP_ID) | ION_HEAP(ION_IOMMU_HEAP_ID)); + } if (vcd_get_ion_status()) { for (i = 0; i < 4; ++i) { @@ -2035,11 +2041,11 @@ static long venc_alloc_recon_buffers(struct v4l2_subdev *sd, void *arg) ctrl->user_virtual_addr = (void *)i; client_ctx->recon_buffer_ion_handle[i] = ion_alloc(client_ctx->user_ion_client, - control.size, SZ_8K, flags); + control.size, SZ_8K, heap_mask, flags); ctrl->kernel_virtual_addr = ion_map_kernel( client_ctx->user_ion_client, - client_ctx->recon_buffer_ion_handle[i], 0); + client_ctx->recon_buffer_ion_handle[i]); rc = ion_map_iommu(client_ctx->user_ion_client, client_ctx->recon_buffer_ion_handle[i], diff --git a/drivers/media/video/msm/wfd/enc-subdev.h b/drivers/media/video/msm/wfd/enc-subdev.h index f2740d40..4a15f715 100644 --- a/drivers/media/video/msm/wfd/enc-subdev.h +++ b/drivers/media/video/msm/wfd/enc-subdev.h @@ -14,7 +14,7 @@ #ifndef _WFD_ENC_SUBDEV_ #define _WFD_ENC_SUBDEV_ -#include +#include #include #include #define VENC_MAGIC_IOCTL 'V' diff --git a/drivers/media/video/msm/wfd/wfd-ioctl.c b/drivers/media/video/msm/wfd/wfd-ioctl.c index 6078150e..3dc426c1 100644 --- a/drivers/media/video/msm/wfd/wfd-ioctl.c +++ b/drivers/media/video/msm/wfd/wfd-ioctl.c @@ -157,13 +157,18 @@ static int wfd_allocate_ion_buffer(struct ion_client *client, void *kvaddr, *phys_addr; unsigned long size; unsigned int alloc_regions = 0; + unsigned int flags = 0; int rc; - alloc_regions = ION_HEAP(ION_CP_MM_HEAP_ID); - alloc_regions |= secure ? 
ION_SECURE : - ION_HEAP(ION_IOMMU_HEAP_ID); + if (secure) { + alloc_regions = ION_HEAP(ION_CP_MM_HEAP_ID); + flags |= ION_SECURE; + } else { + alloc_regions = (ION_HEAP(ION_CP_MM_HEAP_ID) | ION_HEAP(ION_IOMMU_HEAP_ID)); + } + handle = ion_alloc(client, - mregion->size, SZ_4K, alloc_regions); + mregion->size, SZ_4K, alloc_regions, flags); if (IS_ERR_OR_NULL(handle)) { WFD_MSG_ERR("Failed to allocate input buffer\n"); @@ -171,7 +176,7 @@ static int wfd_allocate_ion_buffer(struct ion_client *client, goto alloc_fail; } - kvaddr = ion_map_kernel(client, handle, CACHED); + kvaddr = ion_map_kernel(client, handle); if (IS_ERR_OR_NULL(kvaddr)) { WFD_MSG_ERR("Failed to get virtual addr\n"); diff --git a/drivers/media/video/videobuf2-msm-mem.c b/drivers/media/video/videobuf2-msm-mem.c index 4ec540d8..962e5a66 100644 --- a/drivers/media/video/videobuf2-msm-mem.c +++ b/drivers/media/video/videobuf2-msm-mem.c @@ -57,7 +57,7 @@ static unsigned long msm_mem_allocate(struct videobuf2_contig_pmem *mem) goto client_failed; } mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K, - (0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID)); + (0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID), 0); if (IS_ERR((void *)mem->ion_handle)) { pr_err("%s Could not allocate\n", __func__); goto alloc_failed; @@ -65,7 +65,7 @@ static unsigned long msm_mem_allocate(struct videobuf2_contig_pmem *mem) rc = ion_map_iommu(mem->client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL, SZ_4K, 0, (unsigned long *)&phyaddr, - (unsigned long *)&len, UNCACHED, 0); + (unsigned long *)&len, 0 , 0); if (rc < 0) { pr_err("%s Could not get physical address\n", __func__); goto phys_failed; @@ -194,7 +194,7 @@ int videobuf2_pmem_contig_user_get(struct videobuf2_contig_pmem *mem, return PTR_ERR(mem->ion_handle); } rc = ion_map_iommu(client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL, - SZ_4K, 0, (unsigned long *)&mem->phyaddr, &len, UNCACHED, 0); + SZ_4K, 0, (unsigned long *)&mem->phyaddr, &len, 0 , 0); if (rc < 0) ion_free(client, mem->ion_handle); @@ -204,7 +204,7 @@ int videobuf2_pmem_contig_user_get(struct videobuf2_contig_pmem *mem, return 0; } D("ionflag=%ld\n", ionflag); - vaddr = ion_map_kernel(client, mem->ion_handle, ionflag); + vaddr = ion_map_kernel(client, mem->ion_handle); if (IS_ERR_OR_NULL(vaddr)) { pr_err("%s: could not get virtual address\n", __func__); return 0; diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c index dd8c240b..f0b65d4e 100644 --- a/drivers/mfd/wcd9xxx-core.c +++ b/drivers/mfd/wcd9xxx-core.c @@ -731,13 +731,6 @@ static int __devinit wcd9xxx_i2c_probe(struct i2c_client *client, return ret; } - wcd9xxx = kzalloc(sizeof(struct wcd9xxx), GFP_KERNEL); - if (wcd9xxx == NULL) { - pr_err("%s: error, allocation failed\n", __func__); - ret = -ENOMEM; - goto fail; - } - if (!pdata) { dev_dbg(&client->dev, "no platform data?\n"); ret = -EINVAL; @@ -748,6 +741,16 @@ static int __devinit wcd9xxx_i2c_probe(struct i2c_client *client, ret = -EIO; goto fail; } + + + wcd9xxx = kzalloc(sizeof(struct wcd9xxx), GFP_KERNEL); + if (wcd9xxx == NULL) { + pr_err("%s: error, allocation failed\n", __func__); + ret = -ENOMEM; + goto fail; + } + + dev_set_drvdata(&client->dev, wcd9xxx); wcd9xxx->dev = &client->dev; wcd9xxx->reset_gpio = pdata->reset_gpio; diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 97e75380..45e8bf3c 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -83,10 +83,10 @@ __packed struct qseecom_check_app_ireq { __packed struct qseecom_load_app_ireq { uint32_t 
qsee_cmd_id; - uint32_t mdt_len; /* Length of the mdt file */ - uint32_t img_len; /* Length of .bxx and .mdt files */ - uint32_t phy_addr; /* phy addr of the start of image */ - char app_name[MAX_APP_NAME_SIZE]; /* application name*/ + uint32_t mdt_len; + uint32_t img_len; + uint32_t phy_addr; + char app_name[MAX_APP_NAME_SIZE]; }; __packed struct qseecom_unload_app_ireq { @@ -111,23 +111,15 @@ __packed struct qseecom_client_send_data_ireq { uint32_t app_id; void *req_ptr; uint32_t req_len; - void *rsp_ptr; /* First 4 bytes should always be the return status */ + void *rsp_ptr; uint32_t rsp_len; }; -/* send_data resp */ __packed struct qseecom_client_listener_data_irsp { uint32_t qsee_cmd_id; uint32_t listener_id; }; -/* - * struct qseecom_command_scm_resp - qseecom response buffer - * @cmd_status: value from enum tz_sched_cmd_status - * @sb_in_rsp_addr: points to physical location of response - * buffer - * @sb_in_rsp_len: length of command response - */ __packed struct qseecom_command_scm_resp { uint32_t result; enum qseecom_command_scm_resp_type resp_type; @@ -138,7 +130,6 @@ static struct class *driver_class; static dev_t qseecom_device_no; static struct cdev qseecom_cdev; -/* Data structures used in legacy support */ static void *pil; static uint32_t pil_ref_cnt; static DEFINE_MUTEX(pil_access_lock); @@ -159,7 +150,7 @@ struct qseecom_registered_listener_list { u8 *sb_virt; s32 sb_phys; size_t sb_length; - struct ion_handle *ihandle; /* Retrieve phy addr */ + struct ion_handle *ihandle; wait_queue_head_t rcv_req_wq; int rcv_req_flag; @@ -172,7 +163,7 @@ struct qseecom_registered_app_list { }; struct qseecom_control { - struct ion_client *ion_clnt; /* Ion client */ + struct ion_client *ion_clnt; struct list_head registered_listener_list_head; spinlock_t registered_listener_list_lock; @@ -191,7 +182,7 @@ struct qseecom_client_handle { s32 sb_phys; uint32_t user_virt_sb_base; size_t sb_length; - struct ion_handle *ihandle; /* Retrieve phy addr */ + struct ion_handle *ihandle; }; struct qseecom_listener_handle { @@ -215,7 +206,6 @@ struct qseecom_dev_handle { DECLARE_COMPLETION(qseecomd_finish); DECLARE_COMPLETION(pil_work_finished); -/* Function proto types */ static int qsee_vote_for_clock(int32_t); static void qsee_disable_clock_vote(int32_t); @@ -252,6 +242,12 @@ static struct qseecom_registered_listener_list *__qseecom_find_svc( break; } spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags); + + if ((entry != NULL) && (entry->svc.listener_id != listener_id)) { + pr_err("Service id: %u is not found\n", listener_id); + return NULL; + } + return entry; } @@ -260,12 +256,11 @@ static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc, struct qseecom_register_listener_req *listener) { int ret = 0; - unsigned int flags = 0; struct qseecom_register_listener_ireq req; struct qseecom_command_scm_resp resp; ion_phys_addr_t pa; - /* Get the handle of the shared fd */ + svc->ihandle = ion_import_dma_buf(qseecom.ion_clnt, listener->ifd_data_fd); if (svc->ihandle == NULL) { @@ -273,12 +268,11 @@ static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc, return -ENOMEM; } - /* Get the physical address of the ION BUF */ + ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length); - /* Populate the structure for sending scm call to load image */ - svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, - svc->ihandle, flags); + + svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle); svc->sb_phys = pa; if 
(qseecom.qseos_version == QSEOS_VERSION_14) { @@ -309,6 +303,10 @@ static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc, svc->sb_reg_req = kzalloc((sizeof(sb_init_req) + sizeof(sb_init_rsp)), GFP_KERNEL); + if (!svc->sb_reg_req) { + pr_err("Error Failed to allocate memory\n"); + return -ENOMEM; + } sb_init_req.pr_cmd = TZ_SCHED_CMD_ID_REGISTER_LISTENER; sb_init_req.listener_id = svc->svc.listener_id; @@ -317,7 +315,7 @@ static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc, memcpy(svc->sb_reg_req, &sb_init_req, sizeof(sb_init_req)); - /* It will always be a new cmd from this method */ + cmd.cmd_type = TZ_SCHED_CMD_NEW; cmd.sb_in_cmd_addr = (u8 *)(virt_to_phys(svc->sb_reg_req)); cmd.sb_in_cmd_len = sizeof(sb_init_req); @@ -397,7 +395,7 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data) struct qseecom_register_listener_ireq req; struct qseecom_registered_listener_list *ptr_svc = NULL; struct qseecom_command_scm_resp resp; - struct ion_handle *ihandle = NULL; /* Retrieve phy addr */ + struct ion_handle *ihandle = NULL; if (qseecom.qseos_version == QSEOS_VERSION_14) { req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER; @@ -422,6 +420,11 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data) struct qseecom_registered_listener_list *svc; svc = __qseecom_find_svc(data->listener.id); + if (!svc) { + pr_err("service (%d) is not found\n", data->listener.id); + return -ENODATA; + } + sb_init_req.pr_cmd = TZ_SCHED_CMD_ID_REGISTER_LISTENER; sb_init_req.listener_id = data->listener.id; sb_init_req.sb_len = 0; @@ -429,7 +432,7 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data) memcpy(svc->sb_reg_req, &sb_init_req, sizeof(sb_init_req)); - /* It will always be a new cmd from this method */ + cmd.cmd_type = TZ_SCHED_CMD_NEW; cmd.sb_in_cmd_addr = (u8 *)(virt_to_phys(svc->sb_reg_req)); cmd.sb_in_cmd_len = sizeof(sb_init_req); @@ -484,7 +487,7 @@ static int qseecom_unregister_listener(struct qseecom_dev_handle *data) } spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags); - /* Unmap the memory */ + if (unmap_mem) { if (!IS_ERR_OR_NULL(ihandle)) { ion_unmap_kernel(qseecom.ion_clnt, ihandle); @@ -500,27 +503,24 @@ static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data, { ion_phys_addr_t pa; int32_t ret; - unsigned int flags = 0; struct qseecom_set_sb_mem_param_req req; uint32_t len; - /* Copy the relevant information needed for loading the image */ - if (__copy_from_user(&req, (void __user *)argp, sizeof(req))) + + if (copy_from_user(&req, (void __user *)argp, sizeof(req))) return -EFAULT; - /* Get the handle of the shared fd */ + data->client.ihandle = ion_import_dma_buf(qseecom.ion_clnt, req.ifd_data_fd); if (IS_ERR_OR_NULL(data->client.ihandle)) { pr_err("Ion client could not retrieve the handle\n"); return -ENOMEM; } - /* Get the physical address of the ION BUF */ + ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len); - /* Populate the structure for sending scm call to load image */ - data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, - data->client.ihandle, - flags); + + data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, data->client.ihandle); data->client.sb_phys = pa; data->client.sb_length = req.sb_len; data->client.user_virt_sb_base = req.virt_sb_base; @@ -547,9 +547,6 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data, while (resp->result == QSEOS_RESULT_INCOMPLETE) { lstnr = resp->data; 
- /* - * Wake up blocking lsitener service with the lstnr id - */ spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags); list_for_each_entry(ptr_svc, @@ -605,7 +602,7 @@ static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req) int32_t ret; struct qseecom_command_scm_resp resp; - /* SCM_CALL to check if app_id for the mentioned app exists */ + ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(struct qseecom_check_app_ireq), &resp, sizeof(resp)); @@ -618,7 +615,7 @@ static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req) return 0; } else { switch (resp.resp_type) { - /*qsee returned listener type response */ + case QSEOS_LISTENER_ID: pr_err("resp type is of listener type instead of app"); return -EINVAL; @@ -639,7 +636,7 @@ static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp) struct qseecom_registered_app_list *entry = NULL; unsigned long flags = 0; u32 app_id = 0; - struct ion_handle *ihandle; /* Ion handle */ + struct ion_handle *ihandle; struct qseecom_load_img_req load_img_req; int32_t ret; ion_phys_addr_t pa = 0; @@ -648,24 +645,25 @@ static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp) struct qseecom_check_app_ireq req; struct qseecom_load_app_ireq load_req; - /* Copy the relevant information needed for loading the image */ - if (__copy_from_user(&load_img_req, + + if (copy_from_user(&load_img_req, (void __user *)argp, sizeof(struct qseecom_load_img_req))) { pr_err("copy_from_user failed\n"); return -EFAULT; } - /* Vote for the SFPB clock */ + ret = qsee_vote_for_clock(CLK_SFPB); if (ret) pr_warning("Unable to vote for SFPB clock"); req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND; + load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0'; memcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE); pr_warn("App (%s) does not exist, loading apps for first time\n", (char *)(req.app_name)); - /* Get the handle of the shared fd */ + ihandle = ion_import_dma_buf(qseecom.ion_clnt, load_img_req.ifd_data_fd); if (IS_ERR_OR_NULL(ihandle)) { @@ -674,16 +672,16 @@ static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp) return -ENOMEM; } - /* Get the physical address of the ION BUF */ + ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len); - /* Populate the structure for sending scm call to load image */ + load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND; load_req.mdt_len = load_img_req.mdt_len; load_req.img_len = load_img_req.img_len; load_req.phy_addr = pa; - /* SCM_CALL to load the app and get the app_id back */ + ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req, sizeof(struct qseecom_load_app_ireq), &resp, sizeof(resp)); @@ -732,7 +730,7 @@ static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp) entry->app_id = app_id; entry->ref_cnt = 1; - /* Deallocate the handle */ + if (!IS_ERR_OR_NULL(ihandle)) ion_free(qseecom.ion_clnt, ihandle); @@ -766,7 +764,7 @@ static int __qseecom_cleanup_app(struct qseecom_dev_handle *data) break; } } - /* Set unload app */ + return 1; } @@ -808,11 +806,11 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data) if ((unload) && (qseecom.qseos_version == QSEOS_VERSION_14)) { struct qseecom_unload_app_ireq req; - /* Populate the structure for sending scm call to load image */ + req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND; req.app_id = data->client.app_id; - /* SCM_CALL to unload the app */ + ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(struct qseecom_unload_app_ireq), &resp, sizeof(resp)); @@ -903,16 +901,7 @@ 
static int __qseecom_send_cmd_legacy(struct qseecom_dev_handle *data, } while (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) { - /* - * If cmd is incomplete, get the callback cmd out from SB out - * and put it on the list - */ struct qseecom_registered_listener_list *ptr_svc = NULL; - /* - * We don't know which service can handle the command. so we - * wake up all blocking services and let them figure out if - * they can handle the given command. - */ spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags); list_for_each_entry(ptr_svc, @@ -959,9 +948,22 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data, pr_err("cmd buffer or response buffer is null\n"); return -EINVAL; } + if (((uint32_t)req->cmd_req_buf < data->client.user_virt_sb_base) || + ((uint32_t)req->cmd_req_buf >= (data->client.user_virt_sb_base + + data->client.sb_length))) { + pr_err("cmd buffer address not within shared bufffer\n"); + return -EINVAL; + } - if (req->cmd_req_len <= 0 || - req->resp_len <= 0 || + + if (((uint32_t)req->resp_buf < data->client.user_virt_sb_base) || + ((uint32_t)req->resp_buf >= (data->client.user_virt_sb_base + + data->client.sb_length))){ + pr_err("response buffer address not within shared bufffer\n"); + return -EINVAL; + } + + if ((req->cmd_req_len == 0) || (req->resp_len == 0) || req->cmd_req_len > data->client.sb_length || req->resp_len > data->client.sb_length) { pr_err("cmd buffer length or " @@ -969,6 +971,11 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data, return -EINVAL; } + if (req->cmd_req_len > UINT_MAX - req->resp_len) { + pr_err("Integer overflow detected in req_len & rsp_len, exiting now\n"); + return -EINVAL; + } + reqd_len_sb_in = req->cmd_req_len + req->resp_len; if (reqd_len_sb_in > data->client.sb_length) { pr_debug("Not enough memory to fit cmd_buf and " @@ -1065,7 +1072,7 @@ static int __qseecom_update_with_phy_addr( for (i = 0; i < MAX_ION_FD; i++) { if (req->ifd_data[i].fd > 0) { - /* Get the handle of the shared fd */ + ihandle = ion_import_dma_buf(qseecom.ion_clnt, req->ifd_data[i].fd); if (IS_ERR_OR_NULL(ihandle)) { @@ -1076,13 +1083,13 @@ static int __qseecom_update_with_phy_addr( req->ifd_data[i].cmd_buf_offset; update = (uint32_t *) field; - /* Populate the cmd data structure with the phys_addr */ + ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &length); if (ret) return -ENOMEM; *update = (uint32_t)pa; - /* Deallocate the handle */ + if (!IS_ERR_OR_NULL(ihandle)) ion_free(qseecom.ion_clnt, ihandle); } @@ -1094,6 +1101,7 @@ static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data, void __user *argp) { int ret = 0; + int i; struct qseecom_send_modfd_cmd_req req; struct qseecom_send_cmd_req send_cmd_req; @@ -1107,6 +1115,14 @@ static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data, send_cmd_req.resp_buf = req.resp_buf; send_cmd_req.resp_len = req.resp_len; + + for (i = 0; i < MAX_ION_FD; i++) { + if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) { + pr_err("Invalid offset %d = 0x%x\n", + i, req.ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + } ret = __qseecom_update_with_phy_addr(&req); if (ret) return ret; @@ -1138,12 +1154,17 @@ static int qseecom_receive_req(struct qseecom_dev_handle *data) struct qseecom_registered_listener_list *this_lstnr; this_lstnr = __qseecom_find_svc(data->listener.id); + if (!this_lstnr) { + pr_err("Invalid listener ID\n"); + return -ENODATA; + } + while (1) { if (wait_event_freezable(this_lstnr->rcv_req_wq, __qseecom_listener_has_rcvd_req(data, this_lstnr))) { 
pr_warning("Interrupted: exiting wait_rcv_req loop\n"); - /* woken up for different reason */ + return -ERESTARTSYS; } @@ -1195,7 +1216,7 @@ static int qsee_vote_for_clock(int32_t clk_type) switch (clk_type) { case CLK_DFAB: - /* Check if the clk is valid */ + if (IS_ERR_OR_NULL(qseecom_bus_clk)) { pr_warn("qseecom bus clock is null or error"); return -EINVAL; @@ -1241,7 +1262,7 @@ static void qsee_disable_clock_vote(int32_t clk_type) switch (clk_type) { case CLK_DFAB: - /* Check if the DFAB clk is valid */ + if (IS_ERR_OR_NULL(qseecom_bus_clk)) { pr_warn("qseecom bus clock is null or error"); return; @@ -1281,7 +1302,7 @@ static void qsee_disable_clock_vote(int32_t clk_type) static int qseecom_load_external_elf(struct qseecom_dev_handle *data, void __user *argp) { - struct ion_handle *ihandle; /* Ion handle */ + struct ion_handle *ihandle; struct qseecom_load_img_req load_img_req; int ret; int set_cpu_ret = 0; @@ -1291,15 +1312,15 @@ static int qseecom_load_external_elf(struct qseecom_dev_handle *data, struct qseecom_load_app_ireq load_req; struct qseecom_command_scm_resp resp; - /* Copy the relevant information needed for loading the image */ - if (__copy_from_user(&load_img_req, + + if (copy_from_user(&load_img_req, (void __user *)argp, sizeof(struct qseecom_load_img_req))) { pr_err("copy_from_user failed\n"); return -EFAULT; } - /* Get the handle of the shared fd */ + ihandle = ion_import_dma_buf(qseecom.ion_clnt, load_img_req.ifd_data_fd); if (IS_ERR_OR_NULL(ihandle)) { @@ -1307,16 +1328,16 @@ static int qseecom_load_external_elf(struct qseecom_dev_handle *data, return -ENOMEM; } - /* Get the physical address of the ION BUF */ + ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len); - /* Populate the structure for sending scm call to load image */ + load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND; load_req.mdt_len = load_img_req.mdt_len; load_req.img_len = load_img_req.img_len; load_req.phy_addr = pa; - /* SCM_CALL tied to Core0 */ + mask = CPU_MASK_CPU0; set_cpu_ret = set_cpus_allowed_ptr(current, &mask); if (set_cpu_ret) { @@ -1326,7 +1347,7 @@ static int qseecom_load_external_elf(struct qseecom_dev_handle *data, goto qseecom_load_external_elf_set_cpu_err; } - /* SCM_CALL to load the external elf */ + ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req, sizeof(struct qseecom_load_app_ireq), &resp, sizeof(resp)); @@ -1351,7 +1372,7 @@ static int qseecom_load_external_elf(struct qseecom_dev_handle *data, } qseecom_load_external_elf_scm_err: - /* Restore the CPU mask */ + mask = CPU_MASK_ALL; set_cpu_ret = set_cpus_allowed_ptr(current, &mask); if (set_cpu_ret) { @@ -1361,7 +1382,7 @@ static int qseecom_load_external_elf(struct qseecom_dev_handle *data, } qseecom_load_external_elf_set_cpu_err: - /* Deallocate the handle */ + if (!IS_ERR_OR_NULL(ihandle)) ion_free(qseecom.ion_clnt, ihandle); @@ -1376,10 +1397,10 @@ static int qseecom_unload_external_elf(struct qseecom_dev_handle *data) struct qseecom_unload_app_ireq req; struct cpumask mask; - /* Populate the structure for sending scm call to unload image */ + req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND; - /* SCM_CALL tied to Core0 */ + mask = CPU_MASK_CPU0; ret = set_cpus_allowed_ptr(current, &mask); if (ret) { @@ -1388,7 +1409,7 @@ static int qseecom_unload_external_elf(struct qseecom_dev_handle *data) return -EFAULT; } - /* SCM_CALL to unload the external elf */ + ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(struct qseecom_unload_app_ireq), &resp, sizeof(resp)); @@ -1412,7 +1433,7 @@ static int 
qseecom_unload_external_elf(struct qseecom_dev_handle *data) } qseecom_unload_external_elf_scm_err: - /* Restore the CPU mask */ + mask = CPU_MASK_ALL; set_cpu_ret = set_cpus_allowed_ptr(current, &mask); if (set_cpu_ret) { @@ -1434,8 +1455,8 @@ static int qseecom_query_app_loaded(struct qseecom_dev_handle *data, struct qseecom_registered_app_list *entry = NULL; unsigned long flags = 0; - /* Copy the relevant information needed for loading the image */ - if (__copy_from_user(&query_req, + + if (copy_from_user(&query_req, (void __user *)argp, sizeof(struct qseecom_qseos_app_load_query))) { pr_err("copy_from_user failed\n"); @@ -1443,13 +1464,14 @@ static int qseecom_query_app_loaded(struct qseecom_dev_handle *data, } req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND; + query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0'; memcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE); ret = __qseecom_check_app_exists(req); if ((ret == -EINVAL) || (ret == -ENODEV)) { pr_err(" scm call to check if app is loaded failed"); - return ret; /* scm call failed */ + return ret; } else if (ret > 0) { pr_warn("App id %d (%s) already exists\n", ret, (char *)(req.app_name)); @@ -1470,9 +1492,9 @@ static int qseecom_query_app_loaded(struct qseecom_dev_handle *data, pr_err("copy_to_user failed\n"); return -EFAULT; } - return -EEXIST; /* app already loaded */ + return -EEXIST; } else { - return 0; /* app not loaded */ + return 0; } } @@ -1514,7 +1536,7 @@ static long qseecom_ioctl(struct file *file, unsigned cmd, } case QSEECOM_IOCTL_SEND_CMD_REQ: { pr_debug("ioctl send_cmd_req()\n"); - /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_send_cmd(data, argp); @@ -1527,7 +1549,7 @@ static long qseecom_ioctl(struct file *file, unsigned cmd, } case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: { pr_debug("ioctl send_modfd_cmd_req()\n"); - /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); atomic_inc(&data->ioctl_count); ret = qseecom_send_modfd_cmd(data, argp); @@ -1797,7 +1819,7 @@ static int __devinit qseecom_probe(struct platform_device *pdev) pil = NULL; pil_ref_cnt = 0; } - /* Create ION msm client */ + qseecom.ion_clnt = msm_ion_client_create(0x03, "qseecom-kernel"); if (qseecom.ion_clnt == NULL) { pr_err("Ion client cannot be created\n"); @@ -1805,7 +1827,7 @@ static int __devinit qseecom_probe(struct platform_device *pdev) goto err; } - /* register client for bus scaling */ + if (!pdev->dev.of_node) { qseecom_platform_support = (struct msm_bus_scale_pdata *) pdev->dev.platform_data; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 0468aa59..d4cfd306 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1543,6 +1543,7 @@ void mmc_remove_sd_card(struct work_struct *work) host->bus_ops->remove(host); mmc_claim_host(host); mmc_detach_bus(host); + mdelay(500); mmc_release_host(host); } mmc_bus_put(host); @@ -1803,6 +1804,10 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG); out: diff = ktime_sub(ktime_get(), start); + if (ktime_to_ms(diff) >= 3000) + pr_info("%s: erase(sector %u to %u) takes %lld ms\n", + mmc_hostname(card->host), from, to, ktime_to_ms(diff)); + if (card->host->tp_enable) trace_mmc_request_done(&(card->host->class_dev), MMC_ERASE, from, to - from + 1, ktime_to_ms(diff)); diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 463da6f2..dacd8a85 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ 
b/drivers/mmc/host/msm_sdcc.c @@ -1833,6 +1833,7 @@ static irqreturn_t msmsdcc_irq(int irq, void *dev_id) { struct msmsdcc_host *host = dev_id; + struct mmc_host *mmc = host->mmc; u32 status; int ret = 0; int timer = 0; @@ -1892,6 +1893,13 @@ if (is_wimax_platform(host->plat) && mmc_wimax_get_status()) if (host->plat->sdiowakeup_irq) wake_lock(&host->sdio_wlock); } else { + if (!mmc->card || (mmc->card && + !mmc_card_sdio(mmc->card))) { + pr_warning("%s: SDCC core interrupt received for non-SDIO cards when SDCC clocks are off\n", + mmc_hostname(mmc)); + ret = 1; + break; + } spin_unlock(&host->lock); mmc_signal_sdio_irq(host->mmc); spin_lock(&host->lock); @@ -1920,6 +1928,13 @@ if (is_wimax_platform(host->plat) && mmc_wimax_get_status()) #endif if (status & MCI_SDIOINTROPE) { + if (!mmc->card || (mmc->card && + !mmc_card_sdio(mmc->card))) { + pr_warning("%s: SDIO interrupt (SDIOINTROPE) received for non-SDIO card\n", + mmc_hostname(mmc)); + ret = 1; + break; + } if (host->sdcc_suspending) wake_lock(&host->sdio_suspend_wlock); spin_unlock(&host->lock); diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 4f026b05..46d9de47 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -493,4 +493,13 @@ config MSM_RMNET_USB To compile this driver as a module, choose M here: the module will be called rmnet_usb. If unsure, choose N. +config RIL_PCN001_HTC_QUEUE_URB_TO_DEFERRED_ANCHOR + tristate "RIL_PCN001_HTC_QUEUE_URB_TO_DEFERRED_ANCHOR" + depends on USB_USBNET + help + Sometime the IE browser will return server error when user use wifi hotspot. + It cause by the package will be drop by usbnet when device in suspend/resume process, + usbnet will check device suspend/resume status, + if device not finished the resume process, the urb will be drop. + endmenu diff --git a/drivers/net/usb/rmnet_usb_ctrl.c b/drivers/net/usb/rmnet_usb_ctrl.c index 5498ab91..075f8249 100644 --- a/drivers/net/usb/rmnet_usb_ctrl.c +++ b/drivers/net/usb/rmnet_usb_ctrl.c @@ -1163,7 +1163,7 @@ static const struct file_operations ctrldev_fops = { #define RMNET_CTRL_ITC_AP_ISIS ITC_BIT(3) #define RMNET_CTRL_ITC_AP_VT ITC_BIT(4) #define RMNET_CTRL_ITC_AP_MAX ITC_BIT(4) -#define RMNET_CTRL_ITC_AP_COUNT 3 +#define RMNET_CTRL_ITC_AP_COUNT 3 #define RMNET_CTRL_ITC_AUDIO_INIT RMNET_CTRL_ITC_AP_AUDIO | RMNET_CTRL_ITC_INIT #define RMNET_CTRL_ITC_AUDIO_ENABLE RMNET_CTRL_ITC_AP_AUDIO | RMNET_CTRL_ITC_ENABLE @@ -1174,11 +1174,162 @@ static const struct file_operations ctrldev_fops = { #define RMNET_CTRL_ITC_VT_INIT RMNET_CTRL_ITC_AP_VT | RMNET_CTRL_ITC_INIT #define RMNET_CTRL_ITC_VT_ENABLE RMNET_CTRL_ITC_AP_VT | RMNET_CTRL_ITC_ENABLE #define RMNET_CTRL_ITC_VT_DISABLE RMNET_CTRL_ITC_AP_VT | RMNET_CTRL_ITC_DISABLE +unsigned int rmnet_ctrl_itc_catch = 0; + +inline void set_itc_bit(unsigned int* value, unsigned int bit) +{ + if (!value) + { + pr_err("[%s] value is null\n", __func__); + return; + } + + (*value) |= bit; + return; +} + +inline void clear_itc_bit(unsigned int* value, unsigned int bit) +{ + if (!value) + { + pr_err("[%s] value is null\n", __func__); + return; + } + + if ( (*value) & bit ) + { + (*value) &= ~bit; + } + + return; +} + +int rmnet_ctrl_set_itc_value_check(int value, int* p_enable, int* p_need_set) +{ + int ret = 0; + int enable = 0; + int is_init = 0; + int ap_bit = 0; + int need_set = 1; + int has_ap_in_catch = 0; + char ap_name[128] = {0}; + int i = 0; + + enable = (value & RMNET_CTRL_ITC_ENABLE) ? 1 : 0; + is_init = (value & RMNET_CTRL_ITC_INIT) ? 
1 : 0; + + for ( i = 0; i < RMNET_CTRL_ITC_AP_COUNT; i++) + { + if (value & (RMNET_CTRL_ITC_AP_START << i)) { + ap_bit = (RMNET_CTRL_ITC_AP_START << i); + break; + } + } + + if ( !ap_bit ) + { + + pr_err("[%s] can't find ap, value=[0x%x]\n", __func__, value); + enable = 0; + need_set = 0; + ret = -1; + goto end; + } + + switch (ap_bit) + { + case RMNET_CTRL_ITC_AP_AUDIO: + sprintf( ap_name, "%s", "audio"); + break; + case RMNET_CTRL_ITC_AP_ISIS: + sprintf( ap_name, "%s", "isis"); + break; + case RMNET_CTRL_ITC_AP_VT: + sprintf( ap_name, "%s", "vt"); + break; + default: + break; + } + + pr_info("[%s] value=[0x%x], ap=[%s(0x%x)], enable=[%d], is_init=[%d], rmnet_ctrl_itc_catch=[0x%x]\n", __func__, value, ap_name, ap_bit, enable, is_init, rmnet_ctrl_itc_catch); + + if ( is_init ) + { + + + clear_itc_bit ( &rmnet_ctrl_itc_catch, ap_bit ); + goto check_if_need_set_itc; + } + + if (enable) + { + + set_itc_bit ( &rmnet_ctrl_itc_catch, ap_bit ); + } + else + { + + + clear_itc_bit ( &rmnet_ctrl_itc_catch, ap_bit ); + } + + +check_if_need_set_itc: + + for ( i = 0; i < RMNET_CTRL_ITC_AP_COUNT; i++) + { + if (rmnet_ctrl_itc_catch & (RMNET_CTRL_ITC_AP_START << i)) { + has_ap_in_catch = (RMNET_CTRL_ITC_AP_START << i); + break; + } + } + + if ( has_ap_in_catch ) + { + enable = 1; + } + else + { + enable = 0; + } + + + + + if ( enable != ((rmnet_ctrl_itc_catch & RMNET_CTRL_ITC_ENABLE) ? 1 : 0 ) ) + { + need_set = 1; + + if ( enable ) + { + set_itc_bit ( &rmnet_ctrl_itc_catch, RMNET_CTRL_ITC_ENABLE ); + } + else + { + + clear_itc_bit ( &rmnet_ctrl_itc_catch, RMNET_CTRL_ITC_ENABLE ); + } + } + else + { + need_set = 0; + } + + pr_info("[%s] need_set: value=[0x%x], ap=[%s(0x%x)], enable=[%d], is_init=[%d], rmnet_ctrl_itc_catch=[0x%x], has_ap_in_catch=[%d], need_set=[%d]\n", __func__, value, ap_name, ap_bit, enable, is_init, rmnet_ctrl_itc_catch, has_ap_in_catch, need_set); +end: + *p_enable = enable; + *p_need_set = need_set; + return ret; +} + static int enable_shorten_itc_count = 0; static int rmnet_ctrl_set_itc( struct rmnet_ctrl_dev *dev, int value ) { int ret = 0; int enable = 0; + int is_oldversion = 0; + int check_itc_err = 0; + int need_set_itc = 0; struct usb_device *udev; if (!dev) { @@ -1191,31 +1342,50 @@ static int rmnet_ctrl_set_itc( struct rmnet_ctrl_dev *dev, int value ) { return -ENODEV; } + + mutex_lock(&dev->dev_lock); + udev = interface_to_usbdev(dev->intf); - if ( value & RMNET_CTRL_ITC_INIT ) { - pr_info("[%s][%s] value=[%d], not support init\n", __func__, dev->name, value); - return -ENODEV; + if ( value <= 1 ) { + is_oldversion = 1; } - if ( value & RMNET_CTRL_ITC_ENABLE ) { - enable = 1; + if ( is_oldversion ) { + switch(value) { + case 1://enable + enable = 1; + break; + case 0://disable + enable = 0; + break; + default://other + pr_info("[%s][%s] value=[%d]\n", __func__, dev->name, value); + mutex_unlock(&dev->dev_lock); + return -ENODEV; + } } else { - enable = 0; + check_itc_err = rmnet_ctrl_set_itc_value_check( value, &enable, &need_set_itc ); + pr_info("[%s][%s] value=[%d], check_itc_err=[%d], enable=[%d], need_set_itc=[%d]\n", __func__, dev->name, value, check_itc_err, enable, need_set_itc); + if ( check_itc_err ) { + mutex_unlock(&dev->dev_lock); + return -ENODEV; + } + if ( need_set_itc == 0 ) { + mutex_unlock(&dev->dev_lock); + return ret; + } } - pr_info("[%s][%s] mutex_lock\n", __func__, dev->name); - mutex_lock(&dev->dev_lock); - if (!(test_bit(RMNET_CTRL_DEV_OPEN, &dev->status) && test_bit(RMNET_CTRL_DEV_READY, &dev->status))) { pr_err("[%s] is_opened=[%d], 
resp_available=[%d]\n", __func__, test_bit(RMNET_CTRL_DEV_OPEN, &dev->status), test_bit(RMNET_CTRL_DEV_READY, &dev->status)); mutex_unlock(&dev->dev_lock); return -ENODEV; } - pr_info("[%s] is_opened=[%d], resp_available=[%d]\n", __func__, test_bit(RMNET_CTRL_DEV_OPEN, &dev->status), test_bit(RMNET_CTRL_DEV_READY, &dev->status)); - pr_info("[%s] enable_shorten_itc_count:%d enable:%d\n", __func__, enable_shorten_itc_count, enable); + pr_info("[%s] is_opened=[%d], resp_available=[%d], enable_shorten_itc_count=[%d], enable=[%d], is_oldversion=[%d]\n", __func__, test_bit(RMNET_CTRL_DEV_OPEN, &dev->status), test_bit(RMNET_CTRL_DEV_READY, &dev->status), enable_shorten_itc_count, enable, is_oldversion); + if ( enable ) { if (enable_shorten_itc_count == 0) { @@ -1226,9 +1396,10 @@ static int rmnet_ctrl_set_itc( struct rmnet_ctrl_dev *dev, int value ) { return -ENODEV; } - pr_info("[%s][%s] usb_set_interrupt_latency(1)+\n", __func__, dev->name); + pr_info("[%s][%s] usb_set_interrupt_latency(1)\n", __func__, dev->name); + ret = usb_set_interrupt_latency(udev, HSIC_FAST_INTERRUPT_LATENCY); - pr_info("[%s][%s] usb_set_interrupt_latency-\n", __func__, dev->name); + if ( dev->intf ) usb_autopm_put_interface(dev->intf); else @@ -1248,9 +1419,10 @@ static int rmnet_ctrl_set_itc( struct rmnet_ctrl_dev *dev, int value ) { mutex_unlock(&dev->dev_lock); return -ENODEV; } - pr_info("[%s][%s] usb_set_interrupt_latency(6)+\n", __func__, dev->name); + pr_info("[%s][%s] usb_set_interrupt_latency(6)\n", __func__, dev->name); + ret = usb_set_interrupt_latency(udev, HSIC_SLOW_INTERRUPT_LATENCY); - pr_info("[%s][%s] usb_set_interrupt_latency-\n", __func__, dev->name); + if ( dev->intf ) usb_autopm_put_interface(dev->intf); else @@ -1259,7 +1431,7 @@ static int rmnet_ctrl_set_itc( struct rmnet_ctrl_dev *dev, int value ) { } } mutex_unlock(&dev->dev_lock); - pr_info("[%s][%s] mutex_unlock\n", __func__, dev->name); + return ret; } @@ -1706,8 +1878,16 @@ int rmnet_usb_ctrl_init(int no_rmnet_devs, int no_rmnet_insts_per_dev) __func__, PTR_ERR(dev->devicep)); cdev_del(&dev->cdev); destroy_workqueue(dev->wq); + + status = PTR_ERR(dev->devicep); + kfree(dev); - return PTR_ERR(dev->devicep); + + + + + return status; + } diff --git a/drivers/net/usb/rmnet_usb_data.c b/drivers/net/usb/rmnet_usb_data.c index 74018713..ed6e5f22 100644 --- a/drivers/net/usb/rmnet_usb_data.c +++ b/drivers/net/usb/rmnet_usb_data.c @@ -138,6 +138,15 @@ module_param_cb(rmnet_data_init, &rmnet_init_ops, &rmnet_data_init, S_IRUGO | S_IWUSR); static void rmnet_usb_setup(struct net_device *, int mux_enabled); + +#if defined(CONFIG_MONITOR_STREAMING_PORT_SOCKET) && defined(CONFIG_MSM_NONSMD_PACKET_FILTER) +#define EXTEND_AUTOSUSPEND_TIMER 3000 +extern bool is_streaming_sock_connectted; + +static int original_autosuspend_timer = 0; +static bool use_extend_suspend_timer = false; +#endif + static int rmnet_ioctl(struct net_device *, struct ifreq *, int); static int rmnet_usb_suspend(struct usb_interface *iface, pm_message_t message) @@ -186,6 +195,20 @@ static int rmnet_usb_suspend(struct usb_interface *iface, pm_message_t message) netif_device_attach(unet->net); } + #if defined(CONFIG_MONITOR_STREAMING_PORT_SOCKET) && defined(CONFIG_MSM_NONSMD_PACKET_FILTER) + if (use_extend_suspend_timer) { + if (original_autosuspend_timer != 0) { + struct usb_device *udev= unet->udev; + + if (udev) { + use_extend_suspend_timer= false; + pm_runtime_set_autosuspend_delay(&udev->dev, original_autosuspend_timer); + dev_err(&udev->dev, "is_streaming_sock_connectted:%d 
pm_runtime_set_autosuspend_delay %d\n", is_streaming_sock_connectted, original_autosuspend_timer); + } + } + } + #endif + return 0; abort_suspend: @@ -364,6 +387,11 @@ static struct sk_buff *rmnet_usb_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { struct QMI_QOS_HDR_S *qmih; + struct mux_hdr *hdr; + unsigned int len_before = skb->len; + unsigned int len_after; + char event[128]; + bool muxing = false; if (test_bit(RMNET_MODE_QOS, &dev->data[0])) { if (test_bit(RMNET_MODE_ALIGNED_QOS, &dev->data[0])) { @@ -378,14 +406,32 @@ static struct sk_buff *rmnet_usb_tx_fixup(struct usbnet *dev, qmih->flow_id = skb->mark; } - if (dev->data[4]) + if (skb && dev->data[4]) { + muxing = true; skb = rmnet_usb_data_mux(skb, dev->data[3]); + } if (skb) DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n", dev->net->name, dev->net->stats.tx_packets, skb->len, skb->mark); + if ( !skb ) { + pr_err("%s: skb is null\n", __func__); + goto out; + } + + if (!muxing) + goto out; + + hdr = (struct mux_hdr *)skb->data; + len_after = skb->len; + snprintf(event, 128, + "skb=%p,len1=%d, len2=%d, mux_id=%d, pad_info=%d, pkt_len_w_pad=%d", + skb, len_before, len_after, hdr->mux_id, + hdr->padding_info , hdr->pkt_len_w_padding); + dbg_log_event_debug(dev, event); +out: return skb; } @@ -446,6 +492,20 @@ static int rmnet_usb_rx_fixup(struct usbnet *dev, struct sk_buff *skb) else skb->protocol = 0; + #if defined(CONFIG_MONITOR_STREAMING_PORT_SOCKET) && defined(CONFIG_MSM_NONSMD_PACKET_FILTER) + if (is_streaming_sock_connectted) { + if (!use_extend_suspend_timer) { + struct usb_device *udev = dev->udev; + + if (udev) { + use_extend_suspend_timer = true; + pm_runtime_set_autosuspend_delay(&udev->dev, EXTEND_AUTOSUSPEND_TIMER); + dev_info(&udev->dev, "is_streaming_sock_connectted:%d pm_runtime_set_autosuspend_delay %d\n", is_streaming_sock_connectted, EXTEND_AUTOSUSPEND_TIMER); + } + } + } + #endif + DBG1("[%s] Rx packet #%lu len=%d\n", dev->net->name, dev->net->stats.rx_packets, skb->len); @@ -812,6 +872,11 @@ static int rmnet_usb_probe(struct usb_interface *iface, pm_runtime_set_autosuspend_delay(&udev->dev, 1000); pm_runtime_set_autosuspend_delay(&udev->parent->dev, 200); + + #if defined(CONFIG_MONITOR_STREAMING_PORT_SOCKET) && defined(CONFIG_MSM_NONSMD_PACKET_FILTER) + original_autosuspend_timer = udev->dev.power.autosuspend_delay; + dev_info(&udev->dev, "original_autosuspend_timer:%d\n", original_autosuspend_timer); + #endif } return 0; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index a909fed3..033cb9f8 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -66,6 +66,26 @@ static int msg_level = -1; module_param (msg_level, int, 0); MODULE_PARM_DESC (msg_level, "Override default message level"); +void +dbg_log_event_debug(struct usbnet *dev, char *event) +{ + unsigned long flags; + unsigned long long t; + unsigned long nanosec; + + if ( !dev || !event ) + return; + + write_lock_irqsave(&dev->dbg_lock, flags); + t = cpu_clock(smp_processor_id()); + nanosec = do_div(t, 1000000000)/1000; + scnprintf(dev->dbgbuf[dev->dbg_idx], DBG_MSG_LEN, "%5lu.%06lu:%s", + (unsigned long)t, nanosec, event); + dev->dbg_idx++; + dev->dbg_idx = dev->dbg_idx % DBG_MAX_MSG; + write_unlock_irqrestore(&dev->dbg_lock, flags); +} + static bool enable_tx_rx_debug = false; static bool usb_pm_debug_enabled = false; @@ -977,6 +997,11 @@ static void tx_complete (struct urb *urb) struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = 
entry->dev; + char event[128]; + + snprintf(event, 128, "skb=%p, actual_len=%d, status=%d", + skb, urb->actual_length, urb->status); + dbg_log_event_debug(dev, event); if (enable_tx_rx_debug) @@ -1102,16 +1127,44 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, spin_lock_irqsave(&dev->txq.lock, flags); retval = usb_autopm_get_interface_async(dev->intf); +#ifdef CONFIG_RIL_PCN001_HTC_QUEUE_URB_TO_DEFERRED_ANCHOR + + if (retval < 0 && ( retval != -EACCES ) ) { +#else if (retval < 0) { +#endif spin_unlock_irqrestore(&dev->txq.lock, flags); netdev_info(dev->net, "%s usb_autopm_get_interface_async return: %d\n",__func__, retval); goto drop; +#ifdef CONFIG_RIL_PCN001_HTC_QUEUE_URB_TO_DEFERRED_ANCHOR + } else if ( retval == -EACCES ) { + netdev_info(dev->net, "%s usb_autopm_get_interface_async return: %d, try Delaying transmission for resumption\n",__func__, retval); +#endif } else if (retval > 0) netdev_info(dev->net, "%s usb_autopm_get_interface_async return: %d\n",__func__, retval); #ifdef CONFIG_PM +#ifdef CONFIG_RIL_PCN001_HTC_QUEUE_URB_TO_DEFERRED_ANCHOR + + if ( retval == -EACCES ) { + usb_anchor_urb(urb, &dev->deferred); + + netif_stop_queue(net); + usb_put_urb(urb); + spin_unlock_irqrestore(&dev->txq.lock, flags); + netdev_info(dev->net, "Delaying transmission for resumption\n"); + + + dev->udev->bus->skip_resume = false; + + goto deferred; + } + + else if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { +#else if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { +#endif usb_anchor_urb(urb, &dev->deferred); @@ -1367,6 +1420,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) init_timer (&dev->delay); mutex_init (&dev->phy_mutex); + dev->dbg_idx = 0; + dev->dbg_lock = __RW_LOCK_UNLOCKED(lck); + dev->net = net; strcpy (net->name, "usb%d"); memcpy (net->dev_addr, node_id, sizeof node_id); @@ -1507,7 +1563,6 @@ int usbnet_resume (struct usb_interface *intf) spin_lock_irq(&dev->txq.lock); while ((res = usb_get_from_anchor(&dev->deferred))) { - skb = (struct sk_buff *)res->context; retval = usb_submit_urb(res, GFP_ATOMIC); if (retval < 0) { @@ -1520,6 +1575,12 @@ int usbnet_resume (struct usb_interface *intf) } } +#ifdef CONFIG_RIL_PCN001_HTC_QUEUE_URB_TO_DEFERRED_ANCHOR + if ( dev->udev->bus->skip_resume == false ) { + pr_info("%s: set skip_resume to true\n", __func__); + dev->udev->bus->skip_resume = true; + } +#endif smp_mb(); clear_bit(EVENT_DEV_ASLEEP, &dev->flags); spin_unlock_irq(&dev->txq.lock); @@ -1563,4 +1624,4 @@ module_exit(usbnet_exit); MODULE_AUTHOR("David Brownell"); MODULE_DESCRIPTION("USB network driver framework"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL"); \ No newline at end of file diff --git a/drivers/net/wireless/bcmdhd_4335_mcc/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd_4335_mcc/dhd_custom_gpio.c index 52858125..74d6de77 100644 --- a/drivers/net/wireless/bcmdhd_4335_mcc/dhd_custom_gpio.c +++ b/drivers/net/wireless/bcmdhd_4335_mcc/dhd_custom_gpio.c @@ -269,7 +269,7 @@ const struct cntry_locales_custom translate_custom_table[] = { {"IS", "IS", 3}, {"IT", "IT", 3}, {"JO", "JO", 4}, - {"JP", "JP", 36}, + {"JP", "JP", 45}, {"KR", "KR", 45}, {"KZ", "KZ", 1}, {"KW", "KW", 5}, diff --git a/drivers/net/wireless/bcmdhd_4335_mcc/dhd_pno.c b/drivers/net/wireless/bcmdhd_4335_mcc/dhd_pno.c index f6d3f00f..d94220d5 100644 --- a/drivers/net/wireless/bcmdhd_4335_mcc/dhd_pno.c +++ b/drivers/net/wireless/bcmdhd_4335_mcc/dhd_pno.c @@ -1323,8 +1323,10 @@ _dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) exit: if (plbestnet) 
MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN); - _params->params_batch.get_batch.buf = NULL; - _params->params_batch.get_batch.bufsize = 0; + if(_params){ + _params->params_batch.get_batch.buf = NULL; + _params->params_batch.get_batch.bufsize = 0; + } mutex_unlock(&_pno_state->pno_mutex); complete(&_pno_state->get_batch_done); return err; diff --git a/drivers/net/wireless/bcmdhd_4335_mcc/wl_cfg80211.c b/drivers/net/wireless/bcmdhd_4335_mcc/wl_cfg80211.c index 9d3926f9..57fa1011 100644 --- a/drivers/net/wireless/bcmdhd_4335_mcc/wl_cfg80211.c +++ b/drivers/net/wireless/bcmdhd_4335_mcc/wl_cfg80211.c @@ -3495,7 +3495,7 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, RETURN_EIO_IF_NOT_UP(wl); -#if 0 +#if 0 if(dev == wl_to_prmry_ndev(wl)){ printf("%s sme->ssid[%s],sme->ssid_len[%d]\n", __FUNCTION__, sme->ssid,sme->ssid_len); dhd_set_pfn_ssid(sme->ssid, sme->ssid_len); @@ -8522,10 +8522,12 @@ wl_notify_pfn_status(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev, ndev = cfgdev_to_wlc_ndev(cfgdev, wl); #ifndef WL_SCHED_SCAN +#ifndef CUSTOMER_HW_ONE mutex_lock(&wl->usr_sync); cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL); mutex_unlock(&wl->usr_sync); +#endif #else wl_notify_sched_scan_results(wl, ndev, e, data); #endif diff --git a/drivers/power/pm8921-bms-htc.c b/drivers/power/pm8921-bms-htc.c index 80eb095a..d8c69961 100644 --- a/drivers/power/pm8921-bms-htc.c +++ b/drivers/power/pm8921-bms-htc.c @@ -31,6 +31,7 @@ #include #include #include +#include #ifdef CONFIG_HTC_BATT_8960 #include "mach/htc_battery_cell.h" @@ -108,6 +109,13 @@ struct pm8921_rbatt_params { int vbatt_for_rbatt_uv; }; +struct pm8921_battery_data_store { + int store_soc; + int store_ocv_uv; + int store_cc; + unsigned long store_currtime_ms; +}; + struct pm8921_bms_chip { struct device *dev; struct dentry *dent; @@ -159,6 +167,7 @@ struct pm8921_bms_chip { int level_ocv_update_stop_end; unsigned int criteria_sw_est_ocv; unsigned int rconn_mohm_sw_est_ocv; + int qb_mode_cc_criteria_uAh; void (*get_pj_status) (int *full, int *status, int *exist); struct single_row_lut *pj_vth_discharge_lut; struct single_row_lut *pj_dvi_discharge_lut; @@ -179,6 +188,7 @@ struct pm8921_bms_debug { int batt_temp; int soc_rbatt; int last_ocv_raw_uv; + int cc_uah; }; static struct pm8921_bms_debug bms_dbg; @@ -243,11 +253,18 @@ static int sw_ocv_update_stop_active_mask = OCV_UPDATE_STOP_BIT_ATTR_FILE | OCV_UPDATE_STOP_BIT_BOOT_UP; static int sw_ocv_update_stop_reason; static int level_dropped_after_cable_out = 5; -static int level_dropped_after_boot_up = 5; static int new_boot_soc; +static unsigned long allow_ocv_time = 0; +static int store_soc_ui = -1; +static int consistent_flag = 0; static int bms_discharge_percent; static int is_ocv_update_start; +static struct pm8921_battery_data_store store_emmc; struct mutex ocv_update_lock; +static bool qb_mode_enter = false; +static int qb_mode_cc_accumulation_uah, qb_mode_prev_cc; +static int qb_mode_ocv_start = 0, qb_mode_cc_start = 0, qb_mode_over_criteria_count = 0; +static unsigned long qb_mode_time_accumulation = 0; static int bms_ro_ops_set(const char *val, const struct kernel_param *kp) { @@ -1449,6 +1466,7 @@ static int calculate_state_of_charge(struct pm8921_bms_chip *chip, &cc_uah, &rbatt); bms_dbg.batt_temp = batt_temp; + bms_dbg.cc_uah = cc_uah; remaining_usable_charge_uah = remaining_charge_uah @@ -1475,7 +1493,7 @@ static int calculate_state_of_charge(struct pm8921_bms_chip *chip, "SOC_R=%d,start_percent=%d,end_percent=%d,OCV=%d,OCV_raw=%d," 
"rbatt=%d,rbatt_sf=%d,batt_temp=%d,soc_rbatt=%d,last_rbatt=%d," "V_unusable_uv=%d,pc_unusable=%d,rc_pc=%d,scalefactor=%d," - "no_ocv_update_ms=%lu\n", + "no_ocv_update_ms=%lu,consistent=%d\n", fcc_uah, unusable_charge_uah, remaining_charge_uah, cc_uah, chip->cc_backup_uv, remaining_usable_charge_uah, soc, soc_remainder, chip->start_percent, chip->end_percent, @@ -1483,7 +1501,7 @@ static int calculate_state_of_charge(struct pm8921_bms_chip *chip, bms_dbg.rbatt, bms_dbg.rbatt_sf, bms_dbg.batt_temp, bms_dbg.soc_rbatt, last_rbatt, bms_dbg.voltage_unusable_uv, bms_dbg.pc_unusable, bms_dbg.rc_pc, bms_dbg.scalefactor, - htc_batt_bms_timer.no_ocv_update_period_ms); + htc_batt_bms_timer.no_ocv_update_period_ms, consistent_flag); } if (soc > 100) @@ -1931,10 +1949,15 @@ int pm8921_bms_get_batt_current(int *result) int pm8921_bms_get_batt_soc(int *result) { - int batt_temp, rc; + int batt_temp, rc, state_of_charge; struct pm8xxx_adc_chan_result temp_result; struct pm8921_soc_params raw; unsigned long time_since_last_update_ms, cur_jiffies; + struct timespec xtime; + unsigned long currtime_ms; + + xtime = CURRENT_TIME; + currtime_ms = xtime.tv_sec * MSEC_PER_SEC + xtime.tv_nsec / NSEC_PER_MSEC; if (!the_chip) { pr_err("called before initialization\n"); @@ -1964,24 +1987,43 @@ int pm8921_bms_get_batt_soc(int *result) *result = calculate_state_of_charge(the_chip, &raw, batt_temp, last_chargecycles, 1); + + state_of_charge = *result; + if (bms_discharge_percent && ((bms_discharge_percent - *result) >= level_dropped_after_cable_out)) { - pr_info("OCV can be update due to %d - %d >= %d\n", + pr_info("OCV can be update due to %d - %d >= %d (OCV_UPDATE_STOP_BIT_CABLE_IN)\n", bms_discharge_percent, *result, level_dropped_after_cable_out); bms_discharge_percent = 0; disable_ocv_update_with_reason(false, OCV_UPDATE_STOP_BIT_CABLE_IN); } - if (new_boot_soc && - ((new_boot_soc - *result) >= - level_dropped_after_boot_up)) { - pr_info("OCV can be update due to %d - %d >= %d\n", - new_boot_soc, *result, - level_dropped_after_boot_up); + + if (new_boot_soc && allow_ocv_time && + (currtime_ms >= allow_ocv_time)) { + pr_info("OCV can be update due to currtime(%lu) >= allow_ocv_time(%lu) " + "(OCV_UPDATE_STOP_BIT_BOOT_UP)\n", + currtime_ms, allow_ocv_time); new_boot_soc = 0; + allow_ocv_time = 0; disable_ocv_update_with_reason(false, OCV_UPDATE_STOP_BIT_BOOT_UP); } + + if (the_chip->store_batt_data_soc_thre > 0 + && state_of_charge <= the_chip->store_batt_data_soc_thre + && (store_soc_ui >= 0 && store_soc_ui <= 100)) { + store_emmc.store_soc = store_soc_ui; + store_emmc.store_ocv_uv = raw.last_good_ocv_uv; + store_emmc.store_cc = raw.cc - the_chip->cc_backup_uv; + store_emmc.store_currtime_ms = currtime_ms; + } + + pr_info("%s: state_of_charge=%d, store_soc_ui=%d, last_good_ocv_uv=%d, raw.cc=%d, " + "stored_cc_uv:%d, currtime_ms=%lu\n", __func__, + state_of_charge, store_soc_ui, raw.last_good_ocv_uv, raw.cc, + (raw.cc - the_chip->cc_backup_uv), currtime_ms); + if (the_chip->level_ocv_update_stop_begin && the_chip->level_ocv_update_stop_end) { if (*result >= the_chip->level_ocv_update_stop_begin && @@ -2000,6 +2042,45 @@ int pm8921_bms_get_batt_cc(int *result) return 0; } + +int pm8921_bms_store_battery_data_emmc(void) +{ + if ((the_chip->store_batt_data_soc_thre > 0) + && (store_emmc.store_soc > 0) + && (store_emmc.store_soc <= the_chip->store_batt_data_soc_thre)) { + emmc_misc_write(BMS_STORE_MAGIC_NUM, BMS_STORE_MAGIC_OFFSET); + emmc_misc_write(store_emmc.store_soc, BMS_STORE_SOC_OFFSET); + 
emmc_misc_write(store_emmc.store_ocv_uv, BMS_STORE_OCV_OFFSET); + emmc_misc_write(store_emmc.store_cc, BMS_STORE_CC_OFFSET); + emmc_misc_write(store_emmc.store_currtime_ms, BMS_STORE_CURRTIME_OFFSET); + + pr_info("%s: state_of_charge=%d, last_good_ocv_uv=%d, " + "stored_cc_uv:%d, currtime_ms=%lu\n", + __func__, store_emmc.store_soc, store_emmc.store_ocv_uv, + store_emmc.store_cc, store_emmc.store_currtime_ms); + } + return 0; +} + +int pm8921_bms_store_battery_ui_soc(int soc_ui) +{ + if (soc_ui <= 0 || soc_ui > 100) + return -EINVAL; + + store_soc_ui = soc_ui; + + return 0; +} +int pm8921_bms_get_battery_ui_soc(void) +{ + pr_info("%s: batt_stored_soc: %d\n", __func__, batt_stored_soc); + + + if (batt_stored_soc <= 0 || batt_stored_soc > 100 || !consistent_flag) + return -EINVAL; + + return batt_stored_soc; +} #endif #define IBAT_TOL_MASK 0x0F @@ -2054,11 +2135,6 @@ void pm8921_bms_charging_end(int is_battery_full) int batt_temp, rc; struct pm8xxx_adc_chan_result result; struct pm8921_soc_params raw; - struct timespec xtime; - unsigned long currtime_ms; - - xtime = CURRENT_TIME; - currtime_ms = xtime.tv_sec * MSEC_PER_SEC + xtime.tv_nsec / NSEC_PER_MSEC; if (the_chip == NULL) return; @@ -2147,23 +2223,14 @@ void pm8921_bms_charging_end(int is_battery_full) } } - - if (the_chip->store_batt_data_soc_thre > 0 - && !usb_chg_plugged_in() - && bms_end_percent < the_chip->store_batt_data_soc_thre - && board_mfg_mode() == 5 ) { - emmc_misc_write(BMS_STORE_MAGIC_NUM, BMS_STORE_MAGIC_OFFSET); - emmc_misc_write(bms_end_percent, BMS_STORE_SOC_OFFSET); - emmc_misc_write(raw.last_good_ocv_uv, BMS_STORE_OCV_OFFSET); - emmc_misc_write(raw.cc, BMS_STORE_CC_OFFSET); - emmc_misc_write(currtime_ms, BMS_STORE_CURRTIME_OFFSET); + if ((the_chip->store_batt_data_soc_thre > 0) && + !usb_chg_plugged_in() && (board_mfg_mode() == 5)) { + pm8921_bms_store_battery_data_emmc(); } - pr_info("end_percent=%d%%, last_charge_increase=%d, last_chargecycles=%d, " - "board_mfg_mode=%d, bms_end_percent=%d, last_good_ocv_uv=%d, raw.cc=%x, " - "currtime_ms=%ld\n", - the_chip->end_percent, last_charge_increase, last_chargecycles, - board_mfg_mode(), bms_end_percent, raw.last_good_ocv_uv, raw.cc, currtime_ms); + pr_info("end_percent=%d%%, last_charge_increase=%d, last_chargecycles=%d, ", + the_chip->end_percent, last_charge_increase, last_chargecycles); + the_chip->start_percent = -EINVAL; the_chip->end_percent = -EINVAL; pm_bms_masked_write(the_chip, BMS_TOLERANCES, @@ -2956,6 +3023,106 @@ int pm8921_bms_get_attr_text(char *buf, int size) return len; } EXPORT_SYMBOL(pm8921_bms_get_attr_text); +#if 1 +int pm8921_bms_enter_qb_mode(void) +{ + if (!the_chip) { + pr_err("called before init\n"); + return -EINVAL; + } + + if(the_chip->qb_mode_cc_criteria_uAh) { + qb_mode_enter = true; + qb_mode_cc_start = bms_dbg.cc_uah; + qb_mode_ocv_start = bms_dbg.last_ocv_raw_uv; + qb_mode_cc_accumulation_uah = 0; + qb_mode_time_accumulation = 0; + qb_mode_prev_cc = 0; + qb_mode_over_criteria_count = 0; + htc_gauge_event_notify(HTC_GAUGE_EVENT_QB_MODE_ENTER); + } + return 0; +} + +int pm8921_bms_exit_qb_mode(void) +{ + if (!the_chip) { + pr_err("called before init\n"); + return -EINVAL; + } + + if(the_chip->qb_mode_cc_criteria_uAh) { + qb_mode_enter = false; + qb_mode_cc_accumulation_uah = 0; + qb_mode_cc_start = 0; + qb_mode_ocv_start = 0; + qb_mode_time_accumulation = 0; + qb_mode_prev_cc = 0; + qb_mode_over_criteria_count = 0; + } + return 0; +} + +#define SIXTY_MINUTES_MS (1000 * (3600 - 10)) +int pm8921_qb_mode_pwr_consumption_check(unsigned long 
time_since_last_update_ms) +{ + if (!the_chip) { + pr_err("called before init\n"); + return -EINVAL; + } + + if(qb_mode_enter && the_chip->qb_mode_cc_criteria_uAh) { + qb_mode_time_accumulation += time_since_last_update_ms; + + if(qb_mode_ocv_start != bms_dbg.last_ocv_raw_uv) { + + qb_mode_cc_accumulation_uah += bms_dbg.cc_uah; + + qb_mode_prev_cc = qb_mode_cc_start = bms_dbg.cc_uah; + + pr_info("ocv update happened OCV_uV/ori=%duV/%duV, cc_value:%d\n", + bms_dbg.last_ocv_raw_uv, qb_mode_ocv_start, bms_dbg.cc_uah); + qb_mode_ocv_start = bms_dbg.last_ocv_raw_uv; + } else { + if(!qb_mode_prev_cc) + + qb_mode_cc_accumulation_uah = (bms_dbg.cc_uah - qb_mode_cc_start); + else + qb_mode_cc_accumulation_uah += (bms_dbg.cc_uah - qb_mode_prev_cc); + qb_mode_prev_cc = bms_dbg.cc_uah; + } + + if(qb_mode_time_accumulation >= SIXTY_MINUTES_MS) { + if(qb_mode_cc_accumulation_uah > the_chip->qb_mode_cc_criteria_uAh) { + qb_mode_over_criteria_count++; + pr_warn("QB mode cc over criteria, cc_accu=%d, time_accu=%lu, count=%d\n", + qb_mode_cc_accumulation_uah, qb_mode_time_accumulation, + qb_mode_over_criteria_count); + } else + qb_mode_over_criteria_count = 0; + + qb_mode_time_accumulation = 0; + qb_mode_cc_accumulation_uah = 0; + qb_mode_cc_start = bms_dbg.cc_uah; + } + + pr_info("qb_start_ocv_uV=%d,qb_start_cc_uAh=%d,qb_current_cc_uAh=%d," + "qb_cc_accumulate_uAh=%d,qb_time_accumulate_us=%lu," + "qb_cc_criteria_uAh=%d,over_cc_criteria_count=%d\n", + qb_mode_ocv_start, qb_mode_cc_start, qb_mode_prev_cc, + qb_mode_cc_accumulation_uah, qb_mode_time_accumulation, + the_chip->qb_mode_cc_criteria_uAh, qb_mode_over_criteria_count); + + if(qb_mode_over_criteria_count >= 3) { + pr_info("Force device shutdown due to over QB mode CC criteria!\n"); + htc_gauge_event_notify(HTC_GAUGE_EVENT_QB_MODE_DO_REAL_POWEROFF); + } + } else { + + } + return 0; +} +#endif static void create_debugfs_entries(struct pm8921_bms_chip *chip) { @@ -3291,6 +3458,7 @@ static int __devinit pm8921_bms_probe(struct platform_device *pdev) chip->ref1p25v_channel = pdata->bms_cdata.ref1p25v_channel; chip->batt_id_channel = pdata->bms_cdata.batt_id_channel; chip->get_pj_status = pdata->get_power_jacket_status; + chip->qb_mode_cc_criteria_uAh = pdata->qb_mode_cc_criteria_uAh; chip->revision = pm8xxx_get_revision(chip->dev->parent); chip->version = pm8xxx_get_version(chip->dev->parent); INIT_WORK(&chip->calib_hkadc_work, calibrate_hkadc_work); @@ -3358,7 +3526,8 @@ static int __devinit pm8921_bms_probe(struct platform_device *pdev) if (batt_stored_magic_num == BMS_STORE_MAGIC_NUM && the_chip->store_batt_data_soc_thre > 0 - && (curr_soc - batt_stored_soc) > 5 + && (curr_soc <= the_chip->store_batt_data_soc_thre) + && ((curr_soc > batt_stored_soc) || ((batt_stored_soc - curr_soc) >= 2)) && (currtime_ms - batt_stored_time_ms) < 3600000 ) { read_soc_params_raw(the_chip, &raw); chip->cc_backup_uv = raw.cc - batt_stored_cc_uv; @@ -3371,14 +3540,18 @@ static int __devinit pm8921_bms_probe(struct platform_device *pdev) new_boot_soc = pm8921_bms_get_percent_charge(); disable_ocv_update_with_reason(true, OCV_UPDATE_STOP_BIT_BOOT_UP); + + allow_ocv_time = currtime_ms + 3600000; + + consistent_flag = 1; } pm8921_store_hw_reset_reason(0); - pr_info("OK battery_capacity_at_boot=%d, new_boot_soc=%d, volt=%d, " + pr_info("consistent=%d, battery_capacity_at_boot=%d, new_boot_soc=%d, volt=%d, " "ocv=%d, batt_magic_num=%x, stored_soc=%d, curr_time=%ld, " "stored_time=%ld\n", - curr_soc, new_boot_soc, vbatt, last_ocv_uv, + consistent_flag, curr_soc, new_boot_soc, 
vbatt, last_ocv_uv, batt_stored_magic_num, batt_stored_soc, currtime_ms, batt_stored_time_ms); pr_info("r_sense=%u,i_test=%u,v_failure=%u,default_rbatt_mohm=%d\n", @@ -3434,4 +3607,4 @@ module_exit(pm8921_bms_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PMIC8921 bms driver"); MODULE_VERSION("1.0"); -MODULE_ALIAS("platform:" PM8921_BMS_DEV_NAME); +MODULE_ALIAS("platform:" PM8921_BMS_DEV_NAME); \ No newline at end of file diff --git a/drivers/slimbus/slim-msm-ctrl.c b/drivers/slimbus/slim-msm-ctrl.c index ea52b04b..6fa53051 100644 --- a/drivers/slimbus/slim-msm-ctrl.c +++ b/drivers/slimbus/slim-msm-ctrl.c @@ -2075,10 +2075,28 @@ static int __devinit msm_slim_probe(struct platform_device *pdev) dev_dbg(dev->dev, "MSM SB controller is up!\n"); return 0; +err_clk_get_failed: + { + int i; + for (i = 0; i < dev->nsats; i++) { + struct msm_slim_sat *sat = dev->satd[i]; + int j; + if (!sat) + continue; + for (j = 0; j < sat->nsatch; j++) + slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh); + slim_remove_device(&sat->satcl); + kfree(sat->satch); + if (sat->wq) + destroy_workqueue(sat->wq); + kfree(sat->satcl.name); + kfree(sat); + } + } + slim_del_controller(&dev->ctrl); err_ctrl_failed: + free_irq(dev->irq, dev); writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver)); -err_clk_get_failed: - kfree(dev->satd); err_request_irq_failed: msm_slim_sps_exit(dev); err_sps_init_failed: diff --git a/drivers/tty/serial/msm_serial_hs_cir_lite.c b/drivers/tty/serial/msm_serial_hs_cir_lite.c index 771441cb..50b1ab40 100644 --- a/drivers/tty/serial/msm_serial_hs_cir_lite.c +++ b/drivers/tty/serial/msm_serial_hs_cir_lite.c @@ -974,7 +974,7 @@ static void msm_hsl_release_port_cir(struct uart_port *port) D("%s () uart_resource:port->line %d, ir\n", __func__, port->line); uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); } - if (unlikely(!uart_resource)) + if (unlikely(!uart_resource)||(!uart_resource)) return; size = uart_resource->end - uart_resource->start + 1; @@ -1006,7 +1006,7 @@ static int msm_hsl_request_port_cir(struct uart_port *port) D("%s ():uart_resource :port->line %d, ir\n", __func__, port->line); uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); } - if (unlikely(!uart_resource)) { + if (unlikely(!uart_resource)||(!uart_resource)) { E("%s: can't get uartdm resource\n", __func__); return -ENXIO; } @@ -1036,7 +1036,7 @@ static int msm_hsl_request_port_cir(struct uart_port *port) IORESOURCE_MEM, 1); D("%s ():gsbi_resource :port->line %d, ir\n", __func__, port->line); } - if (unlikely(!gsbi_resource)) { + if (unlikely(!gsbi_resource)||(!gsbi_resource)) { E("%s: can't get gsbi resource\n", __func__); return -ENXIO; } @@ -1638,7 +1638,7 @@ printk(KERN_INFO "msm_serial_cir: get uartdm_clk\n"); "uartdm_resource"); if (!uart_resource) uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (unlikely(!uart_resource)) { + if (unlikely(!uart_resource)||(!uart_resource)) { printk(KERN_ERR "getting uartdm_resource failed\n"); return -ENXIO; } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index bee2c893..5b4ef503 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2664,7 +2664,6 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, } udev->descriptor.bMaxPacketSize0 = buf->bMaxPacketSize0; - kfree(buf); if (le16_to_cpu(buf->idVendor) != 0x1a0a) { retval = hub_port_reset(hub, port1, udev, @@ -2685,6 +2684,8 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, retval = -EMSGSIZE; continue; } + 
+ kfree(buf); #undef GET_DESCRIPTOR_BUFSIZE } diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c index 7c6b8c2f..8af12cb2 100644 --- a/drivers/usb/gadget/android.c +++ b/drivers/usb/gadget/android.c @@ -325,6 +325,7 @@ static void android_work(struct work_struct *data) pr_info("%s: sent uevent %s\n", __func__, uevent_envp[0]); if (dev->pdata->vzw_unmount_cdrom) { cancel_delayed_work(&cdev->cdusbcmd_vzw_unmount_work); + cdev->unmount_cdrom_mask = 1 << 3 | 1 << 4; schedule_delayed_work(&cdev->cdusbcmd_vzw_unmount_work,30 * HZ); } } else { @@ -518,6 +519,19 @@ static void adb_closed_callback(void) } +static void adb_read_timeout(void) +{ + struct android_dev *dev = _android_dev; + + pr_info("%s: adb read timeout, re-connect to PC\n", __func__); + + if (dev) { + android_disable(dev); + mdelay(100); + android_enable(dev); + } +} + static int rmnet_smd_function_bind_config(struct android_usb_function *f, struct usb_configuration *c) @@ -1308,7 +1322,7 @@ static int ncm_function_bind_config(struct android_usb_function *f, if (c->cdev->gadget) c->cdev->gadget->miMaxMtu = ETH_FRAME_LEN_MAX - ETH_HLEN; - ret = gether_setup_name(c->cdev->gadget, ncm->ethaddr, "usb"); + ret = gether_setup_name(c->cdev->gadget, ncm->ethaddr, "ncm"); if (ret) { pr_err("%s: gether_setup failed\n", __func__); return ret; @@ -1394,7 +1408,7 @@ rndis_function_init(struct android_usb_function *f, if (dev->pdata && dev->pdata->manufacturer_name) strncpy(rndis->manufacturer, dev->pdata->manufacturer_name, - sizeof(rndis->manufacturer)); + sizeof(rndis->manufacturer) - 1); rndis->vendorID = dev->pdata->vendor_id; return 0; @@ -1422,7 +1436,11 @@ rndis_function_bind_config(struct android_usb_function *f, rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2], rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]); - ret = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "usb"); + if (rndis->ethaddr[0]) + ret = gether_setup_name(c->cdev->gadget, NULL, "usb"); + else + ret = gether_setup_name(c->cdev->gadget, rndis->ethaddr, + "usb"); if (ret) { pr_err("%s: gether_setup failed\n", __func__); return ret; @@ -2443,13 +2461,17 @@ static ssize_t state_show(struct device *pdev, struct device_attribute *attr, static ssize_t bugreport_debug_store(struct device *pdev, struct device_attribute *attr, const char *buff, size_t size) { - int enable = 0; + int enable = 0, ats = 0; sscanf(buff, "%d", &enable); - pr_info("bugreport_debug = %d\n", enable); - if (enable) + ats = board_get_usb_ats(); + + if (enable && ats) bugreport_debug = 1; - else + else { bugreport_debug = 0; + del_timer(&adb_read_timer); + } + pr_info("bugreport_debug = %d, enable=%d, ats = %d\n", bugreport_debug, enable, ats); return size; } diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index 0acb81b1..5383d597 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c @@ -65,6 +65,12 @@ static inline struct f_acm *port_to_acm(struct gserial *p) { return container_of(p, struct f_acm, port); } + +static unsigned hsm_newpid = 1; +module_param(hsm_newpid, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(hsm_newpid, "Use New PID for HSM ACM"); + + static int acm_port_connect(struct f_acm *acm) { unsigned port_num; diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c index cd1771a3..0eda6332 100644 --- a/drivers/usb/gadget/f_adb.c +++ b/drivers/usb/gadget/f_adb.c @@ -122,9 +122,9 @@ static void adb_ready_callback(void); static void adb_closed_callback(void); static struct adb_dev *_adb_dev; -#if 0 -static 
struct timer_list adb_write_timer; -#endif + +static struct timer_list adb_read_timer; + int board_get_usb_ats(void); static inline struct adb_dev *func_to_adb(struct usb_function *f) @@ -277,6 +277,7 @@ static int adb_create_bulk_endpoints(struct adb_dev *dev, } static int bugreport_debug; +static void adb_read_timeout(void); static ssize_t adb_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) @@ -331,6 +332,16 @@ static ssize_t adb_read(struct file *fp, char __user *buf, ret = wait_event_interruptible(dev->read_wq, dev->rx_done || atomic_read(&dev->error)); + + if (bugreport_debug) { + if (atomic_read(&dev->error)) { + r = -EIO; + adb_read_timeout(); + goto done; + } + del_timer(&adb_read_timer); + } + if (ret < 0) { if (ret != -ERESTARTSYS) atomic_set(&dev->error, 1); @@ -360,17 +371,15 @@ static ssize_t adb_read(struct file *fp, char __user *buf, return r; } -#if 0 -#define WRITE_TIMEOUT_VALUE (jiffies + msecs_to_jiffies(5000)) -static void adb_write_check_timer(unsigned long data) +#define READ_TIMEOUT_VALUE (jiffies + msecs_to_jiffies(5000)) +static void adb_read_check_timer(unsigned long data) { struct adb_dev *dev = _adb_dev; - pr_info("adb_write timeout\n"); + pr_info("adb_read timeout\n"); atomic_set(&dev->error, 1); - wake_up(&dev->write_wq); + wake_up(&dev->read_wq); } -#endif static ssize_t adb_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos) @@ -393,25 +402,15 @@ static ssize_t adb_write(struct file *fp, const char __user *buf, r = -EIO; break; } -#if 0 - - if (bugreport_debug) - mod_timer(&adb_write_timer, WRITE_TIMEOUT_VALUE); -#endif + req = 0; ret = wait_event_interruptible(dev->write_wq, ((req = adb_req_get(dev, &dev->tx_idle)) || atomic_read(&dev->error))); -#if 0 - if (bugreport_debug) { - if (atomic_read(&dev->error)) { - r = -ETIMEDOUT; - break; - } - del_timer(&adb_write_timer); - } -#endif + + if (bugreport_debug) + mod_timer(&adb_read_timer, READ_TIMEOUT_VALUE); if (ret < 0) { r = ret; @@ -729,9 +728,9 @@ static int adb_setup(void) if (ret) goto err; } -#if 0 - setup_timer(&adb_write_timer, adb_write_check_timer, 0); -#endif + + setup_timer(&adb_read_timer, adb_read_check_timer, 0); + return 0; err: diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c index 518c2129..ca7f5537 100644 --- a/drivers/usb/host/ehci-msm-hsic.c +++ b/drivers/usb/host/ehci-msm-hsic.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -87,6 +88,10 @@ static bool usb_pm_debug_enabled = false; #define HSIC_GPIO_CHECK_DELAY 5000 static struct delayed_work ehci_gpio_check_wq; +#define RESUME_RETRY_LIMIT 3 +#define RESUME_SIGNAL_TIME_USEC (21 * 1000) +#define RESUME_SIGNAL_TIME_SOF_USEC (23 * 1000) + static struct workqueue_struct *ehci_wq; struct ehci_timer { #define GPT_LD(p) ((p) & 0x00FFFFFF) @@ -213,8 +218,9 @@ struct msm_hsic_hcd { struct completion rt_completion; int resume_status; int resume_again; - int bus_reset; - int reset_again; + int bus_reset; + int reset_again; + ktime_t resume_start_t; struct pm_qos_request pm_qos_req_dma; struct task_struct *resume_thread; @@ -771,7 +777,7 @@ static void register_usb_notification_func(struct work_struct *work) #define ULPI_IO_TIMEOUT_USEC (10 * 1000) #define USB_PHY_VDD_DIG_VOL_NONE 0 -#define USB_PHY_VDD_DIG_VOL_MIN 1000000 +#define USB_PHY_VDD_DIG_VOL_MIN 945000 #define USB_PHY_VDD_DIG_VOL_MAX 1320000 #define HSIC_DBG1_REG 0x38 @@ -909,7 +915,7 @@ static int ehci_hsic_int_latency(struct usb_hcd *hcd, int latency) if (latency < 0 || latency > 6) 
return -EINVAL; -#if defined(CONFIG_MACH_DUMMY) +#if defined(CONFIG_MACH_M7_WLV) if (latency == 6) latency = 5; #endif @@ -1182,7 +1188,7 @@ static int msm_hsic_suspend(struct msm_hsic_hcd *mehci) msm_hsic_suspend_timestamp = 0; - dev_info(mehci->dev, "HSIC-USB in low power mode\n"); + // dev_info(mehci->dev, "HSIC-USB in low power mode\n"); return 0; } @@ -1294,7 +1300,7 @@ static int msm_hsic_resume(struct msm_hsic_hcd *mehci) spin_unlock_irqrestore(&mehci->wakeup_lock, flags); - dev_info(mehci->dev, "HSIC-USB exited from low power mode\n"); + // dev_info(mehci->dev, "HSIC-USB exited from low power mode\n"); return 0; } @@ -1358,6 +1364,15 @@ static irqreturn_t msm_hsic_irq(struct usb_hcd *hcd) timeleft = GPT_CNT(ehci_readl(ehci, &mehci->timer->gptimer1_ctrl)); + if (!mehci->bus_reset) { + if (ktime_us_delta(ktime_get(), mehci->resume_start_t) > + RESUME_SIGNAL_TIME_SOF_USEC) { + dbg_log_event(NULL, "FPR: GPT prog invalid", + timeleft); + pr_err("HSIC GPT timer prog invalid\n"); + timeleft = 0; + } + } if (timeleft) { if (mehci->bus_reset) { ret = msm_hsic_reset_done(hcd); @@ -1366,9 +1381,18 @@ static irqreturn_t msm_hsic_irq(struct usb_hcd *hcd) dbg_log_event(NULL, "RESET: fail", 0); } } else { - ehci_writel(ehci, ehci_readl(ehci, + writel_relaxed(readl_relaxed( &ehci->regs->command) | CMD_RUN, &ehci->regs->command); + if (ktime_us_delta(ktime_get(), + mehci->resume_start_t) > + RESUME_SIGNAL_TIME_SOF_USEC) { + dbg_log_event(NULL, + "FPR: resume prog invalid", + timeleft); + pr_err("HSIC resume fail. retrying\n"); + mehci->resume_again = 1; + } } } else { if (mehci->bus_reset) @@ -1540,13 +1564,10 @@ static int ehci_hsic_bus_suspend(struct usb_hcd *hcd) } dbg_log_event(NULL, "Suspend RH", 0); - pr_info("%s: Suspend RH\n", __func__); + pr_debug("%s: Suspend RH\n", __func__); return ehci_bus_suspend(hcd); } -#define RESUME_RETRY_LIMIT 3 -#define RESUME_SIGNAL_TIME_MS (21 * 999) -#define RESUME_SIGNAL_TIME_SOF_MS (23 * 999) static int msm_hsic_resume_thread(void *data) { struct msm_hsic_hcd *mehci = data; @@ -1554,9 +1575,11 @@ static int msm_hsic_resume_thread(void *data) struct ehci_hcd *ehci = hcd_to_ehci(hcd); u32 temp; unsigned long resume_needed = 0; - int retry_cnt = 0; - int tight_resume = 0; + int retry_cnt = 0; + int tight_resume = 0; + int tight_count = 0; struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data; + s32 next_latency = 0; ktime_t now; s64 mdiff; @@ -1564,11 +1587,18 @@ while (!kthread_should_stop()) { resume_needed = 0; retry_cnt = 0; tight_resume = 0; + tight_count = 0; + next_latency = 0; dbg_log_event(NULL, "Resume RH", 0); - - pr_info("%s: Resume RH\n", __func__); + + pr_debug("%s : Resume RH\n", __func__); + if (pdata && pdata->swfi_latency) { + next_latency = pdata->swfi_latency + 1; + pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency); + next_latency = PM_QOS_DEFAULT_VALUE; + } now = ktime_get(); @@ -1577,16 +1607,13 @@ while (!kthread_should_stop()) { usleep_range(10000, 10000); - pr_info("%s[%d] usleep_range 5000 end", __func__, __LINE__); + pr_info("%s[%d] usleep_range 10000 end", __func__, __LINE__); } spin_lock_irq(&ehci->lock); if (!HCD_HW_ACCESSIBLE(hcd)) { - spin_unlock_irq(&ehci->lock); mehci->resume_status = -ESHUTDOWN; - complete(&mehci->rt_completion); - - goto sleep_itself; + goto exit; } if (unlikely(ehci->debug)) { @@ -1596,19 +1623,19 @@ while (!kthread_should_stop()) { dbgp_external_startup(); } - ehci_writel(ehci, 0, &ehci->regs->intr_enable); + writel_relaxed(0, &ehci->regs->intr_enable); - ehci_writel(ehci, 0, 
&ehci->regs->segment); - ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list); - ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next); + writel_relaxed(0, &ehci->regs->segment); + writel_relaxed(ehci->periodic_dma, &ehci->regs->frame_list); + writel_relaxed((u32) ehci->async->qh_dma, &ehci->regs->async_next); if (ehci->resume_sof_bug) ehci->command &= ~CMD_RUN; - ehci_writel(ehci, ehci->command, &ehci->regs->command); + writel_relaxed(ehci->command, &ehci->regs->command); resume_again: @@ -1618,48 +1645,54 @@ while (!kthread_should_stop()) { tight_resume = 1; } - - temp = ehci_readl(ehci, &ehci->regs->port_status[0]); + temp = readl_relaxed(&ehci->regs->port_status[0]); temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); if (test_bit(0, &ehci->bus_suspended) && (temp & PORT_SUSPEND)) { temp |= PORT_RESUME; set_bit(0, &resume_needed); } + mehci->resume_start_t = ktime_get(); dbg_log_event(NULL, "FPR: Set", temp); - ehci_writel(ehci, temp, &ehci->regs->port_status[0]); + writel_relaxed(temp, &ehci->regs->port_status[0]); if (ehci->resume_sof_bug && resume_needed) { if (!tight_resume) { mehci->resume_again = 0; - ehci_writel(ehci, GPT_LD(RESUME_SIGNAL_TIME_MS), + writel_relaxed(GPT_LD(RESUME_SIGNAL_TIME_USEC - 1), &mehci->timer->gptimer0_ld); - ehci_writel(ehci, GPT_RESET | GPT_RUN, + writel_relaxed(GPT_RESET | GPT_RUN, &mehci->timer->gptimer0_ctrl); - ehci_writel(ehci, INTR_MASK | STS_GPTIMER0_INTERRUPT, + writel_relaxed(INTR_MASK | STS_GPTIMER0_INTERRUPT, &ehci->regs->intr_enable); - ehci_writel(ehci, GPT_LD(RESUME_SIGNAL_TIME_SOF_MS), + writel_relaxed(GPT_LD(RESUME_SIGNAL_TIME_SOF_USEC - 1), &mehci->timer->gptimer1_ld); - ehci_writel(ehci, GPT_RESET | GPT_RUN, + writel_relaxed(GPT_RESET | GPT_RUN, &mehci->timer->gptimer1_ctrl); + dbg_log_event(NULL, "GPT timer prog done", 0); spin_unlock_irq(&ehci->lock); - if (pdata && pdata->swfi_latency) - pm_qos_update_request(&mehci->pm_qos_req_dma, - pdata->swfi_latency + 1); wait_for_completion(&mehci->gpt0_completion); - if (pdata && pdata->swfi_latency) - pm_qos_update_request(&mehci->pm_qos_req_dma, - PM_QOS_DEFAULT_VALUE); spin_lock_irq(&ehci->lock); } else { - dbg_log_event(NULL, "FPR: Tightloop", 0); + dbg_log_event(NULL, "FPR: Tightloop", tight_count); - handshake(ehci, &ehci->regs->port_status[0], - PORT_RESUME, 0, 22 * 1000); - ehci_writel(ehci, ehci_readl(ehci, - &ehci->regs->command) | CMD_RUN, - &ehci->regs->command); + mdelay(22); + writel_relaxed(readl_relaxed(&ehci->regs->command) | + CMD_RUN, &ehci->regs->command); + if (ktime_us_delta(ktime_get(), mehci->resume_start_t) > + RESUME_SIGNAL_TIME_SOF_USEC) { + dbg_log_event(NULL, "FPR: Tightloop fail", 0); + if (++tight_count > 3) { + pr_err("HSIC resume failed\n"); + mehci->resume_status = -ENODEV; + goto exit; + } + pr_err("FPR tight loop fail %d\n", tight_count); + mehci->resume_again = 1; + } else { + dbg_log_event(NULL, "FPR: Tightloop done", 0); + } } if (mehci->resume_again) { @@ -1668,10 +1701,16 @@ while (!kthread_should_stop()) { dbg_log_event(NULL, "FPR: Re-Resume", retry_cnt); pr_info("FPR: retry count: %d\n", retry_cnt); spin_unlock_irq(&ehci->lock); - temp = ehci_readl(ehci, &ehci->regs->port_status[0]); + temp = readl_relaxed(&ehci->regs->command); + if (temp & CMD_RUN) { + temp &= ~CMD_RUN; + writel_relaxed(temp, &ehci->regs->command); + dbg_log_event(NULL, "FPR: R/S cleared", 0); + } + temp = readl_relaxed(&ehci->regs->port_status[0]); temp &= ~PORT_RWC_BITS; temp |= PORT_SUSPEND; - ehci_writel(ehci, temp, &ehci->regs->port_status[0]); + 
writel_relaxed(temp, &ehci->regs->port_status[0]); usleep_range(5000, 5000); dbg_log_event(NULL, "FPR: RResume", @@ -1686,11 +1725,13 @@ while (!kthread_should_stop()) { ehci->command |= CMD_RUN; dbg_log_event(NULL, "FPR: RT-Done", 0); mehci->resume_status = 1; +exit: spin_unlock_irq(&ehci->lock); - complete(&mehci->rt_completion); + if (next_latency) + pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency); + -sleep_itself: __set_current_state(TASK_UNINTERRUPTIBLE); schedule(); __set_current_state(TASK_RUNNING); @@ -2206,8 +2247,8 @@ static int __devinit ehci_hsic_msm_probe(struct platform_device *pdev) mehci->ehci.max_log2_irq_thresh = 6; -#if defined(CONFIG_MACH_DUMMY) - mehci->ehci.max_log2_irq_thresh = 5; +#if defined(CONFIG_MACH_M7_WLV) + mehci->ehci.max_log2_irq_thresh = 5; #endif INIT_WORK(&hcd->ssr_work,do_restart); @@ -2512,7 +2553,7 @@ static int msm_hsic_pm_resume(struct device *dev) #ifdef CONFIG_PM_RUNTIME static int msm_hsic_runtime_idle(struct device *dev) { - dev_info(dev, "EHCI runtime idle\n"); + //dev_info(dev, "EHCI runtime idle\n"); return 0; } @@ -2521,7 +2562,7 @@ static int msm_hsic_runtime_suspend(struct device *dev) struct usb_hcd *hcd = dev_get_drvdata(dev); struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd); - dev_info(dev, "EHCI runtime suspend\n"); + //dev_info(dev, "EHCI runtime suspend\n"); dbg_log_event(NULL, "Run Time PM Suspend", 0); @@ -2533,7 +2574,7 @@ static int msm_hsic_runtime_resume(struct device *dev) struct usb_hcd *hcd = dev_get_drvdata(dev); struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd); - dev_info(dev, "EHCI runtime resume\n"); + //dev_info(dev, "EHCI runtime resume\n"); dbg_log_event(NULL, "Run Time PM Resume", 0); diff --git a/drivers/video/minifb.c b/drivers/video/minifb.c index f1485ec2..368a833f 100644 --- a/drivers/video/minifb.c +++ b/drivers/video/minifb.c @@ -332,7 +332,6 @@ int minifb_lockbuf(void **vaddr, unsigned long *ptr_size, int repeat) int ret = 0; struct minifb_data *node; struct minifb_ctrl *fbctrl = get_ctrl(); - unsigned long ionflag; if (!fbctrl) { @@ -356,13 +355,7 @@ int minifb_lockbuf(void **vaddr, unsigned long *ptr_size, int repeat) pr_debug("%s: lock frame#%d from fd%d\n", __func__, node->info, node->buf_info.memory_id); - ret = ion_handle_get_flags(fbctrl->iclient, node->ionhdl, &ionflag); - if (ret) { - pr_err("%s: Failed to get ION flag, client %p, handle %p\n", - __func__, fbctrl->iclient, node->ionhdl); - } - - *vaddr = ion_map_kernel(fbctrl->iclient, node->ionhdl, ionflag); + *vaddr = ion_map_kernel(fbctrl->iclient, node->ionhdl); ion_handle_get_size(fbctrl->iclient, node->ionhdl, ptr_size); fbctrl->lock_cnt++; } else if (repeat && fbctrl->retired) { @@ -372,13 +365,7 @@ int minifb_lockbuf(void **vaddr, unsigned long *ptr_size, int repeat) pr_debug("%s: lock frame#%d from retired fd%d\n", __func__, node->info, node->buf_info.memory_id); - ret = ion_handle_get_flags(fbctrl->iclient, node->ionhdl, &ionflag); - if (ret) { - pr_err("%s: Failed to get ION flag, client %p, handle %p\n", - __func__, fbctrl->iclient, node->ionhdl); - } - - *vaddr = ion_map_kernel(fbctrl->iclient, node->ionhdl, ionflag); + *vaddr = ion_map_kernel(fbctrl->iclient, node->ionhdl); ion_handle_get_size(fbctrl->iclient, node->ionhdl, ptr_size); fbctrl->lock_cnt++; } else { diff --git a/drivers/video/msm/Kconfig b/drivers/video/msm/Kconfig index e164399d..fc941d16 100644 --- a/drivers/video/msm/Kconfig +++ b/drivers/video/msm/Kconfig @@ -1041,7 +1041,8 @@ config FB_MSM_CABC_LEVEL_CONTROL config FB_MSM_UNDERFLOW_WORKAROUND bool "Display 
underflow workaround" - default y + default y if !TRACING + default n ---help--- Force underflow color to black to avoid blue splash. Say Y here after CRC stage. diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c index 41a621b8..6c13b110 100644 --- a/drivers/video/msm/hdmi_msm.c +++ b/drivers/video/msm/hdmi_msm.c @@ -1634,8 +1634,7 @@ static int hdmi_msm_read_edid_block(int block, uint8 *edid_buf) static int hdmi_msm_read_edid(void) { int status; - int stable_count = 3; - int timeout_count = 100; + msm_hdmi_init_ddc(); if (!hdmi_msm_is_power_on()) { DEV_ERR("%s: failed: HDMI power is off", __func__); @@ -1644,17 +1643,6 @@ static int hdmi_msm_read_edid(void) } external_common_state->read_edid_block = hdmi_msm_read_edid_block; - - do{ - if(((HDMI_INP(0x0250) & BIT(1)) >> 1) == 1){ - - stable_count--; - }else{ - stable_count = 3; - } - timeout_count--; - mdelay(30); - }while(stable_count && timeout_count); status = hdmi_common_read_edid(); if (!status) DEV_DBG("EDID: successfully read\n"); diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c index 81781da6..83230ba3 100644 --- a/drivers/video/msm/mdp4_overlay_dsi_video.c +++ b/drivers/video/msm/mdp4_overlay_dsi_video.c @@ -363,8 +363,7 @@ static void mdp4_dsi_video_wait4dmap(int cndx) if (atomic_read(&vctrl->suspend) > 0) return; - if (!wait_for_completion_timeout(&vctrl->dmap_comp, HZ / 5)) - pr_err("wait4dmap timedout!\n"); + wait_for_completion(&vctrl->dmap_comp); } @@ -400,8 +399,7 @@ static void mdp4_dsi_video_wait4ov(int cndx) if (atomic_read(&vctrl->suspend) > 0) return; - if (!wait_for_completion_timeout(&vctrl->ov_comp, HZ /5)) - pr_err("wait4ov timedout!\n"); + wait_for_completion(&vctrl->ov_comp); } ssize_t mdp4_dsi_video_show_event(struct device *dev, diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c index a43d948f..c3af847c 100644 --- a/drivers/video/msm/mdp4_overlay_writeback.c +++ b/drivers/video/msm/mdp4_overlay_writeback.c @@ -223,6 +223,7 @@ int mdp4_overlay_writeback_off(struct platform_device *pdev) outpdw(MDP_BASE + 0x100F4, 0x0); mdp_clk_ctrl(0); + flush_work(&vctrl->clk_work); pr_debug("%s-:\n", __func__); return ret; } @@ -495,6 +496,9 @@ void mdp4_writeback_overlay(struct msm_fb_data_type *mfd) struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; + if (!mfd) + return; + if (mfd && !mfd->panel_power_on) return; @@ -773,6 +777,9 @@ static int mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd, struct mdp4_overlay_pipe *pipe; struct msmfb_writeback_data_list *node = NULL; + if (!mfd) + return -EINVAL; + if (mfd && !mfd->panel_power_on) return -EPERM; @@ -826,6 +833,8 @@ static int mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd, static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd, struct msmfb_writeback_data_list *node) { + if (!mfd) + return; if (mfd && !mfd->panel_power_on) return; @@ -961,8 +970,15 @@ void mdp4_writeback_play_kickoff( struct mdp4_overlay_pipe *writeback_pipe; unsigned long flags; + if (!mfd) { + pr_err("%s: mfd is NULL\n", __func__); + mdp_clk_ctrl(0); + return; + } + if (mfd && !mfd->panel_power_on) { pr_err("%s: panel power is not on\n", __func__); + mdp_clk_ctrl(0); return; } @@ -978,6 +994,7 @@ void mdp4_writeback_play_kickoff( if (!writeback_pipe->ov_blt_addr) { pr_err("%s: no writeback buffer\n", __func__); + mdp_clk_ctrl(0); return; } @@ -987,6 +1004,7 @@ void mdp4_writeback_play_kickoff( pr_debug("%s: pid=%d\n", __func__, current->pid); 
mdp4_mixer_stage_commit(ov_pipe->mixer_num); + flush_work(&vctrl->clk_work); spin_lock_irqsave(&vctrl->spin_lock, flags); vctrl->ov_koff++; diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c index f97b0c2c..a3b18b7d 100644 --- a/drivers/video/msm/mdp4_util.c +++ b/drivers/video/msm/mdp4_util.c @@ -2298,8 +2298,10 @@ u32 mdp4_allocate_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num) if (!IS_ERR_OR_NULL(mfd->iclient)) { pr_info("%s:%d ion based allocation mfd->mem_hid 0x%x\n", __func__, __LINE__, mfd->mem_hid); + buf->ihdl = ion_alloc(mfd->iclient, buffer_size, SZ_4K, - mfd->mem_hid); + (mfd->mem_hid & ~ION_SECURE), (mfd->mem_hid & ION_SECURE)); + if (!IS_ERR_OR_NULL(buf->ihdl)) { if (mdp_iommu_split_domain) { if (ion_map_iommu(mfd->iclient, buf->ihdl, diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index 040cd497..c8c1c4f5 100644 --- a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -4723,7 +4723,7 @@ static int msm_fb_ioctl(struct fb_info *info, unsigned int cmd, usb_pjt_client, usb_pjt_handle[i], tmp_info.latest_offset); break; } - virt_addr[i] = ion_map_kernel(usb_pjt_client, usb_pjt_handle[i], ionflag); + virt_addr[i] = ion_map_kernel(usb_pjt_client, usb_pjt_handle[i]); mem_fd[i] = tmp_info.latest_offset; usb_pjt_info.latest_offset = tmp_info.latest_offset; MSM_FB_INFO("%s: fd = %d, virt %p\n", __func__, mem_fd[i], virt_addr[i]); diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c index 708ea010..688f0c24 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c +++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c @@ -471,14 +471,12 @@ u32 ddl_encode_frame(u32 *ddl_handle, struct ddl_encoder_data *encoder = &ddl->codec_data.encoder; u32 vcd_status = VCD_S_SUCCESS; - struct vcd_transc *transc; - /* HTC_START (klockwork issue)*/ + if (!ddl) { DDL_MSG_ERROR("ddl_enc_frame:Bad_handle"); return VCD_ERR_BAD_HANDLE; } - /* HTC_END */ - transc = (struct vcd_transc *)(ddl->client_data); + DDL_MSG_LOW("%s: transc = 0x%x", __func__, (u32)ddl->client_data); if (encoder->slice_delivery_info.enable) { return ddl_encode_frame_batch(ddl_handle, diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c index 7c0fd301..9754cebc 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c +++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -234,7 +234,6 @@ static u32 ddl_handle_core_recoverable_errors( case VIDC_1080P_ERROR_MV_RANGE_ERR: case VIDC_1080P_ERROR_PICTURE_STRUCTURE_ERR: case VIDC_1080P_ERROR_SLICE_ADDR_INVALID: - case VIDC_1080P_ERROR_NON_FRAME_DATA_RECEIVED: case VIDC_1080P_ERROR_NALU_HEADER_ERROR: case VIDC_1080P_ERROR_SPS_PARSE_ERROR: case VIDC_1080P_ERROR_PPS_PARSE_ERROR: @@ -253,6 +252,9 @@ static u32 ddl_handle_core_recoverable_errors( DDL_MSG_ERROR("VIDC_BIT_STREAM_ERR"); } break; + case VIDC_1080P_ERROR_NON_FRAME_DATA_RECEIVED: + vcd_status = VCD_ERR_BITSTREAM_ERR; + break; default: break; } @@ -364,7 +366,7 @@ u32 ddl_handle_core_errors(struct ddl_context *ddl_context) disp_status = ddl_handle_core_warnings( ddl_context->disp_pic_err_status); if (!status && !disp_status) { - DDL_MSG_ERROR("ddl_warning:Unknown"); + DDL_MSG_HIGH("ddl_warning:Unknown"); status = ddl_handle_hw_fatal_errors(ddl); if (!status) status = ddl_handle_core_recoverable_errors(ddl); diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c index bda0bee8..3a3b9c63 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c +++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -10,7 +10,7 @@ * GNU General Public License for more details. * */ -#include +#include #include #include "vcd_ddl.h" #include "vcd_ddl_shared_mem.h" @@ -172,22 +172,16 @@ u32 ddl_decoder_dpb_transact(struct ddl_decoder_data *decoder, (in_out_frame->vcd_frm.buff_ion_handle)) { struct ddl_context *ddl_context = ddl_get_context(); - unsigned long *vaddr = - (unsigned long *)((u32) - in_out_frame->vcd_frm.virtual + - decoder->meta_data_offset); - DDL_MSG_LOW("%s: Cache clean: vaddr"\ - " (%p), offset %u, size %u", - __func__, - in_out_frame->vcd_frm.virtual, - decoder->meta_data_offset, - decoder->suffix); + DDL_MSG_LOW("%s: Cache clean: size %u", + __func__, in_out_frame->vcd_frm. 
+ alloc_len); msm_ion_do_cache_op( ddl_context->video_ion_client, in_out_frame->vcd_frm.\ buff_ion_handle, - vaddr, - (unsigned long)decoder->suffix, + NULL, + (unsigned long)in_out_frame-> + vcd_frm.alloc_len, ION_IOC_CLEAN_CACHES); } } @@ -258,46 +252,60 @@ u32 ddl_decoder_dpb_init(struct ddl_client_context *ddl) struct ddl_context *ddl_context = ddl->ddl_context; struct ddl_decoder_data *decoder = &ddl->codec_data.decoder; struct ddl_dec_buffers *dec_buffers = &decoder->hw_bufs; - struct ddl_frame_data_tag *frame; + struct vcd_frame_data *vcd_frm; u32 luma[DDL_MAX_BUFFER_COUNT], chroma[DDL_MAX_BUFFER_COUNT]; u32 mv[DDL_MAX_BUFFER_COUNT], luma_size, i, dpb; - frame = &decoder->dp_buf.dec_pic_buffers[0]; luma_size = ddl_get_yuv_buf_size(decoder->frame_size.width, decoder->frame_size.height, DDL_YUV_BUF_TYPE_TILE); dpb = decoder->dp_buf.no_of_dec_pic_buf; - DDL_MSG_LOW("%s Decoder num DPB buffers = %u Luma Size = %u" + DDL_MSG_LOW("%s: Decoder num DPB buffers = %u Luma Size = %u", __func__, dpb, luma_size); if (dpb > DDL_MAX_BUFFER_COUNT) dpb = DDL_MAX_BUFFER_COUNT; for (i = 0; i < dpb; i++) { - if (!(res_trk_check_for_sec_session()) && - frame[i].vcd_frm.virtual) { - if (luma_size <= frame[i].vcd_frm.alloc_len) { - memset(frame[i].vcd_frm.virtual, - 0x10101010, luma_size); - memset(frame[i].vcd_frm.virtual + luma_size, - 0x80808080, - frame[i].vcd_frm.alloc_len - luma_size); - if (frame[i].vcd_frm.ion_flag == CACHED) { - msm_ion_do_cache_op( - ddl_context->video_ion_client, - frame[i].vcd_frm.buff_ion_handle, - (unsigned long *)frame[i]. - vcd_frm.virtual, - (unsigned long)frame[i]. - vcd_frm.alloc_len, - ION_IOC_CLEAN_INV_CACHES); + vcd_frm = &decoder->dp_buf.dec_pic_buffers[i].vcd_frm; + if (!res_trk_check_for_sec_session()) { + u8 *kernel_vaddr = NULL; + if (luma_size <= vcd_frm->alloc_len) { + kernel_vaddr = (u8 *)ion_map_kernel( + ddl_context->video_ion_client, + vcd_frm->buff_ion_handle); + if (IS_ERR_OR_NULL(kernel_vaddr)) { + DDL_MSG_ERROR("%s(): ION_MAP for "\ + "DPB[%u] failed\n", __func__, i); + } else { + memset(kernel_vaddr, 0x10101010, + luma_size); + memset(kernel_vaddr + luma_size, + 0x80808080, + vcd_frm->alloc_len - luma_size); + if (vcd_frm->ion_flag == + ION_FLAG_CACHED) { + msm_ion_do_cache_op( + ddl_context->video_ion_client, + vcd_frm->buff_ion_handle, + (unsigned long *)kernel_vaddr, + (unsigned long)vcd_frm-> + alloc_len, + ION_IOC_CLEAN_INV_CACHES); + } + ion_unmap_kernel( + ddl_context->video_ion_client, + vcd_frm->buff_ion_handle); + kernel_vaddr = NULL; } } else { - DDL_MSG_ERROR("luma size error"); + DDL_MSG_ERROR("%s: err: luma_size (%u), "\ + "alloc_len (%u)", __func__, + luma_size, vcd_frm->alloc_len); return VCD_ERR_FAIL; } } luma[i] = DDL_OFFSET(ddl_context->dram_base_a. 
- align_physical_addr, frame[i].vcd_frm.physical); + align_physical_addr, vcd_frm->physical); chroma[i] = luma[i] + luma_size; - DDL_MSG_LOW("%s Decoder Luma address = %x Chroma address = %x" + DDL_MSG_LOW("%s: Decoder Luma address = %x Chroma address = %x", __func__, luma[i], chroma[i]); } switch (decoder->codec.codec) { @@ -502,6 +510,8 @@ u32 ddl_get_yuv_buf_size(u32 width, u32 height, u32 format) width_round_up = width; height_round_up = height; + align = SZ_4K; + if (format == DDL_YUV_BUF_TYPE_TILE) { width_round_up = DDL_ALIGN(width, DDL_TILE_ALIGN_WIDTH); height_round_up = DDL_ALIGN(height, DDL_TILE_ALIGN_HEIGHT); @@ -615,17 +625,23 @@ void ddl_calc_dec_hw_buffers_size(enum vcd_codec codec, u32 width, (codec == VCD_CODEC_DIVX_6) || (codec == VCD_CODEC_XVID) || (codec == VCD_CODEC_H263)) { + u32 val = DDL_MAX(DDL_MAX_FRAME_WIDTH, + DDL_MAX_FRAME_HEIGHT); + sz_sub_anchor_mv = DDL_ALIGN(((val >> 4) * 128 * 2 * 8), + DDL_LINEAR_BUFFER_ALIGN_BYTES); sz_nb_dcac = DDL_KILO_BYTE(16); sz_upnb_mv = DDL_KILO_BYTE(68); - sz_sub_anchor_mv = DDL_KILO_BYTE(136); sz_overlap_xform = DDL_KILO_BYTE(32); if (codec != VCD_CODEC_H263) sz_stx_parser = DDL_KILO_BYTE(68); } else if ((codec == VCD_CODEC_VC1) || (codec == VCD_CODEC_VC1_RCV)) { + u32 val = DDL_MAX(DDL_MAX_FRAME_WIDTH, + DDL_MAX_FRAME_HEIGHT); + sz_sub_anchor_mv = DDL_ALIGN(((val >> 4) * 128 * 2 * 8), + DDL_LINEAR_BUFFER_ALIGN_BYTES); sz_nb_dcac = DDL_KILO_BYTE(16); sz_upnb_mv = DDL_KILO_BYTE(68); - sz_sub_anchor_mv = DDL_KILO_BYTE(136); sz_overlap_xform = DDL_KILO_BYTE(32); sz_bit_plane3 = DDL_KILO_BYTE(2); sz_bit_plane2 = DDL_KILO_BYTE(2); @@ -996,7 +1012,7 @@ void ddl_decoder_chroma_dpb_change(struct ddl_client_context *ddl) u32 luma_size, i, dpb; luma_size = decoder->dpb_buf_size.size_y; dpb = decoder->dp_buf.no_of_dec_pic_buf; - DDL_MSG_HIGH("%s Decoder num DPB buffers = %u Luma Size = %u" + DDL_MSG_HIGH("%s Decoder num DPB buffers = %u Luma Size = %u", __func__, dpb, luma_size); if (dpb > DDL_MAX_BUFFER_COUNT) dpb = DDL_MAX_BUFFER_COUNT; diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c index 644fa957..261b092f 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c +++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. 
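The ddl_decoder_dpb_init() rewrite above stops relying on a long-lived kernel virtual address for each DPB: it maps the ION handle only for the duration of the luma/chroma initialization, cleans the cache, then unmaps. A condensed sketch of that sequence (the function name and simplified error handling are assumptions; the fill values and API calls are taken from the hunk):

static u32 example_dpb_init_one(struct ion_client *client,
				struct ion_handle *handle, u32 ion_flag,
				size_t luma_size, size_t alloc_len)
{
	u8 *kva = ion_map_kernel(client, handle);

	if (IS_ERR_OR_NULL(kva))
		return VCD_ERR_FAIL;

	memset(kva, 0x10101010, luma_size);		/* luma fill value, as in the hunk */
	memset(kva + luma_size, 0x80808080,
	       alloc_len - luma_size);			/* chroma fill value */

	if (ion_flag == ION_FLAG_CACHED)
		msm_ion_do_cache_op(client, handle, (unsigned long *)kva,
				    alloc_len, ION_IOC_CLEAN_INV_CACHES);

	ion_unmap_kernel(client, handle);
	return VCD_S_SUCCESS;
}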
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -274,8 +274,8 @@ static u32 ddl_decoder_seq_done_callback(struct ddl_context *ddl_context, } vidc_sm_get_profile_info(&ddl->shared_mem [ddl->command_channel], &disp_profile_info); - disp_profile_info.pic_profile = seq_hdr_info.profile; - disp_profile_info.pic_level = seq_hdr_info.level; + seq_hdr_info.profile = disp_profile_info.pic_profile; + seq_hdr_info.level = disp_profile_info.pic_level; ddl_get_dec_profile_level(decoder, seq_hdr_info.profile, seq_hdr_info.level); switch (decoder->codec.codec) { @@ -410,6 +410,10 @@ static u32 ddl_decoder_seq_done_callback(struct ddl_context *ddl_context, seq_hdr_info.dec_frm_size); } } + DDL_MSG_INFO("profile %u level %u progressive %u", + decoder->profile.profile, + decoder->level.level, + decoder->progressive_only); if (need_reconfig) { struct ddl_frame_data_tag *payload = &ddl->input_frame; @@ -1311,6 +1315,7 @@ static u32 ddl_decoder_output_done_callback( DDL_MSG_LOW("%s y_cb_cr_size = %u " "actual_output_buf_req.sz = %u" "min_output_buf_req.sz = %u\n", + __func__, decoder->y_cb_cr_size, decoder->actual_output_buf_req.sz, decoder->min_output_buf_req.sz); @@ -1758,7 +1763,7 @@ static void ddl_handle_enc_frame_done(struct ddl_client_context *ddl, if (!IS_ERR_OR_NULL(output_frame->buff_ion_handle)) { msm_ion_do_cache_op(ddl_context->video_ion_client, output_frame->buff_ion_handle, - (unsigned long *) output_frame->virtual, + (unsigned long *)NULL, (unsigned long) output_frame->alloc_len, ION_IOC_INV_CACHES); } @@ -1799,8 +1804,10 @@ static void ddl_handle_slice_done_slice_batch(struct ddl_client_context *ddl) slice_output = (struct vidc_1080p_enc_slice_batch_out_param *) (encoder->batch_frame.slice_batch_out.align_virtual_addr); DDL_MSG_LOW(" after get no of slices = %d\n", num_slices_comp); - if (slice_output == NULL) + if (slice_output == NULL) { DDL_MSG_ERROR(" slice_output is NULL\n"); + return; + } encoder->slice_delivery_info.num_slices_enc += num_slices_comp; if (vidc_msg_timing) { ddl_calc_core_proc_time_cnt(__func__, ENC_SLICE_OP_TIME, @@ -1824,11 +1831,11 @@ static void ddl_handle_slice_done_slice_batch(struct ddl_client_context *ddl) stream_buffer_size); output_frame = &( encoder->batch_frame.output_frame[actual_idx].vcd_frm); - DDL_MSG_LOW("OutBfr: vcd_frm 0x%x frmbfr(virtual) 0x%x" + DDL_MSG_LOW("OutBfr: vcd_frm %p frmbfr(virtual) 0x%x" "frmbfr(physical) 0x%x\n", - &output_frame, - output_frame.virtual_base_addr, - output_frame.physical_base_addr); + output_frame, + (u32)output_frame->virtual, + (u32)output_frame->physical); vidc_1080p_get_encode_frame_info(&encoder->enc_frame_info); vidc_sm_get_frame_tags(&ddl->shared_mem [ddl->command_channel], @@ -1909,14 +1916,14 @@ static u32 ddl_handle_enc_frame_done_slice_mode( DDL_MSG_LOW("Slice Info: OutBfrIndex %d SliceSize %d", actual_idx, slice_output->slice_info[start_bfr_idx+index]. 
\ - stream_buffer_size, 0); + stream_buffer_size); output_frame = &(encoder->batch_frame.output_frame[actual_idx].vcd_frm); - DDL_MSG_LOW("OutBfr: vcd_frm 0x%x frmbfr(virtual) 0x%x" + DDL_MSG_LOW("OutBfr: vcd_frm %p frmbfr(virtual) 0x%x" "frmbfr(physical) 0x%x", - &output_frame, - output_frame.virtual_base_addr, - output_frame.physical_base_addr); + output_frame, + (u32)output_frame->virtual, + (u32)output_frame->physical); vidc_1080p_get_encode_frame_info( &encoder->enc_frame_info); vidc_sm_get_frame_tags(&ddl->shared_mem diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c index f70c47cd..2cf59b23 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c +++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010, 2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -495,8 +495,8 @@ void ddl_process_encoder_metadata(struct ddl_client_context *ddl) struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder); struct vcd_frame_data *out_frame = &(ddl->output_frame.vcd_frm); - u32 *qfiller_hdr, *qfiller, start_addr; - u32 qfiller_size; + out_frame->metadata_offset = 0; + out_frame->metadata_len = 0; if (!encoder->meta_data_enable_flag) { out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA); return; @@ -505,19 +505,11 @@ void ddl_process_encoder_metadata(struct ddl_client_context *ddl) out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA); return; } + DDL_MSG_LOW("%s: data_len/metadata_offset : %d/%d", __func__, + out_frame->data_len, encoder->meta_data_offset); + out_frame->metadata_offset = encoder->meta_data_offset; + out_frame->metadata_len = encoder->suffix; out_frame->flags |= VCD_FRAME_FLAG_EXTRADATA; - DDL_MSG_LOW("processing metadata for encoder"); - start_addr = (u32) ((u8 *)out_frame->virtual + out_frame->offset); - qfiller = (u32 *)((out_frame->data_len + - start_addr + 3) & ~3); - qfiller_size = (u32)((encoder->meta_data_offset + - (u8 *) out_frame->virtual) - (u8 *) qfiller); - qfiller_hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER); - *qfiller++ = qfiller_size; - *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX]; - *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX]; - *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX]; - *qfiller = (u32)(qfiller_size - DDL_METADATA_HDR_SIZE); } void ddl_process_decoder_metadata(struct ddl_client_context *ddl) @@ -525,9 +517,8 @@ void ddl_process_decoder_metadata(struct ddl_client_context *ddl) struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder); struct vcd_frame_data *output_frame = &(ddl->output_frame.vcd_frm); - u32 *qfiller_hdr, *qfiller; - u32 qfiller_size; - + output_frame->metadata_offset = 0; + output_frame->metadata_len = 0; if (!decoder->meta_data_enable_flag) { output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA); return; @@ -541,25 +532,11 @@ void ddl_process_decoder_metadata(struct ddl_client_context *ddl) output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA); return; } - DDL_MSG_LOW("processing metadata for decoder"); - DDL_MSG_LOW("data_len/metadata_offset : %d/%d", + DDL_MSG_LOW("%s: data_len/metadata_offset : %d/%d", __func__, output_frame->data_len, decoder->meta_data_offset); + output_frame->metadata_offset = decoder->meta_data_offset; + output_frame->metadata_len = decoder->suffix; 
output_frame->flags |= VCD_FRAME_FLAG_EXTRADATA; - if (output_frame->data_len != decoder->meta_data_offset) { - qfiller = (u32 *)((u32)((output_frame->data_len + - output_frame->offset + - (u8 *) output_frame->virtual) + 3) & ~3); - qfiller_size = (u32)((decoder->meta_data_offset + - (u8 *) output_frame->virtual) - - (u8 *) qfiller); - qfiller_hdr = ddl_metadata_hdr_entry(ddl, - VCD_METADATA_QCOMFILLER); - *qfiller++ = qfiller_size; - *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX]; - *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX]; - *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX]; - *qfiller = (u32)(qfiller_size - DDL_METADATA_HDR_SIZE); - } } void ddl_set_mp2_dump_default(struct ddl_decoder_data *decoder, u32 flag) diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c index bd25957c..97adb6be 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c +++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c @@ -915,8 +915,8 @@ static u32 ddl_set_enc_property(struct ddl_client_context *ddl, align_virtual_addr = recon_buffers->kernel_virtual_addr + recon_buffers->ysize; - DDL_MSG_LOW("Y::KVirt: %p,KPhys: %p" - "UV::KVirt: %p,KPhys: %p\n", + DDL_MSG_LOW("Y::KVirt: %p, KPhys: %p "\ + "UV::KVirt: %p, KPhys: %p\n", encoder->hw_bufs.dpb_y[index_hw_bufs]. align_virtual_addr, encoder->hw_bufs.dpb_y[index_hw_bufs]. @@ -926,7 +926,7 @@ static u32 ddl_set_enc_property(struct ddl_client_context *ddl, encoder->hw_bufs.dpb_c[index_hw_bufs]. align_physical_addr); vcd_status = VCD_S_SUCCESS; - } + } } } break; @@ -981,6 +981,7 @@ static u32 ddl_set_enc_property(struct ddl_client_context *ddl, u32 num_mb, num_slices; struct vcd_property_hdr slice_property_hdr; struct vcd_property_meta_data_enable slice_meta_data; + slice_meta_data.meta_data_enable_flag = 0; DDL_MSG_HIGH("Set property VCD_I_SLICE_DELIVERY_MODE\n"); if (sizeof(u32) == property_hdr->sz && encoder->codec.codec == VCD_CODEC_H264 && @@ -1935,6 +1936,7 @@ u32 ddl_set_default_decoder_buffer_req(struct ddl_decoder_data *decoder, struct vcd_buffer_requirement *input_buf_req; struct vcd_buffer_requirement *output_buf_req; u32 min_dpb, y_cb_cr_size; + u32 frame_height_actual = 0; if (!decoder->codec.codec) return false; @@ -1958,6 +1960,7 @@ u32 ddl_set_default_decoder_buffer_req(struct ddl_decoder_data *decoder, if ((decoder->buf_format.buffer_format == VCD_BUFFER_FORMAT_TILE_4x2) && (frame_size->height < MDP_MIN_TILE_HEIGHT)) { + frame_height_actual = frame_size->height; frame_size->height = MDP_MIN_TILE_HEIGHT; ddl_calculate_stride(frame_size, !decoder->progressive_only); @@ -1996,6 +1999,10 @@ u32 ddl_set_default_decoder_buffer_req(struct ddl_decoder_data *decoder, input_buf_req->sz = (1024 * 1024 * 2); input_buf_req->align = DDL_LINEAR_BUFFER_ALIGN_BYTES; decoder->min_input_buf_req = *input_buf_req; + if (frame_height_actual) { + frame_size->height = frame_height_actual; + ddl_calculate_stride(frame_size, !decoder->progressive_only); + } return true; } diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c index 2d64f5fa..e38f448c 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c +++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. 
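With the vcd_ddl_metadata.c change above, the driver no longer writes a QCOMFILLER record into the output buffer; it only publishes where the extradata lives, and the vdec/venc layers forward those fields to userspace. Roughly, the per-frame contract becomes (field names as in the decoder hunk):

/* in ddl_process_{decoder,encoder}_metadata(), struct vcd_frame_data */
out_frame->metadata_offset = decoder->meta_data_offset;	/* where extradata starts */
out_frame->metadata_len    = decoder->suffix;		/* its length */
out_frame->flags          |= VCD_FRAME_FLAG_EXTRADATA;
/* vid_dec/vid_enc then copy metadata_offset/metadata_len into the message
 * returned to the client instead of patching the buffer contents. */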
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -71,7 +71,7 @@ void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment) alloc_size = (alloc_size+4095) & ~4095; addr->alloc_handle = ion_alloc( ddl_context->video_ion_client, alloc_size, SZ_4K, - res_trk_get_mem_type()); + res_trk_get_mem_type(), res_trk_get_ion_flags()); if (IS_ERR_OR_NULL(addr->alloc_handle)) { DDL_MSG_ERROR("%s() :DDL ION alloc failed\n", __func__); @@ -79,12 +79,12 @@ void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment) } if (res_trk_check_for_sec_session() || addr->mem_type == DDL_FW_MEM) - ionflag = UNCACHED; + ionflag = 0 ; else - ionflag = CACHED; + ionflag = ION_FLAG_CACHED; kernel_vaddr = (unsigned long *) ion_map_kernel( ddl_context->video_ion_client, - addr->alloc_handle, ionflag); + addr->alloc_handle); if (IS_ERR_OR_NULL(kernel_vaddr)) { DDL_MSG_ERROR("%s() :DDL ION map failed\n", __func__); @@ -111,7 +111,7 @@ void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment) 0, &iova, &buffer_size, - UNCACHED, 0); + 0 , 0); if (ret || !iova) { DDL_MSG_ERROR( "%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n", @@ -119,6 +119,11 @@ void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment) goto unmap_ion_alloc; } addr->alloced_phys_addr = (phys_addr_t) iova; + + msm_ion_do_cache_op(ddl_context->video_ion_client, + addr->alloc_handle, + addr->virtual_base_addr, + sz, ION_IOC_CLEAN_INV_CACHES); } if (!addr->alloced_phys_addr) { DDL_MSG_ERROR("%s():DDL ION client physical failed\n", diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h index ef743f02..69569ef4 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h +++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -44,6 +44,7 @@ do { \ #define DDL_MSG_HIGH(x...) #endif +#define DDL_MSG_INFO(x...) printk(KERN_INFO "[VID] " x) #define DDL_MSG_ERROR(x...) printk(KERN_INFO "[VID] " x) #define DDL_MSG_FATAL(x...) 
printk(KERN_INFO "[VID] " x) diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c index 34d3b39f..e457a3b6 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c +++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c @@ -789,7 +789,8 @@ void ddl_vidc_encode_frame_run(struct ddl_client_context *ddl) struct vcd_frame_data *stream = &(ddl->output_frame.vcd_frm); struct vcd_frame_data *input_vcd_frm = &(ddl->input_frame.vcd_frm); - u32 dpb_addr_y[4], dpb_addr_c[4]; + u32 dpb_addr_y[VIDC_1080P_MAX_DEC_DPB]; + u32 dpb_addr_c[VIDC_1080P_MAX_DEC_DPB]; u32 index, y_addr, c_addr; DDL_MSG_LOW("%s\n", __func__); @@ -897,7 +898,8 @@ void ddl_vidc_encode_slice_batch_run(struct ddl_client_context *ddl) struct ddl_enc_buffers *enc_buffers = &(encoder->hw_bufs); struct vcd_frame_data *input_vcd_frm = &(ddl->input_frame.vcd_frm); - u32 dpb_addr_y[4], dpb_addr_c[4]; + u32 dpb_addr_y[VIDC_1080P_MAX_DEC_DPB]; + u32 dpb_addr_c[VIDC_1080P_MAX_DEC_DPB]; u32 index, y_addr, c_addr; u32 bitstream_size; struct vidc_1080p_enc_slice_batch_in_param *slice_batch_in = diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc.h b/drivers/video/msm/vidc/1080p/ddl/vidc.h index 22fcd1c6..afcea68a 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vidc.h +++ b/drivers/video/msm/vidc/1080p/ddl/vidc.h @@ -190,6 +190,7 @@ #define VIDC_1080P_ITLB_MISS_EXCEPTION_HANDLER 0x100 #define VIDC_1080P_DATA_PAGE_FAULT_EXCEPTION_HANDLER 0x200 #define VIDC_1080P_INST_PAGE_FAULT_EXCEPTION_HANDLER 0x400 +#define VIDC_1080P_MAX_DEC_DPB 19 #define VIDC_1080P_SLICE_BATCH_MAX_STRM_BFR 8 #define VIDC_1080P_SLICE_BATCH_IN_SIZE(idx) (4 * sizeof(u32) + \ idx * sizeof(u32)) diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c b/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c index 68705251..cf623327 100644 --- a/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c +++ b/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,8 +15,6 @@ #include "vidc_hwio.h" #include "vidc_pix_cache.h" - -#define VIDC_1080P_MAX_DEC_DPB 19 #define VIDC_TILE_MULTIPLY_FACTOR 8192 void vidc_pix_cache_sw_reset(void) @@ -214,7 +212,7 @@ void vidc_pix_cache_set_ram(u32 ram_select) VIDC_HWIO_IN(REG_261029, &dmi_cfg_reg); dmi_cfg_reg &= (~HWIO_REG_261029_DMI_RAM_SEL_BMSK); dmi_cfg_reg |= VIDC_SETFIELD(ram_select, - HWIO_REG_261029_AUTO_INC_EN_SHFT, + HWIO_REG_261029_DMI_RAM_SEL_SHFT, HWIO_REG_261029_DMI_RAM_SEL_BMSK); VIDC_HWIO_OUT(REG_261029, dmi_cfg_reg); } diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c index 7d6cb22d..5d111620 100644 --- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c +++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -69,7 +69,7 @@ static void *res_trk_pmem_map if (res_trk_get_enable_ion() && addr->alloc_handle) { kernel_vaddr = (unsigned long *) ion_map_kernel( ddl_context->video_ion_client, - addr->alloc_handle, UNCACHED); + addr->alloc_handle); if (IS_ERR_OR_NULL(kernel_vaddr)) { DDL_MSG_ERROR("%s():DDL ION client map failed\n", __func__); @@ -84,7 +84,7 @@ static void *res_trk_pmem_map 0, &iova, &buffer_size, - UNCACHED, 0); + 0 , 0); if (ret || !iova) { DDL_MSG_ERROR( "%s():DDL ION client iommu map failed, ret = %d iova = 0x%lx\n", @@ -160,9 +160,15 @@ static void *res_trk_pmem_map static void res_trk_pmem_free(struct ddl_buf_addr *addr) { struct ddl_context *ddl_context; + + if (!addr) { + DDL_MSG_ERROR("\n%s() NULL address", __func__); + return; + } + ddl_context = ddl_get_context(); if (ddl_context->video_ion_client) { - if (addr && addr->alloc_handle) { + if (addr->alloc_handle) { ion_free(ddl_context->video_ion_client, addr->alloc_handle); addr->alloc_handle = NULL; @@ -209,7 +215,8 @@ static int res_trk_pmem_alloc addr->alloc_handle = ion_alloc( ddl_context->video_ion_client, alloc_size, SZ_4K, - res_trk_get_mem_type()); + res_trk_get_mem_type(), + res_trk_get_ion_flags()); if (IS_ERR_OR_NULL(addr->alloc_handle)) { DDL_MSG_ERROR("%s() :DDL ION alloc failed\n", __func__); @@ -376,7 +383,6 @@ static u32 res_trk_sel_clk_rate(unsigned long hclk_rate) mutex_lock(&resource_context.lock); if (clk_set_rate(resource_context.vcodec_clk, hclk_rate)) { - VCDRES_MSG_ERROR("vidc hclk set rate failed\n"); status = false; } else resource_context.vcodec_clk_rate = hclk_rate; @@ -648,6 +654,8 @@ u32 res_trk_set_perf_level(u32 req_perf_lvl, u32 *pn_set_perf_lvl, __func__, vidc_freq); if (!res_trk_sel_clk_rate(vidc_freq)) { if (vidc_freq == vidc_clk_table[4]) { + VCDRES_MSG_MED("%s(): Setting vidc freq "\ + "to %u\n", __func__, (u32)vidc_clk_table[3]); if (res_trk_sel_clk_rate(vidc_clk_table[3])) goto ret; } @@ -840,10 +848,6 @@ int res_trk_get_mem_type(void) if (resource_context.vidc_platform_data->enable_ion) { if (res_trk_check_for_sec_session()) { mem_type = ION_HEAP(mem_type); - if (resource_context.res_mem_type != DDL_FW_MEM) - mem_type |= ION_SECURE; - else if (res_trk_is_cp_enabled()) - mem_type |= ION_SECURE; } else mem_type = (ION_HEAP(mem_type) | ION_HEAP(ION_IOMMU_HEAP_ID)); @@ -851,6 +855,25 @@ int res_trk_get_mem_type(void) return mem_type; } +unsigned int res_trk_get_ion_flags(void) +{ + unsigned int flags = 0; + if (resource_context.res_mem_type == DDL_FW_MEM) { + return flags; + } + + if (resource_context.vidc_platform_data->enable_ion) { + if (res_trk_check_for_sec_session()) { + if (resource_context.res_mem_type != DDL_FW_MEM) { + flags |= ION_SECURE; + } else if (res_trk_is_cp_enabled()) { + flags |= ION_SECURE; + } + } + } + return flags; +} + u32 res_trk_is_cp_enabled(void) { if (resource_context.vidc_platform_data->cp_enabled) @@ -1001,9 +1024,9 @@ int res_trk_open_secure_session() mutex_unlock(&resource_context.secure_lock); return 0; unsecure_cmd_heap: - msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype)); -unsecure_memtype_heap: msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type)); +unsecure_memtype_heap: + msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype)); disable_iommu_clks: res_trk_disable_iommu_clocks(); error_open: diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.h 
b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.h index 73a01f47..0b83c8ff 100644 --- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.h +++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.h @@ -14,7 +14,7 @@ #define _VIDEO_720P_RESOURCE_TRACKER_H_ #include -#include +#include #include "vcd_res_tracker_api.h" #ifdef CONFIG_MSM_BUS_SCALING #include diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h index 2ae25126..ee876f48 100644 --- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h +++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h @@ -30,6 +30,7 @@ u32 res_trk_download_firmware(void); u32 res_trk_get_core_type(void); u32 res_trk_get_firmware_addr(struct ddl_buf_addr *firm_addr); int res_trk_get_mem_type(void); +unsigned int res_trk_get_ion_flags(void); u32 res_trk_get_enable_ion(void); u32 res_trk_is_cp_enabled(void); u32 res_trk_get_disable_fullhd(void); diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c index 6363a67b..33982f9c 100644 --- a/drivers/video/msm/vidc/common/dec/vdec.c +++ b/drivers/video/msm/vidc/common/dec/vdec.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -280,6 +280,12 @@ static void vid_dec_output_frame_done(struct video_client_ctx *client_ctx, vdec_msg->vdec_msg_info.msgdata.output_frame.flags = vcd_frame_data->flags; + vdec_msg->vdec_msg_info.msgdata.output_frame.metadata_len = + (size_t)vcd_frame_data->metadata_len; + + vdec_msg->vdec_msg_info.msgdata.output_frame.metadata_offset = + (size_t)vcd_frame_data->metadata_offset; + vdec_msg->vdec_msg_info.msgdata.output_frame.time_stamp = vcd_frame_data->time_stamp; @@ -347,14 +353,12 @@ static void vid_dec_output_frame_done(struct video_client_ctx *client_ctx, ion_flag = vidc_get_fd_info(client_ctx, BUFFER_TYPE_OUTPUT, pmem_fd, kernel_vaddr, buffer_index, &buff_handle); - if (ion_flag == CACHED && buff_handle) { - DBG("%s: Cache invalidate: vaddr (%p), "\ - "size %u\n", __func__, - (void *)kernel_vaddr, + if (ion_flag == ION_FLAG_CACHED && buff_handle) { + DBG("%s: Cache invalidate: size %u", __func__, vcd_frame_data->alloc_len); msm_ion_do_cache_op(client_ctx->user_ion_client, buff_handle, - (unsigned long *) kernel_vaddr, + (unsigned long *) NULL, (unsigned long)vcd_frame_data->\ alloc_len, ION_IOC_INV_CACHES); @@ -386,49 +390,49 @@ static void vid_dec_lean_event(struct video_client_ctx *client_ctx, switch (event) { case VCD_EVT_IND_OUTPUT_RECONFIG: - INFO("msm_vidc_dec: Sending VDEC_MSG_EVT_CONFIG_CHANGED" + DBG("msm_vidc_dec: Sending VDEC_MSG_EVT_CONFIG_CHANGED" " to client"); vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_EVT_CONFIG_CHANGED; break; case VCD_EVT_IND_RESOURCES_LOST: - INFO("msm_vidc_dec: Sending VDEC_EVT_RESOURCES_LOST" + DBG("msm_vidc_dec: Sending VDEC_EVT_RESOURCES_LOST" " to client"); vdec_msg->vdec_msg_info.msgcode = VDEC_EVT_RESOURCES_LOST; break; case VCD_EVT_RESP_FLUSH_INPUT_DONE: - INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_FLUSH_INPUT_DONE" + DBG("msm_vidc_dec: Sending VDEC_MSG_RESP_FLUSH_INPUT_DONE" " to client"); vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_FLUSH_INPUT_DONE; break; case 
VCD_EVT_RESP_FLUSH_OUTPUT_DONE: - INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_FLUSH_OUTPUT_DONE" + DBG("msm_vidc_dec: Sending VDEC_MSG_RESP_FLUSH_OUTPUT_DONE" " to client"); vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_FLUSH_OUTPUT_DONE; break; case VCD_EVT_IND_HWERRFATAL: - INFO("msm_vidc_dec: Sending VDEC_MSG_EVT_HW_ERROR" + DBG("msm_vidc_dec: Sending VDEC_MSG_EVT_HW_ERROR" " to client"); vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_EVT_HW_ERROR; break; case VCD_EVT_RESP_START: - INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_START_DONE" + DBG("msm_vidc_dec: Sending VDEC_MSG_RESP_START_DONE" " to client"); vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_START_DONE; break; case VCD_EVT_RESP_STOP: - INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_STOP_DONE" + DBG("msm_vidc_dec: Sending VDEC_MSG_RESP_STOP_DONE" " to client"); vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_STOP_DONE; break; case VCD_EVT_RESP_PAUSE: - INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_PAUSE_DONE" + DBG("msm_vidc_dec: Sending VDEC_MSG_RESP_PAUSE_DONE" " to client"); vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_PAUSE_DONE; break; case VCD_EVT_IND_INFO_OUTPUT_RECONFIG: - INFO("msm_vidc_dec: Sending VDEC_MSG_EVT_INFO_CONFIG_CHANGED" + DBG("msm_vidc_dec: Sending VDEC_MSG_EVT_INFO_CONFIG_CHANGED" " to client"); vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_EVT_INFO_CONFIG_CHANGED; @@ -867,6 +871,12 @@ static u32 vid_dec_set_h264_mv_buffers(struct video_client_ctx *client_ctx, vcd_h264_mv_buffer->pmem_fd = mv_data->pmem_fd; vcd_h264_mv_buffer->offset = mv_data->offset; + if (mv_data->count > MAX_MV_BUFFERS) { + ERR("MV buffers maximum count reached, count = %d", + mv_data->count); + return false; + } + if (!vcd_get_ion_status()) { if (get_pmem_file(vcd_h264_mv_buffer->pmem_fd, (unsigned long *) (&(vcd_h264_mv_buffer-> @@ -898,6 +908,10 @@ static u32 vid_dec_set_h264_mv_buffers(struct video_client_ctx *client_ctx, ERR("%s(): get_ION_handle failed\n", __func__); goto import_ion_error; } + if (!ion_phys(client_ctx->user_ion_client, client_ctx->h264_mv_ion_handle, &iova, &len)) { + printk("%s ION phys 0x%lx len %u\n", __func__, iova, len); + iova = 0; + } rc = ion_handle_get_flags(client_ctx->user_ion_client, client_ctx->h264_mv_ion_handle, &ionflag); @@ -908,8 +922,7 @@ static u32 vid_dec_set_h264_mv_buffers(struct video_client_ctx *client_ctx, } vcd_h264_mv_buffer->kernel_virtual_addr = (u8 *) ion_map_kernel( client_ctx->user_ion_client, - client_ctx->h264_mv_ion_handle, - ionflag); + client_ctx->h264_mv_ion_handle); if (!vcd_h264_mv_buffer->kernel_virtual_addr) { ERR("%s(): get_ION_kernel virtual addr failed\n", __func__); @@ -936,7 +949,7 @@ static u32 vid_dec_set_h264_mv_buffers(struct video_client_ctx *client_ctx, VIDEO_DOMAIN, VIDEO_MAIN_POOL, SZ_4K, 0, (unsigned long *)&iova, (unsigned long *)&buffer_size, - UNCACHED, 0); + 0 , 0); if (rc || !iova) { ERR( "%s():get_ION_kernel physical addr fail, rc = %d iova = 0x%lx\n", @@ -1172,11 +1185,11 @@ static u32 vid_dec_pause_resume(struct video_client_ctx *client_ctx, u32 pause) } if (pause) { - INFO("msm_vidc_dec: PAUSE command from client = %p\n", + DBG("msm_vidc_dec: PAUSE command from client = %p\n", client_ctx); vcd_status = vcd_pause(client_ctx->vcd_handle); } else{ - INFO("msm_vidc_dec: RESUME command from client = %p\n", + DBG("msm_vidc_dec: RESUME command from client = %p\n", client_ctx); vcd_status = vcd_resume(client_ctx->vcd_handle); } @@ -1193,7 +1206,7 @@ static u32 vid_dec_start_stop(struct video_client_ctx *client_ctx, u32 start) struct vid_dec_msg *vdec_msg = NULL; u32 
vcd_status; - INFO("msm_vidc_dec: Inside %s()", __func__); + DBG("msm_vidc_dec: Inside %s()", __func__); if (!client_ctx) { ERR("\n Invalid client_ctx"); return false; @@ -1201,7 +1214,7 @@ static u32 vid_dec_start_stop(struct video_client_ctx *client_ctx, u32 start) if (start) { if (client_ctx->seq_header_set) { - INFO("%s(): Seq Hdr set: Send START_DONE to client", + DBG("%s(): Seq Hdr set: Send START_DONE to client", __func__); vdec_msg = kzalloc(sizeof(*vdec_msg), GFP_KERNEL); if (!vdec_msg) { @@ -1223,7 +1236,7 @@ static u32 vid_dec_start_stop(struct video_client_ctx *client_ctx, u32 start) client_ctx); } else { - INFO("%s(): Calling decode_start()", __func__); + DBG("%s(): Calling decode_start()", __func__); vcd_status = vcd_decode_start(client_ctx->vcd_handle, NULL); @@ -1261,17 +1274,20 @@ static u32 vid_dec_decode_frame(struct video_client_ctx *client_ctx, { struct vcd_frame_data vcd_input_buffer; unsigned long kernel_vaddr, phy_addr, user_vaddr; + struct buf_addr_table *buf_addr_table; int pmem_fd; struct file *file; s32 buffer_index = -1; u32 vcd_status = VCD_ERR_FAIL; u32 ion_flag = 0; + unsigned long buff_len; struct ion_handle *buff_handle = NULL; if (!client_ctx || !input_frame_info) return false; user_vaddr = (unsigned long)input_frame_info->bufferaddr; + buf_addr_table = client_ctx->input_buf_addr_table; if (vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_INPUT, true, &user_vaddr, &kernel_vaddr, @@ -1281,6 +1297,17 @@ static u32 vid_dec_decode_frame(struct video_client_ctx *client_ctx, memset((void *)&vcd_input_buffer, 0, sizeof(struct vcd_frame_data)); + + buff_len = buf_addr_table[buffer_index].buff_len; + if ((input_frame_info->datalen > buff_len) || + (input_frame_info->offset > buff_len)) { + ERR("%s(): offset(%u) or data length(%u) is greater"\ + " than buffer length(%lu)\n",\ + __func__, input_frame_info->offset, + input_frame_info->datalen, buff_len); + return false; + } + vcd_input_buffer.virtual = (u8 *) (kernel_vaddr + input_frame_info->pmem_offset); vcd_input_buffer.offset = input_frame_info->offset; @@ -1301,10 +1328,10 @@ static u32 vid_dec_decode_frame(struct video_client_ctx *client_ctx, kernel_vaddr, buffer_index, &buff_handle); - if (ion_flag == CACHED && buff_handle) { + if (ion_flag == ION_FLAG_CACHED && buff_handle) { msm_ion_do_cache_op(client_ctx->user_ion_client, buff_handle, - (unsigned long *)kernel_vaddr, + (unsigned long *) NULL, (unsigned long) vcd_input_buffer.data_len, ION_IOC_CLEAN_CACHES); } @@ -1379,7 +1406,7 @@ static u32 vid_dec_flush(struct video_client_ctx *client_ctx, { u32 vcd_status = VCD_ERR_FAIL; - INFO("msm_vidc_dec: %s() called with dir = %u", __func__, + DBG("msm_vidc_dec: %s() called with dir = %u", __func__, flush_dir); if (!client_ctx) { ERR("\n Invalid client_ctx"); @@ -1525,6 +1552,8 @@ static long vid_dec_ioctl(struct file *file, case VDEC_IOCTL_SET_PICRES: { struct vdec_picsize video_resoultion; + memset((void *)&video_resoultion, 0, + sizeof(struct vdec_picsize)); DBG("VDEC_IOCTL_SET_PICRES\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1540,6 +1569,8 @@ static long vid_dec_ioctl(struct file *file, case VDEC_IOCTL_GET_PICRES: { struct vdec_picsize video_resoultion; + memset((void *)&video_resoultion, 0, + sizeof(struct vdec_picsize)); DBG("VDEC_IOCTL_GET_PICRES\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1563,6 +1594,10 @@ static long vid_dec_ioctl(struct file *file, struct vdec_allocatorproperty vdec_buf_req; struct vcd_buffer_requirement buffer_req; 
DBG("VDEC_IOCTL_SET_BUFFER_REQ\n"); + memset((void *)&vdec_buf_req, 0, + sizeof(struct vdec_allocatorproperty)); + memset((void *)&buffer_req, 0, + sizeof(struct vcd_buffer_requirement)); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1599,6 +1634,8 @@ static long vid_dec_ioctl(struct file *file, case VDEC_IOCTL_GET_BUFFER_REQ: { struct vdec_allocatorproperty vdec_buf_req; + memset((void *)&vdec_buf_req, 0, + sizeof(struct vdec_allocatorproperty)); DBG("VDEC_IOCTL_GET_BUFFER_REQ\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1619,6 +1656,8 @@ static long vid_dec_ioctl(struct file *file, case VDEC_IOCTL_SET_BUFFER: { struct vdec_setbuffer_cmd setbuffer; + memset((void *)&setbuffer, 0, + sizeof(struct vdec_setbuffer_cmd)); DBG("VDEC_IOCTL_SET_BUFFER\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1633,6 +1672,8 @@ static long vid_dec_ioctl(struct file *file, case VDEC_IOCTL_FREE_BUFFER: { struct vdec_setbuffer_cmd setbuffer; + memset((void *)&setbuffer, 0, + sizeof(struct vdec_setbuffer_cmd)); DBG("VDEC_IOCTL_FREE_BUFFER\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1662,6 +1703,7 @@ static long vid_dec_ioctl(struct file *file, } case VDEC_IOCTL_CMD_PAUSE: { + DBG("VDEC_IOCTL_CMD_PAUSE\n"); result = vid_dec_pause_resume(client_ctx, true); if (!result) return -EIO; @@ -1681,6 +1723,8 @@ static long vid_dec_ioctl(struct file *file, struct vdec_input_frameinfo input_frame_info; u8 *desc_buf = NULL; u32 desc_size = 0; + memset((void *)&input_frame_info, 0, + sizeof(struct vdec_input_frameinfo)); DBG("VDEC_IOCTL_DECODE_FRAME\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1715,12 +1759,15 @@ static long vid_dec_ioctl(struct file *file, } case VDEC_IOCTL_SET_PERF_CLK: { + DBG("VDEC_IOCTL_SET_PERF_CLK\n"); vid_dec_set_turbo_clk(client_ctx); break; } case VDEC_IOCTL_FILL_OUTPUT_BUFFER: { struct vdec_fillbuffer_cmd fill_buffer_cmd; + memset((void *)&fill_buffer_cmd, 0, + sizeof(struct vdec_fillbuffer_cmd)); DBG("VDEC_IOCTL_FILL_OUTPUT_BUFFER\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1750,6 +1797,8 @@ static long vid_dec_ioctl(struct file *file, case VDEC_IOCTL_GET_NEXT_MSG: { struct vdec_msginfo vdec_msg_info; + memset((void *)&vdec_msg_info, 0, + sizeof(struct vdec_msginfo)); DBG("VDEC_IOCTL_GET_NEXT_MSG\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1773,6 +1822,10 @@ static long vid_dec_ioctl(struct file *file, struct vdec_seqheader seq_header; struct vcd_sequence_hdr vcd_seq_hdr; unsigned long ionflag; + memset((void *)&seq_header, 0, + sizeof(struct vdec_seqheader)); + memset((void *)&vcd_seq_hdr, 0, + sizeof(struct vcd_sequence_hdr)); DBG("VDEC_IOCTL_SET_SEQUENCE_HEADER\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) { ERR("Copy from user vdec_msg failed\n"); @@ -1799,7 +1852,7 @@ static long vid_dec_ioctl(struct file *file, client_ctx->seq_hdr_ion_handle = ion_import_dma_buf( client_ctx->user_ion_client, seq_header.pmem_fd); - if (!client_ctx->seq_hdr_ion_handle) { + if (IS_ERR_OR_NULL(client_ctx->seq_hdr_ion_handle)) { ERR("%s(): get_ION_handle failed\n", __func__); return false; } @@ -1815,8 +1868,8 @@ static long vid_dec_ioctl(struct file *file, } ker_vaddr = (unsigned long) ion_map_kernel( client_ctx->user_ion_client, - client_ctx->seq_hdr_ion_handle, ionflag); - if (!ker_vaddr) { + client_ctx->seq_hdr_ion_handle); + if (IS_ERR_OR_NULL((void *)ker_vaddr)) { 
ERR("%s():get_ION_kernel virtual addr fail\n", __func__); ion_free(client_ctx->user_ion_client, @@ -1836,6 +1889,7 @@ static long vid_dec_ioctl(struct file *file, client_ctx->seq_hdr_ion_handle); return false; } + printk("%s ION phys 0x%lx len %u\n", __func__, phy_addr, ion_len); len = ion_len; } vcd_seq_hdr.sequence_header_len = seq_header.seq_header_len; @@ -1859,7 +1913,7 @@ static long vid_dec_ioctl(struct file *file, return -EFAULT; } if (vcd_get_ion_status()) { - if (client_ctx->seq_hdr_ion_handle) { + if (!IS_ERR_OR_NULL(client_ctx->seq_hdr_ion_handle)) { ion_unmap_kernel(client_ctx->user_ion_client, client_ctx->seq_hdr_ion_handle); ion_free(client_ctx->user_ion_client, @@ -1880,7 +1934,7 @@ static long vid_dec_ioctl(struct file *file, } case VDEC_IOCTL_GET_INTERLACE_FORMAT: { - u32 progressive_only, interlace_format; + u32 progressive_only = 0, interlace_format = 0; DBG("VDEC_IOCTL_GET_INTERLACE_FORMAT\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1900,7 +1954,7 @@ static long vid_dec_ioctl(struct file *file, case VDEC_IOCTL_GET_DISABLE_DMX_SUPPORT: { - u32 disable_dmx; + u32 disable_dmx = 0; DBG("VDEC_IOCTL_GET_DISABLE_DMX_SUPPORT\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1916,7 +1970,7 @@ static long vid_dec_ioctl(struct file *file, } case VDEC_IOCTL_GET_DISABLE_DMX: { - u32 disable_dmx; + u32 disable_dmx = 0; DBG("VDEC_IOCTL_GET_DISABLE_DMX\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1941,7 +1995,7 @@ static long vid_dec_ioctl(struct file *file, } case VDEC_IOCTL_SET_PICTURE_ORDER: { - u32 picture_order; + u32 picture_order = 0; DBG("VDEC_IOCTL_SET_PICTURE_ORDER\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1956,6 +2010,8 @@ static long vid_dec_ioctl(struct file *file, case VDEC_IOCTL_SET_FRAME_RATE: { struct vdec_framerate frame_rate; + memset((void *)&frame_rate, 0, + sizeof(struct vdec_framerate)); DBG("VDEC_IOCTL_SET_FRAME_RATE\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1969,7 +2025,7 @@ static long vid_dec_ioctl(struct file *file, } case VDEC_IOCTL_SET_EXTRADATA: { - u32 extradata_flag; + u32 extradata_flag = 0; DBG("VDEC_IOCTL_SET_EXTRADATA\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -1984,6 +2040,8 @@ static long vid_dec_ioctl(struct file *file, case VDEC_IOCTL_SET_H264_MV_BUFFER: { struct vdec_h264_mv mv_data; + memset((void *)&mv_data, 0, + sizeof(struct vdec_h264_mv)); DBG("VDEC_IOCTL_SET_H264_MV_BUFFER\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -2007,6 +2065,8 @@ static long vid_dec_ioctl(struct file *file, case VDEC_IOCTL_GET_MV_BUFFER_SIZE: { struct vdec_mv_buff_size mv_buff; + memset((void *)&mv_buff, 0, + sizeof(struct vdec_mv_buff_size)); DBG("VDEC_IOCTL_GET_MV_BUFFER_SIZE\n"); if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg))) return -EFAULT; @@ -2027,6 +2087,7 @@ static long vid_dec_ioctl(struct file *file, } case VDEC_IOCTL_SET_IDR_ONLY_DECODING: { + DBG("VDEC_IOCTL_SET_IDR_ONLY_DECODING\n"); result = vid_dec_set_idr_only_decoding(client_ctx); if (!result) return -EIO; @@ -2034,6 +2095,7 @@ static long vid_dec_ioctl(struct file *file, } case VDEC_IOCTL_SET_CONT_ON_RECONFIG: { + DBG("VDEC_IOCTL_SET_CONT_ON_RECONFIG\n"); result = vid_dec_set_cont_on_reconfig(client_ctx); if (!result) return -EIO; @@ -2053,7 +2115,7 @@ static u32 vid_dec_close_client(struct video_client_ctx *client_ctx) struct vid_dec_msg *vdec_msg; u32 
vcd_status; - INFO("msm_vidc_dec: Inside %s()", __func__); + DBG("msm_vidc_dec: Inside %s()", __func__); keep_dig_voltage_low_in_idle(false); if (!client_ctx || (!client_ctx->vcd_handle)) { ERR("\n Invalid client_ctx"); @@ -2098,7 +2160,7 @@ int vid_dec_open_client(struct video_client_ctx **vid_clnt_ctx, int flags) u8 client_count; INFO("msm_vidc_dec: Inside %s()", __func__); - keep_dig_voltage_low_in_idle(true); + if (!vid_clnt_ctx) { ERR("Invalid input\n"); return -EINVAL; @@ -2121,8 +2183,8 @@ int vid_dec_open_client(struct video_client_ctx **vid_clnt_ctx, int flags) } client_index = vid_dec_get_empty_client_index(); - if (client_index == -1) { - ERR("%s() : No free clients client_index == -1\n", __func__); + if (client_index < 0) { + ERR("%s() : No free clients client_index == %d\n", __func__, client_index); rc = -ENOMEM; goto client_failure; } @@ -2159,6 +2221,9 @@ int vid_dec_open_client(struct video_client_ctx **vid_clnt_ctx, int flags) ERR("vcd_open returned error: %u", rc); goto client_failure; } + + keep_dig_voltage_low_in_idle(true); + client_ctx->seq_header_set = false; *vid_clnt_ctx = client_ctx; client_failure: @@ -2284,7 +2349,7 @@ static int vid_dec_vcd_init(void) u32 i; - INFO("msm_vidc_dec: Inside %s()", __func__); + DBG("msm_vidc_dec: Inside %s()", __func__); vid_dec_device_p->num_clients = 0; for (i = 0; i < VIDC_MAX_NUM_CLIENTS; i++) { @@ -2326,7 +2391,7 @@ static int __init vid_dec_init(void) int rc = 0, i = 0, j = 0; struct device *class_devp; - INFO("msm_vidc_dec: Inside %s()", __func__); + DBG("msm_vidc_dec: Inside %s()", __func__); vid_dec_device_p = kzalloc(sizeof(struct vid_dec_dev), GFP_KERNEL); if (!vid_dec_device_p) { ERR("%s Unable to allocate memory for vid_dec_dev\n", @@ -2378,8 +2443,8 @@ static int __init vid_dec_init(void) goto error_vid_dec_cdev_add; } } - vid_dec_vcd_init(); - return 0; + rc = vid_dec_vcd_init(); + return rc; error_vid_dec_cdev_add: for (j = i-1; j >= 0; j--) @@ -2404,7 +2469,7 @@ static void __exit vid_dec_exit(void) class_destroy(vid_dec_class); unregister_chrdev_region(vid_dec_dev_num, NUM_OF_DRIVER_NODES); kfree(vid_dec_device_p); - INFO("msm_vidc_dec: Return from %s()", __func__); + DBG("msm_vidc_dec: Return from %s()", __func__); } MODULE_LICENSE("GPL v2"); diff --git a/drivers/video/msm/vidc/common/enc/venc.c b/drivers/video/msm/vidc/common/enc/venc.c index 281fc51b..ee57d89e 100644 --- a/drivers/video/msm/vidc/common/enc/venc.c +++ b/drivers/video/msm/vidc/common/enc/venc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved. 
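vid_dec_decode_frame() (and its encoder counterpart in venc_internal.c further down) now validates the user-supplied offset and data length against the length recorded in the buffer address table before building the VCD frame. Condensed from the hunk:

unsigned long buff_len =
	client_ctx->input_buf_addr_table[buffer_index].buff_len;

if (input_frame_info->datalen > buff_len ||
    input_frame_info->offset > buff_len) {
	ERR("%s(): offset(%u) or data length(%u) is greater than buffer length(%lu)\n",
	    __func__, input_frame_info->offset,
	    input_frame_info->datalen, buff_len);
	return false;	/* reject before touching the buffer */
}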
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -243,15 +243,15 @@ static void vid_enc_output_frame_done(struct video_client_ctx *client_ctx, switch (event) { case VCD_EVT_RESP_OUTPUT_DONE: - DBG("Send INPUT_DON message to client = %p\n", + DBG("Send OUTPUT_DON message to client = %p\n", client_ctx); break; case VCD_EVT_RESP_OUTPUT_FLUSHED: - DBG("Send INPUT_FLUSHED message to client = %p\n", + DBG("Send OUTPUT_FLUSHED message to client = %p\n", client_ctx); break; default: - ERR("QVD: vid_enc_output_frame_done invalid cmd type: %d\n", event); + ERR("vid_enc_output_frame_done invalid cmd type: %d\n", event); venc_msg->venc_msg_info.statuscode = VEN_S_EFATAL; break; } @@ -279,6 +279,12 @@ static void vid_enc_output_frame_done(struct video_client_ctx *client_ctx, vcd_frame_data->time_stamp; venc_msg->venc_msg_info.buf.sz = vcd_frame_data->alloc_len; + + venc_msg->venc_msg_info.buf.metadata_len = + vcd_frame_data->metadata_len; + + venc_msg->venc_msg_info.buf.metadata_offset = + vcd_frame_data->metadata_offset; venc_msg->venc_msg_info.msgdata_size = @@ -292,10 +298,10 @@ static void vid_enc_output_frame_done(struct video_client_ctx *client_ctx, ion_flag = vidc_get_fd_info(client_ctx, BUFFER_TYPE_OUTPUT, pmem_fd, kernel_vaddr, buffer_index, &buff_handle); - if (ion_flag == CACHED && buff_handle) { + if (ion_flag == ION_FLAG_CACHED && buff_handle) { msm_ion_do_cache_op(client_ctx->user_ion_client, buff_handle, - (unsigned long *) kernel_vaddr, + (unsigned long *) NULL, (unsigned long)venc_msg->venc_msg_info.buf.sz, ION_IOC_CLEAN_INV_CACHES); } @@ -556,7 +562,6 @@ static int vid_enc_open(struct inode *inode, struct file *file) INFO(" msm_vidc_enc: Inside %s()", __func__); mutex_lock(&vid_enc_device_p->lock); - keep_dig_voltage_low_in_idle(true); start_cmd = 0; stop_cmd = 0; client_count = vcd_get_num_of_clients(); @@ -575,9 +580,9 @@ static int vid_enc_open(struct inode *inode, struct file *file) client_index = vid_enc_get_empty_client_index(); - if (client_index == -1) { - ERR("%s() : No free clients client_index == -1\n", - __func__); + if (client_index < 0) { + ERR("%s() : No free clients client_index == %d\n", + __func__, client_index); return -ENODEV; } @@ -619,6 +624,9 @@ static int vid_enc_open(struct inode *inode, struct file *file) mutex_unlock(&vid_enc_device_p->lock); return rc; } + + keep_dig_voltage_low_in_idle(true); + file->private_data = client_ctx; mutex_unlock(&vid_enc_device_p->lock); return rc; @@ -627,14 +635,16 @@ static int vid_enc_open(struct inode *inode, struct file *file) static int vid_enc_release(struct inode *inode, struct file *file) { struct video_client_ctx *client_ctx = file->private_data; - INFO(" msm_vidc_enc: Inside %s()", __func__); + INFO("\n msm_vidc_enc: Inside %s()", __func__); + vidc_cleanup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT); + vidc_cleanup_addr_table(client_ctx, BUFFER_TYPE_INPUT); keep_dig_voltage_low_in_idle(false); vid_enc_close_client(client_ctx); vidc_release_firmware(); #ifndef USE_RES_TRACKER vidc_disable_clk(); #endif - INFO(" msm_vidc_enc: Return from %s()", __func__); + INFO("\n msm_vidc_enc: Return from %s()", __func__); return 0; } @@ -755,8 +765,8 @@ static int __init vid_enc_init(void) __func__, rc); goto error_vid_enc_cdev_add; } - vid_enc_vcd_init(); - return 0; + rc = vid_enc_vcd_init(); + return rc; error_vid_enc_cdev_add: device_destroy(vid_enc_class, vid_enc_dev_num); @@ -778,7 +788,7 @@ static void __exit vid_enc_exit(void) 
class_destroy(vid_enc_class); unregister_chrdev_region(vid_enc_dev_num, 1); kfree(vid_enc_device_p); - INFO(" msm_vidc_enc: Return from %s()", __func__); + INFO("\n msm_vidc_enc: Return from %s()", __func__); } static long vid_enc_ioctl(struct file *file, unsigned cmd, unsigned long u_arg) @@ -801,6 +811,7 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_CMD_READ_NEXT_MSG: { struct venc_msg cb_msg; + memset((void *)&cb_msg, 0, sizeof(struct venc_msg)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_CMD_READ_NEXT_MSG\n"); @@ -822,6 +833,7 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER: { struct venc_buffer enc_buffer; + memset((void *)&enc_buffer, 0, sizeof(struct venc_buffer)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_CMD_ENCODE_FRAME" @@ -847,6 +859,8 @@ static long vid_enc_ioctl(struct file *file, { enum venc_buffer_dir buffer_dir; struct venc_bufferpayload buffer_info; + memset((void *)&buffer_info, 0, + sizeof(struct venc_bufferpayload)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_SET_INPUT_BUFFER/VEN_IOCTL_SET_OUTPUT_BUFFER\n"); @@ -870,6 +884,8 @@ static long vid_enc_ioctl(struct file *file, { enum venc_buffer_dir buffer_dir; struct venc_bufferpayload buffer_info; + memset((void *)&buffer_info, 0, + sizeof(struct venc_bufferpayload)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -897,6 +913,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_SET_OUTPUT_BUFFER_REQ: { struct venc_allocatorproperty allocatorproperty; + memset((void *)&allocatorproperty, 0, + sizeof(struct venc_allocatorproperty)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -924,6 +942,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_OUTPUT_BUFFER_REQ: { struct venc_allocatorproperty allocatorproperty; + memset((void *)&allocatorproperty, 0, + sizeof(struct venc_allocatorproperty)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -946,6 +966,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_CMD_FLUSH: { struct venc_bufferflush bufferflush; + memset((void *)&bufferflush, 0, + sizeof(struct venc_bufferflush)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -953,7 +975,7 @@ static long vid_enc_ioctl(struct file *file, if (copy_from_user(&bufferflush, venc_msg.in, sizeof(bufferflush))) return -EFAULT; - INFO(" %s(): Calling vid_enc_flush with mode = %lu", + INFO("\n %s(): Calling vid_enc_flush with mode = %lu", __func__, bufferflush.flush_mode); result = vid_enc_flush(client_ctx, &bufferflush); @@ -1008,6 +1030,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_SET_RECON_BUFFER: { struct venc_recon_addr venc_recon; + memset((void *)&venc_recon, 0, + sizeof(struct venc_recon_addr)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_SET_RECON_BUFFER\n"); @@ -1025,6 +1049,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_FREE_RECON_BUFFER: { struct venc_recon_addr venc_recon; + memset((void *)&venc_recon, 0, + sizeof(struct venc_recon_addr)); DBG("VEN_IOCTL_FREE_RECON_BUFFER\n"); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -1042,6 +1068,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_RECON_BUFFER_SIZE: { struct venc_recon_buff_size venc_recon_size; + memset((void *)&venc_recon_size, 0, + 
sizeof(struct venc_recon_buff_size)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_GET_RECON_BUFFER_SIZE\n"); @@ -1065,6 +1093,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_QP_RANGE: { struct venc_qprange qprange; + memset((void *)&qprange, 0, + sizeof(struct venc_qprange)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_G(S)ET_QP_RANGE\n"); @@ -1093,6 +1123,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_HEC: { struct venc_headerextension headerextension; + memset((void *)&headerextension, 0, + sizeof(struct venc_headerextension)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_(G)SET_HEC\n"); @@ -1123,6 +1155,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_TARGET_BITRATE: { struct venc_targetbitrate targetbitrate; + memset((void *)&targetbitrate, 0, + sizeof(struct venc_targetbitrate)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_(G)SET_TARGET_BITRATE\n"); @@ -1152,6 +1186,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_FRAME_RATE: { struct venc_framerate framerate; + memset((void *)&framerate, 0, + sizeof(struct venc_framerate)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_(G)SET_FRAME_RATE\n"); @@ -1181,6 +1217,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_VOP_TIMING_CFG: { struct venc_voptimingcfg voptimingcfg; + memset((void *)&voptimingcfg, 0, + sizeof(struct venc_voptimingcfg)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -1210,6 +1248,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_RATE_CTRL_CFG: { struct venc_ratectrlcfg ratectrlcfg; + memset((void *)&ratectrlcfg, 0, + sizeof(struct venc_ratectrlcfg)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_(G)SET_RATE_CTRL_CFG\n"); @@ -1239,6 +1279,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_MULTI_SLICE_CFG: { struct venc_multiclicecfg multiclicecfg; + memset((void *)&multiclicecfg, 0, + sizeof(struct venc_multiclicecfg)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_(G)SET_MULTI_SLICE_CFG\n"); @@ -1268,6 +1310,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_INTRA_REFRESH: { struct venc_intrarefresh intrarefresh; + memset((void *)&intrarefresh, 0, + sizeof(struct venc_intrarefresh)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_(G)SET_INTRA_REFRESH\n"); @@ -1296,6 +1340,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_DEBLOCKING_CFG: { struct venc_dbcfg dbcfg; + memset((void *)&dbcfg, 0, + sizeof(struct venc_dbcfg)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -1325,6 +1371,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_ENTROPY_CFG: { struct venc_entropycfg entropy_cfg; + memset((void *)&entropy_cfg, 0, + sizeof(struct venc_entropycfg)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_(G)SET_ENTROPY_CFG\n"); @@ -1352,6 +1400,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_SEQUENCE_HDR: { struct venc_seqheader seq_header; + memset((void *)&seq_header, 0, + sizeof(struct venc_seqheader)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -1360,6 +1410,12 @@ static long 
vid_enc_ioctl(struct file *file, return -EFAULT; DBG("VEN_IOCTL_GET_SEQUENCE_HDR\n"); + if (!access_ok(VERIFY_WRITE, seq_header.hdrbufptr, + seq_header.bufsize)) { + ERR("VEN_IOCTL_GET_SEQUENCE_HDR:"\ + " Userspace address verification failed.\n"); + return -EFAULT; + } result = vid_enc_get_sequence_header(client_ctx, &seq_header); if (!result) { @@ -1388,6 +1444,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_INTRA_PERIOD: { struct venc_intraperiod intraperiod; + memset((void *)&intraperiod, 0, + sizeof(struct venc_intraperiod)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_(G)SET_INTRA_PERIOD\n"); @@ -1416,6 +1474,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_SESSION_QP: { struct venc_sessionqp session_qp; + memset((void *)&session_qp, 0, + sizeof(struct venc_sessionqp)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("VEN_IOCTL_(G)SET_SESSION_QP\n"); @@ -1444,6 +1504,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_PROFILE_LEVEL: { struct ven_profilelevel profile_level; + memset((void *)&profile_level, 0, + sizeof(struct ven_profilelevel)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -1473,6 +1535,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_CODEC_PROFILE: { struct venc_profile profile; + memset((void *)&profile, 0, + sizeof(struct venc_profile)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -1502,6 +1566,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_SHORT_HDR: { struct venc_switch encoder_switch; + memset((void *)&encoder_switch, 0, + sizeof(struct venc_switch)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; DBG("Getting VEN_IOCTL_(G)SET_SHORT_HDR\n"); @@ -1531,6 +1597,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_BASE_CFG: { struct venc_basecfg base_config; + memset((void *)&base_config, 0, + sizeof(struct venc_basecfg)); DBG("VEN_IOCTL_SET_BASE_CFG\n"); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -1559,6 +1627,8 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_GET_LIVE_MODE: { struct venc_switch encoder_switch; + memset((void *)&encoder_switch, 0, + sizeof(struct venc_switch)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -1596,9 +1666,11 @@ static long vid_enc_ioctl(struct file *file, } case VEN_IOCTL_SET_METABUFFER_MODE: { - u32 metabuffer_mode, vcd_status; + u32 metabuffer_mode = 0, vcd_status = 0; struct vcd_property_hdr vcd_property_hdr; struct vcd_property_live live_mode; + memset((void *)&live_mode, 0, + sizeof(struct vcd_property_live)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -1620,7 +1692,7 @@ static long vid_enc_ioctl(struct file *file, case VEN_IOCTL_SET_EXTRADATA: case VEN_IOCTL_GET_EXTRADATA: { - u32 extradata_flag; + u32 extradata_flag = 0; DBG("VEN_IOCTL_(G)SET_EXTRADATA\n"); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; @@ -1676,6 +1748,8 @@ static long vid_enc_ioctl(struct file *file, struct vcd_property_sps_pps_for_idr_enable idr_enable; u32 vcd_status = VCD_ERR_FAIL; u32 enabled = 1; + memset((void *)&idr_enable, 0, + sizeof(struct vcd_property_sps_pps_for_idr_enable)); if (copy_from_user(&venc_msg, arg, sizeof(venc_msg))) return -EFAULT; diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.c b/drivers/video/msm/vidc/common/enc/venc_internal.c index 
bb611f8c..d9b79a20 100644 --- a/drivers/video/msm/vidc/common/enc/venc_internal.c +++ b/drivers/video/msm/vidc/common/enc/venc_internal.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -34,13 +34,11 @@ #include "vcd_res_tracker_api.h" #include "venc_internal.h" -/*HTC_START*/ extern u32 vidc_msg_debug; #define DBG(x...) \ if (vidc_msg_debug) { \ printk(KERN_DEBUG "[VID] " x); \ } -/*HTC_END*/ #define ERR(x...) printk(KERN_ERR "[VID] " x) static unsigned int vidc_mmu_subsystem[] = { @@ -930,7 +928,7 @@ u32 vid_enc_set_get_entropy_cfg(struct video_client_ctx *client_ctx, vcd_property_hdr.sz = sizeof(struct vcd_property_entropy_control); if (set_flag) { - switch (entropy_cfg->longentropysel) { + switch (entropy_cfg->entropysel) { case VEN_ENTROPY_MODEL_CAVLC: control.entropy_sel = VCD_ENTROPY_SEL_CAVLC; break; @@ -983,11 +981,11 @@ u32 vid_enc_set_get_entropy_cfg(struct video_client_ctx *client_ctx, } else { switch (control.entropy_sel) { case VCD_ENTROPY_SEL_CABAC: - entropy_cfg->cabacmodel = + entropy_cfg->entropysel = VEN_ENTROPY_MODEL_CABAC; break; case VCD_ENTROPY_SEL_CAVLC: - entropy_cfg->cabacmodel = + entropy_cfg->entropysel = VEN_ENTROPY_MODEL_CAVLC; break; default: @@ -1589,7 +1587,7 @@ u32 vid_enc_set_buffer(struct video_client_ctx *client_ctx, vcd_buffer_t = VCD_BUFFER_OUTPUT; } length = buffer_info->sz; - /*If buffer cannot be set, ignore */ + if (!vidc_insert_addr_table(client_ctx, dir_buffer, (unsigned long)buffer_info->pbuffer, &kernel_vaddr, @@ -1617,31 +1615,51 @@ u32 vid_enc_free_buffer(struct video_client_ctx *client_ctx, { enum vcd_buffer_type buffer_vcd = VCD_BUFFER_INPUT; enum buffer_dir dir_buffer = BUFFER_TYPE_INPUT; - u32 vcd_status = VCD_ERR_FAIL; - unsigned long kernel_vaddr; + unsigned long kernel_vaddr = 0; + unsigned long user_vaddr = 0; + unsigned long phy_addr = 0; + int pmem_fd = 0; + struct file *file; + s32 buffer_index = -1; - if (!client_ctx || !buffer_info) + if (!client_ctx || !buffer_info) { + ERR("%s(): wrong buffer, 0x%x, 0x%x", __func__, + (u32)client_ctx, (u32)buffer_info); return false; + } if (buffer == VEN_BUFFER_TYPE_OUTPUT) { dir_buffer = BUFFER_TYPE_OUTPUT; buffer_vcd = VCD_BUFFER_OUTPUT; } - /*If buffer NOT set, ignore */ + + user_vaddr = (unsigned long)buffer_info->pbuffer; + if (!vidc_lookup_addr_table(client_ctx, dir_buffer, + true, &user_vaddr, &kernel_vaddr, + &phy_addr, &pmem_fd, &file, + &buffer_index)) { + ERR("%s(): WNG: user_virt_addr = %p has not been set", + __func__, buffer_info->pbuffer); + return true; + } + + if (vcd_free_buffer(client_ctx->vcd_handle, buffer_vcd, + (u8 *)kernel_vaddr)) { + ERR("%s(): WNG: vcd_free_buffer(0x%x, %u, 0x%x) failed.", + __func__, (u32)client_ctx->vcd_handle, + (u32)buffer_vcd, (u32)kernel_vaddr); + } + + if (!vidc_delete_addr_table(client_ctx, dir_buffer, (unsigned long)buffer_info->pbuffer, &kernel_vaddr)) { - DBG("%s() : user_virt_addr = %p has not been set.", + ERR("%s(): WNG: user_virt_addr = %p has not been set.", __func__, buffer_info->pbuffer); return true; } - vcd_status = vcd_free_buffer(client_ctx->vcd_handle, buffer_vcd, - (u8 *)kernel_vaddr); - if (!vcd_status) - return true; - else - return false; + return true; } u32 vid_enc_encode_frame(struct video_client_ctx *client_ctx, @@ -1649,10 +1667,12 @@ u32 vid_enc_encode_frame(struct 
video_client_ctx *client_ctx, { struct vcd_frame_data vcd_input_buffer; unsigned long kernel_vaddr, phy_addr, user_vaddr; + struct buf_addr_table *buf_addr_table; int pmem_fd; struct file *file; s32 buffer_index = -1; u32 ion_flag = 0; + unsigned long buff_len; struct ion_handle *buff_handle = NULL; u32 vcd_status = VCD_ERR_FAIL; @@ -1661,16 +1681,28 @@ u32 vid_enc_encode_frame(struct video_client_ctx *client_ctx, return false; user_vaddr = (unsigned long)input_frame_info->ptrbuffer; + buf_addr_table = client_ctx->input_buf_addr_table; if (vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_INPUT, true, &user_vaddr, &kernel_vaddr, &phy_addr, &pmem_fd, &file, &buffer_index)) { - /* kernel_vaddr is found. send the frame to VCD */ + memset((void *)&vcd_input_buffer, 0, sizeof(struct vcd_frame_data)); + buff_len = buf_addr_table[buffer_index].buff_len; + + if ((input_frame_info->len > buff_len) || + (input_frame_info->offset > buff_len)) { + ERR("%s(): offset(%lu) or data length(%lu) is greater"\ + " than buffer length(%lu)\n",\ + __func__, input_frame_info->offset, + input_frame_info->len, buff_len); + return false; + } + vcd_input_buffer.virtual = (u8 *) (kernel_vaddr + input_frame_info->offset); @@ -1682,7 +1714,7 @@ u32 vid_enc_encode_frame(struct video_client_ctx *client_ctx, vcd_input_buffer.data_len = input_frame_info->len; vcd_input_buffer.time_stamp = input_frame_info->timestamp; - /* Rely on VCD using the same flags as OMX */ + vcd_input_buffer.flags = input_frame_info->flags; ion_flag = vidc_get_fd_info(client_ctx, BUFFER_TYPE_INPUT, @@ -1690,11 +1722,11 @@ u32 vid_enc_encode_frame(struct video_client_ctx *client_ctx, &buff_handle); if (vcd_input_buffer.data_len > 0) { - if (ion_flag == CACHED && buff_handle) { + if (ion_flag == 1 && buff_handle) { msm_ion_do_cache_op( client_ctx->user_ion_client, buff_handle, - (unsigned long *) vcd_input_buffer.virtual, + (unsigned long *) NULL, (unsigned long) vcd_input_buffer.data_len, ION_IOC_CLEAN_CACHES); } @@ -1829,6 +1861,9 @@ u32 vid_enc_set_recon_buffers(struct video_client_ctx *client_ctx, ERR("%s(): get_ION_handle failed\n", __func__); goto import_ion_error; } + if (!ion_phys(client_ctx->user_ion_client, client_ctx->recon_buffer_ion_handle[i], &phy_addr, &ion_len)) { + printk("%s ION phys 0x%lx len %u\n", __func__, phy_addr, ion_len); + } rc = ion_handle_get_flags(client_ctx->user_ion_client, client_ctx->recon_buffer_ion_handle[i], &ionflag); @@ -1839,8 +1874,7 @@ u32 vid_enc_set_recon_buffers(struct video_client_ctx *client_ctx, } control->kernel_virtual_addr = (u8 *) ion_map_kernel( client_ctx->user_ion_client, - client_ctx->recon_buffer_ion_handle[i], - ionflag); + client_ctx->recon_buffer_ion_handle[i]); if (!control->kernel_virtual_addr) { ERR("%s(): get_ION_kernel virtual addr fail\n", __func__); @@ -1866,10 +1900,10 @@ u32 vid_enc_set_recon_buffers(struct video_client_ctx *client_ctx, VIDEO_DOMAIN, VIDEO_MAIN_POOL, SZ_4K, - 0, + control->buffer_size * 2, (unsigned long *)&iova, (unsigned long *)&buffer_size, - UNCACHED, 0); + 0 , 0); if (rc || !iova) { ERR( "%s():ION map iommu addr fail, rc = %d, iova = 0x%lx\n", diff --git a/drivers/video/msm/vidc/common/init/vidc_init.c b/drivers/video/msm/vidc/common/init/vidc_init.c index 049a7b2a..1f824dc9 100644 --- a/drivers/video/msm/vidc/common/init/vidc_init.c +++ b/drivers/video/msm/vidc/common/init/vidc_init.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -36,12 +36,10 @@ #include "vidc_init_internal.h" #include "vcd_res_tracker_api.h" -/*HTC_START*/ #define DBG(x...) \ if (vidc_msg_debug) { \ printk(KERN_DEBUG "[VID] " x); \ } -/*HTC_END*/ #define VIDC_NAME "msm_vidc_reg" @@ -406,7 +404,7 @@ void vidc_cleanup_addr_table(struct video_client_ctx *client_ctx, enum buffer_dir buffer) { u32 *num_of_buffers = NULL; - u32 i = 0; + u32 i = 0, len = 0; struct buf_addr_table *buf_addr_table; if (buffer == BUFFER_TYPE_INPUT) { buf_addr_table = client_ctx->input_buf_addr_table; @@ -432,9 +430,6 @@ void vidc_cleanup_addr_table(struct video_client_ctx *client_ctx, } if (!IS_ERR_OR_NULL(buf_addr_table[i].buff_ion_handle)) { if (!IS_ERR_OR_NULL(client_ctx->user_ion_client)) { - ion_unmap_kernel(client_ctx->user_ion_client, - buf_addr_table[i]. - buff_ion_handle); if (!res_trk_check_for_sec_session()) { ion_unmap_iommu( client_ctx->user_ion_client, @@ -450,6 +445,38 @@ void vidc_cleanup_addr_table(struct video_client_ctx *client_ctx, } } } + len = sizeof(client_ctx->recon_buffer)/ + sizeof(struct vcd_property_enc_recon_buffer); + for (i = 0; i < len; i++) { + if (!vcd_get_ion_status()) { + if (client_ctx->recon_buffer[i].client_data) { + msm_subsystem_unmap_buffer( + (struct msm_mapped_buffer *) + client_ctx->recon_buffer[i].client_data); + client_ctx->recon_buffer[i].client_data = NULL; + } + } else { + if (!IS_ERR_OR_NULL( + client_ctx->recon_buffer_ion_handle[i])) { + ion_unmap_kernel(client_ctx->user_ion_client, + client_ctx->recon_buffer_ion_handle[i]); + if (!res_trk_check_for_sec_session() && + (res_trk_get_core_type() != + (u32)VCD_CORE_720P)) { + ion_unmap_iommu(client_ctx-> + user_ion_client, + client_ctx-> + recon_buffer_ion_handle[i], + VIDEO_DOMAIN, + VIDEO_MAIN_POOL); + } + ion_free(client_ctx->user_ion_client, + client_ctx->recon_buffer_ion_handle[i]); + client_ctx->recon_buffer_ion_handle[i] = NULL; + } + } + } + if (client_ctx->vcd_h264_mv_buffer.client_data) { msm_subsystem_unmap_buffer((struct msm_mapped_buffer *) client_ctx->vcd_h264_mv_buffer.client_data); @@ -530,7 +557,6 @@ u32 vidc_lookup_addr_table(struct video_client_ctx *client_ctx, *pmem_fd = buf_addr_table[i].pmem_fd; *file = buf_addr_table[i].file; *buffer_index = i; -/*HTC_START*/ if (search_with_user_vaddr) { DBG("kernel_vaddr = 0x%08lx, phy_addr = 0x%08lx " " pmem_fd = %d, struct *file = %p " @@ -553,7 +579,6 @@ u32 vidc_lookup_addr_table(struct video_client_ctx *client_ctx, " Not Found.\n", __func__, client_ctx, *kernel_vaddr); } -/*HTC_END*/ mutex_unlock(&client_ctx->enrty_queue_lock); return false; } @@ -578,6 +603,10 @@ u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx, int ret = 0; unsigned long buffer_size = 0; size_t ion_len; + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_codec codec; + unsigned long mapped_length = length; + u32 vcd_status = VCD_ERR_FAIL; if (!client_ctx || !length) return false; @@ -593,7 +622,21 @@ u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx, num_of_buffers = &client_ctx->num_of_output_buffers; DBG("%s(): buffer = OUTPUT #Buf = %d\n", __func__, *num_of_buffers); - length = length * 2; /* workaround for iommu video h/w bug */ + vcd_property_hdr.prop_id = VCD_I_CODEC; + vcd_property_hdr.sz = sizeof(struct vcd_property_codec); + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &codec); + if (vcd_status) { + ERR("%s(): get codec 
failed", __func__); + } else { + if (codec.codec != VCD_CODEC_H264) { + + DBG("%s(): Double iommu map size from %u "\ + "to %u for non-H264", __func__, + (u32)length, (u32)(length * 2)); + mapped_length = length * 2; + } + } } if (*num_of_buffers == max_num_buffers) { @@ -623,7 +666,7 @@ u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx, ? MSM_SUBSYSTEM_MAP_IOVA : MSM_SUBSYSTEM_MAP_IOVA|MSM_SUBSYSTEM_ALIGN_IOVA_8K; mapped_buffer = msm_subsystem_map_buffer(phys_addr, - length, flags, vidc_mmu_subsystem, + mapped_length, flags, vidc_mmu_subsystem, sizeof(vidc_mmu_subsystem)/sizeof(unsigned int)); if (IS_ERR(mapped_buffer)) { pr_err("buffer map failed"); @@ -641,6 +684,9 @@ u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx, __func__); goto bail_out_add; } + if (!ion_phys(client_ctx->user_ion_client, buff_ion_handle, &ionflag, &ion_len)) { + printk("%s ION phys 0x%lx len %u\n", __func__, ionflag, ion_len); + } if (ion_handle_get_flags(client_ctx->user_ion_client, buff_ion_handle, &ionflag)) { @@ -648,18 +694,6 @@ u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx, __func__); goto bail_out_add; } - *kernel_vaddr = (unsigned long) - ion_map_kernel( - client_ctx->user_ion_client, - buff_ion_handle, - ionflag); - if (IS_ERR_OR_NULL((void *)*kernel_vaddr)) { - ERR("%s():ION virtual addr fail\n", - __func__); - *kernel_vaddr = (unsigned long)NULL; - show_mem(SHOW_MEM_FILTER_NODES); - goto ion_free_error; - } if (res_trk_check_for_sec_session() || (res_trk_get_core_type() == (u32)VCD_CORE_720P)) { if (ion_phys(client_ctx->user_ion_client, @@ -680,10 +714,10 @@ u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx, VIDEO_DOMAIN, VIDEO_MAIN_POOL, SZ_8K, - length, + mapped_length, (unsigned long *) &iova, (unsigned long *) &buffer_size, - UNCACHED, + 0 , ION_IOMMU_UNMAP_DELAYED); if (ret || !iova) { ERR( @@ -698,12 +732,14 @@ u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx, iova; } } + (*kernel_vaddr) = phys_addr; phys_addr += buffer_addr_offset; (*kernel_vaddr) += buffer_addr_offset; buf_addr_table[*num_of_buffers].user_vaddr = user_vaddr; buf_addr_table[*num_of_buffers].kernel_vaddr = *kernel_vaddr; buf_addr_table[*num_of_buffers].pmem_fd = pmem_fd; buf_addr_table[*num_of_buffers].file = file; + buf_addr_table[*num_of_buffers].buff_len = length; buf_addr_table[*num_of_buffers].phy_addr = phys_addr; buf_addr_table[*num_of_buffers].buff_ion_handle = buff_ion_handle; @@ -718,9 +754,6 @@ u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx, mutex_unlock(&client_ctx->enrty_queue_lock); return true; ion_map_error: - if (*kernel_vaddr && buff_ion_handle) - ion_unmap_kernel(client_ctx->user_ion_client, buff_ion_handle); -ion_free_error: if (!IS_ERR_OR_NULL(buff_ion_handle)) ion_free(client_ctx->user_ion_client, buff_ion_handle); bail_out_add: @@ -729,10 +762,6 @@ u32 vidc_insert_addr_table(struct video_client_ctx *client_ctx, } EXPORT_SYMBOL(vidc_insert_addr_table); -/* - * Similar to vidc_insert_addr_table except intended for in-kernel - * use where buffers have already been alloced and mapped properly - */ u32 vidc_insert_addr_table_kernel(struct video_client_ctx *client_ctx, enum buffer_dir buffer, unsigned long user_vaddr, unsigned long kernel_vaddr, unsigned long phys_addr, @@ -785,6 +814,7 @@ u32 vidc_insert_addr_table_kernel(struct video_client_ctx *client_ctx, buf_addr_table[*num_of_buffers].kernel_vaddr = kernel_vaddr; buf_addr_table[*num_of_buffers].pmem_fd = -1; buf_addr_table[*num_of_buffers].file = NULL; + 
buf_addr_table[*num_of_buffers].buff_len = length; buf_addr_table[*num_of_buffers].phy_addr = phys_addr; buf_addr_table[*num_of_buffers].buff_ion_handle = NULL; *num_of_buffers = *num_of_buffers + 1; @@ -841,8 +871,6 @@ u32 vidc_delete_addr_table(struct video_client_ctx *client_ctx, } *kernel_vaddr = buf_addr_table[i].kernel_vaddr; if (buf_addr_table[i].buff_ion_handle) { - ion_unmap_kernel(client_ctx->user_ion_client, - buf_addr_table[i].buff_ion_handle); if (!res_trk_check_for_sec_session() && (res_trk_get_core_type() != (u32)VCD_CORE_720P)) { ion_unmap_iommu(client_ctx->user_ion_client, @@ -869,6 +897,8 @@ u32 vidc_delete_addr_table(struct video_client_ctx *client_ctx, buf_addr_table[*num_of_buffers - 1].pmem_fd; buf_addr_table[i].file = buf_addr_table[*num_of_buffers - 1].file; + buf_addr_table[i].buff_len = + buf_addr_table[*num_of_buffers - 1].buff_len; buf_addr_table[i].buff_ion_handle = buf_addr_table[*num_of_buffers - 1].buff_ion_handle; } diff --git a/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c index 3371ee8d..7698db0c 100644 --- a/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c +++ b/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1621,6 +1621,7 @@ void vcd_do_client_state_transition(struct vcd_clnt_ctxt *cctxt, if (!cctxt || to_state >= VCD_CLIENT_STATE_MAX) { VCD_MSG_ERROR("Bad parameters. cctxt=%p, to_state=%d", cctxt, to_state); + return; } state_ctxt = &cctxt->clnt_state; diff --git a/drivers/video/msm/vidc/common/vcd/vcd_core.h b/drivers/video/msm/vidc/common/vcd/vcd_core.h index 8126a0e3..886956f7 100644 --- a/drivers/video/msm/vidc/common/vcd/vcd_core.h +++ b/drivers/video/msm/vidc/common/vcd/vcd_core.h @@ -13,7 +13,7 @@ #ifndef _VCD_CORE_H_ #define _VCD_CORE_H_ -#include +#include #include #include "vcd_ddl_api.h" diff --git a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c index 35c89a4e..deb4e7cd 100644 --- a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c +++ b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -36,6 +36,7 @@ void vcd_do_device_state_transition(struct vcd_drv_ctxt *drv_ctxt, if (!drv_ctxt || to_state >= VCD_DEVICE_STATE_MAX) { VCD_MSG_ERROR("Bad parameters. drv_ctxt=%p, to_state=%d", drv_ctxt, to_state); + return; } state_ctxt = &drv_ctxt->dev_state; diff --git a/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c b/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c index 74123987..45307c04 100644 --- a/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c +++ b/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -88,8 +88,13 @@ u32 vcd_sched_add_client(struct vcd_clnt_ctxt *cctxt) prop_hdr.sz = sizeof(cctxt->frm_p_units); rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &cctxt->frm_p_units); - VCD_FAILED_RETURN(rc, - "Failed: Get DDL_I_FRAME_PROC_UNITS"); + if (VCD_FAILED(rc)) { + kfree(sched_cctxt); + VCD_MSG_ERROR( + "Failed: Get DDL_I_FRAME_PROC_UNITS"); + return rc; + } + if (cctxt->decoding) { cctxt->frm_rate.fps_numerator = VCD_DEC_INITIAL_FRAME_RATE; @@ -99,8 +104,12 @@ u32 vcd_sched_add_client(struct vcd_clnt_ctxt *cctxt) prop_hdr.sz = sizeof(cctxt->frm_rate); rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &cctxt->frm_rate); - VCD_FAILED_RETURN(rc, - "Failed: Get VCD_I_FRAME_RATE"); + if (VCD_FAILED(rc)) { + kfree(sched_cctxt); + VCD_MSG_ERROR( + "Failed: Get VCD_I_FRAME_RATE"); + return rc; + } } if (!cctxt->perf_set_by_client) cctxt->reqd_perf_lvl = cctxt->frm_p_units * diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c index f1cbf86b..9ae91d20 100644 --- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c +++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -92,7 +92,7 @@ static int vcd_pmem_alloc(size_t sz, u8 **kernel_vaddr, u8 **phy_addr, } else { map_buffer->alloc_handle = ion_alloc( cctxt->vcd_ion_client, sz, SZ_4K, - memtype); + memtype, res_trk_get_ion_flags()); if (!map_buffer->alloc_handle) { pr_err("%s() ION alloc failed", __func__); goto bailout; @@ -105,8 +105,7 @@ static int vcd_pmem_alloc(size_t sz, u8 **kernel_vaddr, u8 **phy_addr, } *kernel_vaddr = (u8 *) ion_map_kernel( cctxt->vcd_ion_client, - map_buffer->alloc_handle, - ionflag); + map_buffer->alloc_handle); if (!(*kernel_vaddr)) { pr_err("%s() ION map failed", __func__); goto ion_free_bailout; @@ -120,7 +119,7 @@ static int vcd_pmem_alloc(size_t sz, u8 **kernel_vaddr, u8 **phy_addr, 0, (unsigned long *)&iova, (unsigned long *)&buffer_size, - UNCACHED, 0); + 0 , 0); if (ret || !iova) { pr_err( "%s() ION iommu map failed, ret = %d, iova = 0x%lx", @@ -786,10 +785,11 @@ u32 vcd_free_one_buffer_internal( buf_pool->allocated--; } - if (!cctxt->decoding) { - memset(buf_entry, 0, sizeof(struct vcd_buffer_entry)); - } - + buf_entry->valid = buf_entry->allocated = buf_entry->in_use = 0; + buf_entry->alloc = buf_entry->virtual = buf_entry->physical = NULL; + buf_entry->sz = 0; + memset(&buf_entry->frame, 0, sizeof(struct vcd_frame_data)); + buf_pool->validated--; if (buf_pool->validated == 0) vcd_free_buffer_pool_entries(buf_pool); @@ -2027,12 +2027,12 @@ u32 vcd_handle_input_done( transc = (struct vcd_transc *)frame->vcd_frm.ip_frm_tag; orig_frame = vcd_find_buffer_pool_entry(&cctxt->in_buf_pool, transc->ip_buf_entry->virtual); - + if (!orig_frame) { - VCD_MSG_ERROR("Bad buffer addr: %p", transc->ip_buf_entry->virtual); - return VCD_ERR_FAIL; + rc = VCD_ERR_ILLEGAL_PARM; + VCD_FAILED_RETURN(rc, "Couldn't find buffer"); } - + if ((transc->ip_buf_entry->frame.virtual != frame->vcd_frm.virtual) || !transc->ip_buf_entry->in_use) { @@ -2059,8 +2059,11 @@ u32 vcd_handle_input_done( return VCD_ERR_FAIL; } - if (orig_frame != transc->ip_buf_entry) + 
if (orig_frame != transc->ip_buf_entry) { + VCD_MSG_HIGH("%s: free duplicate buffer", __func__); kfree(transc->ip_buf_entry); + transc->ip_buf_entry = NULL; + } transc->ip_buf_entry = NULL; transc->input_done = true; @@ -2491,6 +2494,7 @@ u32 vcd_handle_first_fill_output_buffer_for_enc( struct vcd_sequence_hdr seq_hdr; struct vcd_property_sps_pps_for_idr_enable idr_enable; struct vcd_property_codec codec; + u8 *kernel_vaddr = NULL; *handled = true; prop_hdr.prop_id = DDL_I_SEQHDR_PRESENT; prop_hdr.sz = sizeof(seqhdr_present); @@ -2509,7 +2513,26 @@ u32 vcd_handle_first_fill_output_buffer_for_enc( if (!cctxt->secure) { prop_hdr.prop_id = VCD_I_SEQ_HEADER; prop_hdr.sz = sizeof(struct vcd_sequence_hdr); - seq_hdr.sequence_header = frm_entry->virtual; + if (vcd_get_ion_status()) { + kernel_vaddr = (u8 *)ion_map_kernel( + cctxt->vcd_ion_client, + frm_entry->buff_ion_handle); + if (IS_ERR_OR_NULL(kernel_vaddr)) { + VCD_MSG_ERROR("%s: 0x%x = "\ + "ion_map_kernel(0x%x, 0x%x) fail", + __func__, + (u32)kernel_vaddr, + (u32)cctxt->vcd_ion_client, + (u32)frm_entry-> + buff_ion_handle); + return VCD_ERR_FAIL; + } + } else { + VCD_MSG_ERROR("%s: ION status is NULL", + __func__); + return VCD_ERR_FAIL; + } + seq_hdr.sequence_header = kernel_vaddr; seq_hdr.sequence_header_len = frm_entry->alloc_len; rc = ddl_get_property(cctxt->ddl_handle, @@ -2520,6 +2543,8 @@ u32 vcd_handle_first_fill_output_buffer_for_enc( frm_entry->time_stamp = 0; frm_entry->flags |= VCD_FRAME_FLAG_CODECCONFIG; + VCD_MSG_LOW("%s: header len = %u", + __func__, frm_entry->data_len); } else VCD_MSG_ERROR("rc = 0x%x. Failed:" "ddl_get_property: VCD_I_SEQ_HEADER", @@ -2557,6 +2582,16 @@ u32 vcd_handle_first_fill_output_buffer_for_enc( VCD_MSG_ERROR( "rc = 0x%x. Failed: ddl_get_property:VCD_I_CODEC", rc); + if (kernel_vaddr) { + if (!IS_ERR_OR_NULL(frm_entry->buff_ion_handle)) { + ion_map_kernel(cctxt->vcd_ion_client, + frm_entry->buff_ion_handle); + } else { + VCD_MSG_ERROR("%s: Invalid ion_handle (0x%x)", + __func__, (u32)frm_entry->buff_ion_handle); + rc = VCD_ERR_FAIL; + } + } return rc; } @@ -2824,6 +2859,7 @@ u32 vcd_handle_input_frame( struct vcd_frame_data *frm_entry; u32 rc = VCD_S_SUCCESS; u32 eos_handled = false; + u32 duplicate_buffer = false; VCD_MSG_LOW("vcd_handle_input_frame:"); @@ -2899,6 +2935,8 @@ u32 vcd_handle_input_frame( buf_entry->allocated = orig_frame->allocated; buf_entry->in_use = 1; buf_entry->frame = orig_frame->frame; + duplicate_buffer = true; + VCD_MSG_HIGH("%s: duplicate buffer", __func__); } else buf_entry = orig_frame; @@ -2922,6 +2960,10 @@ u32 vcd_handle_input_frame( if (VCD_FAILED(rc) || eos_handled) { VCD_MSG_HIGH("rc = 0x%x, eos_handled = %d", rc, eos_handled); + if ((duplicate_buffer) && (buf_entry)) { + kfree(buf_entry); + buf_entry = NULL; + } return rc; } diff --git a/fs/Kconfig b/fs/Kconfig index 524fe941..3fb1ca3a 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -321,4 +321,5 @@ config EXFAT_VERSION string "Tuxera EXFAT version" depends on EXFAT_FS default "target/htc.d/htc" + endmenu diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index fe28b5e6..56e17330 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -20,14 +20,6 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- */ -/* - * Extents support for EXT4 - * - * TODO: - * - ext4*_error() should be used in some situations - * - analyze all BUG()/BUG_ON(), use -EIO where appropriate - * - smart tree reduction - */ #include #include @@ -44,13 +36,9 @@ #include -/* - * used by extent splitting. 
- */ -#define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ - due to ENOSPC */ -#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ -#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ +#define EXT4_EXT_MAY_ZEROOUT 0x1 +#define EXT4_EXT_MARK_UNINIT1 0x2 +#define EXT4_EXT_MARK_UNINIT2 0x4 static int ext4_split_extent(handle_t *handle, struct inode *inode, @@ -86,29 +74,18 @@ static int ext4_ext_truncate_extend_restart(handle_t *handle, return err; } -/* - * could return: - * - EROFS - * - ENOMEM - */ static int ext4_ext_get_access(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { if (path->p_bh) { - /* path points to block */ + return ext4_journal_get_write_access(handle, path->p_bh); } - /* path points to leaf/index in inode body */ - /* we use in-core data, no need to protect them */ + + return 0; } -/* - * could return: - * - EROFS - * - ENOMEM - * - EIO - */ #define ext4_ext_dirty(handle, inode, path) \ __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path)) static int __ext4_ext_dirty(const char *where, unsigned int line, @@ -117,11 +94,11 @@ static int __ext4_ext_dirty(const char *where, unsigned int line, { int err; if (path->p_bh) { - /* path points to block */ + err = __ext4_handle_dirty_metadata(where, line, handle, inode, path->p_bh); } else { - /* path points to leaf/index in inode body */ + err = ext4_mark_inode_dirty(handle, inode); } return err; @@ -135,23 +112,6 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, int depth = path->p_depth; struct ext4_extent *ex; - /* - * Try to predict block placement assuming that we are - * filling in a file which will eventually be - * non-sparse --- i.e., in the case of libbfd writing - * an ELF object sections out-of-order but in a way - * the eventually results in a contiguous object or - * executable file, or some database extending a table - * space file. However, this is actually somewhat - * non-ideal if we are writing a sparse file such as - * qemu or KVM writing a raw image file that is going - * to stay fairly sparse, since it will end up - * fragmenting the file system's free space. Maybe we - * should have some hueristics or some way to allow - * userspace to pass a hint to file system, - * especially if the latter case turns out to be - * common. - */ ex = path[depth].p_ext; if (ex) { ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); @@ -163,19 +123,14 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, return ext_pblk - (ext_block - block); } - /* it looks like index is empty; - * try to find starting block from index itself */ if (path[depth].p_bh) return path[depth].p_bh->b_blocknr; } - /* OK. 
use inode's group */ + return ext4_inode_to_goal_block(inode); } -/* - * Allocation for a meta data block - */ static ext4_fsblk_t ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, @@ -243,11 +198,6 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check) return size; } -/* - * Calculate the number of metadata blocks needed - * to allocate @blocks - * Worse case is one block per extent - */ int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) { struct ext4_inode_info *ei = EXT4_I(inode); @@ -256,14 +206,6 @@ int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent_idx)); - /* - * If the new delayed allocation block is contiguous with the - * previous da block, it can share index blocks with the - * previous block, so we only need to allocate a new index - * block every idxs leaf blocks. At ldxs**2 blocks, we need - * an additional index block, and at ldxs**3 blocks, yet - * another index blocks. - */ if (ei->i_da_metadata_calc_len && ei->i_da_metadata_calc_last_lblock+1 == lblock) { int num = 0; @@ -281,10 +223,6 @@ int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) return num; } - /* - * In the worst case we need a new set of index blocks at - * every level of the inode's extent tree. - */ ei->i_da_metadata_calc_len = 1; ei->i_da_metadata_calc_last_lblock = lblock; return ext_depth(inode) + 1; @@ -339,7 +277,7 @@ static int ext4_valid_extent_entries(struct inode *inode, entries = le16_to_cpu(eh->eh_entries); if (depth == 0) { - /* leaf entries */ + struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); while (entries) { if (!ext4_valid_extent(inode, ext)) @@ -507,11 +445,6 @@ void ext4_ext_drop_refs(struct ext4_ext_path *path) } } -/* - * ext4_ext_binsearch_idx: - * binary search for the closest index of the given block - * the header must be checked before calling this - */ static void ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) @@ -567,11 +500,6 @@ ext4_ext_binsearch_idx(struct inode *inode, } -/* - * ext4_ext_binsearch: - * binary search for closest extent of the given block - * the header must be checked before calling this - */ static void ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) @@ -580,10 +508,6 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_extent *r, *l, *m; if (eh->eh_entries == 0) { - /* - * this leaf is empty: - * we get such a leaf in split/add case - */ return; } @@ -654,7 +578,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, eh = ext_inode_hdr(inode); depth = ext_depth(inode); - /* account possible depth increase */ + if (!path) { path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), GFP_NOFS); @@ -666,7 +590,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, path[0].p_bh = NULL; i = depth; - /* walk through the tree */ + while (i) { int need_to_validate = 0; @@ -688,7 +612,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, put_bh(bh); goto err; } - /* validate the extent entries */ + need_to_validate = 1; } eh = ext_block_hdr(bh); @@ -711,9 +635,9 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, path[ppos].p_ext = NULL; path[ppos].p_idx = NULL; - /* find extent */ + ext4_ext_binsearch(inode, path + ppos, block); - /* if not an empty leaf */ + if (path[ppos].p_ext) path[ppos].p_block = 
ext4_ext_pblock(path[ppos].p_ext); @@ -728,11 +652,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, return ERR_PTR(-EIO); } -/* - * ext4_ext_insert_index: - * insert new index [@logical;@ptr] into the block at @curp; - * check where to insert: before @curp or after @curp - */ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, struct ext4_ext_path *curp, int logical, ext4_fsblk_t ptr) @@ -761,11 +680,11 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, } if (logical > le32_to_cpu(curp->p_idx->ei_block)) { - /* insert after */ + ext_debug("insert new index %d after: %llu\n", logical, ptr); ix = curp->p_idx + 1; } else { - /* insert before */ + ext_debug("insert new index %d before: %llu\n", logical, ptr); ix = curp->p_idx; } @@ -799,16 +718,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, return err; } -/* - * ext4_ext_split: - * inserts new subtree into the path, using free index entry - * at depth @at: - * - allocates all needed blocks (new leaf and all intermediate index blocks) - * - makes decision where to split - * - moves remaining extents and index entries (right to the split point) - * into the newly allocated blocks - * - initializes subtree - */ static int ext4_ext_split(handle_t *handle, struct inode *inode, unsigned int flags, struct ext4_ext_path *path, @@ -821,14 +730,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, int i = at, k, m, a; ext4_fsblk_t newblock, oldblock; __le32 border; - ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ + ext4_fsblk_t *ablocks = NULL; int err = 0; - /* make decision: where to split? */ - /* FIXME: now decision is simplest: at current extent */ + + - /* if current leaf will be split, then we should use - * border from split point */ if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); return -EIO; @@ -845,23 +752,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, le32_to_cpu(border)); } - /* - * If error occurs, then we break processing - * and mark filesystem read-only. index won't - * be inserted and tree will be in consistent - * state. Next mount will repair buffers too. - */ - /* - * Get array to track all allocated blocks. - * We need this to handle errors and free blocks - * upon them. 
- */ ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); if (!ablocks) return -ENOMEM; - /* allocate all needed blocks */ + ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); for (a = 0; a < depth - at; a++) { newblock = ext4_ext_new_meta_block(handle, inode, path, @@ -871,7 +767,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, ablocks[a] = newblock; } - /* initialize new leaf */ + newblock = ablocks[--a]; if (unlikely(newblock == 0)) { EXT4_ERROR_INODE(inode, "newblock == 0!"); @@ -895,7 +791,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, neh->eh_magic = EXT4_EXT_MAGIC; neh->eh_depth = 0; - /* move remainder of path[depth] to the new leaf */ + if (unlikely(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max)) { EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", @@ -904,7 +800,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, err = -EIO; goto cleanup; } - /* start copy from next extent */ + m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; ext4_ext_show_move(inode, path, newblock, depth); if (m) { @@ -923,7 +819,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, brelse(bh); bh = NULL; - /* correct old leaf */ + if (m) { err = ext4_ext_get_access(handle, inode, path + depth); if (err) @@ -935,7 +831,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } - /* create intermediate indexes */ + k = depth - at - 1; if (unlikely(k < 0)) { EXT4_ERROR_INODE(inode, "k %d < 0!", k); @@ -944,8 +840,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } if (k) ext_debug("create %d intermediate indices\n", k); - /* insert new index into current index block */ - /* current depth stored in i var */ + + i = depth - 1; while (k--) { oldblock = newblock; @@ -973,7 +869,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, ext_debug("int.index at %d (block %llu): %u -> %llu\n", i, newblock, le32_to_cpu(border), oldblock); - /* move remainder of path[i] to the new index block */ + if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != EXT_LAST_INDEX(path[i].p_hdr))) { EXT4_ERROR_INODE(inode, @@ -982,7 +878,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, err = -EIO; goto cleanup; } - /* start copy indexes */ + m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, EXT_MAX_INDEX(path[i].p_hdr)); @@ -1001,7 +897,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, brelse(bh); bh = NULL; - /* correct old index */ + if (m) { err = ext4_ext_get_access(handle, inode, path + i); if (err) @@ -1015,7 +911,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, i--; } - /* insert new index */ + err = ext4_ext_insert_index(handle, inode, path + at, le32_to_cpu(border), newblock); @@ -1027,7 +923,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } if (err) { - /* free all allocated blocks in error case */ + for (i = 0; i < depth; i++) { if (!ablocks[i]) continue; @@ -1040,14 +936,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, return err; } -/* - * ext4_ext_grow_indepth: - * implements tree growing procedure: - * - allocates new block - * - moves top-level data (index block or leaf) into the new block - * - initializes new top-level, creating index that points to the - * just created block - */ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, unsigned int flags, struct ext4_extent *newext) @@ 
-1076,14 +964,12 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, goto out; } - /* move top-level index/leaf into new block */ + memmove(bh->b_data, EXT4_I(inode)->i_data, sizeof(EXT4_I(inode)->i_data)); - /* set size of new block */ + neh = ext_block_hdr(bh); - /* old root could have indexes or leaves - * so calculate e_max right way */ if (ext_depth(inode)) neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); else @@ -1096,12 +982,12 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, if (err) goto out; - /* Update top-level index: num,max,pointer */ + neh = ext_inode_hdr(inode); neh->eh_entries = cpu_to_le16(1); ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); if (neh->eh_depth == 0) { - /* Root extent block becomes index block */ + neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); EXT_FIRST_INDEX(neh)->ei_block = EXT_FIRST_EXTENT(neh)->ee_block; @@ -1119,11 +1005,6 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, return err; } -/* - * ext4_ext_create_new_leaf: - * finds empty index and adds new leaf. - * if no free index is found, then it requests in-depth growing. - */ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, unsigned int flags, struct ext4_ext_path *path, @@ -1135,23 +1016,19 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, repeat: i = depth = ext_depth(inode); - /* walk up to the tree and look for free index entry */ + curp = path + depth; while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { i--; curp--; } - /* we use already allocated block for index block, - * so subsequent data blocks should be contiguous */ if (EXT_HAS_FREE_INDEX(curp)) { - /* if we found index with free entry, then use that - * entry: create all needed subtree and add new leaf */ err = ext4_ext_split(handle, inode, flags, path, newext, i); if (err) goto out; - /* refill path */ + ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, (ext4_lblk_t)le32_to_cpu(newext->ee_block), @@ -1159,12 +1036,12 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, if (IS_ERR(path)) err = PTR_ERR(path); } else { - /* tree is full, time to grow in depth */ + err = ext4_ext_grow_indepth(handle, inode, flags, newext); if (err) goto out; - /* refill path */ + ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, (ext4_lblk_t)le32_to_cpu(newext->ee_block), @@ -1174,13 +1051,9 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, goto out; } - /* - * only first (depth 0 -> 1) produces free space; - * in all other cases we have to split the grown tree - */ depth = ext_depth(inode); if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { - /* now we need to split */ + goto repeat; } } @@ -1189,13 +1062,6 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, return err; } -/* - * search the closest allocated block to the left for *logical - * and returns it at @logical + it's physical address at @phys - * if *logical is the smallest allocated block, the function - * returns 0 at @phys - * return value contains 0 (success) or error code - */ static int ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t *logical, ext4_fsblk_t *phys) @@ -1214,9 +1080,6 @@ static int ext4_ext_search_left(struct inode *inode, if (depth == 0 && path->p_ext == NULL) return 0; - /* usually extent in the path covers blocks smaller - * then *logical, but it can be that extent is the 
- * first one in the file */ ex = path[depth].p_ext; ee_len = ext4_ext_get_actual_len(ex); @@ -1254,13 +1117,6 @@ static int ext4_ext_search_left(struct inode *inode, return 0; } -/* - * search the closest allocated block to the right for *logical - * and returns it at @logical + it's physical address at @phys - * if *logical is the largest allocated block, the function - * returns 0 at @phys - * return value contains 0 (success) or error code - */ static int ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t *logical, ext4_fsblk_t *phys, @@ -1271,7 +1127,7 @@ static int ext4_ext_search_right(struct inode *inode, struct ext4_extent_idx *ix; struct ext4_extent *ex; ext4_fsblk_t block; - int depth; /* Note, NOT eh_depth; depth from top of tree */ + int depth; int ee_len; if (unlikely(path == NULL)) { @@ -1284,9 +1140,6 @@ static int ext4_ext_search_right(struct inode *inode, if (depth == 0 && path->p_ext == NULL) return 0; - /* usually extent in the path covers blocks smaller - * then *logical, but it can be that extent is the - * first one in the file */ ex = path[depth].p_ext; ee_len = ext4_ext_get_actual_len(ex); @@ -1317,25 +1170,22 @@ static int ext4_ext_search_right(struct inode *inode, } if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { - /* next allocated block in this leaf */ + ex++; goto found_extent; } - /* go up and search for index to the right */ + while (--depth >= 0) { ix = path[depth].p_idx; if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) goto got_index; } - /* we've gone up to the root and found no index to the right */ + return 0; got_index: - /* we've found index to the right, let's - * follow it and find the closest allocated - * block to the right */ ix++; block = ext4_idx_pblock(ix); while (++depth < path->p_depth) { @@ -1343,7 +1193,7 @@ static int ext4_ext_search_right(struct inode *inode, if (bh == NULL) return -EIO; eh = ext_block_hdr(bh); - /* subtract from p_depth to get proper eh_depth */ + if (ext4_ext_check(inode, eh, path->p_depth - depth)) { put_bh(bh); return -EIO; @@ -1371,13 +1221,6 @@ static int ext4_ext_search_right(struct inode *inode, return 0; } -/* - * ext4_ext_next_allocated_block: - * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. - * NOTE: it considers block number from index entry as - * allocated block. Thus, index entries have to be consistent - * with leaves. 
- */ static ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path) { @@ -1391,13 +1234,13 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path) while (depth >= 0) { if (depth == path->p_depth) { - /* leaf */ + if (path[depth].p_ext && path[depth].p_ext != EXT_LAST_EXTENT(path[depth].p_hdr)) return le32_to_cpu(path[depth].p_ext[1].ee_block); } else { - /* index */ + if (path[depth].p_idx != EXT_LAST_INDEX(path[depth].p_hdr)) return le32_to_cpu(path[depth].p_idx[1].ei_block); @@ -1408,10 +1251,6 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path) return EXT_MAX_BLOCKS; } -/* - * ext4_ext_next_leaf_block: - * returns first allocated block from next leaf or EXT_MAX_BLOCKS - */ static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) { int depth; @@ -1419,11 +1258,11 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) BUG_ON(path == NULL); depth = path->p_depth; - /* zero-tree has no leaf blocks at all */ + if (depth == 0) return EXT_MAX_BLOCKS; - /* go to index block */ + depth--; while (depth >= 0) { @@ -1437,12 +1276,6 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) return EXT_MAX_BLOCKS; } -/* - * ext4_ext_correct_indexes: - * if leaf gets modified and modified extent is first in the leaf, - * then we have to correct all indexes above. - * TODO: do we need to correct tree in all cases? - */ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, struct ext4_ext_path *path) { @@ -1462,18 +1295,15 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, } if (depth == 0) { - /* there is no tree at all */ + return 0; } if (ex != EXT_FIRST_EXTENT(eh)) { - /* we correct tree if first leaf got modified only */ + return 0; } - /* - * TODO: we need correction if border is smaller than current one - */ k = depth - 1; border = path[depth].p_ext->ee_block; err = ext4_ext_get_access(handle, inode, path + k); @@ -1485,7 +1315,7 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, return err; while (k--) { - /* change all left-side indexes */ + if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) break; err = ext4_ext_get_access(handle, inode, path + k); @@ -1506,10 +1336,6 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, { unsigned short ext1_ee_len, ext2_ee_len, max_len; - /* - * Make sure that either both extents are uninitialized, or - * both are _not_. - */ if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) return 0; @@ -1525,11 +1351,6 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, le32_to_cpu(ex2->ee_block)) return 0; - /* - * To allow future support for preallocated extents to be added - * as an RO_COMPAT feature, refuse to merge to extents if - * this can result in the top bit of ee_len being set. - */ if (ext1_ee_len + ext2_ee_len > max_len) return 0; #ifdef AGGRESSIVE_TEST @@ -1542,13 +1363,6 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, return 0; } -/* - * This function tries to merge the "ex" extent to the next extent in the tree. - * It always tries to merge towards right. If you want to merge towards - * left, pass "ex - 1" as argument instead of "ex". - * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns - * 1 if they got merged. 
- */ static int ext4_ext_try_to_merge_right(struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex) @@ -1565,7 +1379,7 @@ static int ext4_ext_try_to_merge_right(struct inode *inode, while (ex < EXT_LAST_EXTENT(eh)) { if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) break; - /* merge with next extent! */ + if (ext4_ext_is_uninitialized(ex)) uninitialized = 1; ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) @@ -1588,10 +1402,6 @@ static int ext4_ext_try_to_merge_right(struct inode *inode, return merge_done; } -/* - * This function tries to merge the @ex extent to neighbours in the tree. - * return 1 if merge left else 0. - */ static int ext4_ext_try_to_merge(struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *ex) { @@ -1613,14 +1423,6 @@ static int ext4_ext_try_to_merge(struct inode *inode, return ret; } -/* - * check if a portion of the "newext" extent overlaps with an - * existing extent. - * - * If there is an overlap discovered, it updates the length of the newext - * such that there will be no overlap, and then returns 1. - * If there is no overlap found, it returns 0. - */ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, struct inode *inode, struct ext4_extent *newext, @@ -1638,10 +1440,6 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, b2 = le32_to_cpu(path[depth].p_ext->ee_block); b2 &= ~(sbi->s_cluster_ratio - 1); - /* - * get the next allocated block if the extent in the path - * is before the requested block(s) - */ if (b2 < b1) { b2 = ext4_ext_next_allocated_block(path); if (b2 == EXT_MAX_BLOCKS) @@ -1649,14 +1447,14 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, b2 &= ~(sbi->s_cluster_ratio - 1); } - /* check for wrap through zero on extent logical start block*/ + if (b1 + len1 < b1) { len1 = EXT_MAX_BLOCKS - b1; newext->ee_len = cpu_to_le16(len1); ret = 1; } - /* check for overlap */ + if (b1 + len1 > b2) { newext->ee_len = cpu_to_le16(b2 - b1); ret = 1; @@ -1665,19 +1463,13 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, return ret; } -/* - * ext4_ext_insert_extent: - * tries to merge requsted extent into the existing extent or - * inserts requested extent as new one into the tree, - * creating new leaf in the no-space case. - */ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, struct ext4_extent *newext, int flag) { struct ext4_extent_header *eh; struct ext4_extent *ex, *fex; - struct ext4_extent *nearex; /* nearest extent */ + struct ext4_extent *nearex; struct ext4_ext_path *npath = NULL; int depth, len, err; ext4_lblk_t next; @@ -1695,7 +1487,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, return -EIO; } - /* try to insert block into found extent and return */ + if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO) && ext4_can_extents_be_merged(inode, ex, newext)) { ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n", @@ -1709,11 +1501,6 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, if (err) return err; - /* - * ext4_can_extents_be_merged should have checked that either - * both extents are uninitialized, or both aren't. Thus we - * need to check only one of them here. 
- */ if (ext4_ext_is_uninitialized(ex)) uninitialized = 1; ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) @@ -1730,7 +1517,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) goto has_space; - /* probably next leaf has space for us? */ + fex = EXT_LAST_EXTENT(eh); next = EXT_MAX_BLOCKS; if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) @@ -1753,10 +1540,6 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); } - /* - * There is no free space in the found leaf. - * We're gonna add a new leaf in the tree. - */ if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) flags = EXT4_MB_USE_ROOT_BLOCKS; err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext); @@ -1773,7 +1556,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, goto cleanup; if (!nearex) { - /* there is no extent in this leaf, create first one */ + ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", le32_to_cpu(newext->ee_block), ext4_ext_pblock(newext), @@ -1783,7 +1566,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, } else { if (le32_to_cpu(newext->ee_block) > le32_to_cpu(nearex->ee_block)) { - /* Insert after */ + ext_debug("insert %u:%llu:[%d]%d before: " "nearest %p\n", le32_to_cpu(newext->ee_block), @@ -1793,7 +1576,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, nearex); nearex++; } else { - /* Insert before */ + BUG_ON(newext->ee_block == nearex->ee_block); ext_debug("insert %u:%llu:[%d]%d after: " "nearest %p\n", @@ -1824,13 +1607,13 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, nearex->ee_len = newext->ee_len; merge: - /* try to merge extents to the right */ + if (!(flag & EXT4_GET_BLOCKS_PRE_IO)) ext4_ext_try_to_merge(inode, path, nearex); - /* try to merge extents to the left */ + - /* time to correct all indexes above */ + err = ext4_ext_correct_indexes(handle, inode, path); if (err) goto cleanup; @@ -1862,7 +1645,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, while (block < last && block != EXT_MAX_BLOCKS) { num = last - block; - /* find extent for this block */ + down_read(&EXT4_I(inode)->i_data_sem); path = ext4_ext_find_extent(inode, block, path); up_read(&EXT4_I(inode)->i_data_sem); @@ -1883,28 +1666,22 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, exists = 0; if (!ex) { - /* there is no extent yet, so try to allocate - * all requested space */ start = block; end = block + num; } else if (le32_to_cpu(ex->ee_block) > block) { - /* need to allocate space before found extent */ + start = block; end = le32_to_cpu(ex->ee_block); if (block + num < end) end = block + num; } else if (block >= le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex)) { - /* need to allocate space after found extent */ + start = block; end = block + num; if (end >= next) end = next; } else if (block >= le32_to_cpu(ex->ee_block)) { - /* - * some part of requested space is covered - * by found extent - */ start = block; end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); @@ -1945,7 +1722,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, } if (ext_depth(inode) != depth) { - /* depth was changed. 
we have to realloc path */ + kfree(path); path = NULL; } @@ -1980,11 +1757,6 @@ ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); } -/* - * ext4_ext_put_gap_in_cache: - * calculate boundaries of the gap that the requested block fits into - * and cache this gap - */ static void ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, ext4_lblk_t block) @@ -1996,7 +1768,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, ex = path[depth].p_ext; if (ex == NULL) { - /* there is no extent yet, so gap is [0;-] */ + lblock = 0; len = EXT_MAX_BLOCKS; ext_debug("cache gap(whole file):"); @@ -2029,36 +1801,17 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, ext4_ext_put_in_cache(inode, lblock, len, 0); } -/* - * ext4_ext_check_cache() - * Checks to see if the given block is in the cache. - * If it is, the cached extent is stored in the given - * cache extent pointer. If the cached extent is a hole, - * this routine should be used instead of - * ext4_ext_in_cache if the calling function needs to - * know the size of the hole. - * - * @inode: The files inode - * @block: The block to look for in the cache - * @ex: Pointer where the cached extent will be stored - * if it contains block - * - * Return 0 if cache is invalid; 1 if the cache is valid - */ static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block, struct ext4_ext_cache *ex){ struct ext4_ext_cache *cex; struct ext4_sb_info *sbi; int ret = 0; - /* - * We borrow i_block_reservation_lock to protect i_cached_extent - */ spin_lock(&EXT4_I(inode)->i_block_reservation_lock); cex = &EXT4_I(inode)->i_cached_extent; sbi = EXT4_SB(inode->i_sb); - /* has cache valid data? */ + if (cex->ec_len == 0) goto errout; @@ -2075,19 +1828,6 @@ static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block, return ret; } -/* - * ext4_ext_in_cache() - * Checks to see if the given block is in the cache. - * If it is, the cached extent is stored in the given - * extent pointer. - * - * @inode: The files inode - * @block: The block to look for in the cache - * @ex: Pointer where the cached extent will be stored - * if it contains block - * - * Return 0 if cache is invalid; 1 if the cache is valid - */ static int ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, struct ext4_extent *ex) @@ -2106,18 +1846,15 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, } -/* - * ext4_ext_rm_idx: - * removes index from the index block. - */ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, - struct ext4_ext_path *path) + struct ext4_ext_path *path, int depth) { int err; ext4_fsblk_t leaf; - /* free index block */ - path--; + + depth--; + path = path + depth; leaf = ext4_idx_pblock(path->p_idx); if (unlikely(path->p_hdr->eh_entries == 0)) { EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); @@ -2142,16 +1879,22 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, ext4_free_blocks(handle, inode, NULL, leaf, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); + + while (--depth >= 0) { + if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) + break; + path--; + err = ext4_ext_get_access(handle, inode, path); + if (err) + break; + path->p_idx->ei_block = (path+1)->p_idx->ei_block; + err = ext4_ext_dirty(handle, inode, path); + if (err) + break; + } return err; } -/* - * ext4_ext_calc_credits_for_single_extent: - * This routine returns max. 
credits that needed to insert an extent - * to the extent tree. - * When pass the actual path, the caller should calculate credits - * under i_data_sem. - */ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, struct ext4_ext_path *path) { @@ -2159,19 +1902,11 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, int depth = ext_depth(inode); int ret = 0; - /* probably there is space in leaf? */ + if (le16_to_cpu(path[depth].p_hdr->eh_entries) < le16_to_cpu(path[depth].p_hdr->eh_max)) { - /* - * There are some space in the leaf tree, no - * need to account for leaf block credit - * - * bitmaps and block group descriptor blocks - * and other metadata blocks still need to be - * accounted. - */ - /* 1 bitmap, 1 block group descriptor */ + ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); return ret; } @@ -2180,17 +1915,6 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, return ext4_chunk_trans_blocks(inode, nrblocks); } -/* - * How many index/leaf blocks need to change/allocate to modify nrblocks? - * - * if nrblocks are fit in a single extent (chunk flag is 1), then - * in the worse case, each tree level index/leaf need to be changed - * if the tree split due to insert a new extent, then the old tree - * index/leaf need to be updated too - * - * If the nrblocks are discontiguous, they could cause - * the whole tree split more than once, but this is really rare. - */ int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) { int index; @@ -2216,21 +1940,9 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) flags |= EXT4_FREE_BLOCKS_METADATA; - /* - * For bigalloc file systems, we never free a partial cluster - * at the beginning of the extent. Instead, we make a note - * that we tried freeing the cluster, and check to see if we - * need to free it on a subsequent call to ext4_remove_blocks, - * or at the end of the ext4_truncate() operation. - */ flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); - /* - * If we have a partial cluster, and it's different from the - * cluster of the last block, we need to explicitly free the - * partial cluster here. - */ pblk = ext4_ext_pblock(ex) + ee_len - 1; if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) { ext4_free_blocks(handle, inode, NULL, @@ -2256,21 +1968,13 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, #endif if (from >= le32_to_cpu(ex->ee_block) && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { - /* tail removal */ + ext4_lblk_t num; num = le32_to_cpu(ex->ee_block) + ee_len - from; pblk = ext4_ext_pblock(ex) + ee_len - num; ext_debug("free last %u blocks starting %llu\n", num, pblk); ext4_free_blocks(handle, inode, NULL, pblk, num, flags); - /* - * If the block range to be freed didn't start at the - * beginning of a cluster, and we removed the entire - * extent, save the partial cluster here, since we - * might need to delete if we determine that the - * truncate operation has removed all of the blocks in - * the cluster. 
- */ if (pblk & (sbi->s_cluster_ratio - 1) && (ee_len == num)) *partial_cluster = EXT4_B2C(sbi, pblk); @@ -2278,7 +1982,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, *partial_cluster = 0; } else if (from == le32_to_cpu(ex->ee_block) && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { - /* head removal */ + ext4_lblk_t num; ext4_fsblk_t start; @@ -2297,17 +2001,6 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, } -/* - * ext4_ext_rm_leaf() Removes the extents associated with the - * blocks appearing between "start" and "end", and splits the extents - * if "start" and "end" appear in the same extent - * - * @handle: The journal handle - * @inode: The files inode - * @path: The path to the leaf - * @start: The first block to remove - * @end: The last block to remove - */ static int ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster, @@ -2324,7 +2017,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, unsigned uninitialized = 0; struct ext4_extent *ex; - /* the header must be checked already in ext4_ext_remove_space() */ + ext_debug("truncate since %u in leaf to %u\n", start, end); if (!path[depth].p_hdr) path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); @@ -2333,7 +2026,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); return -EIO; } - /* find where to start removing */ + ex = EXT_LAST_EXTENT(eh); ex_ee_block = le32_to_cpu(ex->ee_block); @@ -2359,7 +2052,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, ext_debug(" border %u:%u\n", a, b); - /* If this extent is beyond the end of the hole, skip it */ + if (end < ex_ee_block) { ex--; ex_ee_block = le32_to_cpu(ex->ee_block); @@ -2374,18 +2067,12 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, err = -EIO; goto out; } else if (a != ex_ee_block) { - /* remove tail of the extent */ + num = a - ex_ee_block; } else { - /* remove whole extent: excellent! */ + num = 0; } - /* - * 3 for leaf, sb, and inode plus 2 (bmap and group - * descriptor) for each block group; assume two block - * groups plus ex_ee_len/blocks_per_block_group for - * the worst case - */ credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); if (ex == EXT_FIRST_EXTENT(eh)) { correct_index = 1; @@ -2407,31 +2094,18 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, goto out; if (num == 0) - /* this extent is removed; mark slot entirely unused */ + ext4_ext_store_pblock(ex, 0); ex->ee_len = cpu_to_le16(num); - /* - * Do not mark uninitialized if all the blocks in the - * extent have been removed. - */ if (uninitialized && num) ext4_ext_mark_uninitialized(ex); - /* - * If the extent was completely released, - * we need to remove it from the leaf - */ if (num == 0) { if (end != EXT_MAX_BLOCKS - 1) { - /* - * For hole punching, we need to scoot all the - * extents up when an extent is removed so that - * we dont have blank extents in the middle - */ memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * sizeof(struct ext4_extent)); - /* Now get rid of the one at the end */ + memset(EXT_LAST_EXTENT(eh), 0, sizeof(struct ext4_extent)); } @@ -2453,11 +2127,6 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, if (correct_index && eh->eh_entries) err = ext4_ext_correct_indexes(handle, inode, path); - /* - * If there is still a entry in the leaf node, check to see if - * it references the partial cluster. 
This is the only place - * where it could; if it doesn't, we can free the cluster. - */ if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) && (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != *partial_cluster)) { @@ -2472,19 +2141,13 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, *partial_cluster = 0; } - /* if this leaf is free, then we should - * remove it from index block above */ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) - err = ext4_ext_rm_idx(handle, inode, path + depth); + err = ext4_ext_rm_idx(handle, inode, path, depth); out: return err; } -/* - * ext4_ext_more_to_rm: - * returns 1 if current index has to be freed (even partial) - */ static int ext4_ext_more_to_rm(struct ext4_ext_path *path) { @@ -2493,10 +2156,6 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path) if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) return 0; - /* - * if truncate on deeper level happened, it wasn't partial, - * so we have to consider current index for truncation - */ if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) return 0; return 1; @@ -2514,7 +2173,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, ext_debug("truncate since %u to %u\n", start, end); - /* probably first extent we're gonna free will be last in block */ + handle = ext4_journal_start(inode, depth + 1); if (IS_ERR(handle)) return PTR_ERR(handle); @@ -2524,18 +2183,11 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, trace_ext4_ext_remove_space(inode, start, depth); - /* - * Check if we are removing extents inside the extent tree. If that - * is the case, we are going to punch a hole inside the extent tree - * so we have to check whether we need to split the extent covering - * the last block to remove so we can easily remove the part of it - * in ext4_ext_rm_leaf(). - */ if (end < EXT_MAX_BLOCKS - 1) { struct ext4_extent *ex; ext4_lblk_t ee_block; - /* find extent for this block */ + path = ext4_ext_find_extent(inode, end, NULL); if (IS_ERR(path)) { ext4_journal_stop(handle); @@ -2552,12 +2204,6 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, ee_block = le32_to_cpu(ex->ee_block); - /* - * See if the last block is inside the extent, if so split - * the extent at 'end' block so we can easily remove the - * tail of the first part of the split extent in - * ext4_ext_rm_leaf(). - */ if (end >= ee_block && end < ee_block + ext4_ext_get_actual_len(ex) - 1) { int split_flag = 0; @@ -2566,10 +2212,6 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, split_flag = EXT4_EXT_MARK_UNINIT1 | EXT4_EXT_MARK_UNINIT2; - /* - * Split the extent in two so that 'end' is the last - * block in the first new extent - */ err = ext4_split_extent_at(handle, inode, path, end + 1, split_flag, EXT4_GET_BLOCKS_PRE_IO | @@ -2581,10 +2223,6 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, } cont: - /* - * We start scanning from right side, freeing all the blocks - * after i_size and walking into the tree depth-wise. 
- */ depth = ext_depth(inode); if (path) { int k = i = depth; @@ -2611,32 +2249,32 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, while (i >= 0 && err == 0) { if (i == depth) { - /* this is leaf block */ + err = ext4_ext_rm_leaf(handle, inode, path, &partial_cluster, start, end); - /* root level has p_bh == NULL, brelse() eats this */ + brelse(path[i].p_bh); path[i].p_bh = NULL; i--; continue; } - /* this is index block */ + if (!path[i].p_hdr) { ext_debug("initialize header\n"); path[i].p_hdr = ext_block_hdr(path[i].p_bh); } if (!path[i].p_idx) { - /* this level hasn't been touched yet */ + path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; ext_debug("init index ptr: hdr 0x%p, num %d\n", path[i].p_hdr, le16_to_cpu(path[i].p_hdr->eh_entries)); } else { - /* we were already here, see at next index */ + path[i].p_idx--; } @@ -2645,13 +2283,13 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, path[i].p_idx); if (ext4_ext_more_to_rm(path + i)) { struct buffer_head *bh; - /* go to the next level */ + ext_debug("move to level %d (block %llu)\n", i + 1, ext4_idx_pblock(path[i].p_idx)); memset(path + i + 1, 0, sizeof(*path)); bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx)); if (!bh) { - /* should we reset i_size? */ + err = -EIO; break; } @@ -2666,19 +2304,14 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, } path[i + 1].p_bh = bh; - /* save actual number of indexes since this - * number is changed at the next iteration */ path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); i++; } else { - /* we finished processing this index, go up */ + if (path[i].p_hdr->eh_entries == 0 && i > 0) { - /* index is empty, remove it; - * handle must be already prepared by the - * truncatei_leaf() */ - err = ext4_ext_rm_idx(handle, inode, path + i); + err = ext4_ext_rm_idx(handle, inode, path, i); } - /* root level has p_bh == NULL, brelse() eats this */ + brelse(path[i].p_bh); path[i].p_bh = NULL; i--; @@ -2689,9 +2322,6 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster, path->p_hdr->eh_entries); - /* If we still have something in the partial cluster and we have removed - * even the first extent, then we should free the blocks in the partial - * cluster as well. 
*/ if (partial_cluster && path->p_hdr->eh_entries == 0) { int flags = EXT4_FREE_BLOCKS_FORGET; @@ -2704,12 +2334,8 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, partial_cluster = 0; } - /* TODO: flexible tree reduction should be here */ + if (path->p_hdr->eh_entries == 0) { - /* - * truncate to zero freed all the tree, - * so we need to correct eh_depth - */ err = ext4_ext_get_access(handle, inode, path); if (err == 0) { ext_inode_hdr(inode)->eh_depth = 0; @@ -2730,14 +2356,8 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, return err; } -/* - * called at mount time - */ void ext4_ext_init(struct super_block *sb) { - /* - * possible initialization would be here - */ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) @@ -2761,9 +2381,6 @@ void ext4_ext_init(struct super_block *sb) } } -/* - * called at umount time - */ void ext4_ext_release(struct super_block *sb) { if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) @@ -2781,7 +2398,6 @@ void ext4_ext_release(struct super_block *sb) #endif } -/* FIXME!! we need to try to merge to left or right after zero-out */ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) { ext4_fsblk_t ee_pblock; @@ -2798,27 +2414,6 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) return ret; } -/* - * ext4_split_extent_at() splits an extent at given block. - * - * @handle: the journal handle - * @inode: the file inode - * @path: the path to the extent - * @split: the logical block where the extent is splitted. - * @split_flags: indicates if the extent could be zeroout if split fails, and - * the states(init or uninit) of new extents. - * @flags: flags used to insert new extent to extent tree. - * - * - * Splits extent [a, b] into two extents [a, @split) and [@split, b], states - * of which are deterimined by split_flag. - * - * There are two cases: - * a> the extent are splitted into two extent. - * b> split is not needed, and just mark the extent. - * - * return 0 on success. - */ static int ext4_split_extent_at(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, @@ -2851,11 +2446,6 @@ static int ext4_split_extent_at(handle_t *handle, goto out; if (split == ee_block) { - /* - * case b: block @split is the block that the extent begins with - * then we just change the state of the extent, and splitting - * is not needed. 
- */ if (split_flag & EXT4_EXT_MARK_UNINIT2) ext4_ext_mark_uninitialized(ex); else @@ -2868,16 +2458,12 @@ static int ext4_split_extent_at(handle_t *handle, goto out; } - /* case a */ + memcpy(&orig_ex, ex, sizeof(orig_ex)); ex->ee_len = cpu_to_le16(split - ee_block); if (split_flag & EXT4_EXT_MARK_UNINIT1) ext4_ext_mark_uninitialized(ex); - /* - * path may lead to new leaf, not to original leaf any more - * after ext4_ext_insert_extent() returns, - */ err = ext4_ext_dirty(handle, inode, path + depth); if (err) goto fix_extent_len; @@ -2894,7 +2480,7 @@ static int ext4_split_extent_at(handle_t *handle, err = ext4_ext_zeroout(inode, &orig_ex); if (err) goto fix_extent_len; - /* update the extent length and mark as initialized */ + ex->ee_len = cpu_to_le16(ee_len); ext4_ext_try_to_merge(inode, path, ex); err = ext4_ext_dirty(handle, inode, path + depth); @@ -2912,17 +2498,6 @@ static int ext4_split_extent_at(handle_t *handle, return err; } -/* - * ext4_split_extents() splits an extent and mark extent which is covered - * by @map as split_flags indicates - * - * It may result in splitting the extent into multiple extents (upto three) - * There are three possibilities: - * a> There is no split required - * b> Splits in two extents: Split is happening at either end of the extent - * c> Splits in three extents: Somone is splitting in middle of the extent - * - */ static int ext4_split_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, @@ -2980,26 +2555,6 @@ static int ext4_split_extent(handle_t *handle, } #define EXT4_EXT_ZERO_LEN 7 -/* - * This function is called by ext4_ext_map_blocks() if someone tries to write - * to an uninitialized extent. It may result in splitting the uninitialized - * extent into multiple extents (up to three - one initialized and two - * uninitialized). - * There are three possibilities: - * a> There is no split required: Entire extent should be initialized - * b> Splits in two extents: Write is happening at either end of the extent - * c> Splits in three extents: Somone is writing in middle of the extent - * - * Pre-conditions: - * - The extent pointed to by 'path' is uninitialized. - * - The extent pointed to by 'path' contains a superset - * of the logical span [map->m_lblk, map->m_lblk + map->m_len). - * - * Post-conditions on success: - * - the returned value is the number of blocks beyond map->l_lblk - * that are allocated and initialized. - * It is guaranteed to be >= map->m_len. - */ static int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, @@ -3033,31 +2588,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); - /* Pre-conditions */ + BUG_ON(!ext4_ext_is_uninitialized(ex)); BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); - /* - * Attempt to transfer newly initialized blocks from the currently - * uninitialized extent to its left neighbor. This is much cheaper - * than an insertion followed by a merge as those involve costly - * memmove() calls. This is the common case in steady state for - * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append - * writes. - * - * Limitations of the current logic: - * - L1: we only deal with writes at the start of the extent. - * The approach could be extended to writes at the end - * of the extent but this scenario was deemed less common. - * - L2: we do not deal with writes covering the whole extent. 
- * This would require removing the extent if the transfer - * is possible. - * - L3: we only attempt to merge with an extent stored in the - * same extent tree node. - */ - if ((map->m_lblk == ee_block) && /*L1*/ - (map->m_len < ee_len) && /*L2*/ - (ex > EXT_FIRST_EXTENT(eh))) { /*L3*/ + if ((map->m_lblk == ee_block) && + (map->m_len < ee_len) && + (ex > EXT_FIRST_EXTENT(eh))) { struct ext4_extent *prev_ex; ext4_lblk_t prev_lblk; ext4_fsblk_t prev_pblk, ee_pblk; @@ -3070,19 +2607,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, ee_pblk = ext4_ext_pblock(ex); write_len = map->m_len; - /* - * A transfer of blocks from 'ex' to 'prev_ex' is allowed - * upon those conditions: - * - C1: prev_ex is initialized, - * - C2: prev_ex is logically abutting ex, - * - C3: prev_ex is physically abutting ex, - * - C4: prev_ex can receive the additional blocks without - * overflowing the (initialized) length limit. - */ - if ((!ext4_ext_is_uninitialized(prev_ex)) && /*C1*/ - ((prev_lblk + prev_len) == ee_block) && /*C2*/ - ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ - (prev_len < (EXT_INIT_MAX_LEN - write_len))) { /*C4*/ + if ((!ext4_ext_is_uninitialized(prev_ex)) && + ((prev_lblk + prev_len) == ee_block) && + ((prev_pblk + prev_len) == ee_pblk) && + (prev_len < (EXT_INIT_MAX_LEN - write_len))) { err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; @@ -3090,35 +2618,31 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, trace_ext4_ext_convert_to_initialized_fastpath(inode, map, ex, prev_ex); - /* Shift the start of ex by 'write_len' blocks */ + ex->ee_block = cpu_to_le32(ee_block + write_len); ext4_ext_store_pblock(ex, ee_pblk + write_len); ex->ee_len = cpu_to_le16(ee_len - write_len); - ext4_ext_mark_uninitialized(ex); /* Restore the flag */ + ext4_ext_mark_uninitialized(ex); - /* Extend prev_ex by 'write_len' blocks */ + prev_ex->ee_len = cpu_to_le16(prev_len + write_len); - /* Mark the block containing both extents as dirty */ + ext4_ext_dirty(handle, inode, path + depth); - /* Update path to point to the right extent */ + path[depth].p_ext = prev_ex; - /* Result: number of initialized blocks past m_lblk */ + allocated = write_len; goto out; } } WARN_ON(map->m_lblk < ee_block); - /* - * It is safe to convert extent to initialized via explicit - * zeroout only if extent is fully insde i_size or new_size. - */ split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; - /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */ + if (ee_len <= 2*EXT4_EXT_ZERO_LEN && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { err = ext4_ext_zeroout(inode, ex); @@ -3134,20 +2658,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, goto out; } - /* - * four cases: - * 1. split the extent into three extents. - * 2. split the extent into two extents, zeroout the first half. - * 3. split the extent into two extents, zeroout the second half. - * 4. split the extent into two extents with out zeroout. 
- */ split_map.m_lblk = map->m_lblk; split_map.m_len = map->m_len; if (allocated > map->m_len) { if (allocated <= EXT4_EXT_ZERO_LEN && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { - /* case 3 */ + zero_ex.ee_block = cpu_to_le32(map->m_lblk); zero_ex.ee_len = cpu_to_le16(allocated); @@ -3161,7 +2678,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, } else if ((map->m_lblk - ee_block + map->m_len < EXT4_EXT_ZERO_LEN) && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { - /* case 2 */ + if (map->m_lblk != ee_block) { zero_ex.ee_block = ex->ee_block; zero_ex.ee_len = cpu_to_le16(map->m_lblk - @@ -3230,10 +2747,6 @@ static int ext4_split_unwritten_extents(handle_t *handle, inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map->m_len) eof_block = map->m_lblk + map->m_len; - /* - * It is safe to convert extent to initialized via explicit - * zeroout only if extent is fully insde i_size or new_size. - */ depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); @@ -3265,15 +2778,12 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; - /* first mark the extent as initialized */ + ext4_ext_mark_initialized(ex); - /* note: ext4_ext_correct_indexes() isn't needed here because - * borders are not changed - */ ext4_ext_try_to_merge(inode, path, ex); - /* Mark modified extent as dirty */ + err = ext4_ext_dirty(handle, inode, path + depth); out: ext4_ext_show_leaf(inode, path); @@ -3288,9 +2798,6 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev, unmap_underlying_metadata(bdev, block + i); } -/* - * Handle EOFBLOCKS_FL flag, clearing it if necessary - */ static int check_eofblocks_fl(handle_t *handle, struct inode *inode, ext4_lblk_t lblk, struct ext4_ext_path *path, @@ -3306,33 +2813,12 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode, depth = ext_depth(inode); eh = path[depth].p_hdr; - /* - * We're going to remove EOFBLOCKS_FL entirely in future so we - * do not care for this case anymore. Simply remove the flag - * if there are no extents. - */ if (unlikely(!eh->eh_entries)) goto out; last_ex = EXT_LAST_EXTENT(eh); - /* - * We should clear the EOFBLOCKS_FL flag if we are writing the - * last block in the last extent in the file. We test this by - * first checking to see if the caller to - * ext4_ext_get_blocks() was interested in the last block (or - * a block beyond the last block) in the current extent. If - * this turns out to be false, we can bail out from this - * function immediately. - */ if (lblk + len < le32_to_cpu(last_ex->ee_block) + ext4_ext_get_actual_len(last_ex)) return 0; - /* - * If the caller does appear to be planning to write at or - * beyond the end of the current extent, we then test to see - * if the current extent is the last extent in the file, by - * checking to make sure it was reached via the rightmost node - * at each level of the tree. - */ for (i = depth-1; i >= 0; i--) if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) return 0; @@ -3341,19 +2827,6 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode, return ext4_mark_inode_dirty(handle, inode); } -/** - * ext4_find_delalloc_range: find delayed allocated block in the given range. - * - * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns - * whether there are any buffers marked for delayed allocation. It returns '1' - * on the first delalloc'ed buffer head found. 
If no buffer head in the given - * range is marked for delalloc, it returns 0. - * lblk_start should always be <= lblk_end. - * search_hint_reverse is to indicate that searching in reverse from lblk_end to - * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed - * block sooner). This is useful when blocks are truncated sequentially from - * lblk_start towards lblk_end. - */ static int ext4_find_delalloc_range(struct inode *inode, ext4_lblk_t lblk_start, ext4_lblk_t lblk_end, @@ -3368,7 +2841,7 @@ static int ext4_find_delalloc_range(struct inode *inode, if (!test_opt(inode->i_sb, DELALLOC)) return 0; - /* reverse search wont work if fs block size is less than page size */ + if (inode->i_blkbits < PAGE_CACHE_SHIFT) search_hint_reverse = 0; @@ -3396,20 +2869,10 @@ static int ext4_find_delalloc_range(struct inode *inode, inode->i_blkbits); do { if (unlikely(pg_lblk < lblk_start)) { - /* - * This is possible when fs block size is less - * than page size and our cluster starts/ends in - * middle of the page. So we need to skip the - * initial few blocks till we reach the 'lblk' - */ pg_lblk++; continue; } - /* Check if the buffer is delayed allocated and that it - * is not yet mapped. (when da-buffers are mapped during - * their writeout, their da_mapped bit is set.) - */ if (buffer_delay(bh) && !buffer_da_mapped(bh)) { page_cache_release(page); trace_ext4_find_delalloc_range(inode, @@ -3427,10 +2890,6 @@ static int ext4_find_delalloc_range(struct inode *inode, nextpage: if (page) page_cache_release(page); - /* - * Move to next page. 'i' will be the first lblk in the next - * page. - */ if (search_hint_reverse) index--; else @@ -3502,12 +2961,12 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, alloc_cluster_start = EXT4_B2C(sbi, lblk_start); alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1); - /* max possible clusters for this allocation */ + allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1; trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); - /* Check towards left side */ + c_offset = lblk_start & (sbi->s_cluster_ratio - 1); if (c_offset) { lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); @@ -3517,7 +2976,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, allocated_clusters--; } - /* Now check towards right. */ + c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); if (allocated_clusters && c_offset) { lblk_from = lblk_start + num_blks; @@ -3549,7 +3008,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated, newblock); - /* get_block() before submit the IO, split the extent */ + if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { ret = ext4_split_unwritten_extents(handle, inode, map, path, flags); @@ -3578,7 +3037,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, err = ret; goto out2; } - /* buffered IO case */ + /* * repeat fallocate creation request * we already have an unwritten extent @@ -3586,20 +3045,13 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) goto map_out; - /* buffered READ or buffered write_begin() lookup */ + if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { - /* - * We have blocks reserved already. We - * return allocated blocks so that delalloc - * won't do block reservation for us. 
But - * the buffer head will be unmapped so that - * a read from the block returns 0s. - */ map->m_flags |= EXT4_MAP_UNWRITTEN; goto out1; } - /* buffered write, writepage time, convert*/ + ret = ext4_ext_convert_to_initialized(handle, inode, map, path); if (ret >= 0) ext4_update_inode_fsync_trans(handle, inode, 1); @@ -3610,13 +3062,6 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, } else allocated = ret; map->m_flags |= EXT4_MAP_NEW; - /* - * if we allocated more blocks than requested - * we need to make sure we unmap the extra block - * allocated. The actual needed block will get - * unmapped later when we find the buffer_head marked - * new. - */ if (allocated > map->m_len) { unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, newblock + map->m_len, @@ -3624,13 +3069,6 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, allocated = map->m_len; } - /* - * If we have done fallocate with the offset that is already - * delayed allocated, we would have block reservation - * and quota reservation done in the delayed write path. - * But fallocate would have already updated quota and block - * count for this offset. So cancel these reservation - */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { unsigned int reserved_clusters; reserved_clusters = get_reserved_cluster_alloc(inode, @@ -3663,47 +3101,6 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, return err ? err : allocated; } -/* - * get_implied_cluster_alloc - check to see if the requested - * allocation (in the map structure) overlaps with a cluster already - * allocated in an extent. - * @sb The filesystem superblock structure - * @map The requested lblk->pblk mapping - * @ex The extent structure which might contain an implied - * cluster allocation - * - * This function is called by ext4_ext_map_blocks() after we failed to - * find blocks that were already in the inode's extent tree. Hence, - * we know that the beginning of the requested region cannot overlap - * the extent from the inode's extent tree. There are three cases we - * want to catch. The first is this case: - * - * |--- cluster # N--| - * |--- extent ---| |---- requested region ---| - * |==========| - * - * The second case that we need to test for is this one: - * - * |--------- cluster # N ----------------| - * |--- requested region --| |------- extent ----| - * |=======================| - * - * The third case is when the requested region lies between two extents - * within the same cluster: - * |------------- cluster # N-------------| - * |----- ex -----| |---- ex_right ----| - * |------ requested region ------| - * |================| - * - * In each of the above cases, we need to set the map->m_pblk and - * map->m_len so it corresponds to the return the extent labelled as - * "|====|" from cluster #N, since it is already in use for data in - * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to - * signal to ext4_ext_map_blocks() that map->m_pblk should be treated - * as a new "allocated" block region. Otherwise, we will return 0 and - * ext4_ext_map_blocks() will then allocate one or more new clusters - * by calling ext4_mb_new_blocks(). 
- */ static int get_implied_cluster_alloc(struct super_block *sb, struct ext4_map_blocks *map, struct ext4_extent *ex, @@ -3717,11 +3114,11 @@ static int get_implied_cluster_alloc(struct super_block *sb, ext4_fsblk_t ee_start = ext4_ext_pblock(ex); unsigned short ee_len = ext4_ext_get_actual_len(ex); - /* The extent passed in that we are trying to match */ + ex_cluster_start = EXT4_B2C(sbi, ee_block); ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); - /* The requested region passed into ext4_map_blocks() */ + rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); if ((rr_cluster_start == ex_cluster_end) || @@ -3732,27 +3129,10 @@ static int get_implied_cluster_alloc(struct super_block *sb, c_offset; map->m_len = min(map->m_len, (unsigned) sbi->s_cluster_ratio - c_offset); - /* - * Check for and handle this case: - * - * |--------- cluster # N-------------| - * |------- extent ----| - * |--- requested region ---| - * |===========| - */ if (map->m_lblk < ee_block) map->m_len = min(map->m_len, ee_block - map->m_lblk); - /* - * Check for the case where there is already another allocated - * block to the right of 'ex' but before the end of the cluster. - * - * |------------- cluster # N-------------| - * |----- ex -----| |---- ex_right ----| - * |------ requested region ------| - * |================| - */ if (map->m_lblk > ee_block) { ext4_lblk_t next = ext4_ext_next_allocated_block(path); map->m_len = min(map->m_len, next - map->m_lblk); @@ -3767,24 +3147,6 @@ static int get_implied_cluster_alloc(struct super_block *sb, } -/* - * Block allocation/map/preallocation routine for extents based files - * - * - * Need to be called with - * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block - * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) - * - * return > 0, number of of blocks already mapped/allocated - * if create == 0 and these are pre-allocated blocks - * buffer head is unmapped - * otherwise blocks are mapped - * - * return = 0, if plain look up failed (blocks have not been allocated) - * buffer head is unmapped - * - * return < 0, error case. 
- */ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags) { @@ -3803,7 +3165,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, map->m_lblk, map->m_len, inode->i_ino); trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); - /* check in cache */ + if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { if (!newex.ee_start_lo && !newex.ee_start_hi) { if ((sbi->s_cluster_ratio > 1) && @@ -3811,28 +3173,24 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, map->m_flags |= EXT4_MAP_FROM_CLUSTER; if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { - /* - * block isn't allocated yet and - * user doesn't want to allocate it - */ goto out2; } - /* we should allocate requested block */ + } else { - /* block is already allocated */ + if (sbi->s_cluster_ratio > 1) map->m_flags |= EXT4_MAP_FROM_CLUSTER; newblock = map->m_lblk - le32_to_cpu(newex.ee_block) + ext4_ext_pblock(&newex); - /* number of remaining blocks in the extent */ + allocated = ext4_ext_get_actual_len(&newex) - (map->m_lblk - le32_to_cpu(newex.ee_block)); goto out; } } - /* find extent for this block */ + path = ext4_ext_find_extent(inode, map->m_lblk, NULL); if (IS_ERR(path)) { err = PTR_ERR(path); @@ -3842,11 +3200,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, depth = ext_depth(inode); - /* - * consistent leaf must not be empty; - * this situation is possible, though, _during_ tree modification; - * this is why assert can't be put in ext4_ext_find_extent() - */ if (unlikely(path[depth].p_ext == NULL && depth != 0)) { EXT4_ERROR_INODE(inode, "bad extent address " "lblock: %lu, depth: %d pblock %lld", @@ -3862,26 +3215,18 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, ext4_fsblk_t ee_start = ext4_ext_pblock(ex); unsigned short ee_len; - /* - * Uninitialized extents are treated as holes, except that - * we split out initialized portions during a write. - */ ee_len = ext4_ext_get_actual_len(ex); trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); - /* if found extent covers block, simply return it */ + if (in_range(map->m_lblk, ee_block, ee_len)) { newblock = map->m_lblk - ee_block + ee_start; - /* number of remaining blocks in the extent */ + allocated = ee_len - (map->m_lblk - ee_block); ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, ee_block, ee_len, newblock); - /* - * Do not put uninitialized extent - * in the cache - */ if (!ext4_ext_is_uninitialized(ex)) { ext4_ext_put_in_cache(inode, ee_block, ee_len, ee_start); @@ -3898,30 +3243,15 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, ext4_find_delalloc_cluster(inode, map->m_lblk, 0)) map->m_flags |= EXT4_MAP_FROM_CLUSTER; - /* - * requested block isn't allocated yet; - * we couldn't try to create block if create flag is zero - */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { - /* - * put just found gap into cache to speed up - * subsequent requests - */ ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); goto out2; } - /* - * Okay, we need to do block allocation. - */ map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; newex.ee_block = cpu_to_le32(map->m_lblk); cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); - /* - * If we are doing bigalloc, check to see if the extent returned - * by ext4_ext_find_extent() implies a cluster we can use. 
- */ if (cluster_offset && ex && get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { ar.len = allocated = map->m_len; @@ -3930,7 +3260,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, goto got_allocated_blocks; } - /* find neighbour allocated blocks */ + ar.lleft = map->m_lblk; err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); if (err) @@ -3941,8 +3271,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, if (err) goto out2; - /* Check if the extent after searching to the right implies a - * cluster we can use. */ if ((sbi->s_cluster_ratio > 1) && ex2 && get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { ar.len = allocated = map->m_len; @@ -3951,12 +3279,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, goto got_allocated_blocks; } - /* - * See if request is beyond maximum number of blocks we can have in - * a single extent. For an initialized extent this limit is - * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is - * EXT_UNINIT_MAX_LEN. - */ if (map->m_len > EXT_INIT_MAX_LEN && !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) map->m_len = EXT_INIT_MAX_LEN; @@ -3964,7 +3286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) map->m_len = EXT_UNINIT_MAX_LEN; - /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ + newex.ee_len = cpu_to_le16(map->m_len); err = ext4_ext_check_overlap(sbi, inode, &newex, path); if (err) @@ -3972,18 +3294,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, else allocated = map->m_len; - /* allocate new block */ + ar.inode = inode; ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); ar.logical = map->m_lblk; - /* - * We calculate the offset from the beginning of the cluster - * for the logical block number, since when we allocate a - * physical cluster, the physical block should start at the - * same offset from the beginning of the cluster. This is - * needed so that future calls to get_implied_cluster_alloc() - * work correctly. - */ offset = map->m_lblk & (sbi->s_cluster_ratio - 1); ar.len = EXT4_NUM_B2C(sbi, offset+allocated); ar.goal -= offset; @@ -3991,7 +3305,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, if (S_ISREG(inode->i_mode)) ar.flags = EXT4_MB_HINT_DATA; else - /* disable in-core preallocation for non-regular files */ + ar.flags = 0; if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) ar.flags |= EXT4_MB_HINT_NOPREALLOC; @@ -4007,19 +3321,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, ar.len = allocated; got_allocated_blocks: - /* try to insert new extent into found leaf and return */ + ext4_ext_store_pblock(&newex, newblock + offset); newex.ee_len = cpu_to_le16(ar.len); - /* Mark uninitialized */ + if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ ext4_ext_mark_uninitialized(&newex); - /* - * io_end structure was created for every IO write to an - * uninitialized extent. To avoid unnecessary conversion, - * here we flag the IO that really needs the conversion. - * For non asycn direct IO case, flag the inode state - * that we need to perform conversion when IO is done. - */ if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { if (io) ext4_set_io_unwritten_flag(inode, io); @@ -4041,48 +3348,32 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, if (err && free_on_err) { int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? 
EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; - /* free data blocks we just allocated */ - /* not a good idea to call discard here directly, - * but otherwise we'd need to call it every free() */ + ext4_discard_preallocations(inode); ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex), ext4_ext_get_actual_len(&newex), fb_flags); goto out2; } - /* previous routine could use block we allocated */ + newblock = ext4_ext_pblock(&newex); allocated = ext4_ext_get_actual_len(&newex); if (allocated > map->m_len) allocated = map->m_len; map->m_flags |= EXT4_MAP_NEW; - /* - * Update reserved blocks/metadata blocks after successful - * block allocation which had been deferred till now. - */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { unsigned int reserved_clusters; - /* - * Check how many clusters we had reserved this allocated range - */ reserved_clusters = get_reserved_cluster_alloc(inode, map->m_lblk, allocated); if (map->m_flags & EXT4_MAP_FROM_CLUSTER) { if (reserved_clusters) { - /* - * We have clusters reserved for this range. - * But since we are not doing actual allocation - * and are simply using blocks from previously - * allocated cluster, we should release the - * reservation and not claim quota. - */ ext4_da_update_reserve_space(inode, reserved_clusters, 0); } } else { BUG_ON(allocated_clusters < reserved_clusters); - /* We will claim quota for all newly allocated blocks.*/ + ext4_da_update_reserve_space(inode, allocated_clusters, 1); if (reserved_clusters < allocated_clusters) { @@ -4138,10 +3429,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, } } - /* - * Cache the extent and update transaction to commit on fdatasync only - * when it is _not_ an uninitialized extent. - */ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock); ext4_update_inode_fsync_trans(handle, inode, 1); @@ -4175,15 +3462,8 @@ void ext4_ext_truncate(struct inode *inode) loff_t page_len; int err = 0; - /* - * finish any pending end_io work so we won't run the risk of - * converting any truncated blocks to initialized later - */ ext4_flush_completed_IO(inode); - /* - * probably first extent we're gonna free will be last in block - */ err = ext4_writepage_trans_blocks(inode); handle = ext4_journal_start(inode, err); if (IS_ERR(handle)) @@ -4208,13 +3488,8 @@ void ext4_ext_truncate(struct inode *inode) ext4_discard_preallocations(inode); - /* - * TODO: optimization is possible here. - * Probably we need not scan at all, - * because page truncation is enough. - */ - /* we have to know where to truncate from in crash case */ + EXT4_I(inode)->i_disksize = inode->i_size; ext4_mark_inode_dirty(handle, inode); @@ -4222,22 +3497,12 @@ void ext4_ext_truncate(struct inode *inode) >> EXT4_BLOCK_SIZE_BITS(sb); err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); - /* In a multi-transaction truncate, we only make the final - * transaction synchronous. - */ if (IS_SYNC(inode)) ext4_handle_sync(handle); up_write(&EXT4_I(inode)->i_data_sem); out_stop: - /* - * If this was a simple ftruncate() and the file will remain alive, - * then we need to clear up the orphan record which we created above. - * However, if this was a real unlink then we were called by - * ext4_delete_inode(), and we allow that function to clean up the - * orphan info for us. 
- */ if (inode->i_nlink) ext4_orphan_del(handle, inode); @@ -4256,33 +3521,18 @@ static void ext4_falloc_update_inode(struct inode *inode, if (!timespec_equal(&inode->i_ctime, &now)) inode->i_ctime = now; } - /* - * Update only when preallocation was requested beyond - * the file size. - */ if (!(mode & FALLOC_FL_KEEP_SIZE)) { if (new_size > i_size_read(inode)) i_size_write(inode, new_size); if (new_size > EXT4_I(inode)->i_disksize) ext4_update_i_disksize(inode, new_size); } else { - /* - * Mark that we allocate beyond EOF so the subsequent truncate - * can proceed even if the new size is the same as i_size. - */ if (new_size > i_size_read(inode)) ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); } } -/* - * preallocate space for a file. This implements ext4's fallocate file - * operation, which gets called from sys_fallocate system call. - * For block-mapped files, posix_fallocate should fall back to the method - * of writing zeroes to the required new blocks (the same behavior which is - * expected for file systems which do not support fallocate() system call). - */ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file->f_path.dentry->d_inode; @@ -4296,14 +3546,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) struct ext4_map_blocks map; unsigned int credits, blkbits = inode->i_blkbits; - /* - * currently supporting (pre)allocate mode for extent-based - * files _only_ - */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return -EOPNOTSUPP; - /* Return error if mode is not supported */ + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; @@ -4312,15 +3558,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) trace_ext4_fallocate_enter(inode, offset, len, mode); map.m_lblk = offset >> blkbits; - /* - * We can't just convert len to max_blocks because - * If blocksize = 4096 offset = 3072 and len = 2048 - */ max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - map.m_lblk; - /* - * credits to insert 1 extent into extent tree - */ credits = ext4_chunk_trans_blocks(inode, max_blocks); mutex_lock(&inode->i_mutex); ret = inode_newsize_ok(inode, (len + offset)); @@ -4332,11 +3571,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT; if (mode & FALLOC_FL_KEEP_SIZE) flags |= EXT4_GET_BLOCKS_KEEP_SIZE; - /* - * Don't normalize the request if it can fit in one extent so - * that it doesn't get unnecessarily split into multiple - * extents. - */ if (len <= EXT_UNINIT_MAX_LEN << blkbits) flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; retry: @@ -4406,15 +3640,8 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, unsigned int credits, blkbits = inode->i_blkbits; map.m_lblk = offset >> blkbits; - /* - * We can't just convert len to max_blocks because - * If blocksize = 4096 offset = 3072 and len = 2048 - */ max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - map.m_lblk); - /* - * credits to insert 1 extent into extent tree - */ credits = ext4_chunk_trans_blocks(inode, max_blocks); while (ret >= 0 && ret < max_blocks) { map.m_lblk += ret; @@ -4442,9 +3669,6 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, return ret > 0 ? ret2 : ret; } -/* - * Callback function called for each extent to gather FIEMAP information. 
- */ static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, struct ext4_ext_cache *newex, struct ext4_extent *ex, void *data) @@ -4461,20 +3685,6 @@ static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, logical = (__u64)newex->ec_block << blksize_bits; if (newex->ec_start == 0) { - /* - * No extent in extent-tree contains block @newex->ec_start, - * then the block may stay in 1)a hole or 2)delayed-extent. - * - * Holes or delayed-extents are processed as follows. - * 1. lookup dirty pages with specified range in pagecache. - * If no page is got, then there is no delayed-extent and - * return with EXT_CONTINUE. - * 2. find the 1st mapped buffer, - * 3. check if the mapped buffer is both in the request range - * and a delayed buffer. If not, there is no delayed-extent, - * then return. - * 4. a delayed-extent is found, the extent will be collected. - */ ext4_lblk_t end = 0; pgoff_t last_offset; pgoff_t offset; @@ -4497,19 +3707,19 @@ static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, PAGECACHE_TAG_DIRTY, nr_pages, pages); if (!(flags & FIEMAP_EXTENT_DELALLOC)) { - /* First time, try to find a mapped buffer. */ + if (ret == 0) { out: for (index = 0; index < ret; index++) page_cache_release(pages[index]); - /* just a hole. */ + kfree(pages); return EXT_CONTINUE; } index = 0; next_page: - /* Try to find the 1st mapped buffer. */ + end = ((__u64)pages[index]->index << PAGE_SHIFT) >> blksize_bits; if (!page_has_buffers(pages[index])) @@ -4523,15 +3733,12 @@ static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, do { if (end >= newex->ec_block + newex->ec_len) - /* The buffer is out of - * the request range. - */ goto out; if (buffer_mapped(bh) && end >= newex->ec_block) { start_index = index - 1; - /* get the 1st mapped buffer. */ + goto found_mapped_buffer; } @@ -4539,19 +3746,13 @@ static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, end++; } while (bh != head); - /* No mapped buffer in the range found in this page, - * We need to look up next page. - */ if (index >= ret) { - /* There is no page left, but we need to limit - * newex->ec_len. - */ newex->ec_len = end - newex->ec_block; goto out; } goto next_page; } else { - /*Find contiguous delayed buffers. */ + if (ret > 0 && pages[0]->index == last_offset) head = page_buffers(pages[0]); bh = head; @@ -4561,17 +3762,13 @@ static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, found_mapped_buffer: if (bh != NULL && buffer_delay(bh)) { - /* 1st or contiguous delayed buffer found. */ + if (!(flags & FIEMAP_EXTENT_DELALLOC)) { - /* - * 1st delayed buffer found, record - * the start of extent. - */ flags |= FIEMAP_EXTENT_DELALLOC; newex->ec_block = end; logical = (__u64)end << blksize_bits; } - /* Find contiguous delayed buffers. */ + do { if (!buffer_delay(bh)) goto found_delayed_extent; @@ -4593,21 +3790,21 @@ static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, if (pages[index]->index != pages[start_index]->index + index - start_index) { - /* Blocks are not contiguous. */ + bh = NULL; break; } bh = head; do { if (!buffer_delay(bh)) - /* Delayed-extent ends. */ + goto found_delayed_extent; bh = bh->b_this_page; end++; } while (bh != head); } } else if (!(flags & FIEMAP_EXTENT_DELALLOC)) - /* a hole found. 
*/ + goto out; found_delayed_extent: @@ -4616,7 +3813,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, if (ret == nr_pages && bh != NULL && newex->ec_len < EXT_INIT_MAX_LEN && buffer_delay(bh)) { - /* Have not collected an extent and continue. */ + for (index = 0; index < ret; index++) page_cache_release(pages[index]); goto repeat; @@ -4644,7 +3841,6 @@ static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, return EXT_BREAK; return EXT_CONTINUE; } -/* fiemap flags we can handle specified here */ #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) static int ext4_xattr_fiemap(struct inode *inode, @@ -4656,10 +3852,10 @@ static int ext4_xattr_fiemap(struct inode *inode, int blockbits = inode->i_sb->s_blocksize_bits; int error = 0; - /* in-inode? */ + if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { struct ext4_iloc iloc; - int offset; /* offset of xattr in inode */ + int offset; error = ext4_get_inode_loc(inode, &iloc); if (error) @@ -4671,7 +3867,7 @@ static int ext4_xattr_fiemap(struct inode *inode, length = EXT4_SB(inode->i_sb)->s_inode_size - offset; flags |= FIEMAP_EXTENT_DATA_INLINE; brelse(iloc.bh); - } else { /* external block */ + } else { physical = EXT4_I(inode)->i_file_acl << blockbits; length = inode->i_sb->s_blocksize; } @@ -4682,18 +3878,6 @@ static int ext4_xattr_fiemap(struct inode *inode, return (error < 0 ? error : 0); } -/* - * ext4_ext_punch_hole - * - * Punches a hole of "length" bytes in a file starting - * at byte "offset" - * - * @inode: The inode of the file to punch a hole in - * @offset: The starting byte offset of the hole - * @length: The length of the hole - * - * Returns the number of blocks removed or negative on err - */ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) { struct inode *inode = file->f_path.dentry->d_inode; @@ -4705,14 +3889,10 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) loff_t first_page_offset, last_page_offset; int credits, err = 0; - /* No need to punch hole beyond i_size */ + if (offset >= inode->i_size) return 0; - /* - * If the hole extends beyond i_size, set the hole - * to end after the page that contains i_size - */ if (offset + length > inode->i_size) { length = inode->i_size + PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - @@ -4725,10 +3905,6 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) first_page_offset = first_page << PAGE_CACHE_SHIFT; last_page_offset = last_page << PAGE_CACHE_SHIFT; - /* - * Write out all dirty pages to avoid race conditions - * Then release them. - */ if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { err = filemap_write_and_wait_range(mapping, offset, offset + length - 1); @@ -4737,13 +3913,13 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) return err; } - /* Now release the pages */ + if (last_page_offset > first_page_offset) { truncate_inode_pages_range(mapping, first_page_offset, last_page_offset-1); } - /* finish any pending end_io work */ + ext4_flush_completed_IO(inode); credits = ext4_writepage_trans_blocks(inode); @@ -4755,27 +3931,13 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) if (err) goto out; - /* - * Now we need to zero out the non-page-aligned data in the - * pages at the start and tail of the hole, and unmap the buffer - * heads for the block aligned regions of the page that were - * completely zeroed. 
- */ if (first_page > last_page) { - /* - * If the file space being truncated is contained within a page - * just zero out and unmap the middle of that page - */ err = ext4_discard_partial_page_buffers(handle, mapping, offset, length, 0); if (err) goto out; } else { - /* - * zero out and unmap the partial page that contains - * the start of the hole - */ page_len = first_page_offset - offset; if (page_len > 0) { err = ext4_discard_partial_page_buffers(handle, mapping, @@ -4784,10 +3946,6 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) goto out; } - /* - * zero out and unmap the partial page that contains - * the end of the hole - */ page_len = offset + length - last_page_offset; if (page_len > 0) { err = ext4_discard_partial_page_buffers(handle, mapping, @@ -4797,10 +3955,6 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) } } - /* - * If i_size is contained in the last page, we need to - * unmap and zero the partial page after i_size - */ if (inode->i_size >> PAGE_CACHE_SHIFT == last_page && inode->i_size % PAGE_CACHE_SIZE != 0) { @@ -4820,7 +3974,7 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) EXT4_BLOCK_SIZE_BITS(sb); stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); - /* If there are no blocks to remove, return now */ + if (first_block >= stop_block) goto out; @@ -4851,7 +4005,7 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, ext4_lblk_t start_blk; int error = 0; - /* fallback to generic here if not in extents fmt */ + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return generic_block_fiemap(inode, fieinfo, start, len, ext4_get_block); @@ -4871,10 +4025,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, last_blk = EXT_MAX_BLOCKS-1; len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; - /* - * Walk the extent tree gathering extent information. - * ext4_ext_fiemap_cb will push extents back to user. 
- */ error = ext4_ext_walk_space(inode, start_blk, len_blks, ext4_ext_fiemap_cb, fieinfo); } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 8035a88d..fb9ad811 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -605,6 +605,38 @@ static int fuse_readpages_fill(void *_data, struct page *page) return PTR_ERR(req); } } + +#ifdef CONFIG_CMA + if (is_cma_pageblock(page)) { + struct page *oldpage = page, *newpage; + int err; + + + page_cache_get(oldpage); + + newpage = alloc_page(GFP_HIGHUSER); + if (!newpage) { + page_cache_release(oldpage); + return -ENOMEM; + } + + err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL); + if (err) { + __free_page(newpage); + page_cache_release(oldpage); + return err; + } + + lock_page(newpage); + put_page(newpage); + + + unlock_page(oldpage); + page_cache_release(oldpage); + page = newpage; + } +#endif + page_cache_get(page); req->pages[req->num_pages] = page; req->num_pages++; diff --git a/fs/namespace.c b/fs/namespace.c index 111c7e8f..29e8a6ff 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1571,11 +1571,11 @@ static int do_new_mount(struct path *path, char *type, int flags, err = do_add_mount(real_mount(mnt), path, mnt_flags); if (err) mntput(mnt); - - +#ifdef CONFIG_ASYNC_FSYNC if (!err && ((!strcmp(type, "ext4") && !strcmp(path->dentry->d_name.name, "data")) || (!strcmp(type, "fuse") && !strcmp(path->dentry->d_name.name, "emulated")))) mnt->mnt_sb->fsync_flags |= FLAG_ASYNC_FSYNC; +#endif return err; } diff --git a/fs/open.c b/fs/open.c index b6b82626..95a3d15b 100644 --- a/fs/open.c +++ b/fs/open.c @@ -863,15 +863,13 @@ extern int get_prealloc_size(void); extern int get_logfile_prealloc_size(void); static int pre_allocate(struct file *f) { - int prealloc_size; + int prealloc_size = 0; if (!f->f_op->fallocate || !(f->f_mode & FMODE_WRITE)) return 0; if (f->f_path.dentry->d_parent && !strcmp(f->f_path.dentry->d_parent->d_name.name, "htclog")) prealloc_size = get_logfile_prealloc_size(); - else - prealloc_size = get_prealloc_size(); if (prealloc_size) do_fallocate(f, FALLOC_FL_KEEP_SIZE, 0, prealloc_size); diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 03d41ed9..848f3c2d 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -22,13 +22,6 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) unsigned long data, text, lib, swap; unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; - /* - * Note: to minimize their overhead, mm maintains hiwater_vm and - * hiwater_rss only when about to *lower* total_vm or rss. Any - * collector of these hiwater stats must therefore get total_vm - * and rss too, which will usually be the higher. Barriers? not - * worth the effort, such snapshots can always be inconsistent. - */ hiwater_vm = total_vm = mm->total_vm; if (hiwater_vm < mm->hiwater_vm) hiwater_vm = mm->hiwater_vm; @@ -128,7 +121,7 @@ static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma) kunmap(page); put_page(page); - /* if strnlen hit a null terminator then we're done */ + if (write_len != len) break; @@ -157,16 +150,10 @@ static void *m_start(struct seq_file *m, loff_t *pos) struct vm_area_struct *vma, *tail_vma = NULL; loff_t l = *pos; - /* Clear the per syscall fields in priv */ + priv->task = NULL; priv->tail_vma = NULL; - /* - * We remember last_addr rather than next_addr to hit with - * mmap_cache most of the time. We have zero last_addr at - * the beginning and also after lseek. We will have -1 last_addr - * after the end of the vmas. 
- */ if (last_addr == -1UL) return NULL; @@ -183,17 +170,13 @@ static void *m_start(struct seq_file *m, loff_t *pos) tail_vma = get_gate_vma(priv->task->mm); priv->tail_vma = tail_vma; - /* Start with last addr hint */ + vma = find_vma(mm, last_addr); if (last_addr && vma) { vma = vma->vm_next; goto out; } - /* - * Check the vma index is within the range and do - * sequential scan until m_index. - */ vma = NULL; if ((unsigned long)l < mm->map_count) { vma = mm->mmap; @@ -203,13 +186,13 @@ static void *m_start(struct seq_file *m, loff_t *pos) } if (l != mm->map_count) - tail_vma = NULL; /* After gate vma */ + tail_vma = NULL; out: if (vma) return vma; - /* End of vmas has been reached */ + m->version = (tail_vma != NULL)? 0: -1UL; up_read(&mm->mmap_sem); mmput(mm); @@ -281,7 +264,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; } - /* We don't show the stack guard page in /proc/maps */ + start = vma->vm_start; if (stack_guard_page_start(vma, start)) start += PAGE_SIZE; @@ -299,10 +282,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) pgoff, MAJOR(dev), MINOR(dev), ino, &len); - /* - * Print the dentry name for named mappings, and a - * special [heap] marker for the heap: - */ if (file) { pad_len_spaces(m, len); seq_path(m, &file->f_path, "\n"); @@ -327,15 +306,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) tid = vm_is_stack(task, vma, is_pid); if (tid != 0) { - /* - * Thread stack in /proc/PID/task/TID/maps or - * the main process stack. - */ if (!is_pid || (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack)) { name = "[stack]"; } else { - /* Thread stack in /proc/PID/maps */ + pad_len_spaces(m, len); seq_printf(m, "[stack:%d]", tid); } @@ -364,7 +339,7 @@ static int show_map(struct seq_file *m, void *v, int is_pid) show_map_vma(m, vma, is_pid); - if (m->count < m->size) /* vma is copied successfully */ + if (m->count < m->size) m->version = (vma != get_gate_vma(task->mm)) ? vma->vm_start : 0; return 0; @@ -418,23 +393,6 @@ const struct file_operations proc_tid_maps_operations = { .release = seq_release_private, }; -/* - * Proportional Set Size(PSS): my share of RSS. - * - * PSS of a process is the count of pages it has in memory, where each - * page is divided by the number of processes sharing it. So if a - * process has 1000 pages all to itself, and 1000 shared with one other - * process, its PSS will be 1500. - * - * To keep (accumulated) division errors low, we adopt a 64bit - * fixed-point pss counter to minimize division errors. So (pss >> - * PSS_SHIFT) would be the real byte count. - * - * A shift of 12 before division means (assuming 4K page size): - * - 1M 3-user-pages add up to 8KB errors; - * - supports mapcount up to 2^24, or 16M; - * - supports PSS up to 2^52 bytes, or 4PB. - */ #define PSS_SHIFT 12 #ifdef CONFIG_PROC_PAGE_MONITOR @@ -477,7 +435,7 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr, mss->anonymous += ptent_size; mss->resident += ptent_size; - /* Accumulate the size in pages that have been accessed. */ + if (pte_young(ptent) || PageReferenced(page)) mss->referenced += ptent_size; mapcount = page_mapcount(page); @@ -513,11 +471,6 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, if (pmd_trans_unstable(pmd)) return 0; - /* - * The mmap_sem held all the way back in m_start() is what - * keeps khugepaged out of here and from collapsing things - * in here. 
- */ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) smaps_pte_entry(*pte, addr, PAGE_SIZE, walk); @@ -540,7 +493,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) memset(&mss, 0, sizeof mss); mss.vma = vma; - /* mmap_sem is held in m_start */ + if (vma->vm_mm && !is_vm_hugetlb_page(vma)) walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); @@ -583,7 +536,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) seq_putc(m, '\n'); } - if (m->count < m->size) /* vma is copied successfully */ + if (m->count < m->size) m->version = (vma != get_gate_vma(task->mm)) ? vma->vm_start : 0; return 0; @@ -659,7 +612,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, if (!page) continue; - /* Clear accessed and referenced bits. */ + ptep_test_and_clear_young(vma, addr, pte); ClearPageReferenced(page); } @@ -706,15 +659,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, clear_refs_walk.private = vma; if (is_vm_hugetlb_page(vma)) continue; - /* - * Writing 1 to /proc/pid/clear_refs affects all pages. - * - * Writing 2 to /proc/pid/clear_refs only affects - * Anonymous pages. - * - * Writing 3 to /proc/pid/clear_refs only affects file - * mapped pages. - */ if (type == CLEAR_REFS_ANON && vma->vm_file) continue; if (type == CLEAR_REFS_MAPPED && !vma->vm_file) @@ -817,11 +761,6 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte) static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, pmd_t pmd, int offset) { - /* - * Currently pmd for thp is always present because thp can not be - * swapped-out, migrated, or HWPOISONed (split in such cases instead.) - * This if-check is just to prepare for future implementation. - */ if (pmd_present(pmd)) *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); @@ -844,7 +783,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, int err = 0; pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); - /* find the first VMA at or above 'addr' */ + vma = find_vma(walk->mm, addr); if (vma && pmd_trans_huge_lock(pmd, vma) == 1) { for (; addr != end; addr += PAGE_SIZE) { @@ -865,20 +804,16 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, return 0; for (; addr != end; addr += PAGE_SIZE) { - /* check to see if we've left 'vma' behind - * and need a new, higher one */ if (vma && (addr >= vma->vm_end)) { vma = find_vma(walk->mm, addr); pme = make_pme(PM_NOT_PRESENT); } - /* check that 'vma' actually covers this address, - * and that it isn't a huge page vma */ if (vma && (vma->vm_start <= addr) && !is_vm_hugetlb_page(vma)) { pte = pte_offset_map(pmd, addr); pte_to_pagemap_entry(&pme, *pte); - /* unmap before userspace copy */ + pte_unmap(pte); } err = add_to_pagemap(addr, &pme, pm); @@ -902,7 +837,6 @@ static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, *pme = make_pme(PM_NOT_PRESENT); } -/* This function walks within one hugetlb entry in the single call */ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -923,32 +857,8 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, return err; } -#endif /* HUGETLB_PAGE */ - -/* - * /proc/pid/pagemap - an array mapping virtual pages to pfns - * - * For each page in the address space, this file contains one 64-bit entry - * consisting of the following: - * - * Bits 0-55 page frame number (PFN) if present - * 
Bits 0-4 swap type if swapped - * Bits 5-55 swap offset if swapped - * Bits 55-60 page shift (page size = 1<<page shift) if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT) start_vaddr = end_vaddr; - /* - * The odds are that this will stop walking way - * before end_vaddr, because the length of the - * user buffer is tracked in "pm", and the walk - * will stop when we hit the end of the buffer. - */ ret = 0; while (count && (start_vaddr < end_vaddr)) { int len; @@ -1016,7 +920,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, pm.pos = 0; end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; - /* overflow ? */ + if (end < start_vaddr || end > end_vaddr) end = end_vaddr; down_read(&mm->mmap_sem); @@ -1048,10 +952,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, } const struct file_operations proc_pagemap_operations = { - .llseek = mem_lseek, /* borrow this */ + .llseek = mem_lseek, .read = pagemap_read, }; -#endif /* CONFIG_PROC_PAGE_MONITOR */ +#endif #ifdef CONFIG_NUMA @@ -1184,9 +1088,6 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, } #endif -/* - * Display pages allocated per node and memory policy via /proc. - */ static int show_numa_map(struct seq_file *m, void *v, int is_pid) { struct numa_maps_private *numa_priv = m->private; @@ -1203,7 +1104,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) if (!mm) return 0; - /* Ensure we start with an empty set of numa_maps statistics. */ + memset(md, 0, sizeof(*md)); md->vma = vma; @@ -1227,10 +1128,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) } else { pid_t tid = vm_is_stack(proc_priv->task, vma, is_pid); if (tid != 0) { - /* - * Thread stack in /proc/PID/task/TID/maps or - * the main process stack. - */ if (!is_pid || (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack)) seq_printf(m, " stack"); @@ -1345,4 +1242,4 @@ const struct file_operations proc_tid_numa_maps_operations = { .llseek = seq_lseek, .release = seq_release_private, }; -#endif /* CONFIG_NUMA */ +#endif diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h index 85a3ffaa..5a0c78f5 100644 --- a/include/asm-generic/dma-coherent.h +++ b/include/asm-generic/dma-coherent.h @@ -2,17 +2,12 @@ #define DMA_COHERENT_H #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT -/* - * These two functions are only for dma allocator. - * Don't use them in device drivers.
- */ int dma_alloc_from_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle, void **ret); int dma_release_from_coherent(struct device *dev, int order, void *vaddr); -/* - * Standard interface - */ +int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, size_t size, int *ret); #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY extern int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, diff --git a/include/linux/akm8963.h b/include/linux/akm8963.h index 20665beb..95e17f63 100644 --- a/include/linux/akm8963.h +++ b/include/linux/akm8963.h @@ -9,7 +9,7 @@ #define AKM8963_I2C_NAME "akm8963" #define SENSOR_DATA_SIZE 8 -#define YPR_DATA_SIZE 12 +#define YPR_DATA_SIZE 22 #define RWBUF_SIZE 16 #define ACC_DATA_FLAG 0 diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h index 547ab568..18513e37 100644 --- a/include/linux/dma-attrs.h +++ b/include/linux/dma-attrs.h @@ -15,6 +15,8 @@ enum dma_attr { DMA_ATTR_WEAK_ORDERING, DMA_ATTR_WRITE_COMBINE, DMA_ATTR_NON_CONSISTENT, + DMA_ATTR_NO_KERNEL_MAPPING, + DMA_ATTR_STRONGLY_ORDERED, DMA_ATTR_MAX, }; diff --git a/include/linux/fs.h b/include/linux/fs.h index de910fa9..eea86e40 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1204,7 +1204,6 @@ struct super_block { int s_readonly_remount; - #define FLAG_ASYNC_FSYNC 0x1 unsigned int fsync_flags; }; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 74ea46bd..480fd6dc 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -9,7 +9,6 @@ struct vm_area_struct; -/* Plain integer GFP bitmasks. Do not use this directly. */ #define ___GFP_DMA 0x01u #define ___GFP_HIGHMEM 0x02u #define ___GFP_DMA32 0x04u @@ -23,6 +22,7 @@ struct vm_area_struct; #define ___GFP_REPEAT 0x400u #define ___GFP_NOFAIL 0x800u #define ___GFP_NORETRY 0x1000u +#define ___GFP_CMA 0x2000u #define ___GFP_COMP 0x4000u #define ___GFP_ZERO 0x8000u #define ___GFP_NOMEMALLOC 0x10000u @@ -39,69 +39,41 @@ struct vm_area_struct; #define ___GFP_WRITE 0x1000000u #define ___GFP_NO_COMPACT 0x2000000u -/* - * GFP bitmasks.. - * - * Zone modifiers (see linux/mmzone.h - low three bits) - * - * Do not put any conditional on these. If necessary modify the definitions - * without the underscores and use them consistently. The definitions here may - * be used in bit comparisons. - */ #define __GFP_DMA ((__force gfp_t)___GFP_DMA) #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) -#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ -#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) -/* - * Action modifiers - doesn't change the zoning - * - * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt - * _might_ fail. This depends upon the particular VM implementation. - * - * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller - * cannot handle allocation failures. This modifier is deprecated and no new - * users should be added. - * - * __GFP_NORETRY: The VM implementation must not retry indefinitely. - * - * __GFP_MOVABLE: Flag that this page will be movable by the page migration - * mechanism or reclaimed - */ -#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ -#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */ -#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ -#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? 
*/ -#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */ -#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */ -#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ -#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ -#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ -#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ -#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ -#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */ -#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ -#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ -#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ -#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ +#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) +#define __GFP_CMA ((__force gfp_t)___GFP_CMA) +#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \ + __GFP_CMA) +#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) +#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) +#define __GFP_IO ((__force gfp_t)___GFP_IO) +#define __GFP_FS ((__force gfp_t)___GFP_FS) +#define __GFP_COLD ((__force gfp_t)___GFP_COLD) +#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) +#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) +#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) +#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) +#define __GFP_COMP ((__force gfp_t)___GFP_COMP) +#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) +#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) +#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) +#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) +#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) +#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) -#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ -#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ -#define __GFP_NO_COMPACT ((__force gfp_t)___GFP_NO_COMPACT) /* Allocator no compact */ - -/* - * This may seem redundant, but it's a way of annotating false positives vs. - * allocations that simply cannot be supported (e.g. page tables). 
- */ +#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) +#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) +#define __GFP_NO_COMPACT ((__force gfp_t)___GFP_NO_COMPACT) + #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) -#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */ +#define __GFP_BITS_SHIFT 26 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) -/* This equals 0, but use constants in case they ever change */ #define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) -/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */ #define GFP_ATOMIC (__GFP_HIGH) #define GFP_NOIO (__GFP_WAIT) #define GFP_NOFS (__GFP_WAIT | __GFP_IO) @@ -125,32 +97,23 @@ struct vm_area_struct; #define GFP_THISNODE ((__force gfp_t)0) #endif -/* This mask makes up all the page movable related flags */ -#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) +#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE|__GFP_CMA) -/* Control page allocator reclaim behavior */ #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ __GFP_NORETRY|__GFP_NOMEMALLOC) -/* Control slab gfp mask during early boot */ #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) -/* Control allocation constraints */ #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) -/* Do not use these with a slab allocator */ #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) -/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some - platforms, used as appropriate on others */ #define GFP_DMA __GFP_DMA -/* 4GB DMA on some platforms */ #define GFP_DMA32 __GFP_DMA32 -/* Convert GFP flags to their corresponding migrate type */ static inline int allocflags_to_migratetype(gfp_t gfp_flags) { WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); @@ -158,9 +121,15 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) if (unlikely(page_group_by_mobility_disabled)) return MIGRATE_UNMOVABLE; - /* Group based on mobility */ + +#ifndef CONFIG_CMA return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) | ((gfp_flags & __GFP_RECLAIMABLE) != 0); +#else + return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) | + (((gfp_flags & __GFP_CMA) != 0) << 1) | + ((gfp_flags & __GFP_RECLAIMABLE) != 0); +#endif } #ifdef CONFIG_HIGHMEM @@ -181,38 +150,6 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) #define OPT_ZONE_DMA32 ZONE_NORMAL #endif -/* - * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the - * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long - * and there are 16 of them to cover all possible combinations of - * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM. - * - * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. - * But GFP_MOVABLE is not only a zone specifier but also an allocation - * policy. Therefore __GFP_MOVABLE plus another zone selector is valid. - * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1". 
- * - * bit result - * ================= - * 0x0 => NORMAL - * 0x1 => DMA or NORMAL - * 0x2 => HIGHMEM or NORMAL - * 0x3 => BAD (DMA+HIGHMEM) - * 0x4 => DMA32 or DMA or NORMAL - * 0x5 => BAD (DMA+DMA32) - * 0x6 => BAD (HIGHMEM+DMA32) - * 0x7 => BAD (HIGHMEM+DMA32+DMA) - * 0x8 => NORMAL (MOVABLE+0) - * 0x9 => DMA or NORMAL (MOVABLE+DMA) - * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) - * 0xb => BAD (MOVABLE+HIGHMEM+DMA) - * 0xc => DMA32 (MOVABLE+HIGHMEM+DMA32) - * 0xd => BAD (MOVABLE+DMA32+DMA) - * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) - * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) - * - * ZONES_SHIFT must be <= 2 on 32 bit platforms. - */ #if 16 * ZONES_SHIFT > BITS_PER_LONG #error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer @@ -229,12 +166,6 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \ ) -/* - * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32 - * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per - * entry starting with bit 0. Bit is set if the combination is not - * allowed. - */ #define GFP_ZONE_BAD ( \ 1 << (___GFP_DMA | ___GFP_HIGHMEM) \ | 1 << (___GFP_DMA | ___GFP_DMA32) \ @@ -257,12 +188,6 @@ static inline enum zone_type gfp_zone(gfp_t flags) return z; } -/* - * There is only one page-allocator function, and two main namespaces to - * it. The alloc_page*() variants return 'struct page *' and as such - * can allocate highmem pages, the *get*page*() variants return - * virtual kernel addresses to the allocated page(s). - */ static inline int gfp_zonelist(gfp_t flags) { @@ -272,15 +197,6 @@ static inline int gfp_zonelist(gfp_t flags) return 0; } -/* - * We get the zone list from the current node and the gfp_mask. - * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. - * There are two zonelists per node, one for all zones with memory and - * one containing just zones from the node the zonelist belongs to. - * - * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets - * optimized to &contig_page_data at compile-time. - */ static inline struct zonelist *node_zonelist(int nid, gfp_t flags) { return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags); @@ -307,7 +223,7 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { - /* Unknown node is current node */ + if (nid < 0) nid = numa_node_id(); @@ -350,7 +266,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask); void *alloc_pages_exact(size_t size, gfp_t gfp_mask); void free_pages_exact(void *virt, size_t size); -/* This is different from alloc_pages_exact_node !!! */ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); #define __get_free_page(gfp_mask) \ @@ -372,13 +287,6 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(void); void drain_local_pages(void *dummy); -/* - * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what - * GFP flags are used before interrupts are enabled. Once interrupts are - * enabled, it is set to __GFP_BITS_MASK while the system is running. During - * hibernation, it is used by PM to avoid I/O during memory allocation while - * devices are suspended. 
- */ extern gfp_t gfp_allowed_mask; extern void pm_restrict_gfp_mask(void); @@ -391,18 +299,16 @@ static inline bool pm_suspended_storage(void) { return false; } -#endif /* CONFIG_PM_SLEEP */ +#endif #ifdef CONFIG_CMA -/* The below functions must be run on a range from a single zone. */ extern int alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype); extern void free_contig_range(unsigned long pfn, unsigned nr_pages); -/* CMA stuff */ extern void init_cma_reserved_pageblock(struct page *page); #endif -#endif /* __LINUX_GFP_H */ +#endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h index d3999b4e..c737eb75 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -211,9 +211,24 @@ static inline struct page * alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, unsigned long vaddr) { +#ifndef CONFIG_CMA return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr); +#else + return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma, + vaddr); +#endif } +#ifdef CONFIG_CMA +static inline struct page * +alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma, + unsigned long vaddr) +{ + return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma, + vaddr); +} +#endif + static inline void clear_highpage(struct page *page) { void *kaddr = kmap_atomic(page); diff --git a/include/linux/ion.h b/include/linux/ion.h index 5af27664..c2cf352c 100644 --- a/include/linux/ion.h +++ b/include/linux/ion.h @@ -2,7 +2,7 @@ * include/linux/ion.h * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -28,6 +28,7 @@ enum ion_heap_type { ION_HEAP_TYPE_CARVEOUT, ION_HEAP_TYPE_IOMMU, ION_HEAP_TYPE_CP, + ION_HEAP_TYPE_DMA, ION_HEAP_TYPE_CUSTOM, ION_NUM_HEAPS, }; @@ -35,74 +36,9 @@ enum ion_heap_type { #define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM) #define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG) #define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT) -#define ION_HEAP_CP_MASK (1 << ION_HEAP_TYPE_CP) +#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA) - - -enum ion_heap_ids { - INVALID_HEAP_ID = -1, - ION_CP_MM_HEAP_ID = 8, - ION_CP_ROTATOR_HEAP_ID = 9, - ION_CP_MFC_HEAP_ID = 12, - ION_CP_WB_HEAP_ID = 16, - ION_CAMERA_HEAP_ID = 20, - ION_SF_HEAP_ID = 24, - ION_IOMMU_HEAP_ID = 25, - ION_QSECOM_HEAP_ID = 27, - ION_AUDIO_HEAP_ID = 28, - - ION_MM_FIRMWARE_HEAP_ID = 29, - ION_SYSTEM_HEAP_ID = 30, - - ION_HEAP_ID_RESERVED = 31 -}; - -enum ion_fixed_position { - NOT_FIXED, - FIXED_LOW, - FIXED_MIDDLE, - FIXED_HIGH, -}; - -enum cp_mem_usage { - VIDEO_BITSTREAM = 0x1, - VIDEO_PIXEL = 0x2, - VIDEO_NONPIXEL = 0x3, - MAX_USAGE = 0x4, - UNKNOWN = 0x7FFFFFFF, -}; - -#define ION_SECURE (1 << ION_HEAP_ID_RESERVED) - -#define ION_HEAP(bit) (1 << (bit)) - -#define ION_VMALLOC_HEAP_NAME "vmalloc" -#define ION_AUDIO_HEAP_NAME "audio" -#define ION_SF_HEAP_NAME "sf" -#define ION_MM_HEAP_NAME "mm" -#define ION_ROTATOR_HEAP_NAME "rotator" -#define ION_CAMERA_HEAP_NAME "camera_preview" -#define ION_IOMMU_HEAP_NAME "iommu" -#define ION_MFC_HEAP_NAME "mfc" -#define ION_WB_HEAP_NAME "wb" -#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw" -#define ION_QSECOM_HEAP_NAME "qsecom" -#define ION_FMEM_HEAP_NAME "fmem" - -#define CACHED 1 -#define UNCACHED 0 - -#define 
ION_CACHE_SHIFT 0 - -#define ION_SET_CACHE(__cache) ((__cache) << ION_CACHE_SHIFT) - -#define ION_IS_CACHED(__flags) ((__flags) & (1 << ION_CACHE_SHIFT)) - -#ifdef CONFIG_ARCH_DUMMY -#define ION_IOMMU_UNMAP_DELAYED 0 -#else -#define ION_IOMMU_UNMAP_DELAYED 1 -#endif +#define ION_FLAG_CACHED 1 #ifdef __KERNEL__ #include @@ -125,32 +61,7 @@ struct ion_platform_heap { enum ion_memory_types memory_type; unsigned int has_outer_cache; void *extra_data; -}; - -struct ion_cp_heap_pdata { - enum ion_permission_type permission_type; - unsigned int align; - ion_phys_addr_t secure_base; - size_t secure_size; - int reusable; - int mem_is_fmem; - enum ion_fixed_position fixed_position; - int iommu_map_all; - int iommu_2x_map_domain; - ion_virt_addr_t *virt_addr; - int (*request_region)(void *); - int (*release_region)(void *); - void *(*setup_region)(void); -}; - -struct ion_co_heap_pdata { - int adjacent_mem_id; - unsigned int align; - int mem_is_fmem; - enum ion_fixed_position fixed_position; - int (*request_region)(void *); - int (*release_region)(void *); - void *(*setup_region)(void); + void *priv; }; struct ion_platform_data { @@ -176,7 +87,8 @@ struct ion_client *msm_ion_client_create(unsigned int heap_mask, void ion_client_destroy(struct ion_client *client); struct ion_handle *ion_alloc(struct ion_client *client, size_t len, - size_t align, unsigned int flags); + size_t align, unsigned int heap_mask, + unsigned int flags); void ion_free(struct ion_client *client, struct ion_handle *handle); @@ -186,8 +98,7 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle, struct sg_table *ion_sg_table(struct ion_client *client, struct ion_handle *handle); -void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle, - unsigned long flags); +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle); void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle); @@ -220,14 +131,6 @@ int ion_secure_heap(struct ion_device *dev, int heap_id, int version, int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version, void *data); -int msm_ion_secure_heap(int heap_id); - -int msm_ion_unsecure_heap(int heap_id); - -int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage); - -int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage); - int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle, void *vaddr, unsigned long len, unsigned int cmd); @@ -254,7 +157,9 @@ static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask, static inline void ion_client_destroy(struct ion_client *client) { } static inline struct ion_handle *ion_alloc(struct ion_client *client, - size_t len, size_t align, unsigned int flags) + size_t len, size_t align, + unsigned int heap_mask, + unsigned int flags) { return ERR_PTR(-ENODEV); } @@ -331,28 +236,6 @@ static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id, return -ENODEV; } -static inline int msm_ion_secure_heap(int heap_id) -{ - return -ENODEV; - -} - -static inline int msm_ion_unsecure_heap(int heap_id) -{ - return -ENODEV; -} - -static inline int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage) -{ - return -ENODEV; -} - -static inline int msm_ion_unsecure_heap_2_0(int heap_id, - enum cp_mem_usage usage) -{ - return -ENODEV; -} - static inline int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle, void *vaddr, unsigned long len, unsigned int cmd) @@ -367,19 +250,17 @@ static inline int 
msm_ion_do_cache_op(struct ion_client *client, struct ion_allocation_data { size_t len; size_t align; + unsigned int heap_mask; unsigned int flags; struct ion_handle *handle; }; -struct ion_allocation_data_new { +struct ion_allocation_data_old { size_t len; size_t align; - unsigned int heap_mask; unsigned int flags; struct ion_handle *handle; }; - - struct ion_fd_data { struct ion_handle *handle; int fd; @@ -394,18 +275,11 @@ struct ion_custom_data { unsigned long arg; }; +#define ION_CLIENT_NAME_LENGTH 64 -struct ion_flush_data { - struct ion_handle *handle; - int fd; - void *vaddr; - unsigned int offset; - unsigned int length; -}; - -struct ion_flag_data { - struct ion_handle *handle; - unsigned long flags; +struct ion_client_name_data { + int len; + char* name; }; #define ION_IOC_MAGIC 'I' @@ -413,8 +287,8 @@ struct ion_flag_data { #define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ struct ion_allocation_data) -#define ION_IOC_ALLOC_NEW _IOWR(ION_IOC_MAGIC, 0, \ - struct ion_allocation_data_new) +#define ION_IOC_ALLOC_OLD _IOWR(ION_IOC_MAGIC, 0, \ + struct ion_allocation_data_old) #define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) @@ -422,30 +296,10 @@ struct ion_flag_data { #define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) -#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, int) +#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data) #define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) -#define ION_IOC_CLEAN_CACHES_OLD _IOWR(ION_IOC_MAGIC, 7, \ - struct ion_flush_data) -#define ION_IOC_INV_CACHES_OLD _IOWR(ION_IOC_MAGIC, 8, \ - struct ion_flush_data) -#define ION_IOC_CLEAN_INV_CACHES_OLD _IOWR(ION_IOC_MAGIC, 9, \ - struct ion_flush_data) - -#define ION_IOC_GET_FLAGS_OLD _IOWR(ION_IOC_MAGIC, 10, \ - struct ion_flag_data) - -#define ION_IOC_MSM_MAGIC 'M' - -#define ION_IOC_CLEAN_CACHES _IOWR(ION_IOC_MSM_MAGIC, 0, \ - struct ion_flush_data) -#define ION_IOC_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 1, \ - struct ion_flush_data) -#define ION_IOC_CLEAN_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 2, \ - struct ion_flush_data) - -#define ION_IOC_GET_FLAGS _IOWR(ION_IOC_MSM_MAGIC, 3, \ - struct ion_flag_data) - +#define ION_IOC_CLIENT_RENAME _IOWR(ION_IOC_MAGIC, 12, \ + struct ion_client_name_data) #endif diff --git a/include/linux/kref.h b/include/linux/kref.h index 9c07dceb..46ad78ae 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -23,43 +23,17 @@ struct kref { atomic_t refcount; }; -/** - * kref_init - initialize object. - * @kref: object in question. - */ static inline void kref_init(struct kref *kref) { atomic_set(&kref->refcount, 1); } -/** - * kref_get - increment refcount for object. - * @kref: object. - */ static inline void kref_get(struct kref *kref) { WARN_ON(!atomic_read(&kref->refcount)); atomic_inc(&kref->refcount); } -/** - * kref_sub - subtract a number of refcounts for object. - * @kref: object. - * @count: Number of recounts to subtract. - * @release: pointer to the function that will clean up the object when the - * last reference to the object is released. - * This pointer is required, and it is not acceptable to pass kfree - * in as this function. If the caller does pass kfree to this - * function, you will be publicly mocked mercilessly by the kref - * maintainer, and anyone else who happens to notice it. You have - * been warned. - * - * Subtract @count from the refcount, and if 0, call release(). - * Return 1 if the object was removed, otherwise return 0. 
Beware, if this - * function returns 0, you still can not count on the kref from remaining in - * memory. Only use the return value if you want to see if the kref is now - * gone, not present. - */ static inline int kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *kref)) { @@ -72,25 +46,13 @@ static inline int kref_sub(struct kref *kref, unsigned int count, return 0; } -/** - * kref_put - decrement refcount for object. - * @kref: object. - * @release: pointer to the function that will clean up the object when the - * last reference to the object is released. - * This pointer is required, and it is not acceptable to pass kfree - * in as this function. If the caller does pass kfree to this - * function, you will be publicly mocked mercilessly by the kref - * maintainer, and anyone else who happens to notice it. You have - * been warned. - * - * Decrement the refcount, and if 0, call release(). - * Return 1 if the object was removed, otherwise return 0. Beware, if this - * function returns 0, you still can not count on the kref from remaining in - * memory. Only use the return value if you want to see if the kref is now - * gone, not present. - */ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)) { return kref_sub(kref, 1, release); } -#endif /* _KREF_H_ */ + +static inline int __must_check kref_get_unless_zero(struct kref *kref) +{ + return atomic_add_unless(&kref->refcount, 1, 0); +} +#endif diff --git a/include/linux/mfd/pm8xxx/pm8921-bms.h b/include/linux/mfd/pm8xxx/pm8921-bms.h index e57ed96c..9457cd10 100644 --- a/include/linux/mfd/pm8xxx/pm8921-bms.h +++ b/include/linux/mfd/pm8xxx/pm8921-bms.h @@ -104,6 +104,7 @@ struct pm8921_bms_platform_data { unsigned int criteria_sw_est_ocv; unsigned int rconn_mohm_sw_est_ocv; void (*get_power_jacket_status) (int *full, int *status, int *exist); + int qb_mode_cc_criteria_uAh; }; extern int batt_stored_magic_num; @@ -142,7 +143,13 @@ int pm8921_bms_get_batt_current(int *result); int pm8921_store_hw_reset_reason(int is_hw_reset); int pm8921_bms_get_batt_soc(int *result); int pm8921_bms_get_batt_cc(int *result); +int pm8921_bms_store_battery_data_emmc(void); +int pm8921_bms_store_battery_ui_soc(int soc_ui); +int pm8921_bms_get_battery_ui_soc(void); int pm8921_bms_get_attr_text(char *buf, int size); +int pm8921_bms_enter_qb_mode(void); +int pm8921_bms_exit_qb_mode(void); +int pm8921_qb_mode_pwr_consumption_check(unsigned long time_stamp); #endif #else static inline int pm8921_bms_get_vsense_avg(int *result) @@ -215,11 +222,35 @@ static inline int pm8921_bms_get_batt_cc(int *result) { return -ENXIO; } +static inline int pm8921_bms_store_battery_data_emmc(void) +{ + return -ENXIO; +} +static inline int pm8921_bms_store_battery_ui_soc(int soc_ui) +{ + return -ENXIO; +} +static inline int pm8921_bms_get_battery_ui_soc(void) +{ + return -ENXIO; +} static inline int pm8921_bms_get_attr_text(char *buf, int size) { return 0; } +static inline int pm8921_bms_enter_qb_mode(void) +{ + return 0; +} +static inline int pm8921_bms_exit_qb_mode(void) +{ + return 0; +} +static inline int pm8921_qb_mode_pwr_consumption_check(unsigned long time_stamp) +{ + return 0; +} #endif #endif -#endif +#endif \ No newline at end of file diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 4fc12b20..d9751d17 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -24,70 +24,29 @@ struct address_space; #define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) -/* - * Each physical page 
in the system has a struct page associated with - * it to keep track of whatever it is we are using the page for at the - * moment. Note that we have no way to track which tasks are using - * a page, though if it is a pagecache page, rmap structures can tell us - * who is mapping it. - * - * The objects in struct page are organized in double word blocks in - * order to allows us to use atomic double word operations on portions - * of struct page. That is currently only used by slub but the arrangement - * allows the use of atomic double word operations on the flags/mapping - * and lru list pointers also. - */ struct page { - /* First double word block */ - unsigned long flags; /* Atomic flags, some possibly - * updated asynchronously */ - struct address_space *mapping; /* If low bit clear, points to - * inode address_space, or NULL. - * If page mapped as anonymous - * memory, low bit is set, and - * it points to anon_vma object: - * see PAGE_MAPPING_ANON below. - */ - /* Second double word */ + + unsigned long flags; + struct address_space *mapping; + struct { union { - pgoff_t index; /* Our offset within mapping. */ - void *freelist; /* slub first free object */ + pgoff_t index; + void *freelist; }; union { #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) - /* Used for cmpxchg_double in slub */ + unsigned long counters; #else - /* - * Keep _count separate from slub cmpxchg_double data. - * As the rest of the double word is protected by - * slab_lock but _count is not. - */ unsigned counters; #endif struct { union { - /* - * Count of ptes mapped in - * mms, to show when page is - * mapped & limit reverse map - * searches. - * - * Used also for tail pages - * refcounting instead of - * _count. Tail pages cannot - * be mapped and keeping the - * tail page _count zero at - * all times guarantees - * get_page_unless_zero() will - * never succeed on tail - * pages. - */ atomic_t _mapcount; struct { @@ -96,21 +55,19 @@ struct page { unsigned frozen:1; }; }; - atomic_t _count; /* Usage count, see below. */ + atomic_t _count; }; }; }; - /* Third double word block */ + union { - struct list_head lru; /* Pageout list, eg. active_list - * protected by zone->lru_lock ! - */ - struct { /* slub per cpu partial pages */ - struct page *next; /* Next partial slab */ + struct list_head lru; + struct { + struct page *next; #ifdef CONFIG_64BIT - int pages; /* Nr of partial slabs left */ - int pobjects; /* Approximate # of objects */ + int pages; + int pobjects; #else short int pages; short int pobjects; @@ -118,52 +75,27 @@ struct page { }; }; - /* Remainder is not double word aligned */ + union { - unsigned long private; /* Mapping-private opaque data: - * usually used for buffer_heads - * if PagePrivate set; used for - * swp_entry_t if PageSwapCache; - * indicates order in the buddy - * system if PG_buddy is set. - */ + unsigned long private; #if USE_SPLIT_PTLOCKS spinlock_t ptl; #endif - struct kmem_cache *slab; /* SLUB: Pointer to slab */ - struct page *first_page; /* Compound tail pages */ + struct kmem_cache *slab; + struct page *first_page; }; - /* - * On machines where all RAM is mapped into kernel address space, - * we can simply calculate the virtual address. On machines with - * highmem some memory is mapped into kernel virtual memory - * dynamically, so we need a place to store that address. - * Note that this field could be 16 bits on x86 ... 
;) - * - * Architectures with slow multiplication can define - * WANT_PAGE_VIRTUAL in asm/page.h - */ #if defined(WANT_PAGE_VIRTUAL) - void *virtual; /* Kernel virtual address (NULL if - not kmapped, ie. highmem) */ -#endif /* WANT_PAGE_VIRTUAL */ + void *virtual; +#endif #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS - unsigned long debug_flags; /* Use atomic bitops on this */ + unsigned long debug_flags; #endif #ifdef CONFIG_KMEMCHECK - /* - * kmemcheck wants to track the status of each byte in a page; this - * is a pointer to such a status block. NULL if not tracked. - */ void *shadow; #endif } -/* - * The struct page can be forced to be double word aligned so that atomic ops - * on double words work. The SLUB allocator can make use of such a feature. - */ #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE __aligned(2 * sizeof(unsigned long)) #endif @@ -182,59 +114,36 @@ struct page_frag { typedef unsigned long __nocast vm_flags_t; -/* - * A region containing a mapping of a non-memory backed file under NOMMU - * conditions. These are held in a global tree and are pinned by the VMAs that - * map parts of them. - */ struct vm_region { - struct rb_node vm_rb; /* link in global region tree */ - vm_flags_t vm_flags; /* VMA vm_flags */ - unsigned long vm_start; /* start address of region */ - unsigned long vm_end; /* region initialised to here */ - unsigned long vm_top; /* region allocated to here */ - unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ - struct file *vm_file; /* the backing file or NULL */ - - int vm_usage; /* region usage count (access under nommu_region_sem) */ - bool vm_icache_flushed : 1; /* true if the icache has been flushed for - * this region */ + struct rb_node vm_rb; + vm_flags_t vm_flags; + unsigned long vm_start; + unsigned long vm_end; + unsigned long vm_top; + unsigned long vm_pgoff; + struct file *vm_file; + + int vm_usage; + bool vm_icache_flushed : 1; }; -/* - * This struct defines a memory VMM memory area. There is one of these - * per VM-area/task. A VM area is any part of the process virtual memory - * space that has a special rule for the page-fault handlers (ie a shared - * library, the executable area etc). - */ struct vm_area_struct { - struct mm_struct * vm_mm; /* The address space we belong to. */ - unsigned long vm_start; /* Our start address within vm_mm. */ - unsigned long vm_end; /* The first byte after our end address - within vm_mm. */ + struct mm_struct * vm_mm; + unsigned long vm_start; + unsigned long vm_end; - /* linked list of VM areas per task, sorted by address */ + struct vm_area_struct *vm_next, *vm_prev; - pgprot_t vm_page_prot; /* Access permissions of this VMA. */ - unsigned long vm_flags; /* Flags, see mm.h. */ + pgprot_t vm_page_prot; + unsigned long vm_flags; struct rb_node vm_rb; - /* - * For areas with an address space and backing store, - * linkage into the address_space->i_mmap prio tree, or - * linkage to the list of like vmas hanging off its node, or - * linkage of vma in the address_space->i_mmap_nonlinear list. - * - * For private anonymous mappings, a pointer to a null terminated string - * in the user process containing the name given to the vma, or NULL - * if unnamed. 
- */ union { struct { struct list_head list; - void *parent; /* aligns with prio_tree_node parent */ + void *parent; struct vm_area_struct *head; } vm_set; @@ -242,30 +151,22 @@ struct vm_area_struct { const char __user *anon_name; } shared; - /* - * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma - * list, after a COW of one of the file pages. A MAP_SHARED vma - * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack - * or brk vma (with NULL file) can only be in an anon_vma list. - */ - struct list_head anon_vma_chain; /* Serialized by mmap_sem & - * page_table_lock */ - struct anon_vma *anon_vma; /* Serialized by page_table_lock */ - - /* Function pointers to deal with this struct. */ + struct list_head anon_vma_chain; + struct anon_vma *anon_vma; + + const struct vm_operations_struct *vm_ops; - /* Information about our backing store: */ - unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE - units, *not* PAGE_CACHE_SIZE */ - struct file * vm_file; /* File we map to (can be NULL). */ - void * vm_private_data; /* was vm_pte (shared mem) */ + + unsigned long vm_pgoff; + struct file * vm_file; + void * vm_private_data; #ifndef CONFIG_MMU - struct vm_region *vm_region; /* NOMMU mapping region */ + struct vm_region *vm_region; #endif #ifdef CONFIG_NUMA - struct mempolicy *vm_policy; /* NUMA policy for the VMA */ + struct mempolicy *vm_policy; #endif }; @@ -289,116 +190,92 @@ enum { #if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU) #define SPLIT_RSS_COUNTING -/* per-thread cached information, */ struct task_rss_stat { - int events; /* for synchronization threshold */ + int events; int count[NR_MM_COUNTERS]; }; -#endif /* USE_SPLIT_PTLOCKS */ +#endif struct mm_rss_stat { atomic_long_t count[NR_MM_COUNTERS]; }; struct mm_struct { - struct vm_area_struct * mmap; /* list of VMAs */ + struct vm_area_struct * mmap; struct rb_root mm_rb; - struct vm_area_struct * mmap_cache; /* last find_vma result */ + struct vm_area_struct * mmap_cache; #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); void (*unmap_area) (struct mm_struct *mm, unsigned long addr); #endif - unsigned long mmap_base; /* base of mmap area */ - unsigned long task_size; /* size of task vm space */ - unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ - unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */ + unsigned long mmap_base; + unsigned long task_size; + unsigned long cached_hole_size; + unsigned long free_area_cache; pgd_t * pgd; - atomic_t mm_users; /* How many users with user space? */ - atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ - int map_count; /* number of VMAs */ + atomic_t mm_users; + atomic_t mm_count; + int map_count; - spinlock_t page_table_lock; /* Protects page tables and some counters */ + spinlock_t page_table_lock; struct rw_semaphore mmap_sem; - struct list_head mmlist; /* List of maybe swapped mm's. 
These are globally strung - * together off init_mm.mmlist, and are protected - * by mmlist_lock - */ + struct list_head mmlist; - unsigned long hiwater_rss; /* High-watermark of RSS usage */ - unsigned long hiwater_vm; /* High-water virtual memory usage */ + unsigned long hiwater_rss; + unsigned long hiwater_vm; - unsigned long total_vm; /* Total pages mapped */ - unsigned long locked_vm; /* Pages that have PG_mlocked set */ - unsigned long pinned_vm; /* Refcount permanently increased */ - unsigned long shared_vm; /* Shared pages (files) */ - unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */ - unsigned long stack_vm; /* VM_GROWSUP/DOWN */ - unsigned long reserved_vm; /* VM_RESERVED|VM_IO pages */ + unsigned long total_vm; + unsigned long locked_vm; + unsigned long pinned_vm; + unsigned long shared_vm; + unsigned long exec_vm; + unsigned long stack_vm; + unsigned long reserved_vm; unsigned long def_flags; - unsigned long nr_ptes; /* Page table pages */ + unsigned long nr_ptes; unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; unsigned long arg_start, arg_end, env_start, env_end; - unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ + unsigned long saved_auxv[AT_VECTOR_SIZE]; - /* - * Special counters, in some configurations protected by the - * page_table_lock, in other configurations by being atomic. - */ struct mm_rss_stat rss_stat; struct linux_binfmt *binfmt; cpumask_var_t cpu_vm_mask_var; - /* Architecture-specific MM context */ + mm_context_t context; - /* Swap token stuff */ - /* - * Last value of global fault stamp as seen by this process. - * In other words, this value gives an indication of how long - * it has been since this task got the token. - * Look at mm/thrash.c - */ + unsigned int faultstamp; unsigned int token_priority; unsigned int last_interval; - unsigned long flags; /* Must use atomic bitops to access the bits */ + unsigned long flags; - struct core_state *core_state; /* coredumping support */ + struct core_state *core_state; #ifdef CONFIG_AIO spinlock_t ioctx_lock; struct hlist_head ioctx_list; #endif #ifdef CONFIG_MM_OWNER - /* - * "owner" points to a task that is regarded as the canonical - * user/owner of this mm. All of the following must be true in - * order for it to be changed: - * - * current == mm->owner - * current->mm != mm - * new_owner->mm == mm - * new_owner->alloc_lock is held - */ struct task_struct __rcu *owner; #endif - /* store ref to file /proc//exe symlink points to */ + struct file *exe_file; unsigned long num_exe_file_vmas; #ifdef CONFIG_MMU_NOTIFIER struct mmu_notifier_mm *mmu_notifier_mm; #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE - pgtable_t pmd_huge_pte; /* protected by page_table_lock */ + pgtable_t pmd_huge_pte; #endif #ifdef CONFIG_CPUMASK_OFFSTACK struct cpumask cpumask_allocation; @@ -412,14 +289,12 @@ static inline void mm_init_cpumask(struct mm_struct *mm) #endif } -/* Future-safe accessor for struct mm_struct's cpu_vm_mask. 
*/ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) { return mm->cpu_vm_mask_var; } -/* Return the name for an anonymous mapping or NULL for a file-backed mapping */ static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma) { if (vma->vm_file) @@ -428,4 +303,4 @@ static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma) return vma->shared.anon_name; } -#endif /* _LINUX_MM_TYPES_H */ +#endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 8312d48d..36b7c1fd 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -19,7 +19,6 @@ #include #include -/* Free memory management - zoned buddy allocator. */ #ifndef CONFIG_FORCE_MAX_ZONEORDER #define MAX_ORDER 11 #else @@ -27,46 +26,27 @@ #endif #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) -/* - * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed - * costly to service. That is between allocation orders which should - * coelesce naturally under reasonable reclaim pressure and those which - * will not. - */ #define PAGE_ALLOC_COSTLY_ORDER 3 enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, - MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ + MIGRATE_PCPTYPES, MIGRATE_RESERVE = MIGRATE_PCPTYPES, #ifdef CONFIG_CMA - /* - * MIGRATE_CMA migration type is designed to mimic the way - * ZONE_MOVABLE works. Only movable pages can be allocated - * from MIGRATE_CMA pageblocks and page allocator never - * implicitly change migration type of MIGRATE_CMA pageblock. - * - * The way to use it is to change migratetype of a range of - * pageblocks to MIGRATE_CMA which can be done by - * __free_pageblock_cma() function. What is important though - * is that a range of pageblocks must be aligned to - * MAX_ORDER_NR_PAGES should biggest page be bigger then - * a single pageblock. - */ MIGRATE_CMA, #endif - MIGRATE_ISOLATE, /* can't allocate from here */ + MIGRATE_ISOLATE, MIGRATE_TYPES }; #ifdef CONFIG_CMA +bool is_cma_pageblock(struct page *page); # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) -# define cma_wmark_pages(zone) zone->min_cma_pages #else +# define is_cma_pageblock(page) false # define is_migrate_cma(migratetype) false -# define cma_wmark_pages(zone) 0 #endif #define for_each_migratetype_order(order, type) \ @@ -87,12 +67,6 @@ struct free_area { struct pglist_data; -/* - * zone->lock and zone->lru_lock are two of the hottest locks in the kernel. - * So add a wild amount of padding here to ensure that they fall into separate - * cachelines. There are very few zone structures in the machine, so space - * consumption is not a concern here. - */ #if defined(CONFIG_SMP) struct zone_padding { char x[0]; @@ -103,56 +77,47 @@ struct zone_padding { #endif enum zone_stat_item { - /* First 128 byte cacheline (assuming 64 bit words) */ + NR_FREE_PAGES, NR_LRU_BASE, - NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ - NR_ACTIVE_ANON, /* " " " " " */ - NR_INACTIVE_FILE, /* " " " " " */ - NR_ACTIVE_FILE, /* " " " " " */ - NR_UNEVICTABLE, /* " " " " " */ - NR_MLOCK, /* mlock()ed pages found and moved off LRU */ - NR_ANON_PAGES, /* Mapped anonymous pages */ - NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. 
- only modified from process context */ + NR_INACTIVE_ANON = NR_LRU_BASE, + NR_ACTIVE_ANON, + NR_INACTIVE_FILE, + NR_ACTIVE_FILE, + NR_UNEVICTABLE, + NR_MLOCK, + NR_ANON_PAGES, + NR_FILE_MAPPED, NR_FILE_PAGES, NR_FILE_DIRTY, NR_WRITEBACK, NR_SLAB_RECLAIMABLE, NR_SLAB_UNRECLAIMABLE, - NR_PAGETABLE, /* used for pagetables */ + NR_PAGETABLE, NR_KERNEL_STACK, - /* Second 128 byte cacheline */ - NR_UNSTABLE_NFS, /* NFS unstable pages */ + + NR_UNSTABLE_NFS, NR_BOUNCE, NR_VMSCAN_WRITE, - NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ - NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ - NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ - NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ - NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ - NR_DIRTIED, /* page dirtyings since bootup */ - NR_WRITTEN, /* page writings since bootup */ + NR_VMSCAN_IMMEDIATE, + NR_WRITEBACK_TEMP, + NR_ISOLATED_ANON, + NR_ISOLATED_FILE, + NR_SHMEM, + NR_DIRTIED, + NR_WRITTEN, #ifdef CONFIG_NUMA - NUMA_HIT, /* allocated in intended node */ - NUMA_MISS, /* allocated in non intended node */ - NUMA_FOREIGN, /* was intended here, hit elsewhere */ - NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ - NUMA_LOCAL, /* allocation from local node */ - NUMA_OTHER, /* allocation from other node */ + NUMA_HIT, + NUMA_MISS, + NUMA_FOREIGN, + NUMA_INTERLEAVE_HIT, + NUMA_LOCAL, + NUMA_OTHER, #endif NR_ANON_TRANSPARENT_HUGEPAGES, + NR_FREE_CMA_PAGES, NR_VM_ZONE_STAT_ITEMS }; -/* - * We do arithmetic on the LRU lists in various places in the code, - * so it is important to keep the active lists LRU_ACTIVE higher in - * the array than the corresponding inactive lists, and to keep - * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. - * - * This has to be kept in sync with the statistics in zone_stat_item - * above and the descriptions in vmstat_text in mm/vmstat.c - */ #define LRU_BASE 0 #define LRU_ACTIVE 1 #define LRU_FILE 2 @@ -189,24 +154,17 @@ struct lruvec { struct list_head lists[NR_LRU_LISTS]; }; -/* Mask used at gathering information at once (see memcontrol.c) */ #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) #define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON) #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) -/* Isolate inactive pages */ #define ISOLATE_INACTIVE ((__force isolate_mode_t)0x1) -/* Isolate active pages */ #define ISOLATE_ACTIVE ((__force isolate_mode_t)0x2) -/* Isolate clean file */ #define ISOLATE_CLEAN ((__force isolate_mode_t)0x4) -/* Isolate unmapped file */ #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8) -/* Isolate for asynchronous migration */ #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10) -/* LRU Isolation modes. 
*/ typedef unsigned __bitwise__ isolate_mode_t; enum zone_watermarks { @@ -221,11 +179,11 @@ enum zone_watermarks { #define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) struct per_cpu_pages { - int count; /* number of pages in the list */ - int high; /* high watermark, emptying needed */ - int batch; /* chunk size for buddy add/remove */ + int count; + int high; + int batch; - /* Lists of pages, one per migrate type stored on the pcp-lists */ + struct list_head lists[MIGRATE_PCPTYPES]; }; @@ -240,53 +198,17 @@ struct per_cpu_pageset { #endif }; -#endif /* !__GENERATING_BOUNDS.H */ +#endif enum zone_type { #ifdef CONFIG_ZONE_DMA - /* - * ZONE_DMA is used when there are devices that are not able - * to do DMA to all of addressable memory (ZONE_NORMAL). Then we - * carve out the portion of memory that is needed for these devices. - * The range is arch specific. - * - * Some examples - * - * Architecture Limit - * --------------------------- - * parisc, ia64, sparc <4G - * s390 <2G - * arm Various - * alpha Unlimited or 0-16MB. - * - * i386, x86_64 and multiple other arches - * <16M. - */ ZONE_DMA, #endif #ifdef CONFIG_ZONE_DMA32 - /* - * x86_64 needs two ZONE_DMAs because it supports devices that are - * only able to do DMA to the lower 16M but also 32 bit devices that - * can only do DMA areas below 4G. - */ ZONE_DMA32, #endif - /* - * Normal addressable memory is in ZONE_NORMAL. DMA operations can be - * performed on pages in ZONE_NORMAL if the DMA devices support - * transfers to all addressable memory. - */ ZONE_NORMAL, #ifdef CONFIG_HIGHMEM - /* - * A memory area that is only addressable by the kernel through - * mapping portions into its own address space. This is for example - * used by i386 to allow the kernel to address the memory beyond - * 900MB. The kernel will set up special mappings (page - * table entries on i386) for each page that the kernel needs to - * access. - */ ZONE_HIGHMEM, #endif ZONE_MOVABLE, @@ -295,13 +217,6 @@ enum zone_type { #ifndef __GENERATING_BOUNDS_H -/* - * When a memory allocation must conform to specific limitations (such - * as being suitable for DMA) the caller will pass in hints to the - * allocator in the gfp_mask, in the zone modifier bits. These bits - * are used to select a priority ordered list of memory zones which - * match the requested limits. See gfp_zone() in include/linux/gfp.h - */ #if MAX_NR_ZONES < 2 #define ZONES_SHIFT 0 @@ -314,88 +229,44 @@ enum zone_type { #endif struct zone_reclaim_stat { - /* - * The pageout code in vmscan.c keeps track of how many of the - * mem/swap backed and file backed pages are refeferenced. - * The higher the rotated/scanned ratio, the more valuable - * that cache is. 
- * - * The anon LRU stats live in [0], file LRU stats in [1] - */ unsigned long recent_rotated[2]; unsigned long recent_scanned[2]; }; struct zone { - /* Fields commonly accessed by the page allocator */ + - /* zone watermarks, access with *_wmark_pages(zone) macros */ + unsigned long watermark[NR_WMARK]; - /* - * When free pages are below this point, additional steps are taken - * when reading the number of free pages to avoid per-cpu counter - * drift allowing watermarks to be breached - */ unsigned long percpu_drift_mark; - /* - * We don't know if the memory that we're going to allocate will be freeable - * or/and it will be released eventually, so to avoid totally wasting several - * GB of ram we must reserve some of the lower zone memory (otherwise we risk - * to run OOM on the lower zones despite there's tons of freeable ram - * on the higher zones). This array is recalculated at runtime if the - * sysctl_lowmem_reserve_ratio sysctl changes. - */ unsigned long lowmem_reserve[MAX_NR_ZONES]; - /* - * This is a per-zone reserve of pages that should not be - * considered dirtyable memory. - */ unsigned long dirty_balance_reserve; #ifdef CONFIG_NUMA int node; - /* - * zone reclaim becomes active if more unmapped pages exist. - */ unsigned long min_unmapped_pages; unsigned long min_slab_pages; #endif struct per_cpu_pageset __percpu *pageset; - /* - * free areas of different sizes - */ spinlock_t lock; - int all_unreclaimable; /* All pages pinned */ + int all_unreclaimable; #ifdef CONFIG_MEMORY_HOTPLUG - /* see spanned/present_pages for more description */ + seqlock_t span_seqlock; #endif #ifdef CONFIG_CMA - /* - * CMA needs to increase watermark levels during the allocation - * process to make sure that the system is not starved. - */ - unsigned long min_cma_pages; + bool cma_alloc; #endif struct free_area free_area[MAX_ORDER]; #ifndef CONFIG_SPARSEMEM - /* - * Flags for a pageblock_nr_pages block. See pageblock-flags.h. - * In SPARSEMEM, this map is stored in struct mem_section - */ unsigned long *pageblock_flags; -#endif /* CONFIG_SPARSEMEM */ +#endif #ifdef CONFIG_COMPACTION - /* - * On compaction failure, 1<> PAGE_SHIFT */ + unsigned long zone_start_pfn; /* @@ -470,21 +310,16 @@ struct zone { * frequently read in proximity to zone->lock. It's good to * give them a chance of being in the same cacheline. */ - unsigned long spanned_pages; /* total size, including holes */ - unsigned long present_pages; /* amount of memory (excluding holes) */ + unsigned long spanned_pages; + unsigned long present_pages; - /* - * rarely used fields: - */ const char *name; } ____cacheline_internodealigned_in_smp; typedef enum { - ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ - ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ - ZONE_CONGESTED, /* zone has many dirty pages backed by - * a congested BDI - */ + ZONE_RECLAIM_LOCKED, + ZONE_OOM_LOCKED, + ZONE_CONGESTED, } zone_flags_t; static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) @@ -521,130 +356,39 @@ static inline int zone_is_oom_locked(const struct zone *zone) unsigned long zone_nr_free_pages(struct zone *zone); #else #define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES) -#endif /* CONFIG_SMP */ +#endif -/* - * The "priority" of VM scanning is how much of the queues we will scan in one - * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the - * queues ("queue_length >> 12") during an aging round. 
- */ #define DEF_PRIORITY 12 -/* Maximum number of zones on a zonelist */ #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) #ifdef CONFIG_NUMA -/* - * The NUMA zonelists are doubled because we need zonelists that restrict the - * allocations to a single node for GFP_THISNODE. - * - * [0] : Zonelist with fallback - * [1] : No fallback (GFP_THISNODE) - */ #define MAX_ZONELISTS 2 -/* - * We cache key information from each zonelist for smaller cache - * footprint when scanning for free pages in get_page_from_freelist(). - * - * 1) The BITMAP fullzones tracks which zones in a zonelist have come - * up short of free memory since the last time (last_fullzone_zap) - * we zero'd fullzones. - * 2) The array z_to_n[] maps each zone in the zonelist to its node - * id, so that we can efficiently evaluate whether that node is - * set in the current tasks mems_allowed. - * - * Both fullzones and z_to_n[] are one-to-one with the zonelist, - * indexed by a zones offset in the zonelist zones[] array. - * - * The get_page_from_freelist() routine does two scans. During the - * first scan, we skip zones whose corresponding bit in 'fullzones' - * is set or whose corresponding node in current->mems_allowed (which - * comes from cpusets) is not set. During the second scan, we bypass - * this zonelist_cache, to ensure we look methodically at each zone. - * - * Once per second, we zero out (zap) fullzones, forcing us to - * reconsider nodes that might have regained more free memory. - * The field last_full_zap is the time we last zapped fullzones. - * - * This mechanism reduces the amount of time we waste repeatedly - * reexaming zones for free memory when they just came up low on - * memory momentarilly ago. - * - * The zonelist_cache struct members logically belong in struct - * zonelist. However, the mempolicy zonelists constructed for - * MPOL_BIND are intentionally variable length (and usually much - * shorter). A general purpose mechanism for handling structs with - * multiple variable length members is more mechanism than we want - * here. We resort to some special case hackery instead. - * - * The MPOL_BIND zonelists don't need this zonelist_cache (in good - * part because they are shorter), so we put the fixed length stuff - * at the front of the zonelist struct, ending in a variable length - * zones[], as is needed by MPOL_BIND. - * - * Then we put the optional zonelist cache on the end of the zonelist - * struct. This optional stuff is found by a 'zlcache_ptr' pointer in - * the fixed length portion at the front of the struct. This pointer - * both enables us to find the zonelist cache, and in the case of - * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL) - * to know that the zonelist cache is not there. - * - * The end result is that struct zonelists come in two flavors: - * 1) The full, fixed length version, shown below, and - * 2) The custom zonelists for MPOL_BIND. - * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache. - * - * Even though there may be multiple CPU cores on a node modifying - * fullzones or last_full_zap in the same zonelist_cache at the same - * time, we don't lock it. This is just hint data - if it is wrong now - * and then, the allocator will still function, perhaps a bit slower. - */ struct zonelist_cache { - unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */ - DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? 
*/ - unsigned long last_full_zap; /* when last zap'd (jiffies) */ + unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; + DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); + unsigned long last_full_zap; }; #else #define MAX_ZONELISTS 1 struct zonelist_cache; #endif -/* - * This struct contains information about a zone in a zonelist. It is stored - * here to avoid dereferences into large structures and lookups of tables - */ struct zoneref { - struct zone *zone; /* Pointer to actual zone */ - int zone_idx; /* zone_idx(zoneref->zone) */ + struct zone *zone; + int zone_idx; }; -/* - * One allocation request operates on a zonelist. A zonelist - * is a list of zones, the first one is the 'goal' of the - * allocation, the other zones are fallback zones, in decreasing - * priority. - * - * If zlcache_ptr is not NULL, then it is just the address of zlcache, - * as explained above. If zlcache_ptr is NULL, there is no zlcache. - * * - * To speed the reading of the zonelist, the zonerefs contain the zone index - * of the entry being read. Helper functions to access information given - * a struct zoneref are - * - * zonelist_zone() - Return the struct zone * for an entry in _zonerefs - * zonelist_zone_idx() - Return the index of the zone for an entry - * zonelist_node_idx() - Return the index of the node for an entry - */ struct zonelist { - struct zonelist_cache *zlcache_ptr; // NULL or &zlcache + struct zonelist_cache *zlcache_ptr; struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; #ifdef CONFIG_NUMA - struct zonelist_cache zlcache; // optional ... + struct zonelist_cache zlcache; #endif }; @@ -654,30 +398,18 @@ struct node_active_region { unsigned long end_pfn; int nid; }; -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +#endif #ifndef CONFIG_DISCONTIGMEM -/* The array of struct pages - for discontigmem use pgdat->lmem_map */ extern struct page *mem_map; #endif -/* - * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM - * (mostly NUMA machines?) to denote a higher-level memory zone than the - * zone denotes. - * - * On NUMA machines, each NUMA node would have a pg_data_t to describe - * it's memory layout. - * - * Memory statistics and page replacement data structures are maintained on a - * per-zone basis. - */ struct bootmem_data; typedef struct pglist_data { struct zone node_zones[MAX_NR_ZONES]; struct zonelist node_zonelists[MAX_ZONELISTS]; int nr_zones; -#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ +#ifdef CONFIG_FLAT_NODE_MEM_MAP struct page *node_mem_map; #ifdef CONFIG_CGROUP_MEM_RES_CTLR struct page_cgroup *node_page_cgroup; @@ -687,22 +419,14 @@ typedef struct pglist_data { struct bootmem_data *bdata; #endif #ifdef CONFIG_MEMORY_HOTPLUG - /* - * Must be held any time you expect node_start_pfn, node_present_pages - * or node_spanned_pages stay constant. Holding this will also - * guarantee that any pfn_valid() stays that way. - * - * Nests above zone->lock and zone->size_seqlock. 
- */ spinlock_t node_size_lock; #endif unsigned long node_start_pfn; - unsigned long node_present_pages; /* total number of physical pages */ - unsigned long node_spanned_pages; /* total size of physical page - range, including holes */ + unsigned long node_present_pages; + unsigned long node_spanned_pages; int node_id; wait_queue_head_t kswapd_wait; - struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */ + struct task_struct *kswapd; int kswapd_max_order; enum zone_type classzone_idx; } pg_data_t; @@ -756,9 +480,6 @@ static inline int local_memory_node(int node_id) { return node_id; }; unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); #endif -/* - * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc. - */ #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) static inline int populated_zone(struct zone *zone) @@ -792,12 +513,6 @@ static inline int is_normal_idx(enum zone_type idx) return (idx == ZONE_NORMAL); } -/** - * is_highmem - helper function to quickly check if a struct zone is a - * highmem zone or not. This is an attempt to keep references - * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. - * @zone - pointer to struct zone variable - */ static inline int is_highmem(struct zone *zone) { #ifdef CONFIG_HIGHMEM @@ -833,7 +548,6 @@ static inline int is_dma(struct zone *zone) #endif } -/* These two functions are used to setup the per zone pages min values */ struct ctl_table; int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); @@ -850,7 +564,7 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, extern int numa_zonelist_order_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern char numa_zonelist_order[]; -#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ +#define NUMA_ZONELIST_ORDER_LEN 16 #ifndef CONFIG_NEED_MULTIPLE_NODES @@ -858,31 +572,20 @@ extern struct pglist_data contig_page_data; #define NODE_DATA(nid) (&contig_page_data) #define NODE_MEM_MAP(nid) mem_map -#else /* CONFIG_NEED_MULTIPLE_NODES */ +#else #include -#endif /* !CONFIG_NEED_MULTIPLE_NODES */ +#endif extern struct pglist_data *first_online_pgdat(void); extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); extern struct zone *next_zone(struct zone *zone); -/** - * for_each_online_pgdat - helper macro to iterate over all online nodes - * @pgdat - pointer to a pg_data_t variable - */ #define for_each_online_pgdat(pgdat) \ for (pgdat = first_online_pgdat(); \ pgdat; \ pgdat = next_online_pgdat(pgdat)) -/** - * for_each_zone - helper macro to iterate over all memory zones - * @zone - pointer to struct zone variable - * - * The user only needs to declare the zone variable, for_each_zone - * fills it in. 
- */ #define for_each_zone(zone) \ for (zone = (first_online_pgdat())->node_zones; \ zone; \ @@ -893,7 +596,7 @@ extern struct zone *next_zone(struct zone *zone); zone; \ zone = next_zone(zone)) \ if (!populated_zone(zone)) \ - ; /* do nothing */ \ + ; \ else static inline struct zone *zonelist_zone(struct zoneref *zoneref) @@ -909,43 +612,18 @@ static inline int zonelist_zone_idx(struct zoneref *zoneref) static inline int zonelist_node_idx(struct zoneref *zoneref) { #ifdef CONFIG_NUMA - /* zone_to_nid not available in this context */ + return zoneref->zone->node; #else return 0; -#endif /* CONFIG_NUMA */ +#endif } -/** - * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point - * @z - The cursor used as a starting point for the search - * @highest_zoneidx - The zone index of the highest zone to return - * @nodes - An optional nodemask to filter the zonelist with - * @zone - The first suitable zone found is returned via this parameter - * - * This function returns the next zone at or below a given zone index that is - * within the allowed nodemask using a cursor as the starting point for the - * search. The zoneref returned is a cursor that represents the current zone - * being examined. It should be advanced by one before calling - * next_zones_zonelist again. - */ struct zoneref *next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, nodemask_t *nodes, struct zone **zone); -/** - * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist - * @zonelist - The zonelist to search for a suitable zone - * @highest_zoneidx - The zone index of the highest zone to return - * @nodes - An optional nodemask to filter the zonelist with - * @zone - The first suitable zone found is returned via this parameter - * - * This function returns the first zone at or below a given zone index that is - * within the allowed nodemask. The zoneref returned is a cursor that can be - * used to iterate the zonelist with next_zones_zonelist by advancing it by - * one before calling. 
- */ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, enum zone_type highest_zoneidx, nodemask_t *nodes, @@ -955,31 +633,11 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, zone); } -/** - * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask - * @zone - The current zone in the iterator - * @z - The current pointer within zonelist->zones being iterated - * @zlist - The zonelist being iterated - * @highidx - The zone index of the highest zone to return - * @nodemask - Nodemask allowed by the allocator - * - * This iterator iterates though all zones at or below a given zone index and - * within a given nodemask - */ #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \ zone; \ z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \ -/** - * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index - * @zone - The current zone in the iterator - * @z - The current pointer within zonelist->zones being iterated - * @zlist - The zonelist being iterated - * @highidx - The zone index of the highest zone to return - * - * This iterator iterates though all zones at or below a given zone index. - */ #define for_each_zone_zonelist(zone, z, zlist, highidx) \ for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) @@ -1001,12 +659,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) #ifdef CONFIG_SPARSEMEM -/* - * SECTION_SHIFT #bits space required to store a section # - * - * PA_SECTION_SHIFT physical address to/from section number - * PFN_SECTION_SHIFT pfn to/from section number - */ #define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) #define PA_SECTION_SHIFT (SECTION_SIZE_BITS) @@ -1033,27 +685,11 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) struct page; struct page_cgroup; struct mem_section { - /* - * This is, logically, a pointer to an array of struct - * pages. However, it is stored with some other magic. - * (see sparse.c::sparse_init_one_section()) - * - * Additionally during early boot we encode node id of - * the location of the section here to guide allocation. - * (see sparse.c::memory_present()) - * - * Making it a UL at least makes someone do a cast - * before using it wrong. - */ unsigned long section_mem_map; - /* See declaration of similar field in struct zone */ + unsigned long *pageblock_flags; #ifdef CONFIG_CGROUP_MEM_RES_CTLR - /* - * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use - * section. (see memcontrol.h/page_cgroup.h about this.) - */ struct page_cgroup *page_cgroup; unsigned long pad; #endif @@ -1084,11 +720,6 @@ static inline struct mem_section *__nr_to_section(unsigned long nr) extern int __section_nr(struct mem_section* ms); extern unsigned long usemap_size(void); -/* - * We use the lower bits of the mem_map pointer to store - * a little bit of information. There should be at least - * 3 bits here due to 32-bit alignment. - */ #define SECTION_MARKED_PRESENT (1UL<<0) #define SECTION_HAS_MEM_MAP (1UL<<1) #define SECTION_MAP_LAST_BIT (1UL<<2) @@ -1143,11 +774,6 @@ static inline int pfn_present(unsigned long pfn) return present_section(__nr_to_section(pfn_to_section_nr(pfn))); } -/* - * These are _only_ used during initialisation, therefore they - * can use __initdata ... 
They could have names to indicate - * this restriction. - */ #ifdef CONFIG_NUMA #define pfn_to_nid(pfn) \ ({ \ @@ -1166,7 +792,7 @@ void sparse_init(void); #else #define sparse_init() do {} while (0) #define sparse_index_init(_sec, _nid) do {} while (0) -#endif /* CONFIG_SPARSEMEM */ +#endif #ifdef CONFIG_NODES_SPAN_OTHER_NODES bool early_pfn_in_nid(unsigned long pfn, int nid); @@ -1181,12 +807,6 @@ bool early_pfn_in_nid(unsigned long pfn, int nid); void memory_present(int nid, unsigned long start, unsigned long end); unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); -/* - * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we - * need to check pfn validility within that MAX_ORDER_NR_PAGES block. - * pfn_valid_within() should be used in this case; we optimise this away - * when we have no holes within a MAX_ORDER_NR_PAGES block. - */ #ifdef CONFIG_HOLES_IN_ZONE #define pfn_valid_within(pfn) pfn_valid(pfn) #else @@ -1194,21 +814,6 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); #endif #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL -/* - * pfn_valid() is meant to be able to tell if a given PFN has valid memmap - * associated with it or not. In FLATMEM, it is expected that holes always - * have valid memmap as long as there is valid PFNs either side of the hole. - * In SPARSEMEM, it is assumed that a valid section has a memmap for the - * entire section. - * - * However, an ARM, and maybe other embedded architectures in the future - * free memmap backing holes to save memory on the assumption the memmap is - * never used. The page_zone linkages are then broken even though pfn_valid() - * returns true. A walker of the full memmap must then do this additional - * check to ensure the memmap they are looking at is sane by making sure - * the zone and PFN linkages are still valid. This is expensive, but walkers - * of the full memmap are extremely rare. - */ int memmap_valid_within(unsigned long pfn, struct page *page, struct zone *zone); #else @@ -1217,8 +822,8 @@ static inline int memmap_valid_within(unsigned long pfn, { return 1; } -#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ +#endif -#endif /* !__GENERATING_BOUNDS.H */ -#endif /* !__ASSEMBLY__ */ -#endif /* _LINUX_MMZONE_H */ +#endif +#endif +#endif diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h index 0e28e541..61263afd 100644 --- a/include/linux/msm_ion.h +++ b/include/linux/msm_ion.h @@ -1,7 +1,6 @@ /* - * include/linux/ion.h * - * Copyright (c) 2012, Code Aurora Forum. All rights reserved. + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -19,4 +18,162 @@ #include + +enum ion_heap_ids { + INVALID_HEAP_ID = -1, + ION_CP_MM_HEAP_ID = 8, + ION_CP_MFC_HEAP_ID = 12, + ION_CP_WB_HEAP_ID = 16, + ION_CAMERA_HEAP_ID = 20, + ION_SYSTEM_CONTIG_HEAP_ID = 21, + ION_ADSP_HEAP_ID = 22, + ION_SF_HEAP_ID = 24, + ION_IOMMU_HEAP_ID = 25, + ION_QSECOM_HEAP_ID = 27, + ION_AUDIO_HEAP_ID = 28, + + ION_MM_FIRMWARE_HEAP_ID = 29, + ION_SYSTEM_HEAP_ID = 30, + + ION_HEAP_ID_RESERVED = 31 +}; + +enum ion_fixed_position { + NOT_FIXED, + FIXED_LOW, + FIXED_MIDDLE, + FIXED_HIGH, +}; + +enum cp_mem_usage { + VIDEO_BITSTREAM = 0x1, + VIDEO_PIXEL = 0x2, + VIDEO_NONPIXEL = 0x3, + MAX_USAGE = 0x4, + UNKNOWN = 0x7FFFFFFF, +}; + +#define ION_HEAP_CP_MASK (1 << ION_HEAP_TYPE_CP) + +#define ION_FLAG_SECURE (1 << ION_HEAP_ID_RESERVED) + +#define ION_FLAG_FORCE_CONTIGUOUS (1 << 30) + +#define ION_SECURE ION_FLAG_SECURE +#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS + +#define ION_HEAP(bit) (1 << (bit)) + +#define ION_ADSP_HEAP_NAME "adsp" +#define ION_VMALLOC_HEAP_NAME "vmalloc" +#define ION_KMALLOC_HEAP_NAME "kmalloc" +#define ION_AUDIO_HEAP_NAME "audio" +#define ION_SF_HEAP_NAME "sf" +#define ION_MM_HEAP_NAME "mm" +#define ION_CAMERA_HEAP_NAME "camera_preview" +#define ION_IOMMU_HEAP_NAME "iommu" +#define ION_MFC_HEAP_NAME "mfc" +#define ION_WB_HEAP_NAME "wb" +#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw" +#define ION_QSECOM_HEAP_NAME "qsecom" +#define ION_FMEM_HEAP_NAME "fmem" + +#define ION_SET_CACHED(__cache) (__cache | ION_FLAG_CACHED) +#define ION_SET_UNCACHED(__cache) (__cache & ~ION_FLAG_CACHED) + +#define ION_IS_CACHED(__flags) ((__flags) & ION_FLAG_CACHED) + +#ifdef __KERNEL__ + +#define ION_IOMMU_UNMAP_DELAYED 1 + +struct ion_cp_heap_pdata { + enum ion_permission_type permission_type; + unsigned int align; + ion_phys_addr_t secure_base; + size_t secure_size; + int reusable; + int mem_is_fmem; + int is_cma; + enum ion_fixed_position fixed_position; + int iommu_map_all; + int iommu_2x_map_domain; + ion_virt_addr_t *virt_addr; + int (*request_region)(void *); + int (*release_region)(void *); + void *(*setup_region)(void); + enum ion_memory_types memory_type; + int no_nonsecure_alloc; +}; + +struct ion_co_heap_pdata { + int adjacent_mem_id; + unsigned int align; + int mem_is_fmem; + enum ion_fixed_position fixed_position; + int (*request_region)(void *); + int (*release_region)(void *); + void *(*setup_region)(void); + enum ion_memory_types memory_type; +}; + +#ifdef CONFIG_ION +int msm_ion_secure_heap(int heap_id); + +int msm_ion_unsecure_heap(int heap_id); + +int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage); + +int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage); +#else +static inline int msm_ion_secure_heap(int heap_id) +{ + return -ENODEV; + +} + +static inline int msm_ion_unsecure_heap(int heap_id) +{ + return -ENODEV; +} + +static inline int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage) +{ + return -ENODEV; +} + +static inline int msm_ion_unsecure_heap_2_0(int heap_id, + enum cp_mem_usage usage) +{ + return -ENODEV; +} +#endif + +#endif + +struct ion_flush_data { + struct ion_handle *handle; + int fd; + void *vaddr; + unsigned int offset; + unsigned int length; +}; + +struct ion_flag_data { + struct ion_handle *handle; + unsigned long flags; +}; + +#define ION_IOC_MSM_MAGIC 'M' + +#define ION_IOC_CLEAN_CACHES _IOWR(ION_IOC_MSM_MAGIC, 0, \ + struct 
ion_flush_data) +#define ION_IOC_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 1, \ + struct ion_flush_data) +#define ION_IOC_CLEAN_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 2, \ + struct ion_flush_data) + +#define ION_IOC_GET_FLAGS _IOWR(ION_IOC_MSM_MAGIC, 3, \ + struct ion_flag_data) + #endif diff --git a/include/linux/msm_rotator.h b/include/linux/msm_rotator.h index 389669e1..0b9d1d2d 100644 --- a/include/linux/msm_rotator.h +++ b/include/linux/msm_rotator.h @@ -12,6 +12,8 @@ _IOW(MSM_ROTATOR_IOCTL_MAGIC, 2, struct msm_rotator_data_info) #define MSM_ROTATOR_IOCTL_FINISH \ _IOW(MSM_ROTATOR_IOCTL_MAGIC, 3, int) +#define MSM_ROTATOR_IOCTL_BUFFER_SYNC \ + _IOW(MSM_ROTATOR_IOCTL_MAGIC, 4, struct msm_rotator_buf_sync) #define ROTATOR_VERSION_01 0xA5B4C301 @@ -23,6 +25,13 @@ enum rotator_clk_type { ROTATOR_IMEM_CLK }; +struct msm_rotator_buf_sync { + uint32_t session_id; + uint32_t flags; + int acq_fen_fd; + int rel_fen_fd; +}; + struct msm_rotator_img_info { unsigned int session_id; struct msmfb_img src; @@ -44,6 +53,7 @@ struct msm_rotator_data_info { unsigned int version_key; struct msmfb_data src_chroma; struct msmfb_data dst_chroma; + uint32_t wait_for_finish; }; struct msm_rot_clocks { diff --git a/include/linux/msm_vidc_dec.h b/include/linux/msm_vidc_dec.h index 3c99562a..f5014612 100644 --- a/include/linux/msm_vidc_dec.h +++ b/include/linux/msm_vidc_dec.h @@ -538,6 +538,8 @@ struct vdec_output_frameinfo { struct vdec_framesize framesize; enum vdec_interlaced_format interlaced_format; struct vdec_aspectratioinfo aspect_ratio_info; + size_t metadata_len; + size_t metadata_offset; }; union vdec_msgdata { diff --git a/include/linux/msm_vidc_enc.h b/include/linux/msm_vidc_enc.h index f53feae7..d12fdb9c 100644 --- a/include/linux/msm_vidc_enc.h +++ b/include/linux/msm_vidc_enc.h @@ -400,6 +400,8 @@ struct venc_buffer{ long long timestamp; unsigned long flags; void *clientdata; + unsigned long metadata_len; + unsigned long metadata_offset; }; struct venc_basecfg{ @@ -451,7 +453,7 @@ struct venc_capability{ }; struct venc_entropycfg{ - unsigned longentropysel; + unsigned long entropysel; unsigned long cabacmodel; }; diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 2f513409..b563878b 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h @@ -1,97 +1,73 @@ #ifndef _LINUX_PRCTL_H #define _LINUX_PRCTL_H -/* Values to pass as first argument to prctl() */ -#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */ -#define PR_GET_PDEATHSIG 2 /* Second arg is a ptr to return the signal */ +#define PR_SET_PDEATHSIG 1 +#define PR_GET_PDEATHSIG 2 -/* Get/set current->mm->dumpable */ #define PR_GET_DUMPABLE 3 #define PR_SET_DUMPABLE 4 -/* Get/set unaligned access control bits (if meaningful) */ #define PR_GET_UNALIGN 5 #define PR_SET_UNALIGN 6 -# define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */ -# define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */ +# define PR_UNALIGN_NOPRINT 1 +# define PR_UNALIGN_SIGBUS 2 -/* Get/set whether or not to drop capabilities on setuid() away from - * uid 0 (as per security/commoncap.c) */ #define PR_GET_KEEPCAPS 7 #define PR_SET_KEEPCAPS 8 -/* Get/set floating-point emulation control bits (if meaningful) */ #define PR_GET_FPEMU 9 #define PR_SET_FPEMU 10 -# define PR_FPEMU_NOPRINT 1 /* silently emulate fp operations accesses */ -# define PR_FPEMU_SIGFPE 2 /* don't emulate fp operations, send SIGFPE instead */ +# define PR_FPEMU_NOPRINT 1 +# define PR_FPEMU_SIGFPE 2 -/* Get/set floating-point exception mode (if meaningful) 
*/ #define PR_GET_FPEXC 11 #define PR_SET_FPEXC 12 -# define PR_FP_EXC_SW_ENABLE 0x80 /* Use FPEXC for FP exception enables */ -# define PR_FP_EXC_DIV 0x010000 /* floating point divide by zero */ -# define PR_FP_EXC_OVF 0x020000 /* floating point overflow */ -# define PR_FP_EXC_UND 0x040000 /* floating point underflow */ -# define PR_FP_EXC_RES 0x080000 /* floating point inexact result */ -# define PR_FP_EXC_INV 0x100000 /* floating point invalid operation */ -# define PR_FP_EXC_DISABLED 0 /* FP exceptions disabled */ -# define PR_FP_EXC_NONRECOV 1 /* async non-recoverable exc. mode */ -# define PR_FP_EXC_ASYNC 2 /* async recoverable exception mode */ -# define PR_FP_EXC_PRECISE 3 /* precise exception mode */ - -/* Get/set whether we use statistical process timing or accurate timestamp - * based process timing */ +# define PR_FP_EXC_SW_ENABLE 0x80 +# define PR_FP_EXC_DIV 0x010000 +# define PR_FP_EXC_OVF 0x020000 +# define PR_FP_EXC_UND 0x040000 +# define PR_FP_EXC_RES 0x080000 +# define PR_FP_EXC_INV 0x100000 +# define PR_FP_EXC_DISABLED 0 +# define PR_FP_EXC_NONRECOV 1 +# define PR_FP_EXC_ASYNC 2 +# define PR_FP_EXC_PRECISE 3 + #define PR_GET_TIMING 13 #define PR_SET_TIMING 14 -# define PR_TIMING_STATISTICAL 0 /* Normal, traditional, - statistical process timing */ -# define PR_TIMING_TIMESTAMP 1 /* Accurate timestamp based - process timing */ +# define PR_TIMING_STATISTICAL 0 +# define PR_TIMING_TIMESTAMP 1 -#define PR_SET_NAME 15 /* Set process name */ -#define PR_GET_NAME 16 /* Get process name */ +#define PR_SET_NAME 15 +#define PR_GET_NAME 16 -/* Get/set process endian */ #define PR_GET_ENDIAN 19 #define PR_SET_ENDIAN 20 # define PR_ENDIAN_BIG 0 -# define PR_ENDIAN_LITTLE 1 /* True little endian mode */ -# define PR_ENDIAN_PPC_LITTLE 2 /* "PowerPC" pseudo little endian */ +# define PR_ENDIAN_LITTLE 1 +# define PR_ENDIAN_PPC_LITTLE 2 -/* Get/set process seccomp mode */ #define PR_GET_SECCOMP 21 #define PR_SET_SECCOMP 22 -/* Get/set the capability bounding set (as per security/commoncap.c) */ #define PR_CAPBSET_READ 23 #define PR_CAPBSET_DROP 24 -/* Get/set the process' ability to use the timestamp counter instruction */ #define PR_GET_TSC 25 #define PR_SET_TSC 26 -# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */ -# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */ +# define PR_TSC_ENABLE 1 +# define PR_TSC_SIGSEGV 2 -/* Get/set securebits (as per security/commoncap.c) */ #define PR_GET_SECUREBITS 27 #define PR_SET_SECUREBITS 28 -/* - * Get/set the timerslack as used by poll/select/nanosleep - * A value of 0 means "use default" - */ #define PR_SET_TIMERSLACK 29 #define PR_GET_TIMERSLACK 30 #define PR_TASK_PERF_EVENTS_DISABLE 31 #define PR_TASK_PERF_EVENTS_ENABLE 32 -/* - * Set early/late kill mode for hwpoison memory corruption. - * This influences when the process gets killed on a memory corruption. - */ #define PR_MCE_KILL 33 # define PR_MCE_KILL_CLEAR 0 # define PR_MCE_KILL_SET 1 @@ -102,9 +78,6 @@ #define PR_MCE_KILL_GET 34 -/* - * Tune up process memory map specifics. - */ #define PR_SET_MM 35 # define PR_SET_MM_START_CODE 1 # define PR_SET_MM_END_CODE 2 @@ -114,10 +87,6 @@ # define PR_SET_MM_START_BRK 6 # define PR_SET_MM_BRK 7 -/* - * Set specific pid that is allowed to ptrace the current task. - * A value of 0 mean "no process". 
- */ #define PR_SET_PTRACER 0x59616d61 # define PR_SET_PTRACER_ANY ((unsigned long)-1) @@ -127,4 +96,4 @@ #define PR_SET_VMA 0x53564d41 # define PR_SET_VMA_ANON_NAME 0 -#endif /* _LINUX_PRCTL_H */ +#endif diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h index bdf855c2..62c8dacc 100644 --- a/include/linux/task_io_accounting.h +++ b/include/linux/task_io_accounting.h @@ -1,30 +1,17 @@ -/* - * task_io_accounting: a structure which is used for recording a single task's - * IO statistics. - * - * Don't include this header file directly - it is designed to be dragged in via - * sched.h. - * - * Blame Andrew Morton for all this. - */ struct task_io_accounting { #ifdef CONFIG_TASK_XACCT - /* bytes read */ + u64 rchar; /* bytes written */ u64 wchar; - /* # of read syscalls */ + u64 syscr; - /* # of write syscalls */ + u64 syscw; -#endif /* CONFIG_TASK_XACCT */ +#endif #ifdef CONFIG_TASK_IO_ACCOUNTING - /* - * The number of bytes which this task has caused to be read from - * storage. - */ u64 read_bytes; /* @@ -33,13 +20,6 @@ struct task_io_accounting { */ u64 write_bytes; - /* - * A task can cause "negative" IO too. If this task truncates some - * dirty pagecache, some IO which another task has been accounted for - * (in its write_bytes) will not be happening. We _could_ just - * subtract that from the truncating task's write_bytes, but there is - * information loss in doing that. - */ u64 cancelled_write_bytes; -#endif /* CONFIG_TASK_IO_ACCOUNTING */ +#endif }; diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 7cd21429..e4e75372 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -175,6 +175,7 @@ struct usb_composite_dev { struct work_struct cdusbcmdwork; struct delayed_work cdusbcmd_vzw_unmount_work; struct switch_dev compositesdev; + int unmount_cdrom_mask; }; extern int usb_string_id(struct usb_composite_dev *c); @@ -194,4 +195,4 @@ extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); #define INFO(d, fmt, args...) 
\ dev_info(&(d)->gadget->dev , fmt , ## args) -#endif +#endif \ No newline at end of file diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 2963d2bb..a3dd2ccf 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -68,6 +68,11 @@ struct usbnet { # define EVENT_DEV_WAKING 6 # define EVENT_DEV_ASLEEP 7 # define EVENT_DEV_OPEN 8 +#define DBG_MSG_LEN 128 +#define DBG_MAX_MSG 500 + unsigned int dbg_idx; + rwlock_t dbg_lock; + char (dbgbuf[DBG_MAX_MSG])[DBG_MSG_LEN]; }; static inline struct usb_driver *driver_of(struct usb_interface *intf) @@ -151,6 +156,7 @@ extern int usbnet_suspend(struct usb_interface *, pm_message_t); extern int usbnet_resume(struct usb_interface *); extern void usbnet_disconnect(struct usb_interface *); +extern void dbg_log_event_debug(struct usbnet *, char *); struct cdc_state { struct usb_cdc_header_desc *header; @@ -214,4 +220,4 @@ extern int usbnet_nway_reset(struct net_device *net); extern void usbnet_terminate_urbs(struct usbnet *dev); extern void rx_complete(struct urb *urb); -#endif +#endif \ No newline at end of file diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 65efb92d..1d104744 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -258,6 +258,13 @@ static inline void refresh_zone_stat_thresholds(void) { } #endif /* CONFIG_SMP */ +static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, + int migratetype) +{ + __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); + if (is_migrate_cma(migratetype)) + __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); +} extern const char * const vmstat_text[]; #endif /* _LINUX_VMSTAT_H */ diff --git a/include/media/msm/vcd_api.h b/include/media/msm/vcd_api.h index 7104028e..808228ac 100644 --- a/include/media/msm/vcd_api.h +++ b/include/media/msm/vcd_api.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -78,6 +78,8 @@ struct vcd_frame_data { u32 desc_size; struct ion_handle *buff_ion_handle; struct vcd_aspect_ratio aspect_ratio_info; + u32 metadata_len; + u32 metadata_offset; }; struct vcd_sequence_hdr { diff --git a/include/media/msm/vidc_init.h b/include/media/msm/vidc_init.h index c6812130..0fc4ecd7 100644 --- a/include/media/msm/vidc_init.h +++ b/include/media/msm/vidc_init.h @@ -19,6 +19,7 @@ #define VIDC_MAX_NUM_CLIENTS 4 #define MAX_VIDEO_NUM_OF_BUFF 100 +#define MAX_MV_BUFFERS 32 enum buffer_dir { BUFFER_TYPE_INPUT, @@ -30,6 +31,7 @@ struct buf_addr_table { unsigned long kernel_vaddr; unsigned long phy_addr; unsigned long buff_ion_flag; + unsigned long buff_len; struct ion_handle *buff_ion_handle; int pmem_fd; struct file *file; diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h index 8846acb0..c22cfee5 100644 --- a/include/media/msm_camera.h +++ b/include/media/msm_camera.h @@ -27,7 +27,7 @@ #include #endif -#include +#include #define BIT(nr) (1UL << (nr)) diff --git a/include/media/videobuf2-msm-mem.h b/include/media/videobuf2-msm-mem.h index 0e12cc2a..272fde37 100644 --- a/include/media/videobuf2-msm-mem.h +++ b/include/media/videobuf2-msm-mem.h @@ -17,7 +17,7 @@ #include #include -#include +#include struct videobuf2_mapping { unsigned int count; diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h index c3d83229..600321fe 100644 --- a/include/sound/q6asm.h +++ b/include/sound/q6asm.h @@ -15,7 +15,7 @@ #include #include #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION -#include +#include #endif #define IN 0x000 diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a48b7d84..5a09434f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3866,6 +3866,12 @@ static void migrate_tasks(unsigned int dead_cpu) if (rq->nr_running == 1) break; + + if ((rq->cfs.nr_running == 0) && (rq->rt.rt_nr_running == 0)) { + requeue_rt_rq_tasks(rq); + continue; + } + next = pick_next_task(rq); BUG_ON(!next); next->sched_class->put_prev_task(rq, next); @@ -5395,9 +5401,6 @@ void __init sched_init_smp(void) hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); - - hotcpu_notifier(update_runtime, 0); - init_hrtick(); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index be427c5b..95762124 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -691,15 +691,6 @@ static void __disable_runtime(struct rq *rq) } } -static void disable_runtime(struct rq *rq) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&rq->lock, flags); - __disable_runtime(rq); - raw_spin_unlock_irqrestore(&rq->lock, flags); -} - static void __enable_runtime(struct rq *rq) { rt_rq_iter_t iter; @@ -724,37 +715,6 @@ static void __enable_runtime(struct rq *rq) } } -static void enable_runtime(struct rq *rq) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&rq->lock, flags); - __enable_runtime(rq); - raw_spin_unlock_irqrestore(&rq->lock, flags); -} - -int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu) -{ - int cpu = (int)(long)hcpu; - - switch (action) { - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - disable_runtime(cpu_rq(cpu)); - return NOTIFY_OK; - - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - enable_runtime(cpu_rq(cpu)); - return NOTIFY_OK; - - default: - return NOTIFY_DONE; - } -} - static int 
balance_runtime(struct rt_rq *rt_rq) { int more = 0; @@ -2068,6 +2028,16 @@ const struct sched_class rt_sched_class = { .switched_to = switched_to_rt, }; +void requeue_rt_rq_tasks(struct rq *rq) { + rt_rq_iter_t iter; + struct rt_rq *rt_rq; + + for_each_rt_rq(rt_rq, iter, rq) { + if (rt_rq->rt_nr_running > 0) + sched_rt_rq_enqueue(rt_rq); + } +} + #ifdef CONFIG_SCHED_DEBUG extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index b52189be..362217d5 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -684,7 +684,6 @@ extern void sysrq_sched_debug_show(void); extern void sched_init_granularity(void); extern void update_max_interval(void); extern void update_group_power(struct sched_domain *sd, int cpu); -extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu); extern void init_sched_rt_class(void); extern void init_sched_fair_class(void); @@ -912,6 +911,8 @@ extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq); extern void account_cfs_bandwidth_used(int enabled, int was_enabled); +void requeue_rt_rq_tasks(struct rq *rq); + #ifdef CONFIG_NO_HZ enum rq_nohz_flag_bits { NOHZ_TICK_STOPPED, diff --git a/lib/idr.c b/lib/idr.c index 4046e29c..842bd27c 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -625,7 +625,7 @@ void *idr_get_next(struct idr *idp, int *nextidp) return p; } - id += 1 << n; + id = round_up(id + 1, 1 << n); while (n < fls(id)) { n += IDR_BITS; p = *--paa; diff --git a/mm/compaction.c b/mm/compaction.c index eede9810..890b5fed 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -753,6 +753,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, struct zoneref *z; struct zone *zone; int rc = COMPACT_SKIPPED; + int alloc_flags = 0; /* * Check whether it is worth even starting compaction. The order check is @@ -764,7 +765,11 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, count_vm_event(COMPACTSTALL); - /* Compact each zone in the list */ +#ifdef CONFIG_CMA + if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + alloc_flags |= ALLOC_CMA; +#endif + for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) { int status; @@ -772,8 +777,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, status = compact_zone_order(zone, order, gfp_mask, sync); rc = max(status, rc); - /* If a normal allocation would succeed, stop compacting */ - if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) + if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, + alloc_flags)) break; } diff --git a/mm/filemap.c b/mm/filemap.c index 79c4b2b0..b1a70d24 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -769,18 +769,21 @@ struct page *find_or_create_page(struct address_space *mapping, { struct page *page; int err; + gfp_t gfp_notmask = 0; + repeat: page = find_lock_page(mapping, index); if (!page) { - page = __page_cache_alloc(gfp_mask); +retry: + page = __page_cache_alloc(gfp_mask & ~gfp_notmask); if (!page) return NULL; - /* - * We want a regular kernel memory (not highmem or DMA etc) - * allocation for the radix tree nodes, but we need to honour - * the context-specific requirements the caller has asked for. - * GFP_RECLAIM_MASK collects those requirements. 
- */ + + if (is_cma_pageblock(page)) { + __free_page(page); + gfp_notmask |= __GFP_MOVABLE; + goto retry; + } err = add_to_page_cache_lru(page, mapping, index, (gfp_mask & GFP_RECLAIM_MASK)); if (unlikely(err)) { @@ -2343,9 +2346,17 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping, if (page) goto found; +retry: page = __page_cache_alloc(gfp_mask & ~gfp_notmask); if (!page) return NULL; + + if (is_cma_pageblock(page)) { + __free_page(page); + gfp_notmask |= __GFP_MOVABLE; + goto retry; + } + status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL & ~gfp_notmask); if (unlikely(status)) { diff --git a/mm/internal.h b/mm/internal.h index aee4761c..dbb2f53c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -342,3 +342,15 @@ extern u64 hwpoison_filter_flags_mask; extern u64 hwpoison_filter_flags_value; extern u64 hwpoison_filter_memcg; extern u32 hwpoison_filter_enable; + +#define ALLOC_WMARK_MIN WMARK_MIN +#define ALLOC_WMARK_LOW WMARK_LOW +#define ALLOC_WMARK_HIGH WMARK_HIGH +#define ALLOC_NO_WATERMARKS 0x04 + +#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) + +#define ALLOC_HARDER 0x10 +#define ALLOC_HIGH 0x20 +#define ALLOC_CPUSET 0x40 +#define ALLOC_CMA 0x80 diff --git a/mm/ksm.c b/mm/ksm.c index fe3cbbf2..d40858c9 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -42,10 +42,6 @@ #include #endif -#ifdef CONFIG_KSM_HTC_POLICY -#include -#endif - #include #include "internal.h" diff --git a/mm/memory.c b/mm/memory.c index 6105f475..352e0d78 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -2899,6 +2900,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, entry = pte_to_swp_entry(orig_pte); if (unlikely(non_swap_entry(entry))) { if (is_migration_entry(entry)) { +#ifdef CONFIG_CMA + mdelay(10); +#endif migration_entry_wait(mm, pmd, address); } else if (is_hwpoison_entry(entry)) { ret = VM_FAULT_HWPOISON; diff --git a/mm/mmap.c b/mm/mmap.c index 4e3ed4a4..95f2cc46 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -786,9 +786,6 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff, anon_name)) { - /* - * OK, it can. Can we now merge in the successor as well? - */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, @@ -814,7 +811,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, anon_name)) { - if (prev && addr < prev->vm_end) /* case 4 */ + if (prev && addr < prev->vm_end) err = vma_adjust(prev, prev->vm_start, addr, prev->vm_pgoff, NULL); else /* cases 3, 8 */ @@ -1285,9 +1282,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vm_flags |= VM_ACCOUNT; } - /* - * Can we just expand an old mapping? 
- */ vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL, NULL); if (vma) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 98e04fb0..de81de2d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -445,7 +445,8 @@ static inline void __free_one_page(struct page *page, if (page_is_guard(buddy)) { clear_page_guard_flag(buddy); set_page_private(page, 0); - __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); + __mod_zone_freepage_state(zone, 1 << order, + migratetype); } else { list_del(&buddy->lru); zone->free_area[order].nr_free--; @@ -503,6 +504,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, int migratetype = 0; int batch_free = 0; int to_free = count; + int mt = 0; spin_lock(&zone->lock); zone->all_unreclaimable = 0; @@ -525,11 +527,15 @@ static void free_pcppages_bulk(struct zone *zone, int count, do { page = list_entry(list->prev, struct page, lru); + mt = get_pageblock_migratetype(page); list_del(&page->lru); __free_one_page(page, zone, 0, page_private(page)); trace_mm_page_pcpu_drain(page, 0, page_private(page)); + if (is_migrate_cma(mt)) + __mod_zone_page_state(zone, + NR_FREE_CMA_PAGES, 1); } while (--to_free && --batch_free && !list_empty(list)); } __mod_zone_page_state(zone, NR_FREE_PAGES, count); @@ -544,7 +550,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order, zone->pages_scanned = 0; __free_one_page(page, zone, order, migratetype); - __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); + if (unlikely(migratetype != MIGRATE_ISOLATE)) + __mod_zone_freepage_state(zone, 1 << order, migratetype); spin_unlock(&zone->lock); } @@ -611,6 +618,11 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order) } #ifdef CONFIG_CMA +bool is_cma_pageblock(struct page *page) +{ + return get_pageblock_migratetype(page) == MIGRATE_CMA; +} + void __init init_cma_reserved_pageblock(struct page *page) { unsigned i = pageblock_nr_pages; @@ -625,6 +637,10 @@ void __init init_cma_reserved_pageblock(struct page *page) set_pageblock_migratetype(page, MIGRATE_CMA); __free_pages(page, pageblock_order); totalram_pages += pageblock_nr_pages; +#ifdef CONFIG_HIGHMEM + if (PageHighMem(page)) + totalhigh_pages += pageblock_nr_pages; +#endif } #endif @@ -646,7 +662,8 @@ static inline void expand(struct zone *zone, struct page *page, set_page_guard_flag(&page[size]); set_page_private(&page[size], high); - __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high)); + __mod_zone_freepage_state(zone, -(1 << high), + migratetype); continue; } #endif @@ -890,15 +907,45 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order, return page; } +static struct page *__rmqueue_cma(struct zone *zone, unsigned int order, + int migratetype) +{ + struct page *page = 0; +#ifdef CONFIG_CMA + if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc) + page = __rmqueue_smallest(zone, order, MIGRATE_CMA); + if (!page) +#endif +retry_reserve : + page = __rmqueue_smallest(zone, order, migratetype); + + + if (unlikely(!page) && migratetype != MIGRATE_RESERVE) { + page = __rmqueue_fallback(zone, order, migratetype); + + if (!page) { + migratetype = MIGRATE_RESERVE; + goto retry_reserve; + } + } + + trace_mm_page_alloc_zone_locked(page, order, migratetype); + return page; +} + static int rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, - int migratetype, int cold) + int migratetype, int cold, int cma) { int mt = migratetype, i; spin_lock(&zone->lock); for (i = 0; i < count; ++i) { - 
struct page *page = __rmqueue(zone, order, migratetype); + struct page *page; + if (cma) + page = __rmqueue_cma(zone, order, migratetype); + else + page = __rmqueue(zone, order, migratetype); if (unlikely(page == NULL)) break; @@ -913,6 +960,9 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, } set_page_private(page, mt); list = &page->lru; + if (is_migrate_cma(mt)) + __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, + -(1 << order)); } __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); spin_unlock(&zone->lock); @@ -1042,7 +1092,8 @@ void free_hot_cold_page(struct page *page, int cold) __count_vm_event(PGFREE); if (migratetype >= MIGRATE_PCPTYPES) { - if (unlikely(migratetype == MIGRATE_ISOLATE)) { + if (unlikely(migratetype == MIGRATE_ISOLATE) || + is_migrate_cma(migratetype)) { free_one_page(zone, page, 0, migratetype); goto out; } @@ -1095,22 +1146,27 @@ int split_free_page(struct page *page) unsigned int order; unsigned long watermark; struct zone *zone; + int mt; BUG_ON(!PageBuddy(page)); zone = page_zone(page); order = page_order(page); + mt = get_pageblock_migratetype(page); watermark = low_wmark_pages(zone) + (1 << order); - if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) + if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE && + !zone_watermark_ok(zone, 0, watermark, 0, 0)) return 0; list_del(&page->lru); zone->free_area[order].nr_free--; rmv_page_order(page); - __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order)); + + if (unlikely(mt != MIGRATE_ISOLATE)) + __mod_zone_freepage_state(zone, -(1UL << order), mt); set_page_refcounted(page); @@ -1119,7 +1175,7 @@ int split_free_page(struct page *page) if (order >= pageblock_order - 1) { struct page *endpage = page + (1 << order) - 1; for (; page < endpage; page += pageblock_nr_pages) { - int mt = get_pageblock_migratetype(page); + mt = get_pageblock_migratetype(page); if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt)) set_pageblock_migratetype(page, MIGRATE_MOVABLE); @@ -1149,7 +1205,8 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, if (list_empty(list)) { pcp->count += rmqueue_bulk(zone, 0, pcp->batch, list, - migratetype, cold); + migratetype, cold, + gfp_flags & __GFP_CMA); if (unlikely(list_empty(list))) goto failed; } @@ -1166,11 +1223,15 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, WARN_ON_ONCE(order > 1); } spin_lock_irqsave(&zone->lock, flags); - page = __rmqueue(zone, order, migratetype); + if (gfp_flags & __GFP_CMA) + page = __rmqueue_cma(zone, order, migratetype); + else + page = __rmqueue(zone, order, migratetype); spin_unlock(&zone->lock); if (!page) goto failed; - __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)); + __mod_zone_freepage_state(zone, -(1 << order), + get_pageblock_migratetype(page)); } __count_zone_vm_events(PGALLOC, zone, 1 << order); @@ -1187,17 +1248,6 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, return NULL; } -#define ALLOC_WMARK_MIN WMARK_MIN -#define ALLOC_WMARK_LOW WMARK_LOW -#define ALLOC_WMARK_HIGH WMARK_HIGH -#define ALLOC_NO_WATERMARKS 0x04 - -#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) - -#define ALLOC_HARDER 0x10 -#define ALLOC_HIGH 0x20 -#define ALLOC_CPUSET 0x40 - #ifdef CONFIG_FAIL_PAGE_ALLOC static struct { @@ -1282,14 +1332,20 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, long min = mark; long lowmem_reserve = z->lowmem_reserve[classzone_idx]; int o; + long free_cma = 0; free_pages -= (1 << order) - 1; if (alloc_flags & ALLOC_HIGH) min -= min / 2; if (alloc_flags & 
ALLOC_HARDER) min -= min / 4; +#ifdef CONFIG_CMA + + if (!(alloc_flags & ALLOC_CMA)) + free_cma = zone_page_state(z, NR_FREE_CMA_PAGES); +#endif - if (free_pages <= min + lowmem_reserve) + if (free_pages - free_cma <= min + lowmem_reserve) return false; for (o = 0; o < order; o++) { @@ -1790,7 +1846,10 @@ gfp_to_alloc_flags(gfp_t gfp_mask) unlikely(test_thread_flag(TIF_MEMDIE)))) alloc_flags |= ALLOC_NO_WATERMARKS; } - +#ifdef CONFIG_CMA + if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + alloc_flags |= ALLOC_CMA; +#endif return alloc_flags; } @@ -1943,6 +2002,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct page *page = NULL; int migratetype = allocflags_to_migratetype(gfp_mask); unsigned int cpuset_mems_cookie; + int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET; gfp_mask &= gfp_allowed_mask; @@ -1966,9 +2026,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (!preferred_zone) goto out; +#ifdef CONFIG_CMA + if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + alloc_flags |= ALLOC_CMA; +#endif page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, - zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET, + zonelist, high_zoneidx, alloc_flags, preferred_zone, migratetype); if (unlikely(!page)) page = __alloc_pages_slowpath(gfp_mask, order, @@ -2160,6 +2224,31 @@ bool skip_free_areas_node(unsigned int flags, int nid) #define K(x) ((x) << (PAGE_SHIFT-10)) +static void show_migration_types(unsigned char type) +{ + static const char types[MIGRATE_TYPES] = { + [MIGRATE_UNMOVABLE] = 'U', + [MIGRATE_RECLAIMABLE] = 'E', + [MIGRATE_MOVABLE] = 'M', + [MIGRATE_RESERVE] = 'R', +#ifdef CONFIG_CMA + [MIGRATE_CMA] = 'C', +#endif + [MIGRATE_ISOLATE] = 'I', + }; + char tmp[MIGRATE_TYPES + 1]; + char *p = tmp; + int i; + + for (i = 0; i < MIGRATE_TYPES; i++) { + if (type & (1 << i)) + *p++ = types[i]; + } + + *p = '\0'; + printk("(%s) ", tmp); +} + void show_free_areas(unsigned int filter) { int cpu; @@ -2187,7 +2276,8 @@ void show_free_areas(unsigned int filter) " unevictable:%lu" " dirty:%lu writeback:%lu unstable:%lu\n" " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" - " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n", + " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" + " free_cma:%lu\n", global_page_state(NR_ACTIVE_ANON), global_page_state(NR_INACTIVE_ANON), global_page_state(NR_ISOLATED_ANON), @@ -2204,7 +2294,8 @@ void show_free_areas(unsigned int filter) global_page_state(NR_FILE_MAPPED), global_page_state(NR_SHMEM), global_page_state(NR_PAGETABLE), - global_page_state(NR_BOUNCE)); + global_page_state(NR_BOUNCE), + global_page_state(NR_FREE_CMA_PAGES)); for_each_populated_zone(zone) { int i; @@ -2236,6 +2327,7 @@ void show_free_areas(unsigned int filter) " pagetables:%lukB" " unstable:%lukB" " bounce:%lukB" + " free_cma:%lukB" " writeback_tmp:%lukB" " pages_scanned:%lu" " all_unreclaimable? %s" @@ -2265,6 +2357,7 @@ void show_free_areas(unsigned int filter) K(zone_page_state(zone, NR_PAGETABLE)), K(zone_page_state(zone, NR_UNSTABLE_NFS)), K(zone_page_state(zone, NR_BOUNCE)), + K(zone_page_state(zone, NR_FREE_CMA_PAGES)), K(zone_page_state(zone, NR_WRITEBACK_TEMP)), zone->pages_scanned, (zone->all_unreclaimable ? 
"yes" : "no") @@ -2277,6 +2370,7 @@ void show_free_areas(unsigned int filter) for_each_populated_zone(zone) { unsigned long nr[MAX_ORDER], flags, order, total = 0; + unsigned char types[MAX_ORDER]; if (skip_free_areas_node(filter, zone_to_nid(zone))) continue; @@ -2285,12 +2379,24 @@ void show_free_areas(unsigned int filter) spin_lock_irqsave(&zone->lock, flags); for (order = 0; order < MAX_ORDER; order++) { - nr[order] = zone->free_area[order].nr_free; + struct free_area *area = &zone->free_area[order]; + int type; + + nr[order] = area->nr_free; total += nr[order] << order; + + types[order] = 0; + for (type = 0; type < MIGRATE_TYPES; type++) { + if (!list_empty(&area->free_list[type])) + types[order] |= 1 << type; + } } spin_unlock_irqrestore(&zone->lock, flags); - for (order = 0; order < MAX_ORDER; order++) + for (order = 0; order < MAX_ORDER; order++) { printk("%lu*%lukB ", nr[order], K(1UL) << order); + if (nr[order]) + show_migration_types(types[order]); + } printk("= %lukB\n", K(total)); } @@ -3910,10 +4016,6 @@ static void __setup_per_zone_wmarks(void) zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + low + (min >> 1); - zone->watermark[WMARK_MIN] += cma_wmark_pages(zone); - zone->watermark[WMARK_LOW] += cma_wmark_pages(zone); - zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone); - setup_zone_migrate_reserve(zone); spin_unlock_irqrestore(&zone->lock, flags); } @@ -4155,7 +4257,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) pfn &= (PAGES_PER_SECTION-1); return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #else - pfn = pfn - zone->zone_start_pfn; + pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages); return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #endif } @@ -4281,8 +4383,13 @@ int set_migratetype_isolate(struct page *page) out: if (!ret) { + unsigned long nr_pages; + int migratetype = get_pageblock_migratetype(page); + set_pageblock_migratetype(page, MIGRATE_ISOLATE); - move_freepages_block(zone, page, MIGRATE_ISOLATE); + nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); + + __mod_zone_freepage_state(zone, -nr_pages, migratetype); } spin_unlock_irqrestore(&zone->lock, flags); @@ -4294,13 +4401,15 @@ int set_migratetype_isolate(struct page *page) void unset_migratetype_isolate(struct page *page, unsigned migratetype) { struct zone *zone; - unsigned long flags; + unsigned long flags, nr_pages; + zone = page_zone(page); spin_lock_irqsave(&zone->lock, flags); if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) goto out; + nr_pages = move_freepages_block(zone, page, migratetype); + __mod_zone_freepage_state(zone, nr_pages, migratetype); set_pageblock_migratetype(page, migratetype); - move_freepages_block(zone, page, migratetype); out: spin_unlock_irqrestore(&zone->lock, flags); } @@ -4323,7 +4432,12 @@ static struct page * __alloc_contig_migrate_alloc(struct page *page, unsigned long private, int **resultp) { - return alloc_page(GFP_HIGHUSER_MOVABLE); + gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; + + if (PageHighMem(page)) + gfp_mask |= __GFP_HIGHMEM; + + return alloc_page(gfp_mask); } static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) @@ -4342,7 +4456,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) }; INIT_LIST_HEAD(&cc.migratepages); - migrate_prep_local(); + migrate_prep(); while (pfn < end || !list_empty(&cc.migratepages)) { if (fatal_signal_pending(current)) { @@ -4366,49 +4480,13 @@ static int __alloc_contig_migrate_range(unsigned long start, 
unsigned long end) ret = migrate_pages(&cc.migratepages, __alloc_contig_migrate_alloc, - 0, false, true); + 0, false, MIGRATE_SYNC); } putback_lru_pages(&cc.migratepages); return ret > 0 ? 0 : ret; } -static inline void __update_cma_watermarks(struct zone *zone, int count) -{ - unsigned long flags; - spin_lock_irqsave(&zone->lock, flags); - zone->min_cma_pages += count; - spin_unlock_irqrestore(&zone->lock, flags); - setup_per_zone_wmarks(); -} - -static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count) -{ - enum zone_type high_zoneidx = gfp_zone(gfp_mask); - struct zonelist *zonelist = node_zonelist(0, gfp_mask); - int did_some_progress = 0; - int order = 1; - - __update_cma_watermarks(zone, count); - - - while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) { - wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone)); - - did_some_progress = __perform_reclaim(gfp_mask, order, zonelist, - NULL); - if (!did_some_progress) { - - out_of_memory(zonelist, gfp_mask, order, NULL, false); - } - } - - - __update_cma_watermarks(zone, -count); - - return count; -} - int alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype) { @@ -4422,6 +4500,8 @@ int alloc_contig_range(unsigned long start, unsigned long end, if (ret) goto done; + zone->cma_alloc = 1; + ret = __alloc_contig_migrate_range(start, end); if (ret) goto done; @@ -4448,7 +4528,6 @@ int alloc_contig_range(unsigned long start, unsigned long end, goto done; } - __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start); outer_end = isolate_freepages_range(outer_start, end); @@ -4466,6 +4545,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, done: undo_isolate_page_range(pfn_max_align_down(start), pfn_max_align_up(end), migratetype); + zone->cma_alloc = 0; return ret; } diff --git a/mm/vmstat.c b/mm/vmstat.c index 0dad31dc..8e18d6b4 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -722,6 +722,7 @@ const char * const vmstat_text[] = { "numa_other", #endif "nr_anon_transparent_hugepages", + "nr_free_cma", "nr_dirty_threshold", "nr_dirty_background_threshold", diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 1bd4ebe1..ba8204ea 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -39,17 +39,12 @@ #include #endif -//htc audio ++ #undef pr_info #undef pr_err #define pr_info(fmt, ...) pr_aud_info(fmt, ##__VA_ARGS__) #define pr_err(fmt, ...) pr_aud_err(fmt, ##__VA_ARGS__) -//htc audio -- -/* - * Compatibility - */ struct snd_pcm_hw_params_old { unsigned int flags; @@ -78,9 +73,6 @@ static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream, #endif static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream); -/* - * - */ DEFINE_RWLOCK(snd_pcm_link_rwlock); EXPORT_SYMBOL(snd_pcm_link_rwlock); @@ -108,7 +100,7 @@ int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info) struct snd_pcm_str *pstr = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -128,7 +120,7 @@ int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info) info->subdevices_avail = pstr->substream_count - pstr->substream_opened; strlcpy(info->subname, substream->name, sizeof(info->subname)); runtime = substream->runtime; - /* AB: FIXME!!! 
This is definitely nonsense */ + if (runtime) { info->sync = runtime->sync; substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_INFO, info); @@ -190,7 +182,7 @@ int snd_pcm_hw_refine(struct snd_pcm_substream *substream, unsigned int stamp = 2; int changed, again; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -380,9 +372,9 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime) int usecs; if (! runtime->rate) - return -1; /* invalid */ + return -1; - /* take 75% of period time as the deadline */ + usecs = (750000 / runtime->rate) * runtime->period_size; usecs += ((750000 % runtime->rate) * runtime->period_size) / runtime->rate; @@ -398,7 +390,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, unsigned int bits; snd_pcm_uframes_t frames; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) { pr_info("%s: substream is NULL\n", __func__); return -ENXIO; @@ -473,7 +465,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, runtime->byte_align = bits / 8; runtime->min_align = frames; - /* Default sw params */ + runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE; runtime->period_step = 1; runtime->control->avail_min = runtime->period_size; @@ -496,9 +488,6 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, PM_QOS_CPU_DMA_LATENCY, usecs); return 0; _error: - /* hardware might be unusable from this time, - so we force application to retry to set - the correct hardware parameter settings */ runtime->status->state = SNDRV_PCM_STATE_OPEN; if (substream->ops->hw_free != NULL) substream->ops->hw_free(substream); @@ -534,7 +523,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime; int result = 0; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -564,7 +553,7 @@ static int snd_pcm_sw_params(struct snd_pcm_substream *substream, struct snd_pcm_runtime *runtime; int err; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -615,7 +604,7 @@ static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream, struct snd_pcm_sw_params params; int err; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -632,7 +621,7 @@ int snd_pcm_status(struct snd_pcm_substream *substream, { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -685,7 +674,7 @@ static int snd_pcm_status_user(struct snd_pcm_substream *substream, struct snd_pcm_status status; int res; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -704,7 +693,7 @@ static int snd_pcm_channel_info(struct snd_pcm_substream *substream, struct snd_pcm_runtime *runtime; unsigned int channel; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -730,7 +719,7 @@ static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream, struct snd_pcm_channel_info info; int res; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -748,7 +737,7 @@ static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. 
*/ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -772,11 +761,6 @@ struct action_ops { void (*post_action)(struct snd_pcm_substream *substream, int state); }; -/* - * this functions is core for handling of linked stream - * Note: the stream state might be changed also on failure - * Note2: call with calling stream lock + link lock - */ static int snd_pcm_action_group(struct action_ops *ops, struct snd_pcm_substream *substream, int state, int do_lock) @@ -785,7 +769,7 @@ static int snd_pcm_action_group(struct action_ops *ops, struct snd_pcm_substream *s1; int res = 0; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -802,12 +786,12 @@ static int snd_pcm_action_group(struct action_ops *ops, if (res < 0) { if (ops->undo_action) { snd_pcm_group_for_each_entry(s1, substream) { - if (s1 == s) /* failed stream */ + if (s1 == s) break; ops->undo_action(s1, state); } } - s = NULL; /* unlock all */ + s = NULL; goto _unlock; } } @@ -816,27 +800,24 @@ static int snd_pcm_action_group(struct action_ops *ops, } _unlock: if (do_lock) { - /* unlock streams */ + snd_pcm_group_for_each_entry(s1, substream) { if (s1 != substream) spin_unlock(&s1->self_group.lock); - if (s1 == s) /* end */ + if (s1 == s) break; } } return res; } -/* - * Note: call with stream lock - */ static int snd_pcm_action_single(struct action_ops *ops, struct snd_pcm_substream *substream, int state) { int res; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -851,16 +832,13 @@ static int snd_pcm_action_single(struct action_ops *ops, return res; } -/* - * Note: call with stream lock - */ static int snd_pcm_action(struct action_ops *ops, struct snd_pcm_substream *substream, int state) { int res; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -878,16 +856,13 @@ static int snd_pcm_action(struct action_ops *ops, return res; } -/* - * Note: don't use any locks before - */ static int snd_pcm_action_lock_irq(struct action_ops *ops, struct snd_pcm_substream *substream, int state) { int res; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -907,15 +882,13 @@ static int snd_pcm_action_lock_irq(struct action_ops *ops, return res; } -/* - */ static int snd_pcm_action_nonatomic(struct action_ops *ops, struct snd_pcm_substream *substream, int state) { int res; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -928,14 +901,11 @@ static int snd_pcm_action_nonatomic(struct action_ops *ops, return res; } -/* - * start callbacks - */ static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -952,7 +922,7 @@ static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state) static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -963,7 +933,7 @@ static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state) static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state) { - /* if substream is NULL, return error. 
*/ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -975,7 +945,7 @@ static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return; @@ -1001,24 +971,17 @@ static struct action_ops snd_pcm_action_start = { .post_action = snd_pcm_post_start }; -/** - * snd_pcm_start - start all linked streams - * @substream: the PCM substream instance - */ int snd_pcm_start(struct snd_pcm_substream *substream) { return snd_pcm_action(&snd_pcm_action_start, substream, SNDRV_PCM_STATE_RUNNING); } -/* - * stop callbacks - */ static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1031,21 +994,21 @@ static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state) static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; if (substream->runtime->trigger_master == substream && snd_pcm_running(substream)) substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); - return 0; /* unconditonally stop all substreams */ + return 0; } static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -1067,13 +1030,6 @@ static struct action_ops snd_pcm_action_stop = { .post_action = snd_pcm_post_stop }; -/** - * snd_pcm_stop - try to stop all running streams in the substream group - * @substream: the PCM substream instance - * @state: PCM state after stopping the stream - * - * The state of each stream is then changed to the given state unconditionally. - */ int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state) { return snd_pcm_action(&snd_pcm_action_stop, substream, state); @@ -1081,27 +1037,17 @@ int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state) EXPORT_SYMBOL(snd_pcm_stop); -/** - * snd_pcm_drain_done - stop the DMA only when the given stream is playback - * @substream: the PCM substream - * - * After stopping, the state is changed to SETUP. - * Unlike snd_pcm_stop(), this affects only the given stream. - */ int snd_pcm_drain_done(struct snd_pcm_substream *substream) { return snd_pcm_action_single(&snd_pcm_action_stop, substream, SNDRV_PCM_STATE_SETUP); } -/* - * pause callbacks - */ static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1119,20 +1065,14 @@ static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push) static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; if (substream->runtime->trigger_master != substream) return 0; - /* some drivers might use hw_ptr to recover from the pause - - update the hw_ptr now */ if (push) snd_pcm_update_hw_ptr(substream); - /* The jiffies check in snd_pcm_update_hw_ptr*() is done by - * a delta between the current jiffies, this gives a large enough - * delta, effectively to skip the check once. 
- */ substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000; return substream->ops->trigger(substream, push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH : @@ -1141,7 +1081,7 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push) static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -1155,7 +1095,7 @@ static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -1185,22 +1125,18 @@ static struct action_ops snd_pcm_action_pause = { .post_action = snd_pcm_post_pause }; -/* - * Push/release the pause for all linked streams. - */ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push) { return snd_pcm_action(&snd_pcm_action_pause, substream, push); } #ifdef CONFIG_PM -/* suspend */ static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1215,7 +1151,7 @@ static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1225,14 +1161,14 @@ static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state) if (! snd_pcm_running(substream)) return 0; substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND); - return 0; /* suspend unconditionally */ + return 0; } static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -1253,12 +1189,6 @@ static struct action_ops snd_pcm_action_suspend = { .post_action = snd_pcm_post_suspend }; -/** - * snd_pcm_suspend - trigger SUSPEND to all linked streams - * @substream: the PCM substream - * - * After this call, all streams are changed to SUSPENDED state. - */ int snd_pcm_suspend(struct snd_pcm_substream *substream) { int err; @@ -1275,12 +1205,6 @@ int snd_pcm_suspend(struct snd_pcm_substream *substream) EXPORT_SYMBOL(snd_pcm_suspend); -/** - * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm - * @pcm: the PCM instance - * - * After this call, all streams are changed to SUSPENDED state. - */ int snd_pcm_suspend_all(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; @@ -1292,7 +1216,7 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm) for (stream = 0; stream < 2; stream++) { for (substream = pcm->streams[stream].substream; substream; substream = substream->next) { - /* FIXME: the open/close code should lock this as well */ + if (substream->runtime == NULL) continue; err = snd_pcm_suspend(substream); @@ -1305,13 +1229,12 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm) EXPORT_SYMBOL(snd_pcm_suspend_all); -/* resume */ static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1326,14 +1249,14 @@ static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. 
*/ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (runtime->trigger_master != substream) return 0; - /* DMA not running previously? */ + if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING && (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING || substream->stream != SNDRV_PCM_STREAM_PLAYBACK)) @@ -1343,7 +1266,7 @@ static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state) static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -1356,7 +1279,7 @@ static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -1380,7 +1303,7 @@ static int snd_pcm_resume(struct snd_pcm_substream *substream) struct snd_card *card = NULL; int res; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1400,20 +1323,15 @@ static int snd_pcm_resume(struct snd_pcm_substream *substream) return -ENOSYS; } -#endif /* CONFIG_PM */ +#endif -/* - * xrun ioctl - * - * Change the RUNNING stream(s) to XRUN state. - */ static int snd_pcm_xrun(struct snd_pcm_substream *substream) { struct snd_card *card = NULL; struct snd_pcm_runtime *runtime = NULL; int result; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1430,7 +1348,7 @@ static int snd_pcm_xrun(struct snd_pcm_substream *substream) snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_XRUN: - result = 0; /* already there */ + result = 0; break; case SNDRV_PCM_STATE_RUNNING: result = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); @@ -1444,14 +1362,11 @@ static int snd_pcm_xrun(struct snd_pcm_substream *substream) return result; } -/* - * reset ioctl - */ static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1473,7 +1388,7 @@ static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state) struct snd_pcm_runtime *runtime = NULL; int err; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1494,7 +1409,7 @@ static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -1517,16 +1432,12 @@ static int snd_pcm_reset(struct snd_pcm_substream *substream) return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0); } -/* - * prepare ioctl - */ -/* we use the second argument for updating f_flags */ static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream, int f_flags) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1545,7 +1456,7 @@ static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state) { int err; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1559,7 +1470,7 @@ static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. 
*/ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -1574,11 +1485,6 @@ static struct action_ops snd_pcm_action_prepare = { .post_action = snd_pcm_post_prepare }; -/** - * snd_pcm_prepare - prepare the PCM substream to be triggerable - * @substream: the PCM substream instance - * @file: file to refer f_flags - */ static int snd_pcm_prepare(struct snd_pcm_substream *substream, struct file *file) { @@ -1586,7 +1492,7 @@ static int snd_pcm_prepare(struct snd_pcm_substream *substream, struct snd_card *card = NULL; int f_flags; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1605,13 +1511,10 @@ static int snd_pcm_prepare(struct snd_pcm_substream *substream, return res; } -/* - * drain ioctl - */ static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1623,7 +1526,7 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state) { struct snd_pcm_runtime *runtime = NULL; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1631,7 +1534,7 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state) if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { switch (runtime->status->state) { case SNDRV_PCM_STATE_PREPARED: - /* start playback stream if possible */ + if (! snd_pcm_playback_empty(substream)) { snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING); snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING); @@ -1644,7 +1547,7 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state) break; } } else { - /* stop running stream */ + if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) { int new_state = snd_pcm_capture_avail(runtime) > 0 ? SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP; @@ -1667,13 +1570,6 @@ static struct action_ops snd_pcm_action_drain_init = { static int snd_pcm_drop(struct snd_pcm_substream *substream); -/* - * Drain the stream(s). - * When the substream is linked, sync until the draining of all playback streams - * is finished. - * After this call, all streams are supposed to be either SETUP or DRAINING - * (capture only) state. - */ static int snd_pcm_drain(struct snd_pcm_substream *substream, struct file *file) { @@ -1684,7 +1580,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, int result = 0; int nonblock = 0; - /* if substream is NULL, return error. 
*/ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1711,15 +1607,15 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, down_read(&snd_pcm_link_rwsem); snd_pcm_stream_lock_irq(substream); - /* resume pause */ + if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) snd_pcm_pause(substream, 0); - /* pre-start/stop - all running streams are changed to DRAINING state */ + result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0); if (result < 0) goto unlock; - /* in non-blocking, we don't wait in ioctl but let caller poll */ + if (nonblock) { result = -EAGAIN; goto unlock; @@ -1732,7 +1628,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, result = -ERESTARTSYS; break; } - /* find a substream to drain */ + to_check = NULL; snd_pcm_group_for_each_entry(s, substream) { if (s->stream != SNDRV_PCM_STREAM_PLAYBACK) @@ -1744,7 +1640,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, } } if (!to_check) - break; /* all drained */ + break; init_waitqueue_entry(&wait, current); add_wait_queue(&to_check->sleep, &wait); snd_pcm_stream_unlock_irq(substream); @@ -1798,11 +1694,6 @@ static int snd_compressed_ioctl(struct snd_pcm_substream *substream, err = substream->ops->ioctl(substream, cmd, arg); return err; } -/* - * drop ioctl - * - * Immediately put all linked substreams into SETUP state. - */ static int snd_pcm_drop(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; @@ -1819,20 +1710,19 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream) snd_pcm_stream_lock_irq(substream); - /* resume pause */ + if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) snd_pcm_pause(substream, 0); snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); - /* runtime->control->appl_ptr = runtime->status->hw_ptr; */ + snd_pcm_stream_unlock_irq(substream); return result; } -/* WARNING: Don't forget to fput back the file */ static struct file *snd_pcm_file_fd(int fd) { struct file *file; @@ -1857,9 +1747,6 @@ static struct file *snd_pcm_file_fd(int fd) return file; } -/* - * PCM link handling - */ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) { int res = 0; @@ -1868,6 +1755,10 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) struct snd_pcm_substream *substream1; struct snd_pcm_group *group; + + if (PCM_RUNTIME_CHECK(substream)) + return -ENXIO; + file = snd_pcm_file_fd(fd); if (!file) return -EBADFD; @@ -1880,10 +1771,6 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) } down_write(&snd_pcm_link_rwsem); - /* if substream is NULL, return error. */ - if (PCM_RUNTIME_CHECK(substream)) - return -ENXIO; - write_lock_irq(&snd_pcm_link_rwlock); if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN || substream->runtime->status->state != substream1->runtime->status->state) { @@ -1916,7 +1803,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) static void relink_to_local(struct snd_pcm_substream *substream) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -1930,7 +1817,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream) struct snd_pcm_substream *s; int res = 0; - /* if substream is NULL, return error. 
*/ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -1942,7 +1829,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream) } list_del(&substream->link_list); substream->group->count--; - if (substream->group->count == 1) { /* detach the last stream, too */ + if (substream->group->count == 1) { snd_pcm_group_for_each_entry(s, substream) { relink_to_local(s); break; @@ -1956,9 +1843,6 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream) return res; } -/* - * hw configurator - */ static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { @@ -2019,7 +1903,7 @@ static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params, continue; bits = snd_pcm_format_physical_width(k); if (bits <= 0) - continue; /* ignore invalid formats */ + continue; if ((unsigned)bits < i->min || (unsigned)bits > i->max) snd_mask_reset(&m, k); } @@ -2042,7 +1926,7 @@ static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params, continue; bits = snd_pcm_format_physical_width(k); if (bits <= 0) - continue; /* ignore invalid formats */ + continue; if (t.min > (unsigned)bits) t.min = bits; if (t.max < (unsigned)bits) @@ -2093,7 +1977,7 @@ int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream) struct snd_pcm_hw_constraints *constrs = NULL; int k, err; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2225,7 +2109,7 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream) int err; unsigned int mask = 0; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2287,7 +2171,7 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream) if (err < 0) return err; - /* FIXME: remove */ + if (runtime->dma_bytes) { err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes); if (err < 0) @@ -2302,7 +2186,7 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream) return err; } - /* FIXME: this belong to lowlevel */ + snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE); return 0; @@ -2315,7 +2199,7 @@ static void pcm_release_private(struct snd_pcm_substream *substream) void snd_pcm_release_substream(struct snd_pcm_substream *substream) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return ; @@ -2522,7 +2406,7 @@ static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *subst snd_pcm_sframes_t ret; snd_pcm_sframes_t hw_avail; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2539,7 +2423,7 @@ static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *subst case SNDRV_PCM_STATE_RUNNING: if (snd_pcm_update_hw_ptr(substream) >= 0) break; - /* Fall through */ + case SNDRV_PCM_STATE_XRUN: ret = -EPIPE; goto __end; @@ -2577,7 +2461,7 @@ static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substr snd_pcm_sframes_t ret; snd_pcm_sframes_t hw_avail; - /* if substream is NULL, return error. 
*/ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2594,7 +2478,7 @@ static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substr case SNDRV_PCM_STATE_RUNNING: if (snd_pcm_update_hw_ptr(substream) >= 0) break; - /* Fall through */ + case SNDRV_PCM_STATE_XRUN: ret = -EPIPE; goto __end; @@ -2631,7 +2515,7 @@ static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *subs snd_pcm_sframes_t ret; snd_pcm_sframes_t avail; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2649,7 +2533,7 @@ static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *subs case SNDRV_PCM_STATE_RUNNING: if (snd_pcm_update_hw_ptr(substream) >= 0) break; - /* Fall through */ + case SNDRV_PCM_STATE_XRUN: ret = -EPIPE; goto __end; @@ -2686,7 +2570,7 @@ static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *subst snd_pcm_sframes_t ret; snd_pcm_sframes_t avail; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2704,7 +2588,7 @@ static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *subst case SNDRV_PCM_STATE_RUNNING: if (snd_pcm_update_hw_ptr(substream) >= 0) break; - /* Fall through */ + case SNDRV_PCM_STATE_XRUN: ret = -EPIPE; goto __end; @@ -2738,7 +2622,7 @@ static int snd_pcm_hwsync(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = NULL; int err; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2752,7 +2636,7 @@ static int snd_pcm_hwsync(struct snd_pcm_substream *substream) case SNDRV_PCM_STATE_RUNNING: if ((err = snd_pcm_update_hw_ptr(substream)) < 0) break; - /* Fall through */ + case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_SUSPENDED: err = 0; @@ -2776,7 +2660,7 @@ static int snd_pcm_delay(struct snd_pcm_substream *substream, int err; snd_pcm_sframes_t n = 0; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2790,7 +2674,7 @@ static int snd_pcm_delay(struct snd_pcm_substream *substream, case SNDRV_PCM_STATE_RUNNING: if ((err = snd_pcm_update_hw_ptr(substream)) < 0) break; - /* Fall through */ + case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_SUSPENDED: err = 0; @@ -2826,7 +2710,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, int err; snd_pcm_uframes_t hw_avail; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2878,7 +2762,7 @@ static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg) struct snd_pcm_runtime *runtime = NULL; int arg; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2897,7 +2781,7 @@ static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg) static int snd_pcm_enable_effect(struct snd_pcm_substream *substream, int __user *_arg) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2909,7 +2793,7 @@ static int snd_pcm_common_ioctl1(struct file *file, struct snd_pcm_substream *substream, unsigned int cmd, void __user *arg) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -2918,7 +2802,7 @@ static int snd_pcm_common_ioctl1(struct file *file, return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? 
-EFAULT : 0; case SNDRV_PCM_IOCTL_INFO: return snd_pcm_info_user(substream, arg); - case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */ + case SNDRV_PCM_IOCTL_TSTAMP: return 0; case SNDRV_PCM_IOCTL_TTSTAMP: return snd_pcm_tstamp(substream, arg); @@ -2991,7 +2875,7 @@ static int snd_pcm_playback_ioctl1(struct file *file, struct snd_pcm_substream *substream, unsigned int cmd, void __user *arg) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -3075,7 +2959,7 @@ static int snd_pcm_capture_ioctl1(struct file *file, struct snd_pcm_substream *substream, unsigned int cmd, void __user *arg) { - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -3189,7 +3073,7 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, mm_segment_t fs; int result; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -3358,7 +3242,7 @@ static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait) mask = POLLOUT | POLLWRNORM; break; } - /* Fall through */ + case SNDRV_PCM_STATE_DRAINING: mask = 0; break; @@ -3404,7 +3288,7 @@ static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait) mask = POLLIN | POLLRDNORM; break; } - /* Fall through */ + default: mask = POLLIN | POLLRDNORM | POLLERR; break; @@ -3413,18 +3297,8 @@ static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait) return mask; } -/* - * mmap support - */ -/* - * Only on coherent architectures, we can mmap the status and the control records - * for effcient data transfer. On others, we have to use HWSYNC ioctl... - */ #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA) -/* - * mmap status record - */ static int snd_pcm_mmap_status_fault(struct vm_area_struct *area, struct vm_fault *vmf) { @@ -3455,7 +3329,7 @@ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_status; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -3464,9 +3338,6 @@ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file return 0; } -/* - * mmap control record - */ static int snd_pcm_mmap_control_fault(struct vm_area_struct *area, struct vm_fault *vmf) { @@ -3497,7 +3368,7 @@ static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_control; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -3505,10 +3376,7 @@ static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file area->vm_flags |= VM_RESERVED; return 0; } -#else /* ! coherent mmap */ -/* - * don't support mmap for status and control records. - */ +#else static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { @@ -3519,14 +3387,14 @@ static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file { return -ENXIO; } -#endif /* coherent mmap */ +#endif static inline struct page * snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs) { void *vaddr = NULL; - /* if substream is NULL, return error. 
*/ + if (PCM_RUNTIME_CHECK(substream)) return 0; @@ -3540,18 +3408,12 @@ snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs) if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) { dma_addr_t addr = substream->runtime->dma_addr + ofs; addr -= get_dma_offset(substream->dma_buffer.dev.dev); - /* assume dma_handle set via pfn_to_phys() in - * mm/dma-noncoherent.c - */ return pfn_to_page(addr >> PAGE_SHIFT); } #endif return virt_to_page(vaddr); } -/* - * fault callback for mmapping a RAM page - */ static int snd_pcm_mmap_data_fault(struct vm_area_struct *area, struct vm_fault *vmf) { @@ -3591,22 +3453,18 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = { }; #ifndef ARCH_HAS_DMA_MMAP_COHERENT -/* This should be defined / handled globally! */ #ifdef CONFIG_ARM #define ARCH_HAS_DMA_MMAP_COHERENT #endif #endif -/* - * mmap the DMA buffer on RAM - */ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *area) { area->vm_flags |= VM_RESERVED; #ifdef ARCH_HAS_DMA_MMAP_COHERENT - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -3621,16 +3479,13 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV && !plat_device_is_coherent(substream->dma_buffer.dev.dev)) area->vm_page_prot = pgprot_noncached(area->vm_page_prot); -#endif /* ARCH_HAS_DMA_MMAP_COHERENT */ - /* mmap with fault handler */ +#endif + area->vm_ops = &snd_pcm_vm_ops_data_fault; return 0; } EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap); -/* - * mmap the DMA buffer on I/O memory area - */ #if SNDRV_PCM_INFO_MMAP_IOMEM int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_struct *area) @@ -3643,7 +3498,7 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, size = area->vm_end - area->vm_start; offset = area->vm_pgoff << PAGE_SHIFT; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -3655,11 +3510,8 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, } EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); -#endif /* SNDRV_PCM_INFO_MMAP */ +#endif -/* - * mmap DMA buffer - */ int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { @@ -3669,7 +3521,7 @@ int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, size_t dma_bytes; int err; - /* if substream is NULL, return error. */ + if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; @@ -3750,18 +3602,12 @@ static int snd_pcm_fasync(int fd, struct file * file, int on) return fasync_helper(fd, file, on, &runtime->fasync); } -/* - * ioctl32 compat - */ #ifdef CONFIG_COMPAT #include "pcm_compat.c" #else #define snd_pcm_ioctl_compat NULL #endif -/* - * To be removed helpers to keep binary compatibility - */ #ifdef CONFIG_SND_SUPPORT_OLD_API #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5)) @@ -3812,14 +3658,14 @@ static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old *oparams = NULL; int err; + + if (PCM_RUNTIME_CHECK(substream)) + return -ENXIO; + params = kmalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; - /* if substream is NULL, return error. 
*/ - if (PCM_RUNTIME_CHECK(substream)) - return -ENXIO; - oparams = memdup_user(_oparams, sizeof(*oparams)); if (IS_ERR(oparams)) { err = PTR_ERR(oparams); @@ -3846,14 +3692,14 @@ static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old *oparams = NULL; int err; + + if (PCM_RUNTIME_CHECK(substream)) + return -ENXIO; + params = kmalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; - /* if substream is NULL, return error. */ - if (PCM_RUNTIME_CHECK(substream)) - return -ENXIO; - oparams = memdup_user(_oparams, sizeof(*oparams)); if (IS_ERR(oparams)) { err = PTR_ERR(oparams); @@ -3872,7 +3718,7 @@ static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream, kfree(params); return err; } -#endif /* CONFIG_SND_SUPPORT_OLD_API */ +#endif #ifndef CONFIG_MMU static unsigned long snd_pcm_get_unmapped_area(struct file *file, @@ -3899,9 +3745,6 @@ static unsigned long snd_pcm_get_unmapped_area(struct file *file, # define snd_pcm_get_unmapped_area NULL #endif -/* - * Register section - */ const struct file_operations snd_pcm_f_ops[2] = { { diff --git a/sound/soc/codecs/cs8427.c b/sound/soc/codecs/cs8427.c index 06863ce2..d6f0fa98 100644 --- a/sound/soc/codecs/cs8427.c +++ b/sound/soc/codecs/cs8427.c @@ -309,7 +309,7 @@ static int snd_cs8427_send_corudata(struct cs8427 *obj, char *hw_data = udata ? chip->playback.hw_udata : chip->playback.hw_status; char data[32]; - int err, idx; + int err; unsigned char addr = 0; int ret = 0; @@ -321,6 +321,9 @@ static int snd_cs8427_send_corudata(struct cs8427 *obj, memcpy(hw_data, ndata, count); if (udata) { memset(data, 0, sizeof(data)); + if (count > sizeof(data)) { + count = sizeof(data); + } if (memcmp(hw_data, data, count) == 0) { chip->regmap[CS8427_REG_UDATABUF] &= ~CS8427_UBMMASK; chip->regmap[CS8427_REG_UDATABUF] |= CS8427_UBMZEROS | @@ -330,7 +333,6 @@ static int snd_cs8427_send_corudata(struct cs8427 *obj, return err < 0 ? 
err : 0; } } - idx = 0; memcpy(data, ndata, CHANNEL_STATUS_SIZE); addr = 0x20; diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c index 7a82e9db..47ada90b 100644 --- a/sound/soc/msm/qdsp6/q6asm.c +++ b/sound/soc/msm/qdsp6/q6asm.c @@ -561,7 +561,7 @@ int q6asm_audio_client_buf_alloc(unsigned int dir, buf[cnt].handle = ion_alloc (buf[cnt].client, bufsz_4k_aligned, SZ_4K, - (0x1 << ION_AUDIO_HEAP_ID)); + (0x1 << ION_AUDIO_HEAP_ID), 0); if (IS_ERR_OR_NULL((void *) buf[cnt].handle)) { pr_err("%s: ION memory allocation for AUDIO failed\n", @@ -583,8 +583,7 @@ int q6asm_audio_client_buf_alloc(unsigned int dir, } buf[cnt].data = ion_map_kernel - (buf[cnt].client, buf[cnt].handle, - 0); + (buf[cnt].client, buf[cnt].handle); if (IS_ERR_OR_NULL((void *) buf[cnt].data)) { pr_err("%s: ION memory mapping for AUDIO failed\n", @@ -697,7 +696,7 @@ int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir, goto fail; } buf[0].handle = ion_alloc(buf[0].client, bufsz * bufcnt, SZ_4K, - (0x1 << ION_AUDIO_HEAP_ID)); + (0x1 << ION_AUDIO_HEAP_ID), 0); if (IS_ERR_OR_NULL((void *) buf[0].handle)) { pr_err("%s: ION memory allocation for AUDIO failed\n", __func__); @@ -714,7 +713,7 @@ int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir, goto fail; } - buf[0].data = ion_map_kernel(buf[0].client, buf[0].handle, 0); + buf[0].data = ion_map_kernel(buf[0].client, buf[0].handle); if (IS_ERR_OR_NULL((void *) buf[0].data)) { pr_err("%s: ION memory mapping for AUDIO failed\n", __func__); mutex_unlock(&ac->cmd_lock); diff --git a/sound/soc/msm/qdsp6/q6voice.c b/sound/soc/msm/qdsp6/q6voice.c index 7eac674e..064517e5 100644 --- a/sound/soc/msm/qdsp6/q6voice.c +++ b/sound/soc/msm/qdsp6/q6voice.c @@ -4121,7 +4121,7 @@ static int __init voice_init(void) goto cont; } common.cvp_cal.handle = ion_alloc(common.client, CVP_CAL_SIZE, SZ_4K, - ION_HEAP(ION_AUDIO_HEAP_ID)); + ION_HEAP(ION_AUDIO_HEAP_ID), 0); if (IS_ERR_OR_NULL((void *) common.cvp_cal.handle)) { pr_err("%s: ION memory allocation for CVP failed\n", __func__); @@ -4140,7 +4140,7 @@ static int __init voice_init(void) } common.cvp_cal.buf = ion_map_kernel(common.client, - common.cvp_cal.handle, 0); + common.cvp_cal.handle); if (IS_ERR_OR_NULL((void *) common.cvp_cal.buf)) { pr_err("%s: ION memory mapping for cvp failed\n", __func__); common.cvp_cal.buf = NULL; @@ -4151,7 +4151,7 @@ static int __init voice_init(void) memset((void *)common.cvp_cal.buf, 0, CVP_CAL_SIZE); common.cvs_cal.handle = ion_alloc(common.client, CVS_CAL_SIZE, SZ_4K, - ION_HEAP(ION_AUDIO_HEAP_ID)); + ION_HEAP(ION_AUDIO_HEAP_ID), 0); if (IS_ERR_OR_NULL((void *) common.cvs_cal.handle)) { pr_err("%s: ION memory allocation for CVS failed\n", __func__); @@ -4168,7 +4168,7 @@ static int __init voice_init(void) } common.cvs_cal.buf = ion_map_kernel(common.client, - common.cvs_cal.handle, 0); + common.cvs_cal.handle); if (IS_ERR_OR_NULL((void *) common.cvs_cal.buf)) { pr_err("%s: ION memory mapping for cvs failed\n", __func__); common.cvs_cal.buf = NULL; diff --git a/sound/soc/msm/qdsp6/q6voice.h b/sound/soc/msm/qdsp6/q6voice.h index 24d52f24..7817dbb0 100644 --- a/sound/soc/msm/qdsp6/q6voice.h +++ b/sound/soc/msm/qdsp6/q6voice.h @@ -13,7 +13,7 @@ #define __QDSP6VOICE_H__ #include -#include +#include #define MAX_VOC_PKT_SIZE 642 #define SESSION_NAME_LEN 21 diff --git a/sound/usb/card.c b/sound/usb/card.c index fd1d7b41..49de9fd0 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -269,7 +269,9 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) break; } } 
- + + switch_set_state(usbaudiosdev, 1); + return 0; } @@ -290,7 +292,7 @@ static int snd_usb_audio_dev_free(struct snd_device *device) { struct snd_usb_audio *chip = device->device_data; #ifdef CONFIG_SUPPORT_USB_SPEAKER - headset_ext_detect(USB_NO_HEADSET); + #endif return snd_usb_audio_free(chip); } @@ -428,9 +430,8 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx, } snd_usb_audio_create_proc(chip); - switch_set_state(usbaudiosdev, 1); #ifdef CONFIG_SUPPORT_USB_SPEAKER - headset_ext_detect(USB_AUDIO_OUT_DGTL); + #endif *rchip = chip; return 0;