Diffstat (limited to 'include')
39 files changed, 196 insertions, 101 deletions
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h index f6e7e234c089..70a7991f784f 100644 --- a/include/drm/drm_pagemap.h +++ b/include/drm/drm_pagemap.h @@ -8,6 +8,7 @@ #define NR_PAGES(order) (1U << (order)) +struct dma_fence; struct drm_pagemap; struct drm_pagemap_zdd; struct device; @@ -174,6 +175,8 @@ struct drm_pagemap_devmem_ops { * @pages: Pointer to array of device memory pages (destination) * @pagemap_addr: Pointer to array of DMA information (source) * @npages: Number of pages to copy + * @pre_migrate_fence: dma-fence to wait for before migration start. + * May be NULL. * * Copy pages to device memory. If the order of a @pagemap_addr entry * is greater than 0, the entry is populated but subsequent entries @@ -183,13 +186,16 @@ struct drm_pagemap_devmem_ops { */ int (*copy_to_devmem)(struct page **pages, struct drm_pagemap_addr *pagemap_addr, - unsigned long npages); + unsigned long npages, + struct dma_fence *pre_migrate_fence); /** * @copy_to_ram: Copy to system RAM (required for migration) * @pages: Pointer to array of device memory pages (source) * @pagemap_addr: Pointer to array of DMA information (destination) * @npages: Number of pages to copy + * @pre_migrate_fence: dma-fence to wait for before migration start. + * May be NULL. * * Copy pages to system RAM. If the order of a @pagemap_addr entry * is greater than 0, the entry is populated but subsequent entries @@ -199,7 +205,8 @@ struct drm_pagemap_devmem_ops { */ int (*copy_to_ram)(struct page **pages, struct drm_pagemap_addr *pagemap_addr, - unsigned long npages); + unsigned long npages, + struct dma_fence *pre_migrate_fence); }; /** @@ -212,6 +219,8 @@ struct drm_pagemap_devmem_ops { * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to. * @size: Size of device memory allocation * @timeslice_expiration: Timeslice expiration in jiffies + * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts. + * (May be NULL). 
*/ struct drm_pagemap_devmem { struct device *dev; @@ -221,6 +230,7 @@ struct drm_pagemap_devmem { struct drm_pagemap *dpagemap; size_t size; u64 timeslice_expiration; + struct dma_fence *pre_migrate_fence; }; int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation, @@ -238,7 +248,8 @@ struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page); void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation, struct device *dev, struct mm_struct *mm, const struct drm_pagemap_devmem_ops *ops, - struct drm_pagemap *dpagemap, size_t size); + struct drm_pagemap *dpagemap, size_t size, + struct dma_fence *pre_migrate_fence); int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap, unsigned long start, unsigned long end, diff --git a/include/kunit/run-in-irq-context.h b/include/kunit/run-in-irq-context.h index 108e96433ea4..c89b1b1b12dd 100644 --- a/include/kunit/run-in-irq-context.h +++ b/include/kunit/run-in-irq-context.h @@ -20,8 +20,8 @@ struct kunit_irq_test_state { bool task_func_reported_failure; bool hardirq_func_reported_failure; bool softirq_func_reported_failure; - unsigned long hardirq_func_calls; - unsigned long softirq_func_calls; + atomic_t hardirq_func_calls; + atomic_t softirq_func_calls; struct hrtimer timer; struct work_struct bh_work; }; @@ -32,7 +32,7 @@ static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer) container_of(timer, typeof(*state), timer); WARN_ON_ONCE(!in_hardirq()); - state->hardirq_func_calls++; + atomic_inc(&state->hardirq_func_calls); if (!state->func(state->test_specific_state)) state->hardirq_func_reported_failure = true; @@ -48,7 +48,7 @@ static void kunit_irq_test_bh_work_func(struct work_struct *work) container_of(work, typeof(*state), bh_work); WARN_ON_ONCE(!in_serving_softirq()); - state->softirq_func_calls++; + atomic_inc(&state->softirq_func_calls); if (!state->func(state->test_specific_state)) state->softirq_func_reported_failure = true; @@ -59,7 +59,10 @@ static void kunit_irq_test_bh_work_func(struct work_struct *work) * hardirq context concurrently, and reports a failure to KUnit if any * invocation of @func in any context returns false. @func is passed * @test_specific_state as its argument. At most 3 invocations of @func will - * run concurrently: one in each of task, softirq, and hardirq context. + * run concurrently: one in each of task, softirq, and hardirq context. @func + * will continue running until either @max_iterations calls have been made (so + * long as at least one each runs in task, softirq, and hardirq contexts), or + * one second has passed. * * The main purpose of this interrupt context testing is to validate fallback * code paths that run in contexts where the normal code path cannot be used, @@ -85,6 +88,8 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *), .test_specific_state = test_specific_state, }; unsigned long end_jiffies; + int hardirq_calls, softirq_calls; + bool allctx = false; /* * Set up a hrtimer (the way we access hardirq context) and a work @@ -94,14 +99,25 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *), CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); INIT_WORK_ONSTACK(&state.bh_work, kunit_irq_test_bh_work_func); - /* Run for up to max_iterations or 1 second, whichever comes first. */ + /* + * Run for up to max_iterations (including at least one task, softirq, + * and hardirq), or 1 second, whichever comes first. 
+ */ end_jiffies = jiffies + HZ; hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL, HRTIMER_MODE_REL_HARD); - for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies); - i++) { + for (int task_calls = 0, calls = 0; + ((calls < max_iterations) || !allctx) && + !time_after(jiffies, end_jiffies); + task_calls++) { if (!func(test_specific_state)) state.task_func_reported_failure = true; + + hardirq_calls = atomic_read(&state.hardirq_func_calls); + softirq_calls = atomic_read(&state.softirq_func_calls); + calls = task_calls + hardirq_calls + softirq_calls; + allctx = (task_calls > 0) && (hardirq_calls > 0) && + (softirq_calls > 0); } /* Cancel the timer and work. */ @@ -109,21 +125,18 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *), flush_work(&state.bh_work); /* Sanity check: the timer and BH functions should have been run. */ - KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0, + KUNIT_EXPECT_GT_MSG(test, atomic_read(&state.hardirq_func_calls), 0, "Timer function was not called"); - KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0, + KUNIT_EXPECT_GT_MSG(test, atomic_read(&state.softirq_func_calls), 0, "BH work function was not called"); - /* Check for incorrect hash values reported from any context. */ - KUNIT_EXPECT_FALSE_MSG( - test, state.task_func_reported_failure, - "Incorrect hash values reported from task context"); - KUNIT_EXPECT_FALSE_MSG( - test, state.hardirq_func_reported_failure, - "Incorrect hash values reported from hardirq context"); - KUNIT_EXPECT_FALSE_MSG( - test, state.softirq_func_reported_failure, - "Incorrect hash values reported from softirq context"); + /* Check for failure reported from any context. */ + KUNIT_EXPECT_FALSE_MSG(test, state.task_func_reported_failure, + "Failure reported from task context"); + KUNIT_EXPECT_FALSE_MSG(test, state.hardirq_func_reported_failure, + "Failure reported from hardirq context"); + KUNIT_EXPECT_FALSE_MSG(test, state.softirq_func_reported_failure, + "Failure reported from softirq context"); } #endif /* _KUNIT_RUN_IN_IRQ_CONTEXT_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index ad2d57908c1c..c75a9b3672aa 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -46,6 +46,21 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs) #define bio_data_dir(bio) \ (op_is_write(bio_op(bio)) ? WRITE : READ) +static inline bool bio_flagged(const struct bio *bio, unsigned int bit) +{ + return bio->bi_flags & (1U << bit); +} + +static inline void bio_set_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_flags |= (1U << bit); +} + +static inline void bio_clear_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_flags &= ~(1U << bit); +} + /* * Check whether this bio carries any data or not. A NULL bio is allowed. 
*/ @@ -225,21 +240,6 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count) atomic_set(&bio->__bi_cnt, count); } -static inline bool bio_flagged(struct bio *bio, unsigned int bit) -{ - return bio->bi_flags & (1U << bit); -} - -static inline void bio_set_flag(struct bio *bio, unsigned int bit) -{ - bio->bi_flags |= (1U << bit); -} - -static inline void bio_clear_flag(struct bio *bio, unsigned int bit) -{ - bio->bi_flags &= ~(1U << bit); -} - static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6498be4c44f8..e5be698256d1 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1283,6 +1283,8 @@ struct bpf_ksym { struct list_head lnode; struct latch_tree_node tnode; bool prog; + u32 fp_start; + u32 fp_end; }; enum bpf_tramp_prog_type { @@ -1511,6 +1513,7 @@ void bpf_image_ksym_add(struct bpf_ksym *ksym); void bpf_image_ksym_del(struct bpf_ksym *ksym); void bpf_ksym_add(struct bpf_ksym *ksym); void bpf_ksym_del(struct bpf_ksym *ksym); +bool bpf_has_frame_pointer(unsigned long ip); int bpf_jit_charge_modmem(u32 size); void bpf_jit_uncharge_modmem(u32 size); bool bpf_prog_has_trampoline(const struct bpf_prog *prog); diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 107ce05bd16e..7edf1a07b535 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -145,6 +145,7 @@ */ #define ASM_INPUT_G "ir" #define ASM_INPUT_RM "r" +#define ASM_OUTPUT_RM "=r" /* * Declare compiler support for __typeof_unqual__() operator. diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 1280693766b9..d3318a3c2577 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -548,11 +548,12 @@ struct ftrace_likely_data { /* * Clang has trouble with constraints with multiple - * alternative behaviors (mainly "g" and "rm"). + * alternative behaviors ("g" , "rm" and "=rm"). 
*/ #ifndef ASM_INPUT_G #define ASM_INPUT_G "g" #define ASM_INPUT_RM "rm" + #define ASM_OUTPUT_RM "=rm" #endif #ifdef CONFIG_CC_HAS_ASM_INLINE diff --git a/include/linux/fs.h b/include/linux/fs.h index 04ceeca12a0d..f5c9cf28c4dc 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3247,7 +3247,7 @@ struct offset_ctx { void simple_offset_init(struct offset_ctx *octx); int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry); void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry); -int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry, +void simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); int simple_offset_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 0bd581003cd5..60de63e46b33 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -44,6 +44,7 @@ struct gen_pool; * @nr: The number of zeroed bits we're looking for * @data: optional additional data used by the callback * @pool: the pool being allocated from + * @start_addr: start address of memory chunk */ typedef unsigned long (*genpool_algo_t)(unsigned long *map, unsigned long size, diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index e9ade2ff4af6..f479ef5b3341 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -214,10 +214,14 @@ void rapl_remove_package(struct rapl_package *rp); #ifdef CONFIG_PERF_EVENTS int rapl_package_add_pmu(struct rapl_package *rp); +int rapl_package_add_pmu_locked(struct rapl_package *rp); void rapl_package_remove_pmu(struct rapl_package *rp); +void rapl_package_remove_pmu_locked(struct rapl_package *rp); #else static inline int rapl_package_add_pmu(struct rapl_package *rp) { return 0; } +static inline int rapl_package_add_pmu_locked(struct rapl_package *rp) { return 0; } static inline void rapl_package_remove_pmu(struct rapl_package *rp) { } +static inline void rapl_package_remove_pmu_locked(struct rapl_package *rp) { } #endif #endif /* __INTEL_RAPL_H__ */ diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h index 53f6fe88e369..1a0f357c2427 100644 --- a/include/linux/intel_vsec.h +++ b/include/linux/intel_vsec.h @@ -80,13 +80,13 @@ enum intel_vsec_quirks { /** * struct pmt_callbacks - Callback infrastructure for PMT devices - * ->read_telem() when specified, called by client driver to access PMT data (instead - * of direct copy). - * @pdev: PCI device reference for the callback's use - * @guid: ID of data to acccss - * @data: buffer for the data to be copied - * @off: offset into the requested buffer - * @count: size of buffer + * @read_telem: when specified, called by client driver to access PMT + * data (instead of direct copy). 
+ * * pdev: PCI device reference for the callback's use + * * guid: ID of data to acccss + * * data: buffer for the data to be copied + * * off: offset into the requested buffer + * * count: size of buffer */ struct pmt_callbacks { int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count); @@ -120,7 +120,7 @@ struct intel_vsec_platform_info { }; /** - * struct intel_sec_device - Auxbus specific device information + * struct intel_vsec_device - Auxbus specific device information * @auxdev: auxbus device struct for auxbus access * @pcidev: pci device associated with the device * @resource: any resources shared by the parent @@ -128,6 +128,7 @@ struct intel_vsec_platform_info { * @num_resources: number of resources * @id: xarray id * @priv_data: any private data needed + * @priv_data_size: size of private data area * @quirks: specified quirks * @base_addr: base address of entries (if specified) * @cap_id: the enumerated id of the vsec feature diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index e1adb0d20a0a..a3e8ddc9b380 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -424,11 +424,17 @@ struct io_ring_ctx { struct user_struct *user; struct mm_struct *mm_account; + /* + * List of tctx nodes for this ctx, protected by tctx_lock. For + * cancelation purposes, nests under uring_lock. + */ + struct list_head tctx_list; + struct mutex tctx_lock; + /* ctx exit and cancelation */ struct llist_head fallback_llist; struct delayed_work fallback_work; struct work_struct exit_work; - struct list_head tctx_list; struct completion ref_comp; /* io-wq management, e.g. thread count */ diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h index 6ab913e57da0..d26d1b1bcbfb 100644 --- a/include/linux/irq-entry-common.h +++ b/include/linux/irq-entry-common.h @@ -110,7 +110,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs) static inline void local_irq_enable_exit_to_user(unsigned long ti_work); #ifndef local_irq_enable_exit_to_user -static inline void local_irq_enable_exit_to_user(unsigned long ti_work) +static __always_inline void local_irq_enable_exit_to_user(unsigned long ti_work) { local_irq_enable(); } @@ -125,7 +125,7 @@ static inline void local_irq_enable_exit_to_user(unsigned long ti_work) static inline void local_irq_disable_exit_to_user(void); #ifndef local_irq_disable_exit_to_user -static inline void local_irq_disable_exit_to_user(void) +static __always_inline void local_irq_disable_exit_to_user(void) { local_irq_disable(); } diff --git a/include/linux/kasan.h b/include/linux/kasan.h index f335c1d7b61d..9c6ac4b62eb9 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -28,6 +28,7 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t; #define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u) #define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u) #define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u) +#define KASAN_VMALLOC_KEEP_TAG ((__force kasan_vmalloc_flags_t)0x08u) #define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply exsiting page range */ #define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */ @@ -630,6 +631,16 @@ static __always_inline void kasan_poison_vmalloc(const void *start, __kasan_poison_vmalloc(start, size); } +void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + kasan_vmalloc_flags_t flags); +static __always_inline void +kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + 
kasan_vmalloc_flags_t flags) +{ + if (kasan_enabled()) + __kasan_unpoison_vmap_areas(vms, nr_vms, flags); +} + #else /* CONFIG_KASAN_VMALLOC */ static inline void kasan_populate_early_vm_area_shadow(void *start, @@ -654,6 +665,11 @@ static inline void *kasan_unpoison_vmalloc(const void *start, static inline void kasan_poison_vmalloc(const void *start, unsigned long size) { } +static __always_inline void +kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + kasan_vmalloc_flags_t flags) +{ } + #endif /* CONFIG_KASAN_VMALLOC */ #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ff7e231b0485..8a22bc9b8c6c 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -530,7 +530,7 @@ extern bool kexec_file_dbg_print; #define kexec_dprintk(fmt, arg...) \ do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0) -extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size); +extern void *kimage_map_segment(struct kimage *image, int idx); extern void kimage_unmap_segment(void *buffer); #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; @@ -540,7 +540,7 @@ static inline void __crash_kexec(struct pt_regs *regs) { } static inline void crash_kexec(struct pt_regs *regs) { } static inline int kexec_should_crash(struct task_struct *p) { return 0; } static inline int kexec_crash_loaded(void) { return 0; } -static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size) +static inline void *kimage_map_segment(struct kimage *image, int idx) { return NULL; } static inline void kimage_unmap_segment(void *buffer) { } #define kexec_in_progress false diff --git a/include/linux/leafops.h b/include/linux/leafops.h index cfafe7a5e7b1..a9ff94b744f2 100644 --- a/include/linux/leafops.h +++ b/include/linux/leafops.h @@ -133,7 +133,7 @@ static inline bool softleaf_is_none(softleaf_t entry) /** * softleaf_type() - Identify the type of leaf entry. - * @enntry: Leaf entry. + * @entry: Leaf entry. * * Returns: the leaf entry type associated with @entry. */ @@ -534,7 +534,7 @@ static inline bool pte_is_uffd_wp_marker(pte_t pte) /** * pte_is_uffd_marker() - Does this PTE entry encode a userfault-specific marker * leaf entry? - * @entry: Leaf entry. + * @pte: PTE entry. * * It's useful to be able to determine which leaf entries encode UFFD-specific * markers so we can handle these correctly. diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h index bc326503d2d2..7b5e11cf905f 100644 --- a/include/linux/memory-failure.h +++ b/include/linux/memory-failure.h @@ -9,6 +9,8 @@ struct pfn_address_space; struct pfn_address_space { struct interval_tree_node node; struct address_space *mapping; + int (*pfn_to_vma_pgoff)(struct vm_area_struct *vma, + unsigned long pfn, pgoff_t *pgoff); }; int register_pfn_address_space(struct pfn_address_space *pfn_space); diff --git a/include/linux/mm.h b/include/linux/mm.h index 15076261d0c2..6f959d8ca4b4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2459,10 +2459,10 @@ static inline int folio_expected_ref_count(const struct folio *folio) if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio))) return 0; - if (folio_test_anon(folio)) { - /* One reference per page from the swapcache. */ - ref_count += folio_test_swapcache(folio) << order; - } else { + /* One reference per page from the swapcache. 
*/ + ref_count += folio_test_swapcache(folio) << order; + + if (!folio_test_anon(folio)) { /* One reference per page from the pagecache. */ ref_count += !!folio->mapping << order; /* One reference from PG_private. */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 9f6de068295d..42af2292951d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1631,7 +1631,6 @@ enum tlb_flush_reason { TLB_LOCAL_MM_SHOOTDOWN, TLB_REMOTE_SEND_IPI, TLB_REMOTE_WRONG_CPU, - NR_TLB_FLUSH_REASONS, }; /** diff --git a/include/linux/property.h b/include/linux/property.h index 272bfbdea7bf..e30ef23a9af3 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -371,6 +371,7 @@ struct software_node_ref_args { (const struct software_node_ref_args) { \ .swnode = _Generic(_ref_, \ const struct software_node *: _ref_, \ + struct software_node *: _ref_, \ default: NULL), \ .fwnode = _Generic(_ref_, \ struct fwnode_handle *: _ref_, \ diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index 706877f998ff..1ac86896875c 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -145,6 +145,13 @@ struct vfio_pci_core_device { struct list_head dmabufs; }; +enum vfio_pci_io_width { + VFIO_PCI_IO_WIDTH_1 = 1, + VFIO_PCI_IO_WIDTH_2 = 2, + VFIO_PCI_IO_WIDTH_4 = 4, + VFIO_PCI_IO_WIDTH_8 = 8, +}; + /* Will be exported for vfio pci drivers usage */ int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev, unsigned int type, unsigned int subtype, @@ -188,7 +195,8 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, void __iomem *io, char __user *buf, loff_t off, size_t count, size_t x_start, - size_t x_end, bool iswrite); + size_t x_end, bool iswrite, + enum vfio_pci_io_width max_width); bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev); bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt, loff_t reg_start, size_t reg_cnt, diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 132a474e5914..3626eb694728 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -13,6 +13,8 @@ #include <linux/completion.h> #include <linux/virtio_features.h> +struct module; + /** * struct virtqueue - a queue to register buffers for sending or receiving. 
* @list: the chain of virtqueues for this device diff --git a/include/linux/virtio_features.h b/include/linux/virtio_features.h index ea2ad8717882..ce59ea91f474 100644 --- a/include/linux/virtio_features.h +++ b/include/linux/virtio_features.h @@ -3,6 +3,8 @@ #define _LINUX_VIRTIO_FEATURES_H #include <linux/bits.h> +#include <linux/bug.h> +#include <linux/string.h> #define VIRTIO_FEATURES_U64S 2 #define VIRTIO_FEATURES_BITS (VIRTIO_FEATURES_U64S * 64) diff --git a/include/net/dsa.h b/include/net/dsa.h index cced1a866757..6b2b5ed64ea4 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -302,6 +302,7 @@ struct dsa_port { struct devlink_port devlink_port; struct phylink *pl; struct phylink_config pl_config; + netdevice_tracker conduit_tracker; struct dsa_lag *lag; struct net_device *hsr_dev; diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 0eccd9c3a883..365925c9d262 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -123,27 +123,15 @@ void inet_frags_fini(struct inet_frags *); int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net); -static inline void fqdir_pre_exit(struct fqdir *fqdir) -{ - /* Prevent creation of new frags. - * Pairs with READ_ONCE() in inet_frag_find(). - */ - WRITE_ONCE(fqdir->high_thresh, 0); - - /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire() - * and ip6frag_expire_frag_queue(). - */ - WRITE_ONCE(fqdir->dead, true); -} +void fqdir_pre_exit(struct fqdir *fqdir); void fqdir_exit(struct fqdir *fqdir); void inet_frag_kill(struct inet_frag_queue *q, int *refs); void inet_frag_destroy(struct inet_frag_queue *q); struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key); -/* Free all skbs in the queue; return the sum of their truesizes. */ -unsigned int inet_frag_rbtree_purge(struct rb_root *root, - enum skb_drop_reason reason); +void inet_frag_queue_flush(struct inet_frag_queue *q, + enum skb_drop_reason reason); static inline void inet_frag_putn(struct inet_frag_queue *q, int refs) { diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h index 38ef66826939..41d9fc6965f9 100644 --- a/include/net/ipv6_frag.h +++ b/include/net/ipv6_frag.h @@ -69,9 +69,6 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) int refs = 1; rcu_read_lock(); - /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */ - if (READ_ONCE(fq->q.fqdir->dead)) - goto out_rcu_unlock; spin_lock(&fq->q.lock); if (fq->q.flags & INET_FRAG_COMPLETE) @@ -80,6 +77,12 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) fq->q.flags |= INET_FRAG_DROP; inet_frag_kill(&fq->q, &refs); + /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */ + if (READ_ONCE(fq->q.fqdir->dead)) { + inet_frag_queue_flush(&fq->q, 0); + goto out; + } + dev = dev_get_by_index_rcu(net, fq->iif); if (!dev) goto out; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index fab7dc73f738..0e266c2d0e7f 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1091,6 +1091,29 @@ struct nft_rule_blob { __attribute__((aligned(__alignof__(struct nft_rule_dp)))); }; +enum nft_chain_types { + NFT_CHAIN_T_DEFAULT = 0, + NFT_CHAIN_T_ROUTE, + NFT_CHAIN_T_NAT, + NFT_CHAIN_T_MAX +}; + +/** + * struct nft_chain_validate_state - validation state + * + * If a chain is encountered again during table validation it is + * possible to avoid revalidation provided the calling context is + * compatible. This structure stores relevant calling context of + * previous validations. 
+ * + * @hook_mask: the hook numbers and locations the chain is linked to + * @depth: the deepest call chain level the chain is linked to + */ +struct nft_chain_validate_state { + u8 hook_mask[NFT_CHAIN_T_MAX]; + u8 depth; +}; + /** * struct nft_chain - nf_tables chain * @@ -1109,6 +1132,7 @@ struct nft_rule_blob { * @udlen: user data length * @udata: user data in the chain * @blob_next: rule blob pointer to the next in the chain + * @vstate: validation state */ struct nft_chain { struct nft_rule_blob __rcu *blob_gen_0; @@ -1128,9 +1152,10 @@ struct nft_chain { /* Only used during control plane commit phase: */ struct nft_rule_blob *blob_next; + struct nft_chain_validate_state vstate; }; -int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain); +int nft_chain_validate(const struct nft_ctx *ctx, struct nft_chain *chain); int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set, const struct nft_set_iter *iter, struct nft_elem_priv *elem_priv); @@ -1138,13 +1163,6 @@ int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set); int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain); void nf_tables_unbind_chain(const struct nft_ctx *ctx, struct nft_chain *chain); -enum nft_chain_types { - NFT_CHAIN_T_DEFAULT = 0, - NFT_CHAIN_T_ROUTE, - NFT_CHAIN_T_NAT, - NFT_CHAIN_T_MAX -}; - /** * struct nft_chain_type - nf_tables chain type info * diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 90d73b9bddab..0519afd7217f 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -203,6 +203,8 @@ struct snd_soc_acpi_link_adr { * @mach: the pointer of the machine driver * @prefix: the prefix of the topology file name. Typically, it is the path. * @tplg_files: the pointer of the array of the topology file names. + * @best_effort: ignore non supported links and try to build the card in best effort + * with supported links */ /* Descriptor for SST ASoC machine driver */ struct snd_soc_acpi_mach { @@ -224,7 +226,8 @@ struct snd_soc_acpi_mach { const u32 tplg_quirk_mask; int (*get_function_tplg_files)(struct snd_soc_card *card, const struct snd_soc_acpi_mach *mach, - const char *prefix, const char ***tplg_files); + const char *prefix, const char ***tplg_files, + bool best_effort); }; #define SND_SOC_ACPI_MAX_CODECS 3 diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 7e418f065b94..125bdc166bfe 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -224,7 +224,8 @@ DECLARE_EVENT_CLASS(btrfs__inode, __entry->generation = BTRFS_I(inode)->generation; __entry->last_trans = BTRFS_I(inode)->last_trans; __entry->logged_trans = BTRFS_I(inode)->logged_trans; - __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root); + __entry->root_objectid = BTRFS_I(inode)->root ? 
+ btrfs_root_id(BTRFS_I(inode)->root) : 0; ), TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%llu blocks=%llu " diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h index b4d8e7dc38f8..fb8369511685 100644 --- a/include/trace/events/tlb.h +++ b/include/trace/events/tlb.h @@ -12,8 +12,9 @@ EM( TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" ) \ EM( TLB_REMOTE_SHOOTDOWN, "remote shootdown" ) \ EM( TLB_LOCAL_SHOOTDOWN, "local shootdown" ) \ - EM( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" ) \ - EMe( TLB_REMOTE_SEND_IPI, "remote ipi send" ) + EM( TLB_LOCAL_MM_SHOOTDOWN, "local MM shootdown" ) \ + EM( TLB_REMOTE_SEND_IPI, "remote IPI send" ) \ + EMe( TLB_REMOTE_WRONG_CPU, "remote wrong CPU" ) /* * First define the enums in TLB_FLUSH_REASON to be exported to userspace diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h index c82233e950ac..a394b4d38e18 100644 --- a/include/trace/misc/nfs.h +++ b/include/trace/misc/nfs.h @@ -16,7 +16,6 @@ TRACE_DEFINE_ENUM(NFSERR_PERM); TRACE_DEFINE_ENUM(NFSERR_NOENT); TRACE_DEFINE_ENUM(NFSERR_IO); TRACE_DEFINE_ENUM(NFSERR_NXIO); -TRACE_DEFINE_ENUM(NFSERR_EAGAIN); TRACE_DEFINE_ENUM(NFSERR_ACCES); TRACE_DEFINE_ENUM(NFSERR_EXIST); TRACE_DEFINE_ENUM(NFSERR_XDEV); @@ -52,7 +51,6 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX); { NFSERR_NXIO, "NXIO" }, \ { ECHILD, "CHILD" }, \ { ETIMEDOUT, "TIMEDOUT" }, \ - { NFSERR_EAGAIN, "AGAIN" }, \ { NFSERR_ACCES, "ACCES" }, \ { NFSERR_EXIST, "EXIST" }, \ { NFSERR_XDEV, "XDEV" }, \ diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 47853659a705..f64dc0eff0e6 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -1463,6 +1463,7 @@ struct drm_xe_exec { /** @exec_queue_id: Exec queue ID for the batch buffer */ __u32 exec_queue_id; +#define DRM_XE_MAX_SYNCS 1024 /** @num_syncs: Amount of struct drm_xe_sync in array. 
*/ __u32 num_syncs; diff --git a/include/uapi/linux/energy_model.h b/include/uapi/linux/energy_model.h index 4ec4c0eabbbb..0bcad967854f 100644 --- a/include/uapi/linux/energy_model.h +++ b/include/uapi/linux/energy_model.h @@ -2,6 +2,7 @@ /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/em.yaml */ /* YNL-GEN uapi header */ +/* To regenerate run: tools/net/ynl/ynl-regen.sh */ #ifndef _UAPI_LINUX_ENERGY_MODEL_H #define _UAPI_LINUX_ENERGY_MODEL_H diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 30f3c9eaafaa..4bdb6a165987 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -891,6 +891,7 @@ #define ABS_VOLUME 0x20 #define ABS_PROFILE 0x21 +#define ABS_SND_PROFILE 0x22 #define ABS_MISC 0x28 @@ -1000,4 +1001,12 @@ #define SND_MAX 0x07 #define SND_CNT (SND_MAX+1) +/* + * ABS_SND_PROFILE values + */ + +#define SND_PROFILE_SILENT 0x00 +#define SND_PROFILE_VIBRATE 0x01 +#define SND_PROFILE_RING 0x02 + #endif diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 04eea6d1d0a9..72a5d030154e 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -40,6 +40,7 @@ #define MPTCP_PM_ADDR_FLAG_FULLMESH _BITUL(3) #define MPTCP_PM_ADDR_FLAG_IMPLICIT _BITUL(4) #define MPTCP_PM_ADDR_FLAG_LAMINAR _BITUL(5) +#define MPTCP_PM_ADDR_FLAGS_MASK GENMASK(5, 0) struct mptcp_info { __u8 mptcpi_subflows; diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h index f356f2ba3814..71c7196d3281 100644 --- a/include/uapi/linux/nfs.h +++ b/include/uapi/linux/nfs.h @@ -49,7 +49,6 @@ NFSERR_NOENT = 2, /* v2 v3 v4 */ NFSERR_IO = 5, /* v2 v3 v4 */ NFSERR_NXIO = 6, /* v2 v3 v4 */ - NFSERR_EAGAIN = 11, /* v2 v3 */ NFSERR_ACCES = 13, /* v2 v3 v4 */ NFSERR_EXIST = 17, /* v2 v3 v4 */ NFSERR_XDEV = 18, /* v3 v4 */ diff --git a/include/uapi/linux/pr.h b/include/uapi/linux/pr.h index 847f3051057a..f0ecb1677317 100644 --- a/include/uapi/linux/pr.h +++ b/include/uapi/linux/pr.h @@ -79,4 +79,6 @@ struct pr_read_reservation { #define IOC_PR_READ_KEYS _IOWR('p', 206, struct pr_read_keys) #define IOC_PR_READ_RESERVATION _IOR('p', 207, struct pr_read_reservation) +#define PR_KEYS_MAX (1u << 16) + #endif /* _UAPI_PR_H */ diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h index f7788d33376b..36f20802bcc8 100644 --- a/include/uapi/rdma/irdma-abi.h +++ b/include/uapi/rdma/irdma-abi.h @@ -57,8 +57,8 @@ struct irdma_alloc_ucontext_resp { __u8 rsvd2; __aligned_u64 comp_mask; __u16 min_hw_wq_size; + __u8 revd3[2]; __u32 max_hw_srq_quanta; - __u8 rsvd3[2]; }; struct irdma_alloc_pd_resp { diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h index 5ded174687ee..838f8d460256 100644 --- a/include/uapi/rdma/rdma_user_cm.h +++ b/include/uapi/rdma/rdma_user_cm.h @@ -192,6 +192,7 @@ struct rdma_ucm_query_path_resp { struct rdma_ucm_query_ib_service_resp { __u32 num_service_recs; + __u32 reserved; struct ib_user_service_rec recs[]; }; @@ -354,7 +355,7 @@ enum { #define RDMA_USER_CM_IB_SERVICE_NAME_SIZE 64 struct rdma_ucm_ib_service { - __u64 service_id; + __aligned_u64 service_id; __u8 service_name[RDMA_USER_CM_IB_SERVICE_NAME_SIZE]; __u32 flags; __u32 reserved; @@ -362,6 +363,7 @@ struct rdma_ucm_ib_service { struct rdma_ucm_resolve_ib_service { __u32 id; + __u32 reserved; struct rdma_ucm_ib_service ibs; }; diff --git a/include/uapi/regulator/regulator.h b/include/uapi/regulator/regulator.h index 71bf71a22e7f..c4f2d1c19828 100644 --- 
a/include/uapi/regulator/regulator.h +++ b/include/uapi/regulator/regulator.h @@ -8,11 +8,7 @@ #ifndef _UAPI_REGULATOR_H #define _UAPI_REGULATOR_H -#ifdef __KERNEL__ #include <linux/types.h> -#else -#include <stdint.h> -#endif /* * Regulator notifier events. @@ -62,7 +58,7 @@ struct reg_genl_event { char reg_name[32]; - uint64_t event; + __u64 event; }; /* attributes of reg_genl_family */
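
Note on the drm_pagemap hunks above: the copy_to_devmem()/copy_to_ram() callbacks now take a pre_migrate_fence argument that may be NULL. The following is only a minimal sketch (not part of the patch) of how a driver-side callback might honour it; my_copy_to_devmem() is a hypothetical name, and it simply waits on the fence with the generic dma_fence_wait() helper. As the struct drm_pagemap_devmem kerneldoc puts it ("wait for or pipeline behind"), a real driver could instead pipeline the copy behind the fence on its copy engine rather than blocking.

#include <linux/dma-fence.h>
#include <drm/drm_pagemap.h>

/*
 * Hypothetical callback adapted to the new copy_to_devmem() signature:
 * wait for the optional pre_migrate_fence before starting the copy.
 */
static int my_copy_to_devmem(struct page **pages,
			     struct drm_pagemap_addr *pagemap_addr,
			     unsigned long npages,
			     struct dma_fence *pre_migrate_fence)
{
	if (pre_migrate_fence) {
		/* Interruptible wait; returns 0 or -ERESTARTSYS. */
		long err = dma_fence_wait(pre_migrate_fence, true);

		if (err)
			return err;
	}

	/* ... program the copy of npages pages into device memory ... */
	return 0;
}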

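Likewise, the kunit/run-in-irq-context.h change above makes kunit_run_irq_test() keep calling the test function until max_iterations calls have accumulated across task, softirq and hardirq context (with at least one call in each), or until one second has passed. A hedged usage sketch, with hypothetical my_check()/my_irq_context_test() names and assuming the existing four-argument kunit_run_irq_test() signature shown in the diff:

#include <kunit/test.h>
#include <kunit/run-in-irq-context.h>

/* Returns false to report a failure from whichever context called it. */
static bool my_check(void *state)
{
	/* e.g. hash a fixed buffer and compare against a precomputed digest */
	return true;
}

static void my_irq_context_test(struct kunit *test)
{
	/*
	 * Runs my_check() until roughly 100 calls have accumulated across
	 * the three contexts (at least one in each), or one second elapses.
	 */
	kunit_run_irq_test(test, my_check, 100, NULL);
}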