author | Ingo Molnar <mingo@kernel.org> | 2015-09-28 08:06:57 +0200 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-09-28 08:06:57 +0200 |
commit | 6afc0c269c3d20cde05515b00ede00e91fee0be5 (patch) | |
tree | d54e7fa59df16bb62902129dfd28956d9ae1be19 /include | |
parent | 968d712a2565121b269e1037a1517916a9769423 (diff) | |
parent | 097f70b3c4d84ffccca15195bdfde3a37c0a7c0f (diff) | |
Merge branch 'linus' into perf/core, to pick up fixes before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include')
33 files changed, 257 insertions, 140 deletions
diff --git a/include/acpi/button.h b/include/acpi/button.h index 97eea0e4c016..1cad8b2d460c 100644 --- a/include/acpi/button.h +++ b/include/acpi/button.h @@ -3,7 +3,7 @@ #include <linux/notifier.h> -#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) +#if IS_ENABLED(CONFIG_ACPI_BUTTON) extern int acpi_lid_notifier_register(struct notifier_block *nb); extern int acpi_lid_notifier_unregister(struct notifier_block *nb); extern int acpi_lid_open(void); @@ -20,6 +20,6 @@ static inline int acpi_lid_open(void) { return 1; } -#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */ +#endif /* IS_ENABLED(CONFIG_ACPI_BUTTON) */ #endif /* ACPI_BUTTON_H */ diff --git a/include/acpi/video.h b/include/acpi/video.h index e840b294c6f5..c62392d9b52a 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -24,7 +24,7 @@ enum acpi_backlight_type { acpi_backlight_native, }; -#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) +#if IS_ENABLED(CONFIG_ACPI_VIDEO) extern int acpi_video_register(void); extern void acpi_video_unregister(void); extern int acpi_video_get_edid(struct acpi_device *device, int type, diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index f20f407ce45d..4b4b056a6eb0 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -73,7 +73,7 @@ * Convert a physical address to a Page Frame Number and back */ #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) -#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) +#define __pfn_to_phys(pfn) PFN_PHYS(pfn) #define page_to_pfn __page_to_pfn #define pfn_to_page __pfn_to_page diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index d901f1a47be6..4e14dac282bb 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -35,11 +35,7 @@ #define VGIC_V3_MAX_LRS 16 #define VGIC_MAX_IRQS 1024 #define VGIC_V2_MAX_CPUS 8 - -/* Sanity checks... */ -#if (KVM_MAX_VCPUS > 255) -#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now -#endif +#define VGIC_V3_MAX_CPUS 255 #if (VGIC_NR_IRQS_LEGACY & 31) #error "VGIC_NR_IRQS must be a multiple of 32" diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 5a5d79ee256f..d5eb4ad1c534 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -13,6 +13,7 @@ #include <linux/sched.h> #include <linux/blkdev.h> #include <linux/writeback.h> +#include <linux/memcontrol.h> #include <linux/blk-cgroup.h> #include <linux/backing-dev-defs.h> #include <linux/slab.h> @@ -252,13 +253,19 @@ int inode_congested(struct inode *inode, int cong_bits); * @inode: inode of interest * * cgroup writeback requires support from both the bdi and filesystem. - * Test whether @inode has both. + * Also, both memcg and iocg have to be on the default hierarchy. Test + * whether all conditions are met. + * + * Note that the test result may change dynamically on the same inode + * depending on how memcg and iocg are configured. 
*/ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); - return bdi_cap_account_dirty(bdi) && + return cgroup_on_dfl(mem_cgroup_root_css->cgroup) && + cgroup_on_dfl(blkcg_root_css->cgroup) && + bdi_cap_account_dirty(bdi) && (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 38a5ff772a37..99da9ebc7377 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1368,6 +1368,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q, ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); } +static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, + struct bio *next) +{ + if (!bio_has_data(prev)) + return false; + + return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1], + next->bi_io_vec[0].bv_offset); +} + +static inline bool req_gap_back_merge(struct request *req, struct bio *bio) +{ + return bio_will_gap(req->q, req->biotail, bio); +} + +static inline bool req_gap_front_merge(struct request *req, struct bio *bio) +{ + return bio_will_gap(req->q, bio, req->bio); +} + struct work_struct; int kblockd_schedule_work(struct work_struct *work); int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); @@ -1494,6 +1514,26 @@ queue_max_integrity_segments(struct request_queue *q) return q->limits.max_integrity_segments; } +static inline bool integrity_req_gap_back_merge(struct request *req, + struct bio *next) +{ + struct bio_integrity_payload *bip = bio_integrity(req->bio); + struct bio_integrity_payload *bip_next = bio_integrity(next); + + return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], + bip_next->bip_vec[0].bv_offset); +} + +static inline bool integrity_req_gap_front_merge(struct request *req, + struct bio *bio) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + struct bio_integrity_payload *bip_next = bio_integrity(req->bio); + + return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], + bip_next->bip_vec[0].bv_offset); +} + #else /* CONFIG_BLK_DEV_INTEGRITY */ struct bio; @@ -1560,6 +1600,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g) { return 0; } +static inline bool integrity_req_gap_back_merge(struct request *req, + struct bio *next) +{ + return false; +} +static inline bool integrity_req_gap_front_merge(struct request *req, + struct bio *bio) +{ + return false; +} #endif /* CONFIG_BLK_DEV_INTEGRITY */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 4d8fcf2187dc..8492721b39be 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -473,31 +473,8 @@ struct cgroup_subsys { unsigned int depends_on; }; -extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; - -/** - * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups - * @tsk: target task - * - * Called from threadgroup_change_begin() and allows cgroup operations to - * synchronize against threadgroup changes using a percpu_rw_semaphore. - */ -static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) -{ - percpu_down_read(&cgroup_threadgroup_rwsem); -} - -/** - * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups - * @tsk: target task - * - * Called from threadgroup_change_end(). Counterpart of - * cgroup_threadcgroup_change_begin(). 
- */ -static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) -{ - percpu_up_read(&cgroup_threadgroup_rwsem); -} +void cgroup_threadgroup_change_begin(struct task_struct *tsk); +void cgroup_threadgroup_change_end(struct task_struct *tsk); #else /* CONFIG_CGROUPS */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 430efcbea48e..dca22de98d94 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -127,9 +127,14 @@ struct cpufreq_policy { #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ #ifdef CONFIG_CPU_FREQ +struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); void cpufreq_cpu_put(struct cpufreq_policy *policy); #else +static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) +{ + return NULL; +} static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { return NULL; diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index ce447f0f1bad..68030e22af35 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -65,7 +65,10 @@ struct devfreq_dev_status { * The "flags" parameter's possible values are * explained above with "DEVFREQ_FLAG_*" macros. * @get_dev_status: The device should provide the current performance - * status to devfreq, which is used by governors. + * status to devfreq. Governors are recommended not to + * use this directly. Instead, governors are recommended + * to use devfreq_update_stats() along with + * devfreq.last_status. * @get_cur_freq: The device should provide the current frequency * at which it is operating. * @exit: An optional callback that is called when devfreq @@ -161,6 +164,7 @@ struct devfreq { struct delayed_work work; unsigned long previous_freq; + struct devfreq_dev_status last_status; void *data; /* private data for governors */ @@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev, extern void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq); +/** + * devfreq_update_stats() - update the last_status pointer in struct devfreq + * @df: the devfreq instance whose status needs updating + * + * Governors are recommended to use this function along with last_status, + * which allows other entities to reuse the last_status without affecting + * the values fetched later by governors. 
+ */ +static inline int devfreq_update_stats(struct devfreq *df) +{ + return df->profile->get_dev_status(df->dev.parent, &df->last_status); +} + #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) /** * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq @@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) { } + +static inline int devfreq_update_stats(struct devfreq *df) +{ + return -EINVAL; +} #endif /* CONFIG_PM_DEVFREQ */ #endif /* __LINUX_DEVFREQ_H__ */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d0b380ee7d67..e38681f4912d 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -25,6 +25,13 @@ extern struct files_struct init_files; extern struct fs_struct init_fs; +#ifdef CONFIG_CGROUPS +#define INIT_GROUP_RWSEM(sig) \ + .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem), +#else +#define INIT_GROUP_RWSEM(sig) +#endif + #ifdef CONFIG_CPUSETS #define INIT_CPUSET_SEQ(tsk) \ .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), @@ -57,6 +64,7 @@ extern struct fs_struct init_fs; INIT_PREV_CPUTIME(sig) \ .cred_guard_mutex = \ __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ + INIT_GROUP_RWSEM(sig) \ } extern struct nsproxy init_nsproxy; diff --git a/include/linux/irq.h b/include/linux/irq.h index 6f8b34066442..11bf09288ddb 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -110,8 +110,8 @@ enum { /* * Return value for chip->irq_set_affinity() * - * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity - * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity + * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity + * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to * support stacked irqchips, which indicates skipping * all descendent irqchips. @@ -129,9 +129,19 @@ struct irq_domain; * struct irq_common_data - per irq data shared by all irqchips * @state_use_accessors: status information for irq chip functions. * Use accessor functions to deal with it + * @node: node index useful for balancing + * @handler_data: per-IRQ data for the irq_chip methods + * @affinity: IRQ affinity on SMP + * @msi_desc: MSI descriptor */ struct irq_common_data { unsigned int state_use_accessors; +#ifdef CONFIG_NUMA + unsigned int node; +#endif + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; }; /** @@ -139,38 +149,26 @@ struct irq_common_data { * @mask: precomputed bitmask for accessing the chip registers * @irq: interrupt number * @hwirq: hardware interrupt number, local to the interrupt domain - * @node: node index useful for balancing * @common: point to data shared by all irqchips * @chip: low level interrupt hardware access * @domain: Interrupt translation domain; responsible for mapping * between hwirq number and linux irq number. * @parent_data: pointer to parent struct irq_data to support hierarchy * irq_domain - * @handler_data: per-IRQ data for the irq_chip methods * @chip_data: platform-specific per-chip private data for the chip * methods, to allow shared chip implementations - * @msi_desc: MSI descriptor - * @affinity: IRQ affinity on SMP - * - * The fields here need to overlay the ones in irq_desc until we - * cleaned up the direct references and switched everything over to - * irq_data. 
*/ struct irq_data { u32 mask; unsigned int irq; unsigned long hwirq; - unsigned int node; struct irq_common_data *common; struct irq_chip *chip; struct irq_domain *domain; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY struct irq_data *parent_data; #endif - void *handler_data; void *chip_data; - struct msi_desc *msi_desc; - cpumask_var_t affinity; }; /* @@ -190,6 +188,7 @@ struct irq_data { * IRQD_IRQ_MASKED - Masked state of the interrupt * IRQD_IRQ_INPROGRESS - In progress state of the interrupt * IRQD_WAKEUP_ARMED - Wakeup mode armed + * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -204,6 +203,7 @@ enum { IRQD_IRQ_MASKED = (1 << 17), IRQD_IRQ_INPROGRESS = (1 << 18), IRQD_WAKEUP_ARMED = (1 << 19), + IRQD_FORWARDED_TO_VCPU = (1 << 20), }; #define __irqd_to_state(d) ((d)->common->state_use_accessors) @@ -282,6 +282,20 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d) return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; } +static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU; +} + +static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU; +} + +static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) +{ + __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; +} /* * Functions for chained handlers which can be enabled/disabled by the @@ -461,14 +475,14 @@ static inline int irq_set_parent(int irq, int parent_irq) * Built-in IRQ handlers for various IRQ types, * callable via desc->handle_irq() */ -extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_level_irq(struct irq_desc *desc); +extern void handle_fasteoi_irq(struct irq_desc *desc); +extern void handle_edge_irq(struct irq_desc *desc); +extern void handle_edge_eoi_irq(struct irq_desc *desc); +extern void handle_simple_irq(struct irq_desc *desc); +extern void handle_percpu_irq(struct irq_desc *desc); +extern void handle_percpu_devid_irq(struct irq_desc *desc); +extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); @@ -627,23 +641,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d) static inline void *irq_get_handler_data(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); - return d ? d->handler_data : NULL; + return d ? d->common->handler_data : NULL; } static inline void *irq_data_get_irq_handler_data(struct irq_data *d) { - return d->handler_data; + return d->common->handler_data; } static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); - return d ? d->msi_desc : NULL; + return d ? 
d->common->msi_desc : NULL; } static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) { - return d->msi_desc; + return d->common->msi_desc; } static inline u32 irq_get_trigger_type(unsigned int irq) @@ -652,21 +666,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq) return d ? irqd_get_trigger_type(d) : 0; } -static inline int irq_data_get_node(struct irq_data *d) +static inline int irq_common_data_get_node(struct irq_common_data *d) { +#ifdef CONFIG_NUMA return d->node; +#else + return 0; +#endif +} + +static inline int irq_data_get_node(struct irq_data *d) +{ + return irq_common_data_get_node(d->common); } static inline struct cpumask *irq_get_affinity_mask(int irq) { struct irq_data *d = irq_get_irq_data(irq); - return d ? d->affinity : NULL; + return d ? d->common->affinity : NULL; } static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) { - return d->affinity; + return d->common->affinity; } unsigned int arch_dynirq_lower_bound(unsigned int from); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 5acfa26602e1..a587a33363c7 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS]; static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) { -#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY - return irq_to_desc(data->irq); -#else - return container_of(data, struct irq_desc, irq_data); -#endif + return container_of(data->common, struct irq_desc, irq_common_data); } static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) @@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc) static inline void *irq_desc_get_handler_data(struct irq_desc *desc) { - return desc->irq_data.handler_data; + return desc->irq_common_data.handler_data; } static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) { - return desc->irq_data.msi_desc; + return desc->irq_common_data.msi_desc; } /* * Architectures call this to let the generic IRQ layer - * handle an interrupt. If the descriptor is attached to an - * irqchip-style controller then we call the ->handle_irq() handler, - * and it calls __do_IRQ() if it's attached to an irqtype-style controller. + * handle an interrupt. 
*/ -static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) +static inline void generic_handle_irq_desc(struct irq_desc *desc) { - desc->handle_irq(irq, desc); + desc->handle_irq(desc); } int generic_handle_irq(unsigned int irq); @@ -176,29 +170,6 @@ static inline int irq_has_action(unsigned int irq) return irq_desc_has_action(irq_to_desc(irq)); } -/* caller has locked the irq_desc and both params are valid */ -static inline void __irq_set_handler_locked(unsigned int irq, - irq_flow_handler_t handler) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - desc->handle_irq = handler; -} - -/* caller has locked the irq_desc and both params are valid */ -static inline void -__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, - irq_flow_handler_t handler, const char *name) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - irq_desc_get_irq_data(desc)->chip = chip; - desc->handle_irq = handler; - desc->name = name; -} - /** * irq_set_handler_locked - Set irq handler from a locked region * @data: Pointer to the irq_data structure which identifies the irq diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h index 62d543004197..661bed0ed1f3 100644 --- a/include/linux/irqhandler.h +++ b/include/linux/irqhandler.h @@ -8,7 +8,7 @@ struct irq_desc; struct irq_data; -typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); +typedef void (*irq_flow_handler_t)(struct irq_desc *desc); typedef void (*irq_preflow_handler_t)(struct irq_data *data); #endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 88a00694eda5..2d15e3831440 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n) BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); + clear_bit(NAPI_STATE_NPSVC, &n->state); } #ifdef CONFIG_SMP diff --git a/include/linux/phy.h b/include/linux/phy.h index 962387a192f1..4a4e3a092337 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -19,6 +19,7 @@ #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/mii.h> +#include <linux/module.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/mod_devicetable.h> @@ -153,6 +154,7 @@ struct sk_buff; * PHYs should register using this structure */ struct mii_bus { + struct module *owner; const char *name; char id[MII_BUS_ID_SIZE]; void *priv; @@ -198,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void) return mdiobus_alloc_size(0); } -int mdiobus_register(struct mii_bus *bus); +int __mdiobus_register(struct mii_bus *bus, struct module *owner); +#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) void mdiobus_unregister(struct mii_bus *bus); void mdiobus_free(struct mii_bus *bus); struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); @@ -742,6 +745,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, struct phy_c45_device_ids *c45_ids); struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); int phy_device_register(struct phy_device *phy); +void phy_device_remove(struct phy_device *phydev); int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); diff --git a/include/linux/sched.h b/include/linux/sched.h index a4ab9daa387c..b7b9501b41af 100644 --- a/include/linux/sched.h +++ 
b/include/linux/sched.h @@ -762,6 +762,18 @@ struct signal_struct { unsigned audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; #endif +#ifdef CONFIG_CGROUPS + /* + * group_rwsem prevents new tasks from entering the threadgroup and + * member tasks from exiting,a more specifically, setting of + * PF_EXITING. fork and exit paths are protected with this rwsem + * using threadgroup_change_begin/end(). Users which require + * threadgroup to remain stable should use threadgroup_[un]lock() + * which also takes care of exec path. Currently, cgroup is the + * only user. + */ + struct rw_semaphore group_rwsem; +#endif oom_flags_t oom_flags; short oom_score_adj; /* OOM kill score adjustment */ diff --git a/include/linux/security.h b/include/linux/security.h index 79d85ddf8093..2f4c1f7aa7db 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2, unsigned long arg4, unsigned long arg5) { - return cap_task_prctl(option, arg2, arg3, arg3, arg5); + return cap_task_prctl(option, arg2, arg3, arg4, arg5); } static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2738d355cdf9..2b0a30a6e31c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -179,6 +179,9 @@ struct nf_bridge_info { u8 bridged_dnat:1; __u16 frag_max_size; struct net_device *physindev; + + /* always valid & non-NULL from FORWARD on, for physdev match */ + struct net_device *physoutdev; union { /* prerouting: detect dnat in orig/reply direction */ __be32 ipv4_daddr; @@ -189,9 +192,6 @@ struct nf_bridge_info { * skb is out in neigh layer. */ char neigh_header[8]; - - /* always valid & non-NULL from FORWARD on, for physdev match */ - struct net_device *physoutdev; }; }; #endif @@ -2707,6 +2707,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, { if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); + else if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_start_offset(skb) <= len) + skb->ip_summed = CHECKSUM_NONE; } unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 269e8afd3e2a..6b00f18f5e6b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -34,7 +34,7 @@ extern struct bus_type spi_bus_type; /** * struct spi_statistics - statistics for spi transfers - * @clock: lock protecting this structure + * @lock: lock protecting this structure * * @messages: number of spi-messages handled * @transfers: number of spi_transfers handled diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 7591788e9fbf..357e44c1a46b 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -42,6 +42,7 @@ struct sock_xprt { /* * Connection of transports */ + unsigned long sock_state; struct delayed_work connect_worker; struct sockaddr_storage srcaddr; unsigned short srcport; @@ -76,6 +77,8 @@ struct sock_xprt { */ #define TCP_RPC_REPLY (1UL << 6) +#define XPRT_SOCK_CONNECTING 1U + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 17292fee8686..157d366e761b 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -360,7 +360,7 @@ static inline struct thermal_zone_device * thermal_zone_of_sensor_register(struct device *dev, int id, void 
*data, const struct thermal_zone_of_device_ops *ops) { - return NULL; + return ERR_PTR(-ENODEV); } static inline @@ -380,6 +380,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) int power_actor_get_max_power(struct thermal_cooling_device *, struct thermal_zone_device *tz, u32 *max_power); +int power_actor_get_min_power(struct thermal_cooling_device *, + struct thermal_zone_device *tz, u32 *min_power); int power_actor_set_power(struct thermal_cooling_device *, struct thermal_instance *, u32); struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, @@ -415,6 +417,10 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, struct thermal_zone_device *tz, u32 *max_power) { return 0; } +static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, + u32 *min_power) +{ return -ENODEV; } static inline int power_actor_set_power(struct thermal_cooling_device *cdev, struct thermal_instance *tz, u32 power) { return 0; } diff --git a/include/linux/wait.h b/include/linux/wait.h index d3d077228d4c..1e1bf9f963a9 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -147,8 +147,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) typedef int wait_bit_action_f(struct wait_bit_key *); void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); -void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, - void *key); +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); @@ -180,7 +179,7 @@ wait_queue_head_t *bit_waitqueue(void *, int); #define wake_up_poll(x, m) \ __wake_up(x, TASK_NORMAL, 1, (void *) (m)) #define wake_up_locked_poll(x, m) \ - __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m)) + __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) #define wake_up_interruptible_poll(x, m) \ __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) #define wake_up_interruptible_sync_poll(x, m) \ diff --git a/include/net/flow.h b/include/net/flow.h index acd6a096250e..9b85db85f13c 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -35,6 +35,7 @@ struct flowi_common { #define FLOWI_FLAG_ANYSRC 0x01 #define FLOWI_FLAG_KNOWN_NH 0x02 #define FLOWI_FLAG_VRFSRC 0x04 +#define FLOWI_FLAG_SKIP_NH_OIF 0x08 __u32 flowic_secid; struct flowi_tunnel flowic_tun_key; }; diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 879d6e5a973b..186f3a1e1b1f 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -110,7 +110,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, struct inet_hashinfo *hashinfo); -void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo); +void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, + bool rearm); + +static void inline inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) +{ + __inet_twsk_schedule(tw, timeo, false); +} + +static void inline inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo) +{ + __inet_twsk_schedule(tw, timeo, true); +} + void inet_twsk_deschedule_put(struct 
inet_timewait_sock *tw); void inet_twsk_purge(struct inet_hashinfo *hashinfo, diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 063d30474cf6..aaf9700fc9e5 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -275,7 +275,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info, struct mx6_config *mxc); int fib6_del(struct rt6_info *rt, struct nl_info *info); -void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info); +void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, + unsigned int flags); void fib6_run_gc(unsigned long expires, struct net *net, bool force); diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index b8529aa1dae7..fa915fa0f703 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -32,6 +32,12 @@ struct __ip6_tnl_parm { __be32 o_key; }; +struct ip6_tnl_dst { + seqlock_t lock; + struct dst_entry __rcu *dst; + u32 cookie; +}; + /* IPv6 tunnel */ struct ip6_tnl { struct ip6_tnl __rcu *next; /* next tunnel in list */ @@ -39,8 +45,7 @@ struct ip6_tnl { struct net *net; /* netns for packet i/o */ struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ struct flowi fl; /* flowi template for xmit */ - struct dst_entry *dst_cache; /* cached dst */ - u32 dst_cookie; + struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */ int err_count; unsigned long err_time; @@ -60,9 +65,11 @@ struct ipv6_tlv_tnl_enc_lim { __u8 encap_limit; /* tunnel encapsulation limit */ } __packed; -struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t); +struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t); +int ip6_tnl_dst_init(struct ip6_tnl *t); +void ip6_tnl_dst_destroy(struct ip6_tnl *t); void ip6_tnl_dst_reset(struct ip6_tnl *t); -void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); +void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst); int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, @@ -79,7 +86,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, struct net_device_stats *stats = &dev->stats; int pkt_len, err; - pkt_len = skb->len; + pkt_len = skb->len - skb_inner_network_offset(skb); err = ip6_local_out_sk(sk, skb); if (net_xmit_eval(err) == 0) { diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index a37d0432bebd..727d6e9a9685 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -236,8 +236,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp, rcu_read_lock(); tb = fib_get_table(net, RT_TABLE_MAIN); - if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF)) - err = 0; + if (tb) + err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF); + + if (err == -EAGAIN) + err = -ENETUNREACH; rcu_read_unlock(); @@ -258,7 +261,7 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res, unsigned int flags) { struct fib_table *tb; - int err; + int err = -ENETUNREACH; flags |= FIB_LOOKUP_NOREF; if (net->ipv4.fib_has_custom_rules) @@ -268,15 +271,20 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp, res->tclassid = 0; - for (err = 0; !err; err = -ENETUNREACH) { - tb = rcu_dereference_rtnl(net->ipv4.fib_main); - if (tb && !fib_table_lookup(tb, flp, res, flags)) - break; + tb = rcu_dereference_rtnl(net->ipv4.fib_main); + if (tb) + err = fib_table_lookup(tb, flp, res, flags); + + if (!err) + goto out; + + tb 
= rcu_dereference_rtnl(net->ipv4.fib_default); + if (tb) + err = fib_table_lookup(tb, flp, res, flags); - tb = rcu_dereference_rtnl(net->ipv4.fib_default); - if (tb && !fib_table_lookup(tb, flp, res, flags)) - break; - } +out: + if (err == -EAGAIN) + err = -ENETUNREACH; rcu_read_unlock(); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 9a6a3ba888e8..f6dafec9102c 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -276,6 +276,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, u8 proto, u8 tos, u8 ttl, __be16 df, bool xnet); +struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, + gfp_t flags); struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, int gso_type_mask); diff --git a/include/net/route.h b/include/net/route.h index cc61cb95f059..f46af256880c 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -255,7 +255,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 flow_flags |= FLOWI_FLAG_ANYSRC; if (netif_index_is_vrf(sock_net(sk), oif)) - flow_flags |= FLOWI_FLAG_VRFSRC; + flow_flags |= FLOWI_FLAG_VRFSRC | FLOWI_FLAG_SKIP_NH_OIF; flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, protocol, flow_flags, dst, src, dport, sport); diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index 391dae1931c0..a0fa975cd1c1 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h @@ -294,8 +294,8 @@ struct opa_port_states { struct opa_port_state_info { struct opa_port_states port_states; - u16 link_width_downgrade_tx_active; - u16 link_width_downgrade_rx_active; + __be16 link_width_downgrade_tx_active; + __be16 link_width_downgrade_rx_active; }; struct opa_port_info { diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index ac9bf1c0e42d..5f48754dc36a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -730,6 +730,7 @@ struct se_device { #define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 #define DF_USING_UDEV_PATH 0x00000008 #define DF_USING_ALIAS 0x00000010 +#define DF_READ_ONLY 0x00000020 /* Physical device queue depth */ u32 queue_depth; /* Used for SPC-2 reservations enforce of ISIDs */ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 8da542a2874d..ee124009e12a 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -709,17 +709,19 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create) __SYSCALL(__NR_bpf, sys_bpf) #define __NR_execveat 281 __SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) -#define __NR_membarrier 282 +#define __NR_userfaultfd 282 +__SYSCALL(__NR_userfaultfd, sys_userfaultfd) +#define __NR_membarrier 283 __SYSCALL(__NR_membarrier, sys_membarrier) #undef __NR_syscalls -#define __NR_syscalls 283 +#define __NR_syscalls 284 /* * All syscalls below here should go away really, * these are provided for both review and as a porting * help for the C library version. -* + * * Last chance: are any of these important enough to * enable by default? 
*/ diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h index 34141a5dfe74..f8b01887a495 100644 --- a/include/uapi/linux/lwtunnel.h +++ b/include/uapi/linux/lwtunnel.h @@ -21,8 +21,6 @@ enum lwtunnel_ip_t { LWTUNNEL_IP_SRC, LWTUNNEL_IP_TTL, LWTUNNEL_IP_TOS, - LWTUNNEL_IP_SPORT, - LWTUNNEL_IP_DPORT, LWTUNNEL_IP_FLAGS, __LWTUNNEL_IP_MAX, }; @@ -36,8 +34,6 @@ enum lwtunnel_ip6_t { LWTUNNEL_IP6_SRC, LWTUNNEL_IP6_HOPLIMIT, LWTUNNEL_IP6_TC, - LWTUNNEL_IP6_SPORT, - LWTUNNEL_IP6_DPORT, LWTUNNEL_IP6_FLAGS, __LWTUNNEL_IP6_MAX, };