Merge v6.18.7
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 18
-SUBLEVEL = 6
+SUBLEVEL = 7
 EXTRAVERSION =
 NAME = Baby Opossum Posse

@@ -131,6 +131,7 @@
 		reg-names = "main", "isr0";

 		interrupt-controller;
+		#address-cells = <0>;
 		#interrupt-cells = <2>;
 		interrupt-parent = <&cpuintc>;
 		interrupts = <2>;
@@ -149,6 +150,7 @@
 		reg-names = "main", "isr0";

 		interrupt-controller;
+		#address-cells = <0>;
 		#interrupt-cells = <2>;
 		interrupt-parent = <&cpuintc>;
 		interrupts = <4>;
@@ -164,6 +166,7 @@
 		compatible = "loongson,ls2k0500-eiointc";
 		reg = <0x0 0x1fe11600 0x0 0xea00>;
 		interrupt-controller;
+		#address-cells = <0>;
 		#interrupt-cells = <1>;
 		interrupt-parent = <&cpuintc>;
 		interrupts = <3>;

@@ -46,7 +46,7 @@
 	};

 	/* i2c of the dvi eeprom edid */
-	i2c-gpio-0 {
+	i2c-0 {
 		compatible = "i2c-gpio";
 		scl-gpios = <&gpio0 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
 		sda-gpios = <&gpio0 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -57,7 +57,7 @@
 	};

 	/* i2c of the eeprom edid */
-	i2c-gpio-1 {
+	i2c-1 {
 		compatible = "i2c-gpio";
 		scl-gpios = <&gpio0 33 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
 		sda-gpios = <&gpio0 32 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -114,6 +114,7 @@
 		      <0x0 0x1fe01140 0x0 0x8>;
 		reg-names = "main", "isr0", "isr1";
 		interrupt-controller;
+		#address-cells = <0>;
 		#interrupt-cells = <2>;
 		interrupt-parent = <&cpuintc>;
 		interrupts = <2>;
@@ -131,6 +132,7 @@
 		      <0x0 0x1fe01148 0x0 0x8>;
 		reg-names = "main", "isr0", "isr1";
 		interrupt-controller;
+		#address-cells = <0>;
 		#interrupt-cells = <2>;
 		interrupt-parent = <&cpuintc>;
 		interrupts = <3>;
@@ -437,54 +439,47 @@

 		gmac0: ethernet@3,0 {
 			reg = <0x1800 0x0 0x0 0x0 0x0>;
-			interrupt-parent = <&liointc0>;
-			interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
-				     <13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&liointc0 12 IRQ_TYPE_LEVEL_HIGH>,
+					      <&liointc0 13 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "macirq", "eth_lpi";
 			status = "disabled";
 		};

 		gmac1: ethernet@3,1 {
 			reg = <0x1900 0x0 0x0 0x0 0x0>;
-			interrupt-parent = <&liointc0>;
-			interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
-				     <15 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&liointc0 14 IRQ_TYPE_LEVEL_HIGH>,
					      <&liointc0 15 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "macirq", "eth_lpi";
 			status = "disabled";
 		};

 		ehci0: usb@4,1 {
 			reg = <0x2100 0x0 0x0 0x0 0x0>;
-			interrupt-parent = <&liointc1>;
-			interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&liointc1 18 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};

 		ohci0: usb@4,2 {
 			reg = <0x2200 0x0 0x0 0x0 0x0>;
-			interrupt-parent = <&liointc1>;
-			interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&liointc1 19 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};

 		display@6,0 {
 			reg = <0x3000 0x0 0x0 0x0 0x0>;
-			interrupt-parent = <&liointc0>;
-			interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&liointc0 28 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};

 		hda@7,0 {
 			reg = <0x3800 0x0 0x0 0x0 0x0>;
-			interrupt-parent = <&liointc0>;
-			interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&liointc0 4 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};

 		sata: sata@8,0 {
 			reg = <0x4000 0x0 0x0 0x0 0x0>;
-			interrupt-parent = <&liointc0>;
-			interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&liointc0 19 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};

@@ -126,6 +126,7 @@
 		reg = <0x0 0x1fe01400 0x0 0x64>;

 		interrupt-controller;
+		#address-cells = <0>;
 		#interrupt-cells = <2>;
 		interrupt-parent = <&cpuintc>;
 		interrupts = <2>;
@@ -140,6 +141,7 @@
 		compatible = "loongson,ls2k2000-eiointc";
 		reg = <0x0 0x1fe01600 0x0 0xea00>;
 		interrupt-controller;
+		#address-cells = <0>;
 		#interrupt-cells = <1>;
 		interrupt-parent = <&cpuintc>;
 		interrupts = <3>;
@@ -149,6 +151,7 @@
 		compatible = "loongson,pch-pic-1.0";
 		reg = <0x0 0x10000000 0x0 0x400>;
 		interrupt-controller;
+		#address-cells = <0>;
 		#interrupt-cells = <2>;
 		loongson,pic-base-vec = <0>;
 		interrupt-parent = <&eiointc>;
@@ -291,65 +294,57 @@

 		gmac0: ethernet@3,0 {
 			reg = <0x1800 0x0 0x0 0x0 0x0>;
-			interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
-				     <13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&pic 12 IRQ_TYPE_LEVEL_HIGH>,
+					      <&pic 13 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "macirq", "eth_lpi";
-			interrupt-parent = <&pic>;
 			status = "disabled";
 		};

 		gmac1: ethernet@3,1 {
 			reg = <0x1900 0x0 0x0 0x0 0x0>;
-			interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
-				     <15 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&pic 14 IRQ_TYPE_LEVEL_HIGH>,
					      <&pic 15 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "macirq", "eth_lpi";
-			interrupt-parent = <&pic>;
 			status = "disabled";
 		};

 		gmac2: ethernet@3,2 {
 			reg = <0x1a00 0x0 0x0 0x0 0x0>;
-			interrupts = <17 IRQ_TYPE_LEVEL_HIGH>,
-				     <18 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&pic 17 IRQ_TYPE_LEVEL_HIGH>,
					      <&pic 18 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "macirq", "eth_lpi";
-			interrupt-parent = <&pic>;
 			status = "disabled";
 		};

 		xhci0: usb@4,0 {
 			reg = <0x2000 0x0 0x0 0x0 0x0>;
-			interrupts = <48 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&pic>;
+			interrupts-extended = <&pic 48 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};

 		xhci1: usb@19,0 {
 			reg = <0xc800 0x0 0x0 0x0 0x0>;
-			interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&pic>;
+			interrupts-extended = <&pic 22 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};

 		display@6,1 {
 			reg = <0x3100 0x0 0x0 0x0 0x0>;
-			interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&pic>;
+			interrupts-extended = <&pic 28 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};

 		i2s@7,0 {
 			reg = <0x3800 0x0 0x0 0x0 0x0>;
-			interrupts = <78 IRQ_TYPE_LEVEL_HIGH>,
-				     <79 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts-extended = <&pic 78 IRQ_TYPE_LEVEL_HIGH>,
					      <&pic 79 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "tx", "rx";
-			interrupt-parent = <&pic>;
 			status = "disabled";
 		};

 		sata: sata@8,0 {
 			reg = <0x4000 0x0 0x0 0x0 0x0>;
-			interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-parent = <&pic>;
+			interrupts-extended = <&pic 16 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};

@@ -626,6 +626,18 @@ static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 conf
 	return pev;
 }

+static inline bool loongarch_pmu_event_requires_counter(const struct perf_event *event)
+{
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+	case PERF_TYPE_RAW:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static int validate_group(struct perf_event *event)
 {
 	struct cpu_hw_events fake_cpuc;
@@ -633,15 +645,18 @@ static int validate_group(struct perf_event *event)

 	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

-	if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
+	if (loongarch_pmu_event_requires_counter(leader) &&
+	    loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
 		return -EINVAL;

 	for_each_sibling_event(sibling, leader) {
-		if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
+		if (loongarch_pmu_event_requires_counter(sibling) &&
+		    loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
 			return -EINVAL;
 	}

-	if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
+	if (loongarch_pmu_event_requires_counter(event) &&
+	    loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
 		return -EINVAL;

 	return 0;

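Note: the validate_group() change above only charges a fake hardware counter to events that actually need one. A minimal userspace sketch (illustrative, not part of this patch) of the group shape it fixes — a software leader with a hardware sibling, which previously could fail validation because the software event consumed a counter slot:

/* Sketch: software leader + hardware sibling in one perf group. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_event(struct perf_event_attr *attr, int group_fd)
{
	return syscall(SYS_perf_event_open, attr, 0, -1, group_fd, 0);
}

int main(void)
{
	struct perf_event_attr sw = { 0 }, hw = { 0 };
	int leader;

	sw.type = PERF_TYPE_SOFTWARE;		/* needs no PMU counter */
	sw.size = sizeof(sw);
	sw.config = PERF_COUNT_SW_CPU_CLOCK;

	hw.type = PERF_TYPE_HARDWARE;		/* needs a PMU counter */
	hw.size = sizeof(hw);
	hw.config = PERF_COUNT_HW_CPU_CYCLES;

	leader = open_event(&sw, -1);
	return open_event(&hw, leader) < 0;	/* sibling joins the group */
}
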
@@ -679,6 +679,7 @@ static void kvm_eiointc_destroy(struct kvm_device *dev)
 	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
 	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
 	kfree(eiointc);
+	kfree(dev);
 }

 static struct kvm_device_ops kvm_eiointc_dev_ops = {

@@ -459,6 +459,7 @@ static void kvm_ipi_destroy(struct kvm_device *dev)
 	ipi = kvm->arch.ipi;
 	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device);
 	kfree(ipi);
+	kfree(dev);
 }

 static struct kvm_device_ops kvm_ipi_dev_ops = {

@@ -475,6 +475,7 @@ static void kvm_pch_pic_destroy(struct kvm_device *dev)
 	/* unregister pch pic device and free it's memory */
 	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &s->device);
 	kfree(s);
+	kfree(dev);
 }

 static struct kvm_device_ops kvm_pch_pic_dev_ops = {

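Note: the three hunks above plug the same leak. For a struct kvm_device, the KVM core calls ops->destroy(dev) and does not free the container itself, so destroy() must free both the backing state and the kvm_device. A condensed sketch of the expected shape (illustrative; names hypothetical):

static void example_kvm_dev_destroy(struct kvm_device *dev)
{
	struct example_state *s = dev->private;

	/* tear down and free the device-specific state ... */
	kfree(s);
	/* ... and the kvm_device itself: the core does not free it for us */
	kfree(dev);
}
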
@@ -425,6 +425,28 @@ void __init paging_init(void)
 static struct kcore_list kcore_kseg0;
 #endif

+static inline void __init highmem_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long tmp;
+
+	/*
+	 * If CPU cannot support HIGHMEM discard the memory above highstart_pfn
+	 */
+	if (cpu_has_dc_aliases) {
+		memblock_remove(PFN_PHYS(highstart_pfn), -1);
+		return;
+	}
+
+	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
+		struct page *page = pfn_to_page(tmp);
+
+		if (!memblock_is_memory(PFN_PHYS(tmp)))
+			SetPageReserved(page);
+	}
+#endif
+}
+
 void __init arch_mm_preinit(void)
 {
 	/*
@@ -435,6 +457,7 @@ void __init arch_mm_preinit(void)

 	maar_init();
 	setup_zero_pages();	/* Setup zeroed pages.  */
+	highmem_init();

 #ifdef CONFIG_64BIT
 	if ((unsigned long) &_text > (unsigned long) CKSEG0)

@@ -279,6 +279,7 @@ config X86
 	select HAVE_PCI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select ASYNC_KERNEL_PGTABLE_FREE if IOMMU_SVA
 	select MMU_GATHER_RCU_TABLE_FREE
 	select MMU_GATHER_MERGE_VMAS
 	select HAVE_POSIX_CPU_TIMERS_TASK_WORK

@@ -818,7 +818,8 @@ static __init bool get_mem_config(void)

 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		return __get_mem_config_intel(&hw_res->r_resctrl);
-	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+		 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);

 	return false;
@@ -978,7 +979,8 @@ static __init void rdt_init_res_defs(void)
 {
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		rdt_init_res_defs_intel();
-	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+		 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		rdt_init_res_defs_amd();
 }
@@ -1010,8 +1012,19 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)
 		c->x86_cache_occ_scale = ebx;
 		c->x86_cache_mbm_width_offset = eax & 0xff;

-		if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
-			c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
+		if (!c->x86_cache_mbm_width_offset) {
+			switch (c->x86_vendor) {
+			case X86_VENDOR_AMD:
+				c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
+				break;
+			case X86_VENDOR_HYGON:
+				c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_HYGON;
+				break;
+			default:
+				/* Leave c->x86_cache_mbm_width_offset as 0 */
+				break;
+			}
+		}
 	}
 }

@@ -14,6 +14,9 @@

 #define MBM_CNTR_WIDTH_OFFSET_AMD	20

+/* Hygon MBM counter width as an offset from MBM_CNTR_WIDTH_BASE */
+#define MBM_CNTR_WIDTH_OFFSET_HYGON	8
+
 #define RMID_VAL_ERROR			BIT_ULL(63)
 #define RMID_VAL_UNAVAIL		BIT_ULL(62)

@@ -318,10 +318,29 @@ EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features);
 #ifdef CONFIG_X86_64
 void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd)
 {
+	struct fpstate *fpstate = guest_fpu->fpstate;
+
 	fpregs_lock();
-	guest_fpu->fpstate->xfd = xfd;
-	if (guest_fpu->fpstate->in_use)
-		xfd_update_state(guest_fpu->fpstate);
+
+	/*
+	 * KVM's guest ABI is that setting XFD[i]=1 *can* immediately revert the
+	 * save state to its initial configuration. Likewise, KVM_GET_XSAVE does
+	 * the same as XSAVE and returns XSTATE_BV[i]=0 whenever XFD[i]=1.
+	 *
+	 * If the guest's FPU state is in hardware, just update XFD: the XSAVE
+	 * in fpu_swap_kvm_fpstate will clear XSTATE_BV[i] whenever XFD[i]=1.
+	 *
+	 * If however the guest's FPU state is NOT resident in hardware, clear
+	 * disabled components in XSTATE_BV now, or a subsequent XRSTOR will
+	 * attempt to load disabled components and generate #NM _in the host_.
+	 */
+	if (xfd && test_thread_flag(TIF_NEED_FPU_LOAD))
+		fpstate->regs.xsave.header.xfeatures &= ~xfd;
+
+	fpstate->xfd = xfd;
+	if (fpstate->in_use)
+		xfd_update_state(fpstate);
+
 	fpregs_unlock();
 }
 EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);
@@ -429,6 +448,13 @@ int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
 	if (ustate->xsave.header.xfeatures & ~xcr0)
 		return -EINVAL;

+	/*
+	 * Disabled features must be in their initial state, otherwise XRSTOR
+	 * causes an exception.
+	 */
+	if (WARN_ON_ONCE(ustate->xsave.header.xfeatures & kstate->xfd))
+		return -EINVAL;
+
 	/*
 	 * Nullify @vpkru to preserve its current value if PKRU's bit isn't set
 	 * in the header. KVM's odd ABI is to leave PKRU untouched in this

@@ -5842,9 +5842,18 @@ static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 					struct kvm_xsave *guest_xsave)
 {
+	union fpregs_state *xstate = (union fpregs_state *)guest_xsave->region;
+
 	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
 		return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;

+	/*
+	 * For backwards compatibility, do not expect disabled features to be in
+	 * their initial state. XSTATE_BV[i] must still be cleared whenever
+	 * XFD[i]=1, or XRSTOR would cause a #NM.
+	 */
+	xstate->xsave.header.xfeatures &= ~vcpu->arch.guest_fpu.fpstate->xfd;
+
 	return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
 					      guest_xsave->region,
 					      kvm_caps.supported_xcr0,

@@ -1031,7 +1031,7 @@ static void __meminit free_pagetable(struct page *page, int order)
 		free_reserved_pages(page, nr_pages);
 #endif
 	} else {
-		__free_pages(page, order);
+		pagetable_free(page_ptdesc(page));
 	}
 }

@@ -115,12 +115,12 @@ void __init kernel_randomize_memory(void)

 	/*
 	 * Adapt physical memory region size based on available memory,
-	 * except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
-	 * device BAR space assuming the direct map space is large enough
-	 * for creating a ZONE_DEVICE mapping in the direct map corresponding
-	 * to the physical BAR address.
+	 * except when CONFIG_ZONE_DEVICE is enabled. ZONE_DEVICE wants to map
+	 * any physical address into the direct-map. KASLR wants to reliably
+	 * steal some physical address bits. Those design choices are in direct
+	 * conflict.
 	 */
-	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
+	if (!IS_ENABLED(CONFIG_ZONE_DEVICE) && (memory_tb < kaslr_regions[0].size_tb))
 		kaslr_regions[0].size_tb = memory_tb;

 	/*

@@ -429,7 +429,7 @@ static void cpa_collapse_large_pages(struct cpa_data *cpa)

 	list_for_each_entry_safe(ptdesc, tmp, &pgtables, pt_list) {
 		list_del(&ptdesc->pt_list);
-		__free_page(ptdesc_page(ptdesc));
+		pagetable_free(ptdesc);
 	}
 }

@@ -729,7 +729,7 @@ int pmd_clear_huge(pmd_t *pmd)
 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
 	pmd_t *pmd, *pmd_sv;
-	pte_t *pte;
+	struct ptdesc *pt;
 	int i;

 	pmd = pud_pgtable(*pud);
@@ -750,8 +750,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)

 	for (i = 0; i < PTRS_PER_PMD; i++) {
 		if (!pmd_none(pmd_sv[i])) {
-			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
-			pte_free_kernel(&init_mm, pte);
+			pt = page_ptdesc(pmd_page(pmd_sv[i]));
+			pagetable_dtor_free(pt);
 		}
 	}

@@ -772,15 +772,15 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 */
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
-	pte_t *pte;
+	struct ptdesc *pt;

-	pte = (pte_t *)pmd_page_vaddr(*pmd);
+	pt = page_ptdesc(pmd_page(*pmd));
 	pmd_clear(pmd);

 	/* INVLPG to clear all paging-structure caches */
 	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

-	pte_free_kernel(&init_mm, pte);
+	pagetable_dtor_free(pt);

 	return 1;
 }

@@ -142,7 +142,7 @@ bool bio_integrity_prep(struct bio *bio)
 			return true;
 		set_flags = false;
 		gfp |= __GFP_ZERO;
-		} else if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
+		} else if (bi->metadata_size > bi->pi_tuple_size)
 			gfp |= __GFP_ZERO;
 		break;
 	default:

@@ -665,12 +665,22 @@ static void nullb_add_fault_config(struct nullb_device *dev)
 	configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group);
 }

+static void nullb_del_fault_config(struct nullb_device *dev)
+{
+	config_item_put(&dev->init_hctx_fault_config.group.cg_item);
+	config_item_put(&dev->requeue_config.group.cg_item);
+	config_item_put(&dev->timeout_config.group.cg_item);
+}
+
 #else

 static void nullb_add_fault_config(struct nullb_device *dev)
 {
 }

+static void nullb_del_fault_config(struct nullb_device *dev)
+{
+}
+
 #endif

 static struct
@@ -702,7 +712,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
 		null_del_dev(dev->nullb);
 		mutex_unlock(&lock);
 	}

+	nullb_del_fault_config(dev);
 	config_item_put(item);
 }

@@ -403,7 +403,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
 	 * is not set.
 	 */
 	if (cxled->part < 0)
-		for (int i = 0; cxlds->nr_partitions; i++)
+		for (int i = 0; i < cxlds->nr_partitions; i++)
 			if (resource_contains(&cxlds->part[i].res, res)) {
 				cxled->part = i;
 				break;

@@ -1591,7 +1591,7 @@ static int update_decoder_targets(struct device *dev, void *data)
 			cxlsd->target[i] = dport;
 			dev_dbg(dev, "dport%d found in target list, index %d\n",
 				dport->port_id, i);
-			return 1;
+			return 0;
 		}
 	}

@@ -67,14 +67,16 @@ struct dev_dax_range {

 /**
 * struct dev_dax - instance data for a subdivision of a dax region, and
 * data while the device is activated in the driver.
- * @region - parent region
- * @dax_dev - core dax functionality
+ * @region: parent region
+ * @dax_dev: core dax functionality
 * @align: alignment of this instance
 * @target_node: effective numa node if dev_dax memory range is onlined
 * @dyn_id: is this a dynamic or statically created instance
 * @id: ida allocated id when the dax_region is not static
 * @ida: mapping id allocator
- * @dev - device core
- * @pgmap - pgmap for memmap setup / lifetime (driver owned)
+ * @dev: device core
+ * @pgmap: pgmap for memmap setup / lifetime (driver owned)
 * @memmap_on_memory: allow kmem to put the memmap in the memory
 * @nr_range: size of @ranges
 * @ranges: range tuples of memory used
 */

@@ -936,6 +936,7 @@ static void admac_remove(struct platform_device *pdev)
 }

 static const struct of_device_id admac_of_match[] = {
+	{ .compatible = "apple,t8103-admac", },
 	{ .compatible = "apple,admac", },
 	{ }
 };

@@ -1765,6 +1765,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 static void atc_free_chan_resources(struct dma_chan *chan)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_dma_slave *atslave;

 	BUG_ON(atc_chan_is_enabled(atchan));

@@ -1774,8 +1775,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 	/*
 	 * Free atslave allocated in at_dma_xlate()
 	 */
-	kfree(chan->private);
-	chan->private = NULL;
+	atslave = chan->private;
+	if (atslave) {
+		put_device(atslave->dma_dev);
+		kfree(atslave);
+		chan->private = NULL;
+	}

 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }

@@ -1699,7 +1699,7 @@ static int sba_probe(struct platform_device *pdev)
 	/* Prealloc channel resource */
 	ret = sba_prealloc_channel_resources(sba);
 	if (ret)
-		goto fail_free_mchan;
+		goto fail_put_mbox;

 	/* Check availability of debugfs */
 	if (!debugfs_initialized())
@@ -1729,6 +1729,8 @@ skip_debugfs:
 fail_free_resources:
 	debugfs_remove_recursive(sba->root);
 	sba_freeup_channel_resources(sba);
+fail_put_mbox:
+	put_device(sba->mbox_dev);
 fail_free_mchan:
 	mbox_free_channel(sba->mchan);
 	return ret;
@@ -1744,6 +1746,8 @@ static void sba_remove(struct platform_device *pdev)

 	sba_freeup_channel_resources(sba);

+	put_device(sba->mbox_dev);
+
 	mbox_free_channel(sba->mchan);
 }

@@ -102,11 +102,11 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 	struct llist_node *node;
 	unsigned long flags;
 	unsigned int chid, devid, cpuid;
-	int ret;
+	int ret = -EINVAL;

 	if (dma_spec->args_count != DMAMUX_NCELLS) {
 		dev_err(&pdev->dev, "invalid number of dma mux args\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	devid = dma_spec->args[0];
@@ -115,18 +115,18 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,

 	if (devid > MAX_DMA_MAPPING_ID) {
 		dev_err(&pdev->dev, "invalid device id: %u\n", devid);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	if (cpuid > MAX_DMA_CPU_ID) {
 		dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "can't get dma master\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	spin_lock_irqsave(&dmamux->lock, flags);
@@ -136,8 +136,6 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 			if (map->peripheral == devid && map->cpu == cpuid)
 				goto found;
 		}
-
-		ret = -EINVAL;
 		goto failed;
 	} else {
 		node = llist_del_first(&dmamux->free_maps);
@@ -171,12 +169,17 @@ found:
 	dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
 		chid, devid, cpuid);

+	put_device(&pdev->dev);
+
 	return map;

 failed:
 	spin_unlock_irqrestore(&dmamux->lock, flags);
 	of_node_put(dma_spec->np);
 	dev_err(&pdev->dev, "errno %d\n", ret);
+err_put_pdev:
+	put_device(&pdev->dev);

 	return ERR_PTR(ret);
 }

@@ -90,7 +90,7 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,

 	if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
 		ret = -EBUSY;
-		goto free_map;
+		goto put_dma_spec_np;
 	}

 	mask = BIT(map->req_idx);
@@ -103,6 +103,8 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,

 clear_bitmap:
 	clear_bit(map->req_idx, dmamux->used_chans);
+put_dma_spec_np:
+	of_node_put(dma_spec->np);
 free_map:
 	kfree(map);
 put_device:

@@ -852,6 +852,7 @@ err_errirq:
 	free_irq(fsl_chan->txirq, fsl_chan);
 err_txirq:
 	dma_pool_destroy(fsl_chan->tcd_pool);
+	clk_disable_unprepare(fsl_chan->clk);

 	return ret;
 }

@@ -20,11 +20,16 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t c
 	int rc = -ENODEV;

 	dev = bus_find_device_by_name(bus, NULL, buf);
-	if (dev && dev->driver) {
+	if (!dev)
+		return -ENODEV;
+
+	if (dev->driver) {
 		device_driver_detach(dev);
 		rc = count;
 	}

+	put_device(dev);
+
 	return rc;
 }
 static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
@@ -38,9 +43,12 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t cou
 	struct idxd_dev *idxd_dev;

 	dev = bus_find_device_by_name(bus, NULL, buf);
-	if (!dev || dev->driver || drv != &dsa_drv.drv)
+	if (!dev)
 		return -ENODEV;

+	if (dev->driver || drv != &dsa_drv.drv)
+		goto err_put_dev;
+
 	idxd_dev = confdev_to_idxd_dev(dev);
 	if (is_idxd_dev(idxd_dev)) {
 		alt_drv = driver_find("idxd", bus);
@@ -53,13 +61,20 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t cou
 		alt_drv = driver_find("user", bus);
 	}
 	if (!alt_drv)
-		return -ENODEV;
+		goto err_put_dev;

 	rc = device_driver_attach(alt_drv, dev);
 	if (rc < 0)
-		return rc;
+		goto err_put_dev;

+	put_device(dev);
+
 	return count;

+err_put_dev:
+	put_device(dev);
+
+	return rc;
 }
 static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);

@@ -57,30 +57,31 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
 	struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
 	unsigned long flags;
 	unsigned mux;
+	int ret = -EINVAL;

 	if (dma_spec->args_count != 3) {
 		dev_err(&pdev->dev, "invalid number of dma mux args\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	mux = dma_spec->args[0];
 	if (mux >= dmamux->dma_master_requests) {
 		dev_err(&pdev->dev, "invalid mux number: %d\n",
 			dma_spec->args[0]);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) {
 		dev_err(&pdev->dev, "invalid dma mux value: %d\n",
 			dma_spec->args[1]);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	/* The of_node_put() will be done in the core for the node */
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "can't get dma master\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	spin_lock_irqsave(&dmamux->lock, flags);
@@ -89,7 +90,8 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
 		dev_err(&pdev->dev, "dma request %u busy with %u.%u\n",
 			mux, mux, dmamux->muxes[mux].value);
 		of_node_put(dma_spec->np);
-		return ERR_PTR(-EBUSY);
+		ret = -EBUSY;
+		goto err_put_pdev;
 	}

 	dmamux->muxes[mux].busy = true;
@@ -106,7 +108,14 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
 	dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux,
 		dmamux->muxes[mux].value, mux);

+	put_device(&pdev->dev);
+
 	return &dmamux->muxes[mux];
+
+err_put_pdev:
+	put_device(&pdev->dev);
+
+	return ERR_PTR(ret);
 }

 static int lpc18xx_dmamux_probe(struct platform_device *pdev)

@@ -95,11 +95,12 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
 	struct lpc32xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
 	unsigned long flags;
 	struct lpc32xx_dmamux *mux = NULL;
+	int ret = -EINVAL;
 	int i;

 	if (dma_spec->args_count != 3) {
 		dev_err(&pdev->dev, "invalid number of dma mux args\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	for (i = 0; i < ARRAY_SIZE(lpc32xx_muxes); i++) {
@@ -111,20 +112,20 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
 	if (!mux) {
 		dev_err(&pdev->dev, "invalid mux request number: %d\n",
 			dma_spec->args[0]);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	if (dma_spec->args[2] > 1) {
 		dev_err(&pdev->dev, "invalid dma mux value: %d\n",
 			dma_spec->args[1]);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	/* The of_node_put() will be done in the core for the node */
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "can't get dma master\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	spin_lock_irqsave(&dmamux->lock, flags);
@@ -133,7 +134,8 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
 		dev_err(dev, "dma request signal %d busy, routed to %s\n",
 			mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel1);
 		of_node_put(dma_spec->np);
-		return ERR_PTR(-EBUSY);
+		ret = -EBUSY;
+		goto err_put_pdev;
 	}

 	mux->busy = true;
@@ -148,7 +150,14 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
 	dev_dbg(dev, "dma request signal %d routed to %s\n",
 		mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel1);

+	put_device(&pdev->dev);
+
 	return mux;
+
+err_put_pdev:
+	put_device(&pdev->dev);
+
+	return ERR_PTR(ret);
 }

 static int lpc32xx_dmamux_probe(struct platform_device *pdev)

@@ -152,8 +152,8 @@ struct mmp_pdma_phy {
 *
 * Controller Configuration:
 * @run_bits: Control bits in DCSR register for channel start/stop
- * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
- *	      settings, or explicit mask like DMA_BIT_MASK(32/64)
+ * @dma_width: DMA addressing width in bits (32 or 64). Determines the
+ *	       DMA mask capability of the controller hardware.
 */
 struct mmp_pdma_ops {
 	/* Hardware Register Operations */
@@ -173,7 +173,7 @@ struct mmp_pdma_ops {

 	/* Controller Configuration */
 	u32 run_bits;
-	u64 dma_mask;
+	u32 dma_width;
 };

 struct mmp_pdma_device {
@@ -1172,7 +1172,7 @@ static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
 	.get_desc_src_addr = get_desc_src_addr_32,
 	.get_desc_dst_addr = get_desc_dst_addr_32,
 	.run_bits = (DCSR_RUN),
-	.dma_mask = 0,		/* let OF/platform set DMA mask */
+	.dma_width = 32,
 };

 static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
@@ -1185,7 +1185,7 @@ static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
 	.get_desc_src_addr = get_desc_src_addr_64,
 	.get_desc_dst_addr = get_desc_dst_addr_64,
 	.run_bits = (DCSR_RUN | DCSR_LPAEEN),
-	.dma_mask = DMA_BIT_MASK(64),	/* force 64-bit DMA addr capability */
+	.dma_width = 64,
 };

 static const struct of_device_id mmp_pdma_dt_ids[] = {
@@ -1314,13 +1314,9 @@ static int mmp_pdma_probe(struct platform_device *op)
 	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
 	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

-	/* Set DMA mask based on ops->dma_mask, or OF/platform */
-	if (pdev->ops->dma_mask)
-		dma_set_mask(pdev->dev, pdev->ops->dma_mask);
-	else if (pdev->dev->coherent_dma_mask)
-		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
-	else
-		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
+	/* Set DMA mask based on controller hardware capabilities */
+	dma_set_mask_and_coherent(pdev->dev,
+				  DMA_BIT_MASK(pdev->ops->dma_width));

 	ret = dma_async_device_register(&pdev->device);
 	if (ret) {

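Note: the probe change above moves to the usual idiom of setting the streaming and coherent DMA masks together from the hardware's known width. The common fallback form of that idiom, for reference (generic sketch, not this driver's code):

	/* Try 64-bit DMA first, fall back to 32-bit if the platform refuses. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return dev_err_probe(dev, ret, "no usable DMA configuration\n");
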
@@ -1605,14 +1605,16 @@ static int
 gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
 {
 	struct gchan *gchan = to_gchan(chan);
+	void *new_config;

 	if (!config->peripheral_config)
 		return -EINVAL;

-	gchan->config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
-	if (!gchan->config)
+	new_config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
+	if (!new_config)
 		return -ENOMEM;

+	gchan->config = new_config;
 	memcpy(gchan->config, config->peripheral_config, config->peripheral_size);

 	return 0;

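Note: this is the canonical krealloc() fix — assigning the result straight back to the only pointer you hold leaks (and loses) the old buffer when reallocation fails. The general pattern, as a sketch:

	void *tmp = krealloc(buf, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* buf is still valid and still owned here */
	buf = tmp;		/* only commit the new pointer on success */
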
@@ -557,11 +557,16 @@ rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 static int rz_dmac_terminate_all(struct dma_chan *chan)
 {
 	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
 	unsigned long flags;
+	unsigned int i;
 	LIST_HEAD(head);

 	rz_dmac_disable_hw(channel);
 	spin_lock_irqsave(&channel->vc.lock, flags);
+	for (i = 0; i < DMAC_NR_LMDESC; i++)
+		lmdesc[i].header = 0;
+
 	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
 	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
 	vchan_get_all_descriptors(&channel->vc, &head);
@@ -854,6 +859,13 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
 	return 0;
 }

+static void rz_dmac_put_device(void *_dev)
+{
+	struct device *dev = _dev;
+
+	put_device(dev);
+}
+
 static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
 {
 	struct device_node *np = dev->of_node;
@@ -876,6 +888,10 @@ static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
 		return -ENODEV;
 	}

+	ret = devm_add_action_or_reset(dev, rz_dmac_put_device, &dmac->icu.pdev->dev);
+	if (ret)
+		return ret;
+
 	dmac_index = args.args[0];
 	if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
 		dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
@@ -1055,8 +1071,6 @@ static void rz_dmac_remove(struct platform_device *pdev)
 	reset_control_assert(dmac->rstc);
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-
-	platform_device_put(dmac->icu.pdev);
 }

 static const struct of_device_id of_rz_dmac_match[] = {

@@ -90,23 +90,25 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 	struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
 	struct stm32_dmamux *mux;
 	u32 i, min, max;
-	int ret;
+	int ret = -EINVAL;
 	unsigned long flags;

 	if (dma_spec->args_count != 3) {
 		dev_err(&pdev->dev, "invalid number of dma mux args\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	if (dma_spec->args[0] > dmamux->dmamux_requests) {
 		dev_err(&pdev->dev, "invalid mux request number: %d\n",
 			dma_spec->args[0]);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}

 	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
-	if (!mux)
-		return ERR_PTR(-ENOMEM);
+	if (!mux) {
+		ret = -ENOMEM;
+		goto err_put_pdev;
+	}

 	spin_lock_irqsave(&dmamux->lock, flags);
 	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
@@ -133,7 +135,6 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "can't get dma master\n");
-		ret = -EINVAL;
 		goto error;
 	}

@@ -142,7 +143,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 	ret = pm_runtime_resume_and_get(&pdev->dev);
 	if (ret < 0) {
 		spin_unlock_irqrestore(&dmamux->lock, flags);
-		goto error;
+		goto err_put_dma_spec_np;
 	}
 	spin_unlock_irqrestore(&dmamux->lock, flags);

@@ -160,13 +161,20 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
 	dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
 		mux->request, mux->master, mux->chan_id);

+	put_device(&pdev->dev);
+
 	return mux;

+err_put_dma_spec_np:
+	of_node_put(dma_spec->np);
 error:
 	clear_bit(mux->chan_id, dmamux->dma_inuse);

 error_chan_id:
 	kfree(mux);
+err_put_pdev:
+	put_device(&pdev->dev);
+
 	return ERR_PTR(ret);
 }

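Note: this stm32 change, like the cv1800/lpc18xx/lpc32xx/rzn1/ti xbar hunks in this series, fixes the same reference leak: the route_allocate() callback looks up the router's platform device with of_find_device_by_node(), which takes a device reference, and every exit path — success included — must drop it. The shared shape of the fix, as a sketch:

static void *route_allocate(struct of_phandle_args *dma_spec,
			    struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	void *map = NULL;
	int ret = -EINVAL;

	if (!pdev)
		return ERR_PTR(-ENODEV);

	if (dma_spec->args_count != 3)
		goto err_put_pdev;	/* was: return ERR_PTR(-EINVAL); */

	/* ... validate the request and build 'map' ... */

	put_device(&pdev->dev);		/* drop the lookup reference */
	return map;

err_put_pdev:
	put_device(&pdev->dev);
	return ERR_PTR(ret);
}
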
@@ -429,10 +429,17 @@ static void tegra_adma_stop(struct tegra_adma_chan *tdc)
 		return;
 	}

-	kfree(tdc->desc);
+	vchan_terminate_vdesc(&tdc->desc->vd);
 	tdc->desc = NULL;
 }

+static void tegra_adma_synchronize(struct dma_chan *dc)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+
+	vchan_synchronize(&tdc->vc);
+}
+
 static void tegra_adma_start(struct tegra_adma_chan *tdc)
 {
 	struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
@@ -1155,6 +1162,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
 	tdma->dma_dev.device_config = tegra_adma_slave_config;
 	tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
 	tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
+	tdma->dma_dev.device_synchronize = tegra_adma_synchronize;
 	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

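Note: with virt-dma, an in-flight descriptor must be handed back to the framework with vchan_terminate_vdesc() — it is freed later through the channel's desc_free path — rather than kfree()d directly; the old code even freed the descriptor and then dereferenced it on the next line. A driver doing this must also wire up device_synchronize so callers can wait out the deferred free. Sketch of that callback (driver types hypothetical):

static void foo_dma_synchronize(struct dma_chan *dc)
{
	struct foo_chan *fc = to_foo_chan(dc);

	/*
	 * Waits out the vchan tasklet and the deferred desc_free() of any
	 * descriptor handed over with vchan_terminate_vdesc().
	 */
	vchan_synchronize(&fc->vc);
}
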
@@ -79,34 +79,35 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
 {
 	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
 	struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
-	struct ti_am335x_xbar_map *map;
+	struct ti_am335x_xbar_map *map = ERR_PTR(-EINVAL);

 	if (dma_spec->args_count != 3)
-		return ERR_PTR(-EINVAL);
+		goto out_put_pdev;

 	if (dma_spec->args[2] >= xbar->xbar_events) {
 		dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
 			dma_spec->args[2]);
-		return ERR_PTR(-EINVAL);
+		goto out_put_pdev;
 	}

 	if (dma_spec->args[0] >= xbar->dma_requests) {
 		dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
 			dma_spec->args[0]);
-		return ERR_PTR(-EINVAL);
+		goto out_put_pdev;
 	}

 	/* The of_node_put() will be done in the core for the node */
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "Can't get DMA master\n");
-		return ERR_PTR(-EINVAL);
+		goto out_put_pdev;
 	}

 	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (!map) {
 		of_node_put(dma_spec->np);
-		return ERR_PTR(-ENOMEM);
+		map = ERR_PTR(-ENOMEM);
+		goto out_put_pdev;
 	}

 	map->dma_line = (u16)dma_spec->args[0];
@@ -120,6 +121,9 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,

 	ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);

+out_put_pdev:
+	put_device(&pdev->dev);
+
 	return map;
 }
@@ -288,6 +292,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,

 	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);

+	put_device(&pdev->dev);
+
 	return map;
 }

@@ -42,9 +42,9 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
 	}

 	ud = platform_get_drvdata(pdev);
+	put_device(&pdev->dev);
 	if (!ud) {
 		pr_debug("UDMA has not been probed\n");
-		put_device(&pdev->dev);
 		return ERR_PTR(-EPROBE_DEFER);
 	}

@@ -1808,6 +1808,8 @@ static int omap_dma_probe(struct platform_device *pdev)
 	if (rc) {
 		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
 			rc);
+		if (od->ll123_supported)
+			dma_pool_destroy(od->desc_pool);
 		omap_dma_free(od);
 		return rc;
 	}
@@ -1823,6 +1825,8 @@ static int omap_dma_probe(struct platform_device *pdev)
 		if (rc) {
 			pr_warn("OMAP-DMA: failed to register DMA controller\n");
 			dma_async_device_unregister(&od->ddev);
+			if (od->ll123_supported)
+				dma_pool_destroy(od->desc_pool);
 			omap_dma_free(od);
 		}
 	}

@@ -9,6 +9,7 @@

 /* The length of register space exposed to host */
 #define XDMA_REG_SPACE_LEN	65536
+#define XDMA_MAX_REG_OFFSET	(XDMA_REG_SPACE_LEN - 4)

 /*
 * maximum number of DMA channels for each direction:
@@ -38,7 +38,7 @@ static const struct regmap_config xdma_regmap_config = {
 	.reg_bits = 32,
 	.val_bits = 32,
 	.reg_stride = 4,
-	.max_register = XDMA_REG_SPACE_LEN,
+	.max_register = XDMA_MAX_REG_OFFSET,
 };

 /**

@@ -131,6 +131,7 @@
 #define XILINX_MCDMA_MAX_CHANS_PER_DEVICE	0x20
 #define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x2
 #define XILINX_CDMA_MAX_CHANS_PER_DEVICE	0x1
+#define XILINX_DMA_DFAULT_ADDRWIDTH		0x20

 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@@ -3159,7 +3160,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	struct device_node *node = pdev->dev.of_node;
 	struct xilinx_dma_device *xdev;
 	struct device_node *child, *np = pdev->dev.of_node;
-	u32 num_frames, addr_width, len_width;
+	u32 num_frames, addr_width = XILINX_DMA_DFAULT_ADDRWIDTH, len_width;
 	int i, err;

 	/* Allocate and initialize the DMA engine structure */
@@ -3235,7 +3236,9 @@ static int xilinx_dma_probe(struct platform_device *pdev)

 	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
 	if (err < 0)
-		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
+		dev_warn(xdev->dev,
+			 "missing xlnx,addrwidth property, using default value %d\n",
+			 XILINX_DMA_DFAULT_ADDRWIDTH);

 	if (addr_width > 32)
 		xdev->ext_addr = true;

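Note: the probe change relies on a documented property of of_property_read_u32(): on failure it leaves the output variable untouched, so pre-initializing it gives an optional DT property a sane default without extra branching. In miniature (sketch):

	u32 addr_width = XILINX_DMA_DFAULT_ADDRWIDTH;	/* default if property absent */
	int err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);

	if (err < 0)
		dev_warn(dev, "missing xlnx,addrwidth, using default %u\n",
			 addr_width);
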
@@ -358,10 +358,11 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 	layers[1].size = nr_channels;
 	layers[1].is_virt_csrow = false;
-	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
-			    sizeof(struct i3200_priv));
+
+	rc = -ENOMEM;
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct i3200_priv));
 	if (!mci)
-		return -ENOMEM;
+		goto unmap;

 	edac_dbg(3, "MC: init mci\n");
@@ -421,9 +422,9 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
 	return 0;

 fail:
+	edac_mc_free(mci);
+unmap:
 	iounmap(window);
-	if (mci)
-		edac_mc_free(mci);

 	return rc;
 }

@@ -341,9 +341,12 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 	layers[1].size = x38_channel_num;
 	layers[1].is_virt_csrow = false;
+
+	rc = -ENOMEM;
 	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
 	if (!mci)
-		return -ENOMEM;
+		goto unmap;

 	edac_dbg(3, "MC: init mci\n");
@@ -403,9 +406,9 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
 	return 0;

 fail:
+	edac_mc_free(mci);
+unmap:
 	iounmap(window);
-	if (mci)
-		edac_mc_free(mci);

 	return rc;
 }

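Note: both EDAC probe fixes re-order the unwind so the ioremap()ed window is released even when edac_mc_alloc() fails. The skeleton is the standard goto-unwind pattern — labels undo acquisitions in reverse order, and each failure jumps to the label that skips the steps that never happened (sketch):

	rc = -ENOMEM;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*priv));
	if (!mci)
		goto unmap;		/* window is already mapped */
	/* ... */
	return 0;

fail:
	edac_mc_free(mci);		/* undo the later acquisition first */
unmap:
	iounmap(window);
	return rc;
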
@@ -162,7 +162,7 @@ int cper_bits_to_str(char *buf, int buf_size, unsigned long bits,
 		len -= size;
 		str += size;
 	}
-	return len - buf_size;
+	return buf_size - len;
 }
 EXPORT_SYMBOL_GPL(cper_bits_to_str);

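Note: the cper_bits_to_str() hunk fixes an inverted return value. With len initialized to buf_size and decremented as each chunk is formatted, the byte count produced is buf_size - len; the old expression returned its negation. A quick worked check (sketch):

	/* buf_size = 32; after formatting 9 bytes, len = 32 - 9 = 23.
	 * bytes written = buf_size - len = 9; the old "len - buf_size" gave -9. */
	return buf_size - len;
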
@@ -203,6 +203,18 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
 	struct mbox_chan *ch;
 	int ret = 0, i = 0;

+	if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
+					"#mbox-cells", 0, &spec)) {
+		i = of_alias_get_id(spec.np, "mu");
+		of_node_put(spec.np);
+	}
+
+	/* use mu1 as general mu irq channel if failed */
+	if (i < 0)
+		i = 1;
+
+	mu_resource_id = IMX_SC_R_MU_0A + i;
+
 	ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
 	if (ret)
 		return ret;
@@ -225,18 +237,6 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
 		return ret;
 	}

-	if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
-					"#mbox-cells", 0, &spec)) {
-		i = of_alias_get_id(spec.np, "mu");
-		of_node_put(spec.np);
-	}
-
-	/* use mu1 as general mu irq channel if failed */
-	if (i < 0)
-		i = 1;
-
-	mu_resource_id = IMX_SC_R_MU_0A + i;
-
 	/* Create directory under /sysfs/firmware */
 	wakeup_obj = kobject_create_and_add("scu_wakeup_source", firmware_kobj);
 	if (!wakeup_obj) {

@@ -4985,6 +4985,14 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)

 	amdgpu_ttm_set_buffer_funcs_status(adev, false);

+	/*
+	 * device went through surprise hotplug; we need to destroy topology
+	 * before ip_fini_early to prevent kfd locking refcount issues by calling
+	 * amdgpu_amdkfd_suspend()
+	 */
+	if (drm_dev_is_unplugged(adev_to_drm(adev)))
+		amdgpu_amdkfd_device_fini_sw(adev);
+
 	amdgpu_device_ip_fini_early(adev);

 	amdgpu_irq_fini_hw(adev);

@@ -1824,7 +1824,12 @@ int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
 				      struct drm_scanout_buffer *sb)
 {
 	struct amdgpu_bo *abo;
-	struct drm_framebuffer *fb = plane->state->fb;
+	struct drm_framebuffer *fb;
+
+	if (drm_drv_uses_atomic_modeset(plane->dev))
+		fb = plane->state->fb;
+	else
+		fb = plane->fb;

 	if (!fb)
 		return -EINVAL;

@@ -681,12 +681,28 @@ static int amdgpu_userq_input_args_validate(struct drm_device *dev,
 	return 0;
 }

+bool amdgpu_userq_enabled(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	int i;
+
+	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+		if (adev->userq_funcs[i])
+			return true;
+	}
+
+	return false;
+}
+
 int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *filp)
 {
 	union drm_amdgpu_userq *args = data;
 	int r;

+	if (!amdgpu_userq_enabled(dev))
+		return -ENOTSUPP;
+
 	if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
 		return -EINVAL;

@@ -135,6 +135,7 @@ uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
 					 struct drm_file *filp);

 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+bool amdgpu_userq_enabled(struct drm_device *dev);

 int amdgpu_userq_suspend(struct amdgpu_device *adev);
 int amdgpu_userq_resume(struct amdgpu_device *adev);

@@ -141,6 +141,8 @@ static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
 void
 amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
 {
+	dma_fence_put(userq->last_fence);
+
 	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
 	xa_destroy(&userq->fence_drv_xa);
 	/* Drop the fence_drv reference held by user queue */
@@ -470,6 +472,9 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
 	struct drm_exec exec;
 	u64 wptr;

+	if (!amdgpu_userq_enabled(dev))
+		return -ENOTSUPP;
+
 	num_syncobj_handles = args->num_syncobj_handles;
 	syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
 				      size_mul(sizeof(u32), num_syncobj_handles));
@@ -652,6 +657,9 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
 	int r, i, rentry, wentry, cnt;
 	struct drm_exec exec;

+	if (!amdgpu_userq_enabled(dev))
+		return -ENOTSUPP;
+
 	num_read_bo_handles = wait_info->num_bo_read_handles;
 	bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
 				      size_mul(sizeof(u32), num_read_bo_handles));

@@ -1233,16 +1233,16 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
 		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
 		break;
 	case AMDGPU_VM_MTYPE_WC:
-		*flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
+		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
 		break;
 	case AMDGPU_VM_MTYPE_RW:
-		*flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
+		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
 		break;
 	case AMDGPU_VM_MTYPE_CC:
-		*flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
+		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
 		break;
 	case AMDGPU_VM_MTYPE_UC:
-		*flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
+		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
 		break;
 	}

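Note: the gmc_v9 hunk is a classic field-update pitfall — a macro of the form MACRO(flags, mtype) that returns flags with the MTYPE field masked out and replaced must be assigned, not OR-ed back in, because OR-ing re-merges the stale field bits that were just cleared. In miniature (sketch; SET_FIELD/MTYPE_MASK are hypothetical stand-ins):

	#define SET_FIELD(v, m, x)	(((v) & ~(m)) | (x))

	val = SET_FIELD(val, MTYPE_MASK, MTYPE_WC);	/* correct: field replaced */
	val |= SET_FIELD(val, MTYPE_MASK, MTYPE_WC);	/* wrong: old field bits survive */
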
@@ -1209,14 +1209,8 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 	pr_debug_ratelimited("Evicting process pid %d queues\n",
 			     pdd->process->lead_thread->pid);

-	if (dqm->dev->kfd->shared_resources.enable_mes) {
+	if (dqm->dev->kfd->shared_resources.enable_mes)
 		pdd->last_evict_timestamp = get_jiffies_64();
-		retval = suspend_all_queues_mes(dqm);
-		if (retval) {
-			dev_err(dev, "Suspending all queues failed");
-			goto out;
-		}
-	}

 	/* Mark all queues as evicted. Deactivate all active queues on
 	 * the qpd.
@@ -1246,10 +1240,6 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
 				USE_DEFAULT_GRACE_PERIOD);
-	} else {
-		retval = resume_all_queues_mes(dqm);
-		if (retval)
-			dev_err(dev, "Resuming all queues failed");
 	}

 out:
@@ -2915,6 +2905,14 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
 	return retval;
 }

+static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
+				    struct kfd_mem_obj *mqd)
+{
+	WARN(!mqd, "No hiq sdma mqd trunk to free");
+
+	amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
+}
+
 struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
 {
 	struct device_queue_manager *dqm;
@@ -3038,19 +3036,14 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
 		return dqm;
 	}

+	if (!dev->kfd->shared_resources.enable_mes)
+		deallocate_hiq_sdma_mqd(dev, &dqm->hiq_sdma_mqd);
+
 out_free:
 	kfree(dqm);
 	return NULL;
 }

-static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
-				    struct kfd_mem_obj *mqd)
-{
-	WARN(!mqd, "No hiq sdma mqd trunk to free");
-
-	amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
-}
-
 void device_queue_manager_uninit(struct device_queue_manager *dqm)
 {
 	dqm->ops.stop(dqm);

@@ -5193,6 +5193,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
 	struct amdgpu_dm_backlight_caps *caps;
 	char bl_name[16];
 	int min, max;
+	int real_brightness;
+	int init_brightness;

 	if (aconnector->bl_idx == -1)
 		return;
@@ -5217,6 +5219,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
 	} else
 		props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;

+	init_brightness = props.brightness;
+
 	if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
 		drm_info(drm, "Using custom brightness curve\n");
 		props.scale = BACKLIGHT_SCALE_NON_LINEAR;
@@ -5235,8 +5239,20 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
 	if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
 		drm_err(drm, "DM: Backlight registration failed!\n");
 		dm->backlight_dev[aconnector->bl_idx] = NULL;
-	} else
+	} else {
+		/*
+		 * dm->brightness[x] can be inconsistent just after startup until
+		 * ops.get_brightness is called.
+		 */
+		real_brightness =
+			amdgpu_dm_backlight_ops.get_brightness(dm->backlight_dev[aconnector->bl_idx]);
+
+		if (real_brightness != init_brightness) {
+			dm->actual_brightness[aconnector->bl_idx] = real_brightness;
+			dm->brightness[aconnector->bl_idx] = real_brightness;
+		}
 		drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
+	}
 }

 static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -5545,7 +5561,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)

 	if (psr_feature_enabled) {
 		amdgpu_dm_set_psr_caps(link);
-		drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+		drm_info(adev_to_drm(adev), "%s: PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+			 aconnector->base.name,
 			 link->psr_settings.psr_feature_enabled,
 			 link->psr_settings.psr_version,
 			 link->dpcd_caps.psr_info.psr_version,

@@ -41,7 +41,7 @@
 /* kHZ*/
 #define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
 /* kHZ*/
-#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 340000

 struct dp_hdmi_dongle_signature_data {
 	int8_t id[15];/* "DP-HDMI ADAPTOR"*/
@@ -332,7 +332,7 @@ static void query_dp_dual_mode_adaptor(

 	/* Assume we have no valid DP passive dongle connected */
 	*dongle = DISPLAY_DONGLE_NONE;
-	sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+	sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;

 	/* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
 	if (!i2c_read(
@@ -388,6 +388,8 @@ static void query_dp_dual_mode_adaptor(

 		}
 	}
+	if (is_valid_hdmi_signature)
+		sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;

 	if (is_type2_dongle) {
 		uint32_t max_tmds_clk =

@@ -1701,8 +1701,9 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
int16_t od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
uint32_t power_limit;

if (smu_v14_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?

@@ -143,6 +143,7 @@ struct dw_hdmi_qp {
} phy;

struct regmap *regm;
int main_irq;

unsigned long tmds_char_rate;
};
@@ -1068,6 +1069,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,

dw_hdmi_qp_init_hw(hdmi);

hdmi->main_irq = plat_data->main_irq;
ret = devm_request_threaded_irq(dev, plat_data->main_irq,
dw_hdmi_qp_main_hardirq, NULL,
IRQF_SHARED, dev_name(dev), hdmi);
@@ -1106,9 +1108,16 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_bind);

void dw_hdmi_qp_suspend(struct device *dev, struct dw_hdmi_qp *hdmi)
{
disable_irq(hdmi->main_irq);
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_suspend);

void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi)
{
dw_hdmi_qp_init_hw(hdmi);
enable_irq(hdmi->main_irq);
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_resume);


@@ -457,27 +457,20 @@ int gud_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
struct drm_crtc_state *crtc_state = NULL;
const struct drm_display_mode *mode;
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct drm_connector_state *connector_state = NULL;
struct drm_framebuffer *fb = new_plane_state->fb;
const struct drm_format_info *format = fb->format;
const struct drm_format_info *format;
struct drm_connector *connector;
unsigned int i, num_properties;
struct gud_state_req *req;
int idx, ret;
size_t len;

if (drm_WARN_ON_ONCE(plane->dev, !fb))
return -EINVAL;

if (drm_WARN_ON_ONCE(plane->dev, !crtc))
return -EINVAL;

crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

mode = &crtc_state->mode;
if (crtc)
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
@@ -492,6 +485,9 @@ int gud_plane_atomic_check(struct drm_plane *plane,
if (old_plane_state->rotation != new_plane_state->rotation)
crtc_state->mode_changed = true;

mode = &crtc_state->mode;
format = fb->format;

if (old_fb && old_fb->format != format)
crtc_state->mode_changed = true;

@@ -598,7 +594,7 @@ void gud_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_helper_damage_iter iter;
int ret, idx;

if (crtc->state->mode_changed || !crtc->state->enable) {
if (!crtc || crtc->state->mode_changed || !crtc->state->enable) {
cancel_work_sync(&gdrm->work);
mutex_lock(&gdrm->damage_lock);
if (gdrm->fb) {

@@ -84,6 +84,7 @@ curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
asyh->curs.handle = handle;
asyh->curs.offset = offset;
asyh->set.curs = asyh->curs.visible;
nv50_atom(asyh->state.state)->lock_core = true;
}
}


@@ -623,8 +623,61 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
if (IS_ERR(desc))
return ERR_CAST(desc);

connector_type = desc->connector_type;
/* Catch common mistakes for panels. */
switch (connector_type) {
case 0:
dev_warn(dev, "Specify missing connector_type\n");
connector_type = DRM_MODE_CONNECTOR_DPI;
break;
case DRM_MODE_CONNECTOR_LVDS:
WARN_ON(desc->bus_flags &
~(DRM_BUS_FLAG_DE_LOW |
DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_DATA_MSB_TO_LSB |
DRM_BUS_FLAG_DATA_LSB_TO_MSB));
WARN_ON(desc->bus_format != MEDIA_BUS_FMT_RGB666_1X7X3_SPWG &&
desc->bus_format != MEDIA_BUS_FMT_RGB888_1X7X4_SPWG &&
desc->bus_format != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA);
WARN_ON(desc->bus_format == MEDIA_BUS_FMT_RGB666_1X7X3_SPWG &&
desc->bpc != 6);
WARN_ON((desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG ||
desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA) &&
desc->bpc != 8);
break;
case DRM_MODE_CONNECTOR_eDP:
dev_warn(dev, "eDP panels moved to panel-edp\n");
return ERR_PTR(-EINVAL);
case DRM_MODE_CONNECTOR_DSI:
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
break;
case DRM_MODE_CONNECTOR_DPI:
bus_flags = DRM_BUS_FLAG_DE_LOW |
DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE |
DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
DRM_BUS_FLAG_DATA_MSB_TO_LSB |
DRM_BUS_FLAG_DATA_LSB_TO_MSB |
DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE |
DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
if (desc->bus_flags & ~bus_flags)
dev_warn(dev, "Unexpected bus_flags(%d)\n", desc->bus_flags & ~bus_flags);
if (!(desc->bus_flags & bus_flags))
dev_warn(dev, "Specify missing bus_flags\n");
if (desc->bus_format == 0)
dev_warn(dev, "Specify missing bus_format\n");
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
break;
default:
dev_warn(dev, "Specify a valid connector_type: %d\n", desc->connector_type);
connector_type = DRM_MODE_CONNECTOR_DPI;
break;
}

panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
&panel_simple_funcs, desc->connector_type);
&panel_simple_funcs, connector_type);
if (IS_ERR(panel))
return ERR_CAST(panel);

@@ -666,60 +719,6 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
goto free_ddc;
}

connector_type = desc->connector_type;
/* Catch common mistakes for panels. */
switch (connector_type) {
case 0:
dev_warn(dev, "Specify missing connector_type\n");
connector_type = DRM_MODE_CONNECTOR_DPI;
break;
case DRM_MODE_CONNECTOR_LVDS:
WARN_ON(desc->bus_flags &
~(DRM_BUS_FLAG_DE_LOW |
DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_DATA_MSB_TO_LSB |
DRM_BUS_FLAG_DATA_LSB_TO_MSB));
WARN_ON(desc->bus_format != MEDIA_BUS_FMT_RGB666_1X7X3_SPWG &&
desc->bus_format != MEDIA_BUS_FMT_RGB888_1X7X4_SPWG &&
desc->bus_format != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA);
WARN_ON(desc->bus_format == MEDIA_BUS_FMT_RGB666_1X7X3_SPWG &&
desc->bpc != 6);
WARN_ON((desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG ||
desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA) &&
desc->bpc != 8);
break;
case DRM_MODE_CONNECTOR_eDP:
dev_warn(dev, "eDP panels moved to panel-edp\n");
err = -EINVAL;
goto free_ddc;
case DRM_MODE_CONNECTOR_DSI:
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
break;
case DRM_MODE_CONNECTOR_DPI:
bus_flags = DRM_BUS_FLAG_DE_LOW |
DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE |
DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
DRM_BUS_FLAG_DATA_MSB_TO_LSB |
DRM_BUS_FLAG_DATA_LSB_TO_MSB |
DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE |
DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
if (desc->bus_flags & ~bus_flags)
dev_warn(dev, "Unexpected bus_flags(%d)\n", desc->bus_flags & ~bus_flags);
if (!(desc->bus_flags & bus_flags))
dev_warn(dev, "Specify missing bus_flags\n");
if (desc->bus_format == 0)
dev_warn(dev, "Specify missing bus_format\n");
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
break;
default:
dev_warn(dev, "Specify a valid connector_type: %d\n", desc->connector_type);
connector_type = DRM_MODE_CONNECTOR_DPI;
break;
}

dev_set_drvdata(dev, panel);

/*
@@ -1900,6 +1899,7 @@ static const struct panel_desc dataimage_scf0700c48ggu18 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_DPI,
};

static const struct display_timing dlc_dlc0700yzg_1_timing = {

@@ -597,6 +597,15 @@ static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev)
component_del(&pdev->dev, &dw_hdmi_qp_rockchip_ops);
}

static int __maybe_unused dw_hdmi_qp_rockchip_suspend(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);

dw_hdmi_qp_suspend(dev, hdmi->hdmi);

return 0;
}

static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
@@ -612,7 +621,8 @@ static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
}

static const struct dev_pm_ops dw_hdmi_qp_rockchip_pm = {
SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_qp_rockchip_resume)
SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_qp_rockchip_suspend,
dw_hdmi_qp_rockchip_resume)
};

struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver = {

@@ -2104,7 +2104,7 @@ static void rk3568_vop2_wait_for_port_mux_done(struct vop2 *vop2)
* Spin until the previous port_mux configuration is done.
*/
ret = readx_poll_timeout_atomic(rk3568_vop2_read_port_mux, vop2, port_mux_sel,
port_mux_sel == vop2->old_port_sel, 0, 50 * 1000);
port_mux_sel == vop2->old_port_sel, 10, 50 * 1000);
if (ret)
DRM_DEV_ERROR(vop2->dev, "wait port_mux done timeout: 0x%x--0x%x\n",
port_mux_sel, vop2->old_port_sel);
@@ -2124,7 +2124,7 @@ static void rk3568_vop2_wait_for_layer_cfg_done(struct vop2 *vop2, u32 cfg)
* Spin until the previous layer configuration is done.
*/
ret = readx_poll_timeout_atomic(rk3568_vop2_read_layer_cfg, vop2, atv_layer_cfg,
atv_layer_cfg == cfg, 0, 50 * 1000);
atv_layer_cfg == cfg, 10, 50 * 1000);
if (ret)
DRM_DEV_ERROR(vop2->dev, "wait layer cfg done timeout: 0x%x--0x%x\n",
atv_layer_cfg, cfg);
@@ -2144,6 +2144,7 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
u8 layer_sel_id;
unsigned int ofs;
u32 ovl_ctrl;
u32 cfg_done;
int i;
struct vop2_video_port *vp0 = &vop2->vps[0];
struct vop2_video_port *vp1 = &vop2->vps[1];
@@ -2298,8 +2299,16 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
rk3568_vop2_wait_for_port_mux_done(vop2);
}

if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel)
rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel) {
cfg_done = vop2_readl(vop2, RK3568_REG_CFG_DONE);
cfg_done &= (BIT(vop2->data->nr_vps) - 1);
cfg_done &= ~BIT(vp->id);
/*
* Changes of other VPs' overlays have not taken effect
*/
if (cfg_done)
rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
}

vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
mutex_unlock(&vop2->ovl_lock);

@@ -47,15 +47,6 @@ const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
const struct screen_info *si);
#endif

/*
* Input parsing
*/

int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
u64 value, u32 max);
int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
u64 value, u32 max);

/*
* Display modes
*/

@@ -32,9 +32,15 @@

#include <drm/ttm/ttm_placement.h>

static void vmw_bo_release(struct vmw_bo *vbo)
/**
* vmw_bo_free - vmw_bo destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_resource *res;
struct vmw_bo *vbo = to_vmw_bo(&bo->base);

WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
@@ -62,20 +68,8 @@ static void vmw_bo_release(struct vmw_bo *vbo)
}
vmw_surface_unreference(&vbo->dumb_surface);
}
drm_gem_object_release(&vbo->tbo.base);
}

/**
* vmw_bo_free - vmw_bo destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);

WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
drm_gem_object_release(&vbo->tbo.base);
WARN_ON(vbo->dirty);
kfree(vbo);
}

@@ -763,13 +763,15 @@ err_out:
return ERR_PTR(ret);
}

ttm_bo_reserve(&bo->tbo, false, false, NULL);
ret = vmw_bo_dirty_add(bo);
if (!ret && surface && surface->res.func->dirty_alloc) {
surface->res.coherent = true;
ret = surface->res.func->dirty_alloc(&surface->res);
if (bo) {
ttm_bo_reserve(&bo->tbo, false, false, NULL);
ret = vmw_bo_dirty_add(bo);
if (!ret && surface && surface->res.func->dirty_alloc) {
surface->res.coherent = true;
ret = surface->res.func->dirty_alloc(&surface->res);
}
ttm_bo_unreserve(&bo->tbo);
}
ttm_bo_unreserve(&bo->tbo);

return &vfb->base;
}

@@ -923,8 +923,10 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
ttm_bo_unreserve(&buf->tbo);

res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
if (unlikely(ret != 0))
if (IS_ERR(res)) {
ret = PTR_ERR(res);
goto no_reserve;
}

ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
vmw_shader_key(user_key, shader_type),

@@ -628,7 +628,7 @@ static void recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
if (!ishtp_dev) {
ishtp_dev = dev;
}
schedule_work(&fw_reset_work);
queue_work(dev->unbound_wq, &fw_reset_work);
break;

case MNG_RESET_NOTIFY_ACK:
@@ -933,6 +933,25 @@ static const struct ishtp_hw_ops ish_hw_ops = {
.dma_no_cache_snooping = _dma_no_cache_snooping
};

static void ishtp_free_workqueue(void *wq)
{
destroy_workqueue(wq);
}

static struct workqueue_struct *devm_ishtp_alloc_workqueue(struct device *dev)
{
struct workqueue_struct *wq;

wq = alloc_workqueue("ishtp_unbound_%d", WQ_UNBOUND, 0, dev->id);
if (!wq)
return NULL;

if (devm_add_action_or_reset(dev, ishtp_free_workqueue, wq))
return NULL;

return wq;
}

/**
* ish_dev_init() - Initialize ISH device
* @pdev: PCI device
@@ -953,6 +972,10 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;

dev->unbound_wq = devm_ishtp_alloc_workqueue(&pdev->dev);
if (!dev->unbound_wq)
return NULL;

dev->devc = &pdev->dev;
ishtp_device_init(dev);


@@ -384,7 +384,7 @@ static int __maybe_unused ish_resume(struct device *device)
ish_resume_device = device;
dev->resume_flag = 1;

schedule_work(&resume_work);
queue_work(dev->unbound_wq, &resume_work);

return 0;
}

@@ -860,7 +860,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);

schedule_work(&client_data->work);
queue_work(ishtp_get_workqueue(cl_device), &client_data->work);

return 0;
}
@@ -902,7 +902,7 @@ static int hid_ishtp_cl_resume(struct device *device)

hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
schedule_work(&client_data->resume_work);
queue_work(ishtp_get_workqueue(cl_device), &client_data->resume_work);
return 0;
}


@@ -541,7 +541,7 @@ void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device)
return;

if (device->event_cb)
schedule_work(&device->event_work);
queue_work(device->ishtp_dev->unbound_wq, &device->event_work);
}

/**
@@ -876,6 +876,22 @@ struct device *ishtp_get_pci_device(struct ishtp_cl_device *device)
}
EXPORT_SYMBOL(ishtp_get_pci_device);

/**
* ishtp_get_workqueue - Retrieve the workqueue associated with an ISHTP device
* @cl_device: Pointer to the ISHTP client device structure
*
* Returns the workqueue_struct pointer (unbound_wq) associated with the given
* ISHTP client device. This workqueue is typically used for scheduling work
* related to the device.
*
* Return: Pointer to struct workqueue_struct.
*/
struct workqueue_struct *ishtp_get_workqueue(struct ishtp_cl_device *cl_device)
{
return cl_device->ishtp_dev->unbound_wq;
}
EXPORT_SYMBOL(ishtp_get_workqueue);

/**
* ishtp_trace_callback() - Return trace callback
* @cl_device: ISH-TP client device instance

@@ -573,7 +573,7 @@ void ishtp_hbm_dispatch(struct ishtp_device *dev,

/* Start firmware loading process if it has loader capability */
if (version_res->host_version_supported & ISHTP_SUPPORT_CAP_LOADER)
schedule_work(&dev->work_fw_loader);
queue_work(dev->unbound_wq, &dev->work_fw_loader);

dev->version.major_version = HBM_MAJOR_VERSION;
dev->version.minor_version = HBM_MINOR_VERSION;
@@ -864,7 +864,7 @@ void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr)
dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
schedule_work(&dev->bh_hbm_work);
queue_work(dev->unbound_wq, &dev->bh_hbm_work);
eoi:
return;
}

@@ -175,6 +175,9 @@ struct ishtp_device {
struct hbm_version version;
int transfer_path; /* Choice of transfer path: IPC or DMA */

/* Alloc a dedicated unbound workqueue for ishtp device */
struct workqueue_struct *unbound_wq;

/* work structure for scheduling firmware loading tasks */
struct work_struct work_fw_loader;
/* waitq for waiting for command response from the firmware loader */

@@ -985,6 +985,7 @@ static int usbhid_parse(struct hid_device *hid)
struct usb_device *dev = interface_to_usbdev (intf);
struct hid_descriptor *hdesc;
struct hid_class_descriptor *hcdesc;
__u8 fixed_opt_descriptors_size;
u32 quirks = 0;
unsigned int rsize = 0;
char *rdesc;
@@ -1015,7 +1016,21 @@ static int usbhid_parse(struct hid_device *hid)
(hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) {
dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n",
hdesc->bLength, hdesc->bNumDescriptors);
return -EINVAL;

/*
* Some devices may expose a wrong number of descriptors compared
* to the provided length.
* However, we ignore the optional hid class descriptors entirely
* so we can safely recompute the proper field.
*/
if (hdesc->bLength >= sizeof(*hdesc)) {
fixed_opt_descriptors_size = hdesc->bLength - sizeof(*hdesc);

hid_warn(intf, "fixing wrong optional hid class descriptors count\n");
hdesc->bNumDescriptors = fixed_opt_descriptors_size / sizeof(*hcdesc) + 1;
} else {
return -EINVAL;
}
}

hid->version = le16_to_cpu(hdesc->bcdHID);

@@ -592,6 +592,13 @@ static bool is_use_dma(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msg)
if (!lpi2c_imx->can_use_dma)
return false;

/*
* A system-wide suspend or resume transition is in progress. LPI2C should use PIO to
* transfer data to avoid issues caused by DMA HW resources that are not ready.
*/
if (pm_suspend_in_progress())
return false;

/*
* When the length of data is less than I2C_DMA_THRESHOLD,
* cpu mode is used directly to avoid low performance.

@@ -97,6 +97,7 @@ struct geni_i2c_dev {
dma_addr_t dma_addr;
struct dma_chan *tx_c;
struct dma_chan *rx_c;
bool no_dma;
bool gpi_mode;
bool abort_done;
};
@@ -425,7 +426,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
size_t len = msg->len;
struct i2c_msg *cur;

dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
dma_buf = gi2c->no_dma ? NULL : i2c_get_dma_safe_msg_buf(msg, 32);
if (dma_buf)
geni_se_select_mode(se, GENI_SE_DMA);
else
@@ -464,7 +465,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
size_t len = msg->len;
struct i2c_msg *cur;

dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
dma_buf = gi2c->no_dma ? NULL : i2c_get_dma_safe_msg_buf(msg, 32);
if (dma_buf)
geni_se_select_mode(se, GENI_SE_DMA);
else
@@ -880,10 +881,12 @@ static int geni_i2c_probe(struct platform_device *pdev)
goto err_resources;
}

if (desc && desc->no_dma_support)
if (desc && desc->no_dma_support) {
fifo_disable = false;
else
gi2c->no_dma = true;
} else {
fifo_disable = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
}

if (fifo_disable) {
/* FIFO is disabled, so we can only use GPI DMA */

@@ -670,12 +670,39 @@ static const struct riic_of_data riic_rz_t2h_info = {

static int riic_i2c_suspend(struct device *dev)
{
struct riic_dev *riic = dev_get_drvdata(dev);
int ret;
/*
* Some I2C devices may need the I2C controller to remain active
* during resume_noirq() or suspend_noirq(). If the controller is
* autosuspended, there is no way to wake it up once runtime PM is
* disabled (in suspend_late()).
*
* During system resume, the I2C controller will be available only
* after runtime PM is re-enabled (in resume_early()). However, this
* may be too late for some devices.
*
* Wake up the controller in the suspend() callback while runtime PM
* is still enabled. The I2C controller will remain available until
* the suspend_noirq() callback (pm_runtime_force_suspend()) is
* called. During resume, the I2C controller can be restored by the
* resume_noirq() callback (pm_runtime_force_resume()).
*
* Finally, the resume() callback re-enables autosuspend, ensuring
* the I2C controller remains available until the system enters
* suspend_noirq() and from resume_noirq().
*/
return pm_runtime_resume_and_get(dev);
}

ret = pm_runtime_resume_and_get(dev);
if (ret)
return ret;
static int riic_i2c_resume(struct device *dev)
{
pm_runtime_put_autosuspend(dev);

return 0;
}

static int riic_i2c_suspend_noirq(struct device *dev)
{
struct riic_dev *riic = dev_get_drvdata(dev);

i2c_mark_adapter_suspended(&riic->adapter);

@@ -683,12 +710,12 @@ static int riic_i2c_suspend(struct device *dev)
riic_clear_set_bit(riic, ICCR1_ICE, 0, RIIC_ICCR1);

pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync(dev);
pm_runtime_force_suspend(dev);

return reset_control_assert(riic->rstc);
}

static int riic_i2c_resume(struct device *dev)
static int riic_i2c_resume_noirq(struct device *dev)
{
struct riic_dev *riic = dev_get_drvdata(dev);
int ret;
@@ -697,6 +724,10 @@ static int riic_i2c_resume(struct device *dev)
if (ret)
return ret;

ret = pm_runtime_force_resume(dev);
if (ret)
return ret;

ret = riic_init_hw(riic);
if (ret) {
/*
@@ -714,6 +745,7 @@ static int riic_i2c_resume(struct device *dev)
}

static const struct dev_pm_ops riic_i2c_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(riic_i2c_suspend_noirq, riic_i2c_resume_noirq)
SYSTEM_SLEEP_PM_OPS(riic_i2c_suspend, riic_i2c_resume)
};


@@ -3,6 +3,7 @@
* Helpers for IOMMU drivers implementing SVA
*/
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>
@@ -10,6 +11,8 @@
#include "iommu-priv.h"

static DEFINE_MUTEX(iommu_sva_lock);
static bool iommu_sva_present;
static LIST_HEAD(iommu_sva_mms);
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
struct mm_struct *mm);

@@ -42,6 +45,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
return ERR_PTR(-ENOSPC);
}
iommu_mm->pasid = pasid;
iommu_mm->mm = mm;
INIT_LIST_HEAD(&iommu_mm->sva_domains);
/*
* Make sure the write to mm->iommu_mm is not reordered in front of
@@ -77,9 +81,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
if (!group)
return ERR_PTR(-ENODEV);

if (IS_ENABLED(CONFIG_X86))
return ERR_PTR(-EOPNOTSUPP);

mutex_lock(&iommu_sva_lock);

/* Allocate mm->pasid if necessary. */
@@ -135,8 +136,13 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
if (ret)
goto out_free_domain;
domain->users = 1;
list_add(&domain->next, &mm->iommu_mm->sva_domains);

if (list_empty(&iommu_mm->sva_domains)) {
if (list_empty(&iommu_sva_mms))
iommu_sva_present = true;
list_add(&iommu_mm->mm_list_elm, &iommu_sva_mms);
}
list_add(&domain->next, &iommu_mm->sva_domains);
out:
refcount_set(&handle->users, 1);
mutex_unlock(&iommu_sva_lock);
@@ -178,6 +184,13 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
list_del(&domain->next);
iommu_domain_free(domain);
}

if (list_empty(&iommu_mm->sva_domains)) {
list_del(&iommu_mm->mm_list_elm);
if (list_empty(&iommu_sva_mms))
iommu_sva_present = false;
}

mutex_unlock(&iommu_sva_lock);
kfree(handle);
}
@@ -315,3 +328,15 @@ static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,

return domain;
}

void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end)
{
struct iommu_mm_data *iommu_mm;

guard(mutex)(&iommu_sva_lock);
if (!iommu_sva_present)
return;

list_for_each_entry(iommu_mm, &iommu_sva_mms, mm_list_elm)
mmu_notifier_arch_invalidate_secondary_tlbs(iommu_mm->mm, start, end);
}

@@ -310,7 +310,7 @@ static int ctucan_set_secondary_sample_point(struct net_device *ndev)
}

ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset);
ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1);
ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x0);
}

ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg);

@@ -1736,7 +1736,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n",
__func__, i, rx_buf_len);

return ret;
return 0;
}

/**

@@ -751,6 +751,8 @@ resubmit_urb:
hf, parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);

usb_anchor_urb(urb, &parent->rx_submitted);

rc = usb_submit_urb(urb, GFP_ATOMIC);

/* USB failure take down all interfaces */

@@ -218,7 +218,7 @@ static int octep_vf_request_irqs(struct octep_vf_device *oct)
ioq_irq_err:
while (i) {
--i;
free_irq(oct->msix_entries[i].vector, oct);
free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
}
return -1;
}

@@ -963,7 +963,7 @@ struct mlx5e_priv {
};

struct mlx5e_dev {
struct mlx5e_priv *priv;
struct net_device *netdev;
struct devlink_port dl_port;
};

@@ -1238,10 +1238,13 @@ struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
const struct mlx5e_profile *new_profile, void *new_ppriv);
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct net_device *netdev);
int mlx5e_netdev_change_profile(struct net_device *netdev,
struct mlx5_core_dev *mdev,
const struct mlx5e_profile *new_profile,
void *new_ppriv);
void mlx5e_netdev_attach_nic_profile(struct net_device *netdev,
struct mlx5_core_dev *mdev);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);


@@ -6305,6 +6305,7 @@ err_free_cpumask:

void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
{
bool destroying = test_bit(MLX5E_STATE_DESTROYING, &priv->state);
int i;

/* bail if change profile failed and also rollback failed */
@@ -6332,6 +6333,8 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
}

memset(priv, 0, sizeof(*priv));
if (destroying) /* restore destroying bit, to allow unload */
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
}

static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
@@ -6564,19 +6567,28 @@ profile_cleanup:
return err;
}

int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
const struct mlx5e_profile *new_profile, void *new_ppriv)
int mlx5e_netdev_change_profile(struct net_device *netdev,
struct mlx5_core_dev *mdev,
const struct mlx5e_profile *new_profile,
void *new_ppriv)
{
const struct mlx5e_profile *orig_profile = priv->profile;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
void *orig_ppriv = priv->ppriv;
struct mlx5e_priv *priv = netdev_priv(netdev);
const struct mlx5e_profile *orig_profile;
int err, rollback_err;
void *orig_ppriv;

/* cleanup old profile */
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
mlx5e_priv_cleanup(priv);
orig_profile = priv->profile;
orig_ppriv = priv->ppriv;

/* NULL could happen if previous change_profile failed to rollback */
if (priv->profile) {
WARN_ON_ONCE(priv->mdev != mdev);
/* cleanup old profile */
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
mlx5e_priv_cleanup(priv);
}
/* priv members are not valid from this point ... */

if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
@@ -6593,23 +6605,33 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
return 0;

rollback:
if (!orig_profile) {
netdev_warn(netdev, "no original profile to rollback to\n");
priv->profile = NULL;
return err;
}

rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
if (rollback_err)
netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
__func__, rollback_err);
if (rollback_err) {
netdev_err(netdev, "failed to rollback to orig profile, %d\n",
rollback_err);
priv->profile = NULL;
}
return err;
}

void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv)
void mlx5e_netdev_attach_nic_profile(struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL);
mlx5e_netdev_change_profile(netdev, mdev, &mlx5e_nic_profile, NULL);
}

void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
void mlx5e_destroy_netdev(struct net_device *netdev)
{
struct net_device *netdev = priv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);

mlx5e_priv_cleanup(priv);
if (priv->profile)
mlx5e_priv_cleanup(priv);
free_netdev(netdev);
}

@@ -6617,8 +6639,8 @@ static int _mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5e_priv *priv = netdev_priv(mlx5e_dev->netdev);
struct net_device *netdev = mlx5e_dev->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5_core_dev *pos, *to;
int err, i;
@@ -6664,10 +6686,11 @@ static int mlx5e_resume(struct auxiliary_device *adev)

static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_priv *priv = netdev_priv(mlx5e_dev->netdev);
struct net_device *netdev = mlx5e_dev->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5_core_dev *pos;
int i;

@@ -6728,11 +6751,11 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
goto err_devlink_port_unregister;
}
SET_NETDEV_DEVLINK_PORT(netdev, &mlx5e_dev->dl_port);
mlx5e_dev->netdev = netdev;

mlx5e_build_nic_netdev(netdev);

priv = netdev_priv(netdev);
mlx5e_dev->priv = priv;

priv->profile = profile;
priv->ppriv = NULL;
@@ -6765,7 +6788,7 @@ err_resume:
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
mlx5e_destroy_netdev(priv);
mlx5e_destroy_netdev(netdev);
err_devlink_port_unregister:
mlx5e_devlink_port_unregister(mlx5e_dev);
err_devlink_unregister:
@@ -6795,17 +6818,20 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = mlx5e_dev->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = edev->mdev;

mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);

if (priv->profile)
mlx5e_dcbnl_delete_app(priv);
/* When unload driver, the netdev is in registered state
* if it's from legacy mode. If from switchdev mode, it
* is already unregistered before changing to NIC profile.
*/
if (priv->netdev->reg_state == NETREG_REGISTERED) {
unregister_netdev(priv->netdev);
if (netdev->reg_state == NETREG_REGISTERED) {
unregister_netdev(netdev);
_mlx5e_suspend(adev, false);
} else {
struct mlx5_core_dev *pos;
@@ -6820,7 +6846,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
/* Avoid cleanup if profile rollback failed. */
if (priv->profile)
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_destroy_netdev(netdev);
mlx5e_devlink_port_unregister(mlx5e_dev);
mlx5e_destroy_devlink(mlx5e_dev);
}

@@ -1508,17 +1508,16 @@ mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *
{
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev;
struct mlx5e_priv *priv;
int err;

netdev = mlx5_uplink_netdev_get(dev);
if (!netdev)
return 0;

priv = netdev_priv(netdev);
rpriv->netdev = priv->netdev;
err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
rpriv);
/* must not use netdev_priv(netdev), it might not be initialized yet */
rpriv->netdev = netdev;
err = mlx5e_netdev_change_profile(netdev, dev,
&mlx5e_uplink_rep_profile, rpriv);
mlx5_uplink_netdev_put(dev, netdev);
return err;
}
@@ -1546,7 +1545,7 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
unregister_netdev(netdev);

mlx5e_netdev_attach_nic_profile(priv);
mlx5e_netdev_attach_nic_profile(netdev, priv->mdev);
}

static int
@@ -1612,7 +1611,7 @@ err_cleanup_profile:
priv->profile->cleanup(priv);

err_destroy_netdev:
mlx5e_destroy_netdev(netdev_priv(netdev));
mlx5e_destroy_netdev(netdev);
return err;
}

@@ -1667,7 +1666,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
mlx5e_rep_vnic_reporter_destroy(priv);
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_destroy_netdev(netdev);
free_ppriv:
kvfree(ppriv); /* mlx5e_rep_priv */
}

@@ -1757,6 +1757,9 @@ static int netvsc_set_rxfh(struct net_device *dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;

if (!ndc->rx_table_sz)
return -EOPNOTSUPP;

rndis_dev = ndev->extension;
if (rxfh->indir) {
for (i = 0; i < ndc->rx_table_sz; i++)

@@ -59,7 +59,7 @@ struct macvlan_port {

struct macvlan_source_entry {
struct hlist_node hlist;
struct macvlan_dev *vlan;
struct macvlan_dev __rcu *vlan;
unsigned char addr[6+2] __aligned(sizeof(u16));
struct rcu_head rcu;
};
@@ -146,7 +146,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source(

hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
if (ether_addr_equal_64bits(entry->addr, addr) &&
entry->vlan == vlan)
rcu_access_pointer(entry->vlan) == vlan)
return entry;
}
return NULL;
@@ -168,7 +168,7 @@ static int macvlan_hash_add_source(struct macvlan_dev *vlan,
return -ENOMEM;

ether_addr_copy(entry->addr, addr);
entry->vlan = vlan;
RCU_INIT_POINTER(entry->vlan, vlan);
h = &port->vlan_source_hash[macvlan_eth_hash(addr)];
hlist_add_head_rcu(&entry->hlist, h);
vlan->macaddr_count++;
@@ -187,6 +187,7 @@ static void macvlan_hash_add(struct macvlan_dev *vlan)

static void macvlan_hash_del_source(struct macvlan_source_entry *entry)
{
RCU_INIT_POINTER(entry->vlan, NULL);
hlist_del_rcu(&entry->hlist);
kfree_rcu(entry, rcu);
}
@@ -390,7 +391,7 @@ static void macvlan_flush_sources(struct macvlan_port *port,
int i;

hash_for_each_safe(port->vlan_source_hash, i, next, entry, hlist)
if (entry->vlan == vlan)
if (rcu_access_pointer(entry->vlan) == vlan)
macvlan_hash_del_source(entry);

vlan->macaddr_count = 0;
@@ -433,9 +434,14 @@ static bool macvlan_forward_source(struct sk_buff *skb,

hlist_for_each_entry_rcu(entry, h, hlist) {
if (ether_addr_equal_64bits(entry->addr, addr)) {
if (entry->vlan->flags & MACVLAN_FLAG_NODST)
struct macvlan_dev *vlan = rcu_dereference(entry->vlan);

if (!vlan)
continue;

if (vlan->flags & MACVLAN_FLAG_NODST)
consume = true;
macvlan_forward_source_one(skb, entry->vlan);
macvlan_forward_source_one(skb, vlan);
}
}

@@ -1680,7 +1686,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb,
struct macvlan_source_entry *entry;

hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
if (entry->vlan != vlan)
if (rcu_access_pointer(entry->vlan) != vlan)
continue;
if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
return 1;

@@ -1741,10 +1741,10 @@ static int yt8521_led_hw_control_set(struct phy_device *phydev, u8 index,
val |= YT8521_LED_1000_ON_EN;

if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
val |= YT8521_LED_HDX_ON_EN;
val |= YT8521_LED_FDX_ON_EN;

if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
val |= YT8521_LED_FDX_ON_EN;
val |= YT8521_LED_HDX_ON_EN;

if (test_bit(TRIGGER_NETDEV_TX, &rules) ||
test_bit(TRIGGER_NETDEV_RX, &rules))

@@ -425,9 +425,6 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
struct virtio_net_rss_config_hdr *rss_hdr;
struct virtio_net_rss_config_trailer rss_trailer;
u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];

/* Has control virtqueue */
bool has_cvq;
@@ -493,7 +490,16 @@ struct virtnet_info {
struct failover *failover;

u64 device_stats_cap;

struct virtio_net_rss_config_hdr *rss_hdr;

/* Must be last as it ends in a flexible-array member. */
TRAILING_OVERLAP(struct virtio_net_rss_config_trailer, rss_trailer, hash_key_data,
u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
);
};
static_assert(offsetof(struct virtnet_info, rss_trailer.hash_key_data) ==
offsetof(struct virtnet_info, rss_hash_key_data));

struct padded_vnet_hdr {
struct virtio_net_hdr_v1_hash hdr;
@@ -3031,16 +3037,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
else
packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);

u64_stats_set(&stats.packets, packets);
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
spin_lock(&vi->refill_lock);
if (vi->refill_enabled)
schedule_delayed_work(&vi->refill, 0);
spin_unlock(&vi->refill_lock);
}
if (!try_fill_recv(vi, rq, GFP_ATOMIC))
/* We need to retry refilling in the next NAPI poll so
* we must return budget to make sure the NAPI is
* repolled.
*/
packets = budget;
}

u64_stats_set(&stats.packets, packets);
u64_stats_update_begin(&rq->stats.syncp);
for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
size_t offset = virtnet_rq_stats_desc[i].offset;
@@ -3220,9 +3226,10 @@ static int virtnet_open(struct net_device *dev)

for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
/* Pre-fill rq aggressively, to make sure we are ready to
* get packets immediately.
*/
try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);

err = virtnet_enable_queue_pair(vi, i);
if (err < 0)
@@ -3467,16 +3474,15 @@ static void __virtnet_rx_resume(struct virtnet_info *vi,
struct receive_queue *rq,
bool refill)
{
bool running = netif_running(vi->dev);
bool schedule_refill = false;
if (netif_running(vi->dev)) {
/* Pre-fill rq aggressively, to make sure we are ready to get
* packets immediately.
*/
if (refill)
try_fill_recv(vi, rq, GFP_KERNEL);

if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
schedule_refill = true;
if (running)
virtnet_napi_enable(rq);

if (schedule_refill)
schedule_delayed_work(&vi->refill, 0);
}
}

static void virtnet_rx_resume_all(struct virtnet_info *vi)
@@ -3821,11 +3827,12 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
}
succ:
vi->curr_queue_pairs = queue_pairs;
/* virtnet_open() will refill when device is going to up. */
spin_lock_bh(&vi->refill_lock);
if (dev->flags & IFF_UP && vi->refill_enabled)
schedule_delayed_work(&vi->refill, 0);
spin_unlock_bh(&vi->refill_lock);
if (dev->flags & IFF_UP) {
local_bh_disable();
for (int i = 0; i < vi->curr_queue_pairs; ++i)
virtqueue_napi_schedule(&vi->rq[i].napi, vi->rq[i].vq);
local_bh_enable();
}

return 0;
}

@@ -1703,6 +1703,7 @@ static const struct apple_nvme_hw apple_nvme_t8103_hw = {

static const struct of_device_id apple_nvme_of_match[] = {
{ .compatible = "apple,t8015-nvme-ans2", .data = &apple_nvme_t8015_hw },
{ .compatible = "apple,t8103-nvme-ans2", .data = &apple_nvme_t8103_hw },
{ .compatible = "apple,nvme-ans2", .data = &apple_nvme_t8103_hw },
{},
};

@@ -1461,7 +1461,10 @@ static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl)
}

writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);
nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE);

if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
goto unlock;

/*
* Read controller status to flush the previous write and trigger a
@@ -3917,6 +3920,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x1fa0, 0x2283), /* Wodposit WPBSNM8-256GTP */
.driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
{ PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */

@@ -982,6 +982,18 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
goto err_proto;
}
/*
* Ensure command data structures are initialized. We must check both
* cmd->req.sg and cmd->iov because they can have different NULL states:
* - Uninitialized commands: both NULL
* - READ commands: cmd->req.sg allocated, cmd->iov NULL
* - WRITE commands: both allocated
*/
if (unlikely(!cmd->req.sg || !cmd->iov)) {
pr_err("queue %d: H2CData PDU received for invalid command state (ttag %u)\n",
queue->idx, data->ttag);
goto err_proto;
}
cmd->pdu_recv = 0;
nvmet_tcp_build_pdu_iovec(cmd);
queue->cmd = cmd;

@@ -207,12 +207,6 @@ config PCI_P2PDMA
P2P DMA transactions must be between devices behind the same root
port.

Enabling this option will reduce the entropy of x86 KASLR memory
regions. For example - on a 46 bit system, the entropy goes down
from 16 bits to 15 bits. The actual reduction in entropy depends
on the physical address bits, on processor features, kernel config
(5 level page table) and physical memory present on the system.

If unsure, say N.

config PCI_LABEL

@@ -203,7 +203,7 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
usb3->dev = dev;
usb3->mdiodev = mdiodev;

usb3->family = (enum bcm_ns_family)device_get_match_data(dev);
usb3->family = (unsigned long)device_get_match_data(dev);

syscon_np = of_parse_phandle(dev->of_node, "usb3-dmp-syscon", 0);
err = of_address_to_resource(syscon_np, 0, &res);

@@ -89,7 +89,8 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
writel(imx8_phy->tx_deemph_gen2,
imx8_phy->base + PCIE_PHY_TRSV_REG6);
break;
case IMX8MP: /* Do nothing. */
case IMX8MP:
reset_control_assert(imx8_phy->reset);
break;
}


@@ -124,8 +124,6 @@ struct imx8mq_usb_phy {
static void tca_blk_orientation_set(struct tca_blk *tca,
enum typec_orientation orientation);

#ifdef CONFIG_TYPEC

static int tca_blk_typec_switch_set(struct typec_switch_dev *sw,
enum typec_orientation orientation)
{
@@ -173,18 +171,6 @@ static void tca_blk_put_typec_switch(struct typec_switch_dev *sw)
typec_switch_unregister(sw);
}

#else

static struct typec_switch_dev *tca_blk_get_typec_switch(struct platform_device *pdev,
struct imx8mq_usb_phy *imx_phy)
{
return NULL;
}

static void tca_blk_put_typec_switch(struct typec_switch_dev *sw) {}

#endif /* CONFIG_TYPEC */

static void tca_blk_orientation_set(struct tca_blk *tca,
enum typec_orientation orientation)
{
@@ -502,6 +488,7 @@ static void imx8m_phy_tune(struct imx8mq_usb_phy *imx_phy)

if (imx_phy->pcs_tx_swing_full != PHY_TUNE_DEFAULT) {
value = readl(imx_phy->base + PHY_CTRL5);
value &= ~PHY_CTRL5_PCS_TX_SWING_FULL_MASK;
value |= FIELD_PREP(PHY_CTRL5_PCS_TX_SWING_FULL_MASK,
imx_phy->pcs_tx_swing_full);
writel(value, imx_phy->base + PHY_CTRL5);

@@ -1093,29 +1093,29 @@ static int qusb2_phy_probe(struct platform_device *pdev)
or->hsdisc_trim.override = true;
}

pm_runtime_set_active(dev);
pm_runtime_enable(dev);
dev_set_drvdata(dev, qphy);

/*
* Prevent runtime pm from being ON by default. Users can enable
* it using power/control in sysfs.
* Enable runtime PM support, but forbid it by default.
* Users can allow it again via the power/control attribute in sysfs.
*/
pm_runtime_set_active(dev);
pm_runtime_forbid(dev);
ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;

generic_phy = devm_phy_create(dev, NULL, &qusb2_phy_gen_ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
dev_err(dev, "failed to create phy, %d\n", ret);
pm_runtime_disable(dev);
return ret;
}
qphy->phy = generic_phy;

dev_set_drvdata(dev, qphy);
phy_set_drvdata(generic_phy, qphy);

phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider))
pm_runtime_disable(dev);

return PTR_ERR_OR_ZERO(phy_provider);
}

@@ -821,17 +821,20 @@ static void rockchip_chg_detect_work(struct work_struct *work)
container_of(work, struct rockchip_usb2phy_port, chg_work.work);
struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
struct regmap *base = get_reg_base(rphy);
bool is_dcd, tmout, vout;
bool is_dcd, tmout, vout, vbus_attach;
unsigned long delay;

vbus_attach = property_enabled(rphy->grf, &rport->port_cfg->utmi_bvalid);

dev_dbg(&rport->phy->dev, "chg detection work state = %d\n",
rphy->chg_state);
switch (rphy->chg_state) {
case USB_CHG_STATE_UNDEFINED:
if (!rport->suspended)
if (!rport->suspended && !vbus_attach)
rockchip_usb2phy_power_off(rport->phy);
/* put the controller in non-driving mode */
property_enable(base, &rphy->phy_cfg->chg_det.opmode, false);
if (!vbus_attach)
property_enable(base, &rphy->phy_cfg->chg_det.opmode, false);
/* Start DCD processing stage 1 */
rockchip_chg_enable_dcd(rphy, true);
rphy->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
@@ -894,7 +897,8 @@ static void rockchip_chg_detect_work(struct work_struct *work)
fallthrough;
case USB_CHG_STATE_DETECTED:
/* put the controller in normal mode */
property_enable(base, &rphy->phy_cfg->chg_det.opmode, true);
if (!vbus_attach)
property_enable(base, &rphy->phy_cfg->chg_det.opmode, true);
rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
dev_dbg(&rport->phy->dev, "charger = %s\n",
chg_to_string(rphy->chg_type));
@@ -1491,7 +1495,7 @@ next_child:
rphy);
if (ret) {
dev_err_probe(rphy->dev, ret, "failed to request usb2phy irq handle\n");
goto put_child;
return ret;
}
}


@@ -712,7 +712,7 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
}

ret = of_property_read_u32(child, "reg", &index);
if (ret || index > usbphyc->nphys) {
if (ret || index >= usbphyc->nphys) {
dev_err(&phy->dev, "invalid reg property: %d\n", ret);
if (!ret)
ret = -EINVAL;