updating to mainline 4.12.6

Jake Day 2017-08-12 17:43:06 -04:00
parent ebfef21d22
commit bf5982449c
112 changed files with 952 additions and 629 deletions

View file

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 12
-SUBLEVEL = 5
+SUBLEVEL = 6
 EXTRAVERSION =
 NAME = Fearless Coyote

View file

@@ -75,7 +75,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pca0_pins>;
 	interrupt-parent = <&gpio0>;
-	interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+	interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 	gpio-controller;
 	#gpio-cells = <2>;
 	interrupt-controller;
@@ -87,7 +87,7 @@
 	compatible = "nxp,pca9555";
 	pinctrl-names = "default";
 	interrupt-parent = <&gpio0>;
-	interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+	interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 	gpio-controller;
 	#gpio-cells = <2>;
 	interrupt-controller;

View file

@@ -22,7 +22,7 @@
 };
 &eth0 {
-	phy-connection-type = "rgmii";
+	phy-connection-type = "rgmii-id";
 	phy-handle = <&eth0_phy>;
 	#address-cells = <1>;
 	#size-cells = <0>;

View file

@@ -211,7 +211,7 @@ static int mv98dx3236_resume_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
 		return PTR_ERR(base);
 	writel(0, base + MV98DX3236_CPU_RESUME_CTRL_REG);
-	writel(virt_to_phys(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
+	writel(__pa_symbol(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
 	iounmap(base);

View file

@@ -219,7 +219,7 @@
 	reg = <0x18800 0x100>, <0x18C00 0x20>;
 	gpiosb: gpio {
 		#gpio-cells = <2>;
-		gpio-ranges = <&pinctrl_sb 0 0 29>;
+		gpio-ranges = <&pinctrl_sb 0 0 30>;
 		gpio-controller;
 		interrupts =
 		<GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,

View file

@@ -13,6 +13,8 @@
 #ifndef _RALINK_REGS_H_
 #define _RALINK_REGS_H_
 
+#include <linux/io.h>
+
 enum ralink_soc_type {
 	RALINK_UNKNOWN = 0,
 	RT2880_SOC,

View file

@@ -34,7 +34,7 @@ struct thread_info {
 /* thread information allocation */
-#define THREAD_SIZE_ORDER	2 /* PA-RISC requires at least 16k stack */
+#define THREAD_SIZE_ORDER	3 /* PA-RISC requires at least 32k stack */
 /* Be sure to hunt all references to this down when you change the size of
  * the kernel stack */
 #define THREAD_SIZE	(PAGE_SIZE << THREAD_SIZE_ORDER)

View file

@@ -587,13 +587,12 @@ void flush_cache_range(struct vm_area_struct *vma,
 	if (parisc_requires_coherency())
 		flush_tlb_range(vma, start, end);
 
-	if ((end - start) >= parisc_cache_flush_threshold) {
+	if ((end - start) >= parisc_cache_flush_threshold
+	    || vma->vm_mm->context != mfsp(3)) {
 		flush_cache_all();
 		return;
 	}
 
-	BUG_ON(vma->vm_mm->context != mfsp(3));
-
 	flush_user_dcache_range_asm(start, end);
 	if (vma->vm_flags & VM_EXEC)
 		flush_user_icache_range_asm(start, end);

View file

@@ -380,7 +380,7 @@ static inline int eirr_to_irq(unsigned long eirr)
 /*
  * IRQ STACK - used for irq handler
  */
-#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
+#define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */
 
 union irq_stack_union {
 	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
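Both parisc bumps above follow the same power-of-two arithmetic: size = base << shift, so raising the shift from 2 to 3 doubles 16k to 32k. A minimal standalone sketch (assuming the usual 4 KiB PAGE_SIZE for THREAD_SIZE; IRQ_STACK_SIZE hard-codes 4096 itself):

    #include <stdio.h>

    int main(void)
    {
            unsigned long base = 4096;      /* 4 KiB, as in the macros above */

            /* shift 2 -> 16 KiB (old), shift 3 -> 32 KiB (new) */
            printf("shift 2: %lu KiB\n", (base << 2) / 1024);
            printf("shift 3: %lu KiB\n", (base << 3) / 1024);
            return 0;
    }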

View file

@@ -145,6 +145,19 @@ notrace unsigned int __check_irq_replay(void)
 	/* Clear bit 0 which we wouldn't clear otherwise */
 	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
 
+	if (happened & PACA_IRQ_HARD_DIS) {
+		/*
+		 * We may have missed a decrementer interrupt if hard disabled.
+		 * Check the decrementer register in case we had a rollover
+		 * while hard disabled.
+		 */
+		if (!(happened & PACA_IRQ_DEC)) {
+			if (decrementer_check_overflow()) {
+				local_paca->irq_happened |= PACA_IRQ_DEC;
+				happened |= PACA_IRQ_DEC;
+			}
+		}
+	}
+
 	/*
 	 * Force the delivery of pending soft-disabled interrupts on PS3.
@@ -170,7 +183,7 @@ notrace unsigned int __check_irq_replay(void)
 	 * in case we also had a rollover while hard disabled
 	 */
 	local_paca->irq_happened &= ~PACA_IRQ_DEC;
-	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
+	if (happened & PACA_IRQ_DEC)
 		return 0x900;
 
 	/* Finally check if an external interrupt happened */
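For context, decrementer_check_overflow() asks whether the next programmed decrementer event time has already passed while interrupts were hard-disabled. A paraphrased sketch of the 4.12-era helper in arch/powerpc/kernel/irq.c (written from memory, so treat the names as approximate):

    /* Paraphrased sketch; see arch/powerpc/kernel/irq.c for the real code. */
    static inline bool decrementer_check_overflow(void)
    {
            u64 now = get_tb_or_rtc();      /* current timebase */
            u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

            /* the event is due (rolled over) if the timebase passed it */
            return now >= *next_tb;
    }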

View file

@@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
 	 * If task is not current, it will have been flushed already to
 	 * it's thread_struct during __switch_to().
 	 *
-	 * A reclaim flushes ALL the state.
+	 * A reclaim flushes ALL the state or if not in TM save TM SPRs
+	 * in the appropriate thread structures from live.
 	 */
 
-	if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
-		tm_reclaim_current(TM_CAUSE_SIGNAL);
+	if (tsk != current)
+		return;
+
+	if (MSR_TM_SUSPENDED(mfmsr())) {
+		tm_reclaim_current(TM_CAUSE_SIGNAL);
+	} else {
+		tm_enable();
+		tm_save_sprs(&(tsk->thread));
+	}
 }
 
 #else
 static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }

View file

@@ -27,9 +27,11 @@ void destroy_context(struct mm_struct *mm);
 void __tsb_context_switch(unsigned long pgd_pa,
 			  struct tsb_config *tsb_base,
 			  struct tsb_config *tsb_huge,
-			  unsigned long tsb_descr_pa);
+			  unsigned long tsb_descr_pa,
+			  unsigned long secondary_ctx);
 
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+					  unsigned long ctx)
 {
 	__tsb_context_switch(__pa(mm->pgd),
 			     &mm->context.tsb_block[MM_TSB_BASE],
@@ -40,9 +42,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
+			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
+			     ctx);
 }
 
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
+
 void tsb_grow(struct mm_struct *mm,
 	      unsigned long tsb_index,
 	      unsigned long mm_rss);
@@ -112,8 +117,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
 	 * only had cpu1 set in it.
 	 */
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 
 	/* Any time a processor runs a context on an address space
 	 * for the first time, we must flush that context out of the

View file

@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
 void init_cur_cpu_trap(struct thread_info *);
 void setup_tba(void);
 extern int ncpus_probed;
+extern u64 cpu_mondo_counter[NR_CPUS];
 
 unsigned long real_hard_smp_processor_id(void);

View file

@@ -622,22 +622,48 @@ retry:
 		}
 	}
 
-/* Multi-cpu list version.  */
+#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
+#define	MONDO_USEC_WAIT_MIN		2
+#define	MONDO_USEC_WAIT_MAX		100
+#define	MONDO_RETRY_LIMIT		500000
+
+/* Multi-cpu list version.
+ *
+ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
+ * Sometimes not all cpus receive the mondo, requiring us to re-send
+ * the mondo until all cpus have received, or cpus are truly stuck
+ * unable to receive mondo, and we timeout.
+ * Occasionally a target cpu strand is borrowed briefly by hypervisor to
+ * perform guest service, such as PCIe error handling. Consider the
+ * service time, 1 second overall wait is reasonable for 1 cpu.
+ * Here two in-between mondo check wait time are defined: 2 usec for
+ * single cpu quick turn around and up to 100usec for large cpu count.
+ * Deliver mondo to large number of cpus could take longer, we adjusts
+ * the retry count as long as target cpus are making forward progress.
+ */
 static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 {
-	int retries, this_cpu, prev_sent, i, saw_cpu_error;
+	int this_cpu, tot_cpus, prev_sent, i, rem;
+	int usec_wait, retries, tot_retries;
+	u16 first_cpu = 0xffff;
+	unsigned long xc_rcvd = 0;
 	unsigned long status;
+	int ecpuerror_id = 0;
+	int enocpu_id = 0;
 	u16 *cpu_list;
+	u16 cpu;
 
 	this_cpu = smp_processor_id();
 	cpu_list = __va(tb->cpu_list_pa);
 
-	saw_cpu_error = 0;
-	retries = 0;
+	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
+	if (usec_wait > MONDO_USEC_WAIT_MAX)
+		usec_wait = MONDO_USEC_WAIT_MAX;
+
+	retries = tot_retries = 0;
+	tot_cpus = cnt;
 	prev_sent = 0;
+
 	do {
-		int forward_progress, n_sent;
+		int n_sent, mondo_delivered, target_cpu_busy;
 
 		status = sun4v_cpu_mondo_send(cnt,
 					      tb->cpu_list_pa,
@@ -645,94 +671,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 
 		/* HV_EOK means all cpus received the xcall, we're done.  */
 		if (likely(status == HV_EOK))
-			break;
+			goto xcall_done;
+
+		/* If not these non-fatal errors, panic */
+		if (unlikely((status != HV_EWOULDBLOCK) &&
+			     (status != HV_ECPUERROR) &&
+			     (status != HV_ENOCPU)))
+			goto fatal_errors;
 
 		/* First, see if we made any forward progress.
+		 *
+		 * Go through the cpu_list, count the target cpus that have
+		 * received our mondo (n_sent), and those that did not (rem).
+		 * Re-pack cpu_list with the cpus remain to be retried in the
+		 * front - this simplifies tracking the truly stalled cpus.
 		 *
 		 * The hypervisor indicates successful sends by setting
 		 * cpu list entries to the value 0xffff.
+		 *
+		 * EWOULDBLOCK means some target cpus did not receive the
+		 * mondo and retry usually helps.
+		 *
+		 * ECPUERROR means at least one target cpu is in error state,
+		 * it's usually safe to skip the faulty cpu and retry.
+		 *
+		 * ENOCPU means one of the target cpu doesn't belong to the
+		 * domain, perhaps offlined which is unexpected, but not
+		 * fatal and it's okay to skip the offlined cpu.
 		 */
+		rem = 0;
 		n_sent = 0;
 		for (i = 0; i < cnt; i++) {
-			if (likely(cpu_list[i] == 0xffff))
+			cpu = cpu_list[i];
+			if (likely(cpu == 0xffff)) {
 				n_sent++;
+			} else if ((status == HV_ECPUERROR) &&
+				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
+				ecpuerror_id = cpu + 1;
+			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
+				enocpu_id = cpu + 1;
+			} else {
+				cpu_list[rem++] = cpu;
+			}
 		}
 
-		forward_progress = 0;
-		if (n_sent > prev_sent)
-			forward_progress = 1;
+		/* No cpu remained, we're done. */
+		if (rem == 0)
+			break;
 
+		/* Otherwise, update the cpu count for retry. */
+		cnt = rem;
+
+		/* Record the overall number of mondos received by the
+		 * first of the remaining cpus.
+		 */
+		if (first_cpu != cpu_list[0]) {
+			first_cpu = cpu_list[0];
+			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+		}
+
+		/* Was any mondo delivered successfully? */
+		mondo_delivered = (n_sent > prev_sent);
 		prev_sent = n_sent;
 
-		/* If we get a HV_ECPUERROR, then one or more of the cpus
-		 * in the list are in error state.  Use the cpu_state()
-		 * hypervisor call to find out which cpus are in error state.
+		/* or, was any target cpu busy processing other mondos? */
+		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
+		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+
+		/* Retry count is for no progress. If we're making progress,
+		 * reset the retry count.
 		 */
-		if (unlikely(status == HV_ECPUERROR)) {
-			for (i = 0; i < cnt; i++) {
-				long err;
-				u16 cpu;
-
-				cpu = cpu_list[i];
-				if (cpu == 0xffff)
-					continue;
-
-				err = sun4v_cpu_state(cpu);
-				if (err == HV_CPU_STATE_ERROR) {
-					saw_cpu_error = (cpu + 1);
-					cpu_list[i] = 0xffff;
-				}
-			}
-		} else if (unlikely(status != HV_EWOULDBLOCK))
-			goto fatal_mondo_error;
-
-		/* Don't bother rewriting the CPU list, just leave the
-		 * 0xffff and non-0xffff entries in there and the
-		 * hypervisor will do the right thing.
-		 *
-		 * Only advance timeout state if we didn't make any
-		 * forward progress.
-		 */
-		if (unlikely(!forward_progress)) {
-			if (unlikely(++retries > 10000))
-				goto fatal_mondo_timeout;
-
-			/* Delay a little bit to let other cpus catch up
-			 * on their cpu mondo queue work.
-			 */
-			udelay(2 * cnt);
+		if (likely(mondo_delivered || target_cpu_busy)) {
+			tot_retries += retries;
+			retries = 0;
+		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
+			goto fatal_mondo_timeout;
 		}
+
+		/* Delay a little bit to let other cpus catch up on
+		 * their cpu mondo queue work.
+		 */
+		if (!mondo_delivered)
+			udelay(usec_wait);
+
+		retries++;
 	} while (1);
 
-	if (unlikely(saw_cpu_error))
-		goto fatal_mondo_cpu_error;
+xcall_done:
+	if (unlikely(ecpuerror_id > 0)) {
+		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
+			this_cpu, ecpuerror_id - 1);
+	} else if (unlikely(enocpu_id > 0)) {
+		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
+			this_cpu, enocpu_id - 1);
+	}
 
 	return;
 
-fatal_mondo_cpu_error:
-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
-	       "(including %d) were in error state\n",
-	       this_cpu, saw_cpu_error - 1);
-	return;
+fatal_errors:
+	/* fatal errors include bad alignment, etc */
+	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
+		this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+	panic("Unexpected SUN4V mondo error %lu\n", status);
 
 fatal_mondo_timeout:
-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
-	       " progress after %d retries.\n",
-	       this_cpu, retries);
-	goto dump_cpu_list_and_out;
-
-fatal_mondo_error:
-	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
-	       this_cpu, status);
-	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
-	       "mondo_block_pa(%lx)\n",
-	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
-
-dump_cpu_list_and_out:
-	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
-	for (i = 0; i < cnt; i++)
-		printk("%u ", cpu_list[i]);
-	printk("]\n");
+	/* some cpus being non-responsive to the cpu mondo */
+	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
+		this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
+	panic("SUN4V mondo timeout panic\n");
 }
 
 static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
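The in-between wait picked above scales with the target count but is clamped: with the macro values, 1 cpu waits 2 usec, 32 cpus wait 64 usec, and 64 or more cpus are capped at 100 usec. A standalone sketch of just that selection:

    #include <stdio.h>

    #define MONDO_USEC_WAIT_MIN 2
    #define MONDO_USEC_WAIT_MAX 100

    /* Sketch of the wait-time clamp in hypervisor_xcall_deliver(). */
    static int mondo_usec_wait(int cnt)
    {
            int usec_wait = cnt * MONDO_USEC_WAIT_MIN;

            if (usec_wait > MONDO_USEC_WAIT_MAX)
                    usec_wait = MONDO_USEC_WAIT_MAX;
            return usec_wait;
    }

    int main(void)
    {
            printf("%d %d %d\n", mondo_usec_wait(1), mondo_usec_wait(32),
                   mondo_usec_wait(64));    /* prints: 2 64 100 */
            return 0;
    }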

View file

@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
 	ldxa	[%g0] ASI_SCRATCHPAD, %g4
 	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4
 
+	/* Get smp_processor_id() into %g3 */
+	sethi	%hi(trap_block), %g5
+	or	%g5, %lo(trap_block), %g5
+	sub	%g4, %g5, %g3
+	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+
+	/* Increment cpu_mondo_counter[smp_processor_id()] */
+	sethi	%hi(cpu_mondo_counter), %g5
+	or	%g5, %lo(cpu_mondo_counter), %g5
+	sllx	%g3, 3, %g3
+	add	%g5, %g3, %g5
+	ldx	[%g5], %g3
+	add	%g3, 1, %g3
+	stx	%g3, [%g5]
+
 	/* Get CPU mondo queue base phys address into %g7.  */
 	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

View file

@@ -2733,6 +2733,7 @@ void do_getpsr(struct pt_regs *regs)
 	}
 }
 
+u64 cpu_mondo_counter[NR_CPUS] = {0};
 struct trap_per_cpu trap_block[NR_CPUS];
 EXPORT_SYMBOL(trap_block);

View file

@@ -360,6 +360,7 @@ tsb_flush:
  *	%o1:	TSB base config pointer
  *	%o2:	TSB huge config pointer, or NULL if none
  *	%o3:	Hypervisor TSB descriptor physical address
+ *	%o4:	Secondary context to load, if non-zero
  *
  * We have to run this whole thing with interrupts
  * disabled so that the current cpu doesn't change
@@ -372,6 +373,17 @@ __tsb_context_switch:
 	rdpr	%pstate, %g1
 	wrpr	%g1, PSTATE_IE, %pstate
 
+	brz,pn	%o4, 1f
+	 mov	SECONDARY_CONTEXT, %o5
+
+661:	stxa	%o4, [%o5] ASI_DMMU
+	.section .sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%o4, [%o5] ASI_MMU
+	.previous
+	flush	%g6
+
+1:
 	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

View file

@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
 ENTRY(U3_retl_o2_and_7_plus_GS)
 	and	%o2, 7, %o2
 	retl
-	 add	%o2, GLOBAL_SPARE, %o2
+	 add	%o2, GLOBAL_SPARE, %o0
 ENDPROC(U3_retl_o2_and_7_plus_GS)
 ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
 	add	GLOBAL_SPARE, 8, GLOBAL_SPARE
 	and	%o2, 7, %o2
 	retl
-	 add	%o2, GLOBAL_SPARE, %o2
+	 add	%o2, GLOBAL_SPARE, %o0
 ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
 #endif

View file

@@ -325,6 +325,29 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
+static void __init add_huge_page_size(unsigned long size)
+{
+	unsigned int order;
+
+	if (size_to_hstate(size))
+		return;
+
+	order = ilog2(size) - PAGE_SHIFT;
+	hugetlb_add_hstate(order);
+}
+
+static int __init hugetlbpage_init(void)
+{
+	add_huge_page_size(1UL << HPAGE_64K_SHIFT);
+	add_huge_page_size(1UL << HPAGE_SHIFT);
+	add_huge_page_size(1UL << HPAGE_256MB_SHIFT);
+	add_huge_page_size(1UL << HPAGE_2GB_SHIFT);
+
+	return 0;
+}
+
+arch_initcall(hugetlbpage_init);
+
 static int __init setup_hugepagesz(char *string)
 {
 	unsigned long long hugepage_size;
@@ -364,7 +387,7 @@ static int __init setup_hugepagesz(char *string)
 		goto out;
 	}
 
-	hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
+	add_huge_page_size(hugepage_size);
 	rc = 1;
 
 out:
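add_huge_page_size() turns a byte size into an hstate order via ilog2(size) - PAGE_SHIFT. Assuming sparc64's 8 KiB base pages (PAGE_SHIFT = 13), a 64 KiB page is order 3, 8 MiB is order 10, 256 MiB is order 15, and 2 GiB is order 18. A sketch of the arithmetic for power-of-two sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int page_shift = 13;   /* assumed sparc64 8 KiB base page */
            unsigned long sizes[] = { 1UL << 16, 1UL << 23, 1UL << 28, 1UL << 31 };

            for (int i = 0; i < 4; i++) {
                    /* count-trailing-zeros equals ilog2 for power-of-two sizes */
                    unsigned int order = __builtin_ctzl(sizes[i]) - page_shift;
                    printf("%lu bytes -> order %u\n", sizes[i], order);
            }
            return 0;
    }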

View file

@@ -35,6 +35,5 @@ void restore_processor_state(void)
 {
 	struct mm_struct *mm = current->active_mm;
 
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 }

View file

@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
 		if (hlist_unhashed(&n.link))
 			break;
 
+		rcu_irq_exit();
+
 		if (!n.halted) {
 			local_irq_enable();
 			schedule();
@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
 			/*
 			 * We cannot reschedule. So halt.
 			 */
-			rcu_irq_exit();
 			native_safe_halt();
 			local_irq_disable();
-			rcu_irq_enter();
 		}
+
+		rcu_irq_enter();
 	}
 	if (!n.halted)
 		finish_swait(&n.wq, &wait);

View file

@@ -3307,6 +3307,10 @@ EXPORT_SYMBOL(blk_finish_plug);
  */
 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
 {
+	/* not support for RQF_PM and ->rpm_status in blk-mq yet */
+	if (q->mq_ops)
+		return;
+
 	q->dev = dev;
 	q->rpm_status = RPM_ACTIVE;
 	pm_runtime_set_autosuspend_delay(q->dev, -1);

View file

@@ -35,7 +35,6 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
 {
 	unsigned int *map = set->mq_map;
 	unsigned int nr_queues = set->nr_hw_queues;
-	const struct cpumask *online_mask = cpu_online_mask;
 	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
 	cpumask_var_t cpus;
@@ -44,7 +43,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
 	cpumask_clear(cpus);
 	nr_cpus = nr_uniq_cpus = 0;
-	for_each_cpu(i, online_mask) {
+	for_each_present_cpu(i) {
 		nr_cpus++;
 		first_sibling = get_first_sibling(i);
 		if (!cpumask_test_cpu(first_sibling, cpus))
@@ -54,7 +53,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
 	queue = 0;
 	for_each_possible_cpu(i) {
-		if (!cpumask_test_cpu(i, online_mask)) {
+		if (!cpumask_test_cpu(i, cpu_present_mask)) {
 			map[i] = 0;
 			continue;
 		}

View file

@@ -37,9 +37,6 @@
 #include "blk-wbt.h"
 #include "blk-mq-sched.h"
 
-static DEFINE_MUTEX(all_q_mutex);
-static LIST_HEAD(all_q_list);
-
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
@@ -1975,8 +1972,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
 
-		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpu_online(i))
+		/* If the cpu isn't present, the cpu is mapped to first hctx */
+		if (!cpu_present(i))
 			continue;
 
 		hctx = blk_mq_map_queue(q, i);
@@ -2019,8 +2016,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 	}
 }
 
-static void blk_mq_map_swqueue(struct request_queue *q,
-			       const struct cpumask *online_mask)
+static void blk_mq_map_swqueue(struct request_queue *q)
 {
 	unsigned int i, hctx_idx;
 	struct blk_mq_hw_ctx *hctx;
@@ -2038,13 +2034,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 	}
 
 	/*
-	 * Map software to hardware queues
+	 * Map software to hardware queues.
+	 *
+	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
-	for_each_possible_cpu(i) {
-		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpumask_test_cpu(i, online_mask))
-			continue;
-
+	for_each_present_cpu(i) {
 		hctx_idx = q->mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
@@ -2340,16 +2334,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	blk_queue_softirq_done(q, set->ops->complete);
 
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-	get_online_cpus();
-	mutex_lock(&all_q_mutex);
-
-	list_add_tail(&q->all_q_node, &all_q_list);
 	blk_mq_add_queue_tag_set(set, q);
-	blk_mq_map_swqueue(q, cpu_online_mask);
-
-	mutex_unlock(&all_q_mutex);
-	put_online_cpus();
+	blk_mq_map_swqueue(q);
 
 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
 		int ret;
@@ -2375,18 +2361,12 @@ void blk_mq_free_queue(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	mutex_lock(&all_q_mutex);
-	list_del_init(&q->all_q_node);
-	mutex_unlock(&all_q_mutex);
-
 	blk_mq_del_queue_tag_set(q);
 
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q,
-				const struct cpumask *online_mask)
+static void blk_mq_queue_reinit(struct request_queue *q)
 {
 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
@@ -2399,76 +2379,12 @@ static void blk_mq_queue_reinit(struct request_queue *q,
 	 * involves free and re-allocate memory, worthy doing?)
 	 */
 
-	blk_mq_map_swqueue(q, online_mask);
+	blk_mq_map_swqueue(q);
 
 	blk_mq_sysfs_register(q);
 	blk_mq_debugfs_register_hctxs(q);
 }
 
-/*
- * New online cpumask which is going to be set in this hotplug event.
- * Declare this cpumasks as global as cpu-hotplug operation is invoked
- * one-by-one and dynamically allocating this could result in a failure.
- */
-static struct cpumask cpuhp_online_new;
-
-static void blk_mq_queue_reinit_work(void)
-{
-	struct request_queue *q;
-
-	mutex_lock(&all_q_mutex);
-	/*
-	 * We need to freeze and reinit all existing queues.  Freezing
-	 * involves synchronous wait for an RCU grace period and doing it
-	 * one by one may take a long time.  Start freezing all queues in
-	 * one swoop and then wait for the completions so that freezing can
-	 * take place in parallel.
-	 */
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_freeze_queue_start(q);
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_freeze_queue_wait(q);
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_queue_reinit(q, &cpuhp_online_new);
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_unfreeze_queue(q);
-
-	mutex_unlock(&all_q_mutex);
-}
-
-static int blk_mq_queue_reinit_dead(unsigned int cpu)
-{
-	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
-	blk_mq_queue_reinit_work();
-	return 0;
-}
-
-/*
- * Before hotadded cpu starts handling requests, new mappings must be
- * established.  Otherwise, these requests in hw queue might never be
- * dispatched.
- *
- * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
- * for CPU0, and ctx1 for CPU1).
- *
- * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
- * and set bit0 in pending bitmap as ctx1->index_hw is still zero.
- *
- * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
- * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
- * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
- * ignored.
- */
-static int blk_mq_queue_reinit_prepare(unsigned int cpu)
-{
-	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
-	cpumask_set_cpu(cpu, &cpuhp_online_new);
-	blk_mq_queue_reinit_work();
-	return 0;
-}
-
 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 {
 	int i;
@@ -2679,7 +2595,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
-		blk_mq_queue_reinit(q, cpu_online_mask);
+		blk_mq_queue_reinit(q);
 	}
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
@@ -2895,24 +2811,10 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 }
 EXPORT_SYMBOL_GPL(blk_mq_poll);
 
-void blk_mq_disable_hotplug(void)
-{
-	mutex_lock(&all_q_mutex);
-}
-
-void blk_mq_enable_hotplug(void)
-{
-	mutex_unlock(&all_q_mutex);
-}
-
 static int __init blk_mq_init(void)
 {
 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
 				blk_mq_hctx_notify_dead);
-
-	cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
-				  blk_mq_queue_reinit_prepare,
-				  blk_mq_queue_reinit_dead);
 	return 0;
 }
 subsys_initcall(blk_mq_init);

View file

@@ -56,11 +56,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 				bool at_head);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 				struct list_head *list);
-/*
- * CPU hotplug helpers
- */
-void blk_mq_enable_hotplug(void);
-void blk_mq_disable_hotplug(void);
 
 /*
  * CPU -> queue mappings

View file

@@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = {
 };
 
 struct lpss_private_data {
+	struct acpi_device *adev;
 	void __iomem *mmio_base;
 	resource_size_t mmio_size;
 	unsigned int fixed_clk_rate;
@@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = {
 
 static void byt_pwm_setup(struct lpss_private_data *pdata)
 {
+	struct acpi_device *adev = pdata->adev;
+
+	/* Only call pwm_add_table for the first PWM controller */
+	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+		return;
+
 	if (!acpi_dev_present("INT33FD", NULL, -1))
 		pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
 }
@@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
 
 static void bsw_pwm_setup(struct lpss_private_data *pdata)
 {
+	struct acpi_device *adev = pdata->adev;
+
+	/* Only call pwm_add_table for the first PWM controller */
+	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+		return;
+
 	pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
 }
@@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 		goto err_out;
 	}
 
+	pdata->adev = adev;
 	pdata->dev_desc = dev_desc;
 
 	if (dev_desc->setup)

View file

@@ -3028,10 +3028,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
 {
 	if (!sata_pmp_attached(ap)) {
-		if (likely(devno < ata_link_max_devices(&ap->link)))
+		if (likely(devno >= 0 &&
+			   devno < ata_link_max_devices(&ap->link)))
 			return &ap->link.device[devno];
 	} else {
-		if (likely(devno < ap->nr_pmp_links))
+		if (likely(devno >= 0 &&
+			   devno < ap->nr_pmp_links))
 			return &ap->pmp_link[devno].device[0];
 	}
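The added lower bound matters because devno is a signed int; with only the upper-bound test, a negative value passes the check and indexes before the start of the device array. A minimal illustration with made-up values:

    #include <stdio.h>

    int main(void)
    {
            int devno = -1;   /* hypothetical out-of-range value */
            int max = 2;

            if (devno < max)  /* old test: true for -1, allowing device[-1] */
                    printf("old check: would index device[%d]\n", devno);

            if (devno >= 0 && devno < max)  /* new test */
                    printf("new check: ok\n");
            else
                    printf("new check: rejected\n");
            return 0;
    }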

View file

@@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = {
 		.hw.init	= CLK_HW_INIT_PARENTS("cpu",
 						      cpu_parents,
 						      &ccu_mux_ops,
-						      CLK_IS_CRITICAL),
+						      CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
 	}
 };

View file

@@ -704,24 +704,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
 {
 	struct lineevent_state *le = p;
 	struct gpioevent_data ge;
-	int ret;
+	int ret, level;
 
 	ge.timestamp = ktime_get_real_ns();
+	level = gpiod_get_value_cansleep(le->desc);
 
 	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
 	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
-		int level = gpiod_get_value_cansleep(le->desc);
-
 		if (level)
 			/* Emit low-to-high event */
 			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
 		else
 			/* Emit high-to-low event */
 			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
-	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
+	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
 		/* Emit low-to-high event */
 		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
-	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
 		/* Emit high-to-low event */
 		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
 	} else {

View file

@@ -1,24 +1,25 @@
 /*
-***************************************************************************************************
-*
-*  Trade secret of Advanced Micro Devices, Inc.
-*  Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished)
-*
-*  All rights reserved.  This notice is intended as a precaution against inadvertent publication and
-*  does not imply publication or any waiver of confidentiality.  The year included in the foregoing
-*  notice is the year of creation of the work.
-*
-***************************************************************************************************
-*/
-/**
-***************************************************************************************************
-* @brief gfx9 Clearstate Definitions
-***************************************************************************************************
-*
-*   Do not edit! This is a machine-generated file!
-*
-*/
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 
 static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
 {

View file

@@ -1385,6 +1385,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
 		amdgpu_program_register_sequence(adev,
 						 pitcairn_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+		break;
 	case CHIP_VERDE:
 		amdgpu_program_register_sequence(adev,
 						 verde_golden_registers,
@@ -1409,6 +1410,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
 		amdgpu_program_register_sequence(adev,
 						 oland_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+		break;
 	case CHIP_HAINAN:
 		amdgpu_program_register_sequence(adev,
 						 hainan_golden_registers,
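Both added break statements plug the same hole: without them, a case falls through and also runs the next chip's register programming. A minimal illustration of the fallthrough, with placeholder printfs standing in for the register writes:

    #include <stdio.h>

    enum chip { CHIP_PITCAIRN, CHIP_VERDE };

    int main(void)
    {
            enum chip c = CHIP_PITCAIRN;

            switch (c) {
            case CHIP_PITCAIRN:
                    printf("program pitcairn registers\n");
                    /* missing break: execution falls through... */
            case CHIP_VERDE:
                    printf("program verde registers\n"); /* ...and runs this too */
                    break;
            }
            return 0;
    }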

View file

@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 	hotspot_x = du->hotspot_x;
 	hotspot_y = du->hotspot_y;
+
+	if (plane->fb) {
+		hotspot_x += plane->fb->hot_x;
+		hotspot_y += plane->fb->hot_y;
+	}
+
 	du->cursor_surface = vps->surf;
 	du->cursor_dmabuf = vps->dmabuf;
@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 		vmw_cursor_update_position(dev_priv, true,
 					   du->cursor_x + hotspot_x,
 					   du->cursor_y + hotspot_y);
+
+		du->core_hotspot_x = hotspot_x - du->hotspot_x;
+		du->core_hotspot_y = hotspot_y - du->hotspot_y;
 	} else {
 		DRM_ERROR("Failed to update cursor image\n");
 	}

View file

@@ -4316,6 +4316,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
 		/* Setting */
 		irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
 		irte->hi.fields.vector = vcpu_pi_info->vector;
+		irte->lo.fields_vapic.ga_log_intr = 1;
 		irte->lo.fields_vapic.guest_mode = 1;
 		irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;

View file

@@ -389,11 +389,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
 	msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
 	msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
 	msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
+	memcpy(msg, &msg_tmp, sizeof(*msg));
 
 	/* No need to update the read positions, because this was a peek */
 	/* If the caller specifically want to peek, return */
 	if (peekonly) {
-		memcpy(msg, &msg_tmp, sizeof(*msg));
 		goto peekout;
 	}
@@ -438,21 +438,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
 		space_rem = bus->m_dwSizeGetRing - curr_grp;
 
 		if (space_rem < sizeof(*msg)) {
-			/* msg wraps around the ring */
-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
-			memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
-				sizeof(*msg) - space_rem);
 			if (buf)
 				memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
 					space_rem, buf_size);
 
 		} else if (space_rem == sizeof(*msg)) {
-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
 			if (buf)
 				memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
 		} else {
 			/* Additional data wraps around the ring */
-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
 			if (buf) {
 				memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
 					sizeof(*msg), space_rem - sizeof(*msg));
@@ -465,15 +459,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
 	} else {
 		/* No wrapping */
-		memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
 		if (buf)
 			memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
 				buf_size);
 	}
-	/* Convert from little endian to CPU */
-	msg->size = le16_to_cpu((__force __le16)msg->size);
-	msg->command = le32_to_cpu((__force __le32)msg->command);
-	msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
 
 	/* Update the read positions, adjusting the ring */
 	saa7164_writel(bus->m_dwGetReadPos, new_grp);

View file

@@ -1719,27 +1719,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
 
 	switch (cmd) {
 	case VPFE_CMD_S_CCDC_RAW_PARAMS:
+		ret = -EINVAL;
 		v4l2_warn(&vpfe_dev->v4l2_dev,
-			  "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
-		if (ccdc_dev->hw_ops.set_params) {
-			ret = ccdc_dev->hw_ops.set_params(param);
-			if (ret) {
-				v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
-					"Error setting parameters in CCDC\n");
-				goto unlock_out;
-			}
-			ret = vpfe_get_ccdc_image_format(vpfe_dev,
-							 &vpfe_dev->fmt);
-			if (ret < 0) {
-				v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
-					"Invalid image format at CCDC\n");
-				goto unlock_out;
-			}
-		} else {
-			ret = -EINVAL;
-			v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
-				"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
-		}
+			"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
 		break;
 	default:
 		ret = -ENOTTY;

View file

@@ -266,7 +266,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
 		if (!dev->rx_resolution)
 			return -ENOTTY;
 
-		val = dev->rx_resolution;
+		val = dev->rx_resolution / 1000;
 		break;
 
 	case LIRC_SET_WIDEBAND_RECEIVER:

View file

@@ -57,10 +57,13 @@ static int ir_spi_tx(struct rc_dev *dev,
 	/* convert the pulse/space signal to raw binary signal */
 	for (i = 0; i < count; i++) {
+		unsigned int periods;
 		int j;
 		u16 val = ((i + 1) % 2) ? idata->pulse : idata->space;
 
-		if (len + buffer[i] >= IR_SPI_MAX_BUFSIZE)
+		periods = DIV_ROUND_CLOSEST(buffer[i] * idata->freq, 1000000);
+
+		if (len + periods >= IR_SPI_MAX_BUFSIZE)
 			return -EINVAL;
 
 		/*
@@ -69,13 +72,13 @@ static int ir_spi_tx(struct rc_dev *dev,
 		 * contain a space duration.
 		 */
 		val = (i % 2) ? idata->space : idata->pulse;
-		for (j = 0; j < buffer[i]; j++)
+		for (j = 0; j < periods; j++)
 			idata->tx_buf[len++] = val;
 	}
 
 	memset(&xfer, 0, sizeof(xfer));
 
-	xfer.speed_hz = idata->freq;
+	xfer.speed_hz = idata->freq * 16;
 	xfer.len = len * sizeof(*idata->tx_buf);
 	xfer.tx_buf = idata->tx_buf;
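The conversion above maps each pulse/space duration in microseconds to a count of carrier periods. As a worked example with assumed numbers: at a 38 kHz carrier, a 600 us pulse gives 600 * 38000 / 1000000 = 22.8, rounded to 23 periods. A standalone sketch:

    #include <stdio.h>

    /* rounding equivalent to the kernel macro for positive values */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

    int main(void)
    {
            unsigned int freq = 38000;       /* assumed IR carrier, Hz */
            unsigned int duration_us = 600;  /* assumed pulse length */

            printf("periods = %u\n",
                   DIV_ROUND_CLOSEST(duration_us * freq, 1000000));
            return 0;
    }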

View file

@@ -51,7 +51,7 @@ MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
 MODULE_LICENSE("GPL");
 
 static int debug;
-static int persistent_config = 1;
+static int persistent_config;
 module_param(debug, int, 0644);
 module_param(persistent_config, int, 0644);
 MODULE_PARM_DESC(debug, "debug level (0-1)");

View file

@@ -176,19 +176,17 @@ static void mmc_retune_timer(unsigned long data)
  */
 int mmc_of_parse(struct mmc_host *host)
 {
-	struct device_node *np;
+	struct device *dev = host->parent;
 	u32 bus_width;
 	int ret;
 	bool cd_cap_invert, cd_gpio_invert = false;
 	bool ro_cap_invert, ro_gpio_invert = false;
 
-	if (!host->parent || !host->parent->of_node)
+	if (!dev || !dev_fwnode(dev))
 		return 0;
 
-	np = host->parent->of_node;
-
 	/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
-	if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
+	if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
 		dev_dbg(host->parent,
 			"\"bus-width\" property is missing, assuming 1 bit.\n");
 		bus_width = 1;
@@ -210,7 +208,7 @@ int mmc_of_parse(struct mmc_host *host)
 	}
 
 	/* f_max is obtained from the optional "max-frequency" property */
-	of_property_read_u32(np, "max-frequency", &host->f_max);
+	device_property_read_u32(dev, "max-frequency", &host->f_max);
 
 	/*
 	 * Configure CD and WP pins. They are both by default active low to
@@ -225,12 +223,12 @@ int mmc_of_parse(struct mmc_host *host)
 	 */
 
 	/* Parse Card Detection */
-	if (of_property_read_bool(np, "non-removable")) {
+	if (device_property_read_bool(dev, "non-removable")) {
 		host->caps |= MMC_CAP_NONREMOVABLE;
 	} else {
-		cd_cap_invert = of_property_read_bool(np, "cd-inverted");
+		cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
 
-		if (of_property_read_bool(np, "broken-cd"))
+		if (device_property_read_bool(dev, "broken-cd"))
 			host->caps |= MMC_CAP_NEEDS_POLL;
 
 		ret = mmc_gpiod_request_cd(host, "cd", 0, true,
@@ -256,7 +254,7 @@ int mmc_of_parse(struct mmc_host *host)
 	}
 
 	/* Parse Write Protection */
-	ro_cap_invert = of_property_read_bool(np, "wp-inverted");
+	ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
 
 	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
 	if (!ret)
@@ -264,64 +262,64 @@ int mmc_of_parse(struct mmc_host *host)
 	else if (ret != -ENOENT && ret != -ENOSYS)
 		return ret;
 
-	if (of_property_read_bool(np, "disable-wp"))
+	if (device_property_read_bool(dev, "disable-wp"))
 		host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
 
 	/* See the comment on CD inversion above */
 	if (ro_cap_invert ^ ro_gpio_invert)
 		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 
-	if (of_property_read_bool(np, "cap-sd-highspeed"))
+	if (device_property_read_bool(dev, "cap-sd-highspeed"))
 		host->caps |= MMC_CAP_SD_HIGHSPEED;
-	if (of_property_read_bool(np, "cap-mmc-highspeed"))
+	if (device_property_read_bool(dev, "cap-mmc-highspeed"))
 		host->caps |= MMC_CAP_MMC_HIGHSPEED;
-	if (of_property_read_bool(np, "sd-uhs-sdr12"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr12"))
 		host->caps |= MMC_CAP_UHS_SDR12;
-	if (of_property_read_bool(np, "sd-uhs-sdr25"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr25"))
 		host->caps |= MMC_CAP_UHS_SDR25;
-	if (of_property_read_bool(np, "sd-uhs-sdr50"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr50"))
 		host->caps |= MMC_CAP_UHS_SDR50;
-	if (of_property_read_bool(np, "sd-uhs-sdr104"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr104"))
 		host->caps |= MMC_CAP_UHS_SDR104;
-	if (of_property_read_bool(np, "sd-uhs-ddr50"))
+	if (device_property_read_bool(dev, "sd-uhs-ddr50"))
 		host->caps |= MMC_CAP_UHS_DDR50;
-	if (of_property_read_bool(np, "cap-power-off-card"))
+	if (device_property_read_bool(dev, "cap-power-off-card"))
 		host->caps |= MMC_CAP_POWER_OFF_CARD;
-	if (of_property_read_bool(np, "cap-mmc-hw-reset"))
+	if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
 		host->caps |= MMC_CAP_HW_RESET;
-	if (of_property_read_bool(np, "cap-sdio-irq"))
+	if (device_property_read_bool(dev, "cap-sdio-irq"))
 		host->caps |= MMC_CAP_SDIO_IRQ;
-	if (of_property_read_bool(np, "full-pwr-cycle"))
+	if (device_property_read_bool(dev, "full-pwr-cycle"))
 		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
-	if (of_property_read_bool(np, "keep-power-in-suspend"))
+	if (device_property_read_bool(dev, "keep-power-in-suspend"))
 		host->pm_caps |= MMC_PM_KEEP_POWER;
-	if (of_property_read_bool(np, "wakeup-source") ||
-	    of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
+	if (device_property_read_bool(dev, "wakeup-source") ||
+	    device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
 		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
-	if (of_property_read_bool(np, "mmc-ddr-3_3v"))
+	if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
 		host->caps |= MMC_CAP_3_3V_DDR;
-	if (of_property_read_bool(np, "mmc-ddr-1_8v"))
+	if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
 		host->caps |= MMC_CAP_1_8V_DDR;
-	if (of_property_read_bool(np, "mmc-ddr-1_2v"))
+	if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
 		host->caps |= MMC_CAP_1_2V_DDR;
-	if (of_property_read_bool(np, "mmc-hs200-1_8v"))
+	if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
 		host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
-	if (of_property_read_bool(np, "mmc-hs200-1_2v"))
+	if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
 		host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
-	if (of_property_read_bool(np, "mmc-hs400-1_8v"))
+	if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
 		host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
-	if (of_property_read_bool(np, "mmc-hs400-1_2v"))
+	if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
 		host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
-	if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
+	if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
 		host->caps2 |= MMC_CAP2_HS400_ES;
-	if (of_property_read_bool(np, "no-sdio"))
+	if (device_property_read_bool(dev, "no-sdio"))
 		host->caps2 |= MMC_CAP2_NO_SDIO;
-	if (of_property_read_bool(np, "no-sd"))
+	if (device_property_read_bool(dev, "no-sd"))
 		host->caps2 |= MMC_CAP2_NO_SD;
-	if (of_property_read_bool(np, "no-mmc"))
+	if (device_property_read_bool(dev, "no-mmc"))
 		host->caps2 |= MMC_CAP2_NO_MMC;
 
-	host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
+	host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
 	if (host->dsr_req && (host->dsr & ~0xffff)) {
 		dev_err(host->parent,
 			"device tree specified broken value for DSR: 0x%x, ignoring\n",

View file

@@ -2707,8 +2707,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
 	host->slot[id] = slot;
 
 	mmc->ops = &dw_mci_ops;
-	if (of_property_read_u32_array(host->dev->of_node,
-				       "clock-freq-min-max", freq, 2)) {
+	if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
+					   freq, 2)) {
 		mmc->f_min = DW_MCI_FREQ_MIN;
 		mmc->f_max = DW_MCI_FREQ_MAX;
 	} else {
@@ -2808,7 +2808,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
 {
 	int addr_config;
 	struct device *dev = host->dev;
-	struct device_node *np = dev->of_node;
 
 	/*
 	 * Check tansfer mode from HCON[17:16]
@@ -2869,8 +2868,9 @@ static void dw_mci_init_dma(struct dw_mci *host)
 		dev_info(host->dev, "Using internal DMA controller.\n");
 	} else {
 		/* TRANS_MODE_EDMAC: check dma bindings again */
-		if ((of_property_count_strings(np, "dma-names") < 0) ||
-		    (!of_find_property(np, "dmas", NULL))) {
+		if ((device_property_read_string_array(dev, "dma-names",
+						       NULL, 0) < 0) ||
+		    !device_property_present(dev, "dmas")) {
 			goto no_dma;
 		}
 		host->dma_ops = &dw_mci_edmac_ops;
@@ -2937,7 +2937,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
 {
 	struct dw_mci_board *pdata;
 	struct device *dev = host->dev;
-	struct device_node *np = dev->of_node;
 	const struct dw_mci_drv_data *drv_data = host->drv_data;
 	int ret;
 	u32 clock_frequency;
@@ -2954,20 +2953,21 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
 	}
 
 	/* find out number of slots supported */
-	of_property_read_u32(np, "num-slots", &pdata->num_slots);
+	device_property_read_u32(dev, "num-slots", &pdata->num_slots);
 
-	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
+	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
 		dev_info(dev,
 			 "fifo-depth property not found, using value of FIFOTH register as default\n");
 
-	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
+	device_property_read_u32(dev, "card-detect-delay",
+				 &pdata->detect_delay_ms);
 
-	of_property_read_u32(np, "data-addr", &host->data_addr_override);
+	device_property_read_u32(dev, "data-addr", &host->data_addr_override);
 
-	if (of_get_property(np, "fifo-watermark-aligned", NULL))
+	if (device_property_present(dev, "fifo-watermark-aligned"))
 		host->wm_aligned = true;
 
-	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
+	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
 		pdata->bus_hz = clock_frequency;
 
 	if (drv_data && drv_data->parse_dt) {

View file

@@ -31,6 +31,7 @@
 #define SDMMC_MC1R	0x204
 #define		SDMMC_MC1R_DDR		BIT(3)
+#define		SDMMC_MC1R_FCD		BIT(7)
 #define SDMMC_CACR	0x230
 #define		SDMMC_CACR_CAPWREN	BIT(0)
 #define		SDMMC_CACR_KEY		(0x46 << 8)
@@ -43,6 +44,15 @@ struct sdhci_at91_priv {
 	struct clk *mainck;
 };
 
+static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
+{
+	u8 mc1r;
+
+	mc1r = readb(host->ioaddr + SDMMC_MC1R);
+	mc1r |= SDMMC_MC1R_FCD;
+	writeb(mc1r, host->ioaddr + SDMMC_MC1R);
+}
+
 static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
 {
 	u16 clk;
@@ -110,10 +120,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
 	sdhci_set_uhs_signaling(host, timing);
 }
 
+static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
+{
+	sdhci_reset(host, mask);
+
+	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+		sdhci_at91_set_force_card_detect(host);
+}
+
 static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
 	.set_clock		= sdhci_at91_set_clock,
 	.set_bus_width		= sdhci_set_bus_width,
-	.reset			= sdhci_reset,
+	.reset			= sdhci_at91_reset,
 	.set_uhs_signaling	= sdhci_at91_set_uhs_signaling,
 	.set_power		= sdhci_at91_set_power,
 };
@@ -324,6 +342,21 @@ static int sdhci_at91_probe(struct platform_device *pdev)
 		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 	}
 
+	/*
+	 * If the device attached to the MMC bus is not removable, it is safer
+	 * to set the Force Card Detect bit. People often don't connect the
+	 * card detect signal and use this pin for another purpose. If the card
+	 * detect pin is not muxed to SDHCI controller, a default value is
+	 * used. This value can be different from a SoC revision to another
+	 * one. Problems come when this default value is not card present. To
+	 * avoid this case, if the device is non removable then the card
+	 * detection procedure using the SDMCC_CD signal is bypassed.
+	 * This bit is reset when a software reset for all command is performed
+	 * so we need to implement our own reset function to set back this bit.
+	 */
+	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+		sdhci_at91_set_force_card_detect(host);
+
 	pm_runtime_put_autosuspend(&pdev->dev);
 
 	return 0;

View file

@@ -2047,6 +2047,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 				continue;

 			bond_propose_link_state(slave, BOND_LINK_FAIL);
+			commit++;
 			slave->delay = bond->params.downdelay;
 			if (slave->delay) {
 				netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -2085,6 +2086,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 				continue;

 			bond_propose_link_state(slave, BOND_LINK_BACK);
+			commit++;
 			slave->delay = bond->params.updelay;

 			if (slave->delay) {
@@ -4598,7 +4600,7 @@ static int bond_check_params(struct bond_params *params)
 		}
 		ad_user_port_key = valptr->value;

-	if (bond_mode == BOND_MODE_TLB) {
+	if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
 		bond_opt_initstr(&newval, "default");
 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
 					&newval);
View file

@@ -1668,6 +1668,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
 		.dev_name = "BCM53125",
 		.vlans = 4096,
 		.enabled_ports = 0xff,
+		.arl_entries = 4,
 		.cpu_port = B53_CPU_PORT,
 		.vta_regs = B53_VTA_REGS,
 		.duplex_reg = B53_DUPLEX_STAT_GE,
View file

@@ -3377,6 +3377,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
 	.port_jumbo_config = mv88e6165_port_jumbo_config,
 	.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
 	.port_pause_config = mv88e6390_port_pause_config,
+	.port_set_cmode = mv88e6390x_port_set_cmode,
 	.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
 	.stats_snapshot = mv88e6390_g1_stats_snapshot,
View file

@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
 		mac_mode |= HALF_DUPLEX;

 	if (gigabit) {
-		if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
+		if (phy_interface_is_rgmii(dev->phydev))
 			mac_mode |= RGMII_MODE;

 		mac_mode |= GMAC_MODE;
@@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev)
 		break;

 	case PHY_INTERFACE_MODE_RGMII:
-		pad_mode = PAD_MODE_RGMII;
-		break;
-
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
 	case PHY_INTERFACE_MODE_RGMII_TXID:
-		pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
+		pad_mode = PAD_MODE_RGMII;
 		break;

 	default:
View file

@@ -777,6 +777,10 @@ static void cb_timeout_handler(struct work_struct *work)
 	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }

+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+			      struct mlx5_cmd_msg *msg);
+
 static void cmd_work_handler(struct work_struct *work)
 {
 	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -786,16 +790,27 @@ static void cmd_work_handler(struct work_struct *work)
 	struct mlx5_cmd_layout *lay;
 	struct semaphore *sem;
 	unsigned long flags;
+	int alloc_ret;

 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
 	if (!ent->page_queue) {
-		ent->idx = alloc_ent(cmd);
-		if (ent->idx < 0) {
+		alloc_ret = alloc_ent(cmd);
+		if (alloc_ret < 0) {
 			mlx5_core_err(dev, "failed to allocate command entry\n");
+			if (ent->callback) {
+				ent->callback(-EAGAIN, ent->context);
+				mlx5_free_cmd_msg(dev, ent->out);
+				free_msg(dev, ent->in);
+				free_cmd(ent);
+			} else {
+				ent->ret = -EAGAIN;
+				complete(&ent->done);
+			}
 			up(sem);
 			return;
 		}
+		ent->idx = alloc_ret;
 	} else {
 		ent->idx = cmd->max_reg_cmds;
 		spin_lock_irqsave(&cmd->alloc_lock, flags);
@@ -955,7 +970,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,

 	err = wait_func(dev, ent);
 	if (err == -ETIMEDOUT)
-		goto out_free;
+		goto out;

 	ds = ent->ts2 - ent->ts1;
 	op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -1419,6 +1434,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
 						      ent->idx);
 					free_ent(cmd, ent->idx);
+					free_cmd(ent);
 				}
 				continue;
 			}
@@ -1477,7 +1493,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 				free_msg(dev, ent->in);

 				err = err ? err : ent->status;
-				free_cmd(ent);
+				if (!forced)
+					free_cmd(ent);
 				callback(err, context);
 			} else {
 				complete(&ent->done);
View file

@@ -261,6 +261,14 @@ struct mlx5e_dcbx {
 };
 #endif

+#define MAX_PIN_NUM	8
+struct mlx5e_pps {
+	u8                         pin_caps[MAX_PIN_NUM];
+	struct work_struct         out_work;
+	u64                        start[MAX_PIN_NUM];
+	u8                         enabled;
+};
+
 struct mlx5e_tstamp {
 	rwlock_t                   lock;
 	struct cyclecounter        cycles;
@@ -272,7 +280,7 @@ struct mlx5e_tstamp {
 	struct mlx5_core_dev      *mdev;
 	struct ptp_clock          *ptp;
 	struct ptp_clock_info      ptp_info;
-	u8                        *pps_pin_caps;
+	struct mlx5e_pps           pps_info;
 };

 enum {
View file

@@ -53,6 +53,15 @@ enum {
 	MLX5E_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
 };

+enum {
+	MLX5E_MTPPS_FS_ENABLE			= BIT(0x0),
+	MLX5E_MTPPS_FS_PATTERN			= BIT(0x2),
+	MLX5E_MTPPS_FS_PIN_MODE			= BIT(0x3),
+	MLX5E_MTPPS_FS_TIME_STAMP		= BIT(0x4),
+	MLX5E_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
+	MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
+};
+
 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
 			struct skb_shared_hwtstamps *hwts)
 {
@@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
 	return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
 }

+static void mlx5e_pps_out(struct work_struct *work)
+{
+	struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
+						  out_work);
+	struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
+						   pps_info);
+	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+		u64 tstart;
+
+		write_lock_irqsave(&tstamp->lock, flags);
+		tstart = tstamp->pps_info.start[i];
+		tstamp->pps_info.start[i] = 0;
+		write_unlock_irqrestore(&tstamp->lock, flags);
+		if (!tstart)
+			continue;
+
+		MLX5_SET(mtpps_reg, in, pin, i);
+		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
+		MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
+		mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
+	}
+}
+
 static void mlx5e_timestamp_overflow(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
 						   overflow_work);
+	struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
 	unsigned long flags;

 	write_lock_irqsave(&tstamp->lock, flags);
 	timecounter_read(&tstamp->clock);
 	write_unlock_irqrestore(&tstamp->lock, flags);
-	schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
+	queue_delayed_work(priv->wq, &tstamp->overflow_work,
+			   msecs_to_jiffies(tstamp->overflow_period * 1000));
 }

 int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -214,18 +252,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 	int neg_adj = 0;
 	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
 						   ptp_info);
-	struct mlx5e_priv *priv =
-		container_of(tstamp, struct mlx5e_priv, tstamp);
-
-	if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
-		u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-
-		/* For future use need to add a loop for finding all 1PPS out pins */
-		MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
-		MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
-
-		mlx5_set_mtpps(priv->mdev, in, sizeof(in));
-	}

 	if (delta < 0) {
 		neg_adj = 1;
@@ -254,12 +280,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
 	struct mlx5e_priv *priv =
 		container_of(tstamp, struct mlx5e_priv, tstamp);
 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+	u32 field_select = 0;
+	u8 pin_mode = 0;
 	u8 pattern = 0;
 	int pin = -1;
 	int err = 0;

-	if (!MLX5_CAP_GEN(priv->mdev, pps) ||
-	    !MLX5_CAP_GEN(priv->mdev, pps_modify))
+	if (!MLX5_PPS_CAP(priv->mdev))
 		return -EOPNOTSUPP;

 	if (rq->extts.index >= tstamp->ptp_info.n_pins)
@@ -269,15 +296,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
 		pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
 		if (pin < 0)
 			return -EBUSY;
+		pin_mode = MLX5E_PIN_MODE_IN;
+		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
+		field_select = MLX5E_MTPPS_FS_PIN_MODE |
+			       MLX5E_MTPPS_FS_PATTERN |
+			       MLX5E_MTPPS_FS_ENABLE;
+	} else {
+		pin = rq->extts.index;
+		field_select = MLX5E_MTPPS_FS_ENABLE;
 	}

-	if (rq->extts.flags & PTP_FALLING_EDGE)
-		pattern = 1;
-
 	MLX5_SET(mtpps_reg, in, pin, pin);
-	MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
+	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
 	MLX5_SET(mtpps_reg, in, pattern, pattern);
 	MLX5_SET(mtpps_reg, in, enable, on);
+	MLX5_SET(mtpps_reg, in, field_select, field_select);

 	err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
 	if (err)
@@ -296,14 +329,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
 	struct mlx5e_priv *priv =
 		container_of(tstamp, struct mlx5e_priv, tstamp);
 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-	u64 nsec_now, nsec_delta, time_stamp;
+	u64 nsec_now, nsec_delta, time_stamp = 0;
 	u64 cycles_now, cycles_delta;
 	struct timespec64 ts;
 	unsigned long flags;
+	u32 field_select = 0;
+	u8 pin_mode = 0;
+	u8 pattern = 0;
 	int pin = -1;
+	int err = 0;
 	s64 ns;

-	if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
+	if (!MLX5_PPS_CAP(priv->mdev))
 		return -EOPNOTSUPP;

 	if (rq->perout.index >= tstamp->ptp_info.n_pins)
@@ -314,32 +351,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
-	pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
-			   rq->perout.index);
-	if (pin < 0)
-		return -EBUSY;
-
-	ts.tv_sec = rq->perout.period.sec;
-	ts.tv_nsec = rq->perout.period.nsec;
-	ns = timespec64_to_ns(&ts);
-	if (on)
-		if ((ns >> 1) != 500000000LL)
-			return -EINVAL;
-	ts.tv_sec = rq->perout.start.sec;
-	ts.tv_nsec = rq->perout.start.nsec;
-	ns = timespec64_to_ns(&ts);
-	cycles_now = mlx5_read_internal_timer(tstamp->mdev);
-	write_lock_irqsave(&tstamp->lock, flags);
-	nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
-	nsec_delta = ns - nsec_now;
-	cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
-				 tstamp->cycles.mult);
-	write_unlock_irqrestore(&tstamp->lock, flags);
-	time_stamp = cycles_now + cycles_delta;
+	if (on) {
+		pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
+				   rq->perout.index);
+		if (pin < 0)
+			return -EBUSY;
+
+		pin_mode = MLX5E_PIN_MODE_OUT;
+		pattern = MLX5E_OUT_PATTERN_PERIODIC;
+		ts.tv_sec = rq->perout.period.sec;
+		ts.tv_nsec = rq->perout.period.nsec;
+		ns = timespec64_to_ns(&ts);
+
+		if ((ns >> 1) != 500000000LL)
+			return -EINVAL;
+
+		ts.tv_sec = rq->perout.start.sec;
+		ts.tv_nsec = rq->perout.start.nsec;
+		ns = timespec64_to_ns(&ts);
+		cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+		write_lock_irqsave(&tstamp->lock, flags);
+		nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+		nsec_delta = ns - nsec_now;
+		cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+					 tstamp->cycles.mult);
+		write_unlock_irqrestore(&tstamp->lock, flags);
+		time_stamp = cycles_now + cycles_delta;
+		field_select = MLX5E_MTPPS_FS_PIN_MODE |
+			       MLX5E_MTPPS_FS_PATTERN |
+			       MLX5E_MTPPS_FS_ENABLE |
+			       MLX5E_MTPPS_FS_TIME_STAMP;
+	} else {
+		pin = rq->perout.index;
+		field_select = MLX5E_MTPPS_FS_ENABLE;
+	}

 	MLX5_SET(mtpps_reg, in, pin, pin);
-	MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
-	MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
+	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+	MLX5_SET(mtpps_reg, in, pattern, pattern);
 	MLX5_SET(mtpps_reg, in, enable, on);
 	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+	MLX5_SET(mtpps_reg, in, field_select, field_select);

-	return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+	err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+	if (err)
+		return err;
+
+	return mlx5_set_mtppse(priv->mdev, pin, 0,
+			       MLX5E_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
+			       struct ptp_clock_request *rq,
+			       int on)
+{
+	struct mlx5e_tstamp *tstamp =
+		container_of(ptp, struct mlx5e_tstamp, ptp_info);
+
+	tstamp->pps_info.enabled = !!on;
+	return 0;
 }

 static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
@@ -351,6 +416,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
 		return mlx5e_extts_configure(ptp, rq, on);
 	case PTP_CLK_REQ_PEROUT:
 		return mlx5e_perout_configure(ptp, rq, on);
+	case PTP_CLK_REQ_PPS:
+		return mlx5e_pps_configure(ptp, rq, on);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -396,6 +463,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
 		return -ENOMEM;
 	tstamp->ptp_info.enable = mlx5e_ptp_enable;
 	tstamp->ptp_info.verify = mlx5e_ptp_verify;
+	tstamp->ptp_info.pps = 1;

 	for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
 		snprintf(tstamp->ptp_info.pin_config[i].name,
@@ -423,22 +491,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
 	tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
 					      cap_max_num_of_pps_out_pins);

-	tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
-	tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
-	tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
-	tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
-	tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
-	tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
-	tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
-	tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+	tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+	tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+	tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+	tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+	tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+	tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+	tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+	tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
 }

 void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
 			     struct ptp_clock_event *event)
 {
+	struct net_device *netdev = priv->netdev;
 	struct mlx5e_tstamp *tstamp = &priv->tstamp;
+	struct timespec64 ts;
+	u64 nsec_now, nsec_delta;
+	u64 cycles_now, cycles_delta;
+	int pin = event->index;
+	s64 ns;
+	unsigned long flags;

-	ptp_clock_event(tstamp->ptp, event);
+	switch (tstamp->ptp_info.pin_config[pin].func) {
+	case PTP_PF_EXTTS:
+		if (tstamp->pps_info.enabled) {
+			event->type = PTP_CLOCK_PPSUSR;
+			event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
+		} else {
+			event->type = PTP_CLOCK_EXTTS;
+		}
+		ptp_clock_event(tstamp->ptp, event);
+		break;
+	case PTP_PF_PEROUT:
+		mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
+		cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+		ts.tv_sec += 1;
+		ts.tv_nsec = 0;
+		ns = timespec64_to_ns(&ts);
+		write_lock_irqsave(&tstamp->lock, flags);
+		nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+		nsec_delta = ns - nsec_now;
+		cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+					 tstamp->cycles.mult);
+		tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
+		queue_work(priv->wq, &tstamp->pps_info.out_work);
+		write_unlock_irqrestore(&tstamp->lock, flags);
+		break;
+	default:
+		netdev_err(netdev, "%s: Unhandled event\n", __func__);
+	}
 }

 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
@@ -474,9 +576,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 	do_div(ns, NSEC_PER_SEC / 2 / HZ);
 	tstamp->overflow_period = ns;

+	INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
 	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
 	if (tstamp->overflow_period)
-		schedule_delayed_work(&tstamp->overflow_work, 0);
+		queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
 	else
 		mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");

@@ -485,16 +588,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 	snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");

 	/* Initialize 1PPS data structures */
-#define MAX_PIN_NUM	8
-	tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
-	if (tstamp->pps_pin_caps) {
-		if (MLX5_CAP_GEN(priv->mdev, pps))
-			mlx5e_get_pps_caps(priv, tstamp);
-		if (tstamp->ptp_info.n_pins)
-			mlx5e_init_pin_config(tstamp);
-	} else {
-		mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
-	}
+	if (MLX5_PPS_CAP(priv->mdev))
+		mlx5e_get_pps_caps(priv, tstamp);
+	if (tstamp->ptp_info.n_pins)
+		mlx5e_init_pin_config(tstamp);

 	tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
 					 &priv->mdev->pdev->dev);
@@ -517,8 +614,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
 		priv->tstamp.ptp = NULL;
 	}

-	kfree(tstamp->pps_pin_caps);
-	kfree(tstamp->ptp_info.pin_config);
-
+	cancel_work_sync(&tstamp->pps_info.out_work);
 	cancel_delayed_work_sync(&tstamp->overflow_work);
+	kfree(tstamp->ptp_info.pin_config);
 }
View file

@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
 static bool outer_header_zero(u32 *match_criteria)
 {
-	int size = MLX5_ST_SZ_BYTES(fte_match_param);
+	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
 	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
 					     outer_headers);
@@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,

 	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
 	flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
-	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
+	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
View file

@@ -365,7 +365,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 		break;
 	case MLX5_DEV_EVENT_PPS:
 		eqe = (struct mlx5_eqe *)param;
-		ptp_event.type = PTP_CLOCK_EXTTS;
 		ptp_event.index = eqe->data.pps.pin;
 		ptp_event.timestamp =
 			timecounter_cyc2time(&priv->tstamp.clock,
View file

@@ -690,7 +690,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 	else
 		mlx5_core_dbg(dev, "port_module_event is not set\n");

-	if (MLX5_CAP_GEN(dev, pps))
+	if (MLX5_PPS_CAP(dev))
 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

 	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
View file

@@ -160,8 +160,6 @@ out:

 static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
 {
-	mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
-
 	mlx5_core_destroy_qp(mdev, qp);
 }

@@ -176,8 +174,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
 		return err;
 	}

-	mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
-
 	err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
 	if (err) {
 		mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
@@ -235,6 +231,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)

 static int mlx5i_init_rx(struct mlx5e_priv *priv)
 {
+	struct mlx5i_priv *ipriv = priv->ppriv;
 	int err;

 	err = mlx5e_create_indirect_rqt(priv);
@@ -253,12 +250,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 	if (err)
 		goto err_destroy_indirect_tirs;

-	err = mlx5i_create_flow_steering(priv);
+	err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
 	if (err)
 		goto err_destroy_direct_tirs;

+	err = mlx5i_create_flow_steering(priv);
+	if (err)
+		goto err_remove_rx_underlay_qpn;
+
 	return 0;

+err_remove_rx_underlay_qpn:
+	mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
 err_destroy_direct_tirs:
 	mlx5e_destroy_direct_tirs(priv);
 err_destroy_indirect_tirs:
@@ -272,6 +275,9 @@ err_destroy_indirect_rqts:

 static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 {
+	struct mlx5i_priv *ipriv = priv->ppriv;
+
+	mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
 	mlx5i_destroy_flow_steering(priv);
 	mlx5e_destroy_direct_tirs(priv);
 	mlx5e_destroy_indirect_tirs(priv);
View file

@@ -157,22 +157,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
 static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
 					   u8 *port1, u8 *port2)
 {
-	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
-		if (tracker->netdev_state[0].tx_enabled) {
-			*port1 = 1;
-			*port2 = 1;
-		} else {
-			*port1 = 2;
-			*port2 = 2;
-		}
-	} else {
-		*port1 = 1;
-		*port2 = 2;
-		if (!tracker->netdev_state[0].link_up)
-			*port1 = 2;
-		else if (!tracker->netdev_state[1].link_up)
-			*port2 = 1;
+	*port1 = 1;
+	*port2 = 2;
+	if (!tracker->netdev_state[0].tx_enabled ||
+	    !tracker->netdev_state[0].link_up) {
+		*port1 = 2;
+		return;
 	}
+
+	if (!tracker->netdev_state[1].tx_enabled ||
+	    !tracker->netdev_state[1].link_up)
+		*port2 = 1;
 }

 static void mlx5_activate_lag(struct mlx5_lag *ldev,
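The rewritten mapping treats both LAG modes uniformly: each port prefers itself, traffic is steered away from a port that is not tx-enabled or has lost link, and if port 1 is unusable everything moves to port 2 with an early return. A stand-alone mirror of the same decision logic (user-space illustration, not the driver code):

#include <stdbool.h>
#include <stdio.h>

struct port_state { bool tx_enabled; bool link_up; };

/* Mirrors the simplified mlx5 affinity logic above. */
static void infer_tx_affinity(const struct port_state s[2],
			      unsigned int *port1, unsigned int *port2)
{
	*port1 = 1;
	*port2 = 2;
	if (!s[0].tx_enabled || !s[0].link_up) {
		*port1 = 2;
		return;	/* everything steers to port 2 */
	}
	if (!s[1].tx_enabled || !s[1].link_up)
		*port2 = 1;
}

int main(void)
{
	struct port_state s[2] = { { true, true }, { true, false } };
	unsigned int p1, p2;

	infer_tx_affinity(s, &p1, &p2);
	printf("port1 -> %u, port2 -> %u\n", p1, p2); /* port1 -> 1, port2 -> 1 */
	return 0;
}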
View file

@@ -153,6 +153,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
 int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
 int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);

+#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&		\
+			    MLX5_CAP_GEN((mdev), pps_modify) &&		\
+			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&	\
+			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
+
 void mlx5e_init(void);
 void mlx5e_cleanup(void);
View file

@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
 static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
 {
 	struct usb_device *dev = mcs->usbdev;
-	int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
-				  MCS_RD_RTYPE, 0, reg, val, 2,
-				  msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+	void *dmabuf;
+	int ret;
+
+	dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+			      MCS_RD_RTYPE, 0, reg, dmabuf, 2,
+			      msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+
+	memcpy(val, dmabuf, sizeof(__u16));
+	kfree(dmabuf);

 	return ret;
 }
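The mcs7780 change fixes a classic USB pitfall: usb_control_msg() may DMA into the transfer buffer, so the buffer must come from the heap, never from the stack or a field of a larger structure. The general shape of the fix as a stand-alone helper (the name and request constants are illustrative; note that usb_control_msg() takes its timeout in milliseconds):

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb.h>

/* Illustrative helper: bounce a 16-bit register read through a kmalloc'd
 * buffer so the USB core can safely DMA into it, then copy the result out. */
static int usb_read_reg16(struct usb_device *udev, u8 request, u16 reg,
			  u16 *val, unsigned int timeout_ms)
{
	void *dmabuf;
	int ret;

	dmabuf = kmalloc(sizeof(*val), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, reg, dmabuf, sizeof(*val), timeout_ms);

	memcpy(val, dmabuf, sizeof(*val));
	kfree(dmabuf);
	return ret;
}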
View file

@@ -749,6 +749,9 @@ void phy_stop_machine(struct phy_device *phydev)
 	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
 		phydev->state = PHY_UP;
 	mutex_unlock(&phydev->lock);
+
+	/* Now we can run the state machine synchronously */
+	phy_state_machine(&phydev->state_queue.work);
 }

 /**
View file

@@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,

 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
 	buf += headroom; /* advance address leaving hole at front of pkt */
-	ctx = (void *)(unsigned long)len;
 	get_page(alloc_frag->page);
 	alloc_frag->offset += len + headroom;
 	hole = alloc_frag->size - alloc_frag->offset;
 	if (hole < len + headroom) {
 		/* To avoid internal fragmentation, if there is very likely not
 		 * enough space for another buffer, add the remaining space to
-		 * the current buffer. This extra space is not included in
-		 * the truesize stored in ctx.
+		 * the current buffer.
 		 */
 		len += hole;
 		alloc_frag->offset += hole;
 	}

 	sg_init_one(rq->sg, buf, len);
+	ctx = (void *)(unsigned long)len;
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0)
 		put_page(virt_to_head_page(buf));
View file

@@ -4168,11 +4168,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 		goto fail;
 	}

-	/* allocate scatter-gather table. sg support
-	 * will be disabled upon allocation failure.
-	 */
-	brcmf_sdiod_sgtable_alloc(bus->sdiodev);
-
 	/* Query the F2 block size, set roundup accordingly */
 	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
 	bus->roundup = min(max_roundup, bus->blocksize);
View file

@@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 					next_reclaimed;
 			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
 					   next_reclaimed);
+			iwlagn_check_ratid_empty(priv, sta_id, tid);
 		}

 		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);

-		iwlagn_check_ratid_empty(priv, sta_id, tid);
-
 		freed = 0;

 		/* process frames */
View file

@@ -1241,6 +1241,8 @@ config SCSI_LPFC
 	tristate "Emulex LightPulse Fibre Channel Support"
 	depends on PCI && SCSI
 	depends on SCSI_FC_ATTRS
+	depends on NVME_TARGET_FC || NVME_TARGET_FC=n
+	depends on NVME_FC || NVME_FC=n
 	select CRC_T10DIF
 	---help---
 	  This lpfc driver supports the Emulex LightPulse
View file

@@ -437,7 +437,7 @@ static int scatter_data_area(struct tcmu_dev *udev,
 			to_offset = get_block_offset_user(udev, dbi,
 					block_remaining);
 			offset = DATA_BLOCK_SIZE - block_remaining;
-			to = (void *)(unsigned long)to + offset;
+			to += offset;

 			if (*iov_cnt != 0 &&
 			    to_offset == iov_tail(udev, *iov)) {
@@ -510,7 +510,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 			copy_bytes = min_t(size_t, sg_remaining,
 					block_remaining);
 			offset = DATA_BLOCK_SIZE - block_remaining;
-			from = (void *)(unsigned long)from + offset;
+			from += offset;
 			tcmu_flush_dcache_range(from, copy_bytes);
 			memcpy(to + sg->length - sg_remaining, from,
 				copy_bytes);
@@ -699,25 +699,24 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

 		entry = (void *) mb + CMDR_OFF + cmd_head;
-		tcmu_flush_dcache_range(entry, sizeof(*entry));
 		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
 		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
 		entry->hdr.cmd_id = 0; /* not used for PAD */
 		entry->hdr.kflags = 0;
 		entry->hdr.uflags = 0;
+		tcmu_flush_dcache_range(entry, sizeof(*entry));

 		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
+		tcmu_flush_dcache_range(mb, sizeof(*mb));

 		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
 		WARN_ON(cmd_head != 0);
 	}

 	entry = (void *) mb + CMDR_OFF + cmd_head;
-	tcmu_flush_dcache_range(entry, sizeof(*entry));
+	memset(entry, 0, command_size);
 	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
 	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
-	entry->hdr.kflags = 0;
-	entry->hdr.uflags = 0;

 	/* Handle allocating space from the data area */
 	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
@@ -736,11 +735,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
 	entry->req.iov_cnt = iov_cnt;
-	entry->req.iov_dif_cnt = 0;

 	/* Handle BIDI commands */
+	iov_cnt = 0;
 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		iov_cnt = 0;
 		iov++;
 		ret = scatter_data_area(udev, tcmu_cmd,
 					se_cmd->t_bidi_data_sg,
@@ -753,8 +751,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 			pr_err("tcmu: alloc and scatter bidi data failed\n");
 			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		}
-		entry->req.iov_bidi_cnt = iov_cnt;
 	}
+	entry->req.iov_bidi_cnt = iov_cnt;

 	/*
 	 * Recalaulate the command's base size and size according
View file

@@ -4776,10 +4776,6 @@ skip_async:
 		else
 			flush = BTRFS_RESERVE_NO_FLUSH;
 		spin_lock(&space_info->lock);
-		if (can_overcommit(root, space_info, orig, flush)) {
-			spin_unlock(&space_info->lock);
-			break;
-		}
 		if (list_empty(&space_info->tickets) &&
 		    list_empty(&space_info->priority_tickets)) {
 			spin_unlock(&space_info->lock);
View file

@@ -193,13 +193,6 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
-		if (acl) {
-			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
-			if (error)
-				return error;
-			inode->i_ctime = current_time(inode);
-			ext4_mark_inode_dirty(handle, inode);
-		}
 		break;

 	case ACL_TYPE_DEFAULT:
@@ -221,8 +214,9 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
 				      value, size, 0);

 	kfree(value);
-	if (!error)
+	if (!error) {
 		set_cached_acl(inode, type, acl);
+	}

 	return error;
 }
@@ -232,6 +226,8 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
 	handle_t *handle;
 	int error, retries = 0;
+	umode_t mode = inode->i_mode;
+	int update_mode = 0;

 	error = dquot_initialize(inode);
 	if (error)
@@ -242,7 +238,20 @@ retry:
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);

+	if ((type == ACL_TYPE_ACCESS) && acl) {
+		error = posix_acl_update_mode(inode, &mode, &acl);
+		if (error)
+			goto out_stop;
+		update_mode = 1;
+	}
+
 	error = __ext4_set_acl(handle, inode, type, acl);
+	if (!error && update_mode) {
+		inode->i_mode = mode;
+		inode->i_ctime = current_time(inode);
+		ext4_mark_inode_dirty(handle, inode);
+	}
+
+out_stop:
 	ext4_journal_stop(handle);
 	if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
View file

@@ -521,6 +521,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 				lastoff = page_offset(page);
 				bh = head = page_buffers(page);
 				do {
+					if (lastoff + bh->b_size <= startoff)
+						goto next;
 					if (buffer_uptodate(bh) ||
 					    buffer_unwritten(bh)) {
 						if (whence == SEEK_DATA)
@@ -535,6 +537,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 						unlock_page(page);
 						goto out;
 					}
+next:
 					lastoff += bh->b_size;
 					bh = bh->b_this_page;
 				} while (bh != head);
View file

@@ -1927,7 +1927,8 @@ retry:
 		n_desc_blocks = o_desc_blocks +
 			le16_to_cpu(es->s_reserved_gdt_blocks);
 		n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
-		n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
+		n_blocks_count = (ext4_fsblk_t)n_group *
+			EXT4_BLOCKS_PER_GROUP(sb);
 		n_group--; /* set to last group number */
 	}
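The resize fix above is a 32-bit multiplication overflow: n_group and EXT4_BLOCKS_PER_GROUP() are both 32-bit, so their product wraps before it is stored into the 64-bit n_blocks_count, and casting one operand widens the whole multiplication. The same idiom appears again in the timer hunk further down, as (u64)(nextevt - basej) * TICK_NSEC. A minimal stand-alone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n_group = 140000;	/* e.g. a large group count */
	uint32_t per_group = 32768;	/* blocks per group */

	uint64_t wrapped = n_group * per_group;		  /* 32-bit multiply wraps */
	uint64_t correct = (uint64_t)n_group * per_group; /* widened before multiply */

	printf("wrapped: %llu\n", (unsigned long long)wrapped); /* 292552704 */
	printf("correct: %llu\n", (unsigned long long)correct); /* 4587520000 */
	return 0;
}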
View file

@@ -7407,7 +7407,7 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
 			cdata->res.server_scope = NULL;
 		}
 		/* Save the EXCHANGE_ID verifier session trunk tests */
-		memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
+		memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
 		       sizeof(clp->cl_confirm.data));
 	}
 out:
@@ -7444,7 +7444,6 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
 			u32 sp4_how, struct rpc_xprt *xprt)
 {
-	nfs4_verifier verifier;
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
 		.rpc_cred = cred,
@@ -7468,8 +7467,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
 		return -ENOMEM;
 	}

-	if (!xprt)
-		nfs4_init_boot_verifier(clp, &verifier);
+	nfs4_init_boot_verifier(clp, &calldata->args.verifier);

 	status = nfs4_init_uniform_client_string(clp);
 	if (status)
@@ -7510,9 +7508,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
 		task_setup_data.rpc_xprt = xprt;
 		task_setup_data.flags =
 				RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
-		calldata->args.verifier = &clp->cl_confirm;
-	} else {
-		calldata->args.verifier = &verifier;
+		memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
+		       sizeof(calldata->args.verifier.data));
 	}
 	calldata->args.client = clp;
 #ifdef CONFIG_NFS_V4_1_MIGRATION
View file

@@ -1765,7 +1765,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
 	int len = 0;

 	encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
-	encode_nfs4_verifier(xdr, args->verifier);
+	encode_nfs4_verifier(xdr, &args->verifier);

 	encode_string(xdr, strlen(args->client->cl_owner_id),
 		      args->client->cl_owner_id);
View file

@@ -240,18 +240,6 @@ int ocfs2_set_acl(handle_t *handle,
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
-		if (acl) {
-			umode_t mode;
-
-			ret = posix_acl_update_mode(inode, &mode, &acl);
-			if (ret)
-				return ret;
-
-			ret = ocfs2_acl_set_mode(inode, di_bh,
-						 handle, mode);
-			if (ret)
-				return ret;
-		}
 		break;
 	case ACL_TYPE_DEFAULT:
 		name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
@@ -289,7 +277,19 @@ int ocfs2_iop_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
 	if (had_lock < 0)
 		return had_lock;
+
+	if (type == ACL_TYPE_ACCESS && acl) {
+		umode_t mode;
+
+		status = posix_acl_update_mode(inode, &mode, &acl);
+		if (status)
+			goto unlock;
+
+		status = ocfs2_acl_set_mode(inode, bh, NULL, mode);
+		if (status)
+			goto unlock;
+	}
+
 	status = ocfs2_set_acl(NULL, inode, bh, type, acl, NULL, NULL);
+unlock:
 	ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
 	brelse(bh);
 	return status;
View file

@@ -851,6 +851,9 @@ wakeup:
 	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
 	spin_unlock(&ctx->fault_pending_wqh.lock);

+	/* Flush pending events that may still wait on event_wqh */
+	wake_up_all(&ctx->event_wqh);
+
 	wake_up_poll(&ctx->fd_wqh, POLLHUP);
 	userfaultfd_ctx_put(ctx);
 	return 0;
@@ -1645,6 +1648,8 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
 		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
 				     uffdio_zeropage.range.len);
 		mmput(ctx->mm);
+	} else {
+		return -ENOSPC;
 	}
 	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
 		return -EFAULT;
View file

@@ -58,7 +58,6 @@ enum cpuhp_state {
 	CPUHP_XEN_EVTCHN_PREPARE,
 	CPUHP_ARM_SHMOBILE_SCU_PREPARE,
 	CPUHP_SH_SH3X_PREPARE,
-	CPUHP_BLK_MQ_PREPARE,
 	CPUHP_NET_FLOW_PREPARE,
 	CPUHP_TOPOLOGY_PREPARE,
 	CPUHP_NET_IUCV_PREPARE,
View file

@@ -18,6 +18,19 @@

 #ifdef CONFIG_CPUSETS

+/*
+ * Static branch rewrites can happen in an arbitrary order for a given
+ * key. In code paths where we need to loop with read_mems_allowed_begin() and
+ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
+ * to ensure that begin() always gets rewritten before retry() in the
+ * disabled -> enabled transition. If not, then if local irqs are disabled
+ * around the loop, we can deadlock since retry() would always be
+ * comparing the latest value of the mems_allowed seqcount against 0 as
+ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
+ * transition should happen in reverse order for the same reasons (want to stop
+ * looking at real value of mems_allowed.sequence in retry() first).
+ */
+extern struct static_key_false cpusets_pre_enable_key;
 extern struct static_key_false cpusets_enabled_key;
 static inline bool cpusets_enabled(void)
 {
@@ -32,12 +45,14 @@ static inline int nr_cpusets(void)

 static inline void cpuset_inc(void)
 {
+	static_branch_inc(&cpusets_pre_enable_key);
 	static_branch_inc(&cpusets_enabled_key);
 }

 static inline void cpuset_dec(void)
 {
 	static_branch_dec(&cpusets_enabled_key);
+	static_branch_dec(&cpusets_pre_enable_key);
 }

 extern int cpuset_init(void);
@@ -115,7 +130,7 @@ extern void cpuset_print_current_mems_allowed(void);
  */
 static inline unsigned int read_mems_allowed_begin(void)
 {
-	if (!cpusets_enabled())
+	if (!static_branch_unlikely(&cpusets_pre_enable_key))
 		return 0;

 	return read_seqcount_begin(&current->mems_allowed_seq);
@@ -129,7 +144,7 @@ static inline unsigned int read_mems_allowed_begin(void)
  */
 static inline bool read_mems_allowed_retry(unsigned int seq)
 {
-	if (!cpusets_enabled())
+	if (!static_branch_unlikely(&cpusets_enabled_key))
 		return false;

 	return read_seqcount_retry(&current->mems_allowed_seq, seq);
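The ordering constraint in the new comment exists because of how callers pair these two helpers. Schematically (a simplified consumer loop, not a literal copy of any mm/ call site; try_alloc_under_current_mems_allowed() is a made-up stand-in):

/* Schematic consumer of the mems_allowed seqcount. If retry() were
 * patched live before begin(), begin() would still return 0 while
 * retry() compares against the real, moving seqcount, so with local
 * IRQs disabled this loop could never terminate. */
static struct page *alloc_with_consistent_mems(void)
{
	struct page *page;
	unsigned int seq;

	do {
		seq = read_mems_allowed_begin();
		page = try_alloc_under_current_mems_allowed();
	} while (!page && read_mems_allowed_retry(seq));

	return page;
}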
View file

@@ -7718,8 +7718,10 @@ struct mlx5_ifc_pcam_reg_bits {
 };

 struct mlx5_ifc_mcam_enhanced_features_bits {
-	u8         reserved_at_0[0x7f];
+	u8         reserved_at_0[0x7d];
+	u8         mtpps_enh_out_per_adj[0x1];
+	u8         mtpps_fs[0x1];
 	u8         pcie_performance_group[0x1];
 };

@@ -8115,7 +8117,8 @@ struct mlx5_ifc_mtpps_reg_bits {
 	u8         reserved_at_78[0x4];
 	u8         cap_pin_4_mode[0x4];

-	u8         reserved_at_80[0x80];
+	u8         field_select[0x20];
+	u8         reserved_at_a0[0x60];

 	u8         enable[0x1];
 	u8         reserved_at_101[0xb];
@@ -8130,8 +8133,9 @@ struct mlx5_ifc_mtpps_reg_bits {

 	u8         out_pulse_duration[0x10];
 	u8         out_periodic_adjustment[0x10];
+	u8         enhanced_out_periodic_adjustment[0x20];

-	u8         reserved_at_1a0[0x60];
+	u8         reserved_at_1c0[0x20];
 };

 struct mlx5_ifc_mtppse_reg_bits {
View file

@@ -494,6 +494,10 @@ struct mm_struct {
 	 * PROT_NONE or PROT_NUMA mapped page.
 	 */
 	bool tlb_flush_pending;
+#endif
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	/* See flush_tlb_batched_pending() */
+	bool tlb_flush_batched;
 #endif
 	struct uprobes_state uprobes_state;
 #ifdef CONFIG_HUGETLB_PAGE
View file

@@ -1222,7 +1222,7 @@ struct nfs41_state_protection {

 struct nfs41_exchange_id_args {
 	struct nfs_client		*client;
-	nfs4_verifier			*verifier;
+	nfs4_verifier			verifier;
 	u32				flags;
 	struct nfs41_state_protection	state_protect;
 };
View file

@@ -323,6 +323,7 @@ enum {
 	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
 	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
 	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
+	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
@@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
 #define alloc_ordered_workqueue(fmt, flags, args...)			\
-	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
+			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

 #define create_workqueue(name)						\
 	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
View file

@@ -469,6 +469,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)

 #define _sctp_walk_params(pos, chunk, end, member)\
 for (pos.v = chunk->member;\
+     (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
+      (void *)chunk + end) &&\
      pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
      ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
      pos.v += SCTP_PAD4(ntohs(pos.p->length)))
@@ -479,6 +481,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))

 #define _sctp_walk_errors(err, chunk_hdr, end)\
 for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
	    sizeof(sctp_chunkhdr_t));\
+     ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
+      (void *)chunk_hdr + end) &&\
      (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
      err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
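Both macro changes apply the standard rule for TLV-style parsing: check that a header's length field itself lies inside the buffer before reading it, and only then trust the length for the body. The same shape as a stand-alone walker (illustrative, not the SCTP code; assumes a suitably aligned input buffer):

#include <stddef.h>
#include <stdint.h>

struct tlv {
	uint16_t type;
	uint16_t len;	/* total length including this header */
};

/* Counts well-formed TLVs, stopping at the first header or body that
 * would run past the end of the buffer. */
static size_t walk_tlvs(const unsigned char *buf, size_t buflen)
{
	size_t off = 0, count = 0;

	while (off + sizeof(struct tlv) <= buflen) {	/* header in bounds? */
		const struct tlv *t = (const struct tlv *)(buf + off);
		uint16_t len = t->len;

		if (len < sizeof(struct tlv) || len > buflen - off)
			break;				/* malformed or truncated */
		count++;
		off += (len + 3u) & ~3u;		/* pad to 4, like SCTP_PAD4 */
	}
	return count;
}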
View file

@@ -265,6 +265,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
 }

 void udp_v4_early_demux(struct sk_buff *skb);
+void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
View file

@@ -795,10 +795,6 @@ struct snd_soc_component_driver {
 	int (*suspend)(struct snd_soc_component *);
 	int (*resume)(struct snd_soc_component *);

-	/* pcm creation and destruction */
-	int (*pcm_new)(struct snd_soc_pcm_runtime *);
-	void (*pcm_free)(struct snd_pcm *);
-
 	/* DT */
 	int (*of_xlate_dai_name)(struct snd_soc_component *component,
				 struct of_phandle_args *args,
@@ -872,8 +868,6 @@ struct snd_soc_component {
 	void (*remove)(struct snd_soc_component *);
 	int (*suspend)(struct snd_soc_component *);
 	int (*resume)(struct snd_soc_component *);
-	int (*pcm_new)(struct snd_soc_pcm_runtime *);
-	void (*pcm_free)(struct snd_pcm *);

 	/* machine specific init */
 	int (*init)(struct snd_soc_component *component);
View file

@@ -33,6 +33,9 @@ struct cgroup_taskset {
 	struct list_head	src_csets;
 	struct list_head	dst_csets;

+	/* the number of tasks in the set */
+	int			nr_tasks;
+
 	/* the subsys currently being processed */
 	int			ssid;
View file

@@ -1948,6 +1948,8 @@ static void cgroup_migrate_add_task(struct task_struct *task,
 	if (!cset->mg_src_cgrp)
 		return;

+	mgctx->tset.nr_tasks++;
+
 	list_move_tail(&task->cg_list, &cset->mg_tasks);
 	if (list_empty(&cset->mg_node))
 		list_add_tail(&cset->mg_node,
@@ -2036,21 +2038,19 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
 	struct css_set *cset, *tmp_cset;
 	int ssid, failed_ssid, ret;

-	/* methods shouldn't be called if no task is actually migrating */
-	if (list_empty(&tset->src_csets))
-		return 0;
-
 	/* check that we can legitimately attach to the cgroup */
-	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
-		if (ss->can_attach) {
-			tset->ssid = ssid;
-			ret = ss->can_attach(tset);
-			if (ret) {
-				failed_ssid = ssid;
-				goto out_cancel_attach;
+	if (tset->nr_tasks) {
+		do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+			if (ss->can_attach) {
+				tset->ssid = ssid;
+				ret = ss->can_attach(tset);
+				if (ret) {
+					failed_ssid = ssid;
+					goto out_cancel_attach;
+				}
 			}
-		}
-	} while_each_subsys_mask();
+		} while_each_subsys_mask();
+	}

 	/*
 	 * Now that we're guaranteed success, proceed to move all tasks to
@@ -2077,25 +2077,29 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
 	 */
 	tset->csets = &tset->dst_csets;

-	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
-		if (ss->attach) {
-			tset->ssid = ssid;
-			ss->attach(tset);
-		}
-	} while_each_subsys_mask();
+	if (tset->nr_tasks) {
+		do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+			if (ss->attach) {
+				tset->ssid = ssid;
+				ss->attach(tset);
+			}
+		} while_each_subsys_mask();
+	}

 	ret = 0;
 	goto out_release_tset;

 out_cancel_attach:
-	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
-		if (ssid == failed_ssid)
-			break;
-		if (ss->cancel_attach) {
-			tset->ssid = ssid;
-			ss->cancel_attach(tset);
-		}
-	} while_each_subsys_mask();
+	if (tset->nr_tasks) {
+		do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+			if (ssid == failed_ssid)
+				break;
+			if (ss->cancel_attach) {
+				tset->ssid = ssid;
+				ss->cancel_attach(tset);
+			}
+		} while_each_subsys_mask();
+	}

 out_release_tset:
 	spin_lock_irq(&css_set_lock);
 	list_splice_init(&tset->dst_csets, &tset->src_csets);
@@ -2917,11 +2921,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 	cgrp->subtree_control &= ~disable;

 	ret = cgroup_apply_control(cgrp);
 	cgroup_finalize_control(cgrp, ret);
+	if (ret)
+		goto out_unlock;

 	kernfs_activate(cgrp->kn);
-	ret = 0;
 out_unlock:
 	cgroup_kn_unlock(of->kn);
 	return ret ?: nbytes;
@@ -4574,6 +4578,10 @@ int __init cgroup_init(void)

 		if (ss->bind)
 			ss->bind(init_css_set.subsys[ssid]);
+
+		mutex_lock(&cgroup_mutex);
+		css_populate_dir(init_css_set.subsys[ssid]);
+		mutex_unlock(&cgroup_mutex);
 	}

 	/* init_css_set.subsys[] has been updated, re-hash */
View file

@@ -63,6 +63,7 @@
 #include <linux/cgroup.h>
 #include <linux/wait.h>
 
+DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
 
 /* See "Frequency meter" comments, below. */
View file
@@ -1495,7 +1495,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 		base->is_idle = false;
 	} else {
 		if (!is_max_delta)
-			expires = basem + (nextevt - basej) * TICK_NSEC;
+			expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
 		/*
 		 * If we expect to sleep more than a tick, mark the base idle:
 		 */
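
The (u64) cast matters on 32-bit builds, where unsigned long is 32 bits: the jiffies delta times TICK_NSEC can exceed 2^32 and would wrap before being widened for the addition to basem. A userspace demonstration (the TICK_NSEC value is illustrative, HZ=100):

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 10000000UL	/* illustrative: 10 ms tick at HZ=100 */

int main(void)
{
	unsigned long basej = 0, nextevt = 1000;	/* 1000 jiffies out */
	uint64_t basem = 0;

	/* With 32-bit unsigned long, the product wraps before widening: */
	uint32_t wrapped = (uint32_t)((uint32_t)(nextevt - basej) *
				      (uint32_t)TICK_NSEC);
	/* The patched form widens first, so it cannot wrap: */
	uint64_t widened = basem + (uint64_t)(nextevt - basej) * TICK_NSEC;

	printf("wrapped=%u widened=%llu\n",
	       wrapped, (unsigned long long)widened);
	return 0;
}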
View file
@@ -3744,8 +3744,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
 		return -EINVAL;
 
 	/* creating multiple pwqs breaks ordering guarantee */
-	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
-		return -EINVAL;
+	if (!list_empty(&wq->pwqs)) {
+		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+			return -EINVAL;
+
+		wq->flags &= ~__WQ_ORDERED;
+	}
 
 	ctx = apply_wqattrs_prepare(wq, attrs);
 	if (!ctx)
@@ -3929,6 +3933,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	struct workqueue_struct *wq;
 	struct pool_workqueue *pwq;
 
+	/*
+	 * Unbound && max_active == 1 used to imply ordered, which is no
+	 * longer the case on NUMA machines due to per-node pools.  While
+	 * alloc_ordered_workqueue() is the right way to create an ordered
+	 * workqueue, keep the previous behavior to avoid subtle breakages
+	 * on NUMA.
+	 */
+	if ((flags & WQ_UNBOUND) && max_active == 1)
+		flags |= __WQ_ORDERED;
+
 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
 		flags |= WQ_UNBOUND;
@@ -4119,13 +4133,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 	struct pool_workqueue *pwq;
 
 	/* disallow meddling with max_active for ordered workqueues */
-	if (WARN_ON(wq->flags & __WQ_ORDERED))
+	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return;
 
 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
 	mutex_lock(&wq->mutex);
 
+	wq->flags &= ~__WQ_ORDERED;
 	wq->saved_max_active = max_active;
 
 	for_each_pwq(pwq, wq)
@@ -5253,7 +5268,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 	 * attributes breaks ordering guarantee.  Disallow exposing ordered
 	 * workqueues.
 	 */
-	if (WARN_ON(wq->flags & __WQ_ORDERED))
+	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return -EINVAL;
 
 	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
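
Net effect of the __WQ_ORDERED_EXPLICIT split: only queues created via alloc_ordered_workqueue() keep the hard ordering guarantee, while an unbound queue that became ordered implicitly through max_active == 1 may be relaxed again by attribute changes instead of triggering the WARN. A module-style sketch of the two creation paths (queue names illustrative):

#include <linux/init.h>
#include <linux/workqueue.h>

static int __init wq_example_init(void)
{
	struct workqueue_struct *imp, *exp;

	/* Implicitly ordered since this patch: gets __WQ_ORDERED but not
	 * __WQ_ORDERED_EXPLICIT, so apply_workqueue_attrs() may later
	 * clear the ordering instead of failing. */
	imp = alloc_workqueue("imp_wq", WQ_UNBOUND, 1);

	/* Explicitly ordered: also gets __WQ_ORDERED_EXPLICIT, so
	 * workqueue_set_max_active() and sysfs exposure are refused. */
	exp = alloc_ordered_workqueue("exp_wq", 0);

	if (imp)
		destroy_workqueue(imp);
	if (exp)
		destroy_workqueue(exp);
	return 0;
}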
View file
@@ -4095,6 +4095,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long vaddr = *position;
 	unsigned long remainder = *nr_pages;
 	struct hstate *h = hstate_vma(vma);
+	int err = -EFAULT;
 
 	while (vaddr < vma->vm_end && remainder) {
 		pte_t *pte;
@@ -4170,11 +4171,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			}
 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
 			if (ret & VM_FAULT_ERROR) {
-				int err = vm_fault_to_errno(ret, flags);
-
-				if (err)
-					return err;
-
+				err = vm_fault_to_errno(ret, flags);
 				remainder = 0;
 				break;
 			}
@@ -4229,7 +4226,7 @@ same_page:
 	 */
 	*position = vaddr;
 
-	return i ? i : -EFAULT;
+	return i ? i : err;
 }
 
 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
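
The err rework above restores the usual get_user_pages()-style contract: partial progress is reported in preference to the error, and the fault's errno is returned only when no page was processed. A generic, self-contained sketch of that contract (do_one_page() is a hypothetical stand-in):

static int do_one_page(long idx)
{
	return 0;	/* pretend every page succeeds */
}

/* Report pages already handled if any; otherwise the last errno. */
static long process_pages(long nr)
{
	long i;
	int err = -14;	/* -EFAULT as the default */

	for (i = 0; i < nr; i++) {
		err = do_one_page(i);
		if (err)
			break;	/* keep err, stop the walk */
	}
	return i ? i : err;
}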
View file
@@ -498,6 +498,7 @@ extern struct workqueue_struct *mm_percpu_wq;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 void try_to_unmap_flush(void);
 void try_to_unmap_flush_dirty(void);
+void flush_tlb_batched_pending(struct mm_struct *mm);
 #else
 static inline void try_to_unmap_flush(void)
 {
@@ -505,7 +506,9 @@ static inline void try_to_unmap_flush(void)
 static inline void try_to_unmap_flush_dirty(void)
 {
 }
-
+static inline void flush_tlb_batched_pending(struct mm_struct *mm)
+{
+}
 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 
 extern const struct trace_print_flags pageflag_names[];
View file
@@ -320,6 +320,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 
 	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
 	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	flush_tlb_batched_pending(mm);
 	arch_enter_lazy_mmu_mode();
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
View file
@@ -1197,6 +1197,7 @@ again:
 	init_rss_vec(rss);
 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	pte = start_pte;
+	flush_tlb_batched_pending(mm);
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = *pte;
View file
@@ -66,6 +66,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	    atomic_read(&vma->vm_mm->mm_users) == 1)
 		target_node = numa_node_id();
 
+	flush_tlb_batched_pending(vma->vm_mm);
 	arch_enter_lazy_mmu_mode();
 	do {
 		oldpte = *pte;
View file
@@ -152,6 +152,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	new_ptl = pte_lockptr(mm, new_pmd);
 	if (new_ptl != old_ptl)
 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+	flush_tlb_batched_pending(vma->vm_mm);
 	arch_enter_lazy_mmu_mode();
 
 	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
@@ -428,6 +429,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
 static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 		unsigned long new_addr, unsigned long new_len, bool *locked,
 		struct vm_userfaultfd_ctx *uf,
+		struct list_head *uf_unmap_early,
 		struct list_head *uf_unmap)
 {
 	struct mm_struct *mm = current->mm;
@@ -446,7 +448,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	if (addr + old_len > new_addr && new_addr + new_len > addr)
 		goto out;
 
-	ret = do_munmap(mm, new_addr, new_len, NULL);
+	ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
 	if (ret)
 		goto out;
@@ -514,6 +516,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	unsigned long charged = 0;
 	bool locked = false;
 	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
+	LIST_HEAD(uf_unmap_early);
 	LIST_HEAD(uf_unmap);
 
 	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
@@ -541,7 +544,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 
 	if (flags & MREMAP_FIXED) {
 		ret = mremap_to(addr, old_len, new_addr, new_len,
-				&locked, &uf, &uf_unmap);
+				&locked, &uf, &uf_unmap_early, &uf_unmap);
 		goto out;
 	}
@@ -621,6 +624,7 @@ out:
 	up_write(&current->mm->mmap_sem);
 	if (locked && new_len > old_len)
 		mm_populate(new_addr + old_len, new_len - old_len);
+	userfaultfd_unmap_complete(mm, &uf_unmap_early);
 	mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
 	userfaultfd_unmap_complete(mm, &uf_unmap);
 	return ret;
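
For reference, the path the new uf_unmap_early list covers: mremap(MREMAP_FIXED) first munmaps the destination range inside the kernel, and a userfaultfd registered on that range now sees the early unmap event as well. A userspace sketch that exercises this path (sizes and the lack of error handling are illustrative):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *dst = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* MREMAP_FIXED unmaps dst inside the kernel before moving src;
	 * with the patch, a userfaultfd watching dst is told about that
	 * early unmap instead of silently losing the range. */
	void *p = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, dst);

	printf("moved to %p\n", p);
	return 0;
}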
View file
@@ -616,6 +616,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
 	tlb_ubc->flush_required = true;
 
+	/*
+	 * Ensure compiler does not re-order the setting of tlb_flush_batched
+	 * before the PTE is cleared.
+	 */
+	barrier();
+	mm->tlb_flush_batched = true;
+
 	/*
 	 * If the PTE was dirty then it's best to assume it's writable. The
 	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
@@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 
 	return should_defer;
 }
+
+/*
+ * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
+ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
+ * operation such as mprotect or munmap to race between reclaim unmapping
+ * the page and flushing the page. If this race occurs, it potentially allows
+ * access to data via a stale TLB entry. Tracking all mm's that have TLB
+ * batching in flight would be expensive during reclaim so instead track
+ * whether TLB batching occurred in the past and if so then do a flush here
+ * if required. This will cost one additional flush per reclaim cycle paid
+ * by the first operation at risk such as mprotect and munmap.
+ *
+ * This must be called under the PTL so that an access to tlb_flush_batched
+ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
+ * via the PTL.
+ */
+void flush_tlb_batched_pending(struct mm_struct *mm)
+{
+	if (mm->tlb_flush_batched) {
+		flush_tlb_mm(mm);
+
+		/*
+		 * Do not allow the compiler to re-order the clearing of
+		 * tlb_flush_batched before the tlb is flushed.
+		 */
+		barrier();
+		mm->tlb_flush_batched = false;
+	}
+}
 #else
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 {
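
Both barrier() calls in the new code are plain compiler barriers: they only stop the compiler from sinking or hoisting the tlb_flush_batched update across the PTE clear or the TLB flush, while the PTL provides the actual cross-CPU ordering. A freestanding sketch of the idiom, with two ints standing in for the real PTE state and mm->tlb_flush_batched:

#define barrier() __asm__ __volatile__("" : : : "memory")

static int pte_cleared;
static int tlb_flush_batched;

static void clear_pte_and_mark(void)
{
	pte_cleared = 1;	/* must be visible first */
	barrier();		/* compiler may not reorder across this */
	tlb_flush_batched = 1;	/* only then advertise the pending flush */
}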
View file
@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
 
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
+	ifr.ifr_name[IFNAMSIZ-1] = 0;
 
 	error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
 	if (error)
@@ -423,6 +424,8 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 
 		if (copy_from_user(&iwr, arg, sizeof(iwr)))
 			return -EFAULT;
 
+		iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0;
+
 		return wext_handle_ioctl(net, &iwr, cmd, arg);
 	}
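
Both one-liners above exist because copy_from_user() copies a fixed-size buffer without guaranteeing NUL termination, so a caller can pass IFNAMSIZ non-NUL bytes and make later string handling read out of bounds. A userspace illustration of the failure mode and the fix:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

int main(void)
{
	char user_name[IFNAMSIZ];
	char ifr_name[IFNAMSIZ];

	/* A hostile caller can fill all IFNAMSIZ bytes with non-NULs. */
	memset(user_name, 'A', sizeof(user_name));

	memcpy(ifr_name, user_name, sizeof(ifr_name)); /* copy_from_user() analogue */
	ifr_name[IFNAMSIZ - 1] = 0;	/* the fix: force termination */

	printf("%s\n", ifr_name);	/* now safe to treat as a C string */
	return 0;
}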
View file
@@ -1977,7 +1977,8 @@ static int do_setlink(const struct sk_buff *skb,
 		struct sockaddr *sa;
 		int len;
 
-		len = sizeof(sa_family_t) + dev->addr_len;
+		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
+						  sizeof(*sa));
 		sa = kmalloc(len, GFP_KERNEL);
 		if (!sa) {
 			err = -ENOMEM;
@@ -4165,6 +4166,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
 
 	switch (event) {
 	case NETDEV_REBOOT:
+	case NETDEV_CHANGEADDR:
 	case NETDEV_CHANGENAME:
 	case NETDEV_FEAT_CHANGE:
 	case NETDEV_BONDING_FAILOVER:
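
The max_t() sizing matters because dev->addr_len can be smaller than a full struct sockaddr (an Ethernet MAC is 6 bytes), while later code may treat the allocation as a complete sockaddr. Illustrative arithmetic:

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	size_t addr_len = 6;	/* e.g. an Ethernet MAC */
	size_t old_len = sizeof(sa_family_t) + addr_len;
	size_t new_len = sizeof(sa_family_t) +
			 (addr_len > sizeof(struct sockaddr) ?
			  addr_len : sizeof(struct sockaddr));

	/* Code that later reads the buffer as a full struct sockaddr
	 * would run past an old_len-sized allocation. */
	printf("old=%zu new=%zu sizeof(struct sockaddr)=%zu\n",
	       old_len, new_len, sizeof(struct sockaddr));
	return 0;
}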
View file
@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
 	 * singleton values (which always leads to failure).
 	 * These settings can still (later) be overridden via sockopts.
 	 */
-	if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
-	    ccid_get_builtin_ccids(&rx.val, &rx.len))
+	if (ccid_get_builtin_ccids(&tx.val, &tx.len))
 		return -ENOBUFS;
+	if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
+		kfree(tx.val);
+		return -ENOBUFS;
+	}
 
 	if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
 	    !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
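
The split condition exists so the error path can free the buffer the first ccid_get_builtin_ccids() call allocated; the old combined test leaked tx.val whenever only the rx allocation failed. The generic shape of the fix (a sketch, not the DCCP code):

#include <stdlib.h>

/* When the second of two allocations fails, release the first
 * before bailing out. */
static int get_two(char **a, char **b)
{
	*a = malloc(4);
	if (!*a)
		return -1;
	*b = malloc(4);
	if (!*b) {
		free(*a);	/* the leak the patch plugs */
		*a = NULL;
		return -1;
	}
	return 0;
}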
View file
@@ -631,6 +631,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop_and_free;
 
 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	reqsk_put(req);
 	return 0;
 
 drop_and_free:
View file
@@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop_and_free;
 
 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	reqsk_put(req);
 	return 0;
 
 drop_and_free:
View file
@@ -1327,13 +1327,14 @@ static struct pernet_operations fib_net_ops = {
 
 void __init ip_fib_init(void)
 {
-	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
-	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
-	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+	fib_trie_init();
 
 	register_pernet_subsys(&fib_net_ops);
+
 	register_netdevice_notifier(&fib_netdev_notifier);
 	register_inetaddr_notifier(&fib_inetaddr_notifier);
 
-	fib_trie_init();
+	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
 }
View file
@@ -1372,7 +1372,7 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
 		return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type,
 					  &info.info);
 	case FIB_EVENT_NH_DEL:
-		if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+		if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
 		     fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
 		    (fib_nh->nh_flags & RTNH_F_DEAD))
 			return call_fib_notifiers(dev_net(fib_nh->nh_dev),
View file
@@ -332,6 +332,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	treq->rcv_isn		= ntohl(th->seq) - 1;
 	treq->snt_isn		= cookie;
 	treq->ts_off		= 0;
+	treq->txhash		= net_tx_rndhash();
 	req->mss		= mss;
 	ireq->ir_num		= ntohs(th->dest);
 	ireq->ir_rmt_port	= th->source;
View file
@@ -113,7 +113,8 @@ struct bbr {
 		cwnd_gain:10,	/* current gain for setting cwnd */
 		full_bw_cnt:3,	/* number of rounds without large bw gains */
 		cycle_idx:3,	/* current index in pacing_gain cycle array */
-		unused_b:6;
+		has_seen_rtt:1, /* have we seen an RTT sample yet? */
+		unused_b:5;
 	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
 	u32	full_bw;	/* recent bw, to estimate if pipe is full */
 };
@@ -212,6 +213,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
 	return rate >> BW_SCALE;
 }
 
+/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
+static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
+{
+	u64 rate = bw;
+
+	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+	return rate;
+}
+
+/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct bbr *bbr = inet_csk_ca(sk);
+	u64 bw;
+	u32 rtt_us;
+
+	if (tp->srtt_us) {		/* any RTT sample yet? */
+		rtt_us = max(tp->srtt_us >> 3, 1U);
+		bbr->has_seen_rtt = 1;
+	} else {			/* no RTT sample yet */
+		rtt_us = USEC_PER_MSEC;	/* use nominal default RTT */
+	}
+	bw = (u64)tp->snd_cwnd * BW_UNIT;
+	do_div(bw, rtt_us);
+	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
+}
+
 /* Pace using current bw estimate and a gain factor. In order to help drive the
  * network toward lower queues while maintaining high utilization and low
  * latency, the average pacing rate aims to be slightly (~1%) lower than the
@@ -221,12 +251,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
  */
 static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u64 rate = bw;
+	u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
 
-	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
-	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
-	if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
+	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
+		bbr_init_pacing_rate_from_rtt(sk);
+	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
 		sk->sk_pacing_rate = rate;
 }
@@ -799,7 +830,6 @@ static void bbr_init(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u64 bw;
 
 	bbr->prior_cwnd = 0;
 	bbr->tso_segs_goal = 0;	 /* default segs per skb until first ACK */
@@ -815,11 +845,8 @@ static void bbr_init(struct sock *sk)
 
 	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
 
-	/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
-	bw = (u64)tp->snd_cwnd * BW_UNIT;
-	do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
-	sk->sk_pacing_rate = 0;	  /* force an update of sk_pacing_rate */
-	bbr_set_pacing_rate(sk, bw, bbr_high_gain);
+	bbr->has_seen_rtt = 0;
+	bbr_init_pacing_rate_from_rtt(sk);
 
 	bbr->restore_cwnd = 0;
 	bbr->round_start = 0;
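
A back-of-the-envelope check of what bbr_init_pacing_rate_from_rtt() computes before any RTT sample exists, using illustrative values (initial cwnd of 10 segments, MSS 1448 bytes, the nominal 1 ms default RTT, startup gain of about 2.885):

#include <stdio.h>

int main(void)
{
	double high_gain = 2885.0 / 1000.0;	/* ~2/ln(2) startup gain */
	double cwnd = 10.0;			/* initial cwnd, packets */
	double mss = 1448.0;			/* bytes per packet */
	double rtt = 1e-3;			/* nominal 1 ms default RTT */

	/* pacing ~= high_gain * (cwnd * MSS) / RTT, about 41.8 MB/s here */
	printf("initial pacing rate ~= %.1f MB/s\n",
	       high_gain * cwnd * mss / rtt / 1e6);
	return 0;
}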
View file
@@ -1762,7 +1762,7 @@ drop:
 
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
-static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
 	struct dst_entry *old;
@@ -2120,6 +2120,7 @@ void udp_destroy_sock(struct sock *sk)
 			encap_destroy(sk);
 	}
 }
+EXPORT_SYMBOL(udp_sk_rx_dst_set);
 
 /*
  *	Socket option code for UDP
Some files were not shown because too many files have changed in this diff.