kernel-release 4.14.14-1 (x86_64;i586) 2018-3643

Status rejected
Submitter itchka [@T] compuserve.com
Platform 3.0
Repository main
URL https://abf.openmandriva.org/build_lists/146000
Packages
cpupower-4.14.14-1.x86_64.binary
cpupower-devel-4.14.14-1.x86_64.binary
kernel-release-4.14.14-1.x86_64.source
kernel-release-desktop-4.14.14-1omv-1-1.x86_64.binary
kernel-release-desktop-devel-4.14.14-1omv-1-1.x86_64.binary
kernel-release-desktop-devel-latest-4.14.14-1.x86_64.binary
kernel-release-desktop-latest-4.14.14-1.x86_64.binary
kernel-release-headers-1:4.14.14-1.x86_64.binary
kernel-release-server-4.14.14-1omv-1-1.x86_64.binary
kernel-release-server-devel-4.14.14-1omv-1-1.x86_64.binary
kernel-release-server-devel-latest-4.14.14-1.x86_64.binary
kernel-release-server-latest-4.14.14-1.x86_64.binary
kernel-release-source-4.14.14-1omv-1-1.x86_64.binary
kernel-release-source-latest-4.14.14-1.x86_64.binary
turbostat-4.14.14-1.x86_64.binary
x86_energy_perf_policy-4.14.14-1.x86_64.binary
cpupower-4.14.14-1.i586.binary
cpupower-devel-4.14.14-1.i586.binary
kernel-release-4.14.14-1.i586.source
kernel-release-desktop-4.14.14-1omv-1-1.i586.binary
kernel-release-desktop-devel-4.14.14-1omv-1-1.i586.binary
kernel-release-desktop-devel-latest-4.14.14-1.i586.binary
kernel-release-desktop-latest-4.14.14-1.i586.binary
kernel-release-headers-1:4.14.14-1.i586.binary
kernel-release-source-4.14.14-1omv-1-1.i586.binary
kernel-release-source-latest-4.14.14-1.i586.binary
turbostat-4.14.14-1.i586.binary
x86_energy_perf_policy-4.14.14-1.i586.binary
Build Date 2018-01-20 02:59:41 +0000 UTC
Last Updated 2018-01-20 18:15:47.97539637 +0000 UTC
$ git diff --patch-with-stat --summary 9ae2040a0a71e561c49819edd7f5f00e588082a4..b3dd4be965bb28858e93f801f399f3cd60781637

 .abf.yml                                           |   2 +-
 SME-BSP_SME-microcode-update-fixes.patch           | 742 +++++++++++++++++++++
 common.config                                      |   6 +-
 i386-common.config                                 |   3 +
 kernel-release.spec                                |  46 +-
 ...l_RSB_on_context_switch_for_affected_CPUs.patch | 175 +++++
 ...FENCE_to_the_retpoline_filling_RSB_macros.patch |  90 +++
 x86_64-common.config                               |   3 +
 8 files changed, 1055 insertions(+), 12 deletions(-)
 create mode 100644 SME-BSP_SME-microcode-update-fixes.patch
 create mode 100644 retpoline-fill_RSB_on_context_switch_for_affected_CPUs.patch
 create mode 100644 retpoline_add_LFENCE_to_the_retpoline_filling_RSB_macros.patch

diff --git a/.abf.yml b/.abf.yml
index 9297cbc..095ebdf 100644
--- a/.abf.yml
+++ b/.abf.yml
@@ -1,5 +1,5 @@
 sources:
   linux-4.14.tar.sign: 05874429ec327eddcc43057d0cf1072eabce8d03
   linux-4.14.tar.xz: c64d80ad01a6a77bf46bce1bdc5c7f28bfb6bfd5
-  patch-4.14.11.xz: bf0e89f57aca13a2f31bdefa39527b503ab5b47c
+  patch-4.14.14.xz: 0ac77e94a6225a94a8b705222e0cfc23bc0512f8
   saa716x-driver.tar.xz: 603d6e561f6c09b19b05ca53b55458caad849c97
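
The hashes tracked in .abf.yml are SHA-1 checksums of the source archives held in ABF's file store. As a quick sanity check, a downloaded patch can be verified against the updated entry (a sketch, reusing the hash from the hunk above):

$ echo '0ac77e94a6225a94a8b705222e0cfc23bc0512f8  patch-4.14.14.xz' | sha1sum -c -
patch-4.14.14.xz: OK
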
diff --git a/SME-BSP_SME-microcode-update-fixes.patch b/SME-BSP_SME-microcode-update-fixes.patch
new file mode 100644
index 0000000..3082f70
--- /dev/null
+++ b/SME-BSP_SME-microcode-update-fixes.patch
@@ -0,0 +1,742 @@
+diff -Naur linux-4.14.13/arch/x86/include/asm/mem_encrypt.h linux-4.14.13-p/arch/x86/include/asm/mem_encrypt.h
+--- linux-4.14.13/arch/x86/include/asm/mem_encrypt.h	2018-01-10 09:31:23.000000000 +0100
++++ linux-4.14.13-p/arch/x86/include/asm/mem_encrypt.h	2018-01-11 01:39:30.270647953 +0100
+@@ -39,7 +39,7 @@
+ 
+ void __init sme_early_init(void);
+ 
+-void __init sme_encrypt_kernel(void);
++void __init sme_encrypt_kernel(struct boot_params *bp);
+ void __init sme_enable(struct boot_params *bp);
+ 
+ /* Architecture __weak replacement functions */
+@@ -61,7 +61,7 @@
+ 
+ static inline void __init sme_early_init(void) { }
+ 
+-static inline void __init sme_encrypt_kernel(void) { }
++static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
+ static inline void __init sme_enable(struct boot_params *bp) { }
+ 
+ #endif	/* CONFIG_AMD_MEM_ENCRYPT */
+diff -Naur linux-4.14.13/arch/x86/kernel/head64.c linux-4.14.13-p/arch/x86/kernel/head64.c
+--- linux-4.14.13/arch/x86/kernel/head64.c	2018-01-10 09:31:23.000000000 +0100
++++ linux-4.14.13-p/arch/x86/kernel/head64.c	2018-01-11 01:39:30.270647953 +0100
+@@ -157,8 +157,8 @@
+ 	p = fixup_pointer(&phys_base, physaddr);
+ 	*p += load_delta - sme_get_me_mask();
+ 
+-	/* Encrypt the kernel (if SME is active) */
+-	sme_encrypt_kernel();
++	/* Encrypt the kernel and related (if SME is active) */
++	sme_encrypt_kernel(bp);
+ 
+ 	/*
+ 	 * Return the SME encryption mask (if SME is active) to be used as a
+diff -Naur linux-4.14.13/arch/x86/kernel/setup.c linux-4.14.13-p/arch/x86/kernel/setup.c
+--- linux-4.14.13/arch/x86/kernel/setup.c	2018-01-10 09:31:23.000000000 +0100
++++ linux-4.14.13-p/arch/x86/kernel/setup.c	2018-01-11 01:43:12.727105535 +0100
+@@ -376,14 +376,6 @@
+ 	    !ramdisk_image || !ramdisk_size)
+ 		return;		/* No initrd provided by bootloader */
+ 
+-	/*
+-	 * If SME is active, this memory will be marked encrypted by the
+-	 * kernel when it is accessed (including relocation). However, the
+-	 * ramdisk image was loaded decrypted by the bootloader, so make
+-	 * sure that it is encrypted before accessing it.
+-	 */
+-	sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
+-
+ 	initrd_start = 0;
+ 
+ 	mapped_size = memblock_mem_size(max_pfn_mapped);
+diff -Naur linux-4.14.13/arch/x86/mm/mem_encrypt_boot.S linux-4.14.13-p/arch/x86/mm/mem_encrypt_boot.S
+--- linux-4.14.13/arch/x86/mm/mem_encrypt_boot.S	2018-01-10 09:31:23.000000000 +0100
++++ linux-4.14.13-p/arch/x86/mm/mem_encrypt_boot.S	2018-01-11 01:39:30.273981283 +0100
+@@ -22,9 +22,9 @@
+ 
+ 	/*
+ 	 * Entry parameters:
+-	 *   RDI - virtual address for the encrypted kernel mapping
+-	 *   RSI - virtual address for the decrypted kernel mapping
+-	 *   RDX - length of kernel
++	 *   RDI - virtual address for the encrypted mapping
++	 *   RSI - virtual address for the decrypted mapping
++	 *   RDX - length to encrypt
+ 	 *   RCX - virtual address of the encryption workarea, including:
+ 	 *     - stack page (PAGE_SIZE)
+ 	 *     - encryption routine page (PAGE_SIZE)
+@@ -41,9 +41,9 @@
+ 	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */
+ 
+ 	push	%r12
+-	movq	%rdi, %r10		/* Encrypted kernel */
+-	movq	%rsi, %r11		/* Decrypted kernel */
+-	movq	%rdx, %r12		/* Kernel length */
++	movq	%rdi, %r10		/* Encrypted area */
++	movq	%rsi, %r11		/* Decrypted area */
++	movq	%rdx, %r12		/* Area length */
+ 
+ 	/* Copy encryption routine into the workarea */
+ 	movq	%rax, %rdi				/* Workarea encryption routine */
+@@ -52,10 +52,10 @@
+ 	rep	movsb
+ 
+ 	/* Setup registers for call */
+-	movq	%r10, %rdi		/* Encrypted kernel */
+-	movq	%r11, %rsi		/* Decrypted kernel */
++	movq	%r10, %rdi		/* Encrypted area */
++	movq	%r11, %rsi		/* Decrypted area */
+ 	movq	%r8, %rdx		/* Pagetables used for encryption */
+-	movq	%r12, %rcx		/* Kernel length */
++	movq	%r12, %rcx		/* Area length */
+ 	movq	%rax, %r8		/* Workarea encryption routine */
+ 	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */
+ 
+@@ -71,7 +71,7 @@
+ 
+ ENTRY(__enc_copy)
+ /*
+- * Routine used to encrypt kernel.
++ * Routine used to encrypt memory in place.
+  *   This routine must be run outside of the kernel proper since
+  *   the kernel will be encrypted during the process. So this
+  *   routine is defined here and then copied to an area outside
+@@ -79,19 +79,19 @@
+  *   during execution.
+  *
+  *   On entry the registers must be:
+- *     RDI - virtual address for the encrypted kernel mapping
+- *     RSI - virtual address for the decrypted kernel mapping
++ *     RDI - virtual address for the encrypted mapping
++ *     RSI - virtual address for the decrypted mapping
+  *     RDX - address of the pagetables to use for encryption
+- *     RCX - length of kernel
++ *     RCX - length of area
+  *      R8 - intermediate copy buffer
+  *
+  *     RAX - points to this routine
+  *
+- * The kernel will be encrypted by copying from the non-encrypted
+- * kernel space to an intermediate buffer and then copying from the
+- * intermediate buffer back to the encrypted kernel space. The physical
+- * addresses of the two kernel space mappings are the same which
+- * results in the kernel being encrypted "in place".
++ * The area will be encrypted by copying from the non-encrypted
++ * memory space to an intermediate buffer and then copying from the
++ * intermediate buffer back to the encrypted memory space. The physical
++ * addresses of the two mappings are the same which results in the area
++ * being encrypted "in place".
+  */
+ 	/* Enable the new page tables */
+ 	mov	%rdx, %cr3
+@@ -103,47 +103,55 @@
+ 	orq	$X86_CR4_PGE, %rdx
+ 	mov	%rdx, %cr4
+ 
++	push	%r15
++	push	%r12
++
++	movq	%rcx, %r9		/* Save area length */
++	movq	%rdi, %r10		/* Save encrypted area address */
++	movq	%rsi, %r11		/* Save decrypted area address */
++
+ 	/* Set the PAT register PA5 entry to write-protect */
+-	push	%rcx
+ 	movl	$MSR_IA32_CR_PAT, %ecx
+ 	rdmsr
+-	push	%rdx			/* Save original PAT value */
++	mov	%rdx, %r15		/* Save original PAT value */
+ 	andl	$0xffff00ff, %edx	/* Clear PA5 */
+ 	orl	$0x00000500, %edx	/* Set PA5 to WP */
+ 	wrmsr
+-	pop	%rdx			/* RDX contains original PAT value */
+-	pop	%rcx
+-
+-	movq	%rcx, %r9		/* Save kernel length */
+-	movq	%rdi, %r10		/* Save encrypted kernel address */
+-	movq	%rsi, %r11		/* Save decrypted kernel address */
+ 
+ 	wbinvd				/* Invalidate any cache entries */
+ 
+-	/* Copy/encrypt 2MB at a time */
++	/* Copy/encrypt up to 2MB at a time */
++	movq	$PMD_PAGE_SIZE, %r12
+ 1:
+-	movq	%r11, %rsi		/* Source - decrypted kernel */
++	cmpq	%r12, %r9
++	jnb	2f
++	movq	%r9, %r12
++
++2:
++	movq	%r11, %rsi		/* Source - decrypted area */
+ 	movq	%r8, %rdi		/* Dest   - intermediate copy buffer */
+-	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
++	movq	%r12, %rcx
+ 	rep	movsb
+ 
+ 	movq	%r8, %rsi		/* Source - intermediate copy buffer */
+-	movq	%r10, %rdi		/* Dest   - encrypted kernel */
+-	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
++	movq	%r10, %rdi		/* Dest   - encrypted area */
++	movq	%r12, %rcx
+ 	rep	movsb
+ 
+-	addq	$PMD_PAGE_SIZE, %r11
+-	addq	$PMD_PAGE_SIZE, %r10
+-	subq	$PMD_PAGE_SIZE, %r9	/* Kernel length decrement */
++	addq	%r12, %r11
++	addq	%r12, %r10
++	subq	%r12, %r9		/* Kernel length decrement */
+ 	jnz	1b			/* Kernel length not zero? */
+ 
+ 	/* Restore PAT register */
+-	push	%rdx			/* Save original PAT value */
+ 	movl	$MSR_IA32_CR_PAT, %ecx
+ 	rdmsr
+-	pop	%rdx			/* Restore original PAT value */
++	mov	%r15, %rdx		/* Restore original PAT value */
+ 	wrmsr
+ 
++	pop	%r12
++	pop	%r15
++
+ 	ret
+ .L__enc_copy_end:
+ ENDPROC(__enc_copy)
+diff -Naur linux-4.14.13/arch/x86/mm/mem_encrypt.c linux-4.14.13-p/arch/x86/mm/mem_encrypt.c
+--- linux-4.14.13/arch/x86/mm/mem_encrypt.c	2018-01-10 09:31:23.000000000 +0100
++++ linux-4.14.13-p/arch/x86/mm/mem_encrypt.c	2018-01-11 01:39:30.273981283 +0100
+@@ -213,37 +213,62 @@
+ 	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
+ }
+ 
+-static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
+-				 unsigned long end)
++struct sme_populate_pgd_data {
++	void	*pgtable_area;
++	pgd_t	*pgd;
++
++	pmdval_t pmd_flags;
++	pteval_t pte_flags;
++	unsigned long paddr;
++
++	unsigned long vaddr;
++	unsigned long vaddr_end;
++};
++
++static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+ {
+ 	unsigned long pgd_start, pgd_end, pgd_size;
+ 	pgd_t *pgd_p;
+ 
+-	pgd_start = start & PGDIR_MASK;
+-	pgd_end = end & PGDIR_MASK;
++	pgd_start = ppd->vaddr & PGDIR_MASK;
++	pgd_end = ppd->vaddr_end & PGDIR_MASK;
+ 
+-	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1);
+-	pgd_size *= sizeof(pgd_t);
++	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
+ 
+-	pgd_p = pgd_base + pgd_index(start);
++	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
+ 
+ 	memset(pgd_p, 0, pgd_size);
+ }
+ 
+-#define PGD_FLAGS	_KERNPG_TABLE_NOENC
+-#define P4D_FLAGS	_KERNPG_TABLE_NOENC
+-#define PUD_FLAGS	_KERNPG_TABLE_NOENC
+-#define PMD_FLAGS	(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
++#define PGD_FLAGS		_KERNPG_TABLE_NOENC
++#define P4D_FLAGS		_KERNPG_TABLE_NOENC
++#define PUD_FLAGS		_KERNPG_TABLE_NOENC
++#define PMD_FLAGS		_KERNPG_TABLE_NOENC
++
++#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
++
++#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
++#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
++				 (_PAGE_PAT | _PAGE_PWT))
++
++#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
++
++#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
+ 
+-static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
+-				     unsigned long vaddr, pmdval_t pmd_val)
++#define PTE_FLAGS_DEC		PTE_FLAGS
++#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
++				 (_PAGE_PAT | _PAGE_PWT))
++
++#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
++
++static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+ {
+ 	pgd_t *pgd_p;
+ 	p4d_t *p4d_p;
+ 	pud_t *pud_p;
+ 	pmd_t *pmd_p;
+ 
+-	pgd_p = pgd_base + pgd_index(vaddr);
++	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
+ 	if (native_pgd_val(*pgd_p)) {
+ 		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+ 			p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
+@@ -253,15 +278,15 @@
+ 		pgd_t pgd;
+ 
+ 		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+-			p4d_p = pgtable_area;
++			p4d_p = ppd->pgtable_area;
+ 			memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
+-			pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
++			ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
+ 
+ 			pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
+ 		} else {
+-			pud_p = pgtable_area;
++			pud_p = ppd->pgtable_area;
+ 			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
+-			pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
++			ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+ 
+ 			pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
+ 		}
+@@ -269,58 +294,160 @@
+ 	}
+ 
+ 	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+-		p4d_p += p4d_index(vaddr);
++		p4d_p += p4d_index(ppd->vaddr);
+ 		if (native_p4d_val(*p4d_p)) {
+ 			pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
+ 		} else {
+ 			p4d_t p4d;
+ 
+-			pud_p = pgtable_area;
++			pud_p = ppd->pgtable_area;
+ 			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
+-			pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
++			ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+ 
+ 			p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
+ 			native_set_p4d(p4d_p, p4d);
+ 		}
+ 	}
+ 
+-	pud_p += pud_index(vaddr);
++	pud_p += pud_index(ppd->vaddr);
+ 	if (native_pud_val(*pud_p)) {
+ 		if (native_pud_val(*pud_p) & _PAGE_PSE)
+-			goto out;
++			return NULL;
+ 
+ 		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
+ 	} else {
+ 		pud_t pud;
+ 
+-		pmd_p = pgtable_area;
++		pmd_p = ppd->pgtable_area;
+ 		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
+-		pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
++		ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
+ 
+ 		pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
+ 		native_set_pud(pud_p, pud);
+ 	}
+ 
+-	pmd_p += pmd_index(vaddr);
++	return pmd_p;
++}
++
++static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
++{
++	pmd_t *pmd_p;
++
++	pmd_p = sme_prepare_pgd(ppd);
++	if (!pmd_p)
++		return;
++
++	pmd_p += pmd_index(ppd->vaddr);
+ 	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
+-		native_set_pmd(pmd_p, native_make_pmd(pmd_val));
++		native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
++}
++
++static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
++{
++	pmd_t *pmd_p;
++	pte_t *pte_p;
++
++	pmd_p = sme_prepare_pgd(ppd);
++	if (!pmd_p)
++		return;
++
++	pmd_p += pmd_index(ppd->vaddr);
++	if (native_pmd_val(*pmd_p)) {
++		if (native_pmd_val(*pmd_p) & _PAGE_PSE)
++			return;
++
++		pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
++	} else {
++		pmd_t pmd;
++
++		pte_p = ppd->pgtable_area;
++		memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
++		ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
++
++		pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
++		native_set_pmd(pmd_p, pmd);
++	}
++
++	pte_p += pte_index(ppd->vaddr);
++	if (!native_pte_val(*pte_p))
++		native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
++}
++
++static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
++{
++	while (ppd->vaddr < ppd->vaddr_end) {
++		sme_populate_pgd_large(ppd);
++
++		ppd->vaddr += PMD_PAGE_SIZE;
++		ppd->paddr += PMD_PAGE_SIZE;
++	}
++}
++
++static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
++{
++	while (ppd->vaddr < ppd->vaddr_end) {
++		sme_populate_pgd(ppd);
+ 
+-out:
+-	return pgtable_area;
++		ppd->vaddr += PAGE_SIZE;
++		ppd->paddr += PAGE_SIZE;
++	}
++}
++
++static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
++				   pmdval_t pmd_flags, pteval_t pte_flags)
++{
++	unsigned long vaddr_end;
++
++	ppd->pmd_flags = pmd_flags;
++	ppd->pte_flags = pte_flags;
++
++	/* Save original end value since we modify the struct value */
++	vaddr_end = ppd->vaddr_end;
++
++	/* If start is not 2MB aligned, create PTE entries */
++	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
++	__sme_map_range_pte(ppd);
++
++	/* Create PMD entries */
++	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
++	__sme_map_range_pmd(ppd);
++
++	/* If end is not 2MB aligned, create PTE entries */
++	ppd->vaddr_end = vaddr_end;
++	__sme_map_range_pte(ppd);
++}
++
++static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
++{
++	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
++}
++
++static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
++{
++	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
++}
++
++static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
++{
++	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
+ }
+ 
+ static unsigned long __init sme_pgtable_calc(unsigned long len)
+ {
+-	unsigned long p4d_size, pud_size, pmd_size;
++	unsigned long p4d_size, pud_size, pmd_size, pte_size;
+ 	unsigned long total;
+ 
+ 	/*
+ 	 * Perform a relatively simplistic calculation of the pagetable
+-	 * entries that are needed. That mappings will be covered by 2MB
+-	 * PMD entries so we can conservatively calculate the required
++	 * entries that are needed. Those mappings will be covered mostly
++	 * by 2MB PMD entries so we can conservatively calculate the required
+ 	 * number of P4D, PUD and PMD structures needed to perform the
+-	 * mappings. Incrementing the count for each covers the case where
+-	 * the addresses cross entries.
++	 * mappings.  For mappings that are not 2MB aligned, PTE mappings
++	 * would be needed for the start and end portion of the address range
++	 * that fall outside of the 2MB alignment.  This results in, at most,
++	 * two extra pages to hold PTE entries for each range that is mapped.
++	 * Incrementing the count for each covers the case where the addresses
++	 * cross entries.
+ 	 */
+ 	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+ 		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
+@@ -334,8 +461,9 @@
+ 	}
+ 	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
+ 	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
++	pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
+ 
+-	total = p4d_size + pud_size + pmd_size;
++	total = p4d_size + pud_size + pmd_size + pte_size;
+ 
+ 	/*
+ 	 * Now calculate the added pagetable structures needed to populate
+@@ -359,29 +487,29 @@
+ 	return total;
+ }
+ 
+-void __init sme_encrypt_kernel(void)
++void __init sme_encrypt_kernel(struct boot_params *bp)
+ {
+ 	unsigned long workarea_start, workarea_end, workarea_len;
+ 	unsigned long execute_start, execute_end, execute_len;
+ 	unsigned long kernel_start, kernel_end, kernel_len;
++	unsigned long initrd_start, initrd_end, initrd_len;
++	struct sme_populate_pgd_data ppd;
+ 	unsigned long pgtable_area_len;
+-	unsigned long paddr, pmd_flags;
+ 	unsigned long decrypted_base;
+-	void *pgtable_area;
+-	pgd_t *pgd;
+ 
+ 	if (!sme_active())
+ 		return;
+ 
+ 	/*
+-	 * Prepare for encrypting the kernel by building new pagetables with
+-	 * the necessary attributes needed to encrypt the kernel in place.
++	 * Prepare for encrypting the kernel and initrd by building new
++	 * pagetables with the necessary attributes needed to encrypt the
++	 * kernel in place.
+ 	 *
+ 	 *   One range of virtual addresses will map the memory occupied
+-	 *   by the kernel as encrypted.
++	 *   by the kernel and initrd as encrypted.
+ 	 *
+ 	 *   Another range of virtual addresses will map the memory occupied
+-	 *   by the kernel as decrypted and write-protected.
++	 *   by the kernel and initrd as decrypted and write-protected.
+ 	 *
+ 	 *     The use of write-protect attribute will prevent any of the
+ 	 *     memory from being cached.
+@@ -392,6 +520,20 @@
+ 	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
+ 	kernel_len = kernel_end - kernel_start;
+ 
++	initrd_start = 0;
++	initrd_end = 0;
++	initrd_len = 0;
++#ifdef CONFIG_BLK_DEV_INITRD
++	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
++		     ((unsigned long)bp->ext_ramdisk_size << 32);
++	if (initrd_len) {
++		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
++			       ((unsigned long)bp->ext_ramdisk_image << 32);
++		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
++		initrd_len = initrd_end - initrd_start;
++	}
++#endif
++
+ 	/* Set the encryption workarea to be immediately after the kernel */
+ 	workarea_start = kernel_end;
+ 
+@@ -414,16 +556,21 @@
+ 	 */
+ 	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
+ 	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
++	if (initrd_len)
++		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
+ 
+ 	/* PUDs and PMDs needed in the current pagetables for the workarea */
+ 	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
+ 
+ 	/*
+ 	 * The total workarea includes the executable encryption area and
+-	 * the pagetable area.
++	 * the pagetable area. The start of the workarea is already 2MB
++	 * aligned, align the end of the workarea on a 2MB boundary so that
++	 * we don't try to create/allocate PTE entries from the workarea
++	 * before it is mapped.
+ 	 */
+ 	workarea_len = execute_len + pgtable_area_len;
+-	workarea_end = workarea_start + workarea_len;
++	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
+ 
+ 	/*
+ 	 * Set the address to the start of where newly created pagetable
+@@ -432,45 +579,30 @@
+ 	 * pagetables and when the new encrypted and decrypted kernel
+ 	 * mappings are populated.
+ 	 */
+-	pgtable_area = (void *)execute_end;
++	ppd.pgtable_area = (void *)execute_end;
+ 
+ 	/*
+ 	 * Make sure the current pagetable structure has entries for
+ 	 * addressing the workarea.
+ 	 */
+-	pgd = (pgd_t *)native_read_cr3_pa();
+-	paddr = workarea_start;
+-	while (paddr < workarea_end) {
+-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+-						paddr,
+-						paddr + PMD_FLAGS);
+-
+-		paddr += PMD_PAGE_SIZE;
+-	}
++	ppd.pgd = (pgd_t *)native_read_cr3_pa();
++	ppd.paddr = workarea_start;
++	ppd.vaddr = workarea_start;
++	ppd.vaddr_end = workarea_end;
++	sme_map_range_decrypted(&ppd);
+ 
+ 	/* Flush the TLB - no globals so cr3 is enough */
+ 	native_write_cr3(__native_read_cr3());
+ 
+ 	/*
+ 	 * A new pagetable structure is being built to allow for the kernel
+-	 * to be encrypted. It starts with an empty PGD that will then be
+-	 * populated with new PUDs and PMDs as the encrypted and decrypted
+-	 * kernel mappings are created.
+-	 */
+-	pgd = pgtable_area;
+-	memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
+-	pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;
+-
+-	/* Add encrypted kernel (identity) mappings */
+-	pmd_flags = PMD_FLAGS | _PAGE_ENC;
+-	paddr = kernel_start;
+-	while (paddr < kernel_end) {
+-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+-						paddr,
+-						paddr + pmd_flags);
+-
+-		paddr += PMD_PAGE_SIZE;
+-	}
++	 * and initrd to be encrypted. It starts with an empty PGD that will
++	 * then be populated with new PUDs and PMDs as the encrypted and
++	 * decrypted kernel mappings are created.
++	 */
++	ppd.pgd = ppd.pgtable_area;
++	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
++	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
+ 
+ 	/*
+ 	 * A different PGD index/entry must be used to get different
+@@ -479,47 +611,79 @@
+ 	 * the base of the mapping.
+ 	 */
+ 	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
++	if (initrd_len) {
++		unsigned long check_base;
++
++		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
++		decrypted_base = max(decrypted_base, check_base);
++	}
+ 	decrypted_base <<= PGDIR_SHIFT;
+ 
+-	/* Add decrypted, write-protected kernel (non-identity) mappings */
+-	pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
+-	paddr = kernel_start;
+-	while (paddr < kernel_end) {
+-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+-						paddr + decrypted_base,
+-						paddr + pmd_flags);
++	/* Add encrypted kernel (identity) mappings */
++	ppd.paddr = kernel_start;
++	ppd.vaddr = kernel_start;
++	ppd.vaddr_end = kernel_end;
++	sme_map_range_encrypted(&ppd);
+ 
+-		paddr += PMD_PAGE_SIZE;
++	/* Add decrypted, write-protected kernel (non-identity) mappings */
++	ppd.paddr = kernel_start;
++	ppd.vaddr = kernel_start + decrypted_base;
++	ppd.vaddr_end = kernel_end + decrypted_base;
++	sme_map_range_decrypted_wp(&ppd);
++
++	if (initrd_len) {
++		/* Add encrypted initrd (identity) mappings */
++		ppd.paddr = initrd_start;
++		ppd.vaddr = initrd_start;
++		ppd.vaddr_end = initrd_end;
++		sme_map_range_encrypted(&ppd);
++		/*
++		 * Add decrypted, write-protected initrd (non-identity) mappings
++		 */
++		ppd.paddr = initrd_start;
++		ppd.vaddr = initrd_start + decrypted_base;
++		ppd.vaddr_end = initrd_end + decrypted_base;
++		sme_map_range_decrypted_wp(&ppd);
+ 	}
+ 
+ 	/* Add decrypted workarea mappings to both kernel mappings */
+-	paddr = workarea_start;
+-	while (paddr < workarea_end) {
+-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+-						paddr,
+-						paddr + PMD_FLAGS);
+-
+-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+-						paddr + decrypted_base,
+-						paddr + PMD_FLAGS);
+-
+-		paddr += PMD_PAGE_SIZE;
+-	}
++	ppd.paddr = workarea_start;
++	ppd.vaddr = workarea_start;
++	ppd.vaddr_end = workarea_end;
++	sme_map_range_decrypted(&ppd);
++
++	ppd.paddr = workarea_start;
++	ppd.vaddr = workarea_start + decrypted_base;
++	ppd.vaddr_end = workarea_end + decrypted_base;
++	sme_map_range_decrypted(&ppd);
+ 
+ 	/* Perform the encryption */
+ 	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
+-			    kernel_len, workarea_start, (unsigned long)pgd);
++			    kernel_len, workarea_start, (unsigned long)ppd.pgd);
++
++	if (initrd_len)
++		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
++				    initrd_len, workarea_start,
++				    (unsigned long)ppd.pgd);
+ 
+ 	/*
+ 	 * At this point we are running encrypted.  Remove the mappings for
+ 	 * the decrypted areas - all that is needed for this is to remove
+ 	 * the PGD entry/entries.
+ 	 */
+-	sme_clear_pgd(pgd, kernel_start + decrypted_base,
+-		      kernel_end + decrypted_base);
++	ppd.vaddr = kernel_start + decrypted_base;
++	ppd.vaddr_end = kernel_end + decrypted_base;
++	sme_clear_pgd(&ppd);
++
++	if (initrd_len) {
++		ppd.vaddr = initrd_start + decrypted_base;
++		ppd.vaddr_end = initrd_end + decrypted_base;
++		sme_clear_pgd(&ppd);
++	}
+ 
+-	sme_clear_pgd(pgd, workarea_start + decrypted_base,
+-		      workarea_end + decrypted_base);
++	ppd.vaddr = workarea_start + decrypted_base;
++	ppd.vaddr_end = workarea_end + decrypted_base;
++	sme_clear_pgd(&ppd);
+ 
+ 	/* Flush the TLB - no globals so cr3 is enough */
+ 	native_write_cr3(__native_read_cr3());
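
The SME rework above only takes effect on AMD hardware booted with Secure Memory Encryption active; everywhere else sme_active() is false and sme_encrypt_kernel() returns immediately. Whether SME is available and in use can be checked at runtime (a sketch; the exact dmesg wording varies between kernel versions):

$ grep -qw sme /proc/cpuinfo && echo 'CPU supports SME'
$ dmesg | grep -i 'memory encryption'
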
diff --git a/common.config b/common.config
index ef3afe1..ab7bbab 100644
--- a/common.config
+++ b/common.config
@@ -3698,7 +3698,7 @@ CONFIG_N_GSM=m
 CONFIG_TRACE_ROUTER=m
 CONFIG_TRACE_SINK=m
 CONFIG_DEVMEM=y
-CONFIG_DEVKMEM=y
+# CONFIG_DEVKMEM is not set
 
 #
 # Serial drivers
@@ -8902,7 +8902,7 @@ CONFIG_ZX2967_WATCHDOG=m
 CONFIG_MFD_CPCAP=m
 CONFIG_REGULATOR_CPCAP=m
 # CONFIG_DRM_DEBUG_MM_SELFTEST is not set
-CONFIG_ROCKCHIP_CDN_DP=m
+CONFIG_ROCKCHIP_CDN_DP=y
 CONFIG_DRM_MSM_DSI_14NM_PHY=y
 CONFIG_DRM_TINYDRM=m
 CONFIG_TINYDRM_MI0283QT=m
@@ -9075,5 +9075,5 @@ CONFIG_RESET_ATTACK_MITIGATION=y
 # CONFIG_STRING_SELFTEST is not set
 CONFIG_GPIO_BD9571MWV=m
 CONFIG_REGULATOR_BD9571MWV=m
-CONFIG_BOOTSPLASH=y
+CONFIG_BOOTSPLASH=n
 CONFIG_BOOTSPLASH_FILE="/bootsplash"
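
The three option flips above (/dev/kmem disabled, the Rockchip DP driver built in, bootsplash turned off) can be confirmed against the config shipped with the built kernel (a sketch; /boot/config-$(uname -r) is the conventional location):

$ grep -E 'CONFIG_(DEVKMEM|ROCKCHIP_CDN_DP|BOOTSPLASH)[ =]' /boot/config-$(uname -r)
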
diff --git a/i386-common.config b/i386-common.config
index c66f7e6..a96147c 100644
--- a/i386-common.config
+++ b/i386-common.config
@@ -123,6 +123,9 @@ CONFIG_X86_MCE_AMD=y
 CONFIG_X86_MCE_THRESHOLD=y
 # CONFIG_X86_MCE_INJECT is not set
 CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_BUG_CPU_MELTDOWN=y
+CONFIG_PAGE_TABLE_ISOLATION=y
+CONFIG_RETPOLINE=y
 
 #
 # Performance monitoring
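
With CONFIG_PAGE_TABLE_ISOLATION built in, a running 4.14.14 kernel reports its Meltdown status through the sysfs vulnerabilities interface that was backported into this stable series; a quick check (output shown for a PTI-enabled boot):

$ cat /sys/devices/system/cpu/vulnerabilities/meltdown
Mitigation: PTI
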
diff --git a/kernel-release.spec b/kernel-release.spec
index 6453602..fb3205c 100644
--- a/kernel-release.spec
+++ b/kernel-release.spec
@@ -6,7 +6,7 @@
 # compose tar.xz name and release
 %define kernelversion	4
 %define patchlevel	14
-%define sublevel	2
+%define sublevel	14
 %define relc		0
 # Only ever wrong on x.0 releases...
 %define previous	%{kernelversion}.%(echo $((%{patchlevel}-1)))
@@ -306,7 +306,7 @@ Patch145:	saa716x-driver-integration.patch
 #Patch200:	0001-ipc-namespace-a-generic-per-ipc-pointer-and-peripc_o.patch
 # NOT YET
 #Patch201:	0002-binder-implement-namepsace-support-for-Android-binde.patch
-Patch250:	4.14-C11.patch
+#Patch250:	4.14-C11.patch
 
 # Patches to external modules
 # Marked SourceXXX instead of PatchXXX because the modules
@@ -341,6 +341,11 @@ Patch421:	0153-x86-Return-memory-from-guest-to-host-kernel.patch
 Patch422:	0154-sysctl-vm-Fine-grained-cache-shrinking.patch
 %endif
 
+# (tpg) patches from frugalware to help Spectre/Meltdown
+Patch500:	SME-BSP_SME-microcode-update-fixes.patch
+Patch501:	retpoline-fill_RSB_on_context_switch_for_affected_CPUs.patch
+Patch502:	retpoline_add_LFENCE_to_the_retpoline_filling_RSB_macros.patch
+
 # Defines for the things that are needed for all the kernels
 #
 %define common_desc_kernel The kernel package contains the Linux kernel (vmlinuz), the core of your \
@@ -350,7 +355,6 @@ input and output, etc. \
 This version is a preview of an upcoming kernel version, and may be helpful if you are using \
 very current hardware.
 
-
 ### Global Requires/Provides
 #%define requires2	dracut >= 026
 %define requires3	kmod >= 12
@@ -376,9 +380,9 @@ Autoreqprov:	no
 
 BuildRequires:	bc
 BuildRequires:	binutils
-BuildRequires:	gcc
-BuildRequires:	gcc-plugin-devel
-BuildRequires:	gcc-c++
+BuildRequires:	gcc >= 7.2.1_2017.11-3
+BuildRequires:	gcc-plugin-devel >= 7.2.1_2017.11-3
+BuildRequires:	gcc-c++ >= 7.2.1_2017.11-3
 BuildRequires:	openssl-devel
 BuildRequires:	diffutils
 # For git apply
@@ -481,8 +485,11 @@ Release:	%{fakerel}				\
 Requires:	glibc-devel				\
 Requires:	ncurses-devel				\
 Requires:	make					\
-Requires:	gcc					\
+Requires:	gcc >= 7.2.1_2017.11-3			\
 Requires:	perl					\
+%ifarch x86_64						\
+Requires:	pkgconfig(libelf)			\
+%endif							\
 Summary:	The kernel-devel files for %{kname}-%{1}-%{buildrel} \
 Group:		Development/Kernel			\
 Provides:	kernel-devel = %{kverrel}		\
@@ -621,7 +628,7 @@ Release:	%{fakerel}
 Requires:	glibc-devel
 Requires:	ncurses-devel
 Requires:	make
-Requires:	gcc
+Requires:	gcc >= 7.2.1_2017.11-3
 Requires:	perl
 Requires:	diffutils
 Summary:	The Linux source code for %{kname}-%{buildrel}
@@ -873,6 +880,9 @@ find . -name '*~' -o -name '*.orig' -o -name '*.append' | %kxargs rm -f
 # wipe all .gitignore/.get_maintainer.ignore files
 find . -name "*.g*ignore" -exec rm {} \;
 
+# fix missing exec flag on file introduced in 4.14.10-rc1
+chmod 755 tools/objtool/sync-check.sh
+
 %build
 %setup_compile_flags
 ############################################################
@@ -1040,6 +1050,16 @@ SaveDevel() {
 # add acpica header files, needed for fglrx build
     cp -fR drivers/acpi/acpica/*.h $TempDevelRoot/drivers/acpi/acpica/
 
+%ifarch x86_64
+# orc unwinder needs theese
+	cp -fR tools/build/Build{,.include} $TempDevelRoot/tools/build
+	cp -fR tools/build/fixdep.c $TempDevelRoot/tools/build
+	cp -fR tools/lib/{str_error_r.c,string.c} $TempDevelRoot/tools/lib
+	cp -fR tools/lib/subcmd/* $TempDevelRoot/tools/lib/subcmd
+	cp -fR tools/objtool/* $TempDevelRoot/tools/objtool
+	cp -fR tools/scripts/utilities.mak $TempDevelRoot/tools/scripts
+%endif
+
     for i in alpha arc avr32 blackfin c6x cris frv h8300 hexagon ia64 m32r m68k m68knommu metag microblaze \
 		 mips mn10300 nios2 openrisc parisc powerpc s390 score sh sparc tile unicore32 xtensa; do
 	rm -rf $TempDevelRoot/arch/$i
@@ -1478,6 +1498,16 @@ rm -f %{target_source}/{.config.old,.config.cmd,.gitignore,.lst,.mailmap,.gitatt
 rm -f %{target_source}/{.missing-syscalls.d,arch/.gitignore,firmware/.gitignore}
 rm -rf %{target_source}/.tmp_depmod/
 
+# more cleaning
+pushd %{target_source}
+# lots of gitignore files
+find -iname ".gitignore" -delete
+# clean tools tree
+%smake -C tools clean
+%smake -C tools/build clean
+%smake -C tools/build/feature clean
+popd
+
 #endif %{with build_source}
 %endif
 
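
The gcc version bump is not cosmetic: CONFIG_RETPOLINE only yields a fully mitigated kernel when the compiler understands the retpoline options. Whether the installed gcc carries the patches can be probed directly (a sketch; -mindirect-branch=thunk is the x86 retpoline flag added to gcc for Spectre v2):

$ echo 'int main(void){return 0;}' | gcc -x c -mindirect-branch=thunk -o /dev/null - \
      && echo 'gcc has retpoline support'
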
diff --git a/retpoline-fill_RSB_on_context_switch_for_affected_CPUs.patch b/retpoline-fill_RSB_on_context_switch_for_affected_CPUs.patch
new file mode 100644
index 0000000..8f402eb
--- /dev/null
+++ b/retpoline-fill_RSB_on_context_switch_for_affected_CPUs.patch
@@ -0,0 +1,175 @@
+From c995efd5a740d9cbafbf58bde4973e8b50b4d761 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Fri, 12 Jan 2018 17:49:25 +0000
+Subject: x86/retpoline: Fill RSB on context switch for affected CPUs
+
+On context switch from a shallow call stack to a deeper one, as the CPU
+does 'ret' up the deeper side it may encounter RSB entries (predictions for
+where the 'ret' goes to) which were populated in userspace.
+
+This is problematic if neither SMEP nor KPTI (the latter of which marks
+userspace pages as NX for the kernel) are active, as malicious code in
+userspace may then be executed speculatively.
+
+Overwrite the CPU's return prediction stack with calls which are predicted
+to return to an infinite loop, to "capture" speculation if this
+happens. This is required both for retpoline, and also in conjunction with
+IBRS for !SMEP && !KPTI.
+
+On Skylake+ the problem is slightly different, and an *underflow* of the
+RSB may cause errant branch predictions to occur. So there it's not so much
+overwrite, as *filling* the RSB to attempt to prevent it getting
+empty. This is only a partial solution for Skylake+ since there are many
+other conditions which may result in the RSB becoming empty. The full
+solution on Skylake+ is to use IBRS, which will prevent the problem even
+when the RSB becomes empty. With IBRS, the RSB-stuffing will not be
+required on context switch.
+
+[ tglx: Added missing vendor check and slighty massaged comments and
+  	changelog ]
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515779365-9032-1-git-send-email-dwmw@amazon.co.uk
+---
+ arch/x86/entry/entry_32.S          | 11 +++++++++++
+ arch/x86/entry/entry_64.S          | 11 +++++++++++
+ arch/x86/include/asm/cpufeatures.h |  1 +
+ arch/x86/kernel/cpu/bugs.c         | 36 ++++++++++++++++++++++++++++++++++++
+ 4 files changed, 59 insertions(+)
+
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index a1f28a5..60c4c34 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -244,6 +244,17 @@ ENTRY(__switch_to_asm)
+ 	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
+ #endif
+ 
++#ifdef CONFIG_RETPOLINE
++	/*
++	 * When switching from a shallower to a deeper call stack
++	 * the RSB may either underflow or use entries populated
++	 * with userspace addresses. On CPUs where those concerns
++	 * exist, overwrite the RSB with entries which capture
++	 * speculative execution to prevent attack.
++	 */
++	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++#endif
++
+ 	/* restore callee-saved registers */
+ 	popl	%esi
+ 	popl	%edi
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 59874bc..d54a0ed 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -487,6 +487,17 @@ ENTRY(__switch_to_asm)
+ 	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
+ #endif
+ 
++#ifdef CONFIG_RETPOLINE
++	/*
++	 * When switching from a shallower to a deeper call stack
++	 * the RSB may either underflow or use entries populated
++	 * with userspace addresses. On CPUs where those concerns
++	 * exist, overwrite the RSB with entries which capture
++	 * speculative execution to prevent attack.
++	 */
++	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++#endif
++
+ 	/* restore callee-saved registers */
+ 	popq	%r15
+ 	popq	%r14
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index f275447..aa09559 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -211,6 +211,7 @@
+ #define X86_FEATURE_AVX512_4FMAPS	( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+ 
+ #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
++#define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* Fill RSB on context switches */
+ 
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index e4dc261..390b3dc 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -23,6 +23,7 @@
+ #include <asm/alternative.h>
+ #include <asm/pgtable.h>
+ #include <asm/set_memory.h>
++#include <asm/intel-family.h>
+ 
+ static void __init spectre_v2_select_mitigation(void);
+ 
+@@ -155,6 +156,23 @@ disable:
+ 	return SPECTRE_V2_CMD_NONE;
+ }
+ 
++/* Check for Skylake-like CPUs (for RSB handling) */
++static bool __init is_skylake_era(void)
++{
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
++	    boot_cpu_data.x86 == 6) {
++		switch (boot_cpu_data.x86_model) {
++		case INTEL_FAM6_SKYLAKE_MOBILE:
++		case INTEL_FAM6_SKYLAKE_DESKTOP:
++		case INTEL_FAM6_SKYLAKE_X:
++		case INTEL_FAM6_KABYLAKE_MOBILE:
++		case INTEL_FAM6_KABYLAKE_DESKTOP:
++			return true;
++		}
++	}
++	return false;
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+ 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -213,6 +231,24 @@ retpoline_auto:
+ 
+ 	spectre_v2_enabled = mode;
+ 	pr_info("%s\n", spectre_v2_strings[mode]);
++
++	/*
++	 * If neither SMEP or KPTI are available, there is a risk of
++	 * hitting userspace addresses in the RSB after a context switch
++	 * from a shallow call stack to a deeper one. To prevent this fill
++	 * the entire RSB, even when using IBRS.
++	 *
++	 * Skylake era CPUs have a separate issue with *underflow* of the
++	 * RSB, when they will predict 'ret' targets from the generic BTB.
++	 * The proper mitigation for this is IBRS. If IBRS is not supported
++	 * or deactivated in favour of retpolines the RSB fill on context
++	 * switch is required.
++	 */
++	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
++	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
++		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
++		pr_info("Filling RSB on context switch\n");
++	}
+ }
+ 
+ #undef pr_fmt
+-- 
+cgit v1.1
+
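
When the check added to bugs.c forces X86_FEATURE_RSB_CTXSW on (no SMEP and no PTI, or a Skylake-era CPU), the kernel announces it at boot, so the mitigation is easy to confirm; the message comes straight from the pr_info() in the hunk above:

$ dmesg | grep 'Filling RSB on context switch'
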
diff --git a/retpoline_add_LFENCE_to_the_retpoline_filling_RSB_macros.patch b/retpoline_add_LFENCE_to_the_retpoline_filling_RSB_macros.patch
new file mode 100644
index 0000000..d930100
--- /dev/null
+++ b/retpoline_add_LFENCE_to_the_retpoline_filling_RSB_macros.patch
@@ -0,0 +1,90 @@
+From 28d437d550e1e39f805d99f9f8ac399c778827b7 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Sat, 13 Jan 2018 17:27:30 -0600
+Subject: x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
+
+The PAUSE instruction is currently used in the retpoline and RSB filling
+macros as a speculation trap.  The use of PAUSE was originally suggested
+because it showed a very, very small difference in the amount of
+cycles/time used to execute the retpoline as compared to LFENCE.  On AMD,
+the PAUSE instruction is not a serializing instruction, so the pause/jmp
+loop will use excess power as it is speculated over waiting for return
+to mispredict to the correct target.
+
+The RSB filling macro is applicable to AMD, and, if software is unable to
+verify that LFENCE is serializing on AMD (possible when running under a
+hypervisor), the generic retpoline support will be used and, so, is also
+applicable to AMD.  Keep the current usage of PAUSE for Intel, but add an
+LFENCE instruction to the speculation trap for AMD.
+
+The same sequence has been adopted by GCC for the GCC generated retpolines.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@alien8.de>
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Paul Turner <pjt@google.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Kees Cook <keescook@google.com>
+Link: https://lkml.kernel.org/r/20180113232730.31060.36287.stgit@tlendack-t1.amdoffice.net
+---
+ arch/x86/include/asm/nospec-branch.h | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 402a11c..7b45d84 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -11,7 +11,7 @@
+  * Fill the CPU return stack buffer.
+  *
+  * Each entry in the RSB, if used for a speculative 'ret', contains an
+- * infinite 'pause; jmp' loop to capture speculative execution.
++ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+  *
+  * This is required in various cases for retpoline and IBRS-based
+  * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+@@ -38,11 +38,13 @@
+ 	call	772f;				\
+ 773:	/* speculation trap */			\
+ 	pause;					\
++	lfence;					\
+ 	jmp	773b;				\
+ 772:						\
+ 	call	774f;				\
+ 775:	/* speculation trap */			\
+ 	pause;					\
++	lfence;					\
+ 	jmp	775b;				\
+ 774:						\
+ 	dec	reg;				\
+@@ -73,6 +75,7 @@
+ 	call	.Ldo_rop_\@
+ .Lspec_trap_\@:
+ 	pause
++	lfence
+ 	jmp	.Lspec_trap_\@
+ .Ldo_rop_\@:
+ 	mov	\reg, (%_ASM_SP)
+@@ -165,6 +168,7 @@
+ 	"       .align 16\n"					\
+ 	"901:	call   903f;\n"					\
+ 	"902:	pause;\n"					\
++	"    	lfence;\n"					\
+ 	"       jmp    902b;\n"					\
+ 	"       .align 16\n"					\
+ 	"903:	addl   $4, %%esp;\n"				\
+-- 
+cgit v1.1
+
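
With both retpoline patches applied and CONFIG_RETPOLINE=y (enabled in the configs below), the selected Spectre v2 mitigation is reported in sysfs (a sketch; the exact string depends on the compiler and command line used):

$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
Mitigation: Full generic retpoline
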
diff --git a/x86_64-common.config b/x86_64-common.config
index 577ba73..3445182 100644
--- a/x86_64-common.config
+++ b/x86_64-common.config
@@ -82,6 +82,9 @@ CONFIG_X86_MCE_AMD=y
 CONFIG_X86_MCE_THRESHOLD=y
 # CONFIG_X86_MCE_INJECT is not set
 CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_BUG_CPU_MELTDOWN=y
+CONFIG_PAGE_TABLE_ISOLATION=y
+CONFIG_RETPOLINE=y
 
 #
 # Performance monitoring

Comments
itchka [@T] compuserve.com: Will be superseded. Does not build external modules with gold linker. (2504d 04hrs)
itchka [@T] compuserve.com: Superseded. (2504d 00hrs)
itchka [@T] compuserve.com: No comment. (2504d 00hrs)