mm,swapops: update check in is_pfn_swap_entry for hwpoison entries
commit 07a57a3 upstream.

Tony reported that machine check recovery was broken in v6.9-rc1: he was
hitting a VM_BUG_ON when injecting uncorrectable memory errors into
DRAM.

After some more digging and debugging on his side, he realized that this
regression went back to v6.1, with the introduction of commit 0d206b5
("mm/swap: add swp_offset_pfn() to fetch PFN from swap entry").  That
commit, among other things, introduced swp_offset_pfn() and replaced
hwpoison_entry_to_pfn() with it.

The patch also introduced a VM_BUG_ON(!is_pfn_swap_entry(entry)) check in
swp_offset_pfn(), but is_pfn_swap_entry() never got updated to cover
hwpoison entries, which means that we hit the VM_BUG_ON whenever we call
swp_offset_pfn() for such entries on kernels built with CONFIG_DEBUG_VM.
Fix this by updating the check to cover hwpoison entries as well, and
update the comment while we are at it.
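
For illustration, a minimal sketch of the check that trips, simplified from
the upstream definition of swp_offset_pfn() added by that commit (shown here
for context only):

	/*
	 * Simplified from include/linux/swapops.h.  With CONFIG_DEBUG_VM set,
	 * VM_BUG_ON() expands to BUG_ON(), so calling swp_offset_pfn() on a
	 * hwpoison entry blows up as long as is_pfn_swap_entry() returns
	 * false for SWP_HWPOISON entries.
	 */
	static inline unsigned long swp_offset_pfn(swp_entry_t entry)
	{
		VM_BUG_ON(!is_pfn_swap_entry(entry));
		return swp_offset(entry) & SWP_PFN_MASK;
	}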

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 0d206b5 ("mm/swap: add swp_offset_pfn() to fetch PFN from swap entry")
Signed-off-by: Oscar Salvador <[email protected]>
Reported-by: Tony Luck <[email protected]>
Closes: https://lore.kernel.org/all/Zg8kLSl2yAlA3o5D@agluck-desk3/
Tested-by: Tony Luck <[email protected]>
Reviewed-by: Peter Xu <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Acked-by: Miaohe Lin <[email protected]>
Cc: <[email protected]>	[6.1.x]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Miaohe Lin <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
osalvadorvilardaga authored and gregkh committed May 17, 2024
1 parent 2effe40 commit ea92809
Showing 1 changed file with 53 additions and 52 deletions.
include/linux/swapops.h (105 changes: 53 additions & 52 deletions)
@@ -409,6 +409,55 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
 }
 #endif /* CONFIG_MIGRATION */
 
+#ifdef CONFIG_MEMORY_FAILURE
+
+extern atomic_long_t num_poisoned_pages __read_mostly;
+
+/*
+ * Support for hardware poisoned pages
+ */
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+	BUG_ON(!PageLocked(page));
+	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+}
+
+static inline int is_hwpoison_entry(swp_entry_t entry)
+{
+	return swp_type(entry) == SWP_HWPOISON;
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+	atomic_long_inc(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_sub(long i)
+{
+	atomic_long_sub(i, &num_poisoned_pages);
+}
+
+#else /* CONFIG_MEMORY_FAILURE */
+
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+	return swp_entry(0, 0);
+}
+
+static inline int is_hwpoison_entry(swp_entry_t swp)
+{
+	return 0;
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+}
+
+static inline void num_poisoned_pages_sub(long i)
+{
+}
+#endif /* CONFIG_MEMORY_FAILURE */
+
 typedef unsigned long pte_marker;
 
 #define PTE_MARKER_UFFD_WP BIT(0)
@@ -503,16 +552,17 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
 
 /*
  * A pfn swap entry is a special type of swap entry that always has a pfn stored
- * in the swap offset. They are used to represent unaddressable device memory
- * and to restrict access to a page undergoing migration.
+ * in the swap offset. They can either be used to represent unaddressable device
+ * memory, to restrict access to a page undergoing migration or to represent a
+ * pfn which has been hwpoisoned and unmapped.
  */
 static inline bool is_pfn_swap_entry(swp_entry_t entry)
 {
 	/* Make sure the swp offset can always store the needed fields */
 	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
 
 	return is_migration_entry(entry) || is_device_private_entry(entry) ||
-	       is_device_exclusive_entry(entry);
+	       is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
 }
 
 struct page_vma_mapped_walk;
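
For context, a hedged sketch of the kind of caller that hits this check:
the memory-failure page-table walk translating a hwpoison swap entry back
to a pfn. The names follow upstream code, but the snippet is simplified and
illustrative only:

	/*
	 * Illustrative sketch: when memory-failure handling walks page
	 * tables, a non-present PTE may hold a hwpoison swap entry.  Before
	 * this patch, reaching swp_offset_pfn() here tripped its VM_BUG_ON()
	 * on CONFIG_DEBUG_VM kernels, because is_pfn_swap_entry() did not
	 * cover SWP_HWPOISON entries.
	 */
	swp_entry_t swp = pte_to_swp_entry(pte);

	if (is_hwpoison_entry(swp))
		pfn = swp_offset_pfn(swp);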
@@ -581,55 +631,6 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
 }
 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
 
-#ifdef CONFIG_MEMORY_FAILURE
-
-extern atomic_long_t num_poisoned_pages __read_mostly;
-
-/*
- * Support for hardware poisoned pages
- */
-static inline swp_entry_t make_hwpoison_entry(struct page *page)
-{
-	BUG_ON(!PageLocked(page));
-	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
-}
-
-static inline int is_hwpoison_entry(swp_entry_t entry)
-{
-	return swp_type(entry) == SWP_HWPOISON;
-}
-
-static inline void num_poisoned_pages_inc(void)
-{
-	atomic_long_inc(&num_poisoned_pages);
-}
-
-static inline void num_poisoned_pages_sub(long i)
-{
-	atomic_long_sub(i, &num_poisoned_pages);
-}
-
-#else /* CONFIG_MEMORY_FAILURE */
-
-static inline swp_entry_t make_hwpoison_entry(struct page *page)
-{
-	return swp_entry(0, 0);
-}
-
-static inline int is_hwpoison_entry(swp_entry_t swp)
-{
-	return 0;
-}
-
-static inline void num_poisoned_pages_inc(void)
-{
-}
-
-static inline void num_poisoned_pages_sub(long i)
-{
-}
-#endif /* CONFIG_MEMORY_FAILURE */
-
 static inline int non_swap_entry(swp_entry_t entry)
 {
 	return swp_type(entry) >= MAX_SWAPFILES;