Merge tag 'iommu-fix-v5.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu [Linux 5.0]

Merge tag 'iommu-fix-v5.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu [Linux 5.0]

This Linux kernel change "Merge tag 'iommu-fix-v5.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu" is included in the Linux 5.0 release. This change is authored by Linus Torvalds <torvalds [at] linux-foundation.org> on Fri Mar 1 09:13:04 2019 -0800. The commit for this change in the Linux stable tree is a215ce8 (patch). Other info about this change: Merge: 2d28e01 cffaaf0

Merge tag 'iommu-fix-v5.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fix from Joerg Roedel:
 "One important fix for a memory corruption issue in the Intel VT-d
  driver that triggers on hardware with deep PCI hierarchies"

* tag 'iommu-fix-v5.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/dmar: Fix buffer overflow during PCI bus notification

There are 36 lines of Linux source code added and 3 lines deleted in this change. Code changes to the Linux kernel are as follows.

 fs/hugetlbfs/inode.c | 12 ++++++++++++
 mm/hugetlb.c         | 16 +++++++++++++---
 mm/migrate.c         | 11 +++++++++++
 3 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 32920a1..a7fa037 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
    rc = migrate_huge_page_move_mapping(mapping, newpage, page);
    if (rc != MIGRATEPAGE_SUCCESS)
        return rc;
+
+   /*
+    * page_private is subpool pointer in hugetlb pages.  Transfer to
+    * new page.  PagePrivate is not associated with page_private for
+    * hugetlb pages and can not be set here as only page_huge_active
+    * pages can be migrated.
+    */
+   if (page_private(page)) {
+       set_page_private(newpage, page_private(page));
+       set_page_private(page, 0);
+   }
+
    if (mode != MIGRATE_SYNC_NO_COPY)
        migrate_page_copy(newpage, page);
    else
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index afef616..8dfdffc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3624,7 +3624,6 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
    copy_user_huge_page(new_page, old_page, address, vma,
                pages_per_huge_page(h));
    __SetPageUptodate(new_page);
-   set_page_huge_active(new_page);

    mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
    mmu_notifier_invalidate_range_start(&range);
@@ -3645,6 +3644,7 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                make_huge_pte(vma, new_page, 1));
        page_remove_rmap(old_page, true);
        hugepage_add_new_anon_rmap(new_page, vma, haddr);
+       set_page_huge_active(new_page);
        /* Make the old page be freed below */
        new_page = old_page;
    }
@@ -3729,6 +3729,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
    pte_t new_pte;
    spinlock_t *ptl;
    unsigned long haddr = address & huge_page_mask(h);
+   bool new_page = false;

    /*
     * Currently, we are forced to kill the process in the event the
@@ -3790,7 +3791,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        }
        clear_huge_page(page, address, pages_per_huge_page(h));
        __SetPageUptodate(page);
-       set_page_huge_active(page);
+       new_page = true;

        if (vma->vm_flags & VM_MAYSHARE) {
            int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3861,6 +3862,15 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
    }

    spin_unlock(ptl);
+
+   /*
+    * Only make newly allocated pages active.  Existing pages found
+    * in the pagecache could be !page_huge_active() if they have been
+    * isolated for migration.
+    */
+   if (new_page)
+       set_page_huge_active(page);
+
    unlock_page(page);
 out:
    return ret;
@@ -4095,7 +4105,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
     * the set_pte_at() write.
     */
    __SetPageUptodate(page);
-   set_page_huge_active(page);

    mapping = dst_vma->vm_file->f_mapping;
    idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4163,6 +4172,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
    update_mmu_cache(dst_vma, dst_addr, dst_pte);

    spin_unlock(ptl);
+   set_page_huge_active(page);
    if (vm_shared)
        unlock_page(page);
    ret = 0;
diff --git a/mm/migrate.c b/mm/migrate.c
index d4fd680..181f5d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1315,6 +1315,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        lock_page(hpage);
    }

+   /*
+    * Check for pages which are in the process of being freed.  Without
+    * page_mapping() set, hugetlbfs specific move page routine will not
+    * be called and we could leak usage counts for subpools.
+    */
+   if (page_private(hpage) && !page_mapping(hpage)) {
+       rc = -EBUSY;
+       goto out_unlock;
+   }
+
    if (PageAnon(hpage))
        anon_vma = page_get_anon_vma(hpage);

@@ -1345,6 +1355,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        put_new_page = NULL;
    }

+out_unlock:
    unlock_page(hpage);
 out:
    if (rc != -EAGAIN)

Leave a Reply

Your email address will not be published. Required fields are marked *