RDMA/i40iw: Address an mmap handler exploit in i40iw
i40iw_mmap manipulates vma->vm_pgoff to differentiate a push page mmap from a
doorbell mmap, and uses it to compute the pfn passed to remap_pfn_range
without any validation. This is vulnerable to the mmap exploit described
in: https://lore.kernel.org/r/[email protected]

The push feature is currently disabled in the driver, so no push mmaps are
issued from user-space; the feature does not work as expected on the x722
product.

Remove the push module parameter and all VMA attribute manipulations for
this feature in i40iw_mmap. Update i40iw_mmap to allow only doorbell (DB)
user mmaps at offset 0: check that vm_pgoff is zero and that the mapping is
bounded to a single page.

Cc: <[email protected]>
Fixes: d374984 ("i40iw: add files for iwarp interface")
Link: https://lore.kernel.org/r/[email protected]
Reported-by: Di Zhu <[email protected]>
Signed-off-by: Shiraz Saleem <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
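
The bug class described in the message above comes down to feeding a user-controlled mmap offset straight into a pfn. As an illustrative sketch only (not i40iw code; the handler name and its placement are made up), a handler of this shape lets user-space pick which physical frame gets mapped:

/* Illustrative sketch of the vulnerable pattern -- not driver code. */
static int bad_mmap(struct file *filp, struct vm_area_struct *vma)
{
        /*
         * vma->vm_pgoff comes straight from the mmap() offset argument,
         * so the caller chooses the physical page that is mapped.
         */
        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

The fix in the diff below removes that degree of freedom: the pfn is derived from the device's own BAR address, and any request with a non-zero offset or a length other than one page is rejected with -EINVAL.
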
shirazsaleem authored and jgunthorpe committed Nov 25, 2020
1 parent 6830ff8 commit 2ed3814
Showing 2 changed files with 7 additions and 35 deletions.
5 changes: 0 additions & 5 deletions drivers/infiniband/hw/i40iw/i40iw_main.c

@@ -54,10 +54,6 @@
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
         __stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
 
-static int push_mode;
-module_param(push_mode, int, 0644);
-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
-
 static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
@@ -1580,7 +1576,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
         if (status)
                 goto exit;
         iwdev->obj_next = iwdev->obj_mem;
-        iwdev->push_mode = push_mode;
 
         init_waitqueue_head(&iwdev->vchnl_waitq);
         init_waitqueue_head(&dev->vf_reqs);
37 changes: 7 additions & 30 deletions drivers/infiniband/hw/i40iw/i40iw_verbs.c

@@ -167,39 +167,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
  */
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
-        struct i40iw_ucontext *ucontext;
-        u64 db_addr_offset, push_offset, pfn;
-
-        ucontext = to_ucontext(context);
-        if (ucontext->iwdev->sc_dev.is_pf) {
-                db_addr_offset = I40IW_DB_ADDR_OFFSET;
-                push_offset = I40IW_PUSH_OFFSET;
-                if (vma->vm_pgoff)
-                        vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
-        } else {
-                db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
-                push_offset = I40IW_VF_PUSH_OFFSET;
-                if (vma->vm_pgoff)
-                        vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
-        }
+        struct i40iw_ucontext *ucontext = to_ucontext(context);
+        u64 dbaddr;
 
-        vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
-
-        if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
-                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-        } else {
-                if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
-                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-                else
-                        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-        }
+        if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+                return -EINVAL;
 
-        pfn = vma->vm_pgoff +
-              (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
-               PAGE_SHIFT);
+        dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
 
-        return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
-                                 vma->vm_page_prot, NULL);
+        return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
+                                 pgprot_noncached(vma->vm_page_prot), NULL);
 }
 
 /**
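
Viewed from user-space, the tightened handler accepts only a single-page mapping at offset 0. A minimal sketch of that behavior follows, assuming an already-opened ibv_context ("ctx") backed by an i40iw device; the map_doorbell helper and the direct mmap() calls are illustrative assumptions, since in practice the provider library issues this mmap itself:

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <infiniband/verbs.h>

/* Illustrative only: map the doorbell page and confirm that the former
 * push-page offsets are now refused. "ctx" is an assumed, already-open
 * ibv_context on an i40iw device. */
static void *map_doorbell(struct ibv_context *ctx)
{
        long psz = sysconf(_SC_PAGESIZE);

        /* Offset 0, exactly one page: the only geometry i40iw_mmap still allows. */
        void *db = mmap(NULL, psz, PROT_WRITE, MAP_SHARED, ctx->cmd_fd, 0);
        if (db == MAP_FAILED) {
                perror("doorbell mmap");
                return NULL;
        }

        /* A non-zero offset (formerly a push-page request) now fails with EINVAL. */
        void *push = mmap(NULL, psz, PROT_WRITE, MAP_SHARED, ctx->cmd_fd, psz);
        if (push == MAP_FAILED && errno == EINVAL)
                fprintf(stderr, "push-page offset rejected as expected\n");
        else if (push != MAP_FAILED)
                munmap(push, psz);

        return db;
}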
