[module] fault in pages one by one for ivshmem devices

It appears that the PCI BAR memory is slow to access with remap_pfn_range
and that it should instead be faulted in one page at a time.

The commit 5774e21965 implemented the former
behaviour and caused a performance regression in the VM->VM case.

This commit restores the old behaviour, but extends it to support mmapping
the kvmfr device directly, without going through a dmabuf.
This commit is contained in:
Quantum 2021-02-20 22:09:08 -05:00 committed by Geoffrey McRae
parent 39a09ca565
commit 7e58278858

View File

@ -79,6 +79,21 @@ struct kvmfrbuf
struct page ** pages;
};
static vm_fault_t kvmfr_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct kvmfrbuf *kbuf = (struct kvmfrbuf *)vma->vm_private_data;
vmf->page = kbuf->pages[vmf->pgoff];
get_page(vmf->page);
return 0;
}
/* VMA operations for dmabuf-backed kvmfr mappings: demand-fault only. */
static const struct vm_operations_struct kvmfr_vm_ops = {
  .fault = kvmfr_vm_fault,
};
static struct sg_table * map_kvmfrbuf(struct dma_buf_attachment *at, static struct sg_table * map_kvmfrbuf(struct dma_buf_attachment *at,
enum dma_data_direction direction) enum dma_data_direction direction)
{ {
@ -138,10 +153,9 @@ static int mmap_kvmfrbuf(struct dma_buf * buf, struct vm_area_struct * vma)
switch (kbuf->kdev->type) switch (kbuf->kdev->type)
{ {
case KVMFR_TYPE_PCI: case KVMFR_TYPE_PCI:
{ vma->vm_ops = &kvmfr_vm_ops;
unsigned long pfn = virt_to_phys(kbuf->kdev->addr + kbuf->offset + offset) >> PAGE_SHIFT; vma->vm_private_data = buf->priv;
return remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot); return 0;
}
case KVMFR_TYPE_STATIC: case KVMFR_TYPE_STATIC:
return remap_vmalloc_range(vma, kbuf->kdev->addr + kbuf->offset, vma->vm_pgoff); return remap_vmalloc_range(vma, kbuf->kdev->addr + kbuf->offset, vma->vm_pgoff);
@ -265,6 +279,21 @@ static long device_ioctl(struct file * filp, unsigned int ioctl, unsigned long a
return ret; return ret;
} }
/*
 * Page-fault handler for direct mmaps of the kvmfr PCI device node.
 *
 * Resolves the faulting page from the device's BAR mapping one page at a
 * time rather than remapping the whole BAR eagerly. Returns 0 with
 * vmf->page holding an extra reference.
 *
 * NOTE(review): vmf->pgoff is used without an explicit range check —
 * presumably device_mmap validated the VMA against the device size
 * before installing these ops; confirm at the call site.
 */
static vm_fault_t pci_mmap_fault(struct vm_fault *vmf)
{
  struct kvmfr_dev *kdev = vmf->vma->vm_private_data;
  struct page *page =
      virt_to_page(kdev->addr + (vmf->pgoff << PAGE_SHIFT));

  /* take a reference for the caller; the core MM drops it on unmap */
  get_page(page);
  vmf->page = page;
  return 0;
}
/* VMA operations for direct device-node mappings: demand-fault only. */
static const struct vm_operations_struct pci_mmap_ops = {
  .fault = pci_mmap_fault,
};
static int device_mmap(struct file * filp, struct vm_area_struct * vma) static int device_mmap(struct file * filp, struct vm_area_struct * vma)
{ {
struct kvmfr_dev * kdev; struct kvmfr_dev * kdev;
@ -284,10 +313,9 @@ static int device_mmap(struct file * filp, struct vm_area_struct * vma)
switch (kdev->type) switch (kdev->type)
{ {
case KVMFR_TYPE_PCI: case KVMFR_TYPE_PCI:
{ vma->vm_ops = &pci_mmap_ops;
unsigned long pfn = virt_to_phys(kdev->addr + offset) >> PAGE_SHIFT; vma->vm_private_data = kdev;
return remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot); return 0;
}
case KVMFR_TYPE_STATIC: case KVMFR_TYPE_STATIC:
return remap_vmalloc_range(vma, kdev->addr, vma->vm_pgoff); return remap_vmalloc_range(vma, kdev->addr, vma->vm_pgoff);