aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--COPYING338
-rw-r--r--Makefile7
-rw-r--r--README.md17
-rw-r--r--loopback_driver.c1148
-rw-r--r--loopback_driver.h516
-rw-r--r--virtio_loopback_device.c1471
-rw-r--r--virtio_loopback_driver.c858
-rw-r--r--virtio_loopback_driver.h305
8 files changed, 2989 insertions, 1671 deletions
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..9efa6fb
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,338 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Moe Ghoul>, 1 April 1989
+ Moe Ghoul, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/Makefile b/Makefile
index 0cbef1c..456c952 100644
--- a/Makefile
+++ b/Makefile
@@ -15,16 +15,17 @@
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
-NAME_C=loopback_driver
-
LINUX_DIR ?= /lib/modules/$(shell uname -r)/build
-obj-m += $(NAME_C).o
+virtio_loopback-objs := virtio_loopback_driver.o virtio_loopback_device.o
+obj-m += virtio_loopback.o
ifeq ($(DEBUG), 1)
ccflags-y += -DDEBUG
endif
+CFLAGS := -Wall -Wextra -Werror
+
all:
make -C $(LINUX_DIR) M=$(PWD) modules
diff --git a/README.md b/README.md
index 70cc2d4..6c986e3 100644
--- a/README.md
+++ b/README.md
@@ -1,16 +1,25 @@
# virtio-loopback transport repository
-This repository includes the beta version of the "virtio_loopback_transport" driver which is part of the Virtio Loopback Design presented in this [document](https://git.virtualopensystems.com/virtio-loopback/docs/-/blob/master/design_docs). This work carried on by Virtual Open Systems in the [Automotive Grade Linux](https://www.automotivegradelinux.org) community.
+This repository includes the "virtio-loopback" driver which is part of the Virtio-loopback design. If you want to learn more about how to set up and test the whole virtio-loopback architecture, refer to the [virtio-loopback testing guide](https://gerrit.automotivelinux.org/gerrit/gitweb?p=src/virtio/virtio-loopback-adapter.git;a=blob;f=Documentation/testing_virtio_loopback_design.md;hb=HEAD).
-As described in the design document, the transport is only a part of a more complex architecture. If you want to see the implementation and build the other components, refer to the [virtio-loopback docs repository](https://git.virtualopensystems.com/virtio-loopback/docs/-/tree/beta-release).
+This work is carried out by Virtual Open Systems in the [Automotive Grade Linux](https://www.automotivegradelinux.org) community.
## Build the virtio-loopback transport
In order to build this project the next commands need to be used:
- `make` for x86
- `make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu-` for arm64
+- `make ARCH=riscv CROSS_COMPILE=riscv64-linux-gnu-` for riscv64
-**NOTE**: The `DEBUG=1` can be used in order to enable the driver's logs.
+**NOTE**: The `DEBUG=1` option can be used to enable the driver's debug logs.
-This driver is tested with Linux v5.10 and it is NOT be compatible with newer versions ("fcheck_files" function is replaced by "files_lookup_fd_rcu".
+## Tested platforms
+The driver has been tested with the following platforms (sorted by architecture):
+- x86: QEMU (machine `pc`), Thinkpad e14 gen3, x86 servers etc.
+- aarch64: QEMU (machine `virt`), Raspberry PI 4, AGL reference HW board (Rcar-H3)
+- riscv64: LicheePi4A
+
+## License
+
+This project is licensed under the terms of the GNU General Public License (GPL), version 2 or (at your option) any later version. See the [COPYING](COPYING) file for details.
diff --git a/loopback_driver.c b/loopback_driver.c
deleted file mode 100644
index 989b865..0000000
--- a/loopback_driver.c
+++ /dev/null
@@ -1,1148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Based on virtio_mmio.c
- * Copyright 2011-2014, ARM Ltd.
- *
- * Copyright 2022-2024 Virtual Open Systems SAS
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#define pr_fmt(fmt) "virtio-loopback: " fmt
-
-#include <linux/cdev.h>
-#include <linux/eventfd.h>
-#include <linux/fdtable.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/version.h>
-#include <linux/slab.h>
-
-/* Virtio-loopback includes */
-#include <linux/dma-mapping.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <uapi/linux/virtio_mmio.h>
-#include <linux/virtio_ring.h>
-#include <linux/pid.h>
-#include <linux/kthread.h>
-
-/* Loopback header file */
-#include "loopback_driver.h"
-
-/* Features */
-MODULE_LICENSE("GPL v2");
-
-/* function declaration */
-static uint64_t read_adapter(uint64_t fn_id, uint64_t size);
-static void write_adapter(uint64_t data, uint64_t fn_id, uint64_t size);
-
-struct mmap_info *info;
-int mmap_index;
-uint64_t sum_pgfaults;
-
-/* Waitq */
-wait_queue_head_t wq;
-wait_queue_head_t wq_notify;
-
-/* Read write mutex */
-struct mutex read_write_lock;
-struct mutex interrupt_lock;
-
-/* Notification spinlock */
-spinlock_t notify_q_spinlock;
-
-bool share_communication_struct;
-uint32_t vq_index;
-uint64_t vq_pfns[16], vq_pfn;
-
-struct virtio_mmio_device *vm_dev_irq;
-
-struct virtqueue *global_vq;
-const struct vring *global_vring;
-
-/* counters */
-static int interrupt_cnt;
-static int notify_sent, notify_received;
-
-/* Define a notification list */
-static struct list_head *notify_list;
-
-static struct platform_device *virtio_loopback_device;
-
-/* Virio-loopback device funcitonality */
-struct eventfd_ctx *efd_ctx;
-static struct task_struct *start_loopback_thread;
-static struct task_struct *start_notification_thread;
-
-/* global storage for device Major number */
-static int dev_major;
-/* sysfs class structure */
-static struct class *loopback_class;
-/* array of loopback_device_data for */
-static struct loopback_device_data *loopback_data;
-
-/* Allow only one process to open the driver */
-unsigned long loopback_flags;
-
-/* Current ram index */
-int cur_ram_idx;
-
-/*
- * If this variable is true then read/write should wait
- * the adapter to unlock this opertion by sending an
- * eventfd. If it's equal to "false" then the oparetion
- * does not wait for adapter's confirmation.
- */
-bool valid_eventfd;
-
-/* Create a mapping array */
-struct share_mmap share_mmap_list[MMAP_LIMIT];
-
-/* Configuration interface */
-
-static u64 vm_get_features(struct virtio_device *vdev)
-{
- u64 features;
-
- /* Take feature bits 0-31 */
- write_adapter(1, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 4);
- features = read_adapter(VIRTIO_MMIO_DEVICE_FEATURES, 4);
- features <<= 32;
-
- /* Take feature bits 32-63 */
- write_adapter(0, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 4);
- features |= read_adapter(VIRTIO_MMIO_DEVICE_FEATURES, 4);
-
- return features;
-}
-
-static int vm_finalize_features(struct virtio_device *vdev)
-{
- struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-
- /* Give virtio_ring a chance to accept features. */
- vring_transport_features(vdev);
-
- /* Make sure there are no mixed devices */
- if (vm_dev->version == 2 &&
- !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
- dev_err(&vdev->dev, "New virtio-mmio devices (version 2) "
- "must provide VIRTIO_F_VERSION_1 feature!\n");
- return -EINVAL;
- }
-
- write_adapter(1, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 4);
- write_adapter((u32)(vdev->features >> 32), VIRTIO_MMIO_DRIVER_FEATURES, 4);
-
- write_adapter(0, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 4);
- write_adapter((u32)vdev->features, VIRTIO_MMIO_DRIVER_FEATURES, 4);
-
- return 0;
-}
-
-static void vm_get(struct virtio_device *vdev, unsigned int offset,
- void *buf, unsigned int len)
-{
- struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
- u8 b;
- __le16 w;
- __le32 l;
-
- if (vm_dev->version == 1) {
- u8 *ptr = buf;
- int i;
-
- for (i = 0; i < len; i++)
- ptr[i] = read_adapter(VIRTIO_MMIO_CONFIG + offset + i, 1);
- return;
- }
-
- switch (len) {
- case 1:
- b = read_adapter(VIRTIO_MMIO_CONFIG + offset, 1);
- memcpy(buf, &b, sizeof(b));
- break;
- case 2:
- w = cpu_to_le16(read_adapter(VIRTIO_MMIO_CONFIG + offset, 2));
- memcpy(buf, &w, sizeof(w));
- break;
- case 4:
- l = cpu_to_le32(read_adapter(VIRTIO_MMIO_CONFIG + offset, 4));
- memcpy(buf, &l, sizeof(l));
- break;
- case 8:
- l = cpu_to_le32(read_adapter(VIRTIO_MMIO_CONFIG + offset, 4));
- memcpy(buf, &l, sizeof(l));
- l = cpu_to_le32(read_adapter(VIRTIO_MMIO_CONFIG + offset + sizeof(l), 4));
- memcpy(buf + sizeof(l), &l, sizeof(l));
- break;
- default:
- BUG();
- }
-}
-
-static void vm_set(struct virtio_device *vdev, unsigned int offset,
- const void *buf, unsigned int len)
-{
- struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
- u8 b;
- __le16 w;
- __le32 l;
-
- if (vm_dev->version == 1) {
- const u8 *ptr = buf;
- int i;
-
- for (i = 0; i < len; i++)
- write_adapter(ptr[i], VIRTIO_MMIO_CONFIG + offset + i, 1);
-
- return;
- }
-
- switch (len) {
- case 1:
- memcpy(&b, buf, sizeof(b));
- write_adapter(b, VIRTIO_MMIO_CONFIG + offset, 1);
- break;
- case 2:
- memcpy(&w, buf, sizeof(w));
- write_adapter(le16_to_cpu(w), VIRTIO_MMIO_CONFIG + offset, 2);
- break;
- case 4:
- memcpy(&l, buf, sizeof(l));
- write_adapter(le32_to_cpu(l), VIRTIO_MMIO_CONFIG + offset, 4);
- break;
- case 8:
- memcpy(&l, buf, sizeof(l));
- write_adapter(le32_to_cpu(l), VIRTIO_MMIO_CONFIG + offset, 4);
- memcpy(&l, buf + sizeof(l), sizeof(l));
- write_adapter(le32_to_cpu(l), VIRTIO_MMIO_CONFIG + offset + sizeof(l), 4);
- break;
- default:
- BUG();
- }
-}
-
-static u32 vm_generation(struct virtio_device *vdev)
-{
- struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-
- if (vm_dev->version == 1)
- return 0;
- else
- return read_adapter(VIRTIO_MMIO_CONFIG_GENERATION, 4);
-}
-
-static u8 vm_get_status(struct virtio_device *vdev)
-{
- return read_adapter(VIRTIO_MMIO_STATUS, 4) & 0xff;
-}
-
-static void vm_set_status(struct virtio_device *vdev, u8 status)
-{
- write_adapter(status, VIRTIO_MMIO_STATUS, 4);
-}
-
-static void vm_reset(struct virtio_device *vdev)
-{
- /* 0 status means a reset. */
- write_adapter(0, VIRTIO_MMIO_STATUS, 4);
-}
-
-int start_notification(void *data)
-{
- struct notify_data *first_notification;
- uint32_t index;
-
- DBG("Start notification\n");
-
- (void)data;
-
- while(1) {
-
- spin_lock(&notify_q_spinlock);
- while (valid_eventfd && list_empty(notify_list) == 1) {
- spin_unlock(&notify_q_spinlock);
- wait_event_timeout(wq_notify, list_empty(notify_list) != 1, 1 * HZ);
- spin_lock(&notify_q_spinlock);
- }
-
- first_notification = list_first_entry(notify_list, struct notify_data, list);
- index = first_notification->index;
- list_del(&first_notification->list);
-
- DBG("notify_received: %d, VQ: %d\n", notify_received++, index);
- spin_unlock(&notify_q_spinlock);
- write_adapter(index, VIRTIO_MMIO_QUEUE_NOTIFY, 4);
-
- if (!valid_eventfd) {
- DBG("Exit notification thread\n");
- return 0;
- }
- }
-}
-
-/* the notify function used when creating a virt queue */
-static bool vm_notify(struct virtqueue *vq)
-{
- struct notify_data *data;
- /*
- * We write the queue's selector into
- * the notification register to signal
- * the other end
- */
- spin_lock(&notify_q_spinlock);
- DBG("vm_notify\n");
-
- data = kmalloc(sizeof(struct notify_data), GFP_KERNEL);
- data->index = vq->index;
- INIT_LIST_HEAD(&data->list);
- list_add_tail(&data->list, notify_list);
- spin_unlock(&notify_q_spinlock);
-
- wake_up(&wq_notify);
-
- return true;
-}
-
-/* Notify all virtqueues on an interrupt. */
-static void vm_interrupt(struct work_struct *work)
-{
- struct virtio_mmio_device *vm_dev = vm_dev_irq;
- struct virtio_mmio_vq_info *info;
- int irq = 44;
- unsigned long status;
-
- /* STATUS and ACK should ne done without any intermediate status change */
- mutex_lock(&interrupt_lock);
- DBG("interrupt_cnt: %d\n", interrupt_cnt++);
-
- /* Read and acknowledge interrupts */
- status = read_adapter(VIRTIO_MMIO_INTERRUPT_STATUS, 4);
- write_adapter(status, VIRTIO_MMIO_INTERRUPT_ACK, 4);
-
- if (unlikely(status & VIRTIO_MMIO_INT_CONFIG))
- virtio_config_changed(&vm_dev->vdev);
-
- if (likely(status & VIRTIO_MMIO_INT_VRING)) {
- spin_lock(&vm_dev->lock);
- list_for_each_entry(info, &vm_dev->virtqueues, node) {
- (void)vring_interrupt(irq, info->vq);
- }
- spin_unlock(&vm_dev->lock);
- }
- mutex_unlock(&interrupt_lock);
-}
-
-static void vm_del_vq(struct virtqueue *vq)
-{
- struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
- struct virtio_mmio_vq_info *info = vq->priv;
- unsigned long flags;
- unsigned int index = vq->index;
-
- spin_lock_irqsave(&vm_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vm_dev->lock, flags);
-
- /* Select and deactivate the queue */
- write_adapter(index, VIRTIO_MMIO_QUEUE_SEL, 4);
-
- if (vm_dev->version == 1) {
- write_adapter(0, VIRTIO_MMIO_QUEUE_PFN, 4);
- } else {
- write_adapter(0, VIRTIO_MMIO_QUEUE_READY, 4);
- WARN_ON(read_adapter(VIRTIO_MMIO_QUEUE_READY, 4));
- }
-
- vring_del_virtqueue(vq);
- kfree(info);
-}
-
-static void vm_del_vqs(struct virtio_device *vdev)
-{
- struct virtqueue *vq, *n;
-
- list_for_each_entry_safe(vq, n, &vdev->vqs, list)
- vm_del_vq(vq);
-}
-
-static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
- void (*callback)(struct virtqueue *vq),
- const char *name, bool ctx)
-{
- struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
- struct virtio_mmio_vq_info *info;
- struct virtqueue *vq;
- unsigned long flags;
- unsigned int num;
- int err;
-
- if (!name)
- return NULL;
-
- /* Select the queue we're interested in */
- write_adapter(index, VIRTIO_MMIO_QUEUE_SEL, 4);
-
- /* Queue shouldn't already be set up. */
- if (read_adapter((vm_dev->version == 1 ?
- VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY), 4)) {
- err = -ENOENT;
- goto error_available;
- }
-
- /* Allocate and fill out our active queue description */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto error_kmalloc;
- }
-
- num = read_adapter(VIRTIO_MMIO_QUEUE_NUM_MAX, 4);
- if (num == 0) {
- err = -ENOENT;
- goto error_new_virtqueue;
- }
-
- /* Create the vring */
- vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
- true, true, ctx, vm_notify, callback, name);
- if (!vq) {
- err = -ENOMEM;
- goto error_new_virtqueue;
- }
-
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(6,0,0)
- vq->num_max = num;
-#endif
- /* Activate the queue */
- write_adapter(virtqueue_get_vring_size(vq), VIRTIO_MMIO_QUEUE_NUM, 4);
- if (vm_dev->version == 1) {
- u64 q_pfn = virtqueue_get_desc_addr(vq);
-
- q_pfn = q_pfn >> PAGE_SHIFT;
-
- /* Copy the physical address and enable the mmap */
- vq_pfn = q_pfn;
- vq_pfns[vq_index++] = q_pfn;
-
- /* Save the virtqueue in a global variable */
- global_vq = vq;
- global_vring = virtqueue_get_vring(vq);
-
- /*
- * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
- * that doesn't fit in 32bit, fail the setup rather than
- * pretending to be successful.
- */
- if (q_pfn >> 32) {
- dev_err(&vdev->dev,
- "platform bug: legacy virtio-mmio must not "
- "be used with RAM above 0x%llxGB\n",
- 0x1ULL << (32 + PAGE_SHIFT - 30));
- err = -E2BIG;
- goto error_bad_pfn;
- }
-
- write_adapter(PAGE_SIZE, VIRTIO_MMIO_QUEUE_ALIGN, 4);
- write_adapter(q_pfn, VIRTIO_MMIO_QUEUE_PFN, 4);
- } else {
- u64 addr;
-
- addr = virtqueue_get_desc_addr(vq);
- write_adapter((u32)addr, VIRTIO_MMIO_QUEUE_DESC_LOW, 4);
- write_adapter((u32)(addr >> 32), VIRTIO_MMIO_QUEUE_DESC_HIGH, 4);
-
- addr = virtqueue_get_avail_addr(vq);
- write_adapter((u32)addr, VIRTIO_MMIO_QUEUE_AVAIL_LOW, 4);
- write_adapter((u32)(addr >> 32), VIRTIO_MMIO_QUEUE_AVAIL_HIGH, 4);
-
- addr = virtqueue_get_used_addr(vq);
- write_adapter((u32)addr, VIRTIO_MMIO_QUEUE_USED_LOW, 4);
- write_adapter((u32)(addr >> 32), VIRTIO_MMIO_QUEUE_USED_HIGH, 4);
-
- write_adapter(1, VIRTIO_MMIO_QUEUE_READY, 4);
- }
-
- vq->priv = info;
- info->vq = vq;
-
- spin_lock_irqsave(&vm_dev->lock, flags);
- list_add(&info->node, &vm_dev->virtqueues);
- spin_unlock_irqrestore(&vm_dev->lock, flags);
-
- return vq;
-
-error_bad_pfn:
- vring_del_virtqueue(vq);
-error_new_virtqueue:
- if (vm_dev->version == 1) {
- write_adapter(0, VIRTIO_MMIO_QUEUE_PFN, 4);
- } else {
- write_adapter(0, VIRTIO_MMIO_QUEUE_READY, 4);
- WARN_ON(read_adapter(VIRTIO_MMIO_QUEUE_READY, 4));
- }
- kfree(info);
-error_kmalloc:
-error_available:
- return ERR_PTR(err);
-}
-
-static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- const bool *ctx,
- struct irq_affinity *desc)
-{
- int i, queue_idx = 0;
-
- for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
- vqs[i] = NULL;
- continue;
- }
-
- vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false);
- if (IS_ERR(vqs[i])) {
- vm_del_vqs(vdev);
- return PTR_ERR(vqs[i]);
- }
- }
-
- return 0;
-}
-
-static const char *vm_bus_name(struct virtio_device *vdev)
-{
- struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
-
- return vm_dev->pdev->name;
-}
-
-static bool vm_get_shm_region(struct virtio_device *vdev,
- struct virtio_shm_region *region, u8 id)
-{
- u64 len, addr;
-
- /* Select the region we're interested in */
- write_adapter(id, VIRTIO_MMIO_SHM_SEL, 4);
-
- /* Read the region size */
- len = (u64) read_adapter(VIRTIO_MMIO_SHM_LEN_LOW, 4);
- len |= (u64) read_adapter(VIRTIO_MMIO_SHM_LEN_HIGH, 4) << 32;
-
- region->len = len;
-
- /* Check if region length is -1. If that's the case, the shared memory
- * region does not exist and there is no need to proceed further.
- */
- if (len == ~(u64)0)
- return false;
-
- /* Read the region base address */
- addr = (u64) read_adapter(VIRTIO_MMIO_SHM_BASE_LOW, 4);
- addr |= (u64) read_adapter(VIRTIO_MMIO_SHM_BASE_HIGH, 4) << 32;
-
- region->addr = addr;
-
- return true;
-}
-
-static const struct virtio_config_ops virtio_mmio_config_ops = {
- .get = vm_get,
- .set = vm_set,
- .generation = vm_generation,
- .get_status = vm_get_status,
- .set_status = vm_set_status,
- .reset = vm_reset,
- .find_vqs = vm_find_vqs,
- .del_vqs = vm_del_vqs,
- .get_features = vm_get_features,
- .finalize_features = vm_finalize_features,
- .bus_name = vm_bus_name,
- .get_shm_region = vm_get_shm_region,
-};
-
-static void virtio_mmio_release_dev(struct device *_d)
-{
- struct virtio_device *vdev = container_of(_d, struct virtio_device, dev);
- struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
- struct platform_device *pdev = vm_dev->pdev;
-
- devm_kfree(&pdev->dev, vm_dev);
-}
-
-static int virtio_mmio_probe(struct platform_device *pdev)
-{
- struct virtio_mmio_device *vm_dev;
- unsigned long magic;
- int rc;
-
- vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
- if (!vm_dev)
- return -ENOMEM;
-
- /* Save the device pointer globally */
- vm_dev_irq = vm_dev;
-
- vm_dev->vdev.dev.parent = &pdev->dev;
- vm_dev->vdev.dev.release = virtio_mmio_release_dev;
- vm_dev->vdev.config = &virtio_mmio_config_ops;
- vm_dev->pdev = pdev;
- INIT_LIST_HEAD(&vm_dev->virtqueues);
- spin_lock_init(&vm_dev->lock);
-
- /* Check magic value */
- magic = read_adapter(VIRTIO_MMIO_MAGIC_VALUE, 4);
-
- if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
- dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
- return -ENODEV;
- }
-
- /* Check device version */
- vm_dev->version = read_adapter(VIRTIO_MMIO_VERSION, 4);
-
- if (vm_dev->version < 1 || vm_dev->version > 2) {
- dev_err(&pdev->dev, "Version %ld not supported!\n",
- vm_dev->version);
- return -ENXIO;
- }
-
- vm_dev->vdev.id.device = read_adapter(VIRTIO_MMIO_DEVICE_ID, 4);
-
- if (vm_dev->vdev.id.device == 0) {
- /*
- * virtio-mmio device with an ID 0 is a (dummy) placeholder
- * with no function. End probing now with no error reported.
- */
- return -ENODEV;
- }
-
- vm_dev->vdev.id.vendor = read_adapter(VIRTIO_MMIO_VENDOR_ID, 4);
-
- if (vm_dev->version == 1) {
- write_adapter(PAGE_SIZE, VIRTIO_MMIO_GUEST_PAGE_SIZE, 4);
-
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- /*
- * In the legacy case, ensure our coherently-allocated virtio
- * ring will be at an address expressable as a 32-bit PFN.
- */
- if (!rc)
- dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32 + PAGE_SHIFT));
- } else {
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- }
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc)
- dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA."
- "Trying to continue, but this might not work.\n");
-
- platform_set_drvdata(pdev, vm_dev);
-
- rc = register_virtio_device(&vm_dev->vdev);
-
- if (rc)
- put_device(&vm_dev->vdev.dev);
-
- return rc;
-}
-
-static int virtio_mmio_remove(struct platform_device *pdev)
-{
- struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
-
- unregister_virtio_device(&vm_dev->vdev);
- DBG("unregister_virtio_device!\n");
- return 0;
-}
-
-/* Not need of DTS and ACPI */
-static struct platform_driver virtio_mmio_driver = {
- .probe = virtio_mmio_probe,
- .remove = virtio_mmio_remove,
- .driver = {
- .name = "loopback-transport",
- },
-};
-
-void pf_mmap_close(struct vm_area_struct *vma)
-{
- DBG("unmap\t-> vma->vm_start: 0x%lx\n", vma->vm_start);
- DBG("unmap\t-> size: %lu\n", vma->vm_end - vma->vm_start);
- share_mmap_rem(vma, share_mmap_list);
-}
-
-vm_fault_t pf_mmap_fault(struct vm_fault *vmf)
-{
- uint64_t corrected_pfn;
- pfn_t corr_pfn_struct;
- struct page *page;
- int ret = 0;
-
- DBG("----- Page fault: %lld -----\n", sum_pgfaults++);
-
- /* Find the corrected pfn */
- corrected_pfn = share_mmap_exist_vma_return_correct_pfn(vmf->address, share_mmap_list);
- corr_pfn_struct.val = corrected_pfn;
-
- /* Some debug prints */
- DBG("vma->vm_start: 0x%lx\n", vmf->vma->vm_start);
- DBG("vma->vm_pgoff: 0x%lx\n", vmf->vma->vm_pgoff);
- DBG("vmf->address: 0x%lx\n", vmf->address);
- DBG("corrected_pfn: 0x%llx\n", corrected_pfn);
- DBG("pfn_valid(corrected_pfn): 0x%x\n", pfn_valid(corrected_pfn));
-
- BUG_ON(!pfn_valid(corrected_pfn));
-
- /* After finding the page, correct the vmf->page */
- page = pfn_to_page(corrected_pfn);
- BUG_ON(!virt_addr_valid(page_address(page)));
-
- /* Insert the correct page */
- ret = vmf_insert_pfn(vmf->vma, vmf->address, corrected_pfn);
- DBG("vmf_insert_pfn -> ret: %d\n", ret);
-
- return ret;
-}
-
-const struct vm_operations_struct pf_mmap_ops = {
- .close = pf_mmap_close,
- .fault = pf_mmap_fault,
-};
-
-int pf_mmap_vm_page(struct file *filp, struct vm_area_struct *vma)
-{
- uint64_t size = (unsigned long)(vma->vm_end - vma->vm_start);
- uint64_t pfn = ((cur_ram_idx++) * 0x40000);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(6,3,0)
- vma->vm_flags |= VM_PFNMAP;
-#else
- vm_flags_set(vma, VM_PFNMAP);
-#endif
- add_share_mmap(filp, pfn, vma->vm_start, size, share_mmap_list, &mmap_index);
- return 0;
-}
-
-int op_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret = 0;
- uint64_t size = (unsigned long)(vma->vm_end - vma->vm_start);
-
- DBG("op_mmap -> vma->vm_pgoff: 0x%lx", vma->vm_pgoff);
-
- if (share_communication_struct) {
- DBG("MMAP communication struct\n");
- ret = mmap_communication_shared_space(filp, vma, share_mmap_list, &mmap_index);
- share_communication_struct = false;
- goto out;
- }
-
- if (size > PAGE_SIZE * 100) {
- ret = pf_mmap_vm_page(filp, vma);
- goto out;
- }
-
- ret = mmap_mix(filp, vma, share_mmap_list, &mmap_index, vq_pfn);
-
-out:
- vma->vm_ops = &pf_mmap_ops;
- return ret;
-}
-
-static uint64_t read_adapter(uint64_t fn_id, uint64_t size)
-{
- uint64_t result;
-
- mutex_lock(&read_write_lock);
-
- /*
- * By enabling the following line all
- * read messages will be printed:
- *
- * print_neg_flag(fn_id, 1);
- */
- print_neg_flag(fn_id, 1);
-
- ((virtio_neg_t *)(info->data))->notification = fn_id;
- ((virtio_neg_t *)(info->data))->data = 0;
- ((virtio_neg_t *)(info->data))->size = size;
- ((virtio_neg_t *)(info->data))->read = true;
-
- atomic_set(&((virtio_neg_t *)(info->data))->done, 0);
-
- eventfd_signal(efd_ctx, 1);
-
- /*
- * There is a chance virtio-loopback adapter to call "wake_up"
- * before the current thread sleep. This is the reason that
- * "wait_event_timeout" is used instead of "wait_event". In this
- * way, virtio-loopback driver will wake up even if has missed the
- * "wake_up" kick, check the updated "done" value and return.
- */
-
- while (valid_eventfd && atomic_read(&((virtio_neg_t *)(info->data))->done) != 1)
- wait_event_timeout(wq, atomic_read(&((virtio_neg_t *)(info->data))->done) == 1, 1 * HZ);
-
- result = ((virtio_neg_t *)(info->data))->data;
-
- mutex_unlock(&read_write_lock);
-
- return result;
-}
-
-static void write_adapter(uint64_t data, uint64_t fn_id, uint64_t size)
-{
-
- mutex_lock(&read_write_lock);
-
- /*
- * By enabling the following line all
- * write messages will be printed:
- *
- * print_neg_flag(fn_id, 1);
- */
- print_neg_flag(fn_id, 0);
-
- ((virtio_neg_t *)(info->data))->notification = fn_id;
- ((virtio_neg_t *)(info->data))->data = data;
- ((virtio_neg_t *)(info->data))->size = size;
- ((virtio_neg_t *)(info->data))->read = false;
-
- atomic_set(&((virtio_neg_t *)(info->data))->done, 0);
-
- eventfd_signal(efd_ctx, 1);
-
- /*
- * There is a chance virtio-loopback adapter to call "wake_up"
- * before the current thread sleep. This is the reason that
- * "wait_event_timeout" is used instead of "wait_event". In this
- * way, virtio-loopback driver will wake up even if has missed the
- * "wake_up" kick, check the updated "done" value and return.
- */
- while (valid_eventfd && atomic_read(&((virtio_neg_t *)(info->data))->done) != 1)
- wait_event_timeout(wq, atomic_read(&((virtio_neg_t *)(info->data))->done) == 1, 1 * HZ);
-
- mutex_unlock(&read_write_lock);
-}
-
-/* Defined for future work */
-static ssize_t loopback_write(struct file *file,
- const char __user *user_buffer,
- size_t size,
- loff_t *offset)
-{
- ssize_t len = sizeof(int);
-
- DBG("loopback write function is called\n");
- if (len <= 0)
- return 0;
-
- return len;
-}
-
-/* Defined for future work */
-static ssize_t loopback_read(struct file *file,
- char __user *user_buffer,
- size_t size, loff_t *offset)
-{
- DBG("loopback read function is called\n");
- return 0;
-}
-
-loff_t loopback_seek(struct file *file, loff_t offset, int whence)
-{
- loff_t new_pos;
-
- DBG("loopback seek function!\n");
- switch (whence) {
- case SEEK_SET:
- new_pos = offset;
- break;
- case SEEK_CUR:
- new_pos = file->f_pos + offset;
- break;
- case SEEK_END:
- new_pos = file->f_inode->i_size;
- break;
- default:
- return -EINVAL;
- }
-
- if (new_pos < 0 || new_pos > file->f_inode->i_size)
- return -EINVAL;
-
- return new_pos;
-}
-
-int start_loopback(void *data)
-{
- (void)data;
- /* Register mmio_trasmport */
- (void)platform_driver_register(&virtio_mmio_driver);
- return 0;
-}
-
-static long loopback_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- efd_data_t efd_data;
- struct task_struct *userspace_task;
- struct file *efd_file;
- int irq;
- uint32_t queue_sel;
-
- switch (cmd) {
- case EFD_INIT:
- if (copy_from_user(&efd_data, (efd_data_t *) arg,
- sizeof(efd_data_t)))
- return -EFAULT;
-
- userspace_task = pid_task(find_vpid(efd_data.pid), PIDTYPE_PID);
-
- rcu_read_lock();
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,220)
- efd_file = fcheck_files(userspace_task->files, efd_data.efd[0]);
-#else
- efd_file = files_lookup_fd_rcu(userspace_task->files, efd_data.efd[0]);
-#endif
-
- rcu_read_unlock();
-
- efd_ctx = eventfd_ctx_fileget(efd_file);
- if (!efd_ctx)
- return -1;
-
- break;
- case WAKEUP:
- atomic_set(&((virtio_neg_t *)(info->data))->done, 1);
- wake_up(&wq);
- break;
- case START_LOOPBACK:
- start_notification_thread = kthread_run(start_notification, NULL, "start_notification");
- start_loopback_thread = kthread_run(start_loopback, NULL, "start_loopback");
- break;
- case IRQ:
- if (copy_from_user(&irq, (int *) arg, sizeof(int)))
- return -EFAULT;
- DBG("\nIRQ\n");
- /*
- * Both of the interrupt ways work but a) is more stable
- * and b) has better performance:
- * a) vm_interrupt(NULL);
- * b) queue_work(interrupt_workqueue, &async_interrupt);
- */
- vm_interrupt(NULL);
- break;
- case SHARE_VQS:
- if (copy_from_user(&queue_sel, (uint32_t *) arg, sizeof(uint32_t)))
- return -EFAULT;
- DBG("\n\nSHARE_VQS: %u\n\n", queue_sel);
- vq_pfn = vq_pfns[queue_sel];
- DBG("Selected pfn is: 0x%llx", vq_pfn);
- break;
- case SHARE_COM_STRUCT:
- share_communication_struct = true;
- break;
- default:
- DBG("loopback ioctl: default, %u\n", cmd);
- return -ENOTTY;
- }
-
- return 0;
-}
-
-/*
- * The current implementation of the driver supports
- * exclusive access to one user-space thread. Multi-device
- * support will be added in future implementation.
- */
-static int loopback_open(struct inode *inode, struct file *file)
-{
- uint32_t val_1gb = 1024 * 1024 * 1024; // 1GB
- virtio_neg_t device_neg = {.done = ATOMIC_INIT(0)};
-
- /* Update the global variable, the driver is in use */
- if (test_and_set_bit(IN_USE_BIT, &loopback_flags)) {
- DBG("Driver is busy\n");
- return -EBUSY;
- }
-
- /* Set the i_size for the stat SYS_CALL*/
- file->f_inode->i_size = 10 * val_1gb;
-
- /* Init mmap funcitonality */
- info = kmalloc(sizeof(struct mmap_info), GFP_KERNEL);
- info->data = (void *)get_zeroed_page(GFP_KERNEL);
- memcpy(info->data, &device_neg, sizeof(virtio_neg_t));
-
- /* assign this info struct to the file */
- file->private_data = info;
-
- /* Init notification list */
- notify_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!notify_list) {
- printk(KERN_ERR "Failed to allocate memory for list head\n");
- return 1;
- }
- INIT_LIST_HEAD(notify_list);
-
- /* Init global variables */
- mmap_index = 0;
- sum_pgfaults = 0;
- share_communication_struct = false;
- valid_eventfd = true;
- vq_index = 0;
- cur_ram_idx = 0;
- interrupt_cnt = 0;
- notify_sent = 0;
- notify_received = 0;
-
- return 0;
-}
-
-static int loopback_release(struct inode *inode, struct file *file)
-{
- int i;
-
- DBG("Release the device\n");
-
- /*
- * This make the read/write do not wait
- * for the virtio-loopback-adapter if
- * the last has closed the fd
- */
- valid_eventfd = false;
-
- /* Unegister mmio_trasmport */
- platform_driver_unregister(&virtio_mmio_driver);
- DBG("platform_driver_unregister!\n");
-
- /* free communication structure */
- free_page((unsigned long)info->data);
- kfree(info);
- file->private_data = NULL;
- DBG("Clean private_data\n");
-
- /* Clear share_mmap_list */
- for(i = 0; i < MMAP_LIMIT; i++) {
- share_mmap_list[i].uid = 0;
- share_mmap_list[i].pfn = 0;
- share_mmap_list[i].vm_start = 0;
- share_mmap_list[i].size = 0;
- }
-
- /* Update the global variable, the driver is not in use */
- smp_mb__before_atomic();
- clear_bit(IN_USE_BIT, &loopback_flags);
- DBG("clear_bit!\n");
-
- return 0;
-}
-
-static const struct file_operations fops = {
- .owner = THIS_MODULE,
- .read = loopback_read,
- .write = loopback_write,
- .open = loopback_open,
- .unlocked_ioctl = loopback_ioctl,
- .mmap = op_mmap,
- .llseek = loopback_seek,
- .release = loopback_release
-};
-
-static int __init loopback_init(void)
-{
- int err;
- dev_t dev;
-
- err = alloc_chrdev_region(&dev, 0, MAX_DEV, "loopback");
- dev_major = MAJOR(dev);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(6,4,0)
- loopback_class = class_create(THIS_MODULE, "loopback");
-#else
- loopback_class = class_create("loopback");
-#endif
- if (IS_ERR(loopback_class)) {
- printk(KERN_ERR "Failed to create class\n");
- return PTR_ERR(loopback_class);
- }
-
- loopback_data = (struct loopback_device_data *)kmalloc(
- sizeof(struct loopback_device_data), GFP_KERNEL);
-
- cdev_init(&loopback_data->cdev, &fops);
- loopback_data->cdev.owner = THIS_MODULE;
- cdev_add(&loopback_data->cdev, MKDEV(dev_major, 0), 1);
- device_create(loopback_class, NULL, MKDEV(dev_major, 0), NULL, "loopback");
-
- /* Init wq */
- init_waitqueue_head(&wq);
- init_waitqueue_head(&wq_notify);
-
- /* Init mutex */
- mutex_init(&read_write_lock);
- mutex_init(&interrupt_lock);
-
- /* Init spinlock */
- spin_lock_init(&notify_q_spinlock);
-
- virtio_loopback_device = platform_device_register_simple("loopback-transport", -1, NULL, 0);
- if (IS_ERR(virtio_loopback_device)) {
- err = PTR_ERR(virtio_loopback_device);
- pr_err("failed to register loopback-transport device: %d\n", err);
- return err;
- }
-
- return 0;
-}
-
-void __exit loopback_exit(void)
-{
- DBG("Exit driver!\n");
-
- /* Unegister loopback device */
- platform_device_unregister(virtio_loopback_device);
- DBG("platform_device_unregister!\n");
-
- device_destroy(loopback_class, MKDEV(dev_major, 0));
- cdev_del(&loopback_data->cdev);
- DBG("device_destroy!\n");
-
- class_destroy(loopback_class);
- DBG("class_destroy!\n");
-
- kfree(loopback_data);
- DBG("Free resources");
-}
-
-module_init(loopback_init);
-module_exit(loopback_exit);
diff --git a/loopback_driver.h b/loopback_driver.h
deleted file mode 100644
index bb713aa..0000000
--- a/loopback_driver.h
+++ /dev/null
@@ -1,516 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Based on virtio_mmio.c
- * Copyright 2011-2014, ARM Ltd.
- *
- * Copyright 2022-2024 Virtual Open Systems SAS.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-#ifndef __LOOPBACK_H__
-#define __LOOPBACK_H__
-
-#define DRIVER "LOOPBACK"
-
-#include <linux/version.h>
-
-/* max Minor devices */
-#define MAX_DEV 1
-
-/* Define mmap elements limit */
-#define MMAP_LIMIT 200
-
-#ifdef DEBUG
-#define DBG(...) pr_crit(__VA_ARGS__)
-#else
-#define DBG(...)
-#endif /* DEBUG */
-
-/*
- * The alignment to use between consumer and producer parts of vring.
- * Currently hardcoded to the page size.
- */
-#define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE
-
-#define to_virtio_mmio_device(_plat_dev) \
- container_of(_plat_dev, struct virtio_mmio_device, vdev)
-
-typedef struct virtio_device_info_struct {
- unsigned long magic;
- unsigned long version;
- unsigned long device_id;
- unsigned long vendor;
-} virtio_device_info_struct_t;
-
-typedef struct virtio_neg {
- uint64_t notification;
- uint64_t data;
- uint64_t size;
- bool read;
- atomic_t done;
-} virtio_neg_t;
-
-struct virtio_mmio_device {
- struct virtio_device vdev;
- struct platform_device *pdev;
-
- void __iomem *base;
- unsigned long version;
-
- /* A list of queues so we can dispatch IRQs */
- spinlock_t lock;
- struct list_head virtqueues;
-};
-
-struct virtio_mmio_vq_info {
- /* the actual virtqueue */
- struct virtqueue *vq;
-
- /* the list node for the virtqueues list */
- struct list_head node;
-};
-
-
-/*
- * Print the pdev:
- *
- *static void print_virtio_pdev(struct platform_device *pdev)
- *{
- * int i;
- *
- * pr_info("Print the pdev:\n");
- * pr_info("\t.name = %s\n", pdev->name);
- * pr_info("\t.id = %d\n", pdev->id);
- * pr_info("\t.num_resources = %d\n", pdev->num_resources);
- *
- * for (i=0; i < pdev->num_resources; i++) {
- * pr_info("\t.num_resource = %d\n", i);
- * pr_info("\t\t.start = 0x%llx\n", pdev->resource[i].start);
- * pr_info("\t\t.end = 0x%llx\n", pdev->resource[i].end);
- * pr_info("\t\t.flags = 0x%lx\n", pdev->resource[i].flags);
- * }
- *}
- *
- *Result:
- *
- * .name = a003e00.virtio_mmio
- * .id = -1
- * .num_resources = 2
- * .num_resource = 0
- * .start = 0xa003e00
- * .end = 0xa003fff
- * .flags = 0x200
- * .num_resource = 1
- * .start = 0x2c
- * .end = 0x2c
- * .flags = 0x401
- */
-
-/* mmap finctionality */
-#ifndef VM_RESERVED
-#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
-#endif
-
-/* Define a bit for atomic test&set */
-#define IN_USE_BIT 0
-
-struct mmap_info {
- void *data;
- int reference;
-};
-
-/* Define a structure for your notify_list */
-struct notify_data {
- uint32_t index;
- struct list_head list;
-};
-
-/* This stuct is used to share the eventfds between driver and userspace */
-typedef struct efd_data {
- int efd[2];
- int pid;
-} efd_data_t;
-
-/* mmap functionality related structures */
-struct share_mmap {
- uint64_t pfn;
- uint64_t vm_start;
- uint32_t size;
- uint32_t uid;
- struct page *page;
-};
-
-/* Mmap help funcitons */
-/*
- * This functions registers all mmap calls done by the user-space into an array
- */
-void add_share_mmap(struct file *filp, uint64_t pfn, uint64_t vm_start,
- uint64_t size, struct share_mmap *share_mmap_list, int *mmap_index)
-{
- DBG("Add new mmaping! index: %d\n", *mmap_index);
- DBG("pfn: 0x%llx", pfn);
- DBG("vm_start: 0x%llx", vm_start);
- DBG("size: 0x%llx", size);
-
- share_mmap_list[*mmap_index].pfn = pfn;
- share_mmap_list[*mmap_index].vm_start = vm_start;
- share_mmap_list[*mmap_index].size = size;
- share_mmap_list[*mmap_index].uid = task_pid_nr(current);
- (*mmap_index)++;
-}
-
-/*
- * This functions removes a record from mmap array
- */
-void share_mmap_rem(struct vm_area_struct *vma, struct share_mmap *share_mmap_list)
-{
- int i;
-
- for (i = 0; i < MMAP_LIMIT; i++) {
- if (share_mmap_list[i].vm_start == vma->vm_start) {
- DBG("share_mmap with pa: 0x%llx and size: %x is deleted from the list\n",
- share_mmap_list[i].pfn, share_mmap_list[i].size);
- share_mmap_list[i].uid = 0;
- share_mmap_list[i].pfn = 0;
- share_mmap_list[i].vm_start = 0;
- share_mmap_list[i].size = 0;
- }
- }
-}
-
-void print_mmap_idx(int i, struct share_mmap *share_mmap_list)
-{
- DBG("share_mmap_list[%d].uid %x\n", i, share_mmap_list[i].uid);
- DBG("share_mmap_list[%d].pfn %llx\n", i, share_mmap_list[i].pfn);
- DBG("share_mmap_list[%d].vm_start %llx\n", i, share_mmap_list[i].vm_start);
- DBG("share_mmap_list[%d].size %x\n", i, share_mmap_list[i].size);
-}
-
-
-void print_mmaps(struct share_mmap *share_mmap_list, int mmap_index)
-{
- int i;
- int limit = mmap_index == 0 ? MMAP_LIMIT : mmap_index;
-
- for (i = 0; i < limit; i++)
- print_mmap_idx(i, share_mmap_list);
-}
-
-/*
- * This function return the corresponding pfn of a user-space address
- * based on the mapping done during the initialization
- */
-uint64_t share_mmap_exist_vma_return_correct_addr(uint64_t pfn, struct share_mmap *share_mmap_list)
-{
- int i;
- uint64_t corrected_addr;
-
- for (i = 0; i < MMAP_LIMIT; i++) {
- if ((share_mmap_list[i].pfn <= pfn) &&
- (pfn < share_mmap_list[i].pfn + (share_mmap_list[i].size >> PAGE_SHIFT)) &&
- (share_mmap_list[i].uid == task_pid_nr(current))) {
- DBG("pfn (0x%llx) exist in: 0x%llx - 0x%llx\n", pfn, share_mmap_list[i].pfn,
- share_mmap_list[i].pfn + (share_mmap_list[i].size >> PAGE_SHIFT));
- corrected_addr = ((pfn - share_mmap_list[i].pfn) << PAGE_SHIFT) + share_mmap_list[i].vm_start;
- DBG("The return addr is: 0x%llx\n", corrected_addr);
- return corrected_addr;
- }
- }
- return 0;
-}
-/*
- * This function return the corresponding user-space address of a pfn
- * based on the mapping done during the initialization
- */
-uint64_t share_mmap_exist_vma_return_correct_pfn(uint64_t addr, struct share_mmap *share_mmap_list)
-{
- int i;
- uint64_t corrected_pfn;
-
- for (i = 0; i < MMAP_LIMIT; i++) {
- if ((share_mmap_list[i].vm_start <= addr) &&
- (addr < share_mmap_list[i].vm_start + share_mmap_list[i].size)) {
- DBG("addr (0x%llx) exist in: 0x%llx - 0x%llx\n", addr, share_mmap_list[i].vm_start,
- share_mmap_list[i].vm_start + share_mmap_list[i].size);
- DBG("((addr - share_mmap_list[i].vm_start) / PAGE_SIZE): 0x%llx\n",
- ((addr - share_mmap_list[i].vm_start) / PAGE_SIZE));
- DBG("share_mmap_list[i].pfn: 0x%llx\n", share_mmap_list[i].pfn);
- corrected_pfn = ((addr - share_mmap_list[i].vm_start) / PAGE_SIZE) + share_mmap_list[i].pfn;
- return corrected_pfn;
- }
- }
- return 0;
-}
-
-/*
- * This function returns the size of memory block area referrenced by the vrings
- */
-uint64_t share_mmap_exist_vma_vring_size(uint64_t insert_pfn, struct vring *global_vring)
-{
- int i = 0;
- uint64_t next_pfn, mem_blk_size;
-
- while (((vring_desc_t)global_vring->desc[i]).addr != 0) {
-
- /* Get the current value of pfn and its size */
- next_pfn = ((vring_desc_t)global_vring->desc[i]).addr >> PAGE_SHIFT;
- mem_blk_size = ((vring_desc_t)global_vring->desc[i]).len;
-
- /* Check if the insert_pfn is found */
- if (insert_pfn == next_pfn) {
-
- DBG("Found 0x%llx into the vring\n", insert_pfn);
- /* Formalize the mem_blk_size to be multiple of PAGE_SIZE */
- mem_blk_size = mem_blk_size % PAGE_SIZE ?
- (mem_blk_size & PAGE_MASK) + PAGE_SIZE : mem_blk_size;
- DBG("The formalized size is %llu\n", mem_blk_size);
-
- return mem_blk_size;
- }
-
- /* Go to next element into the vring array */
- i++;
- }
-
- return PAGE_SIZE;
-}
-
-/*
- * This function tries to insert multiple PFNs into the user-space process.
- * The pfn of the starting page is given as an argument and the number of
- * pages to be inserted is calculated based on the memory block length found into
- * the vrings.
- */
-void vmf_insert_vring_pfns(struct vm_area_struct *vma, uint64_t vaddr,
- uint64_t insert_pfn, struct vring *global_vring)
-{
- int i, page_num, ret;
- uint64_t mem_blk_size;
-
- /* Formalize the mem_blk_size to be multiple of PAGE_SIZE */
- mem_blk_size = share_mmap_exist_vma_vring_size(insert_pfn, global_vring);
-
- page_num = mem_blk_size / PAGE_SIZE;
- DBG("page_num: %u, need to be inserted\n", page_num);
-
- for (i = 0; i < page_num; i++) {
- DBG("\tTry to insert 0x%llx pfn into vaddr: 0x%llx with size of 0x%llx\n", insert_pfn, vaddr, mem_blk_size);
- if (!pfn_valid(insert_pfn))
- break;
-
- ret = vmf_insert_pfn(vma, vaddr, insert_pfn);
- DBG("vmf_insert_pfn returns: 0x%x\n", ret);
-
- /* Go to the next page of the memory block */
- vaddr += PAGE_SIZE;
- insert_pfn++;
- }
-}
-
-int mmap_mix(struct file *filp, struct vm_area_struct *vma,
- struct share_mmap *share_mmap_list, int *mmap_index,
- uint64_t vq_pfn)
-{
- int ret = 0;
- unsigned long size = (unsigned long)(vma->vm_end - vma->vm_start);
-
- pr_crit("mmap mixx");
-
- ret = remap_pfn_range(vma, vma->vm_start, vq_pfn, size, vma->vm_page_prot);
- if (ret != 0) {
- DBG("Mmap error\n");
- print_mmaps(share_mmap_list, *mmap_index);
- goto out;
- }
-
- add_share_mmap(filp, vq_pfn, vma->vm_start, size, share_mmap_list, mmap_index);
-
-out:
- return ret;
-}
-
-/* This funciton shares the communication struct with the userspace */
-int mmap_communication_shared_space(struct file *filp, struct vm_area_struct *vma,
- struct share_mmap *share_mmap_list, int *mmap_index)
-{
- unsigned long size = (unsigned long)(vma->vm_end - vma->vm_start);
- struct mmap_info *com_mmap_virt = ((struct mmap_info *)(filp->private_data))->data;
- uint64_t com_mmap_pfn = ((uint64_t)virt_to_phys(com_mmap_virt)) >> PAGE_SHIFT;
- int ret;
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0)
- vma->vm_flags |= VM_RESERVED;
-#else
- vm_flags_set(vma, VM_RESERVED);
-#endif
-
- ret = remap_pfn_range(vma, vma->vm_start, com_mmap_pfn, size, vma->vm_page_prot);
-
- if (ret != 0) {
- DBG("Error to mmap communication shared space\n");
- goto out;
- }
-
- add_share_mmap(filp, com_mmap_pfn, vma->vm_start, size, share_mmap_list, mmap_index);
-
-out:
- return ret;
-}
-
-/* A debug log function to help track the execution */
-void print_neg_flag(uint64_t neg_flag, bool read)
-{
- if (read)
- DBG("Read:\n");
- else
- DBG("Write:\n");
-
- switch (neg_flag) {
- case VIRTIO_MMIO_MAGIC_VALUE: //0x000
- DBG("\tVIRTIO_MMIO_MAGIC_VALUE\n");
- break;
- case VIRTIO_MMIO_VERSION: //0x004
- DBG("\tVIRTIO_MMIO_VERSION\n");
- break;
- case VIRTIO_MMIO_DEVICE_ID: //0x008
- DBG("\tVIRTIO_MMIO_DEVICE_ID\n");
- break;
- case VIRTIO_MMIO_VENDOR_ID: //0x00c
- DBG("\tVIRTIO_MMIO_VENDOR_ID\n");
- break;
- case VIRTIO_MMIO_DEVICE_FEATURES: //0x010
- DBG("\tVIRTIO_MMIO_DEVICE_FEATURES\n");
- break;
- case VIRTIO_MMIO_DEVICE_FEATURES_SEL: //0x014
- DBG("\tVIRTIO_MMIO_DEVICE_FEATURES_SEL\n");
- break;
- case VIRTIO_MMIO_DRIVER_FEATURES: //0x020
- DBG("\tVIRTIO_MMIO_DRIVER_FEATURES\n");
- break;
- case VIRTIO_MMIO_DRIVER_FEATURES_SEL: //0x024
- DBG("\tVIRTIO_MMIO_DRIVER_FEATURES_SEL\n");
- break;
- case VIRTIO_MMIO_GUEST_PAGE_SIZE: //0x028
- DBG("\tVIRTIO_MMIO_GUEST_PAGE_SIZE\n");
- break;
- case VIRTIO_MMIO_QUEUE_SEL: //0x030
- DBG("\tVIRTIO_MMIO_QUEUE_SEL\n");
- break;
- case VIRTIO_MMIO_QUEUE_NUM_MAX: //0x034
- DBG("\tVIRTIO_MMIO_QUEUE_NUM_MAX\n");
- break;
- case VIRTIO_MMIO_QUEUE_NUM: //0x038
- DBG("\tVIRTIO_MMIO_QUEUE_NUM\n");
- break;
- case VIRTIO_MMIO_QUEUE_ALIGN: //0x03c
- DBG("\tVIRTIO_MMIO_QUEUE_ALIGN\n");
- break;
- case VIRTIO_MMIO_QUEUE_PFN: //0x040
- DBG("\tVIRTIO_MMIO_QUEUE_PFN\n");
- break;
- case VIRTIO_MMIO_QUEUE_READY: //0x044
- DBG("\tVIRTIO_MMIO_QUEUE_READY\n");
- break;
- case VIRTIO_MMIO_QUEUE_NOTIFY: //0x050
- DBG("\tVIRTIO_MMIO_QUEUE_NOTIFY\n");
- break;
- case VIRTIO_MMIO_INTERRUPT_STATUS: //0x060
- DBG("\tVIRTIO_MMIO_INTERRUPT_STATUS\n");
- break;
- case VIRTIO_MMIO_INTERRUPT_ACK: //0x064
- DBG("\tVIRTIO_MMIO_INTERRUPT_ACK\n");
- break;
- case VIRTIO_MMIO_STATUS: //0x070
- DBG("\tVIRTIO_MMIO_STATUS\n");
- break;
- case VIRTIO_MMIO_QUEUE_DESC_LOW: //0x080
- DBG("\tVIRTIO_MMIO_QUEUE_DESC_LOW\n");
- break;
- case VIRTIO_MMIO_QUEUE_DESC_HIGH: //0x084
- DBG("\tVIRTIO_MMIO_QUEUE_DESC_HIGH\n");
- break;
- case VIRTIO_MMIO_QUEUE_AVAIL_LOW: //0x090
- DBG("\tVIRTIO_MMIO_QUEUE_AVAIL_LOW\n");
- break;
- case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: //0x094
- DBG("\tVIRTIO_MMIO_QUEUE_AVAIL_HIGH\n");
- break;
- case VIRTIO_MMIO_QUEUE_USED_LOW: //0x0a0
- DBG("\tVIRTIO_MMIO_QUEUE_USED_LOW\n");
- break;
- case VIRTIO_MMIO_QUEUE_USED_HIGH: //0x0a4
- DBG("\tVIRTIO_MMIO_QUEUE_USED_HIGH\n");
- break;
- case VIRTIO_MMIO_SHM_SEL: //0x0ac
- DBG("\tVIRTIO_MMIO_SHM_SEL\n");
- break;
- case VIRTIO_MMIO_SHM_LEN_LOW: //0x0b0
- DBG("\tVIRTIO_MMIO_SHM_LEN_LOW\n");
- break;
- case VIRTIO_MMIO_SHM_LEN_HIGH: //0x0b4
- DBG("\tVIRTIO_MMIO_SHM_LEN_HIGH\n");
- break;
- case VIRTIO_MMIO_SHM_BASE_LOW: //0x0b8
- DBG("\tVIRTIO_MMIO_SHM_BASE_LOW\n");
- break;
- case VIRTIO_MMIO_SHM_BASE_HIGH: //0x0bc
- DBG("\tVIRTIO_MMIO_SHM_BASE_HIGH\n");
- break;
- case VIRTIO_MMIO_CONFIG_GENERATION: //0x0fc
- DBG("\tVIRTIO_MMIO_CONFIG_GENERATION\n");
- break;
- default:
- if (neg_flag >= VIRTIO_MMIO_CONFIG)
- DBG("\tVIRTIO_MMIO_CONFIG\n");
- else
- DBG("\tNegotiation flag Unknown: %lld\n", neg_flag);
- return;
- }
-}
-
-void print_data(const void *buf, size_t size)
-{
- int offset = 10;
- int i, j;
-
- DBG("Print data from linux virtio-rng side:\n");
-
- printk(KERN_CRIT "");
-
- for (i = 0; i < size; i += offset) {
- printk(KERN_CONT "\t\t");
-
- for (j = i; (j < i + offset) && (j < size); j++)
- printk(KERN_CONT "%d, ", *((uint8_t *)(buf + j)));
-
- printk(KERN_CRIT "");
- }
-}
-
-/* IOCTL defines */
-#define EFD_INIT _IOC(_IOC_WRITE, 'k', 1, sizeof(efd_data))
-#define WAKEUP _IOC(_IOC_WRITE, 'k', 2, 0)
-#define START_LOOPBACK _IOC(_IOC_WRITE, 'k', 3, sizeof(virtio_device_info_struct_t))
-#define IRQ _IOC(_IOC_WRITE, 'k', 4, sizeof(int))
-#define SHARE_VQS _IOC(_IOC_WRITE, 'k', 5, sizeof(uint32_t))
-#define SHARE_BUF _IOC(_IOC_WRITE, 'k', 6, sizeof(uint64_t))
-#define SHARE_COM_STRUCT _IOC(_IOC_WRITE, 'k', 7, 0)
-
-/* device data holder, this structure may be extended to hold additional data */
-struct loopback_device_data {
- struct cdev cdev;
-};
-
-#endif /* __VIRTUALNET_H__ */
diff --git a/virtio_loopback_device.c b/virtio_loopback_device.c
new file mode 100644
index 0000000..0604afd
--- /dev/null
+++ b/virtio_loopback_device.c
@@ -0,0 +1,1471 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtio loopback transport driver
+ *
+ * Based on virtio_mmio.c
+ * Copyright 2011-2014, ARM Ltd.
+ *
+ * Copyright 2022-2024 Virtual Open Systems SAS
+ *
+ * Authors:
+ * Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
+ * Anna Panagopoulou <anna@virtualopensystems.com>
+ * Alvise Rigo <a.rigo@virtualopensystems.com>
+ *
+ * This module allows virtio devices to be used in a non-virtualized
+ * environment, coupled with vhost-user device (user-space drivers).
+ *
+ * It is set as a transport driver by the virtio-loopback device
+ * driver for a group of virtio drivers and reroutes all read/write
+ * operations to the userspace. In user-space, virtio-loopback adapter
+ * (the user-space component of the design) handles the read/write ops
+ * translates them into the corresponding vhost-user messages and
+ * forwards them to the corresponding vhost-user device.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "virtio-loopback-transport: " fmt
+
+/* Loopback header file */
+#include "virtio_loopback_driver.h"
+
+static void print_neg_flag(uint64_t neg_flag, bool read)
+{
+ if (read)
+ pr_debug("Read:\n");
+ else
+ pr_debug("Write:\n");
+
+ switch (neg_flag) {
+ case VIRTIO_MMIO_MAGIC_VALUE:
+ pr_debug("\tVIRTIO_MMIO_MAGIC_VALUE\n");
+ break;
+ case VIRTIO_MMIO_VERSION:
+ pr_debug("\tVIRTIO_MMIO_VERSION\n");
+ break;
+ case VIRTIO_MMIO_DEVICE_ID:
+ pr_debug("\tVIRTIO_MMIO_DEVICE_ID\n");
+ break;
+ case VIRTIO_MMIO_VENDOR_ID:
+ pr_debug("\tVIRTIO_MMIO_VENDOR_ID\n");
+ break;
+ case VIRTIO_MMIO_DEVICE_FEATURES:
+ pr_debug("\tVIRTIO_MMIO_DEVICE_FEATURES\n");
+ break;
+ case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
+ pr_debug("\tVIRTIO_MMIO_DEVICE_FEATURES_SEL\n");
+ break;
+ case VIRTIO_MMIO_DRIVER_FEATURES:
+ pr_debug("\tVIRTIO_MMIO_DRIVER_FEATURES\n");
+ break;
+ case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
+ pr_debug("\tVIRTIO_MMIO_DRIVER_FEATURES_SEL\n");
+ break;
+ case VIRTIO_MMIO_GUEST_PAGE_SIZE:
+ pr_debug("\tVIRTIO_MMIO_GUEST_PAGE_SIZE\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_SEL:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_SEL\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_NUM_MAX:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_NUM_MAX\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_NUM:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_NUM\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_ALIGN:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_ALIGN\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_PFN:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_PFN\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_READY:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_READY\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_NOTIFY:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_NOTIFY\n");
+ break;
+ case VIRTIO_MMIO_INTERRUPT_STATUS:
+ pr_debug("\tVIRTIO_MMIO_INTERRUPT_STATUS\n");
+ break;
+ case VIRTIO_MMIO_INTERRUPT_ACK:
+ pr_debug("\tVIRTIO_MMIO_INTERRUPT_ACK\n");
+ break;
+ case VIRTIO_MMIO_STATUS:
+ pr_debug("\tVIRTIO_MMIO_STATUS\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_DESC_LOW:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_DESC_LOW\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_DESC_HIGH:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_DESC_HIGH\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_AVAIL_LOW\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_AVAIL_HIGH\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_USED_LOW:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_USED_LOW\n");
+ break;
+ case VIRTIO_MMIO_QUEUE_USED_HIGH:
+ pr_debug("\tVIRTIO_MMIO_QUEUE_USED_HIGH\n");
+ break;
+ case VIRTIO_MMIO_SHM_SEL:
+ pr_debug("\tVIRTIO_MMIO_SHM_SEL\n");
+ break;
+ case VIRTIO_MMIO_SHM_LEN_LOW:
+ pr_debug("\tVIRTIO_MMIO_SHM_LEN_LOW\n");
+ break;
+ case VIRTIO_MMIO_SHM_LEN_HIGH:
+ pr_debug("\tVIRTIO_MMIO_SHM_LEN_HIGH\n");
+ break;
+ case VIRTIO_MMIO_SHM_BASE_LOW:
+ pr_debug("\tVIRTIO_MMIO_SHM_BASE_LOW\n");
+ break;
+ case VIRTIO_MMIO_SHM_BASE_HIGH:
+ pr_debug("\tVIRTIO_MMIO_SHM_BASE_HIGH\n");
+ break;
+ case VIRTIO_MMIO_CONFIG_GENERATION:
+ pr_debug("\tVIRTIO_MMIO_CONFIG_GENERATION\n");
+ break;
+ default:
+ if (neg_flag >= VIRTIO_MMIO_CONFIG)
+ pr_debug("\tVIRTIO_MMIO_CONFIG\n");
+ else
+ pr_debug("\tNegotiation flag Unknown: %lld\n",
+ neg_flag);
+ return;
+ }
+}
+
+/* function declaration */
+static uint64_t read_adapter(uint64_t fn_id, uint64_t size,
+ struct device_data *dev_data);
+static void write_adapter(uint64_t data, uint64_t fn_id, uint64_t size,
+ struct device_data *dev_data);
+
+/* Configuration interface */
+/* Configuration interface: read the 64-bit device feature word */
+static u64 vl_get_features(struct virtio_device *vdev)
+{
+	struct virtio_loopback_device *vl_dev = to_virtio_loopback_device(vdev);
+	struct device_data *dev_data = vl_dev->data;
+	u64 hi, lo;
+
+	/* Bank 1 holds feature bits 32-63, bank 0 holds bits 0-31 */
+	write_adapter(1, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 4, dev_data);
+	hi = read_adapter(VIRTIO_MMIO_DEVICE_FEATURES, 4, dev_data);
+
+	write_adapter(0, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 4, dev_data);
+	lo = read_adapter(VIRTIO_MMIO_DEVICE_FEATURES, 4, dev_data);
+
+	return (hi << 32) | lo;
+}
+
+/* Push the driver-accepted feature bits back through the adapter */
+static int vl_finalize_features(struct virtio_device *vdev)
+{
+	struct virtio_loopback_device *vl_dev = to_virtio_loopback_device(vdev);
+	struct device_data *dev_data = vl_dev->data;
+	u32 hi_feats = (u32)(vdev->features >> 32);
+	u32 lo_feats = (u32)vdev->features;
+
+	/* Give virtio_ring a chance to accept features. */
+	vring_transport_features(vdev);
+
+	/* Version-2 transports must negotiate VIRTIO_F_VERSION_1 */
+	if (vl_dev->version == 2 &&
+	    !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
+		dev_err(&vdev->dev,
+			"New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
+		return -EINVAL;
+	}
+
+	/* Write the high bank (sel = 1), then the low bank (sel = 0) */
+	write_adapter(1, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 4, dev_data);
+	write_adapter(hi_feats, VIRTIO_MMIO_DRIVER_FEATURES, 4, dev_data);
+
+	write_adapter(0, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 4, dev_data);
+	write_adapter(lo_feats, VIRTIO_MMIO_DRIVER_FEATURES, 4, dev_data);
+
+	return 0;
+}
+
+/*
+ * Read @len bytes of device config space at @offset into @buf.
+ *
+ * Legacy (version 1) transports are read one byte at a time with no
+ * endianness conversion. Modern transports read at the natural access
+ * width and store little-endian values; a 64-bit field is fetched as
+ * two 32-bit halves, low word first.
+ */
+static void vl_get(struct virtio_device *vdev, unsigned int offset,
+		   void *buf, unsigned int len)
+{
+	struct virtio_loopback_device *vl_dev = to_virtio_loopback_device(vdev);
+	struct device_data *data = vl_dev->data;
+
+	u8 b;
+	__le16 w;
+	__le32 l;
+
+	if (vl_dev->version == 1) {
+		/* Legacy: byte-wise access, device-native byte order */
+		u8 *ptr = buf;
+		int i;
+
+		for (i = 0; i < len; i++)
+			ptr[i] = read_adapter(VIRTIO_MMIO_CONFIG + offset + i,
+					      1, data);
+		return;
+	}
+
+	switch (len) {
+	case 1:
+		b = read_adapter(VIRTIO_MMIO_CONFIG + offset, 1, data);
+		memcpy(buf, &b, sizeof(b));
+		break;
+	case 2:
+		w = cpu_to_le16(read_adapter(VIRTIO_MMIO_CONFIG + offset,
+					     2, data));
+		memcpy(buf, &w, sizeof(w));
+		break;
+	case 4:
+		l = cpu_to_le32(read_adapter(VIRTIO_MMIO_CONFIG + offset,
+					     4, data));
+		memcpy(buf, &l, sizeof(l));
+		break;
+	case 8:
+		/* 64-bit field: low 32 bits first, then high 32 bits */
+		l = cpu_to_le32(read_adapter(VIRTIO_MMIO_CONFIG + offset,
+					     4, data));
+		memcpy(buf, &l, sizeof(l));
+		l = cpu_to_le32(read_adapter(
+				VIRTIO_MMIO_CONFIG + offset + sizeof(l),
+				4, data));
+		memcpy(buf + sizeof(l), &l, sizeof(l));
+		break;
+	default:
+		/* Callers only pass 1/2/4/8-byte fields */
+		BUG();
+	}
+}
+
+/*
+ * Write @len bytes from @buf into device config space at @offset.
+ *
+ * Mirror of vl_get(): legacy transports are written byte-by-byte;
+ * modern transports convert to little-endian and write at the natural
+ * width, splitting 64-bit fields into two 32-bit writes, low word first.
+ */
+static void vl_set(struct virtio_device *vdev, unsigned int offset,
+		   const void *buf, unsigned int len)
+{
+	struct virtio_loopback_device *vl_dev = to_virtio_loopback_device(vdev);
+	struct device_data *data = vl_dev->data;
+
+	u8 b;
+	__le16 w;
+	__le32 l;
+
+	if (vl_dev->version == 1) {
+		/* Legacy: byte-wise access, device-native byte order */
+		const u8 *ptr = buf;
+		int i;
+
+		for (i = 0; i < len; i++)
+			write_adapter(ptr[i], VIRTIO_MMIO_CONFIG + offset + i,
+				      1, data);
+
+		return;
+	}
+
+	switch (len) {
+	case 1:
+		memcpy(&b, buf, sizeof(b));
+		write_adapter(b, VIRTIO_MMIO_CONFIG + offset, 1, data);
+		break;
+	case 2:
+		memcpy(&w, buf, sizeof(w));
+		write_adapter(le16_to_cpu(w), VIRTIO_MMIO_CONFIG + offset,
+			      2, data);
+		break;
+	case 4:
+		memcpy(&l, buf, sizeof(l));
+		write_adapter(le32_to_cpu(l), VIRTIO_MMIO_CONFIG + offset,
+			      4, data);
+		break;
+	case 8:
+		/* 64-bit field: low 32 bits first, then high 32 bits */
+		memcpy(&l, buf, sizeof(l));
+		write_adapter(le32_to_cpu(l), VIRTIO_MMIO_CONFIG + offset,
+			      4, data);
+		memcpy(&l, buf + sizeof(l), sizeof(l));
+		write_adapter(le32_to_cpu(l),
+			      VIRTIO_MMIO_CONFIG + offset + sizeof(l),
+			      4, data);
+		break;
+	default:
+		/* Callers only pass 1/2/4/8-byte fields */
+		BUG();
+	}
+}
+
+/* Config-generation counter; legacy transports do not provide one */
+static u32 vl_generation(struct virtio_device *vdev)
+{
+	struct virtio_loopback_device *vl_dev = to_virtio_loopback_device(vdev);
+
+	return vl_dev->version == 1 ?
+		0 :
+		read_adapter(VIRTIO_MMIO_CONFIG_GENERATION, 4, vl_dev->data);
+}
+
+/* Read the device status register; only the low byte is meaningful */
+static u8 vl_get_status(struct virtio_device *vdev)
+{
+	struct device_data *dev_data = to_virtio_loopback_device(vdev)->data;
+
+	return read_adapter(VIRTIO_MMIO_STATUS, 4, dev_data) & 0xff;
+}
+
+/* Write @status into the device status register */
+static void vl_set_status(struct virtio_device *vdev, u8 status)
+{
+	struct device_data *dev_data = to_virtio_loopback_device(vdev)->data;
+
+	write_adapter(status, VIRTIO_MMIO_STATUS, 4, dev_data);
+}
+
+/* Reset the device: writing 0 to the status register means reset */
+static void vl_reset(struct virtio_device *vdev)
+{
+	struct device_data *dev_data = to_virtio_loopback_device(vdev)->data;
+
+	write_adapter(0, VIRTIO_MMIO_STATUS, 4, dev_data);
+}
+
+/*
+ * Notify work handling function.
+ *
+ * Drains the device's pending-notification list, forwarding each queue
+ * index to the adapter. Entries are popped from the list head one at a
+ * time: the original list_for_each_entry_safe() dropped the lock inside
+ * the walk, so its cached 'tmp' pointer could be freed by a concurrent
+ * list manipulation before the lock was re-taken.
+ */
+static void notify_work_handler(struct work_struct *work)
+{
+	struct virtio_loopback_device *vl_dev =
+		container_of(work, struct virtio_loopback_device, notify_work);
+	struct device_data *dev_data = vl_dev->data;
+	struct notify_data *entry;
+	uint32_t index;
+
+	spin_lock(&vl_dev->notify_q_lock);
+	while ((entry = list_first_entry_or_null(&vl_dev->notify_list,
+						 struct notify_data, list))) {
+		index = entry->index;
+		list_del(&entry->list);
+		kfree(entry);
+		/* Dispatch to the adapter without holding the lock */
+		spin_unlock(&vl_dev->notify_q_lock);
+		write_adapter(index, VIRTIO_MMIO_QUEUE_NOTIFY, 4, dev_data);
+		spin_lock(&vl_dev->notify_q_lock);
+	}
+	spin_unlock(&vl_dev->notify_q_lock);
+}
+
+/*
+ * Deliver one queue notification: signal the vhost-user device's
+ * eventfd directly when per-queue notifiers are enabled, otherwise
+ * queue the notify work. Always returns true.
+ */
+static bool trigger_notification(struct virtqueue *vq)
+{
+	struct virtio_loopback_device *vl_dev =
+		to_virtio_loopback_device(vq->vdev);
+	struct eventfd_ctx **vq_notifiers = vl_dev->data->vq_data.vq_notifiers;
+	bool vq_notifiers_enabled = vl_dev->data->vq_data.vq_notifiers_enabled;
+	bool retry;
+
+	if (vq_notifiers_enabled && (vq_notifiers[vq->index])) {
+		/* Notify directly vhost-user-device bypassing the adapter */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(6, 7, 12)
+		eventfd_signal(vq_notifiers[vq->index]);
+#else
+		eventfd_signal(vq_notifiers[vq->index], 1);
+#endif
+	} else {
+		/*
+		 * Schedule the notify work. The original loop tested an
+		 * uninitialized 'ret' (undefined behavior); a do/while
+		 * guarantees the first queue_work() attempt happens.
+		 */
+		do {
+			/*
+			 * Force rescheduling if queue_work fails while
+			 * the notification list is not empty.
+			 */
+			retry = !queue_work(vl_dev->notify_workqueue,
+					    &vl_dev->notify_work);
+			spin_lock(&vl_dev->notify_q_lock);
+			retry = retry && !list_empty(&vl_dev->notify_list);
+			spin_unlock(&vl_dev->notify_q_lock);
+		} while (retry);
+	}
+
+	return true;
+}
+
+/*
+ * Dispatch one pending notification of @vl_dev to its virtqueue.
+ * (The previous header comment was a copy-paste of notify_work_handler's.)
+ *
+ * Entries are popped from the list head so no iterator is held across
+ * the unlock/relock; the original list_for_each_entry_safe() could
+ * resume from a stale 'tmp' after re-acquiring the lock.
+ */
+static void trigger_dev_notif(struct virtio_loopback_device *vl_dev)
+{
+	struct notify_data *entry;
+	uint32_t index;
+	struct virtio_loopback_vq_info *info;
+
+	if (atomic_read(&vl_dev->data->avail_notifs) == 0)
+		return;
+
+	spin_lock(&vl_dev->notify_q_lock);
+	while ((entry = list_first_entry_or_null(&vl_dev->notify_list,
+						 struct notify_data, list))) {
+		index = entry->index;
+		list_del(&entry->list);
+		kfree(entry);
+		spin_unlock(&vl_dev->notify_q_lock);
+
+		/* Decrease atomically the notification counters */
+		atomic_dec(&vl_dev->data->avail_notifs);
+		atomic_dec(&loopback_devices.pending_notifs);
+
+		/* Find the corresponding vq and trigger the notification */
+		list_for_each_entry(info, &vl_dev->virtqueues, node) {
+			if (info->vq->index == index) {
+				(void)trigger_notification(info->vq);
+				/* Only one notification is served per call */
+				return;
+			}
+		}
+		spin_lock(&vl_dev->notify_q_lock);
+	}
+	spin_unlock(&vl_dev->notify_q_lock);
+}
+
+/* True when any device has notifications waiting to be scheduled */
+static bool available_notifications(void)
+{
+	int pending = atomic_read(&loopback_devices.pending_notifs);
+
+	return pending > 0;
+}
+
+/* Store @remaining_credits, clamped to [0, init_notif_credits] */
+static void set_dev_credits(struct virtio_loopback_device *vl_dev, int64_t remaining_credits)
+{
+	int64_t new_credits = 0;
+
+	if (remaining_credits > 0) {
+		new_credits = remaining_credits;
+		if (new_credits > vl_dev->data->vdev_data->init_notif_credits)
+			new_credits = vl_dev->data->vdev_data->init_notif_credits;
+	}
+
+	atomic_set(&vl_dev->data->notif_credits, (uint32_t)new_credits);
+}
+
+/* Refill the device's full time budget and stamp it as just-served */
+static void reset_credits(struct virtio_loopback_device *vl_dev)
+{
+	set_dev_credits(vl_dev, vl_dev->data->vdev_data->init_notif_credits);
+	vl_dev->data->served_timestamp = ktime_get();
+}
+
+/* Remaining scheduling credits (milliseconds of time slice) */
+static uint32_t read_dev_credits(struct virtio_loopback_device *vl_dev)
+{
+	return (uint32_t)atomic_read(&vl_dev->data->notif_credits);
+}
+
+/* Number of notifications queued on this device */
+static uint32_t read_dev_notifs(struct virtio_loopback_device *vl_dev)
+{
+	return (uint32_t)atomic_read(&vl_dev->data->avail_notifs);
+}
+
+/* First node of the global device list, or NULL when the list is empty */
+static struct virtio_loopback_device_node *head_elem(void)
+{
+	struct virtio_loopback_device_node *first;
+
+	spin_lock(&loopback_devices.running_lock);
+	first = list_first_entry_or_null(&loopback_devices.virtio_devices_list,
+					 struct virtio_loopback_device_node,
+					 node);
+	spin_unlock(&loopback_devices.running_lock);
+
+	return first;
+}
+
+/*
+ * Advance to the next node of the global device list, wrapping back to
+ * the head when the end is reached.
+ *
+ * list_next_entry() is now performed under running_lock: the original
+ * code traversed unlocked, racing with concurrent list_del() in
+ * del_dev_from_list().
+ */
+static struct virtio_loopback_device_node *
+	next_elem(struct virtio_loopback_device_node *device)
+{
+	bool at_head;
+
+	spin_lock(&loopback_devices.running_lock);
+	device = list_next_entry(device, node);
+	at_head = list_entry_is_head(device,
+				     &loopback_devices.virtio_devices_list,
+				     node);
+	spin_unlock(&loopback_devices.running_lock);
+
+	/* If we reached the list head, wrap around to the beginning */
+	if (at_head)
+		device = head_elem();
+
+	return device;
+}
+
+/*
+ * Allocate a scheduler node for the device stored at
+ * loopback_devices.devices[array_dev_pos] and append it to the global
+ * round-robin list. Returns false on allocation failure.
+ */
+bool add_dev_to_list(uint32_t array_dev_pos)
+{
+	struct virtio_loopback_device_node *dev_node;
+
+	/* sizeof(*ptr) ties the allocation size to the variable's type */
+	dev_node = kmalloc(sizeof(*dev_node), GFP_ATOMIC);
+	if (!dev_node)
+		return false;
+
+	/* TODO: Check the next line */
+	dev_node->vq_index = array_dev_pos;
+	atomic_set(&dev_node->is_deleted, 0);
+
+	/*
+	 * No INIT_LIST_HEAD needed: list_add_tail() overwrites both
+	 * node pointers.
+	 */
+	spin_lock(&loopback_devices.running_lock);
+	list_add_tail(&dev_node->node, &loopback_devices.virtio_devices_list);
+	spin_unlock(&loopback_devices.running_lock);
+
+	return true;
+}
+
+/* Has this node been flagged for removal by note_dev_deletion()? */
+static bool is_dev_deleted(struct virtio_loopback_device_node *device)
+{
+	return atomic_read(&device->is_deleted) != 0;
+}
+
+/* Unlink @device from the global list, then release its memory */
+static void del_dev_from_list(struct virtio_loopback_device_node *device)
+{
+	spin_lock(&loopback_devices.running_lock);
+	list_del(&device->node);
+	spin_unlock(&loopback_devices.running_lock);
+
+	kfree(device);
+}
+
+/*
+ * void clean_dev_notifs_inters(struct virtio_loopback_device_node *device)
+ * {
+ * struct notify_data *entry, *tmp;
+ * struct virtio_loopback_device *vl_dev = loopback_devices.devices[device->vq_index];
+ * int i, avail_inters = atomic_read(&vl_dev->data->avail_inters);
+ *
+ * spin_lock(&vl_dev->notify_q_lock);
+ * list_for_each_entry_safe(entry, tmp, &vl_dev->notify_list, list) {
+ * atomic_dec(&vl_dev->data->avail_notifs);
+ * atomic_dec(&loopback_devices.pending_notifs);
+ * }
+ * spin_unlock(&vl_dev->notify_q_lock);
+ *
+ * for (i = 0; i < avail_inters; i++) {
+ * atomic_dec(&vl_dev->data->avail_inters);
+ * atomic_dec(&loopback_devices.pending_inters);
+ * }
+ * }
+ */
+
+/*
+ * Mark the scheduler node belonging to @vl_dev as deleted so the
+ * notification thread can reap it safely.
+ *
+ * The flag is set while still holding running_lock: previously the node
+ * could be freed by del_dev_from_list() between the unlock and the
+ * atomic_set(), leaving a use-after-free window.
+ */
+void note_dev_deletion(struct virtio_loopback_device *vl_dev)
+{
+	struct virtio_loopback_device_node *device;
+
+	spin_lock(&loopback_devices.running_lock);
+	list_for_each_entry(device, &loopback_devices.virtio_devices_list, node) {
+		if (vl_dev == loopback_devices.devices[device->vq_index]) {
+			atomic_set(&device->is_deleted, 1);
+			break;
+		}
+	}
+	spin_unlock(&loopback_devices.running_lock);
+}
+
+/*
+ * void clean_deleted_devs(void)
+ * {
+ * struct virtio_loopback_device_node *temp = NULL;
+ *
+ * spin_lock(&loopback_devices.running_lock);
+ * list_for_each_entry_safe(device, temp, &loopback_devices.virtio_devices_list, node) {
+ * if (is_dev_deleted(device)) {
+ * list_del(&device->node);
+ * kfree(device);
+ * }
+ * }
+ * spin_unlock(&loopback_devices.running_lock);
+ * }
+ */
+
+/* Remove and free every node of the global device list */
+static void clean_all_devs(void)
+{
+	struct virtio_loopback_device_node *dev_node, *next;
+
+	spin_lock(&loopback_devices.running_lock);
+	list_for_each_entry_safe(dev_node, next,
+				 &loopback_devices.virtio_devices_list, node) {
+		list_del(&dev_node->node);
+		kfree(dev_node);
+	}
+	spin_unlock(&loopback_devices.running_lock);
+}
+
+/*
+ * static bool is_node_in_list(struct virtio_loopback_device_node *device)
+ * {
+ * struct virtio_loopback_device_node *temp;
+ * bool ret = false;
+ *
+ * rcu_read_lock();
+ * list_for_each_entry_rcu(temp, &loopback_devices.virtio_devices_list, node) {
+ * if (temp == device) {
+ * ret = true;
+ * break;
+ * }
+ * }
+ * rcu_read_unlock();
+ *
+ * return ret;
+ * }
+ */
+
+/* True when any device has interrupts waiting to be scheduled */
+static bool available_interrupts(void)
+{
+	int pending = atomic_read(&loopback_devices.pending_inters);
+
+	return pending > 0;
+}
+
+/* Number of interrupts queued on this device */
+static uint32_t read_dev_inters(struct virtio_loopback_device *vl_dev)
+{
+	return (uint32_t)atomic_read(&vl_dev->data->avail_inters);
+}
+
+/*
+ * Highest priority group among live devices that currently have
+ * pending notifications or interrupts; 0 when none are active.
+ */
+static uint32_t highest_active_priority_notifs(void)
+{
+	struct virtio_loopback_device_node *dev_node;
+	struct virtio_loopback_device *dev;
+	uint32_t highest = 0;
+	bool active;
+
+	spin_lock(&loopback_devices.running_lock);
+	list_for_each_entry(dev_node, &loopback_devices.virtio_devices_list, node) {
+		if (is_dev_deleted(dev_node))
+			continue;
+		dev = loopback_devices.devices[dev_node->vq_index];
+		active = (read_dev_notifs(dev) > 0) ||
+			 (read_dev_inters(dev) > 0);
+		if (active && dev->data->priority_group > highest)
+			highest = dev->data->priority_group;
+	}
+	spin_unlock(&loopback_devices.running_lock);
+
+	return highest;
+}
+
+/* Recompute and publish the highest active priority group */
+static void update_highest_active_prior_notifs(void)
+{
+	atomic_set(&loopback_devices.highest_active_prior_notifs,
+		   highest_active_priority_notifs());
+}
+
+/* Is @vl_dev at (or above) the highest currently-active priority? */
+static bool dev_highest_prior_notifs(struct virtio_loopback_device *vl_dev)
+{
+	uint32_t highest =
+		atomic_read(&loopback_devices.highest_active_prior_notifs);
+
+	return vl_dev->data->priority_group >= highest;
+}
+
+/* Timestamp of the last time this device's credits were refilled */
+static uint64_t read_dev_served_timestamp(struct virtio_loopback_device *vl_dev)
+{
+	return vl_dev->data->served_timestamp;
+}
+
+/*
+ * True when @curr_vl_dev is the least-recently-served *active* device
+ * of its own priority group (active = pending notifications or
+ * interrupts).
+ */
+static bool oldest_active_dev_in_group(struct virtio_loopback_device *curr_vl_dev)
+{
+	struct virtio_loopback_device_node *dev_node;
+	struct virtio_loopback_device *dev;
+	uint64_t oldest = (uint64_t)ktime_get();
+
+	spin_lock(&loopback_devices.running_lock);
+	list_for_each_entry(dev_node, &loopback_devices.virtio_devices_list, node) {
+		if (is_dev_deleted(dev_node))
+			continue;
+		dev = loopback_devices.devices[dev_node->vq_index];
+		/* Skip idle devices */
+		if ((read_dev_notifs(dev) == 0) && (read_dev_inters(dev) == 0))
+			continue;
+		/* Only devices of the same priority group count */
+		if (dev->data->priority_group !=
+		    curr_vl_dev->data->priority_group)
+			continue;
+		/* Track the oldest service timestamp in the group */
+		if (read_dev_served_timestamp(dev) < oldest)
+			oldest = read_dev_served_timestamp(dev);
+	}
+	spin_unlock(&loopback_devices.running_lock);
+
+	return oldest == read_dev_served_timestamp(curr_vl_dev);
+}
+
+/* Deliver a transport interrupt to every virtqueue of the device */
+static bool vl_interrupt(struct virtio_loopback_device *vl_dev, int irq)
+{
+	struct virtio_loopback_vq_info *info;
+
+	spin_lock(&vl_dev->lock);
+	list_for_each_entry(info, &vl_dev->virtqueues, node)
+		(void)vring_interrupt(irq, info->vq);
+	spin_unlock(&vl_dev->lock);
+
+	return true;
+}
+
+/*
+ * Pseudo algorithm: with groups (implementation 1)
+ *
+ * For dev in dev_list
+ *
+ * if dev->priority != active_list_highest_prior or
+ * dev_idle or
+ * dev_older_in_group()
+ * go next
+ *
+ * while(time(dev_credits) {
+ * trigger_notifications
+ * }
+ *
+ * update_highest_priority()
+ *
+ */
+
+/*
+ * Pseudo algorithm: with groups (implementation 2)
+ *
+ * idle_list_dev = dev_1, dev_2, ... , dev_n
+ * active_list_dev = null
+ * active_list_highest_prior = 'A'
+ *
+ * for dev in active_list_dev
+ *
+ * if dev->priority != active_list_highest_prior or
+ * dev_older_in_group()
+ * go next
+ *
+ * while(time(cred_dev))
+ * trigger_notifications
+ *
+ * remove(dev, active_list_dev)
+ * add(dev, idle_list_dev)
+ * update_highest_priority()
+ *
+ */
+
+/*
+ * Notification scheduler thread (runs until kthread_stop()).
+ *
+ * Round-robins over the global device list; each runnable device gets
+ * its pending notifications/interrupts drained for a time slice bounded
+ * by its credits. A device is skipped while it is idle, while a
+ * higher-priority device is active, or while an older unserved device
+ * exists in its priority group. Returns 0 on clean stop, 1 if the
+ * device list was empty at startup.
+ */
+int notif_sched_func(void *data)
+{
+	struct virtio_loopback_device *vl_dev;
+	struct virtio_loopback_device_node *device = NULL, *temp = NULL;
+	ktime_t starting_time, deadline;
+
+	/* Wait the first notification */
+	while (!available_notifications() && !kthread_should_stop()) {
+		wait_event_timeout(
+			loopback_devices.wq_notifs_inters,
+			available_notifications() || kthread_should_stop(),
+			100 * HZ);
+	}
+
+	if (kthread_should_stop())
+		goto sched_exit;
+
+	device = head_elem();
+	if (unlikely(!device)) {
+		pr_err("Device list is empty - exit\n");
+		return 1;
+	}
+
+	while (!kthread_should_stop()) {
+		if ((available_notifications() ||
+		     available_interrupts()) &&
+		    !list_empty(&loopback_devices.virtio_devices_list)) {
+
+			/* Lazily reap nodes flagged by note_dev_deletion() */
+			if (is_dev_deleted(device)) {
+				temp = device;
+				device = next_elem(device);
+				del_dev_from_list(temp);
+				continue;
+			}
+
+			vl_dev = loopback_devices.devices[device->vq_index];
+
+			pr_debug("Available notifs: %u\n", atomic_read(&loopback_devices.pending_notifs));
+			pr_debug("Available inters: %u\n", atomic_read(&loopback_devices.pending_inters));
+			pr_debug("Device %lu avail credits: %u, avail notifications %u, avail_inters: %u\n",
+				 vl_dev->data->vdev_data->init_notif_credits,
+				 read_dev_credits(vl_dev),
+				 read_dev_inters(vl_dev),
+				 read_dev_notifs(vl_dev));
+
+			/*
+			 * We need to go to the next device if:
+			 * a) Current device does not have available notifications AND
+			 *    current device does not have available interrupts
+			 * b) There is another pending device with higher priority
+			 * c) There is another pending device in the same group
+			 *    which has not been served for longer time.
+			 */
+
+			if (((read_dev_notifs(vl_dev) == 0) &&
+			     (read_dev_inters(vl_dev) == 0)) ||
+			    (!dev_highest_prior_notifs(vl_dev)) ||
+			    (!oldest_active_dev_in_group(vl_dev))) {
+				device = next_elem(device);
+				continue;
+			}
+
+			pr_debug("Run Device %lu\n",
+				 vl_dev->data->vdev_data->init_notif_credits);
+
+			/*
+			 * Keep the active highest priority in a variable
+			 * and continue triggering notifications only if the
+			 * device has priority equal to or higher than it.
+			 * This hands control to the highest-priority device
+			 * immediately, without waiting for the running
+			 * device to complete its turn.
+			 */
+			starting_time = ktime_get();
+			deadline = ktime_add_ms(starting_time, read_dev_credits(vl_dev));
+			while (ktime_before(starting_time, deadline) &&
+			       !kthread_should_stop() &&
+			       dev_highest_prior_notifs(vl_dev)) {
+				if (read_dev_notifs(vl_dev) > 0) {
+					trigger_dev_notif(vl_dev);
+				} else if (read_dev_inters(vl_dev) > 0) {
+					/* Consume one interrupt credit pair */
+					atomic_dec(&vl_dev->data->avail_inters);
+					atomic_dec(&loopback_devices.pending_inters);
+
+					vl_interrupt(vl_dev, 0);
+				} else {
+					/* Give some time for the current device */
+					wait_event_timeout(
+						vl_dev->wq_notifs_inters,
+						(read_dev_notifs(vl_dev) > 0) ||
+						(read_dev_inters(vl_dev) > 0) ||
+						kthread_should_stop(),
+						msecs_to_jiffies(5));
+				}
+				/* Update current time */
+				starting_time = ktime_get();
+			}
+
+			/*
+			 * If the device has not consumed its entire time,
+			 * save the remaining credits for later usage.
+			 */
+			set_dev_credits(vl_dev, ktime_ms_delta(deadline, starting_time));
+			if (read_dev_credits(vl_dev) == 0)
+				reset_credits(vl_dev);
+
+			device = next_elem(device);
+			update_highest_active_prior_notifs();
+
+		} else {
+			/* Nothing pending anywhere: sleep until woken */
+			wait_event_timeout(
+				loopback_devices.wq_notifs_inters,
+				((available_notifications() || available_interrupts()) &&
+				 !list_empty(&loopback_devices.virtio_devices_list)) ||
+				kthread_should_stop(),
+				100 * HZ);
+		}
+	}
+
+sched_exit:
+	pr_info("Clean any remaining devices\n");
+	clean_all_devs();
+
+	pr_info("Exiting notification thread\n");
+	return 0;
+}
+
+/*
+ * The notify function used when creating a virtqueue.
+ *
+ * Queues the virtqueue index on the device's notify list. With
+ * priorities enabled the scheduler thread dispatches it later;
+ * otherwise it is delivered immediately via trigger_notification().
+ * Returns false only on allocation failure.
+ */
+static bool vl_notify(struct virtqueue *vq)
+{
+	struct virtio_loopback_device *vl_dev =
+		to_virtio_loopback_device(vq->vdev);
+	struct notify_data *data;
+
+	pr_debug("VIRTIO_NOTIFY\n");
+
+	/* Create the new node */
+	data = kmalloc(sizeof(struct notify_data), GFP_ATOMIC);
+	if (!data)
+		return false;
+
+	data->index = vq->index;
+	INIT_LIST_HEAD(&data->list);
+
+	/* Add in the notify_list, which should be protected! */
+	spin_lock(&vl_dev->notify_q_lock);
+	list_add_tail(&data->list, &vl_dev->notify_list);
+	spin_unlock(&vl_dev->notify_q_lock);
+
+	pr_debug("Add notification for Device %lu avail credits: %u, avail notifications %u\n",
+		 vl_dev->data->vdev_data->init_notif_credits,
+		 read_dev_credits(vl_dev),
+		 read_dev_notifs(vl_dev));
+
+	/*
+	 * If device has priorities enabled, add the notification into
+	 * the list and leave the notification thread to schedule it
+	 * when this is appropriate.
+	 */
+	if (vl_dev->data->vdev_data->priority_enabled) {
+		pr_debug("WAKEUP notification list\n");
+
+		/* Raise the published highest priority if we exceed it */
+		spin_lock(&vl_dev->notify_q_lock);
+		if (vl_dev->data->priority_group >
+		    atomic_read(&loopback_devices.highest_active_prior_notifs))
+			atomic_set(&loopback_devices.highest_active_prior_notifs,
+				   vl_dev->data->priority_group);
+		spin_unlock(&vl_dev->notify_q_lock);
+
+		/* Update atomically the notification counters */
+		atomic_inc(&vl_dev->data->avail_notifs);
+		atomic_inc(&loopback_devices.pending_notifs);
+
+		/* Wake both the per-device and the global scheduler waiters */
+		wake_up(&vl_dev->wq_notifs_inters);
+		wake_up(&loopback_devices.wq_notifs_inters);
+
+		return true;
+	} else {
+		return trigger_notification(vq);
+	}
+}
+
+
+/*
+ * The interrupt function used when receiving an IRQ.
+ *
+ * With priorities enabled the interrupt is only accounted and the
+ * scheduler thread is woken to deliver it; otherwise it is delivered
+ * synchronously through vl_interrupt().
+ */
+bool register_interrupt(struct virtio_loopback_device *vl_dev, int irq)
+{
+	if (vl_dev->data->vdev_data->priority_enabled) {
+
+		pr_debug("Add notification for Device %lu avail credits: %u, avail inters %u\n",
+			 vl_dev->data->vdev_data->init_notif_credits,
+			 read_dev_credits(vl_dev),
+			 read_dev_inters(vl_dev));
+
+		/* Raise the published highest priority if we exceed it */
+		spin_lock(&vl_dev->notify_q_lock);
+		if (vl_dev->data->priority_group >
+		    atomic_read(&loopback_devices.highest_active_prior_notifs))
+			atomic_set(&loopback_devices.highest_active_prior_notifs,
+				   vl_dev->data->priority_group);
+		spin_unlock(&vl_dev->notify_q_lock);
+
+		/* Account the pending interrupt, then wake the scheduler */
+		atomic_inc(&vl_dev->data->avail_inters);
+		atomic_inc(&loopback_devices.pending_inters);
+
+		pr_debug("WAKEUP interrupt list\n");
+		wake_up(&vl_dev->wq_notifs_inters);
+		wake_up(&loopback_devices.wq_notifs_inters);
+
+		return true;
+	} else {
+		return vl_interrupt(vl_dev, irq);
+	}
+}
+
+
+/* Tear down one virtqueue: unlink it, deactivate it, free the ring */
+static void vl_del_vq(struct virtqueue *vq)
+{
+	struct virtio_loopback_device *vl_dev =
+		to_virtio_loopback_device(vq->vdev);
+	struct device_data *dev_data = vl_dev->data;
+	struct virtio_loopback_vq_info *info = vq->priv;
+	unsigned int index = vq->index;
+	unsigned long flags;
+
+	/* Drop the queue from the device's bookkeeping list */
+	spin_lock_irqsave(&vl_dev->lock, flags);
+	list_del(&info->node);
+	spin_unlock_irqrestore(&vl_dev->lock, flags);
+
+	/* Select and deactivate the queue */
+	write_adapter(index, VIRTIO_MMIO_QUEUE_SEL, 4, dev_data);
+
+	if (vl_dev->version == 1) {
+		write_adapter(0, VIRTIO_MMIO_QUEUE_PFN, 4, dev_data);
+	} else {
+		write_adapter(0, VIRTIO_MMIO_QUEUE_READY, 4, dev_data);
+		WARN_ON(read_adapter(VIRTIO_MMIO_QUEUE_READY, 4, dev_data));
+	}
+
+	vring_del_virtqueue(vq);
+	kfree(info);
+}
+
+/* Tear down every virtqueue registered on @vdev */
+static void vl_del_vqs(struct virtio_device *vdev)
+{
+	struct virtqueue *vq, *tmp;
+
+	list_for_each_entry_safe(vq, tmp, &vdev->vqs, list)
+		vl_del_vq(vq);
+}
+
+/*
+ * Create and activate virtqueue @index on @vdev.
+ *
+ * Selects the queue through the adapter, allocates the vring and
+ * bookkeeping info, programs the queue addresses (legacy PFN register
+ * for version 1, split DESC/AVAIL/USED registers otherwise), and links
+ * the queue into vl_dev->virtqueues. Returns the virtqueue or an
+ * ERR_PTR; on failure the queue is deactivated again.
+ */
+static struct virtqueue *vl_setup_vq(struct virtio_device *vdev,
+				     unsigned int index,
+				     void (*callback)(struct virtqueue *vq),
+				     const char *name, bool ctx)
+{
+	struct virtio_loopback_device *vl_dev = to_virtio_loopback_device(vdev);
+	struct device_data *data = vl_dev->data;
+	struct virtio_loopback_vq_info *info;
+	struct virtqueue *vq;
+	unsigned long flags;
+	unsigned int num;
+	int err;
+
+	if (!name)
+		return NULL;
+
+	/* Select the queue we're interested in */
+	write_adapter(index, VIRTIO_MMIO_QUEUE_SEL, 4, data);
+
+	/* Queue shouldn't already be set up. */
+	if (read_adapter((vl_dev->version == 1 ?
+			  VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY),
+			 4, data)) {
+		err = -ENOENT;
+		goto error_available;
+	}
+
+	/* Allocate and fill out our active queue description */
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		err = -ENOMEM;
+		goto error_kmalloc;
+	}
+
+	/* num == 0 means the device does not provide this queue */
+	num = read_adapter(VIRTIO_MMIO_QUEUE_NUM_MAX, 4, data);
+	if (num == 0) {
+		err = -ENOENT;
+		goto error_new_virtqueue;
+	}
+
+	/* Create the vring */
+	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+				    true, true, ctx, vl_notify, callback, name);
+	if (!vq) {
+		err = -ENOMEM;
+		goto error_new_virtqueue;
+	}
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(6, 0, 0)
+	vq->num_max = num;
+#endif
+
+	/* Activate the queue */
+	write_adapter(virtqueue_get_vring_size(vq), VIRTIO_MMIO_QUEUE_NUM, 4,
+		      data);
+	if (vl_dev->version == 1) {
+		u64 q_pfn = virtqueue_get_desc_addr(vq);
+
+		q_pfn = q_pfn >> PAGE_SHIFT;
+
+		/* Copy the physical address and enable the mmap */
+		data->vq_data.vq_pfn = q_pfn;
+		data->vq_data.vq_pfns[data->vq_data.vq_index++] = q_pfn;
+
+		/*
+		 * virtio-loopback v1 uses a 32bit QUEUE PFN. If we have
+		 * something that doesn't fit in 32bit, fail the setup rather
+		 * than pretending to be successful.
+		 */
+		if (q_pfn >> 32) {
+			dev_err(&vdev->dev,
+				"platform bug: legacy virtio-loopback must not be used with RAM above 0x%llxGB\n",
+				0x1ULL << (32 + PAGE_SHIFT - 30));
+			err = -E2BIG;
+			goto error_bad_pfn;
+		}
+
+		write_adapter(PAGE_SIZE, VIRTIO_MMIO_QUEUE_ALIGN, 4, data);
+		write_adapter(q_pfn, VIRTIO_MMIO_QUEUE_PFN, 4, data);
+	} else {
+		/* Modern path: program the three ring addresses, then READY */
+		u64 addr;
+
+		addr = virtqueue_get_desc_addr(vq);
+		write_adapter((u32)addr, VIRTIO_MMIO_QUEUE_DESC_LOW, 4, data);
+		write_adapter((u32)(addr >> 32), VIRTIO_MMIO_QUEUE_DESC_HIGH,
+			      4, data);
+
+		addr = virtqueue_get_avail_addr(vq);
+		write_adapter((u32)addr, VIRTIO_MMIO_QUEUE_AVAIL_LOW, 4, data);
+		write_adapter((u32)(addr >> 32), VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
+			      4, data);
+
+		addr = virtqueue_get_used_addr(vq);
+		write_adapter((u32)addr, VIRTIO_MMIO_QUEUE_USED_LOW, 4, data);
+		write_adapter((u32)(addr >> 32), VIRTIO_MMIO_QUEUE_USED_HIGH,
+			      4, data);
+
+		write_adapter(1, VIRTIO_MMIO_QUEUE_READY, 4, data);
+	}
+
+	vq->priv = info;
+	info->vq = vq;
+
+	spin_lock_irqsave(&vl_dev->lock, flags);
+	list_add(&info->node, &vl_dev->virtqueues);
+	spin_unlock_irqrestore(&vl_dev->lock, flags);
+
+	return vq;
+
+error_bad_pfn:
+	vring_del_virtqueue(vq);
+error_new_virtqueue:
+	/* Deactivate the queue we selected above before bailing out */
+	if (vl_dev->version == 1) {
+		write_adapter(0, VIRTIO_MMIO_QUEUE_PFN, 4, data);
+	} else {
+		write_adapter(0, VIRTIO_MMIO_QUEUE_READY, 4, data);
+		WARN_ON(read_adapter(VIRTIO_MMIO_QUEUE_READY, 4, data));
+	}
+	kfree(info);
+error_kmalloc:
+error_available:
+	return ERR_PTR(err);
+}
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(6, 10, 8)
+/*
+ * Create @nvqs virtqueues (pre-6.11 parallel-arrays API). Entries with
+ * a NULL name are skipped; on any failure all queues created so far
+ * are torn down and the error is returned.
+ */
+static int vl_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+		       struct virtqueue *vqs[],
+		       vq_callback_t *callbacks[],
+		       const char * const names[],
+		       const bool *ctx,
+		       struct irq_affinity *desc)
+{
+	int i, queue_idx = 0;
+
+	for (i = 0; i < nvqs; ++i) {
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		}
+
+		vqs[i] = vl_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
+				     ctx ? ctx[i] : false);
+		if (IS_ERR(vqs[i])) {
+			vl_del_vqs(vdev);
+			return PTR_ERR(vqs[i]);
+		}
+	}
+
+	return 0;
+}
+#else
+/*
+ * Create @nvqs virtqueues (6.11+ struct virtqueue_info API). Same
+ * semantics as the legacy variant above.
+ */
+static int vl_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+		       struct virtqueue *vqs[],
+		       struct virtqueue_info vqs_info[],
+		       struct irq_affinity *desc)
+{
+	int i, queue_idx = 0;
+
+	for (i = 0; i < nvqs; ++i) {
+		struct virtqueue_info *vqi = &vqs_info[i];
+
+		if (!vqi->name) {
+			vqs[i] = NULL;
+			continue;
+		}
+
+		vqs[i] = vl_setup_vq(vdev, queue_idx++, vqi->callback,
+				     vqi->name, vqi->ctx);
+		if (IS_ERR(vqs[i])) {
+			vl_del_vqs(vdev);
+			return PTR_ERR(vqs[i]);
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/* Bus name reported to virtio core: the backing platform device's name. */
+static const char *vl_bus_name(struct virtio_device *vdev)
+{
+	return to_virtio_loopback_device(vdev)->pdev->name;
+}
+
+/*
+ * Query a shared-memory region from the adapter: select region @id,
+ * then read its length and base address over the transport.  A length
+ * of all-ones means the region does not exist.
+ */
+static bool vl_get_shm_region(struct virtio_device *vdev,
+			      struct virtio_shm_region *region, u8 id)
+{
+	struct virtio_loopback_device *vl_dev = to_virtio_loopback_device(vdev);
+	struct device_data *data = vl_dev->data;
+	u64 lo, hi;
+
+	/* Select the region we're interested in */
+	write_adapter(id, VIRTIO_MMIO_SHM_SEL, 4, data);
+
+	/* Read the region size, low word first */
+	lo = (u64) read_adapter(VIRTIO_MMIO_SHM_LEN_LOW, 4, data);
+	hi = (u64) read_adapter(VIRTIO_MMIO_SHM_LEN_HIGH, 4, data);
+	region->len = lo | (hi << 32);
+
+	/* A length of -1 marks a non-existent region: stop here */
+	if (region->len == ~(u64)0)
+		return false;
+
+	/* Read the region base address, low word first */
+	lo = (u64) read_adapter(VIRTIO_MMIO_SHM_BASE_LOW, 4, data);
+	hi = (u64) read_adapter(VIRTIO_MMIO_SHM_BASE_HIGH, 4, data);
+	region->addr = lo | (hi << 32);
+
+	return true;
+}
+
+/*
+ * virtio_config_ops backed by the loopback transport: each operation is
+ * ultimately forwarded to the user-space adapter through read_adapter()
+ * and write_adapter() on the shared communication page.
+ */
+static const struct virtio_config_ops virtio_loopback_config_ops = {
+	.get = vl_get,
+	.set = vl_set,
+	.generation = vl_generation,
+	.get_status = vl_get_status,
+	.set_status = vl_set_status,
+	.reset = vl_reset,
+	.find_vqs = vl_find_vqs,
+	.del_vqs = vl_del_vqs,
+	.get_features = vl_get_features,
+	.finalize_features = vl_finalize_features,
+	.bus_name = vl_bus_name,
+	.get_shm_region = vl_get_shm_region,
+};
+
+/*
+ * Release callback of the embedded virtio device: frees the per-device
+ * state that was devm-allocated against the platform device in probe.
+ */
+static void virtio_loopback_release_dev(struct device *_d)
+{
+	struct virtio_device *vdev =
+		container_of(_d, struct virtio_device, dev);
+	struct virtio_loopback_device *vl_dev = to_virtio_loopback_device(vdev);
+	struct platform_device *pdev = vl_dev->pdev;
+
+	pr_debug("virtio_loopback_release_dev\n");
+
+	/* Deallocate platform data */
+	devm_kfree(&pdev->dev, vl_dev);
+}
+
+/* Function to carry-out the registration of the virtio_loopback */
+/*
+ * loopback_register_virtio_dev - validate the transport and register the
+ * virtio device with the virtio core.
+ *
+ * Reads magic/version/device-id/vendor-id through the adapter, configures
+ * DMA masks (legacy devices need a 32-bit-PFN-expressable ring), and calls
+ * register_virtio_device().
+ *
+ * Returns 0 on success or a negative errno.  Fix: the original always
+ * returned 0, hiding a failed register_virtio_device(); the result is now
+ * propagated to the caller.
+ */
+int loopback_register_virtio_dev(struct virtio_loopback_device *vl_dev)
+{
+	struct platform_device *pdev = vl_dev->pdev;
+	struct device_data *data = vl_dev->data;
+	unsigned long magic;
+	int rc;
+
+	/* Check magic value ("virt" little-endian) */
+	magic = read_adapter(VIRTIO_MMIO_MAGIC_VALUE, 4, data);
+	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
+		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
+		return -ENODEV;
+	}
+
+	/* Check device version: only legacy (1) and modern (2) supported */
+	vl_dev->version = read_adapter(VIRTIO_MMIO_VERSION, 4, data);
+	if (vl_dev->version < 1 || vl_dev->version > 2) {
+		dev_err(&pdev->dev, "Version %ld not supported!\n",
+			vl_dev->version);
+		return -ENXIO;
+	}
+
+	vl_dev->vdev.id.device = read_adapter(VIRTIO_MMIO_DEVICE_ID, 4, data);
+	if (vl_dev->vdev.id.device == 0) {
+		/*
+		 * virtio-loopback device with an ID 0 is a (dummy) placeholder
+		 * with no function. End probing now with no error reported.
+		 */
+		return -ENODEV;
+	}
+
+	vl_dev->vdev.id.vendor = read_adapter(VIRTIO_MMIO_VENDOR_ID, 4, data);
+
+	if (vl_dev->version == 1) {
+		write_adapter(PAGE_SIZE, VIRTIO_MMIO_GUEST_PAGE_SIZE, 4, data);
+
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+		/*
+		 * In the legacy case, ensure our coherently-allocated virtio
+		 * ring will be at an address expressable as a 32-bit PFN.
+		 */
+		if (!rc)
+			dma_set_coherent_mask(&pdev->dev,
+					      DMA_BIT_MASK(32 + PAGE_SHIFT));
+	} else {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	}
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		dev_warn(&pdev->dev,
+			 "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
+	/* Register the virtio device in the system */
+	rc = register_virtio_device(&vl_dev->vdev);
+	if (rc)
+		put_device(&vl_dev->vdev.dev);
+
+	/* Propagate the registration result (was: unconditional 0) */
+	return rc;
+}
+
+/*
+ * Platform probe: allocate and initialize the per-transport state and
+ * publish it in the global device array via insert_entry_data().
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int virtio_loopback_probe(struct platform_device *pdev)
+{
+	int err;
+	struct virtio_loopback_device *vl_dev;
+
+	pr_info("Entered probe with id: %d!\n", pdev->id);
+	vl_dev = devm_kzalloc(&pdev->dev, sizeof(*vl_dev), GFP_KERNEL);
+	if (!vl_dev) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	vl_dev->vdev.dev.parent = &pdev->dev;
+	vl_dev->vdev.dev.release = virtio_loopback_release_dev;
+	vl_dev->vdev.config = &virtio_loopback_config_ops;
+	vl_dev->pdev = pdev;
+	INIT_LIST_HEAD(&vl_dev->virtqueues);
+	spin_lock_init(&vl_dev->lock);
+
+	/* Initialize the notifications related data structures */
+	vl_dev->notify_workqueue =
+		create_singlethread_workqueue("notify_workqueue");
+	/* Fix: workqueue creation can fail; do not continue with NULL */
+	if (!vl_dev->notify_workqueue) {
+		err = -ENOMEM;
+		goto out;
+	}
+	INIT_WORK(&vl_dev->notify_work, notify_work_handler);
+	INIT_LIST_HEAD(&vl_dev->notify_list);
+	spin_lock_init(&vl_dev->notify_q_lock);
+	init_waitqueue_head(&vl_dev->wq_notifs_inters);
+
+	/* Set platform data */
+	platform_set_drvdata(pdev, vl_dev);
+
+	/* Insert new entry data */
+	err = insert_entry_data(vl_dev, pdev->id);
+
+out:
+	return err;
+}
+
+/*
+ * Platform remove: tear down the notify workqueue, unregister the virtio
+ * device and free the per-device data shared with the adapter.
+ *
+ * The return type is void on kernels > 6.10.8 and int before that.
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(6, 10, 8)
+static void virtio_loopback_remove(struct platform_device *pdev)
+#else
+static int virtio_loopback_remove(struct platform_device *pdev)
+#endif
+{
+	struct virtio_loopback_device *vl_dev;
+
+	pr_debug("virtio_loopback_remove\n");
+	vl_dev = platform_get_drvdata(pdev);
+
+	if (vl_dev->data == NULL) {
+		pr_debug("Dev already deallocated\n");
+		/*
+		 * Fix: "return 0" does not compile when the remove callback
+		 * returns void (kernels > 6.10.8); guard the early return.
+		 */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(6, 10, 8)
+		return;
+#else
+		return 0;
+#endif
+	}
+
+	/* Destroy the notify workqueue */
+	flush_workqueue(vl_dev->notify_workqueue);
+	destroy_workqueue(vl_dev->notify_workqueue);
+
+	/* vl_dev->data is known non-NULL here (early return above) */
+	unregister_virtio_device(&vl_dev->vdev);
+	pr_info("unregister_virtio_device!\n");
+
+	/* Subsequently free the device data */
+	free_page((unsigned long)vl_dev->data->info->data);
+	kfree(vl_dev->data->info);
+	eventfd_ctx_put(vl_dev->data->efd_ctx);
+	vl_dev->data->efd_ctx = NULL;
+	kfree(vl_dev->data);
+	vl_dev->data = NULL;
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(6, 10, 8)
+	return 0;
+#endif
+}
+
+/*
+ * Platform driver for the "loopback-transport" devices.  Devices are
+ * registered programmatically (platform_device_register_simple) from the
+ * START_LOOPBACK ioctl, so no DTS or ACPI matching is needed.
+ */
+struct platform_driver virtio_loopback_driver = {
+	.probe = virtio_loopback_probe,
+	.remove = virtio_loopback_remove,
+	.driver = {
+		.name = "loopback-transport",
+	},
+};
+
+/*
+ * read_adapter - perform one register read through the user-space adapter.
+ * @fn_id: register offset (VIRTIO_MMIO_*) being read
+ * @size: access width in bytes
+ * @dev_data: per-device communication state
+ *
+ * Serializes on read_write_lock, publishes the request in the shared
+ * communication page, kicks the adapter via eventfd and waits until the
+ * adapter sets "done".  Returns the value the adapter wrote back.
+ */
+static uint64_t read_adapter(uint64_t fn_id, uint64_t size,
+			     struct device_data *dev_data)
+{
+	uint64_t result;
+
+	mutex_lock(&(dev_data)->read_write_lock);
+
+	/* Trace this read request (output controlled by pr_debug) */
+	print_neg_flag(fn_id, 1);
+
+	/* Publish the request in the shared communication structure */
+	((struct virtio_neg *)(dev_data->info->data))->notification = fn_id;
+	((struct virtio_neg *)(dev_data->info->data))->data = 0;
+	((struct virtio_neg *)(dev_data->info->data))->size = size;
+	((struct virtio_neg *)(dev_data->info->data))->read = true;
+
+	atomic_set(&((struct virtio_neg *)(dev_data->info->data))->done, 0);
+
+	/* Kick the adapter (eventfd_signal lost its count arg in 6.8) */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(6, 7, 12)
+	eventfd_signal(dev_data->efd_ctx);
+#else
+	eventfd_signal(dev_data->efd_ctx, 1);
+#endif
+
+	/*
+	 * There is a chance virtio-loopback adapter to call "wake_up"
+	 * before the current thread sleep. This is the reason that
+	 * "wait_event_timeout" is used instead of "wait_event". In this
+	 * way, virtio-loopback driver will wake up even if has missed the
+	 * "wake_up" kick, check the updated "done" value and return.
+	 */
+
+	while (dev_data->valid_eventfd &&
+	       atomic_read(&((struct virtio_neg *)(dev_data->info->data))->done) != 1)
+		wait_event_timeout(dev_data->wq,
+				   atomic_read(&((struct virtio_neg *)(dev_data->info->data))->done) == 1,
+				   100 * HZ);
+
+	result = ((struct virtio_neg *)(dev_data->info->data))->data;
+
+	mutex_unlock(&(dev_data)->read_write_lock);
+
+	return result;
+}
+
+/*
+ * write_adapter - perform one register write through the user-space adapter.
+ * @data: value to write
+ * @fn_id: register offset (VIRTIO_MMIO_*) being written
+ * @size: access width in bytes
+ * @dev_data: per-device communication state
+ *
+ * Mirror of read_adapter(): publishes the request, kicks the adapter via
+ * eventfd and waits for the "done" acknowledgement.
+ */
+static void write_adapter(uint64_t data, uint64_t fn_id, uint64_t size,
+			  struct device_data *dev_data)
+{
+
+	mutex_lock(&(dev_data)->read_write_lock);
+
+	/* Trace this write request (output controlled by pr_debug) */
+	print_neg_flag(fn_id, 0);
+
+	/* Publish the request in the shared communication structure */
+	((struct virtio_neg *)(dev_data->info->data))->notification = fn_id;
+	((struct virtio_neg *)(dev_data->info->data))->data = data;
+	((struct virtio_neg *)(dev_data->info->data))->size = size;
+	((struct virtio_neg *)(dev_data->info->data))->read = false;
+
+	atomic_set(&((struct virtio_neg *)(dev_data->info->data))->done, 0);
+
+	/* Kick the adapter (eventfd_signal lost its count arg in 6.8) */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(6, 7, 12)
+	eventfd_signal(dev_data->efd_ctx);
+#else
+	eventfd_signal(dev_data->efd_ctx, 1);
+#endif
+
+	/*
+	 * There is a chance virtio-loopback adapter to call "wake_up"
+	 * before the current thread sleep. This is the reason that
+	 * "wait_event_timeout" is used instead of "wait_event". In this
+	 * way, virtio-loopback driver will wake up even if has missed the
+	 * "wake_up" kick, check the updated "done" value and return.
+	 */
+	while (dev_data->valid_eventfd &&
+	       atomic_read(&((struct virtio_neg *)(dev_data->info->data))->done) != 1)
+		wait_event_timeout(dev_data->wq,
+				   atomic_read(&((struct virtio_neg *)(dev_data->info->data))->done) == 1,
+				   100 * HZ);
+
+	mutex_unlock(&(dev_data)->read_write_lock);
+}
diff --git a/virtio_loopback_driver.c b/virtio_loopback_driver.c
new file mode 100644
index 0000000..0fb3ceb
--- /dev/null
+++ b/virtio_loopback_driver.c
@@ -0,0 +1,858 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtio loopback device driver
+ *
+ * Copyright 2022-2024 Virtual Open Systems SAS
+ *
+ * Authors:
+ * Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
+ * Anna Panagopoulou <anna@virtualopensystems.com>
+ * Alvise Rigo <a.rigo@virtualopensystems.com>
+ *
+ * This module allows virtio devices to be used in a non-virtualized
+ * environment, coupled with vhost-user device (user-space drivers).
+ *
+ * This module is responsible to assign the virtio-loopback transport driver
+ * to a group of virtio drivers in order to be able to share notifications and
+ * the vrings (without copies) with the corresponding vhost-user devices in
+ * the user-space.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "virtio-loopback: " fmt
+
+/* Loopback header file */
+#include "virtio_loopback_driver.h"
+
+/* Features */
+MODULE_LICENSE("GPL");
+
+/* The global data for the loopback */
+struct loopback_device_data loopback_data;
+struct loopback_devices_array loopback_devices;
+
+/*
+ * This function registers all mmap calls done by the user-space into an array
+ */
+/*
+ * add_share_mmap - record one user-space mmap in the per-file array.
+ * @filp: file whose private data holds the mmap bookkeeping
+ * @pfn: starting page frame number of the mapping
+ * @vm_start: user virtual start address
+ * @size: mapping size in bytes
+ *
+ * Fix: the original wrote share_mmap_list[mmap_index] without checking
+ * the index, overflowing the fixed-size array after MMAP_LIMIT mmaps.
+ */
+static void add_share_mmap(struct file *filp, uint64_t pfn,
+			   uint64_t vm_start, uint64_t size)
+{
+	struct file_priv_data *file_data =
+		(struct file_priv_data *)(filp->private_data);
+	struct mmap_data *mm_data = (struct mmap_data *)file_data->mm_data;
+
+	if (mm_data->mmap_index >= MMAP_LIMIT) {
+		pr_warn("share_mmap_list is full, mapping not recorded\n");
+		return;
+	}
+
+	mm_data->share_mmap_list[mm_data->mmap_index].pfn = pfn;
+	mm_data->share_mmap_list[mm_data->mmap_index].vm_start = vm_start;
+	mm_data->share_mmap_list[mm_data->mmap_index].size = size;
+	mm_data->share_mmap_list[mm_data->mmap_index].uid =
+		task_pid_nr(current);
+	mm_data->mmap_index++;
+}
+
+/*
+ * This function removes a record from mmap array
+ */
+/*
+ * Forget every mmap record whose vm_start matches the VMA being closed.
+ */
+static void share_mmap_rem(struct vm_area_struct *vma)
+{
+	struct file_priv_data *priv =
+		(struct file_priv_data *)(vma->vm_file->private_data);
+	struct mmap_data *mm = (struct mmap_data *)priv->mm_data;
+	int idx;
+
+	for (idx = 0; idx < MMAP_LIMIT; idx++) {
+		if (mm->share_mmap_list[idx].vm_start != vma->vm_start)
+			continue;
+
+		mm->share_mmap_list[idx].uid = 0;
+		mm->share_mmap_list[idx].pfn = 0;
+		mm->share_mmap_list[idx].vm_start = 0;
+		mm->share_mmap_list[idx].size = 0;
+	}
+}
+
+/* Dump one share_mmap_list record via pr_debug (debug aid only). */
+static void print_mmap_idx(struct mmap_data *mm_data, int i)
+{
+	pr_debug("share_mmap_list[%d].uid %x\n", i,
+		 mm_data->share_mmap_list[i].uid);
+	pr_debug("share_mmap_list[%d].pfn %llx\n", i,
+		 mm_data->share_mmap_list[i].pfn);
+	pr_debug("share_mmap_list[%d].vm_start %llx\n", i,
+		 mm_data->share_mmap_list[i].vm_start);
+	pr_debug("share_mmap_list[%d].size %x\n", i,
+		 mm_data->share_mmap_list[i].size);
+}
+
+/**
+ * print_mmaps - dump all tracked mmap records via pr_debug
+ * @mm_data: mmap bookkeeping to dump
+ *
+ * When no mapping has been registered yet (mmap_index == 0) the whole
+ * table of MMAP_LIMIT entries is printed; otherwise only the used part.
+ * Output goes through pr_debug, so enable dynamic debug to see it.
+ */
+static void print_mmaps(struct mmap_data *mm_data)
+{
+	int idx;
+	int limit = mm_data->mmap_index ? mm_data->mmap_index : MMAP_LIMIT;
+
+	for (idx = 0; idx < limit; idx++)
+		print_mmap_idx(mm_data, idx);
+}
+
+/**
+ * share_mmap_exist_vma_return_correct_pfn - Calculate corrected PFN for a
+ * given address.
+ * @mm_data: Pointer to struct containing memory mapping data
+ * @addr: Address for which to calculate the corrected PFN
+ *
+ * This function iterates through the list of shared memory mappings in
+ * `mm_data` and checks if the given `addr` lies within any of the mappings.
+ * If it does, it computes the corrected PFN based on the mapping's start
+ * address, size, and PFN.
+ *
+ * Returns:
+ * - The corrected PFN if the address falls within a mapping.
+ * - 0 if the address does not match any mapping.
+ */
+/*
+ * Translate a faulting user virtual address into the PFN backing it,
+ * using the recorded shared mappings.  Returns 0 when no recorded
+ * mapping covers the address.
+ */
+static uint64_t share_mmap_exist_vma_return_correct_pfn(
+	struct mmap_data *mm_data,
+	uint64_t addr)
+{
+	int idx;
+
+	for (idx = 0; idx < MMAP_LIMIT; idx++) {
+		uint64_t start = mm_data->share_mmap_list[idx].vm_start;
+		uint64_t end = start + mm_data->share_mmap_list[idx].size;
+
+		if (addr >= start && addr < end)
+			return ((addr - start) / PAGE_SIZE) +
+			       mm_data->share_mmap_list[idx].pfn;
+	}
+
+	return 0;
+}
+
+/**
+ * pf_mmap_fault - Handle page faults for the device mmap area
+ * @vmf: Pointer to the `vm_fault` structure containing fault information
+ *
+ * Called on a page fault to find and insert the correct page for the
+ * faulting address.  The corrected PFN is computed from the device's
+ * recorded mmap data and validated before insertion.
+ *
+ * Returns:
+ * - the result of vmf_insert_pfn() on success.
+ * - `VM_FAULT_SIGBUS` on failure.
+ *
+ * Fix: removed the unused local `corr_pfn_struct` (pfn_t), which was
+ * assigned but never read.
+ */
+static vm_fault_t pf_mmap_fault(struct vm_fault *vmf)
+{
+	uint64_t corrected_pfn;
+	struct page *page;
+
+	struct file *file = vmf->vma->vm_file;
+	struct file_priv_data *file_data =
+		(struct file_priv_data *)(file->private_data);
+	struct mmap_data *mm_data =
+		(struct mmap_data *)file_data->mm_data;
+
+	/* Count the total number of page_faults for debugging purpose */
+	mm_data->sum_pgfaults++;
+
+	/* Find the corrected pfn */
+	corrected_pfn = share_mmap_exist_vma_return_correct_pfn(mm_data,
+								vmf->address);
+
+	/* Ensure the PFN is valid */
+	if (unlikely(!pfn_valid(corrected_pfn))) {
+		pr_err("Invalid PFN: %llu\n", corrected_pfn);
+		return VM_FAULT_SIGBUS;
+	}
+
+	/* After finding the page, validate its kernel address */
+	page = pfn_to_page(corrected_pfn);
+	if (unlikely(!virt_addr_valid(page_address(page)))) {
+		pr_err("Invalid page address for PFN: %llu\n", corrected_pfn);
+		return VM_FAULT_SIGBUS;
+	}
+
+	/* Insert the correct page */
+	return vmf_insert_pfn(vmf->vma, vmf->address, corrected_pfn);
+}
+
+/* VMA close callback: drop the records tracking this mapping. */
+static void pf_mmap_close(struct vm_area_struct *vma)
+{
+	share_mmap_rem(vma);
+}
+
+/* VMA callbacks for the fault-based (vring buffer) mappings */
+const struct vm_operations_struct pf_mmap_ops = {
+	.close = pf_mmap_close,
+	.fault = pf_mmap_fault,
+};
+
+/**
+ * pf_mmap_vm_page - Set up memory mapping for a file
+ * @filp: Pointer to the file structure for the mapping
+ * @vma: Pointer to the VM area structure representing the memory mapping
+ *
+ * Marks the VMA as VM_PFNMAP (pages are faulted in by pf_mmap_fault) and
+ * records a synthetic starting PFN for the range.
+ *
+ * Returns:
+ * - 0 on success.
+ * - Negative error code on failure.
+ */
+static int pf_mmap_vm_page(struct file *filp, struct vm_area_struct *vma)
+{
+	uint64_t size = (unsigned long)(vma->vm_end - vma->vm_start);
+	struct file_priv_data *file_data =
+		(struct file_priv_data *)(filp->private_data);
+	struct mmap_data *mm_data = (struct mmap_data *)file_data->mm_data;
+	/*
+	 * NOTE(review): the synthetic PFN assumes every RAM mmap made on
+	 * this file has the same size; ranges of differing sizes would
+	 * overlap — confirm against the user-space caller.
+	 */
+	uint64_t pfn = ((mm_data->cur_ram_idx++) * (size >> PAGE_SHIFT));
+
+	/* vm_flags became non-writable in 6.3; use the accessor there */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 3, 0)
+	vma->vm_flags |= VM_PFNMAP;
+#else
+	vm_flags_set(vma, VM_PFNMAP);
+#endif
+	add_share_mmap(filp, pfn, vma->vm_start, size);
+	return 0;
+}
+
+/**
+ * mmap_vqs_com_struct - Map virtqueue or communication structure to user space
+ * @filp: Pointer to the file structure associated with the mapping
+ * @vma: Pointer to the VM area structure describing the memory region
+ *
+ * This function maps either the virtqueue data or the communication structure
+ * to the user space using `remap_pfn_range`. The choice of what to map depends
+ * on the `share_communication_struct` flag in the mmap data structure.
+ *
+ * Returns:
+ * - 0 on success.
+ * - Negative error code on failure.
+ */
+static int mmap_vqs_com_struct(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret = 0;
+	unsigned long size = (unsigned long)(vma->vm_end - vma->vm_start);
+	struct file_priv_data *file_data =
+		(struct file_priv_data *)(filp->private_data);
+	struct device_data *dev_data =
+		(struct device_data *)file_data->dev_data;
+	struct mmap_data *mmap_data = (struct mmap_data *)file_data->mm_data;
+	/* Kernel virtual address of the shared communication page */
+	struct mmap_info *com_mmap_virt =
+		(struct mmap_info *)(file_data->dev_data->info)->data;
+	uint64_t com_mmap_pfn =
+		((uint64_t)virt_to_phys(com_mmap_virt)) >> PAGE_SHIFT;
+	uint64_t starting_pfn;
+
+	if (mmap_data->share_communication_struct) {
+		/*
+		 * NOTE(review): VM_RESERVED was removed from mainline long
+		 * ago; presumably redefined in the project header — confirm.
+		 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
+		vma->vm_flags |= VM_RESERVED;
+#else
+		vm_flags_set(vma, VM_RESERVED);
+#endif
+		/* One-shot flag: consumed by this mmap call */
+		mmap_data->share_communication_struct = false;
+		starting_pfn = com_mmap_pfn;
+	} else {
+		/* One-shot flag: consumed by this mmap call */
+		mmap_data->share_vqs = false;
+		starting_pfn = dev_data->vq_data.vq_pfn;
+	}
+
+	ret = remap_pfn_range(vma, vma->vm_start, starting_pfn, size,
+			      vma->vm_page_prot);
+	if (ret != 0) {
+		pr_err("Mmap error\n");
+		print_mmaps(mmap_data);
+	} else {
+		add_share_mmap(filp, starting_pfn, vma->vm_start, size);
+	}
+
+	return ret;
+}
+
+/**
+ * op_mmap - dispatch an incoming mmap() to the right mapping strategy
+ * @filp: Pointer to the file structure associated with the mapping
+ * @vma: Pointer to the VM area structure describing the memory region
+ *
+ * Communication-structure and virtqueue requests (flagged beforehand via
+ * ioctl) are remapped as one contiguous range by mmap_vqs_com_struct();
+ * everything else is treated as vring buffer memory and faulted in page
+ * by page via pf_mmap_vm_page().
+ *
+ * Returns:
+ * - 0 on success.
+ * - Negative error code on failure.
+ */
+static int op_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct file_priv_data *priv =
+		(struct file_priv_data *)(filp->private_data);
+	struct mmap_data *mm = (struct mmap_data *)priv->mm_data;
+
+	vma->vm_ops = &pf_mmap_ops;
+
+	if (mm->share_communication_struct || mm->share_vqs)
+		return mmap_vqs_com_struct(filp, vma);
+
+	return pf_mmap_vm_page(filp, vma);
+}
+
+/*
+ * write() is a stub: no data is consumed, sizeof(int) bytes are reported
+ * as written.  Fix: dropped the dead "if (len <= 0)" branch — len was the
+ * constant sizeof(int), which can never be <= 0.
+ */
+static ssize_t loopback_write(struct file *file,
+			      const char __user *user_buffer,
+			      size_t size,
+			      loff_t *offset)
+{
+	return sizeof(int);
+}
+
+/* read() is a no-op: all data exchange happens via mmap and ioctl. */
+static ssize_t loopback_read(struct file *file,
+			     char __user *user_buffer,
+			     size_t size, loff_t *offset)
+{
+	return 0;
+}
+
+/*
+ * The lseek sys_call is needed only by the vhost-user device
+ * located in vhost-device crate.
+ */
+static loff_t loopback_seek(struct file *file, loff_t offset, int whence)
+{
+	loff_t pos;
+
+	switch (whence) {
+	case SEEK_SET:
+		pos = offset;
+		break;
+	case SEEK_CUR:
+		pos = file->f_pos + offset;
+		break;
+	case SEEK_END:
+		pos = file->f_inode->i_size;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Reject positions outside [0, i_size] */
+	if (pos < 0 || pos > file->f_inode->i_size)
+		return -EINVAL;
+
+	return pos;
+}
+
+/*
+ * Register one more "loopback-transport" platform device with the given
+ * id; its probe routine will populate the global device array.
+ */
+static int register_virtio_loopback_dev(uint32_t device_id)
+{
+	struct platform_device *pdev;
+
+	pr_info("Received request to register a new loopback transport\n");
+
+	/* Register a new loopback-transport device */
+	pdev = platform_device_register_simple("loopback-transport",
+					       device_id, NULL, 0);
+	if (IS_ERR(pdev)) {
+		int err = PTR_ERR(pdev);
+
+		pr_err("Failed to register transport device: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * insert_entry_data - store a freshly probed transport in the global array
+ * @vl_dev: the device to store
+ * @id: platform device id, used as the array index
+ *
+ * Fix: valid indices of devices[] are 0..MAX_PDEV-1, so the bound check
+ * must be "id < MAX_PDEV" (was "id <= MAX_PDEV", an off-by-one
+ * out-of-bounds write).  The completion is likewise only signalled for an
+ * in-range id, since reg_vl_dev_completion[] has the same bound.
+ *
+ * Returns 0 on success, -ENOMEM when the array is full or id is invalid.
+ */
+int insert_entry_data(struct virtio_loopback_device *vl_dev, int id)
+{
+	int err = 0;
+	/* Read that value atomically */
+	uint32_t max_used_dev_idx = atomic_read(&loopback_devices.device_num);
+
+	/* Store the new vl_dev */
+	if ((id >= 0) && (id < MAX_PDEV) && (max_used_dev_idx < MAX_PDEV)) {
+		loopback_devices.devices[id] = vl_dev;
+		/* Mark the request as completed and free registration */
+		complete(&loopback_devices.reg_vl_dev_completion[id]);
+	} else {
+		err = -ENOMEM;
+	}
+
+	return err;
+}
+
+/*
+ * activate_entry_data - bind an open file's device data to a probed
+ * transport device and initialize its scheduling state.
+ * @file_data: per-open-file state carrying the device_data to attach
+ * @curr_dev_id: index of the transport in the global device array
+ *
+ * Returns the activated device, or NULL when curr_dev_id is out of range
+ * or the device could not be added to the global list.
+ */
+static struct virtio_loopback_device *
+activate_entry_data(struct file_priv_data *file_data,
+		    uint32_t curr_dev_id)
+{
+	struct virtio_loopback_device *vl_dev = NULL;
+
+	/* See if there is any available device */
+	if (curr_dev_id < MAX_PDEV) {
+		/* Find and store the data */
+		vl_dev = loopback_devices.devices[curr_dev_id];
+		vl_dev->data = file_data->dev_data;
+		vl_dev->data->vdev_data = &file_data->device_info;
+
+		/*
+		 * Add this device to a global list; NULL on failure
+		 * (presumably list insertion failed — see add_dev_to_list)
+		 */
+		if (!add_dev_to_list(curr_dev_id))
+			return NULL;
+
+		/* Set credits & last served timestamp */
+		atomic_set(&vl_dev->data->notif_credits,
+			   vl_dev->data->vdev_data->init_notif_credits);
+		vl_dev->data->served_timestamp = ktime_get();
+
+		/* Set available notifs */
+		atomic_set(&vl_dev->data->avail_notifs, 0);
+
+		/* Set device group */
+		vl_dev->data->priority_group = vl_dev->data->vdev_data->priority_group;
+
+		/* Set available interrupts */
+		atomic_set(&vl_dev->data->avail_inters, 0);
+	}
+
+	return vl_dev;
+}
+
+/*
+ * Bind the freshly probed transport with index @curr_dev_id to this
+ * file's device data and register it with the virtio core.
+ */
+static int start_loopback(struct file_priv_data *file_data,
+			  uint32_t curr_dev_id)
+{
+	struct virtio_loopback_device *vl_dev =
+		activate_entry_data(file_data, curr_dev_id);
+
+	if (!vl_dev) {
+		pr_debug("No available entry found!\n");
+		file_data->vl_dev_irq = NULL;
+		return -EFAULT;
+	}
+
+	file_data->vl_dev_irq = vl_dev;
+	/* Register the activated vl_dev in the system */
+	return loopback_register_virtio_dev(vl_dev);
+}
+
+/**
+ * loopback_ioctl - Handle various ioctl commands for loopback device
+ * @file: Pointer to the file structure associated with the device
+ * @cmd: The ioctl command code
+ * @arg: User-space argument associated with the command
+ *
+ * This function processes various ioctl commands to configure and control the
+ * loopback device. The supported commands include:
+ *
+ * - `EFD_INIT`: The user-space adapter component shares an eventfd with the
+ * loopback device. This eventfd is triggered by the device each time a
+ * read / write operation is requested via the communication data structure.
+ *
+ * - `WAKEUP`: Sets a flag in the device's internal structure and wakes up any
+ * read / write process waiting on the communication wait queue.
+ *
+ * - `START_LOOPBACK`: Registers and starts a new loopback device, assigning a
+ * unique device ID and waiting for its probe function to complete before
+ * returning to user space.
+ *
+ * - `IRQ`: Handles an interrupt request by triggering the device's interrupt
+ * logic with the provided IRQ number.
+ *
+ * - `SHARE_VQS`: Shares a specified virtqueue (selected via a queue index)
+ * between the user-space application and the loopback device.
+ *
+ * - `SHARE_COM_STRUCT`: Notifies the loopback-device that the next mmap call
+ * will request the communication structure to be as shared between
+ * user-space and the loopback device.
+ *
+ * - `SHARE_VQS_NOTIF`: The user-space uses this command to share the eventfd
+ * associated with a specific virtqueue. This eventfd will be triggered each
+ * time the virtio device calls the `notify` function. In this way the
+ * by-pass the user-space adapter component and delivered directly to the
+ * vhost-user devices in user-space.
+ *
+ * If an unknown `cmd` is provided, the function logs an error and returns
+ * `-ENOTTY` to indicate an unsupported ioctl command.
+ *
+ * Returns:
+ * - `0` on success.
+ * - Negative error codes (`-EFAULT`, `-ENOTTY`, or others) on failure.
+ */
+static long loopback_ioctl(struct file *file, unsigned int cmd,
+			   unsigned long arg)
+{
+	struct efd_data efd_data;
+	int irq, err;
+	uint32_t queue_sel;
+	struct file_priv_data *file_data =
+		(struct file_priv_data *)(file->private_data);
+	struct mmap_data *mm_data = (struct mmap_data *)file_data->mm_data;
+	struct device_data *dev_data =
+		(struct device_data *)file_data->dev_data;
+	uint32_t curr_avail_dev_id;
+	struct vq_notifier vq_notifier;
+
+	switch (cmd) {
+	case EFD_INIT: {
+		struct task_struct *userspace_task;
+		struct file *efd_file;
+
+		if (copy_from_user(&efd_data, (struct efd_data *) arg,
+				   sizeof(struct efd_data)))
+			return -EFAULT;
+
+		userspace_task = pid_task(find_vpid(efd_data.pid), PIDTYPE_PID);
+		/* Fix: the pid may not resolve to a live task */
+		if (!userspace_task)
+			return -ESRCH;
+
+		rcu_read_lock();
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 220)
+		efd_file = fcheck_files(userspace_task->files, efd_data.efd[0]);
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 7, 0)
+		efd_file = files_lookup_fd_rcu(userspace_task->files, efd_data.efd[0]);
+#else
+		efd_file = files_lookup_fd_raw(userspace_task->files, efd_data.efd[0]);
+#endif
+#endif
+		rcu_read_unlock();
+		/* Fix: the fd may be invalid */
+		if (!efd_file)
+			return -EBADF;
+
+		dev_data->efd_ctx = eventfd_ctx_fileget(efd_file);
+		/* Fix: eventfd_ctx_fileget() returns ERR_PTR, not NULL */
+		if (IS_ERR(dev_data->efd_ctx)) {
+			err = PTR_ERR(dev_data->efd_ctx);
+			dev_data->efd_ctx = NULL;
+			return err;
+		}
+
+		break;
+	}
+	case WAKEUP: {
+		/* Adapter completed a request: flip "done" and wake waiters */
+		atomic_set(&((struct virtio_neg *)(dev_data->info->data))->done,
+			   1);
+		wake_up(&(dev_data)->wq);
+		break;
+	}
+	case START_LOOPBACK: {
+		if (copy_from_user(&(file_data)->device_info,
+				   (struct virtio_device_info_struct *) arg,
+				   sizeof(struct virtio_device_info_struct)))
+			return -EFAULT;
+
+		pr_crit("Priority: %lu\n", file_data->device_info.init_notif_credits);
+		/* Read and increase that value atomically */
+		curr_avail_dev_id =
+			atomic_add_return(1, &loopback_devices.device_num) - 1;
+
+		/* Register a new loopback device */
+		err = register_virtio_loopback_dev(curr_avail_dev_id);
+		if (err)
+			return -EFAULT;
+
+		/*
+		 * Wait for probe function to be called before return control
+		 * to user-space app
+		 */
+		wait_for_completion(
+			&loopback_devices.reg_vl_dev_completion[curr_avail_dev_id]);
+
+		/* Start the loopback */
+		err = start_loopback(file_data, curr_avail_dev_id);
+		if (err)
+			return -EFAULT;
+
+		break;
+	}
+	case IRQ:
+		if (copy_from_user(&irq, (int *) arg, sizeof(int)))
+			return -EFAULT;
+		/* Fix: no device is bound before a successful START_LOOPBACK */
+		if (!file_data->vl_dev_irq)
+			return -EINVAL;
+		register_interrupt(file_data->vl_dev_irq, irq);
+		break;
+	case SHARE_VQS:
+		if (copy_from_user(&queue_sel, (uint32_t *) arg,
+				   sizeof(uint32_t)))
+			return -EFAULT;
+		/*
+		 * NOTE(review): queue_sel comes from user space and indexes
+		 * vq_pfns[] unvalidated — confirm the bound and reject
+		 * out-of-range values.
+		 */
+		dev_data->vq_data.vq_pfn = dev_data->vq_data.vq_pfns[queue_sel];
+		mm_data->share_vqs = true;
+		break;
+	case SHARE_COM_STRUCT:
+		mm_data->share_communication_struct = true;
+		break;
+	case SHARE_VQS_NOTIF: {
+		/*
+		 * Fix: declarations directly after a case label are not
+		 * valid before C23 — the case body is now braced.
+		 */
+		struct task_struct *userspace_task;
+		struct file *efd_file;
+		struct eventfd_ctx *ctx;
+
+		if (copy_from_user(&vq_notifier, (struct vq_notifier *) arg,
+				   sizeof(struct vq_notifier)))
+			return -EFAULT;
+
+		userspace_task =
+			pid_task(find_vpid(vq_notifier.pid), PIDTYPE_PID);
+		/* Fix: the pid may not resolve to a live task */
+		if (!userspace_task)
+			return -ESRCH;
+
+		rcu_read_lock();
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 220)
+		efd_file = fcheck_files(userspace_task->files, vq_notifier.notifier_fd);
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 7, 0)
+		efd_file = files_lookup_fd_rcu(userspace_task->files, vq_notifier.notifier_fd);
+#else
+		efd_file = files_lookup_fd_raw(userspace_task->files, vq_notifier.notifier_fd);
+#endif
+#endif
+		rcu_read_unlock();
+		/* Fix: the fd may be invalid */
+		if (!efd_file)
+			return -EBADF;
+
+		/*
+		 * NOTE(review): vq_notifier.vq_index comes from user space
+		 * and indexes vq_notifiers[] unvalidated — confirm the bound
+		 * and reject out-of-range values.
+		 */
+		ctx = eventfd_ctx_fileget(efd_file);
+		/* Fix: eventfd_ctx_fileget() returns ERR_PTR, not NULL */
+		if (IS_ERR(ctx))
+			return PTR_ERR(ctx);
+		dev_data->vq_data.vq_notifiers[vq_notifier.vq_index] = ctx;
+		/* Mark device notifiers as enabled */
+		dev_data->vq_data.vq_notifiers_enabled = true;
+		break;
+	}
+	default:
+		pr_err("Unknown loopback ioctl: %u\n", cmd);
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+/*
+ * open(): allocate the per-file state (device data, communication page
+ * and mmap bookkeeping) and stash it in file->private_data.
+ *
+ * Fix: the result of get_zeroed_page() is now checked — the original
+ * memcpy'd into a possibly-NULL page and leaked dev_data->info on that
+ * path.
+ *
+ * Returns 0 on success, -ENOMEM on any allocation failure.
+ */
+static int loopback_open(struct inode *inode, struct file *file)
+{
+	uint32_t val_1gb = 1024 * 1024 * 1024;
+	struct virtio_neg device_neg = {.done = ATOMIC_INIT(0)};
+	/* Allocate file private data */
+	struct file_priv_data *file_data =
+		kmalloc(sizeof(struct file_priv_data), GFP_KERNEL);
+	struct device_data *dev_data =
+		kmalloc(sizeof(struct device_data), GFP_KERNEL);
+	struct mmap_data *mm_data =
+		kmalloc(sizeof(struct mmap_data), GFP_KERNEL);
+
+	if (!file_data || !dev_data || !mm_data)
+		goto error_kmalloc;
+
+	/* Set the i_size for the stat SYS_CALL*/
+	file->f_inode->i_size = 10 * val_1gb;
+
+	/* Initialize the device data */
+	dev_data->info = kmalloc(sizeof(struct mmap_info), GFP_KERNEL);
+	if (!dev_data->info)
+		goto error_kmalloc;
+	dev_data->info->data = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!dev_data->info->data)
+		goto error_page;
+	memcpy(dev_data->info->data, &device_neg, sizeof(struct virtio_neg));
+
+	/* Init wq */
+	init_waitqueue_head(&(dev_data)->wq);
+
+	/* Init mutex */
+	mutex_init(&(dev_data)->read_write_lock);
+
+	/* Init vq_data */
+	dev_data->vq_data.vq_index = 0;
+	dev_data->valid_eventfd = true;
+	dev_data->vq_data.vq_notifiers_enabled = false;
+	file_data->dev_data = dev_data;
+
+	/* Init file mmap_data */
+	mm_data->mmap_index = 0;
+	mm_data->share_communication_struct = false;
+	mm_data->share_vqs = false;
+	mm_data->cur_ram_idx = 0;
+	mm_data->sum_pgfaults = 0;
+	file_data->mm_data = mm_data;
+
+	/* Store in the private data as it should */
+	file->private_data = (struct file_priv_data *)file_data;
+
+	return 0;
+
+error_page:
+	kfree(dev_data->info);
+error_kmalloc:
+	/* kfree(NULL) is a no-op, so partial allocations are safe here */
+	kfree(file_data);
+	kfree(dev_data);
+	kfree(mm_data);
+	return -ENOMEM;
+}
+
+/* Spawn the kernel thread that schedules notifications/interrupts. */
+static int start_notif_thread(void)
+{
+	struct task_struct *thread = kthread_run(notif_sched_func, NULL,
+						 "notif_thread");
+
+	loopback_data.notif_thread = thread;
+	if (IS_ERR(thread)) {
+		pr_err("Failed to create kernel thread\n");
+		return PTR_ERR(thread);
+	}
+
+	pr_info("Kernel notif thread started successfully\n");
+	return 0;
+}
+
+/*
+ * release(): invalidate the eventfd channel, unregister the bound
+ * transport device (if any) and free the per-file bookkeeping.
+ */
+static int loopback_release(struct inode *inode, struct file *file)
+{
+	struct file_priv_data *file_data =
+		(struct file_priv_data *)(file->private_data);
+	struct device_data *dev_data =
+		(struct device_data *)file_data->dev_data;
+	struct mmap_data *mm_data = (struct mmap_data *)file_data->mm_data;
+
+	pr_info("Releasing the device\n");
+
+	/* Unregister from the list */
+	note_dev_deletion(file_data->vl_dev_irq);
+
+	/*
+	 * This makes the read/write do not wait
+	 * for the virtio-loopback-adapter if
+	 * the last has closed the fd
+	 */
+	dev_data->valid_eventfd = false;
+
+	/* Active entry found */
+	if (file_data->vl_dev_irq) {
+		pr_debug("About to cancel the work\n");
+		/* TODO: Move this into virtio_loopback_remove */
+		/* Cancel any pending work */
+		cancel_work_sync(&file_data->vl_dev_irq->notify_work);
+		/* Continue with the vl_dev unregister */
+		platform_device_unregister(file_data->vl_dev_irq->pdev);
+		file_data->vl_dev_irq = NULL;
+	}
+
+	/*
+	 * NOTE(review): this only clears the local pointer; device_data
+	 * itself is freed in virtio_loopback_remove(), so if vl_dev_irq was
+	 * NULL above it appears to leak — confirm ownership.
+	 */
+	dev_data = NULL;
+
+	/* Continue with the mm_data */
+	kfree(mm_data);
+	file_data->mm_data = NULL;
+
+	/* Last, free the private data */
+	kfree(file_data);
+	file->private_data = NULL;
+
+	return 0;
+}
+
+/* File operations of the /dev/loopback control node */
+static const struct file_operations fops = {
+	.owner = THIS_MODULE,
+	.read = loopback_read,
+	.write = loopback_write,
+	.open = loopback_open,
+	.unlocked_ioctl = loopback_ioctl,
+	.mmap = op_mmap,
+	.llseek = loopback_seek,
+	.release = loopback_release
+};
+
+/*
+ * Module init: create the /dev/loopback control node, register the
+ * platform driver, initialize the global device bookkeeping and start
+ * the notification thread.
+ *
+ * Fix: alloc_chrdev_region() and cdev_add() results were ignored and no
+ * error unwinding existed; failures now release what was acquired.
+ */
+static int __init loopback_init(void)
+{
+	int err, i;
+	dev_t dev;
+
+	err = alloc_chrdev_region(&dev, 0, MAX_DEV, "loopback");
+	if (err) {
+		pr_err("Failed to allocate chrdev region\n");
+		return err;
+	}
+
+	/* Set-up the loopback_data */
+	loopback_data.dev_major = MAJOR(dev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 4, 0)
+	loopback_data.class = class_create(THIS_MODULE, "loopback");
+#else
+	loopback_data.class = class_create("loopback");
+#endif
+	if (IS_ERR(loopback_data.class)) {
+		pr_err("Failed to create class\n");
+		err = PTR_ERR(loopback_data.class);
+		goto err_chrdev;
+	}
+	cdev_init(&loopback_data.cdev, &fops);
+	loopback_data.cdev.owner = THIS_MODULE;
+	err = cdev_add(&loopback_data.cdev,
+		       MKDEV(loopback_data.dev_major, 0), 1);
+	if (err) {
+		pr_err("Failed to add cdev\n");
+		goto err_class;
+	}
+	device_create(loopback_data.class, NULL,
+		      MKDEV(loopback_data.dev_major, 0), NULL, "loopback");
+
+	/* Register virtio_loopback_transport */
+	(void)platform_driver_register(&virtio_loopback_driver);
+
+	/* Init loopback device array */
+	atomic_set(&loopback_devices.device_num, 1);
+
+	/* Init completion for all devices */
+	for (i = 0; i < MAX_PDEV; i++)
+		init_completion(&loopback_devices.reg_vl_dev_completion[i]);
+
+	/* Init loopback device list */
+	INIT_LIST_HEAD(&loopback_devices.virtio_devices_list);
+
+	/* Init notification / interrupt wait queue */
+	init_waitqueue_head(&loopback_devices.wq_notifs_inters);
+
+	/* Init spinlock for when device is running */
+	spin_lock_init(&loopback_devices.running_lock);
+
+	/* Init pending notifications counter */
+	atomic_set(&loopback_devices.pending_notifs, 0);
+
+	/* Init pending interrupts counter */
+	atomic_set(&loopback_devices.pending_inters, 0);
+
+	/* Init current highest notifications priority */
+	atomic_set(&loopback_devices.highest_active_prior_notifs, 0);
+
+	/* Start notification thread */
+	return start_notif_thread();
+
+err_class:
+	class_destroy(loopback_data.class);
+err_chrdev:
+	unregister_chrdev_region(dev, MAX_DEV);
+	return err;
+}
+
+/*
+ * Module exit: stop the notification thread, unregister the platform
+ * driver and tear down the /dev/loopback control node.
+ *
+ * Fix: the chrdev region allocated in loopback_init() was never
+ * released; unregister_chrdev_region() is now called.
+ */
+static void __exit loopback_exit(void)
+{
+	int ret;
+
+	pr_info("Exit virtio_loopback driver!\n");
+
+	/* Wait for notification / interrupt thread to stop */
+	if (loopback_data.notif_thread) {
+		ret = kthread_stop(loopback_data.notif_thread);
+		if (ret) {
+			pr_err("Kernel notif thread returned error: %d\n"
+			       , ret);
+		}
+	}
+
+	/* Unregister virtio_loopback_transport */
+	platform_driver_unregister(&virtio_loopback_driver);
+	pr_debug("platform_driver_unregister!\n");
+
+	/* Necessary actions for the loopback_data */
+	device_destroy(loopback_data.class, MKDEV(loopback_data.dev_major, 0));
+	cdev_del(&loopback_data.cdev);
+	pr_debug("device_destroy!\n");
+	class_destroy(loopback_data.class);
+	pr_debug("class_destroy!\n");
+
+	/* Release the chrdev region acquired in loopback_init() */
+	unregister_chrdev_region(MKDEV(loopback_data.dev_major, 0), MAX_DEV);
+}
+
+module_init(loopback_init);
+module_exit(loopback_exit);
diff --git a/virtio_loopback_driver.h b/virtio_loopback_driver.h
new file mode 100644
index 0000000..3e02222
--- /dev/null
+++ b/virtio_loopback_driver.h
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Virtio loopback device driver
+ *
+ * Copyright 2022-2024 Virtual Open Systems SAS.
+ *
+ * Authors:
+ * Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
+ * Anna Panagopoulou <anna@virtualopensystems.com>
+ * Alvise Rigo <a.rigo@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LOOPBACK_H__
+#define __LOOPBACK_H__
+
+#define DRIVER "LOOPBACK"
+
+#include <linux/cdev.h>
+#include <linux/eventfd.h>
+#include <linux/fdtable.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/version.h>
+#include <linux/completion.h>
+
+/* MMIO includes */
+#include <linux/acpi.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_mmio.h>
+#include <linux/virtio_ring.h>
+
+#include <linux/kernel.h>
+#include <linux/pid.h>
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+#include <linux/kthread.h>
+
+/* mmap includes */
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+#include <linux/pagemap.h>
+#include <linux/delay.h>
+
+/* Max number of char device minors exposed by this driver */
+#define MAX_DEV 1
+/* Max number of probed virtio-loopback platform devices */
+#define MAX_PDEV 100
+#define PDEV_TYPES 2
+
+/* Define mmap elements limit (entries in share_mmap_list) */
+#define MMAP_LIMIT 200
+
+/*
+ * The alignment to use between consumer and producer parts of vring.
+ * Currently hardcoded to the page size.
+ */
+#define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE
+
+/* Map a struct virtio_device pointer back to its enclosing loopback device */
+#define to_virtio_loopback_device(ptr) \
+	container_of(ptr, struct virtio_loopback_device, vdev)
+
+/* mmap functionality */
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+/*
+ * IOCTL defines.
+ *
+ * Fix: EFD_INIT previously encoded sizeof(efd_data), but this header only
+ * declares "struct efd_data" (no typedef), so the macro failed to compile
+ * when expanded. Use sizeof(struct efd_data), matching the style of the
+ * other commands; the encoded ioctl number is the same value a typedef
+ * would have produced, so the user-space ABI is unchanged.
+ *
+ * NOTE(review): "IRQ" and "WAKEUP" are very generic macro names and risk
+ * colliding with other kernel headers — consider a LOOPBACK_ prefix.
+ */
+#define EFD_INIT _IOC(_IOC_WRITE, 'k', 1, sizeof(struct efd_data))
+#define WAKEUP _IOC(_IOC_WRITE, 'k', 2, 0)
+#define START_LOOPBACK _IOC(_IOC_WRITE, 'k', 3, sizeof(struct virtio_device_info_struct))
+#define IRQ _IOC(_IOC_WRITE, 'k', 4, sizeof(int))
+#define SHARE_VQS _IOC(_IOC_WRITE, 'k', 5, sizeof(uint32_t))
+#define SHARE_COM_STRUCT _IOC(_IOC_WRITE, 'k', 6, 0)
+#define SHARE_VQS_NOTIF _IOC(_IOC_WRITE, 'k', 7, sizeof(struct vq_notifier))
+
+/* Data structures */
+/*
+ * Device identity and scheduling parameters handed in from user space
+ * via the START_LOOPBACK ioctl (see virtio_device_info_struct usage in
+ * struct file_priv_data below).
+ */
+struct virtio_device_info_struct {
+	unsigned long magic;
+	unsigned long version;
+	unsigned long device_id;
+	unsigned long vendor;
+	/* True when notification-priority scheduling is requested */
+	bool priority_enabled;
+	/* Initial value for device_data.notif_credits */
+	unsigned long init_notif_credits;
+	unsigned long priority_group;
+};
+/*
+ * One driver<->adapter transaction record (shared via SHARE_COM_STRUCT,
+ * presumably — confirm against the mmap path).
+ */
+struct virtio_neg {
+	uint64_t notification;
+	uint64_t data;
+	uint64_t size;
+	/* Direction flag: read vs. write access */
+	bool read;
+	/* Completion flag, polled/set atomically by the two sides */
+	atomic_t done;
+};
+/* Bookkeeping for one mmap'ed region shared with user space */
+struct share_mmap {
+	uint64_t pfn;
+	/* User-space start address of the mapping */
+	uint64_t vm_start;
+	uint32_t size;
+	uint32_t uid;
+	struct page *page;
+};
+/* Per-open-file mmap state (see struct file_priv_data) */
+struct mmap_data {
+	/* Next free slot in share_mmap_list */
+	int mmap_index;
+	/* Set by SHARE_COM_STRUCT: next mmap shares the comm struct */
+	bool share_communication_struct;
+	/* Set by SHARE_VQS: next mmap shares a virtqueue */
+	bool share_vqs;
+	struct share_mmap share_mmap_list[MMAP_LIMIT];
+	int cur_ram_idx;
+	/* Page-fault counter — TODO confirm: diagnostics only? */
+	uint64_t sum_pgfaults;
+};
+/* Payload of the SHARE_VQS_NOTIF ioctl: eventfd for one virtqueue */
+struct vq_notifier {
+	uint32_t vq_index;
+	int notifier_fd;
+	int pid;
+};
+/* vq related data */
+struct vq_data {
+	uint32_t vq_index;
+	/* Per-virtqueue PFNs (up to 16 queues) */
+	uint64_t vq_pfns[16];
+	uint64_t vq_pfn;
+	/* Per-virtqueue eventfd contexts registered via SHARE_VQS_NOTIF */
+	struct eventfd_ctx *vq_notifiers[16];
+	bool vq_notifiers_enabled;
+};
+
+/* Data describing each device private status */
+struct device_data {
+	/* Info needed for adapter ops */
+	struct mmap_info *info;
+	/* Waitqueue for the adapter */
+	wait_queue_head_t wq;
+	/* Serializes read/write transactions with the adapter */
+	struct mutex read_write_lock;
+	/* Eventfd used by the adapter to ack a read/write */
+	struct eventfd_ctx *efd_ctx;
+	/*
+	 * If this variable is true then read/write should wait
+	 * the adapter to unlock this operation by sending an
+	 * eventfd. If it's equal to "false" then the operation
+	 * does not wait for adapter's confirmation.
+	 */
+	bool valid_eventfd;
+	/* vq data */
+	struct vq_data vq_data;
+
+	/* virtio device data (as supplied by START_LOOPBACK) */
+	struct virtio_device_info_struct *vdev_data;
+	/* Priority-scheduling knobs, mirrored from vdev_data */
+	bool priority_enabled;
+	uint32_t priority_group;
+	/* Remaining notification credits for this device */
+	atomic_t notif_credits;
+	/* Outstanding notifications / interrupts for this device */
+	atomic_t avail_notifs;
+	atomic_t avail_inters;
+	/* Timestamp of last service — units unclear, TODO confirm */
+	uint64_t served_timestamp;
+};
+
+/* Data describing each entry of the driver */
+struct loopback_devices_array {
+	/* Array of probed devices */
+	struct virtio_loopback_device *devices[MAX_PDEV];
+	/* list of the devices */
+	struct list_head virtio_devices_list;
+	/* Number of available devices (starts at 1, see loopback_init) */
+	atomic_t device_num;
+	/* Registration completion, one per possible platform device */
+	struct completion reg_vl_dev_completion[MAX_PDEV];
+	/* Counter for all devices pending notifications */
+	atomic_t highest_active_prior_notifs;
+	/* Global outstanding notification / interrupt counts */
+	atomic_t pending_notifs;
+	atomic_t pending_inters;
+	/* Woken when work arrives for the scheduler thread */
+	wait_queue_head_t wq_notifs_inters;
+
+	/* Spin lock for removing the device */
+	spinlock_t running_lock;
+};
+
+/* Data concealed in the file private pointer */
+struct file_priv_data {
+	/* Device needed data */
+	struct device_data *dev_data;
+	/* mmap needed data */
+	struct mmap_data *mm_data;
+	/* Device info (filled by the START_LOOPBACK ioctl) */
+	struct virtio_device_info_struct device_info;
+	/* The vl_dev pointer for the irq */
+	struct virtio_loopback_device *vl_dev_irq;
+};
+
+/*
+ * One virtio-loopback transport instance: wraps a struct virtio_device
+ * (retrieve with to_virtio_loopback_device()) around a platform device.
+ */
+struct virtio_loopback_device {
+	struct virtio_device vdev;
+	struct platform_device *pdev;
+	/* Corresponding data pointer */
+	struct device_data *data;
+
+	/* Status: -1 not initialized, 0 running, 1 paused */
+	int status;
+
+	/* MMIO-style register window backing the virtio config space */
+	void __iomem *base;
+	unsigned long version;
+
+	/* A list of queues so we can dispatch IRQs */
+	spinlock_t lock;
+	struct list_head virtqueues;
+
+	/* Define workqueue for notifications */
+	struct workqueue_struct *notify_workqueue;
+
+	/* Notify list and work struct (notify_q_lock guards notify_list) */
+	spinlock_t notify_q_lock;
+	struct list_head notify_list;
+	struct work_struct notify_work;
+
+	/* Notification waitqueue */
+	wait_queue_head_t wq_notifs_inters;
+};
+
+struct virtio_loopback_vq_info {
+	/* the actual virtqueue */
+	struct virtqueue *vq;
+	/* the list node for the virtqueues list */
+	struct list_head node;
+};
+
+/* RCU-protected entry in virtio_devices_list (see note_dev_deletion) */
+struct virtio_loopback_device_node {
+	/* index identifying the device in this node */
+	uint32_t vq_index;
+	/* Set when the device is being torn down */
+	atomic_t is_deleted;
+	/* the list node for the virtqueues list */
+	struct list_head node;
+	/* For deferred freeing after RCU grace period */
+	struct rcu_head rcu;
+};
+
+/* Notify data: one queued notification on a device's notify_list */
+struct notify_data {
+	uint32_t index;
+	struct list_head list;
+};
+
+/* Interrupt data: one queued interrupt */
+struct interrupt_data {
+	uint32_t index;
+	struct list_head list;
+};
+
+/* Shared data structure between driver and user-space application */
+struct mmap_info {
+	/* Kernel buffer exposed to user space via mmap */
+	void *data;
+	/* Reference count for the mapping */
+	int reference;
+};
+
+/*
+ * This structure holds the eventfds shared between the driver
+ * and the user-space application (payload of the EFD_INIT ioctl).
+ */
+struct efd_data {
+	int efd[2];
+	/* PID of the user-space adapter owning the eventfds */
+	int pid;
+};
+
+/* device data holder, this structure may be extended to hold additional data */
+struct loopback_device_data {
+	/*device Major number */
+	int dev_major;
+	/* sysfs class structure */
+	struct class *class;
+	/* The single char device (MAX_DEV == 1) */
+	struct cdev cdev;
+	/* Scheduler thread started by loopback_init, stopped on exit */
+	struct task_struct *notif_thread;
+};
+
+/* Global variables */
+/* The platform driver registered/unregistered in module init/exit */
+extern struct platform_driver virtio_loopback_driver;
+
+/* Global functions */
+/* Record a probed device at slot `id` in loopback_devices.devices */
+int insert_entry_data(struct virtio_loopback_device *vl_dev, int id);
+/* Register the wrapped virtio device with the virtio core */
+int loopback_register_virtio_dev(struct virtio_loopback_device *vl_dev);
+/* Deliver interrupt `irq` to the device; returns success/failure */
+bool register_interrupt(struct virtio_loopback_device *vl_dev, int irq);
+/* Main body of the notification scheduler kthread (see loopback_exit) */
+int notif_sched_func(void *data);
+/* Add the device at `array_dev_pos` to the active device list */
+bool add_dev_to_list(uint32_t array_dev_pos);
+/* Mark a device's list node deleted (RCU-deferred free) */
+void note_dev_deletion(struct virtio_loopback_device *vl_dev);
+extern struct loopback_devices_array loopback_devices;
+extern struct loopback_device_data loopback_data;
+
+#endif /* __LOOPBACK_H__ */