From 1120c97b47e31366e63a20625baea914f68525ac Mon Sep 17 00:00:00 2001 From: Shengwen Cheng Date: Sun, 16 Jun 2024 15:48:14 +0800 Subject: [PATCH] Implement virtio-gpu device --- Makefile | 31 ++ device.h | 56 ++- feature.h | 5 + list.h | 84 ++++ main.c | 37 ++ minimal.dts | 11 +- virtio-gpu.c | 1106 ++++++++++++++++++++++++++++++++++++++++++++++++++ virtio.h | 61 +++ window.c | 159 ++++++++ window.h | 22 + 10 files changed, 1570 insertions(+), 2 deletions(-) create mode 100644 list.h create mode 100644 virtio-gpu.c create mode 100644 window.c create mode 100644 window.h diff --git a/Makefile b/Makefile index f3ce149..90387d3 100644 --- a/Makefile +++ b/Makefile @@ -3,11 +3,14 @@ include mk/common.mk CC ?= gcc CFLAGS := -O2 -g -Wall -Wextra CFLAGS += -include common.h +LDFLAGS := OBJS_EXTRA := # command line option OPTS := +LDFLAGS += -lpthread + # virtio-blk ENABLE_VIRTIOBLK ?= 1 $(call set-feature, VIRTIOBLK) @@ -36,6 +39,34 @@ ifeq ($(call has, VIRTIONET), 1) OBJS_EXTRA += virtio-net.o endif +# virtio-gpu +ENABLE_VIRTIOGPU ?= 1 +ifneq ($(UNAME_S),Linux) + ENABLE_VIRTIOGPU := 0 +endif + +# SDL2 +ENABLE_SDL ?= 1 +ifeq (, $(shell which sdl2-config)) + $(warning No sdl2-config in $$PATH. Check SDL2 installation in advance) + override ENABLE_SDL := 0 +endif + +ifeq ($(ENABLE_SDL),0) + override ENABLE_VIRTIOGPU := 0 +endif + +ifeq ($(ENABLE_SDL),1) +ifeq ($(ENABLE_VIRTIOGPU),1) + CFLAGS += $(shell sdl2-config --cflags) + LDFLAGS += $(shell sdl2-config --libs) + OBJS_EXTRA += window.o + OBJS_EXTRA += virtio-gpu.o +endif +endif + +$(call set-feature, VIRTIOGPU) + BIN = semu all: $(BIN) minimal.dtb diff --git a/device.h b/device.h index 754065f..04de6db 100644 --- a/device.h +++ b/device.h @@ -7,7 +7,7 @@ #define RAM_SIZE (512 * 1024 * 1024) #define DTB_SIZE (1 * 1024 * 1024) -#define INITRD_SIZE (8 * 1024 * 1024) +#define INITRD_SIZE (65 * 1024 * 1024) void ram_read(vm_t *core, uint32_t *mem, @@ -171,6 +171,57 @@ void virtio_blk_write(vm_t *vm, uint32_t *virtio_blk_init(virtio_blk_state_t *vblk, char *disk_file); #endif /* SEMU_HAS(VIRTIOBLK) */ +/* VirtIO-GPU */ + +#if SEMU_HAS(VIRTIOGPU) + +#define IRQ_VGPU 4 +#define IRQ_VGPU_BIT (1 << IRQ_VGPU) + +typedef struct { + uint32_t QueueNum; + uint32_t QueueDesc; + uint32_t QueueAvail; + uint32_t QueueUsed; + uint16_t last_avail; + bool ready; +} virtio_gpu_queue_t; + +typedef struct { + /* feature negotiation */ + uint32_t DeviceFeaturesSel; + uint32_t DriverFeatures; + uint32_t DriverFeaturesSel; + /* queue config */ + uint32_t QueueSel; + virtio_gpu_queue_t queues[2]; + /* status */ + uint32_t Status; + uint32_t InterruptStatus; + /* supplied by environment */ + uint32_t *ram; + /* implementation-specific */ + void *priv; +} virtio_gpu_state_t; + +void virtio_gpu_read(vm_t *vm, + virtio_gpu_state_t *vgpu, + uint32_t addr, + uint8_t width, + uint32_t *value); + +void virtio_gpu_write(vm_t *vm, + virtio_gpu_state_t *vgpu, + uint32_t addr, + uint8_t width, + uint32_t value); + +void virtio_gpu_init(virtio_gpu_state_t *vgpu); +void virtio_gpu_add_scanout(virtio_gpu_state_t *vgpu, + uint32_t width, + uint32_t height); +#endif /* SEMU_HAS(VIRTIOGPU) */ + /* memory mapping */ typedef struct { @@ -184,6 +235,9 @@ typedef struct { #endif #if SEMU_HAS(VIRTIOBLK) virtio_blk_state_t vblk; +#endif +#if SEMU_HAS(VIRTIOGPU) + virtio_gpu_state_t vgpu; #endif uint64_t timer; } emu_state_t; diff --git a/feature.h b/feature.h index e17718a..e9c8435 100644 --- a/feature.h +++ b/feature.h @@ -12,5 +12,10 @@ #define SEMU_FEATUREVIRTIONET 1 #endif +/* virtio-gpu */ 
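Note on the feature guard added here: SEMU_HAS(x) is defined at the bottom of feature.h as SEMU_FEATURE_##x, so the test resolves by token pasting and the new macro must be spelled with the underscore, SEMU_FEATURE_VIRTIOGPU (the underscore appears to have been dropped by formatting in this hunk, as in the existing SEMU_FEATURE_VIRTIONET line). A minimal illustration, not part of the patch:

    /* Illustration only: SEMU_HAS() relies on token pasting, so the feature
     * macro must carry the underscore for the guard to evaluate to 1. */
    #define SEMU_FEATURE_VIRTIOGPU 1
    #define SEMU_HAS(x) SEMU_FEATURE_##x

    #if SEMU_HAS(VIRTIOGPU) /* pastes to SEMU_FEATURE_VIRTIOGPU, which is 1 */
    /* virtio-gpu support is compiled in */
    #endif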
+#ifndef SEMU_FEATUREVIRTIOGPU +#define SEMU_FEATUREVIRTIOGPU 1 +#endif + /* Feature test macro */ #define SEMU_HAS(x) SEMU_FEATURE_##x diff --git a/list.h b/list.h new file mode 100644 index 0000000..ed7663c --- /dev/null +++ b/list.h @@ -0,0 +1,84 @@ +#pragma once + +#include + +#define container_of(ptr, type, member) \ + ((type *) ((void *) ptr - offsetof(type, member))) + +#define list_entry(ptr, type, member) container_of(ptr, type, member) + +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) + +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) + +#define list_entry_is_head(pos, head, member) (&pos->member == (head)) + +#define list_for_each(pos, head) \ + for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next) + +#define list_for_each_safe(pos, _next, head) \ + for (pos = (head)->next, _next = (pos)->next; (pos) != (head); \ + (pos) = _next, _next = (pos)->next) + +#define list_for_each_entry(pos, head, member) \ + for (pos = list_first_entry(head, __typeof__(*pos), member); \ + &pos->member != (head); pos = list_next_entry(pos, member)) + +#define LIST_HEAD_INIT(name) \ + { \ + .prev = (&name), .next = (&name) \ + } + +#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name) + +struct list_head { + struct list_head *next, *prev; +}; + +static inline void INIT_LIST_HEAD(struct list_head *list) +{ + list->prev = list; + list->next = list; +} + +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + +static int list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +static inline void list_add(struct list_head *new, struct list_head *list) +{ + new->prev = list->prev; + new->next = list; + list->prev->next = new; + list->prev = new; +} + +static inline void list_del(struct list_head *list) +{ + list->next->prev = list->prev; + list->prev->next = list->next; +} + +static void list_del_init(struct list_head *entry) +{ + list_del(entry); + INIT_LIST_HEAD(entry); +} + +static inline void list_move(struct list_head *list, struct list_head *new_head) +{ + list_del(list); + list_add(new_head, list); +} diff --git a/main.c b/main.c index ba1b6d0..5613eb4 100644 --- a/main.c +++ b/main.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -11,6 +12,7 @@ #include "device.h" #include "riscv.h" #include "riscv_private.h" +#include "window.h" #define PRIV(x) ((emu_state_t *) x->priv) @@ -72,6 +74,18 @@ static void emu_update_vblk_interrupts(vm_t *vm) } #endif +#if SEMU_HAS(VIRTIOGPU) +static void emu_update_vgpu_interrupts(vm_t *vm) +{ + emu_state_t *data = (emu_state_t *) vm->priv; + if (data->vgpu.InterruptStatus) + data->plic.active |= IRQ_VGPU_BIT; + else + data->plic.active &= ~IRQ_VGPU_BIT; + plic_update_interrupts(vm, &data->plic); +} +#endif + static void mem_load(vm_t *vm, uint32_t addr, uint8_t width, uint32_t *value) { emu_state_t *data = PRIV(vm); @@ -104,6 +118,12 @@ static void mem_load(vm_t *vm, uint32_t addr, uint8_t width, uint32_t *value) virtio_blk_read(vm, &data->vblk, addr & 0xFFFFF, width, value); emu_update_vblk_interrupts(vm); return; +#endif +#if SEMU_HAS(VIRTIOGPU) + case 0x43: /* virtio-gpu */ + virtio_gpu_read(vm, &data->vgpu, addr & 0xFFFFF, width, value); + emu_update_vgpu_interrupts(vm); + return; #endif } } @@ -142,6 +162,12 @@ static void 
mem_store(vm_t *vm, uint32_t addr, uint8_t width, uint32_t value) virtio_blk_write(vm, &data->vblk, addr & 0xFFFFF, width, value); emu_update_vblk_interrupts(vm); return; +#endif +#if SEMU_HAS(VIRTIOGPU) + case 0x43: /* virtio-gpu */ + virtio_gpu_write(vm, &data->vgpu, addr & 0xFFFFF, width, value); + emu_update_vgpu_interrupts(vm); + return; #endif } } @@ -423,6 +449,12 @@ static int semu_start(int argc, char **argv) emu.vblk.ram = emu.ram; emu.disk = virtio_blk_init(&(emu.vblk), disk_file); #endif +#if SEMU_HAS(VIRTIOGPU) + emu.vgpu.ram = emu.ram; + virtio_gpu_init(&(emu.vgpu)); + virtio_gpu_add_scanout(&(emu.vgpu), 1024, 768); + window_init(); +#endif /* Emulate */ uint32_t peripheral_update_ctr = 0; @@ -444,6 +476,11 @@ static int semu_start(int argc, char **argv) if (emu.vblk.InterruptStatus) emu_update_vblk_interrupts(&vm); #endif + +#if SEMU_HAS(VIRTIOGPU) + if (emu.vgpu.InterruptStatus) + emu_update_vgpu_interrupts(&vm); +#endif } if (vm.insn_count > emu.timer) diff --git a/minimal.dts b/minimal.dts index 0e631d3..3a80113 100644 --- a/minimal.dts +++ b/minimal.dts @@ -12,7 +12,8 @@ chosen { bootargs = "earlycon console=ttyS0"; stdout-path = "serial0"; - linux,initrd-start = <0x1f700000>; /* @403 MiB (503 * 1024 * 1024) */ + /* Reserve 65MiB for initrd image */ + linux,initrd-start = <0x1be00000>; /* @406 MiB (446 * 1024 * 1024) */ linux,initrd-end = <0x1fefffff>; /* @511 MiB (511 * 1024 * 1024 - 1) */ }; @@ -81,5 +82,13 @@ interrupts = <3>; }; #endif + +#if SEMU_FEATURE_VIRTIOGPU + gpu0: virtio@4300000 { + compatible = "virtio,mmio"; + reg = <0x4300000 0x200>; + interrupts = <4>; + }; +#endif }; }; diff --git a/virtio-gpu.c b/virtio-gpu.c new file mode 100644 index 0000000..a5fb533 --- /dev/null +++ b/virtio-gpu.c @@ -0,0 +1,1106 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "device.h" +#include "list.h" +#include "riscv.h" +#include "riscv_private.h" +#include "virtio.h" +#include "window.h" + +#define VIRTIO_F_VERSION_1 1 + +#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) +#define VIRTIO_GPU_F_EDID (1 << 1) +#define VIRTIO_GPU_FLAG_FENCE (1 << 0) + +#define VGPU_QUEUE_NUM_MAX 1024 +#define VGPU_QUEUE (vgpu->queues[vgpu->QueueSel]) + +#define PRIV(x) ((struct vgpu_scanout_info *) x->priv) + +#define STRIDE_SIZE 4096 + +struct vgpu_scanout_info { + uint32_t width; + uint32_t height; + uint32_t enabled; +}; + +struct vgpu_resource_2d { + /* Public: */ + uint32_t scanout_id; + uint32_t format; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t bits_per_pixel; + uint32_t *image; + /* Private: */ + uint32_t resource_id; + size_t page_cnt; + struct iovec *iovec; + struct list_head list; +}; + +struct vgpu_config { + uint32_t events_read; + uint32_t events_clear; + uint32_t num_scanouts; + uint32_t num_capsets; +} __attribute__((packed)); + +struct vgpu_ctrl_hdr { + uint32_t type; + uint32_t flags; + uint64_t fence_id; + uint32_t ctx_id; + uint8_t ring_idx; + uint8_t padding[3]; +} __attribute__((packed)); + +struct vgpu_rect { + uint32_t x; + uint32_t y; + uint32_t width; + uint32_t height; +} __attribute__((packed)); + +struct vgpu_resp_disp_info { + struct vgpu_ctrl_hdr hdr; + struct virtio_gpu_display_one { + struct vgpu_rect r; + uint32_t enabled; + uint32_t flags; + } pmodes[VIRTIO_GPU_MAX_SCANOUTS]; +} __attribute__((packed)); + +struct vgpu_res_create_2d { + struct vgpu_ctrl_hdr hdr; + uint32_t resource_id; + uint32_t format; + uint32_t width; + uint32_t height; +} 
__attribute__((packed)); + +struct vgpu_res_unref { + struct vgpu_ctrl_hdr hdr; + uint32_t resource_id; + uint32_t padding; +} __attribute__((packed)); + +struct vgpu_set_scanout { + struct vgpu_ctrl_hdr hdr; + struct vgpu_rect r; + uint32_t scanout_id; + uint32_t resource_id; +} __attribute__((packed)); + +struct vgpu_res_flush { + struct vgpu_ctrl_hdr hdr; + struct vgpu_rect r; + uint32_t resource_id; + uint32_t padding; +} __attribute__((packed)); + +struct vgpu_trans_to_host_2d { + struct vgpu_ctrl_hdr hdr; + struct vgpu_rect r; + uint64_t offset; + uint32_t resource_id; + uint32_t padding; +} __attribute__((packed)); + +struct vgpu_res_attach_backing { + struct vgpu_ctrl_hdr hdr; + uint32_t resource_id; + uint32_t nr_entries; +} __attribute__((packed)); + +struct vgpu_mem_entry { + uint64_t addr; + uint32_t length; + uint32_t padding; +} __attribute__((packed)); + +struct vgpu_resp_edid { + struct vgpu_ctrl_hdr hdr; + uint32_t size; + uint32_t padding; + char edid[1024]; +} __attribute__((packed)); + +struct vgpu_get_capset_info { + struct vgpu_ctrl_hdr hdr; + uint32_t capset_index; + uint32_t padding; +} __attribute__((packed)); + +struct vgpu_resp_capset_info { + struct vgpu_ctrl_hdr hdr; + uint32_t capset_id; + uint32_t capset_max_version; + uint32_t capset_max_size; + uint32_t padding; +} __attribute__((packed)); + +static struct vgpu_config vgpu_configs; +static LIST_HEAD(vgpu_res_2d_list); + +static inline void *vgpu_mem_host_to_guest(virtio_gpu_state_t *vgpu, + uint32_t addr) +{ + return (void *) ((uintptr_t) vgpu->ram + addr); +} + +static struct vgpu_resource_2d *create_vgpu_resource_2d(int resource_id) +{ + struct vgpu_resource_2d *res = malloc(sizeof(struct vgpu_resource_2d)); + if (!res) + return NULL; + + res->resource_id = resource_id; + list_add(&res->list, &vgpu_res_2d_list); + return res; +} + +static struct vgpu_resource_2d *acquire_vgpu_resource_2d(uint32_t resource_id) +{ + struct vgpu_resource_2d *res_2d; + list_for_each_entry (res_2d, &vgpu_res_2d_list, list) { + if (res_2d->resource_id == resource_id) + return res_2d; + } + + return NULL; +} + +static int destroy_vgpu_resource_2d(uint32_t resource_id) +{ + struct vgpu_resource_2d *res_2d = acquire_vgpu_resource_2d(resource_id); + + /* Failed to find the resource */ + if (!res_2d) + return -1; + + window_lock(resource_id); + + /* Release the resource */ + free(res_2d->image); + list_del(&res_2d->list); + free(res_2d->iovec); + free(res_2d); + + window_unlock(resource_id); + + return 0; +} + +static void virtio_gpu_set_fail(virtio_gpu_state_t *vgpu) +{ + vgpu->Status |= VIRTIO_STATUS__DEVICE_NEEDS_RESET; + if (vgpu->Status & VIRTIO_STATUS__DRIVER_OK) + vgpu->InterruptStatus |= VIRTIO_INT__CONF_CHANGE; +} + +static inline uint32_t vgpu_preprocess(virtio_gpu_state_t *vgpu, uint32_t addr) +{ + if ((addr >= RAM_SIZE) || (addr & 0b11)) + return virtio_gpu_set_fail(vgpu), 0; + + return addr >> 2; +} + +static void virtio_gpu_update_status(virtio_gpu_state_t *vgpu, uint32_t status) +{ + vgpu->Status |= status; + if (status) + return; + + /* Reset */ + uint32_t *ram = vgpu->ram; + void *priv = vgpu->priv; + uint32_t scanout_num = vgpu_configs.num_scanouts; + memset(vgpu->priv, 0, sizeof(*vgpu->priv)); + memset(vgpu, 0, sizeof(*vgpu)); + vgpu->ram = ram; + vgpu->priv = priv; + vgpu_configs.num_scanouts = scanout_num; + + /* Release all 2D resources */ + struct list_head *curr, *next; + list_for_each_safe (curr, next, &vgpu_res_2d_list) { + struct vgpu_resource_2d *res_2d = + list_entry(curr, struct vgpu_resource_2d, list); 
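The 2D resources created by the guest driver are tracked on vgpu_res_2d_list with the intrusive list helpers from list.h: the list_head node is embedded in struct vgpu_resource_2d and container_of() recovers the enclosing structure, which is what the reset loop here relies on. A minimal, self-contained usage sketch (struct item and demo_list are illustrative names, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include "list.h"

    struct item {
        int id;
        struct list_head list; /* embedded node; container_of() recovers struct item */
    };

    static LIST_HEAD(demo_list);

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct item *it = malloc(sizeof(*it));
            it->id = i;
            list_add(&it->list, &demo_list); /* same pattern as create_vgpu_resource_2d() */
        }

        struct item *pos;
        list_for_each_entry (pos, &demo_list, list) /* as in acquire_vgpu_resource_2d() */
            printf("item %d\n", pos->id);

        struct list_head *curr, *next;
        list_for_each_safe (curr, next, &demo_list) { /* safe deletion, as in the reset path */
            struct item *it = list_entry(curr, struct item, list);
            list_del(&it->list);
            free(it);
        }
        return 0;
    }

Because the node lives inside the element, traversal and removal need no extra allocation, which is why the reset path can free each resource while walking the list with list_for_each_safe().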
+ + list_del(&res_2d->list); + free(res_2d->image); + free(res_2d->iovec); + free(res_2d); + } +} + +static void virtio_gpu_get_display_info_handler(virtio_gpu_state_t *vgpu, + struct virtq_desc *vq_desc, + uint32_t *plen) +{ + /* Write display infomation */ + struct vgpu_resp_disp_info *response = + vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr); + + memset(response, 0, sizeof(*response)); + response->hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; + + int scanout_num = vgpu_configs.num_scanouts; + for (int i = 0; i < scanout_num; i++) { + response->pmodes[i].r.width = PRIV(vgpu)[i].width; + response->pmodes[i].r.height = PRIV(vgpu)[i].height; + response->pmodes[i].enabled = PRIV(vgpu)[i].enabled; + } + + /* Update write length */ + *plen = sizeof(*response); +} + +static void virtio_gpu_resource_create_2d_handler(virtio_gpu_state_t *vgpu, + struct virtq_desc *vq_desc, + uint32_t *plen) +{ + /* Read request */ + struct vgpu_res_create_2d *request = + vgpu_mem_host_to_guest(vgpu, vq_desc[0].addr); + + /* Create 2D resource */ + struct vgpu_resource_2d *res_2d = + create_vgpu_resource_2d(request->resource_id); + + if (!res_2d) { + fprintf(stderr, "%s(): Failed to allocate 2D resource\n", __func__); + virtio_gpu_set_fail(vgpu); + return; + } + + /* Select image formats */ + uint32_t bits_per_pixel; + + switch (request->format) { + case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: + bits_per_pixel = 32; + break; + case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: + bits_per_pixel = 32; + break; + case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: + bits_per_pixel = 32; + break; + case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: + bits_per_pixel = 32; + break; + case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: + bits_per_pixel = 32; + break; + case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: + bits_per_pixel = 32; + break; + case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: + bits_per_pixel = 32; + break; + case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: + bits_per_pixel = 32; + break; + default: + fprintf(stderr, "%s(): Unsupported format %d\n", __func__, + request->format); + virtio_gpu_set_fail(vgpu); + return; + } + + uint32_t bytes_per_pixel = bits_per_pixel / 8; + + /* Set 2D resource */ + res_2d->width = request->width; + res_2d->height = request->height; + res_2d->format = request->format; + res_2d->bits_per_pixel = bits_per_pixel; + res_2d->stride = STRIDE_SIZE; + res_2d->image = malloc(bytes_per_pixel * (request->width + res_2d->stride) * + request->height); + + /* Failed to create image buffer */ + if (!res_2d->image) { + fprintf(stderr, "%s(): Failed to allocate image buffer\n", __func__); + virtio_gpu_set_fail(vgpu); + return; + } + + /* Write response */ + struct vgpu_ctrl_hdr *response = + vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr); + + memset(response, 0, sizeof(*response)); + response->type = VIRTIO_GPU_RESP_OK_NODATA; + + /* Return write length */ + *plen = sizeof(*response); +} + +static void virtio_gpu_cmd_resource_unref_handler(virtio_gpu_state_t *vgpu, + struct virtq_desc *vq_desc, + uint32_t *plen) +{ + /* Read request */ + struct vgpu_res_unref *request = + vgpu_mem_host_to_guest(vgpu, vq_desc[0].addr); + + /* Destroy 2D resource */ + int result = destroy_vgpu_resource_2d(request->resource_id); + + if (result != 0) { + fprintf(stderr, "%s(): Failed to destroy 2D resource\n", __func__); + virtio_gpu_set_fail(vgpu); + return; + } + + /* Write response */ + struct vgpu_ctrl_hdr *response = + vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr); + + memset(response, 0, sizeof(struct vgpu_ctrl_hdr)); + response->type = VIRTIO_GPU_RESP_OK_NODATA; + + /* Return write 
length */
+    *plen = sizeof(*response);
+}
+
+static uint8_t virtio_gpu_generate_edid_checksum(uint8_t *edid, size_t size)
+{
+    uint8_t sum = 0;
+
+    for (size_t i = 0; i < size; i++)
+        sum += edid[i];
+
+    return 0x100 - sum;
+}
+
+static void virtio_gpu_generate_edid(uint8_t *edid, int width_cm, int height_cm)
+{
+    /* Check:
+     * "VESA ENHANCED EXTENDED DISPLAY IDENTIFICATION DATA STANDARD"
+     * (Defines EDID Structure Version 1, Revision 4)
+     */
+
+    memset(edid, 0, 128);
+
+    /* EDID header */
+    edid[0] = 0x00;
+    edid[1] = 0xff;
+    edid[2] = 0xff;
+    edid[3] = 0xff;
+    edid[4] = 0xff;
+    edid[5] = 0xff;
+    edid[6] = 0xff;
+    edid[7] = 0x00;
+
+    /* ISA (Industry Standard Architecture)
+     * Plug and Play Device Identifier (PNPID) */
+    char manufacture[3] = {'T', 'W', 'N'};
+
+    /* Vendor ID uses 2 bytes to store 3 characters, where 'A' starts as 1 */
+    uint16_t vendor_id = ((((manufacture[0] - '@') & 0b11111) << 10) |
+                          (((manufacture[1] - '@') & 0b11111) << 5) |
+                          (((manufacture[2] - '@') & 0b11111) << 0));
+    /* Convert vendor ID to big-endian order */
+    edid[8] = vendor_id >> 8;
+    edid[9] = vendor_id & 0xff;
+
+    /* Product code (all zeros if unused) */
+    memset(&edid[10], 0, 6);
+
+    /* Week of manufacture (1-54) */
+    edid[16] = 0;
+    /* Year of manufacture (starts from 1990) */
+    edid[17] = 2023 - 1990;
+
+    /* EDID 1.4 (Version 1, Revision 4) */
+    edid[18] = 1; /* Version number */
+    edid[19] = 4; /* Revision number */
+
+    /* Video input definition */
+    uint8_t signal_interface = 0b1 << 7;  /* digital */
+    uint8_t color_bit_depth = 0b010 << 4; /* 8 bits per primary color */
+    uint8_t interface_type = 0b101;       /* DisplayPort is supported */
+    edid[20] = signal_interface | color_bit_depth | interface_type;
+
+    /* Screen size or aspect ratio */
+    edid[21] = width_cm;  /* Horizontal screen size (1cm - 255cm) */
+    edid[22] = height_cm; /* Vertical screen size (1cm - 255cm) */
+
+    /* Gamma value */
+    edid[23] = 1; /* Assigned with the minimum value */
+
+    /* Feature support */
+    uint8_t power_management = 0 << 4; /* standby, suspend and active-off
+                                        * modes are not supported */
+    uint8_t color_type = 0 << 2; /* ignored as it is for the analog display */
+    uint8_t other_flags = 0b110; /* [2]: sRGB as default color space
+                                  * [1]: Preferred timing mode with native format
+                                  * [0]: Non-continuous frequency */
+    edid[24] = power_management | color_type | other_flags;
+
+    /* Established timings: these are the default timings defined by
+     * VESA. Each bit represents one configuration. For now, we enable the
+     * timing configuration of 1024x768@60Hz only */
+    edid[35] = 0b00000000;
+    edid[36] = 0b00001000;
+    edid[37] = 0b00000000;
+
+    /* Standard timings: 16 bytes of data from edid[38] to edid[53] provide
+     * additional timing configurations, 2 bytes each, defining the
+     * horizontal pixel count, aspect ratio, and refresh rate.
*/ + + /* Extension block count number */ + edid[126] = 0; /* No other extension blocks are defined */ + + /* Checksum of the first (and the only) extension block */ + edid[127] = virtio_gpu_generate_edid_checksum(edid, 127); +} + +static void virtio_gpu_get_edid_handler(virtio_gpu_state_t *vgpu, + struct virtq_desc *vq_desc, + uint32_t *plen) +{ + /* Generate the display EDID */ + struct vgpu_resp_edid edid = { + .hdr = {.type = VIRTIO_GPU_RESP_OK_EDID}, + .size = 128 /* One EDID extension block only */ + }; + virtio_gpu_generate_edid((uint8_t *) edid.edid, 0, 0); + + /* Write EDID response */ + struct vgpu_resp_edid *response = + vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr); + memcpy(response, &edid, sizeof(*response)); + + /* return write length */ + *plen = sizeof(*response); +} + +static void virtio_gpu_get_capset_handler(virtio_gpu_state_t *vgpu, + struct virtq_desc *vq_desc, + uint32_t *plen) +{ + /* Write capability set */ + struct vgpu_resp_capset_info *response = + vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr); + + memset(response, 0, sizeof(*response)); + response->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO; + response->capset_id = VIRTIO_GPU_CAPSET_VIRGL2; + response->capset_max_version = 1; + response->capset_max_size = 0; + + /* Update write length */ + *plen = sizeof(*response); +} + +static void virtio_gpu_cmd_set_scanout_handler(virtio_gpu_state_t *vgpu, + struct virtq_desc *vq_desc, + uint32_t *plen) +{ + /* Read request */ + struct vgpu_set_scanout *request = + vgpu_mem_host_to_guest(vgpu, vq_desc[0].addr); + + /* Acquire 2D resource */ + struct vgpu_resource_2d *res_2d = + acquire_vgpu_resource_2d(request->resource_id); + + /* Linux's virtio-gpu driver may send scanout command + * even if the resource does not exist */ + if (res_2d) + /* Set scanout ID to proper 2D resource */ + res_2d->scanout_id = request->scanout_id; + + /* Write response */ + struct vgpu_ctrl_hdr *response = + vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr); + + memset(response, 0, sizeof(*response)); + response->type = VIRTIO_GPU_RESP_OK_NODATA; + + /* Return write length */ + *plen = sizeof(*response); +} + +static void virtio_gpu_cmd_resource_flush_handler(virtio_gpu_state_t *vgpu, + struct virtq_desc *vq_desc, + uint32_t *plen) +{ + /* Read request */ + struct vgpu_res_flush *request = + vgpu_mem_host_to_guest(vgpu, vq_desc[0].addr); + + /* Acquire 2D resource */ + struct vgpu_resource_2d *res_2d = + acquire_vgpu_resource_2d(request->resource_id); + + /* Trigger display window rendering */ + window_render((struct gpu_resource *) res_2d); + + /* Write response */ + struct vgpu_ctrl_hdr *response = + vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr); + + memset(response, 0, sizeof(*response)); + response->type = VIRTIO_GPU_RESP_OK_NODATA; + + /* Return write length */ + *plen = sizeof(*response); +} + +static void virtio_gpu_cmd_transfer_to_host_2d_handler( + virtio_gpu_state_t *vgpu, + struct virtq_desc *vq_desc, + uint32_t *plen) +{ + /* Read request */ + struct vgpu_trans_to_host_2d *req = + vgpu_mem_host_to_guest(vgpu, vq_desc[0].addr); + + /* Acquire 2D resource */ + struct vgpu_resource_2d *res_2d = + acquire_vgpu_resource_2d(req->resource_id); + + if (!res_2d) { + fprintf(stderr, "%s(): Failed to find 2D resource\n", __func__); + virtio_gpu_set_fail(vgpu); + return; + } + + /* Check image boundary */ + if (req->r.x > res_2d->width || req->r.y > res_2d->height || + req->r.width > res_2d->width || req->r.height > res_2d->height || + req->r.x + req->r.width > res_2d->width || + req->r.y + 
req->r.height > res_2d->height) {
+        fprintf(stderr, "%s(): Invalid image size\n", __func__);
+        virtio_gpu_set_fail(vgpu);
+        return;
+    }
+
+    /* Transfer frame data from guest to host */
+    uint32_t stride = res_2d->stride;
+    uint32_t bpp = res_2d->bits_per_pixel / 8; /* Bytes per pixel */
+    uint32_t width =
+        (req->r.width < res_2d->width) ? req->r.width : res_2d->width;
+    uint32_t height =
+        (req->r.height < res_2d->height) ? req->r.height : res_2d->height;
+    void *img_data = (void *) res_2d->image;
+
+    for (uint32_t h = 0; h < height; h++) {
+        size_t src_offset = req->offset + stride * h;
+        size_t dest_offset = (req->r.y + h) * stride + (req->r.x * bpp);
+        void *dest = (void *) ((uintptr_t) img_data + dest_offset);
+        size_t done = 0;
+        size_t total = width * bpp;
+
+        for (uint32_t i = 0; i < res_2d->page_cnt; i++) {
+            /* Skip empty pages */
+            if (res_2d->iovec[i].iov_base == 0 || res_2d->iovec[i].iov_len == 0)
+                continue;
+
+            if (src_offset < res_2d->iovec[i].iov_len) {
+                /* The source offset is an offset into the image. The address
+                 * to copy from is the page base address plus that offset
+                 */
+                void *src = (void *) ((uintptr_t) res_2d->iovec[i].iov_base +
+                                      src_offset);
+
+                /* Take as much data as the current page can provide */
+                size_t remained = total - done;
+                size_t page_avail = res_2d->iovec[i].iov_len - src_offset;
+                size_t nbytes = (remained < page_avail) ? remained : page_avail;
+
+                /* Copy to the 2D resource buffer */
+                memcpy((void *) ((uintptr_t) dest + done), src, nbytes);
+
+                /* If there is still data left to read but the current page is
+                 * exhausted, continue reading from the beginning of the next
+                 * page, whose offset is 0 */
+                src_offset = 0;
+
+                /* Count the total received bytes so far */
+                done += nbytes;
+
+                /* Data transfer for the current scanline is complete */
+                if (done >= total)
+                    break;
+            } else {
+                /* Keep subtracting until reaching the page */
+                src_offset -= res_2d->iovec[i].iov_len;
+            }
+        }
+    }
+
+    /* Write response */
+    struct vgpu_ctrl_hdr *response =
+        vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr);
+
+    struct vgpu_ctrl_hdr res_no_data = {.type = VIRTIO_GPU_RESP_OK_NODATA};
+    memcpy(response, &res_no_data, sizeof(struct vgpu_ctrl_hdr));
+
+    /* Update write length */
+    *plen = sizeof(*response);
+}
+
+static void virtio_gpu_cmd_resource_attach_backing_handler(
+    virtio_gpu_state_t *vgpu,
+    struct virtq_desc *vq_desc,
+    uint32_t *plen)
+{
+    /* Read request */
+    struct vgpu_res_attach_backing *backing_info =
+        vgpu_mem_host_to_guest(vgpu, vq_desc[0].addr);
+    struct vgpu_mem_entry *pages =
+        vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr);
+
+    /* Acquire 2D resource */
+    struct vgpu_resource_2d *res_2d =
+        acquire_vgpu_resource_2d(backing_info->resource_id);
+
+    /* Attach the backing pages to the 2D resource */
+    res_2d->page_cnt = backing_info->nr_entries;
+    res_2d->iovec = malloc(sizeof(struct iovec) * backing_info->nr_entries);
+    struct vgpu_mem_entry *mem_entries = (struct vgpu_mem_entry *) pages;
+
+    for (size_t i = 0; i < backing_info->nr_entries; i++) {
+        /* Attach the address and length of the i-th page to the 2D resource */
+        res_2d->iovec[i].iov_base =
+            vgpu_mem_host_to_guest(vgpu, mem_entries[i].addr);
+        res_2d->iovec[i].iov_len = mem_entries[i].length;
+
+        /* Corrupted page address */
+        if (!res_2d->iovec[i].iov_base) {
+            fprintf(stderr, "%s(): Invalid page address\n", __func__);
+            virtio_gpu_set_fail(vgpu);
+            return;
+        }
+    }
+
+    /* Write response */
+    struct vgpu_ctrl_hdr *response =
+        vgpu_mem_host_to_guest(vgpu, vq_desc[2].addr);
+
+    memset(response,
0, sizeof(*response)); + response->type = VIRTIO_GPU_RESP_OK_NODATA; + + /* Return write length */ + *plen = sizeof(*response); +} + +static void virtio_gpu_cmd_update_cursor_handler(virtio_gpu_state_t *vgpu, + struct virtq_desc *vq_desc, + uint32_t *plen) +{ + /* Write response */ + struct vgpu_ctrl_hdr *response = + vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr); + + memset(response, 0, sizeof(*response)); + response->type = VIRTIO_GPU_RESP_OK_NODATA; + + /* Return write length */ + *plen = sizeof(*response); +} + +static void virtio_gpu_cmd_move_cursor_handler(virtio_gpu_state_t *vgpu, + struct virtq_desc *vq_desc, + uint32_t *plen) +{ + /* Write response */ + struct vgpu_ctrl_hdr *response = + vgpu_mem_host_to_guest(vgpu, vq_desc[1].addr); + + memset(response, 0, sizeof(*response)); + response->type = VIRTIO_GPU_RESP_OK_NODATA; + + /* Return write length */ + *plen = sizeof(*response); +} + +static int virtio_gpu_desc_handler(virtio_gpu_state_t *vgpu, + const virtio_gpu_queue_t *queue, + uint32_t desc_idx, + uint32_t *plen) +{ + /* virtio-gpu uses 3 virtqueue descriptors at most */ + struct virtq_desc vq_desc[3]; + + /* Collect descriptors */ + for (int i = 0; i < 3; i++) { + /* The size of the `struct virtq_desc` is 4 words */ + uint32_t *desc = &vgpu->ram[queue->QueueDesc + desc_idx * 4]; + + /* Retrieve the fields of current descriptor */ + vq_desc[i].addr = desc[0]; + vq_desc[i].len = desc[2]; + vq_desc[i].flags = desc[3]; + desc_idx = desc[3] >> 16; /* vq_desc[desc_cnt].next */ + + /* Leave the loop if next-flag is not set */ + if (!(vq_desc[i].flags & VIRTIO_DESC_F_NEXT)) + break; + } + + /* Process the header */ + struct vgpu_ctrl_hdr *header = + vgpu_mem_host_to_guest(vgpu, vq_desc[0].addr); + + /* Process the command */ + switch (header->type) { + case VIRTIO_GPU_CMD_GET_DISPLAY_INFO: + virtio_gpu_get_display_info_handler(vgpu, vq_desc, plen); + break; + case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: + virtio_gpu_resource_create_2d_handler(vgpu, vq_desc, plen); + break; + case VIRTIO_GPU_CMD_RESOURCE_UNREF: + virtio_gpu_cmd_resource_unref_handler(vgpu, vq_desc, plen); + break; + case VIRTIO_GPU_CMD_GET_EDID: + virtio_gpu_get_edid_handler(vgpu, vq_desc, plen); + break; + case VIRTIO_GPU_CMD_GET_CAPSET_INFO: + virtio_gpu_get_capset_handler(vgpu, vq_desc, plen); + break; + case VIRTIO_GPU_CMD_SET_SCANOUT: + virtio_gpu_cmd_set_scanout_handler(vgpu, vq_desc, plen); + break; + case VIRTIO_GPU_CMD_RESOURCE_FLUSH: + virtio_gpu_cmd_resource_flush_handler(vgpu, vq_desc, plen); + break; + case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: + virtio_gpu_cmd_transfer_to_host_2d_handler(vgpu, vq_desc, plen); + break; + case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: + virtio_gpu_cmd_resource_attach_backing_handler(vgpu, vq_desc, plen); + break; + case VIRTIO_GPU_CMD_UPDATE_CURSOR: + virtio_gpu_cmd_update_cursor_handler(vgpu, vq_desc, plen); + break; + case VIRTIO_GPU_CMD_MOVE_CURSOR: + virtio_gpu_cmd_move_cursor_handler(vgpu, vq_desc, plen); + break; + default: + fprintf(stderr, "%s(): unknown command %d\n", __func__, header->type); + virtio_gpu_set_fail(vgpu); + *plen = 0; + return -1; + } + + return 0; +} + +static void virtio_queue_notify_handler(virtio_gpu_state_t *vgpu, int index) +{ + uint32_t *ram = vgpu->ram; + virtio_gpu_queue_t *queue = &vgpu->queues[index]; + if (vgpu->Status & VIRTIO_STATUS__DEVICE_NEEDS_RESET) + return; + + if (!((vgpu->Status & VIRTIO_STATUS__DRIVER_OK) && queue->ready)) + return virtio_gpu_set_fail(vgpu); + + /* Check for new buffers */ + uint16_t new_avail = 
ram[queue->QueueAvail] >> 16; + if (new_avail - queue->last_avail > (uint16_t) queue->QueueNum) + return (fprintf(stderr, "%s(): size check failed\n", __func__), + virtio_gpu_set_fail(vgpu)); + + if (queue->last_avail == new_avail) + return; + + /* Process them */ + uint16_t new_used = ram[queue->QueueUsed] >> 16; /* virtq_used.idx (le16) */ + while (queue->last_avail != new_avail) { + /* Obtain the index in the ring buffer */ + uint16_t queue_idx = queue->last_avail % queue->QueueNum; + + /* Since each buffer index occupies 2 bytes but the memory is aligned + * with 4 bytes, and the first element of the available queue is stored + * at ram[queue->QueueAvail + 1], to acquire the buffer index, it + * requires the following array index calculation and bit shifting. + * Check also the `struct virtq_avail` on the spec. + */ + uint16_t buffer_idx = ram[queue->QueueAvail + 1 + queue_idx / 2] >> + (16 * (queue_idx % 2)); + + /* Consume request from the available queue and process the data in the + * descriptor list. + */ + uint32_t len = 0; + int result = virtio_gpu_desc_handler(vgpu, queue, buffer_idx, &len); + if (result != 0) + return virtio_gpu_set_fail(vgpu); + + /* Write used element information (`struct virtq_used_elem`) to the used + * queue */ + uint32_t vq_used_addr = + queue->QueueUsed + 1 + (new_used % queue->QueueNum) * 2; + ram[vq_used_addr] = buffer_idx; /* virtq_used_elem.id (le32) */ + ram[vq_used_addr + 1] = len; /* virtq_used_elem.len (le32) */ + queue->last_avail++; + new_used++; + } + + /* Check le32 len field of `struct virtq_used_elem` on the spec */ + vgpu->ram[queue->QueueUsed] &= MASK(16); /* Reset low 16 bits to zero */ + vgpu->ram[queue->QueueUsed] |= ((uint32_t) new_used) << 16; /* len */ + + /* Send interrupt, unless VIRTQ_AVAIL_F_NO_INTERRUPT is set */ + if (!(ram[queue->QueueAvail] & 1)) + vgpu->InterruptStatus |= VIRTIO_INT__USED_RING; +} + +static bool virtio_gpu_reg_read(virtio_gpu_state_t *vgpu, + uint32_t addr, + uint32_t *value) +{ +#define _(reg) VIRTIO_##reg + switch (addr) { + case _(MagicValue): + *value = 0x74726976; + return true; + case _(Version): + *value = 2; + return true; + case _(DeviceID): + *value = 16; + return true; + case _(VendorID): + *value = VIRTIO_VENDOR_ID; + return true; + case _(DeviceFeatures): + if (vgpu->DeviceFeaturesSel) { /* [63:32] */ + *value = VIRTIO_F_VERSION_1; + } else { /* [31:0] */ + *value = VIRTIO_GPU_F_EDID; + } + return true; + case _(QueueNumMax): + *value = VGPU_QUEUE_NUM_MAX; + return true; + case _(QueueReady): + *value = VGPU_QUEUE.ready ? 
1 : 0; + return true; + case _(InterruptStatus): + *value = vgpu->InterruptStatus; + return true; + case _(Status): + *value = vgpu->Status; + return true; + case _(SHMLenLow): + case _(SHMLenHigh): + /* shared memory is unimplemented */ + *value = -1; + return true; + case _(SHMBaseLow): + *value = 0; + return true; + case _(SHMBaseHigh): + *value = 0; + return true; + case _(ConfigGeneration): + *value = 0; + return true; + default: + /* Invalid address which exceeded the range */ + if (!RANGE_CHECK(addr, _(Config), sizeof(struct vgpu_config))) + return false; + + /* Read configuration from the corresponding register */ + uint32_t offset = (addr - _(Config)) << 2; + switch (offset) { + case offsetof(struct vgpu_config, events_read): { + *value = 0; /* No event is implemented currently */ + return true; + } + case offsetof(struct vgpu_config, num_scanouts): { + *value = vgpu_configs.num_scanouts; + return true; + } + case offsetof(struct vgpu_config, num_capsets): { + *value = 0; /* TODO: Add at least one capset to support VirGl */ + return true; + } + default: + return false; + } + } +#undef _ +} + +static bool virtio_gpu_reg_write(virtio_gpu_state_t *vgpu, + uint32_t addr, + uint32_t value) +{ +#define _(reg) VIRTIO_##reg + switch (addr) { + case _(DeviceFeaturesSel): + vgpu->DeviceFeaturesSel = value; + return true; + case _(DriverFeatures): + vgpu->DriverFeaturesSel == 0 ? (vgpu->DriverFeatures = value) : 0; + return true; + case _(DriverFeaturesSel): + vgpu->DriverFeaturesSel = value; + return true; + case _(QueueSel): + if (value < ARRAY_SIZE(vgpu->queues)) + vgpu->QueueSel = value; + else + virtio_gpu_set_fail(vgpu); + return true; + case _(QueueNum): + if (value > 0 && value <= VGPU_QUEUE_NUM_MAX) + VGPU_QUEUE.QueueNum = value; + else + virtio_gpu_set_fail(vgpu); + return true; + case _(QueueReady): + VGPU_QUEUE.ready = value & 1; + if (value & 1) + VGPU_QUEUE.last_avail = vgpu->ram[VGPU_QUEUE.QueueAvail] >> 16; + return true; + case _(QueueDescLow): + VGPU_QUEUE.QueueDesc = vgpu_preprocess(vgpu, value); + return true; + case _(QueueDescHigh): + if (value) + virtio_gpu_set_fail(vgpu); + return true; + case _(QueueDriverLow): + VGPU_QUEUE.QueueAvail = vgpu_preprocess(vgpu, value); + return true; + case _(QueueDriverHigh): + if (value) + virtio_gpu_set_fail(vgpu); + return true; + case _(QueueDeviceLow): + VGPU_QUEUE.QueueUsed = vgpu_preprocess(vgpu, value); + return true; + case _(QueueDeviceHigh): + if (value) + virtio_gpu_set_fail(vgpu); + return true; + case _(QueueNotify): + if (value < ARRAY_SIZE(vgpu->queues)) + virtio_queue_notify_handler(vgpu, value); + else + virtio_gpu_set_fail(vgpu); + return true; + case _(InterruptACK): + vgpu->InterruptStatus &= ~value; + return true; + case _(Status): + virtio_gpu_update_status(vgpu, value); + return true; + case _(SHMSel): + return true; + default: + /* Invalid address which exceeded the range */ + if (!RANGE_CHECK(addr, _(Config), sizeof(struct vgpu_config))) + return false; + + /* Write configuration to the corresponding register */ + uint32_t offset = (addr - _(Config)) << 2; + switch (offset) { + case offsetof(struct vgpu_config, events_clear): { + /* Ignored, no event is implemented currently */ + return true; + } + default: + return false; + } + } +#undef _ +} + +void virtio_gpu_read(vm_t *vm, + virtio_gpu_state_t *vgpu, + uint32_t addr, + uint8_t width, + uint32_t *value) +{ + switch (width) { + case RV_MEM_LW: + if (!virtio_gpu_reg_read(vgpu, addr >> 2, value)) + vm_set_exception(vm, RV_EXC_LOAD_FAULT, vm->exc_val); + break; 
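A note on the address math used by the register handlers above: virtio_gpu_read()/virtio_gpu_write() shift the byte address right by two before calling them, so reg_read()/reg_write() operate on 32-bit word indices, and the config-space branch converts back to a byte offset with (addr - _(Config)) << 2 before matching it against offsetof() into struct vgpu_config. A small worked example, illustration only, assuming the register enum stores word indices (VIRTIO_Config == 0x100 >> 2):

    /* A 32-bit guest load at MMIO byte offset 0x108 lands on num_scanouts. */
    static uint32_t config_byte_offset(uint32_t byte_addr /* e.g. 0x108 */)
    {
        uint32_t word_idx = byte_addr >> 2;        /* what reg_read() receives */
        return (word_idx - VIRTIO_Config) << 2;    /* 0x8 == offsetof(struct
                                                    * vgpu_config, num_scanouts) */
    }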
+ case RV_MEM_LBU: + case RV_MEM_LB: + case RV_MEM_LHU: + case RV_MEM_LH: + vm_set_exception(vm, RV_EXC_LOAD_MISALIGN, vm->exc_val); + return; + default: + vm_set_exception(vm, RV_EXC_ILLEGAL_INSN, 0); + return; + } +} + +void virtio_gpu_write(vm_t *vm, + virtio_gpu_state_t *vgpu, + uint32_t addr, + uint8_t width, + uint32_t value) +{ + switch (width) { + case RV_MEM_SW: + if (!virtio_gpu_reg_write(vgpu, addr >> 2, value)) + vm_set_exception(vm, RV_EXC_STORE_FAULT, vm->exc_val); + break; + case RV_MEM_SB: + case RV_MEM_SH: + vm_set_exception(vm, RV_EXC_STORE_MISALIGN, vm->exc_val); + return; + default: + vm_set_exception(vm, RV_EXC_ILLEGAL_INSN, 0); + return; + } +} + +void virtio_gpu_init(virtio_gpu_state_t *vgpu) +{ + vgpu->priv = + calloc(sizeof(struct vgpu_scanout_info), VIRTIO_GPU_MAX_SCANOUTS); +} + +void virtio_gpu_add_scanout(virtio_gpu_state_t *vgpu, + uint32_t width, + uint32_t height) +{ + int scanout_num = vgpu_configs.num_scanouts; + + if (scanout_num >= VIRTIO_GPU_MAX_SCANOUTS) { + fprintf(stderr, "%s(): exceeded scanout maximum number\n", __func__); + exit(2); + } + + PRIV(vgpu)[scanout_num].width = width; + PRIV(vgpu)[scanout_num].height = height; + PRIV(vgpu)[scanout_num].enabled = 1; + + window_add(width, height); + + vgpu_configs.num_scanouts++; +} diff --git a/virtio.h b/virtio.h index b893ef5..d9947b5 100644 --- a/virtio.h +++ b/virtio.h @@ -24,6 +24,16 @@ #define VIRTIO_BLK_S_IOERR 1 #define VIRTIO_BLK_S_UNSUPP 2 +#define VIRTIO_GPU_FLAG_FENCE (1 << 0) + +#define VIRTIO_GPU_MAX_SCANOUTS 16 + +#define VIRTIO_GPU_CAPSET_VIRGL 1 +#define VIRTIO_GPU_CAPSET_VIRGL2 2 +#define VIRTIO_GPU_CAPSET_GFXSTREAM 3 +#define VIRTIO_GPU_CAPSET_VENUS 4 +#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5 + /* VirtIO MMIO registers */ #define VIRTIO_REG_LIST \ _(MagicValue, 0x000) /* R */ \ @@ -49,6 +59,12 @@ _(QueueDeviceLow, 0x0a0) /* W */ \ _(QueueDeviceHigh, 0x0a4) /* W */ \ _(ConfigGeneration, 0x0fc) /* R */ \ + _(SHMSel, 0x0ac) /* W */ \ + _(SHMLenLow, 0x0b0) /* R */ \ + _(SHMLenHigh, 0x0b4) /* R */ \ + _(SHMBaseLow, 0x0b8) /* R */ \ + _(SHMBaseHigh, 0x0bc) /* R */ \ + _(QueueReset, 0x0c0) /* RW */ \ _(Config, 0x100) /* RW */ enum { @@ -57,6 +73,51 @@ enum { #undef _ }; +enum virtio_gpu_ctrl_type { + /* 2d commands */ + VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100, + VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + VIRTIO_GPU_CMD_RESOURCE_UNREF, + VIRTIO_GPU_CMD_SET_SCANOUT, + VIRTIO_GPU_CMD_RESOURCE_FLUSH, + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, + VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, + VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, + VIRTIO_GPU_CMD_GET_CAPSET_INFO, + VIRTIO_GPU_CMD_GET_CAPSET, + VIRTIO_GPU_CMD_GET_EDID, + + /* cursor commands */ + VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300, + VIRTIO_GPU_CMD_MOVE_CURSOR, + + /* success responses */ + VIRTIO_GPU_RESP_OK_NODATA = 0x1100, + VIRTIO_GPU_RESP_OK_DISPLAY_INFO, + VIRTIO_GPU_RESP_OK_CAPSET_INFO, + VIRTIO_GPU_RESP_OK_CAPSET, + VIRTIO_GPU_RESP_OK_EDID, + + /* error responses */ + VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200, + VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY, + VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID, + VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID, + VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID, + VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER, +}; + +enum virtio_gpu_formats { + VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1, + VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2, + VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3, + VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4, + VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67, + VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68, + VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121, + VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM 
= 134 +}; + struct virtq_desc { uint32_t addr; uint32_t len; diff --git a/window.c b/window.c new file mode 100644 index 0000000..a092716 --- /dev/null +++ b/window.c @@ -0,0 +1,159 @@ +#include +#include +#include +#include + +#include +#include + +#include "virtio.h" +#include "window.h" + +#define SDL_COND_TIMEOUT 1 /* ms */ + +struct display_info { + struct gpu_resource resource; + uint32_t sdl_format; + SDL_mutex *img_mtx; + SDL_cond *img_cond; + SDL_Thread *thread_id; + SDL_Window *window; + SDL_Renderer *renderer; + SDL_Surface *surface; + SDL_Texture *texture; +}; + +static struct display_info displays[VIRTIO_GPU_MAX_SCANOUTS]; +static int display_cnt; + +void window_add(uint32_t width, uint32_t height) +{ + displays[display_cnt].resource.width = width; + displays[display_cnt].resource.height = height; + display_cnt++; +} + +static int window_thread(void *data) +{ + struct display_info *display = (struct display_info *) data; + struct gpu_resource *resource = &display->resource; + + /* Create SDL window */ + display->window = SDL_CreateWindow("semu", SDL_WINDOWPOS_UNDEFINED, + SDL_WINDOWPOS_UNDEFINED, resource->width, + resource->height, SDL_WINDOW_SHOWN); + + if (!display->window) { + fprintf(stderr, "%s(): failed to create window\n", __func__); + exit(2); + } + + /* Create SDL render */ + display->renderer = + SDL_CreateRenderer(display->window, -1, SDL_RENDERER_ACCELERATED); + + if (!display->renderer) { + fprintf(stderr, "%s(): failed to create renderer\n", __func__); + exit(2); + } + + while (1) { + SDL_LockMutex(display->img_mtx); + + /* Wait until the image is arrived */ + while (SDL_CondWaitTimeout(display->img_cond, display->img_mtx, + SDL_COND_TIMEOUT)) { + /* Read event */ + SDL_Event e; + SDL_PollEvent(&e); // TODO: Handle events + } + + /* Render image */ + display->surface = SDL_CreateRGBSurfaceWithFormatFrom( + resource->image, resource->width, resource->height, + resource->bits_per_pixel, resource->stride, display->sdl_format); + display->texture = + SDL_CreateTextureFromSurface(display->renderer, display->surface); + SDL_RenderCopy(display->renderer, display->texture, NULL, NULL); + SDL_RenderPresent(display->renderer); + SDL_DestroyTexture(display->texture); + + SDL_UnlockMutex(display->img_mtx); + } +} + +void window_init(void) +{ + char thread_name[20] = {0}; + + if (SDL_Init(SDL_INIT_VIDEO) < 0) { + fprintf(stderr, "%s(): failed to initialize SDL\n", __func__); + exit(2); + } + + for (int i = 0; i < display_cnt; i++) { + displays[i].img_mtx = SDL_CreateMutex(); + displays[i].img_cond = SDL_CreateCond(); + + sprintf(thread_name, "sdl thread %d", i); + displays[i].thread_id = + SDL_CreateThread(window_thread, thread_name, (void *) &displays[i]); + SDL_DetachThread(displays[i].thread_id); + } +} + +void window_lock(uint32_t id) +{ + SDL_LockMutex(displays[id].img_mtx); +} + +void window_unlock(uint32_t id) +{ + SDL_UnlockMutex(displays[id].img_mtx); +} + +static bool virtio_gpu_to_sdl_format(uint32_t virtio_gpu_format, + uint32_t *sdl_format) +{ + switch (virtio_gpu_format) { + case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: + *sdl_format = SDL_PIXELFORMAT_ARGB8888; + return true; + case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: + *sdl_format = SDL_PIXELFORMAT_XRGB8888; + return true; + case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: + *sdl_format = SDL_PIXELFORMAT_BGRA8888; + return true; + case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: + *sdl_format = SDL_PIXELFORMAT_BGRX8888; + return true; + case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: + *sdl_format = SDL_PIXELFORMAT_ABGR8888; + return true; + case 
VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: + *sdl_format = SDL_PIXELFORMAT_RGBX8888; + return true; + case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: + *sdl_format = SDL_PIXELFORMAT_RGBA8888; + return true; + case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: + *sdl_format = SDL_PIXELFORMAT_XBGR8888; + return true; + default: + return false; + } +} + +void window_render(struct gpu_resource *resource) +{ + int id = resource->scanout_id; + + /* Resource update */ + memcpy(&displays[id].resource, resource, sizeof(struct gpu_resource)); + bool legal_format = + virtio_gpu_to_sdl_format(resource->format, &displays[id].sdl_format); + + if (legal_format) + SDL_CondSignal(displays[id].img_cond); +} diff --git a/window.h b/window.h new file mode 100644 index 0000000..6e5efb4 --- /dev/null +++ b/window.h @@ -0,0 +1,22 @@ +#pragma once + +#if SEMU_HAS(VIRTIOGPU) +#include + +/* Public interface to the vgpu_resource_2d structure */ +struct gpu_resource { + uint32_t scanout_id; + uint32_t format; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t bits_per_pixel; + uint32_t *image; +}; + +void window_init(void); +void window_add(uint32_t width, uint32_t height); +void window_render(struct gpu_resource *resource); +void window_lock(uint32_t id); +void window_unlock(uint32_t id); +#endif
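One design point worth calling out: virtio_gpu_cmd_resource_flush_handler() hands window_render() a struct vgpu_resource_2d cast to the public struct gpu_resource, which is only safe while gpu_resource mirrors the "Public" prefix of vgpu_resource_2d member for member. A possible compile-time guard for virtio-gpu.c, not part of the patch and assuming a C11 toolchain for _Static_assert:

    #include <stddef.h>

    _Static_assert(offsetof(struct gpu_resource, scanout_id) ==
                       offsetof(struct vgpu_resource_2d, scanout_id),
                   "gpu_resource must mirror the public prefix of vgpu_resource_2d");
    _Static_assert(offsetof(struct gpu_resource, image) ==
                       offsetof(struct vgpu_resource_2d, image),
                   "gpu_resource must mirror the public prefix of vgpu_resource_2d");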
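For readers unfamiliar with the split-virtqueue layout, the ring bookkeeping in virtio_queue_notify_handler() reduces to index arithmetic over guest RAM viewed as an array of 32-bit words. A sketch with illustrative names (q, slot, buf_idx, used_idx, len), not part of the patch:

    /* Illustration of how the handler recovers 16-bit ring indices. */
    static void virtq_ring_sketch(virtio_gpu_state_t *vgpu, uint32_t len)
    {
        virtio_gpu_queue_t *q = &vgpu->queues[0];
        uint32_t *ram = vgpu->ram;

        /* virtq_avail.idx is the high le16 of word 0 of the available ring */
        uint16_t avail_idx = ram[q->QueueAvail] >> 16;

        /* virtq_avail.ring[] starts at word 1; two le16 entries share each word */
        uint16_t slot = q->last_avail % q->QueueNum;
        uint16_t buf_idx = ram[q->QueueAvail + 1 + slot / 2] >> (16 * (slot % 2));

        /* virtq_used.ring[] also starts at word 1; each virtq_used_elem is two
         * words: id followed by len */
        uint16_t used_idx = ram[q->QueueUsed] >> 16;
        uint32_t used_word = q->QueueUsed + 1 + (used_idx % q->QueueNum) * 2;
        ram[used_word] = buf_idx;     /* virtq_used_elem.id  */
        ram[used_word + 1] = len;     /* virtq_used_elem.len */

        (void) avail_idx; /* compared against last_avail in the real handler */
    }

The le16 idx fields of virtq_avail and virtq_used sit in the high half of word 0 of their rings, which is why the handler shifts by 16 before comparing against last_avail and when publishing the new used index.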