[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH-for-8.0 5/5] accel/tcg: Restrict page_collection structure to sys
From: |
Philippe Mathieu-Daudé |
Subject: |
[PATCH-for-8.0 5/5] accel/tcg: Restrict page_collection structure to system TB maintenance |
Date: |
Fri, 9 Dec 2022 10:36:49 +0100 |
Only the system emulation part of TB maintenance uses the
page_collection structure. Restrict its declaration (and the
functions requiring it) to tb-maint.c.
Convert the 'len' argument of tb_invalidate_phys_page_locked_fast()
from signed to unsigned.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
accel/tcg/internal.h | 7 -------
accel/tcg/tb-maint.c | 12 ++++++------
2 files changed, 6 insertions(+), 13 deletions(-)
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index db078390b1..6edff16fb0 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -36,16 +36,9 @@ void page_table_config_init(void);
#endif
#ifdef CONFIG_SOFTMMU
-struct page_collection;
-void tb_invalidate_phys_page_locked_fast(struct page_collection *pages,
- tb_page_addr_t start, int len,
- uintptr_t retaddr);
-struct page_collection *page_collection_lock(tb_page_addr_t start,
- tb_page_addr_t end);
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
unsigned size,
uintptr_t retaddr);
-void page_collection_unlock(struct page_collection *set);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 4dc2fa1060..10d7e4b7a8 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -523,8 +523,8 @@ static gint tb_page_addr_cmp(gconstpointer ap,
gconstpointer bp, gpointer udata)
* intersecting TBs.
* Locking order: acquire locks in ascending order of page index.
*/
-struct page_collection *
-page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
+static struct page_collection *page_collection_lock(tb_page_addr_t start,
+ tb_page_addr_t end)
{
struct page_collection *set = g_malloc(sizeof(*set));
tb_page_addr_t index;
@@ -568,7 +568,7 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t
end)
return set;
}
-void page_collection_unlock(struct page_collection *set)
+static void page_collection_unlock(struct page_collection *set)
{
/* entries are unlocked and freed via page_entry_destroy */
g_tree_destroy(set->tree);
@@ -1196,9 +1196,9 @@ void tb_invalidate_phys_range(tb_page_addr_t start,
tb_page_addr_t end)
/*
* Call with all @pages in the range [@start, @start + len[ locked.
*/
-void tb_invalidate_phys_page_locked_fast(struct page_collection *pages,
- tb_page_addr_t start, int len,
- uintptr_t retaddr)
+static void tb_invalidate_phys_page_locked_fast(struct page_collection *pages,
+ tb_page_addr_t start,
+ unsigned len, uintptr_t
retaddr)
{
PageDesc *p;
--
2.38.1