From: Nicholas Piggin
Subject: Re: [PATCH 05/14] ppc/xive2: Process group backlog when pushing an OS context
Date: Tue, 19 Nov 2024 14:20:25 +1000
On Wed Oct 16, 2024 at 7:13 AM AEST, Michael Kowal wrote:
> From: Frederic Barrat <fbarrat@linux.ibm.com>
>
> When pushing an OS context, we were already checking if there was a
> pending interrupt in the IPB and sending a notification if needed. We
> also need to check if there is a pending group interrupt stored in the
> NVG table. To avoid useless backlog scans, we only scan if the NVP
> belongs to a group.
>
> Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com>
> Signed-off-by: Michael Kowal <kowal@linux.ibm.com>
> ---
> hw/intc/xive2.c | 100 ++++++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 97 insertions(+), 3 deletions(-)
>
> diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
> index a6dc6d553f..7130892482 100644
> --- a/hw/intc/xive2.c
> +++ b/hw/intc/xive2.c
> @@ -279,6 +279,85 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data)
> end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
> }
>
> +/*
> + * Scan the group chain and return the highest priority and group
> + * level of pending group interrupts.
> + */
> +static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr,
> +                                             uint8_t nvp_blk, uint32_t nvp_idx,
> + uint8_t first_group,
> + uint8_t *out_level)
Could we call that xive2_presenter_backlog_scan()? I think that matches
how the specification refers to it.
Thanks,
Nick
> +{
> + Xive2Router *xrtr = XIVE2_ROUTER(xptr);
> + uint32_t nvgc_idx, mask;
> + uint32_t current_level, count;
> + uint8_t prio;
> + Xive2Nvgc nvgc;
> +
> + for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) {
> + current_level = first_group & 0xF;
> +
> + while (current_level) {
> + mask = (1 << current_level) - 1;
> + nvgc_idx = nvp_idx & ~mask;
> + nvgc_idx |= mask >> 1;
> + qemu_log("fxb %s checking backlog for prio %d group idx %x\n",
> + __func__, prio, nvgc_idx);
> +
> +            if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) {
> + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n",
> + nvp_blk, nvgc_idx);
> + return 0xFF;
> + }
> + if (!xive2_nvgc_is_valid(&nvgc)) {
> + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n",
> + nvp_blk, nvgc_idx);
> + return 0xFF;
> + }
> +
> + count = xive2_nvgc_get_backlog(&nvgc, prio);
> + if (count) {
> + *out_level = current_level;
> + return prio;
> + }
> +            current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0xF;
> + }
> + }
> + return 0xFF;
> +}
> +
> +static void xive2_presenter_backlog_decr(XivePresenter *xptr,
> + uint8_t nvp_blk, uint32_t nvp_idx,
> + uint8_t group_prio,
> + uint8_t group_level)
> +{
> + Xive2Router *xrtr = XIVE2_ROUTER(xptr);
> + uint32_t nvgc_idx, mask, count;
> + Xive2Nvgc nvgc;
> +
> + group_level &= 0xF;
> + mask = (1 << group_level) - 1;
> + nvgc_idx = nvp_idx & ~mask;
> + nvgc_idx |= mask >> 1;
> +
> + if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) {
> + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n",
> + nvp_blk, nvgc_idx);
> + return;
> + }
> + if (!xive2_nvgc_is_valid(&nvgc)) {
> + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n",
> + nvp_blk, nvgc_idx);
> + return;
> + }
> + count = xive2_nvgc_get_backlog(&nvgc, group_prio);
> + if (!count) {
> + return;
> + }
> + xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1);
> + xive2_router_write_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc);
> +}
> +
> /*
> * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
> *
> @@ -588,8 +667,9 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
> uint8_t nvp_blk, uint32_t nvp_idx,
> bool do_restore)
> {
> - uint8_t ipb, backlog_level;
> - uint8_t backlog_prio;
> + XivePresenter *xptr = XIVE_PRESENTER(xrtr);
> + uint8_t ipb, backlog_level, group_level, first_group;
> + uint8_t backlog_prio, group_prio;
> uint8_t *regs = &tctx->regs[TM_QW1_OS];
> Xive2Nvp nvp;
>
> @@ -624,8 +704,22 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
> backlog_prio = xive_ipb_to_pipr(ipb);
> backlog_level = 0;
>
> + first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0);
> + if (first_group && regs[TM_LSMFB] < backlog_prio) {
> + group_prio = xive2_presenter_backlog_check(xptr, nvp_blk, nvp_idx,
> +                                                   first_group, &group_level);
> + regs[TM_LSMFB] = group_prio;
> + if (regs[TM_LGS] && group_prio < backlog_prio) {
> + /* VP can take a group interrupt */
> + xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx,
> + group_prio, group_level);
> + backlog_prio = group_prio;
> + backlog_level = group_level;
> + }
> + }
> +
> /*
> - * Compute the PIPR based on the restored state.
> + * Compute the PIPR based on the restored state.
> * It will raise the External interrupt signal if needed.
> */
> xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level);
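
For readers following along, here is a small standalone sketch (not part of the
patch; the helper name and sample values are invented for illustration) of the
NVG index computation that both xive2_presenter_backlog_check() and
xive2_presenter_backlog_decr() rely on: for a group level n, the low n bits of
the NVP index are cleared and the low n-1 bits are set, giving the index of the
group's NVGC entry.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only (not from the patch): derive the NVG (group) index
 * from an NVP index for a given group level, mirroring the mask/merge
 * computation quoted above.  Helper name and sample values are made up.
 */
static uint32_t nvgc_idx_for_level(uint32_t nvp_idx, uint8_t level)
{
    uint32_t mask = (1 << (level & 0xF)) - 1;
    uint32_t nvgc_idx;

    nvgc_idx = nvp_idx & ~mask;   /* clear the low 'level' bits ...       */
    nvgc_idx |= mask >> 1;        /* ... and set the low 'level - 1' bits */
    return nvgc_idx;
}

int main(void)
{
    uint8_t level;

    /* Walk a hypothetical NVP index 0x345a through group levels 1..4 */
    for (level = 1; level <= 4; level++) {
        printf("level %u -> NVG idx 0x%x\n",
               (unsigned)level, (unsigned)nvgc_idx_for_level(0x345a, level));
    }
    return 0;
}

Note that the scan in the patch only performs this computation while
current_level is non-zero, so the level-0 case (which would just return the
NVP index itself) never comes up.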