Re: [Qemu-devel] [PATCH] xen-all.c: fix multiply issue for int and uint types


From: Stefano Stabellini
Subject: Re: [Qemu-devel] [PATCH] xen-all.c: fix multiply issue for int and uint types
Date: Wed, 22 Aug 2012 11:12:50 +0100
User-agent: Alpine 2.02 (DEB 1266 2009-07-14)

On Wed, 22 Aug 2012, Dongxiao Xu wrote:
> If one multiply operand is a signed int and the other is unsigned,
> the signed operand is converted to unsigned before the multiplication,
> which is not what this code intends. Fix this by casting the unsigned
> operand to (int64_t) before the multiply (a standalone sketch of this
> conversion follows the quoted patch below).
> 
> Signed-off-by: Dongxiao Xu <address@hidden>

Acked-by: Stefano Stabellini <address@hidden>


>  xen-all.c |   24 ++++++++++++++++--------
>  1 files changed, 16 insertions(+), 8 deletions(-)
> 
> diff --git a/xen-all.c b/xen-all.c
> index 61def2e..f76b051 100644
> --- a/xen-all.c
> +++ b/xen-all.c
> @@ -712,7 +712,8 @@ static void cpu_ioreq_pio(ioreq_t *req)
>  
>              for (i = 0; i < req->count; i++) {
>                  tmp = do_inp(req->addr, req->size);
> -                cpu_physical_memory_write(req->data + (sign * i * req->size),
> +                cpu_physical_memory_write(
> +                        req->data + (sign * i * (int64_t)req->size),
>                          (uint8_t *) &tmp, req->size);
>              }
>          }
> @@ -723,7 +724,8 @@ static void cpu_ioreq_pio(ioreq_t *req)
>              for (i = 0; i < req->count; i++) {
>                  uint32_t tmp = 0;
>  
> -                cpu_physical_memory_read(req->data + (sign * i * req->size),
> +                cpu_physical_memory_read(
> +                        req->data + (sign * i * (int64_t)req->size),
>                          (uint8_t*) &tmp, req->size);
>                  do_outp(req->addr, req->size, tmp);
>              }
> @@ -740,12 +742,14 @@ static void cpu_ioreq_move(ioreq_t *req)
>      if (!req->data_is_ptr) {
>          if (req->dir == IOREQ_READ) {
>              for (i = 0; i < req->count; i++) {
> -                cpu_physical_memory_read(req->addr + (sign * i * req->size),
> +                cpu_physical_memory_read(
> +                        req->addr + (sign * i * (int64_t)req->size),
>                          (uint8_t *) &req->data, req->size);
>              }
>          } else if (req->dir == IOREQ_WRITE) {
>              for (i = 0; i < req->count; i++) {
> -                cpu_physical_memory_write(req->addr + (sign * i * req->size),
> +                cpu_physical_memory_write(
> +                        req->addr + (sign * i * (int64_t)req->size),
>                          (uint8_t *) &req->data, req->size);
>              }
>          }
> @@ -754,16 +758,20 @@ static void cpu_ioreq_move(ioreq_t *req)
>  
>          if (req->dir == IOREQ_READ) {
>              for (i = 0; i < req->count; i++) {
> -                cpu_physical_memory_read(req->addr + (sign * i * req->size),
> +                cpu_physical_memory_read(
> +                        req->addr + (sign * i * (int64_t)req->size),
>                          (uint8_t*) &tmp, req->size);
> -                cpu_physical_memory_write(req->data + (sign * i * req->size),
> +                cpu_physical_memory_write(
> +                        req->data + (sign * i * (int64_t)req->size),
>                          (uint8_t*) &tmp, req->size);
>              }
>          } else if (req->dir == IOREQ_WRITE) {
>              for (i = 0; i < req->count; i++) {
> -                cpu_physical_memory_read(req->data + (sign * i * req->size),
> +                cpu_physical_memory_read(
> +                        req->data + (sign * i * (int64_t)req->size),
>                          (uint8_t*) &tmp, req->size);
> -                cpu_physical_memory_write(req->addr + (sign * i * req->size),
> +                cpu_physical_memory_write(
> +                        req->addr + (sign * i * (int64_t)req->size),
>                          (uint8_t*) &tmp, req->size);
>              }
>          }
> -- 
> 1.7.1
> 
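
To illustrate the conversion the patch guards against, here is a minimal, standalone C sketch. The variable names and values are hypothetical stand-ins for the fields used in xen-all.c (a 64-bit guest address, a -1 "sign" for a descending copy, and a 32-bit unsigned size), not code from the patch itself:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t base = 0x100000; /* hypothetical 64-bit guest address */
        int sign = -1;            /* -1 when copying in descending order */
        int i = 1;                /* loop index */
        uint32_t size = 4;        /* hypothetical 32-bit unsigned size */

        /* Without the cast, the signed product (-4) is converted to
         * unsigned 32-bit (0xFFFFFFFC) and then zero-extended to 64 bits,
         * so the address moves far forward instead of back by 4 bytes. */
        uint64_t bad = base + (sign * i * size);

        /* With the cast, the whole product is computed as int64_t,
         * stays -4, and sign-extends correctly when added to the base. */
        uint64_t good = base + (sign * i * (int64_t)size);

        printf("without cast: 0x%" PRIx64 "\n", bad);  /* 0x1000ffffc */
        printf("with cast:    0x%" PRIx64 "\n", good); /* 0xffffc    */
        return 0;
    }

The cast on the unsigned operand is enough because it forces the usual arithmetic conversions to promote the whole product to a signed 64-bit type, so a negative offset survives the addition to the 64-bit address.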


