
[Wesnoth-cvs-commits] wesnoth/src network_worker.cpp


From: Jon Daniel
Subject: [Wesnoth-cvs-commits] wesnoth/src network_worker.cpp
Date: Mon, 05 Sep 2005 12:43:54 -0400

CVSROOT:        /cvsroot/wesnoth
Module name:    wesnoth
Branch:         
Changes by:     Jon Daniel <address@hidden>     05/09/05 16:43:54

Modified files:
        src            : network_worker.cpp 

Log message:
        Added poll support to the send code
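
        In essence, the patch replaces the fixed SDL_Delay()/timeout retry
        loop with a poll()-based wait for the socket to become writable.
        Since SDL_net does not expose the raw descriptor, the patch mirrors
        SDL_net's internal _TCPsocket layout and casts the TCPsocket handle
        to reach its channel field. A minimal standalone sketch of the wait
        (the helper name wait_for_write is illustrative, not committed code;
        the 15-second timeout matches the patch):

                #include <poll.h>
                #include <cerrno>

                /* Block until fd is writable, retrying if poll() is
                 * interrupted by a signal. Returns true when writable,
                 * false on timeout or error. */
                static bool wait_for_write(int fd, int timeout_ms)
                {
                        struct pollfd pfd = { fd, POLLOUT, 0 };
                        int res;
                        do {
                                res = poll(&pfd, 1, timeout_ms);
                        } while (res == -1 && errno == EINTR);
                        return res > 0;
                }

        With such a helper, a non-blocking send that fails with EAGAIN can
        simply wait for writability and retry, instead of sleeping in
        100 ms increments.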

CVSWeb URLs:
http://savannah.gnu.org/cgi-bin/viewcvs/wesnoth/wesnoth/src/network_worker.cpp.diff?tr1=1.46&tr2=1.47&r1=text&r2=text

Patches:
Index: wesnoth/src/network_worker.cpp
diff -u wesnoth/src/network_worker.cpp:1.46 wesnoth/src/network_worker.cpp:1.47
--- wesnoth/src/network_worker.cpp:1.46 Mon Sep  5 13:51:15 2005
+++ wesnoth/src/network_worker.cpp      Mon Sep  5 16:43:54 2005
@@ -1,4 +1,4 @@
-/* $Id: network_worker.cpp,v 1.46 2005/09/05 13:51:15 Sirp Exp $ */
+/* $Id: network_worker.cpp,v 1.47 2005/09/05 16:43:54 j_daniel Exp $ */
 /*
    Copyright (C) 2003-5 by David White <address@hidden>
    Part of the Battle for Wesnoth Project http://www.wesnoth.org/
@@ -26,11 +26,51 @@
 #include <iostream>
 #include <map>
 #include <vector>
+#include "config.h"
+
+#if defined(_WIN32) || defined(__WIN32__) || defined (WIN32)
+#include <windows.h>
+#else
+#include <sys/types.h>
+#include <sys/socket.h>
+#ifdef __BEOS__
+#include <socket.h>
+#else
+#include <fcntl.h>
+#endif
+#define SOCKET int
+#endif
+
+#ifdef HAVE_POLL_H
+#define USE_POLL 1
+#include <poll.h>
+#endif
+
+#ifdef HAVE_SYS_POLL_H
+#define USE_POLL 1
+#include <sys/poll.h>
+#endif
+
+#ifdef USE_SELECT
+#ifdef HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#else
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+#endif
 
 #define LOG_NW LOG_STREAM(info, network)
 #define ERR_NW LOG_STREAM(err, network)
 namespace {
-
+struct _TCPsocket {
+       int ready;
+       SOCKET channel;
+       IPaddress remoteAddress;
+       IPaddress localAddress;
+       int sflag;
+};
 unsigned int buf_id = 0;
 
 struct buffer {
@@ -65,7 +105,6 @@
 std::vector<threading::thread*> threads;
 
 SOCKET_STATE send_buf(TCPsocket sock, std::vector<char>& buf) {
-       int timeout = 15000;
        size_t upto = 0;
        size_t size = buf.size();
        {
@@ -73,7 +112,7 @@
                transfer_stats[sock].first.fresh_current(size);
        }
 
-       while(upto < size && timeout > 0) {
+       while(upto < size) {
                {
                        // check if the socket is still locked
                        const threading::lock lock(*global_mutex);
@@ -85,20 +124,28 @@
                if(res <= 0) {
 #ifdef EAGAIN
                        if(errno == EAGAIN) {
-                               SDL_Delay(100);
-                               timeout -= 100;
-                               continue;
-                       }
 #elif defined(EWOULDBLOCK)
                        if(errno == EWOULDBLOCK) {
-                               SDL_Delay(100);
-                               timeout -= 100;
-                               continue;
+#else
+                       {
+#endif
+
+#ifdef USE_POLL
+                               struct pollfd fd = { ((_TCPsocket*)sock)->channel, POLLOUT };
+                               int poll_res;
+                               do {
+                                       poll_res = poll(&fd, 1, 15000);
+                               } while(poll_res == -1 && errno == EINTR);
+
+                               if(poll_res > 0)
+                                       continue;
+                       }
+/* TODO implement the select io wait */
+#else
                        }
 #endif
                        return SOCKET_ERROR;
                }
-               timeout = 15000;
                upto += static_cast<size_t>(res);
                {
                        const threading::lock lock(*global_mutex);



