-rw-r--r--   opentracker.c   | 140
-rw-r--r--   ot_clean.c      |   1
-rw-r--r--   ot_fullscrape.c |   4
-rw-r--r--   ot_fullscrape.h |   2
-rw-r--r--   ot_http.c       | 140
-rw-r--r--   ot_livesync.h   |   2
-rw-r--r--   ot_mutex.c      |  18
-rw-r--r--   ot_mutex.h      |   4
-rw-r--r--   ot_stats.c      |   4
-rw-r--r--   ot_stats.h      |   2
-rw-r--r--   ot_udp.c        |  26
-rw-r--r--   ot_vector.c     |   2

12 files changed, 174 insertions, 171 deletions
diff --git a/opentracker.c b/opentracker.c
index 91a1f79..970995a 100644
--- a/opentracker.c
+++ b/opentracker.c
@@ -48,12 +48,17 @@ static void panic( const char *routine ) {
 
 static void signal_handler( int s ) {
   if( s == SIGINT ) {
-    signal( SIGINT, SIG_IGN);
+    /* Any new interrupt signal quits the application */
+    signal( SIGINT, SIG_DFL);
+
+    /* Tell all other threads to not acquire any new lock on a bucket
+       but cancel their operations and return */
     g_opentracker_running = 0;
 
     trackerlogic_deinit();
     exit( 0 );
   } else if( s == SIGALRM ) {
+    /* Maintain our copy of the clock. time() on BSDs is very expensive. */
     g_now_seconds = time(NULL);
     alarm(5);
   }
@@ -90,88 +95,87 @@ static void help( char *name ) {
 }
 #undef HELPLINE
 
-static void handle_dead( const int64 socket ) {
-  struct http_data* h=io_getcookie( socket );
-  if( h ) {
-    if( h->flag & STRUCT_HTTP_FLAG_IOB_USED )
-      iob_reset( &h->data.batch );
-    if( h->flag & STRUCT_HTTP_FLAG_ARRAY_USED )
-      array_reset( &h->data.request );
-    if( h->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK )
-      mutex_workqueue_canceltask( socket );
-    free( h );
+static void handle_dead( const int64 sock ) {
+  struct http_data* cookie=io_getcookie( sock );
+  if( cookie ) {
+    if( cookie->flag & STRUCT_HTTP_FLAG_IOB_USED )
+      iob_reset( &cookie->data.batch );
+    if( cookie->flag & STRUCT_HTTP_FLAG_ARRAY_USED )
+      array_reset( &cookie->data.request );
+    if( cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK )
+      mutex_workqueue_canceltask( sock );
+    free( cookie );
   }
-  io_close( socket );
+  io_close( sock );
 }
 
-static ssize_t handle_read( const int64 clientsocket, struct ot_workstruct *ws ) {
-  struct http_data* h = io_getcookie( clientsocket );
-  ssize_t l;
+static ssize_t handle_read( const int64 sock, struct ot_workstruct *ws ) {
+  struct http_data* cookie = io_getcookie( sock );
+  ssize_t byte_count;
 
-  if( ( l = io_tryread( clientsocket, ws->inbuf, G_INBUF_SIZE ) ) <= 0 ) {
-    handle_dead( clientsocket );
+  if( ( byte_count = io_tryread( sock, ws->inbuf, G_INBUF_SIZE ) ) <= 0 ) {
+    handle_dead( sock );
     return 0;
   }
 
   /* If we get the whole request in one packet, handle it without copying */
-  if( !array_start( &h->data.request ) ) {
-    if( memchr( ws->inbuf, '\n', l ) ) {
+  if( !array_start( &cookie->data.request ) ) {
+    if( memchr( ws->inbuf, '\n', byte_count ) ) {
       ws->request = ws->inbuf;
-      ws->request_size = l;
-      return http_handle_request( clientsocket, ws );
+      ws->request_size = byte_count;
+      return http_handle_request( sock, ws );
     }
 
     /* ... else take a copy */
-    h->flag |= STRUCT_HTTP_FLAG_ARRAY_USED;
-    array_catb( &h->data.request, ws->inbuf, l );
+    cookie->flag |= STRUCT_HTTP_FLAG_ARRAY_USED;
+    array_catb( &cookie->data.request, ws->inbuf, byte_count );
     return 0;
   }
 
-  h->flag |= STRUCT_HTTP_FLAG_ARRAY_USED;
-  array_catb( &h->data.request, ws->inbuf, l );
+  array_catb( &cookie->data.request, ws->inbuf, byte_count );
 
-  if( array_failed( &h->data.request ) )
-    return http_issue_error( clientsocket, ws, CODE_HTTPERROR_500 );
+  if( array_failed( &cookie->data.request ) )
+    return http_issue_error( sock, ws, CODE_HTTPERROR_500 );
 
-  if( array_bytes( &h->data.request ) > 8192 )
-    return http_issue_error( clientsocket, ws, CODE_HTTPERROR_500 );
+  if( array_bytes( &cookie->data.request ) > 8192 )
+    return http_issue_error( sock, ws, CODE_HTTPERROR_500 );
 
-  if( !memchr( array_start( &h->data.request ), '\n', array_bytes( &h->data.request ) ) )
+  if( !memchr( array_start( &cookie->data.request ), '\n', array_bytes( &cookie->data.request ) ) )
     return 0;
 
-  ws->request = array_start( &h->data.request );
-  ws->request_size = array_bytes( &h->data.request );
-  return http_handle_request( clientsocket, ws );
+  ws->request = array_start( &cookie->data.request );
+  ws->request_size = array_bytes( &cookie->data.request );
+  return http_handle_request( sock, ws );
 }
 
-static void handle_write( const int64 clientsocket ) {
-  struct http_data* h=io_getcookie( clientsocket );
-  if( !h || ( iob_send( clientsocket, &h->data.batch ) <= 0 ) )
-    handle_dead( clientsocket );
+static void handle_write( const int64 sock ) {
+  struct http_data* cookie=io_getcookie( sock );
+  if( !cookie || ( iob_send( sock, &cookie->data.batch ) <= 0 ) )
+    handle_dead( sock );
 }
 
 static void handle_accept( const int64 serversocket ) {
-  struct http_data *h;
+  struct http_data *cookie;
+  int64 sock;
   ot_ip6 ip;
   uint16 port;
   tai6464 t;
-  int64 i;
 
-  while( ( i = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) {
+  while( ( sock = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) {
 
     /* Put fd into a non-blocking mode */
-    io_nonblock( i );
+    io_nonblock( sock );
 
-    if( !io_fd( i ) ||
-        !( h = (struct http_data*)malloc( sizeof(struct http_data) ) ) ) {
-      io_close( i );
+    if( !io_fd( sock ) ||
+        !( cookie = (struct http_data*)malloc( sizeof(struct http_data) ) ) ) {
+      io_close( sock );
       continue;
     }
-    io_setcookie( i, h );
-    io_wantread( i );
+    io_setcookie( sock, cookie );
+    io_wantread( sock );
 
-    memset(h, 0, sizeof( struct http_data ) );
-    memcpy(h->ip,ip,sizeof(ot_ip6));
+    memset(cookie, 0, sizeof( struct http_data ) );
+    memcpy(cookie->ip,ip,sizeof(ot_ip6));
 
     stats_issue_event( EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip);
 
@@ -179,7 +183,7 @@ static void handle_accept( const int64 serversocket ) {
        time this often in FreeBSD and libowfat does not allow to set unix time */
     taia_uint( &t, 0 ); /* Clear t */
     tai_unix( &(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT) );
-    io_timeout( i, t );
+    io_timeout( sock, t );
   }
 
   if( errno == EAGAIN )
@@ -202,29 +206,29 @@ static void server_mainloop( ) {
     panic( "Initializing worker failed" );
 
   for( ; ; ) {
-    int64 i;
+    int64 sock;
 
     io_wait();
 
-    while( ( i = io_canread( ) ) != -1 ) {
-      const void *cookie = io_getcookie( i );
+    while( ( sock = io_canread( ) ) != -1 ) {
+      const void *cookie = io_getcookie( sock );
       if( (intptr_t)cookie == FLAG_TCP )
-        handle_accept( i );
+        handle_accept( sock );
       else if( (intptr_t)cookie == FLAG_UDP )
-        handle_udp6( i, &ws );
+        handle_udp6( sock, &ws );
       else
-        handle_read( i, &ws );
+        handle_read( sock, &ws );
     }
 
-    while( ( i = mutex_workqueue_popresult( &iovec_entries, &iovector ) ) != -1 )
-      http_sendiovecdata( i, &ws, iovec_entries, iovector );
+    while( ( sock = mutex_workqueue_popresult( &iovec_entries, &iovector ) ) != -1 )
+      http_sendiovecdata( sock, &ws, iovec_entries, iovector );
 
-    while( ( i = io_canwrite( ) ) != -1 )
-      handle_write( i );
+    while( ( sock = io_canwrite( ) ) != -1 )
+      handle_write( sock );
 
     if( g_now_seconds > next_timeout_check ) {
-      while( ( i = io_timeouted() ) != -1 )
-        handle_dead( i );
+      while( ( sock = io_timeouted() ) != -1 )
+        handle_dead( sock );
       next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL;
     }
 
@@ -236,7 +240,7 @@ static void server_mainloop( ) {
 }
 
 static int64_t ot_try_bind( ot_ip6 ip, uint16_t port, PROTO_FLAG proto ) {
-  int64 s = proto == FLAG_TCP ? socket_tcp6( ) : socket_udp6( );
+  int64 sock = proto == FLAG_TCP ? socket_tcp6( ) : socket_udp6( );
 
 #ifndef WANT_V6
   if( !ip6_isv4mapped(ip) ) {
@@ -257,24 +261,24 @@ static int64_t ot_try_bind( ot_ip6 ip, uint16_t port, PROTO_FLAG proto ) {
   fputs( _debug, stderr );
 #endif
 
-  if( socket_bind6_reuse( s, ip, port, 0 ) == -1 )
+  if( socket_bind6_reuse( sock, ip, port, 0 ) == -1 )
     panic( "socket_bind6_reuse" );
 
-  if( ( proto == FLAG_TCP ) && ( socket_listen( s, SOMAXCONN) == -1 ) )
+  if( ( proto == FLAG_TCP ) && ( socket_listen( sock, SOMAXCONN) == -1 ) )
     panic( "socket_listen" );
 
-  if( !io_fd( s ) )
+  if( !io_fd( sock ) )
     panic( "io_fd" );
 
-  io_setcookie( s, (void*)proto );
+  io_setcookie( sock, (void*)proto );
 
-  io_wantread( s );
+  io_wantread( sock );
 
 #ifdef _DEBUG
   fputs( " success.\n", stderr);
 #endif
 
-  return s;
+  return sock;
 }
 
 char * set_config_option( char **option, char *value ) {
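
The signal handler introduced above does not try to interrupt worker threads directly: it restores SIG_DFL (so a second interrupt kills the process outright), clears the shared running flag that workers are expected to test before taking another bucket lock, and keeps a cached copy of the wall clock via SIGALRM. A minimal sketch of that pattern, with a hypothetical worker_loop standing in for opentracker's actual threads:

#include <signal.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t g_running = 1;  /* plays the role of g_opentracker_running */
static volatile time_t       g_now_cache;    /* plays the role of g_now_seconds */

static void example_signal_handler( int s ) {
  if( s == SIGINT ) {
    signal( SIGINT, SIG_DFL );   /* any further interrupt quits immediately */
    g_running = 0;               /* workers stop before acquiring new locks */
  } else if( s == SIGALRM ) {
    g_now_cache = time( NULL );  /* cache the clock; time() is expensive on BSDs */
    alarm( 5 );
  }
}

/* hypothetical worker: test the flag before each new bucket lock */
static void *worker_loop( void *arg ) {
  while( g_running ) {
    /* lock bucket, do one unit of work, unlock bucket */
  }
  return arg;
}
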
diff --git a/ot_clean.c b/ot_clean.c
--- a/ot_clean.c
+++ b/ot_clean.c
@@ -6,6 +6,7 @@
 /* System */
 #include <pthread.h>
 #include <unistd.h>
+#include <string.h>
 
 /* Libowfat */
 #include "io.h"
diff --git a/ot_fullscrape.c b/ot_fullscrape.c
index 17405b7..4cd0a44 100644
--- a/ot_fullscrape.c
+++ b/ot_fullscrape.c
@@ -80,8 +80,8 @@ void fullscrape_deinit( ) {
   pthread_cancel( thread_id );
 }
 
-void fullscrape_deliver( int64 socket, ot_tasktype tasktype ) {
-  mutex_workqueue_pushtask( socket, tasktype );
+void fullscrape_deliver( int64 sock, ot_tasktype tasktype ) {
+  mutex_workqueue_pushtask( sock, tasktype );
 }
 
 static int fullscrape_increase( int *iovec_entries, struct iovec **iovector,
diff --git a/ot_fullscrape.h b/ot_fullscrape.h
index 3f5dc7d..b86f8ea 100644
--- a/ot_fullscrape.h
+++ b/ot_fullscrape.h
@@ -10,7 +10,7 @@
 
 void fullscrape_init( );
 void fullscrape_deinit( );
-void fullscrape_deliver( int64 socket, ot_tasktype tasktype );
+void fullscrape_deliver( int64 sock, ot_tasktype tasktype );
 
 #else
 
diff --git a/ot_http.c b/ot_http.c
--- a/ot_http.c
+++ b/ot_http.c
@@ -35,50 +35,50 @@ enum {
   SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING = 32,
   SUCCESS_HTTP_SIZE_OFF = 17 };
 
-static void http_senddata( const int64 client_socket, struct ot_workstruct *ws ) {
-  struct http_data *h = io_getcookie( client_socket );
+static void http_senddata( const int64 sock, struct ot_workstruct *ws ) {
+  struct http_data *cookie = io_getcookie( sock );
   ssize_t written_size;
 
   /* whoever sends data is not interested in its input-array */
-  if( h && ( h->flag & STRUCT_HTTP_FLAG_ARRAY_USED ) ) {
-    h->flag &= ~STRUCT_HTTP_FLAG_ARRAY_USED;
-    array_reset( &h->data.request );
+  if( cookie && ( cookie->flag & STRUCT_HTTP_FLAG_ARRAY_USED ) ) {
+    cookie->flag &= ~STRUCT_HTTP_FLAG_ARRAY_USED;
+    array_reset( &cookie->data.request );
   }
 
-  written_size = write( client_socket, ws->reply, ws->reply_size );
+  written_size = write( sock, ws->reply, ws->reply_size );
   if( ( written_size < 0 ) || ( written_size == ws->reply_size ) ) {
-    free( h ); io_close( client_socket );
+    free( cookie ); io_close( sock );
   } else {
     char * outbuf;
     tai6464 t;
 
-    if( !h ) return;
+    if( !cookie ) return;
     if( !( outbuf = malloc( ws->reply_size - written_size ) ) ) {
-      free(h); io_close( client_socket );
+      free(cookie); io_close( sock );
       return;
     }
 
-    iob_reset( &h->data.batch );
+    iob_reset( &cookie->data.batch );
     memcpy( outbuf, ws->reply + written_size, ws->reply_size - written_size );
-    iob_addbuf_free( &h->data.batch, outbuf, ws->reply_size - written_size );
-    h->flag |= STRUCT_HTTP_FLAG_IOB_USED;
+    iob_addbuf_free( &cookie->data.batch, outbuf, ws->reply_size - written_size );
+    cookie->flag |= STRUCT_HTTP_FLAG_IOB_USED;
 
     /* writeable short data sockets just have a tcp timeout */
-    taia_uint( &t, 0 ); io_timeout( client_socket, t );
-    io_dontwantread( client_socket );
-    io_wantwrite( client_socket );
+    taia_uint( &t, 0 ); io_timeout( sock, t );
+    io_dontwantread( sock );
+    io_wantwrite( sock );
   }
 }
 
-#define HTTPERROR_302 return http_issue_error( client_socket, ws, CODE_HTTPERROR_302 )
-#define HTTPERROR_400 return http_issue_error( client_socket, ws, CODE_HTTPERROR_400 )
-#define HTTPERROR_400_PARAM return http_issue_error( client_socket, ws, CODE_HTTPERROR_400_PARAM )
-#define HTTPERROR_400_COMPACT return http_issue_error( client_socket, ws, CODE_HTTPERROR_400_COMPACT )
-#define HTTPERROR_400_DOUBLEHASH return http_issue_error( client_socket, ws, CODE_HTTPERROR_400_PARAM )
-#define HTTPERROR_403_IP return http_issue_error( client_socket, ws, CODE_HTTPERROR_403_IP )
-#define HTTPERROR_404 return http_issue_error( client_socket, ws, CODE_HTTPERROR_404 )
-#define HTTPERROR_500 return http_issue_error( client_socket, ws, CODE_HTTPERROR_500 )
-ssize_t http_issue_error( const int64 client_socket, struct ot_workstruct *ws, int code ) {
+#define HTTPERROR_302 return http_issue_error( sock, ws, CODE_HTTPERROR_302 )
+#define HTTPERROR_400 return http_issue_error( sock, ws, CODE_HTTPERROR_400 )
+#define HTTPERROR_400_PARAM return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM )
+#define HTTPERROR_400_COMPACT return http_issue_error( sock, ws, CODE_HTTPERROR_400_COMPACT )
+#define HTTPERROR_400_DOUBLEHASH return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM )
+#define HTTPERROR_403_IP return http_issue_error( sock, ws, CODE_HTTPERROR_403_IP )
+#define HTTPERROR_404 return http_issue_error( sock, ws, CODE_HTTPERROR_404 )
+#define HTTPERROR_500 return http_issue_error( sock, ws, CODE_HTTPERROR_500 )
+ssize_t http_issue_error( const int64 sock, struct ot_workstruct *ws, int code ) {
   char *error_code[] = { "302 Found", "400 Invalid Request", "400 Invalid Request", "400 Invalid Request",
                          "403 Access Denied", "404 Not Found", "500 Internal Server Error" };
   char *title = error_code[code];
@@ -93,32 +93,32 @@ ssize_t http_issue_error( const int64 client_socket, struct ot_workstruct *ws, i
   fprintf( stderr, "DEBUG: invalid request was: %s\n", ws->debugbuf );
 #endif
   stats_issue_event( EVENT_FAILED, FLAG_TCP, code );
-  http_senddata( client_socket, ws );
+  http_senddata( sock, ws );
   return ws->reply_size = -2;
 }
 
-ssize_t http_sendiovecdata( const int64 client_socket, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ) {
-  struct http_data *h = io_getcookie( client_socket );
+ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ) {
+  struct http_data *cookie = io_getcookie( sock );
   char *header;
   int i;
   size_t header_size, size = iovec_length( &iovec_entries, &iovector );
   tai6464 t;
 
   /* No cookie? Bad socket. Leave. */
-  if( !h ) {
+  if( !cookie ) {
     iovec_free( &iovec_entries, &iovector );
     HTTPERROR_500;
   }
 
   /* If this socket collected request in a buffer,
      free it now */
-  if( h->flag & STRUCT_HTTP_FLAG_ARRAY_USED ) {
-    h->flag &= ~STRUCT_HTTP_FLAG_ARRAY_USED;
-    array_reset( &h->data.request );
+  if( cookie->flag & STRUCT_HTTP_FLAG_ARRAY_USED ) {
+    cookie->flag &= ~STRUCT_HTTP_FLAG_ARRAY_USED;
+    array_reset( &cookie->data.request );
   }
 
   /* If we came here, wait for the answer is over */
-  h->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK;
+  cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK;
 
   /* Our answers never are 0 vectors. Return an error. */
   if( !iovec_entries ) {
@@ -132,32 +132,32 @@ ssize_t http_sendiovecdata( const int64 client_socket, struct ot_workstruct *ws,
     HTTPERROR_500;
   }
 
-  if( h->flag & STRUCT_HTTP_FLAG_GZIP )
+  if( cookie->flag & STRUCT_HTTP_FLAG_GZIP )
     header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: gzip\r\nContent-Length: %zd\r\n\r\n", size );
-  else if( h->flag & STRUCT_HTTP_FLAG_BZIP2 )
+  else if( cookie->flag & STRUCT_HTTP_FLAG_BZIP2 )
     header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: bzip2\r\nContent-Length: %zd\r\n\r\n", size );
   else
     header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r\n", size );
 
-  iob_reset( &h->data.batch );
-  iob_addbuf_free( &h->data.batch, header, header_size );
+  iob_reset( &cookie->data.batch );
+  iob_addbuf_free( &cookie->data.batch, header, header_size );
 
   /* Will move to ot_iovec.c */
   for( i=0; i<iovec_entries; ++i )
-    iob_addbuf_munmap( &h->data.batch, iovector[i].iov_base, iovector[i].iov_len );
+    iob_addbuf_munmap( &cookie->data.batch, iovector[i].iov_base, iovector[i].iov_len );
   free( iovector );
 
-  h->flag |= STRUCT_HTTP_FLAG_IOB_USED;
+  cookie->flag |= STRUCT_HTTP_FLAG_IOB_USED;
 
   /* writeable sockets timeout after 10 minutes */
   taia_now( &t ); taia_addsec( &t, &t, OT_CLIENT_TIMEOUT_SEND );
-  io_timeout( client_socket, t );
-  io_dontwantread( client_socket );
-  io_wantwrite( client_socket );
+  io_timeout( sock, t );
+  io_dontwantread( sock );
+  io_wantwrite( sock );
   return 0;
 }
 
-static ssize_t http_handle_stats( const int64 client_socket, struct ot_workstruct *ws, char *read_ptr ) {
+static ssize_t http_handle_stats( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) {
   static const ot_keywords keywords_main[] =
     { { "mode", 1 }, {"format", 2 }, { NULL, -3 } };
   static const ot_keywords keywords_mode[] =
@@ -173,9 +173,9 @@ static const ot_keywords keywords_format[] =
   int mode = TASK_STATS_PEERS, scanon = 1, format = 0;
 
 #ifdef WANT_RESTRICT_STATS
-  struct http_data *h = io_getcookie( client_socket );
+  struct http_data *cookie = io_getcookie( sock );
 
-  if( !h || !accesslist_isblessed( h->ip, OT_PERMISSION_MAY_STAT ) )
+  if( !cookie || !accesslist_isblessed( cookie->ip, OT_PERMISSION_MAY_STAT ) )
     HTTPERROR_403_IP;
 #endif
 
@@ -195,22 +195,22 @@ static const ot_keywords keywords_format[] =
 
 #ifdef WANT_FULLSCRAPE
   if( mode == TASK_STATS_TPB ) {
-    struct http_data* h = io_getcookie( client_socket );
+    struct http_data* cookie = io_getcookie( sock );
     tai6464 t;
 #ifdef WANT_COMPRESSION_GZIP
     ws->request[ws->request_size] = 0;
     if( strstr( read_ptr - 1, "gzip" ) ) {
-      h->flag |= STRUCT_HTTP_FLAG_GZIP;
+      cookie->flag |= STRUCT_HTTP_FLAG_GZIP;
       format |= TASK_FLAG_GZIP;
     }
 #endif
     /* Pass this task to the worker thread */
-    h->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK;
+    cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK;
 
     /* Clients waiting for us should not easily timeout */
-    taia_uint( &t, 0 ); io_timeout( client_socket, t );
-    fullscrape_deliver( client_socket, format );
-    io_dontwantread( client_socket );
+    taia_uint( &t, 0 ); io_timeout( sock, t );
+    fullscrape_deliver( sock, format );
+    io_dontwantread( sock );
     return ws->reply_size = -2;
   }
 #endif
@@ -219,8 +219,8 @@ static const ot_keywords keywords_format[] =
   if( ( mode & TASK_CLASS_MASK ) == TASK_STATS ) {
     tai6464 t;
     /* Complex stats also include expensive memory debugging tools */
-    taia_uint( &t, 0 ); io_timeout( client_socket, t );
-    stats_deliver( client_socket, mode );
+    taia_uint( &t, 0 ); io_timeout( sock, t );
+    stats_deliver( sock, mode );
     return ws->reply_size = -2;
   }
 
@@ -231,36 +231,36 @@ static const ot_keywords keywords_format[] =
 }
 
 #ifdef WANT_FULLSCRAPE
-static ssize_t http_handle_fullscrape( const int64 client_socket, struct ot_workstruct *ws ) {
-  struct http_data* h = io_getcookie( client_socket );
+static ssize_t http_handle_fullscrape( const int64 sock, struct ot_workstruct *ws ) {
+  struct http_data* cookie = io_getcookie( sock );
   int format = 0;
   tai6464 t;
 
 #ifdef WANT_COMPRESSION_GZIP
   ws->request[ws->request_size-1] = 0;
   if( strstr( ws->request, "gzip" ) ) {
-    h->flag |= STRUCT_HTTP_FLAG_GZIP;
+    cookie->flag |= STRUCT_HTTP_FLAG_GZIP;
     format = TASK_FLAG_GZIP;
-    stats_issue_event( EVENT_FULLSCRAPE_REQUEST_GZIP, 0, (uintptr_t)h->ip );
+    stats_issue_event( EVENT_FULLSCRAPE_REQUEST_GZIP, 0, (uintptr_t)cookie->ip );
   } else
 #endif
-    stats_issue_event( EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)h->ip );
+    stats_issue_event( EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)cookie->ip );
 
 #ifdef _DEBUG_HTTPERROR
   write( 2, ws->debugbuf, G_DEBUGBUF_SIZE );
 #endif
 
   /* Pass this task to the worker thread */
-  h->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK;
+  cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK;
   /* Clients waiting for us should not easily timeout */
-  taia_uint( &t, 0 ); io_timeout( client_socket, t );
-  fullscrape_deliver( client_socket, TASK_FULLSCRAPE | format );
-  io_dontwantread( client_socket );
+  taia_uint( &t, 0 ); io_timeout( sock, t );
+  fullscrape_deliver( sock, TASK_FULLSCRAPE | format );
+  io_dontwantread( sock );
   return ws->reply_size = -2;
 }
 #endif
 
-static ssize_t http_handle_scrape( const int64 client_socket, struct ot_workstruct *ws, char *read_ptr ) {
+static ssize_t http_handle_scrape( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) {
   static const ot_keywords keywords_scrape[] = { { "info_hash", 1 }, { NULL, -3 } };
 
   ot_hash * multiscrape_buf = (ot_hash*)ws->request;
@@ -305,7 +305,7 @@ static ot_keywords keywords_announce[] = { { "port", 1 }, { "left", 2 }, { "even
 #endif
   { NULL, -3 } };
 static ot_keywords keywords_announce_event[] = { { "completed", 1 }, { "stopped", 2 }, { NULL, -3 } };
-static ssize_t http_handle_announce( const int64 client_socket, struct ot_workstruct *ws, char *read_ptr ) {
+static ssize_t http_handle_announce( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) {
   int numwant, tmp, scanon;
   ot_peer peer;
   ot_hash *hash = NULL;
@@ -320,7 +320,7 @@ static ssize_t http_handle_announce( const int64 client_socket, struct ot_workst
     ++read_ptr;
   }
 
-  OT_SETIP( &peer, ((struct http_data*)io_getcookie( client_socket ) )->ip );
+  OT_SETIP( &peer, ((struct http_data*)io_getcookie( sock ) )->ip );
   OT_SETPORT( &peer, &port );
   OT_PEERFLAG( &peer ) = 0;
   numwant = 50;
@@ -400,7 +400,7 @@ static ssize_t http_handle_announce( const int64 client_socket, struct ot_workst
   return ws->reply_size;
 }
 
-ssize_t http_handle_request( const int64 client_socket, struct ot_workstruct *ws ) {
+ssize_t http_handle_request( const int64 sock, struct ot_workstruct *ws ) {
   ssize_t reply_off, len;
   char *read_ptr = ws->request, *write_ptr;
 
@@ -433,17 +433,17 @@ ssize_t http_handle_request( const int64 client_socket, struct ot_workstruct *ws
 
   /* This is the hardcore match for announce*/
   if( ( *write_ptr == 'a' ) || ( *write_ptr == '?' ) )
-    http_handle_announce( client_socket, ws, read_ptr );
+    http_handle_announce( sock, ws, read_ptr );
 #ifdef WANT_FULLSCRAPE
   else if( !memcmp( write_ptr, "scrape HTTP/", 12 ) )
-    http_handle_fullscrape( client_socket, ws );
+    http_handle_fullscrape( sock, ws );
 #endif
   /* This is the hardcore match for scrape */
   else if( !memcmp( write_ptr, "sc", 2 ) )
-    http_handle_scrape( client_socket, ws, read_ptr );
+    http_handle_scrape( sock, ws, read_ptr );
   /* All the rest is matched the standard way */
   else if( !memcmp( write_ptr, "stats", 5) )
-    http_handle_stats( client_socket, ws, read_ptr );
+    http_handle_stats( sock, ws, read_ptr );
   else
     HTTPERROR_404;
 
@@ -469,7 +469,7 @@ ssize_t http_handle_request( const int64 client_socket, struct ot_workstruct *ws
   /* 3. Finally we join both blocks neatly */
   ws->outbuf[ SUCCESS_HTTP_HEADER_LENGTH - 1 ] = '\n';
 
-  http_senddata( client_socket, ws );
+  http_senddata( sock, ws );
   return ws->reply_size;
 }
 
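
http_senddata above keeps the common case cheap: it writes the whole reply immediately, and only when the kernel accepts part of it does it copy the unsent tail into the connection's iovec batch and flip the socket from read to write interest. A reduced sketch of that short-write handling with plain POSIX calls and a hypothetical struct conn (opentracker itself uses libowfat's iob for this):

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct conn {
  char  *pending;      /* unsent tail of the last reply, if any */
  size_t pending_len;
};

/* Try to send a whole reply; on a short write keep the remainder and ask the
   event loop to poll the descriptor for writability instead of readability. */
static int send_or_queue( int fd, struct conn *c, const char *reply, size_t len ) {
  ssize_t written = write( fd, reply, len );
  if( written < 0 || (size_t)written == len )
    return (int)written;                 /* fully sent, or error: caller closes */
  c->pending_len = len - (size_t)written;
  c->pending = malloc( c->pending_len );
  if( !c->pending )
    return -1;
  memcpy( c->pending, reply + written, c->pending_len );
  /* caller: stop polling for read, start polling for write on fd */
  return 0;
}
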
diff --git a/ot_livesync.h b/ot_livesync.h
index 8e78afb..fe9d122 100644
--- a/ot_livesync.h
+++ b/ot_livesync.h
@@ -94,7 +94,7 @@ void livesync_tell( ot_hash const info_hash, const ot_peer * const peer );
 void livesync_ticker( );
 
 /* Handle an incoming live sync packet */
-void handle_livesync( const int64 serversocket );
+void handle_livesync( const int64 sock );
 
 #else
 
diff --git a/ot_mutex.c b/ot_mutex.c
--- a/ot_mutex.c
+++ b/ot_mutex.c
@@ -113,7 +113,7 @@ size_t mutex_get_torrent_count( ) {
 struct ot_task {
   ot_taskid taskid;
   ot_tasktype tasktype;
-  int64 socket;
+  int64 sock;
   int iovec_entries;
   struct iovec *iovec;
   struct ot_task *next;
@@ -124,7 +124,7 @@ static struct ot_task *tasklist = NULL;
 static pthread_mutex_t tasklist_mutex;
 static pthread_cond_t tasklist_being_filled;
 
-int mutex_workqueue_pushtask( int64 socket, ot_tasktype tasktype ) {
+int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) {
   struct ot_task ** tmptask, * task;
 
   /* Want exclusive access to tasklist */
@@ -148,7 +148,7 @@ int mutex_workqueue_pushtask( int64 socket, ot_tasktype tasktype ) {
 
   task->taskid = 0;
   task->tasktype = tasktype;
-  task->socket = socket;
+  task->sock = sock;
   task->iovec_entries = 0;
   task->iovec = NULL;
   task->next = 0;
@@ -162,7 +162,7 @@ int mutex_workqueue_pushtask( int64 socket, ot_tasktype tasktype ) {
   return 0;
 }
 
-void mutex_workqueue_canceltask( int64 socket ) {
+void mutex_workqueue_canceltask( int64 sock ) {
   struct ot_task ** task;
 
   /* Want exclusive access to tasklist */
@@ -171,10 +171,10 @@ void mutex_workqueue_canceltask( int64 socket ) {
   MTX_DBG( "canceltask locked.\n" );
 
   task = &tasklist;
-  while( *task && ( (*task)->socket != socket ) )
+  while( *task && ( (*task)->sock != sock ) )
     *task = (*task)->next;
 
-  if( *task && ( (*task)->socket == socket ) ) {
+  if( *task && ( (*task)->sock == sock ) ) {
     struct iovec *iovec = (*task)->iovec;
     struct ot_task *ptask = *task;
     int i;
@@ -281,7 +281,7 @@ int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iove
 
 int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) {
   struct ot_task ** task;
-  int64 socket = -1;
+  int64 sock = -1;
 
   /* Want exclusive access to tasklist */
   MTX_DBG( "popresult locks.\n" );
@@ -297,7 +297,7 @@ int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) {
 
     *iovec_entries = (*task)->iovec_entries;
     *iovec = (*task)->iovec;
-    socket = (*task)->socket;
+    sock = (*task)->sock;
 
     *task = (*task)->next;
     free( ptask );
@@ -307,7 +307,7 @@ int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) {
   MTX_DBG( "popresult unlocks.\n" );
   pthread_mutex_unlock( &tasklist_mutex );
   MTX_DBG( "popresult unlocked.\n" );
-  return socket;
+  return sock;
 }
 
 void mutex_init( ) {
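
The renamed sock field is the key that ties a queued task back to the HTTP connection waiting for it: fullscrape_deliver and stats_deliver push a task under the client's descriptor, handle_dead cancels by that descriptor, and the main loop pops finished results by it. A condensed sketch of such a socket-keyed work queue protected by a mutex and condition variable (hypothetical names, far smaller than opentracker's real ot_mutex.c):

#include <pthread.h>
#include <stdlib.h>

typedef long long int64;

struct task {
  int64        sock;   /* client socket this task answers */
  struct task *next;
};

static struct task     *queue;
static pthread_mutex_t  queue_mutex  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t   queue_filled = PTHREAD_COND_INITIALIZER;

/* append a task for sock at the tail of the queue and wake one worker */
static int push_task( int64 sock ) {
  struct task **walk, *t = calloc( 1, sizeof *t );
  if( !t ) return -1;
  t->sock = sock;
  pthread_mutex_lock( &queue_mutex );
  for( walk = &queue; *walk; walk = &(*walk)->next ) ;
  *walk = t;
  pthread_cond_signal( &queue_filled );
  pthread_mutex_unlock( &queue_mutex );
  return 0;
}

/* drop a pending task whose client socket has died */
static void cancel_task( int64 sock ) {
  struct task **walk;
  pthread_mutex_lock( &queue_mutex );
  for( walk = &queue; *walk; walk = &(*walk)->next )
    if( (*walk)->sock == sock ) {
      struct task *gone = *walk;
      *walk = gone->next;
      free( gone );
      break;
    }
  pthread_mutex_unlock( &queue_mutex );
}
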
diff --git a/ot_mutex.h b/ot_mutex.h
--- a/ot_mutex.h
+++ b/ot_mutex.h
@@ -59,8 +59,8 @@ typedef enum {
 
 typedef unsigned long ot_taskid;
 
-int mutex_workqueue_pushtask( int64 socket, ot_tasktype tasktype );
-void mutex_workqueue_canceltask( int64 socket );
+int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype );
+void mutex_workqueue_canceltask( int64 sock );
 void mutex_workqueue_pushsuccess( ot_taskid taskid );
 ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype );
 int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovector );
diff --git a/ot_stats.c b/ot_stats.c
--- a/ot_stats.c
+++ b/ot_stats.c
@@ -611,8 +611,8 @@ static void * stats_worker( void * args ) {
   return NULL;
 }
 
-void stats_deliver( int64 socket, int tasktype ) {
-  mutex_workqueue_pushtask( socket, tasktype );
+void stats_deliver( int64 sock, int tasktype ) {
+  mutex_workqueue_pushtask( sock, tasktype );
 }
 
 static pthread_t thread_id;
diff --git a/ot_stats.h b/ot_stats.h
--- a/ot_stats.h
+++ b/ot_stats.h
@@ -33,7 +33,7 @@ enum {
 };
 
 void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data );
-void stats_deliver( int64 socket, int tasktype );
+void stats_deliver( int64 sock, int tasktype );
 size_t return_stats_for_tracker( char *reply, int mode, int format );
 size_t stats_return_tracker_version( char *reply );
 void stats_init( );
diff --git a/ot_udp.c b/ot_udp.c
--- a/ot_udp.c
+++ b/ot_udp.c
@@ -44,15 +44,15 @@ void handle_udp6( int64 serversocket, struct ot_workstruct *ws ) {
   uint32_t *outpacket = (uint32_t*)ws->outbuf;
   uint32_t numwant, left, event, scopeid;
   uint16_t port, remoteport;
-  size_t r, r_out;
+  size_t byte_count, scrape_count;
 
-  r = socket_recv6( serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid );
+  byte_count = socket_recv6( serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid );
 
   stats_issue_event( EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip );
-  stats_issue_event( EVENT_READ, FLAG_UDP, r );
+  stats_issue_event( EVENT_READ, FLAG_UDP, byte_count );
 
   /* Minimum udp tracker packet size, also catches error */
-  if( r < 16 )
+  if( byte_count < 16 )
     return;
 
   switch( ntohl( inpacket[2] ) ) {
@@ -70,7 +70,7 @@ void handle_udp6( int64 serversocket, struct ot_workstruct *ws ) {
       break;
     case 1: /* This is an announce action */
       /* Minimum udp announce packet size */
-      if( r < 98 )
+      if( byte_count < 98 )
         return;
 
       if( !udp_test_connectionid( inpacket, remoteip ))
@@ -103,12 +103,12 @@ void handle_udp6( int64 serversocket, struct ot_workstruct *ws ) {
       outpacket[1] = inpacket[12/4];
 
       if( OT_PEERFLAG( &peer ) & PEER_FLAG_STOPPED ) /* Peer is gone. */
-        r = remove_peer_from_torrent( *hash, &peer, ws->outbuf, FLAG_UDP );
+        byte_count = remove_peer_from_torrent( *hash, &peer, ws->outbuf, FLAG_UDP );
       else
-        r = 8 + add_peer_to_torrent_and_return_peers( *hash, &peer, FLAG_UDP, numwant, ((char*)outpacket) + 8 );
+        byte_count = 8 + add_peer_to_torrent_and_return_peers( *hash, &peer, FLAG_UDP, numwant, ((char*)outpacket) + 8 );
 
-      socket_send6( serversocket, ws->outbuf, r, remoteip, remoteport, 0 );
-      stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, r );
+      socket_send6( serversocket, ws->outbuf, byte_count, remoteip, remoteport, 0 );
+      stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, byte_count );
       break;
 
     case 2: /* This is a scrape action */
@@ -118,11 +118,11 @@ void handle_udp6( int64 serversocket, struct ot_workstruct *ws ) {
       outpacket[0] = htonl( 2 ); /* scrape action */
       outpacket[1] = inpacket[12/4];
 
-      for( r_out = 0; ( r_out * 20 < r - 16) && ( r_out <= 74 ); r_out++ )
-        return_udp_scrape_for_torrent( *(ot_hash*)( ((char*)inpacket) + 16 + 20 * r_out ), ((char*)outpacket) + 8 + 12 * r_out );
+      for( scrape_count = 0; ( scrape_count * 20 < byte_count - 16) && ( scrape_count <= 74 ); scrape_count++ )
+        return_udp_scrape_for_torrent( *(ot_hash*)( ((char*)inpacket) + 16 + 20 * scrape_count ), ((char*)outpacket) + 8 + 12 * scrape_count );
 
-      socket_send6( serversocket, ws->outbuf, 8 + 12 * r_out, remoteip, remoteport, 0 );
-      stats_issue_event( EVENT_SCRAPE, FLAG_UDP, r );
+      socket_send6( serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0 );
+      stats_issue_event( EVENT_SCRAPE, FLAG_UDP, scrape_count );
       break;
   }
 }
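
In the scrape branch above, the request carries one 20-byte info_hash after its 16-byte header, the reply carries one 12-byte stats record per answered hash after an 8-byte header, and the loop stops after roughly 74 hashes so the answer stays within a typical UDP packet. A small helper mirroring those loop bounds (hypothetical, not part of opentracker):

#include <stddef.h>

/* byte_count is the size of the incoming scrape request; returns the reply
   size and stores how many hashes get answered, using the same bounds as
   the loop above. */
static size_t udp_scrape_reply_size( size_t byte_count, size_t *hashes_out ) {
  size_t n = 0;
  while( ( n * 20 < byte_count - 16 ) && ( n <= 74 ) )
    ++n;
  *hashes_out = n;
  return 8 + 12 * n;
}
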
diff --git a/ot_vector.c b/ot_vector.c
index a69d9a5..cbb3fac 100644
--- a/ot_vector.c
+++ b/ot_vector.c
@@ -23,8 +23,6 @@ static int vector_compare_peer(const void *peer1, const void *peer2 ) {
 /* This function gives us a binary search that returns a pointer, even if
    no exact match is found. In that case it sets exactmatch 0 and gives
    calling functions the chance to insert data
-
-   NOTE: Minimal compare_size is 4, member_size must be a multiple of 4
 */
 void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size,
                      size_t compare_size, int *exactmatch ) {