author:    erdgeist <>  2009-01-15 23:01:36 +0000
committer: erdgeist <>  2009-01-15 23:01:36 +0000
commit:    66c906d5d3c100e5fe1e6f088bd1ea17c5831894
tree:      33eb4f386d14327277689e66143d5e607b3eef98 /opentracker.c
parent:    4ced0484abae55546e04954b3dafad46f9db348a
Add comments, rename our struct http_data h to cookie, all clientsockets to sock, all size_t from socket_recvs to byte_count. Make signal handler set default handler for the second SIGINT
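
For context, here is a minimal, self-contained sketch of the signal-handling pattern this commit describes; it is not opentracker's actual code, and `example_handler`, `g_running` and `g_cached_now` are illustrative stand-ins for `signal_handler`, `g_opentracker_running` and `g_now_seconds`. The first SIGINT restores the default handler so a second interrupt terminates the process immediately, while SIGALRM periodically refreshes a cached clock.

/* Sketch only: first SIGINT starts a clean shutdown and restores the
   default handler, so a second SIGINT kills the process immediately;
   SIGALRM keeps a cached copy of the current time. */
#include <signal.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t g_running = 1;
static volatile time_t       g_cached_now;

static void example_handler( int s ) {
  if( s == SIGINT ) {
    signal( SIGINT, SIG_DFL );   /* second Ctrl-C falls through to the default action: terminate */
    g_running = 0;               /* ask the main loop / workers to wind down */
  } else if( s == SIGALRM ) {
    g_cached_now = time( NULL ); /* cheap cached clock instead of calling time() everywhere */
    alarm( 5 );                  /* re-arm the periodic refresh */
  }
}

int main( void ) {
  signal( SIGINT,  example_handler );
  signal( SIGALRM, example_handler );
  alarm( 5 );
  while( g_running )
    pause();                     /* real code would run its event loop here */
  return 0;
}
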
Diffstat (limited to 'opentracker.c')
-rw-r--r--  opentracker.c  140
1 files changed, 72 insertions, 68 deletions
diff --git a/opentracker.c b/opentracker.c
index 91a1f79..970995a 100644
--- a/opentracker.c
+++ b/opentracker.c
@@ -48,12 +48,17 @@ static void panic( const char *routine ) {
 
 static void signal_handler( int s ) {
   if( s == SIGINT ) {
-    signal( SIGINT, SIG_IGN);
+    /* Any new interrupt signal quits the application */
+    signal( SIGINT, SIG_DFL);
+
+    /* Tell all other threads to not acquire any new lock on a bucket
+       but cancel their operations and return */
     g_opentracker_running = 0;
 
     trackerlogic_deinit();
     exit( 0 );
   } else if( s == SIGALRM ) {
+    /* Maintain our copy of the clock. time() on BSDs is very expensive. */
     g_now_seconds = time(NULL);
     alarm(5);
   }
@@ -90,88 +95,87 @@ static void help( char *name ) {
 }
 #undef HELPLINE
 
-static void handle_dead( const int64 socket ) {
-  struct http_data* h=io_getcookie( socket );
-  if( h ) {
-    if( h->flag & STRUCT_HTTP_FLAG_IOB_USED )
-      iob_reset( &h->data.batch );
-    if( h->flag & STRUCT_HTTP_FLAG_ARRAY_USED )
-      array_reset( &h->data.request );
-    if( h->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK )
-      mutex_workqueue_canceltask( socket );
-    free( h );
+static void handle_dead( const int64 sock ) {
+  struct http_data* cookie=io_getcookie( sock );
+  if( cookie ) {
+    if( cookie->flag & STRUCT_HTTP_FLAG_IOB_USED )
+      iob_reset( &cookie->data.batch );
+    if( cookie->flag & STRUCT_HTTP_FLAG_ARRAY_USED )
+      array_reset( &cookie->data.request );
+    if( cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK )
+      mutex_workqueue_canceltask( sock );
+    free( cookie );
   }
-  io_close( socket );
+  io_close( sock );
 }
 
-static ssize_t handle_read( const int64 clientsocket, struct ot_workstruct *ws ) {
-  struct http_data* h = io_getcookie( clientsocket );
-  ssize_t l;
+static ssize_t handle_read( const int64 sock, struct ot_workstruct *ws ) {
+  struct http_data* cookie = io_getcookie( sock );
+  ssize_t byte_count;
 
-  if( ( l = io_tryread( clientsocket, ws->inbuf, G_INBUF_SIZE ) ) <= 0 ) {
-    handle_dead( clientsocket );
+  if( ( byte_count = io_tryread( sock, ws->inbuf, G_INBUF_SIZE ) ) <= 0 ) {
+    handle_dead( sock );
     return 0;
   }
 
   /* If we get the whole request in one packet, handle it without copying */
-  if( !array_start( &h->data.request ) ) {
-    if( memchr( ws->inbuf, '\n', l ) ) {
+  if( !array_start( &cookie->data.request ) ) {
+    if( memchr( ws->inbuf, '\n', byte_count ) ) {
       ws->request = ws->inbuf;
-      ws->request_size = l;
-      return http_handle_request( clientsocket, ws );
+      ws->request_size = byte_count;
+      return http_handle_request( sock, ws );
     }
 
     /* ... else take a copy */
-    h->flag |= STRUCT_HTTP_FLAG_ARRAY_USED;
-    array_catb( &h->data.request, ws->inbuf, l );
+    cookie->flag |= STRUCT_HTTP_FLAG_ARRAY_USED;
+    array_catb( &cookie->data.request, ws->inbuf, byte_count );
     return 0;
   }
 
-  h->flag |= STRUCT_HTTP_FLAG_ARRAY_USED;
-  array_catb( &h->data.request, ws->inbuf, l );
+  array_catb( &cookie->data.request, ws->inbuf, byte_count );
 
-  if( array_failed( &h->data.request ) )
-    return http_issue_error( clientsocket, ws, CODE_HTTPERROR_500 );
+  if( array_failed( &cookie->data.request ) )
+    return http_issue_error( sock, ws, CODE_HTTPERROR_500 );
 
-  if( array_bytes( &h->data.request ) > 8192 )
-    return http_issue_error( clientsocket, ws, CODE_HTTPERROR_500 );
+  if( array_bytes( &cookie->data.request ) > 8192 )
+    return http_issue_error( sock, ws, CODE_HTTPERROR_500 );
 
-  if( !memchr( array_start( &h->data.request ), '\n', array_bytes( &h->data.request ) ) )
+  if( !memchr( array_start( &cookie->data.request ), '\n', array_bytes( &cookie->data.request ) ) )
     return 0;
 
-  ws->request = array_start( &h->data.request );
-  ws->request_size = array_bytes( &h->data.request );
-  return http_handle_request( clientsocket, ws );
+  ws->request = array_start( &cookie->data.request );
+  ws->request_size = array_bytes( &cookie->data.request );
+  return http_handle_request( sock, ws );
 }
 
-static void handle_write( const int64 clientsocket ) {
-  struct http_data* h=io_getcookie( clientsocket );
-  if( !h || ( iob_send( clientsocket, &h->data.batch ) <= 0 ) )
-    handle_dead( clientsocket );
+static void handle_write( const int64 sock ) {
+  struct http_data* cookie=io_getcookie( sock );
+  if( !cookie || ( iob_send( sock, &cookie->data.batch ) <= 0 ) )
+    handle_dead( sock );
 }
 
 static void handle_accept( const int64 serversocket ) {
-  struct http_data *h;
+  struct http_data *cookie;
+  int64 sock;
   ot_ip6 ip;
   uint16 port;
   tai6464 t;
-  int64 i;
 
-  while( ( i = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) {
+  while( ( sock = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) {
 
     /* Put fd into a non-blocking mode */
-    io_nonblock( i );
+    io_nonblock( sock );
 
-    if( !io_fd( i ) ||
-        !( h = (struct http_data*)malloc( sizeof(struct http_data) ) ) ) {
-      io_close( i );
+    if( !io_fd( sock ) ||
+        !( cookie = (struct http_data*)malloc( sizeof(struct http_data) ) ) ) {
+      io_close( sock );
       continue;
     }
-    io_setcookie( i, h );
-    io_wantread( i );
+    io_setcookie( sock, cookie );
+    io_wantread( sock );
 
-    memset(h, 0, sizeof( struct http_data ) );
-    memcpy(h->ip,ip,sizeof(ot_ip6));
+    memset(cookie, 0, sizeof( struct http_data ) );
+    memcpy(cookie->ip,ip,sizeof(ot_ip6));
 
     stats_issue_event( EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip);
 
@@ -179,7 +183,7 @@ static void handle_accept( const int64 serversocket ) {
        time this often in FreeBSD and libowfat does not allow to set unix time */
     taia_uint( &t, 0 ); /* Clear t */
     tai_unix( &(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT) );
-    io_timeout( i, t );
+    io_timeout( sock, t );
   }
 
   if( errno == EAGAIN )
@@ -202,29 +206,29 @@ static void server_mainloop( ) {
     panic( "Initializing worker failed" );
 
   for( ; ; ) {
-    int64 i;
+    int64 sock;
 
     io_wait();
 
-    while( ( i = io_canread( ) ) != -1 ) {
-      const void *cookie = io_getcookie( i );
+    while( ( sock = io_canread( ) ) != -1 ) {
+      const void *cookie = io_getcookie( sock );
       if( (intptr_t)cookie == FLAG_TCP )
-        handle_accept( i );
+        handle_accept( sock );
       else if( (intptr_t)cookie == FLAG_UDP )
-        handle_udp6( i, &ws );
+        handle_udp6( sock, &ws );
       else
-        handle_read( i, &ws );
+        handle_read( sock, &ws );
     }
 
-    while( ( i = mutex_workqueue_popresult( &iovec_entries, &iovector ) ) != -1 )
-      http_sendiovecdata( i, &ws, iovec_entries, iovector );
+    while( ( sock = mutex_workqueue_popresult( &iovec_entries, &iovector ) ) != -1 )
+      http_sendiovecdata( sock, &ws, iovec_entries, iovector );
 
-    while( ( i = io_canwrite( ) ) != -1 )
-      handle_write( i );
+    while( ( sock = io_canwrite( ) ) != -1 )
+      handle_write( sock );
 
     if( g_now_seconds > next_timeout_check ) {
-      while( ( i = io_timeouted() ) != -1 )
-        handle_dead( i );
+      while( ( sock = io_timeouted() ) != -1 )
+        handle_dead( sock );
       next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL;
     }
 
@@ -236,7 +240,7 @@ static void server_mainloop( ) {
 }
 
 static int64_t ot_try_bind( ot_ip6 ip, uint16_t port, PROTO_FLAG proto ) {
-  int64 s = proto == FLAG_TCP ? socket_tcp6( ) : socket_udp6( );
+  int64 sock = proto == FLAG_TCP ? socket_tcp6( ) : socket_udp6( );
 
 #ifndef WANT_V6
   if( !ip6_isv4mapped(ip) ) {
@@ -257,24 +261,24 @@ static int64_t ot_try_bind( ot_ip6 ip, uint16_t port, PROTO_FLAG proto ) {
   fputs( _debug, stderr );
 #endif
 
-  if( socket_bind6_reuse( s, ip, port, 0 ) == -1 )
+  if( socket_bind6_reuse( sock, ip, port, 0 ) == -1 )
     panic( "socket_bind6_reuse" );
 
-  if( ( proto == FLAG_TCP ) && ( socket_listen( s, SOMAXCONN) == -1 ) )
+  if( ( proto == FLAG_TCP ) && ( socket_listen( sock, SOMAXCONN) == -1 ) )
     panic( "socket_listen" );
 
-  if( !io_fd( s ) )
+  if( !io_fd( sock ) )
     panic( "io_fd" );
 
-  io_setcookie( s, (void*)proto );
+  io_setcookie( sock, (void*)proto );
 
-  io_wantread( s );
+  io_wantread( sock );
 
 #ifdef _DEBUG
   fputs( " success.\n", stderr);
 #endif
 
-  return s;
+  return sock;
 }
 
 char * set_config_option( char **option, char *value ) {