Diffstat (limited to 'opentracker.c')

 opentracker.c | 27 ++++++++++++++++-----------
 1 files changed, 16 insertions, 11 deletions
diff --git a/opentracker.c b/opentracker.c
index 81f5daa..75f43f0 100644
--- a/opentracker.c
+++ b/opentracker.c
@@ -61,6 +61,9 @@ static char static_inbuf[8192];
 static char static_outbuf[8192];
 static char static_tmpbuf[8192];
 
+#define OT_MAXMULTISCRAPE_COUNT 64
+static ot_hash multiscrape_buf[OT_MAXMULTISCRAPE_COUNT];
+
 static char *FLAG_TCP = "TCP";
 static char *FLAG_UDP = "UDP";
 static size_t ot_sockets_count = 0;
@@ -217,7 +220,7 @@ static void httpresponse( const int64 s, char *data, size_t l ) {
   ot_peer peer;
   ot_torrent *torrent;
   ot_hash *hash = NULL;
-  int numwant, tmp, scanon, mode;
+  int numwant, tmp, scanon, mode, scrape_count;
   unsigned short port = htons(6881);
   time_t t;
   ssize_t len;
@@ -373,17 +376,18 @@ LOG_TO_STDERR( "stats: %d.%d.%d.%d - mode: s24s old\n", h->ip[0], h->ip[1], h->i
     case 6: /* scrape ? */
       if( byte_diff( data, 6, "scrape") ) HTTPERROR_404;
 
 /* We want the pure plain un-unescaped text */
 memmove( static_tmpbuf, static_inbuf, 8192 );
 
       /* This is to hack around stupid clients that just replace
          "announce ?info_hash" with "scrape ?info_hash".
          We do not want to bomb them with full scrapes */
-      if( !byte_diff( c, 2, " ?" ) ) ++c;
+      if( !byte_diff( c, 2, " ?" ) ) c+=2;
 
 SCRAPE_WORKAROUND:
 
       scanon = 1;
+      scrape_count = 0;
       while( scanon ) {
         switch( scan_urlencoded_query( &c, data = c, SCAN_SEARCHPATH_PARAM ) ) {
         case -2: scanon = 0; break; /* TERMINATOR */
@@ -396,13 +400,14 @@ SCRAPE_WORKAROUND:
           }
           /* ignore this, when we have less than 20 bytes */
           if( scan_urlencoded_query( &c, data = c, SCAN_SEARCHPATH_VALUE ) != 20 ) HTTPERROR_400_PARAM;
-          hash = (ot_hash*)data;
+          if( scrape_count < OT_MAXMULTISCRAPE_COUNT )
+            memmove( multiscrape_buf + scrape_count++, data, sizeof(ot_hash) );
           break;
         }
       }
 
       /* Scanned whole query string, no hash means full scrape... you might want to limit that */
-      if( !hash ) {
+      if( !scrape_count ) {
 LOG_TO_STDERR( "scrp: %d.%d.%d.%d - FULL SCRAPE\n", h->ip[0], h->ip[1], h->ip[2], h->ip[3] );
 write( 2, static_tmpbuf, l );
 write( 2, "\n\n\n", 1 );
@@ -412,7 +417,7 @@ write( 2, "\n\n\n", 1 );
       }
 
       /* Enough for http header + whole scrape string */
-      if( !( reply_size = return_tcp_scrape_for_torrent( hash, SUCCESS_HTTP_HEADER_LENGTH + static_outbuf ) ) ) HTTPERROR_500;
+      if( !( reply_size = return_tcp_scrape_for_torrent( multiscrape_buf, scrape_count, SUCCESS_HTTP_HEADER_LENGTH + static_outbuf ) ) ) HTTPERROR_500;
 
       ot_overall_tcp_successfulannounces++;
       break;
@@ -423,7 +428,7 @@ write( 2, "\n\n\n", 1 );
       if( byte_diff( data, 8, "announce" ) ) HTTPERROR_404;
 
       /* This is to hack around stupid clients that send "announce ?info_hash" */
-      if( !byte_diff( c, 2, " ?" ) ) ++c;
+      if( !byte_diff( c, 2, " ?" ) ) c+=2;
 
 ANNOUNCE_WORKAROUND:
 
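
The diff only shows the caller side of the change: the HTTP handler now copies each 20-byte info_hash into multiscrape_buf, caps the count at OT_MAXMULTISCRAPE_COUNT, and passes both the buffer and scrape_count to return_tcp_scrape_for_torrent(), whose matching change in trackerlogic.c is not part of this diff. Below is a minimal, self-contained sketch of that multiscrape pattern; the helper names (scrape_many, lookup_torrent), the dummy statistics, and the exact bencoding are assumptions for illustration, not the tracker's actual implementation.

/* Sketch of the multiscrape call pattern introduced by this commit.
   lookup_torrent() and scrape_many() are hypothetical stand-ins for the
   tracker's hash-table lookup and for return_tcp_scrape_for_torrent(). */
#include <stdio.h>
#include <string.h>

#define OT_MAXMULTISCRAPE_COUNT 64
typedef unsigned char ot_hash[20];

/* Hypothetical per-torrent counters, standing in for the tracker's state. */
struct torrent_stats { size_t seeds, peers, downloads; };

static int lookup_torrent( ot_hash hash, struct torrent_stats *stats ) {
  (void)hash;
  stats->seeds = 1; stats->peers = 2; stats->downloads = 3; /* dummy data */
  return 1;
}

/* Assumed shape of the new buffer-plus-count scrape: walk the collected
   hashes and append one bencoded "files" entry per known torrent. */
static size_t scrape_many( ot_hash *hashes, int amount, char *reply ) {
  char *r = reply;
  int i;
  r += sprintf( r, "d5:filesd" );
  for( i = 0; i < amount; ++i ) {
    struct torrent_stats stats;
    if( !lookup_torrent( hashes[i], &stats ) ) continue;
    r += sprintf( r, "20:" );
    memcpy( r, hashes[i], sizeof(ot_hash) ); r += sizeof(ot_hash);
    r += sprintf( r, "d8:completei%zue10:downloadedi%zue10:incompletei%zuee",
                  stats.seeds, stats.downloads, stats.peers );
  }
  r += sprintf( r, "ee" );
  return r - reply;
}

int main( void ) {
  static ot_hash multiscrape_buf[OT_MAXMULTISCRAPE_COUNT];
  static char outbuf[8192];
  int scrape_count = 0;
  size_t reply_size;

  /* The HTTP parser would fill this from each info_hash= parameter,
     capped at OT_MAXMULTISCRAPE_COUNT exactly as in the diff. */
  memset( multiscrape_buf[scrape_count++], 0xab, sizeof(ot_hash) );
  memset( multiscrape_buf[scrape_count++], 0xcd, sizeof(ot_hash) );

  reply_size = scrape_many( multiscrape_buf, scrape_count, outbuf );
  printf( "scrape reply: %zu bytes\n", reply_size );
  return 0;
}

Note that the copy in the parse loop is guarded rather than rejected: a request carrying more than OT_MAXMULTISCRAPE_COUNT info_hash values still has every value length-checked, but only the first 64 are stored and answered, which bounds the size of the scrape reply.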