diff options
| author | erdgeist <> | 2009-01-02 08:57:53 +0000 |
|---|---|---|
| committer | erdgeist <> | 2009-01-02 08:57:53 +0000 |
| commit | 2df09905f5540fee096d48a92cb0c42558498a12 (patch) | |
| tree | 68eab61d29719400972485de395dd0465467aea6 | |
| parent | 548e2b8338b5ee8d24fa928e833f345bb5cb6f0e (diff) | |
* opentracker now drops permissions in correct order and really chroots() if run as root
* lock passing between add_peer_to_torrent and return_peers_for_torrent is now avoided by providing a more general add_peer_to_torrent_and_return_peers function that can be called with NULL parameters to not return any peers (in the sync case)
* in order to keep a fast overview of how many torrents opentracker maintains, every mutex_bucket_unlock operation expects an additional integer parameter that tells ot_mutex.c how many torrents have been added or removed. A function mutex_get_torrent_count has been introduced.
| -rw-r--r-- | opentracker.c | 58 | ||||
| -rw-r--r-- | ot_clean.c | 8 | ||||
| -rw-r--r-- | ot_fullscrape.c | 8 | ||||
| -rw-r--r-- | ot_http.c | 14 | ||||
| -rw-r--r-- | ot_livesync.c | 400 | ||||
| -rw-r--r-- | ot_livesync.h | 26 | ||||
| -rw-r--r-- | ot_mutex.c | 16 | ||||
| -rw-r--r-- | ot_mutex.h | 6 | ||||
| -rw-r--r-- | ot_stats.c | 32 | ||||
| -rw-r--r-- | ot_udp.c | 18 | ||||
| -rw-r--r-- | trackerlogic.c | 89 | ||||
| -rw-r--r-- | trackerlogic.h | 13 |
12 files changed, 476 insertions, 212 deletions
diff --git a/opentracker.c b/opentracker.c index 61acd3f..993877a 100644 --- a/opentracker.c +++ b/opentracker.c | |||
| @@ -350,8 +350,47 @@ int parse_configfile( char * config_filename ) { | |||
| 350 | return bound; | 350 | return bound; |
| 351 | } | 351 | } |
| 352 | 352 | ||
| 353 | int main( int argc, char **argv ) { | 353 | int drop_privileges (const char * const serverdir) { |
| 354 | struct passwd *pws = NULL; | 354 | struct passwd *pws = NULL; |
| 355 | |||
| 356 | /* Grab pws entry before chrooting */ | ||
| 357 | pws = getpwnam( "nobody" ); | ||
| 358 | endpwent(); | ||
| 359 | |||
| 360 | if( geteuid() == 0 ) { | ||
| 361 | /* Running as root: chroot and drop privileges */ | ||
| 362 | if(chroot( serverdir )) { | ||
| 363 | fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) ); | ||
| 364 | return -1; | ||
| 365 | } | ||
| 366 | |||
| 367 | if(chdir("/")) | ||
| 368 | panic("chdir() failed after chrooting: "); | ||
| 369 | |||
| 370 | if( !pws ) { | ||
| 371 | setegid( (gid_t)-2 ); setgid( (gid_t)-2 ); | ||
| 372 | setuid( (uid_t)-2 ); seteuid( (uid_t)-2 ); | ||
| 373 | } | ||
| 374 | else { | ||
| 375 | setegid( pws->pw_gid ); setgid( pws->pw_gid ); | ||
| 376 | setuid( pws->pw_uid ); seteuid( pws->pw_uid ); | ||
| 377 | } | ||
| 378 | |||
| 379 | if( geteuid() == 0 || getegid() == 0 ) | ||
| 380 | panic("Still running with root privileges?!"); | ||
| 381 | } | ||
| 382 | else { | ||
| 383 | /* Normal user, just chdir() */ | ||
| 384 | if(chdir( serverdir )) { | ||
| 385 | fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) ); | ||
| 386 | return -1; | ||
| 387 | } | ||
| 388 | } | ||
| 389 | |||
| 390 | return 0; | ||
| 391 | } | ||
| 392 | |||
| 393 | int main( int argc, char **argv ) { | ||
| 355 | char serverip[4] = {0,0,0,0}, tmpip[4]; | 394 | char serverip[4] = {0,0,0,0}, tmpip[4]; |
| 356 | int bound = 0, scanon = 1; | 395 | int bound = 0, scanon = 1; |
| 357 | uint16_t tmpport; | 396 | uint16_t tmpport; |
| @@ -404,16 +443,8 @@ while( scanon ) { | |||
| 404 | ot_try_bind( serverip, 6969, FLAG_UDP ); | 443 | ot_try_bind( serverip, 6969, FLAG_UDP ); |
| 405 | } | 444 | } |
| 406 | 445 | ||
| 407 | /* Drop permissions */ | 446 | if( drop_privileges( g_serverdir ? g_serverdir : "." ) == -1 ) |
| 408 | pws = getpwnam( "nobody" ); | 447 | panic( "drop_privileges failed, exiting. Last error"); |
| 409 | if( !pws ) { | ||
| 410 | setegid( (gid_t)-2 ); setuid( (uid_t)-2 ); | ||
| 411 | setgid( (gid_t)-2 ); seteuid( (uid_t)-2 ); | ||
| 412 | } else { | ||
| 413 | setegid( pws->pw_gid ); setuid( pws->pw_uid ); | ||
| 414 | setgid( pws->pw_gid ); seteuid( pws->pw_uid ); | ||
| 415 | } | ||
| 416 | endpwent(); | ||
| 417 | 448 | ||
| 418 | signal( SIGPIPE, SIG_IGN ); | 449 | signal( SIGPIPE, SIG_IGN ); |
| 419 | signal( SIGINT, signal_handler ); | 450 | signal( SIGINT, signal_handler ); |
| @@ -421,9 +452,10 @@ while( scanon ) { | |||
| 421 | 452 | ||
| 422 | g_now_seconds = time( NULL ); | 453 | g_now_seconds = time( NULL ); |
| 423 | 454 | ||
| 424 | if( trackerlogic_init( g_serverdir ? g_serverdir : "." ) == -1 ) | 455 | /* Init all sub systems. This call may fail with an exit() */ |
| 425 | panic( "Logic not started" ); | 456 | trackerlogic_init( ); |
| 426 | 457 | ||
| 458 | /* Kick off our initial clock setting alarm */ | ||
| 427 | alarm(5); | 459 | alarm(5); |
| 428 | 460 | ||
| 429 | server_mainloop( ); | 461 | server_mainloop( ); |
| @@ -20,7 +20,7 @@ | |||
| 20 | static ssize_t clean_single_bucket( ot_peer *peers, size_t peer_count, time_t timedout, int *removed_seeders ) { | 20 | static ssize_t clean_single_bucket( ot_peer *peers, size_t peer_count, time_t timedout, int *removed_seeders ) { |
| 21 | ot_peer *last_peer = peers + peer_count, *insert_point; | 21 | ot_peer *last_peer = peers + peer_count, *insert_point; |
| 22 | time_t timediff; | 22 | time_t timediff; |
| 23 | 23 | ||
| 24 | /* Two scan modes: unless there is one peer removed, just increase ot_peertime */ | 24 | /* Two scan modes: unless there is one peer removed, just increase ot_peertime */ |
| 25 | while( peers < last_peer ) { | 25 | while( peers < last_peer ) { |
| 26 | if( ( timediff = timedout + OT_PEERTIME( peers ) ) >= OT_PEER_TIMEOUT ) | 26 | if( ( timediff = timedout + OT_PEERTIME( peers ) ) >= OT_PEER_TIMEOUT ) |
| @@ -105,17 +105,19 @@ static void * clean_worker( void * args ) { | |||
| 105 | while( bucket-- ) { | 105 | while( bucket-- ) { |
| 106 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 106 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); |
| 107 | size_t toffs; | 107 | size_t toffs; |
| 108 | int delta_torrentcount = 0; | ||
| 108 | 109 | ||
| 109 | for( toffs=0; toffs<torrents_list->size; ++toffs ) { | 110 | for( toffs=0; toffs<torrents_list->size; ++toffs ) { |
| 110 | ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + toffs; | 111 | ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + toffs; |
| 111 | if( clean_single_torrent( torrent ) ) { | 112 | if( clean_single_torrent( torrent ) ) { |
| 112 | vector_remove_torrent( torrents_list, torrent ); | 113 | vector_remove_torrent( torrents_list, torrent ); |
| 114 | delta_torrentcount -= 1; | ||
| 113 | --toffs; continue; | 115 | --toffs; continue; |
| 114 | } | 116 | } |
| 115 | } | 117 | } |
| 116 | mutex_bucket_unlock( bucket ); | 118 | mutex_bucket_unlock( bucket, delta_torrentcount ); |
| 117 | if( !g_opentracker_running ) | 119 | if( !g_opentracker_running ) |
| 118 | return NULL; | 120 | return NULL; |
| 119 | usleep( OT_CLEAN_SLEEP ); | 121 | usleep( OT_CLEAN_SLEEP ); |
| 120 | } | 122 | } |
| 121 | } | 123 | } |
diff --git a/ot_fullscrape.c b/ot_fullscrape.c index 58546ca..3f60d40 100644 --- a/ot_fullscrape.c +++ b/ot_fullscrape.c | |||
| @@ -152,7 +152,7 @@ static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tas | |||
| 152 | /* Get exclusive access to that bucket */ | 152 | /* Get exclusive access to that bucket */ |
| 153 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 153 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); |
| 154 | size_t tor_offset; | 154 | size_t tor_offset; |
| 155 | 155 | ||
| 156 | /* For each torrent in this bucket.. */ | 156 | /* For each torrent in this bucket.. */ |
| 157 | for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) { | 157 | for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) { |
| 158 | /* Address torrents members */ | 158 | /* Address torrents members */ |
| @@ -199,13 +199,13 @@ static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tas | |||
| 199 | /* Check if there still is enough buffer left */ | 199 | /* Check if there still is enough buffer left */ |
| 200 | while( r >= re ) | 200 | while( r >= re ) |
| 201 | if( fullscrape_increase( iovec_entries, iovector, &r, &re WANT_COMPRESSION_GZIP_PARAM( &strm, mode, Z_NO_FLUSH ) ) ) | 201 | if( fullscrape_increase( iovec_entries, iovector, &r, &re WANT_COMPRESSION_GZIP_PARAM( &strm, mode, Z_NO_FLUSH ) ) ) |
| 202 | return mutex_bucket_unlock( bucket ); | 202 | return mutex_bucket_unlock( bucket, 0 ); |
| 203 | 203 | ||
| 204 | IF_COMPRESSION( r = compress_buffer; ) | 204 | IF_COMPRESSION( r = compress_buffer; ) |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | /* All torrents done: release lock on current bucket */ | 207 | /* All torrents done: release lock on current bucket */ |
| 208 | mutex_bucket_unlock( bucket ); | 208 | mutex_bucket_unlock( bucket, 0 ); |
| 209 | 209 | ||
| 210 | /* Parent thread died? */ | 210 | /* Parent thread died? */ |
| 211 | if( !g_opentracker_running ) | 211 | if( !g_opentracker_running ) |
| @@ -225,7 +225,7 @@ static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tas | |||
| 225 | 225 | ||
| 226 | while( r >= re ) | 226 | while( r >= re ) |
| 227 | if( fullscrape_increase( iovec_entries, iovector, &r, &re WANT_COMPRESSION_GZIP_PARAM( &strm, mode, Z_FINISH ) ) ) | 227 | if( fullscrape_increase( iovec_entries, iovector, &r, &re WANT_COMPRESSION_GZIP_PARAM( &strm, mode, Z_FINISH ) ) ) |
| 228 | return mutex_bucket_unlock( bucket ); | 228 | return mutex_bucket_unlock( bucket, 0 ); |
| 229 | deflateEnd(&strm); | 229 | deflateEnd(&strm); |
| 230 | } | 230 | } |
| 231 | #endif | 231 | #endif |
| @@ -385,7 +385,6 @@ static ssize_t http_handle_announce( const int64 client_socket, char *data ) { | |||
| 385 | char *c = data; | 385 | char *c = data; |
| 386 | int numwant, tmp, scanon; | 386 | int numwant, tmp, scanon; |
| 387 | ot_peer peer; | 387 | ot_peer peer; |
| 388 | ot_torrent *torrent; | ||
| 389 | ot_hash *hash = NULL; | 388 | ot_hash *hash = NULL; |
| 390 | unsigned short port = htons(6881); | 389 | unsigned short port = htons(6881); |
| 391 | ssize_t len; | 390 | ssize_t len; |
| @@ -403,6 +402,10 @@ static ssize_t http_handle_announce( const int64 client_socket, char *data ) { | |||
| 403 | numwant = 50; | 402 | numwant = 50; |
| 404 | scanon = 1; | 403 | scanon = 1; |
| 405 | 404 | ||
| 405 | #ifdef _DEBUG_PEERID | ||
| 406 | g_this_peerid_data = NULL; | ||
| 407 | #endif | ||
| 408 | |||
| 406 | while( scanon ) { | 409 | while( scanon ) { |
| 407 | switch( scan_urlencoded_query( &c, data = c, SCAN_SEARCHPATH_PARAM ) ) { | 410 | switch( scan_urlencoded_query( &c, data = c, SCAN_SEARCHPATH_PARAM ) ) { |
| 408 | case -2: scanon = 0; break; /* TERMINATOR */ | 411 | case -2: scanon = 0; break; /* TERMINATOR */ |
| @@ -483,10 +486,11 @@ static ssize_t http_handle_announce( const int64 client_socket, char *data ) { | |||
| 483 | 486 | ||
| 484 | if( OT_PEERFLAG( &peer ) & PEER_FLAG_STOPPED ) | 487 | if( OT_PEERFLAG( &peer ) & PEER_FLAG_STOPPED ) |
| 485 | len = remove_peer_from_torrent( hash, &peer, SUCCESS_HTTP_HEADER_LENGTH + static_outbuf, FLAG_TCP ); | 488 | len = remove_peer_from_torrent( hash, &peer, SUCCESS_HTTP_HEADER_LENGTH + static_outbuf, FLAG_TCP ); |
| 486 | else { | 489 | else |
| 487 | torrent = add_peer_to_torrent( hash, &peer WANT_SYNC_PARAM( 0 ) ); | 490 | len = add_peer_to_torrent_and_return_peers(hash, &peer, FLAG_TCP, numwant, SUCCESS_HTTP_HEADER_LENGTH + static_outbuf); |
| 488 | if( !torrent || !( len = return_peers_for_torrent( torrent, numwant, SUCCESS_HTTP_HEADER_LENGTH + static_outbuf, FLAG_TCP ) ) ) HTTPERROR_500; | 491 | |
| 489 | } | 492 | if( !len ) HTTPERROR_500; |
| 493 | |||
| 490 | stats_issue_event( EVENT_ANNOUNCE, FLAG_TCP, len); | 494 | stats_issue_event( EVENT_ANNOUNCE, FLAG_TCP, len); |
| 491 | return len; | 495 | return len; |
| 492 | } | 496 | } |
diff --git a/ot_livesync.c b/ot_livesync.c index 3cad121..47a371a 100644 --- a/ot_livesync.c +++ b/ot_livesync.c | |||
| @@ -9,59 +9,109 @@ | |||
| 9 | #include <string.h> | 9 | #include <string.h> |
| 10 | #include <pthread.h> | 10 | #include <pthread.h> |
| 11 | #include <unistd.h> | 11 | #include <unistd.h> |
| 12 | #include <stdlib.h> | ||
| 12 | 13 | ||
| 13 | /* Libowfat */ | 14 | /* Libowfat */ |
| 14 | #include "socket.h" | 15 | #include "socket.h" |
| 15 | #include "ndelay.h" | 16 | #include "ndelay.h" |
| 17 | #include "byte.h" | ||
| 16 | 18 | ||
| 17 | /* Opentracker */ | 19 | /* Opentracker */ |
| 18 | #include "trackerlogic.h" | 20 | #include "trackerlogic.h" |
| 19 | #include "ot_livesync.h" | 21 | #include "ot_livesync.h" |
| 20 | #include "ot_accesslist.h" | 22 | #include "ot_accesslist.h" |
| 21 | #include "ot_stats.h" | 23 | #include "ot_stats.h" |
| 24 | #include "ot_mutex.h" | ||
| 22 | 25 | ||
| 23 | #ifdef WANT_SYNC_LIVE | 26 | #ifdef WANT_SYNC_LIVE |
| 24 | 27 | ||
| 25 | char groupip_1[4] = { 224,0,23,42 }; | 28 | char groupip_1[4] = { 224,0,23,5 }; |
| 26 | 29 | ||
| 27 | #define LIVESYNC_BUFFINSIZE (256*256) | 30 | #define LIVESYNC_INCOMING_BUFFSIZE (256*256) |
| 28 | #define LIVESYNC_BUFFSIZE 1504 | ||
| 29 | #define LIVESYNC_BUFFWATER (sizeof(ot_peer)+sizeof(ot_hash)) | ||
| 30 | 31 | ||
| 31 | #define LIVESYNC_MAXDELAY 15 | 32 | #define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1504 |
| 33 | #define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash)) | ||
| 34 | |||
| 35 | #ifdef WANT_SYNC_SCRAPE | ||
| 36 | #define LIVESYNC_OUTGOING_BUFFSIZE_SCRAPE 1504 | ||
| 37 | #define LIVESYNC_OUTGOING_WATERMARK_SCRAPE (sizeof(ot_hash)+sizeof(uint64_t)+sizeof(uint32_t)) | ||
| 38 | #define LIVESYNC_OUTGOING_MAXPACKETS_SCRAPE 100 | ||
| 39 | |||
| 40 | #define LIVESYNC_FIRST_BEACON_DELAY (30*60) /* seconds */ | ||
| 41 | #define LIVESYNC_BEACON_INTERVAL 60 /* seconds */ | ||
| 42 | #define LIVESYNC_INQUIRE_THRESH 0.75 | ||
| 43 | #endif /* WANT_SYNC_SCRAPE */ | ||
| 44 | |||
| 45 | #define LIVESYNC_MAXDELAY 15 /* seconds */ | ||
| 46 | |||
| 47 | enum { OT_SYNC_PEER | ||
| 48 | #ifdef WANT_SYNC_SCRAPE | ||
| 49 | , OT_SYNC_SCRAPE_BEACON, OT_SYNC_SCRAPE_INQUIRE, OT_SYNC_SCRAPE_TELL | ||
| 50 | #endif | ||
| 51 | }; | ||
| 32 | 52 | ||
| 33 | /* Forward declaration */ | 53 | /* Forward declaration */ |
| 34 | static void * livesync_worker( void * args ); | 54 | static void * livesync_worker( void * args ); |
| 35 | 55 | ||
| 36 | /* For outgoing packets */ | 56 | /* For outgoing packets */ |
| 37 | static int64 g_livesync_socket_in = -1; | 57 | static int64 g_socket_in = -1; |
| 38 | 58 | ||
| 39 | /* For incoming packets */ | 59 | /* For incoming packets */ |
| 40 | static int64 g_livesync_socket_out = -1; | 60 | static int64 g_socket_out = -1; |
| 61 | static uint8_t g_inbuffer[LIVESYNC_INCOMING_BUFFSIZE]; | ||
| 41 | 62 | ||
| 42 | static uint8_t livesync_inbuffer[LIVESYNC_BUFFINSIZE]; | 63 | static uint8_t g_peerbuffer_start[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; |
| 43 | static uint8_t livesync_outbuffer_start[ LIVESYNC_BUFFSIZE ]; | 64 | static uint8_t *g_peerbuffer_pos; |
| 44 | static uint8_t *livesync_outbuffer_pos; | 65 | static uint8_t *g_peerbuffer_highwater = g_peerbuffer_start + LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS; |
| 45 | static uint8_t *livesync_outbuffer_highwater = livesync_outbuffer_start + LIVESYNC_BUFFSIZE - LIVESYNC_BUFFWATER; | 66 | |
| 46 | static ot_time livesync_lastpacket_time; | 67 | static ot_time g_next_packet_time; |
| 68 | |||
| 69 | #ifdef WANT_SYNC_SCRAPE | ||
| 70 | /* Live sync scrape buffers, states and timers */ | ||
| 71 | static ot_time g_next_beacon_time; | ||
| 72 | static ot_time g_next_inquire_time; | ||
| 73 | |||
| 74 | static uint8_t g_scrapebuffer_start[LIVESYNC_OUTGOING_BUFFSIZE_SCRAPE]; | ||
| 75 | static uint8_t *g_scrapebuffer_pos; | ||
| 76 | static uint8_t *g_scrapebuffer_highwater = g_scrapebuffer_start + LIVESYNC_OUTGOING_BUFFSIZE_SCRAPE - LIVESYNC_OUTGOING_WATERMARK_SCRAPE; | ||
| 77 | |||
| 78 | static size_t g_inquire_remote_count; | ||
| 79 | static uint32_t g_inquire_remote_host; | ||
| 80 | static int g_inquire_inprogress; | ||
| 81 | static int g_inquire_bucket; | ||
| 82 | #endif /* WANT_SYNC_SCRAPE */ | ||
| 47 | 83 | ||
| 48 | static pthread_t thread_id; | 84 | static pthread_t thread_id; |
| 49 | void livesync_init( ) { | 85 | void livesync_init( ) { |
| 50 | if( g_livesync_socket_in == -1 ) | 86 | if( g_socket_in == -1 ) |
| 51 | exerr( "No socket address for live sync specified." ); | 87 | exerr( "No socket address for live sync specified." ); |
| 52 | livesync_outbuffer_pos = livesync_outbuffer_start; | 88 | |
| 53 | memmove( livesync_outbuffer_pos, &g_tracker_id, sizeof( g_tracker_id ) ); | 89 | /* Prepare outgoing peers buffer */ |
| 54 | livesync_outbuffer_pos += sizeof( g_tracker_id ); | 90 | g_peerbuffer_pos = g_peerbuffer_start; |
| 55 | livesync_lastpacket_time = g_now_seconds; | 91 | memmove( g_peerbuffer_pos, &g_tracker_id, sizeof( g_tracker_id ) ); |
| 92 | uint32_pack_big( (char*)g_peerbuffer_pos + sizeof( g_tracker_id ), OT_SYNC_PEER); | ||
| 93 | g_peerbuffer_pos += sizeof( g_tracker_id ) + sizeof( uint32_t); | ||
| 94 | |||
| 95 | #ifdef WANT_SYNC_SCRAPE | ||
| 96 | /* Prepare outgoing scrape buffer */ | ||
| 97 | g_scrapebuffer_pos = g_scrapebuffer_start; | ||
| 98 | memmove( g_scrapebuffer_pos, &g_tracker_id, sizeof( g_tracker_id ) ); | ||
| 99 | uint32_pack_big( (char*)g_scrapebuffer_pos + sizeof( g_tracker_id ), OT_SYNC_SCRAPE_TELL); | ||
| 100 | g_scrapebuffer_pos += sizeof( g_tracker_id ) + sizeof( uint32_t); | ||
| 101 | |||
| 102 | /* Wind up timers for inquires */ | ||
| 103 | g_next_beacon_time = g_now_seconds + LIVESYNC_FIRST_BEACON_DELAY; | ||
| 104 | #endif /* WANT_SYNC_SCRAPE */ | ||
| 105 | g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; | ||
| 56 | 106 | ||
| 57 | pthread_create( &thread_id, NULL, livesync_worker, NULL ); | 107 | pthread_create( &thread_id, NULL, livesync_worker, NULL ); |
| 58 | } | 108 | } |
| 59 | 109 | ||
| 60 | void livesync_deinit() { | 110 | void livesync_deinit() { |
| 61 | if( g_livesync_socket_in != -1 ) | 111 | if( g_socket_in != -1 ) |
| 62 | close( g_livesync_socket_in ); | 112 | close( g_socket_in ); |
| 63 | if( g_livesync_socket_out != -1 ) | 113 | if( g_socket_out != -1 ) |
| 64 | close( g_livesync_socket_out ); | 114 | close( g_socket_out ); |
| 65 | 115 | ||
| 66 | pthread_cancel( thread_id ); | 116 | pthread_cancel( thread_id ); |
| 67 | } | 117 | } |
| @@ -69,104 +119,292 @@ void livesync_deinit() { | |||
| 69 | void livesync_bind_mcast( char *ip, uint16_t port) { | 119 | void livesync_bind_mcast( char *ip, uint16_t port) { |
| 70 | char tmpip[4] = {0,0,0,0}; | 120 | char tmpip[4] = {0,0,0,0}; |
| 71 | 121 | ||
| 72 | if( g_livesync_socket_in != -1 ) | 122 | if( g_socket_in != -1 ) |
| 73 | exerr("Error: Livesync listen ip specified twice."); | 123 | exerr("Error: Livesync listen ip specified twice."); |
| 74 | 124 | ||
| 75 | if( ( g_livesync_socket_in = socket_udp4( )) < 0) | 125 | if( ( g_socket_in = socket_udp4( )) < 0) |
| 76 | exerr("Error: Cant create live sync incoming socket." ); | 126 | exerr("Error: Cant create live sync incoming socket." ); |
| 77 | ndelay_off(g_livesync_socket_in); | 127 | ndelay_off(g_socket_in); |
| 78 | 128 | ||
| 79 | if( socket_bind4_reuse( g_livesync_socket_in, tmpip, port ) == -1 ) | 129 | if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 ) |
| 80 | exerr("Error: Cant bind live sync incoming socket." ); | 130 | exerr("Error: Cant bind live sync incoming socket." ); |
| 81 | 131 | ||
| 82 | if( socket_mcjoin4( g_livesync_socket_in, groupip_1, ip ) ) | 132 | if( socket_mcjoin4( g_socket_in, groupip_1, ip ) ) |
| 83 | exerr("Error: Cant make live sync incoming socket join mcast group."); | 133 | exerr("Error: Cant make live sync incoming socket join mcast group."); |
| 84 | 134 | ||
| 85 | if( ( g_livesync_socket_out = socket_udp4()) < 0) | 135 | if( ( g_socket_out = socket_udp4()) < 0) |
| 86 | exerr("Error: Cant create live sync outgoing socket." ); | 136 | exerr("Error: Cant create live sync outgoing socket." ); |
| 87 | if( socket_bind4_reuse( g_livesync_socket_out, ip, port ) == -1 ) | 137 | if( socket_bind4_reuse( g_socket_out, ip, port ) == -1 ) |
| 88 | exerr("Error: Cant bind live sync outgoing socket." ); | 138 | exerr("Error: Cant bind live sync outgoing socket." ); |
| 89 | 139 | ||
| 90 | socket_mcttl4(g_livesync_socket_out, 1); | 140 | socket_mcttl4(g_socket_out, 1); |
| 91 | socket_mcloop4(g_livesync_socket_out, 0); | 141 | socket_mcloop4(g_socket_out, 0); |
| 92 | } | 142 | } |
| 93 | 143 | ||
| 94 | static void livesync_issuepacket( ) { | 144 | static void livesync_issue_peersync( ) { |
| 95 | socket_send4(g_livesync_socket_out, (char*)livesync_outbuffer_start, livesync_outbuffer_pos - livesync_outbuffer_start, | 145 | socket_send4(g_socket_out, (char*)g_peerbuffer_start, g_peerbuffer_pos - g_peerbuffer_start, |
| 96 | groupip_1, LIVESYNC_PORT); | 146 | groupip_1, LIVESYNC_PORT); |
| 97 | livesync_outbuffer_pos = livesync_outbuffer_start + sizeof( g_tracker_id ); | 147 | g_peerbuffer_pos = g_peerbuffer_start + sizeof( g_tracker_id ) + sizeof( uint32_t ); |
| 98 | livesync_lastpacket_time = g_now_seconds; | 148 | g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; |
| 99 | } | 149 | } |
| 100 | 150 | ||
| 101 | /* Inform live sync about whats going on. */ | 151 | static void livesync_handle_peersync( ssize_t datalen ) { |
| 102 | void livesync_tell( ot_hash * const info_hash, const ot_peer * const peer ) { | 152 | int off = sizeof( g_tracker_id ) + sizeof( uint32_t ); |
| 103 | int i; | 153 | |
| 104 | for(i=0;i<20;i+=4) WRITE32(livesync_outbuffer_pos,i,READ32(info_hash,i)); | 154 | /* Now basic sanity checks have been done on the live sync packet |
| 105 | WRITE32(livesync_outbuffer_pos,20,READ32(peer,0)); | 155 | We might add more testing and logging. */ |
| 106 | WRITE32(livesync_outbuffer_pos,24,READ32(peer,4)); | 156 | while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= datalen ) { |
| 107 | livesync_outbuffer_pos += 28; | 157 | ot_peer *peer = (ot_peer*)(g_inbuffer + off + sizeof(ot_hash)); |
| 108 | 158 | ot_hash *hash = (ot_hash*)(g_inbuffer + off); | |
| 109 | if( livesync_outbuffer_pos >= livesync_outbuffer_highwater ) | 159 | |
| 110 | livesync_issuepacket(); | 160 | if( !g_opentracker_running ) return; |
| 161 | |||
| 162 | if( OT_PEERFLAG(peer) & PEER_FLAG_STOPPED ) | ||
| 163 | remove_peer_from_torrent(hash, peer, NULL, FLAG_MCA ); | ||
| 164 | else | ||
| 165 | add_peer_to_torrent( hash, peer, FLAG_MCA ); | ||
| 166 | |||
| 167 | off += sizeof( ot_hash ) + sizeof( ot_peer ); | ||
| 168 | } | ||
| 169 | |||
| 170 | stats_issue_event(EVENT_SYNC, 0, datalen / ((ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ))); | ||
| 111 | } | 171 | } |
| 112 | 172 | ||
| 173 | #ifdef WANT_SYNC_SCRAPE | ||
| 174 | void livesync_issue_beacon( ) { | ||
| 175 | size_t torrent_count = mutex_get_torrent_count(); | ||
| 176 | uint8_t beacon[ sizeof(g_tracker_id) + sizeof(uint32_t) + sizeof( uint64_t ) ]; | ||
| 177 | |||
| 178 | memmove( beacon, &g_tracker_id, sizeof( g_tracker_id ) ); | ||
| 179 | uint32_pack_big( (char*)beacon + sizeof( g_tracker_id ), OT_SYNC_SCRAPE_BEACON); | ||
| 180 | uint32_pack_big( (char*)beacon + sizeof( g_tracker_id ) + sizeof(uint32_t), (uint32_t)((uint64_t)(torrent_count)>>32) ); | ||
| 181 | uint32_pack_big( (char*)beacon + sizeof( g_tracker_id ) + 2 * sizeof(uint32_t), (uint32_t)torrent_count ); | ||
| 182 | |||
| 183 | socket_send4(g_socket_out, (char*)beacon, sizeof(beacon), groupip_1, LIVESYNC_PORT); | ||
| 184 | } | ||
| 185 | |||
| 186 | void livesync_handle_beacon( ssize_t datalen ) { | ||
| 187 | size_t torrent_count_local, torrent_count_remote; | ||
| 188 | if( datalen != sizeof(g_tracker_id) + sizeof(uint32_t) + sizeof( uint64_t ) ) | ||
| 189 | return; | ||
| 190 | torrent_count_local = mutex_get_torrent_count(); | ||
| 191 | torrent_count_remote = (size_t)(((uint64_t)uint32_read_big((char*)g_inbuffer+sizeof( g_tracker_id ) + sizeof(uint32_t))) << 32); | ||
| 192 | torrent_count_remote |= (size_t)uint32_read_big((char*)g_inbuffer+sizeof( g_tracker_id ) + 2 * sizeof(uint32_t)); | ||
| 193 | |||
| 194 | /* Empty tracker is useless */ | ||
| 195 | if( !torrent_count_remote ) return; | ||
| 196 | |||
| 197 | if( ((double)torrent_count_local ) / ((double)torrent_count_remote) < LIVESYNC_INQUIRE_THRESH) { | ||
| 198 | if( !g_next_inquire_time ) { | ||
| 199 | g_next_inquire_time = g_now_seconds + 2 * LIVESYNC_BEACON_INTERVAL; | ||
| 200 | g_inquire_remote_count = 0; | ||
| 201 | } | ||
| 202 | |||
| 203 | if( torrent_count_remote > g_inquire_remote_count ) { | ||
| 204 | g_inquire_remote_count = torrent_count_remote; | ||
| 205 | memmove( &g_inquire_remote_host, g_inbuffer, sizeof( g_tracker_id ) ); | ||
| 206 | } | ||
| 207 | } | ||
| 208 | } | ||
| 209 | |||
| 210 | void livesync_issue_inquire( ) { | ||
| 211 | uint8_t inquire[ sizeof(g_tracker_id) + sizeof(uint32_t) + sizeof(g_tracker_id)]; | ||
| 212 | |||
| 213 | memmove( inquire, &g_tracker_id, sizeof( g_tracker_id ) ); | ||
| 214 | uint32_pack_big( (char*)inquire + sizeof( g_tracker_id ), OT_SYNC_SCRAPE_INQUIRE); | ||
| 215 | memmove( inquire + sizeof(g_tracker_id) + sizeof(uint32_t), &g_inquire_remote_host, sizeof( g_tracker_id ) ); | ||
| 216 | |||
| 217 | socket_send4(g_socket_out, (char*)inquire, sizeof(inquire), groupip_1, LIVESYNC_PORT); | ||
| 218 | } | ||
| 219 | |||
| 220 | void livesync_handle_inquire( ssize_t datalen ) { | ||
| 221 | if( datalen != sizeof(g_tracker_id) + sizeof(uint32_t) + sizeof(g_tracker_id) ) | ||
| 222 | return; | ||
| 223 | |||
| 224 | /* If it isn't us, they're inquiring, ignore inquiry */ | ||
| 225 | if( memcmp( &g_tracker_id, g_inbuffer, sizeof( g_tracker_id ) ) ) | ||
| 226 | return; | ||
| 227 | |||
| 228 | /* Start scrape tell on next ticker */ | ||
| 229 | if( !g_inquire_inprogress ) { | ||
| 230 | g_inquire_inprogress = 1; | ||
| 231 | g_inquire_bucket = 0; | ||
| 232 | } | ||
| 233 | } | ||
| 234 | |||
| 235 | void livesync_issue_tell( ) { | ||
| 236 | int packets_to_send = LIVESYNC_OUTGOING_MAXPACKETS_SCRAPE; | ||
| 237 | while( packets_to_send > 0 && g_inquire_bucket < OT_BUCKET_COUNT ) { | ||
| 238 | ot_vector *torrents_list = mutex_bucket_lock( g_inquire_bucket ); | ||
| 239 | unsigned int j; | ||
| 240 | for( j=0; j<torrents_list->size; ++j ) { | ||
| 241 | ot_torrent *torrent = (ot_torrent*)(torrents_list->data) + j; | ||
| 242 | memmove(g_scrapebuffer_pos, torrent->hash, sizeof(ot_hash)); | ||
| 243 | g_scrapebuffer_pos += sizeof(ot_hash); | ||
| 244 | uint32_pack_big( (char*)g_scrapebuffer_pos , (uint32_t)(g_now_minutes - torrent->peer_list->base )); | ||
| 245 | uint32_pack_big( (char*)g_scrapebuffer_pos + 4, (uint32_t)((uint64_t)(torrent->peer_list->down_count)>>32) ); | ||
| 246 | uint32_pack_big( (char*)g_scrapebuffer_pos + 8, (uint32_t)torrent->peer_list->down_count ); | ||
| 247 | g_scrapebuffer_pos += 12; | ||
| 248 | |||
| 249 | if( g_scrapebuffer_pos >= g_scrapebuffer_highwater ) { | ||
| 250 | socket_send4(g_socket_out, (char*)g_scrapebuffer_start, g_scrapebuffer_pos - g_scrapebuffer_start, groupip_1, LIVESYNC_PORT); | ||
| 251 | g_scrapebuffer_pos = g_scrapebuffer_start + sizeof( g_tracker_id ) + sizeof( uint32_t); | ||
| 252 | --packets_to_send; | ||
| 253 | } | ||
| 254 | } | ||
| 255 | mutex_bucket_unlock( g_inquire_bucket++, 0 ); | ||
| 256 | if( !g_opentracker_running ) | ||
| 257 | return; | ||
| 258 | } | ||
| 259 | if( g_inquire_bucket == OT_BUCKET_COUNT ) { | ||
| 260 | socket_send4(g_socket_out, (char*)g_scrapebuffer_start, g_scrapebuffer_pos - g_scrapebuffer_start, groupip_1, LIVESYNC_PORT); | ||
| 261 | g_inquire_inprogress = 0; | ||
| 262 | } | ||
| 263 | } | ||
| 264 | |||
| 265 | void livesync_handle_tell( ssize_t datalen ) { | ||
| 266 | int off = sizeof( g_tracker_id ) + sizeof( uint32_t ); | ||
| 267 | |||
| 268 | /* Some instance is in progress of telling. Our inquiry was successful. | ||
| 269 | Don't ask again until we see next beacon. */ | ||
| 270 | g_next_inquire_time = 0; | ||
| 271 | |||
| 272 | /* Don't cause any new inquiries during another tracker's tell */ | ||
| 273 | if( g_next_beacon_time - g_now_seconds < LIVESYNC_BEACON_INTERVAL ) | ||
| 274 | g_next_beacon_time = g_now_seconds + LIVESYNC_BEACON_INTERVAL; | ||
| 275 | |||
| 276 | while( off + sizeof(ot_hash) + 12 <= (size_t)datalen ) { | ||
| 277 | ot_hash *hash = (ot_hash*)(g_inbuffer+off); | ||
| 278 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash); | ||
| 279 | size_t down_count_remote; | ||
| 280 | int exactmatch; | ||
| 281 | ot_torrent * torrent = vector_find_or_insert(torrents_list, hash, sizeof(ot_hash), OT_HASH_COMPARE_SIZE, &exactmatch); | ||
| 282 | if( !torrent ) { | ||
| 283 | mutex_bucket_unlock_by_hash( hash, 0 ); | ||
| 284 | continue; | ||
| 285 | } | ||
| 286 | |||
| 287 | if( !exactmatch ) { | ||
| 288 | /* Create a new torrent entry, then */ | ||
| 289 | int i; for(i=0;i<20;i+=4) WRITE32(&torrent->hash,i,READ32(hash,i)); | ||
| 290 | |||
| 291 | if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { | ||
| 292 | vector_remove_torrent( torrents_list, torrent ); | ||
| 293 | mutex_bucket_unlock_by_hash( hash, 0 ); | ||
| 294 | continue; | ||
| 295 | } | ||
| 296 | |||
| 297 | byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); | ||
| 298 | torrent->peer_list->base = g_now_minutes - uint32_read_big((char*)g_inbuffer+off+sizeof(ot_hash)); | ||
| 299 | } | ||
| 300 | |||
| 301 | down_count_remote = (size_t)(((uint64_t)uint32_read_big((char*)g_inbuffer+off+sizeof( ot_hash ) + sizeof(uint32_t))) << 32); | ||
| 302 | down_count_remote |= (size_t) uint32_read_big((char*)g_inbuffer+off+sizeof( ot_hash ) + 2 * sizeof(uint32_t)); | ||
| 303 | |||
| 304 | if( down_count_remote > torrent->peer_list->down_count ) | ||
| 305 | torrent->peer_list->down_count = down_count_remote; | ||
| 306 | /* else | ||
| 307 | We might think of sending a tell packet, if we have a much larger downloaded count | ||
| 308 | */ | ||
| 309 | |||
| 310 | mutex_bucket_unlock( g_inquire_bucket++, exactmatch?0:1 ); | ||
| 311 | if( !g_opentracker_running ) | ||
| 312 | return; | ||
| 313 | off += sizeof(ot_hash) + 12; | ||
| 314 | } | ||
| 315 | } | ||
| 316 | #endif /* WANT_SYNC_SCRAPE */ | ||
| 317 | |||
| 113 | /* Tickle the live sync module from time to time, so no events get | 318 | /* Tickle the live sync module from time to time, so no events get |
| 114 | stuck when there's not enough traffic to fill udp packets fast | 319 | stuck when there's not enough traffic to fill udp packets fast |
| 115 | enough */ | 320 | enough */ |
| 116 | void livesync_ticker( ) { | 321 | void livesync_ticker( ) { |
| 117 | if( ( g_now_seconds - livesync_lastpacket_time > LIVESYNC_MAXDELAY) && | 322 | |
| 118 | ( livesync_outbuffer_pos > livesync_outbuffer_start + sizeof( g_tracker_id ) ) ) | 323 | /* livesync_issue_peersync sets g_next_packet_time */ |
| 119 | livesync_issuepacket(); | 324 | if( g_now_seconds > g_next_packet_time && |
| 325 | g_peerbuffer_pos > g_peerbuffer_start + sizeof( g_tracker_id ) ) | ||
| 326 | livesync_issue_peersync(); | ||
| 327 | |||
| 328 | #ifdef WANT_SYNC_SCRAPE | ||
| 329 | /* Send first beacon after running at least LIVESYNC_FIRST_BEACON_DELAY | ||
| 330 | seconds and not more often than every LIVESYNC_BEACON_INTERVAL seconds */ | ||
| 331 | if( g_now_seconds > g_next_beacon_time ) { | ||
| 332 | livesync_issue_beacon( ); | ||
| 333 | g_next_beacon_time = g_now_seconds + LIVESYNC_BEACON_INTERVAL; | ||
| 334 | } | ||
| 335 | |||
| 336 | /* If we're interested in an inquiry and waited long enough to see all | ||
| 337 | tracker's beacons, go ahead and inquire */ | ||
| 338 | if( g_next_inquire_time && g_now_seconds > g_next_inquire_time ) { | ||
| 339 | livesync_issue_inquire(); | ||
| 340 | |||
| 341 | /* If packet gets lost, ask again after LIVESYNC_BEACON_INTERVAL */ | ||
| 342 | g_next_inquire_time = g_now_seconds + LIVESYNC_BEACON_INTERVAL; | ||
| 343 | } | ||
| 344 | |||
| 345 | /* If we're in process of telling, let's tell. */ | ||
| 346 | if( g_inquire_inprogress ) | ||
| 347 | livesync_issue_tell( ); | ||
| 348 | |||
| 349 | #endif /* WANT_SYNC_SCRAPE */ | ||
| 350 | } | ||
| 351 | |||
| 352 | /* Inform live sync about whats going on. */ | ||
| 353 | void livesync_tell( ot_hash * const info_hash, const ot_peer * const peer ) { | ||
| 354 | unsigned int i; | ||
| 355 | for(i=0;i<sizeof(ot_hash)/4;i+=4) WRITE32(g_peerbuffer_pos,i,READ32(info_hash,i)); | ||
| 356 | |||
| 357 | WRITE32(g_peerbuffer_pos,sizeof(ot_hash) ,READ32(peer,0)); | ||
| 358 | WRITE32(g_peerbuffer_pos,sizeof(ot_hash)+4,READ32(peer,4)); | ||
| 359 | |||
| 360 | g_peerbuffer_pos += sizeof(ot_hash)+8; | ||
| 361 | |||
| 362 | if( g_peerbuffer_pos >= g_peerbuffer_highwater ) | ||
| 363 | livesync_issue_peersync(); | ||
| 120 | } | 364 | } |
| 121 | 365 | ||
| 122 | static void * livesync_worker( void * args ) { | 366 | static void * livesync_worker( void * args ) { |
| 123 | uint8_t in_ip[4]; uint16_t in_port; | 367 | uint8_t in_ip[4]; uint16_t in_port; |
| 124 | ssize_t datalen; | 368 | ssize_t datalen; |
| 125 | int off; | ||
| 126 | 369 | ||
| 127 | args = args; | 370 | (void)args; |
| 128 | 371 | ||
| 129 | while( 1 ) { | 372 | while( 1 ) { |
| 130 | datalen = socket_recv4(g_livesync_socket_in, (char*)livesync_inbuffer, LIVESYNC_BUFFINSIZE, (char*)in_ip, &in_port); | 373 | datalen = socket_recv4(g_socket_in, (char*)g_inbuffer, LIVESYNC_INCOMING_BUFFSIZE, (char*)in_ip, &in_port); |
| 131 | off = 4; | ||
| 132 | 374 | ||
| 133 | if( datalen <= 0 ) | 375 | /* Expect at least tracker id and packet type */ |
| 376 | if( datalen <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) ) | ||
| 134 | continue; | 377 | continue; |
| 135 | 378 | if( !accesslist_isblessed((char*)in_ip, OT_PERMISSION_MAY_LIVESYNC)) | |
| 136 | if( datalen < (ssize_t)(sizeof( g_tracker_id ) + sizeof( ot_hash ) + sizeof( ot_peer ) ) ) { | ||
| 137 | /* TODO: log invalid sync packet */ | ||
| 138 | continue; | 379 | continue; |
| 139 | } | 380 | if( !memcmp( g_inbuffer, &g_tracker_id, sizeof( g_tracker_id ) ) ) { |
| 140 | |||
| 141 | if( !accesslist_isblessed((char*)in_ip, OT_PERMISSION_MAY_LIVESYNC)) { | ||
| 142 | /* TODO: log invalid sync packet */ | ||
| 143 | continue; | ||
| 144 | } | ||
| 145 | |||
| 146 | if( !memcmp( livesync_inbuffer, &g_tracker_id, sizeof( g_tracker_id ) ) ) { | ||
| 147 | /* TODO: log packet coming from ourselves */ | 381 | /* TODO: log packet coming from ourselves */ |
| 148 | continue; | 382 | continue; |
| 149 | } | 383 | } |
| 150 | 384 | ||
| 151 | /* Now basic sanity checks have been done on the live sync packet | 385 | switch( uint32_read_big( (char*)g_inbuffer ) ) { |
| 152 | We might add more testing and logging. */ | 386 | case OT_SYNC_PEER: |
| 153 | while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= datalen ) { | 387 | livesync_handle_peersync( datalen ); |
| 154 | ot_peer *peer = (ot_peer*)(livesync_inbuffer + off + sizeof(ot_hash)); | 388 | break; |
| 155 | ot_hash *hash = (ot_hash*)(livesync_inbuffer + off); | 389 | #ifdef WANT_SYNC_SCRAPE |
| 156 | 390 | case OT_SYNC_SCRAPE_BEACON: | |
| 157 | if( !g_opentracker_running ) | 391 | livesync_handle_beacon( datalen ); |
| 158 | return NULL; | 392 | break; |
| 159 | 393 | case OT_SYNC_SCRAPE_INQUIRE: | |
| 160 | if( OT_PEERFLAG(peer) & PEER_FLAG_STOPPED ) | 394 | livesync_handle_inquire( datalen ); |
| 161 | remove_peer_from_torrent(hash, peer, NULL, FLAG_MCA); | 395 | break; |
| 162 | else | 396 | case OT_SYNC_SCRAPE_TELL: |
| 163 | add_peer_to_torrent( hash, peer WANT_SYNC_PARAM(1)); | 397 | livesync_handle_tell( datalen ); |
| 164 | 398 | break; | |
| 165 | off += sizeof( ot_hash ) + sizeof( ot_peer ); | 399 | #endif /* WANT_SYNC_SCRAPE */ |
| 400 | default: | ||
| 401 | break; | ||
| 166 | } | 402 | } |
| 167 | 403 | ||
| 168 | stats_issue_event(EVENT_SYNC, 0, datalen / ((ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ))); | 404 | /* Handle outstanding requests */ |
| 405 | livesync_ticker( ); | ||
| 169 | } | 406 | } |
| 407 | |||
| 170 | /* Never returns. */ | 408 | /* Never returns. */ |
| 171 | return NULL; | 409 | return NULL; |
| 172 | } | 410 | } |
diff --git a/ot_livesync.h b/ot_livesync.h index 97fe803..ae9ab55 100644 --- a/ot_livesync.h +++ b/ot_livesync.h | |||
| @@ -38,25 +38,22 @@ | |||
| 38 | ######## | 38 | ######## |
| 39 | ######## SCRAPE SYNC PROTOCOL ######## | 39 | ######## SCRAPE SYNC PROTOCOL ######## |
| 40 | ######## | 40 | ######## |
| 41 | 41 | ||
| 42 | Each tracker instance SHOULD broadcast a beacon once in every 5 minutes after | 42 | Each tracker instance SHOULD broadcast a beacon every LIVESYNC_BEACON_INTERVAL |
| 43 | running at least 30 minutes: | 43 | seconds after running at least LIVESYNC_FIRST_BEACON_DELAY seconds: |
| 44 | 44 | ||
| 45 | packet type SYNC_SCRAPE_BEACON | 45 | packet type SYNC_SCRAPE_BEACON |
| 46 | [ 0x0008 0x08 amount of torrents served | 46 | [ 0x0008 0x08 amount of torrents served |
| 47 | ] | 47 | ] |
| 48 | 48 | ||
| 49 | If a tracker instance receives a beacon from another instance that has more than | 49 | If a tracker instance receives a beacon from another instance that has more than |
| 50 | twice its torrent count, it asks for a scrape. It must wait for at least 5 + 1 | 50 | its torrent count plus a threshold, it inquires for a scrape. It must wait for at |
| 51 | minutes in order to inspect beacons from all tracker instances and chose the one | 51 | least 2 * LIVESYNC_BEACON_INTERVAL seconds in order to inspect beacons from all |
| 52 | with most torrents. | 52 | tracker instances and inquire only the one with most torrents. |
| 53 | 53 | ||
| 54 | If it sees a SYNC_SCRAPE_TELL within that time frame, it's likely, that another | 54 | If it sees a SYNC_SCRAPE_TELL within that time frame, it's likely, that another |
| 55 | scrape sync is going on. So one tracker instance MUST NOT react to beacons within | 55 | scrape sync is going on. It should reset its state to needs no inquiry. It should |
| 56 | 5 minutes of last seeing a SYNC_SCRAPE_TELL packet. After a scrape sync all | 56 | be reenabled on the next beacon, if still needed. |
| 57 | tracker instances have updated their torrents, so an instance in a "want inquire" | ||
| 58 | state should wait for the next round of beacons to chose the tracker with most | ||
| 59 | data again. | ||
| 60 | 57 | ||
| 61 | packet type SYNC_SCRAPE_INQUIRE | 58 | packet type SYNC_SCRAPE_INQUIRE |
| 62 | [ 0x0008 0x04 id of tracker instance to inquire | 59 | [ 0x0008 0x04 id of tracker instance to inquire |
| @@ -64,16 +61,17 @@ | |||
| 64 | 61 | ||
| 65 | The inquired tracker instance answers with as many scrape tell packets it needs | 62 | The inquired tracker instance answers with as many scrape tell packets it needs |
| 66 | to deliver stats about all its torrents | 63 | to deliver stats about all its torrents |
| 67 | 64 | ||
| 68 | packet type SYNC_SCRAPE_TELL | 65 | packet type SYNC_SCRAPE_TELL |
| 69 | [ 0x0008 0x14 info_hash | 66 | [ 0x0008 0x14 info_hash |
| 70 | 0x001c 0x04 base offset (i.e. when was it last announced, in minutes) | 67 | 0x001c 0x04 base offset (i.e. when was it last announced, in minutes) |
| 71 | 0x0020 0x08 downloaded count | 68 | 0x0020 0x08 downloaded count |
| 72 | ]* | 69 | ]* |
| 73 | 70 | ||
| 74 | Each tracker instance that receives a scrape tell, looks up each torrent and | 71 | Each tracker instance that receives a SYNC_SCRAPE_TELL, looks up each torrent and |
| 75 | compares downloaded count with its own counter. It can send out its own scrape | 72 | compares downloaded count with its own counter. It can send out its own scrape |
| 76 | tell packets, if it knows more. | 73 | tell packets, if it knows more. However to not interrupt a scrape tell, a tracker |
| 74 | should wait LIVESYNC_BEACON_INTERVAL after receiving a scrape tell. | ||
| 77 | 75 | ||
| 78 | */ | 76 | */ |
| 79 | 77 | ||
| @@ -24,6 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | /* Our global all torrents list */ | 25 | /* Our global all torrents list */ |
| 26 | static ot_vector all_torrents[OT_BUCKET_COUNT]; | 26 | static ot_vector all_torrents[OT_BUCKET_COUNT]; |
| 27 | static size_t g_torrent_count; | ||
| 27 | 28 | ||
| 28 | /* Bucket Magic */ | 29 | /* Bucket Magic */ |
| 29 | static int bucket_locklist[ OT_MAX_THREADS ]; | 30 | static int bucket_locklist[ OT_MAX_THREADS ]; |
| @@ -87,15 +88,24 @@ ot_vector *mutex_bucket_lock_by_hash( ot_hash *hash ) { | |||
| 87 | return all_torrents + bucket; | 88 | return all_torrents + bucket; |
| 88 | } | 89 | } |
| 89 | 90 | ||
| 90 | void mutex_bucket_unlock( int bucket ) { | 91 | void mutex_bucket_unlock( int bucket, int delta_torrentcount ) { |
| 91 | pthread_mutex_lock( &bucket_mutex ); | 92 | pthread_mutex_lock( &bucket_mutex ); |
| 92 | bucket_remove( bucket ); | 93 | bucket_remove( bucket ); |
| 94 | g_torrent_count += delta_torrentcount; | ||
| 93 | pthread_cond_broadcast( &bucket_being_unlocked ); | 95 | pthread_cond_broadcast( &bucket_being_unlocked ); |
| 94 | pthread_mutex_unlock( &bucket_mutex ); | 96 | pthread_mutex_unlock( &bucket_mutex ); |
| 95 | } | 97 | } |
| 96 | 98 | ||
| 97 | void mutex_bucket_unlock_by_hash( ot_hash *hash ) { | 99 | void mutex_bucket_unlock_by_hash( ot_hash *hash, int delta_torrentcount ) { |
| 98 | mutex_bucket_unlock( uint32_read_big( (char*)*hash ) >> OT_BUCKET_COUNT_SHIFT ); | 100 | mutex_bucket_unlock( uint32_read_big( (char*)*hash ) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount ); |
| 101 | } | ||
| 102 | |||
| 103 | size_t mutex_get_torrent_count( ) { | ||
| 104 | size_t torrent_count; | ||
| 105 | pthread_mutex_lock( &bucket_mutex ); | ||
| 106 | torrent_count = g_torrent_count; | ||
| 107 | pthread_mutex_unlock( &bucket_mutex ); | ||
| 108 | return torrent_count; | ||
| 99 | } | 109 | } |
| 100 | 110 | ||
| 101 | /* TaskQueue Magic */ | 111 | /* TaskQueue Magic */ |
| @@ -14,8 +14,10 @@ void mutex_deinit( ); | |||
| 14 | ot_vector *mutex_bucket_lock( int bucket ); | 14 | ot_vector *mutex_bucket_lock( int bucket ); |
| 15 | ot_vector *mutex_bucket_lock_by_hash( ot_hash *hash ); | 15 | ot_vector *mutex_bucket_lock_by_hash( ot_hash *hash ); |
| 16 | 16 | ||
| 17 | void mutex_bucket_unlock( int bucket ); | 17 | void mutex_bucket_unlock( int bucket, int delta_torrentcount ); |
| 18 | void mutex_bucket_unlock_by_hash( ot_hash *hash ); | 18 | void mutex_bucket_unlock_by_hash( ot_hash *hash, int delta_torrentcount ); |
| 19 | |||
| 20 | size_t mutex_get_torrent_count(); | ||
| 19 | 21 | ||
| 20 | typedef enum { | 22 | typedef enum { |
| 21 | TASK_STATS_CONNS = 0x0001, | 23 | TASK_STATS_CONNS = 0x0001, |
| @@ -187,7 +187,7 @@ size_t stats_top10_txt( char * reply ) { | |||
| 187 | top10s[idx].torrent = (ot_torrent*)(torrents_list->data) + j; | 187 | top10s[idx].torrent = (ot_torrent*)(torrents_list->data) + j; |
| 188 | } | 188 | } |
| 189 | } | 189 | } |
| 190 | mutex_bucket_unlock( bucket ); | 190 | mutex_bucket_unlock( bucket, 0 ); |
| 191 | if( !g_opentracker_running ) | 191 | if( !g_opentracker_running ) |
| 192 | return 0; | 192 | return 0; |
| 193 | } | 193 | } |
| @@ -241,7 +241,7 @@ static size_t stats_slash24s_txt( char * reply, size_t amount, uint32_t thresh ) | |||
| 241 | if( !count ) { | 241 | if( !count ) { |
| 242 | count = malloc( sizeof(uint32_t) * NUM_S24S ); | 242 | count = malloc( sizeof(uint32_t) * NUM_S24S ); |
| 243 | if( !count ) { | 243 | if( !count ) { |
| 244 | mutex_bucket_unlock( bucket ); | 244 | mutex_bucket_unlock( bucket, 0 ); |
| 245 | goto bailout_cleanup; | 245 | goto bailout_cleanup; |
| 246 | } | 246 | } |
| 247 | byte_zero( count, sizeof( uint32_t ) * NUM_S24S ); | 247 | byte_zero( count, sizeof( uint32_t ) * NUM_S24S ); |
| @@ -251,7 +251,7 @@ static size_t stats_slash24s_txt( char * reply, size_t amount, uint32_t thresh ) | |||
| 251 | } | 251 | } |
| 252 | } | 252 | } |
| 253 | } | 253 | } |
| 254 | mutex_bucket_unlock( bucket ); | 254 | mutex_bucket_unlock( bucket, 0 ); |
| 255 | if( !g_opentracker_running ) | 255 | if( !g_opentracker_running ) |
| 256 | goto bailout_cleanup; | 256 | goto bailout_cleanup; |
| 257 | } | 257 | } |
| @@ -384,7 +384,7 @@ static size_t stats_peers_mrtg( char * reply ) { | |||
| 384 | ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[j] ).peer_list; | 384 | ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[j] ).peer_list; |
| 385 | peer_count += peer_list->peer_count; seed_count += peer_list->seed_count; | 385 | peer_count += peer_list->peer_count; seed_count += peer_list->seed_count; |
| 386 | } | 386 | } |
| 387 | mutex_bucket_unlock( bucket ); | 387 | mutex_bucket_unlock( bucket, 0 ); |
| 388 | if( !g_opentracker_running ) | 388 | if( !g_opentracker_running ) |
| 389 | return 0; | 389 | return 0; |
| 390 | } | 390 | } |
| @@ -397,17 +397,7 @@ static size_t stats_peers_mrtg( char * reply ) { | |||
| 397 | 397 | ||
| 398 | static size_t stats_startstop_mrtg( char * reply ) | 398 | static size_t stats_startstop_mrtg( char * reply ) |
| 399 | { | 399 | { |
| 400 | size_t torrent_count = 0; | 400 | size_t torrent_count = mutex_get_torrent_count(); |
| 401 | int bucket; | ||
| 402 | |||
| 403 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) | ||
| 404 | { | ||
| 405 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | ||
| 406 | torrent_count += torrents_list->size; | ||
| 407 | mutex_bucket_unlock( bucket ); | ||
| 408 | if( !g_opentracker_running ) | ||
| 409 | return 0; | ||
| 410 | } | ||
| 411 | 401 | ||
| 412 | return sprintf( reply, "%zd\n%zd\nopentracker handling %zd torrents\nopentracker", | 402 | return sprintf( reply, "%zd\n%zd\nopentracker handling %zd torrents\nopentracker", |
| 413 | (size_t)0, | 403 | (size_t)0, |
| @@ -429,7 +419,7 @@ static size_t stats_toraddrem_mrtg( char * reply ) | |||
| 429 | ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[j] ).peer_list; | 419 | ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[j] ).peer_list; |
| 430 | peer_count += peer_list->peer_count; | 420 | peer_count += peer_list->peer_count; |
| 431 | } | 421 | } |
| 432 | mutex_bucket_unlock( bucket ); | 422 | mutex_bucket_unlock( bucket, 0 ); |
| 433 | if( !g_opentracker_running ) | 423 | if( !g_opentracker_running ) |
| 434 | return 0; | 424 | return 0; |
| 435 | } | 425 | } |
| @@ -443,15 +433,7 @@ static size_t stats_toraddrem_mrtg( char * reply ) | |||
| 443 | 433 | ||
| 444 | static size_t stats_torrents_mrtg( char * reply ) | 434 | static size_t stats_torrents_mrtg( char * reply ) |
| 445 | { | 435 | { |
| 446 | size_t torrent_count = 0; | 436 | size_t torrent_count = mutex_get_torrent_count(); |
| 447 | int bucket; | ||
| 448 | |||
| 449 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) | ||
| 450 | { | ||
| 451 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | ||
| 452 | torrent_count += torrents_list->size; | ||
| 453 | mutex_bucket_unlock( bucket ); | ||
| 454 | } | ||
| 455 | 437 | ||
| 456 | return sprintf( reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker", | 438 | return sprintf( reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker", |
| 457 | torrent_count, | 439 | torrent_count, |
| @@ -41,7 +41,6 @@ static int udp_test_connectionid( const uint32_t * const connid, const char * re | |||
| 41 | /* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */ | 41 | /* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */ |
| 42 | void handle_udp4( int64 serversocket ) { | 42 | void handle_udp4( int64 serversocket ) { |
| 43 | ot_peer peer; | 43 | ot_peer peer; |
| 44 | ot_torrent *torrent; | ||
| 45 | ot_hash *hash = NULL; | 44 | ot_hash *hash = NULL; |
| 46 | char remoteip[4]; | 45 | char remoteip[4]; |
| 47 | uint32_t *inpacket = (uint32_t*)static_inbuf; | 46 | uint32_t *inpacket = (uint32_t*)static_inbuf; |
| @@ -79,7 +78,7 @@ void handle_udp4( int64 serversocket ) { | |||
| 79 | if( r < 98 ) | 78 | if( r < 98 ) |
| 80 | return; | 79 | return; |
| 81 | 80 | ||
| 82 | if( !udp_test_connectionid( inpacket, remoteip )) | 81 | if( !udp_test_connectionid( inpacket, remoteip )) |
| 83 | fprintf( stderr, "UDP connect Connection id missmatch.\n" ); | 82 | fprintf( stderr, "UDP connect Connection id missmatch.\n" ); |
| 84 | 83 | ||
| 85 | /* We do only want to know, if it is zero */ | 84 | /* We do only want to know, if it is zero */ |
| @@ -110,20 +109,15 @@ void handle_udp4( int64 serversocket ) { | |||
| 110 | 109 | ||
| 111 | if( OT_PEERFLAG( &peer ) & PEER_FLAG_STOPPED ) /* Peer is gone. */ | 110 | if( OT_PEERFLAG( &peer ) & PEER_FLAG_STOPPED ) /* Peer is gone. */ |
| 112 | r = remove_peer_from_torrent( hash, &peer, static_outbuf, FLAG_UDP ); | 111 | r = remove_peer_from_torrent( hash, &peer, static_outbuf, FLAG_UDP ); |
| 113 | else { | 112 | else |
| 114 | torrent = add_peer_to_torrent( hash, &peer WANT_SYNC_PARAM( 0 ) ); | 113 | r = 8 + add_peer_to_torrent_and_return_peers( hash, &peer, FLAG_UDP, numwant, static_outbuf + 8 ); |
| 115 | if( !torrent ) | ||
| 116 | return; /* XXX maybe send error */ | ||
| 117 | |||
| 118 | r = 8 + return_peers_for_torrent( torrent, numwant, static_outbuf + 8, FLAG_UDP ); | ||
| 119 | } | ||
| 120 | 114 | ||
| 121 | socket_send4( serversocket, static_outbuf, r, remoteip, remoteport ); | 115 | socket_send4( serversocket, static_outbuf, r, remoteip, remoteport ); |
| 122 | stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, r ); | 116 | stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, r ); |
| 123 | break; | 117 | break; |
| 124 | 118 | ||
| 125 | case 2: /* This is a scrape action */ | 119 | case 2: /* This is a scrape action */ |
| 126 | if( !udp_test_connectionid( inpacket, remoteip )) | 120 | if( !udp_test_connectionid( inpacket, remoteip )) |
| 127 | fprintf( stderr, "UDP scrape Connection id missmatch.\n" ); | 121 | fprintf( stderr, "UDP scrape Connection id missmatch.\n" ); |
| 128 | 122 | ||
| 129 | outpacket[0] = htonl( 2 ); /* scrape action */ | 123 | outpacket[0] = htonl( 2 ); /* scrape action */ |
| @@ -138,4 +132,8 @@ void handle_udp4( int64 serversocket ) { | |||
| 138 | } | 132 | } |
| 139 | } | 133 | } |
| 140 | 134 | ||
| 135 | void udp_init( ) { | ||
| 136 | |||
| 137 | } | ||
| 138 | |||
| 141 | const char *g_version_udp_c = "$Source$: $Revision$\n"; | 139 | const char *g_version_udp_c = "$Source$: $Revision$\n"; |
diff --git a/trackerlogic.c b/trackerlogic.c index 34a12e7..38be9f7 100644 --- a/trackerlogic.c +++ b/trackerlogic.c | |||
| @@ -25,6 +25,9 @@ | |||
| 25 | #include "ot_fullscrape.h" | 25 | #include "ot_fullscrape.h" |
| 26 | #include "ot_livesync.h" | 26 | #include "ot_livesync.h" |
| 27 | 27 | ||
| 28 | /* Forward declaration */ | ||
| 29 | size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ); | ||
| 30 | |||
| 28 | void free_peerlist( ot_peerlist *peer_list ) { | 31 | void free_peerlist( ot_peerlist *peer_list ) { |
| 29 | if( peer_list->peers.data ) { | 32 | if( peer_list->peers.data ) { |
| 30 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 33 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { |
| @@ -43,34 +46,36 @@ extern size_t g_this_peerid_len; | |||
| 43 | extern char *g_this_peerid_data; | 46 | extern char *g_this_peerid_data; |
| 44 | #endif | 47 | #endif |
| 45 | 48 | ||
| 46 | ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM( int from_sync ) ) { | 49 | size_t add_peer_to_torrent_and_return_peers( ot_hash *hash, ot_peer *peer, PROTO_FLAG proto, size_t amount, char * reply ) { |
| 47 | int exactmatch; | 50 | int exactmatch, delta_torrentcount = 0; |
| 51 | size_t reply_size; | ||
| 48 | ot_torrent *torrent; | 52 | ot_torrent *torrent; |
| 49 | ot_peer *peer_dest; | 53 | ot_peer *peer_dest; |
| 50 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); | 54 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); |
| 51 | 55 | ||
| 52 | if( !accesslist_hashisvalid( hash ) ) { | 56 | if( !accesslist_hashisvalid( hash ) ) { |
| 53 | mutex_bucket_unlock_by_hash( hash ); | 57 | mutex_bucket_unlock_by_hash( hash, 0 ); |
| 54 | return NULL; | 58 | return 0; |
| 55 | } | 59 | } |
| 56 | 60 | ||
| 57 | torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 61 | torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); |
| 58 | if( !torrent ) { | 62 | if( !torrent ) { |
| 59 | mutex_bucket_unlock_by_hash( hash ); | 63 | mutex_bucket_unlock_by_hash( hash, 0 ); |
| 60 | return NULL; | 64 | return 0; |
| 61 | } | 65 | } |
| 62 | 66 | ||
| 63 | if( !exactmatch ) { | 67 | if( !exactmatch ) { |
| 64 | /* Create a new torrent entry, then */ | 68 | /* Create a new torrent entry, then */ |
| 65 | int i; for(i=0;i<20;i+=4) WRITE32(&torrent->hash,i,READ32(hash,i)); | 69 | int i; for(i=0;i<20;i+=4) WRITE32(&torrent->hash,i,READ32(hash,i)); |
| 66 | 70 | ||
| 67 | if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { | 71 | if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { |
| 68 | vector_remove_torrent( torrents_list, torrent ); | 72 | vector_remove_torrent( torrents_list, torrent ); |
| 69 | mutex_bucket_unlock_by_hash( hash ); | 73 | mutex_bucket_unlock_by_hash( hash, 0 ); |
| 70 | return NULL; | 74 | return 0; |
| 71 | } | 75 | } |
| 72 | 76 | ||
| 73 | byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); | 77 | byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); |
| 78 | delta_torrentcount = 1; | ||
| 74 | } else | 79 | } else |
| 75 | clean_single_torrent( torrent ); | 80 | clean_single_torrent( torrent ); |
| 76 | 81 | ||
| @@ -79,8 +84,8 @@ ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM( | |||
| 79 | /* Check for peer in torrent */ | 84 | /* Check for peer in torrent */ |
| 80 | peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), peer, &exactmatch ); | 85 | peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), peer, &exactmatch ); |
| 81 | if( !peer_dest ) { | 86 | if( !peer_dest ) { |
| 82 | mutex_bucket_unlock_by_hash( hash ); | 87 | mutex_bucket_unlock_by_hash( hash, delta_torrentcount ); |
| 83 | return NULL; | 88 | return 0; |
| 84 | } | 89 | } |
| 85 | 90 | ||
| 86 | /* Tell peer that it's fresh */ | 91 | /* Tell peer that it's fresh */ |
| @@ -94,7 +99,7 @@ ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM( | |||
| 94 | if( !exactmatch ) { | 99 | if( !exactmatch ) { |
| 95 | 100 | ||
| 96 | #ifdef WANT_SYNC_LIVE | 101 | #ifdef WANT_SYNC_LIVE |
| 97 | if( !from_sync ) | 102 | if( proto == FLAG_MCA ) |
| 98 | livesync_tell( hash, peer ); | 103 | livesync_tell( hash, peer ); |
| 99 | else | 104 | else |
| 100 | OT_PEERFLAG( peer ) |= PEER_FLAG_FROM_SYNC; | 105 | OT_PEERFLAG( peer ) |= PEER_FLAG_FROM_SYNC; |
| @@ -118,17 +123,17 @@ ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM( | |||
| 118 | printf( " %d.%d.%d.%d:%d\t%d %02X %s\n", _ip[0], _ip[1], _ip[2], _ip[3], OT_PEERTIME( peer_dest ), *(uint16_t*)( ((char*)peer_dest)+4 ), OT_PEERFLAG(peer_dest), g_this_peerid_data ? g_this_peerid_data : "-" ); | 123 | printf( " %d.%d.%d.%d:%d\t%d %02X %s\n", _ip[0], _ip[1], _ip[2], _ip[3], OT_PEERTIME( peer_dest ), *(uint16_t*)( ((char*)peer_dest)+4 ), OT_PEERFLAG(peer_dest), g_this_peerid_data ? g_this_peerid_data : "-" ); |
| 119 | } | 124 | } |
| 120 | #endif | 125 | #endif |
| 121 | 126 | ||
| 122 | #ifdef WANT_SYNC_LIVE | 127 | #ifdef WANT_SYNC_LIVE |
| 123 | /* Won't live sync peers that come back too fast. Only exception: | 128 | /* Won't live sync peers that come back too fast. Only exception: |
| 124 | fresh "completed" reports */ | 129 | fresh "completed" reports */ |
| 125 | if( !from_sync ) { | 130 | if( proto != FLAG_MCA ) { |
| 126 | if( OT_PEERTIME( peer_dest ) > OT_CLIENT_SYNC_RENEW_BOUNDARY || | 131 | if( OT_PEERTIME( peer_dest ) > OT_CLIENT_SYNC_RENEW_BOUNDARY || |
| 127 | ( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(peer) & PEER_FLAG_COMPLETED ) ) ) | 132 | ( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(peer) & PEER_FLAG_COMPLETED ) ) ) |
| 128 | livesync_tell( hash, peer ); | 133 | livesync_tell( hash, peer ); |
| 129 | } | 134 | } |
| 130 | #endif | 135 | #endif |
| 131 | 136 | ||
| 132 | if( (OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && !(OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) ) | 137 | if( (OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && !(OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) ) |
| 133 | torrent->peer_list->seed_count--; | 138 | torrent->peer_list->seed_count--; |
| 134 | if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && (OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) ) | 139 | if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && (OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) ) |
| @@ -141,14 +146,15 @@ ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM( | |||
| 141 | 146 | ||
| 142 | *(uint64_t*)(peer_dest) = *(uint64_t*)(peer); | 147 | *(uint64_t*)(peer_dest) = *(uint64_t*)(peer); |
| 143 | #ifdef WANT_SYNC | 148 | #ifdef WANT_SYNC |
| 144 | /* In order to avoid an unlock/lock between add_peers and return_peers, | 149 | if( proto == FLAG_MCA ) { |
| 145 | we only unlock the bucket if return_peers won't do the job: either | 150 | mutex_bucket_unlock_by_hash( hash, delta_torrentcount ); |
| 146 | if we return NULL or if no reply is expected, i.e. when called | 151 | return 0; |
| 147 | from livesync code. */ | 152 | } |
| 148 | if( from_sync ) | ||
| 149 | mutex_bucket_unlock_by_hash( hash ); | ||
| 150 | #endif | 153 | #endif |
| 151 | return torrent; | 154 | |
| 155 | reply_size = return_peers_for_torrent( torrent, amount, reply, proto ); | ||
| 156 | mutex_bucket_unlock_by_hash( &torrent->hash, delta_torrentcount ); | ||
| 157 | return reply_size; | ||
| 152 | } | 158 | } |
| 153 | 159 | ||
| 154 | static size_t return_peers_all( ot_peerlist *peer_list, char *reply ) { | 160 | static size_t return_peers_all( ot_peerlist *peer_list, char *reply ) { |
| @@ -186,7 +192,7 @@ static size_t return_peers_selection( ot_peerlist *peer_list, size_t amount, cha | |||
| 186 | num_buckets = bucket_list->size; | 192 | num_buckets = bucket_list->size; |
| 187 | bucket_list = (ot_vector *)bucket_list->data; | 193 | bucket_list = (ot_vector *)bucket_list->data; |
| 188 | } | 194 | } |
| 189 | 195 | ||
| 190 | /* Make fixpoint arithmetic as exact as possible */ | 196 | /* Make fixpoint arithmetic as exact as possible */ |
| 191 | #define MAXPRECBIT (1<<(8*sizeof(int)-3)) | 197 | #define MAXPRECBIT (1<<(8*sizeof(int)-3)) |
| 192 | while( !(shifted_pc & MAXPRECBIT ) ) { shifted_pc <<= 1; shift++; } | 198 | while( !(shifted_pc & MAXPRECBIT ) ) { shifted_pc <<= 1; shift++; } |
| @@ -220,9 +226,6 @@ static size_t return_peers_selection( ot_peerlist *peer_list, size_t amount, cha | |||
| 220 | /* Compiles a list of random peers for a torrent | 226 | /* Compiles a list of random peers for a torrent |
| 221 | * reply must have enough space to hold 92+6*amount bytes | 227 | * reply must have enough space to hold 92+6*amount bytes |
| 222 | * does not yet check not to return self | 228 | * does not yet check not to return self |
| 223 | * the bucket, torrent resides in has been locked by the | ||
| 224 | add_peer call, the ot_torrent * was gathered from, so we | ||
| 225 | have to unlock it here. | ||
| 226 | */ | 229 | */ |
| 227 | size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ) { | 230 | size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ) { |
| 228 | ot_peerlist *peer_list = torrent->peer_list; | 231 | ot_peerlist *peer_list = torrent->peer_list; |
| @@ -230,7 +233,7 @@ size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply | |||
| 230 | 233 | ||
| 231 | if( amount > peer_list->peer_count ) | 234 | if( amount > peer_list->peer_count ) |
| 232 | amount = peer_list->peer_count; | 235 | amount = peer_list->peer_count; |
| 233 | 236 | ||
| 234 | if( proto == FLAG_TCP ) { | 237 | if( proto == FLAG_TCP ) { |
| 235 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; | 238 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; |
| 236 | r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie5:peers%zd:", peer_list->seed_count, peer_list->down_count, peer_list->peer_count-peer_list->seed_count, erval, erval/2, 6*amount ); | 239 | r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie5:peers%zd:", peer_list->seed_count, peer_list->down_count, peer_list->peer_count-peer_list->seed_count, erval, erval/2, 6*amount ); |
| @@ -251,13 +254,12 @@ size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply | |||
| 251 | if( proto == FLAG_TCP ) | 254 | if( proto == FLAG_TCP ) |
| 252 | *r++ = 'e'; | 255 | *r++ = 'e'; |
| 253 | 256 | ||
| 254 | mutex_bucket_unlock_by_hash( &torrent->hash ); | ||
| 255 | return r - reply; | 257 | return r - reply; |
| 256 | } | 258 | } |
| 257 | 259 | ||
| 258 | /* Fetches scrape info for a specific torrent */ | 260 | /* Fetches scrape info for a specific torrent */ |
| 259 | size_t return_udp_scrape_for_torrent( ot_hash *hash, char *reply ) { | 261 | size_t return_udp_scrape_for_torrent( ot_hash *hash, char *reply ) { |
| 260 | int exactmatch; | 262 | int exactmatch, delta_torrentcount = 0; |
| 261 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); | 263 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); |
| 262 | ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 264 | ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); |
| 263 | 265 | ||
| @@ -269,20 +271,22 @@ size_t return_udp_scrape_for_torrent( ot_hash *hash, char *reply ) { | |||
| 269 | if( clean_single_torrent( torrent ) ) { | 271 | if( clean_single_torrent( torrent ) ) { |
| 270 | vector_remove_torrent( torrents_list, torrent ); | 272 | vector_remove_torrent( torrents_list, torrent ); |
| 271 | memset( reply, 0, 12); | 273 | memset( reply, 0, 12); |
| 274 | delta_torrentcount = -1; | ||
| 272 | } else { | 275 | } else { |
| 273 | r[0] = htonl( torrent->peer_list->seed_count ); | 276 | r[0] = htonl( torrent->peer_list->seed_count ); |
| 274 | r[1] = htonl( torrent->peer_list->down_count ); | 277 | r[1] = htonl( torrent->peer_list->down_count ); |
| 275 | r[2] = htonl( torrent->peer_list->peer_count-torrent->peer_list->seed_count ); | 278 | r[2] = htonl( torrent->peer_list->peer_count-torrent->peer_list->seed_count ); |
| 276 | } | 279 | } |
| 277 | } | 280 | } |
| 278 | mutex_bucket_unlock_by_hash( hash ); | 281 | mutex_bucket_unlock_by_hash( hash, 0 ); |
| 279 | return 12; | 282 | return 12; |
| 280 | } | 283 | } |
| 281 | 284 | ||
| 282 | /* Fetches scrape info for a specific torrent */ | 285 | /* Fetches scrape info for a specific torrent */ |
| 283 | size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *reply ) { | 286 | size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *reply ) { |
| 284 | char *r = reply; | 287 | char *r = reply; |
| 285 | int exactmatch, i; | 288 | int exactmatch, i; |
| 289 | int delta_torrentcount = 0; | ||
| 286 | 290 | ||
| 287 | r += sprintf( r, "d5:filesd" ); | 291 | r += sprintf( r, "d5:filesd" ); |
| 288 | 292 | ||
| @@ -294,6 +298,7 @@ size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *repl | |||
| 294 | if( exactmatch ) { | 298 | if( exactmatch ) { |
| 295 | if( clean_single_torrent( torrent ) ) { | 299 | if( clean_single_torrent( torrent ) ) { |
| 296 | vector_remove_torrent( torrents_list, torrent ); | 300 | vector_remove_torrent( torrents_list, torrent ); |
| 301 | delta_torrentcount = -1; | ||
| 297 | } else { | 302 | } else { |
| 298 | int j; | 303 | int j; |
| 299 | *r++='2';*r++='0';*r++=':'; | 304 | *r++='2';*r++='0';*r++=':'; |
| @@ -302,7 +307,7 @@ size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *repl | |||
| 302 | torrent->peer_list->seed_count, torrent->peer_list->down_count, torrent->peer_list->peer_count-torrent->peer_list->seed_count ); | 307 | torrent->peer_list->seed_count, torrent->peer_list->down_count, torrent->peer_list->peer_count-torrent->peer_list->seed_count ); |
| 303 | } | 308 | } |
| 304 | } | 309 | } |
| 305 | mutex_bucket_unlock_by_hash( hash ); | 310 | mutex_bucket_unlock_by_hash( hash, delta_torrentcount ); |
| 306 | } | 311 | } |
| 307 | 312 | ||
| 308 | *r++ = 'e'; *r++ = 'e'; | 313 | *r++ = 'e'; *r++ = 'e'; |
| @@ -337,7 +342,7 @@ size_t remove_peer_from_torrent( ot_hash *hash, ot_peer *peer, char *reply, PROT | |||
| 337 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; | 342 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; |
| 338 | reply_size = sprintf( reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie5:peers0:e", peer_list->seed_count, peer_list->peer_count - peer_list->seed_count, erval, erval / 2 ); | 343 | reply_size = sprintf( reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie5:peers0:e", peer_list->seed_count, peer_list->peer_count - peer_list->seed_count, erval, erval / 2 ); |
| 339 | } | 344 | } |
| 340 | 345 | ||
| 341 | /* Handle UDP reply */ | 346 | /* Handle UDP reply */ |
| 342 | if( proto == FLAG_UDP ) { | 347 | if( proto == FLAG_UDP ) { |
| 343 | ((uint32_t*)reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM ); | 348 | ((uint32_t*)reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM ); |
| @@ -346,7 +351,7 @@ size_t remove_peer_from_torrent( ot_hash *hash, ot_peer *peer, char *reply, PROT | |||
| 346 | reply_size = 20; | 351 | reply_size = 20; |
| 347 | } | 352 | } |
| 348 | 353 | ||
| 349 | mutex_bucket_unlock_by_hash( hash ); | 354 | mutex_bucket_unlock_by_hash( hash, 0 ); |
| 350 | return reply_size; | 355 | return reply_size; |
| 351 | } | 356 | } |
| 352 | 357 | ||
| @@ -355,12 +360,7 @@ void exerr( char * message ) { | |||
| 355 | exit( 111 ); | 360 | exit( 111 ); |
| 356 | } | 361 | } |
| 357 | 362 | ||
| 358 | int trackerlogic_init( const char * const serverdir ) { | 363 | void trackerlogic_init( ) { |
| 359 | if( serverdir && chdir( serverdir ) ) { | ||
| 360 | fprintf( stderr, "Could not chdir() to %s, because %s\n", serverdir, strerror(errno) ); | ||
| 361 | return -1; | ||
| 362 | } | ||
| 363 | |||
| 364 | srandom( time(NULL) ); | 364 | srandom( time(NULL) ); |
| 365 | g_tracker_id = random(); | 365 | g_tracker_id = random(); |
| 366 | 366 | ||
| @@ -371,12 +371,10 @@ int trackerlogic_init( const char * const serverdir ) { | |||
| 371 | accesslist_init( ); | 371 | accesslist_init( ); |
| 372 | livesync_init( ); | 372 | livesync_init( ); |
| 373 | stats_init( ); | 373 | stats_init( ); |
| 374 | |||
| 375 | return 0; | ||
| 376 | } | 374 | } |
| 377 | 375 | ||
| 378 | void trackerlogic_deinit( void ) { | 376 | void trackerlogic_deinit( void ) { |
| 379 | int bucket; | 377 | int bucket, delta_torrentcount = 0; |
| 380 | size_t j; | 378 | size_t j; |
| 381 | 379 | ||
| 382 | /* Free all torrents... */ | 380 | /* Free all torrents... */ |
| @@ -386,10 +384,11 @@ void trackerlogic_deinit( void ) { | |||
| 386 | for( j=0; j<torrents_list->size; ++j ) { | 384 | for( j=0; j<torrents_list->size; ++j ) { |
| 387 | ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + j; | 385 | ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + j; |
| 388 | free_peerlist( torrent->peer_list ); | 386 | free_peerlist( torrent->peer_list ); |
| 387 | delta_torrentcount -= 1; | ||
| 389 | } | 388 | } |
| 390 | free( torrents_list->data ); | 389 | free( torrents_list->data ); |
| 391 | } | 390 | } |
| 392 | mutex_bucket_unlock( bucket ); | 391 | mutex_bucket_unlock( bucket, delta_torrentcount ); |
| 393 | } | 392 | } |
| 394 | 393 | ||
| 395 | /* Deinitialise background worker threads */ | 394 | /* Deinitialise background worker threads */ |
diff --git a/trackerlogic.h b/trackerlogic.h index 75e98d2..34cee3b 100644 --- a/trackerlogic.h +++ b/trackerlogic.h | |||
| @@ -73,7 +73,6 @@ static const uint8_t PEER_FLAG_LEECHING = 0x00; | |||
| 73 | #define OT_PEERFLAG(peer) (((uint8_t*)(peer))[6]) | 73 | #define OT_PEERFLAG(peer) (((uint8_t*)(peer))[6]) |
| 74 | #define OT_PEERTIME(peer) (((uint8_t*)(peer))[7]) | 74 | #define OT_PEERTIME(peer) (((uint8_t*)(peer))[7]) |
| 75 | 75 | ||
| 76 | #define OT_PEER_COMPARE_SIZE ((size_t)6) | ||
| 77 | #define OT_HASH_COMPARE_SIZE (sizeof(ot_hash)) | 76 | #define OT_HASH_COMPARE_SIZE (sizeof(ot_hash)) |
| 78 | 77 | ||
| 79 | struct ot_peerlist; | 78 | struct ot_peerlist; |
| @@ -111,17 +110,17 @@ struct ot_peerlist { | |||
| 111 | #define WANT_SYNC_PARAM( param ) | 110 | #define WANT_SYNC_PARAM( param ) |
| 112 | #endif | 111 | #endif |
| 113 | 112 | ||
| 114 | int trackerlogic_init( const char * const serverdir ); | 113 | void trackerlogic_init( ); |
| 115 | void trackerlogic_deinit( void ); | 114 | void trackerlogic_deinit( void ); |
| 116 | void exerr( char * message ); | 115 | void exerr( char * message ); |
| 117 | 116 | ||
| 118 | /* add_peer_to_torrent does only release the torrent bucket if from_sync is set, | 117 | /* add_peer_to_torrent does only release the torrent bucket if from_sync is set, |
| 119 | otherwise it is released in return_peers_for_torrent */ | 118 | otherwise it is released in return_peers_for_torrent */ |
| 120 | size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ); | 119 | #define add_peer_to_torrent(hash,peer,proto) add_peer_to_torrent_and_return_peers(hash,peer,proto,0,NULL) |
| 121 | ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM( int from_sync ) ); | 120 | size_t add_peer_to_torrent_and_return_peers( ot_hash *hash, ot_peer *peer, PROTO_FLAG proto, size_t amount, char * reply ); |
| 122 | size_t remove_peer_from_torrent( ot_hash *hash, ot_peer *peer, char *reply, PROTO_FLAG proto ); | 121 | size_t remove_peer_from_torrent( ot_hash *hash, ot_peer *peer, char *reply, PROTO_FLAG proto ); |
| 123 | size_t return_tcp_scrape_for_torrent( ot_hash *hash, int amount, char *reply ); | 122 | size_t return_tcp_scrape_for_torrent( ot_hash *hash, int amount, char *reply ); |
| 124 | size_t return_udp_scrape_for_torrent( ot_hash *hash, char *reply ); | 123 | size_t return_udp_scrape_for_torrent( ot_hash *hash, char *reply ); |
| 125 | 124 | ||
| 126 | /* Helper, before it moves to its own object */ | 125 | /* Helper, before it moves to its own object */ |
| 127 | void free_peerlist( ot_peerlist *peer_list ); | 126 | void free_peerlist( ot_peerlist *peer_list ); |
