vendor/rails/activesupport/lib/active_support/vendor/memcache-client-1.6.5/memcache.rb
1 $TESTING = defined?($TESTING) && $TESTING
2
3 require 'socket'
4 require 'thread'
5 require 'timeout'
6 require 'zlib'
7 require 'digest/sha1'
8
9 ##
10 # A Ruby client library for memcached.
11 #
12
13 class MemCache
14
15 ##
16 # The version of MemCache you are using.
17
18 VERSION = '1.6.4.99'
19
20 ##
21 # Default options for the cache object.
22
23 DEFAULT_OPTIONS = {
24 :namespace => nil,
25 :readonly => false,
26 :multithread => true,
27 :failover => true,
28 :timeout => 0.5,
29 :logger => nil,
30 }
31
32 ##
33 # Default memcached port.
34
35 DEFAULT_PORT = 11211
36
37 ##
38 # Default memcached server weight.
39
40 DEFAULT_WEIGHT = 1
41
42 ##
43 # The namespace for this instance
44
45 attr_reader :namespace
46
47 ##
48 # The multithread setting for this instance
49
50 attr_reader :multithread
51
52 ##
53 # The servers this client talks to. Play at your own peril.
54
55 attr_reader :servers
56
57 ##
58 # Socket timeout limit with this client, defaults to 0.5 sec.
59 # Set to nil to disable timeouts.
60
61 attr_reader :timeout
62
63 ##
64 # Should the client try to failover to another server if the
65 # first server is down? Defaults to true.
66
67 attr_reader :failover
68
69 ##
70 # Log debug/info/warn/error to the given Logger, defaults to nil.
71
72 attr_reader :logger
73
74 ##
75 # Accepts a list of +servers+ and a list of +opts+. +servers+ may be
76 # omitted. See +servers=+ for acceptable server list arguments.
77 #
78 # Valid options for +opts+ are:
79 #
80 # [:namespace] Prepends this value to all keys added or retrieved.
81 # [:readonly] Raises an exception on cache writes when true.
82 # [:multithread] Wraps cache access in a Mutex for thread safety.
83 # [:failover] Should the client try to failover to another server if the
84 # first server is down? Defaults to true.
85 # [:timeout] Time to use as the socket read timeout. Defaults to 0.5 sec;
86 # set to nil to disable timeouts (enforcing socket timeouts carries a major performance penalty in Ruby 1.8).
87 # [:logger] Logger to use for info/debug output, defaults to nil
88 # Other options are ignored.
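#
# A minimal usage sketch (the host, namespace and key below are only
# illustrative):
#
#   cache = MemCache.new 'localhost:11211', :namespace => 'my_app'
#   cache.set 'greeting', 'hello'
#   cache.get 'greeting' # => "hello"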
89
90 def initialize(*args)
91 servers = []
92 opts = {}
93
94 case args.length
95 when 0 then # NOP
96 when 1 then
97 arg = args.shift
98 case arg
99 when Hash then opts = arg
100 when Array then servers = arg
101 when String then servers = [arg]
102 else raise ArgumentError, 'first argument must be Array, Hash or String'
103 end
104 when 2 then
105 servers, opts = args
106 else
107 raise ArgumentError, "wrong number of arguments (#{args.length} for 2)"
108 end
109
110 opts = DEFAULT_OPTIONS.merge opts
111 @namespace = opts[:namespace]
112 @readonly = opts[:readonly]
113 @multithread = opts[:multithread]
114 @timeout = opts[:timeout]
115 @failover = opts[:failover]
116 @logger = opts[:logger]
117 @mutex = Mutex.new if @multithread
118
119 logger.info { "memcache-client #{VERSION} #{Array(servers).inspect}" } if logger
120
121 Thread.current[:memcache_client] = self.object_id if !@multithread
122
123 self.servers = servers
124 end
125
126 ##
127 # Returns a string representation of the cache object.
128
129 def inspect
130 "<MemCache: %d servers, ns: %p, ro: %p>" %
131 [@servers.length, @namespace, @readonly]
132 end
133
134 ##
135 # Returns whether there is at least one active server for the object.
136
137 def active?
138 not @servers.empty?
139 end
140
141 ##
142 # Returns whether or not the cache object was created read only.
143
144 def readonly?
145 @readonly
146 end
147
148 ##
149 # Set the servers that the requests will be distributed between. Entries
150 # can be either strings of the form "hostname:port" or
151 # "hostname:port:weight" or MemCache::Server objects.
152 #
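# For example (addresses and weights below are illustrative):
#
#   cache.servers = ['10.0.1.1:11211', '10.0.1.2:11211:2']
#
# gives the second server twice the default weight when the continuum is built.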
153 def servers=(servers)
154 # Create the server objects.
155 @servers = Array(servers).collect do |server|
156 case server
157 when String
158 host, port, weight = server.split ':', 3
159 port ||= DEFAULT_PORT
160 weight ||= DEFAULT_WEIGHT
161 Server.new self, host, port, weight
162 else
163 server
164 end
165 end
166
167 logger.debug { "Servers now: #{@servers.inspect}" } if logger
168
169 # There's no point in doing this if there's only one server
170 @continuum = create_continuum_for(@servers) if @servers.size > 1
171
172 @servers
173 end
174
175 ##
176 # Decrements the value for +key+ by +amount+ and returns the new value.
177 # +key+ must already exist. If the stored value is not an integer, it is
178 # treated as 0. The value cannot be decremented below 0.
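#
# Counters must be stored as raw (unmarshalled) integer strings for
# incr/decr to work. A sketch (key and numbers illustrative):
#
#   cache.set 'hits', '10', 0, true
#   cache.decr 'hits', 3 # => 7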
179
180 def decr(key, amount = 1)
181 raise MemCacheError, "Update of readonly cache" if @readonly
182 with_server(key) do |server, cache_key|
183 cache_decr server, cache_key, amount
184 end
185 rescue TypeError => err
186 handle_error nil, err
187 end
188
189 ##
190 # Retrieves +key+ from memcache. If +raw+ is false, the value will be
191 # unmarshalled.
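#
# For example (after a prior cache.set 'greeting', 'hello'; names illustrative):
#
#   cache.get 'greeting'       # => "hello"
#   cache.get 'greeting', true # => the stored bytes, not run through Marshal.load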
192
193 def get(key, raw = false)
194 with_server(key) do |server, cache_key|
195 value = cache_get server, cache_key
196 logger.debug { "GET #{key} from #{server.inspect}: #{value ? value.to_s.size : 'nil'}" } if logger
197 return nil if value.nil?
198 value = Marshal.load value unless raw
199 return value
200 end
201 rescue TypeError => err
202 handle_error nil, err
203 end
204
205 ##
206 # Retrieves multiple values from memcached in parallel, if possible.
207 #
208 # The memcached protocol supports the ability to retrieve multiple
209 # keys in a single request. Pass in an array of keys to this method
210 # and it will:
211 #
212 # 1. map the key to the appropriate memcached server
213 # 2. send a single request to each server that has one or more key values
214 #
215 # Returns a hash of values.
216 #
217 # cache["a"] = 1
218 # cache["b"] = 2
219 # cache.get_multi "a", "b" # => { "a" => 1, "b" => 2 }
220 #
221 # Note that get_multi assumes the values are marshalled.
222
223 def get_multi(*keys)
224 raise MemCacheError, 'No active servers' unless active?
225
226 keys.flatten!
227 key_count = keys.length
228 cache_keys = {}
229 server_keys = Hash.new { |h,k| h[k] = [] }
230
231 # map keys to servers
232 keys.each do |key|
233 server, cache_key = request_setup key
234 cache_keys[cache_key] = key
235 server_keys[server] << cache_key
236 end
237
238 results = {}
239
240 server_keys.each do |server, keys_for_server|
241 keys_for_server_str = keys_for_server.join ' '
242 begin
243 values = cache_get_multi server, keys_for_server_str
244 values.each do |key, value|
245 results[cache_keys[key]] = Marshal.load value
246 end
247 rescue IndexError => e
248 # Ignore this server and try the others
249 logger.warn { "Unable to retrieve #{keys_for_server.size} elements from #{server.inspect}: #{e.message}"} if logger
250 end
251 end
252
253 return results
254 rescue TypeError => err
255 handle_error nil, err
256 end
257
258 ##
259 # Increments the value for +key+ by +amount+ and returns the new value.
260 # +key+ must already exist. If the stored value is not an integer, it is
261 # treated as 0.
262
263 def incr(key, amount = 1)
264 raise MemCacheError, "Update of readonly cache" if @readonly
265 with_server(key) do |server, cache_key|
266 cache_incr server, cache_key, amount
267 end
268 rescue TypeError => err
269 handle_error nil, err
270 end
271
272 ##
273 # Add +key+ to the cache with value +value+ that expires in +expiry+
274 # seconds. If +raw+ is true, +value+ will not be Marshalled.
275 #
276 # Warning: Readers should not call this method in the event of a cache miss;
277 # see MemCache#add.
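#
# A sketch (key, value and the 300-second expiry are illustrative):
#
#   cache.set 'session', { :user_id => 42 }, 300 # => "STORED\r\n"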
278
279 ONE_MB = 1024 * 1024
280
281 def set(key, value, expiry = 0, raw = false)
282 raise MemCacheError, "Update of readonly cache" if @readonly
283 with_server(key) do |server, cache_key|
284
285 value = Marshal.dump value unless raw
286 logger.debug { "SET #{key} to #{server.inspect}: #{value ? value.to_s.size : 'nil'}" } if logger
287
288 data = value.to_s
289 raise MemCacheError, "Value too large, memcached can only store 1MB of data per key" if data.size > ONE_MB
290
291 command = "set #{cache_key} 0 #{expiry} #{data.size}\r\n#{data}\r\n"
292
293 with_socket_management(server) do |socket|
294 socket.write command
295 result = socket.gets
296 raise_on_error_response! result
297
298 if result.nil?
299 server.close
300 raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
301 end
302
303 result
304 end
305 end
306 end
307
308 ##
309 # Add +key+ to the cache with value +value+ that expires in +expiry+
310 # seconds, but only if +key+ does not already exist in the cache.
311 # If +raw+ is true, +value+ will not be Marshalled.
312 #
313 # Readers should call this method in the event of a cache miss, not
314 # MemCache#set or MemCache#[]=.
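#
# For example (key and values illustrative):
#
#   cache.add 'token', 'first'  # => "STORED\r\n"
#   cache.add 'token', 'second' # => "NOT_STORED\r\n" (key already present)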
315
316 def add(key, value, expiry = 0, raw = false)
317 raise MemCacheError, "Update of readonly cache" if @readonly
318 with_server(key) do |server, cache_key|
319 value = Marshal.dump value unless raw
320 logger.debug { "ADD #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
321 command = "add #{cache_key} 0 #{expiry} #{value.to_s.size}\r\n#{value}\r\n"
322
323 with_socket_management(server) do |socket|
324 socket.write command
325 result = socket.gets
326 raise_on_error_response! result
327 result
328 end
329 end
330 end
331
332 ##
333 # Removes +key+ from the cache in +expiry+ seconds.
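#
# For example (key illustrative):
#
#   cache.delete 'session' # => "DELETED\r\n", or "NOT_FOUND\r\n" if the key is absent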
334
335 def delete(key, expiry = 0)
336 raise MemCacheError, "Update of readonly cache" if @readonly
337 with_server(key) do |server, cache_key|
338 with_socket_management(server) do |socket|
339 socket.write "delete #{cache_key} #{expiry}\r\n"
340 result = socket.gets
341 raise_on_error_response! result
342 result
343 end
344 end
345 end
346
347 ##
348 # Flush the cache from all memcache servers.
349
350 def flush_all
351 raise MemCacheError, 'No active servers' unless active?
352 raise MemCacheError, "Update of readonly cache" if @readonly
353
354 begin
355 @servers.each do |server|
356 with_socket_management(server) do |socket|
357 socket.write "flush_all\r\n"
358 result = socket.gets
359 raise_on_error_response! result
360 result
361 end
362 end
363 rescue IndexError => err
364 handle_error nil, err
365 end
366 end
367
368 ##
369 # Reset the connection to all memcache servers. This should be called if
370 # there is a problem with a cache lookup that might have left the connection
371 # in a corrupted state.
372
373 def reset
374 @servers.each { |server| server.close }
375 end
376
377 ##
378 # Returns statistics for each memcached server. An explanation of the
379 # statistics can be found in the memcached docs:
380 #
381 # http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt
382 #
383 # Example:
384 #
385 # >> pp CACHE.stats
386 # {"localhost:11211"=>
387 # {"bytes"=>4718,
388 # "pid"=>20188,
389 # "connection_structures"=>4,
390 # "time"=>1162278121,
391 # "pointer_size"=>32,
392 # "limit_maxbytes"=>67108864,
393 # "cmd_get"=>14532,
394 # "version"=>"1.2.0",
395 # "bytes_written"=>432583,
396 # "cmd_set"=>32,
397 # "get_misses"=>0,
398 # "total_connections"=>19,
399 # "curr_connections"=>3,
400 # "curr_items"=>4,
401 # "uptime"=>1557,
402 # "get_hits"=>14532,
403 # "total_items"=>32,
404 # "rusage_system"=>0.313952,
405 # "rusage_user"=>0.119981,
406 # "bytes_read"=>190619}}
407 # => nil
408
409 def stats
410 raise MemCacheError, "No active servers" unless active?
411 server_stats = {}
412
413 @servers.each do |server|
414 next unless server.alive?
415
416 with_socket_management(server) do |socket|
417 value = nil
418 socket.write "stats\r\n"
419 stats = {}
420 while line = socket.gets do
421 raise_on_error_response! line
422 break if line == "END\r\n"
423 if line =~ /\ASTAT ([\S]+) ([\w\.\:]+)/ then
424 name, value = $1, $2
425 stats[name] = case name
426 when 'version'
427 value
428 when 'rusage_user', 'rusage_system' then
429 seconds, microseconds = value.split(/:/, 2)
430 microseconds ||= 0
431 Float(seconds) + (Float(microseconds) / 1_000_000)
432 else
433 if value =~ /\A\d+\Z/ then
434 value.to_i
435 else
436 value
437 end
438 end
439 end
440 end
441 server_stats["#{server.host}:#{server.port}"] = stats
442 end
443 end
444
445 raise MemCacheError, "No active servers" if server_stats.empty?
446 server_stats
447 end
448
449 ##
450 # Shortcut to get a value from the cache.
451
452 alias [] get
453
454 ##
455 # Shortcut to save a value in the cache. This method does not set an
456 # expiration on the entry. Use set to specify an explicit expiry.
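#
#   cache['greeting'] = 'hello' # same as cache.set('greeting', 'hello')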
457
458 def []=(key, value)
459 set key, value
460 end
461
462 protected unless $TESTING
463
464 ##
465 # Create a key for the cache, incorporating the namespace qualifier if
466 # requested.
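#
# For example (namespace and key illustrative):
#
#   make_cache_key 'foo' # => "my_app:foo" with :namespace => 'my_app'
#   make_cache_key 'foo' # => "foo" with no namespace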
467
468 def make_cache_key(key)
469 if namespace.nil? then
470 key
471 else
472 "#{@namespace}:#{key}"
473 end
474 end
475
476 ##
477 # Returns an interoperable hash value for +key+. (I think, docs are
478 # sketchy for down servers).
479
480 def hash_for(key)
481 Zlib.crc32(key)
482 end
483
484 ##
485 # Pick a server to handle the request based on a hash of the key.
486
487 def get_server_for_key(key, options = {})
488 raise ArgumentError, "illegal character in key #{key.inspect}" if
489 key =~ /\s/
490 raise ArgumentError, "key too long #{key.inspect}" if key.length > 250
491 raise MemCacheError, "No servers available" if @servers.empty?
492 return @servers.first if @servers.length == 1
493
494 hkey = hash_for(key)
495
496 20.times do |try|
497 entryidx = Continuum.binary_search(@continuum, hkey)
498 server = @continuum[entryidx].server
499 return server if server.alive?
500 break unless failover
501 hkey = hash_for "#{try}#{key}"
502 end
503
504 raise MemCacheError, "No servers available"
505 end
506
507 ##
508 # Performs a raw decr for +cache_key+ from +server+. Returns nil if not
509 # found.
510
511 def cache_decr(server, cache_key, amount)
512 with_socket_management(server) do |socket|
513 socket.write "decr #{cache_key} #{amount}\r\n"
514 text = socket.gets
515 raise_on_error_response! text
516 return nil if text == "NOT_FOUND\r\n"
517 return text.to_i
518 end
519 end
520
521 ##
522 # Fetches the raw data for +cache_key+ from +server+. Returns nil on cache
523 # miss.
524
525 def cache_get(server, cache_key)
526 with_socket_management(server) do |socket|
527 socket.write "get #{cache_key}\r\n"
528 keyline = socket.gets # "VALUE <key> <flags> <bytes>\r\n"
529
530 if keyline.nil? then
531 server.close
532 raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
533 end
534
535 raise_on_error_response! keyline
536 return nil if keyline == "END\r\n"
537
538 unless keyline =~ /(\d+)\r/ then
539 server.close
540 raise MemCacheError, "unexpected response #{keyline.inspect}"
541 end
542 value = socket.read $1.to_i
543 socket.read 2 # "\r\n"
544 socket.gets # "END\r\n"
545 return value
546 end
547 end
548
549 ##
550 # Fetches +cache_keys+ from +server+ using a multi-get.
551
552 def cache_get_multi(server, cache_keys)
553 with_socket_management(server) do |socket|
554 values = {}
555 socket.write "get #{cache_keys}\r\n"
556
557 while keyline = socket.gets do
558 return values if keyline == "END\r\n"
559 raise_on_error_response! keyline
560
561 unless keyline =~ /\AVALUE (.+) (.+) (.+)/ then
562 server.close
563 raise MemCacheError, "unexpected response #{keyline.inspect}"
564 end
565
566 key, data_length = $1, $3
567 values[$1] = socket.read data_length.to_i
568 socket.read(2) # "\r\n"
569 end
570
571 server.close
572 raise MemCacheError, "lost connection to #{server.host}:#{server.port}" # TODO: retry here too
573 end
574 end
575
576 ##
577 # Performs a raw incr for +cache_key+ from +server+. Returns nil if not
578 # found.
579
580 def cache_incr(server, cache_key, amount)
581 with_socket_management(server) do |socket|
582 socket.write "incr #{cache_key} #{amount}\r\n"
583 text = socket.gets
584 raise_on_error_response! text
585 return nil if text == "NOT_FOUND\r\n"
586 return text.to_i
587 end
588 end
589
590 ##
591 # Gets or creates a socket connected to the given server, and yields it
592 # to the block, wrapped in a mutex synchronization if @multithread is true.
593 #
594 # If a socket error (SocketError, SystemCallError, IOError) or protocol error
595 # (MemCacheError) is raised by the block, closes the socket, attempts to
596 # connect again, and retries the block (once). If an error is again raised,
597 # reraises it as MemCacheError.
598 #
599 # If unable to connect to the server (or if in the reconnect wait period),
600 # raises MemCacheError. Note that the socket connect code marks a server
601 # dead for a timeout period, so retrying does not apply to connection attempt
602 # failures (but does still apply to unexpectedly lost connections etc.).
603
604 def with_socket_management(server, &block)
605 check_multithread_status!
606
607 @mutex.lock if @multithread
608 retried = false
609
610 begin
611 socket = server.socket
612
613 # Raise an IndexError to show this server is out of whack. If we're inside
614 # a with_server block, we'll catch it and attempt to restart the operation.
615
616 raise IndexError, "No connection to server (#{server.status})" if socket.nil?
617
618 block.call(socket)
619
620 rescue SocketError => err
621 logger.warn { "Socket failure: #{err.message}" } if logger
622 server.mark_dead(err)
623 handle_error(server, err)
624
625 rescue MemCacheError, SystemCallError, IOError => err
626 logger.warn { "Generic failure: #{err.class.name}: #{err.message}" } if logger
627 handle_error(server, err) if retried || socket.nil?
628 retried = true
629 retry
630 end
631 ensure
632 @mutex.unlock if @multithread
633 end
634
635 def with_server(key)
636 retried = false
637 begin
638 server, cache_key = request_setup(key)
639 yield server, cache_key
640 rescue IndexError => e
641 logger.warn { "Server failed: #{e.class.name}: #{e.message}" } if logger
642 if !retried && @servers.size > 1
643 logger.info { "Connection to server #{server.inspect} DIED! Retrying operation..." } if logger
644 retried = true
645 retry
646 end
647 handle_error(nil, e)
648 end
649 end
650
651 ##
652 # Handles +error+ from +server+.
653
654 def handle_error(server, error)
655 raise error if error.is_a?(MemCacheError)
656 server.close if server
657 new_error = MemCacheError.new error.message
658 new_error.set_backtrace error.backtrace
659 raise new_error
660 end
661
662 ##
663 # Performs setup for making a request with +key+ from memcached. Returns
664 # the server to fetch the key from and the complete key to use.
665
666 def request_setup(key)
667 raise MemCacheError, 'No active servers' unless active?
668 cache_key = make_cache_key key
669 server = get_server_for_key cache_key
670 return server, cache_key
671 end
672
673 def raise_on_error_response!(response)
674 if response =~ /\A(?:CLIENT_|SERVER_)?ERROR(.*)/
675 raise MemCacheError, $1.strip
676 end
677 end
678
679 def create_continuum_for(servers)
680 total_weight = servers.inject(0) { |memo, srv| memo + srv.weight }
681 continuum = []
682
683 servers.each do |server|
684 entry_count_for(server, servers.size, total_weight).times do |idx|
685 hash = Digest::SHA1.hexdigest("#{server.host}:#{server.port}:#{idx}")
686 value = Integer("0x#{hash[0..7]}")
687 continuum << Continuum::Entry.new(value, server)
688 end
689 end
690
691 continuum.sort { |a, b| a.value <=> b.value }
692 end
693
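##
# For illustration: with three servers of equal weight, the formula below
# yields ((3 * 160 * 1) / 3.0).floor == 160 continuum points per server.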
694 def entry_count_for(server, total_servers, total_weight)
695 ((total_servers * Continuum::POINTS_PER_SERVER * server.weight) / Float(total_weight)).floor
696 end
697
698 def check_multithread_status!
699 return if @multithread
700
701 if Thread.current[:memcache_client] != self.object_id
702 raise MemCacheError, <<-EOM
703 You are accessing this memcache-client instance from multiple threads but have not enabled multithread support.
704 Normally: MemCache.new(['localhost:11211'], :multithread => true)
705 In Rails: config.cache_store = [:mem_cache_store, 'localhost:11211', { :multithread => true }]
706 EOM
707 end
708 end
709
710 ##
711 # This class represents a memcached server instance.
712
713 class Server
714
715 ##
716 # The amount of time to wait to establish a connection with a memcached
717 # server. If a connection cannot be established within this time limit,
718 # the server will be marked as down.
719
720 CONNECT_TIMEOUT = 0.25
721
722 ##
723 # The amount of time to wait before attempting to re-establish a
724 # connection with a server that is marked dead.
725
726 RETRY_DELAY = 30.0
727
728 ##
729 # The host the memcached server is running on.
730
731 attr_reader :host
732
733 ##
734 # The port the memcached server is listening on.
735
736 attr_reader :port
737
738 ##
739 # The weight given to the server.
740
741 attr_reader :weight
742
743 ##
744 # The time of next retry if the connection is dead.
745
746 attr_reader :retry
747
748 ##
749 # A text status string describing the state of the server.
750
751 attr_reader :status
752
753 attr_reader :logger
754
755 ##
756 # Create a new MemCache::Server object for the memcached instance
757 # listening on the given host and port, weighted by the given weight.
758
759 def initialize(memcache, host, port = DEFAULT_PORT, weight = DEFAULT_WEIGHT)
760 raise ArgumentError, "No host specified" if host.nil? or host.empty?
761 raise ArgumentError, "No port specified" if port.nil? or port.to_i.zero?
762
763 @host = host
764 @port = port.to_i
765 @weight = weight.to_i
766
767 @sock = nil
768 @retry = nil
769 @status = 'NOT CONNECTED'
770 @timeout = memcache.timeout
771 @logger = memcache.logger
772 end
773
774 ##
775 # Return a string representation of the server object.
776
777 def inspect
778 "<MemCache::Server: %s:%d [%d] (%s)>" % [@host, @port, @weight, @status]
779 end
780
781 ##
782 # Check whether the server connection is alive. This will cause the
783 # socket to attempt to connect if it isn't already connected, or if
784 # the server was previously marked as down and the retry time has
785 # been exceeded.
786
787 def alive?
788 !!socket
789 end
790
791 ##
792 # Try to connect to the memcached server targeted by this object.
793 # Returns the connected socket object on success or nil on failure.
794
795 def socket
796 return @sock if @sock and not @sock.closed?
797
798 @sock = nil
799
800 # If the host was dead, don't retry for a while.
801 return if @retry and @retry > Time.now
802
803 # Attempt to connect if not already connected.
804 begin
805 @sock = @timeout ? TCPTimeoutSocket.new(@host, @port, @timeout) : TCPSocket.new(@host, @port)
806
807 if Socket.constants.include? 'TCP_NODELAY' then
808 @sock.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1
809 end
810 @retry = nil
811 @status = 'CONNECTED'
812 rescue SocketError, SystemCallError, IOError, Timeout::Error => err
813 logger.warn { "Unable to open socket: #{err.class.name}, #{err.message}" } if logger
814 mark_dead err
815 end
816
817 return @sock
818 end
819
820 ##
821 # Close the connection to the memcached server targeted by this
822 # object. The server is not considered dead.
823
824 def close
825 @sock.close if @sock && !@sock.closed?
826 @sock = nil
827 @retry = nil
828 @status = "NOT CONNECTED"
829 end
830
831 ##
832 # Mark the server as dead and close its socket.
833
834 def mark_dead(error)
835 @sock.close if @sock && !@sock.closed?
836 @sock = nil
837 @retry = Time.now + RETRY_DELAY
838
839 reason = "#{error.class.name}: #{error.message}"
840 @status = sprintf "%s:%s DEAD (%s), will retry at %s", @host, @port, reason, @retry
841 @logger.info { @status } if @logger
842 end
843
844 end
845
846 ##
847 # Base MemCache exception class.
848
849 class MemCacheError < RuntimeError; end
850
851 end
852
853 # TCPSocket facade class which implements timeouts.
854 class TCPTimeoutSocket
855
856 def initialize(host, port, timeout)
857 Timeout::timeout(MemCache::Server::CONNECT_TIMEOUT, SocketError) do
858 @sock = TCPSocket.new(host, port)
859 @len = timeout
860 end
861 end
862
863 def write(*args)
864 Timeout::timeout(@len, SocketError) do
865 @sock.write(*args)
866 end
867 end
868
869 def gets(*args)
870 Timeout::timeout(@len, SocketError) do
871 @sock.gets(*args)
872 end
873 end
874
875 def read(*args)
876 Timeout::timeout(@len, SocketError) do
877 @sock.read(*args)
878 end
879 end
880
881 def _socket
882 @sock
883 end
884
885 def method_missing(meth, *args)
886 @sock.__send__(meth, *args)
887 end
888
889 def closed?
890 @sock.closed?
891 end
892
893 def close
894 @sock.close
895 end
896 end
897
898 module Continuum
899 POINTS_PER_SERVER = 160 # this is the default in libmemcached
900
901 # Find the closest index in Continuum with value <= the given value
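# For example, with entry values [5, 10, 20] (illustrative):
#   binary_search(ary, 10) # => 1
#   binary_search(ary, 12) # => 1
#   binary_search(ary, 3)  # => -1 (wraps to the last entry when used as an array index)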
902 def self.binary_search(ary, value, &block)
903 upper = ary.size - 1
904 lower = 0
905 idx = 0
906
907 while(lower <= upper) do
908 idx = (lower + upper) / 2
909 comp = ary[idx].value <=> value
910
911 if comp == 0
912 return idx
913 elsif comp > 0
914 upper = idx - 1
915 else
916 lower = idx + 1
917 end
918 end
919 return upper
920 end
921
922 class Entry
923 attr_reader :value
924 attr_reader :server
925
926 def initialize(val, srv)
927 @value = val
928 @server = srv
929 end
930
931 def inspect
932 "<#{value}, #{server.host}:#{server.port}>"
933 end
934 end
935 end