author     Giuseppe Bilotta <giuseppe.bilotta@gmail.com>  2006-10-24 15:24:13 +0000
committer  Giuseppe Bilotta <giuseppe.bilotta@gmail.com>  2006-10-24 15:24:13 +0000
commit     43ac960aa89e5a02291fe875627dac88ae7fda34 (patch)
tree       6d0cb13f4c299978174d72730c602cd274bbfdba
parent     77512a98814b8c8ae5e6314a8bdf30b1967d95d2 (diff)
Initial implementation of proper caching based on last-modified and etag HTTP headers
-rw-r--r--  ChangeLog                        5
-rw-r--r--  data/rbot/plugins/demauro.rb     2
-rw-r--r--  data/rbot/plugins/digg.rb        2
-rw-r--r--  data/rbot/plugins/freshmeat.rb   2
-rw-r--r--  data/rbot/plugins/grouphug.rb    2
-rw-r--r--  data/rbot/plugins/quiz.rb        2
-rw-r--r--  data/rbot/plugins/rss.rb         2
-rw-r--r--  data/rbot/plugins/search.rb      2
-rw-r--r--  data/rbot/plugins/slashdot.rb    4
-rw-r--r--  data/rbot/plugins/tube.rb        4
-rw-r--r--  data/rbot/plugins/urban.rb       8
-rw-r--r--  lib/rbot/httputil.rb           199
12 files changed, 149 insertions, 85 deletions
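
The technique this commit introduces reduces to HTTP validators: remember the etag and last-modified values from a 200 response, send them back as If-None-Match and If-Modified-Since on the next request, and serve the cached body when the server answers 304 Not Modified. A minimal self-contained sketch of that pattern (MiniCache and its layout are illustrative only, not rbot's API):

require 'net/http'
require 'uri'
require 'time'

# Toy single-process cache demonstrating conditional GET; not rbot code.
class MiniCache
  def initialize
    @entries = {}  # url string => { :body, :etag, :last_modified }
  end

  def fetch(url)
    uri = URI.parse(url)
    cached = @entries[url]
    headers = {}
    if cached
      headers['If-None-Match'] = cached[:etag] if cached[:etag]
      headers['If-Modified-Since'] = cached[:last_modified].rfc2822 if cached[:last_modified]
    end
    resp = Net::HTTP.start(uri.host, uri.port) do |http|
      http.request(Net::HTTP::Get.new(uri.request_uri, headers))
    end
    case resp
    when Net::HTTPNotModified  # 304: our copy is still current
      cached[:body]
    when Net::HTTPSuccess      # 200: store body plus validators for next time
      @entries[url] = {
        :body => resp.body,
        :etag => resp['etag'],
        :last_modified => resp.key?('last-modified') ? Time.httpdate(resp['last-modified']) : nil
      }
      resp.body
    end
  end
end

Note that the expired? method added below revalidates with a HEAD request instead; its FIXME already observes that a conditional GET, as sketched here, would save one connection whenever the page has actually changed.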
diff --git a/ChangeLog b/ChangeLog
index f04416fb..9c9e7120 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2006-10-24 Giuseppe Bilotta <giuseppe.bilotta@gmail.com>
+
+ * HttpUtil: initial implementation of proper caching based on
+ last-modified and etag HTTP headers
+
2006-10-17 Giuseppe Bilotta <giuseppe.bilotta@gmail.com>
* Salut plugin: handles (multilingual) salutations (hello/goodbye)
diff --git a/data/rbot/plugins/demauro.rb b/data/rbot/plugins/demauro.rb
index 436a0bd3..9f5fc218 100644
--- a/data/rbot/plugins/demauro.rb
+++ b/data/rbot/plugins/demauro.rb
@@ -16,7 +16,7 @@ class DeMauroPlugin < Plugin
def demauro(m, params)
parola = params[:parola].downcase
url = @wapurl + "index.php?lemma=#{URI.escape(parola)}"
- xml = @bot.httputil.get(url)
+ xml = @bot.httputil.get_cached(url)
if xml.nil?
info = @bot.httputil.last_response
info = info ? "(#{info.code} - #{info.message})" : ""
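
Every plugin hunk in this commit is the same mechanical change: @bot.httputil.get becomes @bot.httputil.get_cached with unchanged arguments. A hedged summary of the calling convention, with defaults read off get_cached's signature later in this diff:

# url may be a URI object or a string; the optional positional
# arguments are read timeout and open timeout in seconds.
xml = @bot.httputil.get_cached(url)               # defaults: 10, 5
xml = @bot.httputil.get_cached(feed.url, 60, 60)  # slow feeds get more time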
diff --git a/data/rbot/plugins/digg.rb b/data/rbot/plugins/digg.rb
index 8663bd0f..8e3aeb4e 100644
--- a/data/rbot/plugins/digg.rb
+++ b/data/rbot/plugins/digg.rb
@@ -13,7 +13,7 @@ class DiggPlugin < Plugin
def digg(m, params)
max = params[:limit].to_i
debug "max is #{max}"
- xml = @bot.httputil.get(URI.parse("http://digg.com/rss/index.xml"))
+ xml = @bot.httputil.get_cached(URI.parse("http://digg.com/rss/index.xml"))
unless xml
m.reply "digg news parse failed"
return
diff --git a/data/rbot/plugins/freshmeat.rb b/data/rbot/plugins/freshmeat.rb
index eb2dbdf7..c8f529cb 100644
--- a/data/rbot/plugins/freshmeat.rb
+++ b/data/rbot/plugins/freshmeat.rb
@@ -12,7 +12,7 @@ class FreshmeatPlugin < Plugin
search = params[:search].to_s
max = 8 if max > 8
begin
- xml = @bot.httputil.get(URI.parse("http://freshmeat.net/search-xml/?orderby=locate_projectname_full_DESC&q=#{URI.escape(search)}"))
+ xml = @bot.httputil.get_cached(URI.parse("http://freshmeat.net/search-xml/?orderby=locate_projectname_full_DESC&q=#{URI.escape(search)}"))
rescue URI::InvalidURIError, URI::BadURIError => e
m.reply "illegal search string #{search}"
return
diff --git a/data/rbot/plugins/grouphug.rb b/data/rbot/plugins/grouphug.rb
index aa3bf6d7..53fc7f0a 100644
--- a/data/rbot/plugins/grouphug.rb
+++ b/data/rbot/plugins/grouphug.rb
@@ -14,7 +14,7 @@ class GrouphugPlugin < Plugin
path = "/random"
path = "/confessions/#{m.params()}" if m.params()
begin
- data = bot.httputil.get(URI.parse("http://grouphug.us/#{path}"))
+ data = bot.httputil.get_cached(URI.parse("http://grouphug.us/#{path}"))
reg = Regexp.new( '(<td class="conf-text")(.*?)(<p>)(.*?)(</p>)', Regexp::MULTILINE )
confession = reg.match( data )[4]
diff --git a/data/rbot/plugins/quiz.rb b/data/rbot/plugins/quiz.rb
index 4cd26f15..629b7232 100644
--- a/data/rbot/plugins/quiz.rb
+++ b/data/rbot/plugins/quiz.rb
@@ -116,7 +116,7 @@ class QuizPlugin < Plugin
# Wiki data
begin
- serverdata = @bot.httputil.get( URI.parse( "http://amarok.kde.org/amarokwiki/index.php/Rbot_Quiz" ) )
+ serverdata = @bot.httputil.get_cached( URI.parse( "http://amarok.kde.org/amarokwiki/index.php/Rbot_Quiz" ) )
serverdata = serverdata.split( "QUIZ DATA START\n" )[1]
serverdata = serverdata.split( "\nQUIZ DATA END" )[0]
serverdata = serverdata.gsub( /&nbsp;/, " " ).gsub( /&amp;/, "&" ).gsub( /&quot;/, "\"" )
diff --git a/data/rbot/plugins/rss.rb b/data/rbot/plugins/rss.rb
index 192c079a..dcbccb8c 100644
--- a/data/rbot/plugins/rss.rb
+++ b/data/rbot/plugins/rss.rb
@@ -506,7 +506,7 @@ class RSSFeedsPlugin < Plugin
# Use 60 sec timeout, cause the default is too low
# Do not use get_cached for RSS until we have proper cache handling
# xml = @bot.httputil.get_cached(feed.url,60,60)
- xml = @bot.httputil.get(feed.url,60,60)
+ xml = @bot.httputil.get_cached(feed.url, 60, 60)
rescue URI::InvalidURIError, URI::BadURIError => e
report_problem("invalid rss feed #{feed.url}", e, m)
return
diff --git a/data/rbot/plugins/search.rb b/data/rbot/plugins/search.rb
index fd1aefdc..a498d47f 100644
--- a/data/rbot/plugins/search.rb
+++ b/data/rbot/plugins/search.rb
@@ -35,7 +35,7 @@ class SearchPlugin < Plugin
begin
- wml = @bot.httputil.get(url)
+ wml = @bot.httputil.get_cached(url)
rescue => e
m.reply "error googling for #{what}"
return
diff --git a/data/rbot/plugins/slashdot.rb b/data/rbot/plugins/slashdot.rb
index 30f58dc9..ef96a4b0 100644
--- a/data/rbot/plugins/slashdot.rb
+++ b/data/rbot/plugins/slashdot.rb
@@ -12,7 +12,7 @@ class SlashdotPlugin < Plugin
search = params[:search].to_s
begin
- xml = @bot.httputil.get(URI.parse("http://slashdot.org/search.pl?content_type=rss&query=#{URI.escape(search)}"))
+ xml = @bot.httputil.get_cached(URI.parse("http://slashdot.org/search.pl?content_type=rss&query=#{URI.escape(search)}"))
rescue URI::InvalidURIError, URI::BadURIError => e
m.reply "illegal search string #{search}"
return
@@ -53,7 +53,7 @@ class SlashdotPlugin < Plugin
debug params.inspect
max = params[:limit].to_i
debug "max is #{max}"
- xml = @bot.httputil.get(URI.parse("http://slashdot.org/slashdot.xml"))
+ xml = @bot.httputil.get_cached(URI.parse("http://slashdot.org/slashdot.xml"))
unless xml
m.reply "slashdot news parse failed"
return
diff --git a/data/rbot/plugins/tube.rb b/data/rbot/plugins/tube.rb
index 85316718..0a9feb2f 100644
--- a/data/rbot/plugins/tube.rb
+++ b/data/rbot/plugins/tube.rb
@@ -13,7 +13,7 @@ class TubePlugin < Plugin
def tube(m, params)
line = params[:line]
begin
- tube_page = @bot.httputil.get(URI.parse("http://www.tfl.gov.uk/tfl/service_rt_tube.shtml"), 1, 1)
+ tube_page = @bot.httputil.get_cached(URI.parse("http://www.tfl.gov.uk/tfl/service_rt_tube.shtml"), 1, 1)
rescue URI::InvalidURIError, URI::BadURIError => e
m.reply "Cannot contact Tube Service Status page"
return
@@ -42,7 +42,7 @@ class TubePlugin < Plugin
def check_stations(m, params)
begin
- tube_page = @bot.httputil.get(URI.parse("http://www.tfl.gov.uk/tfl/service_rt_tube.shtml"))
+ tube_page = @bot.httputil.get_cached(URI.parse("http://www.tfl.gov.uk/tfl/service_rt_tube.shtml"))
rescue URI::InvalidURIError, URI::BadURIError => e
m.reply "Cannot contact Tube Service Status page"
return
diff --git a/data/rbot/plugins/urban.rb b/data/rbot/plugins/urban.rb
index 95210b25..d2ea8645 100644
--- a/data/rbot/plugins/urban.rb
+++ b/data/rbot/plugins/urban.rb
@@ -18,7 +18,7 @@ class UrbanPlugin < Plugin
end
# we give a very high 'skip' because this will allow us to get the number of definitions by retrieving the previous definition
uri = URI.parse("http://www.urbanwap.com/search.php?term=#{URI.escape words}&skip=65536")
- page = @bot.httputil.get(uri)
+ page = @bot.httputil.get_cached(uri)
if page.nil?
m.reply "Couldn't retrieve an urban dictionary definition of #{words}"
return
@@ -39,7 +39,7 @@ class UrbanPlugin < Plugin
end
if n < numdefs
uri = URI.parse("http://www.urbanwap.com/search.php?term=#{URI.escape words}&skip=#{n-1}")
- page = @bot.httputil.get(uri)
+ page = @bot.httputil.get_cached(uri)
if page.nil?
case n % 10
when 1
@@ -77,7 +77,7 @@ class UrbanPlugin < Plugin
end
def uotd(m, params)
- home = @bot.httputil.get("http://www.urbanwap.com/")
+ home = @bot.httputil.get_cached("http://www.urbanwap.com/")
if home.nil?
m.reply "Couldn't get the urban dictionary word of the day"
return
@@ -85,7 +85,7 @@ class UrbanPlugin < Plugin
home.match(/Word of the Day: <a href="(.*?)">.*?<\/a>/)
wotd = $1
debug "Urban word of the day: #{wotd}"
- page = @bot.httputil.get(wotd)
+ page = @bot.httputil.get_cached(wotd)
if page.nil?
m.reply "Couldn't get the urban dictionary word of the day"
else
diff --git a/lib/rbot/httputil.rb b/lib/rbot/httputil.rb
index 18fc6a55..3e617589 100644
--- a/lib/rbot/httputil.rb
+++ b/lib/rbot/httputil.rb
@@ -159,27 +159,26 @@ class HttpUtil
resp = http.request(req)
case resp
when Net::HTTPSuccess
- if cache && !(resp.key?('cache-control') && resp['cache-control']=='must-revalidate')
- k = uri.to_s
- @cache[k] = Hash.new
- @cache[k][:body] = resp.body
- @cache[k][:last_mod] = Time.httpdate(resp['last-modified']) if resp.key?('last-modified')
- if resp.key?('date')
- @cache[k][:first_use] = Time.httpdate(resp['date'])
- @cache[k][:last_use] = Time.httpdate(resp['date'])
- else
- now = Time.new
- @cache[k][:first_use] = now
- @cache[k][:last_use] = now
- end
- @cache[k][:count] = 1
+ if cache
+ debug "Caching #{uri.to_s}"
+ cache_response(uri.to_s, resp)
end
return resp.body
when Net::HTTPRedirection
debug "Redirecting #{uri} to #{resp['location']}"
yield resp['location'] if block_given?
if max_redir > 0
- return get( URI.join(uri.to_s, resp['location']), readtimeout, opentimeout, max_redir-1, cache)
+ # If cache is an Array, we assume get was called by get_cached
+ # because of a cache miss and that the first value of the Array
+ # was the noexpire value. Since the cache miss might have been
+ # caused by a redirection, we want to try get_cached again
+ # TODO FIXME look at Python's httplib2 for a most likely
+ # better way to handle all this mess
+ if cache.kind_of?(Array)
+ return get_cached( URI.join(uri.to_s, resp['location']), readtimeout, opentimeout, max_redir-1, cache[0])
+ else
+ return get( URI.join(uri.to_s, resp['location']), readtimeout, opentimeout, max_redir-1, cache)
+ end
else
warning "Max redirection reached, not going to #{resp['location']}"
end
@@ -242,6 +241,92 @@ class HttpUtil
return nil
end
+ def cache_response(k, resp)
+ begin
+ if resp.key?('pragma') and resp['pragma'] == 'no-cache'
+ debug "Not caching #{k}, it has Pragma: no-cache"
+ return
+ end
+ # TODO should we skip caching if neither last-modified nor etag are present?
+ now = Time.new
+ u = Hash.new
+ u[:body] = resp.body
+ u[:last_modified] = nil
+ u[:last_modified] = Time.httpdate(resp['date']) if resp.key?('date')
+ u[:last_modified] = Time.httpdate(resp['last-modified']) if resp.key?('last-modified')
+ u[:expires] = Time.now
+ u[:expires] = Time.httpdate(resp['expires']) if resp.key?('expires')
+ u[:revalidate] = false
+ if resp.key?('cache-control')
+ # TODO max-age
+ case resp['cache-control']
+ when /no-cache|must-revalidate/
+ u[:revalidate] = true
+ end
+ end
+ u[:etag] = ""
+ u[:etag] = resp['etag'] if resp.key?('etag')
+ u[:count] = 1
+ u[:first_use] = now
+ u[:last_use] = now
+ rescue => e
+ error "Failed to cache #{k}/#{resp.to_hash.inspect}: #{e.inspect}"
+ return
+ end
+ # @cache[k] = u
+ # For debugging purposes
+ @cache[k] = u.dup
+ u.delete(:body)
+ debug "Cached #{k}/#{resp.to_hash.inspect}: #{u.inspect}"
+ debug "#{@cache.size} pages (#{@cache.keys.join(', ')}) cached up to now"
+ end
+
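
Concretely, after a successful fetch cache_response maps the URL string to a metadata hash shaped like the following (all values illustrative):

# Example cache entry. :body is deleted only from the local copy
# used for the debug line, not from the stored dup.
@cache["http://example.com/feed.xml"] = {
  :body          => "<rss>...</rss>",
  :last_modified => Time.httpdate("Tue, 24 Oct 2006 15:00:00 GMT"),
  :expires       => Time.httpdate("Tue, 24 Oct 2006 16:00:00 GMT"),
  :revalidate    => false,  # true when Cache-Control says no-cache/must-revalidate
  :etag          => "\"abc123\"",
  :count         => 1,      # times this entry has been served
  :first_use     => Time.now,
  :last_use      => Time.now
}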
+ def expired?(uri, readtimeout, opentimeout)
+ k = uri.to_s
+ debug "Checking cache validity for #{k}"
+ begin
+ return true unless @cache.key?(k)
+ u = @cache[k]
+
+ # TODO we always revalidate for the time being
+
+ if u[:etag].empty? and u[:last_modified].nil?
+ # TODO max-age
+ return true
+ end
+
+ proxy = get_proxy(uri)
+ proxy.open_timeout = opentimeout
+ proxy.read_timeout = readtimeout
+
+ proxy.start() {|http|
+ yield uri.request_uri() if block_given?
+ headers = @headers
+ headers['If-None-Match'] = u[:etag] unless u[:etag].empty?
+ headers['If-Modified-Since'] = u[:last_modified].rfc2822 if u[:last_modified]
+ # FIXME TODO We might want to use a Get here
+ # because if a 200 OK is returned we would get the new body
+ # with one connection less ...
+ req = Net::HTTP::Head.new(uri.request_uri(), headers)
+ if uri.user and uri.password
+ req.basic_auth(uri.user, uri.password)
+ end
+ resp = http.request(req)
+ debug "Checking cache validity of #{u.inspect} against #{resp.to_hash.inspect}"
+ case resp
+ when Net::HTTPNotModified
+ return false
+ else
+ return true
+ end
+ }
+ rescue => e
+ error "Failed to check cache validity for #{uri}: #{e.inspect}"
+ return true
+ end
+ end
+
# gets a page from the cache if it's still (assumed to be) valid
# TODO remove stale cached pages, except when called with noexpire=true
def get_cached(uri_or_str, readtimeout=10, opentimeout=5,
@@ -252,74 +337,48 @@ class HttpUtil
else
uri = URI.parse(uri_or_str.to_s)
end
+ debug "Getting cached #{uri}"
- k = uri.to_s
- if !@cache.key?(k)
- remove_stale_cache unless noexpire
- return get(uri, readtimeout, opentimeout, max_redir, true)
- end
- now = Time.new
- begin
- # See if the last-modified header can be used
- # Assumption: the page was not modified if both the header
- # and the cached copy have the last-modified value, and it's the same time
- # If only one of the cached copy and the header have the value, or if the
- value is different, we assume that the cached copy is invalid and therefore
- # get a new one.
- # On our first try, we tested for last-modified in the webpage first,
- # and then on the local cache. however, this is stupid (in general),
- # so we only test for the remote page if the local copy had the header
- # in the first place.
- if @cache[k].key?(:last_mod)
- h = head(uri, readtimeout, opentimeout, max_redir)
- if h.key?('last-modified')
- if Time.httpdate(h['last-modified']) == @cache[k][:last_mod]
- if h.key?('date')
- @cache[k][:last_use] = Time.httpdate(h['date'])
- else
- @cache[k][:last_use] = now
- end
- @cache[k][:count] += 1
- return @cache[k][:body]
- end
- remove_stale_cache unless noexpire
- return get(uri, readtimeout, opentimeout, max_redir, true)
- end
- remove_stale_cache unless noexpire
- return get(uri, readtimeout, opentimeout, max_redir, true)
- end
- rescue => e
- warning "Error #{e.inspect} getting the page #{uri}, using cache"
- debug e.backtrace.join("\n")
- return @cache[k][:body]
- end
- # If we still haven't returned, we are dealing with a non-redirected document
- # that doesn't have the last-modified attribute
- debug "Could not use last-modified attribute for URL #{uri}, guessing cache validity"
- if noexpire or !expired?(@cache[k], now)
+ if expired?(uri, readtimeout, opentimeout)
+ debug "Cache expired"
+ bod = get(uri, readtimeout, opentimeout, max_redir, [noexpire])
+ else
+ debug "Using cache"
@cache[k][:count] += 1
@cache[k][:last_use] = now
- debug "Using cache"
- return @cache[k][:body]
+ bod = @cache[k][:body]
end
- debug "Cache expired, getting anew"
- @cache.delete(k)
- remove_stale_cache unless noexpire
- return get(uri, readtimeout, opentimeout, max_redir, true)
+ unless noexpire
+ remove_stale_cache
+ end
+ return bod
end
- def expired?(hash, time)
- (time - hash[:last_use] > @bot.config['http.expire_time']*60) or
- (time - hash[:first_use] > @bot.config['http.max_cache_time']*60)
+ # We consider a page to be manually expired if it has no
+ # etag and no last-modified and if any of the expiration
+ # conditions are met (expire_time, max_cache_time, Expires)
+ def manually_expired?(hash, time)
+ auto = hash[:etag].empty? and hash[:last_modified].nil?
+ # TODO max-age
+ manual = (time - hash[:last_use] > @bot.config['http.expire_time']*60) or
+ (time - hash[:first_use] > @bot.config['http.max_cache_time']*60) or
+ (hash[:expires] < time)
+ return (auto and manual)
end
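
One subtlety worth flagging in manually_expired?: Ruby's and/or bind more loosely than =, so auto = hash[:etag].empty? and hash[:last_modified].nil? parses as (auto = hash[:etag].empty?) and ..., meaning auto is set by the etag test alone; the or chain assigned to manual behaves the same way. A two-line illustration of the pitfall:

x = true and false   # parses as (x = true) and false
x                    # => true
y = true && false    # && binds tighter than =
y                    # => false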
def remove_stale_cache
+ debug "Removing stale cache"
+ debug "#{@cache.size} pages before"
+ begin
now = Time.new
@cache.reject! { |k, val|
- !val.key?(:last_modified) && expired?(val, now)
+ manually_expired?(val, now)
}
+ rescue => e
+ error "Failed to remove stale cache: #{e.inspect}"
+ end
+ debug "#{@cache.size} pages after"
end
-
end
end
end