require 'net/http'
require 'uri'

# Value object for a URL seen on IRC: where it was said, by whom, and when.
Url = Struct.new("Url", :channel, :nick, :time, :url)

# Extracts the contents of the first <title>...</title> element.
# Case-insensitive and multiline so titles spanning lines still match.
TITLE_RE = /<\s*?title\s*?>(.+?)<\s*?\/title\s*?>/im

class UrlPlugin < Plugin
BotConfig.register BotConfigIntegerValue.new('url.max_urls',
:default => 100, :validate => Proc.new{|v| v > 0},
:desc => "Maximum number of urls to store. New urls replace oldest ones.")
BotConfig.register BotConfigBooleanValue.new('url.display_link_info',
- :default => true,
+ :default => false,
:desc => "Get the title of any links pasted to the channel and display it (also tells if the link is broken or the site is down)")
def initialize
"urls [<max>=4] => list <max> last urls mentioned in current channel, urls search [<max>=4] <regexp> => search for matching urls. In a private message, you must specify the channel to query, eg. urls <channel> [max], urls search <channel> [max] <regexp>"
end
- def unescape_title(htmldata)
- # first pass -- let CGI try to attack it...
- htmldata = CGI::unescapeHTML htmldata
-
- # second pass -- destroy the remaining bits...
- htmldata.gsub(/(&(.+?);)/) {
- symbol = $2
-
- # remove the 0-paddng from unicode integers
- if symbol =~ /#(.+)/
- symbol = "##{$1.to_i.to_s}"
- end
-
- # output the symbol's irc-translated character, or a * if it's unknown
- UNESCAPE_TABLE[symbol] || '*'
- }
- end
-
def get_title_from_html(pagedata)
return unless TITLE_RE.match(pagedata)
title = $1.strip.gsub(/\s*\n+\s*/, " ")
- title = unescape_title title
+ title = Utils.decode_html_entities title
title = title[0..255] if title.length > 255
"[Link Info] title: #{title}"
end
- def get_title_for_url(uri_str, depth=10)
+ def read_data_from_response(response, amount)
+
+ amount_read = 0
+ chunks = []
+
+ response.read_body do |chunk| # read body now
+
+ amount_read += chunk.length
+
+ if amount_read > amount
+ amount_of_overflow = amount_read - amount
+ chunk = chunk[0...-amount_of_overflow]
+ end
+
+ chunks << chunk
+
+ break if amount_read >= amount
+
+ end
+
+ chunks.join('')
+
+ end
+
+ def get_title_for_url(uri_str, depth=@bot.config['http.max_redir'])
# This god-awful mess is what the ruby http library has reduced me to.
# Python's HTTP lib is so much nicer. :~(
raise "Error: Maximum redirects hit."
end
- puts "+ Getting #{uri_str}"
- url = URI.parse(uri_str)
+ debug "+ Getting #{uri_str.to_s}"
+ url = uri_str.kind_of?(URI) ? uri_str : URI.parse(uri_str)
return if url.scheme !~ /https?/
+
+ title = nil
- puts "+ connecting to #{url.host}:#{url.port}"
- http = @bot.httputil.get_proxy(url)
- title = http.start do |http|
- url.path = '/' if url.path == ''
- head = http.request_head(url.path)
- case head
- when Net::HTTPRedirection then
- # call self recursively if this is a redirect
- redirect_to = head['location']
- puts "+ redirect location: #{redirect_to}"
- url = URI.join url.to_s, redirect_to
- puts "+ whee, redirecting to #{url.to_s}!"
- title = get_title_for_url(url.to_s, depth-1)
- when Net::HTTPSuccess then
- if head['content-type'] =~ /^text\// and (not head['content-length'] or head['content-length'].to_i < 400000)
- # since the content is 'text/*' and is small enough to
- # be a webpage, retrieve the title from the page
- puts "+ getting #{url.request_uri}"
- response = http.request_get(url.request_uri)
- return get_title_from_html(response.body)
+ debug "+ connecting to #{url.host}:#{url.port}"
+ http = @bot.httputil.get_proxy(url)
+ http.start { |http|
+
+ http.request_get(url.request_uri(), @bot.httputil.headers) { |response|
+
+ case response
+ when Net::HTTPRedirection
+ # call self recursively if this is a redirect
+ redirect_to = response['location'] || '/'
+ debug "+ redirect location: #{redirect_to.inspect}"
+ url = URI.join(url.to_s, redirect_to)
+ debug "+ whee, redirecting to #{url.to_s}!"
+ return get_title_for_url(url, depth-1)
+ when Net::HTTPSuccess
+ if response['content-type'] =~ /^text\//
+ # since the content is 'text/*' and is small enough to
+ # be a webpage, retrieve the title from the page
+ debug "+ getting #{url.request_uri}"
+ # was 5*10^4 ... seems to much to me ... 4k should be enough for everybody ;)
+ data = read_data_from_response(response, 4096)
+ return get_title_from_html(data)
+ else
+ # content doesn't have title, just display info.
+ size = response['content-length'].gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2')
+ size = size ? ", size: #{size} bytes" : ""
+ return "[Link Info] type: #{response['content-type']}#{size}"
+ end
else
- # content doesn't have title, just display info.
- size = head['content-length'].gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2')
- #lastmod = head['last-modified']
- return "[Link Info] type: #{head['content-type']}#{size ? ", size: #{size} bytes" : ""}"
- end
- when Net::HTTPClientError then
- return "[Link Info] Error getting link (#{head.code} - #{head.message})"
- when Net::HTTPServerError then
- return "[Link Info] Error getting link (#{head.code} - #{head.message})"
- end
- end
+ return "[Link Info] Error getting link (#{response.code} - #{response.message})"
+ end # end of "case response"
+
+ } # end of request block
+ } # end of http start block
+
+ return title
+
rescue SocketError => e
return "[Link Info] Error connecting to site (#{e.message})"
end