-require 'uri'
+define_structure :Url, :channel, :nick, :time, :url, :info
-Url = Struct.new("Url", :channel, :nick, :time, :url)
-TITLE_RE = /<\s*?title\s*?>(.+?)<\s*?\/title\s*?>/im
-LINK_INFO = "[Link Info]"
+class ::UrlLinkError < RuntimeError
+end
class UrlPlugin < Plugin
+ TITLE_RE = /<\s*?title\s*?>(.+?)<\s*?\/title\s*?>/im
+ LINK_INFO = "[Link Info]"
+
BotConfig.register BotConfigIntegerValue.new('url.max_urls',
:default => 100, :validate => Proc.new{|v| v > 0},
:desc => "Maximum number of urls to store. New urls replace oldest ones.")
BotConfig.register BotConfigBooleanValue.new('url.first_par',
:default => false,
:desc => "Also try to get the first paragraph of a web page")
+ BotConfig.register BotConfigBooleanValue.new('url.info_on_list',
+ :default => false,
+ :desc => "Show link info when listing/searching for urls")
+
def initialize
super
$1.ircify_html
end
- def get_title_for_url(uri_str)
+ def get_title_for_url(uri_str, nick = nil, channel = nil)
url = uri_str.kind_of?(URI) ? uri_str : URI.parse(uri_str)
return if url.scheme !~ /https?/
+ logopts = Hash.new
+ logopts[:nick] = nick if nick
+ logopts[:channel] = channel if channel
+
title = nil
+ extra = String.new
begin
- range = @bot.config['http.info_bytes']
- response = @bot.httputil.get_response(url, :range => "bytes=0-#{range}")
- if response.code != "206" && response.code != "200"
- return "Error getting link (#{response.code} - #{response.message})"
- end
- extra = String.new
-
- if response['content-type'] =~ /^text\//
-
- body = response.body.slice(0, range)
- title = String.new
+ debug "+ getting #{url.request_uri}"
+ @bot.httputil.get_response(url) { |resp|
+ case resp
+ when Net::HTTPSuccess
+
+ debug resp.to_hash
+
+ if resp['content-type'] =~ /^text\/|(?:x|ht)ml/
+ # The page is text or HTML, so we can try finding a title and, if
+ # requested, the first par.
+ #
+ # We act differently depending on whether we want the first par or
+          # not: in the first case we download the initial part and then parse
+ # it; in the second case we only download as much as we need to find
+ # the title
+ #
+ if @bot.config['url.first_par']
+ partial = resp.partial_body(@bot.config['http.info_bytes'])
+ logopts[:title] = title = get_title_from_html(partial)
+ first_par = Utils.ircify_first_html_par(partial, :strip => title)
+ unless first_par.empty?
+ logopts[:extra] = first_par
+ extra << ", #{Bold}text#{Bold}: #{first_par}"
+ end
+ call_event(:url_added, url.to_s, logopts)
+ return "#{Bold}title#{Bold}: #{title}#{extra}" if title
+ else
+ resp.partial_body(@bot.config['http.info_bytes']) { |part|
+ logopts[:title] = title = get_title_from_html(part)
+ call_event(:url_added, url.to_s, logopts)
+ return "#{Bold}title#{Bold}: #{title}" if title
+ }
+ end
+ # if nothing was found, provide more basic info, as for non-html pages
+ else
+ resp.no_cache = true
+ end
- # since the content is 'text/*' and is small enough to
- # be a webpage, retrieve the title from the page
- debug "+ getting #{url.request_uri}"
+ enc = resp['content-encoding']
+ logopts[:extra] = String.new
+ logopts[:extra] << "Content Type: #{resp['content-type']}"
+ if enc
+ logopts[:extra] << ", encoding: #{enc}"
+ extra << ", #{Bold}encoding#{Bold}: #{enc}"
+ end
- # we act differently depending on whether we want the first par or not:
- # in the first case we download the initial part and the parse it; in the second
- # case we only download as much as we need to find the title
- if @bot.config['url.first_par']
- title = get_title_from_html(body)
- first_par = Utils.ircify_first_html_par(body, :strip => title)
- extra << "\n#{LINK_INFO} text: #{first_par}" unless first_par.empty?
- return "title: #{title}#{extra}" if title
+ unless @bot.config['url.titles_only']
+ # content doesn't have title, just display info.
+ size = resp['content-length'].gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2') rescue nil
+ if size
+ logopts[:extra] << ", size: #{size} bytes"
+ size = ", #{Bold}size#{Bold}: #{size} bytes"
+ end
+ call_event(:url_added, url.to_s, logopts)
+ return "#{Bold}type#{Bold}: #{resp['content-type']}#{size}#{extra}"
+ end
+ call_event(:url_added, url.to_s, logopts)
else
- title = get_title_from_html(body)
- return "title: #{title}" if title
+ raise UrlLinkError, "getting link (#{resp.code} - #{resp.message})"
end
-
- # if nothing was found, provide more basic info
- end
-
- debug response.to_hash.inspect
- unless @bot.config['url.titles_only']
- # content doesn't have title, just display info.
- size = response['content-length'].gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2') rescue nil
- size = size ? ", size: #{size} bytes" : ""
- return "type: #{response['content-type']}#{size}#{extra}"
- end
+ }
+ return nil
rescue Exception => e
- error e.inspect
- debug e.backtrace.join("\n")
- return "Error connecting to site (#{e.message})"
+ case e
+ when UrlLinkError
+ raise e
+ else
+ error e
+ raise "connecting to site/processing information (#{e.message})"
+ end
end
end
urlstr = $1
list = @registry[m.target]
+ title = nil
if @bot.config['url.display_link_info']
Thread.start do
debug "Getting title for #{urlstr}..."
begin
- title = get_title_for_url urlstr
+ title = get_title_for_url urlstr, m.source.nick, m.channel
if title
m.reply "#{LINK_INFO} #{title}", :overlong => :truncate
debug "Title found!"
debug "Title not found!"
end
rescue => e
- debug "Failed: #{e}"
+ m.reply "Error #{e.message}"
end
end
end
# check to see if this url is already listed
return if list.find {|u| u.url == urlstr }
- url = Url.new(m.target, m.sourcenick, Time.new, urlstr)
+ url = Url.new(m.target, m.sourcenick, Time.new, urlstr, title)
debug "#{list.length} urls so far"
if list.length > @bot.config['url.max_urls']
list.pop
end
end
+ def reply_urls(opts={})
+ list = opts[:list]
+ max = opts[:max]
+ channel = opts[:channel]
+ m = opts[:msg]
+ return unless list and max and m
+ list[0..(max-1)].each do |url|
+ disp = "[#{url.time.strftime('%Y/%m/%d %H:%M:%S')}] <#{url.nick}> #{url.url}"
+ if @bot.config['url.info_on_list']
+ title = url.info || get_title_for_url(url.url, url.nick, channel) rescue nil
+ # If the url info was missing and we now have some, try to upgrade it
+ if channel and title and not url.info
+ ll = @registry[channel]
+ debug ll
+ if el = ll.find { |u| u.url == url.url }
+ el.info = title
+ @registry[channel] = ll
+ end
+ end
+ disp << " --> #{title}" if title
+ end
+ m.reply disp, :overlong => :truncate
+ end
+ end
+
def urls(m, params)
channel = params[:channel] ? params[:channel] : m.target
max = params[:limit].to_i
if list.empty?
m.reply "no urls seen yet for channel #{channel}"
else
- list[0..(max-1)].each do |url|
- m.reply "[#{url.time.strftime('%Y/%m/%d %H:%M:%S')}] <#{url.nick}> #{url.url}"
- end
+ reply_urls :msg => m, :channel => channel, :list => list, :max => max
end
end
max = 1 if max < 1
regex = Regexp.new(string, Regexp::IGNORECASE)
list = @registry[channel].find_all {|url|
- regex.match(url.url) || regex.match(url.nick)
+ regex.match(url.url) || regex.match(url.nick) ||
+ (@bot.config['url.info_on_list'] && regex.match(url.info))
}
if list.empty?
m.reply "no matches for channel #{channel}"
else
- list[0..(max-1)].each do |url|
- m.reply "[#{url.time.strftime('%Y/%m/%d %H:%M:%S')}] <#{url.nick}> #{url.url}"
- end
+ reply_urls :msg => m, :channel => channel, :list => list, :max => max
end
end
end
+
plugin = UrlPlugin.new
plugin.map 'urls search :channel :limit :string', :action => 'search',
:defaults => {:limit => 4},