-require 'uri'
+#-- vim:sw=2:et
+#++
+#
+# :title: Url plugin
-Url = Struct.new("Url", :channel, :nick, :time, :url)
-TITLE_RE = /<\s*?title\s*?>(.+?)<\s*?\/title\s*?>/im
+define_structure :Url, :channel, :nick, :time, :url, :info
+
+class ::UrlLinkError < RuntimeError
+end
class UrlPlugin < Plugin
- BotConfig.register BotConfigIntegerValue.new('url.max_urls',
+ TITLE_RE = /<\s*?title\s*?>(.+?)<\s*?\/title\s*?>/im
+ LINK_INFO = "[Link Info]"
+ OUR_UNSAFE = Regexp.new("[^#{URI::PATTERN::UNRESERVED}#{URI::PATTERN::RESERVED}%# ]", false, 'N')
+
+ Config.register Config::IntegerValue.new('url.max_urls',
:default => 100, :validate => Proc.new{|v| v > 0},
:desc => "Maximum number of urls to store. New urls replace oldest ones.")
- BotConfig.register BotConfigBooleanValue.new('url.display_link_info',
- :default => false,
- :desc => "Get the title of any links pasted to the channel and display it (also tells if the link is broken or the site is down)")
- BotConfig.register BotConfigBooleanValue.new('url.titles_only',
+ Config.register Config::IntegerValue.new('url.display_link_info',
+ :default => 0,
+ :desc => "Get the title of links pasted to the channel and display it (also tells if the link is broken or the site is down). Do it for at most this many links per line (set to 0 to disable)")
+ Config.register Config::BooleanValue.new('url.titles_only',
:default => false,
:desc => "Only show info for links that have <title> tags (in other words, don't display info for jpegs, mpegs, etc.)")
+ Config.register Config::BooleanValue.new('url.first_par',
+ :default => false,
+ :desc => "Also try to get the first paragraph of a web page")
+ Config.register Config::BooleanValue.new('url.info_on_list',
+ :default => false,
+ :desc => "Show link info when listing/searching for urls")
+ Config.register Config::ArrayValue.new('url.no_info_hosts',
+ :default => ['localhost', '^192\.168\.', '^10\.', '^127\.', '^172\.(1[6-9]|2\d|31)\.'],
+ :on_change => Proc.new { |bot, v| bot.plugins['url'].reset_no_info_hosts },
+ :desc => "A list of regular expressions matching hosts for which no info should be provided")
+
def initialize
super
@registry.set_default(Array.new)
+ unless @bot.config['url.display_link_info'].kind_of?(Integer)
+ @bot.config.items[:'url.display_link_info'].set_string(@bot.config['url.display_link_info'].to_s)
+ end
+ reset_no_info_hosts
+ end
+
+ def reset_no_info_hosts
+ @no_info_hosts = Regexp.new(@bot.config['url.no_info_hosts'].join('|'), true)
+ debug "no info hosts regexp set to #{@no_info_hosts}"
end
def help(plugin, topic="")
def get_title_from_html(pagedata)
return unless TITLE_RE.match(pagedata)
- title = $1.strip.gsub(/\s*\n+\s*/, " ")
- title = Utils.decode_html_entities title
- title = title[0..255] if title.length > 255
- "[Link Info] title: #{title}"
+ $1.ircify_html
end
- def read_data_from_response(response, amount)
+ def get_title_for_url(uri_str, nick = nil, channel = nil, ircline = nil)
- amount_read = 0
- chunks = []
+ url = uri_str.kind_of?(URI) ? uri_str : URI.parse(uri_str)
+ return if url.scheme !~ /https?/
- response.read_body do |chunk| # read body now
+ if url.host =~ @no_info_hosts
+ return "Sorry, info retrieval for #{url.host} is disabled"
+ end
- amount_read += chunk.length
+ logopts = Hash.new
+ logopts[:nick] = nick if nick
+ logopts[:channel] = channel if channel
+ logopts[:ircline] = ircline if ircline
- if amount_read > amount
- amount_of_overflow = amount_read - amount
- chunk = chunk[0...-amount_of_overflow]
- end
+ title = nil
+ extra = String.new
- chunks << chunk
+ begin
+ debug "+ getting #{url.request_uri}"
+ @bot.httputil.get_response(url) { |resp|
+ case resp
+ when Net::HTTPSuccess
- break if amount_read >= amount
+ debug resp.to_hash
- end
+ if resp['content-type'] =~ /^text\/|(?:x|ht)ml/
+ # The page is text or HTML, so we can try finding a title and, if
+ # requested, the first par.
+ #
+ # We act differently depending on whether we want the first par or
+            # not: in the first case we download the initial part and then parse
+ # it; in the second case we only download as much as we need to find
+ # the title
+ #
+ if @bot.config['url.first_par']
+ partial = resp.partial_body(@bot.config['http.info_bytes'])
+ logopts[:title] = title = get_title_from_html(partial)
+ if url.fragment and not url.fragment.empty?
+ fragreg = /.*?<a\s+[^>]*name=["']?#{url.fragment}["']?.*?>/im
+ partial.sub!(fragreg,'')
+ end
+ first_par = Utils.ircify_first_html_par(partial, :strip => title)
+ unless first_par.empty?
+ logopts[:extra] = first_par
+ extra << ", #{Bold}text#{Bold}: #{first_par}"
+ end
+ call_event(:url_added, url.to_s, logopts)
+ return "#{Bold}title#{Bold}: #{title}#{extra}" if title
+ else
+ resp.partial_body(@bot.config['http.info_bytes']) { |part|
+ logopts[:title] = title = get_title_from_html(part)
+ call_event(:url_added, url.to_s, logopts)
+ return "#{Bold}title#{Bold}: #{title}" if title
+ }
+ end
+ # if nothing was found, provide more basic info, as for non-html pages
+ else
+ resp.no_cache = true
+ end
- chunks.join('')
+ enc = resp['content-encoding']
+ logopts[:extra] = String.new
+ logopts[:extra] << "Content Type: #{resp['content-type']}"
+ if enc
+ logopts[:extra] << ", encoding: #{enc}"
+ extra << ", #{Bold}encoding#{Bold}: #{enc}"
+ end
+ unless @bot.config['url.titles_only']
+ # content doesn't have title, just display info.
+ size = resp['content-length'].gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2') rescue nil
+ if size
+ logopts[:extra] << ", size: #{size} bytes"
+ size = ", #{Bold}size#{Bold}: #{size} bytes"
+ end
+ call_event(:url_added, url.to_s, logopts)
+ return "#{Bold}type#{Bold}: #{resp['content-type']}#{size}#{extra}"
+ end
+ call_event(:url_added, url.to_s, logopts)
+ else
+ raise UrlLinkError, "getting link (#{resp.code} - #{resp.message})"
+ end
+ }
+ return nil
+ rescue Exception => e
+ case e
+ when UrlLinkError
+ raise e
+ else
+ error e
+ raise "connecting to site/processing information (#{e.message})"
+ end
+ end
end
- def get_title_for_url(uri_str, depth=@bot.config['http.max_redir'])
- # This god-awful mess is what the ruby http library has reduced me to.
- # Python's HTTP lib is so much nicer. :~(
-
- if depth == 0
- raise "Error: Maximum redirects hit."
+ def handle_urls(m, urls, display_info=@bot.config['url.display_link_info'])
+ return if urls.empty?
+ debug "found urls #{urls.inspect}"
+ if m.public?
+ list = @registry[m.target]
+ else
+ list = nil
end
-
- debug "+ Getting #{uri_str.to_s}"
- url = uri_str.kind_of?(URI) ? uri_str : URI.parse(uri_str)
- return if url.scheme !~ /https?/
-
- title = nil
-
- debug "+ connecting to #{url.host}:#{url.port}"
- http = @bot.httputil.get_proxy(url)
- http.start { |http|
-
- http.request_get(url.request_uri(), @bot.httputil.headers) { |response|
-
- case response
- when Net::HTTPRedirection
- # call self recursively if this is a redirect
- redirect_to = response['location'] || '/'
- debug "+ redirect location: #{redirect_to.inspect}"
- url = URI.join(url.to_s, redirect_to)
- debug "+ whee, redirecting to #{url.to_s}!"
- return get_title_for_url(url, depth-1)
- when Net::HTTPSuccess
- if response['content-type'] =~ /^text\//
- # since the content is 'text/*' and is small enough to
- # be a webpage, retrieve the title from the page
- debug "+ getting #{url.request_uri}"
- # was 5*10^4 ... seems to much to me ... 4k should be enough for everybody ;)
- data = read_data_from_response(response, 4096)
- return get_title_from_html(data)
+ urls_displayed = 0
+ urls.each { |urlstr|
+ debug "working on #{urlstr}"
+ next unless urlstr =~ /^https?:/
+ title = nil
+ debug "display link info: #{display_info}"
+ if display_info > urls_displayed
+ urls_displayed += 1
+ Thread.start do
+ debug "Getting title for #{urlstr}..."
+ begin
+ title = get_title_for_url urlstr, m.source.nick, m.channel, m.message
+ if title
+ m.reply "#{LINK_INFO} #{title}", :overlong => :truncate
+ debug "Title found!"
else
- unless @bot.config['url.titles_only']
- # content doesn't have title, just display info.
- size = response['content-length'].gsub(/(\d)(?=\d{3}+(?:\.|$))(\d{3}\..*)?/,'\1,\2')
- size = size ? ", size: #{size} bytes" : ""
- return "[Link Info] type: #{response['content-type']}#{size}"
- end
+ debug "Title not found!"
end
- else
- return "[Link Info] Error getting link (#{response.code} - #{response.message})"
- end # end of "case response"
+ rescue => e
+ m.reply "Error #{e.message}"
+ end
+ end
+ end
+
+ next unless list
- } # end of request block
- } # end of http start block
+ # check to see if this url is already listed
+ next if list.find {|u| u.url == urlstr }
- return title
+ url = Url.new(m.target, m.sourcenick, Time.new, urlstr, title)
+ debug "#{list.length} urls so far"
+ if list.length > @bot.config['url.max_urls']
+ list.pop
+ end
+ debug "storing url #{url.url}"
+ list.unshift url
+ debug "#{list.length} urls now"
+ }
+ @registry[m.target] = list
+ end
- rescue SocketError => e
- return "[Link Info] Error connecting to site (#{e.message})"
+ def info(m, params)
+ escaped = URI.escape(params[:urls].to_s, OUR_UNSAFE)
+ urls = URI.extract(escaped)
+ handle_urls(m, urls, params[:urls].length)
end
def listen(m)
return unless m.kind_of?(PrivMessage)
return if m.address?
- # TODO support multiple urls in one line
- if m.message =~ /(f|ht)tps?:\/\//
- if m.message =~ /((f|ht)tps?:\/\/.*?)(?:\s+|$)/
- urlstr = $1
- list = @registry[m.target]
- if @bot.config['url.display_link_info']
- debug "Getting title for #{urlstr}..."
- begin
- title = get_title_for_url urlstr
- if title
- m.reply title
- debug "Title found!"
- else
- debug "Title not found!"
- end
- rescue => e
- debug "Failed: #{e}"
- end
- end
-
- # check to see if this url is already listed
- return if list.find {|u| u.url == urlstr }
+ escaped = URI.escape(m.message, OUR_UNSAFE)
+ urls = URI.extract(escaped)
+ handle_urls(m, urls)
+ end
- url = Url.new(m.target, m.sourcenick, Time.new, urlstr)
- debug "#{list.length} urls so far"
- if list.length > @bot.config['url.max_urls']
- list.pop
+ def reply_urls(opts={})
+ list = opts[:list]
+ max = opts[:max]
+ channel = opts[:channel]
+ m = opts[:msg]
+ return unless list and max and m
+ list[0..(max-1)].each do |url|
+ disp = "[#{url.time.strftime('%Y/%m/%d %H:%M:%S')}] <#{url.nick}> #{url.url}"
+ if @bot.config['url.info_on_list']
+ title = url.info || get_title_for_url(url.url, url.nick, channel) rescue nil
+ # If the url info was missing and we now have some, try to upgrade it
+ if channel and title and not url.info
+ ll = @registry[channel]
+ debug ll
+ if el = ll.find { |u| u.url == url.url }
+ el.info = title
+ @registry[channel] = ll
+ end
end
- debug "storing url #{url.url}"
- list.unshift url
- debug "#{list.length} urls now"
- @registry[m.target] = list
+ disp << " --> #{title}" if title
end
+ m.reply disp, :overlong => :truncate
end
end
if list.empty?
m.reply "no urls seen yet for channel #{channel}"
else
- list[0..(max-1)].each do |url|
- m.reply "[#{url.time.strftime('%Y/%m/%d %H:%M:%S')}] <#{url.nick}> #{url.url}"
- end
+ reply_urls :msg => m, :channel => channel, :list => list, :max => max
end
end
max = 1 if max < 1
regex = Regexp.new(string, Regexp::IGNORECASE)
list = @registry[channel].find_all {|url|
- regex.match(url.url) || regex.match(url.nick)
+ regex.match(url.url) || regex.match(url.nick) ||
+ (@bot.config['url.info_on_list'] && regex.match(url.info))
}
if list.empty?
m.reply "no matches for channel #{channel}"
else
- list[0..(max-1)].each do |url|
- m.reply "[#{url.time.strftime('%Y/%m/%d %H:%M:%S')}] <#{url.nick}> #{url.url}"
- end
+ reply_urls :msg => m, :channel => channel, :list => list, :max => max
end
end
end
+
plugin = UrlPlugin.new
+plugin.map 'urls info *urls', :action => 'info'
plugin.map 'urls search :channel :limit :string', :action => 'search',
:defaults => {:limit => 4},
:requirements => {:limit => /^\d+$/},